Dataset columns (name, dtype, observed range; ⌀ marks nullable columns):

| column | dtype | range |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 3 – 1.03M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 3 – 972 |
| max_stars_repo_name | string | length 6 – 130 |
| max_stars_repo_head_hexsha | string | length 40 – 78 |
| max_stars_repo_licenses | list | length 1 – 10 |
| max_stars_count | int64 | 1 – 191k ⌀ |
| max_stars_repo_stars_event_min_datetime | string | length 24 ⌀ |
| max_stars_repo_stars_event_max_datetime | string | length 24 ⌀ |
| max_issues_repo_path | string | length 3 – 972 |
| max_issues_repo_name | string | length 6 – 130 |
| max_issues_repo_head_hexsha | string | length 40 – 78 |
| max_issues_repo_licenses | list | length 1 – 10 |
| max_issues_count | int64 | 1 – 116k ⌀ |
| max_issues_repo_issues_event_min_datetime | string | length 24 ⌀ |
| max_issues_repo_issues_event_max_datetime | string | length 24 ⌀ |
| max_forks_repo_path | string | length 3 – 972 |
| max_forks_repo_name | string | length 6 – 130 |
| max_forks_repo_head_hexsha | string | length 40 – 78 |
| max_forks_repo_licenses | list | length 1 – 10 |
| max_forks_count | int64 | 1 – 105k ⌀ |
| max_forks_repo_forks_event_min_datetime | string | length 24 ⌀ |
| max_forks_repo_forks_event_max_datetime | string | length 24 ⌀ |
| content | string | length 3 – 1.03M |
| avg_line_length | float64 | 1.13 – 941k |
| max_line_length | int64 | 2 – 941k |
| alphanum_fraction | float64 | 0 – 1 |

Each record below lists the metadata fields on one pipe-separated line in the order above, followed by the file `content` as a code listing and a trailing `| avg_line_length | max_line_length | alphanum_fraction |` line.
5c122719417b141746298621cd97a233ace933af | 1,029 | py | Python | main.py | marcotessarotto/learnslo | 51bcb75364353f4ae26f33fc613555751c2fa310 | ["MIT"] | 1 | 2020-07-08T20:03:10.000Z | 2020-07-08T20:03:10.000Z | main.py | marcotessarotto/learnslo | 51bcb75364353f4ae26f33fc613555751c2fa310 | ["MIT"] | null | null | null | main.py | marcotessarotto/learnslo | 51bcb75364353f4ae26f33fc613555751c2fa310 | ["MIT"] | null | null | null |
import enota1
import enota2
import enota3
import enota4
import enota5
import enota6
import extra
import numbers
import time
import verbs
while 1:
    print("a: enota 1")
    print("b: enota 2")
    # print("c: enota 3")
    # print("d: enota 4")
    print("e: enota 5")
    print("f: enota 6")
    print("g: extra")
    print("h: numbers")
    print("i: time")
    print("l: verbs")
    # print("z: tutti i test")
    print("q: esci")
    cmd = input("scegli il test:")
    if cmd == 'q':
        print("bye!")
        exit(0)
    elif cmd == 'a':
        enota1.run_me()
    elif cmd == 'b':
        enota2.run_me()
    elif cmd == 'c':
        enota3.run_me()
    elif cmd == 'd':
        enota4.run_me()
    elif cmd == 'e':
        enota5.run_me()
    elif cmd == 'f':
        enota6.run_me()
    elif cmd == 'g':
        extra.run_me()
    elif cmd == 'h':
        numbers.run_me()
    elif cmd == 'i':
        time.run_me()
    elif cmd == 'l':
        verbs.run_me()
    else:
        print("non ho capito la scelta!")
| 19.415094 | 41 | 0.515063 |
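The three numbers trailing each record are the derived columns from the schema above. As a rough cross-check they can presumably be recomputed from `content`; the exact formulas used by the dataset are an assumption, and `file_stats` below is a hypothetical helper, not part of the dataset tooling:

```python
# Hypothetical helper: recompute the derived per-file statistics from `content`.
# Assumed definitions: avg_line_length = mean line length, max_line_length =
# longest line, alphanum_fraction = share of alphanumeric characters.
def file_stats(content: str) -> dict:
    lines = content.splitlines() or [""]
    return {
        "avg_line_length": sum(len(line) for line in lines) / len(lines),
        "max_line_length": max(len(line) for line in lines),
        "alphanum_fraction": (
            sum(c.isalnum() for c in content) / len(content) if content else 0.0
        ),
    }

if __name__ == "__main__":
    print(file_stats("import enota1\nprint('a: enota 1')\n"))
```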
5c99d478d02da12fef57504dad618519a2d12085 | 3,776 | py | Python | eggdriver/resources/math/linear/__init__.py | PythonForChange/eggdriver | bcf1da6dcb2a8daf3144c7af8d1d04f8844be2fc | ["MIT"] | 3 | 2021-09-25T01:22:31.000Z | 2021-11-28T23:25:46.000Z | eggdriver/resources/math/linear/__init__.py | PythonForChange/eggdriver | bcf1da6dcb2a8daf3144c7af8d1d04f8844be2fc | ["MIT"] | null | null | null | eggdriver/resources/math/linear/__init__.py | PythonForChange/eggdriver | bcf1da6dcb2a8daf3144c7af8d1d04f8844be2fc | ["MIT"] | null | null | null |
from eggdriver.resources.structures.lists import List
from eggdriver.resources.utils import indexes
from eggdriver.resources.math.linear.utils import *
class Vector(List):
    def __init__(self, vanillaList = []):
        if type(vanillaList) == str:
            temp = vanillaList.replace("[", "").replace("]", "")
            vanillaList = list(map(int, temp.split()))
        super().__init__(vanillaList)
    def plus(self, vector):
        return plus(self, vector)
    def dot(self, vector):
        return dot(self, vector)
    def scale(self, scalar):
        return scale(self, scalar)
    def expand(self, scalar):
        [self.append(0) for i in range(0, scalar)]

class Matrix(Vector):
    def __init__(self, vectorOfVectors = [], rows = 1, columns = 1, auto_size = True):
        if vectorOfVectors == []:
            vectorOfVectors = Vector()
            for j in range(columns):
                v = Vector()
                v.expand(rows)
                vectorOfVectors.append(v)
        elif type(vectorOfVectors) == str:
            if rows == 0 or columns == 0:
                raise LinearError(2)
            else:
                temp1 = vectorOfVectors.replace("|", "")
                if auto_size:
                    temp2 = temp1.split("\n")
                    temp3 = temp2[1]  # Index is 1 because temp2[0] always is a void string ""
                    columns = len(temp3.split())
                M = list(map(int, temp1.split()))
                output = [M[i:i + columns] for i in range(0, len(M), columns)]
                vectorOfVectors = list(map(Vector, output))
        super().__init__(vectorOfVectors)
    def display(self):
        for i in self:
            print(i.display(True, ["|", "|"]))
    @property
    def det(self):
        return determinant(self)
    @property
    def n(self):
        return len(self)
    @property
    def m(self):
        return len(self[0])

def rowReduce(matrix: Matrix):
    result = Vector()
    return result

def determinant(M):
    result = M[0][0]
    if M.n != M.m:
        raise LinearError(1)
    elif M.n > 1:
        row = M[0]
        result = 0
        for i in indexes(row):
            minor = subMatrix(M, 0, i)
            result = result + (((-1) ** i) * M[0][i] * determinant(minor))
    return result

def subMatrix(M, row, column):
    result = Matrix()
    for i in range(M.n):
        if i != row:
            v = Vector()
            for j in range(M.m):
                if j != column:
                    v.append(M[i][j])
            result.append(v)
    return result

def plus(a, b, autoExpand = False):
    result = Vector()
    if len(a) != len(b):
        if autoExpand:
            dualExpand(a, b)
        else:
            raise LinearError
    if len(a) != 0 and len(a) == len(b):
        for i in range(0, a.size):
            result.append(a[i] + b[i])
    return result

def scale(vector, scalar):
    result = Vector()
    if vector.size != 0:
        for i in vector:
            result.append(scalar * i)
    return result

def vectorize(poly: str):
    """Transform a string polynomial into a coordinates vector"""
    p = poly.split(" ")
    result = Vector()
    coefs = List()
    exps = List()
    for i in p:
        temp = i + "^1"
        monomial = temp.strip("+").split("^")
        c = monomial[0].strip("x")
        if c == "":
            coef = 1.0
        else:
            coef = float(c)
        exp = int(monomial[1])
        if "x" not in i:
            exp = 0
        coefs.append(coef)
        exps.append(exp)
    degree = 0
    for i in indexes(exps):
        if exps[i] > degree:
            degree = exps[i]
    result.expand(degree + 1)
    for i in indexes(coefs):
        result[exps[i]] += coefs[i]
    return result
| 29.968254 | 93 | 0.516949 |
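Two entry points in the listing above are easy to misread: `Vector` accepts either a Python list or a bracketed, space-separated string, and `vectorize` expects whitespace-separated monomials (optionally prefixed with `+`) rather than a single unspaced polynomial. A minimal usage sketch, assuming the module's `List`, `indexes`, and utils helpers behave as the listing implies:

```python
# Sketch only: exercises the string-parsing paths shown in the file above.
from eggdriver.resources.math.linear import Vector, vectorize

v = Vector("[1 2 3]")      # brackets stripped, then split on whitespace -> [1, 2, 3]
print(v)

# Monomials must be space-separated; "3x^2+2x+1" as one token would fail int() parsing.
coords = vectorize("3x^2 +2x +1")
print(coords)              # coordinates indexed by exponent: [1.0, 2.0, 3.0]
```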
fbfac94a988e33c2d42c8cfcfb4f246884878f37 | 1,074 | py | Python | PfamScripts/adda/lib/testComponents.py | ankitskvmdam/rfam-website | 5b200ab64fa7c70778c991f239b8ddb11dce4ed5 | ["Apache-2.0"] | 6 | 2017-01-25T12:53:23.000Z | 2021-03-17T04:52:35.000Z | PfamScripts/adda/lib/testComponents.py | ankitskvmdam/rfam-website | 5b200ab64fa7c70778c991f239b8ddb11dce4ed5 | ["Apache-2.0"] | 50 | 2015-11-06T10:31:46.000Z | 2021-12-03T16:17:28.000Z | PfamScripts/adda/lib/testComponents.py | ankitskvmdam/rfam-website | 5b200ab64fa7c70778c991f239b8ddb11dce4ed5 | ["Apache-2.0"] | 2 | 2019-05-30T00:10:26.000Z | 2021-04-12T09:42:17.000Z |
import unittest, os, glob, re, tempfile, gzip
from Components import *
class MyTest:
    links = ( ( 1, 2),
              ( 1, 3),
              ( 2, 3),
              ( 3, 3),
              ( 4, 5),
              ( 5, 6),
              )

class TestIComponents(unittest.TestCase, MyTest):
    def testAdd( self ):
        c = IComponents()
        for a, b in self.links: c.add( a, b )
        components = [ c.get(x) for x in range(0,8) ]
        self.assertEqual( components, [0,1,1,1,4,4,4,0] )
        self.assertEqual( c.getNumNodes(), 6 )
        self.assertEqual( c.getComponents(), [[1, 2, 3], [4, 5, 6]] )

class TestSComponents(unittest.TestCase, MyTest):
    def testAdd( self ):
        c = SComponents()
        for a, b in self.links: c.add( str(a), str(b) )
        components = [ c.get(str(x)) for x in range(0,8) ]
        self.assertEqual( components, [0,1,1,1,4,4,4,0] )
        self.assertEqual( c.getNumNodes(), 6 )
        self.assertEqual( c.getComponents(), [["1", "2", "3"], ["4", "5", "6"]] )

if __name__ == '__main__':
    unittest.main()
| 28.263158 | 81 | 0.511173 |
dab08f82a6395386161ee5e87a91e8d671e0314f | 1,700 | py | Python | scripts/find_max_seq_length.py | stianste/BERT-NLI | 7d23c9e8704435ba9f673e66ff4b3eb181671782 | ["MIT"] | 4 | 2020-02-17T14:19:48.000Z | 2020-09-15T12:38:24.000Z | scripts/find_max_seq_length.py | stianste/BERT-NLI | 7d23c9e8704435ba9f673e66ff4b3eb181671782 | ["MIT"] | null | null | null | scripts/find_max_seq_length.py | stianste/BERT-NLI | 7d23c9e8704435ba9f673e66ff4b3eb181671782 | ["MIT"] | null | null | null |
import os
from pytorch_pretrained_bert.tokenization import BertTokenizer
def get_toefl_token_sizes():
    token_sizes = []
    full_path = './data/NLI-shared-task-2017/data/essays/train/original/'
    for filename in os.listdir(full_path):
        with open(os.path.join(full_path, filename), 'r') as f:
            text = ''.join(f.readlines()).lower()
            tokens = tokenizer.tokenize(text)
            token_sizes.append(len(tokens))
    return token_sizes

def get_reddit_token_sizes():
    token_sizes = []
    data_dir = './data/RedditL2/text_chunks/europe_data'
    for language_folder in os.listdir(data_dir):
        for username in os.listdir(f'{data_dir}/{language_folder}'):
            for chunk in os.listdir(f'{data_dir}/{language_folder}/{username}'):
                full_path = f'{data_dir}/{language_folder}/{username}/{chunk}'
                with open(full_path, 'r') as f:
                    text = ''.join(f.readlines()).lower()
                    tokens = tokenizer.tokenize(text)
                    token_sizes.append(len(tokens))
    return token_sizes
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased', do_lower_case=True)
token_sizes = get_reddit_token_sizes()
print('Max token size', max(token_sizes)) # 910, 747
avg_size = sum(token_sizes) // len(token_sizes)
print('Avg token size', avg_size) # 369, 365
num_above_average = len([x for x in token_sizes if x > avg_size])
print('Num token size > avg', num_above_average)
print('Percentage', num_above_average / len(token_sizes))
n = 512
num_above_n = len([x for x in token_sizes if x > n])
print(f'Num token size > {n}', num_above_n)
print('Percentage', num_above_n / len(token_sizes))
| 35.416667 | 82 | 0.668824 |
f8e81a70cebb983f178e00feda1e4950123465ed | 20 | py | Python | mpy/t2.py | jeelabs/monty | add6da22dd446cda5e93e023e90520cfdb3fc712 | ["Unlicense"] | 11 | 2021-02-02T02:32:50.000Z | 2021-12-30T12:55:41.000Z | mpy/t2.py | jeelabs/monty | add6da22dd446cda5e93e023e90520cfdb3fc712 | ["Unlicense"] | 75 | 2021-01-27T10:53:10.000Z | 2021-06-30T10:59:49.000Z | mpy/t2.py | jeelabs/monty | add6da22dd446cda5e93e023e90520cfdb3fc712 | ["Unlicense"] | 1 | 2021-09-25T11:18:38.000Z | 2021-09-25T11:18:38.000Z |
assert 42 == 40 + 2
| 10 | 19 | 0.55 |
ff50631f5e741e658bc8b61ee58460d6f64f810f | 386 | py | Python | ocean_lib/models/order.py | LinuxIsCool/ocean.py | da44b96620f5e941583e26e772bbebb3813740dc | ["Apache-2.0"] | 89 | 2020-10-27T08:50:47.000Z | 2022-03-21T08:42:23.000Z | ocean_lib/models/order.py | LinuxIsCool/ocean.py | da44b96620f5e941583e26e772bbebb3813740dc | ["Apache-2.0"] | 480 | 2020-10-30T07:56:39.000Z | 2022-03-31T20:01:27.000Z | ocean_lib/models/order.py | LinuxIsCool/ocean.py | da44b96620f5e941583e26e772bbebb3813740dc | ["Apache-2.0"] | 50 | 2020-11-07T15:01:02.000Z | 2022-03-06T05:49:54.000Z |
#
# Copyright 2021 Ocean Protocol Foundation
# SPDX-License-Identifier: Apache-2.0
#
"""
Defines namedtuple `Order`
"""
from collections import namedtuple
Order = namedtuple(
    "Order",
    (
        "datatoken",
        "amount",
        "timestamp",
        "transactionId",
        "did",
        "payer",
        "consumer",
        "serviceId",
        "serviceType",
    ),
)
| 15.44 | 42 | 0.546632 |
8bf295f961664614f7d9d6895618772f77aeba80 | 2,841 | py | Python | app/core/tests/test_models.py | DamienPond001/recipe-app-api | 586aa3c37175ef6628da4971120880d2c3c2bae2 | ["MIT"] | null | null | null | app/core/tests/test_models.py | DamienPond001/recipe-app-api | 586aa3c37175ef6628da4971120880d2c3c2bae2 | ["MIT"] | 2 | 2020-08-31T09:25:54.000Z | 2020-08-31T10:30:17.000Z | app/core/tests/test_models.py | DamienPond001/recipe-app-api | 586aa3c37175ef6628da4971120880d2c3c2bae2 | ["MIT"] | null | null | null |
from unittest.mock import patch
from django.test import TestCase
from django.contrib.auth import get_user_model
# note that we can import the User Model directly, but if we ever change our
# user model then we would have to change it everywhere instead of just in the
# settings
from core import models
def sample_user(email='test@email.com', password='testPW'):
    return get_user_model().objects.create_user(email, password)

class ModelTests(TestCase):
    def test_create_user_with_email_success(self):
        """Test creating a new user with and email"""
        email = 'test@live.com'
        password = 'password'
        user = get_user_model().objects.create_user(
            email=email,
            password=password
        )
        self.assertEqual(user.email, email)
        self.assertTrue(user.check_password(password))

    def test_new_user_email_normailse(self):
        """Test the email for a new user is normalised"""
        email = "test@UPPERcASE.Com"
        user = get_user_model().objects.create_user(
            email=email,
            password='password'
        )
        self.assertEqual(user.email, email.lower())

    def test_new_user_invalid_email(self):
        """Test creating user with no email raises error"""
        with self.assertRaises(ValueError):
            get_user_model().objects.create_user(None, '123')

    def test_create_superuser(self):
        """Test creating new superuser"""
        user = get_user_model().objects.create_superuser(
            email='test@live.com',
            password='password'
        )
        self.assertTrue(user.is_superuser)
        self.assertTrue(user.is_staff)

    def test_tag_str(self):
        """test tag string repr"""
        tag = models.Tag.objects.create(
            user=sample_user(),
            name='Vegan'
        )
        self.assertEqual(str(tag), tag.name)

    def test_ingredient_str(self):
        """test ingredient string repr"""
        ingredient = models.Ingredient.objects.create(
            user=sample_user(),
            name="Cucumber"
        )
        self.assertEqual(str(ingredient), ingredient.name)

    def test_recipe_str(self):
        """test recipe string repr"""
        recipe = models.Recipe.objects.create(
            user=sample_user(),
            title='Test Recipe',
            time_minutes=5,
            price=5.00
        )
        self.assertEqual(str(recipe), recipe.title)

    @patch('uuid.uuid4')
    def test_recipe_file_name_uuid(self, mock_uuid):
        """test that image is saved in the correct location"""
        uuid = 'test-uuid'
        mock_uuid.return_value = uuid
        file_path = models.recipe_image_file_path(None, 'myimage.jpg')
        expected_path = f'uploads/recipe/{uuid}.jpg'
        self.assertEqual(file_path, expected_path)
| 27.317308 | 78 | 0.632172 |
2c414102d448219ba288b3542fc794484e0e623a | 2,105 | py | Python | tests/conftest.py | rnag/wystia | dfe21764a54d2737814157072c3262aa7b1cec7d | ["MIT"] | 1 | 2022-02-02T21:22:20.000Z | 2022-02-02T21:22:20.000Z | tests/conftest.py | rnag/wystia | dfe21764a54d2737814157072c3262aa7b1cec7d | ["MIT"] | 9 | 2021-06-17T15:11:31.000Z | 2021-12-01T18:49:13.000Z | tests/conftest.py | rnag/wystia | dfe21764a54d2737814157072c3262aa7b1cec7d | ["MIT"] | null | null | null |
"""
Common test fixtures and utilities
"""
import sys
from typing import Union, List
import pytest
from wystia.utils.parse import as_list
# Check if system Python version is 3.6 or greater
PY36 = sys.version_info[0:2] >= (3, 6)
@pytest.fixture(scope='session')
def mock_video_id():
    """Returns a dummy hashed video ID for testing purposes."""
    return 'abc-01234567'

@pytest.fixture(scope='session')
def mock_project_id():
    """Returns a dummy hashed project ID for testing purposes."""
    return 'xyz-76543210'

@pytest.fixture(scope='session')
def mock_api_token():
    """Returns a dummy Wistia API token for testing purposes."""
    return 'abc-xyz-123456789'

@pytest.fixture(scope='session')
def mock_file_name():
    """Returns a dummy file path for testing purposes."""
    return 'abc-1234567.mp4'

class TestsWithMarkSkipper:
    """
    Util to skip tests with mark, unless cli option provided.
    """
    def __init__(self, test_marks: Union[str, List[str]],
                 cli_option_name: str,
                 cli_option_help: str):
        self.test_marks = as_list(test_marks)
        self.cli_option_name = cli_option_name
        self.cli_option_help = cli_option_help

    def pytest_addoption_hook(self, parser):
        parser.addoption(
            self.cli_option_name,
            action='store_true',
            default=False,
            help=self.cli_option_help,
        )

    def pytest_runtest_setup(self, item):
        if any(mark in item.keywords for mark in self.test_marks) \
                and not item.config.getoption(self.cli_option_name):
            self._skip_test()

    def _skip_test(self):
        reason = 'need {} option to run this test'.format(self.cli_option_name)
        pytest.skip(reason)

mark_skipper = TestsWithMarkSkipper(
    test_marks=['mutative', 'long'],
    cli_option_name="--run-all",
    cli_option_help="run all test cases, including any potentially "
                    "destructive tests",
)

pytest_addoption = mark_skipper.pytest_addoption_hook
pytest_runtest_setup = mark_skipper.pytest_runtest_setup
| 26.3125 | 79 | 0.674109 |
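The `TestsWithMarkSkipper` above wires two pytest hooks at module level. A sketch of how a marked test would interact with it (the test module, test name, and marker usage are illustrative, not taken from the wystia repo):

```python
# test_upload.py (illustrative)
import pytest

@pytest.mark.mutative          # skipped unless pytest is invoked with --run-all
def test_delete_all_videos():
    ...
```

Without `--run-all`, `pytest_runtest_setup` sees the `mutative` mark in `item.keywords`, the option evaluates falsy, and `pytest.skip` fires before the test body runs.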
4c235d43a8c2da49d22fbe7ee99e7c3153b1c737 | 5,457 | py | Python | pages/migrations/0006_auto_20160403_1849.py | DonaldTrumpHasTinyHands/tiny_hands_pac | 3d7e5daeddbee218cb96f3b88c7c7010620225fd | ["MIT"] | null | null | null | pages/migrations/0006_auto_20160403_1849.py | DonaldTrumpHasTinyHands/tiny_hands_pac | 3d7e5daeddbee218cb96f3b88c7c7010620225fd | ["MIT"] | null | null | null | pages/migrations/0006_auto_20160403_1849.py | DonaldTrumpHasTinyHands/tiny_hands_pac | 3d7e5daeddbee218cb96f3b88c7c7010620225fd | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-04-03 18:49
from __future__ import unicode_literals
from django.db import migrations
import wagtail.wagtailcore.blocks
import wagtail.wagtailcore.fields
import wagtail.wagtailimages.blocks
class Migration(migrations.Migration):
dependencies = [
('pages', '0005_auto_20160402_2218'),
]
operations = [
migrations.AlterField(
model_name='homepage',
name='body',
field=wagtail.wagtailcore.fields.StreamField([(b'HTML', wagtail.wagtailcore.blocks.StructBlock([(b'html_content', wagtail.wagtailcore.blocks.TextBlock(required=False))])), (b'WYSIWYG', wagtail.wagtailcore.blocks.StructBlock([(b'wysiwyg_content', wagtail.wagtailcore.blocks.RichTextBlock(required=False)), (b'horizontal_alignment', wagtail.wagtailcore.blocks.ChoiceBlock(choices=[(b'left', b'Left'), (b'right', b'Right'), (b'center', b'Center')]))])), (b'Row', wagtail.wagtailcore.blocks.StructBlock([(b'background_image', wagtail.wagtailimages.blocks.ImageChooserBlock(required=False)), (b'background_color', wagtail.wagtailcore.blocks.TextBlock(required=False)), (b'padding', wagtail.wagtailcore.blocks.TextBlock(required=False)), (b'max_width', wagtail.wagtailcore.blocks.TextBlock(required=False)), (b'vertical_alignment', wagtail.wagtailcore.blocks.ChoiceBlock(choices=[(b'top', b'Top'), (b'bottom', b'Bottom'), (b'middle', b'Middle'), (b'baseline', b'Baseline')])), (b'content', wagtail.wagtailcore.blocks.StreamBlock([(b'HTML', wagtail.wagtailcore.blocks.StructBlock([(b'html_content', wagtail.wagtailcore.blocks.TextBlock(required=False))])), (b'WYSIWYG', wagtail.wagtailcore.blocks.StructBlock([(b'wysiwyg_content', wagtail.wagtailcore.blocks.RichTextBlock(required=False)), (b'horizontal_alignment', wagtail.wagtailcore.blocks.ChoiceBlock(choices=[(b'left', b'Left'), (b'right', b'Right'), (b'center', b'Center')]))])), (b'Column', wagtail.wagtailcore.blocks.StructBlock([(b'background_image', wagtail.wagtailimages.blocks.ImageChooserBlock(required=False)), (b'background_color', wagtail.wagtailcore.blocks.TextBlock(required=False)), (b'padding', wagtail.wagtailcore.blocks.TextBlock(required=False)), (b'max_width', wagtail.wagtailcore.blocks.TextBlock(required=False)), (b'content', wagtail.wagtailcore.blocks.StreamBlock([(b'HTML', wagtail.wagtailcore.blocks.StructBlock([(b'html_content', wagtail.wagtailcore.blocks.TextBlock(required=False))])), (b'WYSIWYG', wagtail.wagtailcore.blocks.StructBlock([(b'wysiwyg_content', wagtail.wagtailcore.blocks.RichTextBlock(required=False)), (b'horizontal_alignment', wagtail.wagtailcore.blocks.ChoiceBlock(choices=[(b'left', b'Left'), (b'right', b'Right'), (b'center', b'Center')]))]))]))]))]))])), (b'Hero', wagtail.wagtailcore.blocks.StructBlock([(b'hero_image', wagtail.wagtailimages.blocks.ImageChooserBlock(required=False)), (b'background_color', wagtail.wagtailcore.blocks.TextBlock(required=False)), (b'padding', wagtail.wagtailcore.blocks.TextBlock(required=False)), (b'logo', wagtail.wagtailcore.blocks.ChoiceBlock(choices=[(b'hide', b'Hide'), (b'show', b'Show'), (b'animate', b'Animate')])), (b'hero_content', wagtail.wagtailcore.blocks.StreamBlock([(b'HTML', wagtail.wagtailcore.blocks.StructBlock([(b'html_content', wagtail.wagtailcore.blocks.TextBlock(required=False))])), (b'WYSIWYG', wagtail.wagtailcore.blocks.StructBlock([(b'wysiwyg_content', wagtail.wagtailcore.blocks.RichTextBlock(required=False)), (b'horizontal_alignment', wagtail.wagtailcore.blocks.ChoiceBlock(choices=[(b'left', b'Left'), (b'right', b'Right'), (b'center', b'Center')]))])), (b'Row', wagtail.wagtailcore.blocks.StructBlock([(b'background_image', wagtail.wagtailimages.blocks.ImageChooserBlock(required=False)), (b'background_color', wagtail.wagtailcore.blocks.TextBlock(required=False)), (b'padding', wagtail.wagtailcore.blocks.TextBlock(required=False)), (b'max_width', wagtail.wagtailcore.blocks.TextBlock(required=False)), (b'vertical_alignment', wagtail.wagtailcore.blocks.ChoiceBlock(choices=[(b'top', 
b'Top'), (b'bottom', b'Bottom'), (b'middle', b'Middle'), (b'baseline', b'Baseline')])), (b'content', wagtail.wagtailcore.blocks.StreamBlock([(b'HTML', wagtail.wagtailcore.blocks.StructBlock([(b'html_content', wagtail.wagtailcore.blocks.TextBlock(required=False))])), (b'WYSIWYG', wagtail.wagtailcore.blocks.StructBlock([(b'wysiwyg_content', wagtail.wagtailcore.blocks.RichTextBlock(required=False)), (b'horizontal_alignment', wagtail.wagtailcore.blocks.ChoiceBlock(choices=[(b'left', b'Left'), (b'right', b'Right'), (b'center', b'Center')]))])), (b'Column', wagtail.wagtailcore.blocks.StructBlock([(b'background_image', wagtail.wagtailimages.blocks.ImageChooserBlock(required=False)), (b'background_color', wagtail.wagtailcore.blocks.TextBlock(required=False)), (b'padding', wagtail.wagtailcore.blocks.TextBlock(required=False)), (b'max_width', wagtail.wagtailcore.blocks.TextBlock(required=False)), (b'content', wagtail.wagtailcore.blocks.StreamBlock([(b'HTML', wagtail.wagtailcore.blocks.StructBlock([(b'html_content', wagtail.wagtailcore.blocks.TextBlock(required=False))])), (b'WYSIWYG', wagtail.wagtailcore.blocks.StructBlock([(b'wysiwyg_content', wagtail.wagtailcore.blocks.RichTextBlock(required=False)), (b'horizontal_alignment', wagtail.wagtailcore.blocks.ChoiceBlock(choices=[(b'left', b'Left'), (b'right', b'Right'), (b'center', b'Center')]))]))]))]))]))]))]))]))], blank=True, null=True),
),
]
| 227.375 | 4,963 | 0.757009 |
8d1dd91bd2560dc42815f22209c871ecf6222fbb | 610 | py | Python | my_app/blog/migrations/0018_orderitem_user.py | Faisal-Sey/official1 | 49af7a9fd60c980bd5d4ef7075a4c1f27ecc9642 | ["MIT"] | 1 | 2021-06-19T00:17:02.000Z | 2021-06-19T00:17:02.000Z | my_app/blog/migrations/0018_orderitem_user.py | Faisal-Sey/official1 | 49af7a9fd60c980bd5d4ef7075a4c1f27ecc9642 | ["MIT"] | null | null | null | my_app/blog/migrations/0018_orderitem_user.py | Faisal-Sey/official1 | 49af7a9fd60c980bd5d4ef7075a4c1f27ecc9642 | ["MIT"] | null | null | null |
# Generated by Django 3.1 on 2020-09-13 13:12
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('blog', '0017_remove_orderitem_user'),
    ]

    operations = [
        migrations.AddField(
            model_name='orderitem',
            name='user',
            field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to='auth.user'),
            preserve_default=False,
        ),
    ]
| 26.521739 | 108 | 0.662295 |
297a903034b6135b8efcd813cc32876f6222d8ca | 5,606 | py | Python | colour/plotting/characterisation.py | OmarWagih1/colour | bdc880a2783ff523dafb19f1233212dd03a639bd | ["BSD-3-Clause"] | null | null | null | colour/plotting/characterisation.py | OmarWagih1/colour | bdc880a2783ff523dafb19f1233212dd03a639bd | ["BSD-3-Clause"] | null | null | null | colour/plotting/characterisation.py | OmarWagih1/colour | bdc880a2783ff523dafb19f1233212dd03a639bd | ["BSD-3-Clause"] | null | null | null |
# -*- coding: utf-8 -*-
"""
Characterisation Plotting
=========================
Defines the characterisation plotting objects:
- :func:`colour.plotting.plot_single_colour_checker`
- :func:`colour.plotting.plot_multi_colour_checkers`
"""
from __future__ import division
import numpy as np
from colour.models import xyY_to_XYZ
from colour.plotting import (
COLOUR_STYLE_CONSTANTS, ColourSwatch, XYZ_to_plotting_colourspace, artist,
filter_colour_checkers, plot_multi_colour_swatches, override_style, render)
__author__ = 'Colour Developers'
__copyright__ = 'Copyright (C) 2013-2020 - Colour Developers'
__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'Colour Developers'
__email__ = 'colour-developers@colour-science.org'
__status__ = 'Production'
__all__ = ['plot_single_colour_checker', 'plot_multi_colour_checkers']
@override_style(
**{
'axes.grid': False,
'xtick.bottom': False,
'ytick.left': False,
'xtick.labelbottom': False,
'ytick.labelleft': False,
})
def plot_single_colour_checker(colour_checker='ColorChecker 2005', **kwargs):
"""
Plots given colour checker.
Parameters
----------
colour_checker : unicode, optional
Color checker to plot. ``colour_checker`` can be of any type or form
supported by the
:func:`colour.plotting.filter_colour_checkers` definition.
Other Parameters
----------------
\\**kwargs : dict, optional
{:func:`colour.plotting.artist`,
:func:`colour.plotting.plot_multi_colour_swatches`,
:func:`colour.plotting.render`},
Please refer to the documentation of the previously listed definitions.
Returns
-------
tuple
Current figure and axes.
Examples
--------
>>> plot_single_colour_checker('ColorChecker 2005') # doctest: +ELLIPSIS
(<Figure size ... with 1 Axes>, <...AxesSubplot...>)
.. image:: ../_static/Plotting_Plot_Single_Colour_Checker.png
:align: center
:alt: plot_single_colour_checker
"""
return plot_multi_colour_checkers([colour_checker], **kwargs)
@override_style(
**{
'axes.grid': False,
'xtick.bottom': False,
'ytick.left': False,
'xtick.labelbottom': False,
'ytick.labelleft': False,
})
def plot_multi_colour_checkers(colour_checkers=None, **kwargs):
"""
Plots and compares given colour checkers.
Parameters
----------
colour_checkers : array_like, optional
Color checker to plot, count must be less than or equal to 2.
``colour_checkers`` elements can be of any type or form supported by
the :func:`colour.plotting.filter_colour_checkers` definition.
Other Parameters
----------------
\\**kwargs : dict, optional
{:func:`colour.plotting.artist`,
:func:`colour.plotting.plot_multi_colour_swatches`,
:func:`colour.plotting.render`},
Please refer to the documentation of the previously listed definitions.
Returns
-------
tuple
Current figure and axes.
Examples
--------
>>> plot_multi_colour_checkers(['ColorChecker 1976', 'ColorChecker 2005'])
... # doctest: +ELLIPSIS
(<Figure size ... with 1 Axes>, <...AxesSubplot...>)
.. image:: ../_static/Plotting_Plot_Multi_Colour_Checkers.png
:align: center
:alt: plot_multi_colour_checkers
"""
if colour_checkers is None:
colour_checkers = ['ColorChecker 1976', 'ColorChecker 2005']
else:
assert len(colour_checkers) <= 2, (
'Only two colour checkers can be compared at a time!')
colour_checkers = filter_colour_checkers(colour_checkers).values()
_figure, axes = artist(**kwargs)
compare_swatches = len(colour_checkers) == 2
colour_swatches = []
colour_checker_names = []
for colour_checker in colour_checkers:
colour_checker_names.append(colour_checker.name)
for label, xyY in colour_checker.data.items():
XYZ = xyY_to_XYZ(xyY)
RGB = XYZ_to_plotting_colourspace(XYZ, colour_checker.illuminant)
colour_swatches.append(
ColourSwatch(label.title(), np.clip(np.ravel(RGB), 0, 1)))
if compare_swatches:
colour_swatches = [
swatch
for pairs in zip(colour_swatches[0:len(colour_swatches) // 2],
colour_swatches[len(colour_swatches) // 2:])
for swatch in pairs
]
background_colour = '0.1'
width = height = 1.0
spacing = 0.25
columns = 6
settings = {
'axes': axes,
'width': width,
'height': height,
'spacing': spacing,
'columns': columns,
'direction': '-y',
'text_kwargs': {
'size': 8
},
'background_colour': background_colour,
'compare_swatches': 'Stacked' if compare_swatches else None,
}
settings.update(kwargs)
settings['standalone'] = False
plot_multi_colour_swatches(colour_swatches, **settings)
axes.text(
0.5,
0.005,
'{0} - {1} - Colour Rendition Chart'.format(
', '.join(colour_checker_names),
COLOUR_STYLE_CONSTANTS.colour.colourspace.name),
transform=axes.transAxes,
color=COLOUR_STYLE_CONSTANTS.colour.bright,
ha='center',
va='bottom')
settings.update({
'axes': axes,
'standalone': True,
'title': ', '.join(colour_checker_names),
})
return render(**settings)
| 29.505263 | 79 | 0.631645 |
1a426fa85fbb0da0e3f52629b19b56809eaa7ac4 | 1,095 | py | Python | detkit/_functions/_utilities.py | ameli/detkit | 54a1d5116e668f18be57954ba012f6061044668e | ["BSD-3-Clause"] | null | null | null | detkit/_functions/_utilities.py | ameli/detkit | 54a1d5116e668f18be57954ba012f6061044668e | ["BSD-3-Clause"] | null | null | null | detkit/_functions/_utilities.py | ameli/detkit | 54a1d5116e668f18be57954ba012f6061044668e | ["BSD-3-Clause"] | null | null | null |
# SPDX-FileCopyrightText: Copyright 2022, Siavash Ameli <sameli@berkeley.edu>
# SPDX-License-Identifier: BSD-3-Clause
# SPDX-FileType: SOURCE
#
# This program is free software: you can redistribute it and/or modify it under
# the terms of the license found in the LICENSE.txt file in the root directory
# of this source tree.
__all__ = ['get_data_type_name']
# ==================
# get data type name
# ==================
def get_data_type_name(data):
    """
    Returns the typename of data as string.
    """
    if data.dtype in [b'float32', 'float32']:
        data_type_name = b'float32'
    elif data.dtype in [b'float64', 'float64']:
        data_type_name = b'float64'
    elif data.dtype in [b'float128', 'float128']:
        data_type_name = b'float128'
    elif data.dtype in [b'int32', 'int32']:
        data_type_name = b'int32'
    elif data.dtype in [b'int64', 'int64']:
        data_type_name = b'int64'
    else:
        raise TypeError('Data type should be "float32", "float64", ' +
                        '"float128", "int32", or "int64".')
    return data_type_name
| 29.594595 | 79 | 0.627397 |
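As written, the helper above compares `data.dtype` against both byte-string and plain-string names and returns a byte string. A short usage sketch (the import path is taken from the row metadata; treating it as importable is an assumption):

```python
# Sketch: mapping NumPy dtypes to byte-string names with the helper above.
import numpy as np
from detkit._functions._utilities import get_data_type_name

print(get_data_type_name(np.zeros(3, dtype='float32')))  # b'float32'
print(get_data_type_name(np.arange(3, dtype='int64')))   # b'int64'
```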
e93d1d3d0a7895c4a2339c2299da650174e01248 | 10,262 | py | Python | Tests/test_mmtf.py | JJTimmons/biopython | 3d96ed0c9047ec0e7987000e6e99d800c515db41 | ["BSD-3-Clause"] | 2 | 2019-10-25T18:20:34.000Z | 2019-10-28T15:26:40.000Z | Tests/test_mmtf.py | JJTimmons/biopython | 3d96ed0c9047ec0e7987000e6e99d800c515db41 | ["BSD-3-Clause"] | null | null | null | Tests/test_mmtf.py | JJTimmons/biopython | 3d96ed0c9047ec0e7987000e6e99d800c515db41 | ["BSD-3-Clause"] | null | null | null |
# Copyright 2016 by Anthony Bradley. All rights reserved.
# Revisions copyright 2017 by Peter Cock. All rights reserved.
# Revisions copyright 2019 by Joe Greener. All rights reserved.
# This file is part of the Biopython distribution and governed by your
# choice of the "Biopython License Agreement" or the "BSD 3-Clause License".
# Please see the LICENSE file that should have been included as part of this
# package.
"""Tests for mmtf module."""
import unittest
import warnings
import os
import tempfile
from Bio.PDB import PDBParser, Select
from Bio.PDB.mmtf import MMTFParser, MMTFIO
from Bio.PDB.MMCIFParser import MMCIFParser
from Bio.PDB.PDBExceptions import PDBConstructionWarning
import mmtf
class ParseMMTF(unittest.TestCase):
"""Testing with real mmtf file(s)."""
def check_atoms(self):
"""Check all atoms in self.mmtf_atoms and self.mmcif_atoms are equivalent."""
self.assertEqual(len(self.mmcif_atoms), len(self.mmtf_atoms))
for i, e in enumerate(self.mmcif_atoms):
mmtf_atom = self.mmtf_atoms[i]
mmcif_atom = self.mmcif_atoms[i]
self.assertEqual(mmtf_atom.name, mmcif_atom.name) # eg. CA, spaces are removed from atom name
self.assertEqual(mmtf_atom.fullname, mmcif_atom.fullname) # e.g. " CA ", spaces included
self.assertAlmostEqual(mmtf_atom.coord[0], mmcif_atom.coord[0], places=3)
self.assertAlmostEqual(mmtf_atom.coord[1], mmcif_atom.coord[1], places=3)
self.assertAlmostEqual(mmtf_atom.coord[2], mmcif_atom.coord[2], places=3)
self.assertEqual(mmtf_atom.bfactor, mmcif_atom.bfactor)
self.assertEqual(mmtf_atom.occupancy, mmcif_atom.occupancy)
self.assertEqual(mmtf_atom.altloc, mmcif_atom.altloc)
self.assertEqual(mmtf_atom.full_id,
mmcif_atom.full_id) # (structure id, model id, chain id, residue id, atom id)
self.assertEqual(mmtf_atom.id, mmcif_atom.name) # id of atom is the atom name (e.g. "CA")
# self.assertEqual(mmtf_atom.serial_number,mmcif_atom.serial_number) # mmCIF serial number is none
self.assertEqual(mmtf_atom - mmtf_atom, 0)
self.assertEqual(mmtf_atom - mmcif_atom, 0)
def check_residues(self):
"""Check all residues in self.mmcif_res and self.mmtf_res are equivalent."""
self.assertEqual(len(self.mmcif_res), len(self.mmtf_res))
for i, e in enumerate(self.mmcif_res):
mmcif_r = self.mmcif_res[i]
mmtf_r = self.mmtf_res[i]
self.assertEqual(mmtf_r.level, mmcif_r.level)
self.assertEqual(mmtf_r.disordered, mmcif_r.disordered)
self.assertEqual(mmtf_r.resname, mmcif_r.resname)
self.assertEqual(mmtf_r.segid, mmcif_r.segid)
self.mmcif_atoms = [x for x in mmcif_r.get_atoms()]
self.mmtf_atoms = [x for x in mmtf_r.get_atoms()]
self.check_atoms()
def check_mmtf_vs_cif(self, mmtf_filename, cif_filename):
"""Compare parsed structures for MMTF and CIF files."""
with warnings.catch_warnings():
warnings.simplefilter("ignore", PDBConstructionWarning)
mmtf_struct = MMTFParser.get_structure(mmtf_filename)
mmcif_parser = MMCIFParser()
mmcif_struct = mmcif_parser.get_structure("4CUP", cif_filename)
self.mmcif_atoms = [x for x in mmcif_struct.get_atoms()]
self.mmtf_atoms = [x for x in mmtf_struct.get_atoms()]
self.check_atoms()
mmcif_chains = [x for x in mmcif_struct.get_chains()]
mmtf_chains = [x for x in mmtf_struct.get_chains()]
self.assertEqual(len(mmcif_chains), len(mmtf_chains))
for i, e in enumerate(mmcif_chains):
self.mmcif_res = [x for x in mmcif_chains[i].get_residues()]
self.mmtf_res = [x for x in mmtf_chains[i].get_residues()]
self.check_residues()
self.mmcif_res = [x for x in mmcif_struct.get_residues()]
self.mmtf_res = [x for x in mmtf_struct.get_residues()]
self.check_residues()
self.assertEqual(len([x for x in mmcif_struct.get_models()]), len([x for x in mmtf_struct.get_models()]))
def test_4CUP(self):
"""Compare parsing 4CUP.mmtf and 4CUP.cif."""
self.check_mmtf_vs_cif("PDB/4CUP.mmtf", "PDB/4CUP.cif")
# TODO:
# def test_1A8O(self):
# """Compare parsing 1A8O.mmtf and 1A8O.cif"""
# self.check_mmtf_vs_cif("PDB/1A8O.mmtf", "PDB/1A8O.cif")
# TODO:
# def test_4ZHL(self):
# """Compare parsing 4ZHL.mmtf and 4ZHL.cif"""
# self.check_mmtf_vs_cif("PDB/4ZHL.mmtf", "PDB/4ZHL.cif")
class SimpleParseMMTF(unittest.TestCase):
"""Just parse some real mmtf files."""
def test_4ZHL(self):
"""Parse 4ZHL.mmtf."""
with warnings.catch_warnings():
warnings.simplefilter("ignore", PDBConstructionWarning)
structure = MMTFParser.get_structure("PDB/4ZHL.mmtf")
def test_1A80(self):
"""Parse 1A8O.mmtf."""
with warnings.catch_warnings():
warnings.simplefilter("ignore", PDBConstructionWarning)
structure = MMTFParser.get_structure("PDB/1A8O.mmtf")
class WriteMMTF(unittest.TestCase):
"""Write some MMTF files, read them back in and check them."""
def test_write(self):
"""Test a simple structure object is written out correctly to MMTF."""
parser = MMCIFParser()
struc = parser.get_structure("1A8O", "PDB/1A8O.cif")
io = MMTFIO()
io.set_structure(struc)
filenumber, filename = tempfile.mkstemp()
os.close(filenumber)
try:
io.save(filename)
struc_back = MMTFParser.get_structure(filename)
dict_back = mmtf.parse(filename)
self.assertEqual(dict_back.structure_id, "1A8O")
self.assertEqual(dict_back.num_models, 1)
self.assertEqual(dict_back.num_chains, 2)
self.assertEqual(dict_back.num_groups, 158)
self.assertEqual(dict_back.num_atoms, 644)
self.assertEqual(len(dict_back.x_coord_list), 644)
self.assertEqual(len(dict_back.y_coord_list), 644)
self.assertEqual(len(dict_back.z_coord_list), 644)
self.assertEqual(len(dict_back.b_factor_list), 644)
self.assertEqual(len(dict_back.occupancy_list), 644)
self.assertEqual(dict_back.x_coord_list[5], 20.022)
self.assertEqual(set(dict_back.ins_code_list), {"\x00"})
self.assertEqual(set(dict_back.alt_loc_list), {"\x00"})
self.assertEqual(list(dict_back.atom_id_list), list(range(1, 645)))
self.assertEqual(list(dict_back.sequence_index_list), list(range(70)) + [-1] * 88)
self.assertEqual(dict_back.chain_id_list, ["A", "B"])
self.assertEqual(dict_back.chain_name_list, ["A", "A"])
self.assertEqual(dict_back.chains_per_model, [2])
self.assertEqual(len(dict_back.group_list), 21)
self.assertEqual(len(dict_back.group_id_list), 158)
self.assertEqual(len(dict_back.group_type_list), 158)
self.assertEqual(dict_back.groups_per_chain, [70, 88])
self.assertEqual(len(dict_back.entity_list), 2)
self.assertEqual(dict_back.entity_list[0]["type"], "polymer")
self.assertEqual(dict_back.entity_list[0]["chainIndexList"], [0])
self.assertEqual(dict_back.entity_list[0]["sequence"], "MDIRQGPKEPFRDYVDRFYKTLRAEQASQEVKNWMTETLLVQNANPDCKTILKALGPGATLEEMMTACQG")
self.assertEqual(dict_back.entity_list[1]["type"], "water")
self.assertEqual(dict_back.entity_list[1]["chainIndexList"], [1])
self.assertEqual(dict_back.entity_list[1]["sequence"], "")
finally:
os.remove(filename)
def test_multi_model_write(self):
"""Test multiple models are written out correctly to MMTF."""
parser = PDBParser()
struc = parser.get_structure("1SSU_mod", "PDB/1SSU_mod.pdb")
io = MMTFIO()
io.set_structure(struc)
filenumber, filename = tempfile.mkstemp()
os.close(filenumber)
try:
io.save(filename)
struc_back = MMTFParser.get_structure(filename)
dict_back = mmtf.parse(filename)
self.assertEqual(dict_back.num_models, 2)
self.assertEqual(dict_back.num_chains, 4)
self.assertEqual(dict_back.num_groups, 4)
self.assertEqual(dict_back.num_atoms, 4)
self.assertEqual(list(dict_back.x_coord_list), [-1.058, -0.025, 7.024, 6.259])
self.assertEqual(dict_back.chain_id_list, ["A", "B", "A", "B"])
self.assertEqual(dict_back.chain_name_list, ["A", "B", "A", "B"])
self.assertEqual(dict_back.chains_per_model, [2, 2])
self.assertEqual(len(dict_back.group_list), 1)
self.assertEqual(len(dict_back.group_id_list), 4)
self.assertEqual(len(dict_back.group_type_list), 4)
self.assertEqual(dict_back.groups_per_chain, [1, 1, 1, 1])
self.assertEqual(len(dict_back.entity_list), 4)
finally:
os.remove(filename)
def test_selection_write(self):
"""Test the use of a Select subclass when writing MMTF files."""
struc = MMTFParser.get_structure("PDB/4CUP.mmtf")
io = MMTFIO()
io.set_structure(struc)
filenumber, filename = tempfile.mkstemp()
os.close(filenumber)
class CAonly(Select):
"""Accepts only CA residues."""
def accept_atom(self, atom):
if atom.name == "CA" and atom.element == "C":
return 1
try:
io.save(filename, CAonly())
struc_back = MMTFParser.get_structure(filename)
dict_back = mmtf.parse(filename)
self.assertEqual(dict_back.num_atoms, 116)
self.assertEqual(len(dict_back.x_coord_list), 116)
self.assertEqual(set(dict_back.alt_loc_list), {"\x00", "A", "B"})
finally:
os.remove(filename)
if __name__ == "__main__":
runner = unittest.TextTestRunner(verbosity=2)
unittest.main(testRunner=runner)
| 47.073394 | 140 | 0.648996 |
d3a8d5cb02d1ab160477ab17bcf2396de3735364 | 2,045 | py | Python | AppPkg/Applications/Python/Python-2.7.2/Lib/json/tests/test_encode_basestring_ascii.py | CEOALT1/RefindPlusUDK | 116b957ad735f96fbb6d80a0ba582046960ba164 | ["BSD-2-Clause"] | 2,757 | 2018-04-28T21:41:36.000Z | 2022-03-29T06:33:36.000Z | AppPkg/Applications/Python/Python-2.7.2/Lib/json/tests/test_encode_basestring_ascii.py | CEOALT1/RefindPlusUDK | 116b957ad735f96fbb6d80a0ba582046960ba164 | ["BSD-2-Clause"] | 20 | 2019-07-23T15:29:32.000Z | 2022-01-21T12:53:04.000Z | AppPkg/Applications/Python/Python-2.7.2/Lib/json/tests/test_encode_basestring_ascii.py | CEOALT1/RefindPlusUDK | 116b957ad735f96fbb6d80a0ba582046960ba164 | ["BSD-2-Clause"] | 449 | 2018-05-09T05:54:05.000Z | 2022-03-30T14:54:18.000Z |
from collections import OrderedDict
from json.tests import PyTest, CTest
CASES = [
(u'/\\"\ucafe\ubabe\uab98\ufcde\ubcda\uef4a\x08\x0c\n\r\t`1~!@#$%^&*()_+-=[]{}|;:\',./<>?', '"/\\\\\\"\\ucafe\\ubabe\\uab98\\ufcde\\ubcda\\uef4a\\b\\f\\n\\r\\t`1~!@#$%^&*()_+-=[]{}|;:\',./<>?"'),
(u'\u0123\u4567\u89ab\ucdef\uabcd\uef4a', '"\\u0123\\u4567\\u89ab\\ucdef\\uabcd\\uef4a"'),
(u'controls', '"controls"'),
(u'\x08\x0c\n\r\t', '"\\b\\f\\n\\r\\t"'),
(u'{"object with 1 member":["array with 1 element"]}', '"{\\"object with 1 member\\":[\\"array with 1 element\\"]}"'),
(u' s p a c e d ', '" s p a c e d "'),
(u'\U0001d120', '"\\ud834\\udd20"'),
(u'\u03b1\u03a9', '"\\u03b1\\u03a9"'),
('\xce\xb1\xce\xa9', '"\\u03b1\\u03a9"'),
(u'\u03b1\u03a9', '"\\u03b1\\u03a9"'),
('\xce\xb1\xce\xa9', '"\\u03b1\\u03a9"'),
(u'\u03b1\u03a9', '"\\u03b1\\u03a9"'),
(u'\u03b1\u03a9', '"\\u03b1\\u03a9"'),
(u"`1~!@#$%^&*()_+-={':[,]}|;.</>?", '"`1~!@#$%^&*()_+-={\':[,]}|;.</>?"'),
(u'\x08\x0c\n\r\t', '"\\b\\f\\n\\r\\t"'),
(u'\u0123\u4567\u89ab\ucdef\uabcd\uef4a', '"\\u0123\\u4567\\u89ab\\ucdef\\uabcd\\uef4a"'),
]
class TestEncodeBasestringAscii(object):
    def test_encode_basestring_ascii(self):
        fname = self.json.encoder.encode_basestring_ascii.__name__
        for input_string, expect in CASES:
            result = self.json.encoder.encode_basestring_ascii(input_string)
            self.assertEqual(result, expect,
                '{0!r} != {1!r} for {2}({3!r})'.format(
                    result, expect, fname, input_string))

    def test_ordered_dict(self):
        # See issue 6105
        items = [('one', 1), ('two', 2), ('three', 3), ('four', 4), ('five', 5)]
        s = self.dumps(OrderedDict(items))
        self.assertEqual(s, '{"one": 1, "two": 2, "three": 3, "four": 4, "five": 5}')
class TestPyEncodeBasestringAscii(TestEncodeBasestringAscii, PyTest): pass
class TestCEncodeBasestringAscii(TestEncodeBasestringAscii, CTest): pass
| 48.690476 | 200 | 0.533496 |
c9fe9bdb7707abdcda691279d580c36aff6dcff0 | 1,815 | py | Python | Python/DigitalClock.py | Swarga-codes/Hacktoberfest-Projects | ad58b81908c48a9b80d7aa3c29ba6be448028435 | ["Apache-2.0"] | 30 | 2020-10-07T09:16:29.000Z | 2020-10-19T06:50:37.000Z | Python/DigitalClock.py | Swarga-codes/Hacktoberfest-Projects | ad58b81908c48a9b80d7aa3c29ba6be448028435 | ["Apache-2.0"] | 70 | 2020-10-07T03:26:13.000Z | 2020-10-25T06:58:07.000Z | Python/DigitalClock.py | Swarga-codes/Hacktoberfest-Projects | ad58b81908c48a9b80d7aa3c29ba6be448028435 | ["Apache-2.0"] | 280 | 2020-10-07T03:39:21.000Z | 2020-10-25T07:16:33.000Z |
#import all useful module in the program
from tkinter import *
import datetime
import time
# Create a window by using the Tkinter
root = Tk()
# write the Heading on the window
root.title('Digital Clock By HS')
# geomerty of the window
root.geometry('500x300')
# fixed the window
root.resizable(0,0)
# set the colour of backroundwindow
root.configure(background='black')
# create a Label Widget for print on the window
t1 = Label(root,text='Digital Clock',font=('Arial',30),fg='red',bg='black').place(x=140,y=0)
# create a function for finding the time
def clock():
    l1=time.strftime("%H:%M:%S %p")
    l2=Label(root,text=l1,font=('ds digital',40,'bold'),fg='light green',bg='black')
    l2.after(200,clock)
    l2.place(x=80,y=190)
    l5=datetime.date.today()
    l8=Label(root,text=l5.day,font=('ds digital',40,'bold'),fg='light green',bg='black').place(x=355,y=90)
    l7=Label(root,text=l5.month,font=('ds digital',40,'bold'),fg='light green',bg='black').place(x=260,y=90)
    l6=Label(root,text=l5.year,font=('ds digital',40,'bold'),fg='light green',bg='black').place(x=80,y=90)
    l11=Label(root,text="DATE",font=('ds digital',15,'bold'),fg='light green',bg='black').place(x=355,y=160)
    l10=Label(root,text="MONTH",font=('ds digital',15,'bold'),fg='light green',bg='black').place(x=235,y=160)
    l9=Label(root,text="YEAR",font=('ds digital',15,'bold'),fg='light green',bg='black').place(x=90,y=160)
    l12=Label(root,text="HOUR",font=('ds digital',15,'bold'),fg='light green',bg='black').place(x=80,y=260)
    l13=Label(root,text="MIN",font=('ds digital',15,'bold'),fg='light green',bg='black').place(x=185,y=260)
    l14=Label(root,text="SEC",font=('ds digital',15,'bold'),fg='light green',bg='black').place(x=280,y=260)
# clock function
clock()
# end the program
root.mainloop()
| 40.333333 | 114 | 0.672727 |
22675bb9b82a59f177213da7e0c2ac38b277a68e | 7,965 | py | Python | test/functional/rpc_net.py | SkateFish/Auroracoin | 89d83a0c2980f9eff337c4e9743dc7f0181a78a1 | ["MIT"] | 35 | 2016-03-14T02:24:46.000Z | 2021-08-12T16:19:19.000Z | test/functional/rpc_net.py | SkateFish/Auroracoin | 89d83a0c2980f9eff337c4e9743dc7f0181a78a1 | ["MIT"] | 40 | 2016-03-11T20:21:12.000Z | 2022-03-29T11:07:54.000Z | test/functional/rpc_net.py | SkateFish/Auroracoin | 89d83a0c2980f9eff337c4e9743dc7f0181a78a1 | ["MIT"] | 34 | 2016-03-10T13:31:43.000Z | 2022-01-01T06:05:56.000Z |
#!/usr/bin/env python3
# Copyright (c) 2009-2019 The Bitcoin Core developers
# Copyright (c) 2014-2019 The DigiByte Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test RPC calls related to net.
Tests correspond to code in rpc/net.cpp.
"""
from decimal import Decimal
from test_framework.test_framework import DigiByteTestFramework
from test_framework.util import (
assert_equal,
assert_greater_than_or_equal,
assert_greater_than,
assert_raises_rpc_error,
connect_nodes,
p2p_port,
wait_until,
)
from test_framework.mininode import P2PInterface
import test_framework.messages
from test_framework.messages import (
CAddress,
msg_addr,
NODE_NETWORK,
NODE_WITNESS,
)
def assert_net_servicesnames(servicesflag, servicenames):
"""Utility that checks if all flags are correctly decoded in
`getpeerinfo` and `getnetworkinfo`.
:param servicesflag: The services as an integer.
:param servicenames: The list of decoded services names, as strings.
"""
servicesflag_generated = 0
for servicename in servicenames:
servicesflag_generated |= getattr(test_framework.messages, 'NODE_' + servicename)
assert servicesflag_generated == servicesflag
class NetTest(DigiByteTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 2
self.extra_args = [["-minrelaytxfee=0.00001000"],["-minrelaytxfee=0.00000500"]]
def run_test(self):
self.log.info('Connect nodes both way')
connect_nodes(self.nodes[0], 1)
connect_nodes(self.nodes[1], 0)
self._test_connection_count()
self._test_getnettotals()
self._test_getnetworkinfo()
self._test_getaddednodeinfo()
self._test_getpeerinfo()
self._test_getnodeaddresses()
def _test_connection_count(self):
# connect_nodes connects each node to the other
assert_equal(self.nodes[0].getconnectioncount(), 2)
def _test_getnettotals(self):
# getnettotals totalbytesrecv and totalbytessent should be
# consistent with getpeerinfo. Since the RPC calls are not atomic,
# and messages might have been recvd or sent between RPC calls, call
# getnettotals before and after and verify that the returned values
# from getpeerinfo are bounded by those values.
net_totals_before = self.nodes[0].getnettotals()
peer_info = self.nodes[0].getpeerinfo()
net_totals_after = self.nodes[0].getnettotals()
assert_equal(len(peer_info), 2)
peers_recv = sum([peer['bytesrecv'] for peer in peer_info])
peers_sent = sum([peer['bytessent'] for peer in peer_info])
assert_greater_than_or_equal(peers_recv, net_totals_before['totalbytesrecv'])
assert_greater_than_or_equal(net_totals_after['totalbytesrecv'], peers_recv)
assert_greater_than_or_equal(peers_sent, net_totals_before['totalbytessent'])
assert_greater_than_or_equal(net_totals_after['totalbytessent'], peers_sent)
# test getnettotals and getpeerinfo by doing a ping
# the bytes sent/received should change
# note ping and pong are 32 bytes each
self.nodes[0].ping()
wait_until(lambda: (self.nodes[0].getnettotals()['totalbytessent'] >= net_totals_after['totalbytessent'] + 32 * 2), timeout=1)
wait_until(lambda: (self.nodes[0].getnettotals()['totalbytesrecv'] >= net_totals_after['totalbytesrecv'] + 32 * 2), timeout=1)
peer_info_after_ping = self.nodes[0].getpeerinfo()
for before, after in zip(peer_info, peer_info_after_ping):
assert_greater_than_or_equal(after['bytesrecv_per_msg'].get('pong', 0), before['bytesrecv_per_msg'].get('pong', 0) + 32)
assert_greater_than_or_equal(after['bytessent_per_msg'].get('ping', 0), before['bytessent_per_msg'].get('ping', 0) + 32)
def _test_getnetworkinfo(self):
assert_equal(self.nodes[0].getnetworkinfo()['networkactive'], True)
assert_equal(self.nodes[0].getnetworkinfo()['connections'], 2)
self.nodes[0].setnetworkactive(state=False)
assert_equal(self.nodes[0].getnetworkinfo()['networkactive'], False)
# Wait a bit for all sockets to close
wait_until(lambda: self.nodes[0].getnetworkinfo()['connections'] == 0, timeout=3)
self.nodes[0].setnetworkactive(state=True)
self.log.info('Connect nodes both way')
connect_nodes(self.nodes[0], 1)
connect_nodes(self.nodes[1], 0)
assert_equal(self.nodes[0].getnetworkinfo()['networkactive'], True)
assert_equal(self.nodes[0].getnetworkinfo()['connections'], 2)
# check the `servicesnames` field
network_info = [node.getnetworkinfo() for node in self.nodes]
for info in network_info:
assert_net_servicesnames(int(info["localservices"], 0x10), info["localservicesnames"])
def _test_getaddednodeinfo(self):
assert_equal(self.nodes[0].getaddednodeinfo(), [])
# add a node (node2) to node0
ip_port = "127.0.0.1:{}".format(p2p_port(2))
self.nodes[0].addnode(node=ip_port, command='add')
# check that the node has indeed been added
added_nodes = self.nodes[0].getaddednodeinfo(ip_port)
assert_equal(len(added_nodes), 1)
assert_equal(added_nodes[0]['addednode'], ip_port)
# check that a non-existent node returns an error
assert_raises_rpc_error(-24, "Node has not been added", self.nodes[0].getaddednodeinfo, '1.1.1.1')
def _test_getpeerinfo(self):
peer_info = [x.getpeerinfo() for x in self.nodes]
# check both sides of bidirectional connection between nodes
# the address bound to on one side will be the source address for the other node
assert_equal(peer_info[0][0]['addrbind'], peer_info[1][0]['addr'])
assert_equal(peer_info[1][0]['addrbind'], peer_info[0][0]['addr'])
assert_equal(peer_info[0][0]['minfeefilter'], Decimal("0.00000500"))
assert_equal(peer_info[1][0]['minfeefilter'], Decimal("0.00001000"))
# check the `servicesnames` field
for info in peer_info:
assert_net_servicesnames(int(info[0]["services"], 0x10), info[0]["servicesnames"])
def _test_getnodeaddresses(self):
self.nodes[0].add_p2p_connection(P2PInterface())
# send some addresses to the node via the p2p message addr
msg = msg_addr()
imported_addrs = []
for i in range(256):
a = "123.123.123.{}".format(i)
imported_addrs.append(a)
addr = CAddress()
addr.time = 100000000
addr.nServices = NODE_NETWORK | NODE_WITNESS
addr.ip = a
addr.port = 8333
msg.addrs.append(addr)
self.nodes[0].p2p.send_and_ping(msg)
# obtain addresses via rpc call and check they were ones sent in before
REQUEST_COUNT = 10
node_addresses = self.nodes[0].getnodeaddresses(REQUEST_COUNT)
assert_equal(len(node_addresses), REQUEST_COUNT)
for a in node_addresses:
assert_greater_than(a["time"], 1527811200) # 1st June 2018
assert_equal(a["services"], NODE_NETWORK | NODE_WITNESS)
assert a["address"] in imported_addrs
assert_equal(a["port"], 8333)
assert_raises_rpc_error(-8, "Address count out of range", self.nodes[0].getnodeaddresses, -1)
# addrman's size cannot be known reliably after insertion, as hash collisions may occur
# so only test that requesting a large number of addresses returns less than that
LARGE_REQUEST_COUNT = 10000
node_addresses = self.nodes[0].getnodeaddresses(LARGE_REQUEST_COUNT)
assert_greater_than(LARGE_REQUEST_COUNT, len(node_addresses))
if __name__ == '__main__':
NetTest().main()
| 44.497207 | 134 | 0.684997 |
fdeed698d8cb0b5d1d67cc97daee6a8fad604a47 | 72 | py | Python | python/tests/__main__.py | duijf/boilerplates | a0aecb0de2c2a3f77ae8eeab1a030f4abc38959f | ["MIT"] | null | null | null | python/tests/__main__.py | duijf/boilerplates | a0aecb0de2c2a3f77ae8eeab1a030f4abc38959f | ["MIT"] | null | null | null | python/tests/__main__.py | duijf/boilerplates | a0aecb0de2c2a3f77ae8eeab1a030f4abc38959f | ["MIT"] | null | null | null |
import subprocess
import sys
subprocess.run(["pytest"] + sys.argv[1:])
| 14.4 | 41 | 0.722222 |
426153b57c3250c93436936de2fd0a7bc5085673 | 2,332 | py | Python | docs/source/conf.py | caubut-charter/matter-rpi4-nRF52840-dongle | a07de857bc86461d7d396092989fe92c83d03332 | ["CC0-1.0"] | 3 | 2021-09-21T19:48:54.000Z | 2021-11-03T22:56:33.000Z | docs/source/conf.py | caubut-charter/matter-rpi4-nRF52840-dongle | a07de857bc86461d7d396092989fe92c83d03332 | ["CC0-1.0"] | 8 | 2021-12-28T18:15:59.000Z | 2022-03-28T18:05:28.000Z | docs/source/conf.py | caubut-charter/matter-rpi4-nRF52840-dongle | a07de857bc86461d7d396092989fe92c83d03332 | ["CC0-1.0"] | null | null | null |
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = 'matter-rpi4-nRF52840-dongle'
copyright = '2021, Christopher Aubut <christopher.aubut@charter.com>'
author = 'Christopher Aubut <christopher.aubut@charter.com>'
# The full version, including alpha/beta/rc tags
release = '0.1.0'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx_tabs.tabs',
'sphinx_rtd_theme'
]
sphinx_tabs_disable_tab_closing = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
highlight_language = 'bash'
def setup(app):
    app.add_css_file('my_theme.css')
html_context = {
'display_github': True,
'github_user': 'caubut-charter',
'github_repo': 'matter-rpi4-nRF52840-dongle',
'github_version': 'main/docs/source/',
}
| 32.388889 | 79 | 0.677959 |
e6a27c1548a0492ffe890cd66faf51659b62f9ca | 110 | py | Python | wrappers/serial/visibility/visibility_fitting.py | ChrisHad/algorithm-reference-library | bded1b62ea801ea4f4f5bd0794c18cd81d4b2810 | ["Apache-2.0"] | 22 | 2016-12-14T11:20:07.000Z | 2021-08-13T15:23:41.000Z | wrappers/serial/visibility/visibility_fitting.py | ChrisHad/algorithm-reference-library | bded1b62ea801ea4f4f5bd0794c18cd81d4b2810 | ["Apache-2.0"] | 30 | 2017-06-27T09:15:38.000Z | 2020-09-11T18:16:37.000Z | wrappers/serial/visibility/visibility_fitting.py | ChrisHad/algorithm-reference-library | bded1b62ea801ea4f4f5bd0794c18cd81d4b2810 | ["Apache-2.0"] | 20 | 2017-07-02T03:45:49.000Z | 2019-12-11T17:19:01.000Z |
""" Visibility operations
"""
from processing_components.visibility.visibility_fitting import fit_visibility
| 22 | 78 | 0.845455 |
3532e6f7fa09d85bf5dc3c62b258e86b3286c220 | 979 | py | Python | extensions/vue_backend/object_properties.py | XIThing/esdl-mapeditor | 9f4cd4a58714ea67aeb532e88e88f0435a87dbd5 | ["Apache-2.0"] | null | null | null | extensions/vue_backend/object_properties.py | XIThing/esdl-mapeditor | 9f4cd4a58714ea67aeb532e88e88f0435a87dbd5 | ["Apache-2.0"] | 14 | 2020-09-30T21:16:46.000Z | 2021-11-08T18:54:34.000Z | extensions/vue_backend/object_properties.py | XIThing/esdl-mapeditor | 9f4cd4a58714ea67aeb532e88e88f0435a87dbd5 | ["Apache-2.0"] | 1 | 2020-09-17T12:48:57.000Z | 2020-09-17T12:48:57.000Z |
# This work is based on original code developed and copyrighted by TNO 2020.
# Subsequent contributions are licensed to you by the developers of such code and are
# made available to the Project under one or several contributor license agreements.
#
# This work is licensed to you under the Apache License, Version 2.0.
# You may obtain a copy of the license at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Contributors:
# TNO - Initial implementation
# Manager:
# TNO
from extensions.session_manager import get_session, set_session
import src.log as log
logger = log.get_logger(__name__)
def get_object_properties_info(obj):
"""
Builds up a dictionary with information about the object, with categorized parameters such that the frontend
can visualize these in a proper way
:param EObject obj: the object for which the information must be collected
:return dict: the dictionary with all required information
"""
| 32.633333 | 112 | 0.743616 |
5b4a69fc6381458174fdce418c24e619418e49e8
| 275 |
py
|
Python
|
gpytorch/likelihoods/__init__.py
|
imsrgadich/gpytorch
|
6d312392515599e5b79735c8fa3174c1a2a7e698
|
[
"MIT"
] | 1 |
2018-05-30T07:32:29.000Z
|
2018-05-30T07:32:29.000Z
|
gpytorch/likelihoods/__init__.py
|
julieli/gpytorch
|
21f08b6067a3733ffd9d729a1ce25487976f927e
|
[
"MIT"
] | null | null | null |
gpytorch/likelihoods/__init__.py
|
julieli/gpytorch
|
21f08b6067a3733ffd9d729a1ce25487976f927e
|
[
"MIT"
] | null | null | null |
from .likelihood import Likelihood
from .gaussian_likelihood import GaussianLikelihood
from .bernoulli_likelihood import BernoulliLikelihood
from .softmax_likelihood import SoftmaxLikelihood
__all__ = ["Likelihood", "GaussianLikelihood", "BernoulliLikelihood", "SoftmaxLikelihood"]
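# Usage sketch (added for illustration, not part of this module): a likelihood
# maps latent function values to a distribution over observations, e.g.
#   from gpytorch.likelihoods import GaussianLikelihood
#   likelihood = GaussianLikelihood()
# The no-argument construction is an assumption about this snapshot of the
# gpytorch API; check the class definition if it differs.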
| 39.285714 | 82 | 0.88 |
6400aabbbd279439e7d27b8d6a59c18eb7fab3e2
| 49,260 |
py
|
Python
|
src/sage/plot/plot3d/plot3d.py
|
rekhabiswal/sage
|
e8633b09919542a65e7e990c8369fee30c7edefd
|
[
"BSL-1.0"
] | null | null | null |
src/sage/plot/plot3d/plot3d.py
|
rekhabiswal/sage
|
e8633b09919542a65e7e990c8369fee30c7edefd
|
[
"BSL-1.0"
] | null | null | null |
src/sage/plot/plot3d/plot3d.py
|
rekhabiswal/sage
|
e8633b09919542a65e7e990c8369fee30c7edefd
|
[
"BSL-1.0"
] | null | null | null |
r"""
Plotting Functions
EXAMPLES::
sage: x, y = var('x y')
sage: W = plot3d(sin(pi*((x)^2+(y)^2))/2,(x,-1,1),(y,-1,1), frame=False, color='purple', opacity=0.8)
sage: S = sphere((0,0,0),size=0.3, color='red', aspect_ratio=[1,1,1])
sage: show(W + S, figsize=8)
.. PLOT::
x, y = var('x y')
W = plot3d(sin(pi*((x)**2+(y)**2))/2,(x,-1,1),(y,-1,1), frame=False, color='purple', opacity=0.8)
S = sphere((0,0,0),size=0.3, color='red', aspect_ratio=[1,1,1])
sphinx_plot(W + S)
::
sage: def f(x,y):
....: return math.sin(y^2+x^2)/math.sqrt(x^2+y^2+0.0001)
sage: P = plot3d(f,(-3,3),(-3,3), adaptive=True, color=rainbow(60, 'rgbtuple'), max_bend=.1, max_depth=15)
sage: P.show()
.. PLOT::
def f(x,y): return math.sin(y*y+x*x)/math.sqrt(x*x+y*y+0.0001)
P = plot3d(f,(-3,3),(-3,3), adaptive=True, color=rainbow(60, 'rgbtuple'), max_bend=.1, max_depth=15)
sphinx_plot(P)
::
sage: def f(x,y):
....: return math.exp(x/5)*math.sin(y)
...
sage: P = plot3d(f,(-5,5),(-5,5), adaptive=True, color=['red','yellow'])
sage: from sage.plot.plot3d.plot3d import axes
sage: S = P + axes(6, color='black')
sage: S.show()
.. PLOT::
def f(x,y): return math.exp(x/5)*math.sin(y)
P = plot3d(f,(-5,5),(-5,5), adaptive=True, color=['red','yellow'])
from sage.plot.plot3d.plot3d import axes
S = P + axes(6, color='black')
sphinx_plot(S)
Here is an example using a colormap and a color function ``c``::
sage: x, y = var('x y')
sage: cm = colormaps.hsv
sage: def c(x,y): return float((x+y+x*y)/15) % 1
sage: plot3d(x*x+y*y,(x,-4,4),(y,-4,4),color=(c,cm))
Graphics3d Object
.. PLOT::
x, y = var('x y')
cm = colormaps.hsv
def c(x,y): return float((x+y+x*y)/15) % 1
sphinx_plot(plot3d(x*x+y*y,(x,-4,4),(y,-4,4),color=(c,cm)))
Beware that the color function must take values between 0 and 1.
We plot "cape man"::
sage: S = sphere(size=.5, color='yellow')
::
sage: from sage.plot.plot3d.shapes import Cone
sage: S += Cone(.5, .5, color='red').translate(0,0,.3)
::
sage: S += sphere((.45,-.1,.15), size=.1, color='white') + sphere((.51,-.1,.17), size=.05, color='black')
sage: S += sphere((.45, .1,.15),size=.1, color='white') + sphere((.51, .1,.17), size=.05, color='black')
sage: S += sphere((.5,0,-.2),size=.1, color='yellow')
sage: def f(x,y): return math.exp(x/5)*math.cos(y)
sage: P = plot3d(f,(-5,5),(-5,5), adaptive=True, color=['red','yellow'], max_depth=10)
sage: cape_man = P.scale(.2) + S.translate(1,0,0)
sage: cape_man.show(aspect_ratio=[1,1,1])
.. PLOT::
S = sphere(size=.5, color='yellow')
from sage.plot.plot3d.shapes import Cone
S += Cone(.5, .5, color='red').translate(0,0,.3)
S += sphere((.45,-.1,.15), size=.1, color='white') + sphere((.51,-.1,.17), size=.05, color='black')
S += sphere((.45, .1,.15),size=.1, color='white') + sphere((.51, .1,.17), size=.05, color='black')
S += sphere((.5,0,-.2),size=.1, color='yellow')
def f(x,y): return math.exp(x/5)*math.cos(y)
P = plot3d(f,(-5,5),(-5,5), adaptive=True, color=['red','yellow'], max_depth=10)
cape_man = P.scale(.2) + S.translate(1,0,0)
cape_man.aspect_ratio([1,1,1])
sphinx_plot(cape_man)
Or, we plot a very simple function indeed::
sage: plot3d(pi, (-1,1), (-1,1))
Graphics3d Object
.. PLOT::
sphinx_plot(plot3d(pi, (-1,1), (-1,1)))
Transparent with fractional opacity value::
sage: plot3d(lambda x, y: x^2 + y^2, (-2,2), (-2,2), opacity=8/10)
Graphics3d Object
.. TODO::
Add support for smooth triangles.
AUTHORS:
- Tom Boothby: adaptive refinement triangles
- Josh Kantor: adaptive refinement triangles
- Robert Bradshaw (2007-08): initial version of this file
- William Stein (2007-12, 2008-01): improving 3d plotting
- Oscar Lazo, William Cauchois, Jason Grout (2009-2010): Adding coordinate transformations
"""
from __future__ import absolute_import
from six import iteritems
#*****************************************************************************
# Copyright (C) 2007 Robert Bradshaw <robertwb@math.washington.edu>
#
# Distributed under the terms of the GNU General Public License (GPL)
#
# This code is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# The full text of the GPL is available at:
#
# http://www.gnu.org/licenses/
#*****************************************************************************
from .tri_plot import TrianglePlot
from .index_face_set import IndexFaceSet
from .shapes import arrow3d
from .base import Graphics3dGroup
from sage.plot.colors import rainbow
from .texture import Texture
from sage.ext.fast_eval import fast_float_arg
from sage.functions.trig import cos, sin
class _Coordinates(object):
"""
This abstract class encapsulates a new coordinate system for plotting.
Sub-classes must implement the :meth:`transform` method which, given
symbolic variables to use, generates a 3-tuple of functions in terms of
those variables that can be used to find the Cartesian (X, Y, and Z)
coordinates for any point in this space.
"""
def __init__(self, dep_var, indep_vars):
"""
INPUT:
- ``dep_var`` - The dependent variable (the function value will be
substituted for this).
- ``indep_vars`` - A list of independent variables (the parameters will be
substituted for these).
TESTS:
Because the base :class:`_Coordinates` class automatically checks the
initializing variables with the transform method, :class:`_Coordinates`
cannot be instantiated by itself. We test a subclass.
sage: from sage.plot.plot3d.plot3d import _ArbitraryCoordinates as arb
sage: x,y,z=var('x,y,z')
sage: arb((x+z,y*z,z), z, (x,y))
Arbitrary Coordinates coordinate transform (z in terms of x, y)
"""
import inspect
all_vars=inspect.getargspec(self.transform).args[1:]
if set(all_vars) != set(indep_vars + [dep_var]):
raise ValueError('variables were specified incorrectly for this coordinate system; incorrect variables were %s'%list(set(all_vars).symmetric_difference(set(indep_vars+[dep_var]))))
self.dep_var = dep_var
self.indep_vars = indep_vars
@property
def _name(self):
"""
A default name for a coordinate system. Override this in a
subclass to set a different name.
TESTS::
sage: from sage.plot.plot3d.plot3d import _ArbitraryCoordinates as arb
sage: x,y,z=var('x,y,z')
sage: c=arb((x+z,y*z,z), z, (x,y))
sage: c._name
'Arbitrary Coordinates'
"""
return self.__class__.__name__
def transform(self, **kwds):
"""
Return the transformation for this coordinate system in terms of the
specified variables (which should be keywords).
TESTS::
sage: from sage.plot.plot3d.plot3d import _ArbitraryCoordinates as arb
sage: x,y,z=var('x,y,z')
sage: c=arb((x+z,y*z,z), z, (x,y))
sage: c.transform(x=1,y=2,z=3)
(4, 6, 3)
"""
raise NotImplementedError
def to_cartesian(self, func, params=None):
"""
Returns a 3-tuple of functions, parameterized over ``params``, that
represents the Cartesian coordinates of the value of ``func``.
INPUT:
- ``func`` - A function in this coordinate space. Corresponds to the
independent variable.
- ``params`` - The parameters of ``func``. Corresponds to the dependent
variables.
EXAMPLES::
sage: from sage.plot.plot3d.plot3d import _ArbitraryCoordinates
sage: x, y, z = var('x y z')
sage: T = _ArbitraryCoordinates((x + y, x - y, z), z,[x,y])
sage: f(x, y) = 2*x+y
sage: T.to_cartesian(f, [x, y])
(x + y, x - y, 2*x + y)
sage: [h(1,2) for h in T.to_cartesian(lambda x,y: 2*x+y)]
[3.0, -1.0, 4.0]
We try to return a function having the same variable names as
the function passed in::
sage: from sage.plot.plot3d.plot3d import _ArbitraryCoordinates
sage: x, y, z = var('x y z')
sage: T = _ArbitraryCoordinates((x + y, x - y, z), z,[x,y])
sage: f(a, b) = 2*a+b
sage: T.to_cartesian(f, [a, b])
(a + b, a - b, 2*a + b)
sage: t1,t2,t3=T.to_cartesian(lambda a,b: 2*a+b)
sage: import inspect
sage: inspect.getargspec(t1)
ArgSpec(args=['a', 'b'], varargs=None, keywords=None, defaults=None)
sage: inspect.getargspec(t2)
ArgSpec(args=['a', 'b'], varargs=None, keywords=None, defaults=None)
sage: inspect.getargspec(t3)
ArgSpec(args=['a', 'b'], varargs=None, keywords=None, defaults=None)
sage: def g(a,b): return 2*a+b
sage: t1,t2,t3=T.to_cartesian(g)
sage: inspect.getargspec(t1)
ArgSpec(args=['a', 'b'], varargs=None, keywords=None, defaults=None)
sage: t1,t2,t3=T.to_cartesian(2*a+b)
sage: inspect.getargspec(t1)
ArgSpec(args=['a', 'b'], varargs=None, keywords=None, defaults=None)
If we cannot guess the right parameter names, then the
parameters are named `u` and `v`::
sage: from sage.plot.plot3d.plot3d import _ArbitraryCoordinates
sage: x, y, z = var('x y z')
sage: T = _ArbitraryCoordinates((x + y, x - y, z), z,[x,y])
sage: t1,t2,t3=T.to_cartesian(operator.add)
sage: inspect.getargspec(t1)
ArgSpec(args=['u', 'v'], varargs=None, keywords=None, defaults=None)
sage: [h(1,2) for h in T.to_cartesian(operator.mul)]
[3.0, -1.0, 2.0]
sage: [h(u=1,v=2) for h in T.to_cartesian(operator.mul)]
[3.0, -1.0, 2.0]
The output of the function ``func`` is coerced to a float when
it is evaluated if the function is something like a lambda or
python callable. This takes care of situations like f returning a
singleton numpy array, for example.
sage: from numpy import array
sage: v_phi=array([ 0., 1.57079637, 3.14159274, 4.71238911, 6.28318548])
sage: v_theta=array([ 0., 0.78539819, 1.57079637, 2.35619456, 3.14159274])
sage: m_r=array([[ 0.16763356, 0.25683223, 0.16649297, 0.10594339, 0.55282422],
....: [ 0.16763356, 0.19993708, 0.31403568, 0.47359696, 0.55282422],
....: [ 0.16763356, 0.25683223, 0.16649297, 0.10594339, 0.55282422],
....: [ 0.16763356, 0.19993708, 0.31403568, 0.47359696, 0.55282422],
....: [ 0.16763356, 0.25683223, 0.16649297, 0.10594339, 0.55282422]])
sage: import scipy.interpolate
sage: f=scipy.interpolate.RectBivariateSpline(v_phi,v_theta,m_r)
sage: spherical_plot3d(f,(0,2*pi),(0,pi))
Graphics3d Object
"""
from sage.symbolic.expression import is_Expression
from sage.rings.real_mpfr import is_RealNumber
from sage.rings.integer import is_Integer
if params is not None and (is_Expression(func) or is_RealNumber(func) or is_Integer(func)):
return self.transform(**{
self.dep_var: func,
self.indep_vars[0]: params[0],
self.indep_vars[1]: params[1]
})
else:
# func might be a lambda or a Python callable; this makes it slightly
# more complex.
import sage.symbolic.ring
dep_var_dummy = sage.symbolic.ring.var(self.dep_var)
indep_var_dummies = sage.symbolic.ring.var(','.join(self.indep_vars))
transformation = self.transform(**{
self.dep_var: dep_var_dummy,
self.indep_vars[0]: indep_var_dummies[0],
self.indep_vars[1]: indep_var_dummies[1]
})
if params is None:
if callable(func):
params = _find_arguments_for_callable(func)
if params is None:
params=['u','v']
else:
raise ValueError("function is not callable")
def subs_func(t):
# We use eval so that the lambda function has the same
# variable names as the original function
ll="""lambda {x},{y}: t.subs({{
dep_var_dummy: float(func({x}, {y})),
indep_var_dummies[0]: float({x}),
indep_var_dummies[1]: float({y})
}})""".format(x=params[0], y=params[1])
return eval(ll,dict(t=t, func=func, dep_var_dummy=dep_var_dummy,
indep_var_dummies=indep_var_dummies))
return [subs_func(_) for _ in transformation]
def __repr__(self):
"""
Print out a coordinate system
::
sage: from sage.plot.plot3d.plot3d import _ArbitraryCoordinates as arb
sage: x,y,z=var('x,y,z')
sage: c=arb((x+z,y*z,z), z, (x,y))
sage: c
Arbitrary Coordinates coordinate transform (z in terms of x, y)
sage: c.__dict__['_name'] = 'My Special Coordinates'
sage: c
My Special Coordinates coordinate transform (z in terms of x, y)
"""
return '%s coordinate transform (%s in terms of %s)' % \
(self._name, self.dep_var, ', '.join(self.indep_vars))
import inspect
def _find_arguments_for_callable(func):
"""
Find the names of arguments (that do not have default values) for
a callable function, taking care of several special cases in Sage.
If the parameters cannot be found, then return None.
EXAMPLES::
sage: from sage.plot.plot3d.plot3d import _find_arguments_for_callable
sage: _find_arguments_for_callable(lambda x,y: x+y)
['x', 'y']
sage: def f(a,b,c): return a+b+c
sage: _find_arguments_for_callable(f)
['a', 'b', 'c']
sage: _find_arguments_for_callable(lambda x,y,z=2: x+y+z)
['x', 'y']
sage: def f(a,b,c,d=2,e=1): return a+b+c+d+e
sage: _find_arguments_for_callable(f)
['a', 'b', 'c']
sage: g(w,r,t)=w+r+t
sage: _find_arguments_for_callable(g)
['w', 'r', 't']
sage: a,b = var('a,b')
sage: _find_arguments_for_callable(a+b)
['a', 'b']
sage: _find_arguments_for_callable(operator.add)
"""
if inspect.isfunction(func):
f_args=inspect.getargspec(func)
if f_args.defaults is None:
params=f_args.args
else:
params=f_args.args[:-len(f_args.defaults)]
else:
try:
f_args=inspect.getargspec(func.__call__)
if f_args.defaults is None:
params=f_args.args
else:
params=f_args.args[:-len(f_args.defaults)]
except TypeError:
# func.__call__ may be a built-in (or Cython) function
if hasattr(func, 'arguments'):
params=[repr(s) for s in func.arguments()]
else:
params=None
return params
class _ArbitraryCoordinates(_Coordinates):
"""
An arbitrary coordinate system.
"""
_name = "Arbitrary Coordinates"
def __init__(self, custom_trans, dep_var, indep_vars):
"""
Initialize an arbitrary coordinate system.
INPUT:
- ``custom_trans`` - A 3-tuple of transformation
functions.
- ``dep_var`` - The dependent (function) variable.
- ``indep_vars`` - a list of the two other independent
variables.
EXAMPLES::
sage: from sage.plot.plot3d.plot3d import _ArbitraryCoordinates
sage: x, y, z = var('x y z')
sage: T = _ArbitraryCoordinates((x + y, x - y, z), z,[x,y])
sage: f(x, y) = 2*x + y
sage: T.to_cartesian(f, [x, y])
(x + y, x - y, 2*x + y)
sage: [h(1,2) for h in T.to_cartesian(lambda x,y: 2*x+y)]
[3.0, -1.0, 4.0]
"""
self.dep_var = str(dep_var)
self.indep_vars = [str(i) for i in indep_vars]
self.custom_trans = tuple(custom_trans)
def transform(self, **kwds):
"""
EXAMPLES::
sage: from sage.plot.plot3d.plot3d import _ArbitraryCoordinates
sage: x, y, z = var('x y z')
sage: T = _ArbitraryCoordinates((x + y, x - y, z), x,[y,z])
sage: T.transform(x=z,y=1)
(z + 1, z - 1, z)
"""
return tuple(t.subs(**kwds) for t in self.custom_trans)
class Spherical(_Coordinates):
"""
A spherical coordinate system for use with ``plot3d(transformation=...)``
where the position of a point is specified by three numbers:
- the *radial distance* (``radius``) from the origin
- the *azimuth angle* (``azimuth``) from the positive `x`-axis
- the *inclination angle* (``inclination``) from the positive `z`-axis
These three variables must be specified in the constructor.
EXAMPLES:
Construct a spherical transformation for a function for the radius
in terms of the azimuth and inclination::
sage: T = Spherical('radius', ['azimuth', 'inclination'])
If we construct some concrete variables, we can get a
transformation in terms of those variables::
sage: r, phi, theta = var('r phi theta')
sage: T.transform(radius=r, azimuth=theta, inclination=phi)
(r*cos(theta)*sin(phi), r*sin(phi)*sin(theta), r*cos(phi))
We can plot with this transform. Remember that the dependent
variable is the radius, and the independent variables are the
azimuth and the inclination (in that order)::
sage: plot3d(phi * theta, (theta, 0, pi), (phi, 0, 1), transformation=T)
Graphics3d Object
.. PLOT::
r, phi, theta = var('r phi theta')
T = Spherical('radius', ['azimuth', 'inclination'])
sphinx_plot(plot3d(phi * theta, (theta, 0, pi), (phi, 0, 1), transformation=T))
We next graph the function where the inclination angle is constant::
sage: S=Spherical('inclination', ['radius', 'azimuth'])
sage: r,theta=var('r,theta')
sage: plot3d(3, (r,0,3), (theta, 0, 2*pi), transformation=S)
Graphics3d Object
.. PLOT::
S=Spherical('inclination', ['radius', 'azimuth'])
r,theta=var('r,theta')
sphinx_plot(plot3d(r-r+3, (r,0,3), (theta, 0, 2*pi), transformation=S))
See also :func:`spherical_plot3d` for more examples of plotting in spherical
coordinates.
"""
def transform(self, radius=None, azimuth=None, inclination=None):
"""
A spherical coordinates transform.
EXAMPLES::
sage: T = Spherical('radius', ['azimuth', 'inclination'])
sage: T.transform(radius=var('r'), azimuth=var('theta'), inclination=var('phi'))
(r*cos(theta)*sin(phi), r*sin(phi)*sin(theta), r*cos(phi))
"""
return (radius * sin(inclination) * cos(azimuth),
radius * sin(inclination) * sin(azimuth),
radius * cos(inclination))
class SphericalElevation(_Coordinates):
"""
A spherical coordinate system for use with ``plot3d(transformation=...)``
where the position of a point is specified by three numbers:
- the *radial distance* (``radius``) from the origin
- the *azimuth angle* (``azimuth``) from the positive `x`-axis
- the *elevation angle* (``elevation``) from the `xy`-plane toward the
positive `z`-axis
These three variables must be specified in the constructor.
EXAMPLES:
Construct a spherical transformation for the radius
in terms of the azimuth and elevation. Then, get a
transformation in terms of those variables::
sage: T = SphericalElevation('radius', ['azimuth', 'elevation'])
sage: r, theta, phi = var('r theta phi')
sage: T.transform(radius=r, azimuth=theta, elevation=phi)
(r*cos(phi)*cos(theta), r*cos(phi)*sin(theta), r*sin(phi))
We can plot with this transform. Remember that the dependent
variable is the radius, and the independent variables are the
azimuth and the elevation (in that order)::
sage: plot3d(phi * theta, (theta, 0, pi), (phi, 0, 1), transformation=T)
Graphics3d Object
.. PLOT::
T = SphericalElevation('radius', ['azimuth', 'elevation'])
r, theta, phi = var('r theta phi')
sphinx_plot(plot3d(phi * theta, (theta, 0, pi), (phi, 0, 1), transformation=T))
We next graph the function where the elevation angle is constant. This
should be compared to the similar example for the ``Spherical`` coordinate
system::
sage: SE=SphericalElevation('elevation', ['radius', 'azimuth'])
sage: r,theta=var('r,theta')
sage: plot3d(3, (r,0,3), (theta, 0, 2*pi), transformation=SE)
Graphics3d Object
.. PLOT::
SE=SphericalElevation('elevation', ['radius', 'azimuth'])
r,theta=var('r,theta')
sphinx_plot(plot3d(3+r-r, (r,0,3), (theta, 0, 2*pi), transformation=SE))
Plot a sin curve wrapped around the equator::
sage: P1=plot3d( (pi/12)*sin(8*theta), (r,0.99,1), (theta, 0, 2*pi), transformation=SE, plot_points=(10,200))
sage: P2=sphere(center=(0,0,0), size=1, color='red', opacity=0.3)
sage: P1+P2
Graphics3d Object
.. PLOT::
r,theta=var('r,theta')
SE=SphericalElevation('elevation', ['radius', 'azimuth'])
P1=plot3d( (pi/12)*sin(8*theta), (r,0.99,1), (theta, 0, 2*pi), transformation=SE, plot_points=(10,200))
P2=sphere(center=(0,0,0), size=1, color='red', opacity=0.3)
sphinx_plot(P1+P2)
Now we graph several constant elevation functions alongside several constant
inclination functions. This example illustrates the difference between the
``Spherical`` coordinate system and the ``SphericalElevation`` coordinate
system::
sage: r, phi, theta = var('r phi theta')
sage: SE = SphericalElevation('elevation', ['radius', 'azimuth'])
sage: angles = [pi/18, pi/12, pi/6]
sage: P1 = [plot3d( a, (r,0,3), (theta, 0, 2*pi), transformation=SE, opacity=0.85, color='blue') for a in angles]
sage: S = Spherical('inclination', ['radius', 'azimuth'])
sage: P2 = [plot3d( a, (r,0,3), (theta, 0, 2*pi), transformation=S, opacity=0.85, color='red') for a in angles]
sage: show(sum(P1+P2), aspect_ratio=1)
.. PLOT::
r, phi, theta = var('r phi theta')
SE = SphericalElevation('elevation', ['radius', 'azimuth'])
S = Spherical('inclination', ['radius', 'azimuth'])
angles = [pi/18, pi/12, pi/6]
P1=Graphics()
P2=Graphics()
for a in angles:
P1 += plot3d( a, (r,0,3), (theta, 0, 2*pi), transformation=SE, opacity=0.85, color='blue')
P2 += plot3d( a, (r,0,3), (theta, 0, 2*pi), transformation=S, opacity=0.85, color='red')
sphinx_plot(P1+P2)
See also :func:`spherical_plot3d` for more examples of plotting in spherical
coordinates.
"""
def transform(self, radius=None, azimuth=None, elevation=None):
"""
A spherical elevation coordinates transform.
EXAMPLES::
sage: T = SphericalElevation('radius', ['azimuth', 'elevation'])
sage: T.transform(radius=var('r'), azimuth=var('theta'), elevation=var('phi'))
(r*cos(phi)*cos(theta), r*cos(phi)*sin(theta), r*sin(phi))
"""
return (radius * cos(elevation) * cos(azimuth),
radius * cos(elevation) * sin(azimuth),
radius * sin(elevation))
class Cylindrical(_Coordinates):
"""
A cylindrical coordinate system for use with ``plot3d(transformation=...)``
where the position of a point is specified by three numbers:
- the *radial distance* (``radius``) from the `z`-axis
- the *azimuth angle* (``azimuth``) from the positive `x`-axis
- the *height* or *altitude* (``height``) above the `xy`-plane
These three variables must be specified in the constructor.
EXAMPLES:
Construct a cylindrical transformation for a function for ``height`` in terms of
``radius`` and ``azimuth``::
sage: T = Cylindrical('height', ['radius', 'azimuth'])
If we construct some concrete variables, we can get a transformation::
sage: r, theta, z = var('r theta z')
sage: T.transform(radius=r, azimuth=theta, height=z)
(r*cos(theta), r*sin(theta), z)
We can plot with this transform. Remember that the dependent
variable is the height, and the independent variables are the
radius and the azimuth (in that order)::
sage: plot3d(9-r^2, (r, 0, 3), (theta, 0, pi), transformation=T)
Graphics3d Object
.. PLOT::
T = Cylindrical('height', ['radius', 'azimuth'])
r, theta, z = var('r theta z')
sphinx_plot(plot3d(9-r**2, (r, 0, 3), (theta, 0, pi), transformation=T))
We next graph the function where the radius is constant::
sage: S=Cylindrical('radius', ['azimuth', 'height'])
sage: theta,z=var('theta, z')
sage: plot3d(3, (theta,0,2*pi), (z, -2, 2), transformation=S)
Graphics3d Object
.. PLOT::
S=Cylindrical('radius', ['azimuth', 'height'])
theta,z=var('theta, z')
sphinx_plot(plot3d(3+z-z, (theta,0,2*pi), (z, -2, 2), transformation=S))
See also :func:`cylindrical_plot3d` for more examples of plotting in cylindrical
coordinates.
"""
def transform(self, radius=None, azimuth=None, height=None):
"""
A cylindrical coordinates transform.
EXAMPLES::
sage: T = Cylindrical('height', ['azimuth', 'radius'])
sage: T.transform(radius=var('r'), azimuth=var('theta'), height=var('z'))
(r*cos(theta), r*sin(theta), z)
"""
return (radius * cos(azimuth),
radius * sin(azimuth),
height)
class TrivialTriangleFactory:
"""
Class emulating behavior of :class:`~sage.plot.plot3d.tri_plot.TriangleFactory`
but simply returning a list of vertices for both regular and
smooth triangles.
"""
def triangle(self, a, b, c, color = None):
"""
Function emulating behavior of
:meth:`~sage.plot.plot3d.tri_plot.TriangleFactory.triangle`
but simply returning a list of vertices.
INPUT:
- ``a``, ``b``, ``c`` : triples (x,y,z) representing corners
on a triangle in 3-space
- ``color``: ignored
OUTPUT:
- the list ``[a,b,c]``
TESTS::
sage: from sage.plot.plot3d.plot3d import TrivialTriangleFactory
sage: factory = TrivialTriangleFactory()
sage: tri = factory.triangle([0,0,0],[0,0,1],[1,1,0])
sage: tri
[[0, 0, 0], [0, 0, 1], [1, 1, 0]]
"""
return [a,b,c]
def smooth_triangle(self, a, b, c, da, db, dc, color = None):
"""
Function emulating behavior of
:meth:`~sage.plot.plot3d.tri_plot.TriangleFactory.smooth_triangle`
but simply returning a list of vertices.
INPUT:
- ``a``, ``b``, ``c`` : triples (x,y,z) representing corners
on a triangle in 3-space
- ``da``, ``db``, ``dc`` : ignored
- ``color`` : ignored
OUTPUT:
- the list ``[a,b,c]``
TESTS::
sage: from sage.plot.plot3d.plot3d import TrivialTriangleFactory
sage: factory = TrivialTriangleFactory()
sage: sm_tri = factory.smooth_triangle([0,0,0],[0,0,1],[1,1,0],[0,0,1],[0,2,0],[1,0,0])
sage: sm_tri
[[0, 0, 0], [0, 0, 1], [1, 1, 0]]
"""
return [a,b,c]
from . import parametric_plot3d
def plot3d(f, urange, vrange, adaptive=False, transformation=None, **kwds):
"""
Plots a function in 3d.
INPUT:
- ``f`` - a symbolic expression or function of 2
variables
- ``urange`` - a 2-tuple (u_min, u_max) or a 3-tuple
(u, u_min, u_max)
- ``vrange`` - a 2-tuple (v_min, v_max) or a 3-tuple
(v, v_min, v_max)
- ``adaptive`` - (default: False) whether to use
adaptive refinement to draw the plot (slower, but may look better).
This option does NOT work in conjunction with a transformation
(see below).
- ``mesh`` - bool (default: False) whether to display
mesh grid lines
- ``dots`` - bool (default: False) whether to display
dots at mesh grid points
- ``plot_points`` - (default: "automatic") initial number of sample
points in each direction; an integer or a pair of integers
- ``transformation`` - (default: None) a transformation to
apply. May be a 3 or 4-tuple (x_func, y_func, z_func,
independent_vars) where the first 3 items indicate a
transformation to Cartesian coordinates (from your coordinate
system) in terms of u, v, and the function variable fvar (for
which the value of f will be substituted). If a 3-tuple is
specified, the independent variables are chosen from the range
variables. If a 4-tuple is specified, the 4th element is a list
of independent variables. ``transformation`` may also be a
predefined coordinate system transformation like Spherical or
Cylindrical.
.. note::
``mesh`` and ``dots`` are not supported when using the Tachyon
raytracer renderer.
EXAMPLES: We plot a 3d function defined as a Python function::
sage: plot3d(lambda x, y: x^2 + y^2, (-2,2), (-2,2))
Graphics3d Object
.. PLOT::
sphinx_plot(plot3d(lambda x, y: x**2 + y**2, (-2,2), (-2,2)))
We plot the same 3d function but using adaptive refinement::
sage: plot3d(lambda x, y: x^2 + y^2, (-2,2), (-2,2), adaptive=True)
Graphics3d Object
.. PLOT::
sphinx_plot(plot3d(lambda x, y: x**2 + y**2, (-2,2), (-2,2), adaptive=True))
Adaptive refinement but with more points::
sage: plot3d(lambda x, y: x^2 + y^2, (-2,2), (-2,2), adaptive=True, initial_depth=5)
Graphics3d Object
.. PLOT::
sphinx_plot(plot3d(lambda x, y: x**2 + y**2, (-2,2), (-2,2), adaptive=True, initial_depth=5))
We plot some 3d symbolic functions::
sage: var('x,y')
(x, y)
sage: plot3d(x^2 + y^2, (x,-2,2), (y,-2,2))
Graphics3d Object
.. PLOT::
var('x y')
sphinx_plot(plot3d(x**2 + y**2, (x,-2,2), (y,-2,2)))
::
sage: plot3d(sin(x*y), (x, -pi, pi), (y, -pi, pi))
Graphics3d Object
.. PLOT::
var('x y')
sphinx_plot(plot3d(sin(x*y), (x, -pi, pi), (y, -pi, pi)))
We give a plot with extra sample points::
sage: var('x,y')
(x, y)
sage: plot3d(sin(x^2+y^2),(x,-5,5),(y,-5,5), plot_points=200)
Graphics3d Object
.. PLOT::
var('x y')
sphinx_plot(plot3d(sin(x**2+y**2),(x,-5,5),(y,-5,5), plot_points=200))
::
sage: plot3d(sin(x^2+y^2),(x,-5,5),(y,-5,5), plot_points=[10,100])
Graphics3d Object
.. PLOT::
var('x y')
sphinx_plot(plot3d(sin(x**2+y**2),(x,-5,5),(y,-5,5), plot_points=[10,100]))
A 3d plot with a mesh::
sage: var('x,y')
(x, y)
sage: plot3d(sin(x-y)*y*cos(x),(x,-3,3),(y,-3,3), mesh=True)
Graphics3d Object
.. PLOT::
var('x y')
sphinx_plot(plot3d(sin(x-y)*y*cos(x),(x,-3,3),(y,-3,3), mesh=True))
    Two wobbly translucent planes::
sage: x,y = var('x,y')
sage: P = plot3d(x+y+sin(x*y), (x,-10,10),(y,-10,10), opacity=0.87, color='blue')
sage: Q = plot3d(x-2*y-cos(x*y),(x,-10,10),(y,-10,10),opacity=0.3,color='red')
sage: P + Q
Graphics3d Object
.. PLOT::
x,y=var('x y')
P = plot3d(x+y+sin(x*y), (x,-10,10),(y,-10,10), opacity=0.87, color='blue')
Q = plot3d(x-2*y-cos(x*y),(x,-10,10),(y,-10,10),opacity=0.3,color='red')
sphinx_plot(P+Q)
We draw two parametric surfaces and a transparent plane::
sage: L = plot3d(lambda x,y: 0, (-5,5), (-5,5), color="lightblue", opacity=0.8)
sage: P = plot3d(lambda x,y: 4 - x^3 - y^2, (-2,2), (-2,2), color='green')
sage: Q = plot3d(lambda x,y: x^3 + y^2 - 4, (-2,2), (-2,2), color='orange')
sage: L + P + Q
Graphics3d Object
.. PLOT::
L = plot3d(lambda x,y: 0, (-5,5), (-5,5), color="lightblue", opacity=0.8)
P = plot3d(lambda x,y: 4 - x**3 - y**2, (-2,2), (-2,2), color='green')
Q = plot3d(lambda x,y: x**3 + y**2 - 4, (-2,2), (-2,2), color='orange')
sphinx_plot(L+P+Q)
We draw the "Sinus" function (water ripple-like surface)::
sage: x, y = var('x y')
sage: plot3d(sin(pi*(x^2+y^2))/2,(x,-1,1),(y,-1,1))
Graphics3d Object
.. PLOT::
x, y = var('x y')
sphinx_plot(plot3d(sin(pi*(x**2+y**2))/2,(x,-1,1),(y,-1,1)))
Hill and valley (flat surface with a bump and a dent)::
sage: x, y = var('x y')
sage: plot3d( 4*x*exp(-x^2-y^2), (x,-2,2), (y,-2,2))
Graphics3d Object
.. PLOT::
x, y = var('x y')
sphinx_plot(plot3d( 4*x*exp(-x**2-y**2), (x,-2,2), (y,-2,2)))
An example of a transformation::
sage: r, phi, z = var('r phi z')
sage: trans=(r*cos(phi),r*sin(phi),z)
sage: plot3d(cos(r),(r,0,17*pi/2),(phi,0,2*pi),transformation=trans,opacity=0.87).show(aspect_ratio=(1,1,2),frame=False)
.. PLOT::
r, phi, z = var('r phi z')
trans = (r*cos(phi),r*sin(phi),z)
P = plot3d(cos(r),(r,0,17*pi/2),(phi,0,2*pi),transformation=trans,opacity=0.87)
P.aspect_ratio([1,1,2])
sphinx_plot(P)
An example of a transformation with symbolic vector::
sage: cylindrical(r,theta,z)=[r*cos(theta),r*sin(theta),z]
sage: plot3d(3,(theta,0,pi/2),(z,0,pi/2),transformation=cylindrical)
Graphics3d Object
.. PLOT::
r, theta, z = var('r theta z')
cylindrical=(r*cos(theta),r*sin(theta),z)
P = plot3d(z-z+3,(theta,0,pi/2),(z,0,pi/2),transformation=cylindrical)
sphinx_plot(P)
Many more examples of transformations::
sage: u, v, w = var('u v w')
sage: rectangular=(u,v,w)
sage: spherical=(w*cos(u)*sin(v),w*sin(u)*sin(v),w*cos(v))
sage: cylindric_radial=(w*cos(u),w*sin(u),v)
sage: cylindric_axial=(v*cos(u),v*sin(u),w)
sage: parabolic_cylindrical=(w*v,(v^2-w^2)/2,u)
Plot a constant function of each of these to get an idea of what it does::
sage: A = plot3d(2,(u,-pi,pi),(v,0,pi),transformation=rectangular,plot_points=[100,100])
sage: B = plot3d(2,(u,-pi,pi),(v,0,pi),transformation=spherical,plot_points=[100,100])
sage: C = plot3d(2,(u,-pi,pi),(v,0,pi),transformation=cylindric_radial,plot_points=[100,100])
sage: D = plot3d(2,(u,-pi,pi),(v,0,pi),transformation=cylindric_axial,plot_points=[100,100])
sage: E = plot3d(2,(u,-pi,pi),(v,-pi,pi),transformation=parabolic_cylindrical,plot_points=[100,100])
sage: @interact
....: def _(which_plot=[A,B,C,D,E]):
....: show(which_plot)
<html>...
Now plot a function::
sage: g=3+sin(4*u)/2+cos(4*v)/2
sage: F = plot3d(g,(u,-pi,pi),(v,0,pi),transformation=rectangular,plot_points=[100,100])
sage: G = plot3d(g,(u,-pi,pi),(v,0,pi),transformation=spherical,plot_points=[100,100])
sage: H = plot3d(g,(u,-pi,pi),(v,0,pi),transformation=cylindric_radial,plot_points=[100,100])
sage: I = plot3d(g,(u,-pi,pi),(v,0,pi),transformation=cylindric_axial,plot_points=[100,100])
sage: J = plot3d(g,(u,-pi,pi),(v,0,pi),transformation=parabolic_cylindrical,plot_points=[100,100])
sage: @interact
....: def _(which_plot=[F, G, H, I, J]):
....: show(which_plot)
<html>...
TESTS:
Make sure the transformation plots work::
sage: show(A + B + C + D + E)
sage: show(F + G + H + I + J)
Listing the same plot variable twice gives an error::
sage: x, y = var('x y')
sage: plot3d( 4*x*exp(-x^2-y^2), (x,-2,2), (x,-2,2))
Traceback (most recent call last):
...
ValueError: range variables should be distinct, but there are duplicates
"""
if transformation is not None:
params=None
from sage.symbolic.callable import is_CallableSymbolicExpression
# First, determine the parameters for f (from the first item of urange
# and vrange, preferably).
if len(urange) == 3 and len(vrange) == 3:
params = (urange[0], vrange[0])
elif is_CallableSymbolicExpression(f):
params = f.variables()
from sage.modules.vector_callable_symbolic_dense import Vector_callable_symbolic_dense
if isinstance(transformation, (tuple, list,Vector_callable_symbolic_dense)):
if len(transformation)==3:
if params is None:
raise ValueError("must specify independent variable names in the ranges when using generic transformation")
indep_vars = params
elif len(transformation)==4:
indep_vars = transformation[3]
transformation = transformation[0:3]
else:
raise ValueError("unknown transformation type")
# find out which variable is the function variable by
# eliminating the parameter variables.
all_vars = set(sum([list(s.variables()) for s in transformation],[]))
dep_var=all_vars - set(indep_vars)
if len(dep_var)==1:
dep_var = dep_var.pop()
transformation = _ArbitraryCoordinates(transformation, dep_var, indep_vars)
else:
raise ValueError("unable to determine the function variable in the transform")
if isinstance(transformation, _Coordinates):
R = transformation.to_cartesian(f, params)
return parametric_plot3d.parametric_plot3d(R, urange, vrange, **kwds)
else:
raise ValueError('unknown transformation type')
elif adaptive:
P = plot3d_adaptive(f, urange, vrange, **kwds)
else:
u=fast_float_arg(0)
v=fast_float_arg(1)
P=parametric_plot3d.parametric_plot3d((u,v,f), urange, vrange, **kwds)
P.frame_aspect_ratio([1.0,1.0,0.5])
return P
def plot3d_adaptive(f, x_range, y_range, color="automatic",
grad_f=None,
max_bend=.5, max_depth=5, initial_depth=4, num_colors=128, **kwds):
r"""
Adaptive 3d plotting of a function of two variables.
This is used internally by the plot3d command when the option
``adaptive=True`` is given.
INPUT:
- ``f`` - a symbolic function or a Python function of
      2 variables.
- ``x_range`` - x range of values: 2-tuple (xmin,
xmax) or 3-tuple (x,xmin,xmax)
- ``y_range`` - y range of values: 2-tuple (ymin,
ymax) or 3-tuple (y,ymin,ymax)
- ``grad_f`` - gradient of f as a Python function
- ``color`` - "automatic" - a rainbow of num_colors
colors
- ``num_colors`` - (default: 128) number of colors to
use with default color
- ``max_bend`` - (default: 0.5)
- ``max_depth`` - (default: 5)
- ``initial_depth`` - (default: 4)
- ``**kwds`` - standard graphics parameters
EXAMPLES:
We plot `\sin(xy)`::
sage: from sage.plot.plot3d.plot3d import plot3d_adaptive
sage: x,y=var('x,y'); plot3d_adaptive(sin(x*y), (x,-pi,pi), (y,-pi,pi), initial_depth=5)
Graphics3d Object
.. PLOT::
from sage.plot.plot3d.plot3d import plot3d_adaptive
x,y=var('x,y')
sphinx_plot(plot3d_adaptive(sin(x*y), (x,-pi,pi), (y,-pi,pi), initial_depth=5))
"""
if initial_depth >= max_depth:
max_depth = initial_depth
from sage.plot.misc import setup_for_eval_on_grid
g, ranges = setup_for_eval_on_grid(f, [x_range,y_range], plot_points=2)
xmin,xmax = ranges[0][:2]
ymin,ymax = ranges[1][:2]
opacity = float(kwds.get('opacity',1))
if color == "automatic":
texture = rainbow(num_colors, 'rgbtuple')
else:
if isinstance(color, list):
texture = color
else:
kwds['color'] = color
texture = Texture(kwds)
factory = TrivialTriangleFactory()
plot = TrianglePlot(factory, g, (xmin, xmax), (ymin, ymax), g = grad_f,
min_depth=initial_depth, max_depth=max_depth,
max_bend=max_bend, num_colors = None)
P = IndexFaceSet(plot._objects)
if isinstance(texture, (list, tuple)):
if len(texture) == 2:
# do a grid coloring
xticks = (xmax - xmin)/2**initial_depth
yticks = (ymax - ymin)/2**initial_depth
parts = P.partition(lambda x,y,z: (int((x-xmin)/xticks) + int((y-ymin)/yticks)) % 2)
else:
# do a topo coloring
bounds = P.bounding_box()
min_z = bounds[0][2]
max_z = bounds[1][2]
if max_z == min_z:
span = 0
else:
                span = (len(texture)-1) / (max_z - min_z)    # min_z == max_z is handled above, so no division by zero
parts = P.partition(lambda x, y, z: int((z-min_z)*span))
all = []
for k, G in iteritems(parts):
G.set_texture(texture[k], opacity=opacity)
all.append(G)
P = Graphics3dGroup(all)
else:
P.set_texture(texture)
P.frame_aspect_ratio([1.0, 1.0, 0.5])
P._set_extra_kwds(kwds)
return P
def spherical_plot3d(f, urange, vrange, **kwds):
"""
Plots a function in spherical coordinates. This function is
equivalent to::
sage: r,u,v=var('r,u,v')
sage: f=u*v; urange=(u,0,pi); vrange=(v,0,pi)
sage: T = (r*cos(u)*sin(v), r*sin(u)*sin(v), r*cos(v), [u,v])
sage: plot3d(f, urange, vrange, transformation=T)
Graphics3d Object
or equivalently::
sage: T = Spherical('radius', ['azimuth', 'inclination'])
sage: f=lambda u,v: u*v; urange=(u,0,pi); vrange=(v,0,pi)
sage: plot3d(f, urange, vrange, transformation=T)
Graphics3d Object
INPUT:
- ``f`` - a symbolic expression or function of two variables.
- ``urange`` - a 3-tuple (u, u_min, u_max), the domain of the azimuth variable.
- ``vrange`` - a 3-tuple (v, v_min, v_max), the domain of the inclination variable.
EXAMPLES:
A sphere of radius 2::
sage: x,y=var('x,y')
sage: spherical_plot3d(2,(x,0,2*pi),(y,0,pi))
Graphics3d Object
.. PLOT::
x,y=var('x,y')
sphinx_plot(spherical_plot3d(x-x+2,(x,0,2*pi),(y,0,pi)))
The real and imaginary parts of a spherical harmonic with `l=2` and `m=1`::
sage: phi, theta = var('phi, theta')
sage: Y = spherical_harmonic(2, 1, theta, phi)
sage: rea = spherical_plot3d(abs(real(Y)), (phi,0,2*pi), (theta,0,pi), color='blue', opacity=0.6)
sage: ima = spherical_plot3d(abs(imag(Y)), (phi,0,2*pi), (theta,0,pi), color='red', opacity=0.6)
sage: (rea + ima).show(aspect_ratio=1) # long time (4s on sage.math, 2011)
.. PLOT::
phi, theta = var('phi, theta')
Y = spherical_harmonic(2, 1, theta, phi)
rea = spherical_plot3d(abs(real(Y)), (phi,0,2*pi), (theta,0,pi), color='blue', opacity=0.6)
ima = spherical_plot3d(abs(imag(Y)), (phi,0,2*pi), (theta,0,pi), color='red', opacity=0.6)
sphinx_plot(rea+ima)
A drop of water::
sage: x,y=var('x,y')
sage: spherical_plot3d(e^-y,(x,0,2*pi),(y,0,pi),opacity=0.5).show(frame=False)
.. PLOT::
x,y=var('x,y')
sphinx_plot(spherical_plot3d(e**-y,(x,0,2*pi),(y,0,pi),opacity=0.5))
An object similar to a heart::
sage: x,y=var('x,y')
sage: spherical_plot3d((2+cos(2*x))*(y+1),(x,0,2*pi),(y,0,pi),rgbcolor=(1,.1,.1))
Graphics3d Object
.. PLOT::
x,y=var('x,y')
sphinx_plot(spherical_plot3d((2+cos(2*x))*(y+1),(x,0,2*pi),(y,0,pi),rgbcolor=(1,.1,.1)))
Some random figures:
::
sage: x,y=var('x,y')
sage: spherical_plot3d(1+sin(5*x)/5,(x,0,2*pi),(y,0,pi),rgbcolor=(1,0.5,0),plot_points=(80,80),opacity=0.7)
Graphics3d Object
.. PLOT::
x,y=var('x,y')
sphinx_plot(spherical_plot3d(1+sin(5*x)/5,(x,0,2*pi),(y,0,pi),rgbcolor=(1,0.5,0),plot_points=(80,80),opacity=0.7))
::
sage: x,y=var('x,y')
sage: spherical_plot3d(1+2*cos(2*y),(x,0,3*pi/2),(y,0,pi)).show(aspect_ratio=(1,1,1))
.. PLOT::
x,y=var('x,y')
sphinx_plot(spherical_plot3d(1+2*cos(2*y),(x,0,3*pi/2),(y,0,pi)))
"""
return plot3d(f, urange, vrange, transformation=Spherical('radius', ['azimuth', 'inclination']), **kwds)
def cylindrical_plot3d(f, urange, vrange, **kwds):
"""
Plots a function in cylindrical coordinates. This function is
equivalent to::
sage: r,u,v=var('r,u,v')
sage: f=u*v; urange=(u,0,pi); vrange=(v,0,pi)
sage: T = (r*cos(u), r*sin(u), v, [u,v])
sage: plot3d(f, urange, vrange, transformation=T)
Graphics3d Object
.. PLOT::
r,u,v=var('r,u,v')
f=u*v; urange=(u,0,pi); vrange=(v,0,pi)
T = (r*cos(u), r*sin(u), v, [u,v])
sphinx_plot(plot3d(f, urange, vrange, transformation=T))
or equivalently::
sage: T = Cylindrical('radius', ['azimuth', 'height'])
sage: f=lambda u,v: u*v; urange=(u,0,pi); vrange=(v,0,pi)
sage: plot3d(f, urange, vrange, transformation=T)
Graphics3d Object
INPUT:
- ``f`` - a symbolic expression or function of two variables,
representing the radius from the `z`-axis.
- ``urange`` - a 3-tuple (u, u_min, u_max), the domain of the
azimuth variable.
- ``vrange`` - a 3-tuple (v, v_min, v_max), the domain of the
elevation (`z`) variable.
EXAMPLES:
A portion of a cylinder of radius 2::
sage: theta,z=var('theta,z')
sage: cylindrical_plot3d(2,(theta,0,3*pi/2),(z,-2,2))
Graphics3d Object
.. PLOT::
theta,z=var('theta,z')
sphinx_plot(cylindrical_plot3d(z-z+2,(theta,0,3*pi/2),(z,-2,2)))
Some random figures:
::
sage: cylindrical_plot3d(cosh(z),(theta,0,2*pi),(z,-2,2))
Graphics3d Object
.. PLOT::
theta,z=var('theta,z')
sphinx_plot(cylindrical_plot3d(cosh(z),(theta,0,2*pi),(z,-2,2)))
::
sage: cylindrical_plot3d(e^(-z^2)*(cos(4*theta)+2)+1,(theta,0,2*pi),(z,-2,2),plot_points=[80,80]).show(aspect_ratio=(1,1,1))
.. PLOT::
theta,z=var('theta,z')
P = cylindrical_plot3d(e**(-z**2)*(cos(4*theta)+2)+1,(theta,0,2*pi),(z,-2,2),plot_points=[80,80])
P.aspect_ratio([1,1,1])
sphinx_plot(P)
"""
return plot3d(f, urange, vrange, transformation=Cylindrical('radius', ['azimuth', 'height']), **kwds)
def axes(scale=1, radius=None, **kwds):
"""
Creates basic axes in three dimensions. Each axis is a three
dimensional arrow object.
INPUT:
- ``scale`` - (default: 1) The length of the axes (all three
will be the same).
- ``radius`` - (default: .01) The radius of the axes as arrows.
EXAMPLES::
sage: from sage.plot.plot3d.plot3d import axes
sage: S = axes(6, color='black'); S
Graphics3d Object
.. PLOT::
from sage.plot.plot3d.plot3d import axes
S = axes(6, color='black')
sphinx_plot(S)
::
sage: T = axes(2, .5); T
Graphics3d Object
.. PLOT::
from sage.plot.plot3d.plot3d import axes
T = axes(2, .5)
sphinx_plot(T)
"""
if radius is None:
radius = scale/100.0
return Graphics3dGroup([arrow3d((0,0,0),(scale,0,0), radius, **kwds),
arrow3d((0,0,0),(0,scale,0), radius, **kwds),
arrow3d((0,0,0),(0,0,scale), radius, **kwds)])
| 34.960965 | 192 | 0.571681 |
3e10e8db537a09b44ad846eb57b462f43a068618
| 1,630 |
py
|
Python
|
src/programy/config/section.py
|
cdoebler1/AIML2
|
ee692ec5ea3794cd1bc4cc8ec2a6b5e5c20a0d6a
|
[
"MIT"
] | 345 |
2016-11-23T22:37:04.000Z
|
2022-03-30T20:44:44.000Z
|
src/programy/config/section.py
|
MikeyBeez/program-y
|
00d7a0c7d50062f18f0ab6f4a041068e119ef7f0
|
[
"MIT"
] | 275 |
2016-12-07T10:30:28.000Z
|
2022-02-08T21:28:33.000Z
|
src/programy/config/section.py
|
VProgramMist/modified-program-y
|
f32efcafafd773683b3fe30054d5485fe9002b7d
|
[
"MIT"
] | 159 |
2016-11-28T18:59:30.000Z
|
2022-03-20T18:02:44.000Z
|
"""
Copyright (c) 2016-2020 Keith Sterling http://www.keithsterling.com
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from abc import abstractmethod
from programy.config.base import BaseConfigurationData
from programy.utils.substitutions.substitues import Substitutions
class BaseSectionConfigurationData(BaseConfigurationData):
def __init__(self, name):
BaseConfigurationData.__init__(self, name)
@abstractmethod
def load_config_section(self, configuration_file, configuration, bot_root, subs: Substitutions = None):
"""
Never Implemented
"""
raise NotImplementedError() # pragma: no cover
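class _ExampleSectionConfiguration(BaseSectionConfigurationData):
    """Illustrative subclass (added sketch, not part of programy). It only
    shows the shape of a concrete section; how options are actually read from
    configuration_file (e.g. get_section/get_option helpers) is an assumption
    about the wider programy API, not something taken from this excerpt."""
    def __init__(self):
        BaseSectionConfigurationData.__init__(self, "example")
        self._value = None
    def load_config_section(self, configuration_file, configuration, bot_root, subs: Substitutions = None):
        # A real section would look up its options in the parsed configuration
        # here, resolve substitutions via `subs`, and store them on the instance.
        self._value = None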
| 49.393939 | 120 | 0.784049 |
1c9f78ae39288f25d8ddd3d379f6ef60bde15620
| 1,143 |
py
|
Python
|
numpy/_pyinstaller/pyinstaller-smoke.py
|
sukritingupta/numpy
|
2c44c93164c274a5865799eefd3e401effa948a9
|
[
"BSD-3-Clause"
] | 1 |
2022-02-24T10:16:43.000Z
|
2022-02-24T10:16:43.000Z
|
numpy/_pyinstaller/pyinstaller-smoke.py
|
sukritingupta/numpy
|
2c44c93164c274a5865799eefd3e401effa948a9
|
[
"BSD-3-Clause"
] | 8 |
2021-10-07T10:59:49.000Z
|
2021-11-22T20:06:49.000Z
|
numpy/_pyinstaller/pyinstaller-smoke.py
|
sukritingupta/numpy
|
2c44c93164c274a5865799eefd3e401effa948a9
|
[
"BSD-3-Clause"
] | 1 |
2022-03-22T11:47:01.000Z
|
2022-03-22T11:47:01.000Z
|
"""A crude *bit of everything* smoke test to verify PyInstaller compatibility.
PyInstaller typically goes wrong by forgetting to package modules, extension
modules or shared libraries. This script should aim to touch as many of those
as possible in an attempt to trip a ModuleNotFoundError or a DLL load failure
due to an uncollected resource. Missing resources are unlikely to lead to
arithmetic errors so there's generally no need to verify any calculation's
output - merely that it made it to the end OK. This script should not
explicitly import any of numpy's submodules as that gives PyInstaller undue
hints that those submodules exist and should be collected (accessing implicitly
loaded submodules is OK).
"""
import numpy as np
a = np.arange(1., 10.).reshape((3, 3)) % 5
np.linalg.det(a)
a @ a
a @ a.T
np.linalg.inv(a)
np.sin(np.exp(a))
np.linalg.svd(a)
np.linalg.eigh(a)
np.unique(np.random.randint(0, 10, 100))
np.sort(np.random.uniform(0, 10, 100))
np.fft.fft(np.exp(2j * np.pi * np.arange(8) / 8))
np.ma.masked_array(np.arange(10), np.random.rand(10) < .5).sum()
np.polynomial.Legendre([7, 8, 9]).roots()
print("I made it!")
| 34.636364 | 79 | 0.747157 |
da13727c369b97ce81c6e4727264690d11f5f7e2
| 3,709 |
py
|
Python
|
ontask/models/sqlconnection.py
|
pinheiroo27/ontask_b
|
23fee8caf4e1c5694a710a77f3004ca5d9effeac
|
[
"MIT"
] | 33 |
2017-12-02T04:09:24.000Z
|
2021-11-07T08:41:57.000Z
|
ontask/models/sqlconnection.py
|
pinheiroo27/ontask_b
|
23fee8caf4e1c5694a710a77f3004ca5d9effeac
|
[
"MIT"
] | 189 |
2017-11-16T04:06:29.000Z
|
2022-03-11T23:35:59.000Z
|
ontask/models/sqlconnection.py
|
pinheiroo27/ontask_b
|
23fee8caf4e1c5694a710a77f3004ca5d9effeac
|
[
"MIT"
] | 30 |
2017-11-30T03:35:44.000Z
|
2022-01-31T03:08:08.000Z
|
# -*- coding: utf-8 -*-
"""SQL Connection model."""
from typing import Dict
from django.db import models
from django.utils.translation import ugettext_lazy as _
from fernet_fields import EncryptedCharField
from ontask.models.common import CHAR_FIELD_LONG_SIZE
from ontask.models.connection import Connection
from ontask.models.logs import Log
class SQLConnection(Connection):
"""Model representing a SQL connection with SQLAlchemy.
@DynamicAttrs
The parameters for the connection are:
name
conn_type
db_user
db_password
db_host
db_port
db_name
db_table
"""
# Connection type: postgresql, mysql, etc.
conn_type = models.CharField(
verbose_name=_('Type'),
max_length=CHAR_FIELD_LONG_SIZE,
blank=False,
help_text=_('Postgresql, Mysql, etc.'))
# Connection driver
conn_driver = models.CharField(
verbose_name=_('Driver'),
default='',
max_length=CHAR_FIELD_LONG_SIZE,
blank=True,
help_text=_('Driver implementing the DBAPI'))
# User name to connect
db_user = models.CharField(
verbose_name=_('User'),
max_length=CHAR_FIELD_LONG_SIZE,
default='',
blank=True)
# db_password is optional (will be asked in run time, if not stored here)
db_password = EncryptedCharField(
default='',
max_length=CHAR_FIELD_LONG_SIZE,
verbose_name=_('Password'),
null=True,
blank=True,
help_text=_('Leave empty to enter at execution'))
# DB host
db_host = models.CharField(
verbose_name=_('Host'),
max_length=CHAR_FIELD_LONG_SIZE,
default='',
blank=True)
# DB port
db_port = models.IntegerField(
verbose_name=_('Port'),
null=True,
blank=True)
# DB name
db_name = models.CharField(
max_length=CHAR_FIELD_LONG_SIZE,
verbose_name=_('Database name'),
default='',
blank=False)
# DB table name: Optional, requested upon execution if not given here.
db_table = models.CharField(
max_length=CHAR_FIELD_LONG_SIZE,
verbose_name=_('Database table'),
default='',
null=True,
blank=True,
help_text=_('Leave empty to enter at execution'))
clone_event = Log.SQL_CONNECTION_CLONE
create_event = Log.SQL_CONNECTION_CREATE
delete_event = Log.SQL_CONNECTION_DELETE
edit_event = Log.SQL_CONNECTION_EDIT
toggle_event = Log.SQL_CONNECTION_TOGGLE
optional_fields = ['db_password', 'db_table']
@classmethod
def get(cls, primary_key):
"""Get the object with the given PK."""
return SQLConnection.objects.get(pk=primary_key)
def get_display_dict(self) -> Dict:
"""Create dictionary with (verbose_name, value)"""
d_dict = super().get_display_dict()
remove_title = self._meta.get_field('db_password').verbose_name.title()
if remove_title in d_dict:
d_dict[remove_title] = _('REMOVED')
return d_dict
def log(self, user, operation_type: str, **kwargs) -> int:
"""Log the operation with the object."""
payload = {
'id': self.id,
'name': self.name,
'conn_type': self.conn_type,
'conn_driver': self.conn_driver,
'db_host': self.db_host,
'db_port': self.db_port,
'db_name': self.db_name,
'db_user': self.db_user,
'db_password': 'SECRET' if self.db_password else '',
'db_table': self.db_table,
}
payload.update(kwargs)
return Log.objects.register(user, operation_type, None, payload)
| 28.530769 | 79 | 0.634672 |
f39523e25a10afef4607dc63ed5f5dff5384d17e
| 1,878 |
py
|
Python
|
nurses_2/widgets/particle_field/graphic_field.py
|
salt-die/nurses_2
|
29b76c34b9a28bf7c115998f4e81979966c82df0
|
[
"MIT"
] | 171 |
2021-06-23T15:29:15.000Z
|
2022-03-25T18:53:10.000Z
|
nurses_2/widgets/particle_field/graphic_field.py
|
salt-die/nurses_2
|
29b76c34b9a28bf7c115998f4e81979966c82df0
|
[
"MIT"
] | 1 |
2022-01-07T05:08:35.000Z
|
2022-01-10T04:53:57.000Z
|
nurses_2/widgets/particle_field/graphic_field.py
|
salt-die/nurses_2
|
29b76c34b9a28bf7c115998f4e81979966c82df0
|
[
"MIT"
] | 3 |
2021-10-01T09:12:15.000Z
|
2022-01-14T21:31:11.000Z
|
import numpy as np
from ...colors import AColor, TRANSPARENT
from ._field_base import _ParticleFieldBase, _ParticleBase
class GraphicParticleField(_ParticleFieldBase):
"""
A widget that only has GraphicParticle children.
"""
def __init__(self, **kwargs):
super().__init__(**kwargs)
self._buffer = np.zeros((3, ), dtype=float)
def render(self, canvas_view, colors_view, source: tuple[slice, slice]):
buffer = self._buffer
subtract, add = np.subtract, np.add
vert_slice, hori_slice = source
t = vert_slice.start
h = vert_slice.stop - t
l = hori_slice.start
w = hori_slice.stop - l
for child in self.children:
if not child.is_enabled or not child.is_visible:
continue
ct = child.top
pos = top, left = int(ct) - t, child.left - l
if 0 <= top < h and 0 <= left < w:
canvas_view[pos] = "▀"
*rgb, a = child.color
if child.is_transparent:
color = colors_view[pos][np.s_[3:] if (ct % 1) >= .5 else np.s_[:3]]
subtract(rgb, color, out=buffer, dtype=float)
buffer *= a / 255
add(buffer, color, out=color, casting="unsafe")
else:
colors_view[pos][np.s_[3:] if (ct % 1) >= .5 else np.s_[:3]] = rgb
class GraphicParticle(_ParticleBase):
"""
A .5x1 TUI element.
Notes
-----
The y-component of `pos` can be a float. The fractional part determines
whether the half block is upper or lower.
"""
def __init__(self, *, color: AColor=TRANSPARENT, is_transparent=True, **kwargs):
super().__init__(is_transparent=is_transparent, **kwargs)
self.color = color
GraphicParticleField._child_type = GraphicParticle
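# Worked example (added note): each terminal cell is rendered as "▀", so one
# cell holds two half-block pixels stacked vertically. render() above uses the
# fractional part of a particle's y-position to pick the half: (y % 1) < 0.5
# writes the particle colour into the first three colour channels (the glyph's
# upper half), while (y % 1) >= 0.5 writes into the last three channels (the
# lower half). For instance y = 3.0 fills the upper half of cell row 3 and
# y = 3.5 fills the lower half of the same cell; the channel layout is assumed
# to be foreground RGB followed by background RGB.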
| 30.290323 | 88 | 0.576145 |
27f6c18c352982155d8539ba7288377e287e6357
| 5,791 |
py
|
Python
|
resnet-test.py
|
KellyHwong/Amiya-Is-Not-Donkey
|
398fea3282436d2222c409e3fafed7c786db3ea3
|
[
"MIT"
] | null | null | null |
resnet-test.py
|
KellyHwong/Amiya-Is-Not-Donkey
|
398fea3282436d2222c409e3fafed7c786db3ea3
|
[
"MIT"
] | null | null | null |
resnet-test.py
|
KellyHwong/Amiya-Is-Not-Donkey
|
398fea3282436d2222c409e3fafed7c786db3ea3
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Date : Feb-03-20 23:44
# @Author : Your Name (you@example.org)
# @Link : https://www.kaggle.com/c/semi-conductor-image-classification-first
import os
import random
import numpy as np
import pandas as pd
from keras.optimizers import Adam
from keras.callbacks import EarlyStopping, LearningRateScheduler, ModelCheckpoint, ReduceLROnPlateau
from keras.preprocessing.image import ImageDataGenerator, load_img
from sklearn.model_selection import train_test_split
from resnet import model_depth, resnet_v2, lr_schedule
from model import auc
# Training parameters
IF_DATA_AUGMENTATION = True
NUM_CLASSES = 2
IMAGE_WIDTH = IMAGE_HEIGHT = 224
IMAGE_SIZE = (IMAGE_WIDTH, IMAGE_HEIGHT)
IMAGE_CHANNELS = 1
INPUT_SHAPE = [IMAGE_WIDTH, IMAGE_HEIGHT, IMAGE_CHANNELS]
# data path
TRAIN_DATA_DIR = "./data/train/"
TEST_DATA_DIR = "./data/test/all_tests"
SAVES_DIR = "./models-resnetv2/"
# constants
IF_FAST_RUN = True
EPOCHS_OVER_NIGHT = 50
BATCH_SIZE = 15
# BATCH_SIZE = 32 # orig paper trained all networks with batch_size=128
def main():
""" Prepare Model """
n = 2
version = 2
depth = model_depth(n, version)
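    # For ResNet v2 the helper above is assumed to compute depth = 9 * n + 2,
    # so n=2 gives depth 20 and model_type "ResNet20v2", matching the
    # checkpoint file loaded below.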
model_type = 'ResNet%dv%d' % (depth, version)
model = resnet_v2(input_shape=INPUT_SHAPE, depth=depth, num_classes=2)
model.compile(loss='categorical_crossentropy',
# optimizer=Adam(learning_rate=lr_schedule(0)),
optimizer='adam',
metrics=['accuracy', auc])
model.summary()
print(model_type)
print("loading weights")
model.load_weights(os.path.join(SAVES_DIR, "ResNet20v2.022-auc-0.9588.h5"))
""" Prepare Data Frame """
filenames = os.listdir(TRAIN_DATA_DIR)
random.shuffle(filenames)
categories = []
for filename in filenames:
category = filename.split('.')[0]
if category == 'bad': # bad 1
categories.append(1)
else: # good 0
categories.append(0)
df = pd.DataFrame({
'filename': filenames,
'category': categories
})
df["category"] = df["category"].replace({0: 'good', 1: 'bad'})
""" 这里用来自动划分 train 集和 val 集 """
train_df, validate_df = train_test_split(
df, test_size=0.20, random_state=42)
train_df = train_df.reset_index(drop=True)
validate_df = validate_df.reset_index(drop=True)
"""Traning Generator"""
train_datagen = ImageDataGenerator(
rotation_range=15,
rescale=1./255,
shear_range=0.1,
zoom_range=0.2,
horizontal_flip=True,
width_shift_range=0.1,
height_shift_range=0.1
)
train_generator = train_datagen.flow_from_dataframe(
train_df,
TRAIN_DATA_DIR,
x_col='filename',
y_col='category',
target_size=IMAGE_SIZE,
color_mode="grayscale",
class_mode='categorical',
batch_size=BATCH_SIZE
)
"""Prepare Testing Data"""
test_filenames = os.listdir(TEST_DATA_DIR)
test_df = pd.DataFrame({
'filename': test_filenames
})
    # TODO: unclear why this variable is named nb_samples
nb_samples = test_df.shape[0]
test_gen = ImageDataGenerator(rescale=1./255)
"""Create Testing Generator"""
test_generator = test_gen.flow_from_dataframe(
test_df,
TEST_DATA_DIR,
x_col='filename',
y_col=None,
class_mode=None,
target_size=IMAGE_SIZE,
color_mode="grayscale",
batch_size=BATCH_SIZE,
shuffle=False
) # Found 12500 images.
""" Predict """
    # Prediction takes a while, so time it
    import time
    start = time.perf_counter()
predict = model.predict_generator(
test_generator, steps=np.ceil(nb_samples / BATCH_SIZE))
    elapsed = time.perf_counter() - start
print("Prediction time used:", elapsed)
np.save(model_type + "-predict.npy", predict)
"""
# numpy average max
# test_df['category'] = np.argmax(predict, axis=-1)
# We will convert the predict category back into our generator classes by using train_generator.class_indices. It is the classes that image generator map while converting data into computer vision
label_map = dict((v, k) for k, v in train_generator.class_indices.items())
test_df['category'] = test_df['category'].replace(label_map)
# From our prepare data part. We map data with {1: 'dog', 0: 'cat'}. Now we will map the result back to dog is 1 and cat is 0
test_df['category'] = test_df['category'].replace({'dog': 1, 'cat': 0})
test_df['category'].value_counts().plot.bar()
plt.show()
"""
    # The submission expects the probability of 'good', taken here from column 0.
    # TODO: confirm that column 0 really corresponds to 'good'.
    test_df['category'] = predict[:, 0]
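    # Added note (assumption): flow_from_dataframe assigns class indices
    # alphabetically, so printing train_generator.class_indices is the quickest
    # way to confirm which column of `predict` corresponds to 'good' before
    # building the submission.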
print("Predict Samples: ")
print(type(test_df))
print(test_df.head(10))
"""See predicted result with sample images"""
"""
sample_test = test_df.head(18)
sample_test.head()
plt.figure(figsize=(12, 24))
for index, row in sample_test.iterrows():
filename = row['filename']
category = row['category']
img = load_img("../input/test1/test1/"+filename, target_size=IMAGE_SIZE)
plt.subplot(6, 3, index+1)
plt.imshow(img)
plt.xlabel(filename + '(' + "{}".format(category) + ')')
plt.tight_layout()
plt.show()
"""
"""Submission"""
submission_df = test_df.copy()
submission_df["id"] = submission_df["filename"].str.split(".").str[0]
submission_df["label"] = submission_df["category"]
submission_df.drop(["filename", "category"], axis=1, inplace=True)
submission_df.to_csv("result.csv", index=False)
if __name__ == "__main__":
main()
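# --- Illustrative sketch, not part of the original script ---
# A minimal, assumed post-processing step: turn the saved probabilities into
# hard labels. The 0.5 threshold and the good/bad mapping are assumptions,
# not something the script above defines; np is the numpy alias already used above.
def probabilities_to_labels(npy_path, threshold=0.5):
    probs = np.load(npy_path)  # shape (n_samples, 2); column 0 is assumed to be "good"
    return np.where(probs[:, 0] >= threshold, "good", "bad")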
| 31.994475 | 201 | 0.632533 |
5db442deca04960d77f919ff0276820c81d2d4b1
| 14,529 |
py
|
Python
|
sdk/python/pulumi_azure_native/datashare/v20181101preview/kusto_cluster_data_set_mapping.py
|
polivbr/pulumi-azure-native
|
09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/datashare/v20181101preview/kusto_cluster_data_set_mapping.py
|
polivbr/pulumi-azure-native
|
09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/datashare/v20181101preview/kusto_cluster_data_set_mapping.py
|
polivbr/pulumi-azure-native
|
09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
__all__ = ['KustoClusterDataSetMappingArgs', 'KustoClusterDataSetMapping']
@pulumi.input_type
class KustoClusterDataSetMappingArgs:
def __init__(__self__, *,
account_name: pulumi.Input[str],
data_set_id: pulumi.Input[str],
kind: pulumi.Input[str],
kusto_cluster_resource_id: pulumi.Input[str],
resource_group_name: pulumi.Input[str],
share_subscription_name: pulumi.Input[str],
data_set_mapping_name: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a KustoClusterDataSetMapping resource.
:param pulumi.Input[str] account_name: The name of the share account.
:param pulumi.Input[str] data_set_id: The id of the source data set.
:param pulumi.Input[str] kind: Kind of data set mapping.
Expected value is 'KustoCluster'.
:param pulumi.Input[str] kusto_cluster_resource_id: Resource id of the sink kusto cluster.
:param pulumi.Input[str] resource_group_name: The resource group name.
:param pulumi.Input[str] share_subscription_name: The name of the share subscription which will hold the data set sink.
:param pulumi.Input[str] data_set_mapping_name: The name of the data set mapping to be created.
"""
pulumi.set(__self__, "account_name", account_name)
pulumi.set(__self__, "data_set_id", data_set_id)
pulumi.set(__self__, "kind", 'KustoCluster')
pulumi.set(__self__, "kusto_cluster_resource_id", kusto_cluster_resource_id)
pulumi.set(__self__, "resource_group_name", resource_group_name)
pulumi.set(__self__, "share_subscription_name", share_subscription_name)
if data_set_mapping_name is not None:
pulumi.set(__self__, "data_set_mapping_name", data_set_mapping_name)
@property
@pulumi.getter(name="accountName")
def account_name(self) -> pulumi.Input[str]:
"""
The name of the share account.
"""
return pulumi.get(self, "account_name")
@account_name.setter
def account_name(self, value: pulumi.Input[str]):
pulumi.set(self, "account_name", value)
@property
@pulumi.getter(name="dataSetId")
def data_set_id(self) -> pulumi.Input[str]:
"""
The id of the source data set.
"""
return pulumi.get(self, "data_set_id")
@data_set_id.setter
def data_set_id(self, value: pulumi.Input[str]):
pulumi.set(self, "data_set_id", value)
@property
@pulumi.getter
def kind(self) -> pulumi.Input[str]:
"""
Kind of data set mapping.
Expected value is 'KustoCluster'.
"""
return pulumi.get(self, "kind")
@kind.setter
def kind(self, value: pulumi.Input[str]):
pulumi.set(self, "kind", value)
@property
@pulumi.getter(name="kustoClusterResourceId")
def kusto_cluster_resource_id(self) -> pulumi.Input[str]:
"""
Resource id of the sink kusto cluster.
"""
return pulumi.get(self, "kusto_cluster_resource_id")
@kusto_cluster_resource_id.setter
def kusto_cluster_resource_id(self, value: pulumi.Input[str]):
pulumi.set(self, "kusto_cluster_resource_id", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
The resource group name.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="shareSubscriptionName")
def share_subscription_name(self) -> pulumi.Input[str]:
"""
The name of the share subscription which will hold the data set sink.
"""
return pulumi.get(self, "share_subscription_name")
@share_subscription_name.setter
def share_subscription_name(self, value: pulumi.Input[str]):
pulumi.set(self, "share_subscription_name", value)
@property
@pulumi.getter(name="dataSetMappingName")
def data_set_mapping_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the data set mapping to be created.
"""
return pulumi.get(self, "data_set_mapping_name")
@data_set_mapping_name.setter
def data_set_mapping_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "data_set_mapping_name", value)
class KustoClusterDataSetMapping(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
account_name: Optional[pulumi.Input[str]] = None,
data_set_id: Optional[pulumi.Input[str]] = None,
data_set_mapping_name: Optional[pulumi.Input[str]] = None,
kind: Optional[pulumi.Input[str]] = None,
kusto_cluster_resource_id: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
share_subscription_name: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
A Kusto cluster data set mapping
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] account_name: The name of the share account.
:param pulumi.Input[str] data_set_id: The id of the source data set.
:param pulumi.Input[str] data_set_mapping_name: The name of the data set mapping to be created.
:param pulumi.Input[str] kind: Kind of data set mapping.
Expected value is 'KustoCluster'.
:param pulumi.Input[str] kusto_cluster_resource_id: Resource id of the sink kusto cluster.
:param pulumi.Input[str] resource_group_name: The resource group name.
:param pulumi.Input[str] share_subscription_name: The name of the share subscription which will hold the data set sink.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: KustoClusterDataSetMappingArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
A Kusto cluster data set mapping
:param str resource_name: The name of the resource.
:param KustoClusterDataSetMappingArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(KustoClusterDataSetMappingArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
account_name: Optional[pulumi.Input[str]] = None,
data_set_id: Optional[pulumi.Input[str]] = None,
data_set_mapping_name: Optional[pulumi.Input[str]] = None,
kind: Optional[pulumi.Input[str]] = None,
kusto_cluster_resource_id: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
share_subscription_name: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = KustoClusterDataSetMappingArgs.__new__(KustoClusterDataSetMappingArgs)
if account_name is None and not opts.urn:
raise TypeError("Missing required property 'account_name'")
__props__.__dict__["account_name"] = account_name
if data_set_id is None and not opts.urn:
raise TypeError("Missing required property 'data_set_id'")
__props__.__dict__["data_set_id"] = data_set_id
__props__.__dict__["data_set_mapping_name"] = data_set_mapping_name
if kind is None and not opts.urn:
raise TypeError("Missing required property 'kind'")
__props__.__dict__["kind"] = 'KustoCluster'
if kusto_cluster_resource_id is None and not opts.urn:
raise TypeError("Missing required property 'kusto_cluster_resource_id'")
__props__.__dict__["kusto_cluster_resource_id"] = kusto_cluster_resource_id
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
if share_subscription_name is None and not opts.urn:
raise TypeError("Missing required property 'share_subscription_name'")
__props__.__dict__["share_subscription_name"] = share_subscription_name
__props__.__dict__["data_set_mapping_status"] = None
__props__.__dict__["location"] = None
__props__.__dict__["name"] = None
__props__.__dict__["provisioning_state"] = None
__props__.__dict__["type"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:datashare/v20181101preview:KustoClusterDataSetMapping"), pulumi.Alias(type_="azure-native:datashare:KustoClusterDataSetMapping"), pulumi.Alias(type_="azure-nextgen:datashare:KustoClusterDataSetMapping"), pulumi.Alias(type_="azure-native:datashare/v20191101:KustoClusterDataSetMapping"), pulumi.Alias(type_="azure-nextgen:datashare/v20191101:KustoClusterDataSetMapping"), pulumi.Alias(type_="azure-native:datashare/v20200901:KustoClusterDataSetMapping"), pulumi.Alias(type_="azure-nextgen:datashare/v20200901:KustoClusterDataSetMapping"), pulumi.Alias(type_="azure-native:datashare/v20201001preview:KustoClusterDataSetMapping"), pulumi.Alias(type_="azure-nextgen:datashare/v20201001preview:KustoClusterDataSetMapping"), pulumi.Alias(type_="azure-native:datashare/v20210801:KustoClusterDataSetMapping"), pulumi.Alias(type_="azure-nextgen:datashare/v20210801:KustoClusterDataSetMapping")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(KustoClusterDataSetMapping, __self__).__init__(
'azure-native:datashare/v20181101preview:KustoClusterDataSetMapping',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'KustoClusterDataSetMapping':
"""
Get an existing KustoClusterDataSetMapping resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = KustoClusterDataSetMappingArgs.__new__(KustoClusterDataSetMappingArgs)
__props__.__dict__["data_set_id"] = None
__props__.__dict__["data_set_mapping_status"] = None
__props__.__dict__["kind"] = None
__props__.__dict__["kusto_cluster_resource_id"] = None
__props__.__dict__["location"] = None
__props__.__dict__["name"] = None
__props__.__dict__["provisioning_state"] = None
__props__.__dict__["type"] = None
return KustoClusterDataSetMapping(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="dataSetId")
def data_set_id(self) -> pulumi.Output[str]:
"""
The id of the source data set.
"""
return pulumi.get(self, "data_set_id")
@property
@pulumi.getter(name="dataSetMappingStatus")
def data_set_mapping_status(self) -> pulumi.Output[str]:
"""
Gets the status of the data set mapping.
"""
return pulumi.get(self, "data_set_mapping_status")
@property
@pulumi.getter
def kind(self) -> pulumi.Output[str]:
"""
Kind of data set mapping.
Expected value is 'KustoCluster'.
"""
return pulumi.get(self, "kind")
@property
@pulumi.getter(name="kustoClusterResourceId")
def kusto_cluster_resource_id(self) -> pulumi.Output[str]:
"""
Resource id of the sink kusto cluster.
"""
return pulumi.get(self, "kusto_cluster_resource_id")
@property
@pulumi.getter
def location(self) -> pulumi.Output[str]:
"""
Location of the sink kusto cluster.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Name of the azure resource
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> pulumi.Output[str]:
"""
Provisioning state of the data set mapping.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Type of the azure resource
"""
return pulumi.get(self, "type")
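# --- Illustrative usage sketch, not part of the generated SDK file ---
# All names and Azure identifiers below are placeholders; in a real Pulumi
# program this construction would live in the program's __main__.py.
def _example_kusto_cluster_data_set_mapping() -> KustoClusterDataSetMapping:
    return KustoClusterDataSetMapping(
        "example-mapping",
        account_name="example-account",
        data_set_id="00000000-0000-0000-0000-000000000000",
        kind="KustoCluster",
        kusto_cluster_resource_id="placeholder-kusto-cluster-resource-id",
        resource_group_name="example-rg",
        share_subscription_name="example-share-subscription")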
| 45.121118 | 973 | 0.663501 |
649f699e655314525445c397ff96dc397a1897f6
| 4,192 |
py
|
Python
|
api/managers/auth.py
|
aarongrisez/bellga.me2API
|
324fe95f455f2c8053473202f294a0f1d100305b
|
[
"MIT"
] | null | null | null |
api/managers/auth.py
|
aarongrisez/bellga.me2API
|
324fe95f455f2c8053473202f294a0f1d100305b
|
[
"MIT"
] | null | null | null |
api/managers/auth.py
|
aarongrisez/bellga.me2API
|
324fe95f455f2c8053473202f294a0f1d100305b
|
[
"MIT"
] | null | null | null |
from fastapi import Request
from functools import wraps
from api.config.settings import settings
from api.exceptions.auth import AuthError
from six.moves.urllib.request import urlopen
import json
from jose import jwt
import time
def get_token_auth_header(request: Request):
"""Obtains the access token from the Authorization Header
cp from https://github.com/auth0-samples/auth0-python-api-samples/blob/5471f305f158fd86e0b52c21cc81c2010dbfe20a/00-Starter-Seed/server.py
"""
auth = request.headers.get("Authorization", None)
if not auth:
raise AuthError({"code": "authorization_header_missing",
"description":
"Authorization header is expected"}, 401)
parts = auth.split()
if parts[0].lower() != "bearer":
raise AuthError({"code": "invalid_header",
"description":
"Authorization header must start with"
" Bearer"}, 401)
elif len(parts) == 1:
raise AuthError({"code": "invalid_header",
"description": "Token not found"}, 401)
elif len(parts) > 2:
raise AuthError({"code": "invalid_header",
"description":
"Authorization header must be"
" Bearer token"}, 401)
token = parts[1]
return token
def require_auth(f):
"""Determines if the access token is valid
"""
@wraps(f)
def decorated(*args, **kwargs):
if "request" not in kwargs.keys():
raise ValueError(f"All protected endpoints must have the request passed explicitly. \
No request object was passed through the current router")
token = get_token_auth_header(kwargs["request"])
jsonurl = urlopen(f"{settings.auth0_domain}.well-known/jwks.json")
jwks = json.loads(jsonurl.read())
try:
unverified_header = jwt.get_unverified_header(token)
except jwt.JWTError:
raise AuthError({"code": "invalid_header",
"description":
"Invalid header. "
"Use an RS256 signed JWT Access Token"}, 401)
if unverified_header["alg"] == "HS256":
raise AuthError({"code": "invalid_header",
"description":
"Invalid header. "
"Use an RS256 signed JWT Access Token"}, 401)
rsa_key = {}
for key in jwks["keys"]:
if key["kid"] == unverified_header["kid"]:
rsa_key = {
"kty": key["kty"],
"kid": key["kid"],
"use": key["use"],
"n": key["n"],
"e": key["e"]
}
if rsa_key:
try:
payload = jwt.decode(
token,
rsa_key,
algorithms=settings.jwt_signing_algo,
audience=settings.api_identifier,
issuer=settings.auth0_domain
)
except jwt.ExpiredSignatureError:
raise AuthError({"code": "token_expired",
"description": "token signature is expired"}, 401)
except jwt.JWTClaimsError as e:
raise AuthError({"code": "invalid_claims",
"description":
"incorrect claims,"
" please check the audience and issuer"}, 401)
except Exception as e:
raise AuthError({"code": "invalid_header",
"description":
"Unable to parse authentication"
" token."}, 401)
kwargs["request"].state.user_info = payload
return f(*args, **kwargs)
raise AuthError({"code": "invalid_header",
"description": "Unable to find appropriate key"}, 401)
return decorated
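# --- Illustrative usage sketch, not part of the original module ---
# The router, path and response body are placeholders; the only detail taken
# from require_auth itself is that FastAPI passes `request` as a keyword
# argument, which the decorator requires in order to read the bearer token.
from fastapi import APIRouter
_example_router = APIRouter()
@_example_router.get("/protected")
@require_auth
def _protected_example(request: Request):
    # request.state.user_info is populated by require_auth once the JWT is verified
    return {"claims": request.state.user_info}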
| 41.92 | 141 | 0.505487 |
a931af27f78593eb5b28a7399dbc764809df83d0
| 456 |
py
|
Python
|
src/widgets/food.py
|
oDallas/Snaky
|
895ba053f7be150cc6c99ae5aaa7cb31bce46f9c
|
[
"MIT"
] | null | null | null |
src/widgets/food.py
|
oDallas/Snaky
|
895ba053f7be150cc6c99ae5aaa7cb31bce46f9c
|
[
"MIT"
] | null | null | null |
src/widgets/food.py
|
oDallas/Snaky
|
895ba053f7be150cc6c99ae5aaa7cb31bce46f9c
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from curses import color_pair
from random import randint
from src.conf import MAX_Y, MAX_X
__all__ = ['Food']
class Food:
def __init__(self, window):
self.coor = (randint(1, MAX_Y), randint(1, MAX_X))
self.window = window
def render(self):
self.window.addstr(*self.coor, "█", color_pair(3))
def renew(self):
self.coor = (randint(1, MAX_Y), randint(1, MAX_X))
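# --- Illustrative sketch, not part of the original widget ---
# Assumes curses colours (including pair 3) are initialised elsewhere by the
# game; `stdscr` would be the window that curses.wrapper() passes in.
def _demo(stdscr):
    food = Food(stdscr)
    food.render()  # draw the food block at its random coordinates
    food.renew()   # respawn it at a new random spot, e.g. after being eaten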
| 21.714286 | 58 | 0.629386 |
e4d57014b32d4386661955821a0c8d13b611dd37
| 2,502 |
py
|
Python
|
zoomus/components/report.py
|
peerlearning/zoomus
|
471b6a4338f4ce6f44249bafae177f2514a9b672
|
[
"Apache-2.0"
] | 1 |
2021-08-19T11:07:06.000Z
|
2021-08-19T11:07:06.000Z
|
zoomus/components/report.py
|
avantifellows/zoomus
|
471b6a4338f4ce6f44249bafae177f2514a9b672
|
[
"Apache-2.0"
] | null | null | null |
zoomus/components/report.py
|
avantifellows/zoomus
|
471b6a4338f4ce6f44249bafae177f2514a9b672
|
[
"Apache-2.0"
] | 1 |
2021-08-19T11:07:05.000Z
|
2021-08-19T11:07:05.000Z
|
"""Zoom.us REST API Python Client -- Report component"""
from __future__ import absolute_import
from zoomus import util
from zoomus.components import base
class ReportComponent(base.BaseComponent):
"""Component dealing with all report related matters"""
def get_account_report(self, **kwargs):
util.require_keys(kwargs, ["start_time", "end_time"], kwargs)
kwargs["from"] = util.date_to_str(kwargs["start_time"])
del kwargs["start_time"]
kwargs["to"] = util.date_to_str(kwargs["end_time"])
del kwargs["end_time"]
return self.post_request("/report/getaccountreport", params=kwargs)
def get_user_report(self, **kwargs):
util.require_keys(kwargs, ["start_time", "end_time"], kwargs)
kwargs["from"] = util.date_to_str(kwargs["start_time"])
del kwargs["start_time"]
kwargs["to"] = util.date_to_str(kwargs["end_time"])
del kwargs["end_time"]
return self.post_request("/report/getuserreport", params=kwargs)
def get_daily_report(self, **kwargs):
util.require_keys(kwargs, ["month", "year"], kwargs)
return self.post_request("/report/getdailyreport", params=kwargs)
class ReportComponentV2(base.BaseComponent):
def get_user_report(self, **kwargs):
util.require_keys(kwargs, ["user_id", "start_time", "end_time"])
kwargs["from"] = util.date_to_str(kwargs["start_time"])
del kwargs["start_time"]
kwargs["to"] = util.date_to_str(kwargs["end_time"])
del kwargs["end_time"]
return self.get_request(
"/report/users/{}/meetings".format(kwargs.get("user_id")), params=kwargs
)
def get_meeting_detail_report(self, **kwargs):
"""
/report/meetings/{meetingId}/participants
"""
util.require_keys(kwargs, ["meeting_id"])
return self.get_request(
"/report/meetings/{}/participants".format(kwargs.get("meeting_id")), params=kwargs
)
def get_account_report(self, **kwargs):
util.require_keys(kwargs, ["start_time", "end_time"])
kwargs["from"] = util.date_to_str(kwargs["start_time"])
del kwargs["start_time"]
kwargs["to"] = util.date_to_str(kwargs["end_time"])
del kwargs["end_time"]
return self.get_request("/report/users", params=kwargs)
def get_daily_report(self, **kwargs):
util.require_keys(kwargs, ["month", "year"])
return self.get_request("/report/daily", params=kwargs)
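# --- Illustrative usage sketch, not part of the original component ---
# Component classes are normally reached through a configured client rather
# than instantiated directly; the `client` wiring and date values below are
# placeholders.
def _example_daily_report(client):
    # `client` is assumed to expose this component as `client.report`.
    return client.report.get_daily_report(year=2021, month=1)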
| 39.09375 | 94 | 0.654277 |
04d99f8f21bfca4fd3a4c910d45914d28181eab8
| 2,722 |
py
|
Python
|
mercado_livre_corrigido.py
|
farofaDeCachorro/myprojects
|
607a97f3fe273e6f41fef43da13f47d38632c8ac
|
[
"CC0-1.0"
] | null | null | null |
mercado_livre_corrigido.py
|
farofaDeCachorro/myprojects
|
607a97f3fe273e6f41fef43da13f47d38632c8ac
|
[
"CC0-1.0"
] | null | null | null |
mercado_livre_corrigido.py
|
farofaDeCachorro/myprojects
|
607a97f3fe273e6f41fef43da13f47d38632c8ac
|
[
"CC0-1.0"
] | null | null | null |
from bs4 import BeautifulSoup
import requests
import sys
def pega_items(caixa_produto):
    # for each product div found, do the following:
    for produtos_achados in caixa_produto:
        # grab the desired attributes from the following items
        produto_descricao = produtos_achados.find('h2', class_='ui-search-item__title')
        produto_preco = produtos_achados.find('span', class_='price-tag-fraction')
        produto_url = produtos_achados.find('a', class_="ui-search-item__group__element ui-search-link")['href']
        # from here to the end of the function: check whether each object exists and contains text; if so, print it, otherwise ignore it
        if(produto_descricao and produto_descricao.text != ""):
            print(f'\nDescription: {produto_descricao.text}')
        if(produto_preco and produto_preco.text != ""):
            print(f'Price: R${produto_preco.text},00')
        if(produto_url and produto_url != ""):
            print(f'Url: {produto_url}')
def pega_numero_paginas():
    while True:
        try:
            numero_paginas = int(input('How many result pages do you want to view? '))  # ask how many pages,
            return numero_paginas  # if it works, return the value and leave the loop,
            break  # otherwise just ignore it (unreachable after the return)
        except:
            pass
num_paginas = pega_numero_paginas()
if(num_paginas > 40):  # if the number of pages is greater than 40, which is the maximum shown by Mercado Livre,
    num_paginas = 40  # simply cap the value at 40
nome_produto = str(input('Enter the name of the desired product: '))  # ask for the product name and convert it to a string
url = f'https://lista.mercadolivre.com.br/{nome_produto}'  # build the url with the product name
# loop to fetch every page the user asked for
for i in range(0,num_paginas):
    site_html = requests.get(url)  # fetch the page html
    site_bs4 = BeautifulSoup(site_html.text, 'html.parser')  # parse the page html into a format accepted by Beautiful Soup
    caixa_produto = site_bs4.findAll('div', class_='ui-search-result__content-wrapper')  # grab the products and store them in a list
    print(f'\n\nPage {i+1}:')  # print the page number for readability
    pega_items(caixa_produto)  # call pega_items passing the product list caixa_produto as a parameter
    try:
        url = site_bs4.find('a', class_='andes-pagination__link ui-search-link')['href']  # try to grab the next page url; if it succeeds, use it
    except:
        sys.exit()  # otherwise close the application, since there are probably no more result pages
#get_item()
| 43.903226 | 140 | 0.695077 |
a67a97e84cf9a9b7c24df9e017f29cfee1a118d8
| 1,133 |
py
|
Python
|
test/test_timesheet_config.py
|
MPW1412/kimai-python
|
7c89b0866b85fbc4b1092b30eca21f1be48db533
|
[
"MIT"
] | 6 |
2019-12-19T16:01:58.000Z
|
2022-01-19T18:10:16.000Z
|
test/test_timesheet_config.py
|
MPW1412/kimai-python
|
7c89b0866b85fbc4b1092b30eca21f1be48db533
|
[
"MIT"
] | 4 |
2020-05-16T23:33:15.000Z
|
2021-07-06T20:53:32.000Z
|
test/test_timesheet_config.py
|
MPW1412/kimai-python
|
7c89b0866b85fbc4b1092b30eca21f1be48db533
|
[
"MIT"
] | 3 |
2020-05-16T23:14:13.000Z
|
2021-06-30T08:53:11.000Z
|
# coding: utf-8
"""
Kimai 2 - API Docs
JSON API for the Kimai 2 time-tracking software. Read more about its usage in the [API documentation](https://www.kimai.org/documentation/rest-api.html) and then download a [Swagger file](doc.json) for import e.g. in Postman. Be aware: it is not yet considered stable and BC breaks might happen. # noqa: E501
OpenAPI spec version: 0.5
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import kimai_python
from kimai_python.models.timesheet_config import TimesheetConfig # noqa: E501
from kimai_python.rest import ApiException
class TestTimesheetConfig(unittest.TestCase):
"""TimesheetConfig unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testTimesheetConfig(self):
"""Test TimesheetConfig"""
# FIXME: construct object with mandatory attributes with example values
# model = kimai_python.models.timesheet_config.TimesheetConfig() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| 27.634146 | 314 | 0.714034 |
bd29173e3815e232d49ae2d26396e15bbfd8b5f0
| 1,960 |
py
|
Python
|
telstar/tests/sqlalchemy_test_consumer.py
|
Bitspark/telstar
|
aec3b6b54eb3869fa60e6e3b3066692b1493d37a
|
[
"MIT"
] | 5 |
2019-06-20T13:55:11.000Z
|
2020-10-26T08:55:15.000Z
|
telstar/tests/sqlalchemy_test_consumer.py
|
Bitspark/telstar
|
aec3b6b54eb3869fa60e6e3b3066692b1493d37a
|
[
"MIT"
] | 146 |
2019-06-23T14:12:51.000Z
|
2021-02-01T05:04:18.000Z
|
telstar/tests/sqlalchemy_test_consumer.py
|
Bitspark/telstar
|
aec3b6b54eb3869fa60e6e3b3066692b1493d37a
|
[
"MIT"
] | 1 |
2019-06-21T18:39:19.000Z
|
2019-06-21T18:39:19.000Z
|
import logging
import os
import random
import sys
from time import sleep
import pymysql
import redis
from sqlalchemy import BIGINT, Column, String, UniqueConstraint, create_engine
from sqlalchemy.orm import sessionmaker
from telstar import config as tlconfig
from telstar.com import Message
from telstar.com.sqla import Base, StagedMessageRepository
from telstar.consumer import MultiConsumer
logger = logging.getLogger('telstar')
logger.addHandler(logging.StreamHandler())
logger.setLevel(logging.DEBUG)
pymysql.install_as_MySQLdb()
link = redis.from_url(os.environ["REDIS"])
engine = create_engine(os.environ["DATABASE"])
Session = sessionmaker(bind=engine)
session = Session(autocommit=True)
tlconfig.staging.repository = StagedMessageRepository
tlconfig.staging.repository.setup(session)
class Test(Base):
__tablename__ = "test"
id = Column(BIGINT(), primary_key=True)
number = Column(BIGINT)
group_name = Column(String(length=80))
topic = Column(String(length=80))
__table_args__ = (UniqueConstraint('number', 'group_name', 'topic', name='all_together'),)
if __name__ == "__main__":
if len(sys.argv) > 1 and sys.argv[1] == "setup":
print("Recreating table in order to start from scratch")
Base.metadata.drop_all(engine)
Base.metadata.create_all(engine)
def simple(consumer, record: Message, done):
with session.begin():
t = Test(number=int(record.data["value"]), group_name=consumer.group_name, topic=record.stream)
session.add(t)
sleep(random.randrange(int(os.environ.get("SLEEPINESS"))) / 100)
done()
MultiConsumer(link=link,
group_name=os.environ.get("GROUP_NAME"),
consumer_name=os.environ.get("CONSUMER_NAME"),
config={
os.environ["STREAM_NAME"]: simple,
os.environ["STREAM_NAME_TWO"]: simple
}).run()
| 30.153846 | 107 | 0.688265 |
fa532060734080ef929cd591258d37af09354c0b
| 25,236 |
py
|
Python
|
src/ms_io.py
|
statisticalbiotechnology/specpride
|
6240dfbb1fcf5df0115512c604f5dc37a003355a
|
[
"Apache-2.0"
] | 2 |
2020-01-14T12:02:52.000Z
|
2020-01-14T14:03:30.000Z
|
src/ms_io.py
|
ypriverol/specpride
|
9fe9101bd6bb04592aa8f020f93a75ae3a306b26
|
[
"Apache-2.0"
] | 5 |
2019-12-09T10:59:10.000Z
|
2020-01-16T14:32:00.000Z
|
src/ms_io.py
|
statisticalbiotechnology/specpride
|
6240dfbb1fcf5df0115512c604f5dc37a003355a
|
[
"Apache-2.0"
] | 9 |
2020-01-14T12:26:54.000Z
|
2020-01-16T08:26:06.000Z
|
import logging
import os
from typing import Dict, Iterable, List
import numpy as np
import pandas as pd
import pyopenms
import pyteomics.mgf
import pyteomics.mzid
import pyteomics.mzml
import pyteomics.mztab
import pyteomics.mzxml
import spectrum_utils.spectrum as sus
import tqdm
from lxml.etree import LxmlError
logger = logging.getLogger('cluster_representative')
def read_spectra(filename: str) -> Iterable[sus.MsmsSpectrum]:
"""
Read MS/MS spectra from a peak file.
Supported formats: MGF, mzML, mzXML.
Parameters
----------
filename : str
The peak file name.
Returns
-------
Iterable[sus.MsmsSpectrum]
An iterable of spectra in the given peak file.
"""
ext = os.path.splitext(filename.lower())[1]
if ext == '.mgf':
_read_spectra = _read_spectra_mgf
elif ext == '.mzml':
_read_spectra = _read_spectra_mzml
elif ext == '.mzxml':
_read_spectra = _read_spectra_mzxml
else:
logger.error('Unsupported peak file format (supported formats: MGF, '
'mzML, mzXML)')
raise ValueError('Unsupported peak file format (supported formats: '
'MGF, mzML, mzXML)')
for spec in _read_spectra(filename):
if spec is not None:
yield spec
def read_spectra_list(filename: str) -> List[sus.MsmsSpectrum]:
"""
Read MS/MS spectra from a peak file.
Supported formats: MGF, mzML, mzXML.
Parameters
----------
filename : str
The peak file name.
Returns
-------
List[sus.MsmsSpectrum]
An list of spectra in the given peak file.
"""
return list(read_spectra(filename))
def _read_spectra_mgf(filename: str) -> Iterable[sus.MsmsSpectrum]:
"""
Read MS/MS spectra from an MGF file.
Parameters
----------
filename : str
The MGF file name.
Returns
-------
Iterable[sus.MsmsSpectrum]
An iterable of spectra in the given MGF file.
"""
for spectrum_dict in tqdm.tqdm(pyteomics.mgf.read(filename),
desc='Spectra read', unit='spectra'):
identifier = spectrum_dict['params']['title']
mz_array = spectrum_dict['m/z array']
intensity_array = spectrum_dict['intensity array']
retention_time = float(spectrum_dict['params'].get('rtinseconds', -1))
precursor_mz = float(spectrum_dict['params']['pepmass'][0])
if 'charge' in spectrum_dict['params']:
precursor_charge = int(spectrum_dict['params']['charge'][0])
        else:
            # Skip spectra without a precursor charge instead of aborting the
            # whole file; this mirrors the mzML reader's behaviour.
            continue
spectrum = sus.MsmsSpectrum(
identifier, precursor_mz, precursor_charge, mz_array,
intensity_array, None, retention_time)
spectrum.filename = spectrum_dict['params'].get(
'filename', os.path.splitext(os.path.basename(filename))[0])
if 'scan' in spectrum_dict['params']:
spectrum.scan = spectrum_dict['params']['scan']
if 'cluster' in spectrum_dict['params']:
spectrum.cluster = spectrum_dict['params']['cluster']
yield spectrum
def _read_spectra_mzml(filename: str) -> Iterable[sus.MsmsSpectrum]:
"""
Read MS/MS spectra from an mzML file.
Parameters
----------
filename : str
The mzML file name.
Returns
-------
Iterable[sus.MsmsSpectrum]
An iterable of spectra in the given mzML file.
"""
with pyteomics.mzml.MzML(filename) as f_in:
try:
for spectrum_dict in tqdm.tqdm(f_in, desc='Spectra read',
unit='spectra'):
if int(spectrum_dict.get('ms level', -1)) == 2:
precursor = spectrum_dict['precursorList']['precursor'][0]
precursor_ion = (precursor['selectedIonList']
['selectedIon'][0])
if 'charge state' in precursor_ion:
precursor_charge = int(precursor_ion['charge state'])
elif 'possible charge state' in precursor_ion:
precursor_charge = int(
precursor_ion['possible charge state'])
else:
logger.warning('Unknown precursor charge, skipped '
'spectrum...')
continue
spectrum = sus.MsmsSpectrum(
spectrum_dict['id'],
precursor_ion['selected ion m/z'],
precursor_charge,
spectrum_dict['m/z array'],
spectrum_dict['intensity array'],
None,
(spectrum_dict['scanList']['scan'][0]
['scan start time']))
spectrum.filename = spectrum_dict.get(
'filename',
os.path.splitext(os.path.basename(filename))[0])
if 'scan' in spectrum_dict:
spectrum.scan = str(int(spectrum_dict['scan']))
elif 'scan=' in spectrum.identifier:
spectrum.scan = int(
spectrum.identifier[
spectrum.identifier.find('scan=')
+ len('scan='):])
if 'cluster' in spectrum_dict:
spectrum.cluster = int(spectrum_dict['cluster'])
yield spectrum
except LxmlError as e:
logger.error('Failed to read file %s: %s', filename, e)
def _read_spectra_mzxml(filename: str) -> Iterable[sus.MsmsSpectrum]:
"""
Read MS/MS spectra from an mzXML file.
Attention: Reading intermediate mzXML files with clustering information is
not supported, only original mzXML files.
Parameters
----------
filename : str
The mzXML file name.
Returns
-------
Iterable[sus.MsmsSpectrum]
An iterable of spectra in the given mzXML file.
"""
with pyteomics.mzxml.MzXML(filename) as f_in:
try:
for spectrum_dict in tqdm.tqdm(f_in, desc='Spectra read',
unit='spectra'):
if int(spectrum_dict.get('msLevel', -1)) == 2:
if 'precursorCharge' in spectrum_dict['precursorMz'][0]:
precursor_charge = (spectrum_dict['precursorMz'][0]
['precursorCharge'])
else:
logger.warning('Unknown precursor charge, skipped '
'spectrum...')
continue
spectrum = sus.MsmsSpectrum(
spectrum_dict['id'],
spectrum_dict['precursorMz'][0]['precursorMz'],
precursor_charge,
spectrum_dict['m/z array'],
spectrum_dict['intensity array'],
None,
spectrum_dict['retentionTime'])
spectrum.scan = int(spectrum_dict['id'])
spectrum.filename = os.path.splitext(
os.path.basename(filename))[0]
yield spectrum
except LxmlError as e:
logger.warning('Failed to read file %s: %s', filename, e)
###############################################################################
def read_clusters(filename: str, fmt: str) -> Dict[str, int]:
"""
Read cluster assignments.
Parameters
----------
filename : str
The cluster assignment file name.
fmt : str
Format of the cluster assignment file. Supported formats:
"falcon", "maracluster", "spectra-cluster", "ms-cluster".
Returns
-------
Dict[str, int]
A dictionary with as keys the spectrum identifiers (format
"{filename}:scan:{scan}") and as value the cluster index.
"""
if fmt == 'falcon':
return _read_clusters_falcon(filename)
elif fmt == 'maracluster':
return _read_clusters_maracluster(filename)
elif fmt == 'spectra-cluster':
return _read_clusters_spectracluster(filename)
elif fmt == 'ms-cluster':
return _read_clusters_mscluster(filename)
elif fmt == 'mscrush':
return _read_clusters_mscrush(filename)
else:
raise ValueError('Unsupported cluster file format (supported formats: '
'falcon, MaRaCluster, spectra-cluster, MS-Cluster, '
'msCRUSH)')
def _read_clusters_falcon(filename: str) -> Dict[str, int]:
"""
Read falcon cluster assignments.
Parameters
----------
filename : str
The falcon cluster assignment file name.
Returns
-------
Dict[str, int]
A dictionary with as keys the spectrum identifiers (format
"{filename}:scan:{scan}") and as value the cluster index.
"""
clusters = pd.read_csv(filename, comment='#')
# Assign noise points to singleton clusters.
noise_mask = clusters['cluster'] == -1
num_clusters = clusters['cluster'].max() + 1
clusters.loc[noise_mask, 'cluster'] = np.arange(
num_clusters, num_clusters + noise_mask.sum())
clusters['identifier'] = (clusters['identifier'].str.split(':').str[2:]
.str.join(':'))
return (clusters[['identifier', 'cluster']]
.set_index('identifier', drop=True).squeeze().to_dict())
def _read_clusters_maracluster(filename: str) -> Dict[str, int]:
"""
Read MaRaCluster cluster assignments.
Parameters
----------
filename : str
The MaRaCluster cluster assignment file name.
Returns
-------
Dict[str, int]
A dictionary with as keys the spectrum identifiers (format
"{filename}:scan:{scan}") and as value the cluster index.
"""
with open(filename) as f_in, tqdm.tqdm(
desc='Cluster assignments read', unit='spectra') as progress_bar:
clusters, cluster_i = {}, 0
for line in f_in:
if not line.strip():
cluster_i += 1
else:
fn, scan, *_ = line.split('\t')
clusters[f'{os.path.splitext(os.path.basename(fn))[0]}:scan:'
f'{scan}'] = cluster_i
progress_bar.update(1)
return clusters
def _read_clusters_mscluster(dir_name: str) -> Dict[str, int]:
"""
Read MS-Cluster cluster assignments.
Parameters
----------
dir_name : str
The MS-Cluster cluster assignment directory.
Returns
-------
Dict[str, int]
A dictionary with as keys the spectrum identifiers (format
"{filename}:scan:{scan}") and as value the cluster index.
"""
filenames = pd.read_csv(
os.path.join(dir_name, 'mscluster_0_spec_list.txt'), header=None,
squeeze=True)
filenames = filenames.apply(
lambda x: os.path.splitext(os.path.basename(x))[0])
clusters, cluster_i = {}, -1
with tqdm.tqdm(desc='Cluster assignments read',
unit='spectra') as progress_bar:
for filename in os.listdir(os.path.join(dir_name, 'clust')):
if filename.endswith('.clust'):
with open(os.path.join(dir_name, 'clust', filename)) as f_in:
for line in f_in:
if line.startswith('mscluster'):
cluster_i += 1
elif not line.isspace():
splits = line.split('\t')
clusters[f'{filenames.iat[int(splits[1])]}:scan:'
f'{int(splits[2])}'] = cluster_i
progress_bar.update(1)
return clusters
def _read_clusters_mscrush(dir_name: str) -> Dict[str, int]:
"""
Read msCRUSH cluster assignments.
Parameters
----------
dir_name : str
The msCRUSH cluster assignment directory.
Returns
-------
Dict[str, int]
A dictionary with as keys the spectrum identifiers (format
"{filename}:scan:{scan}") and as value the cluster index.
"""
clusters = []
with tqdm.tqdm(desc='Cluster assignments read',
unit='spectra') as progress_bar:
for filename in os.listdir(dir_name):
if filename.endswith('.txt'):
clusters_file = pd.read_csv(os.path.join(dir_name, filename),
sep='\t')
clusters_file['Titles'] = (clusters_file['Titles']
.str.split('|'))
clusters_file = clusters_file.explode('Titles')
clusters_file['Titles'] = clusters_file['Titles'].str.strip()
if len(clusters) > 0:
clusters_file['ID'] += clusters[-1].iat[-1, 1] + 1
clusters.append(clusters_file[['Titles', 'ID']])
progress_bar.update(len(clusters_file))
return (pd.concat(clusters, ignore_index=True)
.set_index('Titles', drop=True).squeeze().to_dict())
def _read_clusters_spectracluster(filename: str) -> Dict[str, int]:
"""
Read spectra-cluster cluster assignments.
Parameters
----------
filename : str
The spectra-cluster cluster assignment file name.
Returns
-------
Dict[str, int]
A dictionary with as keys the spectrum identifiers (format
"{filename}:scan:{scan}") and as value the cluster index.
"""
with open(filename) as f_in, tqdm.tqdm(
desc='Cluster assignments read', unit='spectra') as progress_bar:
clusters, cluster_i = {}, 0
for line in f_in:
if line.startswith('=Cluster='):
cluster_i += 1
elif line.startswith('SPEC'):
fn = line[line.find('#file=') + len('#file='):line.find('#id')]
scan_start_i = line.find('scan=') + len('scan=')
scan_stop_i = line.find('\t', scan_start_i)
scan = line[scan_start_i:scan_stop_i]
clusters[f'{os.path.splitext(os.path.basename(fn))[0]}:scan:'
f'{scan}'] = cluster_i
progress_bar.update(1)
return clusters
###############################################################################
def write_spectra(filename: str, spectra: Iterable[sus.MsmsSpectrum]) -> None:
"""
Write the given spectra to a peak file.
Supported formats: MGF, mzML.
Parameters
----------
filename : str
The file name where the spectra will be written.
spectra : Iterable[sus.MsmsSpectrum]
The spectra to be written to the peak file.
"""
ext = os.path.splitext(filename.lower())[1]
if ext == '.mgf':
_write_spectra_mgf(filename, spectra)
elif ext == '.mzml':
_write_spectra_mzml(filename, spectra)
else:
logger.error('Unsupported peak file format (supported formats: MGF, '
'mzML)')
raise ValueError('Unsupported peak file format (supported formats: '
'MGF, mzML)')
def _write_spectra_mgf(filename: str, spectra: Iterable[sus.MsmsSpectrum]) \
-> None:
"""
Write the given spectra to an MGF file.
Parameters
----------
filename : str
The MGF file name where the spectra will be written.
spectra : Iterable[sus.MsmsSpectrum]
The spectra to be written to the MGF file.
"""
with open(filename, 'w') as f_out:
pyteomics.mgf.write(_spectra_to_dicts(spectra), f_out, use_numpy=True)
def _spectra_to_dicts(spectra: Iterable[sus.MsmsSpectrum]) -> Iterable[Dict]:
"""
Convert MsmsSpectrum objects to Pyteomics MGF spectrum dictionaries.
Parameters
----------
spectra : Iterable[sus.MsmsSpectrum]
The spectra to be converted to Pyteomics MGF dictionaries.
Returns
-------
Iterable[Dict]
The given spectra as Pyteomics MGF dictionaries.
"""
for spectrum in tqdm.tqdm(spectra, desc='Spectra written',
unit='spectra'):
params = {'title': spectrum.identifier,
'pepmass': spectrum.precursor_mz,
'charge': spectrum.precursor_charge}
if hasattr(spectrum, 'retention_time'):
params['rtinseconds'] = spectrum.retention_time
if hasattr(spectrum, 'filename'):
params['filename'] = spectrum.filename
if hasattr(spectrum, 'scan'):
params['scan'] = spectrum.scan
if hasattr(spectrum, 'cluster'):
params['cluster'] = spectrum.cluster
yield {'params': params,
'm/z array': spectrum.mz,
'intensity array': spectrum.intensity}
def _write_spectra_mzml(filename: str, spectra: Iterable[sus.MsmsSpectrum]) \
-> None:
"""
Write the given spectra to an mzML file.
Parameters
----------
filename : str
The mzML file name where the spectra will be written.
spectra : Iterable[sus.MsmsSpectrum]
The spectra to be written to the mzML file.
"""
experiment = pyopenms.MSExperiment()
for spectrum in tqdm.tqdm(spectra, desc='Spectra written', unit='spectra'):
mzml_spectrum = pyopenms.MSSpectrum()
mzml_spectrum.setMSLevel(2)
mzml_spectrum.setNativeID(spectrum.identifier)
precursor = pyopenms.Precursor()
precursor.setMZ(spectrum.precursor_mz)
precursor.setCharge(spectrum.precursor_charge)
mzml_spectrum.setPrecursors([precursor])
mzml_spectrum.set_peaks([spectrum.mz, spectrum.intensity])
if hasattr(spectrum, 'retention_time'):
mzml_spectrum.setRT(spectrum.retention_time)
if hasattr(spectrum, 'filename'):
mzml_spectrum.setMetaValue(
'filename', str.encode(spectrum.filename))
if hasattr(spectrum, 'scan'):
mzml_spectrum.setMetaValue('scan', str.encode(str(spectrum.scan)))
if hasattr(spectrum, 'cluster'):
mzml_spectrum.setMetaValue(
'cluster', str.encode(str(spectrum.cluster)))
experiment.addSpectrum(mzml_spectrum)
pyopenms.MzMLFile().store(filename, experiment)
###############################################################################
def read_psms(filename: str) -> pd.DataFrame:
"""
Read spectrum identifications.
Parameters
----------
filename : str
The PSM file name. Supported formats: mzTab, mzIdentML, JSON, MaxQuant.
Returns
-------
pd.DataFrame
A Pandas DataFrame with as rows the PSMs and as columns "sequence" and
"score", indexed by their spectrum reference in the form of
{filename}:scan:{scan}.
"""
ext = os.path.splitext(filename.lower())[1]
if ext == '.mztab':
return _read_psms_mztab(filename)
elif ext == '.mzidentml' or ext == '.mzid':
return _read_psms_mzidentml(filename)
elif ext == '.idxml':
return _read_psms_idxml(filename)
elif ext == '.json':
return _read_psms_json(filename)
elif os.path.basename(filename) == 'msms.txt':
return _read_psms_maxquant(filename)
else:
raise ValueError('Unsupported PSM file format (supported formats: '
'mzTab, mzIdentML, JSON, MaxQuant)')
def _read_psms_mztab(filename: str) -> pd.DataFrame:
"""
Read mzTab spectrum identifications.
To correctly have the PSM index the "spectra_ref" column of the mzTab file
should use the scan number specification.
Parameters
----------
filename : str
The mzTab file name.
Returns
-------
pd.DataFrame
A Pandas DataFrame with as rows the PSMs and as columns "sequence" and
"score", indexed by their spectrum reference in the form of
{filename}:scan:{scan}.
"""
mztab = pyteomics.mztab.MzTab(filename)
run_names = {key[key.find('ms_run['):key.find('-location')]:
os.path.splitext(os.path.basename(value))[0]
for key, value in mztab.metadata.items()
if key.startswith('ms_run') and key.endswith('location')}
psms = (mztab.spectrum_match_table
.rename(columns={'search_engine_score[1]': 'score'}))
if not all(psms['spectra_ref'].str.contains('scan')):
raise ValueError('Spectrum references with scan information required')
run = pd.Series([spectra_ref[:spectra_ref.find(':')]
for spectra_ref in psms['spectra_ref']])
scan = pd.Series([spectra_ref[spectra_ref.find('=') + 1:]
for spectra_ref in psms['spectra_ref']])
psms['spectra_ref'] = (run.map(run_names) + ':scan:' + scan).values
return (psms[['spectra_ref', 'sequence', 'score']]
.drop_duplicates('spectra_ref')
.set_index('spectra_ref'))
def _read_psms_mzidentml(filename: str) -> pd.DataFrame:
"""
Read mzIdentML spectrum identifications.
Parameters
----------
filename : str
The mzIdentML file name.
Returns
-------
pd.DataFrame
A Pandas DataFrame with as rows the PSMs and as columns "sequence" and
"score", indexed by their spectrum reference in the form of
{filename}:scan:{scan}.
"""
raise NotImplementedError('mzIdentML support is incomplete')
filenames, scan, sequences, score = [], [], [], []
for psm_dict in tqdm.tqdm(pyteomics.mzid.read(filename),
desc='PSMs read', unit='PSMs'):
filenames.append(os.path.splitext(psm_dict['name'])[0])
scan.append(-1) # FIXME
sequences.append(
psm_dict['SpectrumIdentificationItem'][0]['PeptideSequence'])
score.append(-1) # FIXME
psms = pd.DataFrame({'filename': filenames, 'scan': scan,
'sequence': sequences, 'score': score})
psms['spectra_ref'] = (psms['filename'] + ':scan:' +
psms['scan'].astype(str))
return (psms[['spectra_ref', 'sequence', 'score']]
.drop_duplicates('spectra_ref')
.set_index('spectra_ref'))
def _read_psms_idxml(filename: str) -> pd.DataFrame:
"""
Read idXML spectrum identifications.
Parameters
----------
filename : str
The idXML file name.
Returns
-------
pd.DataFrame
A Pandas DataFrame with as rows the PSMs and as columns "sequence" and
"score", indexed by their spectrum reference in the form of
{filename}:scan:{scan}.
"""
protein_ids, psms, scans, sequences, scores = [], [], [], [], []
pyopenms.IdXMLFile().load(filename, protein_ids, psms)
peak_filename = os.path.splitext(os.path.basename(
protein_ids[0].getMetaValue('spectra_data')[0].decode()))[0]
for psm in tqdm.tqdm(psms, desc='PSMs read', unit='PSMs'):
spectrum_index = psm.getMetaValue('spectrum_reference').decode()
scans.append(
int(spectrum_index[spectrum_index.find('scan=') + len('scan='):]))
sequences.append(psm.getHits()[0].getSequence().toString().decode())
scores.append(psm.getHits()[0].getScore())
psms = pd.DataFrame({'filename': peak_filename, 'scan': scans,
'sequence': sequences, 'score': scores})
psms['spectra_ref'] = (psms['filename'] + ':scan:' +
psms['scan'].astype(str))
return (psms[['spectra_ref', 'sequence', 'score']]
.drop_duplicates('spectra_ref')
.set_index('spectra_ref'))
def _read_psms_json(filename: str) -> pd.DataFrame:
"""
Read JSON spectrum identifications.
Parameters
----------
filename : str
The JSON file name.
Returns
-------
pd.DataFrame
A Pandas DataFrame with as rows the PSMs and as columns "sequence" and
"score", indexed by their spectrum reference in the form of
{filename}:scan:{scan}.
"""
raise NotImplementedError
def _read_psms_maxquant(filename: str) -> pd.DataFrame:
"""
Read MaxQuant spectrum identifications.
Parameters
----------
filename : str
The MaxQuant PSM file name (msms.txt).
Returns
-------
pd.DataFrame
A Pandas DataFrame with as rows the PSMs and as columns "sequence" and
"score", indexed by their spectrum reference in the form of
{filename}:scan:{scan}.
"""
psms = (pd.read_csv(filename, sep='\t', usecols=['Raw file', 'Scan number',
'Sequence', 'Score'])
.rename(columns={'Sequence': 'sequence', 'Score': 'score'}))
psms['spectra_ref'] = (psms['Raw file'] + ':scan:' +
psms['Scan number'].astype(str))
return (psms[['spectra_ref', 'sequence', 'score']]
.drop_duplicates('spectra_ref')
.set_index('spectra_ref'))
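# --- Illustrative usage sketch, not part of the original module ---
# File names and the cluster-tool format are placeholders; the functions are
# the ones defined above.
def _example_workflow():
    spectra = read_spectra_list('example.mgf')
    clusters = read_clusters('example_clusters.csv', 'falcon')
    psms = read_psms('example_identifications.mztab')
    # Attach cluster labels to spectra that have one, then write them back out.
    for spectrum in spectra:
        key = f'{spectrum.filename}:scan:{getattr(spectrum, "scan", "")}'
        if key in clusters:
            spectrum.cluster = clusters[key]
    write_spectra('example_clustered.mgf', spectra)
    return psms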
| 35.493671 | 79 | 0.567404 |
5368ee664fdde6b058f1b67afecf4c5e43f2a303
| 4,028 |
py
|
Python
|
test/AS/ASPP.py
|
EmanueleCannizzaro/scons
|
6baa4e65cdf4df6951473545b69435711864e509
|
[
"MIT"
] | 1 |
2019-09-18T06:37:02.000Z
|
2019-09-18T06:37:02.000Z
|
test/AS/ASPP.py
|
EmanueleCannizzaro/scons
|
6baa4e65cdf4df6951473545b69435711864e509
|
[
"MIT"
] | null | null | null |
test/AS/ASPP.py
|
EmanueleCannizzaro/scons
|
6baa4e65cdf4df6951473545b69435711864e509
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
#
# Copyright (c) 2001 - 2016 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "test/AS/ASPP.py rel_2.5.1:3735:9dc6cee5c168 2016/11/03 14:02:02 bdbaddog"
import sys
import TestSCons
_python_ = TestSCons._python_
_exe = TestSCons._exe
test = TestSCons.TestSCons()
if sys.platform == 'win32':
test.write('mylink.py', r"""
import sys
args = sys.argv[1:]
while args:
a = args[0]
if a == '-o':
out = args[1]
args = args[2:]
continue
if not a[0] in '/-':
break
args = args[1:]
if a[:5].lower() == '/out:': out = a[5:]
infile = open(args[0], 'rb')
outfile = open(out, 'wb')
for l in infile.readlines():
if l[:5] != '#link':
outfile.write(l)
sys.exit(0)
""")
test.write('myas.py', r"""
import sys
args = sys.argv[1:]
inf = None
while args:
a = args[0]
if a == '-o':
out = args[1]
args = args[2:]
continue
args = args[1:]
if not a[0] in "/-":
if not inf:
inf = a
continue
if a[:3] == '/Fo': out = a[3:]
infile = open(inf, 'rb')
outfile = open(out, 'wb')
for l in infile.readlines():
if l[:3] != '#as':
outfile.write(l)
sys.exit(0)
""")
else:
test.write('mylink.py', r"""
import getopt
import sys
opts, args = getopt.getopt(sys.argv[1:], 'o:')
for opt, arg in opts:
if opt == '-o': out = arg
infile = open(args[0], 'rb')
outfile = open(out, 'wb')
for l in infile.readlines():
if l[:5] != '#link':
outfile.write(l)
sys.exit(0)
""")
test.write('myas.py', r"""
import getopt
import sys
opts, args = getopt.getopt(sys.argv[1:], 'co:')
for opt, arg in opts:
if opt == '-o': out = arg
infile = open(args[0], 'rb')
outfile = open(out, 'wb')
for l in infile.readlines():
if l[:3] != '#as':
outfile.write(l)
sys.exit(0)
""")
test.write('SConstruct', """
env = Environment(LINK = r'%(_python_)s mylink.py',
LINKFLAGS = [],
ASPP = r'%(_python_)s myas.py',
CC = r'%(_python_)s myas.py')
env.Program(target = 'test1', source = 'test1.spp')
env.Program(target = 'test2', source = 'test2.SPP')
env.Program(target = 'test3', source = 'test3.sx')
""" % locals())
test.write('test1.spp', r"""This is a .spp file.
#as
#link
""")
test.write('test2.SPP', r"""This is a .SPP file.
#as
#link
""")
test.write('foo.h', r"""// this is foo.h
""")
test.write('test3.sx', r"""This is a .sx file.
#include "foo.h"
#as
#link
""")
test.run(arguments = '.', stderr = None)
test.fail_test(test.read('test1' + _exe) != "This is a .spp file.\n")
test.fail_test(test.read('test2' + _exe) != "This is a .SPP file.\n")
# Ensure the source scanner was run on test3.sx by
# checking for foo.h in the dependency tree output
test.run(arguments = '. --tree=prune')
test.fail_test("foo.h" not in test.stdout())
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| 24.864198 | 89 | 0.6286 |
795de5c2c8f8b6840e76a6854597b84a6ab81389
| 28,471 |
py
|
Python
|
reviewboard/accounts/models.py
|
grimmy/reviewboard
|
ca673e1ba77985a5dc1f3261595ba0389b48e093
|
[
"MIT"
] | 921 |
2015-01-01T15:26:28.000Z
|
2022-03-29T11:30:38.000Z
|
reviewboard/accounts/models.py
|
grimmy/reviewboard
|
ca673e1ba77985a5dc1f3261595ba0389b48e093
|
[
"MIT"
] | 5 |
2015-03-17T18:57:47.000Z
|
2020-10-02T13:24:31.000Z
|
reviewboard/accounts/models.py
|
grimmy/reviewboard
|
ca673e1ba77985a5dc1f3261595ba0389b48e093
|
[
"MIT"
] | 285 |
2015-01-12T06:24:36.000Z
|
2022-03-29T11:03:50.000Z
|
from __future__ import unicode_literals
from django.contrib.auth.models import User
from django.db import models
from django.db.models import Q
from django.db.models.signals import m2m_changed
from django.dispatch import receiver
from django.utils import timezone
from django.utils.encoding import python_2_unicode_compatible
from django.utils.functional import cached_property
from django.utils.translation import ugettext_lazy as _
from djblets.auth.signals import user_registered
from djblets.cache.backend import cache_memoize
from djblets.db.fields import CounterField, JSONField
from djblets.forms.fields import TIMEZONE_CHOICES
from djblets.siteconfig.models import SiteConfiguration
from reviewboard.accounts.managers import (ProfileManager,
ReviewRequestVisitManager,
TrophyManager)
from reviewboard.accounts.trophies import trophies_registry
from reviewboard.admin.read_only import is_site_read_only_for
from reviewboard.avatars import avatar_services
from reviewboard.reviews.models import Group, ReviewRequest
from reviewboard.reviews.signals import (reply_published,
review_published,
review_request_published)
from reviewboard.site.models import LocalSite
from reviewboard.site.signals import local_site_user_added
@python_2_unicode_compatible
class ReviewRequestVisit(models.Model):
"""
A recording of the last time a review request was visited by a user.
Users have one ReviewRequestVisit entry in the database per review
request they've visited. This is used to keep track of any updates
to review requests they've already seen, so that we can intelligently
inform them that new discussions have taken place.
"""
VISIBLE = 'V'
ARCHIVED = 'A'
MUTED = 'M'
VISIBILITY = (
(VISIBLE, 'Visible'),
(ARCHIVED, 'Archived'),
(MUTED, 'Muted'),
)
user = models.ForeignKey(User, related_name='review_request_visits')
review_request = models.ForeignKey(ReviewRequest, related_name='visits')
timestamp = models.DateTimeField(_('last visited'), default=timezone.now)
visibility = models.CharField(max_length=1, choices=VISIBILITY,
default=VISIBLE)
# Set this up with a ReviewRequestVisitManager, which inherits from
# ConcurrencyManager to help prevent race conditions.
objects = ReviewRequestVisitManager()
def __str__(self):
"""Return a string used for the admin site listing."""
return 'Review request visit'
class Meta:
db_table = 'accounts_reviewrequestvisit'
unique_together = ('user', 'review_request')
index_together = [('user', 'visibility')]
verbose_name = _('Review Request Visit')
verbose_name_plural = _('Review Request Visits')
@python_2_unicode_compatible
class Profile(models.Model):
"""User profile which contains some basic configurable settings."""
user = models.ForeignKey(User, unique=True)
# This will redirect new users to the account settings page the first time
# they log in (or immediately after creating an account). This allows
# people to fix their real name and join groups.
first_time_setup_done = models.BooleanField(
default=False,
verbose_name=_("first time setup done"),
help_text=_("Indicates whether the user has already gone through "
"the first time setup process by saving their user "
"preferences."))
# Whether the user wants to receive emails
should_send_email = models.BooleanField(
default=True,
verbose_name=_("send email"),
help_text=_("Indicates whether the user wishes to receive emails."))
should_send_own_updates = models.BooleanField(
default=True,
verbose_name=_("receive emails about own actions"),
help_text=_("Indicates whether the user wishes to receive emails "
"about their own activity."))
collapsed_diffs = models.BooleanField(
default=True,
verbose_name=_("collapsed diffs"),
help_text=_("Indicates whether diffs should be shown in their "
"collapsed state by default."))
wordwrapped_diffs = models.BooleanField(
default=True,
help_text=_("This field is unused and will be removed in a future "
"version."))
syntax_highlighting = models.BooleanField(
default=True,
verbose_name=_("syntax highlighting"),
help_text=_("Indicates whether the user wishes to see "
"syntax highlighting in the diffs."))
is_private = models.BooleanField(
default=False,
verbose_name=_("profile private"),
help_text=_("Indicates whether the user wishes to keep his/her "
"profile private."))
open_an_issue = models.BooleanField(
default=True,
verbose_name=_("opens an issue"),
help_text=_("Indicates whether the user wishes to default "
"to opening an issue or not."))
default_use_rich_text = models.NullBooleanField(
default=None,
verbose_name=_('enable Markdown by default'),
help_text=_('Indicates whether new posts or comments should default '
'to being in Markdown format.'))
# Indicate whether closed review requests should appear in the
# review request lists (excluding the dashboard).
show_closed = models.BooleanField(default=True)
sort_review_request_columns = models.CharField(max_length=256, blank=True)
sort_dashboard_columns = models.CharField(max_length=256, blank=True)
sort_submitter_columns = models.CharField(max_length=256, blank=True)
sort_group_columns = models.CharField(max_length=256, blank=True)
review_request_columns = models.CharField(max_length=256, blank=True)
dashboard_columns = models.CharField(max_length=256, blank=True)
submitter_columns = models.CharField(max_length=256, blank=True)
group_columns = models.CharField(max_length=256, blank=True)
# A list of starred review requests. This allows users to monitor a
# review request and receive e-mails on updates without actually being
# on the reviewer list or commenting on the review. This is similar to
# adding yourself to a CC list.
starred_review_requests = models.ManyToManyField(ReviewRequest, blank=True,
related_name="starred_by")
# A list of watched groups. This is so that users can monitor groups
# without actually joining them, preventing e-mails being sent to the
# user and review requests from entering the Incoming Reviews list.
starred_groups = models.ManyToManyField(Group, blank=True,
related_name="starred_by")
# Allows per-user timezone settings
timezone = models.CharField(choices=TIMEZONE_CHOICES, default='UTC',
max_length=30)
settings = JSONField(null=True, default=dict)
extra_data = JSONField(null=True, default=dict)
objects = ProfileManager()
@property
def should_use_rich_text(self):
"""Get whether rich text should be used by default for this user.
If the user has chosen whether or not to use rich text explicitly,
then that choice will be respected. Otherwise, the system default is
used.
"""
if self.default_use_rich_text is None:
siteconfig = SiteConfiguration.objects.get_current()
return siteconfig.get('default_use_rich_text')
else:
return self.default_use_rich_text
@property
def should_enable_desktop_notifications(self):
"""Return whether desktop notifications should be used for this user.
If the user has chosen whether or not to use desktop notifications
explicitly, then that choice will be respected. Otherwise, we
enable desktop notifications by default.
Returns:
bool:
            If the user has set whether they wish to receive desktop
notifications, then use their preference. Otherwise, we return
``True``.
"""
return (not self.settings or
self.settings.get('enable_desktop_notifications', True))
def star_review_request(self, review_request):
"""Mark a review request as starred.
This will mark a review request as starred for this user and
immediately save to the database.
"""
self.starred_review_requests.add(review_request)
if (review_request.public and
review_request.status in (ReviewRequest.PENDING_REVIEW,
ReviewRequest.SUBMITTED)):
site_profile = \
self.user.get_site_profile(review_request.local_site)
site_profile.increment_starred_public_request_count()
def unstar_review_request(self, review_request):
"""Mark a review request as unstarred.
        This will mark a review request as unstarred for this user and
immediately save to the database.
"""
self.starred_review_requests.remove(review_request)
if (review_request.public and
review_request.status in (ReviewRequest.PENDING_REVIEW,
ReviewRequest.SUBMITTED)):
site_profile = \
self.user.get_site_profile(review_request.local_site)
site_profile.decrement_starred_public_request_count()
def star_review_group(self, review_group):
"""Mark a review group as starred.
This will mark a review group as starred for this user and
immediately save to the database.
"""
self.starred_groups.add(review_group)
def unstar_review_group(self, review_group):
"""Mark a review group as unstarred.
        This will mark a review group as unstarred for this user and
immediately save to the database.
"""
self.starred_groups.remove(review_group)
def __str__(self):
"""Return a string used for the admin site listing."""
return self.user.username
@property
def avatar_service(self):
"""The avatar service the user has selected.
Returns:
djblets.avatars.services.base.AvatarService:
The avatar service.
"""
service_id = self.settings.get('avatars', {}).get('avatar_service_id')
return avatar_services.get_or_default(service_id)
@avatar_service.setter
def avatar_service(self, service):
"""Set the avatar service.
Args:
service (djblets.avatars.services.base.AvatarService):
The avatar service.
"""
self.settings.setdefault('avatars', {})['avatar_service_id'] = \
service.avatar_service_id
def get_display_name(self, viewing_user):
"""Return the name to display to the given user.
If any of the following is True and the user this profile belongs to
        has a full name set, the display name will be the user's full name:
* The viewing user is authenticated and this profile is public.
* The viewing user is the user this profile belongs to.
* The viewing user is an administrator.
        * The viewing user is a LocalSite administrator on any LocalSite of
          which the user to whom this profile belongs is a member.
Otherwise the display name will be the user's username.
Args:
viewing_user (django.contrib.auth.models.User):
The user who is viewing the profile.
Returns:
unicode:
The name to display.
"""
if (viewing_user is not None and
viewing_user.is_authenticated() and
(not self.is_private or
viewing_user.pk == self.user_id or
viewing_user.is_admin_for_user(self.user))):
return self.user.get_full_name() or self.user.username
else:
return self.user.username
def save(self, *args, **kwargs):
"""Save the profile to the database.
The profile will only be saved if the user is not affected by read-only
mode.
Args:
*args (tuple):
Positional arguments to pass through to the superclass.
**kwargs (dict):
Keyword arguments to pass through to the superclass.
"""
if not is_site_read_only_for(self.user):
super(Profile, self).save(*args, **kwargs)
class Meta:
db_table = 'accounts_profile'
verbose_name = _('Profile')
verbose_name_plural = _('Profiles')
@python_2_unicode_compatible
class LocalSiteProfile(models.Model):
"""User profile information specific to a LocalSite."""
user = models.ForeignKey(User, related_name='site_profiles')
profile = models.ForeignKey(Profile, related_name='site_profiles')
local_site = models.ForeignKey(LocalSite, null=True, blank=True,
related_name='site_profiles')
# A dictionary of permission that the user has granted. Any permission
# missing is considered to be False.
permissions = JSONField(null=True)
# Counts for quickly knowing how many review requests are incoming
# (both directly and total), outgoing (pending and total ever made),
# and starred (public).
direct_incoming_request_count = CounterField(
_('direct incoming review request count'),
initializer=lambda p: (
ReviewRequest.objects.to_user_directly(
p.user, local_site=p.local_site).count()
if p.user_id else 0))
total_incoming_request_count = CounterField(
_('total incoming review request count'),
initializer=lambda p: (
ReviewRequest.objects.to_user(
p.user, local_site=p.local_site).count()
if p.user_id else 0))
pending_outgoing_request_count = CounterField(
_('pending outgoing review request count'),
initializer=lambda p: (
ReviewRequest.objects.from_user(
p.user, p.user, local_site=p.local_site).count()
if p.user_id else 0))
total_outgoing_request_count = CounterField(
_('total outgoing review request count'),
initializer=lambda p: (
ReviewRequest.objects.from_user(
p.user, p.user, None, local_site=p.local_site).count()
if p.user_id else 0))
starred_public_request_count = CounterField(
_('starred public review request count'),
initializer=lambda p: (
p.profile.starred_review_requests.public(
user=None, local_site=p.local_site).count()
if p.pk else 0))
def __str__(self):
"""Return a string used for the admin site listing."""
return '%s (%s)' % (self.user.username, self.local_site)
class Meta:
db_table = 'accounts_localsiteprofile'
unique_together = (('user', 'local_site'),
('profile', 'local_site'))
verbose_name = _('Local Site Profile')
verbose_name_plural = _('Local Site Profiles')
class Trophy(models.Model):
"""A trophy represents an achievement given to the user.
It is associated with a ReviewRequest and a User and can be associated
with a LocalSite.
"""
category = models.CharField(max_length=100)
received_date = models.DateTimeField(default=timezone.now)
review_request = models.ForeignKey(ReviewRequest, related_name="trophies")
local_site = models.ForeignKey(LocalSite, null=True,
related_name="trophies")
user = models.ForeignKey(User, related_name="trophies")
objects = TrophyManager()
@cached_property
def trophy_type(self):
"""The TrophyType instance for this trophy."""
return trophies_registry.get_for_category(self.category)
def get_display_text(self):
"""Get the display text for this trophy."""
return self.trophy_type.get_display_text(self)
class Meta:
db_table = 'accounts_trophy'
verbose_name = _('Trophy')
verbose_name_plural = _('Trophies')
#
# The following functions are patched onto the User model.
#
def _is_user_profile_visible(self, user=None):
"""Return whether or not the given user can view this user's profile.
Profiles are hidden from unauthenticated users. For authenticated users, a
profile is visible if one of the following is true:
* The profile is not marked as private.
* The viewing user owns the profile.
* The viewing user is a staff member.
    * The viewing user is an administrator on a Local Site of which the
      viewed user is a member.
Args:
user (django.contrib.auth.models.User, optional):
The user for which visibility to the profile is to be determined.
Returns:
bool:
Whether or not the given user can view the profile.
"""
if user is None or user.is_anonymous():
return False
if hasattr(self, 'is_private'):
# This is an optimization used by the web API. It will set
# is_private on this User instance through a query, saving a
# lookup for each instance.
#
# This must be done because select_related() and
# prefetch_related() won't cache reverse foreign key relations.
is_private = self.is_private
else:
is_private = self.get_profile().is_private
return (not is_private or
user == self or
user.is_admin_for_user(self))
def _should_send_email(self):
"""Get whether a user wants to receive emails.
This is patched into the user object to make it easier to deal with missing
Profile objects.
"""
return self.get_profile().should_send_email
def _should_send_own_updates(self):
"""Get whether a user wants to receive emails about their activity.
This is patched into the user object to make it easier to deal with missing
Profile objects.
"""
return self.get_profile().should_send_own_updates
def _get_profile(self, cached_only=False, create_if_missing=True,
return_is_new=False):
"""Return the profile for the User.
The profile will be cached, preventing queries for future lookups.
If a profile doesn't exist in the database, and a cached-only copy
isn't being returned, then a profile will be created in the database.
Version Changed:
3.0.12:
Added support for ``create_if_missing`` and ``return_is_new``
arguments.
Args:
cached_only (bool, optional):
Whether we should only return the profile cached for the user.
If True, this function will not retrieve an uncached profile or
create one that doesn't exist. Instead, it will return ``None``.
create_if_missing (bool, optional):
Whether to create a site profile if one doesn't already exist.
        return_is_new (bool, optional):
If ``True``, the result of the call will be a tuple containing
the profile and a boolean indicating if the profile was
newly-created.
Returns:
        Profile or tuple:
The user's profile.
If ``return_is_new`` is ``True``, then this will instead return
``(Profile, is_new)``.
Raises:
Profile.DoesNotExist:
The profile did not exist. This can only be raised if passing
``create_if_missing=False``.
"""
# Note that we use the same cache variable that a select_related() call
# would use, ensuring that we benefit from Django's caching when possible.
profile = getattr(self, '_profile_set_cache', None)
is_new = False
if profile is None and not cached_only:
if create_if_missing:
profile, is_new = Profile.objects.get_or_create(user=self)
else:
# This may raise Profile.DoesNotExist.
profile = Profile.objects.get(user=self)
profile.user = self
self._profile_set_cache = profile
# While modern versions of Review Board set this to an empty dictionary,
# old versions would initialize this to None. Since we don't want to litter
# our code with extra None checks everywhere we use it, normalize it here.
if profile is not None and profile.extra_data is None:
profile.extra_data = {}
if return_is_new:
return profile, is_new
return profile
def _get_site_profile(self, local_site, cached_only=False,
create_if_missing=True, return_is_new=False):
"""Return the LocalSiteProfile for a given LocalSite for the User.
The site profile will be cached, preventing queries for future lookups.
If a site profile doesn't exist in the database, and a cached-only copy
isn't being returned, then a profile will be created in the database,
unless passing ``create_if_missing=False``.
Version Changed:
3.0.12:
* In previous versions, this would not create a site profile if one
didn't already exist. Now it does, unless passing
``create_if_missing=False``. This change was made to standardize
behavior between this and :py:meth:`User.get_profile`.
* Added support for ``cached_only``, ``create_if_missing`` and
``return_is_new`` arguments.
Args:
local_site (reviewboard.site.models.LocalSite):
The LocalSite to return a profile for. This is allowed to be
``None``, which means the profile applies to their global site
account.
cached_only (bool, optional):
Whether we should only return the profile cached for the user.
If True, this function will not retrieve an uncached profile or
create one that doesn't exist. Instead, it will return ``None``.
create_if_missing (bool, optional):
Whether to create a site profile if one doesn't already exist.
        return_is_new (bool, optional):
If ``True``, the result of the call will be a tuple containing
the profile and a boolean indicating if the profile was
newly-created.
Returns:
LocalSiteProfile or tuple:
The user's LocalSite profile.
If ``return_is_new`` is ``True``, then this will instead return
``(LocalSiteProfile, is_new)``.
Raises:
LocalSiteProfile.DoesNotExist:
The profile did not exist. This can only be raised if passing
``create_if_missing=False``.
"""
if not hasattr(self, '_site_profiles'):
self._site_profiles = {}
if local_site is None:
local_site_id = None
else:
local_site_id = local_site.pk
is_new = False
site_profile = self._site_profiles.get(local_site_id)
if site_profile is None and not cached_only:
profile = self.get_profile()
if create_if_missing:
site_profile, is_new = LocalSiteProfile.objects.get_or_create(
user=self,
profile=profile,
local_site=local_site)
else:
# This may raise LocalSiteProfile.DoesNotExist.
site_profile = LocalSiteProfile.objects.get(
user=self,
profile=profile,
local_site=local_site)
# Set these directly in order to avoid further lookups.
site_profile.user = self
site_profile.profile = profile
site_profile.local_site = local_site
self._site_profiles[local_site_id] = site_profile
if return_is_new:
return site_profile, is_new
return site_profile
def _is_admin_for_user(self, user):
"""Return whether or not this user is an administrator for the given user.
Results will be cached for this user so that at most one query is done.
Args:
user (django.contrib.auth.models.User):
The user to check.
Returns:
bool:
Whether or not this user is an administrator for the given user.
"""
if self.is_staff:
return True
if not user or user.is_anonymous():
return False
if not hasattr(self, '_cached_admin_for_users'):
self._cached_admin_for_users = cache_memoize(
'%s-admin-for-users' % self.pk,
lambda: tuple(
User.objects
.filter(local_site__admins=self)
.values_list('pk', flat=True)
))
return user.pk in self._cached_admin_for_users
User.is_profile_visible = _is_user_profile_visible
User.get_profile = _get_profile
User.get_site_profile = _get_site_profile
User.should_send_email = _should_send_email
User.should_send_own_updates = _should_send_own_updates
User.is_admin_for_user = _is_admin_for_user
User._meta.ordering = ('username',)
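# The lines below are a usage sketch only (not part of Review Board itself);
# they assume a fully configured Django/Review Board environment, and the
# user names involved are hypothetical:
#
#     user = User.objects.get(username='alice')
#     profile = user.get_profile()                  # created on first access
#     user.get_profile(create_if_missing=False)     # may raise Profile.DoesNotExist
#     user.is_profile_visible(viewing_user)         # respects Profile.is_private
#     user.is_admin_for_user(other_user)            # cached LocalSite admin check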
@receiver(review_request_published)
def _call_compute_trophies(sender, review_request, **kwargs):
if review_request.public and not review_request.changedescs.exists():
Trophy.objects.compute_trophies(review_request)
@receiver(review_request_published)
def _call_unarchive_all_for_review_request(sender, review_request, **kwargs):
ReviewRequestVisit.objects.unarchive_all(review_request)
@receiver(review_published)
def _call_unarchive_all_for_review(sender, review, **kwargs):
ReviewRequestVisit.objects.unarchive_all(review.review_request_id)
@receiver(reply_published)
def _call_unarchive_all_for_reply(sender, reply, **kwargs):
ReviewRequestVisit.objects.unarchive_all(reply.review_request_id)
@receiver(user_registered)
@receiver(local_site_user_added)
def _add_default_groups(sender, user, local_site=None, **kwargs):
"""Add user to default groups.
When a user is registered, add the user to global default groups.
When a user is added to a LocalSite, add the user to default groups of the
LocalSite.
"""
if local_site:
default_groups = local_site.groups.filter(is_default_group=True)
else:
default_groups = Group.objects.filter(is_default_group=True,
local_site=None)
for default_group in default_groups:
default_group.users.add(user)
@receiver(m2m_changed, sender=Group.users.through)
def _on_group_user_membership_changed(instance, action, pk_set, reverse,
**kwargs):
"""Handler for when a review group's membership has changed.
When a user is added to or removed from a review group, their
:py:attr:`~LocalSiteProfile.total_incoming_request_count` counter will
be cleared, forcing it to be recomputed on next access. This ensures that
their incoming count will be correct when group memberships change.
Args:
instance (django.db.models.Model):
The instance that was updated. If ``reverse`` is ``True``, then
this will be a :py:class:`~django.contrib.auth.models.User`.
Otherwise, it will be ignored.
action (unicode):
The membership change action. The incoming count is only cleared
            if this is ``post_add``, ``post_remove``, or ``pre_clear``.
pk_set (set of int):
The user IDs added to the group. If ``reverse`` is ``True``,
then this is ignored in favor of ``instance``.
        reverse (bool):
            Whether this signal is emitted when modifying the reverse
            relation (``True`` -- ``User.review_groups``) or the forward
            relation (``False`` -- :py:attr:`Group.users
            <reviewboard.reviews.models.group.Group.users>`).
**kwargs (dict):
Additional keyword arguments passed to the signal.
"""
if action in ('post_add', 'post_remove', 'pre_clear'):
q = None
if reverse:
if instance is not None:
q = Q(user=instance)
else:
if pk_set:
q = Q(user__in=pk_set)
if q is not None:
LocalSiteProfile.objects.filter(q).update(
total_incoming_request_count=None)
| 37.216993 | 79 | 0.661059 |
07e74546fd1a9510b23d6fb67349009e7a6ab9b2
| 8,370 |
py
|
Python
|
africanus/model/spectral/spec_model.py
|
JoshVStaden/codex-africanus
|
4a38994431d51510b1749fa0e4b8b6190b8b530f
|
[
"BSD-3-Clause"
] | 13 |
2018-04-06T09:36:13.000Z
|
2021-04-13T13:11:00.000Z
|
africanus/model/spectral/spec_model.py
|
JoshVStaden/codex-africanus
|
4a38994431d51510b1749fa0e4b8b6190b8b530f
|
[
"BSD-3-Clause"
] | 153 |
2018-03-28T14:13:48.000Z
|
2022-02-03T07:49:17.000Z
|
africanus/model/spectral/spec_model.py
|
JoshVStaden/codex-africanus
|
4a38994431d51510b1749fa0e4b8b6190b8b530f
|
[
"BSD-3-Clause"
] | 14 |
2018-03-29T13:30:52.000Z
|
2021-06-12T02:56:55.000Z
|
# -*- coding: utf-8 -*-
from numba import types
import numpy as np
from africanus.util.numba import generated_jit, njit
from africanus.util.docs import DocstringTemplate
def numpy_spectral_model(stokes, spi, ref_freq, frequency, base):
out_shape = (stokes.shape[0], frequency.shape[0]) + stokes.shape[1:]
# Add in missing pol dimensions
if stokes.ndim == 1:
stokes = stokes[:, None]
if spi.ndim == 2:
spi = spi[:, :, None]
npol = spi.shape[2]
if isinstance(base, list):
base = base + [base[-1]] * (npol - len(base))
else:
base = [base]*npol
spi_exps = np.arange(1, spi.shape[1] + 1)
spectral_model = np.empty((stokes.shape[0], frequency.shape[0], npol),
dtype=stokes.dtype)
spectral_model[:, :, :] = stokes[:, None, :]
for p, b in enumerate(base):
if b in ("std", 0):
freq_ratio = (frequency[None, :] / ref_freq[:, None])
term = freq_ratio[:, None, :]**spi[:, :, p, None]
spectral_model[:, :, p] *= term.prod(axis=1)
elif b in ("log", 1):
freq_ratio = np.log(frequency[None, :] / ref_freq[:, None])
term = freq_ratio[:, None, :]**spi_exps[None, :, None]
term = spi[:, :, p, None] * term
spectral_model[:, :, p] = (np.exp(np.log(stokes[:, p, None]) +
term.sum(axis=1)))
elif b in ("log10", 2):
freq_ratio = np.log10(frequency[None, :] / ref_freq[:, None])
term = freq_ratio[:, None, :]**spi_exps[None, :, None]
term = spi[:, :, p, None] * term
spectral_model[:, :, p] = (10**(np.log10(stokes[:, p, None]) +
term.sum(axis=1)))
else:
raise ValueError("Invalid base %s" % base)
return spectral_model.reshape(out_shape)
def pol_getter_factory(npoldims):
if npoldims == 0:
def impl(pol_shape):
return 1
else:
def impl(pol_shape):
npols = 1
for c in pol_shape:
npols *= c
return npols
return njit(nogil=True, cache=True)(impl)
def promote_base_factory(is_base_list):
if is_base_list:
def impl(base, npol):
return base + [base[-1]] * (npol - len(base))
else:
def impl(base, npol):
return [base] * npol
return njit(nogil=True, cache=True)(impl)
def add_pol_dim_factory(have_pol_dim):
if have_pol_dim:
def impl(array):
return array
else:
def impl(array):
return array.reshape(array.shape + (1,))
return njit(nogil=True, cache=True)(impl)
@generated_jit(nopython=True, nogil=True, cache=True)
def spectral_model(stokes, spi, ref_freq, frequency, base=0):
arg_dtypes = tuple(np.dtype(a.dtype.name) for a
in (stokes, spi, ref_freq, frequency))
dtype = np.result_type(*arg_dtypes)
if isinstance(base, types.containers.List):
is_base_list = True
base = base.dtype
else:
is_base_list = False
promote_base = promote_base_factory(is_base_list)
if isinstance(base, types.scalars.Integer):
def is_std(base):
return base == 0
def is_log(base):
return base == 1
def is_log10(base):
return base == 2
elif isinstance(base, types.misc.UnicodeType):
def is_std(base):
return base == "std"
def is_log(base):
return base == "log"
def is_log10(base):
return base == "log10"
else:
raise TypeError("base '%s' should be a string or integer" % base)
is_std = njit(nogil=True, cache=True)(is_std)
is_log = njit(nogil=True, cache=True)(is_log)
is_log10 = njit(nogil=True, cache=True)(is_log10)
npoldims = stokes.ndim - 1
pol_get_fn = pol_getter_factory(npoldims)
add_pol_dim = add_pol_dim_factory(npoldims > 0)
if spi.ndim - 2 != npoldims:
raise ValueError("Dimensions on stokes and spi don't agree")
def impl(stokes, spi, ref_freq, frequency, base=0):
nsrc = stokes.shape[0]
nchan = frequency.shape[0]
nspi = spi.shape[1]
npol = pol_get_fn(stokes.shape[1:])
if npol != pol_get_fn(spi.shape[2:]):
raise ValueError("Correlations on stokes and spi don't agree")
# Promote base argument to a per-polarisation list
list_base = promote_base(base, npol)
# Reshape adding a polarisation dimension if necessary
estokes = add_pol_dim(stokes)
espi = add_pol_dim(spi)
spectral_model = np.empty((nsrc, nchan, npol), dtype=dtype)
# TODO(sjperkins)
# Polarisation + associated base on the outer loop
# The output cache patterns could be improved.
for p, b in enumerate(list_base[:npol]):
if is_std(b):
for s in range(nsrc):
rf = ref_freq[s]
for f in range(nchan):
freq_ratio = frequency[f] / rf
spec_model = estokes[s, p]
for si in range(0, nspi):
term = freq_ratio ** espi[s, si, p]
spec_model *= term
spectral_model[s, f, p] = spec_model
elif is_log(b):
for s in range(nsrc):
rf = ref_freq[s]
for f in range(nchan):
freq_ratio = np.log(frequency[f] / rf)
spec_model = np.log(estokes[s, p])
for si in range(0, nspi):
term = espi[s, si, p] * freq_ratio**(si + 1)
spec_model += term
spectral_model[s, f, p] = np.exp(spec_model)
elif is_log10(b):
for s in range(nsrc):
rf = ref_freq[s]
for f in range(nchan):
freq_ratio = np.log10(frequency[f] / rf)
spec_model = np.log10(estokes[s, p])
for si in range(0, nspi):
term = espi[s, si, p] * freq_ratio**(si + 1)
spec_model += term
spectral_model[s, f, p] = 10**spec_model
else:
raise ValueError("Invalid base")
out_shape = (stokes.shape[0], frequency.shape[0]) + stokes.shape[1:]
return spectral_model.reshape(out_shape)
return impl
SPECTRAL_MODEL_DOC = DocstringTemplate(r"""
Compute a spectral model, per polarisation.
.. math::
:nowrap:
\begin{eqnarray}
I(\lambda) & = & I_0 \prod_{i=1} (\lambda / \lambda_0)^{\alpha_{i}} \\
\ln( I(\lambda) ) & = & \sum_{i=0} \alpha_{i}
\ln (\lambda / \lambda_0)^i
\, \textrm{where} \, \alpha_0 = \ln I_0 \\
\log_{10}( I(\lambda) ) & = & \sum_{i=0} \alpha_{i}
\log_{10} (\lambda / \lambda_0)^i
\, \textrm{where} \, \alpha_0 = \log_{10} I_0 \\
\end{eqnarray}
Parameters
----------
stokes : $(array_type)
Stokes parameters of shape :code:`(source,)` or :code:`(source, pol)`.
If a ``pol`` dimension is present, then it must also be present on ``spi``.
spi : $(array_type)
Spectral index of shape :code:`(source, spi-comps)`
or :code:`(source, spi-comps, pol)`.
ref_freq : $(array_type)
Reference frequencies of shape :code:`(source,)`
frequencies : $(array_type)
Frequencies of shape :code:`(chan,)`
base : {"std", "log", "log10"} or {0, 1, 2} or list.
string or corresponding enumeration specifying the polynomial base.
Defaults to 0.
If a list is provided, a polynomial base can be specified for each
stokes parameter or polarisation in the ``pol`` dimension.
    String specification of the base is only supported in Python 3,
    while the corresponding integer enumerations are supported
    on all Python versions.
Returns
-------
spectral_model : $(array_type)
Spectral Model of shape :code:`(source, chan)` or
:code:`(source, chan, pol)`.
""")
try:
spectral_model.__doc__ = SPECTRAL_MODEL_DOC.substitute(
array_type=":class:`numpy.ndarray`")
except AttributeError:
pass
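# Minimal self-contained sketch of the NumPy reference implementation above.
# The array values below are made up purely for illustration.
if __name__ == "__main__":
    _stokes = np.array([1.0, 2.0])                 # (source,)
    _spi = np.array([[-0.7], [-0.5]])              # (source, spi-comps)
    _ref_freq = np.array([1.4e9, 1.4e9])           # (source,)
    _frequency = np.linspace(1.2e9, 1.6e9, 4)      # (chan,)
    # Standard (power-law) base; the result has shape (source, chan).
    print(numpy_spectral_model(_stokes, _spi, _ref_freq, _frequency, "std"))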
| 31.348315 | 79 | 0.54767 |
923c98f77c231b2987a3c96c10d1deaffc30960c
| 3,668 |
py
|
Python
|
main.py
|
joelouismarino/variational_rl
|
11dc14bfb56f3ebbfccd5de206b78712a8039a9a
|
[
"MIT"
] | 15 |
2020-10-20T22:09:36.000Z
|
2021-12-24T13:40:36.000Z
|
main.py
|
joelouismarino/variational_rl
|
11dc14bfb56f3ebbfccd5de206b78712a8039a9a
|
[
"MIT"
] | null | null | null |
main.py
|
joelouismarino/variational_rl
|
11dc14bfb56f3ebbfccd5de206b78712a8039a9a
|
[
"MIT"
] | 1 |
2020-10-23T19:48:06.000Z
|
2020-10-23T19:48:06.000Z
|
from comet_ml import Experiment
import argparse
import torch
import numpy as np
from distutils.util import strtobool
from util.env_util import create_env
from lib import create_agent
from misc.buffer import Buffer
from misc.optimizer import Optimizer
from util.plot_util import Plotter
from util.train_util import train
parser = argparse.ArgumentParser()
parser.add_argument('--env', type=str, help='environment name')
parser.add_argument('--device_id', default=None, type=int, help='GPU ID number')
parser.add_argument('--seed', default=None, type=int, help='random seed')
parser.add_argument('--batch_size', default=256, type=int, help='batch size')
parser.add_argument('--lr', default=3e-4, type=float, help='learning rate')
parser.add_argument('--train_seq_len', default=2, type=int, help='training sequence length')
parser.add_argument('--n_total_steps', default=3e6, type=int, help='total number of environment steps to collect')
parser.add_argument('--semi_am', default=False, type=lambda x:bool(strtobool(x)), help='whether to use semi-amortization for collection')
parser.add_argument('--optimizer', default='adam', type=str, help='optimizer')
parser.add_argument('--grad_norm', default=None, help='gradient norm constraint')
parser.add_argument('--weight_decay', default=0., type=float, help='L2 weight decay')
parser.add_argument('--critic_delay', default=0, type=int, help='delay period of critic updates')
parser.add_argument('--value_tau', default=5e-3, type=float, help='value update rate')
parser.add_argument('--value_update', default='hard', type=str, help='value target update type; hard or soft')
parser.add_argument('--policy_tau', default=2e-3, type=float, help='policy update rate')
parser.add_argument('--policy_update', default='hard', type=str, help='policy prior target update type; hard or soft')
parser.add_argument('--n_initial_steps', default=5000, type=int, help='number of initial batches')
parser.add_argument('--n_pretrain_updates', default=1000, type=int, help='number of pre-training iterations for the model')
parser.add_argument('--update_factor', default=1, type=int, help='number of updates to perform per training step')
parser.add_argument('--checkpoint_exp_key', default=None, type=str, help='experiment key for the checkpoint to load')
parser.add_argument('--checkpoint_interval', default=1e4, type=int, help='frequency of model checkpointing in environment steps')
parser.add_argument('--eval_interval', default=1e3, type=int, help='frequency for evaluation in environment steps')
parser.add_argument('--plotting', default=True, type=lambda x:bool(strtobool(x)), help='whether or not to log/plot with comet')
# other arguments here
args = parser.parse_args()
if args.seed is not None:
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.device_id is not None and torch.cuda.is_available():
torch.cuda.manual_seed_all(args.seed)
# create the environment
env = create_env(args.env, args.seed)
# create the agent
agent, agent_args = create_agent(env, args.device_id)
# create the data buffer
buffer = Buffer(batch_size=args.batch_size, seq_len=args.train_seq_len)
# create the optimizer
optimizer = Optimizer(agent, optimizer=args.optimizer, lr=args.lr,
norm_grad=args.grad_norm, weight_decay=args.weight_decay,
value_tau=args.value_tau,
policy_tau=args.policy_tau,
value_update=args.value_update,
policy_update=args.policy_update)
# create the logger / plotter
plotter = Plotter(args, agent_args, agent)
# train the agent
train(agent, env, buffer, optimizer, plotter, args)
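# Example invocation (a sketch only -- the environment name and device id are
# hypothetical and depend on the environments registered in util.env_util):
#
#   python main.py --env HalfCheetah-v2 --device_id 0 --seed 1 --plotting False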
| 53.15942 | 137 | 0.751908 |
b4faae8807f418cc618807e2aa1b126a245bbf62
| 242 |
py
|
Python
|
Python/Warmup-1/missing_char.py
|
LucasHenrique-dev/exerc-cios-codingbat
|
ff92db10387757b9a2e3f72be6b7e51824b1ffa6
|
[
"MIT"
] | 2 |
2020-12-09T13:36:44.000Z
|
2021-08-16T01:17:16.000Z
|
Python/Warmup-1/missing_char.py
|
LucasHenrique-dev/exerc-cios-codingbat
|
ff92db10387757b9a2e3f72be6b7e51824b1ffa6
|
[
"MIT"
] | null | null | null |
Python/Warmup-1/missing_char.py
|
LucasHenrique-dev/exerc-cios-codingbat
|
ff92db10387757b9a2e3f72be6b7e51824b1ffa6
|
[
"MIT"
] | null | null | null |
"""
Given a string "str", return a new string with the character at index "n"
removed; "n" will be a valid index (0..length-1, inclusive).
"""
def missing_char(str, n):
return str[:n] + str[n+1:]
print(missing_char("hello", 2))
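# A couple more illustrative calls (edge cases at the first and last index):
print(missing_char("kitten", 0))  # "itten"
print(missing_char("kitten", 5))  # "kitte"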
| 20.166667 | 83 | 0.673554 |
10ab732de504f77e0e5e9a69b58956f01fc4ab1a
| 310 |
py
|
Python
|
scnym/__init__.py
|
jdasam/scnym
|
0b506170f4cb944e498d97ba0e086cbe2a62a18e
|
[
"Apache-2.0"
] | 45 |
2019-07-30T00:13:47.000Z
|
2022-03-23T08:04:19.000Z
|
scnym/__init__.py
|
jdasam/scnym
|
0b506170f4cb944e498d97ba0e086cbe2a62a18e
|
[
"Apache-2.0"
] | 10 |
2019-09-25T18:03:01.000Z
|
2022-03-05T00:31:41.000Z
|
scnym/__init__.py
|
jdasam/scnym
|
0b506170f4cb944e498d97ba0e086cbe2a62a18e
|
[
"Apache-2.0"
] | 3 |
2019-08-13T17:59:39.000Z
|
2022-01-20T03:22:42.000Z
|
__author__ = 'Jacob C. Kimmel, David R. Kelley'
__email__ = 'jacobkimmel+scnym@gmail.com, drk@calicolabs.com'
__version__ = '0.3.1'
# populate the namespace so top level imports work
# e.g.
# >> from scnym.model import CellTypeCLF
from . import api, main, dataprep, interpret, model, predict, trainer, utils
| 31 | 76 | 0.73871 |
2968f8f323c5a870deeb201e4c07ac98f482bf39
| 3,322 |
py
|
Python
|
kubernetes/client/models/v1_session_affinity_config.py
|
scele/kubernetes-client-python
|
9e982cbdb5f19dc1a3935a75bdd92288f3b807fb
|
[
"Apache-2.0"
] | null | null | null |
kubernetes/client/models/v1_session_affinity_config.py
|
scele/kubernetes-client-python
|
9e982cbdb5f19dc1a3935a75bdd92288f3b807fb
|
[
"Apache-2.0"
] | null | null | null |
kubernetes/client/models/v1_session_affinity_config.py
|
scele/kubernetes-client-python
|
9e982cbdb5f19dc1a3935a75bdd92288f3b807fb
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.8.2
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1SessionAffinityConfig(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'client_ip': 'V1ClientIPConfig'
}
attribute_map = {
'client_ip': 'clientIP'
}
def __init__(self, client_ip=None):
"""
V1SessionAffinityConfig - a model defined in Swagger
"""
self._client_ip = None
self.discriminator = None
if client_ip is not None:
self.client_ip = client_ip
@property
def client_ip(self):
"""
Gets the client_ip of this V1SessionAffinityConfig.
clientIP contains the configurations of Client IP based session affinity.
:return: The client_ip of this V1SessionAffinityConfig.
:rtype: V1ClientIPConfig
"""
return self._client_ip
@client_ip.setter
def client_ip(self, client_ip):
"""
Sets the client_ip of this V1SessionAffinityConfig.
clientIP contains the configurations of Client IP based session affinity.
:param client_ip: The client_ip of this V1SessionAffinityConfig.
:type: V1ClientIPConfig
"""
self._client_ip = client_ip
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, V1SessionAffinityConfig):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
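# Minimal usage sketch (not part of the generated client): construct the model
# with no client IP configuration and inspect its serialized form.
if __name__ == "__main__":
    config = V1SessionAffinityConfig()
    print(config.to_dict())                      # {'client_ip': None}
    print(config == V1SessionAffinityConfig())   # True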
| 26.15748 | 105 | 0.566827 |
c0a21c7de8ad09fa5830d745068295952314c347
| 520 |
py
|
Python
|
challenge2and3/bareBones/main.py
|
miam-miam100/SpaceCadets
|
92fe1e66a978436ae3d2ed6b092a9fc550150da3
|
[
"MIT"
] | null | null | null |
challenge2and3/bareBones/main.py
|
miam-miam100/SpaceCadets
|
92fe1e66a978436ae3d2ed6b092a9fc550150da3
|
[
"MIT"
] | null | null | null |
challenge2and3/bareBones/main.py
|
miam-miam100/SpaceCadets
|
92fe1e66a978436ae3d2ed6b092a9fc550150da3
|
[
"MIT"
] | null | null | null |
def multiply(X, Y, Total): # tsd
while X != 0: # d
W = 0 # ds
while Y != 0:
Total += 1
W += 1 # sdfaa
Y -= 1
while W != 0:
Y += 1
W -= 1
X -= 1
return X, Y, Total
nine = 0
three0 = 0
three0 += 1
three0 += 1
three0 += 1
three1 = 0 # l
while three0 != 0:
while three0 != 0:
three0 -= 1
three1 += 1
three1 += 1
three1 += 1
test = 0
(_, _, nine) = multiply(three0, three0, nine) # op
test += 1
test += 1
| 16.774194 | 51 | 0.425 |
87d840d7c74e391005fa87f627262d753a873558
| 1,533 |
py
|
Python
|
products/migrations/0001_initial.py
|
josygeorge/django-fullstack-milestone-main-project
|
8fbcd3617542a7a0f74ad1afb1b806cc42a493c4
|
[
"Unlicense"
] | null | null | null |
products/migrations/0001_initial.py
|
josygeorge/django-fullstack-milestone-main-project
|
8fbcd3617542a7a0f74ad1afb1b806cc42a493c4
|
[
"Unlicense"
] | null | null | null |
products/migrations/0001_initial.py
|
josygeorge/django-fullstack-milestone-main-project
|
8fbcd3617542a7a0f74ad1afb1b806cc42a493c4
|
[
"Unlicense"
] | null | null | null |
# Generated by Django 3.2.5 on 2021-08-09 08:59
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Category',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=254)),
('friendly_name', models.CharField(blank=True, max_length=254, null=True)),
],
),
migrations.CreateModel(
name='Product',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('sku', models.CharField(blank=True, max_length=254, null=True)),
('name', models.CharField(max_length=254)),
('description', models.TextField()),
('price', models.DecimalField(decimal_places=2, max_digits=6)),
('rating', models.DecimalField(blank=True, decimal_places=2, max_digits=6, null=True)),
('image_url', models.URLField(blank=True, max_length=1024, null=True)),
('image', models.ImageField(blank=True, null=True, upload_to='')),
('category', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='products.category')),
],
),
]
| 40.342105 | 141 | 0.592303 |
23113c2455a331a146ab1d43543f63aa1bda683f
| 358 |
py
|
Python
|
pyspark_base/stats.py
|
miguelgfierro/pybase
|
de8e4f11ed5c655e748178e65195c7e70a9c98af
|
[
"BSD-3-Clause"
] | 14 |
2020-02-07T21:36:39.000Z
|
2022-03-12T22:37:04.000Z
|
pyspark_base/stats.py
|
miguelgfierro/pybase
|
de8e4f11ed5c655e748178e65195c7e70a9c98af
|
[
"BSD-3-Clause"
] | 19 |
2019-05-18T23:58:30.000Z
|
2022-01-09T16:45:35.000Z
|
pyspark_base/stats.py
|
miguelgfierro/pybase
|
de8e4f11ed5c655e748178e65195c7e70a9c98af
|
[
"BSD-3-Clause"
] | 5 |
2020-10-06T06:10:27.000Z
|
2021-07-08T12:58:46.000Z
|
from pyspark.sql import SparkSession

if __name__ == "__main__":
    # Create (or reuse) a local SparkSession so the examples below are runnable
    # outside of an interactive pyspark shell.
    spark = SparkSession.builder.getOrCreate()
    df = spark.createDataFrame([("a", 1), ("a", 2), ("c", 3)], ["letters", "numbers"])
print("(rows, columns)={}".format((df.count(), len(df.columns)))) # (rows, columns)=(3,2)
df.describe().show() # Compute summary statistics
df.distinct().count() # Count the number of distinct rows
df.schema # Return the schema
| 51.142857 | 93 | 0.608939 |
1d21d4c9015419501974331ed495ac27b2d1b9e1
| 12,992 |
py
|
Python
|
implementations/python/mzlib/backends/json.py
|
wulongict/mzSpecLib
|
c42b993e025a9c7a48806daa5919611a8ed5807f
|
[
"Apache-2.0"
] | null | null | null |
implementations/python/mzlib/backends/json.py
|
wulongict/mzSpecLib
|
c42b993e025a9c7a48806daa5919611a8ed5807f
|
[
"Apache-2.0"
] | null | null | null |
implementations/python/mzlib/backends/json.py
|
wulongict/mzSpecLib
|
c42b993e025a9c7a48806daa5919611a8ed5807f
|
[
"Apache-2.0"
] | null | null | null |
import io
import json
import re
from pathlib import Path
from mzlib.index import MemoryIndex
from mzlib.attributes import AttributeManager
from mzlib.annotation import parse_annotation, IonAnnotationBase
from .base import SpectralLibraryBackendBase, SpectralLibraryWriterBase, FORMAT_VERSION_TERM
LIBRARY_METADATA_KEY = "attributes"
ELEMENT_ATTRIBUTES_KEY = "attributes"
LIBRARY_SPECTRA_KEY = "spectra"
FORMAT_VERSION_KEY = "format_version"
FORMAT_VERSION_ACC = FORMAT_VERSION_TERM.split("|")[0]
class JSONSpectralLibrary(SpectralLibraryBackendBase):
file_format = "mzlb.json"
format_name = "json"
def __init__(self, filename, index_type=None, read_metadata=True):
if index_type is None:
index_type = MemoryIndex
super(JSONSpectralLibrary, self).__init__(filename)
self.buffer = {}
self._load_buffer(self.filename)
self.attributes = AttributeManager(
self.buffer.get(LIBRARY_METADATA_KEY))
self.index, was_initialized = index_type.from_filename(self.filename)
if not was_initialized:
self.create_index()
@classmethod
def guess_from_filename(cls, filename):
if isinstance(filename, dict):
return LIBRARY_SPECTRA_KEY in filename and LIBRARY_METADATA_KEY in filename
if not isinstance(filename, (str, Path)):
return False
return filename.endswith(cls.file_format)
def _load_buffer(self, filename_or_stream):
if isinstance(filename_or_stream, dict):
self.buffer = filename_or_stream
else:
if hasattr(filename_or_stream, 'read'):
self.handle = filename_or_stream
else:
self.handle = open(filename_or_stream, 'rt')
self.buffer = json.load(self.handle)
self.handle.close()
def read_header(self):
if self.buffer:
pass
return False
def create_index(self):
for i, record in enumerate(self.buffer[LIBRARY_SPECTRA_KEY]):
for attrib in record['attributes']:
if attrib["accession"] == "MS:1003061":
self.index.add(i, i, attrib['value'], None, None)
break
else:
raise ValueError(f"Unidentified spectrum at index {i}")
def get_spectrum(self, spectrum_number=None, spectrum_name=None):
"""Retrieve a single spectrum from the library.
Parameters
----------
spectrum_number : int, optional
The index of the specturm in the library
spectrum_name : str, optional
The name of the spectrum in the library
Returns
-------
:class:`~.Spectrum`
"""
if spectrum_number is not None:
if spectrum_name is not None:
raise ValueError(
"Provide only one of spectrum_number or spectrum_name")
offset = self.index.offset_for(spectrum_number)
elif spectrum_name is not None:
offset = self.index.offset_for(spectrum_name)
data = self.buffer[LIBRARY_SPECTRA_KEY][offset]
spectrum = self.make_spectrum_from_payload(data)
return spectrum
def make_spectrum_from_payload(self, data):
spectrum = self._new_spectrum()
for attrib in data['attributes']:
key = f'{attrib["accession"]}|{attrib["name"]}'
if "value_accession" in attrib:
value = f'{attrib["value_accession"]}|{attrib["value"]}'
else:
value = attrib['value']
group = attrib.get("cv_param_group")
spectrum.add_attribute(key, value, group_identifier=group)
if group is not None:
spectrum.group_counter = int(group)
analytes = []
for analyte_id, analyte in data['analytes'].items():
analyte_d = self._new_analyte(analyte_id)
for attrib in analyte['attributes']:
key = f'{attrib["accession"]}|{attrib["name"]}'
if "value_accession" in attrib:
value = f'{attrib["value_accession"]}|{attrib["value"]}'
else:
value = attrib['value']
group = attrib.get("cv_param_group")
analyte_d.add_attribute(key, value, group_identifier=group)
if group is not None:
analyte_d.group_counter = int(group)
analytes.append(analyte_d)
spectrum.analytes = analytes
peak_list = []
n = len(data['mzs'])
mzs = data['mzs']
intensities = data['intensities']
interpretations = data['interpretations']
aggregations = data.get("aggregations", None)
for i in range(n):
interpretation = interpretations[i]
if isinstance(interpretation, str):
interpretation = parse_annotation(interpretation)
elif isinstance(interpretation, list):
interpretation = [IonAnnotationBase.from_json(interp) for interp in interpretation]
elif isinstance(interpretation, dict):
interpretation = [IonAnnotationBase.from_json(interpretation)]
else:
raise TypeError(f"Cannot reconstruct interpretation from type {interpretation.__class__}")
peak = [
mzs[i],
intensities[i],
                # Use the normalized interpretation computed above; re-parsing
                # the raw entry would fail for list/dict-encoded annotations.
                interpretation,
aggregations[i] if aggregations else ''
]
peak_list.append(peak)
spectrum.peak_list = peak_list
return spectrum
def read(self):
n = len(self.buffer[LIBRARY_SPECTRA_KEY])
for offset in range(n):
data = self.buffer[LIBRARY_SPECTRA_KEY][offset]
spectrum = self.make_spectrum_from_payload(data)
yield spectrum
class JSONSpectralLibraryWriter(SpectralLibraryWriterBase):
file_format = "mzlb.json"
format_name = "json"
default_version = '1.0'
def __init__(self, filename, version=None, pretty_print=True, format_annotations=True, simplify=True, **kwargs):
if version is None:
version = self.default_version
super(JSONSpectralLibraryWriter, self).__init__(filename)
self._coerce_handle(self.filename)
self.version = version
self.pretty_print = pretty_print
self.wrote_library = False
self.simplify = simplify
self.format_annotations = format_annotations
self.buffer = {
FORMAT_VERSION_KEY: self.version,
LIBRARY_METADATA_KEY: [],
LIBRARY_SPECTRA_KEY: []
}
def write_library(self, library):
self.wrote_library = True
return super().write_library(library)
def split_compound_value(self, value):
value = str(value)
# Don't process quoted values
if value.startswith('"'):
return [value]
components = value.split('|', 1)
return components
def write_header(self, library):
attributes = []
for attribute in library.attributes:
reformed_attribute = {}
if len(attribute) == 2:
key, value = attribute
elif len(attribute) == 3:
key, value, cv_param_group = attribute
reformed_attribute['cv_param_group'] = cv_param_group
else:
raise ValueError(
f"Unsupported number of items in attribute: {attribute}")
if FORMAT_VERSION_ACC == key:
# Already covered by format_version
continue
components = key.split('|', 1)
if len(components) == 2:
                accession, name = components
reformed_attribute['accession'] = accession
reformed_attribute['name'] = name
else:
raise ValueError(
f"Unsupported number of items in components: {components}")
components = self.split_compound_value(value)
if len(components) == 2:
                value_accession, value = components
reformed_attribute['value_accession'] = value_accession
reformed_attribute['value'] = value
elif len(components) == 1:
reformed_attribute['value'] = value
else:
raise ValueError(
f"Unsupported number of items in components: {components}")
attributes.append(reformed_attribute)
self.buffer[LIBRARY_METADATA_KEY] = attributes
def _format_attributes(self, attributes_manager):
attributes = []
for attribute in attributes_manager:
reformed_attribute = {}
if len(attribute) == 2:
key, value = attribute
elif len(attribute) == 3:
key, value, cv_param_group = attribute
reformed_attribute['cv_param_group'] = cv_param_group
else:
raise ValueError(
f"Unsupported number of items in attribute: {attribute}")
components = key.split('|', 1)
if len(components) == 2:
accession, name = components
reformed_attribute['accession'] = accession
reformed_attribute['name'] = name
else:
raise ValueError(
f"Unsupported number of items in components: {components}")
components = self.split_compound_value(value)
if len(components) == 2:
value_accession, value = components
reformed_attribute['value_accession'] = value_accession
reformed_attribute['value'] = value
elif len(components) == 1:
reformed_attribute['value'] = value
else:
raise ValueError(
f"Unsupported number of items in components: {components}")
attributes.append(reformed_attribute)
return attributes
def write_spectrum(self, spectrum):
mzs = []
intensities = []
interpretations = []
aggregations = []
for peak in spectrum.peak_list:
mzs.append(peak[0])
intensities.append(peak[1])
if self.format_annotations:
interpretations.append(
'?' if not peak[2] else ",".join(map(str, peak[2])))
else:
interpretations.append([c.to_json() for c in peak[2]])
aggregations.append(peak[3])
#### Organize the attributes from the simple list into the appropriate JSON format
attributes = self._format_attributes(spectrum)
analytes = {}
for analyte in spectrum.analytes:
analyte_d = {
"id": analyte.id,
ELEMENT_ATTRIBUTES_KEY: self._format_attributes(analyte)
}
analytes[analyte.id] = (analyte_d)
spectrum = {
ELEMENT_ATTRIBUTES_KEY: attributes,
"mzs": mzs,
"intensities": intensities,
"interpretations": interpretations,
"aggregations": aggregations,
"analytes": analytes
}
if not any(aggregations):
spectrum.pop('aggregations')
self.buffer[LIBRARY_SPECTRA_KEY].append(spectrum)
def flush(self):
# If we know we're writing a complete library, skip the probably-doing-too-many-things
# formatting logic for single vs. many spectra.
if self.wrote_library:
if self.pretty_print:
json.dump(self.buffer, self.handle, indent=2, sort_keys=True)
else:
json.dump(self.buffer, self.handle)
else:
# We don't have a header section to format, so write just the spectra,
# and if the number of spectra is one and the simplify flag is true,
# skip the wrapping array
spectra = self.buffer[LIBRARY_SPECTRA_KEY]
n_spectra = len(spectra)
if n_spectra == 1 and self.simplify:
if self.pretty_print:
json.dump(spectra[0], self.handle,
indent=2, sort_keys=True)
else:
json.dump(spectra[0], self.handle)
else:
if self.pretty_print:
json.dump(spectra, self.handle, indent=2, sort_keys=True)
else:
json.dump(spectra, self.handle)
def close(self):
self.flush()
self.handle.close()
def format_spectrum(spectrum, pretty_print=True, **kwargs):
buffer = io.StringIO()
with JSONSpectralLibraryWriter(buffer, pretty_print=pretty_print, **kwargs) as writer:
writer.write_spectrum(spectrum)
writer.flush()
return buffer.getvalue()
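# Usage sketch (the file name is hypothetical; opening a library requires an
# existing mzSpecLib JSON file on disk, so the read is left commented out):
if __name__ == "__main__":
    print(JSONSpectralLibrary.guess_from_filename("example.mzlb.json"))  # True
    # library = JSONSpectralLibrary("example.mzlb.json")
    # spectrum = library.get_spectrum(spectrum_number=0)
    # print(format_spectrum(spectrum))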
| 38.324484 | 116 | 0.587208 |
fb2c85a4264b025824dd7ce643b8fc09b59925f8
| 30,318 |
py
|
Python
|
v0.5.0/google/cloud_v3.8/ssd-tpuv3-8/code/ssd/model/tpu/models/experimental/inception/inception_v3.py
|
myelintek/results
|
11c38436a158c453e3011f8684570f7a55c03330
|
[
"Apache-2.0"
] | 44 |
2018-11-07T18:52:33.000Z
|
2019-07-06T12:48:18.000Z
|
v0.5.0/google/cloud_v3.8/ssd-tpuv3-8/code/ssd/model/tpu/models/experimental/inception/inception_v3.py
|
myelintek/results
|
11c38436a158c453e3011f8684570f7a55c03330
|
[
"Apache-2.0"
] | 12 |
2018-12-13T18:04:36.000Z
|
2019-06-14T20:49:33.000Z
|
v0.5.0/google/cloud_v3.8/ssd-tpuv3-8/code/ssd/model/tpu/models/experimental/inception/inception_v3.py
|
myelintek/results
|
11c38436a158c453e3011f8684570f7a55c03330
|
[
"Apache-2.0"
] | 44 |
2018-11-09T21:04:52.000Z
|
2019-06-24T07:40:28.000Z
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Open-source TensorFlow Inception v3 Example."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import time
from absl import app
from absl import flags
import absl.logging as _logging # pylint: disable=unused-import
import tensorflow as tf
import inception_preprocessing
import vgg_preprocessing
from tensorflow.contrib import summary
from tensorflow.contrib.framework.python.ops import arg_scope
from tensorflow.contrib.slim.nets import inception
from tensorflow.contrib.training.python.training import evaluation
# Cloud TPU Cluster Resolvers
flags.DEFINE_string(
'tpu', default=None,
help='The Cloud TPU to use for training. This should be either the name '
'used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 url.')
flags.DEFINE_string(
'gcp_project', default=None,
help='Project name for the Cloud TPU-enabled project. If not specified, we '
'will attempt to automatically detect the GCE project from metadata.')
flags.DEFINE_string(
'tpu_zone', default=None,
help='GCE zone where the Cloud TPU is located in. If not specified, we '
'will attempt to automatically detect the GCE project from metadata.')
# Model specific paramenters
flags.DEFINE_string(
'data_dir', '',
'Directory where input data is stored')
flags.DEFINE_string(
'model_dir', None,
'Directory where model output is stored')
flags.DEFINE_string(
'export_dir',
default=None,
help=('The directory where the exported SavedModel will be stored.'))
flags.DEFINE_integer(
'num_shards', 8,
'Number of shards (workers).')
flags.DEFINE_integer(
'iterations', 100,
'Number of iterations per TPU training loop.')
flags.DEFINE_bool(
'skip_host_call', default=True,
help=('Skip the host call which is executed every training step. This is'
' generally used for generating training summaries (train loss,'
' learning rate, etc...). When --skip_host_call=false, there could'
' be a performance drop if host_call function is slow and cannot'
' keep up with the computation running on the TPU.'))
flags.DEFINE_integer(
'train_batch_size', 1024,
'Global (not per-shard) batch size for training')
flags.DEFINE_integer(
'eval_total_size', 0,
'Total batch size for evaluation, use the entire validation set if 0')
flags.DEFINE_integer(
'eval_batch_size', 1024,
'Global (not per-shard) batch size for evaluation')
flags.DEFINE_integer(
'train_steps', 213000,
'Number of steps use for training.')
flags.DEFINE_integer(
'train_steps_per_eval', 2000,
'Number of training steps to run between evaluations.')
flags.DEFINE_string(
'mode', 'train_and_eval',
'Mode to run: train, eval, train_and_eval')
flags.DEFINE_integer(
'min_eval_interval', 180,
'Minimum number of seconds between evaluations')
flags.DEFINE_integer(
'eval_timeout', None,
'Evaluation timeout: Maximum number of seconds that '
'may elapse while no new checkpoints are observed')
flags.DEFINE_bool(
'use_tpu', True,
'Use TPUs rather than plain CPUs')
flags.DEFINE_boolean(
'per_host_input_for_training', True,
'If true, input_fn is invoked per host rather than per shard.')
flags.DEFINE_string(
'use_data', 'real',
'One of "fake","real"')
flags.DEFINE_float(
'learning_rate', 0.165,
'Learning rate.')
flags.DEFINE_string(
'optimizer', 'RMS',
'Optimizer (one of sgd, RMS, momentum)')
flags.DEFINE_integer(
'num_classes', 1001,
'Number of classes to distinguish')
flags.DEFINE_integer(
'width', 299,
'Width of input image')
flags.DEFINE_integer(
'height', 299,
'Height of input image')
flags.DEFINE_bool(
'transpose_enabled', False,
'Boolean to enable/disable explicit I/O transpose')
flags.DEFINE_bool(
'log_device_placement', False,
'Boolean to enable/disable log device placement')
flags.DEFINE_integer(
'save_summary_steps', 100,
'Number of steps which must have run before showing summaries.')
flags.DEFINE_integer(
'save_checkpoints_secs', 1000,
'Interval (in seconds) at which the model data '
'should be checkpointed. Set to 0 to disable.')
flags.DEFINE_bool(
'moving_average', True,
'Whether to enable moving average computation on variables')
flags.DEFINE_string(
'preprocessing', 'inception',
'Preprocessing stage to use: one of inception or vgg')
flags.DEFINE_bool(
'use_annotated_bbox', False,
'If true, use annotated bounding box as input to cropping function, '
'else use full image size')
flags.DEFINE_float(
'learning_rate_decay', 0.94,
'Exponential decay rate used in learning rate adjustment')
flags.DEFINE_integer(
'learning_rate_decay_epochs', 3,
'Exponential decay epochs used in learning rate adjustment')
flags.DEFINE_bool(
'display_tensors', False,
'Whether to dump prediction tensors for comparison')
flags.DEFINE_bool(
'clear_update_collections', True,
'Set batchnorm update_collections to None if true, else use default value')
flags.DEFINE_integer(
'cold_epochs', 2,
'Number of epochs using cold learning rate')
flags.DEFINE_integer(
'warmup_epochs', 7,
'Number of epochs using linearly increasing learning rate')
flags.DEFINE_bool(
'use_learning_rate_warmup', False,
'Apply learning rate warmup if true')
# Dataset specific paramenters
flags.DEFINE_bool(
'prefetch_enabled', True,
'Boolean to enable/disable prefetching')
flags.DEFINE_integer(
'prefetch_dataset_buffer_size', 8*1024*1024,
'Number of bytes in read buffer. 0 means no buffering.')
flags.DEFINE_integer(
'num_files_infeed', 8,
'Number of training files to read in parallel.')
flags.DEFINE_integer(
'num_parallel_calls', 64,
'Number of elements to process in parallel (by mapper)')
flags.DEFINE_integer(
'initial_shuffle_buffer_size', 1024,
'Number of elements from dataset that shuffler will sample from. '
'This shuffling is done before any other operations. '
'Set to 0 to disable')
flags.DEFINE_integer(
'followup_shuffle_buffer_size', 1000,
'Number of elements from dataset that shuffler will sample from. '
'This shuffling is done after prefetching is done. '
'Set to 0 to disable')
flags.DEFINE_string(
'precision', 'float32',
help=('Precision to use; one of: {bfloat16, float32}'))
FLAGS = flags.FLAGS
# Dataset constants
_NUM_TRAIN_IMAGES = 1281167
_NUM_EVAL_IMAGES = 50000
# Random cropping constants
_RESIZE_SIDE_MIN = 300
_RESIZE_SIDE_MAX = 600
# Constants dictating the learning rate schedule.
RMSPROP_DECAY = 0.9 # Decay term for RMSProp.
RMSPROP_MOMENTUM = 0.9 # Momentum in RMSProp.
RMSPROP_EPSILON = 1.0 # Epsilon term for RMSProp.
# Constants dictating moving average.
MOVING_AVERAGE_DECAY = 0.995
# Batchnorm moving mean/variance parameters
BATCH_NORM_DECAY = 0.996
BATCH_NORM_EPSILON = 1e-3
WEIGHT_DECAY = 0.00004
def preprocess_raw_bytes(image_bytes, is_training=False, bbox=None):
"""Preprocesses a raw JPEG image.
This implementation is shared in common between train/eval pipelines,
and when serving the model.
Args:
image_bytes: A string Tensor, containing the encoded JPEG.
is_training: Whether or not to preprocess for training.
bbox: In inception preprocessing, this bbox can be used for cropping.
Returns:
A 3-Tensor [height, width, RGB channels] of type float32.
"""
image = tf.image.decode_jpeg(image_bytes, channels=3)
image = tf.image.convert_image_dtype(image, dtype=tf.float32)
if FLAGS.preprocessing == 'vgg':
image = vgg_preprocessing.preprocess_image(
image=image,
output_height=FLAGS.height,
output_width=FLAGS.width,
is_training=is_training,
resize_side_min=_RESIZE_SIDE_MIN,
resize_side_max=_RESIZE_SIDE_MAX)
elif FLAGS.preprocessing == 'inception':
image = inception_preprocessing.preprocess_image(
image=image,
output_height=FLAGS.height,
output_width=FLAGS.width,
is_training=is_training,
bbox=bbox)
else:
assert False, 'Unknown preprocessing type: %s' % FLAGS.preprocessing
return image
class InputPipeline(object):
"""Generates ImageNet input_fn for training or evaluation.
The training data is assumed to be in TFRecord format with keys as specified
in the dataset_parser below, sharded across 1024 files, named sequentially:
train-00000-of-01024
train-00001-of-01024
...
train-01023-of-01024
The validation data is in the same format but sharded in 128 files.
The format of the data required is created by the script at:
https://github.com/tensorflow/tpu/blob/master/tools/datasets/imagenet_to_gcs.py
Args:
is_training: `bool` for whether the input is for training
"""
def __init__(self, is_training, data_dir, use_bfloat16):
self.is_training = is_training
self.data_dir = data_dir
self.use_bfloat16 = use_bfloat16
def dataset_parser(self, serialized_proto):
"""Parse an Imagenet record from value."""
keys_to_features = {
'image/encoded':
tf.FixedLenFeature((), tf.string, default_value=''),
'image/format':
tf.FixedLenFeature((), tf.string, default_value='jpeg'),
'image/class/label':
tf.FixedLenFeature([], dtype=tf.int64, default_value=-1),
'image/class/text':
tf.FixedLenFeature([], dtype=tf.string, default_value=''),
'image/object/bbox/xmin':
tf.VarLenFeature(dtype=tf.float32),
'image/object/bbox/ymin':
tf.VarLenFeature(dtype=tf.float32),
'image/object/bbox/xmax':
tf.VarLenFeature(dtype=tf.float32),
'image/object/bbox/ymax':
tf.VarLenFeature(dtype=tf.float32),
'image/object/class/label':
tf.VarLenFeature(dtype=tf.int64),
}
features = tf.parse_single_example(serialized_proto, keys_to_features)
bbox = None
if FLAGS.use_annotated_bbox:
xmin = tf.expand_dims(features['image/object/bbox/xmin'].values, 0)
ymin = tf.expand_dims(features['image/object/bbox/ymin'].values, 0)
xmax = tf.expand_dims(features['image/object/bbox/xmax'].values, 0)
ymax = tf.expand_dims(features['image/object/bbox/ymax'].values, 0)
# Note that we impose an ordering of (y, x) just to make life difficult.
bbox = tf.concat([ymin, xmin, ymax, xmax], 0)
# Force the variable number of bounding boxes into the shape
# [1, num_boxes, coords].
bbox = tf.expand_dims(bbox, 0)
bbox = tf.transpose(bbox, [0, 2, 1])
image = features['image/encoded']
image = preprocess_raw_bytes(image, is_training=self.is_training, bbox=bbox)
label = tf.cast(
tf.reshape(features['image/class/label'], shape=[]), dtype=tf.int32)
if self.use_bfloat16:
image = tf.cast(image, tf.bfloat16)
return image, label
def dataset_iterator(self, batch_size, shuffle):
"""Constructs a real-data iterator over batches for train or eval.
Args:
batch_size: The effective batch size.
shuffle: Whether or not to shuffle the data.
Returns:
A tf.data iterator.
"""
file_pattern = os.path.join(self.data_dir, 'train-*'
if self.is_training else 'validation-*')
dataset = tf.data.Dataset.list_files(file_pattern, shuffle=self.is_training)
if self.is_training:
dataset = dataset.repeat()
def prefetch_dataset(filename):
dataset = tf.data.TFRecordDataset(
filename, buffer_size=FLAGS.prefetch_dataset_buffer_size)
return dataset
dataset = dataset.apply(
tf.contrib.data.parallel_interleave(
prefetch_dataset, cycle_length=FLAGS.num_files_infeed, sloppy=True))
if shuffle and FLAGS.followup_shuffle_buffer_size > 0:
dataset = dataset.shuffle(buffer_size=FLAGS.followup_shuffle_buffer_size)
dataset = dataset.map(
self.dataset_parser, num_parallel_calls=FLAGS.num_parallel_calls)
dataset = dataset.prefetch(batch_size)
dataset = dataset.batch(batch_size, drop_remainder=True)
dataset = dataset.prefetch(2) # Prefetch overlaps in-feed with training
return dataset.make_one_shot_iterator()
def input_fn(self, params):
"""Input function which provides a single batch for train or eval.
Args:
params: `dict` of parameters passed from the `TPUEstimator`.
`params['batch_size']` is always provided and should be used as the
effective batch size.
Returns:
A (images, labels) tuple of `Tensor`s for a batch of samples.
"""
batch_size = params['batch_size']
if FLAGS.use_data == 'real':
images, labels = self.dataset_iterator(batch_size,
self.is_training).get_next()
else:
images = tf.random_uniform(
[batch_size, FLAGS.height, FLAGS.width, 3], minval=-1, maxval=1)
labels = tf.random_uniform(
[batch_size], minval=0, maxval=999, dtype=tf.int32)
images = tensor_transform_fn(images, params['output_perm'])
return images, labels
def image_serving_input_fn():
"""Serving input fn for raw images.
This function is consumed when exporting a SavedModel.
Returns:
A ServingInputReceiver capable of serving MobileNet predictions.
"""
image_bytes_list = tf.placeholder(
shape=[None],
dtype=tf.string,
)
images = tf.map_fn(
preprocess_raw_bytes, image_bytes_list, back_prop=False, dtype=tf.float32)
return tf.estimator.export.ServingInputReceiver(
images, {'image_bytes': image_bytes_list})
def tensor_transform_fn(data, perm):
"""Transpose function.
This function is used to transpose an image tensor on the host and then
perform an inverse transpose on the TPU. The transpose on the TPU gets
effectively elided thus voiding any associated computational cost.
NOTE: Eventually the compiler will be able to detect when this kind of
operation may prove beneficial and perform these types of transformations
implicitly, voiding the need for user intervention
Args:
data: Tensor to be transposed
perm: New ordering of dimensions
Returns:
Transposed tensor
"""
if FLAGS.transpose_enabled:
return tf.transpose(data, perm)
return data
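# (Added note, not part of the original script.) When --transpose_enabled is
# set, main() below configures complementary permutations: input_fn applies
# params['output_perm'] = [1, 2, 3, 0] on the host, and inception_model_fn
# applies params['input_perm'] = [3, 0, 1, 2] on the device, so the two
# transposes compose back to the identity. A NumPy sketch of the round trip:
#
#     import numpy as np
#     batch = np.zeros((8, 299, 299, 3))         # [N, H, W, C] on the host
#     host = np.transpose(batch, (1, 2, 3, 0))   # -> (299, 299, 3, 8)
#     device = np.transpose(host, (3, 0, 1, 2))  # -> (8, 299, 299, 3) again
#
# Only the host-side transpose does real work; the device-side inverse is
# effectively elided by the compiler, as described in the docstring above.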
def inception_model_fn(features, labels, mode, params):
"""Inception v3 model using Estimator API."""
num_classes = FLAGS.num_classes
is_training = (mode == tf.estimator.ModeKeys.TRAIN)
is_eval = (mode == tf.estimator.ModeKeys.EVAL)
if isinstance(features, dict):
features = features['feature']
features = tensor_transform_fn(features, params['input_perm'])
# This nested function allows us to avoid duplicating the logic which
# builds the network, for different values of --precision.
def build_network():
if FLAGS.precision == 'bfloat16':
with tf.contrib.tpu.bfloat16_scope():
logits, end_points = inception.inception_v3(
features,
num_classes,
is_training=is_training)
logits = tf.cast(logits, tf.float32)
elif FLAGS.precision == 'float32':
logits, end_points = inception.inception_v3(
features,
num_classes,
is_training=is_training)
return logits, end_points
if FLAGS.clear_update_collections:
# updates_collections must be set to None in order to use fused batchnorm
with arg_scope(inception.inception_v3_arg_scope(
weight_decay=0.0,
batch_norm_decay=BATCH_NORM_DECAY,
batch_norm_epsilon=BATCH_NORM_EPSILON,
updates_collections=None)):
logits, end_points = build_network()
else:
with arg_scope(inception.inception_v3_arg_scope(
batch_norm_decay=BATCH_NORM_DECAY,
batch_norm_epsilon=BATCH_NORM_EPSILON)):
logits, end_points = build_network()
predictions = {
'classes': tf.argmax(input=logits, axis=1),
'probabilities': tf.nn.softmax(logits, name='softmax_tensor')
}
if mode == tf.estimator.ModeKeys.PREDICT:
return tf.estimator.EstimatorSpec(
mode=mode,
predictions=predictions,
export_outputs={
'classify': tf.estimator.export.PredictOutput(predictions)
})
if mode == tf.estimator.ModeKeys.EVAL and FLAGS.display_tensors and (
not FLAGS.use_tpu):
with tf.control_dependencies([
tf.Print(
predictions['classes'], [predictions['classes']],
summarize=FLAGS.eval_batch_size,
message='prediction: ')
]):
labels = tf.Print(
labels, [labels], summarize=FLAGS.eval_batch_size, message='label: ')
one_hot_labels = tf.one_hot(labels, FLAGS.num_classes, dtype=tf.int32)
if 'AuxLogits' in end_points:
tf.losses.softmax_cross_entropy(
onehot_labels=one_hot_labels,
logits=tf.cast(end_points['AuxLogits'], tf.float32),
weights=0.4,
label_smoothing=0.1,
scope='aux_loss')
tf.losses.softmax_cross_entropy(
onehot_labels=one_hot_labels,
logits=logits,
weights=1.0,
label_smoothing=0.1)
losses = tf.add_n(tf.losses.get_losses())
l2_loss = []
for v in tf.trainable_variables():
if 'BatchNorm' not in v.name and 'weights' in v.name:
l2_loss.append(tf.nn.l2_loss(v))
loss = losses + WEIGHT_DECAY * tf.add_n(l2_loss)
initial_learning_rate = FLAGS.learning_rate * FLAGS.train_batch_size / 256
if FLAGS.use_learning_rate_warmup:
# Adjust initial learning rate to match final warmup rate
warmup_decay = FLAGS.learning_rate_decay**(
(FLAGS.warmup_epochs + FLAGS.cold_epochs) /
FLAGS.learning_rate_decay_epochs)
adj_initial_learning_rate = initial_learning_rate * warmup_decay
final_learning_rate = 0.0001 * initial_learning_rate
host_call = None
train_op = None
if is_training:
batches_per_epoch = _NUM_TRAIN_IMAGES / FLAGS.train_batch_size
global_step = tf.train.get_or_create_global_step()
current_epoch = tf.cast(
(tf.cast(global_step, tf.float32) / batches_per_epoch), tf.int32)
learning_rate = tf.train.exponential_decay(
learning_rate=initial_learning_rate,
global_step=global_step,
decay_steps=int(FLAGS.learning_rate_decay_epochs * batches_per_epoch),
decay_rate=FLAGS.learning_rate_decay,
staircase=True)
if FLAGS.use_learning_rate_warmup:
wlr = 0.1 * adj_initial_learning_rate
wlr_height = tf.cast(
0.9 * adj_initial_learning_rate /
(FLAGS.warmup_epochs + FLAGS.learning_rate_decay_epochs - 1),
tf.float32)
epoch_offset = tf.cast(FLAGS.cold_epochs - 1, tf.int32)
exp_decay_start = (FLAGS.warmup_epochs + FLAGS.cold_epochs +
FLAGS.learning_rate_decay_epochs)
lin_inc_lr = tf.add(
wlr, tf.multiply(
tf.cast(tf.subtract(current_epoch, epoch_offset), tf.float32),
wlr_height))
learning_rate = tf.where(
tf.greater_equal(current_epoch, FLAGS.cold_epochs),
(tf.where(tf.greater_equal(current_epoch, exp_decay_start),
learning_rate, lin_inc_lr)),
wlr)
# Set a minimum boundary for the learning rate.
learning_rate = tf.maximum(
learning_rate, final_learning_rate, name='learning_rate')
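      # (Added summary of the expressions above.) With warmup enabled, the rate
      # holds at wlr = 0.1 * adj_initial_learning_rate for the cold epochs,
      # grows linearly by wlr_height per epoch through the warmup phase, and
      # hands over to the exponentially decaying schedule once current_epoch
      # reaches exp_decay_start; the tf.maximum above keeps the value from
      # ever dropping below final_learning_rate.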
if FLAGS.optimizer == 'sgd':
tf.logging.info('Using SGD optimizer')
optimizer = tf.train.GradientDescentOptimizer(
learning_rate=learning_rate)
elif FLAGS.optimizer == 'momentum':
tf.logging.info('Using Momentum optimizer')
optimizer = tf.train.MomentumOptimizer(
learning_rate=learning_rate, momentum=0.9)
elif FLAGS.optimizer == 'RMS':
tf.logging.info('Using RMS optimizer')
optimizer = tf.train.RMSPropOptimizer(
learning_rate,
RMSPROP_DECAY,
momentum=RMSPROP_MOMENTUM,
epsilon=RMSPROP_EPSILON)
else:
tf.logging.fatal('Unknown optimizer:', FLAGS.optimizer)
if FLAGS.use_tpu:
optimizer = tf.contrib.tpu.CrossShardOptimizer(optimizer)
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
train_op = optimizer.minimize(loss, global_step=global_step)
if FLAGS.moving_average:
ema = tf.train.ExponentialMovingAverage(
decay=MOVING_AVERAGE_DECAY, num_updates=global_step)
variables_to_average = (
tf.trainable_variables() + tf.moving_average_variables())
with tf.control_dependencies([train_op]), tf.name_scope('moving_average'):
train_op = ema.apply(variables_to_average)
# To log the loss, current learning rate, and epoch for Tensorboard, the
# summary op needs to be run on the host CPU via host_call. host_call
# expects [batch_size, ...] Tensors, thus reshape to introduce a batch
# dimension. These Tensors are implicitly concatenated to
# [params['batch_size']].
gs_t = tf.reshape(global_step, [1])
loss_t = tf.reshape(loss, [1])
lr_t = tf.reshape(learning_rate, [1])
ce_t = tf.reshape(current_epoch, [1])
if not FLAGS.skip_host_call:
def host_call_fn(gs, loss, lr, ce):
"""Training host call. Creates scalar summaries for training metrics.
This function is executed on the CPU and should not directly reference
any Tensors in the rest of the `model_fn`. To pass Tensors from the
model to the `metric_fn`, provide them as part of the `host_call`. See
https://www.tensorflow.org/api_docs/python/tf/contrib/tpu/TPUEstimatorSpec
for more information.
Arguments should match the list of `Tensor` objects passed as the second
element in the tuple passed to `host_call`.
Args:
        gs: `Tensor` with shape `[batch]` for the global_step.
loss: `Tensor` with shape `[batch]` for the training loss.
lr: `Tensor` with shape `[batch]` for the learning_rate.
ce: `Tensor` with shape `[batch]` for the current_epoch.
Returns:
List of summary ops to run on the CPU host.
"""
gs = gs[0]
with summary.create_file_writer(FLAGS.model_dir).as_default():
with summary.always_record_summaries():
summary.scalar('loss', tf.reduce_mean(loss), step=gs)
summary.scalar('learning_rate', tf.reduce_mean(lr), step=gs)
summary.scalar('current_epoch', tf.reduce_mean(ce), step=gs)
return summary.all_summary_ops()
host_call = (host_call_fn, [gs_t, loss_t, lr_t, ce_t])
eval_metrics = None
if is_eval:
def metric_fn(labels, logits):
"""Evaluation metric function. Evaluates accuracy.
This function is executed on the CPU and should not directly reference
any Tensors in the rest of the `model_fn`. To pass Tensors from the model
to the `metric_fn`, provide as part of the `eval_metrics`. See
https://www.tensorflow.org/api_docs/python/tf/contrib/tpu/TPUEstimatorSpec
for more information.
Arguments should match the list of `Tensor` objects passed as the second
element in the tuple passed to `eval_metrics`.
Args:
labels: `Tensor` with shape `[batch, ]`.
logits: `Tensor` with shape `[batch, num_classes]`.
Returns:
A dict of the metrics to return from evaluation.
"""
predictions = tf.argmax(logits, axis=1)
top_1_accuracy = tf.metrics.accuracy(labels, predictions)
in_top_5 = tf.cast(tf.nn.in_top_k(logits, labels, 5), tf.float32)
top_5_accuracy = tf.metrics.mean(in_top_5)
return {
'accuracy': top_1_accuracy,
'accuracy@5': top_5_accuracy,
}
eval_metrics = (metric_fn, [labels, logits])
return tf.contrib.tpu.TPUEstimatorSpec(
mode=mode,
loss=loss,
train_op=train_op,
host_call=host_call,
eval_metrics=eval_metrics)
class LoadEMAHook(tf.train.SessionRunHook):
"""Hook to load exponential moving averages into corresponding variables."""
def __init__(self, model_dir):
super(LoadEMAHook, self).__init__()
self._model_dir = model_dir
def begin(self):
ema = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY)
variables_to_restore = ema.variables_to_restore()
self._load_ema = tf.contrib.framework.assign_from_checkpoint_fn(
tf.train.latest_checkpoint(self._model_dir), variables_to_restore)
def after_create_session(self, sess, coord):
tf.logging.info('Reloading EMA...')
self._load_ema(sess)
def main(unused_argv):
del unused_argv # Unused
tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(
FLAGS.tpu,
zone=FLAGS.tpu_zone,
project=FLAGS.gcp_project)
assert FLAGS.precision == 'bfloat16' or FLAGS.precision == 'float32', (
'Invalid value for --precision flag; must be bfloat16 or float32.')
tf.logging.info('Precision: %s', FLAGS.precision)
params = {
'input_perm': [0, 1, 2, 3],
'output_perm': [0, 1, 2, 3],
}
batch_axis = 0
if FLAGS.transpose_enabled:
params['input_perm'] = [3, 0, 1, 2]
params['output_perm'] = [1, 2, 3, 0]
batch_axis = 3
if FLAGS.eval_total_size > 0:
eval_size = FLAGS.eval_total_size
else:
eval_size = _NUM_EVAL_IMAGES
eval_steps = eval_size // FLAGS.eval_batch_size
iterations = (eval_steps if FLAGS.mode == 'eval' else
FLAGS.iterations)
eval_batch_size = (None if FLAGS.mode == 'train' else
FLAGS.eval_batch_size)
per_host_input_for_training = (
FLAGS.num_shards <= 8 if FLAGS.mode == 'train' else True)
run_config = tf.contrib.tpu.RunConfig(
cluster=tpu_cluster_resolver,
model_dir=FLAGS.model_dir,
save_checkpoints_secs=FLAGS.save_checkpoints_secs,
save_summary_steps=FLAGS.save_summary_steps,
session_config=tf.ConfigProto(
allow_soft_placement=True,
log_device_placement=FLAGS.log_device_placement),
tpu_config=tf.contrib.tpu.TPUConfig(
iterations_per_loop=iterations,
num_shards=FLAGS.num_shards,
per_host_input_for_training=per_host_input_for_training))
inception_classifier = tf.contrib.tpu.TPUEstimator(
model_fn=inception_model_fn,
use_tpu=FLAGS.use_tpu,
config=run_config,
params=params,
train_batch_size=FLAGS.train_batch_size,
eval_batch_size=eval_batch_size,
batch_axis=(batch_axis, 0))
# Input pipelines are slightly different (with regards to shuffling and
# preprocessing) between training and evaluation.
use_bfloat16 = FLAGS.precision == 'bfloat16'
imagenet_train = InputPipeline(
is_training=True,
data_dir=FLAGS.data_dir,
use_bfloat16=use_bfloat16)
imagenet_eval = InputPipeline(
is_training=False,
data_dir=FLAGS.data_dir,
use_bfloat16=use_bfloat16)
if FLAGS.moving_average:
eval_hooks = [LoadEMAHook(FLAGS.model_dir)]
else:
eval_hooks = []
if FLAGS.mode == 'eval':
# Run evaluation when there is a new checkpoint
for checkpoint in evaluation.checkpoints_iterator(
FLAGS.model_dir, timeout=FLAGS.eval_timeout):
tf.logging.info('Starting to evaluate.')
try:
start_timestamp = time.time() # Includes compilation time
eval_results = inception_classifier.evaluate(
input_fn=imagenet_eval.input_fn,
steps=eval_steps,
hooks=eval_hooks,
checkpoint_path=checkpoint)
elapsed_time = int(time.time() - start_timestamp)
tf.logging.info(
'Eval results: %s. Elapsed seconds: %d', eval_results, elapsed_time)
# Terminate eval job when final checkpoint is reached
current_step = int(os.path.basename(checkpoint).split('-')[1])
if current_step >= FLAGS.train_steps:
tf.logging.info(
'Evaluation finished after training step %d', current_step)
break
except tf.errors.NotFoundError:
# Since the coordinator is on a different job than the TPU worker,
# sometimes the TPU worker does not finish initializing until long after
# the CPU job tells it to start evaluating. In this case, the checkpoint
# file could have been deleted already.
tf.logging.info(
'Checkpoint %s no longer exists, skipping checkpoint', checkpoint)
elif FLAGS.mode == 'train_and_eval':
for cycle in range(FLAGS.train_steps // FLAGS.train_steps_per_eval):
tf.logging.info('Starting training cycle %d.' % cycle)
inception_classifier.train(
input_fn=imagenet_train.input_fn, steps=FLAGS.train_steps_per_eval)
tf.logging.info('Starting evaluation cycle %d .' % cycle)
eval_results = inception_classifier.evaluate(
input_fn=imagenet_eval.input_fn, steps=eval_steps, hooks=eval_hooks)
tf.logging.info('Evaluation results: %s' % eval_results)
else:
tf.logging.info('Starting training ...')
inception_classifier.train(
input_fn=imagenet_train.input_fn, max_steps=FLAGS.train_steps)
if FLAGS.export_dir is not None:
tf.logging.info('Starting to export model.')
inception_classifier.export_saved_model(
export_dir_base=FLAGS.export_dir,
serving_input_receiver_fn=image_serving_input_fn)
if __name__ == '__main__':
tf.logging.set_verbosity(tf.logging.INFO)
app.run(main)
| 34.180383 | 85 | 0.694373 |
e048df13cca3702197c4730d8952ba8653a41e53
| 5,736 |
py
|
Python
|
gedl/IDLGenerator.py
|
gaps-closure/capo
|
894d2f6d291ff79e18c77e0ca7073531147cbee8
|
[
"BSD-3-Clause"
] | 1 |
2021-04-20T18:43:44.000Z
|
2021-04-20T18:43:44.000Z
|
gedl/IDLGenerator.py
|
gaps-closure/capo
|
894d2f6d291ff79e18c77e0ca7073531147cbee8
|
[
"BSD-3-Clause"
] | 1 |
2021-09-23T14:55:43.000Z
|
2021-09-23T18:09:35.000Z
|
gedl/IDLGenerator.py
|
gaps-closure/capo
|
894d2f6d291ff79e18c77e0ca7073531147cbee8
|
[
"BSD-3-Clause"
] | 1 |
2020-05-21T03:12:16.000Z
|
2020-05-21T03:12:16.000Z
|
#!/usr/bin/env python3
import json
import sys
import os
import os.path
import jsonschema
from argparse import ArgumentParser
def argparser():
""" Command Line Argument Parsing"""
parser = ArgumentParser(description='CLOSURE IDL File Generator')
parser.add_argument('-g','--gedl', required=True, type=str, help='Input GEDL Filepath')
parser.add_argument('-o','--ofile', required=True, type=str, help='Output Filepath')
parser.add_argument('-i','--ipc', required=True, type=str,
choices=("Singlethreaded", "Multithreaded"), help='IPC Type')
parser.add_argument('-s', '--schema', required=False, type=str,
default='./GEDLSchema.json',
                        help='override the location of the schema if required')
parser.add_argument('-L', '--liberal',help="Liberal mode: disable gedl schema check",
default=False, action='store_true')
return parser.parse_args()
def get_gedl_schema(schema_location):
"""Load the schema json to verify against"""
basepath = ""
    #get the module paths to help find the schema relative to ourselves
if(len(sys.path) > 1):
basepath = sys.path[0]
path = os.path.join(basepath,schema_location)
if(not os.path.exists(path)):
        #schema not found relative to the python environment, check a local path
path = schema_location
if(not os.path.exists(path)):
#Unable to get python schema
raise(IOError("Unable to fild cle schema (expected at): " + path))
#we found the schema load it into ram
print("Using GEDL schema: " + path)
with open(path,"r",encoding="UTF-8") as schemafile:
return(json.loads(schemafile.read()))
def check_jsonschema_version():
"""validate the json schema version is new enogh to process
Draft 7 schemas
"""
if(jsonschema.__version__ < "3.2.0"):
raise(ModuleNotFoundError("Newer version of jsonschema module required"
" (>= 3.2.0)"))
def validate_gedl(gedl_json, schema_json):
"""validate the GEDL entry is valid against the shcema"""
try:
        jsonschema.validate(gedl_json, schema_json)
except Exception as e:
print("")
print("Error parsing GEDL")
raise
print("")
print("GEDL is valid")
def generate_idl(gedl, args):
"""Generate the output IDL file"""
with open(args.ofile,"w") as idl_file:
#Create NextRPC and Okay structs necessary for singlethreaded style
idl_file.write("struct NextRPC {\n\tint mux;\n\tint sec;\n\tint typ;\n};");
idl_file.write("\n\nstruct Okay {\n\tint x;\n};")
#For loop that iterates through each enclave pair in gedl
for enclavePair in gedl['gedl']:
#For loop that iterates through each cross-enclave call "caller" makes to "callee" and
#creates corresponding request/response structs
for call in enclavePair['calls']:
#Generate a request struct for this function
idl_file.write("\n\nstruct Request_%s {" % (call['func']))
#Initialize variable to check for input values
dummy = 1
#For loop that iterates through each call parameter and creates a struct variable
for arg in call['params']:
if "in" in arg['dir']:
dummy = 0
idl_file.write("\n\t%s %s" % (arg['type'],arg['name']))
if "sz" in arg:
idl_file.write("[%s]" % (arg['sz']))
idl_file.write(";")
#If there are no in parameters, create a dummy variable to meet marshalling requirements
if dummy == 1:
idl_file.write("\n\tint dummy;")
idl_file.write("\n};")
#Generate a response struct for this function
idl_file.write("\n\nstruct Response_%s {" % (call['func']))
#Initialize variable to check for return or output values
ret = 1
if call['return'] != "void":
ret = 0
idl_file.write("\n\t%s ret;" % (call['return']['type']))
#For loop that iterates through each call parameter and creates a struct variable
for arg in call['params']:
if "out" in arg['dir']:
ret = 0
idl_file.write("\n\t%s %s" % (arg['type'],arg['name']))
if "sz" in arg:
idl_file.write("[%s]" % (arg['sz']))
idl_file.write(";")
#If return type is void and no out parameters, generate dummy variable to meet marshalling requirements
if ret == 1:
idl_file.write("\n\tint ret;")
idl_file.write("\n};")
def main():
"""IDL Generator entry point"""
#Open the generated GEDL JSON file for parsing and IDL file for generation
args = argparser()
with open(args.gedl) as edl_file:
#Load in the json data to "gedl" variable
gedl = json.load(edl_file)
if(not args.liberal):
#validate the schema
check_jsonschema_version()
schema = get_gedl_schema(args.schema)
try:
jsonschema.validate(gedl,schema)
except jsonschema.exceptions.ValidationError as schemaerr:
print("")
print("Unable to validate GEDL json:\n%s"%(str(schemaerr),))
sys.exit(-1)
#generate the idl
generate_idl(gedl,args)
if(__name__=="__main__"):
main()
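# Example (added illustration; the GEDL entry below is hypothetical). A call
# description such as
#
#     {"func": "get_a", "return": {"type": "double"},
#      "params": [{"name": "count", "type": "int", "dir": "in"}]}
#
# would make generate_idl() append roughly the following to the IDL file:
#
#     struct Request_get_a {
#         int count;
#     };
#
#     struct Response_get_a {
#         double ret;
#     };
#
# Calls with no "in" parameters get an `int dummy;` member, and a void return
# with no "out" parameters gets an `int ret;` member, so every generated
# struct has at least one field for marshalling.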
| 41.565217 | 119 | 0.572176 |
fadd72fe15de3ac9da40e0c05709e29121a88483
| 5,241 |
py
|
Python
|
third_party/scons/scons-local/SCons/Warnings.py
|
bluebellzhy/chromium
|
008c4fef2676506869a0404239da31e83fd6ccc7
|
[
"BSD-3-Clause"
] | 1 |
2016-05-08T15:35:17.000Z
|
2016-05-08T15:35:17.000Z
|
third_party/scons/scons-local/SCons/Warnings.py
|
bluebellzhy/chromium
|
008c4fef2676506869a0404239da31e83fd6ccc7
|
[
"BSD-3-Clause"
] | null | null | null |
third_party/scons/scons-local/SCons/Warnings.py
|
bluebellzhy/chromium
|
008c4fef2676506869a0404239da31e83fd6ccc7
|
[
"BSD-3-Clause"
] | null | null | null |
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
"""SCons.Warnings
This file implements the warnings framework for SCons.
"""
__revision__ = "src/engine/SCons/Warnings.py 3424 2008/09/15 11:22:20 scons"
import string
import sys
import SCons.Errors
class Warning(SCons.Errors.UserError):
pass
# NOTE: If you add a new warning class, add it to the man page, too!
class CacheWriteErrorWarning(Warning):
pass
class CorruptSConsignWarning(Warning):
pass
class DependencyWarning(Warning):
pass
class DeprecatedWarning(Warning):
pass
class DeprecatedCopyWarning(DeprecatedWarning):
pass
class DeprecatedSourceSignaturesWarning(DeprecatedWarning):
pass
class DeprecatedTargetSignaturesWarning(DeprecatedWarning):
pass
class DuplicateEnvironmentWarning(Warning):
pass
class LinkWarning(Warning):
pass
class MisleadingKeywordsWarning(Warning):
pass
class MissingSConscriptWarning(Warning):
pass
class NoMD5ModuleWarning(Warning):
pass
class NoMetaclassSupportWarning(Warning):
pass
class NoObjectCountWarning(Warning):
pass
class NoParallelSupportWarning(Warning):
pass
class PythonVersionWarning(DeprecatedWarning):
pass
class ReservedVariableWarning(Warning):
pass
class StackSizeWarning(Warning):
pass
class FortranCxxMixWarning(LinkWarning):
pass
_warningAsException = 0
# The below is a list of 2-tuples. The first element is a class object.
# The second element is true if that class is enabled, false if it is disabled.
_enabled = []
_warningOut = None
def suppressWarningClass(clazz):
"""Suppresses all warnings that are of type clazz or
derived from clazz."""
_enabled.insert(0, (clazz, 0))
def enableWarningClass(clazz):
"""Suppresses all warnings that are of type clazz or
derived from clazz."""
_enabled.insert(0, (clazz, 1))
def warningAsException(flag=1):
"""Turn warnings into exceptions. Returns the old value of the flag."""
global _warningAsException
old = _warningAsException
_warningAsException = flag
return old
def warn(clazz, *args):
global _enabled, _warningAsException, _warningOut
warning = clazz(args)
for clazz, flag in _enabled:
if isinstance(warning, clazz):
if flag:
if _warningAsException:
raise warning
if _warningOut:
_warningOut(warning)
break
def process_warn_strings(arguments):
"""Process string specifications of enabling/disabling warnings,
as passed to the --warn option or the SetOption('warn') function.
An argument to this option should be of the form <warning-class>
or no-<warning-class>. The warning class is munged in order
to get an actual class name from the classes above, which we
need to pass to the {enable,disable}WarningClass() functions.
The supplied <warning-class> is split on hyphens, each element
is capitalized, then smushed back together. Then the string
"Warning" is appended to get the class name.
For example, 'deprecated' will enable the DeprecatedWarning
    class. 'no-dependency' will disable the DependencyWarning
class.
As a special case, --warn=all and --warn=no-all will enable or
disable (respectively) the base Warning class of all warnings.
"""
def _capitalize(s):
if s[:5] == "scons":
return "SCons" + s[5:]
else:
return string.capitalize(s)
for arg in arguments:
elems = string.split(string.lower(arg), '-')
enable = 1
if elems[0] == 'no':
enable = 0
del elems[0]
if len(elems) == 1 and elems[0] == 'all':
class_name = "Warning"
else:
class_name = string.join(map(_capitalize, elems), '') + "Warning"
try:
clazz = globals()[class_name]
except KeyError:
sys.stderr.write("No warning type: '%s'\n" % arg)
else:
if enable:
enableWarningClass(clazz)
else:
suppressWarningClass(clazz)
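# Example (added illustration): process_warn_strings() maps --warn strings
# onto the warning classes defined above, e.g.
#
#     process_warn_strings(['deprecated', 'no-dependency', 'no-all'])
#
# enables DeprecatedWarning, suppresses DependencyWarning, and finally
# suppresses the base Warning class (and with it every warning). Because each
# call inserts at the front of _enabled and warn() uses the first match,
# later entries take precedence over earlier ones.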
| 27.87766 | 83 | 0.695287 |
cd95f603f882218aec87c082f14f58343d8b56d2
| 6,306 |
py
|
Python
|
simple_sound_stream.py
|
frietz58/WoZ4U
|
1b3ff95e010c4c321b9efc88be0181e84622ee4f
|
[
"MIT"
] | 1 |
2020-10-30T18:14:18.000Z
|
2020-10-30T18:14:18.000Z
|
simple_sound_stream.py
|
frietz58/WoZ4U
|
1b3ff95e010c4c321b9efc88be0181e84622ee4f
|
[
"MIT"
] | 2 |
2021-01-30T14:47:38.000Z
|
2021-07-15T08:47:59.000Z
|
simple_sound_stream.py
|
frietz58/WoZ4U
|
1b3ff95e010c4c321b9efc88be0181e84622ee4f
|
[
"MIT"
] | 1 |
2021-07-18T07:09:05.000Z
|
2021-07-18T07:09:05.000Z
|
"""
Implements a naoqi ALModule for live streaming of Pepper's microphone to default host default audio output device.
Based on: https://github.com/JBramauer/pepperspeechrecognition/blob/master/module_speechrecognition.py
Author: Finn Rietz
"""
import sounddevice as sd
from sounddevice import PortAudioError
import time
import numpy as np
from utils import rawToWav
import naoqi
from naoqi import ALProxy
CHANNELS = 1
SAMPLE_RATE = 48000
IP = "130.239.182.11"
PORT = 9559
MOD_NAME = "SpeechRecognition"
# we need to inherit from ALModule so that we can subscribe to the audio device...
class SpeechRecognitionModule(naoqi.ALModule):
def __init__(self, strModuleName, strNaoIp, noaPort):
# kill previous instance, useful for developing ;)
try:
p = ALProxy(MOD_NAME)
p.exit()
        except RuntimeError: # when there is no instance in the broker...
pass
self.strNaoIp = strNaoIp
self.naoPort = noaPort
self.broker = self.setup_broker() # setup naoqi broker for module communication
try:
naoqi.ALModule.__init__(self, strModuleName) # init module
except RuntimeError:
# When module is already registered (eg camera tab has been closed and is now reopened)
pass
self.BIND_PYTHON(self.getName(), "callback") # not sure what this does?
self.memory = naoqi.ALProxy("ALMemory")
self.memory.declareEvent(self.getName()) # needed for callback
self.audio = naoqi.ALProxy("ALAudioDevice")
# audio buffer
self.buffer = []
self.stream_latency = 0.5
# sounddevice stream for audio playback in realtime
# dtype=np.int16 is very important! This fixes the insane static noises
self.stream = sd.OutputStream(channels=CHANNELS, samplerate=SAMPLE_RATE, dtype=np.int16, latency=self.stream_latency)
self.livestream = True
self.isStarted = False
def setup_broker(self):
return naoqi.ALBroker(
"myBroker", # we need to use the broker when when we implement own module...
"0.0.0.0", # listen to anyone
0, # find a free port and use it
self.strNaoIp, # parent broker IP
self.naoPort) # parent broker port
def start(self):
# audio = naoqi.ALProxy("ALAudioDevice")
nNbrChannelFlag = 3 # ALL_Channels: 0, AL::LEFTCHANNEL: 1, AL::RIGHTCHANNEL: 2 AL::FRONTCHANNEL: 3 or AL::REARCHANNEL: 4.
nDeinterleave = 0
self.audio.setClientPreferences(self.getName(), SAMPLE_RATE, nNbrChannelFlag,
                                        nDeinterleave) # setting the same values as the default generates a bug !?!
# we can only subscribe to the ALAudiodevice with an implementation of ALModule...
# needs to have a "process" method that will be used as callback...
self.audio.subscribe(self.getName())
# also start the sounddevice stream so that we can write data on it
try:
self.stream.start()
self.isStarted = True
# print "SD STREAM ACTIVE: ", self.stream.active
except PortAudioError:
# when stream has been closed, pointer become invalid, so we have to make a new stream
self.stream = sd.OutputStream(channels=CHANNELS, samplerate=SAMPLE_RATE, dtype=np.int16, latency=self.stream_latency)
self.stream.start()
self.isStarted = True
def stop(self):
if not self.isStarted:
return
else:
self.isStarted = False
self.stream.close()
self.audio.unsubscribe(self.getName())
def processRemote(self, nbOfChannels, nbrOfSamplesByChannel, aTimeStamp, buffer):
"""
This is our callback method!
Due to inheritance, this will be called once our module subscribes to the audio device in start()
Name of method may not be changed!
:param nbOfChannels: param required for signature to work
:param nbrOfSamplesByChannel: param required for signature to work
:param aTimeStamp: param required for signature to work
:param buffer: the actual, buffer audio data from Pepper's mic
:return: None
"""
if self.isStarted:
# calculate a decimal seconds timestamp
# timestamp = float(str(aTimeStamp[0]) + "." + str(aTimeStamp[1]))
# print str(timestamp), "processRemote!!!!"
aSoundDataInterlaced = np.fromstring(str(buffer), dtype=np.int16)
aSoundData = np.reshape(aSoundDataInterlaced, (nbOfChannels, nbrOfSamplesByChannel), 'F')
self.buffer.append(aSoundData)
# write the callback data from ALAudiodevice to sounddevice stream, causing it to be played
# we need to transpose, because sounddevice expects columns to be channels, and we get rows as channels
if self.livestream:
print np.shape(aSoundData)
self.stream.write(aSoundData.T)
def save_buffer(self):
"""
Saves buffered audio data to physical .wav file.
:return:
"""
filename = "simple_out"
outfile = open(filename + ".raw", "wb")
data = self.transform_buffer()
data.tofile(outfile)
outfile.close()
rawToWav(filename)
print filename
def transform_buffer(self):
"""
Reshapes buffer matrix to 1d array of microphone energy values, so that it can be treated as audio data
:return:
"""
return np.concatenate(self.buffer, axis=1)[0]
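    # (Added note.) Each processRemote() callback appends an array of shape
    # (1, nbrOfSamplesByChannel), so two buffered chunks of e.g. 4096 samples
    # concatenate along axis=1 to (1, 8192) and the trailing [0] yields the
    # flat (8192,) mono signal that save_buffer() writes to disk.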
def main():
# Warning: SpeechRecognition must be a global variable
# The name given to the constructor must be the name of the
# variable
global SpeechRecognition
SpeechRecognition = SpeechRecognitionModule("SpeechRecognition", IP, PORT)
SpeechRecognition.start()
# # sound quality testing
# time.sleep(10)
# SpeechRecognition.save_buffer()
# sd.play(SpeechRecognition.transform_buffer(), 48000)
# time.sleep(10)
# exit(0)
try:
while True:
pass
except KeyboardInterrupt:
print('interrupted!')
if __name__ == "__main__":
main()
| 35.627119 | 132 | 0.639391 |
e6ebe8325cabfeda2d87afa237d212a83548bd74
| 2,375 |
py
|
Python
|
4 Chiffres/4 chiffres.py
|
Meltwin/Le-Livre-Scolaire
|
cc621643ca8c081fbe2582f432cb6c3de6811aa6
|
[
"MIT"
] | null | null | null |
4 Chiffres/4 chiffres.py
|
Meltwin/Le-Livre-Scolaire
|
cc621643ca8c081fbe2582f432cb6c3de6811aa6
|
[
"MIT"
] | null | null | null |
4 Chiffres/4 chiffres.py
|
Meltwin/Le-Livre-Scolaire
|
cc621643ca8c081fbe2582f432cb6c3de6811aa6
|
[
"MIT"
] | null | null | null |
##########################################
# Functions #
##########################################
# Lookup table mapping a combination number to its form
def returnForme(x):
if (x == 0):
return "abcd"
elif (x == 1):
return 'abc + d'
elif (x == 2):
return "ab + cd"
elif (x == 3):
return 'a + bcd'
elif (x == 4):
return "ab + c + d"
elif (x == 5):
return "a + bc + d"
elif (x == 6):
return "a + b + c + d"
##########################################
# Main #
##########################################
compte = [[0]*7 for i in range(7)] # 7x7 table with one row and one column per combination
t = [0]*7 # Array holding the value of each combination
# For each digit strictly different from the others
for a in range(10):
for b in range(10):
if (b != a):
for c in range(10):
if (c != b and c !=a):
for d in range(10):
if (d!=a and d!=b and d!=c):
                            # Compute the combinations
t[0] = a*1000 + b*100 + c*10 + d
t[1] = a*100 + b*10 + c + d
t[2] = a*10 + b + c*10 +d
t[3] = a+ b*100 + c*10 + d
t[4] = a*10 + b + c + d
t[5] = a + b*10 + c +d
t[6] = a +b +c +d
                            # Test the combinations for equality
for i in range(7):
for j in range(7):
if (j != i):
if (t[i] == t[j]):
compte[i][j] +=1
print("m = {0},a = {1}, r= {2}, s= {3} {4} <-> {5} [{6}]".format(a,b,c,d,returnForme(i),returnForme(j),t[i]))
print()
##########################################
# Result #
# display #
##########################################
for i in range(7):
for j in range(7):
print("{0} <-> {1} = {2}".format(returnForme(i),returnForme(j),compte[i][j])) # Affiche le couple de combinaison
| 40.948276 | 153 | 0.325053 |
44926e6f0fd61740bfe9b2eb0a2077744ef752aa
| 13,569 |
py
|
Python
|
p2p/multiplexer.py
|
indi-ca/trinity
|
bbf5a5c7359c499f7ef234845364b4d24337d5c1
|
[
"MIT"
] | null | null | null |
p2p/multiplexer.py
|
indi-ca/trinity
|
bbf5a5c7359c499f7ef234845364b4d24337d5c1
|
[
"MIT"
] | null | null | null |
p2p/multiplexer.py
|
indi-ca/trinity
|
bbf5a5c7359c499f7ef234845364b4d24337d5c1
|
[
"MIT"
] | null | null | null |
import asyncio
import collections
import time
from typing import (
Any,
AsyncIterator,
cast,
DefaultDict,
Dict,
Optional,
Sequence,
Tuple,
Type,
Union,
)
from cached_property import cached_property
from eth_utils import ValidationError
from eth_utils.toolz import cons
import rlp
from p2p.abc import (
CommandAPI,
MultiplexerAPI,
NodeAPI,
ProtocolAPI,
SessionAPI,
TransportAPI,
TProtocol,
)
from p2p.constants import (
MAX_IN_LOOP_DECODE_SIZE,
)
from p2p.exceptions import (
MalformedMessage,
PeerConnectionLost,
UnknownProtocol,
UnknownProtocolCommand,
)
from p2p.p2p_proto import BaseP2PProtocol
from p2p._utils import (
get_logger,
snappy_CompressedLengthError,
)
async def stream_transport_messages(transport: TransportAPI,
base_protocol: BaseP2PProtocol,
*protocols: ProtocolAPI,
) -> AsyncIterator[Tuple[ProtocolAPI, CommandAPI[Any]]]:
"""
Streams 2-tuples of (Protocol, Command) over the provided `Transport`
"""
# A cache for looking up the proper protocol instance for a given command
# id.
command_id_cache: Dict[int, ProtocolAPI] = {}
loop = asyncio.get_event_loop()
while not transport.is_closing:
try:
msg = await transport.recv()
except PeerConnectionLost:
return
command_id = msg.command_id
if msg.command_id not in command_id_cache:
if command_id < base_protocol.command_length:
command_id_cache[command_id] = base_protocol
else:
for protocol in protocols:
if command_id < protocol.command_id_offset + protocol.command_length:
command_id_cache[command_id] = protocol
break
else:
protocol_infos = ' '.join(tuple(
(
f"{proto.name}@{proto.version}"
f"[offset={proto.command_id_offset},"
f"command_length={proto.command_length}]"
)
for proto in cons(base_protocol, protocols)
))
raise UnknownProtocolCommand(
f"No protocol found for command_id {command_id}: Available "
f"protocol/offsets are: {protocol_infos}"
)
msg_proto = command_id_cache[command_id]
command_type = msg_proto.get_command_type_for_command_id(command_id)
try:
if len(msg.body) > MAX_IN_LOOP_DECODE_SIZE:
cmd = await loop.run_in_executor(
None,
command_type.decode,
msg,
msg_proto.snappy_support,
)
else:
cmd = command_type.decode(msg, msg_proto.snappy_support)
except (rlp.exceptions.DeserializationError, snappy_CompressedLengthError) as err:
raise MalformedMessage(f"Failed to decode {msg} for {command_type}") from err
yield msg_proto, cmd
# yield to the event loop for a moment to allow `transport.is_closing`
# a chance to update.
await asyncio.sleep(0)
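# (Added illustration.) Commands are routed purely by command_id ranges: ids
# below base_protocol.command_length belong to the base p2p protocol, and each
# sub-protocol owns ids up to command_id_offset + command_length. For example,
# assuming a base protocol of length 16 and a single sub-protocol registered
# at offset 16 with length 17, an incoming command_id of 19 resolves to that
# sub-protocol, while an id beyond every registered range raises
# UnknownProtocolCommand.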
class Multiplexer(MultiplexerAPI):
_transport: TransportAPI
_msg_counts: DefaultDict[Type[CommandAPI[Any]], int]
_last_msg_time: float
_protocol_locks: Dict[Type[ProtocolAPI], asyncio.Lock]
_protocol_queues: Dict[Type[ProtocolAPI], 'asyncio.Queue[CommandAPI[Any]]']
def __init__(self,
transport: TransportAPI,
base_protocol: BaseP2PProtocol,
protocols: Sequence[ProtocolAPI],
max_queue_size: int = 4096) -> None:
self.logger = get_logger('p2p.multiplexer.Multiplexer')
self._transport = transport
# the base `p2p` protocol instance.
self._base_protocol = base_protocol
# the sub-protocol instances
self._protocols = protocols
self._streaming_task: asyncio.Future[None] = None
# Lock management on a per-protocol basis to ensure we only have one
# stream consumer for each protocol.
self._protocol_locks = {
type(protocol): asyncio.Lock()
for protocol
in self.get_protocols()
}
# Each protocol gets a queue where messages for the individual protocol
# are placed when streamed from the transport
self._protocol_queues = {
type(protocol): asyncio.Queue(max_queue_size)
for protocol
in self.get_protocols()
}
self._msg_counts = collections.defaultdict(int)
self._last_msg_time = 0
self._started_streaming = asyncio.Event()
def __str__(self) -> str:
protocol_infos = ','.join(tuple(
f"{proto.name}:{proto.version}"
for proto
in self.get_protocols()
))
return f"Multiplexer[{protocol_infos}]"
def __repr__(self) -> str:
return f"<{self}>"
#
# Transport API
#
def get_transport(self) -> TransportAPI:
return self._transport
#
# Message Counts
#
def get_total_msg_count(self) -> int:
return sum(self._msg_counts.values())
@property
def last_msg_time(self) -> float:
return self._last_msg_time
#
# Proxy Transport methods
#
@cached_property
def remote(self) -> NodeAPI:
return self._transport.remote
@cached_property
def session(self) -> SessionAPI:
return self._transport.session
@property
def is_closing(self) -> bool:
return self._transport.is_closing
async def close(self) -> None:
await self._transport.close()
#
# Protocol API
#
def has_protocol(self, protocol_identifier: Union[ProtocolAPI, Type[ProtocolAPI]]) -> bool:
try:
if isinstance(protocol_identifier, ProtocolAPI):
self.get_protocol_by_type(type(protocol_identifier))
return True
elif isinstance(protocol_identifier, type):
self.get_protocol_by_type(protocol_identifier)
return True
else:
raise TypeError(
f"Unsupported protocol value: {protocol_identifier} of type "
f"{type(protocol_identifier)}"
)
except UnknownProtocol:
return False
def get_protocol_by_type(self, protocol_class: Type[TProtocol]) -> TProtocol:
if issubclass(protocol_class, BaseP2PProtocol):
return cast(TProtocol, self._base_protocol)
for protocol in self._protocols:
if type(protocol) is protocol_class:
return cast(TProtocol, protocol)
raise UnknownProtocol(f"No protocol found with type {protocol_class}")
def get_base_protocol(self) -> BaseP2PProtocol:
return self._base_protocol
def get_protocols(self) -> Tuple[ProtocolAPI, ...]:
return tuple(cons(self._base_protocol, self._protocols))
def get_protocol_for_command_type(self, command_type: Type[CommandAPI[Any]]) -> ProtocolAPI:
supported_protocols = tuple(
protocol
for protocol in self.get_protocols()
if protocol.supports_command(command_type)
)
if len(supported_protocols) == 1:
return supported_protocols[0]
elif not supported_protocols:
raise UnknownProtocol(
f"Connection does not have any protocols that support the "
f"request command: {command_type}"
)
elif len(supported_protocols) > 1:
raise ValidationError(
f"Could not determine appropriate protocol for command: "
f"{command_type}. Command was found in the "
f"protocols {supported_protocols}"
)
else:
raise Exception("This code path should be unreachable")
#
# Streaming API
#
def stream_protocol_messages(self,
protocol_identifier: Union[ProtocolAPI, Type[ProtocolAPI]],
) -> AsyncIterator[CommandAPI[Any]]:
"""
Stream the messages for the specified protocol.
"""
if isinstance(protocol_identifier, ProtocolAPI):
protocol_class = type(protocol_identifier)
elif isinstance(protocol_identifier, type) and issubclass(protocol_identifier, ProtocolAPI):
protocol_class = protocol_identifier
else:
raise TypeError("Unknown protocol identifier: {protocol}")
if not self.has_protocol(protocol_class):
raise UnknownProtocol(f"Unknown protocol '{protocol_class}'")
if self._protocol_locks[protocol_class].locked():
raise Exception(f"Streaming lock for {protocol_class} is not free.")
return self._stream_protocol_messages(protocol_class)
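    # (Added usage sketch; `ETHProtocol` stands in for any registered
    # sub-protocol class.) Only one consumer may stream a given protocol at a
    # time:
    #
    #     async for cmd in multiplexer.stream_protocol_messages(ETHProtocol):
    #         handle(cmd)
    #
    # A second concurrent call with the same protocol class raises because the
    # per-protocol streaming lock is still held.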
async def _stream_protocol_messages(self,
protocol_class: Type[ProtocolAPI],
) -> AsyncIterator[CommandAPI[Any]]:
"""
Stream the messages for the specified protocol.
"""
async with self._protocol_locks[protocol_class]:
self.raise_if_streaming_error()
msg_queue = self._protocol_queues[protocol_class]
while self.is_streaming:
try:
# We use an optimistic strategy here of using
# `get_nowait()` to reduce the number of times we yield to
# the event loop.
yield msg_queue.get_nowait()
# Manually release the event loop if it won't happen during
# the queue get().
await asyncio.sleep(0)
except asyncio.QueueEmpty:
yield await msg_queue.get()
#
# Message reading and streaming API
#
async def stream_in_background(self) -> None:
self._streaming_task = asyncio.ensure_future(self._do_multiplexing())
await self._started_streaming.wait()
@property
def is_streaming(self) -> bool:
return self._streaming_task is not None and not self._streaming_task.done()
async def wait_streaming_finished(self) -> None:
"""
Wait for our streaming task to finish.
The streaming must have been started via stream_in_background().
Upon returning, the multiplexer and transport will be closed.
"""
if self._streaming_task is None:
raise Exception("Multiplexer has not started streaming")
await self._streaming_task
def get_streaming_error(self) -> Optional[BaseException]:
if self._streaming_task is None:
raise Exception("Multiplexer has not started streaming")
elif self._streaming_task.cancelled():
return asyncio.CancelledError()
elif not self._streaming_task.done():
return None
return self._streaming_task.exception()
def raise_if_streaming_error(self) -> None:
err = self.get_streaming_error()
if err:
raise err
async def _do_multiplexing(self) -> None:
"""
Read messages from the transport and feeds them into individual queues for each of the
protocols.
Will close ourselves before returning.
"""
self._started_streaming.set()
msg_stream = stream_transport_messages(
self._transport,
self._base_protocol,
*self._protocols,
)
try:
await self._handle_commands(msg_stream)
except asyncio.TimeoutError as exc:
self.logger.warning(
"Timed out waiting for command from %s, exiting...", self,)
self.logger.debug("Timeout %r: %s", self, exc, exc_info=True)
finally:
await self.close()
async def _handle_commands(
self,
msg_stream: AsyncIterator[Tuple[ProtocolAPI, CommandAPI[Any]]]) -> None:
async for protocol, cmd in msg_stream:
self._last_msg_time = time.monotonic()
# track total number of messages received for each command type.
self._msg_counts[type(cmd)] += 1
queue = self._protocol_queues[type(protocol)]
try:
# We must use `put_nowait` here to ensure that in the event
# that a single protocol queue is full that we don't block
# other protocol messages getting through.
queue.put_nowait(cmd)
except asyncio.QueueFull:
self.logger.error(
(
"Multiplexing queue for protocol '%s' full. "
"discarding message: %s"
),
protocol,
cmd,
)
def cancel_streaming(self) -> None:
if self._streaming_task is not None and not self._streaming_task.done():
self._streaming_task.cancel()
async def stop_streaming(self) -> None:
self.cancel_streaming()
try:
await self._streaming_task
except asyncio.CancelledError:
pass
| 33.9225 | 100 | 0.595106 |
2da19ae957b7bd824dcc1a9f19b3573ce4217676
| 36,525 |
py
|
Python
|
python/ccxt/coss.py
|
jknight/ccxt
|
02cdef0247435a6c6557faad8a1793d3da67c085
|
[
"MIT"
] | null | null | null |
python/ccxt/coss.py
|
jknight/ccxt
|
02cdef0247435a6c6557faad8a1793d3da67c085
|
[
"MIT"
] | null | null | null |
python/ccxt/coss.py
|
jknight/ccxt
|
02cdef0247435a6c6557faad8a1793d3da67c085
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.base.exchange import Exchange
import math
from ccxt.base.errors import ArgumentsRequired
class coss(Exchange):
def describe(self):
return self.deep_extend(super(coss, self).describe(), {
'id': 'coss',
'name': 'COSS',
'countries': ['SG', 'NL'],
'rateLimit': 1000,
'version': 'v1',
'certified': False,
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/50328158-22e53c00-0503-11e9-825c-c5cfd79bfa74.jpg',
'api': {
'trade': 'https://trade.coss.io/c/api/v1',
'engine': 'https://engine.coss.io/api/v1',
'public': 'https://trade.coss.io/c/api/v1',
'web': 'https://trade.coss.io/c', # undocumented
'exchange': 'https://exchange.coss.io/api',
},
'www': 'https://www.coss.io',
'doc': 'https://api.coss.io/v1/spec',
'referral': 'https://www.coss.io/c/reg?r=OWCMHQVW2Q',
},
'has': {
'fetchTrades': True,
'fetchTicker': True,
'fetchTickers': True,
'fetchMarkets': True,
'fetchCurrencies': True,
'fetchBalance': True,
'fetchOrderBook': True,
'fetchOrder': True,
'fetchOrders': True,
'fetchOrderTrades': True,
'fetchClosedOrders': True,
'fetchOpenOrders': True,
'fetchOHLCV': True,
'createOrder': True,
'cancelOrder': True,
},
'timeframes': {
'1m': '1m',
'5m': '5m',
'15m': '15m',
'30m': '30m',
'1h': '1h',
'2h': '2h',
'4h': '4h',
'6h': '6h',
'12h': '12h',
'1d': '1d',
'1w': '1w',
},
'api': {
'exchange': {
'get': [
'getmarketsummaries',
],
},
'public': {
'get': [
'market-price',
'exchange-info',
],
},
'web': {
'get': [
'coins/getinfo/all', # undocumented
'order/symbols', # undocumented
'coins/get_base_list', # undocumented
],
},
'engine': {
'get': [
'dp',
'ht',
'cs',
],
},
'trade': {
'get': [
'ping',
'time',
'account/balances',
'account/details',
],
'post': [
'order/add',
'order/details',
'order/list/open',
'order/list/completed',
'order/list/all',
'order/trade-detail',
],
'delete': [
'order/cancel',
],
},
},
'fees': {
'trading': {
'tierBased': True,
'percentage': True,
'taker': 0.0025,
'maker': 0.0,
},
'funding': {
'tierBased': False,
'percentage': False,
'withdraw': {},
'deposit': {},
},
},
'commonCurrencies': {
'COS': 'COSS',
'COSS': 'COSS.io',
},
})
def fetch_markets(self, params={}):
response = self.publicGetExchangeInfo(params)
#
# { timezone: "UTC",
# server_time: 1545171487108,
# rate_limits: [{ type: "REQUESTS",
# interval: "MINUTE",
# limit: 1000 }],
# base_currencies: [{currency_code: "BTC", minimum_total_order: "0.0001"},
# {currency_code: "USDT", minimum_total_order: "1"},
# {currency_code: "EUR", minimum_total_order: "1"}],
# coins: [{ currency_code: "ADI",
# name: "Aditus",
# minimum_order_amount: "0.00000001"},
# ...
# { currency_code: "NPXSXEM",
# name: "PundiX-XEM",
# minimum_order_amount: "0.00000001" } ],
# symbols: [{ symbol: "ADI_BTC",
# amount_limit_decimal: 0,
# price_limit_decimal: 8,
# allow_trading: True },
# ...
# { symbol: "ETH_GUSD",
# amount_limit_decimal: 5,
# price_limit_decimal: 3,
# allow_trading: True } ] }
#
result = []
markets = self.safe_value(response, 'symbols', [])
baseCurrencies = self.safe_value(response, 'base_currencies', [])
baseCurrenciesByIds = self.index_by(baseCurrencies, 'currency_code')
currencies = self.safe_value(response, 'coins', [])
currenciesByIds = self.index_by(currencies, 'currency_code')
for i in range(0, len(markets)):
market = markets[i]
marketId = market['symbol']
baseId, quoteId = marketId.split('_')
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
symbol = base + '/' + quote
precision = {
'amount': self.safe_integer(market, 'amount_limit_decimal'),
'price': self.safe_integer(market, 'price_limit_decimal'),
}
active = self.safe_value(market, 'allow_trading', False)
baseCurrency = self.safe_value(baseCurrenciesByIds, baseId, {})
minCost = self.safe_float(baseCurrency, 'minimum_total_order')
currency = self.safe_value(currenciesByIds, baseId, {})
defaultMinAmount = math.pow(10, -precision['amount'])
minAmount = self.safe_float(currency, 'minimum_order_amount', defaultMinAmount)
result.append({
'symbol': symbol,
'id': marketId,
'baseId': baseId,
'quoteId': quoteId,
'base': base,
'quote': quote,
'active': active,
'precision': precision,
'limits': {
'amount': {
'min': minAmount,
'max': None,
},
'price': {
'min': None,
'max': None,
},
'cost': {
'min': minCost,
'max': None,
},
},
'info': market,
})
return result
def fetch_currencies(self, params={}):
response = self.webGetCoinsGetinfoAll(params)
#
# [{ currency_code: "VET",
# name: "VeChain",
# buy_limit: 0,
# sell_limit: 0,
# usdt: 0,
# transaction_time_limit: 5,
# status: "trade",
# withdrawn_fee: "0.6",
# minimum_withdrawn_amount: "1.2",
# minimum_deposit_amount: "0.6",
# minimum_order_amount: "0.00000001",
# decimal_format: "0.########",
# token_type: null, # "erc", "eos", "stellar", "tron", "ripple"...
# buy_at: 0,
# sell_at: 0,
# min_rate: 0,
# max_rate: 0,
# allow_withdrawn: False,
# allow_deposit: False,
# explorer_website_mainnet_link: null,
# explorer_website_testnet_link: null,
# deposit_block_confirmation: "6",
# withdraw_block_confirmation: "0",
# icon_url: "https://s2.coinmarketcap.com/static/img/coins/32x32/3077.png",
# is_fiat: False,
# allow_sell: True,
# allow_buy: True }]
#
result = {}
for i in range(0, len(response)):
currency = response[i]
currencyId = self.safe_string(currency, 'currency_code')
code = self.safe_currency_code(currencyId)
name = self.safe_string(currency, 'name')
allowBuy = self.safe_value(currency, 'allow_buy')
allowSell = self.safe_value(currency, 'allow_sell')
allowWithdrawals = self.safe_value(currency, 'allow_withdrawn')
allowDeposits = self.safe_value(currency, 'allow_deposit')
active = allowBuy and allowSell and allowWithdrawals and allowDeposits
fee = self.safe_float(currency, 'withdrawn_fee')
type = self.safe_string(currency, 'token_type')
#
# decimal_format can be anything...
#
# 0.########
# #.########
# 0.##
# ''(empty string)
# 0.000000
# null(None)
# 0.0000
# 0.###
#
decimalFormat = self.safe_string(currency, 'decimal_format')
precision = 8
if decimalFormat is not None:
parts = decimalFormat.split('.')
numParts = len(parts) # transpiler workaround for array lengths
if numParts > 1:
if len(parts[1]) > 1:
precision = len(parts[1])
result[code] = {
'id': currencyId,
'code': code,
'info': currency,
'name': name,
'active': active,
'fee': fee,
'precision': precision,
'type': type,
'limits': {
'amount': {
'min': self.safe_float(currency, 'minimum_order_amount'),
'max': None,
},
'withdraw': {
'min': self.safe_float(currency, 'minimum_withdrawn_amount'),
'max': None,
},
},
}
return result
def fetch_balance(self, params={}):
self.load_markets()
response = self.tradeGetAccountBalances(params)
#
# [{currency_code: "ETH",
# address: "0x6820511d43111a941d3e187b9e36ec64af763bde", # deposit address
# total: "0.20399125",
# available: "0.20399125",
# in_order: "0",
# memo: null }, # tag, if any
# {currency_code: "ICX",
# address: "",
# total: "0",
# available: "0",
# in_order: "0",
# memo: null } ]
#
result = {}
for i in range(0, len(response)):
balance = response[i]
currencyId = self.safe_string(balance, 'currency_code')
code = self.safe_currency_code(currencyId)
total = self.safe_float(balance, 'total')
used = self.safe_float(balance, 'in_order')
free = self.safe_float(balance, 'available')
result[code] = {
'total': total,
'used': used,
'free': free,
}
return self.parse_balance(result)
def parse_ohlcv(self, ohlcv, market=None, timeframe='1m', since=None, limit=None):
#
# [
# 1545138960000,
# "0.02705000",
# "0.02705000",
# "0.02705000",
# "0.02705000",
# "0.00000000"
# ]
#
return [
self.safe_integer(ohlcv, 0), # timestamp
self.safe_float(ohlcv, 1), # Open
self.safe_float(ohlcv, 2), # High
self.safe_float(ohlcv, 3), # Low
self.safe_float(ohlcv, 4), # Close
self.safe_float(ohlcv, 5), # base Volume
]
def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
'tt': self.timeframes[timeframe],
}
response = self.engineGetCs(self.extend(request, params))
#
# {
# tt: "1m",
# symbol: "ETH_BTC",
# nextTime: 1545138960000,
# series: [
# [
# 1545138960000,
# "0.02705000",
# "0.02705000",
# "0.02705000",
# "0.02705000",
# "0.00000000"
# ],
# ],
# limit: 500
# }
#
series = self.safe_value(response, 'series', [])
return self.parse_ohlcvs(series, market)
def fetch_order_book(self, symbol, limit=None, params={}):
self.load_markets()
marketId = self.market_id(symbol)
request = {'symbol': marketId}
# limit argument is not supported on COSS's end
response = self.engineGetDp(self.extend(request, params))
#
# {symbol: "COSS_ETH",
# asks: [["0.00065200", "214.15000000"],
# ["0.00065300", "645.45000000"],
# ...
# ["0.00076400", "380.00000000"],
# ["0.00076900", "25.00000000"] ],
# limit: 100,
# bids: [["0.00065100", "666.99000000"],
# ["0.00065000", "1171.93000000"],
# ...
# ["0.00037700", "3300.00000000"],
# ["0.00037600", "2010.82000000"] ],
# time: 1545180569354 }
#
timestamp = self.safe_integer(response, 'time')
return self.parse_order_book(response, timestamp)
def parse_ticker(self, ticker, market=None):
#
# {MarketName: "COSS-ETH",
# High: 0.00066,
# Low: 0.000628,
# BaseVolume: 131.09652674,
# Last: 0.000636,
# TimeStamp: "2018-12-19T05:16:41.369Z",
# Volume: 206126.6143710692,
# Ask: "0.00063600",
# Bid: "0.00063400",
# PrevDay: 0.000636 }
#
timestamp = self.parse8601(self.safe_string(ticker, 'TimeStamp'))
symbol = None
marketId = self.safe_string(ticker, 'MarketName')
if marketId is not None:
marketId = marketId.replace('-', '_')
market = self.safe_value(self.markets_by_id, marketId, market)
if market is None:
if marketId is not None:
baseId, quoteId = marketId.split('_')
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
symbol = base + '/' + quote
if market is not None:
symbol = market['symbol']
previous = self.safe_float(ticker, 'PrevDay')
last = self.safe_float(ticker, 'Last')
change = None
percentage = None
if last is not None:
if previous is not None:
change = last - previous
if previous > 0:
percentage = (change / previous) * 100
return {
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_float(ticker, 'High'),
'low': self.safe_float(ticker, 'Low'),
'bid': self.safe_float(ticker, 'Bid'),
'bidVolume': None,
'ask': self.safe_float(ticker, 'Ask'),
'askVolume': None,
'vwap': None,
'open': previous,
'close': last,
'last': last,
'previousClose': None,
'change': change,
'percentage': percentage,
'average': None,
'baseVolume': self.safe_float(ticker, 'Volume'),
'quoteVolume': self.safe_float(ticker, 'BaseVolume'),
'info': ticker,
}
def fetch_tickers(self, symbols=None, params={}):
self.load_markets()
response = self.exchangeGetGetmarketsummaries(params)
#
# {success: True,
# message: "",
# result: [{MarketName: "COSS-ETH",
# High: 0.00066,
# Low: 0.000628,
# BaseVolume: 131.09652674,
# Last: 0.000636,
# TimeStamp: "2018-12-19T05:16:41.369Z",
# Volume: 206126.6143710692,
# Ask: "0.00063600",
# Bid: "0.00063400",
# PrevDay: 0.000636 },
# ...
# {MarketName: "XLM-BTC",
# High: 0.0000309,
# Low: 0.0000309,
# BaseVolume: 0,
# Last: 0.0000309,
# TimeStamp: "2018-12-19T02:00:02.145Z",
# Volume: 0,
# Ask: "0.00003300",
# Bid: "0.00003090",
# PrevDay: 0.0000309 } ],
# volumes: [{CoinName: "ETH", Volume: 668.1928095999999}, # these are overall exchange volumes
# {CoinName: "USD", Volume: 9942.58480324},
# {CoinName: "BTC", Volume: 43.749184570000004},
# {CoinName: "COSS", Volume: 909909.26644574},
# {CoinName: "EUR", Volume: 0},
# {CoinName: "TUSD", Volume: 2613.3395026999997},
# {CoinName: "USDT", Volume: 1017152.07416519},
# {CoinName: "GUSD", Volume: 1.80438},
# {CoinName: "XRP", Volume: 15.95508},
# {CoinName: "GBP", Volume: 0},
# {CoinName: "USDC", Volume: 0} ],
# t: 1545196604371 }
#
tickers = self.safe_value(response, 'result', [])
result = {}
for i in range(0, len(tickers)):
ticker = self.parse_ticker(tickers[i])
symbol = ticker['symbol']
result[symbol] = ticker
return result
def fetch_ticker(self, symbol, params={}):
tickers = self.fetch_tickers([symbol], params)
return tickers[symbol]
def fetch_trades(self, symbol, since=None, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
}
response = self.engineGetHt(self.extend(request, params))
#
# { symbol: "COSS_ETH",
# limit: 100,
# history: [{ id: 481321,
# price: "0.00065100",
# qty: "272.92000000",
# isBuyerMaker: False,
# time: 1545180845019 },
# { id: 481322,
# price: "0.00065200",
# qty: "1.90000000",
# isBuyerMaker: True,
# time: 1545180847535},
# ...
# { id: 481420,
# price: "0.00065300",
# qty: "2.00000000",
# isBuyerMaker: True,
# time: 1545181167702} ],
# time: 1545181171274 }
#
return self.parse_trades(response['history'], market, since, limit)
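    # fee strings arrive in the form "<cost> <currency>", e.g. "0.00700000 COSS"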
def parse_trade_fee(self, fee):
if fee is None:
return fee
parts = fee.split(' ')
numParts = len(parts)
cost = parts[0]
code = None
if numParts > 1:
code = self.safe_currency_code(parts[1])
return {
'cost': cost,
'currency': code,
}
def parse_trade(self, trade, market=None):
#
# fetchTrades(public)
#
# { id: 481322,
# price: "0.00065200",
# qty: "1.90000000",
# isBuyerMaker: True,
# time: 1545180847535}
#
# fetchOrderTrades(private)
#
# [{ hex_id: null,
# symbol: "COSS_ETH",
# order_id: "ad6f6b47-3def-4add-a5d5-2549a9df1593",
# order_side: "BUY",
# price: "0.00065900",
# quantity: "10",
# fee: "0.00700000 COSS",
# additional_fee: "0.00000461 ETH",
# total: "0.00659000 ETH",
# timestamp: 1545152356075 }]
#
id = self.safe_string(trade, 'id')
timestamp = self.safe_integer(trade, 'time')
orderId = self.safe_string(trade, 'order_id')
side = self.safe_string_lower(trade, 'order_side')
symbol = None
marketId = self.safe_string(trade, 'symbol')
if marketId is not None:
market = self.safe_value(self.markets_by_id, marketId, market)
if market is None:
baseId, quoteId = marketId.split('_')
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
symbol = base + '/' + quote
elif market is not None:
symbol = market['symbol']
cost = None
price = self.safe_float(trade, 'price')
amount = self.safe_float_2(trade, 'qty', 'quantity')
if amount is not None:
if price is not None:
cost = price * amount
result = {
'id': id,
'info': trade,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': symbol,
'order': orderId,
'type': None,
'side': side,
'takerOrMaker': None,
'price': price,
'amount': amount,
'cost': cost,
'fee': None,
}
fee = self.parse_trade_fee(self.safe_string(trade, 'fee'))
if fee is not None:
additionalFee = self.parse_trade_fee(self.safe_string(trade, 'additional_fee'))
if additionalFee is None:
result['fee'] = fee
else:
result['fees'] = [
fee,
additionalFee,
]
return result
def fetch_orders_by_type(self, type, symbol=None, since=None, limit=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOrders requires a symbol argument')
self.load_markets()
market = self.market(symbol)
request = {
# 'from_id': 'b2a2d379-f9b6-418b-9414-cbf8330b20d1', # string(uuid), fetchOrders(all orders) only
# 'page': 0, # different pagination in fetchOpenOrders and fetchClosedOrders
# 'limit': 50, # optional, max = default = 50
'symbol': market['id'], # required
}
if limit is not None:
request['limit'] = limit # max = default = 50
method = 'tradePostOrderList' + type
response = getattr(self, method)(self.extend(request, params))
#
# fetchOrders, fetchClosedOrders
#
# [{ hex_id: "5c192784330fe51149f556bb",
# order_id: "5e46e1b1-93d5-4656-9b43-a5635b08eae9",
# account_id: "a0c20128-b9e0-484e-9bc8-b8bb86340e5b",
# order_symbol: "COSS_ETH",
# order_side: "BUY",
# status: "filled",
# createTime: 1545152388019,
# type: "limit",
# timeMatching: 0,
# order_price: "0.00065900",
# order_size: "10",
# executed: "10",
# stop_price: "0.00000000",
# avg: "0.00065900",
# total: "0.00659000 ETH" } ]
#
# fetchOpenOrders
#
# {
# "total": 2,
# "list": [
# {
# "order_id": "9e5ae4dd-3369-401d-81f5-dff985e1c4ty",
# "account_id": "9e5ae4dd-3369-401d-81f5-dff985e1c4a6",
# "order_symbol": "eth-btc",
# "order_side": "BUY",
# "status": "OPEN",
# "createTime": 1538114348750,
# "type": "limit",
# "order_price": "0.12345678",
# "order_size": "10.12345678",
# "executed": "0",
# "stop_price": "02.12345678",
# "avg": "1.12345678",
# "total": "2.12345678"
# }
# ]
# }
#
# the following code is to handle the above difference in response formats
orders = None
if isinstance(response, list):
orders = response
else:
orders = self.safe_value(response, 'list', [])
return self.parse_orders(orders, market, since, limit)
def fetch_orders(self, symbol=None, since=None, limit=None, params={}):
return self.fetch_orders_by_type('All', symbol, since, limit, params)
def fetch_closed_orders(self, symbol=None, since=None, limit=None, params={}):
return self.fetch_orders_by_type('Completed', symbol, since, limit, params)
def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
return self.fetch_orders_by_type('Open', symbol, since, limit, params)
def fetch_order(self, id, symbol=None, params={}):
self.load_markets()
request = {
'order_id': id,
}
response = self.tradePostOrderDetails(self.extend(request, params))
return self.parse_order(response)
def fetch_order_trades(self, id, symbol=None, since=None, limit=None, params={}):
self.load_markets()
market = None
if symbol is not None:
market = self.market(symbol)
request = {
'order_id': id,
}
response = self.tradePostOrderTradeDetail(self.extend(request, params))
#
# [{ hex_id: null,
# symbol: "COSS_ETH",
# order_id: "ad6f6b47-3def-4add-a5d5-2549a9df1593",
# order_side: "BUY",
# price: "0.00065900",
# quantity: "10",
# fee: "0.00700000 COSS",
# additional_fee: "0.00000461 ETH",
# total: "0.00659000 ETH",
# timestamp: 1545152356075 }]
#
return self.parse_trades(response, market, since, limit)
def parse_order_status(self, status):
if status is None:
return status
statuses = {
'OPEN': 'open',
'CANCELLED': 'canceled',
'FILLED': 'closed',
'PARTIAL_FILL': 'closed',
'CANCELLING': 'open',
}
return self.safe_string(statuses, status.upper(), status)
def parse_order(self, order, market=None):
#
# { hex_id: "5c192784330fe51149f556bb", # missing in fetchOpenOrders
# order_id: "5e46e1b1-93d5-4656-9b43-a5635b08eae9",
# account_id: "a0c20128-b9e0-484e-9bc8-b8bb86340e5b",
# order_symbol: "COSS_ETH", # coss-eth in docs
# order_side: "BUY",
# status: "filled",
# createTime: 1545152388019,
# type: "limit",
# timeMatching: 0, # missing in fetchOpenOrders
# order_price: "0.00065900",
# order_size: "10",
# executed: "10",
# stop_price: "0.00000000",
# avg: "0.00065900",
# total: "0.00659000 ETH" }
#
id = self.safe_string(order, 'order_id')
symbol = None
marketId = self.safe_string(order, 'order_symbol')
if marketId is None:
if market is not None:
symbol = market['symbol']
else:
# a minor workaround for lowercase eth-btc symbols
marketId = marketId.upper()
marketId = marketId.replace('-', '_')
market = self.safe_value(self.markets_by_id, marketId, market)
if market is None:
baseId, quoteId = marketId.split('_')
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
symbol = base + '/' + quote
else:
symbol = market['symbol']
timestamp = self.safe_integer(order, 'createTime')
status = self.parse_order_status(self.safe_string(order, 'status'))
price = self.safe_float(order, 'order_price')
filled = self.safe_float(order, 'executed')
type = self.safe_string(order, 'type')
amount = self.safe_float(order, 'order_size')
remaining = None
if amount is not None:
if filled is not None:
remaining = amount - filled
average = self.safe_float(order, 'avg')
side = self.safe_string_lower(order, 'order_side')
cost = self.safe_float(order, 'total')
fee = None
trades = None
return {
'info': order,
'id': id,
'clientOrderId': None,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': None,
'symbol': symbol,
'type': type,
'side': side,
'price': price,
'amount': amount,
'cost': cost,
'average': average,
'filled': filled,
'remaining': remaining,
'status': status,
'fee': fee,
'trades': trades,
}
def create_order(self, symbol, type, side, amount, price=None, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'order_symbol': market['id'],
'order_size': self.amount_to_precision(symbol, amount),
'order_side': side.upper(),
'type': type,
}
if price is not None:
request['order_price'] = self.price_to_precision(symbol, price)
response = self.tradePostOrderAdd(self.extend(request, params))
#
# {
# "order_id": "9e5ae4dd-3369-401d-81f5-dff985e1c4ty",
# "account_id": "9e5ae4dd-3369-401d-81f5-dff985e1c4a6",
# "order_symbol": "eth-btc",
# "order_side": "BUY",
# "status": "OPEN",
# "createTime": 1538114348750,
# "type": "limit",
# "order_price": "0.12345678",
# "order_size": "10.12345678",
# "executed": "0",
# "stop_price": "02.12345678",
# "avg": "1.12345678",
# "total": "2.12345678"
# }
#
return self.parse_order(response, market)
def cancel_order(self, id, symbol=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' cancelOrder requires a symbol argument')
self.load_markets()
market = self.market(symbol)
request = {
'order_id': id,
'order_symbol': market['id'],
}
response = self.tradeDeleteOrderCancel(self.extend(request, params))
#
# {order_symbol: "COSS_ETH",
# order_id: "30f2d698-39a0-4b9f-a3a6-a179542373bd",
# order_size: 0,
# account_id: "a0c20128-b9e0-484e-9bc8-b8bb86340e5b",
# timestamp: 1545202728814,
# recvWindow: null }
#
return self.parse_order(response)
def nonce(self):
return self.milliseconds()
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
url = self.urls['api'][api] + '/' + path
if api == 'trade':
self.check_required_credentials()
timestamp = self.nonce()
query = self.extend({
'timestamp': timestamp, # required(int64)
# 'recvWindow': 10000, # optional(int32)
}, params)
request = None
if method == 'GET':
request = self.urlencode(query)
url += '?' + request
else:
request = self.json(query)
body = request
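            # sign the urlencoded query string (GET) or the JSON body (other methods) with the API secret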
headers = {
'Signature': self.hmac(self.encode(request), self.encode(self.secret)),
'Authorization': self.apiKey,
'X-Requested-With': 'XMLHttpRequest',
}
else:
if params:
url += '?' + self.urlencode(params)
return {'url': url, 'method': method, 'body': body, 'headers': headers}
| 40.901456 | 126 | 0.425873 |
1793aa69bc80047f6334f6ec60096700a83d537b
| 6,253 |
py
|
Python
|
chaluptests/methods/cond_mixnn.py
|
danthe96/independence_test
|
2236b2c6b8fb87bf40b9b41b03fec76de96a5f26
|
[
"MIT"
] | null | null | null |
chaluptests/methods/cond_mixnn.py
|
danthe96/independence_test
|
2236b2c6b8fb87bf40b9b41b03fec76de96a5f26
|
[
"MIT"
] | null | null | null |
chaluptests/methods/cond_mixnn.py
|
danthe96/independence_test
|
2236b2c6b8fb87bf40b9b41b03fec76de96a5f26
|
[
"MIT"
] | null | null | null |
""" A conditional (and unconditional!) independence test
based on neural network regression. This implementation
uses Tensorflow and sklearn.
Reference:
Chalupka, Krzysztof and Perona, Pietro and Eberhardt, Frederick, 2017.
"""
import sys
import time
import numpy as np
from scipy.stats import ttest_ind
import tensorflow as tf
from neural_networks import nn
from ..utils import equalize_dimensions
from scipy.stats import ttest_1samp
# Define available test statistic functions.
FS = {'min': lambda x, y: np.min(x) / np.min(y),
'mean': lambda x, y: np.mean(x) - np.mean(y)}
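# Helpers that turn the two MSE distributions (model fit on reshuffled x vs. on the
# original data) into a one-sided p-value, either by bootstrapping the pooled
# statistics (mindiv, mindiff) or via a one-sample t-test on their ratios (ttest).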
def bootstrap_mindiv(d0s, d1s):
f = lambda x, y: np.min(x) / np.min(y)
t_obs = f(d0s, d1s)
t_star = bootstrap(d0s, d1s, f=f)
p_value = np.sum(t_star > t_obs) / float(t_star.size)
return p_value
def bootstrap_ttest(d0s, d1s):
tstat, p_value = ttest_1samp(d0s / d1s, 1)
if np.mean(d0s / d1s) > 1:
p_value /= 2.
else:
p_value = 1 - p_value / 2.
return p_value
def bootstrap_mindiff(d0s, d1s):
f = lambda x, y: np.min(x) - np.min(y)
t_obs = f(d0s, d1s)
t_star = bootstrap(d0s, d1s, f=f)
p_value = np.sum(t_star > t_obs) / float(t_star.size)
return p_value
def mse(y_pred, y):
""" Compute the mean squared error.
Args:
        y_pred (n_samples, y_dim): Predicted values.
        y (n_samples, y_dim): Ground-truth values.
Returns
mse: The test statistic: mean-squared error on a validation set.
"""
return np.mean((y - y_pred)**2)
def bootstrap(h0, h1, f, B=10000):
""" Bootstrap the test statistic.
Args:
h0: Iterable of length m.
h1: Iterable of length n.
f: Function taking (h0, h1) to a test statistic.
B (int): Number of bootstrap samples to create.
Returns:
        t_star (B,): Bootstrapped values of the test statistic f on the resampled data.
"""
t_star = np.zeros(B)
m = len(h0)
n = len(h1)
all_h = np.concatenate([h0, h1])
for b_id in range(B):
b_data = np.random.choice(all_h, size=m + n, replace=True)
t_star[b_id] = f(b_data[:m], b_data[m:])
return t_star
def test(x, y, z=None, num_perm=10, prop_test=.1,
max_time=60, discrete=(False, False),
plot_return=False, test_type='min',
verbose=False, fixed_arch=False, bootstrap_type='mindiv', **kwargs):
""" The neural net probabilistic independence test.
See Chalupka, Perona, Eberhardt 2017.
Args:
x (n_samples, x_dim): First variable.
y (n_samples, y_dim): Second variable.
z (n_samples, z_dim): Conditioning variable.
num_perm: Number of data permutations to estimate
the p-value from marginal stats.
prop_test (int): Proportion of data to evaluate test stat on.
max_time (float): Time limit for the test (approximate).
discrete (bool, bool): Whether x or y are discrete.
plot_return (bool): If True, return statistics useful for plotting.
test_type (str): Test statistic type, can be 'min', 'mean'.
verbose (bool): Print out progress messages (or not).
fixed_arch (bool): If True, keep the NN training procedure constant.
If False, draw training parameters randomly at each permutation.
kwargs: Arguments to pass to the neural net constructor.
Returns:
p (float): The p-value for the null hypothesis
that x is independent of y.
"""
# If x xor y is discrete, use the continuous variable as input.
if discrete[0] and not discrete[1]:
x, y = y, x
# Otherwise, predict the variable with fewer dimensions.
elif x.shape[1] < y.shape[1]:
x, y = y, x
# Adjust the dimensionalities of x, y, z to be on the same
# order, by simple data duplication.
x, y, z = equalize_dimensions(x, y, z)
# Use this many datapoints as a test set.
n_samples = x.shape[0]
n_test = int(n_samples * prop_test)
# Attach the conditioning variable to the input.
x_z = np.hstack([x, z])
# Set up storage.
d0_preds = []
d1_preds = []
d0_stats = np.zeros(num_perm)
d1_stats = np.zeros(num_perm)
kwargs['epochs'] = 1000
kwargs['lr'] = 1e-2
kwargs['nn_verbose'] = True
kwargs['batch_size'] = 128
kwargs['ntype'] = 'plain'
# Construct the neural net.
if fixed_arch:
clf = nn.NN(x_dim=x_z.shape[1], y_dim=y.shape[1],
arch=[128]*2, ntype='plain')
for perm_id in range(num_perm):
# Create the d0 (reshuffled-x) dataset.
perm_ids = np.random.permutation(n_samples)
x_z_bootstrap = np.hstack([x[perm_ids], z])
# Sample NN training params.
if not fixed_arch:
kwargs['arch'] = [32] * (perm_id + 1)
clf = nn.NN(x_dim=x_z.shape[1], y_dim=y.shape[1], **kwargs)
print(('lr={lr:.2}, bs={batch_size}, '
'arch={arch}, ntype={ntype}').format(**kwargs))
with tf.Session() as sess:
# Train on the reshuffled data.
sess.run(tf.global_variables_initializer())
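            # snapshot the freshly initialized weights so the fit on the original
            # (unshuffled) data below starts from the same initialization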
clf.saver.save(sess, './init_nn_save')
clf.fit(x_z_bootstrap[n_test:], y[n_test:], sess=sess, **kwargs)
y_pred0 = clf.predict(x_z_bootstrap[:n_test], sess=sess)
# Train on the original data.
sess.run(tf.global_variables_initializer())
clf.saver.restore(sess, './init_nn_save')
clf.fit(x_z[n_test:], y[n_test:], sess=sess, **kwargs)
y_pred1 = clf.predict(x_z[:n_test], sess=sess)
d0_preds.append(y_pred0)
d0_stats[perm_id] = mse(y_pred0, y[:n_test])
d1_preds.append(y_pred1)
d1_stats[perm_id] = mse(y_pred1, y[:n_test])
if verbose:
print('D0 statistic, iter {}: {}'.format(
perm_id, d0_stats[perm_id]))
print('D1 statistic, iter {}: {}'.format(
perm_id, d1_stats[perm_id]))
print('Resetting Tensorflow graph...')
tf.reset_default_graph()
# Compute the p-value.
p_value = globals()['bootstrap_' + bootstrap_type](d0_stats, d1_stats)
if plot_return:
return (p_value, d0_stats, d1_stats)
else:
return p_value
| 32.910526 | 81 | 0.614265 |
088e9174f29ffec14f5248224d28c5b603262612
| 885 |
py
|
Python
|
setup.py
|
KeynesYouDigIt/pyxform
|
6ca3484ea56bee2a286ed46f24fa2ef1f88611fe
|
[
"BSD-2-Clause"
] | null | null | null |
setup.py
|
KeynesYouDigIt/pyxform
|
6ca3484ea56bee2a286ed46f24fa2ef1f88611fe
|
[
"BSD-2-Clause"
] | null | null | null |
setup.py
|
KeynesYouDigIt/pyxform
|
6ca3484ea56bee2a286ed46f24fa2ef1f88611fe
|
[
"BSD-2-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
pyxform - Python library that converts XLSForms to XForms.
"""
from setuptools import find_packages, setup
setup(
name="pyxform",
version="1.7.0",
author="github.com/xlsform",
author_email="info@xlsform.org",
packages=find_packages(exclude=["tests", "tests.*"]),
package_data={
"pyxform.validators.odk_validate": ["bin/*.*"],
"pyxform": ["iana_subtags.txt"],
},
url="http://pypi.python.org/pypi/pyxform/",
description="A Python package to create XForms for ODK Collect.",
long_description=open("README.rst", "rt").read(),
python_requires=">=3.7",
install_requires=[
"xlrd==1.2.0",
],
entry_points={
"console_scripts": [
"xls2xform=pyxform.xls2xform:main_cli",
"pyxform_validator_update=pyxform.validators.updater:main_cli",
]
},
)
| 27.65625 | 75 | 0.619209 |
063f4853e530f6ba09f967328e10861a6af55b4d
| 7,387 |
py
|
Python
|
PiCN/Layers/ICNLayer/PendingInterestTable/BasePendingInterestTable.py
|
DimaMansour/PiCN
|
90ced1cde2a8fd457e873e8bbad1fd7c21bbe56b
|
[
"BSD-3-Clause"
] | null | null | null |
PiCN/Layers/ICNLayer/PendingInterestTable/BasePendingInterestTable.py
|
DimaMansour/PiCN
|
90ced1cde2a8fd457e873e8bbad1fd7c21bbe56b
|
[
"BSD-3-Clause"
] | null | null | null |
PiCN/Layers/ICNLayer/PendingInterestTable/BasePendingInterestTable.py
|
DimaMansour/PiCN
|
90ced1cde2a8fd457e873e8bbad1fd7c21bbe56b
|
[
"BSD-3-Clause"
] | null | null | null |
"""Abstract BasePendingInterestTable for usage in BasicICNLayer"""
import abc
import multiprocessing
import time
from typing import List, Dict
from PiCN.Packets import Interest, Name
from PiCN.Layers.ICNLayer.ForwardingInformationBase import ForwardingInformationBaseEntry
from PiCN.Layers.ICNLayer import BaseICNDataStruct
class PendingInterestTableEntry(object):
"""An entry in the Forwarding Information Base"""
def __init__(self, name: Name, faceid: int, outgoing_faces, interest:Interest = None, local_app: bool=False,
fib_faces_already_used: List[int]=None, faces_already_nacked=None,
number_of_forwards=0):
self.name = name
self._faceids: List[int] = []
if isinstance(faceid, list):
self._faceids.extend(faceid)
else:
self._faceids.append(faceid)
self.outgoing_faces: List[int] = []
if isinstance(outgoing_faces, list):
self.outgoing_faces.extend(outgoing_faces)
else:
self.outgoing_faces.append(outgoing_faces)
self._timestamp = time.time()
self._retransmits = 0
self._local_app: List[bool]= []
if isinstance(local_app, list):
self._local_app.extend(local_app)
else:
self._local_app.append(local_app)
self._interest = interest
        if fib_faces_already_used:  # the default is None rather than [], since a mutable default argument would be shared across calls
self._fib_faces_already_used: List[int] = fib_faces_already_used
else:
self._fib_faces_already_used: List[int] = []
if faces_already_nacked:
self.faces_already_nacked = faces_already_nacked
else:
self.faces_already_nacked = []
self.number_of_forwards = number_of_forwards
def __eq__(self, other):
if other is None:
return False
return self.name == other.name
@property
def interest(self):
return self._interest
@interest.setter
def interest(self, interest):
self._interest = interest
@property
def faceids(self):
return self._faceids
@faceids.setter
    def faceids(self, faceids):
self._faceids = faceids
@property
def timestamp(self):
return self._timestamp
@timestamp.setter
def timestamp(self, timestamp):
        self._timestamp = timestamp
@property
def retransmits(self):
return self._retransmits
@retransmits.setter
def retransmits(self, retransmits):
self._retransmits = retransmits
@property
def local_app(self):
return self._local_app
@local_app.setter
def local_app(self, local_app):
self._local_app = local_app
@property
def interest(self):
return self._interest
@interest.setter
def interest(self, interest):
self._interest = interest
@property
def fib_faces_already_used(self):
return self._fib_faces_already_used
@fib_faces_already_used.setter
def fib_faces_already_used(self, fib_faces_already_used):
self._fib_faces_already_used = fib_faces_already_used
class BasePendingInterestTable(BaseICNDataStruct):
"""Abstract BasePendingInterestaTable for usage in BasicICNLayer
:param pit_timeout: timeout for a pit entry when calling the ageing function
"""
def __init__(self, pit_timeout: int=10, pit_retransmits: int=3):
super().__init__()
self.container: List[PendingInterestTableEntry] = []
self._pit_timeout = pit_timeout
self._pit_retransmits = pit_retransmits
@abc.abstractmethod
def add_pit_entry(self, name: Name, faceid: int, outgoing_face: int, interest: Interest = None, local_app: bool = False):
"""Add an new entry"""
@abc.abstractmethod
def add_interested_face(self, name, face: int):
"""update an existing entry by appending
the interested face to the faceids. If there is not entry, nothing happens."""
@abc.abstractmethod
def add_outgoing_face(self, name, face: int):
"""This method an existing entry by appending
the face to the outgoing_faces. If there is not entry, nothing happens."""
@abc.abstractmethod
def find_pit_entry(self, name: Name) -> PendingInterestTableEntry:
"""Find an entry in the PIT"""
@abc.abstractmethod
def remove_pit_entry(self, name: Name):
"""Remove an entry in the PIT"""
@abc.abstractmethod
def update_timestamp(self, pit_entry: PendingInterestTableEntry):
"""Update Timestamp of a PIT Entry"""
@abc.abstractmethod
def add_used_fib_face(self, name: Name, used_fib_face: List[int]):
"""Add a used fib entry to the already used fib faces"""
@abc.abstractmethod
def ageing(self) -> (List[PendingInterestTableEntry], List[PendingInterestTableEntry]):
"""Update the entries periodically
:return List of PIT entries to be retransmitted and a list of the removed entries
"""
@abc.abstractmethod
def append(self, entry):
"""append an pit_entry to the pit container
:param entry: entry to be appended
"""
@abc.abstractmethod
def get_already_used_pit_entries(self, name: Name):
"""Get already used fib entries"""
#@abc.abstractmethod
def occupancy_available_faces_per_name(self, fib_entry: ForwardingInformationBaseEntry) -> Dict:
"""This function takes a fib entry which matches the name with the availables faces
and return the occupancy of each face organized in a dictionary (it returns a dictionary of faces and its occupation"""
def set_number_of_forwards(self, name, forwards):
pit_entry = self.find_pit_entry(name)
if pit_entry:
self.remove_pit_entry(name)
pit_entry.number_of_forwards = forwards
self.append(pit_entry)
def increase_number_of_forwards(self, name):
pit_entry = self.find_pit_entry(name)
if pit_entry:
self.remove_pit_entry(name)
pit_entry.number_of_forwards = pit_entry.number_of_forwards + 1
self.append(pit_entry)
def decrease_number_of_forwards(self, name):
pit_entry = self.find_pit_entry(name)
if pit_entry:
self.remove_pit_entry(name)
pit_entry.number_of_forwards = pit_entry.number_of_forwards - 1
self.append(pit_entry)
def add_nacked_faceid(self, name, fid: int):
pit_entry = self.find_pit_entry(name)
if pit_entry:
self.remove_pit_entry(name)
pit_entry.faces_already_nacked.append(fid)
self.append(pit_entry)
def test_faceid_was_nacked(self, name, fid: int):
pit_entry = self.find_pit_entry(name)
if pit_entry:
return (fid in pit_entry.faces_already_nacked)
return False
def set_pit_timeout(self, timeout: float):
"""set the timeout intervall for a pit entry
:param timeout: timout value to be set
"""
self._pit_timeout = timeout
def set_pit_retransmits(self, retransmits: int):
"""set the max number of retransmits for a pit entry
:param retransmits: retransmit value to be set
"""
self._pit_retransmits = retransmits
| 32.831111 | 157 | 0.672127 |
9fe0f56466f6439f7caded8e690d82a300e951f5
| 5,119 |
py
|
Python
|
scripts/maintenance/make_i18n_dict.py
|
masti01/pywikibot
|
bebd2b77b983581bc302656083e8b283a61e8679
|
[
"MIT"
] | 2 |
2021-02-17T11:25:53.000Z
|
2021-06-17T19:15:35.000Z
|
scripts/maintenance/make_i18n_dict.py
|
masti01/pywikibot
|
bebd2b77b983581bc302656083e8b283a61e8679
|
[
"MIT"
] | 1 |
2021-06-21T03:59:14.000Z
|
2021-06-21T07:37:50.000Z
|
scripts/maintenance/make_i18n_dict.py
|
masti01/pywikibot
|
bebd2b77b983581bc302656083e8b283a61e8679
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
"""
Generate an i18n file from a given script.
run IDLE at topmost level:
>>> import pwb
>>> from scripts.maintenance.make_i18n_dict import i18nBot
>>> bot = i18nBot('<scriptname>', '<msg dict>')
>>> bot.run()
If you have more than one message dictionary, give all these names to the bot:
>>> bot = i18nBot('<scriptname>', '<msg dict1>', '<msg dict2>', '<msg dict3>')
If you want to rename the message index use keyword arguments. This may be
mixed with preleading positonal arguments:
>>> bot = i18nBot('<scriptname>', '<msg dict1>', the_other_msg='<msg dict2>')
If you have the messages as instance constants you may call the bot as follows:
>>> bot = i18nBot('<scriptname>.<class name>', '<msg dict1>', '<msg dict2>')
It's also possible to make json files too by using to_json method after
instantiating the bot. It also calls ``bot.run()`` to create the dictionaries:
>>> bot.to_json()
"""
#
# (C) Pywikibot team, 2013-2020
#
# Distributed under the terms of the MIT license.
#
import codecs
import json
import os
from importlib import import_module
from pywikibot import config
class i18nBot: # noqa: N801
"""I18n bot."""
def __init__(self, script, *args, **kwargs):
"""Initializer."""
modules = script.split('.')
self.scriptname = modules[0]
self.script = import_module('scripts.' + self.scriptname)
for m in modules:
self.script = getattr(self.script, m)
self.messages = {}
# setup the message dict
for msg in args:
if hasattr(self.script, msg):
self.messages[msg] = msg
else:
print('message {} not found'.format(msg))
for new, old in kwargs.items():
self.messages[old] = new.replace('_', '-')
self.dict = {}
def print_all(self):
"""Pretty print the dict as a file content to screen."""
if not self.dict:
print('No messages found, read them first.\n'
'Use "run" or "to_json" methods')
return
keys = list(self.dict.keys())
keys.remove('qqq')
keys.sort()
keys.insert(0, 'qqq')
if 'en' in keys:
keys.remove('en')
keys.insert(0, 'en')
print('msg = {')
for code in keys:
print(" '{}': {{".format(code))
for msg in sorted(self.messages.values()):
label = '{}-{}'.format(self.scriptname, msg)
if label in self.dict[code]:
print(" '{}': '{}',"
.format(label, self.dict[code][label]))
print(' },')
print('};')
def read(self, oldmsg, newmsg=None):
"""Read a single message from source script."""
msg = getattr(self.script, oldmsg)
keys = list(msg.keys())
keys.append('qqq')
if newmsg is None:
newmsg = oldmsg
for code in keys:
label = '{}-{}'.format(self.scriptname, newmsg)
if code == 'qqq':
if code not in self.dict:
self.dict[code] = {}
self.dict[code][label] = (
'Edit summary for message {} of {} report'
.format(newmsg, self.scriptname))
elif code != 'commons':
if code not in self.dict:
self.dict[code] = {}
self.dict[code][label] = msg[code]
if 'en' not in keys:
print('WARNING: "en" key missing for message ' + newmsg)
def run(self, quiet=False):
"""
Run the bot, read the messages from source and print the dict.
:param quiet: print the result if False
:type quiet: bool
"""
for item in self.messages.items():
self.read(*item)
if not quiet:
self.print_all()
def to_json(self, quiet=True):
"""
Run the bot and create json files.
:param quiet: Print the result if False
:type quiet: bool
"""
indent = 4
if not self.dict:
self.run(quiet)
json_dir = os.path.join(
config.base_dir, 'scripts/i18n', self.scriptname)
if not os.path.exists(json_dir):
os.makedirs(json_dir)
for lang in self.dict:
file_name = os.path.join(json_dir, '{}.json'.format(lang))
if os.path.isfile(file_name):
with codecs.open(file_name, 'r', 'utf-8') as json_file:
new_dict = json.loads(json_file.read())
else:
new_dict = {}
new_dict['@metadata'] = new_dict.get('@metadata', {'authors': []})
with codecs.open(file_name, 'w', 'utf-8') as json_file:
new_dict.update(self.dict[lang])
s = json.dumps(new_dict, ensure_ascii=False, sort_keys=True,
indent=indent, separators=(',', ': '))
s = s.replace(' ' * indent, '\t')
json_file.write(s)
if __name__ == '__main__':
print(__doc__)
| 32.605096 | 79 | 0.539363 |
4f4dd20ae6dc5b99203c36b3479581a6986a5031
| 7,144 |
py
|
Python
|
django-ocr-backend/settings.py
|
JosephVC/Django_OCR_backend
|
9478f5ee165517ff27e9b881b4ed27fcc1f389c1
|
[
"MIT"
] | 2 |
2020-09-24T23:04:55.000Z
|
2021-03-22T07:48:58.000Z
|
django-ocr-backend/settings.py
|
JosephVC/Django_OCR_backend
|
9478f5ee165517ff27e9b881b4ed27fcc1f389c1
|
[
"MIT"
] | 35 |
2020-05-21T03:45:45.000Z
|
2022-02-10T11:30:51.000Z
|
django-ocr-backend/settings.py
|
JosephVC/Django_OCR_backend
|
9478f5ee165517ff27e9b881b4ed27fcc1f389c1
|
[
"MIT"
] | null | null | null |
"""
Django settings for django-ocr-backend project.
Generated by 'django-admin startproject' using Django 2.2.7.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
# Import package to work with Django
import django_heroku
# import python-dotenv to handle our environment variables
from datetime import timedelta
from dotenv import load_dotenv
load_dotenv()  # take environment variables from .env
import os
# False if not in os.environ
DEBUG = os.getenv('DEBUG')
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
#Security settings for deployment
# SECURITY WARNING: keep the secret key used in production secret!
# Raises django's ImproperlyConfigured exception if SECRET_KEY not in os.environ
SECRET_KEY = os.getenv('DJANGO_SECRET_KEY')
ALLOWED_HOSTS = os.getenv('ALLOWED_HOSTS')
CORS_ALLOWED_ORIGINS = [
"http://localhost:8000",
"http://localhost:3000",
"https://ocr-app-frontend.herokuapp.com",
"https://django-ocr-backend.herokuapp.com"
]
ADMINS = [ ('Joseph', 'josephvcardenas@gmail.com') ]
ADMIN_HONEYPOT_EMAIL_ADMINS = True
# CSRF_COOKIE_SECURE = True
# SECURE_REFERRER_POLICY = 'origin'
# SECURE_SSL_REDIRECT = True
# SESSION_COOKIE_SECURE = True
# SESSION_COOKIE_DOMAIN = 'localhost'
# Application definition
INSTALLED_APPS = [
# 'django.contrib.sites',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
# needed to work with Django REST
'rest_framework',
# 'rest_framework.authtoken',
# needed for CORS handling
'corsheaders',
# apps created by user
'ocr',
'users',
# needed for Heroku deployment
'whitenoise',
# needed for working with AWS
'storages',
# oauth2
'oauth2_provider',
'social_django',
'drf_social_oauth2',
# honeypot /admin/ page to check for unauthorized access
'admin_honeypot',
]
REST_FRAMEWORK = {
'DEFAULT_PERMISSION_CLASSES': [
'rest_framework.permissions.AllowAny',
],
'DEFAULT_AUTHENTICATION_CLASSES': (
'oauth2_provider.contrib.rest_framework.OAuth2Authentication',
'drf_social_oauth2.authentication.SocialAuthentication',
),
}
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware',
'corsheaders.middleware.CorsMiddleware',
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'django-ocr-backend.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'social_django.context_processors.backends',
'social_django.context_processors.login_redirect',
],
},
},
]
WSGI_APPLICATION = 'django-ocr-backend.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
# we're commenting out the below files as we'll have new settings
# as we push files to S3
# STATIC_URL = '/static/'
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, "media")
# AWS settings
# Like for my SECRET_KEY, I created environment variables for AWS keys
# and created config vars for these values in Heroku.
AWS_ACCESS_KEY_ID = os.getenv('AWS_ACCESS_KEY_ID')
AWS_SECRET_ACCESS_KEY = os.getenv('AWS_SECRET_ACCESS_KEY')
AWS_STORAGE_BUCKET_NAME = os.getenv('AWS_STORAGE_BUCKET_NAME')
# AWS_S3_CUSTOM_DOMAIN = '%s.s3.amazonaws.com' % AWS_STORAGE_BUCKET_NAME
# AWS_S3_OBJECT_PARAMETERS = {
# 'CacheControl': 'max-age=86400',
# }
# AWS_LOCATION = 'static'
# STATIC_URL = 'https://%s/%s/' % (AWS_S3_CUSTOM_DOMAIN, AWS_LOCATION)
STATICFILES_STORAGE = 'storages.backends.s3boto3.S3Boto3Storage'
DEFAULT_FILE_STORAGE = 'storages.backends.s3boto3.S3Boto3Storage'
AWS_DEFAULT_ACL = None
# Configure Django App for Heroku.
django_heroku.settings(locals())
# Custom user model
AUTH_USER_MODEL = 'users.NewUser'
# Authentication with Google
AUTHENTICATION_BACKENDS = (
# Others auth providers (e.g. Facebook, OpenId, etc)
# Google OAuth2
'social_core.backends.google.GoogleOAuth2',
# Twitter
'social_core.backends.twitter.TwitterOAuth',
# django-rest-framework-social-oauth2
'drf_social_oauth2.backends.DjangoOAuth2',
# Django
'django.contrib.auth.backends.ModelBackend',
)
# Google configuration
SOCIAL_AUTH_GOOGLE_OAUTH2_KEY = os.getenv('SOCIAL_AUTH_GOOGLE_OAUTH2_KEY')
SOCIAL_AUTH_GOOGLE_OAUTH2_SECRET = os.getenv('SOCIAL_AUTH_GOOGLE_OAUTH2_SECRET')
# Define SOCIAL_AUTH_GOOGLE_OAUTH2_SCOPE to get extra permissions from Google.
SOCIAL_AUTH_GOOGLE_OAUTH2_SCOPE = [
'https://www.googleapis.com/auth/userinfo.email',
'https://www.googleapis.com/auth/userinfo.profile',
]
SOCIAL_AUTH_USER_FIELDS = ['email', 'username', 'first_name', 'password']
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'console': {
'class': 'logging.StreamHandler',
},
},
'root': {
'handlers': ['console'],
'level': 'WARNING',
},
'loggers': {
'django': {
'handlers': ['console'],
'level': os.getenv('DJANGO_LOG_LEVEL', 'INFO'),
'propagate': False,
},
},
}
#
| 25.605735 | 91 | 0.698488 |
6578e0e3ed66e1616618ec2e4ea7955efe89896b
| 920 |
py
|
Python
|
tests/opcodes/cases/test_big_map_mem_string_122.py
|
juztin/pytezos-1
|
7e608ff599d934bdcf129e47db43dbdb8fef9027
|
[
"MIT"
] | 1 |
2021-05-20T16:52:08.000Z
|
2021-05-20T16:52:08.000Z
|
tests/opcodes/cases/test_big_map_mem_string_122.py
|
juztin/pytezos-1
|
7e608ff599d934bdcf129e47db43dbdb8fef9027
|
[
"MIT"
] | 1 |
2020-12-30T16:44:56.000Z
|
2020-12-30T16:44:56.000Z
|
tests/opcodes/cases/test_big_map_mem_string_122.py
|
tqtezos/pytezos
|
a4ac0b022d35d4c9f3062609d8ce09d584b5faa8
|
[
"MIT"
] | 1 |
2022-03-20T19:01:00.000Z
|
2022-03-20T19:01:00.000Z
|
from unittest import TestCase
from tests import abspath
from pytezos.repl.interpreter import Interpreter
from pytezos.michelson.converter import michelson_to_micheline
from pytezos.repl.parser import parse_expression
class OpcodeTestbig_map_mem_string_122(TestCase):
def setUp(self):
self.maxDiff = None
self.i = Interpreter(debug=True)
def test_opcode_big_map_mem_string_122(self):
res = self.i.execute(f'INCLUDE "{abspath("opcodes/contracts/big_map_mem_string.tz")}"')
self.assertTrue(res['success'])
res = self.i.execute('RUN "foo" (Pair { Elt "bar" 4 ; Elt "foo" 11 } None)')
self.assertTrue(res['success'])
exp_val_expr = michelson_to_micheline('(Pair 0 (Some True))')
exp_val = parse_expression(exp_val_expr, res['result']['storage'].type_expr)
self.assertEqual(exp_val, res['result']['storage']._val)
| 35.384615 | 95 | 0.691304 |
94d628e4efdcf1a568cc50774fb3e5bff55c03f2
| 6,211 |
py
|
Python
|
afterglow_core/resources/job_plugins/batch_download_job.py
|
SkynetRTN/afterglow-access-server
|
3d8d62f622577fdd1ae7b0076cb536251f7bf0cd
|
[
"Apache-2.0"
] | 2 |
2021-05-24T15:12:07.000Z
|
2022-02-17T19:58:16.000Z
|
afterglow_core/resources/job_plugins/batch_download_job.py
|
SkynetRTN/afterglow-access-server
|
3d8d62f622577fdd1ae7b0076cb536251f7bf0cd
|
[
"Apache-2.0"
] | 1 |
2022-02-27T03:01:06.000Z
|
2022-02-27T03:01:06.000Z
|
afterglow_core/resources/job_plugins/batch_download_job.py
|
SkynetRTN/afterglow-access-server
|
3d8d62f622577fdd1ae7b0076cb536251f7bf0cd
|
[
"Apache-2.0"
] | 2 |
2021-06-08T18:16:40.000Z
|
2021-07-09T14:19:49.000Z
|
"""
Afterglow Core: data file and data provider asset batch download job plugins
"""
from io import BytesIO
from zipfile import ZIP_DEFLATED, ZipFile
from typing import List as TList
from marshmallow.fields import Integer, List, String
from ...models import Job
from ...errors import MissingFieldError, ValidationError
from ...errors.data_file import UnknownDataFileGroupError
from ...errors.data_provider import (
NonBrowseableDataProviderError, UnknownDataProviderError)
from ..data_files import (
get_data_file, get_data_file_bytes, get_data_file_group, get_data_file_path)
from ..data_providers import providers
__all__ = ['BatchDownloadJob', 'BatchAssetDownloadJob']
class BatchDownloadJob(Job):
"""
Data file batch download job
"""
type = 'batch_download'
description = 'Download Multiple Data Files and Groups'
file_ids: TList[int] = List(Integer(), default=[])
group_names: TList[str] = List(String(), default=[])
def run(self):
if not self.file_ids and not self.group_names:
raise ValidationError('file_ids|group_names', 'Empty job')
if len(self.file_ids) == 1 and not self.group_names:
# Single data file; don't create archive
self.create_job_file(
'download', get_data_file_bytes(self.user_id, self.file_ids[0]),
mimetype='image/fits')
return
# Collect data files in groups
groups = {}
for file_id in self.file_ids:
try:
df = get_data_file(self.user_id, file_id)
groups.setdefault(df.group_name, set()).add((file_id, df.name))
except Exception as e:
self.add_error(e, {'file_id': file_id})
for group_name in self.group_names:
try:
group = get_data_file_group(self.user_id, group_name)
if not group:
raise UnknownDataFileGroupError(group_name=group_name)
for df in group:
groups.setdefault(group_name, set()).add((df.id, df.name))
except Exception as e:
self.add_error(e, {'group_name': group_name})
# Ensure unique filenames within the archive
file_id_lists, filenames = list(zip(
*[([f[0] for f in list(files)], list(files)[0][1])
for files in groups.values()]))
for i, real_filename in enumerate(filenames):
n = 1
filename = real_filename
while filename in filenames[:i] + filenames[i + 1:]:
filename = real_filename + '.' + str(n)
n += 1
if filename != real_filename:
filenames[i] = filename
# Add single-file groups to the archive as individual files at top
# level, multi-file groups as directories
data = BytesIO()
with ZipFile(data, 'w', ZIP_DEFLATED) as zf:
for file_no, (file_ids, filename) in enumerate(
zip(file_id_lists, filenames)):
if len(file_ids) == 1:
file_id = file_ids[0]
try:
zf.write(
get_data_file_path(self.user_id, file_id),
filename)
except Exception as e:
self.add_error(
e, {'file_id': file_id, 'filename': filename})
else:
for i, file_id in enumerate(file_ids):
try:
zf.write(
get_data_file_path(self.user_id, file_id),
filename + '/' + filename + '.' + str(i + 1))
except Exception as e:
self.add_error(
e, {'file_id': file_id, 'filename': filename})
self.update_progress((file_no + 1)/len(filenames)*100)
self.create_job_file('download', data.getvalue(), 'application/zip')
class BatchAssetDownloadJob(Job):
"""
Data provider asset batch download job
"""
type = 'batch_asset_download'
description = 'Download Multiple Data Provider Assets'
provider_id: int = Integer(default=None)
paths: TList[str] = List(String(), default=[])
def run(self):
if self.provider_id is None:
raise MissingFieldError(field='provider_id')
if not self.paths:
raise ValidationError('paths', 'Empty job')
try:
provider = providers[self.provider_id]
except KeyError:
raise UnknownDataProviderError(id=self.provider_id)
provider.check_auth()
# Recursively retrieve all non-collection assets at the given paths
assets = []
def walk(asset_path, name=None):
a = provider.get_asset(asset_path)
if name is None:
name = a.name
if a.collection:
if not provider.browseable:
raise NonBrowseableDataProviderError(id=provider.id)
for child_asset in provider.get_child_assets(asset_path)[0]:
walk(child_asset.path, name + '/' + child_asset.name)
else:
assets.append((a, name))
for path in set(self.paths):
walk(path)
if len(assets) == 1:
# Single non-collection asset; don't create archive
asset = assets[0][0]
self.create_job_file(
'download', provider.get_asset_data(asset.path),
mimetype=asset.mimetype)
return
# Add asset to archive
data = BytesIO()
with ZipFile(data, 'w', ZIP_DEFLATED) as zf:
for file_no, (asset, filename) in enumerate(assets):
try:
zf.writestr(filename, provider.get_asset_data(asset.path))
except Exception as e:
self.add_error(e, {'path': asset.path})
self.update_progress((file_no + 1)/len(assets)*100)
self.create_job_file('download', data.getvalue(), 'application/zip')
| 37.415663 | 80 | 0.566575 |
77522f21cf5b38a7e93496715eb62f57270bd4b8
| 1,181 |
py
|
Python
|
streamtest.py
|
fredryce/stocker
|
041fbe8348f7a035a607a214477cf423c4259171
|
[
"MIT"
] | null | null | null |
streamtest.py
|
fredryce/stocker
|
041fbe8348f7a035a607a214477cf423c4259171
|
[
"MIT"
] | null | null | null |
streamtest.py
|
fredryce/stocker
|
041fbe8348f7a035a607a214477cf423c4259171
|
[
"MIT"
] | null | null | null |
'''
import yfinance as yf
import streamlit as st
st.write("""
# Simple Stock Price App
Shown are the stock **closing price** and ***volume*** of Google!
""")
# https://towardsdatascience.com/how-to-get-stock-data-using-python-c0de1df17e75
#define the ticker symbol
tickerSymbol = 'GOOGL'
#get data on this ticker
tickerData = yf.Ticker(tickerSymbol)
#get the historical prices for this ticker
tickerDf = tickerData.history(period='1d', start='2010-5-31', end='2020-5-31')
# Open High Low Close Volume Dividends Stock Splits
st.write("""
## Closing Price
""")
st.line_chart(tickerDf.Close)
st.write("""
## Volume Price
""")
st.line_chart(tickerDf.Volume)
'''
import matplotlib.pyplot as plt
import numpy as np
import streamlit as st
import time
import mplfinance as mpf
from util import *
wb=login()
fig = plt.figure()
the_plot = st.pyplot(plt)
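# poll the broker API for the latest one-minute BTCUSD bars and redraw the
# candlestick chart into the Streamlit placeholder on every iteration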
while True:
history_df = wb.get_bars(stock="BTCUSD", interval='m1', count=100, extendTrading=1)
plt.clf()
	# mplfinance builds its own figure when returnfig=True; hand that figure to Streamlit
	fig, axs = mpf.plot(history_df, type='candle', style='yahoo',
		ylabel='Price (USD)',
		ylabel_lower='Volume',
		volume=True,
		figscale=2,
		returnfig=True,
		block=False
		)
	the_plot.pyplot(fig)
	plt.close(fig)  # avoid accumulating figures across loop iterations
time.sleep(0.1)
| 20.016949 | 84 | 0.720576 |
cf45e2194efdc5a242c3049c2c0e6bba4f9e21c4
| 310 |
py
|
Python
|
program_synthesis/karel/tools/indexed_file.py
|
kavigupta/program_synthesis
|
0b04b1d3b63954ba3d404a8d96c4da18667a1b02
|
[
"Apache-2.0"
] | 123 |
2018-06-09T00:49:39.000Z
|
2022-03-09T05:41:20.000Z
|
program_synthesis/karel/tools/indexed_file.py
|
kavigupta/program_synthesis
|
0b04b1d3b63954ba3d404a8d96c4da18667a1b02
|
[
"Apache-2.0"
] | 9 |
2018-06-12T01:01:17.000Z
|
2022-03-18T09:06:39.000Z
|
program_synthesis/karel/tools/indexed_file.py
|
kavigupta/program_synthesis
|
0b04b1d3b63954ba3d404a8d96c4da18667a1b02
|
[
"Apache-2.0"
] | 24 |
2018-06-09T00:42:46.000Z
|
2021-09-29T08:23:32.000Z
|
import struct
def read_index(filename):
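    """Read record byte offsets from an index file: a flat sequence of
    unsigned 64-bit little-endian integers (struct '<Q'), one offset per record."""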
index = []
with open(filename, 'rb') as index_file:
while True:
offset = index_file.read(8)
if not offset:
break
offset, = struct.unpack('<Q', offset)
index.append(offset)
return index
| 23.846154 | 49 | 0.53871 |
aa89191e64da5d884b741268f44ee71d08842b8d
| 262 |
py
|
Python
|
falcon_provider_cache/__init__.py
|
bcsummers/falcon-provider-cache
|
35fd7498d970fc3b836a629710c4a90271dda0a6
|
[
"Apache-2.0"
] | 1 |
2020-08-03T18:10:09.000Z
|
2020-08-03T18:10:09.000Z
|
falcon_provider_cache/__init__.py
|
bcsummers/falcon-provider-cache
|
35fd7498d970fc3b836a629710c4a90271dda0a6
|
[
"Apache-2.0"
] | null | null | null |
falcon_provider_cache/__init__.py
|
bcsummers/falcon-provider-cache
|
35fd7498d970fc3b836a629710c4a90271dda0a6
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""Falcon cache module."""
# flake8: noqa
from .__metadata__ import (
__author__,
__author_email__,
__description__,
__license__,
__package_name__,
__url__,
__version__,
)
from .utils import RedisCacheProvider
| 18.714286 | 37 | 0.679389 |
3ea7b041d0b3cf74376eb2135f29b0c583537337
| 1,683 |
py
|
Python
|
src/gagarin/core/predicate.py
|
kango31/SpaceIsOurs
|
d54f60e4cf25e5cdabd1463f1c730f9500531838
|
[
"MIT"
] | null | null | null |
src/gagarin/core/predicate.py
|
kango31/SpaceIsOurs
|
d54f60e4cf25e5cdabd1463f1c730f9500531838
|
[
"MIT"
] | null | null | null |
src/gagarin/core/predicate.py
|
kango31/SpaceIsOurs
|
d54f60e4cf25e5cdabd1463f1c730f9500531838
|
[
"MIT"
] | null | null | null |
#!encoding: utf-8
"""
This module defines a class to interpret predicates from strings.
"""
import re
from .parser import DslInterpreter
class PropertyError(KeyError):
"""
Custom exception when trying to get a non-existing property from component.
"""
def __init__(self, msg):
"""
Constructor.
:param msg: non-existing property
:type msg: str
"""
super(PropertyError, self).__init__(msg)
class Predicate():
"""
Convert a query in a form of a string into a predicate to be used on a
Component. If a function-like object is provided, then it is used as-is.
"""
def __init__(self, query):
"""
Constructor.
:param query: query string or function object
:type: str or callable
"""
if not callable(query):
self._query = query
self._func = None
else:
self._func = query
self._query = None
self._interpreter = DslInterpreter()
def __call__(self, component):
"""
Evaluate the query on given component.
:param component: component to be checked
:type component: Component
:return: True if component passes the predicate and False otherwise
:rtype: bool
"""
if self._func is not None:
try:
return self._func(component)
except PropertyError:
return False
else:
try:
self._interpreter.attach(component)
return self._interpreter.interpret(self._query)
except PropertyError:
return False
| 25.892308 | 79 | 0.578134 |
7d8d26c5db3af05aafb181f955ccb7806859932c
| 5,005 |
py
|
Python
|
sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.2-beta/async_samples/sample_create_composed_model_async.py
|
rsdoherty/azure-sdk-for-python
|
6bba5326677468e6660845a703686327178bb7b1
|
[
"MIT"
] | null | null | null |
sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.2-beta/async_samples/sample_create_composed_model_async.py
|
rsdoherty/azure-sdk-for-python
|
6bba5326677468e6660845a703686327178bb7b1
|
[
"MIT"
] | null | null | null |
sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.2-beta/async_samples/sample_create_composed_model_async.py
|
rsdoherty/azure-sdk-for-python
|
6bba5326677468e6660845a703686327178bb7b1
|
[
"MIT"
] | null | null | null |
# coding: utf-8
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
"""
FILE: sample_create_composed_model_async.py
DESCRIPTION:
Model compose allows multiple models to be composed and called with a single model ID.
This is useful when you have built different models and want to aggregate a group of
them into a single model that you (or a user) could use to analyze a document. When doing
so, you can let the service decide which model more accurately represents the document to
analyze, instead of manually trying each built model against the document and selecting
the most accurate one.
In our case, we will be writing an application that collects the expenses a company is making.
There are 4 main areas where we get purchase orders from (office supplies, office equipment,
furniture, and cleaning supplies). Because each area has its own document with its own structure,
we need to build a model per document. Note that you can substitute your own models or container
SAS URLs for this sample.
USAGE:
python sample_create_composed_model_async.py
Set the environment variables with your own values before running the sample:
1) AZURE_FORM_RECOGNIZER_ENDPOINT - the endpoint to your Cognitive Services resource.
2) AZURE_FORM_RECOGNIZER_KEY - your Form Recognizer API key
3) PURCHASE_ORDER_OFFICE_SUPPLIES_SAS_URL - a container SAS URL to your Azure Storage blob container.
4) PURCHASE_ORDER_OFFICE_EQUIPMENT_SAS_URL - a container SAS URL to your Azure Storage blob container.
5) PURCHASE_ORDER_OFFICE_FURNITURE_SAS_URL - a container SAS URL to your Azure Storage blob container.
6) PURCHASE_ORDER_OFFICE_CLEANING_SUPPLIES_SAS_URL - a container SAS URL to your Azure Storage blob container.
"""
import os
import asyncio
async def sample_create_composed_model_async():
# [START composed_model]
from azure.core.credentials import AzureKeyCredential
from azure.ai.formrecognizer.aio import DocumentModelAdministrationClient
endpoint = os.environ["AZURE_FORM_RECOGNIZER_ENDPOINT"]
key = os.environ["AZURE_FORM_RECOGNIZER_KEY"]
po_supplies = os.environ['PURCHASE_ORDER_OFFICE_SUPPLIES_SAS_URL']
po_equipment = os.environ['PURCHASE_ORDER_OFFICE_EQUIPMENT_SAS_URL']
po_furniture = os.environ['PURCHASE_ORDER_OFFICE_FURNITURE_SAS_URL']
po_cleaning_supplies = os.environ['PURCHASE_ORDER_OFFICE_CLEANING_SUPPLIES_SAS_URL']
document_model_admin_client = DocumentModelAdministrationClient(endpoint=endpoint, credential=AzureKeyCredential(key))
async with document_model_admin_client:
supplies_poller = await document_model_admin_client.begin_build_model(
po_supplies, description="Purchase order-Office supplies"
)
equipment_poller = await document_model_admin_client.begin_build_model(
po_equipment, description="Purchase order-Office Equipment"
)
furniture_poller = await document_model_admin_client.begin_build_model(
po_furniture, description="Purchase order-Furniture"
)
cleaning_supplies_poller = await document_model_admin_client.begin_build_model(
po_cleaning_supplies, description="Purchase order-Cleaning Supplies"
)
supplies_model = await supplies_poller.result()
equipment_model = await equipment_poller.result()
furniture_model = await furniture_poller.result()
cleaning_supplies_model = await cleaning_supplies_poller.result()
purchase_order_models = [
supplies_model.model_id,
equipment_model.model_id,
furniture_model.model_id,
cleaning_supplies_model.model_id
]
poller = await document_model_admin_client.begin_create_composed_model(
purchase_order_models, description="Office Supplies Composed Model"
)
model = await poller.result()
print("Office Supplies Composed Model Info:")
print("Model ID: {}".format(model.model_id))
print("Description: {}".format(model.description))
print("Model created on: {}\n".format(model.created_on))
print("Doc types the model can recognize:")
for name, doc_type in model.doc_types.items():
print("\nDoc Type: '{}' which has the following fields:".format(name))
for field_name, field in doc_type.field_schema.items():
print("Field: '{}' has type '{}' and confidence score {}".format(
field_name, field["type"], doc_type.field_confidence[field_name]
))
# [END composed_model]
async def main():
await sample_create_composed_model_async()
if __name__ == '__main__':
loop = asyncio.get_event_loop()
loop.run_until_complete(main())
| 47.666667 | 122 | 0.722278 |
54a6da630d38b931cb9300ca4cf78620f04fa837
| 3,340 |
py
|
Python
|
image_classification/resnet.py
|
xiaoyeye1117/models
|
16a2509e34c692e19ccd6b876a0fae1f3c06fa7a
|
[
"Apache-2.0"
] | null | null | null |
image_classification/resnet.py
|
xiaoyeye1117/models
|
16a2509e34c692e19ccd6b876a0fae1f3c06fa7a
|
[
"Apache-2.0"
] | null | null | null |
image_classification/resnet.py
|
xiaoyeye1117/models
|
16a2509e34c692e19ccd6b876a0fae1f3c06fa7a
|
[
"Apache-2.0"
] | 1 |
2019-01-22T15:40:57.000Z
|
2019-01-22T15:40:57.000Z
|
import paddle.v2 as paddle
__all__ = ['resnet_imagenet', 'resnet_cifar10']
def conv_bn_layer(input,
ch_out,
filter_size,
stride,
padding,
active_type=paddle.activation.Relu(),
ch_in=None):
tmp = paddle.layer.img_conv(
input=input,
filter_size=filter_size,
num_channels=ch_in,
num_filters=ch_out,
stride=stride,
padding=padding,
act=paddle.activation.Linear(),
bias_attr=False)
return paddle.layer.batch_norm(input=tmp, act=active_type)
def shortcut(input, ch_out, stride):
if input.num_filters != ch_out:
return conv_bn_layer(input, ch_out, 1, stride, 0,
paddle.activation.Linear())
else:
return input
def basicblock(input, ch_out, stride):
short = shortcut(input, ch_out, stride)
conv1 = conv_bn_layer(input, ch_out, 3, stride, 1)
conv2 = conv_bn_layer(conv1, ch_out, 3, 1, 1, paddle.activation.Linear())
return paddle.layer.addto(
input=[short, conv2], act=paddle.activation.Relu())
def bottleneck(input, ch_out, stride):
short = shortcut(input, ch_out * 4, stride)
conv1 = conv_bn_layer(input, ch_out, 1, stride, 0)
conv2 = conv_bn_layer(conv1, ch_out, 3, 1, 1)
conv3 = conv_bn_layer(conv2, ch_out * 4, 1, 1, 0,
paddle.activation.Linear())
return paddle.layer.addto(
input=[short, conv3], act=paddle.activation.Relu())
def layer_warp(block_func, input, ch_out, count, stride):
conv = block_func(input, ch_out, stride)
for i in range(1, count):
conv = block_func(conv, ch_out, 1)
return conv
def resnet_imagenet(input, class_dim, depth=50):
cfg = {
        18: ([2, 2, 2, 2], basicblock),
34: ([3, 4, 6, 3], basicblock),
50: ([3, 4, 6, 3], bottleneck),
101: ([3, 4, 23, 3], bottleneck),
152: ([3, 8, 36, 3], bottleneck)
}
stages, block_func = cfg[depth]
conv1 = conv_bn_layer(
input, ch_in=3, ch_out=64, filter_size=7, stride=2, padding=3)
pool1 = paddle.layer.img_pool(input=conv1, pool_size=3, stride=2)
res1 = layer_warp(block_func, pool1, 64, stages[0], 1)
res2 = layer_warp(block_func, res1, 128, stages[1], 2)
res3 = layer_warp(block_func, res2, 256, stages[2], 2)
res4 = layer_warp(block_func, res3, 512, stages[3], 2)
pool2 = paddle.layer.img_pool(
input=res4, pool_size=7, stride=1, pool_type=paddle.pooling.Avg())
out = paddle.layer.fc(
input=pool2, size=class_dim, act=paddle.activation.Softmax())
return out
def resnet_cifar10(input, class_dim, depth=32):
# depth should be one of 20, 32, 44, 56, 110, 1202
assert (depth - 2) % 6 == 0
    n = (depth - 2) // 6
    nStages = {16, 64, 128}
    conv1 = conv_bn_layer(
        input, ch_in=3, ch_out=16, filter_size=3, stride=1, padding=1)
    res1 = layer_warp(basicblock, conv1, 16, n, 1)
    res2 = layer_warp(basicblock, res1, 32, n, 2)
    res3 = layer_warp(basicblock, res2, 64, n, 2)
pool = paddle.layer.img_pool(
input=res3, pool_size=8, stride=1, pool_type=paddle.pooling.Avg())
out = paddle.layer.fc(
input=pool, size=class_dim, act=paddle.activation.Softmax())
return out
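# --- Hedged usage sketch (not part of the original file) --------------------
# Minimal illustration of wiring resnet_cifar10 into a paddle.v2 network
# definition. The data-layer names, the 32x32x3 CIFAR-10 input shape and the
# classification_cost wiring are assumptions for illustration only.
def build_cifar10_network(class_dim=10, depth=32):
    image = paddle.layer.data(
        name="image", type=paddle.data_type.dense_vector(3 * 32 * 32))
    label = paddle.layer.data(
        name="label", type=paddle.data_type.integer_value(class_dim))
    out = resnet_cifar10(image, class_dim=class_dim, depth=depth)
    cost = paddle.layer.classification_cost(input=out, label=label)
    return cost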
| 34.791667 | 77 | 0.614671 |
d2b4c768eb5ce7b23afd46c5b98cb4abad6be47e
| 8,615 |
py
|
Python
|
src/spaceone/inventory/model/keyvault/data.py
|
jean1042/plugin-azure-cloud-services
|
3a75a516c9a4d1e8a4962988934ead3fd40e8494
|
[
"Apache-2.0"
] | 1 |
2020-12-08T11:59:54.000Z
|
2020-12-08T11:59:54.000Z
|
src/spaceone/inventory/model/keyvault/data.py
|
jean1042/plugin-azure-cloud-services
|
3a75a516c9a4d1e8a4962988934ead3fd40e8494
|
[
"Apache-2.0"
] | 4 |
2021-01-26T10:43:37.000Z
|
2021-12-17T10:13:33.000Z
|
src/spaceone/inventory/model/keyvault/data.py
|
jean1042/plugin-azure-cloud-services
|
3a75a516c9a4d1e8a4962988934ead3fd40e8494
|
[
"Apache-2.0"
] | 2 |
2021-01-13T03:24:05.000Z
|
2021-01-19T07:25:45.000Z
|
from schematics import Model
from schematics.types import ModelType, ListType, StringType, IntType, BooleanType, NumberType, DateTimeType, \
TimestampType, UTCDateTimeType, TimedeltaType, FloatType
class Tags(Model):
key = StringType(serialize_when_none=False)
value = StringType(serialize_when_none=False)
class SubResource(Model):
id = StringType()
class Permissions(Model):
certificates = ListType(StringType, serialize_when_none=False)
keys = ListType(StringType, serialize_when_none=False)
secrets = ListType(StringType, serialize_when_none=False)
storage = ListType(StringType, serialize_when_none=False)
class AccessPolicyEntry(Model):
application_id = StringType(serialize_when_none=False)
object_id = StringType(serialize_when_none=False)
permissions = ModelType(Permissions, serialize_when_none=False)
tenant_id = StringType(serialize_when_none=False)
class IPRule(Model):
value = StringType(serialize_when_none=False)
class VirtualNetworkRule(Model):
ip = StringType(serialize_when_none=False)
ignore_missing_vnet_service_endpoint = BooleanType(serialize_when_none=False)
class NetworkRuleSet(Model):
bypass = StringType(choices=('AzureServices', 'None'), serialize_when_none=False)
default_action = StringType(choices=('Allow', 'Deny'), serialize_when_none=False)
ip_rules = ListType(ModelType(IPRule), serialize_when_none=False)
virtual_network_rules = ListType(ModelType(VirtualNetworkRule), serialize_when_none=False)
class PrivateEndpoint(Model):
id = StringType(serialize_when_none=False)
class PrivateLinkServiceConnectionState(Model):
actions_required = StringType(serialize_when_none=False)
description = StringType(serialize_when_none=False)
status = StringType(choices=('Approved', 'Disconnected', 'Pending', 'Rejected'), serialize_when_none=False)
class PrivateEndpointConnectionItem(Model):
etag = StringType(serialize_when_none=False)
id = StringType(serialize_when_none=False)
name = StringType(serialize_when_none=False)
private_endpoint = ModelType(PrivateEndpoint, serialize_when_none=False)
private_link_service_connection_state = ModelType(PrivateLinkServiceConnectionState, serialize_when_none=False)
provisioning_state = StringType(choices=('Creating', 'Deleting', 'Disconnected', 'Failed', 'Succeeded', 'Updating'), serialize_when_none=False)
class Sku(Model):
family = StringType(serialize_when_none=False)
name = StringType(choices=('premium', 'standard'), serialize_when_none=False)
class SecretAttributes(Model):
created = DateTimeType(serialize_when_none=False)
enabled = BooleanType(serialize_when_none=False)
exp = DateTimeType(serialize_when_none=False)
nbf = DateTimeType(serialize_when_none=False)
recoverable_days = IntType(serialize_when_none=False)
recovery_level = StringType(choices=('CustomizedRecoverable', 'CustomizedRecoverable+ProtectedSubscription',
'CustomizedRecoverable+Purgeable', 'Purgeable', 'Recoverable', 'Recoverable+ProtectedSubscription',
'Recoverable+Purgeable'), serialize_when_none=False)
updated = DateTimeType(serialize_when_none=False)
class ResourceId(Model):
source_id = StringType(serialize_when_none=False)
vault_url = StringType(serialize_when_none=False)
name = StringType(serialize_when_none=False)
version = StringType(serialize_when_none=False)
class VaultId(Model):
resource_id = ModelType(ResourceId, serialize_when_none=False)
class SecretItem(Model):
_attributes = ModelType(SecretAttributes, serialize_when_none=False)
_content_type = StringType(serialize_when_none=False)
_id = StringType(serialize_when_none=False)
_managed = BooleanType(serialize_when_none=False)
_tags = ModelType(Tags, serialize_when_none=False)
_vault_id = ModelType(VaultId, serialize_when_none=False)
class CertificateAttributes(Model):
created = DateTimeType(serialize_when_none=False)
enabled = BooleanType(serialize_when_none=False)
exp = DateTimeType(serialize_when_none=False)
nbf = DateTimeType(serialize_when_none=False)
recoverable_days = IntType(serialize_when_none=False)
recovery_level = StringType(choices=('CustomizedRecoverable', 'CustomizedRecoverable+ProtectedSubscription',
'CustomizedRecoverable+Purgeable', 'Purgeable', 'Recoverable',
'Recoverable+ProtectedSubscription',
'Recoverable+Purgeable'), serialize_when_none=False)
updated = DateTimeType(serialize_when_none=False)
class CertificateItem(Model):
_attributes = ModelType(CertificateAttributes, serialize_when_none=False)
_id = StringType(serialize_when_none=False)
_content_type = StringType(serialize_when_none=False)
_tags = ModelType(Tags, serialize_when_none=False)
_vault_id = ModelType(VaultId, serialize_when_none=False)
class VaultProperties(Model):
access_policies = ListType(ModelType(AccessPolicyEntry), serialize_when_none=False)
create_mode = StringType(choices=('default', 'recover'), serialize_when_none=False)
enable_purge_protection = BooleanType(default=False, serialize_when_none=False)
enable_purge_protection_str = StringType(serialize_when_none=False, default='Disabled')
enable_rbac_authorization = BooleanType(serialize_when_none=False)
enable_soft_delete = BooleanType(serialize_when_none=False)
enabled_for_deployment = BooleanType(serialize_when_none=False)
enabled_for_disk_encryption = BooleanType(serialize_when_none=False)
enabled_for_template_deployment = BooleanType(serialize_when_none=False)
hsm_pool_resource_id = StringType(serialize_when_none=False)
network_acls = ModelType(NetworkRuleSet, serialize_when_none=False)
private_endpoint_connections = ListType(ModelType(PrivateEndpointConnectionItem), serialize_when_none=False)
provisioning_state = StringType(choices=('RegisteringDns', 'Succeeded'), serialize_when_none=False)
sku = ModelType(Sku, serialize_when_none=False)
soft_delete_retention_in_days = IntType(serialize_when_none=False)
tenant_id = StringType(serialize_when_none=False)
vault_uri = StringType(serialize_when_none=False)
class KeyAttributes(Model):
created = DateTimeType(serialize_when_none=False)
enabled = BooleanType(serialize_when_none=False)
exp = DateTimeType(serialize_when_none=False)
nbf = DateTimeType(serialize_when_none=False)
recoverable_days = IntType(serialize_when_none=False)
recovery_level = StringType(choices=('CustomizedRecoverable', 'CustomizedRecoverable+ProtectedSubscription',
'CustomizedRecoverable+Purgeable', 'Purgeable', 'Recoverable',
'Recoverable+ProtectedSubscription',
'Recoverable+Purgeable'), serialize_when_none=False)
updated = DateTimeType(serialize_when_none=False)
class KeyItem(Model):
id = StringType(serialize_when_none=False)
name = StringType(serialize_when_none=False)
type = StringType(serialize_when_none=False)
location = StringType(serialize_when_none=False)
key_uri = StringType(serialize_when_none=False)
attributes = ModelType(KeyAttributes, serialize_when_none=False)
kid = StringType(serialize_when_none=False)
managed = BooleanType(serialize_when_none=False)
tags = ModelType(Tags, serialize_when_none=False)
class KeyVault(Model): # Main class
etag = StringType(serialize_when_none=False)
id = StringType(serialize_when_none=False)
name = StringType(serialize_when_none=False)
subscription_id = StringType(serialize_when_none=False)
subscription_name = StringType(serialize_when_none=False)
resource_group = StringType(serialize_when_none=False)
location = StringType(serialize_when_none=False)
properties = ModelType(VaultProperties, serialize_when_none=False)
keys = ListType(ModelType(KeyItem), serialize_when_none=False)
secrets = ListType(ModelType(SecretItem), serialize_when_none=False)
certificates = ListType(ModelType(CertificateItem), serialize_when_none=False)
tags = ModelType(Tags, serialize_when_none=False)
type = StringType(serialize_when_none=False)
def reference(self):
return {
"resource_id": self.id,
"external_link": f"https://portal.azure.com/#@.onmicrosoft.com/resource{self.id}/overview",
}
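# --- Hedged usage sketch (not part of the original file) --------------------
# Schematics models such as KeyVault are normally built from a plain dict and
# serialized back with to_primitive(). The sample payload below is a made-up
# illustration, not real collector output.
if __name__ == '__main__':
    sample = {
        'id': '/subscriptions/xxxx/resourceGroups/rg1/providers/Microsoft.KeyVault/vaults/my-vault',
        'name': 'my-vault',
        'location': 'eastus',
        'properties': {
            'sku': {'family': 'A', 'name': 'standard'},
            'enable_purge_protection': True,
        },
    }
    vault = KeyVault(sample)
    vault.validate()              # raises a DataError if any field is invalid
    print(vault.to_primitive())   # back to a serializable dict
    print(vault.reference())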
| 46.069519 | 147 | 0.762391 |
23fdcb3a6f0afe304ceebf8b1b6c1d6956339d6b
| 4,591 |
py
|
Python
|
smartva/csmf_grapher.py
|
rileyhazard/SmartVA-Analyze-1
|
0573eeff27d03f54e7506db4f1631c0cd9f54bbb
|
[
"MIT"
] | 4 |
2019-01-23T12:57:47.000Z
|
2020-04-18T17:13:08.000Z
|
smartva/csmf_grapher.py
|
rileyhazard/SmartVA-Analyze-1
|
0573eeff27d03f54e7506db4f1631c0cd9f54bbb
|
[
"MIT"
] | 4 |
2019-01-09T22:10:07.000Z
|
2022-02-16T04:57:06.000Z
|
smartva/csmf_grapher.py
|
rileyhazard/SmartVA-Analyze-1
|
0573eeff27d03f54e7506db4f1631c0cd9f54bbb
|
[
"MIT"
] | 11 |
2018-12-11T22:01:13.000Z
|
2022-01-07T11:38:02.000Z
|
import csv
import os
from collections import OrderedDict, defaultdict
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
from smartva.data.common_data import ADULT, CHILD, NEONATE
from smartva.grapher_prep import GrapherPrep
from smartva.loggers import status_logger
from smartva.utils import status_notifier
INPUT_FILENAME_TEMPLATE = '{:s}-csmf.csv'
OUTPUT_FILENAME_TEMPLATE = '{:s}-figure.png'
MODULE_LABELS = (ADULT, CHILD, NEONATE)
def make_graph(graph_data, module_key, output_dir):
"""Generate and save a cause graph.
:param graph_data: Graph data dict.
:param module_key: Name of the module for which to generate graph.
:param output_dir: Directory in which to save graph.
"""
cause_keys = graph_data.keys()
cause_fractions = graph_data.values()
graph_title = module_key.capitalize() + ' CSMF'
graph_filename = graph_title.replace(' ', '-').lower()
max_value = max(cause_fractions)
xlocations = np.arange(len(cause_keys)) # the x locations for the groups
bar_width = .75 # the width of the bars
# Interactive mode off.
plt.ioff()
fig, ax = plt.subplots()
ax.set_title(graph_title)
ax.set_ylabel('Mortality fractions')
ax.yaxis.grid()
ax.set_xticklabels(cause_keys, rotation=90)
ax.set_xticks(xlocations)
ax.bar(xlocations, cause_fractions, bar_width, color='#C44440', align='center')
# Add whitespace at top of bar.
ax.set_ylim(top=max_value + max_value * 0.1)
# Add whitespace before first bar and after last.
plt.xlim([min(xlocations) - .5, max(xlocations) + 1.0])
# Add some spacing for rotated xlabels.
plt.subplots_adjust(bottom=0.60)
# Save graph figure.
plt.savefig(os.path.join(output_dir, OUTPUT_FILENAME_TEMPLATE.format(graph_filename)), dpi=150)
# Clear the current figure.
plt.clf()
plt.close()
class CSMFGrapher(GrapherPrep):
"""Generate and save a graph for each group's mortality fraction."""
def _update_status(self):
super(CSMFGrapher, self)._update_status()
status_logger.info('Making CSMF graphs')
status_notifier.update({'progress': 1})
def _read_graph_data(self):
super(CSMFGrapher, self)._read_graph_data()
# build ordered dict for values to be graphed. indexed by module
graph_data_unsorted = defaultdict(dict)
status_notifier.update({'sub_progress': (0, len(MODULE_LABELS))})
for cnt, module_key in enumerate(MODULE_LABELS):
status_notifier.update({'sub_progress': (cnt,)})
try:
with open(os.path.join(self.input_dir_path, INPUT_FILENAME_TEMPLATE.format(module_key)), 'rb') as f:
reader = csv.DictReader(f)
for row in reader:
self.check_abort()
cause_key = row['cause'].rstrip()
cause_fraction = row['CSMF']
graph_data_unsorted[module_key][cause_key] = float(cause_fraction)
except IOError:
# The file isn't there, there was no data or an error, so just skip it
continue
for sex in ('male', 'female'):
try:
key = '-'.join([module_key, sex])
filename = os.path.join(self.input_dir_path,
'{:s}-csmf.csv'.format(key))
with open(filename, 'rb') as f:
for row in csv.DictReader(f):
self.check_abort()
cause_key = row['cause'].rstrip()
cause_fraction = row['CSMF']
graph_data_unsorted[key][cause_key] = float(cause_fraction)
except IOError:
continue
return graph_data_unsorted
def _make_graphs(self, graph_data_unsorted):
super(CSMFGrapher, self)._make_graphs(graph_data_unsorted)
# Make csmf graphs.
status_notifier.update({'sub_progress': (0, len(graph_data_unsorted))})
for cnt, (module_key, data) in enumerate(graph_data_unsorted.items()):
self.check_abort()
status_notifier.update({'sub_progress': (cnt,)})
# sort data in decreasing order
graph_data = OrderedDict(sorted(data.iteritems(), key=lambda x: x[1], reverse=True))
make_graph(graph_data, module_key, self.output_dir_path)
status_notifier.update({'sub_progress': None})
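# --- Hedged usage sketch (not part of the original file) --------------------
# make_graph() can be exercised on its own with a hand-built, already-sorted
# mapping of cause -> fraction. The cause names and the /tmp output directory
# below are illustrative assumptions only.
if __name__ == '__main__':
    demo_data = OrderedDict([
        ('Stroke', 0.25),
        ('Road Traffic', 0.15),
        ('Pneumonia', 0.10),
    ])
    # Writes the <module>-csmf figure as a PNG under /tmp.
    make_graph(demo_data, ADULT, '/tmp')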
| 33.757353 | 116 | 0.624483 |
f66646b13b03135b74d14cfcca8a98d51ff7b8d8
| 84 |
py
|
Python
|
teste.py
|
cynthiaribcosta/Rep-teste
|
7f59b6252833113e77e65f5956b5973271a992b3
|
[
"Apache-2.0"
] | null | null | null |
teste.py
|
cynthiaribcosta/Rep-teste
|
7f59b6252833113e77e65f5956b5973271a992b3
|
[
"Apache-2.0"
] | null | null | null |
teste.py
|
cynthiaribcosta/Rep-teste
|
7f59b6252833113e77e65f5956b5973271a992b3
|
[
"Apache-2.0"
] | null | null | null |
import pytest
from principal import soma
def teste_soma():
assert soma(2,4)==6
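# --- Hedged companion sketch (not part of the original file) ----------------
# principal.py is not included in this record; given the assertion above it
# presumably exposes a trivial addition helper along these lines:
#
#     def soma(a, b):
#         return a + b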
| 16.8 | 26 | 0.72619 |
151f551942d6b4815124ef27236954f9c103a4bf
| 121 |
py
|
Python
|
surfaceChange/__init__.py
|
SmithB/surfaceChange
|
28f2a485203ef1bba95ff8dee48958b8c2dcf63f
|
[
"MIT"
] | 1 |
2021-01-30T00:01:44.000Z
|
2021-01-30T00:01:44.000Z
|
surfaceChange/__init__.py
|
SmithB/surfaceChange
|
28f2a485203ef1bba95ff8dee48958b8c2dcf63f
|
[
"MIT"
] | 2 |
2021-01-11T18:43:14.000Z
|
2021-06-21T22:32:43.000Z
|
surfaceChange/__init__.py
|
SmithB/surfaceChange
|
28f2a485203ef1bba95ff8dee48958b8c2dcf63f
|
[
"MIT"
] | 7 |
2020-08-19T22:26:48.000Z
|
2021-12-05T23:11:02.000Z
|
from .ATL11_to_ATL15 import *
from .ATL14_write import *
from .ATL15_write import *
from .reread_data_from_fits import *
| 24.2 | 36 | 0.801653 |
4861015795ae9efc95ad5879fbc059917e78a8ab
| 975 |
py
|
Python
|
fixture/application.py
|
ndo1989/pythoncourse2020
|
23c6842476c4b5c4e2b1c7a32f26171dfa7a5622
|
[
"Apache-2.0"
] | null | null | null |
fixture/application.py
|
ndo1989/pythoncourse2020
|
23c6842476c4b5c4e2b1c7a32f26171dfa7a5622
|
[
"Apache-2.0"
] | null | null | null |
fixture/application.py
|
ndo1989/pythoncourse2020
|
23c6842476c4b5c4e2b1c7a32f26171dfa7a5622
|
[
"Apache-2.0"
] | null | null | null |
from selenium import webdriver
from fixture.session import SessionHelper
from fixture.group import GroupHelper
from fixture.contact import ContactHelper
class Application:
def __init__(self, browser, base_url):
if browser == "firefox":
self.wd = webdriver.Firefox()
self.wd.implicitly_wait(1)
elif browser == "chrome":
self.wd = webdriver.Chrome()
elif browser == "ie":
self.wd = webdriver.Ie()
else:
raise ValueError("Unrecognized browser %s" % browser)
self.session = SessionHelper(self)
self.group = GroupHelper(self)
self.contact = ContactHelper(self)
self.base_url = base_url
def is_valid(self):
try:
self.wd.current_url
return True
except:
return False
def open_home_page(self):
wd = self.wd
wd.get(self.base_url)
def destroy(self):
self.wd.quit()
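# --- Hedged usage sketch (not part of the original file) --------------------
# Typical fixture usage outside of a pytest run; the addressbook URL is an
# illustrative assumption, and the login call is commented out because the
# exact SessionHelper API is not shown in this record.
if __name__ == '__main__':
    app = Application(browser="firefox", base_url="http://localhost/addressbook/")
    # app.session.login(username="admin", password="secret")  # assumed SessionHelper API
    app.open_home_page()
    app.destroy()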
| 27.083333 | 65 | 0.601026 |
1980508ae2da08d93205af90b2276fdce4d2e25b
| 934 |
py
|
Python
|
Training/pytorch/evaluation/recall.py
|
QinchengZhang/PathologySegmentation
|
7a2c21346739a79c33e7a7ccc081018821868eb7
|
[
"MIT"
] | 3 |
2020-10-25T06:18:21.000Z
|
2021-12-23T01:42:56.000Z
|
Training/pytorch/evaluation/recall.py
|
QinchengZhang/PathologySegmentation
|
7a2c21346739a79c33e7a7ccc081018821868eb7
|
[
"MIT"
] | null | null | null |
Training/pytorch/evaluation/recall.py
|
QinchengZhang/PathologySegmentation
|
7a2c21346739a79c33e7a7ccc081018821868eb7
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
'''
Author: TJUZQC
Date: 2020-10-27 13:48:29
LastEditors: TJUZQC
LastEditTime: 2020-10-27 14:11:23
Description: None
'''
import torch
from torch.autograd import Function
class Recall(Function):
"""Recall for individual examples"""
def forward(self, input, target):
assert input.shape == target.shape, f'the size of input and target must be same'
TP = (input == 1) & (target == 1)
FN = (input == 0) & (target == 1)
acc = torch.sum(TP).float() / (torch.sum(TP).float() + torch.sum(FN).float())
# acc = np.sum(input[target == input])/np.sum(input)
return acc
def recall(input, target):
"""recall for batches"""
if input.is_cuda:
s = torch.FloatTensor(1).cuda().zero_()
else:
s = torch.FloatTensor(1).zero_()
for i, c in enumerate(zip(input, target)):
s = s + Recall().forward(c[0], c[1])
return s / (i + 1)
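# --- Hedged usage sketch (not part of the original file) --------------------
# Minimal check of the batched recall() helper on made-up binary masks.
if __name__ == '__main__':
    pred = torch.tensor([[1, 0, 1, 1]])
    gt = torch.tensor([[1, 1, 0, 1]])
    # TP = 2, FN = 1  ->  recall = 2 / 3
    print(recall(pred, gt))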
| 26.685714 | 88 | 0.594218 |
d1e1fcb0121d666ac4a021e00926cf27fee3a4f9
| 704 |
py
|
Python
|
server/plugins/munkiversion/munkiversion.py
|
nathandarnell/sal
|
464414a2666e39bdf5b4b0033a84d5129c93c053
|
[
"Apache-2.0"
] | 215 |
2015-05-04T16:57:56.000Z
|
2022-03-07T12:58:12.000Z
|
server/plugins/munkiversion/munkiversion.py
|
nathandarnell/sal
|
464414a2666e39bdf5b4b0033a84d5129c93c053
|
[
"Apache-2.0"
] | 243 |
2015-07-04T18:10:56.000Z
|
2022-02-27T18:52:40.000Z
|
server/plugins/munkiversion/munkiversion.py
|
nathandarnell/sal
|
464414a2666e39bdf5b4b0033a84d5129c93c053
|
[
"Apache-2.0"
] | 90 |
2015-06-29T19:26:58.000Z
|
2022-02-17T19:03:00.000Z
|
from django.db.models import Count
import sal.plugin
class MunkiVersion(sal.plugin.Widget):
description = 'Chart of installed versions of Munki'
supported_os_families = [sal.plugin.OSFamilies.darwin]
def get_context(self, queryset, **kwargs):
context = self.super_get_context(queryset, **kwargs)
munki_info = queryset.values('munki_version').annotate(
count=Count('munki_version')).order_by('munki_version')
context['data'] = munki_info
return context
def filter(self, machines, data):
machines = machines.filter(munki_version=data)
title = 'Machines running version {} of MSC'.format(data)
return machines, title
| 32 | 67 | 0.690341 |
3442e5026d30eebf1c62a93cd86036642e4ff862
| 2,735 |
py
|
Python
|
ltron/geometry/relative_alignment.py
|
aaronwalsman/ltron
|
ac82055687e74084a6e42da5c12725c523c5c63c
|
[
"CC-BY-2.0",
"MIT"
] | 15 |
2021-06-20T20:37:47.000Z
|
2022-03-01T00:50:38.000Z
|
ltron/geometry/relative_alignment.py
|
aaronwalsman/ltron
|
ac82055687e74084a6e42da5c12725c523c5c63c
|
[
"CC-BY-2.0",
"MIT"
] | null | null | null |
ltron/geometry/relative_alignment.py
|
aaronwalsman/ltron
|
ac82055687e74084a6e42da5c12725c523c5c63c
|
[
"CC-BY-2.0",
"MIT"
] | 1 |
2021-11-22T03:49:28.000Z
|
2021-11-22T03:49:28.000Z
|
import random
import numpy
from pyquaternion import Quaternion
def relative_alignment(transforms, relative_estimates, iterations):
    for _ in range(iterations):
new_transforms = []
for i, transform in enumerate(transforms):
estimates = []
for j, other in enumerate(transforms):
                estimate = other @ relative_estimates[j][i]  # transform j composed with the j->i offset estimates transform i
estimates.append(estimate)
average = average_transforms(estimates)
new_transforms.append(average)
transforms = new_transforms
return transforms
def average_transforms(transforms):
'''
Kids, don't do this, this is not ok.
'''
average = numpy.eye(4)
translates = [transform[:3,3] for transform in transforms]
average_translate = sum(translates)/len(translates)
average[:3,3] = average_translate
qs = [Quaternion(matrix=transform[:3,:3], rtol=1e-4, atol=1e-4) for transform in transforms]
base = random.choice(qs)
flipped_qs = []
for q in qs:
if q[0]*base[0] + q[1]*base[1] + q[2]*base[2] + q[3]*base[3] < 0:
flipped_qs.append(-q)
else:
flipped_qs.append(q)
averaged_q = sum(flipped_qs) / len(flipped_qs)
averaged_q = averaged_q.normalised
average[:3,:3] = averaged_q.rotation_matrix
return average
def test():
def random_configuration(n):
def random_orientation():
while True:
q = Quaternion([random.random()*2-1 for _ in range(4)])
if q.norm <= 1.:
return q.normalised
bounds = [-200, 200]
ts = [[random.uniform(*bounds) for _ in range(3)] for _ in range(n)]
qs = [random_orientation() for _ in range(n)]
configuration = []
for t, q in zip(ts, qs):
transform = numpy.eye(4)
transform[:3,:3] = q.rotation_matrix
transform[:3,3] = t
configuration.append(transform)
return configuration
n = 5
goal_configuration = random_configuration(n)
current_configuration = random_configuration(n)
offsets = []
for i, t1 in enumerate(goal_configuration):
row = []
for j, t2 in enumerate(goal_configuration):
offset = numpy.linalg.inv(t1) @ t2
row.append(offset)
offsets.append(row)
print('Goal:')
print(offsets[0][1])
for i in range(10):
print('Current (%i):'%i)
print(numpy.linalg.inv(
current_configuration[0]) @ current_configuration[1])
current_configuration = relative_alignment(
current_configuration, offsets, 1)
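# --- Hedged usage sketch (not part of the original file) --------------------
# average_transforms() can also be exercised directly; the two pure
# translations below are illustrative values only.
if __name__ == '__main__':
    a = numpy.eye(4)
    a[:3, 3] = [10., 0., 0.]
    b = numpy.eye(4)
    b[:3, 3] = [0., 10., 0.]
    # identical rotations, so only the translation is averaged -> [5, 5, 0]
    print(average_transforms([a, b]))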
| 31.079545 | 96 | 0.586837 |
1d4a4f22e0dcd0a83c346e8dcde4f6d63727bd8f
| 6,287 |
py
|
Python
|
src/iiwa_ppo.py
|
LeonLester/Machin-title-in-progress-
|
777479d47b520dcdc6b09c247591b5fe1d6cbe8c
|
[
"MIT"
] | null | null | null |
src/iiwa_ppo.py
|
LeonLester/Machin-title-in-progress-
|
777479d47b520dcdc6b09c247591b5fe1d6cbe8c
|
[
"MIT"
] | null | null | null |
src/iiwa_ppo.py
|
LeonLester/Machin-title-in-progress-
|
777479d47b520dcdc6b09c247591b5fe1d6cbe8c
|
[
"MIT"
] | null | null | null |
import RobotDART as Rd
import numpy as np
from machin.frame.algorithms import PPO
from math import sqrt
import torch
from retry import retry
import pandas as pd
class Actor(torch.nn.Module):
def __init__(self, state_dim, action_num):
super().__init__()
self.fc1 = torch.nn.Linear(state_dim, 32)
self.fc2 = torch.nn.Linear(32, 16)
self.mu_head = torch.nn.Linear(16, action_num)
self.sigma_head = torch.nn.Linear(16, action_num)
self.action_range = 2
def forward(self, state, action=None):
a = torch.relu(self.fc1(state))
a = torch.relu(self.fc2(a))
mu = self.mu_head(a)
sigma = torch.nn.functional.softplus(self.sigma_head(a))
dist = torch.distributions.Normal(mu, sigma)
act = action if action is not None else dist.rsample()
act_entropy = dist.entropy()
# the suggested way to confine your actions within a valid range
# is not clamping, but remapping the distribution
act_log_prob = dist.log_prob(act)
act_tanh = torch.tanh(act)
act = act_tanh * self.action_range
# the distribution remapping process used in the original essay.
act_log_prob -= torch.log(self.action_range * (1 - act_tanh.pow(2)) + 1e-6)
act_log_prob = act_log_prob.sum(1, keepdim=True)
return act, act_log_prob, act_entropy
class Critic(torch.nn.Module):
def __init__(self, state_dim):
super().__init__()
self.fc1 = torch.nn.Linear(state_dim, 16)
self.fc2 = torch.nn.Linear(16, 16)
self.fc3 = torch.nn.Linear(16, 1)
def forward(self, state):
v = torch.relu(self.fc1(state))
v = torch.relu(self.fc2(v))
v = self.fc3(v)
return v
class Simulation:
def __init__(self):
timestep = 0.004
self.simu = Rd.RobotDARTSimu(timestep)
self.simu.set_collision_detector("fcl")
gconfig = Rd.gui.GraphicsConfiguration(1024, 768)
self.graphics = Rd.gui.Graphics(gconfig)
# self.simu.set_graphics(self.graphics)
# self.graphics.look_at([3., 1., 2.], [0., 0., 0.])
# load iiwa
packages = [("iiwa_description", "iiwa/iiwa_description")]
self.iiwa = Rd.Robot("iiwa/iiwa.urdf", packages)
self.iiwa.fix_to_world()
self.iiwa.set_actuator_types("servo")
positions = self.iiwa.positions()
self.iiwa.set_positions(positions)
robot_ghost = self.iiwa.clone_ghost()
initial_positions = [1, 0.8, -1.12, -1.47, 1.02, -0.45, 0.91]
self.iiwa.set_positions(initial_positions)
# add robot to the simulation
self.simu.add_robot(self.iiwa)
self.simu.add_robot(robot_ghost)
self.simu.add_floor()
def step(self, positions):
self.iiwa.set_commands(positions)
for _ in range(20):
if self.simu.step_world():
break
next_state = (self.iiwa.positions())
reward = 0
for pos in next_state:
reward += pos * pos
reward = - sqrt(reward)
done = False
return next_state, reward, done
# reset the simulation
def reset(self):
starting_state = [1, 0.8, -1.12, -1.47, 1.02, -0.45, 0.91]
self.iiwa.set_positions(starting_state)
return starting_state
@retry(Exception, tries=5, delay=0, backoff=0)
def ppo_sim(print_flag, max_episodes, max_steps):
iiwa_simulation = Simulation()
state_dim = 7
action_dim = 7
action_range = 2
# max_episodes = 500
# max_steps = 200
solved_reward = 500
solved_repeat = 500
data = [] # to get each episode's data
actor = Actor(state_dim, action_dim)
critic = Critic(state_dim)
ppo = PPO(actor,
critic,
torch.optim.Adam,
torch.nn.MSELoss(reduction='sum'),
actor_learning_rate=0.0001,
critic_learning_rate=0.0001,
batch_size=20
)
reward_fulfilled = 0
smoothed_total_reward = 0.0
for episode in range(1, max_episodes + 1):
episode_reward = 0.0
terminal = False
step = 0
state = torch.tensor(iiwa_simulation.reset(), dtype=torch.float32).view(1, state_dim)
tmp_observations = []
while not terminal and step < max_steps:
step += 1
with torch.no_grad():
# Observe the state of the environment and take action
# Take random actions in the first 20 episodes
if episode < 20:
action = ((2.0 * torch.rand(1, 7) - 1.0) * action_range)
torque = np.transpose(action)
else:
action = ppo.act({"state": state})[0]
torque = np.transpose(action)
next_state, reward, terminal = iiwa_simulation.step(torque)
next_state = torch.tensor(next_state, dtype=torch.float32).view(1, state_dim)
episode_reward += reward
tmp_observations.append({
"state": {"state": state},
"action": {"action": action},
"next_state": {"state": state},
"reward": reward,
"terminal": terminal or step == max_steps,
}
)
state = next_state
if print_flag:
print(f"Episode: [{episode:3d}/{max_episodes:3d}] Reward: {episode_reward:.2f}", end="\r")
print("", end="\n")
else:
data_curr = [episode, episode_reward]
data.append(data_curr)
smoothed_total_reward = smoothed_total_reward * 0.9 + episode_reward * 0.1
ppo.store_episode(tmp_observations)
if episode > 20:
ppo.update()
if smoothed_total_reward > solved_reward:
reward_fulfilled += 1
if reward_fulfilled >= solved_repeat:
print("Environment solved!")
exit(0)
else:
reward_fulfilled = 0
if not print_flag:
data_df = pd.DataFrame(data, columns=['Episode', 'Reward'])
return data_df
if __name__ == '__main__':
ppo_sim(1, 100, 200)
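# --- Hedged usage sketch (not part of the original file) --------------------
# When called with print_flag=0 (falsy), ppo_sim() returns a pandas DataFrame
# of per-episode rewards; a quick way to inspect a run is to plot it. The
# output filename below is an illustrative assumption.
def plot_rewards(max_episodes=100, max_steps=200):
    import matplotlib
    matplotlib.use('Agg')  # headless backend, since RobotDART owns the GUI
    import matplotlib.pyplot as plt  # noqa: F401  (pandas plotting backend)

    df = ppo_sim(0, max_episodes, max_steps)
    ax = df.plot(x='Episode', y='Reward', legend=False)
    ax.set_ylabel('Episode reward')
    ax.figure.savefig('iiwa_ppo_rewards.png', dpi=150)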
| 29.796209 | 102 | 0.580086 |
984b528ba9f527fef1429e85f8f81fe1f9d8f242
| 85,506 |
py
|
Python
|
src/genie/libs/parser/junos/show_route.py
|
filippohronsky/genieparser
|
85e4b7a8f101e5cd44d4d7116e0e7a1af13fe9df
|
[
"Apache-2.0"
] | 1 |
2020-01-07T15:34:41.000Z
|
2020-01-07T15:34:41.000Z
|
src/genie/libs/parser/junos/show_route.py
|
filippohronsky/genieparser
|
85e4b7a8f101e5cd44d4d7116e0e7a1af13fe9df
|
[
"Apache-2.0"
] | null | null | null |
src/genie/libs/parser/junos/show_route.py
|
filippohronsky/genieparser
|
85e4b7a8f101e5cd44d4d7116e0e7a1af13fe9df
|
[
"Apache-2.0"
] | null | null | null |
''' show_route.py
JUNOS parsers for the following commands:
* show route table {table}
* show route table {table} {prefix}
* show route
* show route protocol {protocol} extensive
* show route protocol {protocol} table {table} extensive
* show route protocol {protocol} table {table}
* show route protocol {protocol}
* show route protocol {protocol} {ip_address}
* show route instance detail
* show route protocol {protocol} table {table} extensive {destination}
* show route advertising-protocol {protocol} {ip_address}
* show route forwarding-table summary
* show route summary
* show route {route} extensive
* show route extensive
* show route extensive {destination}
'''
import re
# Metaparser
from genie.metaparser import MetaParser
from genie.metaparser.util.schemaengine import Any, Optional, Use, Schema
from genie.metaparser.util.exceptions import SchemaTypeError
'''
Schema for:
* show route table {table}
* show route table {table} {prefix}
* show route protocol {protocol} {ip_address}
* show route protocol {protocol} {ip_address} | no-more
'''
class ShowRouteTableSchema(MetaParser):
schema = {
'table_name': {
Any(): {
'destination_count': int,
'total_route_count': int,
'active_route_count': int,
'holddown_route_count': int,
'hidden_route_count': int,
'routes': {
Any(): {
'active_tag': str,
'protocol_name': str,
'preference': str,
Optional('preference2'): str,
'age': str,
'metric': str,
'next_hop': {
'next_hop_list': {
Any(): {
'to': str,
'via': str,
Optional('mpls_label'): str,
Optional('best_route'): str
}
}
}
}
}
}
}
}
'''
Parser for:
* show route table {table}
* show route table {table} {prefix}
'''
class ShowRouteTable(ShowRouteTableSchema):
cli_command = [
'show route table {table}',
'show route table {table} {prefix}',
]
def cli(self, table, prefix=None, output=None):
if output is None:
if table and prefix:
command = self.cli_command[1].format(table=table, prefix=prefix)
else:
command = self.cli_command[0].format(table=table)
out = self.device.execute(command)
else:
out = output
# inet.3: 3 destinations, 3 routes (3 active, 0 holddown, 0 hidden)
        r1 = re.compile(r'(?P<table_name>\S+):\s+(?P<destination_count>\d+)\s+'
                        r'destinations\,\s+(?P<total_route_count>\d+)\s+routes\s+'
                        r'\((?P<active_route_count>\d+)\s+active\,\s+(?P<holddown_route_count>\d+)'
                        r'\s+holddown\,\s+(?P<hidden_route_count>\d+)\s+hidden\)')
        # 10.64.4.4/32 *[LDP/9] 03:40:50, metric 110
        # 10.64.4.4/32 *[L-OSPF/9/5] 1d 02:16:51, metric 110
        r2 = re.compile(r'(?P<rt_destination>\S+)\s+(?P<active_tag>\*|\-\*)'
                        r'\[(?P<protocol_name>[\w\-]+)\/(?P<preference>\d+)(?:\/'
                        r'(?P<preference2>\d+))?\]\s+(?P<age>[\S ]+)\,\s+'
                        r'metric\s+(?P<metric>\d+)$')
        # > to 192.168.220.6 via ge-0/0/1.0
        # > to 192.168.220.6 via ge-0/0/1.0, Push 305550
        r3 = re.compile(r'(?:(?P<best_route>\>*))?\s*to\s+(?P<to>\S+)\s+via\s+'
                        r'(?P<via>[\w\d\/\-\.]+)\,*\s*(?:(?P<mpls_label>\S+\s+\d+))?')
parsed_output = {}
for line in out.splitlines():
line = line.strip()
# inet.3: 3 destinations, 3 routes (3 active, 0 holddown, 0 hidden)
result = r1.match(line)
if result:
group = result.groupdict()
table_name = group['table_name']
table_dict = parsed_output.setdefault('table_name', {})\
.setdefault(table_name, {})
table_dict['destination_count'] = int(group['destination_count'])
table_dict['total_route_count'] = int(group['total_route_count'])
table_dict['active_route_count'] = int(group['active_route_count'])
table_dict['holddown_route_count'] = int(group['holddown_route_count'])
table_dict['hidden_route_count'] = int(group['hidden_route_count'])
continue
# 10.64.4.4/32 *[LDP/9] 03:40:50, metric 110
result = r2.match(line)
if result:
group = result.groupdict()
rt_destination = group.pop('rt_destination', None)
route_dict = table_dict.setdefault('routes', {})\
.setdefault(rt_destination, {})
route_dict.update({k: v for k, v in group.items() if v})
continue
# > to 192.168.220.6 via ge-0/0/1.0
# > to 192.168.220.6 via ge-0/0/1.0, Push 305550
# to 10.2.94.2 via lt-1/2/0.49
result = r3.match(line)
if result:
max_index = table_dict.get('routes', {}).get(rt_destination, {})\
.get('next_hop', {})\
.get('next_hop_list', {}).keys()
if not max_index:
max_index = 1
else:
max_index = max(max_index) + 1
group = result.groupdict()
nh_dict = route_dict.setdefault('next_hop', {})\
.setdefault('next_hop_list', {})\
.setdefault(max_index, {})
nh_dict['to'] = group['to']
nh_dict['via'] = group['via']
mpls_label = group['mpls_label']
if mpls_label:
nh_dict['mpls_label'] = mpls_label
best_route = group['best_route']
if best_route:
nh_dict['best_route'] = best_route
continue
return parsed_output
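# --- Hedged usage sketch (not part of the original parser) ------------------
# Genie parsers are normally driven through a connected device; the device
# alias and table name below are illustrative assumptions, not fixtures from
# this repo:
#
#     parser = ShowRouteTable(device=uut)
#     parsed = parser.parse(table='inet.3')
#     for prefix, route in parsed['table_name']['inet.3']['routes'].items():
#         for hop in route['next_hop']['next_hop_list'].values():
#             print(prefix, '->', hop['to'], 'via', hop['via'])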
class ShowRouteSchema(MetaParser):
""" Schema for:
* show route protocol {protocol} {ip_address}
"""
"""
schema = {
Optional("@xmlns:junos"): str,
"route-information": {
Optional("@xmlns"): str,
"route-table": [
{
"active-route-count": str,
"destination-count": str,
"hidden-route-count": str,
"holddown-route-count": str,
"rt": [
{
Optional("@junos:style"): str,
"rt-destination": str,
"rt-entry": {
"active-tag": str,
"age": {
"#text": str,
Optional("@junos:seconds"): str
},
"current-active": str,
"last-active": str,
"metric": str,
"nh": {
"mpls-label": str,
"selected-next-hop": str,
"to": str,
"via": str
},
"nh-type": str,
"preference": str,
"preference2": str,
"protocol-name": str,
"rt-tag": str
}
}
],
"table-name": str,
"total-route-count": str
}
]
}
}
"""
def validate_route_table_list(value):
# Pass route-table list of dict in value
if not isinstance(value, list):
raise SchemaTypeError('route-table is not a list')
def validate_rt_list(value):
# Pass rt list of dict in value
if not isinstance(value, list):
raise SchemaTypeError('rt list is not a list')
def validate_nh_list(value):
# Pass nh list of dict in value
if not isinstance(value, list):
raise SchemaTypeError('nh list is not a list')
# Create nh-list Entry Schema
nh_schema = Schema({
Optional("mpls-label"): str,
Optional("selected-next-hop"): str,
Optional("nh-local-interface"): str,
Optional("nh-table"): str,
Optional("to"): str,
Optional("via"): str
})
# Validate each dictionary in list
for item in value:
nh_schema.validate(item)
return value
# Create rt-list Entry Schema
rt_schema = Schema({
Optional("@junos:style"): str,
Optional("rt-destination"): str,
"rt-entry": {
Optional("active-tag"): str,
"age": {
"#text": str,
Optional("@junos:seconds"): str
},
Optional('as-path'): str,
Optional("current-active"): str,
Optional("last-active"): str,
Optional("learned-from"): str,
Optional("local-preference"): str,
Optional("med"): str,
Optional("metric"): str,
Optional("metric2"): str,
Optional("nh"): Use(validate_nh_list),
Optional('nh-type'): str,
"preference": str,
Optional("preference2"): str,
"protocol-name": str,
Optional('rt-tag'): str,
Optional("validation-state"): str
}
})
# Validate each dictionary in list
for item in value:
rt_schema.validate(item)
return value
# Create RouteEntry Schema
route_entry_schema = Schema({
"active-route-count": str,
"destination-count": str,
"hidden-route-count": str,
"holddown-route-count": str,
Optional("rt"): Use(validate_rt_list),
"table-name": str,
"total-route-count": str
})
# Validate each dictionary in list
for item in value:
route_entry_schema.validate(item)
return value
# Main Schema
schema = {
Optional("@xmlns:junos"): str,
'route-information': {
Optional("@xmlns"): str,
'route-table': Use(validate_route_table_list)
}
}
class ShowRoute(ShowRouteSchema):
""" Parser for:
* show route
* show route protocol {protocol} {ip_address}
* show route protocol {protocol}
* show route protocol {protocol} table {table}
"""
cli_command = [
'show route',
'show route protocol {protocol}',
'show route protocol {protocol} {ip_address}',
'show route protocol {protocol} table {table}']
def cli(self, protocol=None, ip_address=None, table=None, output=None):
if not output:
if ip_address:
cmd = self.cli_command[2].format(
protocol=protocol,
ip_address=ip_address)
elif table:
cmd = self.cli_command[3].format(
protocol=protocol,
table=table)
elif protocol:
cmd = self.cli_command[1].format(
protocol=protocol)
else:
cmd = self.cli_command[0]
out = self.device.execute(cmd)
else:
out = output
ret_dict = {}
rt_destination = None
# inet.0: 932 destinations, 1618 routes (932 active, 0 holddown, 0 hidden)
p1 = re.compile(r'^(?P<table_name>\S+): +(?P<destination_count>\d+) +'
r'destinations, +(?P<total_route_count>\d+) +routes +'
r'\((?P<active_route_count>\d+) +active, +(?P<holddown>\d+) +'
r'holddown, +(?P<hidden>\d+) +hidden\)$')
# 10.220.0.0/16 *[BGP/170] 3w3d 03:12:24, MED 12003, localpref 120, from 10.169.14.240
# 10.169.14.240/32 *[Static/5] 5w2d 15:42:25
# *[OSPF3/10] 3w1d 17:03:23, metric 5
# 0.0.0.0/0 *[OSPF/150/10] 3w3d 03:24:58, metric 101, tag 0
# 167963 *[LDP/9] 1w6d 20:41:01, metric 1, metric2 100, tag 65000500
# 10.16.2.2/32 *[Static/5] 00:00:02
p2 = re.compile(r'^((?P<rt_destination>\S+) +)?(?P<active_tag>[\*\+\-])?'
r'\[(?P<protocol>[\w\-]+)\/(?P<preference>\d+)'
r'(\/(?P<preference2>\d+))?\] +(?P<text>\S+( +\S+)?)'
r'(, +metric +(?P<metric>\d+))?(, +metric2 +(?P<metric2>\d+))?'
r'(, +tag +(?P<rt_tag>\d+))?(, +MED +(?P<med>\w+))?'
r'(, +localpref +(?P<local_preference>\d+))?'
r'(, +from +(?P<learned_from>\S+))?$')
# MultiRecv
p2_1 = re.compile(r'^(?P<nh_type>MultiRecv)$')
# > to 10.169.14.121 via ge-0/0/1.0
p3 = re.compile(r'^(\> +)?(to +(?P<to>\S+) +)?via +(?P<via>\S+)'
r'(, +(?P<mpls_label>[\S\s]+))?$')
# Local via fxp0.0
p3_1 = re.compile(r'^Local +via +(?P<nh_local_interface>\S+)$')
# AS path: (65151 65000) I, validation-state: unverified
# AS path: I
p4 = re.compile(r'AS +path:(?P<as_path>( +\([\S\s]+\))? +\S+)'
r'(, validation-state: +(?P<validation_state>\S+))?$')
# to table inet.0
p5 = re.compile(r'^to +table +(?P<nh_table>\S+)$')
# 2001:db8:eb18:ca45::1/128
pIP = re.compile(r'^(?P<rt_destination>[\w:\/]+)$')
for line in out.splitlines():
line = line.strip()
# inet.0: 932 destinations, 1618 routes (932 active, 0 holddown, 0 hidden)
m = p1.match(line)
if m:
group = m.groupdict()
table_name = group['table_name']
destination_count = group['destination_count']
total_route_count = group['total_route_count']
active_route_count = group['active_route_count']
holddown = group['holddown']
hidden = group['hidden']
route_information_dict = ret_dict.setdefault('route-information', {})
route_table_list = route_information_dict.setdefault('route-table', [])
route_table_dict = {}
route_table_dict.update({'active-route-count': active_route_count})
route_table_dict.update({'destination-count': destination_count})
route_table_dict.update({'hidden-route-count': hidden})
route_table_dict.update({'holddown-route-count': holddown})
route_table_dict.update({'table-name': table_name})
route_table_dict.update({'total-route-count': total_route_count})
route_table_list.append(route_table_dict)
continue
# 10.169.14.240/32 *[Static/5] 5w2d 15:42:25
# *[OSPF3/10] 3w1d 17:03:23, metric 5
m = p2.match(line)
if m:
group = m.groupdict()
if not rt_destination:
rt_destination = group['rt_destination']
active_tag = group['active_tag']
protocol = group['protocol']
preference = group['preference']
preference2 = group['preference2']
text = group['text']
metric = group['metric']
metric2 = group['metric2']
rt_tag = group['rt_tag']
learned_from = group['learned_from']
local_preference = group['local_preference']
med = group['med']
rt_list = route_table_dict.setdefault('rt', [])
rt_dict = {}
rt_list.append(rt_dict)
rt_entry_dict = {}
if active_tag:
rt_entry_dict.update({'active-tag': active_tag})
rt_entry_dict.update({'protocol-name': protocol})
rt_entry_dict.update({'preference': preference})
if preference2:
rt_entry_dict.update({'preference2': preference2})
if metric:
rt_entry_dict.update({'metric': metric})
if metric2:
rt_entry_dict.update({'metric2': metric2})
if rt_tag:
rt_entry_dict.update({'rt-tag': rt_tag})
if learned_from:
rt_entry_dict.update({'learned-from': learned_from})
if local_preference:
rt_entry_dict.update({'local-preference': local_preference})
if med:
rt_entry_dict.update({'med': med})
age_dict = rt_entry_dict.setdefault('age', {})
age_dict.update({'#text': text})
rt_dict.update({'rt-entry': rt_entry_dict})
if rt_destination:
rt_dict.update({'rt-destination': rt_destination})
rt_destination = None
continue
m = p2_1.match(line)
if m:
group = m.groupdict()
rt_entry_dict.update({'nh-type': group['nh_type']})
continue
# > to 10.169.14.121 via ge-0/0/1.0
m = p3.match(line)
if m:
group = m.groupdict()
nh_list = rt_entry_dict.setdefault('nh', [])
nh_dict = {}
nh_dict.update({k.replace('_', '-'):v for k, v in group.items() if v is not None})
nh_list.append(nh_dict)
continue
# Local via fxp0.0
m = p3_1.match(line)
if m:
group = m.groupdict()
nh_list = rt_entry_dict.setdefault('nh', [])
nh_dict = {}
nh_dict.update({k.replace('_', '-'):v for k, v in group.items() if v is not None})
nh_list.append(nh_dict)
continue
# AS path: (65151 65000) I, validation-state: unverified
# AS path: I
m = p4.match(line)
if m:
group = m.groupdict()
rt_entry_dict.update({k.replace('_', '-'):v for k, v in group.items() if v is not None})
continue
# to table inet.0
m = p5.match(line)
if m:
group = m.groupdict()
nh_list = rt_entry_dict.setdefault('nh', [])
nh_dict = {}
nh_dict.update({k.replace('_', '-'):v for k, v in group.items() if v is not None})
nh_list.append(nh_dict)
continue
# 2001:db8:eb18:ca45::1/128
m = pIP.match(line)
if m:
group = m.groupdict()
rt_destination = group['rt_destination']
continue
return ret_dict
class ShowRouteProtocolNoMore(ShowRoute):
""" Parser for:
* show route protocol {protocol} {ip_address} | no-more
"""
cli_command = 'show route protocol {protocol} {ip_address} | no-more'
def cli(self, protocol, ip_address, output=None):
if not output:
cmd = self.cli_command.format(
protocol=protocol,
ip_address=ip_address)
out = self.device.execute(cmd)
else:
out = output
return super().cli(protocol=protocol,
ip_address=ip_address, output=out)
class ShowRouteProtocolExtensiveSchema(MetaParser):
""" Schema for:
* show route protocol {protocol} extensive
"""
"""
schema = {
Optional("@xmlns:junos"): str,
"route-information": {
Optional("@xmlns"): str,
"route-table": [
{
"active-route-count": str,
"destination-count": str,
"hidden-route-count": str,
"holddown-route-count": str,
"rt": [
{
Optional("@junos:style"): str,
"rt-announced-count": str,
"rt-destination": str,
"rt-entry": {
"active-tag": str,
"age": {
"#text": str,
Optional("@junos:seconds"): str
},
"announce-bits": str,
"announce-tasks": str,
"as-path": str,
"bgp-path-attributes": {
"attr-as-path-effective": {
"aspath-effective-string": str,
"attr-value": str
}
},
"current-active": str,
"inactive-reason": str,
"last-active": str,
"local-as": str,
"metric": str,
"nh": {
Optional("@junos:indent"): str,
"label-element": str,
"label-element-childcount": str,
"label-element-lspid": str,
"label-element-parent": str,
"label-element-refcount": str,
"label-ttl-action": str,
"load-balance-label": str,
"mpls-label": str,
"nh-string": str,
"selected-next-hop": str,
"session": str,
"to": str,
"via": str,
"weight": str
},
"nh-address": str,
"nh-index": str,
"nh-kernel-id": str,
"nh-reference-count": str,
"nh-type": str,
"preference": str,
"preference2": str,
"protocol-name": str,
"rt-entry-state": str,
"rt-ospf-area": str,
"rt-tag": str,
"task-name": str,
"validation-state": str
},
"rt-entry-count": {
"#text": str,
Optional("@junos:format"): str
},
"rt-prefix-length": str,
"rt-state": str,
"tsi": {
"#text": str,
Optional("@junos:indent"): str
}
}
],
"table-name": str,
"total-route-count": str
}
]
}
}
"""
def validate_route_table_list(value):
# Pass route-table list of dict in value
if not isinstance(value, list):
raise SchemaTypeError('route-table is not a list')
def validate_rt_list(value):
# Pass rt list of dict in value
if not isinstance(value, list):
raise SchemaTypeError('rt is not a list')
def validate_rt_entry_list(value):
if isinstance(value, dict):
value = [value]
if not isinstance(value, list):
raise SchemaTypeError('rt-entry is not a list')
def validate_nh_list(value):
# Pass nh list of dict in value
if not isinstance(value, list):
raise SchemaTypeError('nh is not a list')
nh_schema = Schema({
Optional("@junos:indent"): str,
Optional("label-element"): str,
Optional("label-element-childcount"): str,
Optional("label-element-lspid"): str,
Optional("label-element-parent"): str,
Optional("label-element-refcount"): str,
Optional("label-ttl-action"): str,
Optional("load-balance-label"): str,
Optional("mpls-label"): str,
"nh-string": str,
Optional("selected-next-hop"): str,
Optional("session"): str,
Optional("to"): str,
"via": str,
Optional("weight"): str
})
# Validate each dictionary in list
for item in value:
nh_schema.validate(item)
return value
def validate_protocol_nh_list(value):
# Pass nh list of dict in value
if isinstance(value, dict):
value = [value]
if not isinstance(value, list):
raise SchemaTypeError('protocol-nh is not a list')
def validate_nh_list(value):
# Pass nh list of dict in value
if isinstance(value, dict):
value = [value]
if not isinstance(value, list):
raise SchemaTypeError('nh is not a list')
nh_schema = Schema({
Optional("@junos:indent"): str,
Optional("label-element"): str,
Optional("label-element-childcount"): str,
Optional("label-element-lspid"): str,
Optional("label-element-parent"): str,
Optional("label-element-refcount"): str,
Optional("label-ttl-action"): str,
Optional("load-balance-label"): str,
Optional("mpls-label"): str,
"nh-string": str,
Optional("selected-next-hop"): str,
Optional("session"): str,
Optional("to"): str,
"via": str,
Optional("weight"): str
})
# Validate each dictionary in list
for item in value:
nh_schema.validate(item)
return value
protocol_nh_schema = Schema({
Optional("@junos:indent"): str,
Optional("forwarding-nh-count"): str,
"indirect-nh": str,
Optional("label-ttl-action"): str,
Optional("load-balance-label"): str,
Optional("metric"): str,
Optional("mpls-label"): str,
Optional("nh"): Use(validate_nh_list),
Optional("nh-index"): str,
Optional("nh-type"): str,
Optional("output"): str,
"to": str
})
# Validate each dictionary in list
for item in value:
protocol_nh_schema.validate(item)
return value
rt_entry_schema = Schema({
Optional("active-tag"): str,
Optional("age"): {
"#text": str,
Optional("@junos:seconds"): str
},
Optional("announce-bits"): str,
Optional("announce-tasks"): str,
Optional("as-path"): str,
Optional("bgp-path-attributes"): {
"attr-as-path-effective": {
"aspath-effective-string": str,
"attr-value": str
}
},
Optional("current-active"): str,
Optional("inactive-reason"): str,
Optional("last-active"): str,
Optional("local-as"): str,
Optional("metric"): str,
Optional("nh"): Use(validate_nh_list),
"nh-address": str,
Optional("nh-index"): str,
Optional("nh-kernel-id"): str,
"nh-reference-count": str,
Optional("nh-type"): str,
"preference": str,
Optional("preference2"): str,
"protocol-name": str,
Optional("protocol-nh"): Use(validate_protocol_nh_list),
"rt-entry-state": str,
Optional("rt-ospf-area"): str,
Optional("rt-tag"): str,
"task-name": str,
Optional("validation-state"): str
})
# Validate each dictionary in list
for item in value:
rt_entry_schema.validate(item)
return value
# Create rt Schema
rt_schema = Schema({
Optional("@junos:style"): str,
"rt-announced-count": str,
"rt-destination": str,
Optional("rt-entry"): Use(validate_rt_entry_list),
"rt-entry-count": {
"#text": str,
Optional("@junos:format"): str
},
Optional("rt-prefix-length"): str,
Optional("rt-state"): str,
Optional("tsi"): {
"#text": str,
Optional("@junos:indent"): str
}
})
# Validate each dictionary in list
for item in value:
rt_schema.validate(item)
return value
# Create Route Table Schema
route_table_schema = Schema({
"active-route-count": str,
"destination-count": str,
"hidden-route-count": str,
"holddown-route-count": str,
Optional("rt"): Use(validate_rt_list),
"table-name": str,
"total-route-count": str
})
# Validate each dictionary in list
for item in value:
route_table_schema.validate(item)
return value
# Main Schema
schema = {
Optional("@xmlns:junos"): str,
"route-information": {
Optional("@xmlns"): str,
"route-table": Use(validate_route_table_list)
}
}
class ShowRouteProtocolExtensive(ShowRouteProtocolExtensiveSchema):
""" Parser for:
* show route protocol {protocol} extensive
* show route protocol {protocol} table {table} extensive
* show route protocol {protocol} table {table} extensive {destination}
* show route {route} extensive
* show route extensive
* show route extensive {destination}
"""
cli_command = ['show route protocol {protocol} extensive',
'show route protocol {protocol} table {table} extensive',
'show route protocol {protocol} table {table} extensive {destination}',
'show route {route} extensive',
'show route extensive',
'show route extensive {destination}']
def cli(self, protocol=None, table=None, destination=None, route=None, output=None):
if not output:
if protocol and table and destination:
cmd = self.cli_command[2].format(
protocol=protocol,
table=table,
destination=destination)
elif table and protocol:
cmd = self.cli_command[1].format(
protocol=protocol,
table=table)
elif protocol:
cmd = self.cli_command[0].format(
protocol=protocol)
elif route:
cmd = self.cli_command[3].format(
route=route)
elif destination:
cmd = self.cli_command[5].format(
destination=destination)
else:
cmd = self.cli_command[4]
out = self.device.execute(cmd)
else:
out = output
ret_dict = {}
state_type = None
forwarding_nh_count = None
protocol_nh_found = None
originating_rib_found = None
# inet.0: 929 destinations, 1615 routes (929 active, 0 holddown, 0 hidden)
p1 = re.compile(r'^(?P<table_name>\S+): +(?P<destination_count>\d+) +'
r'destinations, +(?P<total_route_count>\d+) +routes +'
r'\((?P<active_route_count>\d+) +active, +(?P<holddown_route_count>\d+) +'
r'holddown, +(?P<hidden_route_count>\d+) +hidden\)$')
# 0.0.0.0/0 (1 entry, 1 announced)
# 10.1.0.0/24 (2 entries, 1 announced)
# 0.0.0.0 (1 entry, 1 announced)
p2 = re.compile(r'^(?P<rt_destination>\S+)(\/(?P<rt_prefix_length>\d+))? +'
r'\((?P<format>(?P<text>\d+) +(entry|entries)), +(?P<announced>\d+) +announced\)$')
# State: <FlashAll>
# State: <Active Int Ext>
p3 = re.compile(r'State: +\<(?P<rt_state>[\S\s]+)\>$')
# *OSPF Preference: 150/10
# *BGP Preference: 170/-121
p4 = re.compile(r'^(?P<active_tag>\*)?(?P<protocol>\S+)\s+'
r'Preference:\s+(?P<preference>\d+)(\/(\-)?(?P<preference2>\d+))?$')
# Next hop type: Router, Next hop index: 613
p5 = re.compile(r'^Next +hop type: +(?P<nh_type>\S+), +Next +hop +'
r'index: +(?P<nh_index>\d+)$')
# Address: 0xdfa7934
p6 = re.compile(r'^Address: +(?P<nh_address>\S+)$')
# Next-hop reference count: 458
p7 = re.compile(r'^Next-hop +reference +count: +(?P<nh_reference_count>\d+)$')
# Next hop: 10.169.14.121 via ge-0/0/1.0 weight 0x1, selected
# Nexthop: 10.169.14.121 via ge-0/0/1.0
p8 = re.compile(r'^(?P<nh_string>Next *hop):( +(?P<to>\S+))? +via +(?P<via>\S+)'
r'( +weight +(?P<weight>\w+))?(, +(?P<selected_next_hop>\w+))?$')
# Protocol next hop: 10.169.14.240
p8_1 = re.compile(r'^Protocol +next +hop: +(?P<to>\S+)( +Metric: +(?P<metric>\d+))?$')
# Session Id: 0x141
p9 = re.compile(r'^Session +Id: +\d+[a-z]+(?P<session_id>\w+)$')
# Local AS: 65171
p10 = re.compile(r'^Local +AS: (?P<local_as>\d+)$')
# Age: 3w2d 4:43:35 Metric: 101
# Age: 3:07:25 Metric: 200
# Age: 29w6d 21:42:46
p11 = re.compile(r'^Age:\s+(?P<age>\w+(\s+\S+)?)(\s+Metric:\s+(?P<metric>\d+))?$')
# Validation State: unverified
p12 = re.compile(r'^Validation +State: +(?P<validation_state>\S+)$')
# Tag: 0
p13 = re.compile(r'^Tag: +(?P<rt_tag>\d+)$')
# Task: OSPF
p14 = re.compile(r'^Task: +(?P<task>\S+)$')
# Announcement bits (3): 0-KRT 5-LDP 7-Resolve tree 3
p15 = re.compile(r'^Announcement +bits +\((?P<announce_bits>\d+)\): +'
r'(?P<announce_tasks>[\S\s]+)$')
# AS path: I
p16 = re.compile(r'^(?P<aspath_effective_string>AS +path:) +(?P<attr_value>\S+)$')
# KRT in-kernel 0.0.0.0/0 -> {10.169.14.121}
p17 = re.compile(r'^(?P<text>KRT +in-kernel+[\S\s]+)$')
# Inactive reason: Route Preference
p18 = re.compile(r'^Inactive\s+reason: +(?P<inactive_reason>[\S\s]+)$')
# Area: 0.0.0.8
p19 = re.compile(r'^Area: +(?P<rt_ospf_area>\S+)$')
# Label operation: Push 17000
# Label operation: Push 17000, Push 1650, Push 1913(top)
p20 = re.compile(r'^Label +operation: +(?P<mpls_label>[\S\s]+)$')
# Label TTL action: no-prop-ttl
# Label TTL action: no-prop-ttl, no-prop-ttl, no-prop-ttl(top)
p21 = re.compile(r'^Label +TTL +action: +(?P<label_ttl_action>[\S\s]+)$')
# Load balance label: Label 17000: None; Label 1650: None; Label 1913: None;
p22 = re.compile(r'^Load +balance +label: +(?P<load_balance_label>[\S\s]+)$')
# Label element ptr: 0xc5f6ec0
p23 = re.compile(r'^Label +element +ptr: +(?P<label_element>\S+)$')
# Label parent element ptr: 0x0
p24 = re.compile(r'^Label +parent +element +ptr: +(?P<label_element_parent>\S+)$')
# Label element references: 2
p25 = re.compile(r'^Label +element +references: +(?P<label_element_refcount>\d+)$')
# Label element child references: 1
p26 = re.compile(r'^Label +element +child +references: +(?P<label_element_childcount>\d+)$')
# Label element lsp id: 0
p27 = re.compile(r'^Label +element +lsp +id: +(?P<label_element_lspid>\d+)$')
# Task: OSPF3 I/O./var/run/ppmd_control
p28 = re.compile(r'^Task: +(?P<task_name>[\S\s]+)$')
# OSPF3 realm ipv6-unicast area : 0.0.0.0, LSA ID : 0.0.0.1, LSA type : Extern
p29 = re.compile(r'^OSPF3\s+realm\s+ipv6-unicast\s+area\s:[\S\s]+$')
# Page 0 idx 1, (group hktGCS002 type Internal) Type 1 val 0x10c0b9b0 (adv_entry)
p30 = re.compile(r'^Page +\d+ +idx +\d+[\S\s]+$')
# Advertised metrics:
# Flags: Nexthop Change
# Nexthop: Self
# MED: 12003
# Localpref: 120
# AS path: [65171] (65151 65000) I
# Communities: 65001:10 65151:244
# Path 10.220.0.0
# from 10.169.14.240
# Vector len 4. Val: 1
p31 = re.compile(r'^(Advertised +metrics:)|'
r'(Flags: +)|(Nexthop: +)|(MED: +)|'
r'(Localpref: +)|(AS +path:)|(Communities:)|'
r'(Path +\S+)|(from +\S+)|(Vector +len)')
# Indirect next hop: 0xc285884 1048574 INH Session ID: 0x1ac
p32 = re.compile(r'^Indirect +next +hop: +(?P<indirect_nh>[\S\s]+)$')
# Indirect next hops: 1
p33 = re.compile(r'^Indirect +next +hops: +(?P<forwarding_nh_count>\d+)$')
# 10.169.14.240/32 Originating RIB: inet.0
p34 = re.compile(r'^\S+ +Originating +RIB: +[\S\s]+$')
# Node path count: 1
# Forwarding nexthops: 1
p35 = re.compile(r'^(Node +path +count: +)|(Forwarding +nexthops: +)[\S\s]+$')
for line in out.splitlines():
line = line.strip()
# inet.0: 929 destinations, 1615 routes (929 active, 0 holddown, 0 hidden)
m = p1.match(line)
if m:
group = m.groupdict()
route_table = ret_dict.setdefault('route-information', {}). \
setdefault('route-table', [])
route_table_dict = {k.replace('_', '-'):
v for k, v in group.items() if v is not None}
route_table.append(route_table_dict)
continue
# 0.0.0.0/0 (1 entry, 1 announced)
# 10.1.0.0/24 (2 entries, 1 announced)
m = p2.match(line)
if m:
group = m.groupdict()
state_type = 'route_table'
rt_destination = group['rt_destination']
rt_format = group['format']
text = group['text']
announced = group['announced']
rt_prefix_length = group['rt_prefix_length']
rt_list = route_table_dict.setdefault('rt', [])
rt_dict = {}
rt_dict.update({'rt-announced-count' : announced})
rt_dict.update({'rt-destination' : rt_destination})
if rt_prefix_length:
rt_dict.update({'rt-prefix-length' : rt_prefix_length})
rt_entry_count_dict = rt_dict.setdefault('rt-entry-count', {})
rt_entry_count_dict.update({'#text': text})
rt_entry_count_dict.update({'@junos:format': rt_format})
rt_list.append(rt_dict)
continue
# State: <FlashAll>
# State: <Active Int Ext>
m = p3.match(line)
if m:
group = m.groupdict()
if state_type == 'route_table':
rt_dict.update({'rt-state': group['rt_state']})
elif state_type == 'protocol':
rt_entry_dict.update({'rt-entry-state': group['rt_state']})
continue
# *OSPF Preference: 150/10
m = p4.match(line)
if m:
group = m.groupdict()
state_type = 'protocol'
protocol_nh_found = None
originating_rib_found = None
active_tag = group['active_tag']
protocol_name = group['protocol']
preference = group['preference']
preference2 = group['preference2']
rt_entry_exist = rt_dict.get('rt-entry', None)
if not rt_entry_exist:
rt_entry_dict = rt_dict.setdefault('rt-entry', {})
else:
if isinstance(rt_entry_exist, list):
rt_entry_dict = {}
rt_entry_exist.append(rt_entry_dict)
else:
old_rt_entry_dict = rt_entry_exist
rt_entry_dict = {}
rt_dict['rt-entry'] = []
rt_dict_list = rt_dict['rt-entry']
rt_dict_list.append(old_rt_entry_dict)
rt_dict_list.append(rt_entry_dict)
if active_tag:
rt_entry_dict.update({'active-tag': active_tag})
rt_entry_dict.update({'protocol-name': protocol_name})
rt_entry_dict.update({'preference': preference})
if preference2:
rt_entry_dict.update({'preference2': preference2})
continue
# Next hop type: Router, Next hop index: 613
m = p5.match(line)
if m:
group = m.groupdict()
nh_type = group['nh_type']
nh_index = group['nh_index']
rt_entry_dict.update({'nh-type': nh_type})
rt_entry_dict.update({'nh-index': nh_index})
continue
# Address: 0xdfa7934
m = p6.match(line)
if m:
group = m.groupdict()
rt_entry_dict.update({'nh-address': group['nh_address']})
continue
# Next-hop reference count: 458
m = p7.match(line)
if m:
group = m.groupdict()
rt_entry_dict.update({'nh-reference-count': group['nh_reference_count']})
continue
# Next hop: 10.169.14.121 via ge-0/0/1.0 weight 0x1, selected
m = p8.match(line)
if m:
group = m.groupdict()
if originating_rib_found:
proto_output = protocol_nh_dict.get('output', '')
proto_output = '{}{}\n'.format(proto_output, line)
protocol_nh_dict.update({'output': proto_output})
continue
selected_next_hop = group['selected_next_hop']
if protocol_nh_found:
nh_list = protocol_nh_dict.setdefault('nh', [])
proto_nh_dict = {}
nh_list.append(proto_nh_dict)
else:
nh_list = rt_entry_dict.setdefault('nh', [])
nh_dict = {}
nh_list.append(nh_dict)
keys = ['to', 'via', 'weight', 'nh_string']
for key in keys:
v = group[key]
if v:
if protocol_nh_found:
proto_nh_dict.update({key.replace('_', '-'): v})
else:
nh_dict.update({key.replace('_', '-'): v})
continue
# Protocol Next hop: 10.169.14.121 via ge-0/0/1.0 weight 0x1, selected
m = p8_1.match(line)
if m:
group = m.groupdict()
protocol_nh_found = True
protocol_nh_list = rt_entry_dict.setdefault('protocol-nh', [])
protocol_nh_dict = {k.replace('_', '-'):
v for k, v in group.items() if v is not None}
if forwarding_nh_count:
protocol_nh_dict.update({'forwarding-nh-count' : forwarding_nh_count})
protocol_nh_list.append(protocol_nh_dict)
forwarding_nh_count = None
continue
# Session Id: 0x141
m = p9.match(line)
if m:
group = m.groupdict()
if originating_rib_found:
proto_output = protocol_nh_dict.get('output', '')
proto_output = '{}{}\n'.format(proto_output, line)
protocol_nh_dict.update({'output': proto_output})
elif protocol_nh_found:
proto_nh_dict.update({'session': group['session_id']})
else:
nh_dict.update({'session': group['session_id']})
continue
# Local AS: 65171
m = p10.match(line)
if m:
group = m.groupdict()
rt_entry_dict.update({'local-as': group['local_as']})
continue
# Age: 3w2d 4:43:35 Metric: 101
m = p11.match(line)
if m:
group = m.groupdict()
age_dict = rt_entry_dict.setdefault('age', {})
age_dict.update({'#text': group['age']})
metric = group['metric']
if metric:
rt_entry_dict.update({'metric': metric})
continue
# Validation State: unverified
m = p12.match(line)
if m:
group = m.groupdict()
rt_entry_dict.update({'validation-state': group['validation_state']})
continue
# Tag: 0
m = p13.match(line)
if m:
group = m.groupdict()
rt_entry_dict.update({'rt-tag': group['rt_tag']})
continue
# Task: OSPF
m = p14.match(line)
if m:
group = m.groupdict()
rt_entry_dict.update({'task-name': group['task']})
continue
# Announcement bits (3): 0-KRT 5-LDP 7-Resolve tree 3
m = p15.match(line)
if m:
group = m.groupdict()
rt_entry_dict.update({'announce-bits': group['announce_bits']})
rt_entry_dict.update({'announce-tasks': group['announce_tasks']})
continue
# AS path: I
m = p16.match(line)
if m:
group = m.groupdict()
attr_as_path_dict = rt_entry_dict.setdefault('bgp-path-attributes', {}). \
setdefault('attr-as-path-effective', {})
rt_entry_dict.update({'as-path': line})
attr_as_path_dict.update({'aspath-effective-string':
group['aspath_effective_string']})
attr_as_path_dict.update({'attr-value': group['attr_value']})
continue
# KRT in-kernel 0.0.0.0/0 -> {10.169.14.121}
m = p17.match(line)
if m:
group = m.groupdict()
tsi_dict = rt_dict.setdefault('tsi', {})
tsi_dict.update({'#text': group['text']})
continue
# Inactive reason: Route Preference
m = p18.match(line)
if m:
group = m.groupdict()
rt_entry_dict.update({'inactive-reason': group['inactive_reason']})
continue
# Area: 0.0.0.8
m = p19.match(line)
if m:
group = m.groupdict()
rt_entry_dict.update({'rt-ospf-area': group['rt_ospf_area']})
continue
# Label operation: Push 17000
# Label operation: Push 17000, Push 1650, Push 1913(top)
m = p20.match(line)
if m:
group = m.groupdict()
if protocol_nh_found:
protocol_nh_dict.update({k.replace('_', '-'):
v for k, v in group.items() if v is not None})
else:
nh_dict.update({k.replace('_', '-'):
v for k, v in group.items() if v is not None})
continue
# Label TTL action: no-prop-ttl
# Label TTL action: no-prop-ttl, no-prop-ttl, no-prop-ttl(top)
m = p21.match(line)
if m:
group = m.groupdict()
if protocol_nh_found:
protocol_nh_dict.update({k.replace('_', '-'):
v for k, v in group.items() if v is not None})
else:
nh_dict.update({k.replace('_', '-'):
v for k, v in group.items() if v is not None})
continue
# Load balance label: Label 17000: None; Label 1650: None; Label 1913: None;
m = p22.match(line)
if m:
group = m.groupdict()
if protocol_nh_found:
protocol_nh_dict.update({k.replace('_', '-'):
v for k, v in group.items() if v is not None})
else:
nh_dict.update({k.replace('_', '-'):
v for k, v in group.items() if v is not None})
continue
# Label element ptr: 0xc5f6ec0
m = p23.match(line)
if m:
group = m.groupdict()
if protocol_nh_found:
protocol_nh_dict.update({k.replace('_', '-'):
v for k, v in group.items() if v is not None})
else:
nh_dict.update({k.replace('_', '-'):
v for k, v in group.items() if v is not None})
continue
# Label parent element ptr: 0x0
m = p24.match(line)
if m:
group = m.groupdict()
nh_dict.update({k.replace('_', '-'):
v for k, v in group.items() if v is not None})
continue
# Label element references: 2
m = p25.match(line)
if m:
group = m.groupdict()
nh_dict.update({k.replace('_', '-'):
v for k, v in group.items() if v is not None})
continue
# Label element child references: 1
m = p26.match(line)
if m:
group = m.groupdict()
nh_dict.update({k.replace('_', '-'):
v for k, v in group.items() if v is not None})
continue
# Label element lsp id: 0
m = p27.match(line)
if m:
group = m.groupdict()
nh_dict.update({k.replace('_', '-'):
v for k, v in group.items() if v is not None})
continue
# Task: OSPF3 I/O./var/run/ppmd_control
m = p28.match(line)
if m:
group = m.groupdict()
rt_entry_dict.update({k.replace('_', '-'):
v for k, v in group.items() if v is not None})
continue
# OSPF3 realm ipv6-unicast area : 0.0.0.0, LSA ID : 0.0.0.1, LSA type : Extern
m = p29.match(line)
if m:
group = m.groupdict()
text = tsi_dict.get('#text', '')
tsi_dict.update({'#text': '{}\n{}'.format(text, line)})
continue
# Page 0 idx 1, (group hktGCS002 type Internal) Type 1 val 0x10c0b9b0 (adv_entry)
m = p30.match(line)
if m:
group = m.groupdict()
text = tsi_dict.get('#text', '')
tsi_dict.update({'#text': '{}\n{}'.format(text, line)})
continue
# Advertised metrics:
# Flags: Nexthop Change
# Nexthop: Self
# MED: 12003
# Localpref: 120
# AS path: [65171] (65151 65000) I
# Communities: 65001:10 65151:244
# Path 10.220.0.0
# from 10.169.14.240
# Vector len 4. Val: 1
m = p31.match(line)
if m:
group = m.groupdict()
text = tsi_dict.get('#text', '')
tsi_dict.update({'#text': '{}\n{}'.format(text, line)})
continue
# Indirect next hop: 0xc285884 1048574 INH Session ID: 0x1ac
m = p32.match(line)
if m:
group = m.groupdict()
protocol_nh_dict.update({k.replace('_', '-'):
v for k, v in group.items() if v is not None})
continue
# Indirect next hops: 1
m = p33.match(line)
if m:
group = m.groupdict()
protocol_nh_found = True
forwarding_nh_count = group['forwarding_nh_count']
continue
# 10.169.14.240/32 Originating RIB: inet.0
m = p34.match(line)
if m:
originating_rib_found = True
proto_output = protocol_nh_dict.get('output', '')
proto_output = '{}{}\n'.format(proto_output, line)
protocol_nh_dict.update({'output': proto_output})
continue
# Node path count: 1
# Forwarding nexthops: 1
m = p35.match(line)
if m:
proto_output = protocol_nh_dict.get('output', '')
proto_output = '{}{}\n'.format(proto_output, line)
protocol_nh_dict.update({'output': proto_output})
continue
return ret_dict
class ShowRouteForwardingTableSummarySchema(MetaParser):
""" Schema for:
* show route forwarding-table summary
"""
# schema = {
# Optional("@xmlns:junos"): str,
# "forwarding-table-information": {
# Optional("@xmlns"): str,
# "route-table": [
# {
# "address-family": str,
# "enabled-protocols": str,
# "route-table-summary": [
# {
# "route-count": str,
# "route-table-type": str
# }
# ],
# "table-name": str
# }
# ]
# }
# }
def validate_route_table_list(value):
# Pass route-table list of dict in value
if not isinstance(value, list):
raise SchemaTypeError('route-table is not a list')
def validate_route_table_summary_list(value):
# Pass route-table-summary list of dict in value
if not isinstance(value, list):
raise SchemaTypeError('route-table-summary is not a list')
# Create route-table-summary Schema
route_table_summary_schema = Schema({
"route-count": str,
"route-table-type": str
})
# Validate each dictionary in list
for item in value:
route_table_summary_schema.validate(item)
return value
# Create route-table-summary Schema
route_table_schema = Schema({
"address-family": str,
Optional("enabled-protocols"): str,
"route-table-summary": Use(validate_route_table_summary_list),
"table-name": str
})
# Validate each dictionary in list
for item in value:
route_table_schema.validate(item)
return value
schema = {
Optional("@xmlns:junos"): str,
"forwarding-table-information": {
Optional("@xmlns"): str,
"route-table": Use(validate_route_table_list)
}
}
class ShowRouteForwardingTableSummary(ShowRouteForwardingTableSummarySchema):
""" Parser for:
* show route forwarding-table summary
"""
cli_command = 'show route forwarding-table summary'
def cli(self, output=None):
if not output:
out = self.device.execute(self.cli_command)
else:
out = output
ret_dict = {}
# Routing table: default.inet
p1 = re.compile(r'^Routing +table: +(?P<table_name>\S+)$')
# Internet:
# DHCP Snooping:
p2 = re.compile(r'^(?P<address_family>\S+( +\S+)?):$')
# Enabled protocols: Bridging,
p3 = re.compile(r'^Enabled +protocols: +(?P<enabled_protocols>[\S\s]+)$')
# perm: 1 routes
p4 = re.compile(r'^(?P<route_table_type>\S+): +(?P<route_count>\d+) +routes$')
for line in out.splitlines():
line = line.strip()
# Routing table: default.inet
m = p1.match(line)
if m:
group = m.groupdict()
forwarding_table_information_dict = ret_dict.setdefault('forwarding-table-information', {})
route_table_list = forwarding_table_information_dict.setdefault('route-table', [])
route_table_dict = {k.replace('_', '-'):v for k, v in group.items() if v is not None}
route_table_list.append(route_table_dict)
continue
# Internet:
# DHCP Snooping:
m = p2.match(line)
if m:
group = m.groupdict()
route_table_dict.update({k.replace('_', '-'):v for k, v in group.items() if v is not None})
continue
# Enabled protocols: Bridging,
m = p3.match(line)
if m:
group = m.groupdict()
route_table_dict.update({k.replace('_', '-'):v for k, v in group.items() if v is not None})
continue
# perm: 1 routes
m = p4.match(line)
if m:
group = m.groupdict()
route_table_summary_list = route_table_dict.setdefault('route-table-summary', [])
route_table_summary_list.append({k.replace('_', '-'):v for k, v in group.items() if v is not None})
continue
return ret_dict
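# --- Illustrative usage sketch (added; not part of the original parser module) ---
# Shows one plausible way to exercise ShowRouteForwardingTableSummary offline: when
# output= is supplied, cli() never calls self.device.execute(), so device=None is an
# assumption that works for canned text. The sample output below is abbreviated.
def _example_forwarding_table_summary_offline():
    sample = (
        "Routing table: default.inet\n"
        "Internet:\n"
        "    perm:       1 routes\n"
        "    intf:      11 routes\n"
    )
    parser = ShowRouteForwardingTableSummary(device=None)
    return parser.cli(output=sample)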
class ShowRouteReceiveProtocolSchema(MetaParser):
""" Schema for:
* show route receive-protocol {protocol} {peer}
"""
"""
schema = {
Optional("@xmlns:junos"): str,
"route-information": {
Optional("@xmlns"): str,
"route-table": [
{
"active-route-count": str,
"destination-count": str,
"hidden-route-count": str,
"holddown-route-count": str,
"rt": [
{
Optional("@junos:style"): str,
"rt-destination": str,
"rt-entry": {
"active-tag": str,
"as-path": str,
"local-preference": str,
"med": str,
"nh": {
"to": str
},
"protocol-name": str
}
}
],
"table-name": str,
"total-route-count": str
}
]
}
}
"""
def validate_route_table_list(value):
# Pass route-table list of dict in value
if not isinstance(value, list):
raise SchemaTypeError('route-table is not a list')
def validate_rt_list(value):
# Pass rt list of dict in value
if not isinstance(value, list):
raise SchemaTypeError('rt is not a list')
# Create rt entry Schema
rt_entry_schema = Schema({
Optional("@junos:style"): str,
"rt-destination": str,
"rt-entry": {
Optional("active-tag"): str,
"as-path": str,
"local-preference": str,
"med": str,
"nh": {
"to": str
},
"protocol-name": str
}
})
# Validate each dictionary in list
for item in value:
rt_entry_schema.validate(item)
return value
# Create route-table entry Schema
route_table_schema = Schema({
"active-route-count": str,
"destination-count": str,
"hidden-route-count": str,
"holddown-route-count": str,
Optional("rt"): Use(validate_rt_list),
"table-name": str,
"total-route-count": str
})
# Validate each dictionary in list
for item in value:
route_table_schema.validate(item)
return value
# Main Schema
schema = {
Optional("@xmlns:junos"): str,
"route-information": {
Optional("@xmlns"): str,
"route-table": Use(validate_route_table_list)
}
}
class ShowRouteReceiveProtocol(ShowRouteReceiveProtocolSchema):
""" Parser for:
* show route receive-protocol {protocol} {peer}
"""
cli_command = 'show route receive-protocol {protocol} {peer}'
def cli(self, protocol, peer, output=None):
if not output:
cmd = self.cli_command.format(protocol=protocol,
peer=peer)
out = self.device.execute(cmd)
else:
out = output
ret_dict = {}
# inet.0: 929 destinations, 1615 routes (929 active, 0 holddown, 0 hidden)
p1 = re.compile(r'^(?P<table_name>\S+): +(?P<destination_count>\d+) +'
r'destinations, +(?P<total_route_count>\d+) +routes +'
r'\((?P<active_route_count>\d+) +active, +(?P<holddown_route_count>\d+) +'
r'holddown, +(?P<hidden_route_count>\d+) +hidden\)$')
# * 10.220.0.0/16 Self 12003 120 (65151 65000) I
# 10.220.0.0/16 10.189.5.253 12003 120 (65151 65000) I
p2 = re.compile(r'^((?P<active_tag>\*) +)?(?P<rt_destination>\S+) +'
r'(?P<to>\S+) +(?P<med>\d+) +(?P<local_preference>\d+) +'
r'(?P<as_path>\([\S\s]+\) +\w+)$')
for line in out.splitlines():
line = line.strip()
# inet.0: 929 destinations, 1615 routes (929 active, 0 holddown, 0 hidden)
m = p1.match(line)
if m:
group = m.groupdict()
route_table_list = ret_dict.setdefault('route-information', {}). \
setdefault('route-table', [])
route_table_dict = {k.replace('_', '-'):v for k, v in group.items() if v is not None}
route_table_list.append(route_table_dict)
continue
# * 10.220.0.0/16 Self 12003 120 (65151 65000) I
# 10.220.0.0/16 10.189.5.253 12003 120 (65151 65000) I
m = p2.match(line)
if m:
group = m.groupdict()
rt_list = route_table_dict.setdefault('rt', [])
rt_dict = {'rt-destination': group['rt_destination']}
rt_entry_dict = rt_dict.setdefault('rt-entry', {})
keys = ['active_tag', 'as_path', 'local_preference', 'med']
for key in keys:
v = group[key]
if v:
rt_entry_dict.update({key.replace('_', '-'): v})
nh_dict = rt_entry_dict.setdefault('nh', {})
nh_dict.update({'to': group['to']})
rt_entry_dict.update({'protocol-name': protocol.upper()})
rt_list.append(rt_dict)
return ret_dict
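# --- Illustrative usage sketch (added; not part of the original parser module) ---
# ShowRouteReceiveProtocol.cli() takes protocol and peer because they are normally
# substituted into the CLI command; with output= supplied, protocol only fills the
# protocol-name field and no device command runs (device=None is an assumption).
def _example_receive_protocol_offline():
    sample = (
        "inet.0: 929 destinations, 1615 routes (929 active, 0 holddown, 0 hidden)\n"
        "* 10.220.0.0/16          Self                 12003   120     (65151 65000) I\n"
    )
    parser = ShowRouteReceiveProtocol(device=None)
    return parser.cli(protocol='bgp', peer='10.189.5.253', output=sample)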
class ShowRouteAdvertisingProtocolSchema(MetaParser):
""" Schema for:
* show route advertising-protocol {protocol} {ip_address}
"""
"""
schema = {
Optional("@xmlns:junos"): str,
"route-information": {
Optional("@xmlns"): str,
"route-table": {
"active-route-count": str,
"destination-count": str,
"hidden-route-count": str,
"holddown-route-count": str,
"rt": [
{
Optional("@junos:style"): str,
"rt-destination": str,
"rt-entry": {
"active-tag": str,
"as-path": str,
"bgp-metric-flags": str,
"local-preference": str,
"med": str,
"nh": {
"to": str
},
"protocol-name": str
}
}
],
"table-name": str,
"total-route-count": str
}
}
}
"""
def validate_rt_list(value):
# Pass rt list of dict in value
if not isinstance(value, list):
raise SchemaTypeError('rt is not a list')
# Create rt entry Schema
entry_schema = Schema({
Optional("@junos:style"): str,
"rt-destination": str,
"rt-entry": {
"active-tag": str,
"as-path": str,
"bgp-metric-flags": str,
"local-preference": str,
"med": str,
"nh": {
"to": str
},
"protocol-name": str
}
})
# Validate each dictionary in list
for item in value:
entry_schema.validate(item)
return value
# Main schema
schema = {
Optional("@xmlns:junos"): str,
"route-information": {
Optional("@xmlns"): str,
"route-table": {
"active-route-count": str,
"destination-count": str,
"hidden-route-count": str,
"holddown-route-count": str,
Optional('rt'): Use(validate_rt_list),
"table-name": str,
"total-route-count": str
}
}
}
class ShowRouteAdvertisingProtocol(ShowRouteAdvertisingProtocolSchema):
""" Parser for:
* show route advertising-protocol {protocol} {neighbor}
"""
cli_command = 'show route advertising-protocol {protocol} {neighbor}'
def cli(self, protocol, neighbor, output=None):
if not output:
cmd = self.cli_command.format(protocol=protocol,
neighbor=neighbor)
out = self.device.execute(cmd)
else:
out = output
ret_dict = {}
# inet.0: 929 destinations, 1615 routes (929 active, 0 holddown, 0 hidden)
p1 = re.compile(r'^(?P<table_name>\S+): +(?P<destination_count>\d+) +'
r'destinations, +(?P<total_route_count>\d+) +routes +'
r'\((?P<active_route_count>\d+) +active, +(?P<holddown_route_count>\d+) +'
r'holddown, +(?P<hidden_route_count>\d+) +hidden\)$')
# * 10.220.0.0/16 Self 12003 120 (65151 65000) I
p2 = re.compile(r'^(?P<active_tag>\*) +(?P<rt_destination>\S+) +'
r'(?P<to>\S+) +(?P<med>\d+) +(?P<local_preference>\d+) +'
r'(?P<as_path>\([\S\s]+\) +\w+)$')
for line in out.splitlines():
line = line.strip()
# inet.0: 929 destinations, 1615 routes (929 active, 0 holddown, 0 hidden)
m = p1.match(line)
if m:
group = m.groupdict()
route_table_dict = ret_dict.setdefault('route-information', {}). \
setdefault('route-table', {})
route_table_dict.update({k.replace('_', '-'):v for k, v in group.items() if v is not None})
continue
# * 10.220.0.0/16 Self 12003 120 (65151 65000) I
m = p2.match(line)
if m:
group = m.groupdict()
rt_list = route_table_dict.setdefault('rt', [])
rt_dict = {'rt-destination': group['rt_destination']}
rt_entry_dict = rt_dict.setdefault('rt-entry', {})
keys = ['active_tag', 'as_path', 'local_preference', 'med']
for key in keys:
rt_entry_dict.update({key.replace('_', '-'): group[key]})
nh_dict = rt_entry_dict.setdefault('nh', {})
nh_dict.update({'to': group['to']})
rt_entry_dict.update({'bgp-metric-flags': 'Nexthop Change'})
rt_entry_dict.update({'protocol-name': protocol.upper()})
rt_list.append(rt_dict)
return ret_dict
class ShowRouteSummarySchema(MetaParser):
""" Schema for:
* show route summary
"""
# schema = {
# Optional("@xmlns:junos"): str,
# "route-summary-information": {
# Optional("@xmlns"): str,
# "as-number": str,
# "route-table": [
# {
# "active-route-count": str,
# "destination-count": str,
# "hidden-route-count": str,
# "holddown-route-count": str,
# "protocols": [
# {
# "active-route-count": str,
# "protocol-name": str,
# "protocol-route-count": str
# }
# ],
# "table-name": str,
# "total-route-count": str
# }
# ],
# "router-id": str
# }
# }
def validate_route_table_list(value):
# Pass route-table list of dict in value
if not isinstance(value, list):
raise SchemaTypeError('route-table is not a list')
def validate_protocols_list(value):
# Pass protocols list of dict in value
if not isinstance(value, list):
raise SchemaTypeError('protocols is not a list')
# Create protocols Schema
protocols_schema = Schema({
"active-route-count": str,
"protocol-name": str,
"protocol-route-count": str
})
# Validate each dictionary in list
for item in value:
protocols_schema.validate(item)
return value
# Create route-table Schema
route_table_schema = Schema({
"active-route-count": str,
"destination-count": str,
"hidden-route-count": str,
"holddown-route-count": str,
"protocols": Use(validate_protocols_list),
"table-name": str,
"total-route-count": str
})
# Validate each dictionary in list
for item in value:
route_table_schema.validate(item)
return value
# Main Schema
schema = {
Optional("@xmlns:junos"): str,
"route-summary-information": {
Optional("@xmlns"): str,
"as-number": str,
"route-table": Use(validate_route_table_list),
"router-id": str
}
}
class ShowRouteSummary(ShowRouteSummarySchema):
""" Parser for:
* show route summary
"""
cli_command = 'show route summary'
def cli(self, output=None):
if not output:
out = self.device.execute(self.cli_command)
else:
out = output
ret_dict = {}
# Autonomous system number: 65171
p1 = re.compile(r'^Autonomous +system +number: +(?P<as_number>\d+)$')
# Router ID: 10.189.5.252
p2 = re.compile(r'^Router +ID: +(?P<router_id>\S+)$')
# inet.0: 929 destinations, 1615 routes (929 active, 0 holddown, 0 hidden)
p3 = re.compile(r'^(?P<table_name>\S+): +(?P<destination_count>\d+) +'
r'destinations, +(?P<total_route_count>\d+) +routes +'
r'\((?P<active_route_count>\d+) +active, +(?P<holddown_route_count>\d+) +'
r'holddown, +(?P<hidden_route_count>\d+) +hidden\)$')
# Direct: 6 routes, 6 active
p4 = re.compile(r'^(?P<protocol_name>\S+): +(?P<protocol_route_count>\d+) +'
r'routes, +(?P<active_route_count>\d+) +\w+$')
for line in out.splitlines():
line = line.strip()
# Autonomous system number: 65171
m = p1.match(line)
if m:
group = m.groupdict()
route_summary_information_dict = ret_dict.setdefault('route-summary-information', {})
route_summary_information_dict.update({
k.replace('_', '-'):v for k, v in group.items()
if v is not None})
continue
# Router ID: 10.189.5.252
m = p2.match(line)
if m:
group = m.groupdict()
route_summary_information_dict.update({k.replace('_', '-'):
v for k, v in group.items() if v is not None})
continue
# inet.0: 929 destinations, 1615 routes (929 active, 0 holddown, 0 hidden)
m = p3.match(line)
if m:
group = m.groupdict()
route_table = route_summary_information_dict. \
setdefault('route-table', [])
route_table_dict = {k.replace('_', '-'):
v for k, v in group.items() if v is not None}
route_table.append(route_table_dict)
continue
# Direct: 6 routes, 6 active
m = p4.match(line)
if m:
group = m.groupdict()
protocols_list = route_table_dict.setdefault('protocols', [])
protocols_list.append({k.replace('_', '-'):
v for k, v in group.items() if v is not None})
continue
return ret_dict
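# --- Illustrative usage sketch (added; not part of the original parser module) ---
# A minimal offline run of ShowRouteSummary against canned output; cli() builds the
# nested dict without schema validation, so it is handy for quick checks
# (device=None is an assumption that is safe because output= is provided).
def _example_route_summary_offline():
    sample = (
        "Autonomous system number: 65171\n"
        "Router ID: 10.189.5.252\n"
        "inet.0: 929 destinations, 1615 routes (929 active, 0 holddown, 0 hidden)\n"
        "              Direct:      6 routes,      6 active\n"
    )
    parser = ShowRouteSummary(device=None)
    return parser.cli(output=sample)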
class ShowRouteInstanceDetailSchema(MetaParser):
""" Schema for:
* show route instance detail
"""
# schema = {
# Optional("@xmlns:junos"): str,
# "instance-information": {
# Optional("@junos:style"): str,
# Optional("@xmlns"): str,
# "instance-core": [
# {
# "instance-interface": [
# {
# "interface-name": str
# }
# ],
# "instance-name": str,
# "instance-rib": [
# {
# "irib-active-count": str,
# "irib-hidden-count": str,
# "irib-holddown-count": str,
# "irib-name": str,
# "irib-route-count": str
# }
# ],
# "instance-state": str,
# "instance-type": str,
# "router-id": str
# }
# ]
# }
# }
def validate_instance_core_list(value):
if not isinstance(value, list):
raise SchemaTypeError('instance-core is not a list')
def validate_instance_rib_list(value):
if not isinstance(value, list):
raise SchemaTypeError('instance-rib is not a list')
instance_rib_schema = Schema({
"irib-active-count": str,
"irib-hidden-count": str,
"irib-holddown-count": str,
"irib-name": str,
"irib-route-count": str
})
# Validate each dictionary in list
for item in value:
instance_rib_schema.validate(item)
return value
def validate_interface_name_list(value):
if not isinstance(value, list):
raise SchemaTypeError('interface-name is not a list')
instance_rib_schema = Schema({
"interface-name": str
})
# Validate each dictionary in list
for item in value:
instance_rib_schema.validate(item)
return value
instance_core_schema = Schema({
Optional("instance-interface"): Use(validate_interface_name_list),
"instance-name": str,
Optional("instance-rib"): Use(validate_instance_rib_list),
Optional("instance-state"): str,
Optional("instance-type"): str,
Optional("router-id"): str
})
# Validate each dictionary in list
for item in value:
instance_core_schema.validate(item)
return value
schema = {
Optional("@xmlns:junos"): str,
"instance-information": {
Optional("@junos:style"): str,
Optional("@xmlns"): str,
"instance-core": Use(validate_instance_core_list)
}
}
class ShowRouteInstanceDetail(ShowRouteInstanceDetailSchema):
""" Parser for:
* show route instance detail
"""
cli_command = 'show route instance detail'
def cli(self, output=None):
if not output:
out = self.device.execute(self.cli_command)
else:
out = output
ret_dict = {}
# Router ID: 0.0.0.0
p1 = re.compile(r'^Router +ID: +(?P<router_id>\S+)$')
# Type: forwarding State: Active
p2 = re.compile(r'^Type: +(?P<instance_type>\S+) +State: +(?P<instance_state>\S+)$')
# Tables:
p3 = re.compile(r'^Tables:$')
# Interfaces:
p4 = re.compile(r'^Interfaces:$')
# inet.0 : 1615 routes (929 active, 0 holddown, 0 hidden)
# __juniper_private1__.inet.0: 6 routes (5 active, 0 holddown, 0 hidden)
p5 = re.compile(r'^(?P<irib_name>\S+) *: +(?P<irib_route_count>\d+) +'
r'routes +\((?P<irib_active_count>\d+) +active, +'
r'(?P<irib_holddown_count>\d+) +holddown, +'
r'(?P<irib_hidden_count>\d+) +hidden\)$')
# master:
p6 = re.compile(r'^(?P<instance_name>\S+):$')
# pfh-0/0/0.16383
p7 = re.compile(r'^(?P<interface_name>\S+)$')
for line in out.splitlines():
line = line.strip()
# Router ID: 0.0.0.0
m = p1.match(line)
if m:
group = m.groupdict()
instance_core_dict.update({k.replace('_', '-'):v for k, v in group.items() if v is not None})
continue
# Type: forwarding State: Active
m = p2.match(line)
if m:
group = m.groupdict()
instance_core_dict.update({k.replace('_', '-'):v for k, v in group.items() if v is not None})
continue
# Tables:
m = p3.match(line)
if m:
continue
# Interfaces:
m = p4.match(line)
if m:
instance_interface_list = instance_core_dict.setdefault('instance-interface', [])
continue
# inet.0 : 1615 routes (929 active, 0 holddown, 0 hidden)
# __juniper_private1__.inet.0: 6 routes (5 active, 0 holddown, 0 hidden)
m = p5.match(line)
if m:
group = m.groupdict()
instance_rib_list = instance_core_dict.setdefault('instance-rib', [])
instance_rib_list.append({k.replace('_', '-'):v for k, v in group.items() if v is not None})
continue
# master:
m = p6.match(line)
if m:
group = m.groupdict()
instance_core_list = ret_dict.setdefault('instance-information', {}). \
setdefault('instance-core', [])
instance_core_dict = {k.replace('_', '-'):v for k, v in group.items() if v is not None}
instance_core_list.append(instance_core_dict)
continue
# pfh-0/0/0.16383
m = p7.match(line)
if m:
group = m.groupdict()
instance_interface_list.append({k.replace('_', '-'):v for k, v in group.items() if v is not None})
continue
return ret_dict
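# --- Illustrative usage sketch (added; not part of the original parser module) ---
# ShowRouteInstanceDetail expects the instance name line ("master:") before the
# per-instance details, which is how the real CLI prints it. The canned text below is
# an abbreviated example; device=None is an assumption, valid because output= is given.
def _example_route_instance_detail_offline():
    sample = (
        "master:\n"
        "Router ID: 10.189.5.252\n"
        "Type: forwarding       State: Active\n"
        "Tables:\n"
        "inet.0                 : 1615 routes (929 active, 0 holddown, 0 hidden)\n"
    )
    parser = ShowRouteInstanceDetail(device=None)
    return parser.cli(output=sample)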
| 40.219191 | 115 | 0.451068 |
25c2b0fb421fa996abcad52b601f7250c67f6f42
| 1,509 |
py
|
Python
|
fastaq_stats.py
|
kharten/Metagenomics17
|
1d180e6da17cb917a7be07fd562b3bd32bc3e7dd
|
[
"MIT"
] | null | null | null |
fastaq_stats.py
|
kharten/Metagenomics17
|
1d180e6da17cb917a7be07fd562b3bd32bc3e7dd
|
[
"MIT"
] | null | null | null |
fastaq_stats.py
|
kharten/Metagenomics17
|
1d180e6da17cb917a7be07fd562b3bd32bc3e7dd
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import sys
import gzip
def main():
#create tuples for different file types
fasta_tup = ('.fa', '.fasta')
fastq_tup = ('.fq', '.fastq')
#check for fasta or fastq as first argument
try:
fn = sys.argv[1]
if fn.endswith('.gz'):
open_file = gzip.open(fn, 'rb')
#read_file = open_file.readlines()
parse_fasta(open_file)
elif fn.endswith(fasta_tup):
open_file = open(fn, 'rU')
parse_fasta(open_file)
elif fn.endswith(fastq_tup):
open_file = open(fn, 'rU')
parse_fastq(open_file)
except IndexError:
print "No file found."
sys.exit(1)
def parse_fasta(open_file):
seq_desc = 0
seqs = 0
    #count FASTA description lines (seq_desc) and total residues (seqs)
for line in open_file:
#.rstrip removes whitespace from end of string
line = line.rstrip()
if line.startswith(">"):
seq_desc += 1
else:
seqs += len(line)
print "Total sequences found: %s" % seq_desc
print "Total residues found: %s" % seqs
def parse_fastq(open_file):
seq_desc = 0
seqs = 0
#variable to track lines in fastq entry
x = 4
    #count FASTQ description lines (seq_desc) and total residues (seqs)
for line in open_file:
line = line.rstrip()
if x == 4 and line.startswith("@"):
seq_desc += 1
x = 1
else:
if x == 1:
seqs += (len(line))
x += 1
print "Total sequences found: %s" % seq_desc
print "Total residues found: %s" % seqs
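#Illustrative note (added): a FASTQ record spans four lines, which is what the x
#counter above cycles through. For the assumed record below, parse_fastq reports
#1 sequence and 8 residues (only the second line counts toward residues):
# @read1
# ACGTACGT
# +
# IIIIIIII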
if __name__ == '__main__':
main()
| 20.12 | 98 | 0.663353 |
4c63832a7f5404f94c1b7a380a9e5463add11ee9
| 3,824 |
py
|
Python
|
tests/TestPlayerCalls.py
|
ThiagoVieira/playsomeeuchre
|
6ffbc5096f889efa9bcb575ec4ec73b3a6973b6c
|
[
"MIT"
] | null | null | null |
tests/TestPlayerCalls.py
|
ThiagoVieira/playsomeeuchre
|
6ffbc5096f889efa9bcb575ec4ec73b3a6973b6c
|
[
"MIT"
] | null | null | null |
tests/TestPlayerCalls.py
|
ThiagoVieira/playsomeeuchre
|
6ffbc5096f889efa9bcb575ec4ec73b3a6973b6c
|
[
"MIT"
] | null | null | null |
import unittest
from euchre.data_model import PartialRound
from euchre.players.PlayerInterface import PlayerInterface
from euchre.players.RandomPlayer import RandomPlayer
from euchre.players.BasicPlayer import BasicPlayer
from euchre.players.RulePlayer import RulePlayer
from euchre.serializer import json_to_game
class TestPlayerCalls(unittest.TestCase):
def test_random_player(self):
self.__execute_player_calls(RandomPlayer())
def test_basic_player(self):
self.__execute_player_calls(BasicPlayer())
def test_rule_player(self):
self.__execute_player_calls(RulePlayer())
def __execute_player_calls(self, player: PlayerInterface):
# test ability to pick up trump
json_str = '{"players": [{"index": 0, "order": 0, "hand": [1, 11, 5, 10, 25], "dropped_card": null}, ' \
'{"index": 1, "order": 1, "hand": [26, 16, 20, 13, 29], "dropped_card": null}, ' \
'{"index": 2, "order": 2, "hand": [9, 27, 24, 12, 3], "dropped_card": null}, ' \
'{"index": 3, "order": 3, "hand": [0, 2, 18, 19, 17], "dropped_card": null}], ' \
'"deck": [28, 4, 8, 21], "tricks": [], "trump_caller": null, "trump": null}'
f_round = PartialRound.from_round(json_to_game(json_str), 3)
move = player.dealer_pick_up_trump(f_round)
self.assertIn(move, [False, True], "Must return a boolean of whether to pick up card")
# test ability for dealer to discard card
json_str = '{"players": [{"index": 0, "order": 0, "hand": [1, 11, 5, 10, 25], "dropped_card": null}, ' \
'{"index": 1, "order": 1, "hand": [26, 16, 20, 13, 29], "dropped_card": null}, ' \
'{"index": 2, "order": 2, "hand": [9, 27, 24, 12, 3], "dropped_card": null}, ' \
'{"index": 3, "order": 3, "hand": [0, 2, 18, 19, 17], "dropped_card": null}], ' \
'"deck": [28, 4, 8, 21], "tricks": [], "trump_caller": 0, "trump": 0}'
f_round = PartialRound.from_round(json_to_game(json_str), 0)
move = player.dealer_discard_card(f_round)
self.assertIn(move.value, [1, 11, 5, 10, 25, 28], "Invalid dealer picking up card ")
# Test ability to call trump
json_str = '{"players": [{"index": 0, "order": 0, "hand": [1, 11, 5, 10, 25], "dropped_card": null}, ' \
'{"index": 1, "order": 1, "hand": [26, 16, 20, 13, 29], "dropped_card": null}, ' \
'{"index": 2, "order": 2, "hand": [9, 27, 24, 12, 3], "dropped_card": null}, ' \
'{"index": 3, "order": 3, "hand": [0, 2, 18, 19, 17], "dropped_card": null}], ' \
'"deck": [28, 4, 8, 21], "tricks": [], "trump_caller": null, "trump": null}'
f_round = PartialRound.from_round(json_to_game(json_str), 0)
move = player.call_trump(f_round)
self.assertIn(move, [None, 0, 1, 2], "Invalid calling trump move") # Can't call trump of card dropped
# test ability to make a move
json_str = '{"players": [{"index": 0, "order": 0, "hand": [1, 11, 5, 10, 25], "dropped_card": null}, ' \
'{"index": 1, "order": 1, "hand": [26, 16, 20, 13, 29], "dropped_card": null}, ' \
'{"index": 2, "order": 2, "hand": [9, 27, 24, 12, 3], "dropped_card": null}, ' \
'{"index": 3, "order": 3, "hand": [0, 2, 18, 19, 17], "dropped_card": null}], ' \
'"deck": [28, 4, 8, 21], "tricks": [{"start_index": 0, "cards":[1]}], ' \
'"trump_caller": 0, "trump": 0}'
f_round = PartialRound.from_round(json_to_game(json_str), 1)
move = player.play_card(f_round)
self.assertIn(move.value, [26, 16, 20, 13, 29], "Incorrect move made")
if __name__ == '__main__':
unittest.main()
| 58.830769 | 112 | 0.555178 |
57e4d5bf9eb13a392a9ed2d67f9234e158e114e3
| 5,706 |
py
|
Python
|
cirq-core/cirq/ops/fourier_transform_test.py
|
mkeshita/Cirq
|
06b5a5bd929757bf4bafe32b16bc4fabb36f3390
|
[
"Apache-2.0"
] | null | null | null |
cirq-core/cirq/ops/fourier_transform_test.py
|
mkeshita/Cirq
|
06b5a5bd929757bf4bafe32b16bc4fabb36f3390
|
[
"Apache-2.0"
] | null | null | null |
cirq-core/cirq/ops/fourier_transform_test.py
|
mkeshita/Cirq
|
06b5a5bd929757bf4bafe32b16bc4fabb36f3390
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pytest, sympy
import cirq
def test_phase_gradient():
np.testing.assert_allclose(
cirq.unitary(cirq.PhaseGradientGate(num_qubits=2, exponent=1)), np.diag([1, 1j, -1, -1j])
)
for k in range(4):
cirq.testing.assert_implements_consistent_protocols(
cirq.PhaseGradientGate(num_qubits=k, exponent=1)
)
@pytest.mark.parametrize('resolve_fn', [cirq.resolve_parameters, cirq.resolve_parameters_once])
def test_phase_gradient_symbolic(resolve_fn):
a = cirq.PhaseGradientGate(num_qubits=2, exponent=0.5)
b = cirq.PhaseGradientGate(num_qubits=2, exponent=sympy.Symbol('t'))
assert not cirq.is_parameterized(a)
assert cirq.is_parameterized(b)
assert cirq.has_unitary(a)
assert not cirq.has_unitary(b)
assert resolve_fn(a, {'t': 0.25}) is a
assert resolve_fn(b, {'t': 0.5}) == a
assert resolve_fn(b, {'t': 0.25}) == cirq.PhaseGradientGate(num_qubits=2, exponent=0.25)
def test_str():
assert str(cirq.PhaseGradientGate(num_qubits=2, exponent=0.5)) == 'Grad[2]^0.5'
assert str(cirq.PhaseGradientGate(num_qubits=2, exponent=1)) == 'Grad[2]'
def test_phase_gradient_gate_repr():
a = cirq.PhaseGradientGate(num_qubits=2, exponent=0.5)
cirq.testing.assert_equivalent_repr(a)
def test_quantum_fourier_transform_gate_repr():
b = cirq.QuantumFourierTransformGate(num_qubits=2, without_reverse=False)
cirq.testing.assert_equivalent_repr(b)
def test_pow():
a = cirq.PhaseGradientGate(num_qubits=2, exponent=0.5)
assert a ** 0.5 == cirq.PhaseGradientGate(num_qubits=2, exponent=0.25)
assert a ** sympy.Symbol('t') == cirq.PhaseGradientGate(
num_qubits=2, exponent=0.5 * sympy.Symbol('t')
)
def test_qft():
np.testing.assert_allclose(
cirq.unitary(cirq.qft(*cirq.LineQubit.range(2))),
np.array(
[
[1, 1, 1, 1],
[1, 1j, -1, -1j],
[1, -1, 1, -1],
[1, -1j, -1, 1j],
]
)
/ 2,
atol=1e-8,
)
np.testing.assert_allclose(
cirq.unitary(cirq.qft(*cirq.LineQubit.range(2), without_reverse=True)),
np.array(
[
[1, 1, 1, 1],
[1, -1, 1, -1],
[1, 1j, -1, -1j],
[1, -1j, -1, 1j],
]
)
/ 2,
atol=1e-8,
)
np.testing.assert_allclose(
cirq.unitary(cirq.qft(*cirq.LineQubit.range(4))),
np.array([[np.exp(2j * np.pi * i * j / 16) for i in range(16)] for j in range(16)]) / 4,
atol=1e-8,
)
np.testing.assert_allclose(
cirq.unitary(cirq.qft(*cirq.LineQubit.range(2)) ** -1),
np.array(
[
[1, 1, 1, 1],
[1, -1j, -1, 1j],
[1, -1, 1, -1],
[1, 1j, -1, -1j],
]
)
/ 2,
atol=1e-8,
)
for k in range(4):
for b in [False, True]:
cirq.testing.assert_implements_consistent_protocols(
cirq.QuantumFourierTransformGate(num_qubits=k, without_reverse=b)
)
def test_inverse():
a, b, c = cirq.LineQubit.range(3)
assert cirq.qft(a, b, c, inverse=True) == cirq.qft(a, b, c) ** -1
assert cirq.qft(a, b, c, inverse=True, without_reverse=True) == cirq.inverse(
cirq.qft(a, b, c, without_reverse=True)
)
def test_circuit_diagram():
cirq.testing.assert_has_diagram(
cirq.Circuit(cirq.decompose_once(cirq.qft(*cirq.LineQubit.range(4)))),
"""
0: ───H───Grad^0.5───────#2─────────────#3─────────────×───
│ │ │ │
1: ───────@──────────H───Grad^0.5───────#2─────────×───┼───
│ │ │ │
2: ──────────────────────@──────────H───Grad^0.5───×───┼───
│ │
3: ─────────────────────────────────────@──────────H───×───
""",
)
cirq.testing.assert_has_diagram(
cirq.Circuit(cirq.decompose_once(cirq.qft(*cirq.LineQubit.range(4), without_reverse=True))),
"""
0: ───H───Grad^0.5───────#2─────────────#3─────────────
│ │ │
1: ───────@──────────H───Grad^0.5───────#2─────────────
│ │
2: ──────────────────────@──────────H───Grad^0.5───────
│
3: ─────────────────────────────────────@──────────H───
""",
)
cirq.testing.assert_has_diagram(
cirq.Circuit(
cirq.qft(*cirq.LineQubit.range(4)), cirq.inverse(cirq.qft(*cirq.LineQubit.range(4)))
),
"""
0: ───qft───qft^-1───
│ │
1: ───#2────#2───────
│ │
2: ───#3────#3───────
│ │
3: ───#4────#4───────
""",
)
def test_setters_deprecated():
gate = cirq.PhaseGradientGate(num_qubits=1, exponent=0.1)
assert gate.exponent == 0.1
with cirq.testing.assert_deprecated('mutators', deadline='v0.15'):
gate.exponent = 0.2
assert gate.exponent == 0.2
| 31.524862 | 100 | 0.522257 |
fb87cc56ab8abf8efee5d1f83245abd447ca430d
| 5,196 |
py
|
Python
|
examples/get.py
|
phate999/api-samples
|
3f2b7b35021109a20d231ba073bcf5a00b846af0
|
[
"MIT"
] | 12 |
2018-11-09T21:12:26.000Z
|
2021-12-07T23:54:55.000Z
|
examples/get.py
|
phate999/api-samples
|
3f2b7b35021109a20d231ba073bcf5a00b846af0
|
[
"MIT"
] | 5 |
2019-09-09T20:45:16.000Z
|
2022-01-07T18:23:01.000Z
|
examples/get.py
|
phate999/api-samples
|
3f2b7b35021109a20d231ba073bcf5a00b846af0
|
[
"MIT"
] | 16 |
2019-04-04T17:11:08.000Z
|
2022-01-20T21:40:30.000Z
|
import argparse
import json
import logging
import os
import requests
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.retry import Retry
from datetime import datetime
headers = {
"Content-Type": "application/json",
"X-CP-API-ID": os.environ.get("X_CP_API_ID"),
"X-CP-API-KEY": os.environ.get("X_CP_API_KEY"),
"X-ECM-API-ID": os.environ.get("X_ECM_API_ID"),
"X-ECM-API-KEY": os.environ.get("X_ECM_API_KEY"),
}
class NcmAPIv2(object):
def __init__(self):
self.resp_data = {'data': []}
@staticmethod
def establish_session():
with requests.Session() as session:
retries = Retry(total=5, # Total number of retries to allow.
backoff_factor=2,
status_forcelist=[408, # 408 Request Timeout
500, # 500 Internal Server Error
502, # 502 Bad Gateway
503, # 503 Service Unavailable
504, # 504 Gateway Timeout
],
)
a = HTTPAdapter(max_retries=retries)
session.mount('https://', a)
return session
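    # Note (added): with total=5, backoff_factor=2 and the status_forcelist above,
    # urllib3 retries a failed GET up to five times, sleeping an exponentially
    # growing interval between attempts (the exact schedule depends on the urllib3
    # version); responses with other status codes are returned to the caller as-is.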
def next_url(self):
for item in self.resp['data']:
self.resp_data['data'].append(item)
if args.page and self.resp['meta']['next']:
self.url = self.resp['meta']['next']
return self.url
if args.steps and self.resp['meta']['next']:
while args.steps != 0:
args.steps -= 1
self.url = self.resp['meta']['next']
return self.url
def get_data(self, get_url, json_output):
session = self.establish_session()
while get_url:
logging.info(get_url)
try:
r = session.get(get_url, headers=headers,
timeout=30, stream=True)
if r.status_code != 200:
logging.info(str(r.status_code) + ": " + str(r.text))
break
else:
self.resp = json.loads(r.content.decode("utf-8"))
if len(self.resp['data']) < 1:
return self.resp
else:
get_url = self.next_url()
except Exception as e:
                logging.info("Exception: %s", e)
raise
json_data = json.dumps(self.resp_data, indent=4, sort_keys=False)
with open(f'json/{json_output}', 'w') as outfile:
outfile.write(json_data)
return json_data
if __name__ == '__main__':
# Create json and logs dir
for path in ['json', 'logs']:
dir_path = os.path.join(os.getcwd(), path)
if not os.path.exists(dir_path):
try:
os.makedirs(dir_path)
except OSError:
raise
# Setup logging information
logging.basicConfig(
filename=f'logs/{datetime.now().strftime("%d_%m_%Y_%H_%M_%S.log")}',
level=logging.INFO,
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
# Parse commandline options.
parser = argparse.ArgumentParser(
description="Query APIv2 Historical Locations")
parser.add_argument("--ecm-api-id", help="Override X_ECM_API_ID")
parser.add_argument("--ecm-api-key", help="Override X_ECM_API_KEY")
parser.add_argument("--cp-api-id", help="Override X-CP-API-ID")
parser.add_argument("--cp-api-key", help="Override X-CP-API-KEY")
parser.add_argument(
"--endpoint", help="Name of API endpoint.", default='accounts'
)
parser.add_argument(
"--account", help="Your NCM ID found in settings."
)
parser.add_argument(
"--limit", help="Limit elements in reply, default is 500.",
default=500,
)
parser.add_argument(
"--page", help="Keep following the next URL.",
action="store_true",
)
parser.add_argument(
"--server", default="https://www.cradlepointecm.com",
help="Base URL of server.",
)
parser.add_argument(
"--steps", type=int, help="Walk only this many steps.",
default=-1,
)
parser.add_argument(
"--output", help="json output file name and location",
default='example.json',
)
args = parser.parse_args()
if args.ecm_api_id:
headers["X-ECM-API-ID"] = args.ecm_api_id
if args.ecm_api_key:
headers["X-ECM-API-KEY"] = args.ecm_api_key
if args.cp_api_id:
headers["X-CP-API-ID"] = args.cp_api_id
if args.cp_api_key:
headers["X-CP-API-KEY"] = args.cp_api_key
url = f"{args.server}/api/v2/{args.endpoint}/"
if args.limit:
url += f"?limit={args.limit}"
if args.account:
url += f"&account={args.account}"
output = args.output
logging.info('Started')
# Create an instance of the class
s = NcmAPIv2()
data = s.get_data(f'{url}', f'{output}')
logging.info('Finished')
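    # Example invocation (added; account id, endpoint and key values are assumptions):
    #   export X_ECM_API_ID=... X_ECM_API_KEY=... X_CP_API_ID=... X_CP_API_KEY=...
    #   python get.py --endpoint routers --account 12345 --limit 100 --page --output routers.json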
| 29.691429 | 79 | 0.54157 |
2a650aa31293988fb7314c42603b17174ec5c09e
| 8,439 |
py
|
Python
|
src/docker-images/job-exporter/src/dcgm.py
|
RichardZhaoW/DLWorkspace
|
27d3a3a82e59305bdc67dbfd69098d493f8b3cd5
|
[
"MIT"
] | null | null | null |
src/docker-images/job-exporter/src/dcgm.py
|
RichardZhaoW/DLWorkspace
|
27d3a3a82e59305bdc67dbfd69098d493f8b3cd5
|
[
"MIT"
] | null | null | null |
src/docker-images/job-exporter/src/dcgm.py
|
RichardZhaoW/DLWorkspace
|
27d3a3a82e59305bdc67dbfd69098d493f8b3cd5
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import subprocess
import logging
import collections
import time
import datetime
import threading
from prometheus_client.core import GaugeMetricFamily
import utils
logger = logging.getLogger(__name__)
class nv_host(object):
def __init__(self):
pass
def __enter__(self):
p = subprocess.Popen(["nv-hostengine"],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
code = p.wait(timeout=60)
logger.info("start nv-hostengine exit with %s, stdout %s, stderr %s",
code, p.stdout.read(), p.stderr.read())
def __exit__(self, type, value, traceback):
return
# p = subprocess.Popen(["nv-hostengine", "--term"],
# stdout=subprocess.PIPE,
# stderr=subprocess.PIPE)
# code = p.wait(timeout=60)
# logger.info("stop nv-hostengine exit with %s, stdout %s, stderr %s",
# code, p.stdout.read(), p.stderr.read())
# Ref: https://github.com/NVIDIA/gpu-monitoring-tools/blob/9e2979804d297cf5a81640ba8a8e941365e58f14/dcgm-exporter/dcgm-exporter#L85
mapping = [
(54, "uuid",
"uuid of gpu"), # do not remove uuid, this is very important label
(100, "sm_clock", "SM clock frequency (in MHz)."),
(101, "memory_clock", "Memory clock frequency (in MHz)."),
(140, "memory_temp", "Memory temperature (in C)."),
(150, "gpu_temp", "GPU temperature (in C)."),
(155, "power_usage", "Power draw (in W)."),
(156, "total_energy_consumption",
"Total energy consumption since boot (in mJ)."),
(200, "pcie_tx_throughput",
"Total number of bytes transmitted through PCIe TX (in KB) via NVML."),
(201, "pcie_rx_throughput",
"Total number of bytes received through PCIe RX (in KB) via NVML."),
(202, "pcie_replay_counter", "Total number of PCIe retries."),
(203, "gpu_util", "GPU utilization (in %)."),
(204, "mem_copy_util", "Memory utilization (in %)."),
(206, "enc_util", "Encoder utilization (in %)."),
(207, "dec_util", "Decoder utilization (in %)."),
(230, "xid_errors", "Value of the last XID error encountered."),
(240, "power_violation",
"Throttling duration due to power constraints (in us)."),
(241, "thermal_violation",
"Throttling duration due to thermal constraints (in us)."),
(242, "sync_boost_violation",
"Throttling duration due to sync-boost constraints (in us)."),
(243, "board_limit_violation",
"Throttling duration due to board limit constraints (in us)."),
(244, "low_util_violation",
"Throttling duration due to low utilization (in us)."),
(245, "reliability_violation",
"Throttling duration due to reliability constraints (in us)."),
(246, "app_clock_violation", "Total throttling duration (in us)."),
(251, "fb_free", "Framebuffer memory free (in MiB)."),
(252, "fb_used", "Framebuffer memory used (in MiB)."),
(310, "ecc_sbe_volatile_total",
"Total number of single-bit volatile ECC errors."),
(311, "ecc_dbe_volatile_total",
"Total number of double-bit volatile ECC errors."),
(312, "ecc_sbe_aggregate_total",
"Total number of single-bit persistent ECC errors."),
(313, "ecc_dbe_aggregate_total",
"Total number of double-bit persistent ECC errors."),
(390, "retired_pages_sbe",
"Total number of retired pages due to single-bit errors."),
(391, "retired_pages_dbe",
"Total number of retired pages due to double-bit errors."),
(392, "retired_pages_pending", "Total number of pages pending retirement."),
# (409, "nvlink_flit_crc_error_count_total",
# "Total number of NVLink flow-control CRC errors."),
# (419, "nvlink_data_crc_error_count_total",
# "Total number of NVLink data CRC errors."),
# (429, "nvlink_replay_error_count_total", "Total number of NVLink retries."),
# (439, "nvlink_recovery_error_count_total",
# "Total number of NVLink recovery errors."),
# (449, "nvlink_bandwidth_total",
# "Total number of NVLink bandwidth counters for all lanes"),
]
DCGMMetrics = collections.namedtuple("DCGMMetrics",
list(map(lambda x: x[1], mapping)))
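# Note (added): get_dcgm_metric() below expects `dcgmi dmon` output in the shape the
# command is invoked with: two header lines are skipped, then each remaining row is
# whitespace-split so that column 0 is the GPU minor number, column 1 the uuid
# (field 54 above), and the rest of the columns follow the `mapping` order.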
class DCGMHandler(object):
def __init__(self, interval, gauge_ref, info_ref, dcgmi_histogram,
dcgmi_timeout):
self.interval = interval
self.gauge_ref = gauge_ref
self.info_ref = info_ref
self.dcgmi_histogram = dcgmi_histogram
self.dcgmi_timeout = dcgmi_timeout
self.args = ",".join(map(lambda x: str(x[0]), mapping))
self.thread = None
def start(self):
self.thread = threading.Thread(target=self.run,
name="dcgm_handler",
args=(),
daemon=True)
self.thread.start()
def run(self):
with nv_host():
while True:
try:
metrics, gauges = self.get_dcgm_metric()
now = datetime.datetime.now()
self.info_ref.set(metrics, now)
self.gauge_ref.set(gauges, now)
time.sleep(self.interval)
except Exception:
logger.exception("DCGMHandler.run got exception")
def get_dcgm_metric(self):
metrics = {} # minor_number -> DCGMMetrics
gauges = {} # gauge_name -> GaugeMetricFamily
try:
dcgmi_output = utils.exec_cmd(
["dcgmi", "dmon", "-c", "1", "-d", "1", "-e", self.args],
histogram=self.dcgmi_histogram,
timeout=self.dcgmi_timeout)
except subprocess.CalledProcessError as e:
logger.exception("command '%s' return with error (code %d): %s",
e.cmd, e.returncode, e.output)
return metrics, gauges
except subprocess.TimeoutExpired:
logger.warning("dcgmi timeout")
return metrics, gauges
try:
for _, name, desc in mapping[1:]:
gauges[name] = GaugeMetricFamily(
"dcgm_" + name, desc, labels=["minor_number", "uuid"])
# [2:] is to remove headers
for line in dcgmi_output.split("\n")[2:]:
if line == "": # end of output
continue
part = line.split()
minor_number = part[0]
args = {}
for i, (_, name, _) in enumerate(mapping):
value = part[i + 1]
args[name] = value
if name == "uuid": # do not generate uuid metric
continue
if value == "N/A":
continue
args[name] = float(value)
gauges[name].add_metric([minor_number, args["uuid"]],
float(value))
metrics[minor_number] = DCGMMetrics(**args)
return metrics, gauges
except Exception:
logger.exception("calling dcgmi failed")
return metrics, gauges
if __name__ == '__main__':
logging.basicConfig(
format=
"%(asctime)s - %(levelname)s - %(threadName)s - %(filename)s:%(lineno)s - %(message)s",
level="DEBUG")
import collector
import datetime
from prometheus_client import Histogram
cmd_histogram = Histogram("cmd_dcgmi_latency_seconds",
"Command call latency for nvidia-smi (seconds)",
buckets=(1.0, 2.0, 4.0, 8.0, 16.0, 32.0,
64.0, 128.0, 256.0, 512.0, 1024.0,
float("inf")))
gauge_ref = collector.AtomicRef(datetime.timedelta(seconds=60))
metric_ref = collector.AtomicRef(datetime.timedelta(seconds=60))
    # DCGMHandler is defined in this module; gauge_ref/metric_ref are the AtomicRefs above.
    dcgm_handler = DCGMHandler(1, gauge_ref, metric_ref, cmd_histogram, 600)
    dcgm_handler.start()  # run() blocks forever; start() launches the background thread
for _ in range(10):
now = datetime.datetime.now()
gauge = gauge_ref.get(now)
metric = metric_ref.get(now)
logger.info("gauge is %s", gauge)
logger.info("metric is %s", metric)
time.sleep(2)
| 38.889401 | 131 | 0.571513 |
eb4077344f6575c23c8a91b0872e9e72eb59d16e
| 1,317 |
py
|
Python
|
sdks/python/apache_beam/coders/fast_coders_test.py
|
bschell/beam
|
5533acff51cf6157d62a63c60eb3f074f1958df5
|
[
"Apache-2.0"
] | 2 |
2020-08-30T14:49:58.000Z
|
2020-08-30T15:02:38.000Z
|
sdks/python/apache_beam/coders/fast_coders_test.py
|
bschell/beam
|
5533acff51cf6157d62a63c60eb3f074f1958df5
|
[
"Apache-2.0"
] | 11 |
2018-05-22T06:08:39.000Z
|
2018-10-05T15:02:21.000Z
|
sdks/python/apache_beam/coders/fast_coders_test.py
|
bschell/beam
|
5533acff51cf6157d62a63c60eb3f074f1958df5
|
[
"Apache-2.0"
] | 1 |
2018-11-23T11:49:03.000Z
|
2018-11-23T11:49:03.000Z
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Unit tests for compiled implementation of coder impls."""
from __future__ import absolute_import
import logging
import unittest
# Run all the standard coder test cases.
from apache_beam.coders.coders_test_common import *
class FastCoders(unittest.TestCase):
def test_using_fast_impl(self):
# pylint: disable=wrong-import-order, wrong-import-position
# pylint: disable=unused-variable
import apache_beam.coders.stream
if __name__ == '__main__':
logging.getLogger().setLevel(logging.INFO)
unittest.main()
| 32.925 | 74 | 0.772969 |
94a4169ac78d6ebae59749cf67ffb9dd190f8e03
| 1,939 |
py
|
Python
|
nobos_commons/data_structures/constants/color_palette.py
|
noboevbo/nobos_commons
|
471e52e10fd2228c106777c72d8439e58b047003
|
[
"MIT"
] | 2 |
2020-06-03T16:28:44.000Z
|
2020-10-10T03:07:23.000Z
|
nobos_commons/data_structures/constants/color_palette.py
|
noboevbo/nobos_commons
|
471e52e10fd2228c106777c72d8439e58b047003
|
[
"MIT"
] | null | null | null |
nobos_commons/data_structures/constants/color_palette.py
|
noboevbo/nobos_commons
|
471e52e10fd2228c106777c72d8439e58b047003
|
[
"MIT"
] | 4 |
2020-10-10T03:07:25.000Z
|
2021-09-30T01:11:02.000Z
|
DETECTION_COLOR_PALETTE = [
(39, 129, 113),
(164, 80, 133),
(83, 122, 114),
(99, 81, 172),
(95, 56, 104),
(37, 84, 86),
(14, 89, 122),
(80, 7, 65),
(10, 102, 25),
(90, 185, 109),
(106, 110, 132),
(169, 158, 85),
(188, 185, 26),
(103, 1, 17),
(82, 144, 81),
(92, 7, 184),
(49, 81, 155),
(179, 177, 69),
(93, 187, 158),
(13, 39, 73),
(12, 50, 60),
(16, 179, 33),
(112, 69, 165),
(15, 139, 63),
(33, 191, 159),
(182, 173, 32),
(34, 113, 133),
(90, 135, 34),
(53, 34, 86),
(141, 35, 190),
(6, 171, 8),
(118, 76, 112),
(89, 60, 55),
(15, 54, 88),
(112, 75, 181),
(42, 147, 38),
(138, 52, 63),
(128, 65, 149),
(106, 103, 24),
(168, 33, 45),
(28, 136, 135),
(86, 91, 108),
(52, 11, 76),
(142, 6, 189),
(57, 81, 168),
(55, 19, 148),
(182, 101, 89),
(44, 65, 179),
(1, 33, 26),
(122, 164, 26),
(70, 63, 134),
(137, 106, 82),
(120, 118, 52),
(129, 74, 42),
(182, 147, 112),
(22, 157, 50),
(56, 50, 20),
(2, 22, 177),
(156, 100, 106),
(21, 35, 42),
(13, 8, 121),
(142, 92, 28),
(45, 118, 33),
(105, 118, 30),
(7, 185, 124),
(46, 34, 146),
(105, 184, 169),
(22, 18, 5),
(147, 71, 73),
(181, 64, 91),
(31, 39, 184),
(164, 179, 33),
(96, 50, 18),
(95, 15, 106),
(113, 68, 54),
(136, 116, 112),
(119, 139, 130),
(31, 139, 34),
(66, 6, 127),
(62, 39, 2),
(49, 99, 180),
(49, 119, 155),
(153, 50, 183),
(125, 38, 3),
(129, 87, 143),
(49, 87, 40),
(128, 62, 120),
(73, 85, 148),
(28, 144, 118),
(29, 9, 24),
(175, 45, 108),
(81, 175, 64),
(178, 19, 157),
(74, 188, 190),
(18, 114, 2),
(62, 128, 96),
(21, 3, 150),
(0, 6, 95),
(2, 20, 184),
(122, 37, 185)
]
| 18.825243 | 27 | 0.37803 |
c6f5b5402404304ba77cd6c951b9d6372660c1bb
| 9,097 |
py
|
Python
|
pythonBackend/SuperUser.py
|
sajadgzd/softwareEngineeringProject
|
b2c4838b01ae4cb790a64e3c0d0a5bc959054bab
|
[
"MIT"
] | 5 |
2020-06-27T02:57:47.000Z
|
2022-01-12T22:14:08.000Z
|
pythonBackend/SuperUser.py
|
sajadgzd/softwareEngineeringProject
|
b2c4838b01ae4cb790a64e3c0d0a5bc959054bab
|
[
"MIT"
] | null | null | null |
pythonBackend/SuperUser.py
|
sajadgzd/softwareEngineeringProject
|
b2c4838b01ae4cb790a64e3c0d0a5bc959054bab
|
[
"MIT"
] | 2 |
2020-05-14T18:29:55.000Z
|
2020-05-17T05:59:05.000Z
|
import sqlite3
import json
from flask import Flask, jsonify, render_template, request, send_from_directory
import uuid
app = Flask(__name__)  # Flask app instance needed by the @app.route decorators below
########## SUPER USER CODE ##########
@app.route('/handleApplication', methods = ["POST"])
def handleApplication():
#------Get Data from Front-end-----#
jsonData = request.json
response = jsonData["response"] #ACCEPT or DECLINE
# responderEmail = jsonData["responderEmail"] #get the email of the responder
applicantEmail = jsonData["applicantEmail"].lower() #get the email of the applicant
#-----Database Connection------#
connection = sqlite3.connect(r"./database.db")
cursor = connection.cursor()
cursor.execute("SELECT * FROM signup WHERE [email] = ?",(applicantEmail,))
signUpUserData = cursor.fetchone() #fetching the row from signup
signUpUserData = list(signUpUserData) #convert that string into a python list
#data for the user table
fullname = signUpUserData[0]
email = signUpUserData[1]
password = signUpUserData[3]
groupList = []
groupList = json.dumps(groupList)
reputationScore = 0
status = None
invitations = []
invitations = json.dumps(invitations)
blacklist = []
blacklist = json.dumps(blacklist)
whitelist = []
whitelist = json.dumps(whitelist)
compliments = []
compliments = json.dumps(compliments)
inbox = []
inbox = json.dumps(inbox)
referredUsers = []
referredUsers = json.dumps(referredUsers)
rowData = []
rowData.append(email)
rowData.append(fullname)
rowData.append(password)
rowData.append(groupList)
rowData.append(reputationScore)
rowData.append(status)
rowData.append(invitations)
rowData.append(blacklist)
rowData.append(whitelist)
rowData.append(compliments)
rowData.append(inbox)
rowData.append(referredUsers)
    # accept the application
if response.lower() == "accepted":
#first update the signup row for this user and change user status
rowData[5] = "OU"
signUpUserData[6] = "USER"
        cursor.execute("DELETE FROM signup WHERE [email] = ?", (email,))
cursor.execute("INSERT INTO signup (fullname,email,interests,credentials,reference,appeal,status) VALUES(?,?,?,?,?,?,?)",tuple(signUpUserData))
connection.commit()
# add the user to the user database
cursor.execute("INSERT INTO users (email,fullname,password,groupList,reputationScore,status,invitations,blacklist,whitelist,compliments,inbox,referredUsers) VALUES(?,?,?,?,?,?,?,?,?,?,?,?)",tuple(rowData))
connection.commit()
connection.close()
return (jsonify({"Message" : "{} is now registered to Team Up.".format(email)}))
#decline the invite
elif response.lower() == "declined":
signUpUserData[6] = "REJECTED"
#modify the row
		cursor.execute("DELETE FROM signup WHERE [email] = ?", (email,))
cursor.execute("INSERT INTO signup (fullname,email,interests,credentials,reference,appeal,status) VALUES(?,?,?,?,?,?,?)",tuple(signUpUserData))
connection.commit()
connection.close()
return jsonify({
"Message": "{} will be notified that their application has been rejected.".format(email)
})
@app.route('/blacklistFromServer', methods = ["POST"])
def blacklistFromServer():
jsonData = request.json
userEmail = jsonData["userEmail"]
#------Connection-----#
connection = sqlite3.connect(r"./database.db")
cursor = connection.cursor()
#------Get the visitor information-----#
cursor.execute("SELECT * FROM signup where [email] = ?",(userEmail,))
visitorData = list(cursor.fetchone())
#modify the visitor signup row
visitorData[6] = "BLACKLISTED"
	cursor.execute("DELETE FROM signup WHERE [email] = ?", (userEmail,))
cursor.execute("INSERT INTO signup (fullname,email,interests,credentials,reference,appeal,status) VALUES(?,?,?,?,?,?,?)",tuple(visitorData))
connection.commit()
connection.close()
return jsonify({
"Message": "The visitor has been added to blacklist."
})
@app.route('/reverseReputationDeduction', methods = ["POST"])
def reverseReputationDeduction():
jsonData = request.json
userEmail = jsonData["userEmail"]
#------Connection-----#
connection = sqlite3.connect(r"./database.db")
cursor = connection.cursor()
#------Get the user information-----#
cursor.execute("SELECT * FROM users where [email] = ?",(userEmail,))
userData = list(cursor.fetchone())
userData[4] += 5 #
cursor.execute("DELETE FROM users WHERE [email] = ?", (userEmail,))
cursor.execute("INSERT INTO users (email,fullname,password,groupList,reputationScore,status,invitations,blacklist,whitelist,compliments,inbox,referredUsers) VALUES(?,?,?,?,?,?,?,?,?,?,?,?)",tuple(userData))
connection.commit()
connection.close()
	return (jsonify({
		"Message": "{}'s point deduction has been reversed.".format(userEmail)
	}))
@app.route('/shutDownGroup', methods = ["POST"])
def shutDownGroup():
#------Get Data from Front-end-----#
jsonData = request.json
groupName = jsonData['groupName']
#------Connection-----#
connection = sqlite3.connect(r"./database.db")
cursor = connection.cursor()
#------Delete the row-----#
	cursor.execute("SELECT * FROM groups WHERE [groupName] = ?",(groupName,))
groupData = cursor.fetchone()
groupData = list(groupData)
groupData[1] = "CLOSED"
	cursor.execute("DELETE FROM groups WHERE [groupName] = ?",(groupName,))
cursor.execute("INSERT INTO groups (groupName,status,posts,memberpolls,groupPolls,members) VALUES(?,?,?,?,?,?)",tuple(groupData))
connection.commit()
connection.close()
return (jsonify({
"Message": "{} has been closed.".format(groupName)
}))
@app.route('/issuePointDeduction', methods = ["POST"])
def issuePointDeduction():
jsonData = request.json
userEmail = jsonData["userEmail"]
#------Connection-----#
connection = sqlite3.connect(r"./database.db")
cursor = connection.cursor()
#------Get the user information-----#
cursor.execute("SELECT * FROM users where [email] = ?",(userEmail,))
userData = list(cursor.fetchone())
userData[4] -= 5
cursor.execute("DELETE FROM users WHERE [email] = ?", (userEmail,))
cursor.execute("INSERT INTO users (email,fullname,password,groupList,reputationScore,status,invitations,blacklist,whitelist,compliments,inbox,referredUsers) VALUES(?,?,?,?,?,?,?,?,?,?,?,?)",tuple(userData))
connection.commit()
connection.close()
managePointStatus(userEmail)
return (jsonify({
"Message": "{} has 5 points deducted from their score.".format(userEmail)
}))
@app.route('/issuePointIncrement', methods = ["POST"])
def issuePointIncrement():
jsonData = request.json
userEmail = jsonData["userEmail"]
#------Connection-----#
connection = sqlite3.connect(r"./database.db")
cursor = connection.cursor()
#------Get the user information-----#
cursor.execute("SELECT * FROM users where [email] = ?",(userEmail,))
userData = list(cursor.fetchone())
userData[4] += 5
cursor.execute("DELETE FROM users WHERE [email] = ?", (userEmail,))
cursor.execute("INSERT INTO users (email,fullname,password,groupList,reputationScore,status,invitations,blacklist,whitelist,compliments,inbox,referredUsers) VALUES(?,?,?,?,?,?,?,?,?,?,?,?)",tuple(userData))
connection.commit()
connection.close()
managePointStatus(userEmail)
	return (jsonify({
		"Message": "{} has 5 points added to their score.".format(userEmail)
	}))
@app.route('/banUser', methods = ["POST"])
def banUser():
jsonData = request.json
userEmail = jsonData["userEmail"]
#------Connection-----#
connection = sqlite3.connect(r"./database.db")
cursor = connection.cursor()
#------Get the signup information-----#
cursor.execute("SELECT * FROM signup where [email] = ?",(userEmail,))
visitorData = list(cursor.fetchone())
#modify the signup row
visitorData[6] = "BLACKLISTED"
	cursor.execute("DELETE FROM signup WHERE [email] = ?", (userEmail,))
cursor.execute("INSERT INTO signup (fullname,email,interests,credentials,reference,appeal,status) VALUES(?,?,?,?,?,?,?)",tuple(visitorData))
#Get the users information
cursor.execute("SELECT * FROM users WHERE [email] = ?", (userEmail,))
userData = list(cursor.fetchone())
#modify the user row
userData[5] = "BLACKLISTED"
	cursor.execute("DELETE FROM users WHERE [email] = ?", (userEmail,))
cursor.execute("INSERT INTO users (email,fullname,password,groupList,reputationScore,status,invitations,blacklist,whitelist,compliments,inbox,referredUsers) VALUES(?,?,?,?,?,?,?,?,?,?,?,?)",tuple(userData))
connection.commit()
connection.close()
return (jsonify({
"Message": "{} has been blacklisted from Signup and Users table.".format(userEmail)
}))
########## END SUPER USER CODE ##########
| 35.25969 | 213 | 0.652963 |
d637ce670e1378e1f97e7a93e7aa81ffa6402fb2
| 1,977 |
py
|
Python
|
lib/linux/get_browsers.py
|
cinfo-kwoc/Cinfo
|
1cdebe7165d687e977004ec2723019adb3b4288b
|
[
"MIT"
] | null | null | null |
lib/linux/get_browsers.py
|
cinfo-kwoc/Cinfo
|
1cdebe7165d687e977004ec2723019adb3b4288b
|
[
"MIT"
] | null | null | null |
lib/linux/get_browsers.py
|
cinfo-kwoc/Cinfo
|
1cdebe7165d687e977004ec2723019adb3b4288b
|
[
"MIT"
] | null | null | null |
import os
class get_browsers:
'''
********* THIS SCRIPT RETURNS A LIST CONTAINING BROWSERS INSTALLED ON USER'S LINUX SYSTEM *********
CLASS get_browsers DOCINFO:
get_browsers HAVE TWO FUNCTIONS I.E.,
1) __init__
2) work()
__init__ DOCFILE:
__init__ BLOCK SERVES THE INITIALIZATION FUNCTION, CONTAINING INITIALIZED VARIABLES WHICH IS GOING TO BE USED LATER BY OTHER MEMBER FUNCTION.
WORK() DOCFILE:
THE FUNCTION WORKS IN FOLLOWING WAY:
1) COLLECTING DATA FROM COMMANDLINE, AND SAVING IT INTO A STRING.
2) SPLITTING DATA ACCORDING TO A NEW LINE AND SAVING ALL LINES 'BROWSER' NAMED LIST.
3) REMOVING LAST REDUNDANT ELEMENT.
4) REFINING NAME FROM THE LIST WE GET.
5) RETURNING THE LIST.
'''
def __init__(self):
'''
__init__ DOCFILE:
__init__ BLOCK SERVES THE INITIALIZATION FUNCTION, CONTAINING INITIALIZED VARIABLES WHICH IS GOING TO BE USED LATER BY OTHER MEMBER FUNCTION.
'''
		self.command_output = "" # TO SAVE DATA RECEIVED FROM COMMAND INTO A STRING
self.browsers = [] # FOR SAVING BROWSER DATA COLLECTED INTO A SINGLE VARIABLE
def work(self):
'''
WORK() DOCFILE:
THE FUNCTION WORKS IN FOLLOWING WAY:
1) COLLECTING DATA FROM COMMANDLINE, AND SAVING IT INTO A STRING.
2) SPLITTING DATA ACCORDING TO A NEW LINE AND SAVING ALL LINES 'BROWSER' NAMED LIST.
3) REMOVING LAST REDUNDANT ELEMENT.
4) REFINING NAME FROM THE LIST WE GET.
5) RETURNING THE LIST.
'''
self.command_output = os.popen("apropos 'web browser'").read() # COLLECTING DATA FROM COMMANDLINE, AND SAVING IT INTO A STRING.
self.browsers = self.command_output.split('\n') # SPLITTING DATA ACCORDING TO A NEW LINE AND SAVING ALL LINES 'BROWSER' NAMED LIST
self.browsers.pop() # REMOVING LAST REDUNDANT ELEMENT
self.browsers = [i[:i.find('(')-1] for i in self.browsers] # REFINING NAME FROM THE LIST WE GET
return self.browsers # RETURNING THE LIST
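# Example usage (a minimal sketch, assuming a Linux host where `apropos` is available
# and this module is executed directly):
if __name__ == "__main__":
	finder = get_browsers()   # instantiate the helper defined above
	print(finder.work())      # prints the list of detected browser names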
| 42.978261 | 145 | 0.699039 |
c161953f5f5c3466d3949a3fba98085566cf51c5
| 1,368 |
py
|
Python
|
eva/scripts/extract_rosbag_pose.py
|
epfl-lasa/rds
|
574b3881dbaf4fdcd785dd96ba4c451928454b40
|
[
"MIT"
] | 12 |
2020-08-18T09:01:50.000Z
|
2022-03-17T19:53:30.000Z
|
eva/scripts/extract_rosbag_pose.py
|
epfl-lasa/rds
|
574b3881dbaf4fdcd785dd96ba4c451928454b40
|
[
"MIT"
] | null | null | null |
eva/scripts/extract_rosbag_pose.py
|
epfl-lasa/rds
|
574b3881dbaf4fdcd785dd96ba4c451928454b40
|
[
"MIT"
] | 1 |
2021-08-25T13:12:55.000Z
|
2021-08-25T13:12:55.000Z
|
#!/usr/bin/env python
import rospy
from geometry_msgs.msg import PoseWithCovarianceStamped
import tf
import numpy as np
import time
import scipy.io as sio
import signal
import sys
n_max = 30*300 # 30 Hz for 5 min
data = np.empty([n_max, 4])
# [[time, x, y, phi]]
counter = 0
start_time = None
previous_t = -100.0
def save():
global data
data = data[0:counter, :]
sio.savemat('pose.mat', {'data' : data})
def signal_handler(sig, frame):
save()
sys.exit(0)
signal.signal(signal.SIGINT, signal_handler)
def callbackPoseUpdate(msg):
global data
global counter
global start_time
global previous_t
x = msg.pose.pose.position.x
y = msg.pose.pose.position.y
q = (msg.pose.pose.orientation.x, msg.pose.pose.orientation.y,
msg.pose.pose.orientation.z, msg.pose.pose.orientation.w)
rpy = tf.transformations.euler_from_quaternion(q)
phi = rpy[2]
if start_time == None:
start_time = rospy.Time.now() #time.time()
t = 0.0
else:
t = (rospy.Time.now() - start_time).to_sec() #time.time() - start_time
previous_t = t
if counter >= n_max:
print ('Longer than expected')
return
data[counter, :] = np.array([t, x, y, phi])
counter += 1
def main():
rospy.init_node('extract_rosbag_pose_node')
rospy.Subscriber('poseupdate', PoseWithCovarianceStamped, callbackPoseUpdate)
print ('Ready ...')
rospy.spin()
if __name__ == '__main__':
main()
| 21.046154 | 78 | 0.707602 |
8e2f46ce220c24596c1d0c1481d04aa4a40db141
| 49 |
py
|
Python
|
recipe/conda_forge_ci_setup/__init__.py
|
scopatz/conda-forge-ci-setup-feedstock
|
6b81908e35bdecb9e92241b978e626fbd42b7d54
|
[
"BSD-3-Clause"
] | 6 |
2020-03-30T23:05:16.000Z
|
2021-12-14T15:28:16.000Z
|
recipe/conda_forge_ci_setup/__init__.py
|
scopatz/conda-forge-ci-setup-feedstock
|
6b81908e35bdecb9e92241b978e626fbd42b7d54
|
[
"BSD-3-Clause"
] | null | null | null |
recipe/conda_forge_ci_setup/__init__.py
|
scopatz/conda-forge-ci-setup-feedstock
|
6b81908e35bdecb9e92241b978e626fbd42b7d54
|
[
"BSD-3-Clause"
] | 3 |
2020-09-03T13:19:47.000Z
|
2021-03-04T10:12:09.000Z
|
"""conda-forge-ci-utils"""
__version__ = '2.0.6'
| 16.333333 | 26 | 0.632653 |
35a020fa781dfd60dfcefcb8b1eb1f9db8771525
| 328 |
py
|
Python
|
nebula2/__init__.py
|
xiaoronghuang/nebula-python
|
fe2a85639dd1500133a63bad50f72b3c0370d1de
|
[
"Apache-2.0"
] | 110 |
2019-10-24T09:21:07.000Z
|
2022-03-31T07:06:00.000Z
|
nebula2/__init__.py
|
xiaoronghuang/nebula-python
|
fe2a85639dd1500133a63bad50f72b3c0370d1de
|
[
"Apache-2.0"
] | 83 |
2019-11-20T07:55:05.000Z
|
2022-03-23T10:55:14.000Z
|
nebula2/__init__.py
|
xiaoronghuang/nebula-python
|
fe2a85639dd1500133a63bad50f72b3c0370d1de
|
[
"Apache-2.0"
] | 56 |
2019-10-11T07:01:05.000Z
|
2022-03-11T09:09:15.000Z
|
#!/usr/bin/env python
# --coding:utf-8--
# Copyright (c) 2020 vesoft inc. All rights reserved.
#
# This source code is licensed under Apache 2.0 License,
# attached with Common Clause Condition 1.0, found in the LICENSES directory.
from nebula2.common.ttypes import Value
Value.__hash__ = lambda self: self.value.__hash__()  # make Value hashable by delegating to the wrapped value
| 25.230769 | 77 | 0.746951 |
4b1326cb9e1ab13b114b0692a5985d0c268e468c
| 6,579 |
py
|
Python
|
venv/Lib/site-packages/pyroute2/netns/nslink.py
|
kalymgr/Project-T-Cryptocurrencies
|
5dbb679a76bcf07b913036e7b44ba4247c39482d
|
[
"MIT"
] | null | null | null |
venv/Lib/site-packages/pyroute2/netns/nslink.py
|
kalymgr/Project-T-Cryptocurrencies
|
5dbb679a76bcf07b913036e7b44ba4247c39482d
|
[
"MIT"
] | null | null | null |
venv/Lib/site-packages/pyroute2/netns/nslink.py
|
kalymgr/Project-T-Cryptocurrencies
|
5dbb679a76bcf07b913036e7b44ba4247c39482d
|
[
"MIT"
] | null | null | null |
'''
NetNS
=====
A NetNS object is IPRoute-like. It runs in the main network
namespace, but also creates a proxy process running in
the required netns. All the netlink requests are done via
that proxy process.
NetNS supports standard IPRoute API, so can be used instead
of IPRoute, e.g., in IPDB::
# start the main network settings database:
ipdb_main = IPDB()
# start the same for a netns:
ipdb_test = IPDB(nl=NetNS('test'))
# create VETH
ipdb_main.create(ifname='v0p0', kind='veth', peer='v0p1').commit()
# move peer VETH into the netns
with ipdb_main.interfaces.v0p1 as veth:
veth.net_ns_fd = 'test'
# please keep in mind, that netns move clears all the settings
# on a VETH interface pair, so one should run netns assignment
# as a separate operation only
# assign addresses
# please notice, that `v0p1` is already in the `test` netns,
# so should be accessed via `ipdb_test`
with ipdb_main.interfaces.v0p0 as veth:
veth.add_ip('172.16.200.1/24')
veth.up()
with ipdb_test.interfaces.v0p1 as veth:
veth.add_ip('172.16.200.2/24')
veth.up()
Please review also the test code, under `tests/test_netns.py` for
more examples.
By default, NetNS creates requested netns, if it doesn't exist,
or uses existing one. To control this behaviour, one can use flags
as for `open(2)` system call::
# create a new netns or fail, if it already exists
netns = NetNS('test', flags=os.O_CREAT | os.O_EXCL)
# create a new netns or use existing one
netns = NetNS('test', flags=os.O_CREAT)
# the same as above, the default behaviour
netns = NetNS('test')
To remove a network namespace::
from pyroute2 import NetNS
netns = NetNS('test')
netns.close()
netns.remove()
One should stop it first with `close()`, and only after that
run `remove()`.
'''
import os
import errno
import signal
import atexit
import logging
from functools import partial
from pyroute2.netlink.rtnl.iprsocket import MarshalRtnl
from pyroute2.iproute import RTNL_API
from pyroute2.netns import setns
from pyroute2.netns import remove
from pyroute2.remote import Server
from pyroute2.remote import Transport
from pyroute2.remote import RemoteSocket
log = logging.getLogger(__name__)
class FD(object):
def __init__(self, fd):
self.fd = fd
for name in ('read', 'write', 'close'):
setattr(self, name, partial(getattr(os, name), self.fd))
def fileno(self):
return self.fd
def flush(self):
return None
class NetNS(RTNL_API, RemoteSocket):
'''
NetNS is the IPRoute API with network namespace support.
**Why not IPRoute?**
The task to run netlink commands in some network namespace, being in
another network namespace, requires the architecture, that differs
too much from a simple Netlink socket.
NetNS starts a proxy process in a network namespace and uses
`multiprocessing` communication channels between the main and the proxy
processes to route all `recv()` and `sendto()` requests/responses.
**Any specific API calls?**
Nope. `NetNS` supports all the same, that `IPRoute` does, in the same
way. It provides full `socket`-compatible API and can be used in
poll/select as well.
The only difference is the `close()` call. In the case of `NetNS` it
is **mandatory** to close the socket before exit.
**NetNS and IPDB**
It is possible to run IPDB with NetNS::
from pyroute2 import NetNS
from pyroute2 import IPDB
ip = IPDB(nl=NetNS('somenetns'))
...
ip.release()
Do not forget to call `release()` when the work is done. It will shut
down `NetNS` instance as well.
'''
def __init__(self, netns, flags=os.O_CREAT):
self.netns = netns
self.flags = flags
trnsp_in, self.remote_trnsp_out = [Transport(FD(x))
for x in os.pipe()]
self.remote_trnsp_in, trnsp_out = [Transport(FD(x))
for x in os.pipe()]
self.child = os.fork()
if self.child == 0:
# child process
trnsp_in.close()
trnsp_out.close()
try:
setns(self.netns, self.flags)
except OSError as e:
(self
.remote_trnsp_out
.send({'stage': 'init', 'error': e}))
os._exit(e.errno)
except Exception as e:
(self.
remote_trnsp_out
.send({'stage': 'init',
'error': OSError(errno.ECOMM,
str(e),
self.netns)}))
os._exit(255)
try:
Server(self.remote_trnsp_in, self.remote_trnsp_out)
finally:
os._exit(0)
try:
super(NetNS, self).__init__(trnsp_in, trnsp_out)
except Exception:
self.close()
raise
atexit.register(self.close)
self.marshal = MarshalRtnl()
def clone(self):
return type(self)(self.netns, self.flags)
def _cleanup_atexit(self):
if hasattr(atexit, 'unregister'):
atexit.unregister(self.close)
else:
try:
atexit._exithandlers.remove((self.close, (), {}))
except ValueError:
pass
def close(self):
self._cleanup_atexit()
try:
super(NetNS, self).close()
except:
# something went wrong, force server shutdown
try:
self.trnsp_out.send({'stage': 'shutdown'})
except Exception:
pass
log.error('forced shutdown procedure, clean up netns manually')
# force cleanup command channels
for close in (self.trnsp_in.close,
self.trnsp_out.close,
self.remote_trnsp_in.close,
self.remote_trnsp_out.close):
try:
close()
except Exception:
pass # Maybe already closed in remote.Client.close
try:
os.kill(self.child, signal.SIGKILL)
os.waitpid(self.child, 0)
except OSError:
pass
def post_init(self):
pass
def remove(self):
'''
Try to remove this network namespace from the system.
'''
remove(self.netns)
| 29.370536 | 75 | 0.594163 |
cf3020346ffd5fb4d8d77b6be635bca8ee20ec8e
| 8,596 |
py
|
Python
|
src/find_homography_distance_orig (2).py
|
Charamba/Cross-Ratio-Arrays-Shape-Descriptor
|
b252814f54bb11c2519a1bd4d0be3f524faba901
|
[
"Unlicense"
] | 2 |
2021-09-05T15:50:02.000Z
|
2022-01-05T03:10:50.000Z
|
src/find_homography_distance_orig (2).py
|
Charamba/Cross-Ratio-Arrays-Shape-Descriptor
|
b252814f54bb11c2519a1bd4d0be3f524faba901
|
[
"Unlicense"
] | null | null | null |
src/find_homography_distance_orig (2).py
|
Charamba/Cross-Ratio-Arrays-Shape-Descriptor
|
b252814f54bb11c2519a1bd4d0be3f524faba901
|
[
"Unlicense"
] | null | null | null |
import cv2
import numpy as np
from numpy.linalg import inv
from scipy.spatial.distance import directed_hausdorff
import math
def bbox_dimensions(points):
X = [p.x for p in points]
Y = [p.y for p in points]
min_x = min(X)
min_y = min(Y)
max_x = max(X)
max_y = max(Y)
dbbx = max_x - min_x
dbby = max_y - min_y
return (dbbx, dbby)
def calc_homography(pairs_points):
if len(pairs_points) == 0:
return float('Inf')
pts_src = np.array([[src_pt[0], src_pt[1]] for (src_pt, _) in pairs_points])
pts_dst = np.array([[dst_pt[0], dst_pt[1]] for (_, dst_pt) in pairs_points])
print("pts_dst = ", len(pts_dst))
h_inv, status = cv2.findHomography(pts_dst, pts_src, cv2.LMEDS)
return h_inv
def transf_points(h, points):
rectif_points = []
for pt_dst in points:
#pt_dst = pts_dst[i]
#print("pt_dst: ", pt_dst)
#print("h_inv: ", h_inv)
p_rectif = np.matmul(np.array(h), np.array([pt_dst.x, pt_dst.y, 1]))
p_rectif_norm = [p_rectif[0]/p_rectif[2], p_rectif[1]/p_rectif[2], p_rectif[2]/p_rectif[2]]
(xrn, yrn) = (p_rectif_norm[0], p_rectif_norm[1])
rectif_points.append((xrn, yrn))
return rectif_points
def calc_hausdorff_distance(template_hull_points, query_hull_points, pairs_points):
h_inv = calc_homography(pairs_points)
(dbbx, dbby) = bbox_dimensions(template_hull_points)
query_hull_points_rect = transf_points(h_inv, query_hull_points)
template_hull_points = [(p.x/dbbx,p.y/dbby) for p in template_hull_points]
	query_hull_points_rect = [(x/dbbx, y/dbby) for (x, y) in query_hull_points_rect]
return directed_hausdorff(template_hull_points, query_hull_points_rect)[0], directed_hausdorff(query_hull_points_rect, template_hull_points)[0]
def calc_homography_distance(pts_src, pts_dst):
if len(pts_dst) == 0:
return float('Inf')
h_inv, status = cv2.findHomography(pts_dst, pts_src, cv2.LMEDS)#,9.0)
n = 0
if h_inv is None:
return float('Inf')
dist = 0 if len(pts_dst) > 0 else float('Inf')
dist_array = []
(dbbx, dbby) = bbox_dimensions(pts_src)
#print("dbbx = ", dbbx)
#print("dbby = ", dbby)
for i, s in enumerate(status):
if s == 1:
orig_pt = pts_src[i]
pt_dst = pts_dst[i]
#print("pt_dst: ", pt_dst)
#print("h_inv: ", h_inv)
p_rectif = np.matmul(h_inv, [pt_dst[0], pt_dst[1], 1])
p_rectif_norm = [p_rectif[0]/p_rectif[2], p_rectif[1]/p_rectif[2], p_rectif[2]/p_rectif[2]]
(xo, yo) = (orig_pt[0], orig_pt[1])
(xrn, yrn) = (p_rectif_norm[0], p_rectif_norm[1])
dx = (xo-xrn)/dbbx
dy = (yo-yrn)/dbby
error_geom = math.sqrt(dx*dx + dy*dy)
dist += error_geom
dist_array.append(error_geom)
#if s == 1:
n += 1
#else s==0:
matchings_percent = float(sum(status))/len(status)
#print("matchings_percent = ", matchings_percent)
#print("Total = ", len(status))
#print("n = ", n)
dist_array.sort()
errors = np.array(dist_array)#dist_array[0:4]
#median_error = np.median(dist_array)
#return (1.0-matchings_percent), median_error/n
# math.sqrt((dist/n)**2 + (1.0 - n/150)**2)
return 100*(dist/n)#*(1.0 - n/150) if n/150 >= 0.1 else float('Inf')#(1.0-matchings_percent)#(1 - matchings_percent)#np.median(errors)/n#1.0/n#dist/n
if __name__ == '__main__' :
# acessible vs acessible test 0
pairs_points = [((10.630961, -151.628088), (4.908575, -200.799395)), ((61.261922, -75.256176), (70.817150, -124.598790)), ((86.577403, -37.070221), (136.725725, -48.398186)), ((137.208364, 39.301691), (202.634300, 27.802419)), ((187.839325, 115.673603), (268.542875, 104.003024)), ((198.000000, 131.000000), (272.000000, 108.000000)), ((201.000000, 138.000000), (274.000000, 111.000000)), ((210.000000, 161.000000), (289.000000, 140.000000)), ((210.000000, 164.000000), (289.000000, 142.000000)), ((193.000000, 171.000000), (259.000000, 152.000000)), ((128.461571, 197.015371), (174.000000, 178.000000)), ((82.635361, 212.783400), (76.547010, 203.561440)), ((-6.000000, 243.000000), (-13.000000, 227.000000)), ((-16.000000, 246.000000), (-22.000000, 229.000000)), ((-29.000000, 249.000000), (-37.000000, 232.000000)), ((-37.000000, 250.000000), (-52.000000, 234.000000)), ((-51.000000, 251.000000), (-63.000000, 235.000000)), ((-66.000000, 251.000000), (-100.000000, 235.000000)), ((-75.000000, 250.000000), (-118.000000, 233.000000)), ((-88.000000, 248.000000), (-140.000000, 229.000000)), ((-109.000000, 242.000000), (-153.000000, 226.000000)), ((-114.000000, 240.000000), (-160.000000, 224.000000)), ((-133.000000, 231.000000), (-192.000000, 212.000000)), ((-143.000000, 225.000000), (-210.000000, 203.000000)), ((-155.000000, 216.000000), (-220.000000, 197.000000)), ((-174.000000, 198.000000), (-241.000000, 182.000000)), ((-180.000000, 191.000000), (-249.000000, 175.000000)), ((-186.000000, 183.000000), (-263.000000, 161.000000)), ((-192.000000, 174.000000), (-278.000000, 142.000000)), ((-201.000000, 157.000000), (-283.000000, 134.000000)), ((-207.000000, 142.000000), (-293.000000, 114.000000)), ((-210.000000, 133.000000), (-296.000000, 106.000000)), ((-213.000000, 121.000000), (-301.000000, 87.000000)), ((-215.000000, 108.000000), (-302.000000, 81.000000)), ((-216.000000, 99.000000), (-303.000000, 73.000000)), ((-216.000000, 75.000000), (-303.000000, 48.000000)), ((-213.000000, 54.000000), (-301.000000, 34.000000)), ((-212.000000, 49.000000), (-298.000000, 22.000000)), ((-210.000000, 41.000000), (-291.000000, 3.000000)), ((-176.485566, -54.400419), (-248.845287, -88.506571)), ((-145.971132, -140.800839), (-206.690575, -180.013143)), ((-118.000000, -220.000000), (-168.000000, -264.000000)), ((-114.000000, -228.000000), (-166.000000, -268.000000)), ((-112.000000, -231.000000), (-153.000000, -283.000000)), ((-103.000000, -240.000000), (-147.000000, -287.000000)), ((-92.000000, -246.000000), (-131.000000, -294.000000)), ((-89.000000, -247.000000), (-127.000000, -295.000000)), ((-85.000000, -248.000000), (-122.000000, -296.000000)), ((-65.000000, -247.000000), (-94.000000, -295.000000)), ((-62.000000, -246.000000), (-90.000000, -294.000000)), ((-54.000000, -242.000000), (-81.000000, -291.000000)), ((-51.000000, -240.000000), (-77.000000, -289.000000)), ((-43.000000, -232.000000), (-61.000000, -277.000000))]
# honda vs acessible test 30
#pairs_points = [((918.000000, -767.000000), (-217.000000, 100.000000)), ((925.573130, -528.688114), (88.558067, -77.347163)), ((929.359694, -409.532171), (-207.000000, -78.000000)), ((933.146259, -290.376228), (-67.137316, 147.129403)), ((936.932824, -171.220285), (103.000000, 163.000000)), ((940.719389, -52.064342), (94.930112, 2.421396)), ((944.505954, 67.091601), (-106.705974, 141.194104)), ((948.292518, 186.247544), (29.000000, 161.000000)), ((959.652213, 543.715372), (106.000000, 145.000000)), ((965.000000, 712.000000), (-130.069293, -128.862356)), ((845.786661, 757.810363), (-205.000000, 109.000000)), ((726.573323, 758.620725), (104.488178, 122.074233)), ((488.146645, 760.241450), (-197.000000, -85.000000)), ((368.933306, 761.051813), (-244.000000, 68.000000)), ((249.719968, 761.862175), (-163.534647, -106.931178)), ((130.506629, 762.672538), (-211.000000, -75.000000)), ((-227.133387, 765.103626), (-29.673233, -194.655891)), ((-359.000000, 766.000000), (44.000000, -226.000000)), ((-417.000000, 766.000000), (106.000000, 141.000000)), ((-536.199721, 764.024314), (35.222959, 161.676409)), ((-960.914207, 637.787412), (-27.568658, 153.064701)), ((-965.000000, 88.000000), (-258.000000, 5.000000)), ((-964.000000, 79.000000), (-257.000000, -2.000000)), ((-963.000000, 74.000000), (-255.000000, -12.000000)), ((-961.000000, 66.000000), (-216.000000, -71.000000)), ((-933.000000, 16.000000), (-253.000000, 49.000000)), ((-927.000000, 9.000000), (-139.000000, 136.000000)), ((-907.000000, -10.000000), (-144.000000, 135.000000)), ((-894.000000, -20.000000), (-200.000000, -83.000000)), ((655.692461, -661.145138), (102.000000, 168.000000)), ((905.000000, -763.000000), (-249.000000, 58.000000)), ((915.000000, -767.000000), (-180.000000, 123.000000))]
	# splitting the points into two lists
	pts_src = np.array([[src_pt[0], src_pt[1]] for (src_pt, _) in pairs_points])
	pts_dst = np.array([[dst_pt[0], dst_pt[1]] for (_, dst_pt) in pairs_points])
dist = calc_homography_distance(pts_src, pts_dst)
#print("dist = ", dist)
#cv2.waitKey(0)
| 69.322581 | 2,942 | 0.629711 |
08fd9f7ab3c71344e11589cee3c694ec396fef54
| 4,130 |
py
|
Python
|
constraint_generation/python/spice_parse/netlist.py
|
rbarzic/MAGICAL
|
0510550b263913f8c62f46662a9dfa2ae94d2386
|
[
"BSD-3-Clause"
] | null | null | null |
constraint_generation/python/spice_parse/netlist.py
|
rbarzic/MAGICAL
|
0510550b263913f8c62f46662a9dfa2ae94d2386
|
[
"BSD-3-Clause"
] | null | null | null |
constraint_generation/python/spice_parse/netlist.py
|
rbarzic/MAGICAL
|
0510550b263913f8c62f46662a9dfa2ae94d2386
|
[
"BSD-3-Clause"
] | null | null | null |
class netlist_element():
def __init__(self,typeof):
self.typeof = typeof
self.parent = False
def __str__(self):
return(self.typeof)
class subcircuit(netlist_element):
def __init__(self, name, nets, instances):
self.name = name;
self.labels = {}
self.power_nets = []
self.ground_nets = []
self.internal_nets = []
		# dictionary of net names,
		# key is net name, value is net object
		# mark these nets also as io
self.io_nets = nets # MY NOTE: get the pin list
self.nets = {} #= nets;
for n in nets:
self.nets[n] = net(n, self)
self.nets[n].is_io = True
self.instances = instances;
for i in self.instances:
			# register subcircuit as parent
i.parent = self
# add all internal nets
for n in i.pins:
if n not in self.nets:
self.nets[n] = net(n, self)
netlist_element.__init__(self,'subcircuit')
def __str__(self):
insts = {}
for i in self.instances:
insts[i.name] = i.reference
return(self.typeof + " " + self.name + "(" + str(self.nets) + "):" + str(insts))
def map_instances(self, mapping_function):
for i in range(len(self.instances)):
self.instances[i] = mapping_function(self.instances[i])
def map_nets(self, mapping_function):
for n in self.nets:
self.nets[n] = mapping_function(self.nets[n])
def __repr__(self):
return self.name
class instance(netlist_element):
def __init__(self, name, pins, reference, parameters):
self.name = name;
self.pins = pins;
self.reference = reference;
self.parameters = parameters;
netlist_element.__init__(self,'instance')
def __str__(self):
return(self.typeof + " " + self.name + "@" + self.reference + str(self.parameters))
def get_attr(self):
self.attr = self.reference.split('_')
class net(netlist_element):
def __init__(self, name, parent):
self.name = name
self.nettype = 'standard'
self.nodes = set()
self.labels = {}
self.is_vdd = False
self.is_gnd = False
self.is_internal = False
self.is_io = False
self.parent = parent
def connect(self, pin):
self.nodes.add(pin)
def __repr__(self):
return (self.parent.__repr__() + "." + self.name)
class pin(netlist_element):
def __init__(self, name, parent):
self.name = name
self.parent = parent
self.net = False
self.direction = False
def connect(self,net):
if not self.net:
# get the net object from the subcircuit
self.net = self.parent.parent.nets[net]
self.net.connect(self)
def __repr__(self):
return (self.parent.__repr__() + "." + self.name)
class mosfet(instance):
def __init__(self,instance):
self.name = instance.name
self.parent = instance.parent
self.parameters = instance.parameters
self.reference = instance.reference
self.drain = pin('d',self)
self.gate = pin('g',self)
self.source = pin('s',self)
self.bulk = pin('b',self)
self.labels = {}
def connect(self, drain, gate, source, bulk):
self.drain.connect(drain)
self.gate.connect(gate)
self.source.connect(source)
self.bulk.connect(bulk)
def __str__(self):
return(self.typeof + ":" + self.reference + '[' + self.drain.net.name + ', '+ self.gate.net.name + ', '+ self.source.net.name + ', '+ self.bulk.net.name + ']')
def __repr__(self):
return(self.parent.__repr__() + "." + self.name)
class nmos(mosfet):
def __init__(self,instance):
netlist_element.__init__(self,'nmos')
mosfet.__init__(self, instance)
class pmos(mosfet):
def __init__(self,instance):
netlist_element.__init__(self,'pmos')
mosfet.__init__(self, instance)
| 31.769231 | 167 | 0.575787 |
a2ccdecc6979646a022e6b5aa62bcedaf9f091ea
| 629 |
py
|
Python
|
methods/get_and_set.py
|
kurtgalvin/dunder-mifflin-methods
|
69925575d3b9acdd01cea30414e8f01847566bc1
|
[
"MIT"
] | null | null | null |
methods/get_and_set.py
|
kurtgalvin/dunder-mifflin-methods
|
69925575d3b9acdd01cea30414e8f01847566bc1
|
[
"MIT"
] | null | null | null |
methods/get_and_set.py
|
kurtgalvin/dunder-mifflin-methods
|
69925575d3b9acdd01cea30414e8f01847566bc1
|
[
"MIT"
] | null | null | null |
# __get__ and __set__ tutorial
class Hours:
def __init__(self, hours_per_day):
self.hours_per_day = hours_per_day
self.total_hours = 0
self.regular_hours = 0
self.overtime_hours = 0
def __get__(self, instance, owner):
return {
'total': self.total_hours,
'regular': self.regular_hours,
'overtime': self.overtime_hours
}
def __set__(self, instance, value):
self.hours_per_day = value
class Manager:
hours = Hours(8)
def __init__(self, name):
self.name = name.lower()
michael = Manager('michael')
print(michael.hours)   # reading the attribute goes through Hours.__get__
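# A minimal sketch of the setter path (values are illustrative): assigning to the
# attribute routes through Hours.__set__, which updates hours_per_day on the shared
# Hours descriptor; reading the attribute again goes through Hours.__get__.
michael.hours = 10     # Hours.__set__ is called with value 10
print(michael.hours)   # still returns the totals dictionary from Hours.__get__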
| 22.464286 | 43 | 0.612083 |
738e1e9d005d879b052ec6d7949465e03f99162d
| 15,021 |
py
|
Python
|
src/dataops/tests/test_logic.py
|
ShizhuZhang/ontask_b
|
acbf05ff9b18dae0a41c67d1e41774e54a890c40
|
[
"MIT"
] | null | null | null |
src/dataops/tests/test_logic.py
|
ShizhuZhang/ontask_b
|
acbf05ff9b18dae0a41c67d1e41774e54a890c40
|
[
"MIT"
] | null | null | null |
src/dataops/tests/test_logic.py
|
ShizhuZhang/ontask_b
|
acbf05ff9b18dae0a41c67d1e41774e54a890c40
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function
import StringIO
import datetime
import numpy as np
import pandas as pd
import test
from dataops import pandas_db
from dataops.formula_evaluation import evaluate_top_node, evaluate_node_sql
from dataops.ops import perform_dataframe_upload_merge
from dataops.pandas_db import get_filter_query
class DataopsMatrixManipulation(test.OntaskTestCase):
table_name = 'TEST_TABLE'
pk = '999'
csv1 = """key,text1,text2,double1,double2,bool1,bool2,date1,date2
1.0,"d1_t1_1",,111.0,,True,,1/1/18 01:00:00,
2.0,"d2_t1_2",,112.0,,False,,1/1/18 02:00:00,
3.0,"",d1_t2_3,,123.0,,False,,1/2/18 03:00:00
4.0,,d1_t2_4,,124.0,,True,,1/2/18 04:00:00
5.0,"d1_t1_5",,115.0,,False,,1/1/18 05:00:00,
6.0,"d1_t1_6",,116.0,,True,,1/1/18 06:00:00,
7.0,,d1_t2_7,,126.0,,True,,1/2/18 07:00:00
8.0,,d1_t2_8,,127.0,,False,,1/2/18 08:00:00"""
csv2 = """key,text2,text3,double2,double3,bool2,bool3,date2,date3
5.0,,d2_t3_5,,235.0,,FALSE,,2/3/18
6.0,d2_t2_6,,216.0,,TRUE,,2/2/18 06:00,
7.0,,d2_t3_7,,237.0,,TRUE,,2/3/18 07:00
8.0,d2_t2_8,,218.0,,FALSE,,2/2/18 08:00,
9.0,,d2_t3_9,,239.0,,TRUE,,2/3/18 09:00
10.0,d2_t2_10,,2110.0,,FALSE,,2/2/18 10:00,
11.0,,d2_t3_11,,2311.0,,FALSE,,2/3/18 11:00
12.0,d2_t2_12,,2112.0,,TRUE,,2/2/18 12:00,"""
merge_info = {
'initial_column_names': None,
'rename_column_names': None,
'columns_to_upload': None,
'src_selected_key': 'key',
'dst_selected_key': 'key',
'how_merge': None
}
def parse_data_frames(self):
# Parse the two CSV strings and return as data frames
df_dst = pandas_db.load_df_from_csvfile(
StringIO.StringIO(self.csv1),
0,
0)
df_src = pandas_db.load_df_from_csvfile(
StringIO.StringIO(self.csv2),
0,
0
)
# Fix the merge_info fields.
self.merge_info['initial_column_names'] = list(df_src.columns)
self.merge_info['rename_column_names'] = list(df_src.columns)
self.merge_info['columns_to_upload'] = list(df_src.columns)
return df_dst, df_src
def df_update_inner(self):
df_dst, df_src = self.parse_data_frames()
self.merge_info['how_merge'] = 'inner'
result = perform_dataframe_upload_merge(self.pk,
df_dst,
df_src,
self.merge_info)
# Result must be correct (None)
self.assertEquals(result, None)
result_df = pandas_db.load_from_db(self.pk)
def df_equivalent_after_sql(self):
# Parse the CSV
df_source = pandas_db.load_df_from_csvfile(
StringIO.StringIO(self.csv1),
0,
0)
# Store the DF in the DB
pandas_db.store_table(df_source, self.table_name)
# Load it from the DB
df_dst = pandas_db.load_table(self.table_name)
# Columns have to have the same values (None and NaN are
# different)
for x in df_source.columns:
np.testing.assert_array_equal(
np.array(df_source[x], dtype=unicode),
np.array(df_dst[x], dtype=unicode)
)
class FormulaEvaluation(test.OntaskTestCase):
skel = {
u'condition': u'AND',
u'not': False,
u'rules': [{
u'field': u'variable',
u'id': u'variable',
u'input': u'{0}', # Number/Text/
u'operator': u'{1}', # Operator
u'type': u'{2}', # Data type: integer, double, text, ...
u'value': u'{3}'}], # The constant
u'valid': True
}
test_table = 'TEST_TABLE'
def set_skel(self, input_value, op_value, type_value, value, vname=None):
if vname:
self.skel['rules'][0]['field'] = vname
self.skel['rules'][0]['id'] = vname
self.skel['rules'][0]['input'] = input_value
self.skel['rules'][0]['operator'] = op_value
self.skel['rules'][0]['type'] = type_value
self.skel['rules'][0]['value'] = value
return self.skel
def do_operand(self,
input_value,
op_value,
type_value,
value1,
value2,
value3):
self.assertTrue(
evaluate_top_node(
self.set_skel(input_value,
op_value.format(''),
type_value,
value1),
{'variable': value2}
)
)
self.assertFalse(
evaluate_top_node(
self.set_skel(input_value,
op_value.format(''),
type_value,
value1),
{'variable': value3}
)
)
if op_value.find('{0}') != -1:
self.assertFalse(
evaluate_top_node(
self.set_skel(input_value,
op_value.format('not_'),
type_value,
value1),
{'variable': value2}
)
)
self.assertTrue(
evaluate_top_node(
self.set_skel(input_value,
op_value.format('not_'),
type_value,
value1),
{'variable': value3}
)
)
def do_sql_operand(self,
input_value,
op_value,
type_value,
value):
self.set_skel(
input_value,
op_value.format(''),
type_value,
value, 'v_' + type_value
)
query, fields = get_filter_query(self.test_table, None, self.skel)
result = pd.read_sql_query(query, pandas_db.engine, params=fields)
self.assertEqual(len(result), 1)
def test_eval_node(self):
#
# EQUAL
#
self.do_operand('number', '{0}equal', 'integer', '0', 0, 3)
self.do_operand('number', '{0}equal', 'double', '0.3', 0.3, 3.2)
self.do_operand('text', '{0}equal', 'string', 'aaa', 'aaa', 'abb')
self.do_operand('text', '{0}equal', 'boolean', '1', True, False)
self.do_operand('text', '{0}equal', 'boolean', '0', False, True)
self.do_operand('text',
'{0}equal',
'datetime',
'2018-09-15T00:03:03',
datetime.datetime(2018, 9, 15, 0, 3, 3),
datetime.datetime(2018, 9, 15, 0, 3, 4))
#
# BEGINS WITH
#
self.do_operand('text',
'{0}begins_with',
'string', 'ab', 'abcd', 'acd')
#
# CONTAINS
#
self.do_operand('text',
'{0}contains',
'string', 'bc', 'abcd', 'acd')
#
# ENDS WITH
#
self.do_operand('text',
'{0}ends_with',
'string', 'cd', 'abcd', 'abc')
#
# IS EMPTY
#
self.do_operand('text',
'is_{0}empty',
'string', None, None, 'aaa')
#
# LESS
#
self.do_operand('number', 'less', 'integer', '1', 0, 3)
self.do_operand('number', 'less', 'double', '1.2', 0.2, 3.2)
self.do_operand('text',
'less',
'datetime',
'2018-09-15T00:03:03',
datetime.datetime(2018, 9, 15, 0, 3, 2),
datetime.datetime(2018, 9, 15, 0, 3, 4))
#
# LESS OR EQUAL
#
self.do_operand('number', 'less_or_equal', 'integer', '1', 0, 3)
self.do_operand('number', 'less_or_equal', 'integer', '1', 1, 3)
self.do_operand('number', 'less_or_equal', 'double', '1.2', 0.2, 3.2)
self.do_operand('number', 'less_or_equal', 'double', '1.2', 1.2, 3.2)
self.do_operand('text',
'less_or_equal',
'datetime',
'2018-09-15T00:03:03',
datetime.datetime(2018, 9, 15, 0, 3, 2),
datetime.datetime(2018, 9, 15, 0, 3, 4))
self.do_operand('text',
'less_or_equal',
'datetime',
'2018-09-15T00:03:03',
datetime.datetime(2018, 9, 15, 0, 3, 3),
datetime.datetime(2018, 9, 15, 0, 3, 4))
#
# GREATER
#
self.do_operand('number', 'greater', 'integer', '1', 3, 0, )
self.do_operand('number', 'greater', 'double', '1.2', 3.2, 0.2)
self.do_operand('text',
'greater',
'datetime',
'2018-09-15T00:03:03',
datetime.datetime(2018, 9, 15, 0, 3, 4),
datetime.datetime(2018, 9, 15, 0, 3, 2))
#
# GREATER OR EQUAL
#
self.do_operand('number', 'greater_or_equal', 'integer', '1', 3, 0)
self.do_operand('number', 'greater_or_equal', 'integer', '1', 1, 0)
self.do_operand('number', 'greater_or_equal', 'double', '1.2', 3.2, 0.2)
self.do_operand('number', 'greater_or_equal', 'double', '1.2', 1.2, 0.2)
self.do_operand('text',
'greater_or_equal',
'datetime',
'2018-09-15T00:03:03',
datetime.datetime(2018, 9, 15, 0, 3, 4),
datetime.datetime(2018, 9, 15, 0, 3, 2))
self.do_operand('text',
'greater_or_equal',
'datetime',
'2018-09-15T00:03:03',
datetime.datetime(2018, 9, 15, 0, 3, 3),
datetime.datetime(2018, 9, 15, 0, 3, 2))
#
# In Between (needs special function)
#
self.do_operand('number', '{0}between', 'integer', ['1', '4'], 2, 5)
self.do_operand('number',
'{0}between',
'double',
['1.0',
'4.0'],
2.0,
5.0)
self.do_operand('text',
'{0}between',
'datetime',
['2018-09-15T00:03:03', '2018-09-15T00:04:03'],
datetime.datetime(2018, 9, 15, 0, 3, 30),
datetime.datetime(2018, 9, 15, 0, 4, 30))
def test_eval_sql(self):
# Create the dataframe with the variables
df = pd.DataFrame(
[(1, 2.0, True, 'xxx', datetime.datetime(2018, 01, 01, 00, 00,
00)),
(None, None, None, None, None)],
columns=['v_integer',
'v_double',
'v_boolean',
'v_string',
'v_datetime'])
# Store the data frame
pandas_db.store_table(df, 'TEST_TABLE')
#
# EQUAL
#
self.do_sql_operand('number', '{0}equal', 'integer', '1')
self.do_sql_operand('number', '{0}equal', 'double', '2.0')
self.do_sql_operand('number', '{0}equal', 'boolean', '1')
self.do_sql_operand('text', '{0}equal', 'string', 'xxx')
self.do_sql_operand('text',
'{0}equal',
'datetime',
'2018-01-01T00:00:00')
#
# BEGINS WITH
#
self.do_sql_operand('text',
'{0}begins_with',
'string',
'x')
#
# CONTAINS
#
self.do_sql_operand('text',
'{0}contains',
'string',
'xx')
#
# ENDS WITH
#
self.do_sql_operand('text',
'{0}ends_with',
'string',
'xx')
#
# IS EMPTY
#
self.do_sql_operand('number', 'is_{0}empty', 'integer', None)
self.do_sql_operand('number', 'is_{0}empty', 'double', None)
self.do_sql_operand('text', 'is_{0}empty', 'string', None)
self.do_sql_operand('text', 'is_{0}empty', 'boolean', None)
self.do_sql_operand('text', 'is_{0}empty', 'datetime', None)
#
# LESS
#
self.do_sql_operand('number', 'less', 'integer', '2')
self.do_sql_operand('number', 'less', 'double', '3.2')
self.do_sql_operand('text',
'less',
'datetime',
'2018-01-02T00:00:00')
#
# LESS OR EQUAL
#
self.do_sql_operand('number', 'less_or_equal', 'integer', '1')
self.do_sql_operand('number', 'less_or_equal', 'double', '2.0')
self.do_sql_operand('text',
'less_or_equal',
'datetime',
'2018-01-01T00:00:00')
#
# GREATER
#
self.do_sql_operand('number', 'greater', 'integer', '0')
self.do_sql_operand('number', 'greater', 'double', '1.2')
self.do_sql_operand('text',
'greater',
'datetime',
'2017-01-01T00:00:00')
#
# GREATER OR EQUAL
#
self.do_sql_operand('number', 'greater_or_equal', 'integer', '1')
self.do_sql_operand('number', 'greater_or_equal', 'double', '2.0')
self.do_sql_operand('text',
'greater_or_equal',
'datetime',
'2018-01-01T00:00:00')
#
# BETWEEN
#
self.do_sql_operand('number', '{0}between', 'integer', ['0', '2'])
self.do_sql_operand('number', '{0}between', 'double', ['1.2', '2.2'])
self.do_sql_operand('text',
'{0}between',
'datetime', ['2017-01-01T00:00:00',
'2018-09-13T00:00:00'])
| 35.095794 | 80 | 0.446175 |
966670cbfde49dd8515a4113d2a9471777b20fbf
| 14,253 |
py
|
Python
|
dictionary.py
|
berkegokmen1/dictionary
|
8b3b8a8b94ca668136b543805e9614a3c1cb50ea
|
[
"MIT"
] | 1 |
2021-12-08T10:45:41.000Z
|
2021-12-08T10:45:41.000Z
|
dictionary.py
|
berkegokmen1/dictionary
|
8b3b8a8b94ca668136b543805e9614a3c1cb50ea
|
[
"MIT"
] | null | null | null |
dictionary.py
|
berkegokmen1/dictionary
|
8b3b8a8b94ca668136b543805e9614a3c1cb50ea
|
[
"MIT"
] | null | null | null |
from bs4 import BeautifulSoup
import requests
import json
from colorama import init
from termcolor import colored
import csv
import re
import concurrent.futures
init()
tr_definitions = []
en_definition = ""
collocations = []
synonyms = []
word_forms = {
"Noun": [],
"Verb": [],
"Adjective": [],
"Adverb": []
}
sentences = []
pattern = '(\w+)\s*(-t|-d|-c|-syn|-sen|-wf)?\s*(-n|-v|-adj|-adv)?'
version = "2.0"
def control_execution(*args): # function, word, form
try:
if(not args[0] == get_forms):
args[0](args[1])
else:
args[0](args[1], args[2])
except:
pass
def get_turk(word):
source = requests.get(f"https://tureng.com/en/turkish-english/{word}").text
soup = BeautifulSoup(source, "lxml")
turk_table = soup.find("table").find_all("td", class_="tr ts")
global tr_definitions
tr_definitions.append(turk_table[0].find("a").text)
try:
tr_definitions.append(turk_table[1].find("a").text)
tr_definitions.append(turk_table[2].find("a").text)
except:
pass
return ", ".join(tr_definitions)
def get_sentences(word):
source = requests.get(f"https://sentence.yourdictionary.com/{word}").text
soup = BeautifulSoup(source, "lxml")
sentences_table = soup.find_all("div", class_="sentence component")
global sentences
sentences.append(sentences_table[0].find("p").text)
try:
sentences.append(sentences_table[1].find("p").text)
sentences.append(sentences_table[2].find("p").text)
except:
pass
return " || ".join(sentences)
def get_en(word):
source = requests.get(f"https://www.yourdictionary.com/{word}").text
soup = BeautifulSoup(source, "lxml")
def_en = soup.find("div", class_="definition").find("div").text
global en_definition
if ("The definition of" in def_en):
def_en = (def_en[18]).upper() + def_en[19:]
en_definition = def_en
return def_en
else:
en_definition = def_en
return def_en
def get_synonyms(word):
source = requests.get(
f"http://www.synonymy.com/synonym.php?word={word}").text
soup = BeautifulSoup(source, "lxml")
sy_table = soup.find_all("div", class_="defbox")[
1].find("ol").find("li").find_all("a")
for sy in sy_table:
global synonyms
synonyms.append(sy.text)
return ", ".join(synonyms)
def get_forms(word, form):
if (form == "noun"):
source = requests.get(
f"https://www.wordhippo.com/what-is/the-noun-for/{word}.html").text
soup = BeautifulSoup(source, "lxml")
noun_table = soup.find_all("div", class_="defv2wordtype")
for i in range(0, 4):
try:
word_forms["Noun"].append(noun_table[i].text)
except:
pass
return ", ".join(word_forms["Noun"])
elif (form == "verb"):
source = requests.get(
f"https://www.wordhippo.com/what-is/the-verb-for/{word}.html").text
soup = BeautifulSoup(source, "lxml")
verb_table = soup.find_all("div", class_="defv2wordtype")
for i in range(0, 4):
try:
word_forms["Verb"].append(verb_table[i].text)
except:
pass
return ", ".join(word_forms["Verb"])
elif (form == "adjective"):
source = requests.get(
f"https://www.wordhippo.com/what-is/the-adjective-for/{word}.html").text
soup = BeautifulSoup(source, "lxml")
adj_table = soup.find_all("div", class_="defv2wordtype")
for i in range(0, 4):
try:
word_forms["Adjective"].append(adj_table[i].text)
except:
pass
return ", ".join(word_forms["Adjective"])
elif (form == "adverb"):
source = requests.get(
f"https://www.wordhippo.com/what-is/the-adverb-for/{word}.html").text
soup = BeautifulSoup(source, "lxml")
adv_table = soup.find_all("div", class_="defv2wordtype")
for i in range(0, 4):
try:
word_forms["Adverb"].append(adv_table[i].text)
except:
pass
return ", ".join(word_forms["Adverb"])
def get_collocations(word):
try:
response = requests.get(
f"http://www.just-the-word.com/api/combinations?word={word}").text
data = json.loads(response)
except:
pass
i = 0
try:
for item in data["combinations"][1]["list"]:
if (not i == 5):
global collocations
collocations.append(item[2])
i += 1
except:
for item in data["combinations"][0]["list"]:
if (not i == 5):
collocations.append(item[2])
i += 1
return ", ".join(collocations)
def search(word):
with concurrent.futures.ThreadPoolExecutor() as executor:
f1 = executor.submit(control_execution, get_turk, word)
f2 = executor.submit(control_execution, get_en, word)
f3 = executor.submit(control_execution, get_collocations, word)
f4 = executor.submit(control_execution, get_synonyms, word)
f5 = executor.submit(control_execution, get_sentences, word)
searchWF(word)
def searchWF(word):
with concurrent.futures.ThreadPoolExecutor() as executor:
f6 = executor.submit(control_execution, get_forms, word, "noun")
f7 = executor.submit(control_execution, get_forms, word, "verb")
f8 = executor.submit(control_execution, get_forms, word, "adjective")
f9 = executor.submit(control_execution, get_forms, word, "adverb")
def store(word):
with open("wordList.csv", "a") as csv_file:
csv_writer = csv.writer(csv_file)
try:
csv_writer.writerow(["", "", "", "", "", "", "", "", "", ""])
csv_writer.writerow(["Word", "Turkish", "Definition", "Collocations", "Synonyms",
"Sentences", "Noun F.", "Verb F.", "Adjective F.", "Adverb F."])
csv_writer.writerow([word, ", ".join(tr_definitions), en_definition, ", ".join(collocations), ", ".join(synonyms), " || ".join(
sentences), ", ".join(word_forms["Noun"]), ", ".join(word_forms["Verb"]), ", ".join(word_forms["Adjective"]), ", ".join(word_forms["Adverb"])])
except:
pass
print(f"berkegokmen dictionary script version {version}\n")
print(colored("\nFLAGS", "white", "on_grey", ["bold", "underline"]))
print("")
print(colored("-t:", "white", "on_grey", ["bold"]),
colored("turkish", "white", "on_grey"))
print(colored("-d:", "white", "on_grey", ["bold"]),
colored("definition", "white", "on_grey"))
print(colored("-c:", "white", "on_grey", ["bold"]),
colored("collocations", "white", "on_grey"))
print(colored("-syn:", "white", "on_grey", ["bold"]),
colored("synonyms", "white", "on_grey"))
print(colored("-sen:", "white", "on_grey", ["bold"]),
colored("sentences", "white", "on_grey"))
print(colored("-wf:", "white", "on_grey", ["bold"]),
colored("word formations", "white", "on_grey"))
print(colored(" -wf -n:", "white", "on_grey", ["bold"]),
colored("noun form", "white", "on_grey"))
print(colored(" -wf -v:", "white", "on_grey", ["bold"]),
colored("verb form", "white", "on_grey"))
print(colored(" -wf -adj:", "white", "on_grey", ["bold"]),
colored("adjective form", "white", "on_grey"))
print(colored(" -wf -adv:", "white", "on_grey", ["bold"]),
colored("adverb form", "white", "on_grey"))
print("\n", colored("Type 'berke' to exit.",
"red", "on_grey", ["bold"]), "\n")
while True:
print(colored("*" * 50, "blue", "on_grey", ["bold"]))
_word_ = input(colored("$Word: ", "yellow", "on_grey", ["bold"]))
if (not _word_ == "berke"):
tr_definitions = []
en_definition = ""
collocations = []
synonyms = []
word_forms = {
"Noun": [],
"Verb": [],
"Adjective": [],
"Adverb": []
}
sentences = []
match = re.search(pattern, _word_)
if (match):
_word_ = match.group(1)
if (match.group(2) == None):
search(_word_)
print(colored("Turkish:", "cyan", "on_grey",
["bold"]), ", ".join(tr_definitions))
print(colored("Definition:", "cyan",
"on_grey", ["bold"]), en_definition)
try:
print(colored("Collocations:", "cyan", "on_grey",
["bold"]), ", ".join(collocations))
except:
collocations = []
print(colored("Collocations:", "cyan", "on_grey",
["bold"]), ", ".join(collocations))
print(colored("Synonyms:", "cyan", "on_grey",
["bold"]), ", ".join(synonyms))
print(colored("Sentences:", "cyan", "on_grey",
["bold"]), " || ".join(sentences))
print(colored("Word formations:", "cyan", "on_grey", ["bold"]))
print(colored("\tNoun:", "cyan", "on_grey",
["bold"]), ", ".join(word_forms["Noun"]))
print(colored("\tVerb:", "cyan", "on_grey",
["bold"]), ", ".join(word_forms["Verb"]))
print(colored("\tAdjective:", "cyan", "on_grey",
["bold"]), ", ".join(word_forms["Adjective"]))
print(colored("\tAdverb:", "cyan", "on_grey",
["bold"]), ", ".join(word_forms["Adverb"]))
store(_word_)
elif (match.group(2) == "-t"):
try:
print(colored("Turkish:", "cyan",
"on_grey", ["bold"]), get_turk(_word_))
except:
print(colored("Turkish:", "cyan",
"on_grey", ["bold"]), None)
elif (match.group(2) == "-d"):
try:
print(colored("Definition:", "cyan",
"on_grey", ["bold"]), get_en(_word_))
except:
print(colored("Definition:", "cyan", "on_grey", ["bold"]), colored(
"ERROR", "red", "on_grey", ["bold"]))
elif (match.group(2) == "-c"):
try:
print(colored("Collocations:", "cyan", "on_grey",
["bold"]), get_collocations(_word_))
except:
print(colored("Collocations:", "cyan",
"on_grey", ["bold"]), colored("100 words/day limit reached.", "white", "on_grey", ["reverse"]))
elif (match.group(2) == "-syn"):
try:
print(colored("Synonyms:", "cyan", "on_grey",
["bold"]), get_synonyms(_word_))
except:
print(colored("Synonyms:", "cyan",
"on_grey", ["bold"]), None)
elif (match.group(2) == "-sen"):
try:
print(colored("Sentences:", "cyan", "on_grey",
["bold"]), get_sentences(_word_))
except:
print(colored("Sentences:", "cyan",
"on_grey", ["bold"]), None)
elif (match.group(2) == "-wf"):
if (match.group(3) == None):
searchWF(_word_)
print(colored("Word formations:",
"cyan", "on_grey", ["bold"]))
print(colored("\tNoun:", "cyan", "on_grey",
["bold"]), ", ".join(word_forms["Noun"]))
print(colored("\tVerb:", "cyan", "on_grey",
["bold"]), ", ".join(word_forms["Verb"]))
print(colored("\tAdjective:", "cyan", "on_grey",
["bold"]), ", ".join(word_forms["Adjective"]))
print(colored("\tAdverb:", "cyan", "on_grey",
["bold"]), ", ".join(word_forms["Adverb"]))
elif(match.group(3) == "-n"):
try:
print(colored("Noun:", "cyan", "on_grey",
["bold"]), get_forms(_word_, "noun"))
except:
print(colored("Noun:", "cyan",
"on_grey", ["bold"]), None)
elif(match.group(3) == "-v"):
try:
print(colored("Verb:", "cyan", "on_grey",
["bold"]), get_forms(_word_, "verb"))
except:
print(colored("Verb:", "cyan",
"on_grey", ["bold"]), None)
elif(match.group(3) == "-adj"):
try:
print(colored("Adjective:", "cyan", "on_grey",
["bold"]), get_forms(_word_, "adjective"))
except:
print(colored("Adjective:", "cyan",
"on_grey", ["bold"]), None)
				elif(match.group(3) == "-adv"):
try:
print(colored("Adverb:", "cyan", "on_grey",
["bold"]), get_forms(_word_, "adverb"))
except:
print(colored("Adverb:", "cyan",
"on_grey", ["bold"]), None)
else:
pass
else:
pass
else:
print("pattern not found")
else:
print(colored("*" * 50, "blue", "on_grey", ["bold"]))
print("\n", colored("Bye!",
"white", "on_grey", ["bold"]), "\n")
break
| 36.92487 | 159 | 0.482144 |
b420dfad5b8216c7575ae1dc332f160af58585c9
| 7,085 |
py
|
Python
|
molsys/util/spacegroups.py
|
MOFplus/molsys_rel
|
ff8b181fefc0ba03c5dd14fe2dde613298155203
|
[
"MIT"
] | null | null | null |
molsys/util/spacegroups.py
|
MOFplus/molsys_rel
|
ff8b181fefc0ba03c5dd14fe2dde613298155203
|
[
"MIT"
] | null | null | null |
molsys/util/spacegroups.py
|
MOFplus/molsys_rel
|
ff8b181fefc0ba03c5dd14fe2dde613298155203
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
@author: julian
Minimum set to convert spacegroup symbols to the respective spacegroup number
Much more has to be added here!
"""
spacegroups={
'P1':1,
'P-1':2,
'P2':3,
'P21':4,
'C2':5,
'Pm':6,
'Pc':7,
'Cm':8,
'Cc':9,
'P2/m':10,
'P21/m':11,
'C2/m':12,
'P2/c':13,
'P21/c':14,
'C2/c':15,
'P222':16,
'P2221':17,
'P21212':18,
'P212121':19,
'C2221':20,
'C222':21,
'F222':22,
'I222':23,
'I212121':24,
'Pmm2':25,
'Pmc21':26,
'Pmc2(1)':26,
'Pcc2':27,
'Pma2':28,
'Pca21':29,
'Pnc2':30,
'Pmn21':31,
'Pba2':32,
'Pna21':33,
'Pnn2':34,
'Cmm2':35,
'Cmc21':36,
'Ccc2':37,
'Amm2':38,
'Abm2':39,
'Ama2':40,
'Aba2':41,
'Fmm2':42,
'Fdd2':43,
'Imm2':44,
'Iba2':45,
'Ima2':46,
'Pmmm':47,
'Pmmmm':47,
'Pnnn':48,
'Pccm':49,
'Pban':50,
'Pmma':51,
'Pmbm':51,
'Pnna':52,
'Pmna':53,
'Pcca':54,
'Pbam':55,
'Pbamm':55,
'Pccn':56,
'Pbcm':57,
'Pnnm':58,
'Pmmn':59,
'Pbcn':60,
'Pbca':61,
'Pnma':62,
'Cmcm':63,
'Cmca':64,
'Cmmm':65,
'Cmmmm':65,
'Cccm':66,
'Cmma':67,
'Ccca':68,
'Fmmm':69,
'Fddd':70,
'Immm':71,
'Ibam':72,
'Ibca':73,
'Imma':74,
'P4':75,
'P41':76,
'P42':77,
'P43':78,
'I4':79,
'I41':80,
'P-4':81,
'I-4':82,
'P4/m':83,
'P42/m':84,
'P4/n':85,
'P42/n':86,
'I4/m':87,
'I41/a':88,
'P422':89,
'P4212':90,
'P4122':91,
'P41212':92,
'P4222':93,
'P42212':94,
'P4322':95,
'P43212':96,
'I422':97,
'I4122':98,
'P4mm':99,
'P4bm':100,
'P42cm':101,
'P42nm':102,
'P4cc':103,
'P4nc':104,
'P42mc':105,
'P42/mc':105,
'P42bc':106,
'I4mm':107,
'I4cm':108,
'I41md':109,
'I41cd':110,
'P-42m':111,
'P-42c':112,
'P-421m':113,
'P-421c':114,
'P-4m2':115,
'P-4c2':116,
'P-4b2':117,
'P-4n2':118,
'I-4m2':119,
'I-4c2':120,
'I-42m':121,
'I-42d':122,
'P4/mmm':123,
'P4/mcc':124,
'P4/nbm':125,
'P4/nnc':126,
'P4/mbm':127,
'P4/mnc':128,
'P4/nmm':129,
'P4/ncc':130,
'P42/mmc':131,
'P42/mcm':132,
'P42/nbc':133,
'P42/nnm':134,
'P42/mbc':135,
'P42/mnm':136,
'P42/nmc':137,
'P42/ncm':138,
'I4/mmm':139,
'I4/mcm':140,
'I41/amd':141,
'I41/acd':142,
'P3':143,
'P31':144,
'P32':145,
'R3':146,
'P-3':147,
'R-3':148,
'P312':149,
'P321':150,
'P3112':151,
'P3121':152,
'P3212':153,
'P3221':154,
'R32':155,
'P3m1':156,
'P31m':157,
'P3c1':158,
'P31c':159,
'R3m':160,
'R3c':161,
'P-31m':162,
'P-31c':163,
'P-3m1':164,
'P-3c1':165,
'R-3m':166,
'R-3c':167,
'P6':168,
'P61':169,
'P65':170,
'P62':171,
'P64':172,
'P63':173,
'P-6':174,
'P6/m':175,
'P63/m':176,
'P622':177,
'P6122':178,
'P6522':179,
'P6222':180,
'P6422':181,
'P6322':182,
'P6mm':183,
'P6cc':184,
'P63cm':185,
'P63mc':186,
'P-6m2':187,
'P-6c2':188,
'P-62m':189,
'P-62c':190,
'P6/mmm':191,
'P6/mcc':192,
'P63/mcm':193,
'P63/mmc':194,
'P23':195,
'F23':196,
'I23':197,
'P213':198,
'I213':199,
'Pm-3':200,
'Pn-3':201,
'Fm-3':202,
'Fd-3':203,
'Im-3':204,
'Pa-3':205,
'Ia-3':206,
'P432':207,
'P4232':208,
'F432':209,
'F4132':210,
'I432':211,
'P4332':212,
'P4132':213,
'I4132':214,
'P-43m':215,
'F4-3m':216,
'F-43m':216,
'I-43m':217,
'P-43n':218,
'F-43c':219,
'I-43d':220,
'Pm-3m':221,
'Pn-3n':222,
'Pm-3n':223,
'Pn-3m':224,
'Fm-3m':225,
'Fm-3c':226,
'Fd-3m':227,
'Fd-3c':228,
'Im-3m':229,
'Ia-3d':230,
'C12/m1':12,
'I12/m1':12,
'I12/a1':15,
'C12/c1':15,
'A12/n1':15,
'P121/c1':14,
'P121/n1':14,
'C1m1':8,
'P12/m1':10,
'P1211':4,
'I2/m':12,
'I2/a':15,
'P21/n':14,
'A2/n':15,
"P 4/m": 83,
"P 4/n": 85,
"P n n a": 52,
"I -4 2 m": 121,
"P n n n": 48,
"P n n m": 58,
"P 43 2 2": 95,
"I -4 2 d": 122,
"P c": 7,
"F d -3": 203,
"P 2/m": 10,
"I 4 c m": 108,
"I 41/a m d": 141,
"P 62 2 2": 180,
"I 41 c d": 110,
"P 2/c": 13,
"P m": 6,
"P 4 b n": 100,
"I 4 2 2": 97,
"P 42 2 2": 93,
"P 3 1 c": 159,
"I 4 m m": 107,
"P 61 2 2": 178,
"I m m 2": 44,
"R 3 2": 155,
"I b a 2": 45,
"P 3 m 1": 156,
"P b a m": 55,
"P b a n": 50,
"P 4/n n c": 126,
"C c c a": 68,
"C m m 2": 35,
"I 41 m d": 109,
"C c c m": 66,
"P 42/n b c": 133,
"P 3": 143,
"P 2": 3,
"P 1": 1,
"P 6": 168,
"P 4": 75,
"P 42/n n m": 134,
"P n -3 n": 222,
"P 64": 172,
"C 2 2 2": 21,
"I -4 m 2": 119,
"P 42 c m": 101,
"P n n 2": 34,
"P 21 21 21": 19,
"A m a 2": 40,
"F m -3 m": 225,
"F m -3 c": 226,
"P 63": 173,
"P -3": 147,
"I -4 c 2": 120,
"P -1": 2,
"P 65": 170,
"P 62": 171,
"C m m a": 67,
"C c c 2": 37,
"P -4": 81,
"C m m m": 65,
"P n -3 m": 224,
"P 61": 169,
"P m -3": 200,
"P b a 2": 32,
"I b a m": 72,
"R 3 m": 160,
"I m m m": 71,
"P -4 2 m": 111,
"P 63/m m c": 194,
"P -4 2 c": 112,
"I 41 3 2": 214,
"P -4 m 2": 115,
"I m m a": 74,
"I 2 2 2": 23,
"I a -3 d": 230,
"P 63 2 2": 182,
"R 3 c": 161,
"P -3 c 1": 165,
"P 3 1 2": 149,
"P 6 2 2": 177,
"P 42/m c m": 132,
"P 4/n c c": 130,
"P 43 21 2": 96,
"P n m a": 62,
"F d -3 c": 228,
"P -6": 174,
"P 42 m c": 105,
"A m m 2": 38,
"I 41 2 2": 98,
"P 42/n c m": 138,
"P 4/m n c": 128,
"R 3": 146,
"P 4 2 2": 89,
"P -3 1 c": 163,
"P -3 1 m": 162,
"I 41": 80,
"I 4 3 2": 211,
"I 4/m m m": 139,
"P 42/n m c": 137,
"P 64 2 2": 181,
"P 63 c m": 185,
"P 42/m n m": 136,
"C m c 21": 36,
"P m -3 n": 223,
"P n a 21": 33,
"P 42 b c": 106,
"P 32 1 2": 153,
"A b m 2": 39,
"I a -3": 206,
"P -4 21 c": 114,
"P 63 m c": 186,
"P 4/m b m": 127,
"P 31 2 1": 152,
"P -6 2 m": 189,
"P -6 2 c": 190,
"P -4 b 2": 117,
"P -4 21 m": 113,
"P 6/m": 175,
"P -6 c 2": 188,
"P 4/n b m": 125,
"I 41/a c d": 142,
"R -3 m": 166,
"P 6/m m m": 191,
"F d -3 m": 227,
"R -3 c": 167,
"P -4 3 m": 215,
"P -4 3 n": 218,
"P 21/m": 11,
"P 4 3 2": 207,
"P 21/c": 14,
"F 4 3 2": 209,
"P 4 n c": 104,
"P 2 2 2": 16,
"F -4 3 m": 216,
"P 3 2 1": 150,
"P 3 1 m": 157,
"P 43 3 2": 212,
"P 41 3 2": 213,
"P 6 m m": 183,
"I 21 21 21": 24,
"F m m 2": 42,
"P m m 2": 25,
"P b c n": 60,
"C 2 2 21": 20,
"P b c m": 57,
"I m -3": 204,
"P 4 21 2": 90,
"P m -3 m": 221,
"P 21 21 2": 18,
"P 42/m m c": 131,
"P b c a": 61,
"P m a 2": 28,
"F d d d": 70,
"P 42 21 2": 94,
"P 41 2 2": 91,
"P m m n": 59,
"P m m m": 47,
"F m m m": 69,
"P -4 n 2": 118,
"P -3 m 1": 164,
"P m m a": 51,
"I 2 3": 197,
"P 32 2 1": 154,
"P 65 2 2": 179,
"P 42/m": 84,
"P m c 21": 26,
"P 42/n": 86,
"P 6 c c": 184,
"P 63/m": 176,
"P 63/m c m": 193,
"P -4 c 2": 116,
"F d d 2": 43,
"P m n a": 53,
"C m c m": 63,
"I b c a": 73,
"F m -3": 202,
"P 21": 4,
"P 21 3": 198,
"P 4 m m": 99,
"P 2 3": 195,
"F -4 3 c": 219,
"I -4 3 d": 220,
"I 4/m": 87,
"P a -3": 205,
"C 2": 5,
"I -4 3 m": 217,
"F 2 3": 196,
"F 2 2 2": 22,
"I 41/a": 88,
"P 4 c c": 103,
"P -6 m 2": 187,
"P n c 2": 30,
"I 4/m c m": 140,
"P 4/m m m": 123,
"P 31 1 2": 151,
"I 21 3": 199,
"P 41 21 2": 92,
"C 2/m": 12,
"P 42 3 2": 208,
"C 2/c": 15,
"P 6/m c c": 192,
"P 31": 144,
"P c c 2": 27,
"P 32": 145,
"P c a 21": 29,
"I 4": 79,
"P 3 c 1": 158,
"P 2 2 21": 17,
"C m": 8,
"I -4": 82,
"P 4/n m m": 129,
"C c": 9,
"R -3": 148,
"I m -3 m": 229,
"P c c n": 56,
"P c c m": 49,
"P 42 n m": 102,
"F 41 3 2": 210,
"A b a 2": 41,
"P 42/m b c": 135,
"P m n 21": 31,
"C m c a": 64,
"P c c a": 54,
"I m a 2": 46,
"P n -3": 201,
"P 41": 76,
"P 42": 77,
"P 43": 78,
"P 4/m c c": 124
}
def get_spacegroup_number(sg_name):
    '''Return the space group number if the symbol is found in the dictionary;
    return None if it is not found.
    :Parameters:
        - sg_name (str): name (symbol) of the space group
    '''
    try:
        sgnum = spacegroups[sg_name]
    except KeyError:
        sgnum = None
return sgnum
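
# Illustrative usage sketch (not part of the original module): the lookup is a
# plain dict access, so the symbol must match a key exactly, in either its
# compact or spaced form.
#
#   assert get_spacegroup_number('Pnma') == 62
#   assert get_spacegroup_number('P 21/c') == 14
#   assert get_spacegroup_number('not-a-symbol') is None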
| 14.05754 | 105 | 0.494707 |
02dcef89bd759f31293e683c65f5a688f1e72d10
| 1,508 |
py
|
Python
|
client2.py
|
MattPChoy/Doorbell
|
fd282f2ccaac741b9e553d05219884efd33df56b
|
[
"MIT"
] | null | null | null |
client2.py
|
MattPChoy/Doorbell
|
fd282f2ccaac741b9e553d05219884efd33df56b
|
[
"MIT"
] | null | null | null |
client2.py
|
MattPChoy/Doorbell
|
fd282f2ccaac741b9e553d05219884efd33df56b
|
[
"MIT"
] | null | null | null |
import io
import socket
import struct
import time
import picamera
client_socket = socket.socket()
client_socket.connect(('192.168.1.42', 8000)) # ADD IP HERE
# Make a file-like object out of the connection
connection = client_socket.makefile('wb')
try:
camera = picamera.PiCamera()
camera.vflip = True
camera.resolution = (500, 480)
# Start a preview and let the camera warm up for 2 seconds
camera.start_preview()
time.sleep(2)
# Note the start time and construct a stream to hold image data
# temporarily (we could write it directly to connection but in this
# case we want to find out the size of each capture first to keep
# our protocol simple)
start = time.time()
stream = io.BytesIO()
for foo in camera.capture_continuous(stream, 'jpeg'):
# Write the length of the capture to the stream and flush to
# ensure it actually gets sent
connection.write(struct.pack('<L', stream.tell()))
connection.flush()
# Rewind the stream and send the image data over the wire
stream.seek(0)
connection.write(stream.read())
        # If we've been capturing for more than 60 seconds, quit
if time.time() - start > 60:
break
# Reset the stream for the next capture
stream.seek(0)
stream.truncate()
# Write a length of zero to the stream to signal we're done
connection.write(struct.pack('<L', 0))
finally:
connection.close()
client_socket.close()
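
# Receiver-side sketch (an assumption, not part of this script): the matching
# server would read a 4-byte little-endian length prefix, then that many JPEG
# bytes, stopping when the length is zero. `server_socket` is hypothetical.
#
#   connection = server_socket.accept()[0].makefile('rb')
#   while True:
#       image_len = struct.unpack('<L', connection.read(struct.calcsize('<L')))[0]
#       if image_len == 0:
#           break
#       jpeg_bytes = connection.read(image_len)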
| 32.782609 | 71 | 0.667109 |
fd38176e71e21efe00c28621b97835993c97e7f7
| 8,522 |
py
|
Python
|
zplib/image/ffmpeg.py
|
zpincus/zplib
|
930b4b88633c95c7f7761d0183aec882484f00bc
|
[
"MIT"
] | 6 |
2017-03-26T09:28:18.000Z
|
2021-07-15T00:09:33.000Z
|
zplib/image/ffmpeg.py
|
zpincus/zplib
|
930b4b88633c95c7f7761d0183aec882484f00bc
|
[
"MIT"
] | null | null | null |
zplib/image/ffmpeg.py
|
zpincus/zplib
|
930b4b88633c95c7f7761d0183aec882484f00bc
|
[
"MIT"
] | 5 |
2015-04-21T02:49:01.000Z
|
2017-11-13T13:29:35.000Z
|
import subprocess
import platform
import numpy
import sys
import json
if platform.system() == 'Windows':
FFMPEG_BIN = 'ffmpeg.exe'
    FFPROBE_BIN = 'ffprobe.exe'
else:
FFMPEG_BIN = 'ffmpeg'
FFPROBE_BIN = 'ffprobe'
BYTEORDERS = {'<':'le', '>':'be', '=':'le' if sys.byteorder == 'little' else 'be'}
def read_video(input, force_grayscale=False):
"""Return iterator over frames from an input video via ffmpeg.
Parameters:
input: filename to open
        force_grayscale: if True, return uint8 grayscale frames; otherwise
            return RGB frames.
"""
ffprobe_command = FFPROBE_BIN + ' -loglevel fatal -select_streams V:0 -show_entries stream=pix_fmt,width,height -print_format json ' + input
probe = subprocess.run(ffprobe_command.split(), stdin=subprocess.DEVNULL,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
if probe.returncode != 0:
raise RuntimeError('Could not read video metadata:\n'+probe.stderr.decode())
metadata = json.loads(probe.stdout.decode())['streams'][0]
pixel_format_in = metadata['pix_fmt']
shape = metadata['width'], metadata['height']
if pixel_format_in.startswith('gray16'):
pixel_format_out = 'gray16'+BYTEORDERS['='] # output system-native endian
dtype = numpy.uint16
elif pixel_format_in == 'gray' or force_grayscale:
pixel_format_out = 'gray'
dtype = numpy.uint8
else:
pixel_format_out = 'rgb24'
dtype = numpy.uint8
shape += (3,)
command = [FFMPEG_BIN,
'-loglevel', 'fatal',
'-nostdin', # do not expect interaction, do not fuss with tty settings
'-i', input,
'-map', '0:V:0', # grab video channel 0, just like ffprobe above
'-f', 'rawvideo', # write raw image data
'-pix_fmt', pixel_format_out,
'-' # pipe output to stdout
]
ffmpeg = subprocess.Popen(command, stdin=subprocess.DEVNULL,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
dtype = numpy.dtype(dtype)
    size = numpy.prod(shape) * dtype.itemsize
while True:
frame = ffmpeg.stdout.read(size)
if len(frame) == 0:
break
yield _get_arr(frame, dtype, shape)
ffmpeg.stdout.close()
ffmpeg.wait()
if ffmpeg.returncode != 0:
raise RuntimeError('Could not read video data:\n'+ffmpeg.stderr.read().decode())
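
# Illustrative usage sketch ('movie.mp4' is a placeholder filename): frames
# arrive as (width, height) uint8/uint16 arrays, or (width, height, 3) for RGB.
#
#   for frame in read_video('movie.mp4', force_grayscale=True):
#       print(frame.shape, frame.dtype)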
def write_video(frame_iterator, framerate, output, preset=None, lossless=False, verbose=True, **h264_opts):
"""Write uint8 RGB or grayscale image frames to a h264-encoded video file
using ffmpeg.
In general, the mp4 container is best here, so the output filename should end
with '.mp4'. Other suffixes will produce different container files.
The yuv420p pixel format is used, which is widely compatible with video decoders.
This pixel format allows (nearly) lossless encoding of grayscale images (with
+/-1 unit quantization errors for 10-20% of pixels), but not for color images.
For maximum compatibility, even image dimensions are required as well.
Parameters:
frame_iterator: yields numpy arrays. Each frame must be the same size and
dtype. Currently, only uint8 grayscale and RGB images are supported.
Images must have even dimensions to ensure maximum compatibility.
framerate: the frames per second for the output movie.
output: the file name to write. (Should end with '.mp4'.)
preset: the ffmpeg preset to use. Defaults to 'medium', or 'ultrafast'
if lossless compression is specified. The other useful preset for
lossless is 'veryslow', to get as good compression as possible.
            For lossy compression, 'faster', 'fast', 'medium', 'slow', and 'slower'
            span a reasonable range.
lossless: if True, use lossless compression. Only valid with grayscale
images.
verbose: if True, print status message while compressing.
**h264_opts: options to be passed to the h264 encoder, from:
https://www.ffmpeg.org/ffmpeg-codecs.html#Options-23
'threads', 'tune', and 'profile' may be especially useful.
"""
frame_iterator = iter(frame_iterator)
first_frame = next(frame_iterator)
assert first_frame.dtype == numpy.uint8
assert first_frame.ndim in (2,3)
if first_frame.ndim == 3:
assert first_frame.shape[2] == 3
pixel_format_in = 'rgb24'
else:
pixel_format_in = 'gray'
assert first_frame.shape[0] % 2 == 0 and first_frame.shape[1] % 2 == 0
if preset is None:
if lossless:
preset = 'ultrafast'
else:
preset = 'medium'
if lossless:
h264_opts['qp'] = '0'
_write_video(first_frame, frame_iterator, framerate, output, codec='libx264',
pixel_format_in=pixel_format_in, pixel_format_out='yuv420p', verbose=verbose,
preset=preset, **h264_opts)
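
# Illustrative usage sketch (synthetic frames, placeholder filename): all
# frames must share shape and dtype and have even dimensions; lossless=True is
# only valid for grayscale input.
#
#   frames = (numpy.zeros((64, 64), dtype=numpy.uint8) for _ in range(30))
#   write_video(frames, framerate=15, output='out.mp4', lossless=True)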
def write_lossless_video(frame_iterator, framerate, output, threads=None, verbose=True):
"""Write uint8 (color/gray) or uint16 (gray) image frames to a lossless
FFV1-encoded video file using ffmpeg.
In general, the mkv container is best here, so the output filename should end
with '.mkv'. Other suffixes will produce different container files.
RGB images can only be uint8, but grayscale images can be uint8 or uint16.
Parameters:
frame_iterator: yields numpy arrays. Each frame must be the same size and
dtype.
framerate: the frames per second for the output movie.
output: the file name to write. (Should end with '.mkv'.)
threads: if not None, the number of threads to use and slices to divide
the image into for compression.
verbose: if True, print status message while compressing.
"""
frame_iterator = iter(frame_iterator)
first_frame = next(frame_iterator)
assert first_frame.dtype in (numpy.uint8, numpy.uint16)
assert first_frame.ndim in (2,3)
if first_frame.ndim == 3:
assert first_frame.shape[2] == 3 and first_frame.dtype == numpy.uint8
pixel_format_in = 'rgb24'
elif first_frame.dtype == numpy.uint8:
pixel_format_in = 'gray'
else:
pixel_format_in = 'gray16'+BYTEORDERS[first_frame.dtype.byteorder]
ffv_args = dict(level='3', g='1', context='1', slicecrc='1')
if threads is not None:
threads = str(threads)
ffv_args['threads'] = threads
ffv_args['slices'] = threads
_write_video(first_frame, frame_iterator, framerate, output, codec='ffv1',
pixel_format_in=pixel_format_in, pixel_format_out=pixel_format_in, verbose=verbose, **ffv_args)
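
# Illustrative usage sketch (synthetic frames, placeholder filename): uint16
# grayscale input is written as FFV1 gray16 without loss.
#
#   frames16 = (numpy.zeros((64, 64), dtype=numpy.uint16) for _ in range(30))
#   write_lossless_video(frames16, framerate=15, output='out.mkv')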
def _write_video(first_frame, frame_iterator, framerate, output, codec, pixel_format_in, pixel_format_out, verbose=False, **codec_opts):
command = [FFMPEG_BIN,
'-y', # (optional) overwrite output file if it exists
'-f', 'rawvideo',
'-video_size', '{}x{}'.format(*first_frame.shape[:2]), # size of one frame
'-pix_fmt', pixel_format_in,
'-framerate', str(framerate), # frames per second
'-i', '-', # The input comes from a pipe
'-an', # Tells FFMPEG not to expect any audio
'-vcodec', codec,
'-pix_fmt', pixel_format_out,
]
for k, v in codec_opts.items():
command += ['-'+k, str(v)]
command.append(output)
stderr = None if verbose else subprocess.DEVNULL
ffmpeg = subprocess.Popen(command, stdin=subprocess.PIPE, stderr=stderr)
ffmpeg.stdin.write(_get_bytes(first_frame))
try:
for i, frame in enumerate(frame_iterator):
if frame.shape != first_frame.shape or frame.dtype != first_frame.dtype:
raise ValueError('Frame {} has unexpected shape/dtype'.format(i+1))
ffmpeg.stdin.write(_get_bytes(frame))
finally:
ffmpeg.stdin.close()
ffmpeg.wait()
if ffmpeg.returncode != 0:
raise RuntimeError('ffmpeg encoding failed')
def _get_bytes(image_arr):
if image_arr.ndim == 2:
return image_arr.tobytes(order='F')
else:
return image_arr.transpose((2,0,1)).tobytes(order='F')
def _get_arr(image_bytes, dtype, shape):
if len(shape) == 2:
# could be uint8 or uint16 -- need to account for itemsize
strides = (dtype.itemsize, shape[0]*dtype.itemsize)
else:
# assume uint8 here
strides = (3, shape[0]*3, 1)
return numpy.ndarray(shape=shape, buffer=image_bytes, dtype=dtype, strides=strides)
| 41.368932 | 144 | 0.661112 |
51f29ab1cbb07f5d0e8a831564cd25b1ebee3264
| 519 |
py
|
Python
|
server/architext/entities/location_save.py
|
JimenaAndrea/architext
|
fb49624f7301902a357815af0ca5d320cfc6ddb6
|
[
"MIT"
] | 3 |
2020-08-02T07:14:25.000Z
|
2021-04-25T12:22:53.000Z
|
server/architext/entities/location_save.py
|
JimenaAndrea/architext
|
fb49624f7301902a357815af0ca5d320cfc6ddb6
|
[
"MIT"
] | 130 |
2020-07-15T12:09:30.000Z
|
2021-05-27T15:02:01.000Z
|
server/architext/entities/location_save.py
|
JimenaAndrea/architext
|
fb49624f7301902a357815af0ca5d320cfc6ddb6
|
[
"MIT"
] | 1 |
2021-06-10T15:51:49.000Z
|
2021-06-10T15:51:49.000Z
|
import mongoengine
class LocationSave(mongoengine.Document):
user = mongoengine.ReferenceField('User', required=True)
world = mongoengine.ReferenceField('World', required=True)
room = mongoengine.ReferenceField('Room', required=True)
def __init__(self, *args, save_on_creation=True, **kwargs):
super().__init__(*args, **kwargs)
if self.id is None and save_on_creation:
self.save()
def change_room(self, new_room):
self.room = new_room
self.save()
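
# Illustrative usage sketch (`some_user`, `some_world`, `lobby` and `kitchen`
# are hypothetical documents): a save is persisted on creation by default and
# then updated in place via change_room().
#
#   save = LocationSave(user=some_user, world=some_world, room=lobby)
#   save.change_room(kitchen)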
| 34.6 | 63 | 0.672447 |
814dd9d093a729799dbfe0a416b260ae185eb61b
| 24,204 |
py
|
Python
|
google/cloud/security/common/data_access/sql_queries/create_tables.py
|
pombredanne/forseti-security
|
68a9a88243460065e00b6c131b3d9abd0331fb37
|
[
"Apache-2.0"
] | 1 |
2018-03-26T08:15:21.000Z
|
2018-03-26T08:15:21.000Z
|
google/cloud/security/common/data_access/sql_queries/create_tables.py
|
pombredanne/forseti-security
|
68a9a88243460065e00b6c131b3d9abd0331fb37
|
[
"Apache-2.0"
] | null | null | null |
google/cloud/security/common/data_access/sql_queries/create_tables.py
|
pombredanne/forseti-security
|
68a9a88243460065e00b6c131b3d9abd0331fb37
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2017 The Forseti Security Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""SQL queries to create Cloud SQL tables."""
CREATE_APPENGINE_TABLE = """
CREATE TABLE `{0}` (
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT,
`project_id` varchar(255) DEFAULT NULL,
`name` varchar(255) DEFAULT NULL,
`app_id` varchar(255) DEFAULT NULL,
`dispatch_rules` json DEFAULT NULL,
`auth_domain` varchar(255) DEFAULT NULL,
`location_id` varchar(255) DEFAULT NULL,
`code_bucket` varchar(255) DEFAULT NULL,
`default_cookie_expiration` varchar(255) DEFAULT NULL,
`serving_status` varchar(255) DEFAULT NULL,
`default_hostname` varchar(255) DEFAULT NULL,
`default_bucket` varchar(255) DEFAULT NULL,
`iap` json DEFAULT NULL,
`gcr_domain` varchar(255) DEFAULT NULL,
`raw_application` json DEFAULT NULL,
PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
"""
CREATE_APPENGINE_SERVICES_TABLE = """
CREATE TABLE `{0}` (
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT,
`project_id` varchar(255) DEFAULT NULL,
`app_id` varchar(255) DEFAULT NULL,
`service_id` varchar(255) DEFAULT NULL,
`service` json DEFAULT NULL,
PRIMARY KEY (`id`),
UNIQUE KEY `service_key` (`app_id`, `service_id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
"""
CREATE_APPENGINE_VERSIONS_TABLE = """
CREATE TABLE `{0}` (
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT,
`project_id` varchar(255) DEFAULT NULL,
`app_id` varchar(255) DEFAULT NULL,
`service_id` varchar(255) DEFAULT NULL,
`version_id` varchar(255) DEFAULT NULL,
`version` json DEFAULT NULL,
PRIMARY KEY (`id`),
UNIQUE KEY `version_key` (`app_id`, `service_id`, `version_id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
"""
CREATE_APPENGINE_INSTANCES_TABLE = """
CREATE TABLE `{0}` (
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT,
`project_id` varchar(255) DEFAULT NULL,
`app_id` varchar(255) DEFAULT NULL,
`service_id` varchar(255) DEFAULT NULL,
`version_id` varchar(255) DEFAULT NULL,
`instance_id` varchar(255) DEFAULT NULL,
`instance` json DEFAULT NULL,
PRIMARY KEY (`id`),
UNIQUE KEY `instance_key` (`app_id`, `service_id`, `version_id`,
`instance_id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
"""
CREATE_BACKEND_SERVICES_TABLE = """
CREATE TABLE `{0}` (
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT,
`project_id` varchar(255) DEFAULT NULL,
`affinity_cookie_ttl_sec` int DEFAULT NULL,
`backends` json DEFAULT NULL,
`cdn_policy` json DEFAULT NULL,
`connection_draining` json DEFAULT NULL,
`creation_timestamp` datetime DEFAULT NULL,
`description` varchar(512) DEFAULT NULL,
`enable_cdn` bool DEFAULT NULL,
`health_checks` json DEFAULT NULL,
`iap` json DEFAULT NULL,
`load_balancing_scheme` varchar(255) DEFAULT NULL,
`name` varchar(255) DEFAULT NULL,
`port_name` varchar(255) DEFAULT NULL,
`port` int DEFAULT NULL,
`protocol` varchar(255) DEFAULT NULL,
`region` varchar(255) DEFAULT NULL,
`session_affinity` varchar(255) DEFAULT NULL,
`timeout_sec` varchar(255) DEFAULT NULL,
`raw_backend_service` json DEFAULT NULL,
PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
"""
CREATE_BIGQUERY_DATASETS_TABLE = """
CREATE TABLE `{0}` (
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT,
`project_id` varchar(255) DEFAULT NULL,
`dataset_id` varchar(255) DEFAULT NULL,
`access_domain` varchar(255) DEFAULT NULL,
`access_user_by_email` varchar(255) DEFAULT NULL,
`access_special_group` varchar(255) DEFAULT NULL,
`access_group_by_email` varchar(255) DEFAULT NULL,
`role` varchar(255) DEFAULT NULL,
`access_view_project_id` varchar(255) DEFAULT NULL,
`access_view_table_id` varchar(255) DEFAULT NULL,
`access_view_dataset_id` varchar(255) DEFAULT NULL,
`raw_access_map` json DEFAULT NULL,
PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
"""
CREATE_BUCKETS_ACL_TABLE = """
CREATE TABLE `{0}` (
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT,
`bucket` varchar(255) DEFAULT NULL,
`domain` varchar(255) DEFAULT NULL,
`email` varchar(255) DEFAULT NULL,
`entity` varchar(255) DEFAULT NULL,
`entity_id` varchar(255) DEFAULT NULL,
`acl_id` varchar(255) DEFAULT NULL,
`kind` varchar(255) DEFAULT NULL,
`project_team` json DEFAULT NULL,
`role` varchar(255) DEFAULT NULL,
`bucket_acl_selflink` varchar(255) DEFAULT NULL,
`raw_bucket_acl` json DEFAULT NULL,
PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
"""
CREATE_BUCKETS_TABLE = """
CREATE TABLE `{0}` (
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT,
`project_number` bigint(20) NOT NULL,
`bucket_id` varchar(255) DEFAULT NULL,
`bucket_name` varchar(255) DEFAULT NULL,
`bucket_kind` varchar(255) DEFAULT NULL,
`bucket_storage_class` varchar(255) DEFAULT NULL,
`bucket_location` varchar(255) DEFAULT NULL,
`bucket_create_time` datetime DEFAULT NULL,
`bucket_update_time` datetime DEFAULT NULL,
`bucket_selflink` varchar(255) DEFAULT NULL,
`bucket_lifecycle_raw` json DEFAULT NULL,
`raw_bucket` json DEFAULT NULL,
PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
"""
CREATE_CLOUDSQL_INSTANCES_TABLE = """
CREATE TABLE {0} (
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT,
`project_number` bigint(20) NOT NULL,
`name` varchar(255) DEFAULT NULL,
`project` varchar(255) DEFAULT NULL,
`backend_type` varchar(255) DEFAULT NULL,
`connection_name` varchar(255) DEFAULT NULL,
`current_disk_size` bigint DEFAULT NULL,
`database_version` varchar(255) DEFAULT NULL,
`failover_replica_available` varchar(255) DEFAULT NULL,
`failover_replica_name` varchar(255) DEFAULT NULL,
`instance_type` varchar(255) DEFAULT NULL,
`ipv6_address` varchar(255) DEFAULT NULL,
`kind` varchar(255) DEFAULT NULL,
`master_instance_name` varchar(255) DEFAULT NULL,
`max_disk_size` bigint DEFAULT NULL,
`on_premises_configuration_host_port` varchar(255) DEFAULT NULL,
`on_premises_configuration_kind` varchar(255) DEFAULT NULL,
`region` varchar(255) DEFAULT NULL,
`replica_configuration` json DEFAULT NULL,
`replica_names` json DEFAULT NULL,
`self_link` varchar(255) DEFAULT NULL,
`server_ca_cert` json DEFAULT NULL,
`service_account_email_address` varchar(255) DEFAULT NULL,
`settings_activation_policy` varchar(255) DEFAULT NULL,
`settings_authorized_gae_applications` json DEFAULT NULL,
`settings_availability_type` varchar(255) DEFAULT NULL,
`settings_backup_configuration_binary_log_enabled` varchar(255) DEFAULT NULL,
`settings_backup_configuration_enabled` varchar(255) DEFAULT NULL,
`settings_backup_configuration_kind` varchar(255) DEFAULT NULL,
`settings_backup_configuration_start_time` varchar(255) DEFAULT NULL,
`settings_crash_safe_replication_enabled` varchar(255) DEFAULT NULL,
`settings_data_disk_size_gb` bigint DEFAULT NULL,
`settings_data_disk_type` varchar(255) DEFAULT NULL,
`settings_database_flags` json DEFAULT NULL,
`settings_database_replication_enabled` varchar(255) DEFAULT NULL,
`settings_ip_configuration_ipv4_enabled` varchar(255) DEFAULT NULL,
`settings_ip_configuration_require_ssl` varchar(255) DEFAULT NULL,
`settings_kind` varchar(255) DEFAULT NULL,
`settings_labels` json DEFAULT NULL,
`settings_location_preference_follow_gae_application` varchar(255) DEFAULT NULL,
`settings_location_preference_kind` varchar(255) DEFAULT NULL,
`settings_location_preference_zone` varchar(255) DEFAULT NULL,
`settings_maintenance_window` json DEFAULT NULL,
`settings_pricing_plan` varchar(255) DEFAULT NULL,
`settings_replication_type` varchar(255) DEFAULT NULL,
`settings_settings_version` bigint DEFAULT NULL,
`settings_storage_auto_resize` varchar(255) DEFAULT NULL,
`settings_storage_auto_resize_limit` bigint DEFAULT NULL,
`settings_tier` varchar(255) DEFAULT NULL,
`state` varchar(255) DEFAULT NULL,
`suspension_reason` json DEFAULT NULL,
`raw_cloudsql_instance` json DEFAULT NULL,
PRIMARY KEY (id)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
"""
CREATE_CLOUDSQL_IPADDRESSES_TABLE = """
CREATE TABLE {0} (
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT,
`project_number` bigint(20) NOT NULL,
`instance_name` varchar(255) DEFAULT NULL,
`type` varchar(255) DEFAULT NULL,
`ip_address` varchar(255) DEFAULT NULL,
`time_to_retire` datetime DEFAULT NULL,
PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
"""
CREATE_CLOUDSQL_IPCONFIGURATION_AUTHORIZEDNETWORKS = """
CREATE TABLE {0} (
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT,
`project_number` bigint(20) NOT NULL,
`instance_name` varchar(255) DEFAULT NULL,
`kind` varchar(255) DEFAULT NULL,
`name` varchar(255) DEFAULT NULL,
`value` varchar(255) DEFAULT NULL,
`expiration_time` datetime DEFAULT NULL,
PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
"""
CREATE_FIREWALL_RULES_TABLE = """
CREATE TABLE `{0}` (
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT,
`firewall_rule_id` bigint(20) unsigned NOT NULL,
`project_id` varchar(255) NOT NULL,
`firewall_rule_name` varchar(255) DEFAULT NULL,
`firewall_rule_description` varchar(512) DEFAULT NULL,
`firewall_rule_kind` varchar(255) DEFAULT NULL,
`firewall_rule_network` varchar(255) DEFAULT NULL,
`firewall_rule_priority` smallint(5) unsigned,
`firewall_rule_direction` varchar(255) DEFAULT NULL,
`firewall_rule_source_ranges` json DEFAULT NULL,
`firewall_rule_destination_ranges` json DEFAULT NULL,
`firewall_rule_source_tags` json DEFAULT NULL,
`firewall_rule_source_service_accounts` json DEFAULT NULL,
`firewall_rule_target_service_accounts` json DEFAULT NULL,
`firewall_rule_target_tags` json DEFAULT NULL,
`firewall_rule_allowed` json DEFAULT NULL,
`firewall_rule_denied` json DEFAULT NULL,
`firewall_rule_self_link` varchar(255) DEFAULT NULL,
`firewall_rule_create_time` datetime(3) DEFAULT NULL,
`raw_firewall_rule` json DEFAULT NULL,
PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
"""
CREATE_FOLDER_IAM_POLICIES_TABLE = """
CREATE TABLE `{0}` (
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT,
`folder_id` bigint(20) DEFAULT NULL,
`role` varchar(255) DEFAULT NULL,
`member_type` varchar(255) DEFAULT NULL,
`member_name` varchar(255) DEFAULT NULL,
`member_domain` varchar(255) DEFAULT NULL,
PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
"""
CREATE_FOLDERS_TABLE = """
CREATE TABLE `{0}` (
`folder_id` bigint(20) unsigned NOT NULL,
`name` varchar(255) NOT NULL,
`display_name` varchar(255) DEFAULT NULL,
`lifecycle_state` enum('ACTIVE','DELETE_REQUESTED',
'DELETED','LIFECYCLE_STATE_UNSPECIFIED') DEFAULT NULL,
`parent_type` varchar(255) DEFAULT NULL,
`parent_id` varchar(255) DEFAULT NULL,
`raw_folder` json DEFAULT NULL,
`create_time` datetime DEFAULT NULL,
PRIMARY KEY (`folder_id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
"""
CREATE_FORWARDING_RULES_TABLE = """
CREATE TABLE `{0}` (
`id` bigint(20) unsigned NOT NULL,
`project_id` varchar(255) NOT NULL,
`creation_timestamp` datetime DEFAULT NULL,
`name` varchar(255) DEFAULT NULL,
`description` varchar(512) DEFAULT NULL,
`region` varchar(255) DEFAULT NULL,
`ip_address` varchar(255) DEFAULT NULL,
`ip_protocol` enum('TCP','UDP','ESP','AH','SCTP','ICMP') DEFAULT NULL,
`port_range` varchar(255) DEFAULT NULL,
`ports` json DEFAULT NULL,
`target` varchar(255) DEFAULT NULL,
`load_balancing_scheme` enum('INTERNAL','EXTERNAL') DEFAULT NULL,
`subnetwork` varchar(255) DEFAULT NULL,
`network` varchar(255) DEFAULT NULL,
`backend_service` varchar(255) DEFAULT NULL,
`raw_forwarding_rule` json DEFAULT NULL,
PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
"""
CREATE_KE_TABLE = """
CREATE TABLE `{0}` (
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT,
`project_id` varchar(255) DEFAULT NULL,
`addons_config` json DEFAULT NULL,
`cluster_ipv4_cidr` varchar(255) DEFAULT NULL,
`create_time` varchar(255) DEFAULT NULL, # Handle RFC3339 (timezone)
`current_master_version` varchar(255) DEFAULT NULL,
`current_node_count` int DEFAULT NULL,
`current_node_version` varchar(255) DEFAULT NULL,
`endpoint` varchar(255) DEFAULT NULL,
`initial_cluster_version` varchar(255) DEFAULT NULL,
`instance_group_urls` varchar(255) DEFAULT NULL,
`legacy_abac` json DEFAULT NULL,
`locations` varchar(255) DEFAULT NULL,
`logging_service` varchar(255) DEFAULT NULL,
`monitoring_service` varchar(255) DEFAULT NULL,
`name` varchar(255) DEFAULT NULL,
`network` varchar(255) DEFAULT NULL,
`node_config` json DEFAULT NULL,
`node_ipv4_cidr_size` int DEFAULT NULL,
`node_pools` json DEFAULT NULL,
`self_link` varchar(255) DEFAULT NULL,
`services_ipv4_cidr` varchar(255) DEFAULT NULL,
`status` varchar(255) DEFAULT NULL,
`subnetwork` varchar(255) DEFAULT NULL,
`zone` varchar(255) DEFAULT NULL,
`server_config` json DEFAULT NULL,
`raw_cluster` json DEFAULT NULL,
PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
"""
# TODO: Add a RAW_GROUP_MEMBERS_TABLE.
CREATE_GROUP_MEMBERS_TABLE = """
CREATE TABLE `{0}` (
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT,
`group_id` varchar(255) DEFAULT NULL,
`member_kind` varchar(255) DEFAULT NULL,
`member_role` varchar(255) DEFAULT NULL,
`member_type` varchar(255) DEFAULT NULL,
`member_status` varchar(255) DEFAULT NULL,
`member_id` varchar(255) DEFAULT NULL,
`member_email` varchar(255) DEFAULT NULL,
`raw_member` json DEFAULT NULL,
PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
"""
CREATE_GROUPS_TABLE = """
CREATE TABLE `{0}` (
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT,
`group_id` varchar(255) DEFAULT NULL,
`group_email` varchar(255) DEFAULT NULL,
`group_kind` varchar(255) DEFAULT NULL,
`direct_member_count` bigint(20) DEFAULT NULL,
`raw_group` json DEFAULT NULL,
PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
"""
CREATE_INSTANCES_TABLE = """
CREATE TABLE `{0}` (
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT,
`project_id` varchar(255) DEFAULT NULL,
`can_ip_forward` bool DEFAULT NULL,
`cpu_platform` varchar(255) DEFAULT NULL,
`creation_timestamp` datetime DEFAULT NULL,
`description` varchar(512) DEFAULT NULL,
`disks` json DEFAULT NULL,
`machine_type` varchar(255) DEFAULT NULL,
`metadata` json DEFAULT NULL,
`name` varchar(255) DEFAULT NULL,
`network_interfaces` json DEFAULT NULL,
`scheduling` json DEFAULT NULL,
`service_accounts` json DEFAULT NULL,
`status` varchar(255) DEFAULT NULL,
`status_message` varchar(255) DEFAULT NULL,
`tags` json DEFAULT NULL,
`zone` varchar(255) DEFAULT NULL,
`raw_instance` json DEFAULT NULL,
PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
"""
CREATE_INSTANCE_GROUPS_TABLE = """
CREATE TABLE `{0}` (
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT,
`project_id` varchar(255) DEFAULT NULL,
`creation_timestamp` datetime DEFAULT NULL,
`description` varchar(512) DEFAULT NULL,
`instance_urls` json DEFAULT NULL,
`name` varchar(255) DEFAULT NULL,
`named_ports` json DEFAULT NULL,
`network` varchar(255) DEFAULT NULL,
`region` varchar(255) DEFAULT NULL,
`size` int DEFAULT NULL,
`subnetwork` varchar(255) DEFAULT NULL,
`zone` varchar(255) DEFAULT NULL,
`raw_instance_group` json DEFAULT NULL,
PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
"""
CREATE_INSTANCE_TEMPLATES_TABLE = """
CREATE TABLE `{0}` (
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT,
`project_id` varchar(255) DEFAULT NULL,
`creation_timestamp` datetime DEFAULT NULL,
`description` varchar(512) DEFAULT NULL,
`name` varchar(255) DEFAULT NULL,
`properties` json DEFAULT NULL,
`raw_instance_template` json DEFAULT NULL,
PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
"""
CREATE_INSTANCE_GROUP_MANAGERS_TABLE = """
CREATE TABLE `{0}` (
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT,
`project_id` varchar(255) DEFAULT NULL,
`base_instance_name` varchar(255) DEFAULT NULL,
`creation_timestamp` datetime DEFAULT NULL,
`current_actions` json DEFAULT NULL,
`description` varchar(512) DEFAULT NULL,
`instance_group` varchar(255) DEFAULT NULL,
`instance_template` varchar(255) DEFAULT NULL,
`name` varchar(255) DEFAULT NULL,
`named_ports` json DEFAULT NULL,
`region` varchar(255) DEFAULT NULL,
`target_pools` json DEFAULT NULL,
`target_size` int DEFAULT NULL,
`zone` varchar(255) DEFAULT NULL,
`raw_instance_group_manager` json DEFAULT NULL,
PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
"""
CREATE_ORG_IAM_POLICIES_TABLE = """
CREATE TABLE `{0}` (
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT,
`org_id` bigint(20) DEFAULT NULL,
`role` varchar(255) DEFAULT NULL,
`member_type` varchar(255) DEFAULT NULL,
`member_name` varchar(255) DEFAULT NULL,
`member_domain` varchar(255) DEFAULT NULL,
PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
"""
CREATE_ORGANIZATIONS_TABLE = """
CREATE TABLE `{0}` (
`org_id` bigint(20) unsigned NOT NULL,
`name` varchar(255) NOT NULL,
`display_name` varchar(255) DEFAULT NULL,
`lifecycle_state` enum('LIFECYCLE_STATE_UNSPECIFIED','ACTIVE',
'DELETE_REQUESTED', 'DELETED') NOT NULL,
`raw_org` json DEFAULT NULL,
`creation_time` datetime DEFAULT NULL,
PRIMARY KEY (`org_id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
"""
CREATE_PROJECT_IAM_POLICIES_TABLE = """
CREATE TABLE `{0}` (
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT,
`project_number` bigint(20) DEFAULT NULL,
`role` varchar(255) DEFAULT NULL,
`member_type` varchar(255) DEFAULT NULL,
`member_name` varchar(255) DEFAULT NULL,
`member_domain` varchar(255) DEFAULT NULL,
PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
"""
CREATE_PROJECT_TABLE = """
CREATE TABLE `{0}` (
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT,
`project_number` bigint(20) NOT NULL,
`project_id` varchar(255) NOT NULL,
`project_name` varchar(255) DEFAULT NULL,
`lifecycle_state` enum('LIFECYCLE_STATE_UNSPECIFIED','ACTIVE',
'DELETE_REQUESTED','DELETED') NOT NULL,
`parent_type` varchar(255) DEFAULT NULL,
`parent_id` varchar(255) DEFAULT NULL,
`raw_project` json DEFAULT NULL,
`create_time` datetime DEFAULT NULL,
PRIMARY KEY (`id`),
UNIQUE KEY `project_id_UNIQUE` (`project_id`),
UNIQUE KEY `project_number_UNIQUE` (`project_number`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
"""
CREATE_RAW_BUCKETS_TABLE = """
CREATE TABLE `{0}` (
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT,
`project_number` bigint(20) DEFAULT NULL,
`buckets` json DEFAULT NULL,
PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
"""
CREATE_RAW_FOLDER_IAM_POLICIES_TABLE = """
CREATE TABLE `{0}` (
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT,
`folder_id` bigint(20) DEFAULT NULL,
`iam_policy` json DEFAULT NULL,
PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
"""
CREATE_RAW_ORG_IAM_POLICIES_TABLE = """
CREATE TABLE `{0}` (
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT,
`org_id` bigint(20) DEFAULT NULL,
`iam_policy` json DEFAULT NULL,
PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
"""
CREATE_RAW_PROJECT_IAM_POLICIES_TABLE = """
CREATE TABLE `{0}` (
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT,
`project_number` bigint(20) DEFAULT NULL,
`iam_policy` json DEFAULT NULL,
PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
"""
CREATE_SERVICE_ACCOUNTS_TABLE = """
CREATE TABLE `{0}` (
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT,
`project_id` varchar(255) DEFAULT NULL,
`name` varchar(255) DEFAULT NULL,
`email` varchar(255) DEFAULT NULL,
`oauth2_client_id` varchar(255) DEFAULT NULL,
`account_keys` json DEFAULT NULL,
`raw_service_account` json DEFAULT NULL,
PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
"""
# TODO: define the violation_type enum as a list
CREATE_VIOLATIONS_TABLE = """
CREATE TABLE `{0}` (
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT,
`violation_hash` varchar(255) NOT NULL,
`resource_type` varchar(255) NOT NULL,
`resource_id` varchar(255) NOT NULL,
`rule_name` varchar(255) DEFAULT NULL,
`rule_index` int DEFAULT NULL,
`violation_type` enum('UNSPECIFIED',
'ADDED','REMOVED',
'BIGQUERY_VIOLATION',
'BLACKLIST_VIOLATION',
'BUCKET_VIOLATION',
'CLOUD_SQL_VIOLATION',
'FIREWALL_BLACKLIST_VIOLATION',
'FIREWALL_MATCHES_VIOLATION',
'FIREWALL_REQUIRED_VIOLATION',
'FIREWALL_WHITELIST_VIOLATION',
'FORWARDING_RULE_VIOLATION',
'KE_VERSION_VIOLATION',
'GROUP_VIOLATION',
'IAP_VIOLATION',
'INSTANCE_NETWORK_INTERFACE_VIOLATION') NOT NULL,
`violation_data` json DEFAULT NULL,
`created_at_datetime` datetime DEFAULT NULL,
PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
"""
| 41.23339 | 88 | 0.651504 |
331e7484b3bae0322b20d327f133c5d558450e9c
| 7,106 |
py
|
Python
|
test/test_report.py
|
masino-lab/fairMLHealth
|
943ffed5f57997401823bd2afc257f34f76ea157
|
[
"MIT"
] | 19 |
2020-10-29T10:14:59.000Z
|
2022-03-20T06:27:35.000Z
|
test/test_report.py
|
masino-lab/fairMLHealth
|
943ffed5f57997401823bd2afc257f34f76ea157
|
[
"MIT"
] | 52 |
2020-10-14T19:21:27.000Z
|
2021-09-15T19:01:52.000Z
|
test/test_report.py
|
masino-lab/fairMLHealth
|
943ffed5f57997401823bd2afc257f34f76ea157
|
[
"MIT"
] | 9 |
2020-12-02T21:40:27.000Z
|
2021-11-01T18:09:10.000Z
|
"""
"""
from fairmlhealth import report
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import BernoulliNB
from sklearn.tree import DecisionTreeClassifier
import pytest
import pandas as pd
from .__utils import synth_dataset
import fairmlhealth.__validation as valid
@pytest.fixture(scope="class")
def load_data(request):
N = 128
df = synth_dataset(N)
X = df[["A", "B", "C", "E", "F", "prtc_attr", "other"]]
y = pd.Series(
(X["F"] * X["other"]).values + np.random.randint(0, 2, N), name="y"
).clip(upper=1)
X_train, y_train = X.iloc[0 : int(N / 4)], y.iloc[0 : int(N / 4)]
X_test, y_test = X.iloc[int(N / 4) : N], y.iloc[int(N / 4) : N]
# Train models
model_1 = BernoulliNB().fit(X_train, y_train)
model_2 = DecisionTreeClassifier().fit(X_train, y_train)
model_3 = DecisionTreeClassifier().fit(X_train.to_numpy(), y_train.to_numpy())
# Set test attributes
request.cls.X = X_test
request.cls.y = y_test
request.cls.prtc_attr = X_test["prtc_attr"]
request.cls.model_dict = {0: model_1, 1: model_2, 2: model_3}
yield
@pytest.mark.usefixtures("load_data")
class TestCompareFunction:
""" Test proper functioning of the compare function. Result
should be a pandas dataframe
"""
def is_result_valid(self, result):
        if not (isinstance(result, pd.DataFrame) and result.shape[0] > 0):
raise AssertionError("Invalid Result")
def test_single_dataInputs(self):
result = report.compare(
self.X, self.y, self.prtc_attr, self.model_dict, flag_oor=False
)
self.is_result_valid(result)
def test_model_list(self):
result = report.compare(
self.X, self.y, self.prtc_attr, [self.model_dict[0]], flag_oor=False
)
self.is_result_valid(result)
def test_mixed_groupings(self):
result = report.compare(
[self.X, self.X],
self.y,
self.prtc_attr,
[self.model_dict[0], self.model_dict[1]],
flag_oor=False,
)
self.is_result_valid(result)
def test_with_protected_attributes(self):
result = report.compare(
[self.X, self.X],
[self.y, self.y],
[self.prtc_attr, self.prtc_attr],
[self.model_dict[0], self.model_dict[1]],
flag_oor=False,
)
self.is_result_valid(result)
def test_preds_not_models(self):
result = report.compare(
[self.X, self.X],
self.y,
self.prtc_attr,
predictions=[self.y, self.y],
flag_oor=False,
)
self.is_result_valid(result)
def test_preds_and_probs(self):
result = report.compare(
[self.X, self.X],
self.y,
self.prtc_attr,
predictions=[self.y, self.y],
probabilities=[self.y, self.y],
flag_oor=False,
)
self.is_result_valid(result)
def test_multiple_calls(self):
args = (self.X, self.y, self.prtc_attr, self.model_dict[0])
_ = report.compare(*args, flag_oor=False)
result = report.compare(*args, flag_oor=False)
self.is_result_valid(result)
def test_embeddedHTML_noFlag(self):
result = report.compare(
[self.X, self.X],
self.y,
self.prtc_attr,
predictions=[self.y, self.y],
probabilities=[self.y, self.y],
flag_oor=False,
output_type="html",
)
assert isinstance(result, str)
def test_embeddedHTML_withFlag_valid(self):
result = report.compare(
[self.X, self.X],
self.y,
self.prtc_attr,
predictions=[self.y, self.y],
probabilities=[self.y, self.y],
flag_oor=True,
output_type="html",
)
assert isinstance(result, str)
def test_outputType_withFlag_invalid(self):
with pytest.raises(ValueError):
_ = report.compare(
[self.X, self.X],
self.y,
self.prtc_attr,
predictions=[self.y, self.y],
probabilities=[self.y, self.y],
flag_oor=True,
output_type="dataframe",
)
@pytest.mark.usefixtures("load_data")
class TestCompareValidations:
""" Validations for the compare function
"""
def test_mismatch_input_numbers(self):
with pytest.raises(valid.ValidationError):
report.compare(
{0: self.X, 1: self.X},
{0: self.y, 1: self.y},
{1: self.prtc_attr},
self.model_dict,
flag_oor=False,
)
def test_missing_models(self):
with pytest.raises(valid.ValidationError):
report.compare(
{0: self.X, 1: self.X},
{0: self.y, 1: self.y},
{0: self.prtc_attr, 1: self.prtc_attr},
{0: None, 1: None},
flag_oor=False,
)
def test_invalid_X_member(self):
with pytest.raises(valid.ValidationError):
report.compare(
{0: self.X, 1: self.X},
{0: self.y, 1: self.y},
{0: self.prtc_attr, 1: self.prtc_attr},
{0: self.y, 1: self.y},
flag_oor=False,
)
def test_invalid_y_member(self):
with pytest.raises(valid.ValidationError):
report.compare(
{0: self.X, 1: None},
{0: self.y, 1: self.y},
{0: self.prtc_attr, 1: self.prtc_attr},
self.model_dict,
flag_oor=False,
)
def test_invalid_prtc_member(self):
with pytest.raises(valid.ValidationError):
report.compare(
{0: self.X, 1: self.X},
{0: self.y, 1: None},
{0: self.prtc_attr, 1: self.prtc_attr},
self.model_dict,
flag_oor=False,
)
def test_invalid_model_member(self):
with pytest.raises(valid.ValidationError):
report.compare(
{0: self.X, 1: self.X},
{0: self.y, 1: self.y},
{0: self.prtc_attr, 1: None},
self.model_dict,
flag_oor=False,
)
def test_differing_keys(self):
with pytest.raises(valid.ValidationError):
report.compare(
{5: self.X, 6: self.X},
{0: self.y, 1: self.y},
{0: self.prtc_attr, 1: self.prtc_attr},
self.model_dict,
flag_oor=False,
)
def test_missing_keys(self):
with pytest.raises(valid.ValidationError):
report.compare(
{0: self.X, 1: self.X},
{0: self.y, 1: self.y},
None,
self.model_dict,
flag_oor=False,
)
| 30.62931 | 82 | 0.5387 |