Dataset schema (one row per source file; "⌀" marks columns that may contain nulls):

| column | type | min | max |
|---|---|---|---|
| hexsha | stringlengths | 40 | 40 |
| size | int64 | 3 | 1.03M |
| ext | stringclasses (10 values) | | |
| lang | stringclasses (1 value) | | |
| max_stars_repo_path | stringlengths | 3 | 972 |
| max_stars_repo_name | stringlengths | 6 | 130 |
| max_stars_repo_head_hexsha | stringlengths | 40 | 78 |
| max_stars_repo_licenses | sequencelengths | 1 | 10 |
| max_stars_count ⌀ | int64 | 1 | 191k |
| max_stars_repo_stars_event_min_datetime ⌀ | stringlengths | 24 | 24 |
| max_stars_repo_stars_event_max_datetime ⌀ | stringlengths | 24 | 24 |
| max_issues_repo_path | stringlengths | 3 | 972 |
| max_issues_repo_name | stringlengths | 6 | 130 |
| max_issues_repo_head_hexsha | stringlengths | 40 | 78 |
| max_issues_repo_licenses | sequencelengths | 1 | 10 |
| max_issues_count ⌀ | int64 | 1 | 116k |
| max_issues_repo_issues_event_min_datetime ⌀ | stringlengths | 24 | 24 |
| max_issues_repo_issues_event_max_datetime ⌀ | stringlengths | 24 | 24 |
| max_forks_repo_path | stringlengths | 3 | 972 |
| max_forks_repo_name | stringlengths | 6 | 130 |
| max_forks_repo_head_hexsha | stringlengths | 40 | 78 |
| max_forks_repo_licenses | sequencelengths | 1 | 10 |
| max_forks_count ⌀ | int64 | 1 | 105k |
| max_forks_repo_forks_event_min_datetime ⌀ | stringlengths | 24 | 24 |
| max_forks_repo_forks_event_max_datetime ⌀ | stringlengths | 24 | 24 |
| content | stringlengths | 3 | 1.03M |
| avg_line_length | float64 | 1.13 | 941k |
| max_line_length | int64 | 2 | 941k |
| alphanum_fraction | float64 | 0 | 1 |
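A hedged sketch of reading rows with this schema via the Hugging Face `datasets` library (the dataset id is a placeholder; this page does not name the dataset, and `take` assumes a reasonably recent `datasets` release):

    from datasets import load_dataset

    ds = load_dataset("org/dataset-id", split="train", streaming=True)
    for row in ds.take(3):
        print(row["max_stars_repo_name"], row["size"], row["ext"])
        print(row["content"][:200])  # first 200 characters of the file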
578eb4b2256c789d7177ecf50be80f3f522b2335 | 3,457 | py | Python | upstat.py | requiem202/dopa | c0ccf8c4a2b2aa613417d0b4a118597c5b4f80b4 | ["MIT"] | null | null | null | upstat.py | requiem202/dopa | c0ccf8c4a2b2aa613417d0b4a118597c5b4f80b4 | ["MIT"] | null | null | null | upstat.py | requiem202/dopa | c0ccf8c4a2b2aa613417d0b4a118597c5b4f80b4 | ["MIT"] | null | null | null |
# http://stat.dopa.go.th/stat/statnew/upstat_age.php
import requests
import bs4
# import csv
# import numpy as np
import pandas as pd
# two-digit years '36'..'60' (most likely Thai Buddhist-era 2536-2560)
years = [str(y) for y in range(36, 61)]
# df_male = pd.DataFrame()
# df_female = pd.DataFrame()
df_male = None
df_female = None
df_total = None
url = "http://stat.dopa.go.th/stat/statnew/upstat_age_disp.php"
for year in years:
print('fetching ' + year)
payload = {
'rcodecode': '',
'send': 5000,
'catDesc': '',
'service': 'bsts_stat_webWS',
'YEAR': year,
'LEVEL': 1,
'txtMsg': '1||||' + year + '12|'
}
headers = {
'Connection': "keep-alive",
'Pragma': "no-cache",
'Cache-Control': "no-cache",
'Origin': "http://stat.dopa.go.th",
'Upgrade-Insecure-Requests': "1",
'Content-Type': "application/x-www-form-urlencoded",
'User-Agent': "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.75 Safari/537.36",
'Accept': "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
'Referer': "http://stat.dopa.go.th/stat/statnew/upstat_age.php",
'Accept-Encoding': "gzip, deflate",
'Accept-Language': "en-US,en;q=0.9,th;q=0.8",
'Cookie': "TS01a5fe75_77=0803fa2a32ab28004b48defeba32a6a146b4d1c7b0b56938c7923e30014962d5eb09b1757031f0e989bb209444fbe16b086beabfe0823800aac8bd3e40759693c520fc4114ca52a6c1c9d8d522de41f4aa6ab9a2c6ddb9009e4ce6d7d51e09797603875704dc9291642c51276c2ca231; TS01a5fe75=010214bde33dded3e477fb0a4cfe99a504f47ff312589c47f0b8a6cad754dc35028b834873",
'Postman-Token': "d95b9ddc-7ff2-4e53-b4a2-10efaeb258ac"
}
response = requests.request("POST", url, data=payload, headers=headers)
# print(response.text)
doc = bs4.BeautifulSoup(response.text, 'html.parser')
tables = doc.find_all('table')
    table = tables[4]  # the age-breakdown table is the fifth <table> element on the page
labels = []
males = []
females = []
totals = []
for row in table.find_all('tr')[1:]:
# print(row)
cols = row.find_all('td')
age1 = cols[0].get_text().strip()
male1 = int(cols[1].get_text().strip().replace(',', ''))
female1 = int(cols[2].get_text().strip().replace(',', ''))
total1 = int(cols[3].get_text().strip().replace(',', ''))
labels.append(age1)
males.append(male1)
females.append(female1)
totals.append(total1)
age2 = cols[4].get_text().strip()
male2 = int(cols[5].get_text().strip().replace(',', ''))
female2 = int(cols[6].get_text().strip().replace(',', ''))
total2 = int(cols[7].get_text().strip().replace(',', ''))
labels.append(age2)
males.append(male2)
females.append(female2)
totals.append(total2)
if df_male is None:
df_male = pd.DataFrame(columns=labels)
if df_female is None:
df_female = pd.DataFrame(columns=labels)
if df_total is None:
df_total = pd.DataFrame(columns=labels)
df_male.loc[year] = males
df_female.loc[year] = females
df_total.loc[year] = totals
df_male.to_csv('male_year_age.csv')
df_female.to_csv('female_year_age.csv')
df_total.to_csv('total_year_age.csv')
| 28.808333 | 346 | 0.599653 |
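An alternative sketch for the table-parsing step above (not in the original script; it assumes the same page layout that `tables[4]` already relies on, and that the pandas import at the top is available):

    tables = pd.read_html(response.text, thousands=",")
    df_year = tables[4]  # same age table that the BeautifulSoup loop walks cell by cell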
6e5fecab0a9337f3bf8a6f93413641d82548be62 | 712 | py | Python | Operator2.py | PRASAD-DANGARE/PYTHON | 36214f7dc3762d327e5a29e40752edeb098249c8 | ["MIT"] | 1 | 2021-06-07T07:55:28.000Z | 2021-06-07T07:55:28.000Z | Operator2.py | PRASAD-DANGARE/PYTHON | 36214f7dc3762d327e5a29e40752edeb098249c8 | ["MIT"] | null | null | null | Operator2.py | PRASAD-DANGARE/PYTHON | 36214f7dc3762d327e5a29e40752edeb098249c8 | ["MIT"] | null | null | null |
'''
class Name : Demo
Function Name : __add__()
Description : Demonstration Of Operator Overloading V2
Function Date : 15 Mar 2021
Function Author : Prasad Dangare
Input : Int
Output : Int
'''
class Demo:
def __init__(self, x, y):
self.i = x
self.j = y
def __add__(self, other):
return Demo(self.i + other.i, self.j + other.j)
def main():
    obj1 = Demo(10, 20)
    obj2 = Demo(30, 40)
    ret = obj1 + obj2  # dispatches to Demo.__add__
    print(ret.i, ret.j)

if __name__ == "__main__":
    main()
| 18.25641 | 61 | 0.51264 |
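A __str__ method would let Demo instances print readably instead of showing the default object repr; a sketch of a method to add to Demo (not part of the original class):

    def __str__(self):
        return "Demo(%d, %d)" % (self.i, self.j)

    # print(Demo(10, 20) + Demo(30, 40))  ->  Demo(40, 60)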
0720faec38991846e09093ae14fa8c9b37764dc1 | 1,654 | py | Python | annotation/analyze/show_annotated.py | seal-git/chABSA-dataset | a33b59e1101e451495735c69094d4f598d54f6f4 | ["MIT"] | 107 | 2018-04-10T09:13:57.000Z | 2022-03-31T15:21:20.000Z | annotation/analyze/show_annotated.py | seal-git/chABSA-dataset | a33b59e1101e451495735c69094d4f598d54f6f4 | ["MIT"] | 2 | 2018-10-27T05:47:47.000Z | 2022-02-25T10:06:43.000Z | annotation/analyze/show_annotated.py | seal-git/chABSA-dataset | a33b59e1101e451495735c69094d4f598d54f6f4 | ["MIT"] | 9 | 2018-04-11T00:59:15.000Z | 2022-02-25T11:50:33.000Z |
import os
from collections import Counter
import json
ANNOTATE_DIR = os.path.join(os.path.dirname(__file__), "../../data/annotated")
def print_counter(c):
result = c.most_common()
result_dict = dict(result)
string = json.dumps(result_dict, ensure_ascii=False, indent=2, sort_keys=True)
print(string)
def main():
s_counter = Counter()
e_counter = Counter()
ea_counter = Counter()
label_collection = {}
total = 0
for _dir in os.listdir(ANNOTATE_DIR):
if not _dir.startswith("E"):
continue
company_dir = os.path.join(ANNOTATE_DIR, _dir)
for a in os.listdir(company_dir):
if not a.startswith("ann"):
continue
path = os.path.join(company_dir, a)
with open(path, encoding="utf-8") as f:
af = json.load(f)
ass = af["annotations"]
for a in ass:
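                # label format (inferred from the splits below): "entity#attribute,sentiment"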
label = a["label"]
ea, s = label.split(",")
e, at = ea.split("#")
if label not in label_collection:
label_collection[a["label"]] = []
label_collection[a["label"]].append(a["label_target"])
s_counter[s] += 1
ea_counter[ea] += 1
e_counter[e] += 1
total += 1
display = json.dumps(label_collection, ensure_ascii=False, indent=2, sort_keys=True)
print(display)
print_counter(ea_counter)
print_counter(s_counter)
print_counter(e_counter)
print(total)
if __name__ == "__main__":
main()
| 29.535714 | 88 | 0.539903 |
17529bd778d0956c70d3fdec9734b30a0ab9ff44 | 2,926 | py | Python | addons/Sprytile-6b68d00/rx/subjects/subject.py | trisadmeslek/V-Sekai-Blender-tools | 0d8747387c58584b50c69c61ba50a881319114f8 | ["MIT"] | 733 | 2017-08-22T09:47:54.000Z | 2022-03-27T23:56:52.000Z | rx/subjects/subject.py | asheraryam/Sprytile | c63be50d14b07192ff134ceab256f0d69b9c4c92 | ["MIT"] | 74 | 2017-08-16T09:13:05.000Z | 2022-03-15T02:31:49.000Z | rx/subjects/subject.py | asheraryam/Sprytile | c63be50d14b07192ff134ceab256f0d69b9c4c92 | ["MIT"] | 77 | 2017-09-14T16:56:11.000Z | 2022-03-27T13:55:16.000Z |
from rx import config
from rx.core import Observer, ObservableBase, Disposable
from rx.internal import DisposedException
from .anonymoussubject import AnonymousSubject
from .innersubscription import InnerSubscription
class Subject(ObservableBase, Observer):
"""Represents an object that is both an observable sequence as well as an
observer. Each notification is broadcasted to all subscribed observers.
"""
def __init__(self):
super(Subject, self).__init__()
self.is_disposed = False
self.is_stopped = False
self.observers = []
self.exception = None
self.lock = config["concurrency"].RLock()
def check_disposed(self):
if self.is_disposed:
raise DisposedException()
def _subscribe_core(self, observer):
with self.lock:
self.check_disposed()
if not self.is_stopped:
self.observers.append(observer)
return InnerSubscription(self, observer)
if self.exception:
observer.on_error(self.exception)
return Disposable.empty()
observer.on_completed()
return Disposable.empty()
def on_completed(self):
"""Notifies all subscribed observers of the end of the sequence."""
os = None
with self.lock:
self.check_disposed()
if not self.is_stopped:
os = self.observers[:]
self.observers = []
self.is_stopped = True
if os:
for observer in os:
observer.on_completed()
def on_error(self, exception):
"""Notifies all subscribed observers with the exception.
Keyword arguments:
error -- The exception to send to all subscribed observers.
"""
os = None
with self.lock:
self.check_disposed()
if not self.is_stopped:
os = self.observers[:]
self.observers = []
self.is_stopped = True
self.exception = exception
if os:
for observer in os:
observer.on_error(exception)
def on_next(self, value):
"""Notifies all subscribed observers with the value.
Keyword arguments:
value -- The value to send to all subscribed observers.
"""
os = None
with self.lock:
self.check_disposed()
if not self.is_stopped:
os = self.observers[:]
if os:
for observer in os:
observer.on_next(value)
def dispose(self):
"""Unsubscribe all observers and release resources."""
with self.lock:
self.is_disposed = True
self.observers = None
@classmethod
def create(cls, observer, observable):
return AnonymousSubject(observer, observable)
| 28.134615 | 77 | 0.584757 |
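A minimal usage sketch for the Subject above (the import path follows this vendored module; the keyword-argument subscribe signature is assumed from the standard RxPY 1.x API):

    from rx.subjects import Subject

    subject = Subject()
    subject.subscribe(on_next=lambda v: print("got", v))
    subject.on_next(1)        # broadcast to every current subscriber
    subject.on_completed()    # ends the sequence; later subscribers complete immediately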
4577b301cd1f052a9ea154cd3758d0af56874e04 | 442 | py | Python | src/mobot/asgi.py | mobilecoinofficial/mobot | 4872e4308beb5305d88dcace94394aaa251f65e1 | ["MIT"] | 6 | 2021-07-28T13:49:16.000Z | 2022-02-16T22:08:03.000Z | src/mobot/asgi.py | mobilecoinofficial/mobot | 4872e4308beb5305d88dcace94394aaa251f65e1 | ["MIT"] | 10 | 2021-08-18T15:18:34.000Z | 2021-09-27T21:40:24.000Z | mobot/mobot/asgi.py | sugargoat/mobot-1 | 407ac7fa477a1274bacc718c3080d94c35d40d5c | ["MIT"] | 3 | 2021-07-28T01:17:06.000Z | 2021-09-20T21:19:50.000Z |
# Copyright (c) 2021 MobileCoin. All rights reserved.
"""
ASGI config for mobot project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'mobot.settings')
application = get_asgi_application()
| 23.263158 | 78 | 0.778281 |
c06f1760cbcdb4b1c02885894921d1d6d5cc47cd | 3,106 | py | Python | fundamentals/dynamic_programming/text_justification.py | davjohnst/fundamentals | f8aff4621432c3187305dd04563425f54ea08495 | ["Apache-2.0"] | null | null | null | fundamentals/dynamic_programming/text_justification.py | davjohnst/fundamentals | f8aff4621432c3187305dd04563425f54ea08495 | ["Apache-2.0"] | null | null | null | fundamentals/dynamic_programming/text_justification.py | davjohnst/fundamentals | f8aff4621432c3187305dd04563425f54ea08495 | ["Apache-2.0"] | null | null | null |
#! /usr/bin/env python
class TextJustification(object):
"""
This is how LateX justifies its text
Goal: split text into "good" lines
where the text is an array of words, size N
    Def badness(i, j) = how "bad" it would be to use words i, ..., j-1 as a line:
        badness(i, j) = INF                           if the words don't fit on the line
        badness(i, j) = (page_width - words_width)^3  otherwise
New goal: minimize the total badness of all lines
Define subproblem:
total badness of words[i:N-1]
number of subproblems: N
What to guess in each subproblem?
where to break the next line
Define recurrence:
total_badness(words[i:N]) = min_j { badness[i,j] + total_badness(words[j:N]) }
What's the overall solution?
total_badness(words[0:N])
keeping parent pointers will give us the line breaks that minimizes cost
"""
def __init__(self, page_width):
self.page_width = page_width
def justify(self, words):
max_length = max([len(w) for w in words])
if max_length > self.page_width:
raise ValueError("can't fit at least one word into the page width")
N = len(words)
total_badness = [None] * (N + 1)
total_badness[N] = (0, None)
# call into recursive justify
ans = self._justify_rec(words, total_badness, 0, N)
uniq_breaks = []
current = None
for x in total_badness:
if x[1] != current and x[1] is not None:
uniq_breaks.append(x[1])
current = x[1]
results = []
last = None
for i in uniq_breaks:
if last is None:
last = 0
results.append(" ".join(words[last:i]))
last = i
return "\n".join(results)
def _justify_rec(self, words, total_badness, i, N):
"""
Solves the recursive step: what is the optimal line breaking with words[i:] ?
:param words: Word list with words from 0..N-1
:param total_badness: Dynamic solution array
:param i: Defines "subproblem" of finding best solution of words[i:]
:return: nothing! we'll fill in the DP array
"""
if total_badness[i]:
return total_badness[i][0]
min_cost = float("inf")
min_parent = None
# compute min_j badness(i,j) + total_badness[j]
for j in range(i+1, N+1):
current_cost = self.line_badness(words[i:j]) + self._justify_rec(words, total_badness, j, N)
if current_cost < min_cost:
min_cost = current_cost
min_parent = j
total_badness[i] = (min_cost, min_parent)
return min_cost
def line_badness(self, line):
length = len(" ".join(line))
if length > self.page_width:
return float("inf")
else:
return abs((self.page_width - length)**3)
def main():
tj = TextJustification(10)
    print(tj.justify(["the", "quick", "brown", "fox", "jumped", "over", "the", "lazy", "dog"]))
if __name__ == "__main__":
    main()
| 27.732143 | 104 | 0.573406 |
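The memoized recursion above can also be written bottom-up over the same recurrence; a sketch (the function name is mine, not part of the file):

    def justify_bottom_up(words, page_width):
        n = len(words)

        def badness(i, j):
            width = len(" ".join(words[i:j]))
            return float("inf") if width > page_width else (page_width - width) ** 3

        dp = [0.0] * (n + 1)       # dp[i] = min total badness of words[i:]
        parent = [None] * (n + 1)  # parent[i] = index just past the line starting at i
        for i in range(n - 1, -1, -1):
            dp[i], parent[i] = min(
                (badness(i, j) + dp[j], j) for j in range(i + 1, n + 1)
            )
        lines, i = [], 0
        while i < n:
            lines.append(" ".join(words[i:parent[i]]))
            i = parent[i]
        return "\n".join(lines)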
3c13e98c4c7d8dfdc0ab4e4fa758710d2051ddce | 2,317 | py | Python | examples/harris/harris_numba.py | uw-ipd/numba | 26dde2b28cadda403a5549a84dc1698900b23f74 | ["BSD-2-Clause"] | 140 | 2017-07-15T21:17:44.000Z | 2022-03-19T00:56:05.000Z | examples/harris/harris_numba.py | uw-ipd/numba | 26dde2b28cadda403a5549a84dc1698900b23f74 | ["BSD-2-Clause"] | 24 | 2017-07-24T16:25:35.000Z | 2021-12-08T17:54:38.000Z | examples/harris/harris_numba.py | uw-ipd/numba | 26dde2b28cadda403a5549a84dc1698900b23f74 | ["BSD-2-Clause"] | 50 | 2017-07-15T21:15:16.000Z | 2021-12-12T15:27:05.000Z |
#! /usr/bin/env python
#
# Copyright (c) 2017 Intel Corporation
# SPDX-License-Identifier: BSD-2-Clause
#
from __future__ import print_function
import sys
import time
import os
import numpy as np
from numba import njit, stencil
try:
from PIL import Image
except ImportError:
raise RuntimeError("Pillow is needed to run this example. Try 'conda install pillow'")
@njit
def xsten(a):
ret = np.zeros_like(a)
ashape = a.shape
for i in range(1,ashape[0]-1):
for j in range(1,ashape[1]-1):
ret[i,j] = ((a[i-1,j-1] * -1.0) + (a[i-1,j] * -2.0) + (a[i-1,j+1] * -1.0) + a[i+1,j-1] + (a[i+1,j] * 2.0) + a[i+1,j+1]) / 12.0
return ret
@njit
def ysten(a):
ret = np.zeros_like(a)
ashape = a.shape
for i in range(1,ashape[0]-1):
for j in range(1,ashape[1]-1):
ret[i,j] = ((a[i-1,j-1] * -1.0) + (a[i,j-1] * -2.0) + (a[i+1,j-1] * -1.0) + a[i-1,j+1] + (a[i,j+1] * 2.0) + a[i+1,j+1]) / 12.0
return ret
@njit
def harris_common(a):
ret = np.zeros_like(a)
ashape = a.shape
for i in range(1,ashape[0]-1):
for j in range(1,ashape[1]-1):
ret[i,j] = (a[i-1,j-1] + a[i-1,j] + a[i-1,j+1] + a[i,j-1] + a[i,j] + a[i,j+1] + a[i+1,j-1] + a[i+1,j] + a[i+1,j+1])
return ret
@njit
def harris(Iin):
Ix = xsten(Iin)
Iy = ysten(Iin)
Ixx = Ix * Ix
Iyy = Iy * Iy
Ixy = Ix * Iy
Sxx = harris_common(Ixx)
Syy = harris_common(Iyy)
Sxy = harris_common(Ixy)
det = (Sxx * Syy) - (Sxy * Sxy)
trace = Sxx + Syy
return det - (0.04 * trace * trace)
def main(*args):
iterations = 10
if len(args) > 0:
input_file = args[0]
else:
raise ValueError("A jpeg file must be provided as the first command line parameter.")
parts = os.path.splitext(input_file)
new_file_name = parts[0] + "-corners" + parts[1]
input_img = Image.open(input_file).convert('L')
input_arr = np.array(input_img)
tstart = time.time()
for i in range(iterations):
output_arr = harris(input_arr)
htime = time.time() - tstart
print("SELFTIMED ", htime)
new_img = Image.fromarray(output_arr.astype(np.uint8), mode=input_img.mode)
new_img.format = input_img.format
new_img.save(new_file_name)
if __name__ == "__main__":
main(*sys.argv[1:])
| 26.033708 | 138 | 0.577902 |
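Note that `stencil` is imported at the top of this file but never used; the hand-written xsten loop could be expressed with it. A sketch (untested against any particular Numba version):

    from numba import stencil

    @stencil
    def xsten_kernel(a):
        # relative neighbor indexing; the uncomputable border defaults to 0,
        # which matches the zero border that xsten leaves in `ret`
        return ((a[-1, -1] * -1.0) + (a[-1, 0] * -2.0) + (a[-1, 1] * -1.0)
                + a[1, -1] + (a[1, 0] * 2.0) + a[1, 1]) / 12.0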
e7ba646bb38b9cdc1a47e5067cc1c11892cd14aa | 871 | py | Python | tests/data/expected/parser/openapi/openapi_parser_parse/with_import_format.py | stevesimmons/datamodel-code-generator | b77009e978607064939333992a28b323b3420579 | ["MIT"] | 891 | 2019-07-23T04:23:32.000Z | 2022-03-31T13:36:33.000Z | tests/data/expected/parser/openapi/openapi_parser_parse/with_import_format.py | stevesimmons/datamodel-code-generator | b77009e978607064939333992a28b323b3420579 | ["MIT"] | 663 | 2019-07-23T09:50:26.000Z | 2022-03-29T01:56:55.000Z | tests/data/expected/parser/openapi/openapi_parser_parse/with_import_format.py | stevesimmons/datamodel-code-generator | b77009e978607064939333992a28b323b3420579 | ["MIT"] | 108 | 2019-07-23T08:50:37.000Z | 2022-03-09T10:50:22.000Z |
from __future__ import annotations
from typing import List, Optional
from pydantic import AnyUrl, BaseModel
class Pet(BaseModel):
id: int
name: str
tag: Optional[str] = None
class Pets(BaseModel):
__root__: List[Pet]
class User(BaseModel):
id: int
name: str
tag: Optional[str] = None
class Users(BaseModel):
__root__: List[User]
class Id(BaseModel):
__root__: str
class Rules(BaseModel):
__root__: List[str]
class Error(BaseModel):
code: int
message: str
class Api(BaseModel):
apiKey: Optional[str] = None
apiVersionNumber: Optional[str] = None
apiUrl: Optional[AnyUrl] = None
apiDocumentationUrl: Optional[AnyUrl] = None
class Apis(BaseModel):
__root__: List[Api]
class Event(BaseModel):
name: Optional[str] = None
class Result(BaseModel):
event: Optional[Event] = None
| 15.017241 | 48 | 0.683123 |
1a59c010099b0c8cde9cdbafa5147927e3d18895 | 18,914 | py | Python | appengine/cr-buildbucket/validation.py | allaparthi/monorail | e18645fc1b952a5a6ff5f06e0c740d75f1904473 | ["BSD-3-Clause"] | null | null | null | appengine/cr-buildbucket/validation.py | allaparthi/monorail | e18645fc1b952a5a6ff5f06e0c740d75f1904473 | ["BSD-3-Clause"] | null | null | null | appengine/cr-buildbucket/validation.py | allaparthi/monorail | e18645fc1b952a5a6ff5f06e0c740d75f1904473 | ["BSD-3-Clause"] | null | null | null |
# Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Validates V2 proto messages.
Internally, this module is a bit magical. It keeps a stack of fields currently
being validated per thread. It is used to construct a path to an invalid field
value.
"""
import contextlib
import logging
import re
import threading
from components import cipd
from go.chromium.org.luci.buildbucket.proto import common_pb2
import buildtags
import config
import errors
import model
class Error(Exception):
"""Raised on validation errors."""
PUBSUB_USER_DATA_MAX_LENGTH = 4096
# Maximum size of Build.summary_markdown field. Defined in build.proto.
MAX_BUILD_SUMMARY_MARKDOWN_SIZE = 4000 # 4 KB
# swarming.py and api.py reserve these properties.
RESERVED_PROPERTY_PATHS = [
# Reserved for buildbucket internals.
['buildbucket'],
['$recipe_engine/buildbucket'],
# Deprecated in favor of api.buildbucket.builder.builder,
# https://chromium.googlesource.com/infra/luci/recipes-py/+/master/recipe_modules/buildbucket/api.py
# Prohibited.
['buildername'],
# Deprecated in favor of api.buildbucket.build_input.gitiles_commit,
# https://chromium.googlesource.com/infra/luci/recipes-py/+/master/recipe_modules/buildbucket/api.py
# Prohibited.
['branch'],
['repository'],
# Set to const true.
['$recipe_engine/runtime', 'is_luci'],
# Populated from Build.input.experimental.
['$recipe_engine/runtime', 'is_experimental'],
]
# Statuses with start time required.
START_TIME_REQUIRED_STATUSES = (
common_pb2.STARTED,
common_pb2.SUCCESS,
common_pb2.FAILURE,
)
# Step statuses, listed from best to worst, where applicable. See
# https://chromium.googlesource.com/infra/luci/luci-go/+/dffd1081b775979aa1c5a8046d9a65adead1cee8/buildbucket/proto/step.proto#75
STATUS_PRECEDENCE = (
common_pb2.SUCCESS, # best
common_pb2.FAILURE,
common_pb2.INFRA_FAILURE,
common_pb2.CANCELED, # worst
)
# Character separating parent from children steps.
STEP_SEP = '|'
################################################################################
# Validation of common.proto messages.
# The order of functions must match the order of messages in common.proto.
def validate_gerrit_change(change, require_project=False):
"""Validates common_pb2.GerritChange."""
# project is not required.
_check_truth(change, 'host', 'change', 'patchset')
if require_project and not change.project: # pragma: no branch
# TODO(nodir): escalate to an error.
logging.warning('gerrit_change.project is not specified')
def validate_gitiles_commit(commit, require_ref=True):
"""Validates common_pb2.GitilesCommit."""
_check_truth(commit, 'host', 'project')
if require_ref:
_check_truth(commit, 'ref')
if commit.ref:
if not commit.ref.startswith('refs/'):
_enter_err('ref', 'must start with "refs/"')
else:
if not commit.id:
_err('id or ref is required')
if commit.position:
_enter_err('position', 'requires ref')
if commit.id:
with _enter('id'):
_validate_hex_sha1(commit.id)
def validate_tags(string_pairs, mode):
"""Validates a list of common.StringPair tags.
For mode, see buildtags.validate_tags docstring.
"""
for p in string_pairs:
if ':' in p.key:
_err('tag key "%s" cannot have a colon', p.key)
with _handle_invalid_input_error():
tags = ['%s:%s' % (p.key, p.value) for p in string_pairs]
buildtags.validate_tags(tags, mode)
################################################################################
# Validation of build.proto messages.
# The order of functions must match the order of messages in common.proto.
def validate_builder_id(builder_id, require_bucket=True, require_builder=True):
"""Validates build_pb2.BuilderID."""
assert require_bucket or not require_builder
_check_truth(builder_id, 'project')
if require_bucket:
_check_truth(builder_id, 'bucket')
if require_builder:
_check_truth(builder_id, 'builder')
with _enter('project'), _handle_invalid_input_error():
config.validate_project_id(builder_id.project)
with _enter('bucket'), _handle_invalid_input_error():
if builder_id.bucket:
config.validate_bucket_name(builder_id.bucket)
parts = builder_id.bucket.split('.')
if len(parts) >= 3 and parts[0] == 'luci':
_err(
'invalid usage of v1 bucket format in v2 API; use %r instead',
parts[2]
)
elif builder_id.builder:
_err('required by .builder field')
with _enter('builder'), _handle_invalid_input_error():
if builder_id.builder:
errors.validate_builder_name(builder_id.builder)
################################################################################
# Validation of rpc.proto messages.
# The order of functions must match the order of messages in rpc.proto.
def validate_get_build_request(req):
"""Validates rpc_pb2.GetBuildRequest."""
if req.id:
if req.HasField('builder') or req.build_number:
_err('id is mutually exclusive with builder and build_number')
elif req.HasField('builder') and req.build_number:
validate_builder_id(req.builder)
else:
_err('id or (builder and build_number) are required')
def validate_search_builds_request(req):
"""Validates rpc_pb2.SearchBuildRequest."""
with _enter('predicate'):
validate_build_predicate(req.predicate)
_validate_paged_request(req)
def validate_requested_dimension(dim):
"""Validates common_pb2.RequestedDimension."""
_check_truth(dim, 'key', 'value')
with _enter('key'):
if dim.key == 'caches':
_err('"caches" is invalid; define caches instead')
if dim.key == 'pool':
_err('"pool" is not allowed')
with _enter('expiration'):
with _enter('seconds'):
if dim.expiration.seconds < 0:
_err('must not be negative')
if dim.expiration.seconds % 60 != 0:
_err('must be a multiple of 60')
if dim.expiration.nanos:
_enter_err('nanos', 'must be 0')
def validate_schedule_build_request(req, legacy=False):
if '/' in req.request_id: # pragma: no cover
_enter_err('request_id', 'must not contain /')
if not req.HasField('builder') and not req.template_build_id:
_err('builder or template_build_id is required')
if req.HasField('builder'):
with _enter('builder'):
validate_builder_id(req.builder, require_builder=not legacy)
with _enter('exe'):
_check_falsehood(req.exe, 'cipd_package')
if req.exe.cipd_version:
with _enter('cipd_version'):
_validate_cipd_version(req.exe.cipd_version)
with _enter('properties'):
validate_struct(req.properties)
if not legacy: # pragma: no branch
for path in RESERVED_PROPERTY_PATHS:
if _struct_has_path(req.properties, path):
_err('property path %r is reserved', path)
if req.HasField('gitiles_commit'):
with _enter('gitiles_commit'):
validate_gitiles_commit(req.gitiles_commit)
_check_repeated(
req,
'gerrit_changes',
lambda c: validate_gerrit_change(c, require_project=not legacy),
)
with _enter('tags'):
validate_tags(req.tags, 'new')
_check_repeated(req, 'dimensions', validate_requested_dimension)
key_exp = set()
with _enter('dimensions'):
for d in req.dimensions:
t = (d.key, d.expiration.seconds)
if t in key_exp:
_err(
'key "%s" and expiration %ds are not unique', d.key,
d.expiration.seconds
)
key_exp.add(t)
if req.priority < 0 or req.priority > 255:
_enter_err('priority', 'must be in [0, 255]')
if req.HasField('notify'): # pragma: no branch
with _enter('notify'):
validate_notification_config(req.notify)
def validate_cancel_build_request(req):
_check_truth(req, 'id', 'summary_markdown')
with _enter('summary_markdown'):
validate_build_summary_markdown(req.summary_markdown)
def validate_struct(struct):
for name, value in struct.fields.iteritems():
if not value.WhichOneof('kind'):
_enter_err(name, 'value is not set; for null, initialize null_value')
def validate_notification_config(notify):
_check_truth(notify, 'pubsub_topic')
if len(notify.user_data) > PUBSUB_USER_DATA_MAX_LENGTH:
_enter_err('user_data', 'must be <= %d bytes', PUBSUB_USER_DATA_MAX_LENGTH)
# Set of UpdateBuildRequest field paths updatable via UpdateBuild RPC.
UPDATE_BUILD_FIELD_PATHS = {
'build.status',
'build.status_details',
'build.summary_markdown',
'build.steps',
'build.output',
'build.output.properties',
'build.output.gitiles_commit',
'build.tags',
}
# Set of valid build statuses supported by UpdateBuild RPC.
UPDATE_BUILD_STATUSES = {
common_pb2.STARTED,
# kitchen does not actually use SUCCESS. It relies on swarming pubsub
# handler in Buildbucket because a task may fail after recipe succeeded.
common_pb2.SUCCESS,
common_pb2.FAILURE,
common_pb2.INFRA_FAILURE,
}
def validate_update_build_request(req, make_build_steps_func=None):
"""Validates rpc_pb2.UpdateBuildRequest.
If make_build_steps_func is given, it will be called at the end to validate
the size of the its serialized representation. This allows the callee to save
the BuildStep locally and thus avoid re-doing the work later.
"""
update_paths = set(req.update_mask.paths)
with _enter('update_mask', 'paths'):
unsupported = update_paths - UPDATE_BUILD_FIELD_PATHS
if unsupported:
_err('unsupported path(s) %r', sorted(unsupported))
# Check build values, if present in the mask.
with _enter('build'):
_check_truth(req.build, 'id')
if 'build.status' in update_paths:
if req.build.status not in UPDATE_BUILD_STATUSES:
_enter_err(
'status', 'invalid status %s for UpdateBuild',
common_pb2.Status.Name(req.build.status)
)
if 'build.output.gitiles_commit' in update_paths:
with _enter('output', 'gitiles_commit'):
validate_gitiles_commit(req.build.output.gitiles_commit)
if 'build.summary_markdown' in update_paths:
with _enter('summary_markdown'):
validate_build_summary_markdown(req.build.summary_markdown)
if 'build.output.properties' in update_paths:
with _enter('output', 'properties'):
validate_struct(req.build.output.properties)
if 'build.tags' in update_paths:
with _enter('tags'):
validate_tags(req.build.tags, 'append')
if 'build.steps' in update_paths: # pragma: no branch
with _enter('steps'):
build_steps = (
make_build_steps_func()
if make_build_steps_func else model.BuildSteps.make(req.build)
)
limit = model.BuildSteps.MAX_STEPS_LEN
if len(build_steps.step_container_bytes) > limit:
_err('too big to accept')
validate_steps(req.build.steps)
def validate_build_summary_markdown(summary_markdown):
size = len(summary_markdown)
limit = MAX_BUILD_SUMMARY_MARKDOWN_SIZE
if size > limit:
_err('too big to accept (%d > %d bytes)', size, limit)
def validate_steps(steps):
seen_steps = dict()
for i, s in enumerate(steps):
with _enter('step[%d]' % i):
validate_step(s, seen_steps)
def validate_step(step, steps):
"""Validates build's step, internally and relative to (previous) steps."""
_check_truth(step, 'name')
if step.name in steps:
_enter_err('name', 'duplicate: %r', step.name)
validate_internal_timing_consistency(step)
log_names = set()
_check_repeated(step, 'logs', lambda log: validate_log(log, log_names))
name_path = step.name.split(STEP_SEP)
parent_name = STEP_SEP.join(name_path[:-1])
if parent_name:
if parent_name not in steps:
_err('parent to %r must precede', step.name)
parent = steps[parent_name]
validate_status_consistency(step, parent)
validate_timing_consistency(step, parent)
steps[step.name] = step
def validate_internal_timing_consistency(step):
"""Validates internal timing consistency of a step."""
if (step.status not in common_pb2.Status.values() or
step.status == common_pb2.STATUS_UNSPECIFIED):
_err('must have buildbucket.v2.Status that is not STATUS_UNSPECIFIED')
if step.status in START_TIME_REQUIRED_STATUSES and not step.HasField(
'start_time'):
_enter_err(
'start_time', 'required by status %s',
common_pb2.Status.Name(step.status)
)
elif step.status < common_pb2.STARTED and step.HasField('start_time'):
_enter_err(
'start_time', 'invalid for status %s',
common_pb2.Status.Name(step.status)
)
if bool(step.status & common_pb2.ENDED_MASK) ^ step.HasField('end_time'):
_err('must have both or neither end_time and a terminal status')
if (step.HasField('end_time') and
step.start_time.ToDatetime() > step.end_time.ToDatetime()):
_err('start_time after end_time')
def validate_status_consistency(child, parent):
"""Validates inter-step status consistency."""
c, p = child.status, parent.status
c_name, p_name = common_pb2.Status.Name(c), common_pb2.Status.Name(p)
if p == common_pb2.SCHEDULED:
_enter_err('status', 'parent %r must be at least STARTED', parent.name)
if not bool(c & common_pb2.ENDED_MASK) and p != common_pb2.STARTED:
_enter_err(
'status', 'non-terminal (%s) %r must have STARTED parent %r (%s)',
c_name, child.name, parent.name, p_name
)
if (p in STATUS_PRECEDENCE and c in STATUS_PRECEDENCE and
STATUS_PRECEDENCE.index(p) < STATUS_PRECEDENCE.index(c)):
_enter_err(
'status', '%r\'s status %s is worse than parent %r\'s status %s',
child.name, c_name, parent.name, p_name
)
def validate_timing_consistency(child, parent):
"""Validates inter-step timing consistency."""
parent_start = parent.start_time.ToDatetime(
) if parent.HasField('start_time') else None
parent_end = parent.end_time.ToDatetime(
) if parent.HasField('end_time') else None
if child.HasField('start_time'):
child_start = child.start_time.ToDatetime()
with _enter('start_time'):
if parent_start and parent_start > child_start:
_err('cannot precede parent %r\'s start time', parent.name)
if parent_end and parent_end < child_start:
_err('cannot follow parent %r\'s end time', parent.name)
if child.HasField('end_time'):
child_end = child.end_time.ToDatetime()
with _enter('end_time'):
if parent_start and parent_start > child_end:
_err('cannot precede parent %r\'s start time', parent.name)
if parent_end and parent_end < child_end:
_err('cannot follow parent %r\'s end time', parent.name)
def validate_log(log, names):
"""Validates a log within a build step; checks uniqueness against names param.
"""
_check_truth(log, 'name', 'url', 'view_url')
if log.name in names:
_enter_err('name', 'duplicate: %r', log.name)
names.add(log.name)
def validate_build_predicate(predicate):
"""Validates rpc_pb2.BuildPredicate."""
if predicate.HasField('builder'):
with _enter('builder'):
validate_builder_id(
predicate.builder, require_bucket=False, require_builder=False
)
_check_repeated(predicate, 'gerrit_changes', validate_gerrit_change)
if predicate.HasField('output_gitiles_commit'):
with _enter('output_gitiles_commit'):
_validate_predicate_output_gitiles_commit(predicate.output_gitiles_commit)
if predicate.HasField('create_time') and predicate.HasField('build'):
_err('create_time and build are mutually exclusive')
with _enter('tags'):
validate_tags(predicate.tags, 'search')
# List of supported BuildPredicate.output_gitiles_commit field sets.
# It is more restrictied than the generic validate_gitiles_commit because the
# field sets by which builds are indexed are more restricted.
SUPPORTED_PREDICATE_OUTPUT_GITILES_COMMIT_FIELD_SET = {
tuple(sorted(s)) for s in [
('host', 'project', 'id'),
('host', 'project', 'ref'),
('host', 'project', 'ref', 'position'),
]
}
def _validate_predicate_output_gitiles_commit(commit):
"""Validates BuildsPredicate.output_gitiles_commit.
From rpc_pb2.SearchBuildsRequest.output_gitiles_commit comment:
One of the following subfield sets must specified:
- host, project, id
- host, project, ref
- host, project, ref, position
"""
field_set = tuple(sorted(f.name for f, _ in commit.ListFields()))
if field_set not in SUPPORTED_PREDICATE_OUTPUT_GITILES_COMMIT_FIELD_SET:
_err(
'unsupported set of fields %r. Supported field sets: %r', field_set,
SUPPORTED_PREDICATE_OUTPUT_GITILES_COMMIT_FIELD_SET
)
validate_gitiles_commit(commit, require_ref=False)
################################################################################
# Internals.
def _validate_cipd_version(version):
if not cipd.is_valid_version(version):
_err('invalid version "%s"', version)
def _struct_has_path(struct, path):
"""Returns True if struct has a value at field path."""
for p in path:
f = struct.fields.get(p)
if f is None:
return False
struct = f.struct_value
return True
def _validate_hex_sha1(sha1):
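  # Note: [a-z0-9] is looser than strict hex digits, and re.match anchors
  # only at the start of the string, not the end.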
pattern = r'[a-z0-9]{40}'
if not re.match(pattern, sha1):
_err('does not match r"%s"', pattern)
def _validate_paged_request(req):
"""Validates req.page_size."""
if req.page_size < 0:
_enter_err('page_size', 'must be not be negative')
def _check_truth(msg, *field_names):
"""Validates that the field values are truish."""
assert field_names, 'at least 1 field is required'
for f in field_names:
if not getattr(msg, f):
_enter_err(f, 'required')
def _check_falsehood(msg, *field_names):
"""Validates that the field values are falsish."""
for f in field_names:
if getattr(msg, f):
_enter_err(f, 'disallowed')
def _check_repeated(msg, field_name, validator):
"""Validates each element of a repeated field."""
for i, c in enumerate(getattr(msg, field_name)):
with _enter('%s[%d]' % (field_name, i)):
validator(c)
@contextlib.contextmanager
def _enter(*names):
_field_stack().extend(names)
try:
yield
finally:
_field_stack()[-len(names):] = []
def _err(fmt, *args):
field_path = '.'.join(_field_stack())
raise Error('%s: %s' % (field_path, fmt % args))
@contextlib.contextmanager
def _handle_invalid_input_error():
try:
yield
except errors.InvalidInputError as ex:
_err('%s', ex.message)
def _enter_err(name, fmt, *args):
with _enter(name):
_err(fmt, *args)
def _field_stack():
if not hasattr(_CONTEXT, 'field_stack'): # pragma: no cover
_CONTEXT.field_stack = []
return _CONTEXT.field_stack
# Validation context of the current thread.
_CONTEXT = threading.local()
| 30.555735 | 129 | 0.694248 |
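A quick sketch of how the thread-local field stack composes error paths, using only helpers defined in this module (the field names are made up):

    try:
        with _enter('builder', 'project'):
            _err('required')
    except Error as e:
        print(e)  # -> builder.project: required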
2af67d5eb9bf521adcc74c3b5e0e2fc96808e92f | 4,418 | py | Python | pytorch_lightning/utilities/parsing.py | stas00/pytorch-lightning | 84c507c4df5f5c336deb19ce7f70fa02329f39f6 | ["Apache-2.0"] | 1 | 2021-06-10T07:12:58.000Z | 2021-06-10T07:12:58.000Z | pytorch_lightning/utilities/parsing.py | stas00/pytorch-lightning | 84c507c4df5f5c336deb19ce7f70fa02329f39f6 | ["Apache-2.0"] | null | null | null | pytorch_lightning/utilities/parsing.py | stas00/pytorch-lightning | 84c507c4df5f5c336deb19ce7f70fa02329f39f6 | ["Apache-2.0"] | null | null | null |
import inspect
from argparse import Namespace
from typing import Dict
def str_to_bool(val):
"""Convert a string representation of truth to true (1) or false (0).
Copied from the python implementation distutils.utils.strtobool
True values are 'y', 'yes', 't', 'true', 'on', and '1'; false values
are 'n', 'no', 'f', 'false', 'off', and '0'. Raises ValueError if
'val' is anything else.
>>> str_to_bool('YES')
1
>>> str_to_bool('FALSE')
0
"""
val = val.lower()
if val in ('y', 'yes', 't', 'true', 'on', '1'):
return 1
elif val in ('n', 'no', 'f', 'false', 'off', '0'):
return 0
else:
raise ValueError(f'invalid truth value {val}')
def clean_namespace(hparams):
"""Removes all functions from hparams so we can pickle."""
if isinstance(hparams, Namespace):
del_attrs = []
for k in hparams.__dict__:
if callable(getattr(hparams, k)):
del_attrs.append(k)
for k in del_attrs:
delattr(hparams, k)
elif isinstance(hparams, dict):
del_attrs = []
for k, v in hparams.items():
if callable(v):
del_attrs.append(k)
for k in del_attrs:
del hparams[k]
def get_init_args(frame) -> dict:
_, _, _, local_vars = inspect.getargvalues(frame)
if '__class__' not in local_vars:
        return {}
cls = local_vars['__class__']
spec = inspect.getfullargspec(cls.__init__)
init_parameters = inspect.signature(cls.__init__).parameters
self_identifier = spec.args[0] # "self" unless user renames it (always first arg)
varargs_identifier = spec.varargs # by convention this is named "*args"
kwargs_identifier = spec.varkw # by convention this is named "**kwargs"
exclude_argnames = (
varargs_identifier, kwargs_identifier, self_identifier, '__class__', 'frame', 'frame_args'
)
# only collect variables that appear in the signature
local_args = {k: local_vars[k] for k in init_parameters.keys()}
local_args.update(local_args.get(kwargs_identifier, {}))
local_args = {k: v for k, v in local_args.items() if k not in exclude_argnames}
return local_args
def collect_init_args(frame, path_args: list, inside: bool = False) -> list:
"""
Recursively collects the arguments passed to the child constructors in the inheritance tree.
Args:
frame: the current stack frame
path_args: a list of dictionaries containing the constructor args in all parent classes
inside: track if we are inside inheritance path, avoid terminating too soon
Return:
A list of dictionaries where each dictionary contains the arguments passed to the
constructor at that level. The last entry corresponds to the constructor call of the
most specific class in the hierarchy.
"""
_, _, _, local_vars = inspect.getargvalues(frame)
if '__class__' in local_vars:
local_args = get_init_args(frame)
# recursive update
path_args.append(local_args)
return collect_init_args(frame.f_back, path_args, inside=True)
elif not inside:
return collect_init_args(frame.f_back, path_args, inside)
else:
return path_args
def flatten_dict(source, result=None):
if result is None:
result = {}
for k, v in source.items():
if isinstance(v, dict):
_ = flatten_dict(v, result)
else:
result[k] = v
return result
class AttributeDict(Dict):
"""Extended dictionary accesisable with dot notation.
>>> ad = AttributeDict({'key1': 1, 'key2': 'abc'})
>>> ad.key1
1
>>> ad.update({'my-key': 3.14})
>>> ad.update(mew_key=42)
>>> ad.key1 = 2
>>> ad
"key1": 2
"key2": abc
"mew_key": 42
"my-key": 3.14
"""
def __getattr__(self, key):
try:
return self[key]
except KeyError:
raise AttributeError(f'Missing attribute "{key}"')
def __setattr__(self, key, val):
self[key] = val
def __repr__(self):
if not len(self):
return ""
max_key_length = max([len(str(k)) for k in self])
tmp_name = '{:' + str(max_key_length + 3) + 's} {}'
rows = [tmp_name.format(f'"{n}":', self[n]) for n in sorted(self.keys())]
out = '\n'.join(rows)
return out
| 30.895105 | 98 | 0.615211 |
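A minimal sketch of get_init_args in action (the class and values are made up; this mirrors how the constructor arguments get snapshotted from a stack frame):

    import inspect

    class Model:
        def __init__(self, lr=0.1, layers=2, **kwargs):
            super().__init__()  # zero-arg super() puts __class__ into the frame's locals
            self.captured = get_init_args(inspect.currentframe())

    m = Model(lr=0.3, extra=True)
    print(m.captured)  # {'lr': 0.3, 'layers': 2, 'extra': True}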
ab17b080fb3448654afbb6623fff5a9539f4d1c2 | 237 | py | Python | web_app/forms.py | BuilderTron/torres_yardworks | 75f9be62ea9a8a48295836ca5a35121cb1ee0472 | ["MIT"] | null | null | null | web_app/forms.py | BuilderTron/torres_yardworks | 75f9be62ea9a8a48295836ca5a35121cb1ee0472 | ["MIT"] | null | null | null | web_app/forms.py | BuilderTron/torres_yardworks | 75f9be62ea9a8a48295836ca5a35121cb1ee0472 | ["MIT"] | null | null | null |
from django import forms
from .models import ClientLeads
class ReqForm(forms.ModelForm):
class Meta:
model = ClientLeads
        fields = ("first_name", "last_name", "email", "phone", "address", "date", "service", "message")
| 33.857143 | 103 | 0.670886 |
92413dfc6406e032db9b6f148e26b1323fd36648 | 396 | py | Python | pymath/binary/divisible_by_5/__init__.py | JASTYN/pythonmaster | 46638ab09d28b65ce5431cd0759fe6df272fb85d | ["Apache-2.0", "MIT"] | 3 | 2017-05-02T10:28:13.000Z | 2019-02-06T09:10:11.000Z | pymath/binary/divisible_by_5/__init__.py | JASTYN/pythonmaster | 46638ab09d28b65ce5431cd0759fe6df272fb85d | ["Apache-2.0", "MIT"] | 2 | 2017-06-21T20:39:14.000Z | 2020-02-25T10:28:57.000Z | pymath/binary/divisible_by_5/__init__.py | JASTYN/pythonmaster | 46638ab09d28b65ce5431cd0759fe6df272fb85d | ["Apache-2.0", "MIT"] | 2 | 2016-07-29T04:35:22.000Z | 2017-01-18T17:05:36.000Z |
class Divisible5(object):
def __init__(self, binary_lst):
self.binary_lst = binary_lst
def div_five(self):
return ",".join([x for x in self.binary_lst.split(",") if int(x, 2) % 5 == 0])
    def div_five_two(self):
lst = []
for x in self.binary_lst.split(","):
if int(x, 2) % 5 == 0:
lst.append(x)
return ",".join(lst)
| 28.285714 | 86 | 0.527778 |
a026d8df444f17f36f78d9e5bc91d63b13d363b6 | 643 | py | Python | audiomentations/__init__.py | lixianyi/audiomentations | 8bbd26537545c946d306b099aed46edc7dad727a | ["MIT"] | 1 | 2021-09-17T09:28:10.000Z | 2021-09-17T09:28:10.000Z | audiomentations/__init__.py | lixianyi/audiomentations | 8bbd26537545c946d306b099aed46edc7dad727a | ["MIT"] | null | null | null | audiomentations/__init__.py | lixianyi/audiomentations | 8bbd26537545c946d306b099aed46edc7dad727a | ["MIT"] | null | null | null |
from .augmentations.spectrogram_transforms import SpecFrequencyMask, SpecChannelShuffle
from .augmentations.transforms import (
AddBackgroundNoise,
AddGaussianNoise,
AddGaussianSNR,
AddImpulseResponse,
ApplyImpulseResponse,
AddShortNoises,
BandPassFilter,
Clip,
ClippingDistortion,
FrequencyMask,
Gain,
HighPassFilter,
LoudnessNormalization,
LowPassFilter,
Mp3Compression,
Normalize,
PitchShift,
PolarityInversion,
Resample,
Reverse,
Shift,
TimeMask,
TimeStretch,
Trim,
)
from .core.composition import Compose, SpecCompose
__version__ = "0.18.0"
| 20.741935 | 87 | 0.723173 |
7c91c491fae883d1f7cc17aa70fd9f04fd26b208 | 3,306 | py | Python | t5/spider_dataset_utils.py | TomerRonen34/SmBopCompGen | c14d566126951e31dcaa506bbece8d19afe01f33 | ["MIT"] | null | null | null | t5/spider_dataset_utils.py | TomerRonen34/SmBopCompGen | c14d566126951e31dcaa506bbece8d19afe01f33 | ["MIT"] | null | null | null | t5/spider_dataset_utils.py | TomerRonen34/SmBopCompGen | c14d566126951e31dcaa506bbece8d19afe01f33 | ["MIT"] | null | null | null |
"""
inspired by https://github.com/google-research/language/blob/master/language/nqg/tasks/spider/append_schema.py
and https://github.com/google-research/language/blob/master/language/nqg/tasks/spider/write_dataset.py
"""
from collections import defaultdict
from pathlib import Path
from typing import Tuple, List, Dict, Any
import pandas as pd
from smbop.compgen.utils import load_json
def load_spider_dataset(dataset_dir: Path) -> Tuple[pd.DataFrame, pd.DataFrame]:
assert dataset_dir.exists()
train_df = _load_and_prepare_examples(dataset_dir, "train")
dev_df = _load_and_prepare_examples(dataset_dir, "dev")
return train_df, dev_df
def _load_and_prepare_examples(dataset_dir: Path, train_or_dev: str) -> pd.DataFrame:
    examples_path = _get_examples_path(dataset_dir, train_or_dev)
tables_path = _get_tables_path(dataset_dir, train_or_dev)
db_id_to_schema_string = _get_db_id_to_schema_string(tables_path)
examples: List[Dict] = load_json(examples_path)
gt_df = pd.DataFrame([_construct_gt_row(example, db_id_to_schema_string)
for example in examples])
return gt_df
def _construct_gt_row(example: Dict[str, Any], db_id_to_schema_string: Dict[str, str],
prefix: str = "semanticparse") -> Dict[str, str]:
question, query, db_id = example["question"].lower(), example["query"].lower(), example["db_id"].lower()
schema_string = db_id_to_schema_string[db_id]
input_text = f"{db_id}: {question} {schema_string}"
gt_row = {"prefix": prefix, "input_text": input_text, "target_text": query}
return gt_row
def _get_examples_path(dataset_dir: Path, train_or_dev: str) -> Path:
if train_or_dev == "train":
return dataset_dir / "train_spider.json"
elif train_or_dev == "dev":
return dataset_dir / "dev.json"
else:
raise ValueError()
def _get_tables_path(dataset_dir: Path, train_or_dev: str) -> Path:
default_table_file = dataset_dir / "tables.json"
if train_or_dev == "train":
return default_table_file
elif train_or_dev == "dev":
dev_table_file = dataset_dir / "dev_tables.json"
if Path(dev_table_file).exists():
return dev_table_file
else:
return default_table_file
else:
raise ValueError()
def _get_db_id_to_schema_string(tables_path: Path) -> Dict[str, str]:
all_dbs_info = load_json(tables_path)
db_id_to_schema_string = {}
for db_info in all_dbs_info:
db_id = db_info["db_id"].lower()
db_id_to_schema_string[db_id] = _construct_schema_string(db_info)
return db_id_to_schema_string
def _construct_schema_string(db_info: Dict[str, Any]) -> str:
"""Returns the schema serialized as a string."""
table_id_to_column_names = defaultdict(list)
for table_id, name in db_info["column_names_original"]:
table_id_to_column_names[table_id].append(name.lower())
tables = db_info["table_names_original"]
table_strings = []
for table_id, table_name in enumerate(tables):
column_names = table_id_to_column_names[table_id]
table_string = " | %s : %s" % (table_name.lower(), " , ".join(column_names))
table_strings.append(table_string)
schema_string = "".join(table_strings)
return schema_string
| 37.146067 | 110 | 0.713249 |
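A quick illustration of the serialized schema format produced by _construct_schema_string (the two-table db_info below is made up, mimicking Spider's tables.json layout):

    db_info = {
        "table_names_original": ["Singer", "Concert"],
        "column_names_original": [[-1, "*"], [0, "Singer_ID"], [0, "Name"], [1, "Concert_ID"]],
    }
    print(_construct_schema_string(db_info))
    # ->  | singer : singer_id , name | concert : concert_id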
8d8cab731b6ce293421afc88be3454b4ae34aa0a | 16,783 | py | Python | server/app/app.py | prete/cellxgene | 11acea86c4b3df334300fac7e9e034c1e61e67bc | ["MIT"] | 3 | 2019-11-11T15:41:07.000Z | 2020-12-14T08:47:35.000Z | server/app/app.py | prete/cellxgene | 11acea86c4b3df334300fac7e9e034c1e61e67bc | ["MIT"] | null | null | null | server/app/app.py | prete/cellxgene | 11acea86c4b3df334300fac7e9e034c1e61e67bc | ["MIT"] | 1 | 2021-05-12T15:15:05.000Z | 2021-05-12T15:15:05.000Z |
import datetime
import logging
from functools import wraps
from http import HTTPStatus
from urllib.parse import urlparse
import hashlib
import os
from flask import (
Flask,
redirect,
current_app,
make_response,
render_template,
abort,
Blueprint,
request,
send_from_directory,
)
from flask_restful import Api, Resource
from server_timing import Timing as ServerTiming
import server.common.rest as common_rest
from server.common.data_locator import DataLocator
from server.common.errors import DatasetAccessError, RequestException
from server.common.health import health_check
from server.common.utils.utils import path_join, Float32JSONEncoder
from server.data_common.matrix_loader import MatrixDataLoader
webbp = Blueprint("webapp", "server.common.web", template_folder="templates")
ONE_WEEK = 7 * 24 * 60 * 60
def _cache_control(always, **cache_kwargs):
"""
Used to easily manage cache control headers on responses.
See Werkzeug for attributes that can be set, eg, no_cache, private, max_age, etc.
https://werkzeug.palletsprojects.com/en/1.0.x/datastructures/#werkzeug.datastructures.ResponseCacheControl
"""
def inner_cache_control(f):
@wraps(f)
def wrapper(*args, **kwargs):
response = make_response(f(*args, **kwargs))
if not always and not current_app.app_config.server_config.app__generate_cache_control_headers:
return response
if response.status_code >= 400:
return response
for k, v in cache_kwargs.items():
setattr(response.cache_control, k, v)
return response
return wrapper
return inner_cache_control
def cache_control(**cache_kwargs):
""" config driven """
return _cache_control(False, **cache_kwargs)
def cache_control_always(**cache_kwargs):
""" always generate headers, regardless of the config """
return _cache_control(True, **cache_kwargs)
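A usage sketch for these decorators (the route and body are made up):

    @webbp.route("/example")
    @cache_control(public=True, max_age=3600)
    def example_view():
        return "ok"

    # With app__generate_cache_control_headers enabled, the response carries
    # "Cache-Control: public, max-age=3600"; cache_control_always(...) would
    # add the header regardless of that config flag.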
# tell the client not to cache the index.html page so that changes to the app work on redeployment
# note that the bulk of the data needed by the client (datasets) will still be cached
@webbp.route("/", methods=["GET"])
@cache_control_always(public=True, max_age=0, no_store=True, no_cache=True, must_revalidate=True)
def dataset_index(url_dataroot=None, dataset=None):
app_config = current_app.app_config
server_config = app_config.server_config
if dataset is None:
if app_config.is_multi_dataset():
return dataroot_index()
else:
location = server_config.single_dataset__datapath
else:
dataroot = None
for key, dataroot_dict in server_config.multi_dataset__dataroot.items():
if dataroot_dict["base_url"] == url_dataroot:
dataroot = dataroot_dict["dataroot"]
break
if dataroot is None:
abort(HTTPStatus.NOT_FOUND)
location = path_join(dataroot, dataset)
dataset_config = app_config.get_dataset_config(url_dataroot)
scripts = dataset_config.app__scripts
inline_scripts = dataset_config.app__inline_scripts
try:
cache_manager = current_app.matrix_data_cache_manager
with cache_manager.data_adaptor(url_dataroot, location, app_config) as data_adaptor:
data_adaptor.set_uri_path(f"{url_dataroot}/{dataset}")
args = {"SCRIPTS": scripts, "INLINE_SCRIPTS": inline_scripts}
return render_template("index.html", **args)
except DatasetAccessError as e:
return common_rest.abort_and_log(
e.status_code, f"Invalid dataset {dataset}: {e.message}", loglevel=logging.INFO, include_exc_info=True
)
@webbp.errorhandler(RequestException)
def handle_request_exception(error):
return common_rest.abort_and_log(error.status_code, error.message, loglevel=logging.INFO, include_exc_info=True)
def get_data_adaptor(url_dataroot=None, dataset=None):
config = current_app.app_config
server_config = config.server_config
dataset_key = None
if dataset is None:
datapath = server_config.single_dataset__datapath
else:
dataroot = None
for key, dataroot_dict in server_config.multi_dataset__dataroot.items():
if dataroot_dict["base_url"] == url_dataroot:
dataroot = dataroot_dict["dataroot"]
dataset_key = key
break
if dataroot is None:
raise DatasetAccessError(f"Invalid dataset {url_dataroot}/{dataset}")
datapath = path_join(dataroot, dataset)
# path_join returns a normalized path. Therefore it is
# sufficient to check that the datapath starts with the
# dataroot to determine that the datapath is under the dataroot.
if not datapath.startswith(dataroot):
raise DatasetAccessError(f"Invalid dataset {url_dataroot}/{dataset}")
if datapath is None:
return common_rest.abort_and_log(HTTPStatus.BAD_REQUEST, "Invalid dataset NONE", loglevel=logging.INFO)
cache_manager = current_app.matrix_data_cache_manager
return cache_manager.data_adaptor(dataset_key, datapath, config)
def requires_authentication(func):
@wraps(func)
def wrapped_function(self, *args, **kwargs):
auth = current_app.auth
if auth.is_user_authenticated():
return func(self, *args, **kwargs)
else:
return make_response("not authenticated", HTTPStatus.UNAUTHORIZED)
return wrapped_function
def rest_get_data_adaptor(func):
@wraps(func)
def wrapped_function(self, dataset=None):
try:
with get_data_adaptor(self.url_dataroot, dataset) as data_adaptor:
data_adaptor.set_uri_path(f"{self.url_dataroot}/{dataset}")
return func(self, data_adaptor)
except DatasetAccessError as e:
return common_rest.abort_and_log(
e.status_code, f"Invalid dataset {dataset}: {e.message}", loglevel=logging.INFO, include_exc_info=True
)
return wrapped_function
def dataroot_test_index():
# the following index page is meant for testing/debugging purposes
data = '<!doctype html><html lang="en">'
data += "<head><title>Hosted Cellxgene</title></head>"
data += "<body><H1>Welcome to cellxgene</H1>"
config = current_app.app_config
server_config = config.server_config
auth = server_config.auth
if auth.is_valid_authentication_type():
if server_config.auth.is_user_authenticated():
data += f"<p>Logged in as {auth.get_user_id()} / {auth.get_user_name()} / {auth.get_user_email()}</p>"
if auth.requires_client_login():
if server_config.auth.is_user_authenticated():
data += f"<p><a href='{auth.get_logout_url(None)}'>Logout</a></p>"
else:
data += f"<p><a href='{auth.get_login_url(None)}'>Login</a></p>"
datasets = []
for dataroot_dict in server_config.multi_dataset__dataroot.values():
dataroot = dataroot_dict["dataroot"]
url_dataroot = dataroot_dict["base_url"]
locator = DataLocator(dataroot, region_name=server_config.data_locator__s3__region_name)
for fname in locator.ls():
location = path_join(dataroot, fname)
try:
MatrixDataLoader(location, app_config=config)
datasets.append((url_dataroot, fname))
except DatasetAccessError:
# skip over invalid datasets
pass
data += "<br/>Select one of these datasets...<br/>"
data += "<ul>"
datasets.sort()
for url_dataroot, dataset in datasets:
data += f"<li><a href={url_dataroot}/{dataset}>{dataset}</a></li>"
data += "</ul>"
data += "</body></html>"
return make_response(data)
def dataroot_index():
# Handle the base url for the cellxgene server when running in multi dataset mode
config = current_app.app_config
if not config.server_config.multi_dataset__index:
abort(HTTPStatus.NOT_FOUND)
elif config.server_config.multi_dataset__index is True:
return dataroot_test_index()
else:
return redirect(config.server_config.multi_dataset__index)
class HealthAPI(Resource):
@cache_control(no_store=True)
def get(self):
config = current_app.app_config
return health_check(config)
class DatasetResource(Resource):
"""Base class for all Resources that act on datasets."""
def __init__(self, url_dataroot):
super().__init__()
self.url_dataroot = url_dataroot
class SchemaAPI(DatasetResource):
# TODO @mdunitz separate dataset schema and user schema
@cache_control(public=True, max_age=ONE_WEEK)
@rest_get_data_adaptor
def get(self, data_adaptor):
return common_rest.schema_get(data_adaptor)
class ConfigAPI(DatasetResource):
@cache_control(public=True, max_age=ONE_WEEK)
@rest_get_data_adaptor
def get(self, data_adaptor):
return common_rest.config_get(current_app.app_config, data_adaptor)
class UserInfoAPI(DatasetResource):
@cache_control_always(no_store=True)
@rest_get_data_adaptor
def get(self, data_adaptor):
return common_rest.userinfo_get(current_app.app_config, data_adaptor)
class AnnotationsObsAPI(DatasetResource):
@cache_control(public=True, max_age=ONE_WEEK)
@rest_get_data_adaptor
def get(self, data_adaptor):
return common_rest.annotations_obs_get(request, data_adaptor)
@requires_authentication
@cache_control(no_store=True)
@rest_get_data_adaptor
def put(self, data_adaptor):
return common_rest.annotations_obs_put(request, data_adaptor)
class AnnotationsVarAPI(DatasetResource):
@cache_control(public=True, max_age=ONE_WEEK)
@rest_get_data_adaptor
def get(self, data_adaptor):
return common_rest.annotations_var_get(request, data_adaptor)
class DataVarAPI(DatasetResource):
@cache_control(no_store=True)
@rest_get_data_adaptor
def put(self, data_adaptor):
return common_rest.data_var_put(request, data_adaptor)
@cache_control(public=True, max_age=ONE_WEEK)
@rest_get_data_adaptor
def get(self, data_adaptor):
return common_rest.data_var_get(request, data_adaptor)
class ColorsAPI(DatasetResource):
@cache_control(public=True, max_age=ONE_WEEK)
@rest_get_data_adaptor
def get(self, data_adaptor):
return common_rest.colors_get(data_adaptor)
class DiffExpObsAPI(DatasetResource):
@cache_control(no_store=True)
@rest_get_data_adaptor
def post(self, data_adaptor):
return common_rest.diffexp_obs_post(request, data_adaptor)
class LayoutObsAPI(DatasetResource):
@cache_control(public=True, max_age=ONE_WEEK)
@rest_get_data_adaptor
def get(self, data_adaptor):
return common_rest.layout_obs_get(request, data_adaptor)
@cache_control(no_store=True)
@rest_get_data_adaptor
def put(self, data_adaptor):
return common_rest.layout_obs_put(request, data_adaptor)
def get_api_base_resources(bp_base):
"""Add resources that are accessed from the api_base_url"""
api = Api(bp_base)
# Diagnostics routes
api.add_resource(HealthAPI, "/health")
return api
def get_api_dataroot_resources(bp_dataroot, url_dataroot=None):
"""Add resources that refer to a dataset"""
api = Api(bp_dataroot)
def add_resource(resource, url):
"""convenience function to make the outer function less verbose"""
api.add_resource(resource, url, resource_class_args=(url_dataroot,))
# Initialization routes
add_resource(SchemaAPI, "/schema")
add_resource(ConfigAPI, "/config")
add_resource(UserInfoAPI, "/userinfo")
# Data routes
add_resource(AnnotationsObsAPI, "/annotations/obs")
add_resource(AnnotationsVarAPI, "/annotations/var")
add_resource(DataVarAPI, "/data/var")
# Display routes
add_resource(ColorsAPI, "/colors")
# Computation routes
add_resource(DiffExpObsAPI, "/diffexp/obs")
add_resource(LayoutObsAPI, "/layout/obs")
return api
def handle_api_base_url(app, app_config):
"""If an api_base_url is provided, then an inline script is generated to
handle the new API prefix"""
api_base_url = app_config.server_config.get_api_base_url()
if not api_base_url:
return
sha256 = hashlib.sha256(api_base_url.encode()).hexdigest()
script_name = f"api_base_url-{sha256}.js"
script_path = os.path.join(app.root_path, "../common/web/templates", script_name)
with open(script_path, "w") as fout:
fout.write("window.CELLXGENE.API.prefix = `" + api_base_url + "${location.pathname}api/`;\n")
dataset_configs = [app_config.default_dataset_config] + list(app_config.dataroot_config.values())
for dataset_config in dataset_configs:
inline_scripts = dataset_config.app__inline_scripts
inline_scripts.append(script_name)
class Server:
@staticmethod
def _before_adding_routes(app, app_config):
""" will be called before routes are added, during __init__. Subclass protocol """
pass
def __init__(self, app_config):
self.app = Flask(__name__, static_folder=None)
handle_api_base_url(self.app, app_config)
self._before_adding_routes(self.app, app_config)
self.app.json_encoder = Float32JSONEncoder
server_config = app_config.server_config
if server_config.app__server_timing_headers:
ServerTiming(self.app, force_debug=True)
# enable session data
self.app.permanent_session_lifetime = datetime.timedelta(days=50 * 365)
# Config
secret_key = server_config.app__flask_secret_key
self.app.config.update(SECRET_KEY=secret_key)
self.app.register_blueprint(webbp)
api_version = "/api/v0.2"
api_base_url = server_config.get_api_base_url()
api_path = "/"
if api_base_url:
parse = urlparse(api_base_url)
api_path = parse.path
bp_base = Blueprint("bp_base", __name__, url_prefix=api_path)
base_resources = get_api_base_resources(bp_base)
self.app.register_blueprint(base_resources.blueprint)
if app_config.is_multi_dataset():
# NOTE: These routes only allow the dataset to be in the directory
# of the dataroot, and not a subdirectory. We may want to change
# the route format at some point
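            # Illustrative shape (names assumed): with url_dataroot "d", a
            # dataset page is served at /d/<dataset> and its REST API under
            # <api_path>/d/<dataset>/api/v0.2, per the rules registered below.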
for dataroot_dict in server_config.multi_dataset__dataroot.values():
url_dataroot = dataroot_dict["base_url"]
bp_dataroot = Blueprint(
f"api_dataset_{url_dataroot}",
__name__,
url_prefix=f"{api_path}/{url_dataroot}/<dataset>" + api_version,
)
dataroot_resources = get_api_dataroot_resources(bp_dataroot, url_dataroot)
self.app.register_blueprint(dataroot_resources.blueprint)
self.app.add_url_rule(
f"/{url_dataroot}/<dataset>",
f"dataset_index_{url_dataroot}",
lambda dataset, url_dataroot=url_dataroot: dataset_index(url_dataroot, dataset),
methods=["GET"],
)
self.app.add_url_rule(
f"/{url_dataroot}/<dataset>/",
f"dataset_index_{url_dataroot}/",
lambda dataset, url_dataroot=url_dataroot: dataset_index(url_dataroot, dataset),
methods=["GET"],
)
self.app.add_url_rule(
f"/{url_dataroot}/<dataset>/static/<path:filename>",
f"static_assets_{url_dataroot}",
view_func=lambda dataset, filename: send_from_directory("../common/web/static", filename),
methods=["GET"],
)
else:
bp_api = Blueprint("api", __name__, url_prefix=f"{api_path}{api_version}")
resources = get_api_dataroot_resources(bp_api)
self.app.register_blueprint(resources.blueprint)
self.app.add_url_rule(
"/static/<path:filename>",
"static_assets",
view_func=lambda filename: send_from_directory("../common/web/static", filename),
methods=["GET"],
)
self.app.matrix_data_cache_manager = server_config.matrix_data_cache_manager
self.app.app_config = app_config
auth = server_config.auth
self.app.auth = auth
if auth.requires_client_login():
auth.add_url_rules(self.app)
auth.complete_setup(self.app)
| 36.484783 | 118 | 0.681404 |
0dce756af4ac34eaddd1baa00507dcd0039cb704 | 3,137 | py | Python | osm/transformers/PreprocessTransformer.py | cjneetha/osm | cc391fbfee8c89a1f15257ea36e6bbc85bbea938 | [
"MIT"
] | 1 | 2018-06-12T09:11:34.000Z | 2018-06-12T09:11:34.000Z | osm/transformers/PreprocessTransformer.py | cjneetha/osm | cc391fbfee8c89a1f15257ea36e6bbc85bbea938 | [
"MIT"
] | null | null | null | osm/transformers/PreprocessTransformer.py | cjneetha/osm | cc391fbfee8c89a1f15257ea36e6bbc85bbea938 | [
"MIT"
] | 2 | 2018-10-22T12:32:02.000Z | 2018-12-28T11:43:03.000Z | from sklearn.base import TransformerMixin, BaseEstimator
import osm.transformers.preprocessor as utils
class PreprocessTransformer(BaseEstimator, TransformerMixin):
def __init__(self,
replace_urls=False,
replace_emoticons=False,
replace_exclamations=False,
replace_punctuations=False,
replace_numbers=False,
replace_negations=False,
replace_colloquials=False,
replace_repeated_letters=False,
replace_contractions=False,
replace_whitespace=True,
replace_currency=False,
replace_currency_with=None,
colloq_dict=None
) -> None:
if replace_colloquials is True and colloq_dict is None:
raise ValueError("The colloquial dictionary is missing")
super().__init__()
self.replace_urls = replace_urls
self.replace_emoticons = replace_emoticons
self.replace_exclamations = replace_exclamations
self.replace_punctuations = replace_punctuations
self.replace_numbers = replace_numbers
self.replace_negations = replace_negations
self.replace_colloquials = replace_colloquials
self.replace_repeated_letters = replace_repeated_letters
self.replace_contractions = replace_contractions
self.replace_whitespace = replace_whitespace
self.replace_currency = replace_currency
self.replace_currency_with = replace_currency_with
self.colloq_dict = colloq_dict
self.colloq_pattern = None
# build regex for colloquials
if replace_colloquials is True:
self.colloq_pattern = utils.build_colloquial_regex_pattern(self.colloq_dict)
def fit(self, x, y=None):
return self
def transform(self, x):
return [utils.run_preprocessor(doc,
replace_urls=self.replace_urls,
replace_emoticons=self.replace_emoticons,
replace_exclamations=self.replace_exclamations,
replace_punctuations=self.replace_punctuations,
replace_numbers=self.replace_numbers,
                                       replace_negations=self.replace_negations,
replace_colloquials=self.replace_colloquials,
replace_repeated_letters=self.replace_repeated_letters,
replace_contractions=self.replace_contractions,
replace_whitespace=self.replace_whitespace,
replace_currency=self.replace_currency,
replace_currency_with=self.replace_currency_with,
colloq_dict=self.colloq_dict,
colloq_pattern=self.colloq_pattern
) for doc in x]
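# Minimal usage sketch (hypothetical input; as a scikit-learn TransformerMixin
# this also drops straight into a Pipeline):
#   pre = PreprocessTransformer(replace_urls=True, replace_punctuations=True)
#   cleaned = pre.fit_transform(["Visit http://example.com now!!!"])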
| 48.261538 | 94 | 0.586548 |
baf5a8d244276591b16fcf1676d22e4321272546 | 22 | py | Python | ch01/hungry.py | KevinCarpricorn/Code | 8d0164f5b28f937e8891854f86e1a9b584122b48 | [
"MIT"
] | null | null | null | ch01/hungry.py | KevinCarpricorn/Code | 8d0164f5b28f937e8891854f86e1a9b584122b48 | [
"MIT"
] | null | null | null | ch01/hungry.py | KevinCarpricorn/Code | 8d0164f5b28f937e8891854f86e1a9b584122b48 | [
"MIT"
] | null | null | null | print("I'm hungry!")
| 11 | 21 | 0.590909 |
f5e938bd6693acdfb11d4c959bf177ca3851066b | 1,545 | py | Python | chpt4/Lottery.py | GDG-Buea/learn-python | 9dfe8caa4b57489cf4249bf7e64856062a0b93c2 | [
"Apache-2.0"
] | null | null | null | chpt4/Lottery.py | GDG-Buea/learn-python | 9dfe8caa4b57489cf4249bf7e64856062a0b93c2 | [
"Apache-2.0"
] | 2 | 2018-05-21T09:39:00.000Z | 2018-05-27T15:59:15.000Z | chpt4/Lottery.py | GDG-Buea/learn-python | 9dfe8caa4b57489cf4249bf7e64856062a0b93c2 | [
"Apache-2.0"
] | 2 | 2018-05-19T14:59:56.000Z | 2018-05-19T15:25:48.000Z | # This program prompts the user to enter a three-digit number and determines whether the user wins according
# to the following rules:
# 1. If the user input matches the lottery number in the exact order, the award is
# $10,000.
# 2. If all the digits in the user input match all the digits in the lottery number, the
# award is $3,000.
# 3. If one digit in the user input matches a digit in the lottery number, the award is
# $1,000.
import random
lotteryNumber = random.randrange(100, 1000)  # a true three-digit number, 100-999
userReply = eval(input("What is the winning number? "))
lotteryNumberRemainder1 = lotteryNumber // 100 #first digit
lotteryNumberRemainderA = lotteryNumber % 100
lotteryNumberRemainder2 = lotteryNumberRemainderA // 10 #second digit
lotteryNumberRemainder3 = lotteryNumberRemainderA % 10 #third digit
userReplydigit1 = userReply // 100 #first digit
userReplydigitA = userReply % 100
userReplydigit2 = userReplydigitA // 10 #second digit
userReplydigit3 = userReplydigitA % 10 #third digit
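# Worked example of the extraction above: for 472, 472 // 100 = 4 (first
# digit), 472 % 100 = 72, then 72 // 10 = 7 (second) and 72 % 10 = 2 (third).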
lotteryDigits = sorted([lotteryNumberRemainder1, lotteryNumberRemainder2, lotteryNumberRemainder3])
userDigits = sorted([userReplydigit1, userReplydigit2, userReplydigit3])
if userReply == lotteryNumber:
    print("Exact match, you have won $10,000")
elif lotteryDigits == userDigits:
    # All three digits match in some order; this must be tested before the
    # looser one-digit rule, otherwise this branch can never be reached.
    print("Cool match, you won $3,000.")
elif userReplydigit1 in lotteryDigits or userReplydigit2 in lotteryDigits or \
        userReplydigit3 in lotteryDigits:
    print("Cool match, you won $1,000.")
else:
    print("You lost")
print("The wining answer is: ", lotteryNumber)
| 38.625 | 108 | 0.752104 |
bb56dbf0d869f5260249c7ad71880d92819a38a3 | 910 | py | Python | packages/pyright-internal/src/tests/samples/annotatedVar5.py | sasano8/pyright | e804f324ee5dbd25fd37a258791b3fd944addecd | [
"MIT"
] | 4,391 | 2019-05-07T01:18:57.000Z | 2022-03-31T20:45:44.000Z | packages/pyright-internal/src/tests/samples/annotatedVar5.py | sasano8/pyright | e804f324ee5dbd25fd37a258791b3fd944addecd | [
"MIT"
] | 2,740 | 2019-05-07T03:29:30.000Z | 2022-03-31T12:57:46.000Z | packages/pyright-internal/src/tests/samples/annotatedVar5.py | sasano8/pyright | e804f324ee5dbd25fd37a258791b3fd944addecd | [
"MIT"
] | 455 | 2019-05-07T12:55:14.000Z | 2022-03-31T17:09:15.000Z | # This sample tests type annotations for instance variables.
class ClassC(object):
def __init__(self):
self.inst_var1 = 3
@property
def prop1(self):
return 1
@prop1.setter
def prop1(self, val):
pass
def foo(self):
# This should generate an error because the assigned
# type doesn't match the declared type.
self.inst_var1 = 3 # type: str
self.inst_var1: str = "hello"
# This should generate an error because the declared
# type doesn't match the previously declared type.
self.inst_var1: int = "hello"
# This should generate an error because the assigned
# type doesn't match the declared type.
self.inst_var1 = "hello" # type: int
self.prop1 = 3
class ClassE(ClassC):
def __init__(self):
# This should generate an error.
self.inst_var1 = 3
| 23.947368 | 60 | 0.616484 |
bf4de26e1b105a29e681e61163c98b1535def1b9 | 3,541 | py | Python | roccclient/models/page_of_persons_all_of.py | Sage-Bionetworks/rocc-client | 85b73afe7d4977094810c0a8094f56ebe7ed3d48 | [
"Apache-2.0"
] | null | null | null | roccclient/models/page_of_persons_all_of.py | Sage-Bionetworks/rocc-client | 85b73afe7d4977094810c0a8094f56ebe7ed3d48 | [
"Apache-2.0"
] | 14 | 2020-12-06T23:54:23.000Z | 2021-02-03T18:35:02.000Z | roccclient/models/page_of_persons_all_of.py | Sage-Bionetworks/rocc-client | 85b73afe7d4977094810c0a8094f56ebe7ed3d48 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
"""
Registry of Open Community Challenge API
The OpenAPI specification implemented by the Challenge Registries. # Introduction TBA # noqa: E501
The version of the OpenAPI document: 0.1.4
Contact: thomas.schaffter@sagebionetworks.org
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from roccclient.configuration import Configuration
class PageOfPersonsAllOf(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'persons': 'list[Person]'
}
attribute_map = {
'persons': 'persons'
}
def __init__(self, persons=None, local_vars_configuration=None): # noqa: E501
"""PageOfPersonsAllOf - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._persons = None
self.discriminator = None
if persons is not None:
self.persons = persons
@property
def persons(self):
"""Gets the persons of this PageOfPersonsAllOf. # noqa: E501
An array of Persons # noqa: E501
:return: The persons of this PageOfPersonsAllOf. # noqa: E501
:rtype: list[Person]
"""
return self._persons
@persons.setter
def persons(self, persons):
"""Sets the persons of this PageOfPersonsAllOf.
An array of Persons # noqa: E501
:param persons: The persons of this PageOfPersonsAllOf. # noqa: E501
:type persons: list[Person]
"""
self._persons = persons
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, PageOfPersonsAllOf):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, PageOfPersonsAllOf):
return True
return self.to_dict() != other.to_dict()
| 28.556452 | 104 | 0.586275 |
9edcb9e090c6785a9096b3736757a151a54dce98 | 20,253 | py | Python | mmtbx/maps/mtriage.py | gbunkoczi/cctbx_project | 2b330def68e362f838d93abe2eb2a8bc6d650408 | [
"BSD-3-Clause-LBNL"
] | null | null | null | mmtbx/maps/mtriage.py | gbunkoczi/cctbx_project | 2b330def68e362f838d93abe2eb2a8bc6d650408 | [
"BSD-3-Clause-LBNL"
] | null | null | null | mmtbx/maps/mtriage.py | gbunkoczi/cctbx_project | 2b330def68e362f838d93abe2eb2a8bc6d650408 | [
"BSD-3-Clause-LBNL"
] | 1 | 2021-03-26T12:52:30.000Z | 2021-03-26T12:52:30.000Z | from __future__ import absolute_import, division, print_function
import sys
from libtbx import group_args
from cctbx import maptbx
import iotbx.phil
from libtbx import adopt_init_args
from cctbx.maptbx import resolution_from_map_and_model
from libtbx import group_args
from cctbx import miller, adptbx
from mmtbx import masks
from scitbx.array_family import flex
import time
from libtbx import introspection
from libtbx.str_utils import size_as_string_with_commas
from libtbx.utils import null_out,Sorry
def show_process_info(out):
print("\\/"*39, file=out)
introspection.virtual_memory_info().show_if_available(out=out, show_max=True)
print("/\\"*39, file=out)
out.flush()
master_params_str = """
scattering_table = wk1995 it1992 n_gaussian neutron *electron
.type = choice
.help = Scattering table (X-ray, neutron or electron)
compute {
map_counts = True
.type = bool
.help = Compute map counts
fsc_curve_model = True
.type = bool
.help = Compute model-map CC in reciprocal space: FSC(model map, data map)
d_fsc_half_map_05 = False
.type = bool
.help = Compute d_half_map (FSC=0.5)
d_fsc_model_05 = True
.type = bool
.help = Compute d_fsc_model (FSC=0.5)
d_fsc_model_0 = True
.type = bool
.help = Compute d_fsc_model (FSC=0)
d_fsc_model_0143 = True
.type = bool
.help = Compute d_fsc_model (FSC=0.143)
d_model = True
.type = bool
.help = Resolution estimate using model and map
d_model_b0 = True
.type = bool
    .help = Resolution estimate using model and map, assuming all atoms B=0
d99 = True
.type = bool
.help = Resolution estimate d99
}
resolution = None
.type = float
.help = Map resolution (d_FSC)
mask_maps = None
.type = bool
.help = Mask out region outside molecule
.style = tribool
auto_mask_if_no_model = False
.type = bool
.help = If mask_maps is set and no model is present, mask based on density
.style = tribool
radius_smooth = None
.type = float
.help = Mask smoothing radius
.short_caption = Mask smoothing radius
radius_smooth_ratio = 2
.type = float
.help = If mask smoothing radius is not specified it will be \
radius_smooth_ratio times the resolution
.short_caption = Mask smoothing radius ratio
n_bins = None
.type = int
.help = Number of bins for FSC curves. Suggested number is 5000. \
Alternative is default (None) which gives bins of width 100.
.short_caption = Bins for FSC
nproc = 1
.type = int
.help = Number of processors to use
show_time = False
.type = bool
.help = Show individual run times for each step
include_curves = True
.type = bool
.help = "Keep FSC curves"
include_mask = True
.type = bool
.help = "Keep mask"
"""
def show_histogram(map_histograms, log):
if(map_histograms.h_half_map_1 is None):
hm = map_histograms.h_map
print(" Values Map", file=log)
lc_1 = hm.data_min()
s_1 = enumerate(hm.slots())
for (i_1,n_1) in s_1:
hc_1 = hm.data_min() + hm.slot_width() * (i_1+1)
print("%8.4f - %-8.4f : %d" % (lc_1, hc_1, n_1), file=log)
lc_1 = hc_1
else:
print(" Full map Half-map1 Half-map2", file=log)
h0 = map_histograms.h_map
h1 = map_histograms.h_half_map_1
h2 = map_histograms.h_half_map_2
data_min = map_histograms._data_min
lc_2 = data_min
lc_1 = h0.data_min()
s_0 = enumerate(h0.slots())
s_1 = h1.slots()
s_2 = h2.slots()
for (i_1,n_1) in s_0:
hc_1 = h0.data_min() + h0.slot_width() * (i_1+1)
hc_2 = data_min + h2.slot_width() * (i_1+1)
print("%8.4f - %-8.4f : %9d %8.4f - %-8.4f : %9d %9d" % (
lc_1, hc_1, n_1, lc_2, hc_2, s_1[i_1], s_2[i_1]), file=log)
lc_1 = hc_1
lc_2 = hc_2
print(" Half-maps, correlation of histograms: ", \
map_histograms.half_map_histogram_cc, file=log)
def get_fsc(map_data, model, params):
result = None
if(params.compute.fsc):
mtriage_params = master_params().extract()
mtriage_params.scattering_table = params.scattering_table
mtriage_params.compute.map_counts = False
mtriage_params.compute.fsc_curve_model = True
mtriage_params.compute.d_fsc_model_05 = False
mtriage_params.compute.d_fsc_model_0 = False
mtriage_params.compute.d_fsc_model_0143 = False
mtriage_params.compute.d_model = False
mtriage_params.compute.d_model_b0 = False
mtriage_params.compute.d99 = False
mtriage_params.mask_maps = True
mtriage_params.resolution = params.resolution
result = mtriage(
map_data = map_data,
xray_structure = model.get_xray_structure(),
params = mtriage_params).get_results().masked.fsc_curve_model
return result
def get_atom_radius(xray_structure=None, resolution=None, radius=None):
if(radius is not None): return radius
radii = []
if(resolution is not None):
radii.append(resolution)
if(xray_structure is not None and resolution is not None):
b_iso = adptbx.u_as_b(
flex.mean(xray_structure.extract_u_iso_or_u_equiv()))
o = maptbx.atom_curves(scattering_type="C", scattering_table="electron")
rad_image = o.image(d_min=resolution, b_iso=b_iso,
radius_max=max(15.,resolution), radius_step=0.01).radius
radii.append(rad_image)
return max(3, min(10, max(radii)))
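# e.g. (assumed inputs) resolution=2.0 with no model yields radii=[2.0], which
# the return above clamps into the [3, 10] Angstrom window, giving 3.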
def master_params():
return iotbx.phil.parse(master_params_str, process_includes=False)
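# Minimal driver sketch (assumes map_data and an xray_structure are already in
# hand from iotbx readers; mirrors the pattern used by get_fsc above):
#   params = master_params().extract()
#   params.resolution = 3.0
#   results = mtriage(map_data=map_data, xray_structure=xray_structure,
#                     params=params).get_results()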
class caller(object):
def __init__(self, show=False):
self.time_cumulative = 0
self.show=show
def call(self, f, msg):
t0 = time.time()
f()
sa=size_as_string_with_commas(
introspection.virtual_memory_info().current_max_sizes().virtual_memory)
if(self.show):
delta = time.time()-t0
self.time_cumulative += delta
print("%6.2f %8.2f %15s:"%(delta, self.time_cumulative, sa), msg)
sys.stdout.flush()
class mtriage(object):
def __init__(self,
map_data,
map_data_1 = None,
map_data_2 = None,
xray_structure = None,
crystal_symmetry = None,
params = None):
self.map_data = map_data.deep_copy()
self.map_data_1 = None
self.map_data_2 = None
if(map_data_1 is not None): self.map_data_1 = map_data_1.deep_copy()
if(map_data_2 is not None): self.map_data_2 = map_data_2.deep_copy()
self.xray_structure = xray_structure
self.crystal_symmetry = crystal_symmetry
self.params = params
self.results_masked = None
self.results_unmasked = None
self.time_cumulative = 0
if(self.params is None):
self.params = master_params().extract()
self.caller = caller(show=self.params.show_time)
def call(self, func, prefix):
t0 = time.time()
result = func()
sa=size_as_string_with_commas(
introspection.virtual_memory_info().current_max_sizes().virtual_memory)
if(self.params.show_time):
delta = time.time()-t0
self.time_cumulative += delta
print("%6.2f %8.2f %15s:"%(delta, self.time_cumulative, sa), prefix)
sys.stdout.flush()
return result
def _run(self):
if(self.params.mask_maps is None):
# No masking
self.params.mask_maps = False
self.results_unmasked = _mtriage(
map_data = self.map_data,
map_data_1 = self.map_data_1,
map_data_2 = self.map_data_2,
xray_structure = self.xray_structure,
crystal_symmetry = self.crystal_symmetry,
params = self.params,
caller = self.caller
).run().get_results(
include_curves = self.params.include_curves,
include_mask = self.params.include_mask)
# Masking
self.params.mask_maps = True
self.results_masked = _mtriage(
map_data = self.map_data,
map_data_1 = self.map_data_1,
map_data_2 = self.map_data_2,
xray_structure = self.xray_structure,
crystal_symmetry = self.crystal_symmetry,
params = self.params,
caller = self.caller
).run().get_results(
include_curves = self.params.include_curves,
include_mask = self.params.include_mask)
else:
result = _mtriage(
map_data = self.map_data,
map_data_1 = self.map_data_1,
map_data_2 = self.map_data_2,
xray_structure = self.xray_structure,
crystal_symmetry = self.crystal_symmetry,
params = self.params,
caller = self.caller
).run().get_results(
include_curves = self.params.include_curves,
include_mask = self.params.include_mask)
if(self.params.mask_maps): self.results_masked = result
else: self.results_unmasked = result
def get_results(self):
self._run()
cs = self.crystal_symmetry
if(cs is None): cs = self.xray_structure.crystal_symmetry()
return group_args(
crystal_symmetry = cs,
masked = self.results_masked,
unmasked = self.results_unmasked)
class _mtriage(object):
def __init__(self,
map_data,
map_data_1,
map_data_2,
xray_structure,
crystal_symmetry,
caller,
params):
adopt_init_args(self, locals())
if(self.crystal_symmetry is None):
self.crystal_symmetry = self.xray_structure.crystal_symmetry()
self.call = self.caller.call
self.resolution = self.params.resolution
# Results
self.d99 = None
self.d999 = None
self.d9999 = None
self.d99999 = None
self.d99_1 = None
self.d99_2 = None
self.d_model = None
self.d_model_b0 = None
self.b_iso_overall = None
self.d_fsc = None
self.d_fsc_05 = None
self.d_fsc_model_05 = None
self.d_fsc_model_0 = None
self.d_fsc_model_0143 = None
self.fsc_curve = None
self.fsc_curve_model = None
self.mask_smooth = None
self.radius_smooth = self.params.radius_smooth
self.n_bins = self.params.n_bins
self.d_corner = None
self.d9999 = None
# Info (results)
self.map_counts = None
self.half_map_1_counts = None
self.half_map_2_counts = None
self.map_histograms = None
# Internal work objects
self.f_map = None
self.f_map_1 = None
self.f_map_2 = None
self.f_calc = None
def run(self):
# Compute radius
self.call(f=self._compute_radius, msg="Compute radius")
# Compute and apply mask
self.call(f=self._compute_and_apply_mask, msg="Masking")
# Compute F_maps
self.call(f=self._compute_f_maps, msg="Compute F_maps")
# Compute d99
self.call(f=self._compute_d99, msg="Compute d99")
# Strategy adjustments based on d99
self.call(f=self._adjust, msg="Adjustments based on d99")
# Compute half-map FSC
self.call(f=self._compute_half_map_fsc, msg="Compute half-map FSC")
# Compute half-map FSC at 0.5
self.call(f=self._compute_half_map_fsc_05, msg="Compute half-map FSC_05")
# Compute Fcalc
self.call(f=self._compute_f_calc, msg="Compute Fcalc")
# Map-model FSC curve
self.call(f=self._compute_fsc_curve_model, msg="Compute fsc_curve_model")
# d_fsc_model_0
self.call(f=self._compute_f_fsc_model_0, msg="Compute d_fsc_model_0")
# d_fsc_model_0143
self.call(f=self._compute_f_fsc_model_0143, msg="Compute d_fsc_model_0143")
# d_fsc_model_05
self.call(f=self._compute_f_fsc_model_05, msg="Compute d_fsc_model_05")
# Compute d_model
self.call(f=self._compute_d_model, msg="Compute d_model")
return self
def _adjust(self):
if(self.d99>10.): # Atomic model isn't suitable?
self.params.compute.fsc_curve_model = False
self.params.compute.d_fsc_model_05 = False
self.params.compute.d_fsc_model_0 = False
self.params.compute.d_fsc_model_0143= False
self.params.compute.d_model = False
self.params.compute.d_model_b0 = False
def _compute_radius(self):
if(not self.params.mask_maps): return
if(self.radius_smooth is not None): return
if self.resolution and self.params.radius_smooth_ratio:
self.radius_smooth = \
self.params.resolution*self.params.radius_smooth_ratio
return
if(self.xray_structure is None):
if(self.resolution): # resolution but no smooth ratio
self.radius_smooth = self.resolution
return
if(self.xray_structure is not None and
[self.radius_smooth, self.resolution].count(None)==2):
f_map = miller.structure_factor_box_from_map(
map = self.map_data,
crystal_symmetry = self.crystal_symmetry)
self.radius_smooth = maptbx.d99(f_map = f_map).result.d99
if self.params.radius_smooth_ratio:
        self.radius_smooth = self.radius_smooth * self.params.radius_smooth_ratio
self.radius_smooth = get_atom_radius(
xray_structure = self.xray_structure,
radius = self.radius_smooth,
resolution = self.resolution)
def _compute_soft_mask_from_density(self):
from cctbx.maptbx.segment_and_split_map import get_iterated_solvent_fraction
mask_data,solvent_fraction=get_iterated_solvent_fraction(
crystal_symmetry=self.crystal_symmetry,
fraction_of_max_mask_threshold=0.05, #
cell_cutoff_for_solvent_from_mask=1, # Use low-res method always
mask_resolution=self.resolution,
return_mask_and_solvent_fraction=True,
verbose=False,
map=self.map_data,
out=null_out())
if solvent_fraction:
from cctbx.maptbx.segment_and_split_map import apply_soft_mask
map_data,smoothed_mask_data=apply_soft_mask(map_data=self.map_data,
mask_data=mask_data.as_double(),
rad_smooth=self.radius_smooth,
crystal_symmetry=self.crystal_symmetry,
out=null_out())
return smoothed_mask_data
else:
return None
def _compute_and_apply_mask(self):
if(not self.params.mask_maps): return
if(self.xray_structure is None):
self.mask_smooth=None
if self.params.auto_mask_if_no_model:
if not self.params.resolution:
raise Sorry("Need approximate resolution for auto_mask_if_no_model")
# generate mask from the density
self.mask_smooth=self._compute_soft_mask_from_density()
if not self.mask_smooth: # failed or did not attempt
return
else:
self.mask_smooth = masks.smooth_mask(
xray_structure = self.xray_structure,
n_real = self.map_data.all(),
rad_smooth = self.radius_smooth).mask_smooth
self.map_data = self.map_data*self.mask_smooth
if(self.map_data_1 is not None):
self.map_data_1 = self.map_data_1*self.mask_smooth
self.map_data_2 = self.map_data_2*self.mask_smooth
def _compute_f_maps(self):
self.f_map = miller.structure_factor_box_from_map(
map = self.map_data,
crystal_symmetry = self.crystal_symmetry)
if(self.map_data_1 is not None):
self.f_map_1 = miller.structure_factor_box_from_map(
map = self.map_data_1,
crystal_symmetry = self.crystal_symmetry)
self.f_map_2 = miller.structure_factor_box_from_map(
map = self.map_data_2,
crystal_symmetry = self.crystal_symmetry)
def _compute_d99(self):
if(not self.params.compute.d99): return
d99 = maptbx.d99(f_map = self.f_map)
self.d99 = d99.result.d99
self.d999 = d99.result.d999
self.d9999 = d99.result.d9999
self.d99999 = d99.result.d99999
self.f_map = self.f_map.resolution_filter(d_min = self.d99999-0.1)
d99_obj_1, d99_obj_2 = None,None
if(self.map_data_1 is not None):
d99_1 = maptbx.d99(
map = self.map_data_1,
crystal_symmetry = self.crystal_symmetry)
d99_2 = maptbx.d99(
map = self.map_data_2,
crystal_symmetry = self.crystal_symmetry)
self.d99_1 = d99_1.result.d99
self.d99_2 = d99_2.result.d99
self.f_map_1 = d99_1.f_map
self.f_map_2 = d99_2.f_map
def _compute_f_calc(self):
if(self.xray_structure is None): return
self.f_calc = self.f_map.structure_factors_from_scatterers(
xray_structure = self.xray_structure).f_calc()
def _compute_d_model(self):
if(not self.params.compute.d_model): return
if(self.xray_structure is not None):
o = resolution_from_map_and_model.run(
f_map = self.f_map,
d_fsc_model = self.d_fsc_model_0,
xray_structure = self.xray_structure)
self.d_model = o.d_min
self.b_iso_overall = o.b_iso
self.d_model_b0 = o.d_model_b0
def _compute_half_map_fsc(self):
if(self.map_data_1 is not None):
bin_width=100
if self.n_bins:
bin_width=max(bin_width,int(0.5+self.f_map_1.size()/self.n_bins))
self.fsc_curve = self.f_map_1.d_min_from_fsc(
other = self.f_map_2, bin_width=bin_width, fsc_cutoff=0.143)
self.d_fsc = self.fsc_curve.d_min
def _compute_half_map_fsc_05(self):
if(not self.params.compute.d_fsc_half_map_05): return
if(self.map_data_1 is not None):
bin_width=100
if self.n_bins:
bin_width=max(bin_width,int(0.5+self.f_map_1.size()/self.n_bins))
self.fsc_curve_05 = self.f_map_1.d_min_from_fsc(
other = self.f_map_2, bin_width=bin_width, fsc_cutoff=0.5)
self.d_fsc_05 = self.fsc_curve_05.d_min
def _compute_fsc_curve_model(self):
if(not self.params.compute.fsc_curve_model): return
if(self.xray_structure is not None):
bin_width=100
if self.n_bins:
bin_width=max(bin_width,int(0.5+self.f_calc.size()/self.n_bins))
self.fsc_curve_model = self.f_calc.fsc(
other=self.f_map, bin_width=bin_width)
def _compute_f_fsc_model_0(self):
if(not self.params.compute.d_fsc_model_0): return
if(self.xray_structure is None): return
assert self.fsc_curve_model is not None
self.d_fsc_model_0 = self.f_calc.d_min_from_fsc(
fsc_curve=self.fsc_curve_model, fsc_cutoff=0.).d_min
def _compute_f_fsc_model_0143(self):
if(not self.params.compute.d_fsc_model_0143): return
if(self.xray_structure is None): return
assert self.fsc_curve_model is not None
self.d_fsc_model_0143 = self.f_calc.d_min_from_fsc(
fsc_curve=self.fsc_curve_model, fsc_cutoff=0.143).d_min
def _compute_f_fsc_model_05(self):
if(not self.params.compute.d_fsc_model_05): return
if(self.xray_structure is None): return
assert self.fsc_curve_model is not None
self.d_fsc_model_05 = self.f_calc.d_min_from_fsc(
fsc_curve=self.fsc_curve_model, fsc_cutoff=0.5).d_min
def get_results(self, include_curves, include_mask):
mask = None
if(self.mask_smooth is not None and include_mask):
mask = self.mask_smooth
map_histograms = None
fsc_curve = None
fsc_curve_model = None
if(include_curves):
map_histograms = self.map_histograms
fsc_curve = self.fsc_curve
fsc_curve_model = self.fsc_curve_model
return group_args(
d99 = self.d99,
d999 = self.d999,
d9999 = self.d9999,
d99999 = self.d99999,
d99_1 = self.d99_1,
d99_2 = self.d99_2,
d_model = self.d_model,
d_model_b0 = self.d_model_b0,
b_iso_overall = self.b_iso_overall,
d_fsc = self.d_fsc,
d_fsc_05 = self.d_fsc_05,
d_fsc_model_05 = self.d_fsc_model_05,
d_fsc_model_0 = self.d_fsc_model_0,
d_fsc_model_0143 = self.d_fsc_model_0143,
fsc_curve = fsc_curve,
fsc_curve_model = fsc_curve_model,
mask = mask,
radius_smooth = self.radius_smooth)
if (__name__ == "__main__"):
run(args=sys.argv[1:])
| 36.62387 | 85 | 0.662075 |
31a4fc6272833a2fd8f8ded5ca66dd5ad3b78a50 | 204 | py | Python | app/entrypoint/operator/__init__.py | globocom/enforcement | 004ff545d6d61b95b555d9981525510496862b3e | [
"BSD-3-Clause"
] | 7 | 2020-11-08T18:02:26.000Z | 2021-10-15T21:40:35.000Z | app/entrypoint/operator/__init__.py | globocom/enforcement | 004ff545d6d61b95b555d9981525510496862b3e | [
"BSD-3-Clause"
] | 19 | 2020-11-19T20:57:20.000Z | 2021-09-03T14:53:34.000Z | app/entrypoint/operator/__init__.py | globocom/enforcement-service | 004ff545d6d61b95b555d9981525510496862b3e | [
"BSD-3-Clause"
] | 3 | 2020-10-03T02:40:34.000Z | 2020-10-19T10:17:06.000Z | from app.entrypoint.operator.base_controller import BaseController
from app.entrypoint.operator.cluster_rule_controller import ClusterRuleController
__all__ = ['ClusterRuleController', 'BaseController']
| 40.8 | 81 | 0.867647 |
2671a12137caa28aed8305be2b721b4431af6359 | 1,475 | py | Python | samples/generated_samples/retail_v2_generated_catalog_service_list_catalogs_sync.py | tetiana-karasova/python-retail | b834c1fb16212e59241267e18d38b490e962af7f | [
"Apache-2.0"
] | 1 | 2022-02-11T14:00:31.000Z | 2022-02-11T14:00:31.000Z | samples/generated_samples/retail_v2_generated_catalog_service_list_catalogs_sync.py | tetiana-karasova/python-retail | b834c1fb16212e59241267e18d38b490e962af7f | [
"Apache-2.0"
] | null | null | null | samples/generated_samples/retail_v2_generated_catalog_service_list_catalogs_sync.py | tetiana-karasova/python-retail | b834c1fb16212e59241267e18d38b490e962af7f | [
"Apache-2.0"
] | 2 | 2022-01-28T09:53:16.000Z | 2022-02-07T14:27:38.000Z | # -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for ListCatalogs
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-retail
# [START retail_v2_generated_CatalogService_ListCatalogs_sync]
from google.cloud import retail_v2
def sample_list_catalogs():
# Create a client
client = retail_v2.CatalogServiceClient()
# Initialize request argument(s)
request = retail_v2.ListCatalogsRequest(
parent="parent_value",
)
# Make the request
page_result = client.list_catalogs(request=request)
# Handle the response
for response in page_result:
print(response)
# [END retail_v2_generated_CatalogService_ListCatalogs_sync]
| 31.382979 | 85 | 0.754576 |
f7baddc3fb92b8a125421774d798390ea89f7e60 | 14,539 | py | Python | src/examplePlugins/update_timecode_values.py | darkvertex/shotgunEvents | 453005e6b4ec42031102f032ff8cfb73709bbcef | [
"MIT"
] | null | null | null | src/examplePlugins/update_timecode_values.py | darkvertex/shotgunEvents | 453005e6b4ec42031102f032ff8cfb73709bbcef | [
"MIT"
] | null | null | null | src/examplePlugins/update_timecode_values.py | darkvertex/shotgunEvents | 453005e6b4ec42031102f032ff8cfb73709bbcef | [
"MIT"
] | null | null | null | # Copyright 2018 Autodesk, Inc. All rights reserved.
#
# Use of this software is subject to the terms of the Autodesk license agreement
# provided at the time of installation or download, or which otherwise accompanies
# this software in either electronic or hard copy form.
#
# See docs folder for detailed usage info.
from __future__ import division
import os
import shotgun_api3
def registerCallbacks(reg):
"""
Register our callbacks.
:param reg: A Registrar instance provided by the event loop handler.
"""
# Grab authentication env vars for this plugin. Install these into the env
# if they don't already exist.
server = os.environ["SG_SERVER"]
script_name = os.environ["SGDAEMON_FIELDTOFIELD_NAME"]
script_key = os.environ["SGDAEMON_FIELDTOFIELD_KEY"]
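    # Shell setup sketch (values are placeholders, not real credentials):
    #   export SG_SERVER="https://example.shotgunstudio.com"
    #   export SGDAEMON_FIELDTOFIELD_NAME="field_to_field_script"
    #   export SGDAEMON_FIELDTOFIELD_KEY="0123456789abcdef"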
args = {
"entity_type": "Version",
"entity_name_field": "code",
"head_duration_field": "sg_head_duration",
"tail_duration_field": "sg_tail_duration",
"timecode_cut_in_field": "sg_timecode_cut_in",
"timecode_cut_out_field": "sg_timecode_cut_out",
"timecode_in_field": "sg_timecode_in",
"timecode_out_field": "sg_timecode_out",
"first_frame_field": "sg_first_frame",
"last_frame_field": "sg_last_frame",
"frame_count_field": "frame_count",
"fps": 24.0,
}
# Grab an sg connection for the validator.
sg = shotgun_api3.Shotgun(server, script_name=script_name, api_key=script_key)
# Bail if our validator fails.
if not is_valid(sg, reg.logger, args):
reg.logger.warning("Plugin is not valid, will not register callback.")
return
event_filter = {
"Shotgun_%s_Change"
% args["entity_type"]: [
args["head_duration_field"],
args["tail_duration_field"],
args["first_frame_field"],
]
}
reg.registerCallback(
script_name,
script_key,
update_timecode_and_frame_values,
event_filter,
args,
)
reg.logger.debug("Registered callback.")
def check_entity_schema(sg, logger, entity_type, field_name, field_type):
"""
Verifies that field_name of field_type exists in entity_type's schema.
:param sg: An authenticated Shotgun Python API instance.
:param entity_type: String, a Shotgun entity type.
:param field_name: String, the name of a field on entity_type.
:param field_type: List of strings, the Shotgun field type field_name should be.
:returns: True if valid, None otherwise.
"""
# Make sure we can read the schema.
try:
entity_schema = sg.schema_field_read(entity_type)
except Exception as e:
logger.warning('Can\'t read SG schema for entity "%s": %s' % (entity_type, e))
return
# Grab the Shotgun field data type, if the field exists.
sg_type = entity_schema.get(field_name, {}).get("data_type", {}).get("value")
# Assume the entity doesn't exist in Shotgun and bail if no data_type value
# was found.
if not sg_type:
logger.warning(
'%s entity field "%s" does not exist in Shotgun, please fix.'
% (
entity_type,
field_name,
)
)
return
# Make sure the field is the correct Shotgun type.
if sg_type not in field_type:
logger.warning(
'SG field "%s" is type "%s" but should be of type(s) "%s," please fix.'
% (field_name, sg_type, field_type)
)
return
return True
def is_valid(sg, logger, args):
"""
Validate our args.
:param sg: Shotgun API handle.
:param logger: Logger instance.
:param args: Any additional misc arguments passed through this plugin.
:returns: True if plugin is valid, None if not.
"""
args_to_check = {
"entity_type": {"type": [str], "allow_empty": False},
"entity_name_field": {
"type": [str],
"allow_empty": False,
"entity": args["entity_type"],
"sg_type": "text",
},
"head_duration_field": {
"type": [str],
"allow_empty": False,
"entity": args["entity_type"],
"sg_type": "number",
},
"tail_duration_field": {
"type": [str],
"allow_empty": False,
"entity": args["entity_type"],
"sg_type": "number",
},
"timecode_cut_in_field": {
"type": [str],
"allow_empty": False,
"entity": args["entity_type"],
"sg_type": "text",
},
"timecode_cut_out_field": {
"type": [str],
"allow_empty": False,
"entity": args["entity_type"],
"sg_type": "text",
},
"timecode_in_field": {
"type": [str],
"allow_empty": False,
"entity": args["entity_type"],
"sg_type": "text",
},
"timecode_out_field": {
"type": [str],
"allow_empty": False,
"entity": args["entity_type"],
"sg_type": "text",
},
"first_frame_field": {
"type": [str],
"allow_empty": False,
"entity": args["entity_type"],
"sg_type": "number",
},
"last_frame_field": {
"type": [str],
"allow_empty": False,
"entity": args["entity_type"],
"sg_type": "number",
},
"frame_count_field": {
"type": [str],
"allow_empty": False,
"entity": args["entity_type"],
"sg_type": "number",
},
"fps": {"type": [float]},
}
# Make sure we can read the entity_type's schema.
try:
sg.schema_field_read(args["entity_type"])
except Exception as e:
logger.warning(
'Can\'t read SG schema for "entity_type" setting\'s value ("%s"): %s'
% (args["entity_type"], e)
)
return
for name, checks in args_to_check.items():
# Grab the setting's value type.
value_type = type(args[name])
# Make sure the setting value is the correct Python type.
if value_type not in checks["type"]:
logger.warning(
'"%s" setting\'s value is type "%s" but should be type "%s," please fix.'
% (name, value_type, checks["type"])
)
return
# Make sure the setting has a non-empty value if allow_empty is False.
if checks.get("allow_empty") is False and not args[name]:
logger.warning(
'"%s" setting\'s value is empty but requires a value, please fix.'
% (name,)
)
return
# We can't run any more checks unless the setting's value is non-empty.
if args[name]:
# If we've got an entity value, we assume the setting refers to a
# Shotgun field. If we don't, stop the checks here.
if "entity" not in checks:
continue
# Perform some standard checks on the entity and field.
if not check_entity_schema(
sg, logger, checks["entity"], args[name], checks["sg_type"]
):
return
return True
def get_updates(sg, logger, event, args, entity):
"""
Updates timecode, handles, and other editorial field values.
:param sg: Shotgun API handle.
:param logger: Logger instance.
:param event: A Shotgun EventLogEntry entity dictionary.
:param args: Any additional misc arguments passed through this plugin.
:param entity: A Shotgun entity dict.
:return: A dictionary with entity values to update, or None.
"""
# Initialize our update_data dict.
update_data = {}
# If the head duration changes, update the timecode cut in value.
if event["attribute_name"] == args["head_duration_field"]:
# Return if there's no value in the timecode cut in field.
if not entity[args["timecode_cut_in_field"]]:
return
# Retrieve the first frame from timecode.
first_frame = frame_from_timecode(
entity[args["timecode_cut_in_field"]], args["fps"]
) - (entity[args["head_duration_field"]] or 0)
# Register our first_frame value in the update dict.
update_data[args["timecode_in_field"]] = timecode_from_frame(
first_frame, args["fps"]
)
# If the tail duration changes, update the timecode out value.
elif event["attribute_name"] == args["tail_duration_field"]:
# Return if there's no value in the timecode cut out field.
if not entity[args["timecode_cut_out_field"]]:
return
# Retrieve a frame to convert from timecode.
timecode_frame = frame_from_timecode(
entity.get(args["timecode_cut_out_field"]), args["fps"]
) + (entity.get(args["tail_duration_field"]) or 0)
# Register our timecode_frame value in the update dict.
update_data[args["timecode_out_field"]] = timecode_from_frame(
timecode_frame, args["fps"]
)
# If the first frame changes, adjust the last frame value.
elif event["attribute_name"] == args["first_frame_field"]:
# Return if there's no value in the first frame field.
if not entity[args["first_frame_field"]]:
return
# Register our last_frame value in the update dict.
update_data[args["last_frame_field"]] = (
entity[args["first_frame_field"]] or 0
) + (entity[args["frame_count_field"]] or 0)
# If the update_data dict is not empty, we have to also recompute and update
# the frame_count and last_frame values.
if update_data:
timecode_out = (
update_data[args["timecode_out_field"]]
if args["timecode_out_field"] in update_data
else entity[args["timecode_out_field"]]
)
timecode_in = (
update_data[args["timecode_in_field"]]
if args["timecode_in_field"] in update_data
else entity[args["timecode_in_field"]]
)
frame_duration = frame_from_timecode(
timecode_out, args["fps"]
) - frame_from_timecode(timecode_in)
update_data[args["frame_count_field"]] = frame_duration
if entity[args["first_frame_field"]]:
update_data[args["last_frame_field"]] = (
entity[args["first_frame_field"]] or 0
) + (update_data[args["frame_count_field"]] or 0)
return update_data
def frame_from_timecode(timecode, fps=24.0):
"""
Return the frame corresponding to the given timecode, for the given fps.
:param timecode: String, timecode.
:param fps: Float representing frames-per-second.
:returns: Int representing a number of frames.
"""
# Return a frame of 0 if we don't have a valid timecode or we have a drop
# frame timecode (drop frame is unsupported).
if not timecode or ":" not in timecode or ";" in timecode:
return 0
(hour, minute, second, frame) = timecode.split(":")
hours = int(hour)
minutes = int(minute)
seconds = int(second)
frames = int(frame)
seconds = (hours * 60 * 60) + (minutes * 60) + seconds
frames = (seconds * fps) + frames
return int(round(frames))
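# Worked example (non-drop-frame, values from the formula above): at fps=24.0,
# "00:00:02:12" gives seconds = 2, so frames = (2 * 24) + 12 = 60.
#   frame_from_timecode("00:00:02:12", 24.0)  # -> 60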
def timecode_from_frame(frame_duration, fps=24.0):
"""
Return the timecode corresponding to the given frame, for the given fps.
:param frame_duration: Int representing a number of frames.
:param fps: Float value representing frames per second.
:returns: String representing a non-drop-frame timecode value.
"""
# Total number of seconds in whole clip.
seconds = frame_duration / fps
# Remainder frames from seconds calculation.
remainder = seconds - int(seconds)
frames = int(round(remainder * fps))
# Total number of minutes in the whole clip.
minutes = int(seconds) / 60
# Remainder seconds from minutes calculation
remainder = minutes - int(minutes)
seconds = int(round(remainder * 60))
# Total number of hours in the whole clip.
hours = int(minutes) / 60
# Remainder minutes from hours calculation.
remainder = hours - int(hours)
minutes = int(round(remainder * 60))
# Hours without the remainder.
hours = int(hours)
timecode = "%02d:%02d:%02d:%02d" % (hours, minutes, seconds, frames)
return timecode
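# Worked example (inverse of the above): 60 frames at fps=24.0 is 2.5 seconds,
# i.e. 2 whole seconds plus 12 leftover frames:
#   timecode_from_frame(60, 24.0)  # -> "00:00:02:12"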
def update_timecode_and_frame_values(sg, logger, event, args):
"""
Update both timecode and frame values.
:param sg: Shotgun API handle.
:param logger: Logger instance.
:param event: A Shotgun EventLogEntry entity dictionary.
:param args: Any additional misc arguments passed through this plugin.
"""
# Return if we don't have all the field values we need.
if not event.get("meta", {}).get("entity_id"):
return
# Make some vars for convenience.
entity_id = event["meta"]["entity_id"]
entity_type = args["entity_type"]
# Requery the entity to gather additional field values.
entity = sg.find_one(
entity_type,
[["id", "is", entity_id]],
[
args["entity_name_field"],
args["timecode_in_field"],
args["timecode_cut_in_field"],
args["head_duration_field"],
args["timecode_out_field"],
args["timecode_cut_out_field"],
args["tail_duration_field"],
args["first_frame_field"],
args["frame_count_field"],
],
)
# Return if the entity isn't found.
if not entity:
logger.debug("No %s with id %s.", (entity_type, entity_id))
return
# Determine and calculate values to update on the entity, if any.
update_data = get_updates(sg, logger, event, args, entity)
# Update our entity with the values in update_data.
if update_data:
sg.update(entity_type, entity_id, update_data)
logger.info(
"%s %s updated with %s."
% (
entity_type,
entity[args["entity_name_field"]],
update_data,
)
)
else:
logger.info(
"Nothing to update on %s %s with id %s."
% (
entity_type,
entity[args["entity_name_field"]],
entity_id,
)
)
| 32.024229 | 89 | 0.596877 |
ccd53a8d3862ef5c6d2753c2286b26b68df9b5ca | 2,204 | py | Python | pebm/ebm/WavesCharacteristics.py | SheinaG/pebm_new | ab5822a66c18d18dcf52e1b6af8b1bd515462fd2 | [
"MIT"
] | null | null | null | pebm/ebm/WavesCharacteristics.py | SheinaG/pebm_new | ab5822a66c18d18dcf52e1b6af8b1bd515462fd2 | [
"MIT"
] | null | null | null | pebm/ebm/WavesCharacteristics.py | SheinaG/pebm_new | ab5822a66c18d18dcf52e1b6af8b1bd515462fd2 | [
"MIT"
] | null | null | null | import numpy as np
def compute_amp(ecg, features_dict):
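    # Amplitude per beat: a single fiducial array yields the signal value at
    # each valid sample; a [begin, end] pair yields |end - begin|; entries of
    # -1 mark beats where a fiducial point was not detected.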
amp = np.ones(len(features_dict[0])) * -1
if len(features_dict) == 1:
amp[features_dict[0] != -1] = ecg[features_dict[0][features_dict[0] != -1]]
else:
begin_amp = np.ones(len(features_dict[0])) * -1
end_amp = np.ones(len(features_dict[0])) * -1
begin_amp[features_dict[0] != -1] = ecg[features_dict[0][features_dict[0] != -1]]
end_amp[features_dict[1] != -1] = ecg[features_dict[1][features_dict[1] != -1]]
amp = np.abs(end_amp - begin_amp)
amp[(begin_amp == -1) | (end_amp == -1)] = -1
return amp
def compute_area(ecg, freq, features_dict, factor=1000):
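    # Area per beat: sum of |ecg| between each begin/end fiducial pair, scaled
    # by factor / freq (with factor=1000 this is milliseconds per sample).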
begin_fiducial = features_dict[0]
end_fiducial = features_dict[1]
area = np.ones(len(features_dict[0])) * -1
r_index = (begin_fiducial != -1) & (end_fiducial != -1)
area = area[r_index]
bfi = begin_fiducial[r_index].astype(int)
efi = end_fiducial[r_index].astype(int)
for i in range(0, len(bfi) - 1):
area[i] = np.sum(np.abs(ecg[bfi[i]: efi[i]]))
units = (factor / freq)
area = area * units
return area
def extract_waves_characteristics(ecg, freq, features_dict):
amplitude_points = dict(Pwave=[features_dict['P'], features_dict['Poff']],
Twave=[features_dict['T'], features_dict['Toff']],
Rwave=[features_dict['qrs']],
STamp=[features_dict['QRSoff'], features_dict['Ton']])
area_points = dict(Parea=[features_dict['Pon'], features_dict['Poff']],
Tarea=[features_dict['Ton'], features_dict['Toff']],
QRSarea=[features_dict['QRSon'], features_dict['QRSoff']])
amplitude = {}
for key in amplitude_points:
amplitude[key] = compute_amp(ecg, amplitude_points[key])
area = {}
for key in area_points:
area[key] = compute_area(ecg, freq, area_points[key], factor=1000)
J_offset = int(0.04 * freq)
J_ecg = ecg[features_dict['QRSoff'] + J_offset]
J_point = dict(Jpoint=J_ecg)
Waves = {}
Waves.update(amplitude)
Waves.update(area)
Waves.update(J_point)
return Waves
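# Usage sketch (assumed inputs: ecg is a 1-D signal array, freq its sampling
# rate in Hz, and features_dict maps fiducial names such as 'P', 'Poff',
# 'qrs' and 'Ton' to index arrays that use -1 for undetected beats):
#   waves = extract_waves_characteristics(ecg, freq, features_dict)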
| 36.733333 | 89 | 0.604809 |
77dffec983e941c840257d45e516e4a1ee1abac5 | 145 | py | Python | src/cms/views/offer_templates/__init__.py | S10MC2015/cms-django | b08f2be60a9db6c8079ee923de2cd8912f550b12 | [
"Apache-2.0"
] | 4 | 2019-12-05T16:45:17.000Z | 2020-05-09T07:26:34.000Z | src/cms/views/offer_templates/__init__.py | S10MC2015/cms-django | b08f2be60a9db6c8079ee923de2cd8912f550b12 | [
"Apache-2.0"
] | 56 | 2019-12-05T12:31:37.000Z | 2021-01-07T15:47:45.000Z | src/cms/views/offer_templates/__init__.py | S10MC2015/cms-django | b08f2be60a9db6c8079ee923de2cd8912f550b12 | [
"Apache-2.0"
] | 2 | 2019-12-11T09:52:26.000Z | 2020-05-09T07:26:38.000Z | """
Python standard Init-File
"""
from .offer_template_list_view import OfferTemplateListView
from .offer_template_view import OfferTemplateView
| 24.166667 | 59 | 0.848276 |
1717c2dfe8ff950225a3e707cf5448cf3e647835 | 1,122 | py | Python | ofx2xlsmbr/tests/BankStatementAdderTest.py | rmusmanno/ofx2xlsmbr | 15d45148f6ad77ee300a42215c3098576ff9d7c4 | [
"MIT"
] | null | null | null | ofx2xlsmbr/tests/BankStatementAdderTest.py | rmusmanno/ofx2xlsmbr | 15d45148f6ad77ee300a42215c3098576ff9d7c4 | [
"MIT"
] | 1 | 2021-03-31T19:12:01.000Z | 2021-03-31T19:12:01.000Z | ofx2xlsmbr/tests/BankStatementAdderTest.py | rmusmanno/ofx2xlsmbr | 15d45148f6ad77ee300a42215c3098576ff9d7c4 | [
"MIT"
] | null | null | null | from ..utils.BankStatementAdder import BankStatementAdder
from ..model.BankStatement import BankStatement
from ..model.CashFlow import CashFlow, CashFlowType
import datetime
import logging
logger = logging.getLogger(__name__)
def bankStatementAdderTest():
outflow = [CashFlow('debit 1', CashFlowType.DEBIT, -100.0, datetime.datetime.now().date()),
CashFlow('debit 1', CashFlowType.DEBIT, -100.0, datetime.datetime.now().date()),
CashFlow('debit 2', CashFlowType.DEBIT, -127.0, datetime.datetime.now().date()),
CashFlow('credit 1', CashFlowType.CREDIT, -42.0, datetime.datetime.now().date())]
inflow = [CashFlow('credit 1', CashFlowType.CREDIT, 1000.0, datetime.datetime.now().date())]
bs = BankStatement(inflow, outflow)
inflow2 = [CashFlow('credit 1', CashFlowType.CREDIT, 1000.0, datetime.datetime.now().date()),
CashFlow('credit 2', CashFlowType.CREDIT, 2000.0, datetime.datetime.now().date())]
bs2 = BankStatement(inflow2, outflow)
adder = BankStatementAdder()
bsResult = adder.add(bs, bs2)
logger.info(bsResult) | 38.689655 | 98 | 0.688948 |
3ae52b5097ceb7e3b0fcd3d887855006ae286b30 | 53,352 | py | Python | acapi2/tests/test_environments.py | pmatias/acapi2 | 06df9f7e0ebf77c01aa05572d6c9a21e5ca0a353 | [
"MIT"
] | null | null | null | acapi2/tests/test_environments.py | pmatias/acapi2 | 06df9f7e0ebf77c01aa05572d6c9a21e5ca0a353 | [
"MIT"
] | null | null | null | acapi2/tests/test_environments.py | pmatias/acapi2 | 06df9f7e0ebf77c01aa05572d6c9a21e5ca0a353 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Test Acquia Environments"""
import requests_mock
from acapi2.tests import BaseTest
@requests_mock.Mocker()
class TestEnvironments(BaseTest):
def test_backups(self, mocker):
env_id = "1-a47ac10b-58cc-4372-a567-0e02b2c3d470"
db_name = "db_name"
uri = (
f"{self.endpoint}/environments/{env_id}/"
f"databases/{db_name}/backups"
)
response = {
"total": 2,
"pagination": {"total": 2, "limit": 2, "offset": 0},
"_links": {
"self": {
"href": "https://cloud.acquia.com/api/environments/"
"1-a47ac10b-58cc-4372-a567-0e02b2c3d470/"
"databases/db_name/backups"
},
"parent": {
"href": "https://cloud.acquia.com/api/environments/"
"1-a47ac10b-58cc-4372-a567-0e02b2c3d470/"
"databases/db_name"
},
"limit": {
"href": "https://cloud.acquia.com/api/environments/"
"1-a47ac10b-58cc-4372-a567-0e02b2c3d470/databases"
"/db_name/backups{?limit}",
"templated": True,
},
"offset": {
"href": "https://cloud.acquia.com/api/environments/"
"1-a47ac10b-58cc-4372-a567-0e02b2c3d470/databases/"
"db_name/backups{?offset}",
"templated": True,
},
"sort": {
"href": "https://cloud.acquia.com/api/environments/"
"1-a47ac10b-58cc-4372-a567-0e02b2c3d470/"
"databases/db_name/backups{?sort}",
"templated": True,
},
"filter": {
"href": "https://cloud.acquia.com/api/environments/"
"1-a47ac10b-58cc-4372-a567-0e02b2c3d470/d"
"atabases/db_name/backups{?filter}",
"templated": True,
},
},
"_embedded": {
"items": [
{
"id": 1,
"database": {"id": 14, "name": "db_name"},
"type": "daily",
"started_at": "2012-05-15T12:00:00Z",
"completed_at": "2012-05-15T12:00:00Z",
"flags": {"deleted": False},
"environment": {
"id": "1-a47ac10b-58cc-4372-a567-0e02b2c3d470",
"name": "Production",
},
"_links": {
"self": {
"href": "https://cloud.acquia.com/api/"
"environments/1-a47ac10b-58cc-4372-"
"a567-0e02b2c3d470/databases/"
"db_name/backups/1"
},
"parent": {
"href": "https://cloud.acquia.com/api/"
"environments/1-a47ac10b-58cc-"
"4372-a567-0e02b2c3d470/databases"
},
"download": {
"href": "https://cloud.acquia.com/api/"
"environments/1-a47ac10b-58cc-4372"
"-a567-0e02b2c3d470/databases/"
"db_name/backups/1/actions/download"
},
},
},
{
"id": 2,
"database": {"id": 14, "name": "db_name"},
"type": "daily",
"started_at": "2012-03-28T12:00:00Z",
"completed_at": "2012-03-28T12:00:01Z",
"flags": {"deleted": False},
"environment": {
"id": "1-a47ac10b-58cc-4372-a567-0e02b2c3d470",
"name": "Production",
},
"_links": {
"self": {
"href": "https://cloud.acquia.com/api/"
"environments/1-a47ac10b-58cc-4372-"
"a567-0e02b2c3d470/databases/"
"db_name/backups/2"
},
"parent": {
"href": "https://cloud.acquia.com/api/"
"environments/1-a47ac10b-58cc-4372-"
"a567-0e02b2c3d470/databases"
},
"download": {
"href": "https://cloud.acquia.com/api/"
"environments/1-a47ac10b-58cc-4372-"
"a567-0e02b2c3d470/databases/db_name"
"/backups/2/actions/download"
},
},
},
{
"id": 3,
"database": {"id": 14, "name": "db_name"},
"type": "daily",
"started_at": "2017-01-08T04:00:00Z",
"completed_at": "2017-01-08T04:00:01Z",
"flags": {"deleted": False},
"environment": {
"id": "1-a47ac10b-58cc-4372-a567-0e02b2c3d470",
"name": "Production",
},
"_links": {
"self": {
"href": "https://cloud.acquia.com/api/"
"environments/1-a47ac10b-58cc-4372-"
"a567-0e02b2c3d470/databases/db_name"
"/backups/3"
},
"parent": {
"href": "https://cloud.acquia.com/api/"
"environments/1-a47ac10b-58cc-4372-"
"a567-0e02b2c3d470/databases"
},
"download": {
"href": "https://cloud.acquia.com/api/"
"environments/1-a47ac10b-58cc-4372-"
"a567-0e02b2c3d470/databases/db_name"
"/backups/3/actions/download"
},
},
},
{
"id": 4,
"database": {"id": 14, "name": "db_name"},
"type": "daily",
"started_at": "2017-01-08T05:00:02Z",
"completed_at": "2017-01-08T05:00:03Z",
"flags": {"deleted": False},
"environment": {
"id": "1-a47ac10b-58cc-4372-a567-0e02b2c3d470",
"name": "Production",
},
"_links": {
"self": {
"href": "https://cloud.acquia.com/api/"
"environments/1-a47ac10b-58cc-4372-"
"a567-0e02b2c3d470/databases/db_name"
"/backups/4"
},
"parent": {
"href": "https://cloud.acquia.com/api/"
"environments/1-a47ac10b-58cc-4372-"
"a567-0e02b2c3d470/databases"
},
"download": {
"href": "https://cloud.acquia.com/api/"
"environments/1-a47ac10b-58cc-4372-"
"a567-0e02b2c3d470/databases/db_name"
"/backups/4/actions/download"
},
},
},
]
},
}
mocker.register_uri("GET", uri, status_code=200, json=response)
response = self.acquia.environment(env_id).backups(db_name)
self.assertEqual(response["total"], 2)
self.assertIn("_embedded", response)
def test_backup_details(self, mocker):
env_id = "1-a47ac10b-58cc-4372-a567-0e02b2c3d470"
db_name = "db_name"
id_backup = "1"
uri = (
f"{self.endpoint}/environments/{env_id}/"
f"databases/{db_name}/backups/{id_backup}"
)
response = {
"id": 1,
"database": {"id": 14, "name": "db_name"},
"type": "daily",
"started_at": "2012-05-15T12:00:00Z",
"completed_at": "2012-05-15T12:00:00Z",
"flags": {"deleted": False},
"environment": {
"id": "1-a47ac10b-58cc-4372-a567-0e02b2c3d470",
"name": "Production",
},
"_links": {
"self": {
"href": "https://cloud.acquia.com/api/environments/"
"1-a47ac10b-58cc-4372-a567-0e02b2c3d470/"
"database-backups/1"
},
"download": {
"href": "https://cloud.acquia.com/api/environments/"
"1-a47ac10b-58cc-4372-a567-0e02b2c3d470/"
"database-backups/1/actions/download"
},
"parent": {
"href": "https://cloud.acquia.com/api/environments/"
"1-a47ac10b-58cc-4372-a567-0e02b2c3d470/d"
"atabase-backups"
},
},
"_embedded": {
"environment": {
"id": "1-a47ac10b-58cc-4372-a567-0e02b2c3d470",
"name": "Production",
"_links": {
"self": {
"href": "https://cloud.acquia.com/api/"
"environments/1-a47ac10b-58cc-4372-"
"a567-0e02b2c3d470"
}
},
},
"database": {
"id": 14,
"name": "db_name",
"_links": {
"self": {
"href": "https://cloud.acquia.com/api/"
"environments/1-a47ac10b-58cc-4372-a567-"
"0e02b2c3d470/databases/db_name"
}
},
},
},
}
mocker.register_uri("GET", uri, status_code=200, json=response)
response = self.acquia.environment(env_id).backup_details(
db_name, id_backup
)
self.assertEqual(response["id"], 1)
self.assertIn("_embedded", response)
def test_backup_download(self, mocker):
env_id = "1-a47ac10b-58cc-4372-a567-0e02b2c3d470"
db_name = "db_name"
id_backup = "1"
uri = (
f"{self.endpoint}/environments/{env_id}/"
f"databases/{db_name}/backups/{id_backup}/actions/download"
)
response = {
"url": "http://test-site.com/AH_DOWNLOAD?t=1&d=/mnt/files/site/"
"backups/on-demand/backup.sql.gz&dev=hash",
"expires_at": "2020-03-27T10:26:51+00:00",
"_links": {
"self": {
"href": "https://cloud.acquia.com/api/environments/"
"1-a47ac10b-58cc-4372-a567-0e02b2c3d470/databases/"
"db_name/backups/1/actions/download"
},
"parent": {
"href": "https://cloud.acquia.com/api/environments/"
"1-a47ac10b-58cc-4372-a567-0e02b2c3d470/databases/"
"db_name/backups/1/actions"
},
},
}
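        # The download action responds with a pre-signed URL and its expiry
        # rather than the backup file itself.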
mocker.register_uri("GET", uri, status_code=200, json=response)
response = self.acquia.environment(env_id).backup_download(
db_name, id_backup
)
self.assertIn("url", response)
def test_code_switch(self, mocker):
env_id = "24-a47ac10b-58cc-4372-a567-0e02b2c3d470"
uri = "{base_uri}/environments/{env_id}/code/actions/switch"
uri = uri.format(base_uri=self.endpoint, env_id=env_id)
mocker.register_uri(
"POST",
uri,
status_code=202,
json={"message": "The code is being switched."},
)
response = self.acquia.environment(env_id).code_switch(
"my-feature-branch"
)
self.assertEqual(response.status_code, 202)
self.assertIn(b"switched", response.content)
def test_configure(self, mocker):
env_id = "24-a47ac10b-58cc-4372-a567-0e02b2c3d470"
uri = "{base_uri}/environments/{env_id}"
uri = uri.format(base_uri=self.endpoint, env_id=env_id)
data = {
"version": "5.5",
"max_execution_time": 10,
"memory_limit": 192,
"apc": 128,
"max_input_vars": 1000,
"max_post_size": 256,
"sendmail_path": "/usr/bin/sendmail",
"varnish_over_ssl": False,
}
response_message = {
"message": "The environment configuration is being updated."
}
mocker.register_uri("PUT", uri, status_code=202, json=response_message)
response = self.acquia.environment(env_id).configure(data)
self.assertEqual(response.status_code, 202)
self.assertIn(b"updated", response.content)
def test_create_backup(self, mocker):
env_id = "1-a47ac10b-58cc-4372-a567-0e02b2c3d470"
db_name = "db_name"
uri = (
f"{self.endpoint}/environments/{env_id}/"
f"databases/{db_name}/backups"
)
response = {
"message": "Creating the backup.",
"_links": {
"self": {
"href": "https://cloud.acquia.com/api/environments/"
"12-d314739e-296f-11e9-b210-d663bd873d93/"
"databases/my_db/backups/"
},
"notification": {
"href": "https://cloud.acquia.com/api/notifications/"
"42b56cff-0b55-4bdf-a949-1fd0fca61c6c"
},
"parent": {
"href": "https://cloud.acquia.com/api/environments/"
"12-d314739e-296f-11e9-b210-d663bd873d93/"
"databases/my_db/"
},
},
}
mocker.register_uri("POST", uri, status_code=202, json=response)
response = self.acquia.environment(env_id).create_backup(db_name)
self.assertEqual(response["message"], "Creating the backup.")
self.assertIn("_links", response)
def test_create_domain(self, mocker):
env_id = "24-a47ac10b-58cc-4372-a567-0e02b2c3d470"
uri = "{base_uri}/environments/{env_id}/domains"
uri = uri.format(base_uri=self.endpoint, env_id=env_id)
response_message = {
"message": "The domain 'ceruleanhq.com' is being added."
}
mocker.register_uri(
"POST", uri, status_code=202, json=response_message
)
response = self.acquia.environment(env_id).create_domain(
"ceruleanhq.com"
)
self.assertEqual(response.status_code, 202)
self.assertIn(b"added", response.content)
def test_create_log_forwarding_destinations(self, mocker):
env_id = "24-a47ac10b-58cc-4372-a567-0e02b2c3d470"
uri = "{base_uri}/environments/{env_id}/log-forwarding-destinations"
uri = uri.format(base_uri=self.endpoint, env_id=env_id)
response_message = {
"message": "Log forwarding destination for the \
environment has been created."
}
mocker.register_uri(
"POST", uri, status_code=202, json=response_message
)
label = "Test destination"
sources = ["apache-access", "apache-error"]
consumer = "syslog"
credentials = {
"certificate": "-----BEGIN CERTIFICATE-----...\
-----END CERTIFICATE-----"
}
address = "example.com:1234"
response = self.acquia.environment(
env_id
).create_log_forwarding_destinations(
label, sources, consumer, credentials, address
)
self.assertEqual(response.status_code, 202)
self.assertIn(b"created", response.content)
def test_delete_backup(self, mocker):
env_id = "12-d314739e-296f-11e9-b210-d663bd873d93"
db_name = "my_db"
uri = (
f"{self.endpoint}/environments/{env_id}/"
f"databases/{db_name}/backups/1"
)
response = {
"message": "Deleting the database backup.",
"_links": {
"self": {
"href": "https://cloud.acquia.com/api/environments/12-d314"
"739e-296f-11e9-b210-d663bd873d93/databases/"
"my_db/backups/1"
},
"notification": {
"href": "https://cloud.acquia.com/api/notifications/42b5"
"6cff-0b55-4bdf-a949-1fd0fca61c6c"
},
"parent": {
"href": "https://cloud.acquia.com/api/environments/12-d31"
"4739e-296f-11e9-b210-d663bd873d93/databases/"
"my_db/backups"
},
},
}
mocker.register_uri("DELETE", uri, status_code=202, json=response)
response = self.acquia.environment(env_id).delete_backup(db_name, 1)
self.assertEqual(response["message"], "Deleting the database backup.")
self.assertIn("_links", response)
def test_delete_domain(self, mocker):
env_id = "24-a47ac10b-58cc-4372-a567-0e02b2c3d470"
domain = "ceruleanhq.com"
uri = "{base_uri}/environments/{env_id}/domains/{domain}"
uri = uri.format(base_uri=self.endpoint, env_id=env_id, domain=domain)
mocker.register_uri("DELETE", uri, status_code=202)
response = self.acquia.environment(env_id).delete_domain(
"ceruleanhq.com"
)
self.assertEqual(response.status_code, 202)
def test_clear_varnish_domain(self, mocker):
env_id = "24-a47ac10b-58cc-4372-a567-0e02b2c3d470"
domain = "ceruleanhq.com"
uri = (
"{base_uri}/environments/{env_id}/domains/"
"{domain}/actions/clear-varnish"
)
uri = uri.format(base_uri=self.endpoint, env_id=env_id, domain=domain)
response_message = {
"message": "Varnish is being cleared for domain 'ceruleanhq.com'."
}
mocker.register_uri(
"POST", uri, status_code=202, json=response_message
)
response = self.acquia.environment(env_id).clear_varnish_domain(
"ceruleanhq.com"
)
self.assertEqual(response.status_code, 202)
def test_clear_varnish_domains(self, mocker):
env_id = "24-a47ac10b-58cc-4372-a567-0e02b2c3d470"
domains = ["ceruleanhq.com"]
        uri = (
            "{base_uri}/environments/{env_id}/domains/actions/clear-varnish"
        )
uri = uri.format(base_uri=self.endpoint, env_id=env_id)
response_message = {
"message": "Varnish is being cleared for the selected domains."
}
mocker.register_uri(
"POST", uri, status_code=202, json=response_message
)
response = self.acquia.environment(env_id).clear_varnish_domains(
domains
)
self.assertEqual(response.status_code, 202)
def test_delete_log_forwarding_destinations(self, mocker):
env_id = "24-a47ac10b-58cc-4372-a567-0e02b2c3d470"
destination_uuid = "df4c5428-8d2e-453d-9edf-e412647449b1"
uri = (
f"{self.endpoint}/environments/{env_id}/"
f"log-forwarding-destinations/{destination_uuid}"
)
response_message = {
"message": "Log forwarding destination has been deleted."
}
mocker.register_uri(
"DELETE", uri, status_code=202, json=response_message
)
response = self.acquia.environment(
env_id
).delete_log_forwarding_destinations(destination_uuid)
self.assertEqual(response.status_code, 202)
self.assertIn(b"deleted", response.content)
def test_destroy(self, mocker):
env_id = "24-a47ac10b-58cc-4372-a567-0e02b2c3d470"
uri = "{base_uri}/environments/{env_id}"
uri = uri.format(base_uri=self.endpoint, env_id=env_id)
response = {"message": "The environment is being deleted."}
mocker.register_uri(
url=uri, method="DELETE", status_code=202, json=response
)
response = self.acquia.environment(env_id).destroy()
self.assertEqual(response.status_code, 202)
def test_deploy_code(self, mocker):
env_id = "24-a47ac10b-58cc-4372-a567-0e02b2c3d470"
env_id_from = "14-0c7e79ab-1c4a-424e-8446-76ae8be7e851"
uri = "{base_uri}/environments/{env_id}/code"
uri = uri.format(base_uri=self.endpoint, env_id=env_id)
response_message = {"message": "The code is being deployed."}
mocker.register_uri(
"POST", uri, status_code=202, json=response_message
)
response = self.acquia.environment(env_id).deploy_code(env_id_from)
self.assertEqual(response.status_code, 202)
self.assertIn(b"deployed", response.content)
def test_deploy_database(self, mocker):
env_id = "24-a47ac10b-58cc-4372-a567-0e02b2c3d470"
env_id_from = "14-0c7e79ab-1c4a-424e-8446-76ae8be7e851"
uri = "{base_uri}/environments/{env_id}/databases"
uri = uri.format(base_uri=self.endpoint, env_id=env_id)
response_message = {"message": "The database is queued for copying."}
mocker.register_uri(
"POST", uri, status_code=202, json=response_message
)
response = self.acquia.environment(env_id).deploy_database(
env_id_from, "my_new_db"
)
self.assertEqual(response.status_code, 202)
self.assertIn(b"queued", response.content)
def test_deploy_files(self, mocker):
env_id = "24-a47ac10b-58cc-4372-a567-0e02b2c3d470"
env_id_from = "14-0c7e79ab-1c4a-424e-8446-76ae8be7e851"
uri = "{base_uri}/environments/{env_id}/files"
uri = uri.format(base_uri=self.endpoint, env_id=env_id)
response_message = {
"message": "The files have been queued for copying."
}
mocker.register_uri(
"POST", uri, status_code=202, json=response_message
)
response = self.acquia.environment(env_id).deploy_files(env_id_from)
self.assertEqual(response.status_code, 202)
self.assertIn(b"queued", response.content)
def test_get_crons(self, mocker):
env_id = "24-a47ac10b-58cc-4372-a567-0e02b2c3d470"
uri = "{base_uri}/environments/{env_id}/crons"
uri = uri.format(base_uri=self.endpoint, env_id=env_id)
response_message = {
"_embedded": {
"items": [
{
"_links": {
"self": {
"href": "{baseUri}/environments/24-a47ac10b-58cc-4372-a567-0e02b2c3d470\
/crons/43595"
}
},
"command": "/usr/local/bin/drush -r /var/www/html/mysub/docroot \
ah-db-backup mysub",
"day_month": "*",
"day_week": "*",
"environment": {
"id": "24-a47ac10b-58cc-4372-a567-0e02b2c3d470",
"name": "prod",
},
"flags": {
"enabled": True,
"on_any_web": True,
"system": True,
},
"hour": "8",
"id": "43595",
"label": None,
"minute": "0",
"month": "*",
"server": [],
},
{
"_links": {
"self": {
"href": "{baseUri}/environments/24-a47ac10b-58cc-4372-a567-0e02b2c3d470\
/crons/56834"
}
},
"command": "/usr/local/bin/drush9 --uri=[http://[site-uri] \
--root=/var/www/html/${AH_SITE_NAME}/docroot \
-dv cron &>> /var/log/sites/${AH_SITE_NAME}\
/logs/$(hostname -s)/drush-cron.log",
"day_month": "*",
"day_week": "*",
"environment": {
"id": "24-a47ac10b-58cc-4372-a567-0e02b2c3d470",
"name": "prod",
},
"flags": {
"enabled": True,
"on_any_web": True,
"system": False,
},
"hour": "*",
"id": "56834",
"label": "Site Cron Every Hour",
"minute": "0",
"month": "*",
"server": [],
},
]
},
"_links": {
"parent": {
"href": "{baseUri}/environments/\
24-a47ac10b-58cc-4372-a567-0e02b2c3d470"
},
"self": {
"href": "{baseUri}/environments/\
24-a47ac10b-58cc-4372-a567-0e02b2c3d470/crons"
},
},
"total": 2,
}
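        # The backslash line continuations inside the string literals above
        # splice the long values but also embed the indentation whitespace into
        # the mock payload; the assertions below only check "total" and
        # "_embedded", so that is harmless here.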
mocker.register_uri("GET", uri, status_code=200, json=response_message)
response = self.acquia.environment(env_id).get_crons()
self.assertEqual(response["total"], 2)
self.assertIn("_embedded", response)
def test_get_log_forwarding_destinations(self, mocker):
env_id = "5-185f07c7-9c4f-407b-8968-67892ebcb38a"
uri = "{base_uri}/environments/{env_id}/log-forwarding-destinations"
uri = uri.format(base_uri=self.endpoint, env_id=env_id)
response_message = {
"total": 2,
"_links": {
"self": {
"href": "{base_uri}/environments/5-185f07c7-9c4f-407b-8968-67892ebcb38a\
/log-forwarding-destinations"
},
"parent": {
"href": "{base_uri}/environments\
/5-185f07c7-9c4f-407b-8968-67892ebcb38a"
},
},
"_embedded": {
"items": [
{
"uuid": "df4c5428-8d2e-453d-9edf-e412647449b1",
"label": "Test destination",
"consumer": "sumologic",
"address": "example.com:1234",
"credentials": {
"certificate": {
"certificate": "-----BEGIN CERTIFICATE-----...\
-----END CERTIFICATE-----",
"expires_at": "2018-07-16T16:15:33+00:00",
},
"key": None,
"token": "4ded264c8891c400df8fc8905f07beb5f",
},
"sources": ["apache-access", "apache-error"],
"status": "active",
"flags": {
"enabled": True,
"certificate_expiring": False,
},
"environment": {
"id": "123-ea9060c5-1ed8-46ec-87d5-2ce2a0861577",
"name": "Test",
},
},
{
"uuid": "df4c5428-8d2e-453d-9edf-e412647449b5",
"label": "Another test destination",
"consumer": "syslog",
"address": "193.169.2.19:5678",
"credentials": {
"certificate": {
"certificate": "-----BEGIN CERTIFICATE-----...\
-----END CERTIFICATE-----",
"expires_at": "2018-07-16T16:15:33+00:00",
},
"key": "1d0789d519c0b943cf38f401d30ffbdcd2",
"token": "4ded264c8891c400df8fc8905f07beb5f",
},
"sources": ["drupal-request", "drupal-watchdog"],
"status": "active",
"flags": {
"enabled": False,
"certificate_expiring": True,
},
"environment": {
"id": "123-ea9060c5-1ed8-46ec-87d5-2ce2a0861577",
"name": "Test",
},
},
]
},
}
mocker.register_uri("GET", uri, status_code=200, json=response_message)
response = self.acquia.environment(
env_id
).get_log_forwarding_destinations()
self.assertEqual(response["total"], 2)
self.assertIn("_embedded", response)
def test_get_php_version(self, mocker):
env_id = "24-a47ac10b-58cc-4372-a567-0e02b2c3d470"
uri = "{base_uri}/environments/{env_id}/"
uri = uri.format(base_uri=self.endpoint, env_id=env_id)
response_message = {"configuration": {"php": {"version": "7.2"}}}
mocker.register_uri("GET", uri, status_code=200, json=response_message)
response = self.acquia.environment(env_id).get_php_version()
self.assertEqual(response["php_version"], "7.2")
def test_get_servers(self, mocker):
env_id = "24-a47ac10b-58cc-4372-a567-0e02b2c3d470"
uri = "{base_uri}/environments/{env_id}/servers"
uri = uri.format(base_uri=self.endpoint, env_id=env_id)
response_message = {
"total": 2,
"_links": {
"self": {
"href": "{baseUri}/environments/"
"24-a47ac10b-58cc-4372-a567-0e02b2c3d470/servers"
},
"parent": {
"href": "{baseUri}/environments/"
"24-a47ac10b-58cc-4372-a567-0e02b2c3d470"
},
},
"_embedded": {
"items": [
{
"id": "6",
"name": "ded-6",
"hostname": "ded-6.servers.acquia.com",
"ssh_user": "user.dev",
"ip": "10.0.0.1",
"status": "normal",
"region": "us-west-1",
"roles": ["web", "db"],
"ami_type": "c1.medium",
"configuration": {
"memcache": 64,
"ecu": 5,
"memory": 1.7,
},
"flags": {
"elastic_ip": False,
"active_web": True,
"active_bal": False,
"primary_db": True,
"web": True,
"database": True,
"balancer": False,
"memcache": True,
"dedicated": False,
"self_service": False,
},
"environment": {
"id": "24-a47ac10b-58cc-4372-a567-0e02b2c3d470",
"name": "dev",
},
"_links": {
"self": {
"href": "{baseUri}/environments/"
"24-a47ac10b-58cc-4372-a567-"
"0e02b2c3d470/servers/6"
}
},
},
{
"id": "4",
"name": "bal-4",
"hostname": "bal-4.servers.acquia.com",
"ssh_user": None,
"ip": "10.0.0.2",
"status": "normal",
"region": "us-west-1",
"roles": ["bal"],
"ami_type": "m1.small",
"configuration": {
"memcache": None,
"ecu": 1,
"memory": 1.7,
},
"flags": {
"elastic_ip": False,
"active_web": False,
"active_bal": False,
"primary_db": True,
"web": False,
"database": False,
"balancer": True,
"memcache": False,
"dedicated": True,
"self_service": False,
},
"environment": {
"id": "24-a47ac10b-58cc-4372-a567-0e02b2c3d470",
"name": "dev",
},
"_links": {
"self": {
"href": "{baseUri}/environments/"
"24-a47ac10b-58cc-4372-a567-"
"0e02b2c3d470/servers/4"
}
},
},
]
},
}
mocker.register_uri("GET", uri, status_code=200, json=response_message)
response = self.acquia.environment(env_id).get_servers()
self.assertEqual(response["total"], 2)
self.assertIn("_embedded", response)
def test_set_php_version(self, mocker):
env_id = "24-a47ac10b-58cc-4372-a567-0e02b2c3d470"
uri = "{base_uri}/environments/{env_id}"
uri = uri.format(base_uri=self.endpoint, env_id=env_id)
response_message = {
"message": "The environment configuration is being updated."
}
mocker.register_uri("PUT", uri, status_code=202, json=response_message)
response = self.acquia.environment(env_id).set_php_version("7.0")
self.assertEqual(response.status_code, 202)
self.assertIn(b"updated", response.content)
def test_update_log_forwarding_destinations(self, mocker):
env_id = "24-a47ac10b-58cc-4372-a567-0e02b2c3d470"
destination_uuid = "df4c5428-8d2e-453d-9edf-e412647449b1"
uri = (
f"{self.endpoint}/environments/{env_id}/"
f"log-forwarding-destinations/{destination_uuid}"
)
response_message = {
"message": "Log forwarding destination has been updated."
}
mocker.register_uri("PUT", uri, status_code=202, json=response_message)
label = "Test destination"
sources = ["apache-access", "apache-error"]
consumer = "syslog"
credentials = {
"certificate": "-----BEGIN CERTIFICATE-----...\
-----END CERTIFICATE-----"
}
address = "example.com:1234"
response = self.acquia.environment(
env_id
).update_log_forwarding_destinations(
label, sources, consumer, credentials, address, destination_uuid
)
self.assertEqual(response.status_code, 202)
self.assertIn(b"updated", response.content)
def test_enable_cron(self, mocker):
env_id = "24-a47ac10b-58cc-4372-a567-0e02b2c3d470"
cron_id = "1889"
uri = (
f"{self.endpoint}/environments/{env_id}/"
f"crons/{cron_id}/actions/enable"
)
response = {
"message": "The cron is being enabled.",
"_links": {
"self": {
"href": "https://cloud.acquia.com/api/environments/"
"24-a47ac10b-58cc-4372-a567-0e02b2c3d470/crons/1889/"
"actions/enable"
},
"parent": {
"href": "https://cloud.acquia.com/api/environments/"
"24-a47ac10b-58cc-4372-a567-0e02b2c3d470/"
"crons/1889/actions"
},
"notification": {
"href": "https://cloud.acquia.com/api/notifications/"
"ceda2e82-54b7-4181-ae97-6a3163b187b8"
},
},
}
mocker.register_uri("POST", uri, status_code=202, json=response)
response = self.acquia.environment(env_id).enable_cron(cron_id)
self.assertEqual(response.status_code, 202)
self.assertIn(b"The cron is being enabled.", response.content)
def test_disable_cron(self, mocker):
env_id = "24-a47ac10b-58cc-4372-a567-0e02b2c3d470"
cron_id = "1234"
uri = (
f"{self.endpoint}/environments/{env_id}/"
f"crons/{cron_id}/actions/disable"
)
response = {
"message": "The cron is being disabled.",
"_links": {
"self": {
"href": "https://cloud.acquia.com/api/environments/"
"24-a47ac10b-58cc-4372-a567-0e02b2c3d470/crons/1234/"
"actions/disable"
},
"parent": {
"href": "https://cloud.acquia.com/api/environments/"
"24-a47ac10b-58cc-4372-a567-0e02b2c3d470/crons/"
"1234/actions"
},
"notification": {
"href": "https://cloud.acquia.com/api/notifications/"
"7b37b885-8ae4-454b-b8fa-ffaeff54f6a4"
},
},
}
mocker.register_uri("POST", uri, status_code=202, json=response)
response = self.acquia.environment(env_id).disable_cron(cron_id)
self.assertEqual(response.status_code, 202)
self.assertIn(b"The cron is being disabled.", response.content)
def test_delete_cron(self, mocker):
env_id = "24-a47ac10b-58cc-4372-a567-0e02b2c3d470"
cron_id = "1891"
uri = f"{self.endpoint}/environments/{env_id}/" f"crons/{cron_id}"
response = {
"message": "Deleting cron.",
"_links": {
"self": {
"href": "https://cloud.acquia.com/api/environments/"
"24-a47ac10b-58cc-4372-a567-0e02b2c3d470/crons/1891"
},
"parent": {
"href": "https://cloud.acquia.com/api/environments/"
"24-a47ac10b-58cc-4372-a567-0e02b2c3d470/crons"
},
"notification": {
"href": "https://cloud.acquia.com/api/notifications/"
"767cee8d-05f6-4761-a3dc-755957dfc9e6"
},
},
}
mocker.register_uri("DELETE", uri, status_code=202, json=response)
response = self.acquia.environment(env_id).delete_cron(cron_id)
self.assertEqual(response.status_code, 202)
self.assertIn(b"Deleting cron.", response.content)
def test_get_ssl_settings(self, mocker):
env_id = "3-110075c3-126e-6b43-c2ce-30be75fb33c2"
uri = f"{self.endpoint}/environments/{env_id}/ssl"
response = {
"balancer": {"hostname": "example.us-east-1.elb.amazonaws.com"},
"ips": ["127.0.0.1"],
"_links": {
"self": {
"href": "https://cloud.acquia.com/api/environments/"
"3-110075c3-126e-6b43-c2ce-30be75fb33c2/ssl"
},
"certificates": {
"href": "https://cloud.acquia.com/api/environments/"
"3-110075c3-126e-6b43-c2ce-30be75fb33c2/ssl/certificates"
},
"csrs": {
"href": "https://cloud.acquia.com/api/environments/"
"3-110075c3-126e-6b43-c2ce-30be75fb33c2/ssl/csrs"
},
"parent": {
"href": "https://cloud.acquia.com/api/environments/"
"3-110075c3-126e-6b43-c2ce-30be75fb33c2"
},
},
}
mocker.register_uri("GET", uri, status_code=200, json=response)
response = self.acquia.environment(env_id).get_ssl_settings()
self.assertIn("balancer", response)
def test_get_ssl_certs(self, mocker):
env_id = "5-a1a10dab-62f4-418c-bc58-ab7742078ba8"
uri = f"{self.endpoint}/environments/{env_id}/ssl/certificates"
response = {
"total": 3,
"_links": {
"self": {
"href": "https://cloud.acquia.com/api/environments/"
"5-a1a10dab-62f4-418c-bc58-ab7742078ba8/ssl/certificates"
},
"parent": {
"href": "https://cloud.acquia.com/api/environments/"
"5-a1a10dab-62f4-418c-bc58-ab7742078ba8/ssl"
},
},
"_embedded": {
"items": [
{
"id": 7,
"label": None,
"certificate": "-----BEGIN CERTIFICATE-----...-----END CERTIFICATE-----",
"private_key": None,
"ca": "-----BEGIN CERTIFICATE-----...-----END CERTIFICATE-----",
"flags": {"active": True, "csr": True, "legacy": True},
"expires_at": "2022-03-28T00:12:34-0400",
"domains": ["example.com", "www.example.com"],
"environment": {
"id": "5-a1a10dab-62f4-418c-bc58-ab7742078ba8",
"name": "prod",
},
"_links": {
"self": {
"href": "https://cloud.acquia.com/api/environments/5-a1a10dab-62f4-418c-bc58-ab7742078ba8/ssl/certificates/7"
},
"csr": {
"href": "https://cloud.acquia.com/api/"
"environments/5-a1a10dab-62f4-418c-bc58-ab7742078ba8/ssl/csrs/7"
},
},
},
{
"id": 3,
"label": "Test Certificate 1",
"certificate": "-----BEGIN CERTIFICATE-----...-----END CERTIFICATE-----",
"private_key": "-----BEGIN RSA PRIVATE KEY-----...-----END RSA PRIVATE KEY-----",
"ca": "-----BEGIN CERTIFICATE-----...-----END CERTIFICATE-----",
"flags": {
"active": True,
"csr": False,
"legacy": False,
},
"expires_at": "2021-01-01T00:00:00-0400",
"domains": ["example2.com", "www.example2.com"],
"environment": {
"id": "5-a1a10dab-62f4-418c-bc58-ab7742078ba8",
"name": "prod",
},
"_links": {
"self": {
"href": "https://cloud.acquia.com/api/environments/5-a1a10dab-62f4-418c-bc58-ab7742078ba8/ssl/certificates/3"
}
},
},
{
"id": 4,
"label": "Test Certificate 2",
"certificate": "-----BEGIN CERTIFICATE-----...-----END CERTIFICATE-----",
"private_key": "-----BEGIN RSA PRIVATE KEY-----...-----END RSA PRIVATE KEY-----",
"ca": "-----BEGIN CERTIFICATE-----...-----END CERTIFICATE-----",
"flags": {
"active": False,
"csr": True,
"legacy": False,
},
"expires_at": "2021-01-01T00:00:00-0400",
"domains": ["example3.com", "www.example3.com"],
"environment": {
"id": "5-a1a10dab-62f4-418c-bc58-ab7742078ba8",
"name": "prod",
},
"_links": {
"self": {
"href": "https://cloud.acquia.com/api/environments/5-a1a10dab-62f4-418c-bc58-ab7742078ba8/ssl/certificates/4"
}
},
},
]
},
}
mocker.register_uri("GET", uri, status_code=200, json=response)
response = self.acquia.environment(env_id).get_ssl_certs()
self.assertIn("certificate", response[0])
def test_install_ssl_cert(self, mocker):
env_id = "123-4ba86d4a-e193-4282-8963-d9d24746f444"
uri = f"{self.endpoint}/environments/{env_id}/ssl/certificates"
legacy = False
        certificate = (
            "-----BEGIN CERTIFICATE-----abc123....-----END CERTIFICATE-----"
        )
        private_key = (
            "-----BEGIN RSA PRIVATE KEY-----secret....-----END RSA PRIVATE KEY-----"
        )
        ca_certificates = (
            "-----BEGIN CERTIFICATE-----123abc....-----END CERTIFICATE-----"
        )
        csr_id = 123
label = "My New Cert"
response = {
"message": "Installing the certificate.",
"_links": {
"self": {
"href": "https://cloud.acquia.com/api/environments/123-4ba86d4a-e193-4282-8963-d9d24746f444/ssl/certificates"
},
"notification": {
"href": "https://cloud.acquia.com/api/notifications/8fdacf25-38e4-4621-b5de-e78638fe2ceb"
},
"parent": {
"href": "https://cloud.acquia.com/api/environments/123-4ba86d4a-e193-4282-8963-d9d24746f444/ssl"
},
},
}
mocker.register_uri("POST", uri, status_code=202, json=response)
response = self.acquia.environment(env_id).install_ssl_cert(
label, certificate, private_key, ca_certificates, legacy, csr_id
)
self.assertEqual(response.status_code, 202)
self.assertIn(b"Installing the certificate.", response.content)
def test_get_ssl_cert(self, mocker):
env_id = "5-9d46fd9d-e58b-47a3-8e9e-e8e0c2a854b4"
cert_id = "13"
uri = (
f"{self.endpoint}/environments/{env_id}/ssl/certificates/{cert_id}"
)
response = {
"id": 13,
"label": "Test Certificate",
"certificate": "-----BEGIN CERTIFICATE-----...-----END CERTIFICATE-----",
"private_key": "-----BEGIN RSA PRIVATE KEY-----...-----END RSA PRIVATE KEY-----",
"ca": "-----BEGIN CERTIFICATE-----...-----END CERTIFICATE-----",
"flags": {"active": True, "csr": True, "legacy": False},
"expires_at": "2022-03-28T00:12:34-0400",
"domains": ["example.com", "www.example.com"],
"environment": {
"id": "5-9d46fd9d-e58b-47a3-8e9e-e8e0c2a854b4",
"name": "prod",
},
"_links": {
"self": {
"href": "https://cloud.acquia.com/api/environments/"
"5-9d46fd9d-e58b-47a3-8e9e-e8e0c2a854b4/ssl/certificates/13"
},
"parent": {
"href": "https://cloud.acquia.com/api/environments/"
"5-9d46fd9d-e58b-47a3-8e9e-e8e0c2a854b4/ssl/certificates"
},
},
}
mocker.register_uri("GET", uri, status_code=200, json=response)
response = self.acquia.environment(env_id).get_ssl_cert(cert_id)
self.assertIn("certificate", response)
def test_delete_ssl_cert(self, mocker):
env_id = "286-a027502b-ad6c-a48e-a7e8-aa0def7d25e1"
cert_id = "9"
uri = (
f"{self.endpoint}/environments/{env_id}/ssl/certificates/{cert_id}"
)
response = {
"message": "Deleting the certificate.",
"_links": {
"self": {
"href": "https://cloud.acquia.com/api/environments/286-a027502b-ad6c-a48e-a7e8-aa0def7d25e1/ssl/certificates/9"
},
"parent": {
"href": "https://cloud.acquia.com/api/environments/286-a027502b-ad6c-a48e-a7e8-aa0def7d25e1/ssl/certificates"
},
"notification": {
"href": "https://cloud.acquia.com/api/notifications/767cee8d-05f6-4761-a3dc-755957dfc9e6"
},
},
}
mocker.register_uri("DELETE", uri, status_code=202, json=response)
response = self.acquia.environment(env_id).delete_ssl_cert(cert_id)
self.assertEqual(response.status_code, 202)
self.assertIn(b"Deleting the certificate.", response.content)
def test_activate_ssl_cert(self, mocker):
env_id = "123-a027502b-ad6c-a48e-a7e8-aa0def7d25e1"
cert_id = "1"
uri = f"{self.endpoint}/environments/{env_id}/ssl/certificates/{cert_id}/actions/activate"
response = {
"message": "Activating the certificate.",
"_links": {
"self": {
"href": "https://cloud.acquia.com/api/environments/"
"123-a027502b-ad6c-a48e-a7e8-aa0def7d25e1/ssl/certificates/"
"1/actions/activate"
},
"notification": {
"href": "https://cloud.acquia.com/api/notifications/"
"4ee513c7-13b4-459f-af60-ba50c4f7cb5d"
},
},
}
mocker.register_uri("POST", uri, status_code=202, json=response)
response = self.acquia.environment(env_id).activate_ssl_cert(cert_id)
self.assertEqual(response.status_code, 202)
self.assertIn(b"Activating the certificate.", response.content)
def test_deactivate_ssl_cert(self, mocker):
env_id = "123-a027502b-ad6c-a48e-a7e8-aa0def7d25e1"
cert_id = "4547"
uri = f"{self.endpoint}/environments/{env_id}/ssl/certificates/{cert_id}/actions/deactivate"
response = {
"message": "Deactivating the certificate.",
"_links": {
"self": {
"href": "https://cloud.acquia.com/api/environments/"
"123-a027502b-ad6c-a48e-a7e8-aa0def7d25e1/ssl/certificates/"
"4547/actions/deactivate"
},
"notification": {
"href": "https://cloud.acquia.com/api/notifications/"
"cb5de18e-5721-4c26-9f67-1a7d806dd09e"
},
},
}
mocker.register_uri("POST", uri, status_code=202, json=response)
response = self.acquia.environment(env_id).deactivate_ssl_cert(cert_id)
self.assertEqual(response.status_code, 202)
self.assertIn(b"Deactivating the certificate.", response.content)
| 40.296073 | 141 | 0.452504 |
2f5bf1a750e849a158abb9b14e152caf1234c7b9 | 1,699 | py | Python | umatobi/mock_studying/mock_study.py | umedoblock/umatobi | d0c6bcb350dc951e8f57a0c03389082902803165 | [
"MIT"
] | 3 | 2019-08-18T19:02:57.000Z | 2020-02-17T03:23:24.000Z | umatobi/mock_studying/mock_study.py | umedoblock/umatobi | d0c6bcb350dc951e8f57a0c03389082902803165 | [
"MIT"
] | 227 | 2018-11-25T12:04:42.000Z | 2019-12-26T14:18:32.000Z | umatobi/mock_studying/mock_study.py | umedoblock/umatobi | d0c6bcb350dc951e8f57a0c03389082902803165 | [
"MIT"
] | null | null | null | import unittest
from unittest.mock import MagicMock
from unittest.mock import patch
class Bar(object):
def __init__(self):
self.bar = 'bar __init__()' # __init__() kill bar() function
print('bar __init__()')
def bar(self):
# self.bar = 'bar bar()' # bar() suiside ageint Bar class
print('bar bar()')
def baz(self):
        self.baz = 'baz baz()'  # the first call shadows baz() itself with a string
        print('bar baz()')
        # self.bar()  # bar() is dead: __init__() already replaced it with a string
class Foo(object):
sock = 'sock'
def __init__(self):
self.attr = 100
self.sock = 'sock in __init__()'
def sock2(self):
pass
def tes(self):
return 'I am tes().'
if __name__ == '__main__':
bar = Bar()
    # bar.bar()  # would raise TypeError: 'str' object is not callable (shadowed in __init__)
bar.baz()
print()
print('hello')
m = MagicMock()
# print('mock =', m)
foo = Foo()
print()
with patch.object(foo, 'sock', 'sock with') as mocked1:
print('foo.sock =', foo.sock)
print()
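    # Patching on the *class* (rather than an instance) replaces the attribute
    # for every instance while the patch is active; return_value fixes what a
    # call to the mock yields.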
with patch.object(Foo, 'sock2', return_value='sock2() with') as mocked2:
print('mocked2 =', mocked2)
foo2 = Foo()
print('2 foo2.sock2() =', foo2.sock2())
mocked2.assert_called_once_with()
print()
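    # autospec=True makes the mock match the real method's signature, so the
    # bound call records the instance as the first argument -- which is why
    # assert_called_once_with(foo3) passes below.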
with patch.object(Foo, 'sock2', autospec=True) as mocked3:
print('mocked3 =', mocked3)
mocked3.return_value = 'mocked3.return_value'
foo3 = Foo()
print('3 foo3.sock2() =', foo3.sock2())
mocked3.assert_called_once_with(foo3)
print()
foo3 = Foo()
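    # The patch is undone when the with-block exits, so the final call to
    # foo3.tes() returns the real 'I am tes().' again.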
with patch.object(Foo, 'tes') as mocked_tes:
mocked_tes.return_value = 'mocked_tes.return_value'
print('foo3.tes() =', foo3.tes())
print('go out from with')
print('foo3.tes() =', foo3.tes())
| 24.623188 | 76 | 0.569747 |
e97a1567e97e7b1d3c5ebb9a1cb3d66d98d68683 | 2,035 | py | Python | src/easynlp/summarization.py | easynlp/easynlp | 4b3b405a64ca166cc19ee9c43b79a475cf699996 | [
"MIT"
] | 6 | 2021-07-09T08:13:44.000Z | 2021-11-10T04:09:33.000Z | src/easynlp/summarization.py | easynlp/easynlp | 4b3b405a64ca166cc19ee9c43b79a475cf699996 | [
"MIT"
] | 1 | 2021-07-09T17:18:16.000Z | 2021-07-09T17:18:16.000Z | src/easynlp/summarization.py | easynlp/easynlp | 4b3b405a64ca166cc19ee9c43b79a475cf699996 | [
"MIT"
] | 1 | 2022-02-09T15:37:14.000Z | 2022-02-09T15:37:14.000Z | from easynlp.data import handle_data
import transformers
from typing import Dict, List, Optional, Union
import pandas as pd
import datasets
def summarization(
data: Union[List[Dict[str, str]], Dict[str, List], pd.DataFrame, datasets.Dataset],
input_column: str = "text",
output_column: str = "summarization",
model_name: Optional[str] = None,
):
"""Performs summarization on given data."""
# get default model name
if model_name is None:
model_name = f"google/pegasus-xsum"
# check input and output columns are different
assert (
input_column != output_column
), f"input and output columns must be different, both are {input_column}"
# convert data to datasets.Dataset
dataset = handle_data(data)
# remove all columns that aren't the `input_column`
columns_to_remove = [f for f in dataset.features if f != input_column]
dataset = dataset.remove_columns(columns_to_remove)
# load model and tokenizer
model = transformers.AutoModelForSeq2SeqLM.from_pretrained(model_name)
tokenizer = transformers.AutoTokenizer.from_pretrained(model_name)
# create pipeline
pipe = transformers.SummarizationPipeline(
model=model,
tokenizer=tokenizer,
)
# perform summarization
dataset = dataset.map(
get_summarization,
fn_kwargs={
"pipe": pipe,
"input_column": input_column,
"output_column": output_column,
},
batched=True,
        batch_size=max(1, len(dataset) // 100),  # guard against a zero batch size on small inputs
)
return dataset
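# Illustrative usage (a sketch: assumes the default pegasus-xsum weights can be
# downloaded and that the input rows carry a "text" field):
#     ds = summarization([{"text": "Some long article body ..."}])
#     print(ds[0]["summarization"])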
def get_summarization(
examples: List[Dict[str, List[str]]],
pipe: transformers.Pipeline,
input_column: str,
output_column: str,
) -> Dict[str, List[str]]:
"""Performs summarization on a batch of examples."""
outputs = pipe(
examples[input_column],
clean_up_tokenization_spaces=True,
)
predicted_summarizations = [output["summary_text"] for output in outputs]
return {output_column: predicted_summarizations}
| 29.071429 | 87 | 0.681572 |
11b762101cf7e8617b08ab5ba049aa7807fd0459 | 832 | py | Python | translate/google/cloud/translate_v2/__init__.py | rodrigodias27/google-cloud-python | 7d1161f70744c0dbbe67a3f472ea95667eaafe50 | [
"Apache-2.0"
] | 1 | 2021-01-04T11:40:17.000Z | 2021-01-04T11:40:17.000Z | translate/google/cloud/translate_v2/__init__.py | rodrigodias27/google-cloud-python | 7d1161f70744c0dbbe67a3f472ea95667eaafe50 | [
"Apache-2.0"
] | null | null | null | translate/google/cloud/translate_v2/__init__.py | rodrigodias27/google-cloud-python | 7d1161f70744c0dbbe67a3f472ea95667eaafe50 | [
"Apache-2.0"
] | null | null | null | # Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Google Cloud Translation API wrapper."""
from pkg_resources import get_distribution
__version__ = get_distribution('google-cloud-translate').version
from google.cloud.translate_v2.client import Client
__all__ = (
'__version__',
'Client',
)
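# Minimal usage sketch (assumes Google Cloud credentials are configured):
#     from google.cloud import translate_v2
#     client = translate_v2.Client()
#     result = client.translate("Bonjour le monde", target_language="en")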
| 29.714286 | 74 | 0.759615 |
51174e9b44c78a24c965e9df5abc942953b0e3a7 | 88,372 | py | Python | sekoia.io/lib/stix2patterns/v20/grammars/STIXPatternParser.py | cdc-eba/SEKOIA.IO-for-Splunk | 329fefe08bba6c90a90dcfaa3ab5d9c4d1c8670f | [
"Apache-2.0"
] | 3 | 2021-01-06T06:14:56.000Z | 2021-01-29T16:01:20.000Z | sekoia.io/lib/stix2patterns/v20/grammars/STIXPatternParser.py | cdc-eba/SEKOIA.IO-for-Splunk | 329fefe08bba6c90a90dcfaa3ab5d9c4d1c8670f | [
"Apache-2.0"
] | 3 | 2021-06-10T15:05:26.000Z | 2021-11-10T11:31:08.000Z | sekoia.io/lib/stix2patterns/v20/grammars/STIXPatternParser.py | cdc-eba/SEKOIA.IO-for-Splunk | 329fefe08bba6c90a90dcfaa3ab5d9c4d1c8670f | [
"Apache-2.0"
] | 1 | 2021-06-22T12:25:05.000Z | 2021-06-22T12:25:05.000Z | # Generated from STIXPattern.g4 by ANTLR 4.8
# encoding: utf-8
from __future__ import print_function
from antlr4 import *
from io import StringIO
import sys
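# NOTE: this file is machine-generated by ANTLR; serializedATN() below returns
# the parser's serialized ATN (transition network) and is not meant to be
# edited by hand.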
def serializedATN():
with StringIO() as buf:
buf.write(u"\3\u608b\ua72a\u8133\ub9ed\u417c\u3be7\u7786\u5964\3")
buf.write(u"\67\u00e9\4\2\t\2\4\3\t\3\4\4\t\4\4\5\t\5\4\6\t\6\4\7")
buf.write(u"\t\7\4\b\t\b\4\t\t\t\4\n\t\n\4\13\t\13\4\f\t\f\4\r\t")
buf.write(u"\r\4\16\t\16\4\17\t\17\4\20\t\20\4\21\t\21\4\22\t\22")
buf.write(u"\4\23\t\23\3\2\3\2\3\2\3\3\3\3\3\3\3\3\3\3\3\3\7\3\60")
buf.write(u"\n\3\f\3\16\3\63\13\3\3\4\3\4\3\4\3\4\3\4\3\4\7\4;\n")
buf.write(u"\4\f\4\16\4>\13\4\3\5\3\5\3\5\3\5\3\5\3\5\7\5F\n\5\f")
buf.write(u"\5\16\5I\13\5\3\6\3\6\3\6\3\6\3\6\3\6\3\6\3\6\3\6\5\6")
buf.write(u"T\n\6\3\6\3\6\3\6\3\6\3\6\3\6\7\6\\\n\6\f\6\16\6_\13")
buf.write(u"\6\3\7\3\7\3\7\3\7\3\7\3\7\7\7g\n\7\f\7\16\7j\13\7\3")
buf.write(u"\b\3\b\3\b\3\b\3\b\3\b\7\br\n\b\f\b\16\bu\13\b\3\t\3")
buf.write(u"\t\5\ty\n\t\3\t\3\t\3\t\3\t\3\t\5\t\u0080\n\t\3\t\3\t")
buf.write(u"\3\t\3\t\3\t\5\t\u0087\n\t\3\t\3\t\3\t\3\t\3\t\5\t\u008e")
buf.write(u"\n\t\3\t\3\t\3\t\3\t\3\t\5\t\u0095\n\t\3\t\3\t\3\t\3")
buf.write(u"\t\3\t\5\t\u009c\n\t\3\t\3\t\3\t\3\t\3\t\5\t\u00a3\n")
buf.write(u"\t\3\t\3\t\3\t\3\t\3\t\3\t\3\t\5\t\u00ac\n\t\3\n\3\n")
buf.write(u"\3\n\3\n\3\n\3\13\3\13\3\13\3\13\3\f\3\f\3\f\3\f\3\r")
buf.write(u"\3\r\3\r\3\r\5\r\u00bf\n\r\3\16\3\16\3\17\3\17\3\20\3")
buf.write(u"\20\3\20\3\20\3\20\3\20\5\20\u00cb\n\20\3\20\3\20\7\20")
buf.write(u"\u00cf\n\20\f\20\16\20\u00d2\13\20\3\21\3\21\3\21\3\21")
buf.write(u"\3\21\3\21\7\21\u00da\n\21\f\21\16\21\u00dd\13\21\3\21")
buf.write(u"\3\21\5\21\u00e1\n\21\3\22\3\22\5\22\u00e5\n\22\3\23")
buf.write(u"\3\23\3\23\2\t\4\6\b\n\f\16\36\24\2\4\6\b\n\f\16\20\22")
buf.write(u"\24\26\30\32\34\36 \"$\2\t\3\2 !\3\2\"%\4\2\4\4\6\6\3")
buf.write(u"\2\36\37\4\2\t\t\36\36\4\2\3\4\63\63\4\2\3\t\13\13\2")
buf.write(u"\u00f3\2&\3\2\2\2\4)\3\2\2\2\6\64\3\2\2\2\b?\3\2\2\2")
buf.write(u"\nS\3\2\2\2\f`\3\2\2\2\16k\3\2\2\2\20\u00ab\3\2\2\2\22")
buf.write(u"\u00ad\3\2\2\2\24\u00b2\3\2\2\2\26\u00b6\3\2\2\2\30\u00ba")
buf.write(u"\3\2\2\2\32\u00c0\3\2\2\2\34\u00c2\3\2\2\2\36\u00ca\3")
buf.write(u"\2\2\2 \u00e0\3\2\2\2\"\u00e4\3\2\2\2$\u00e6\3\2\2\2")
buf.write(u"&\'\5\4\3\2\'(\7\2\2\3(\3\3\2\2\2)*\b\3\1\2*+\5\6\4\2")
buf.write(u"+\61\3\2\2\2,-\f\4\2\2-.\7\17\2\2.\60\5\4\3\5/,\3\2\2")
buf.write(u"\2\60\63\3\2\2\2\61/\3\2\2\2\61\62\3\2\2\2\62\5\3\2\2")
buf.write(u"\2\63\61\3\2\2\2\64\65\b\4\1\2\65\66\5\b\5\2\66<\3\2")
buf.write(u"\2\2\678\f\4\2\289\7\r\2\29;\5\6\4\5:\67\3\2\2\2;>\3")
buf.write(u"\2\2\2<:\3\2\2\2<=\3\2\2\2=\7\3\2\2\2><\3\2\2\2?@\b\5")
buf.write(u"\1\2@A\5\n\6\2AG\3\2\2\2BC\f\4\2\2CD\7\f\2\2DF\5\b\5")
buf.write(u"\5EB\3\2\2\2FI\3\2\2\2GE\3\2\2\2GH\3\2\2\2H\t\3\2\2\2")
buf.write(u"IG\3\2\2\2JK\b\6\1\2KL\7-\2\2LM\5\f\7\2MN\7,\2\2NT\3")
buf.write(u"\2\2\2OP\7+\2\2PQ\5\4\3\2QR\7*\2\2RT\3\2\2\2SJ\3\2\2")
buf.write(u"\2SO\3\2\2\2T]\3\2\2\2UV\f\5\2\2V\\\5\22\n\2WX\f\4\2")
buf.write(u"\2X\\\5\24\13\2YZ\f\3\2\2Z\\\5\26\f\2[U\3\2\2\2[W\3\2")
buf.write(u"\2\2[Y\3\2\2\2\\_\3\2\2\2][\3\2\2\2]^\3\2\2\2^\13\3\2")
buf.write(u"\2\2_]\3\2\2\2`a\b\7\1\2ab\5\16\b\2bh\3\2\2\2cd\f\4\2")
buf.write(u"\2de\7\r\2\2eg\5\f\7\5fc\3\2\2\2gj\3\2\2\2hf\3\2\2\2")
buf.write(u"hi\3\2\2\2i\r\3\2\2\2jh\3\2\2\2kl\b\b\1\2lm\5\20\t\2")
buf.write(u"ms\3\2\2\2no\f\4\2\2op\7\f\2\2pr\5\16\b\5qn\3\2\2\2r")
buf.write(u"u\3\2\2\2sq\3\2\2\2st\3\2\2\2t\17\3\2\2\2us\3\2\2\2v")
buf.write(u"x\5\30\r\2wy\7\16\2\2xw\3\2\2\2xy\3\2\2\2yz\3\2\2\2z")
buf.write(u"{\t\2\2\2{|\5\"\22\2|\u00ac\3\2\2\2}\177\5\30\r\2~\u0080")
buf.write(u"\7\16\2\2\177~\3\2\2\2\177\u0080\3\2\2\2\u0080\u0081")
buf.write(u"\3\2\2\2\u0081\u0082\t\3\2\2\u0082\u0083\5$\23\2\u0083")
buf.write(u"\u00ac\3\2\2\2\u0084\u0086\5\30\r\2\u0085\u0087\7\16")
buf.write(u"\2\2\u0086\u0085\3\2\2\2\u0086\u0087\3\2\2\2\u0087\u0088")
buf.write(u"\3\2\2\2\u0088\u0089\7\25\2\2\u0089\u008a\5 \21\2\u008a")
buf.write(u"\u00ac\3\2\2\2\u008b\u008d\5\30\r\2\u008c\u008e\7\16")
buf.write(u"\2\2\u008d\u008c\3\2\2\2\u008d\u008e\3\2\2\2\u008e\u008f")
buf.write(u"\3\2\2\2\u008f\u0090\7\20\2\2\u0090\u0091\7\t\2\2\u0091")
buf.write(u"\u00ac\3\2\2\2\u0092\u0094\5\30\r\2\u0093\u0095\7\16")
buf.write(u"\2\2\u0094\u0093\3\2\2\2\u0094\u0095\3\2\2\2\u0095\u0096")
buf.write(u"\3\2\2\2\u0096\u0097\7\21\2\2\u0097\u0098\7\t\2\2\u0098")
buf.write(u"\u00ac\3\2\2\2\u0099\u009b\5\30\r\2\u009a\u009c\7\16")
buf.write(u"\2\2\u009b\u009a\3\2\2\2\u009b\u009c\3\2\2\2\u009c\u009d")
buf.write(u"\3\2\2\2\u009d\u009e\7\23\2\2\u009e\u009f\7\t\2\2\u009f")
buf.write(u"\u00ac\3\2\2\2\u00a0\u00a2\5\30\r\2\u00a1\u00a3\7\16")
buf.write(u"\2\2\u00a2\u00a1\3\2\2\2\u00a2\u00a3\3\2\2\2\u00a3\u00a4")
buf.write(u"\3\2\2\2\u00a4\u00a5\7\22\2\2\u00a5\u00a6\7\t\2\2\u00a6")
buf.write(u"\u00ac\3\2\2\2\u00a7\u00a8\7+\2\2\u00a8\u00a9\5\f\7\2")
buf.write(u"\u00a9\u00aa\7*\2\2\u00aa\u00ac\3\2\2\2\u00abv\3\2\2")
buf.write(u"\2\u00ab}\3\2\2\2\u00ab\u0084\3\2\2\2\u00ab\u008b\3\2")
buf.write(u"\2\2\u00ab\u0092\3\2\2\2\u00ab\u0099\3\2\2\2\u00ab\u00a0")
buf.write(u"\3\2\2\2\u00ab\u00a7\3\2\2\2\u00ac\21\3\2\2\2\u00ad\u00ae")
buf.write(u"\7\26\2\2\u00ae\u00af\7\t\2\2\u00af\u00b0\7\27\2\2\u00b0")
buf.write(u"\u00b1\7\t\2\2\u00b1\23\3\2\2\2\u00b2\u00b3\7\33\2\2")
buf.write(u"\u00b3\u00b4\t\4\2\2\u00b4\u00b5\7\30\2\2\u00b5\25\3")
buf.write(u"\2\2\2\u00b6\u00b7\7\34\2\2\u00b7\u00b8\7\4\2\2\u00b8")
buf.write(u"\u00b9\7\35\2\2\u00b9\27\3\2\2\2\u00ba\u00bb\5\32\16")
buf.write(u"\2\u00bb\u00bc\7\'\2\2\u00bc\u00be\5\34\17\2\u00bd\u00bf")
buf.write(u"\5\36\20\2\u00be\u00bd\3\2\2\2\u00be\u00bf\3\2\2\2\u00bf")
buf.write(u"\31\3\2\2\2\u00c0\u00c1\t\5\2\2\u00c1\33\3\2\2\2\u00c2")
buf.write(u"\u00c3\t\6\2\2\u00c3\35\3\2\2\2\u00c4\u00c5\b\20\1\2")
buf.write(u"\u00c5\u00c6\7(\2\2\u00c6\u00cb\t\6\2\2\u00c7\u00c8\7")
buf.write(u"-\2\2\u00c8\u00c9\t\7\2\2\u00c9\u00cb\7,\2\2\u00ca\u00c4")
buf.write(u"\3\2\2\2\u00ca\u00c7\3\2\2\2\u00cb\u00d0\3\2\2\2\u00cc")
buf.write(u"\u00cd\f\5\2\2\u00cd\u00cf\5\36\20\6\u00ce\u00cc\3\2")
buf.write(u"\2\2\u00cf\u00d2\3\2\2\2\u00d0\u00ce\3\2\2\2\u00d0\u00d1")
buf.write(u"\3\2\2\2\u00d1\37\3\2\2\2\u00d2\u00d0\3\2\2\2\u00d3\u00d4")
buf.write(u"\7+\2\2\u00d4\u00e1\7*\2\2\u00d5\u00d6\7+\2\2\u00d6\u00db")
buf.write(u"\5\"\22\2\u00d7\u00d8\7)\2\2\u00d8\u00da\5\"\22\2\u00d9")
buf.write(u"\u00d7\3\2\2\2\u00da\u00dd\3\2\2\2\u00db\u00d9\3\2\2")
buf.write(u"\2\u00db\u00dc\3\2\2\2\u00dc\u00de\3\2\2\2\u00dd\u00db")
buf.write(u"\3\2\2\2\u00de\u00df\7*\2\2\u00df\u00e1\3\2\2\2\u00e0")
buf.write(u"\u00d3\3\2\2\2\u00e0\u00d5\3\2\2\2\u00e1!\3\2\2\2\u00e2")
buf.write(u"\u00e5\5$\23\2\u00e3\u00e5\7\n\2\2\u00e4\u00e2\3\2\2")
buf.write(u"\2\u00e4\u00e3\3\2\2\2\u00e5#\3\2\2\2\u00e6\u00e7\t\b")
buf.write(u"\2\2\u00e7%\3\2\2\2\30\61<GS[]hsx\177\u0086\u008d\u0094")
buf.write(u"\u009b\u00a2\u00ab\u00be\u00ca\u00d0\u00db\u00e0\u00e4")
return buf.getvalue()
class STIXPatternParser ( Parser ):
grammarFileName = "STIXPattern.g4"
atn = ATNDeserializer().deserialize(serializedATN())
decisionsToDFA = [ DFA(ds, i) for i, ds in enumerate(atn.decisionToState) ]
sharedContextCache = PredictionContextCache()
literalNames = [ u"<INVALID>", u"<INVALID>", u"<INVALID>", u"<INVALID>",
u"<INVALID>", u"<INVALID>", u"<INVALID>", u"<INVALID>",
u"<INVALID>", u"<INVALID>", u"'AND'", u"'OR'", u"'NOT'",
u"'FOLLOWEDBY'", u"'LIKE'", u"'MATCHES'", u"'ISSUPERSET'",
u"'ISSUBSET'", u"'LAST'", u"'IN'", u"'START'", u"'STOP'",
u"'SECONDS'", u"'true'", u"'false'", u"'WITHIN'", u"'REPEATS'",
u"'TIMES'", u"<INVALID>", u"<INVALID>", u"<INVALID>",
u"<INVALID>", u"'<'", u"'<='", u"'>'", u"'>='", u"'''",
u"':'", u"'.'", u"','", u"')'", u"'('", u"']'", u"'['",
u"'+'", u"<INVALID>", u"'-'", u"'^'", u"'/'", u"'*'" ]
symbolicNames = [ u"<INVALID>", u"IntNegLiteral", u"IntPosLiteral",
u"FloatNegLiteral", u"FloatPosLiteral", u"HexLiteral",
u"BinaryLiteral", u"StringLiteral", u"BoolLiteral",
u"TimestampLiteral", u"AND", u"OR", u"NOT", u"FOLLOWEDBY",
u"LIKE", u"MATCHES", u"ISSUPERSET", u"ISSUBSET", u"LAST",
u"IN", u"START", u"STOP", u"SECONDS", u"TRUE", u"FALSE",
u"WITHIN", u"REPEATS", u"TIMES", u"IdentifierWithoutHyphen",
u"IdentifierWithHyphen", u"EQ", u"NEQ", u"LT", u"LE",
u"GT", u"GE", u"QUOTE", u"COLON", u"DOT", u"COMMA",
u"RPAREN", u"LPAREN", u"RBRACK", u"LBRACK", u"PLUS",
u"HYPHEN", u"MINUS", u"POWER_OP", u"DIVIDE", u"ASTERISK",
u"WS", u"COMMENT", u"LINE_COMMENT", u"InvalidCharacter" ]
RULE_pattern = 0
RULE_observationExpressions = 1
RULE_observationExpressionOr = 2
RULE_observationExpressionAnd = 3
RULE_observationExpression = 4
RULE_comparisonExpression = 5
RULE_comparisonExpressionAnd = 6
RULE_propTest = 7
RULE_startStopQualifier = 8
RULE_withinQualifier = 9
RULE_repeatedQualifier = 10
RULE_objectPath = 11
RULE_objectType = 12
RULE_firstPathComponent = 13
RULE_objectPathComponent = 14
RULE_setLiteral = 15
RULE_primitiveLiteral = 16
RULE_orderableLiteral = 17
ruleNames = [ u"pattern", u"observationExpressions", u"observationExpressionOr",
u"observationExpressionAnd", u"observationExpression",
u"comparisonExpression", u"comparisonExpressionAnd",
u"propTest", u"startStopQualifier", u"withinQualifier",
u"repeatedQualifier", u"objectPath", u"objectType", u"firstPathComponent",
u"objectPathComponent", u"setLiteral", u"primitiveLiteral",
u"orderableLiteral" ]
EOF = Token.EOF
IntNegLiteral=1
IntPosLiteral=2
FloatNegLiteral=3
FloatPosLiteral=4
HexLiteral=5
BinaryLiteral=6
StringLiteral=7
BoolLiteral=8
TimestampLiteral=9
AND=10
OR=11
NOT=12
FOLLOWEDBY=13
LIKE=14
MATCHES=15
ISSUPERSET=16
ISSUBSET=17
LAST=18
IN=19
START=20
STOP=21
SECONDS=22
TRUE=23
FALSE=24
WITHIN=25
REPEATS=26
TIMES=27
IdentifierWithoutHyphen=28
IdentifierWithHyphen=29
EQ=30
NEQ=31
LT=32
LE=33
GT=34
GE=35
QUOTE=36
COLON=37
DOT=38
COMMA=39
RPAREN=40
LPAREN=41
RBRACK=42
LBRACK=43
PLUS=44
HYPHEN=45
MINUS=46
POWER_OP=47
DIVIDE=48
ASTERISK=49
WS=50
COMMENT=51
LINE_COMMENT=52
InvalidCharacter=53
def __init__(self, input, output=sys.stdout):
super(STIXPatternParser, self).__init__(input, output=output)
self.checkVersion("4.8")
self._interp = ParserATNSimulator(self, self.atn, self.decisionsToDFA, self.sharedContextCache)
self._predicates = None
class PatternContext(ParserRuleContext):
def __init__(self, parser, parent=None, invokingState=-1):
super(STIXPatternParser.PatternContext, self).__init__(parent, invokingState)
self.parser = parser
def observationExpressions(self):
return self.getTypedRuleContext(STIXPatternParser.ObservationExpressionsContext,0)
def EOF(self):
return self.getToken(STIXPatternParser.EOF, 0)
def getRuleIndex(self):
return STIXPatternParser.RULE_pattern
def enterRule(self, listener):
if hasattr(listener, "enterPattern"):
listener.enterPattern(self)
def exitRule(self, listener):
if hasattr(listener, "exitPattern"):
listener.exitPattern(self)
def accept(self, visitor):
if hasattr(visitor, "visitPattern"):
return visitor.visitPattern(self)
else:
return visitor.visitChildren(self)
def pattern(self):
localctx = STIXPatternParser.PatternContext(self, self._ctx, self.state)
self.enterRule(localctx, 0, self.RULE_pattern)
try:
self.enterOuterAlt(localctx, 1)
self.state = 36
self.observationExpressions(0)
self.state = 37
self.match(STIXPatternParser.EOF)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class ObservationExpressionsContext(ParserRuleContext):
def __init__(self, parser, parent=None, invokingState=-1):
super(STIXPatternParser.ObservationExpressionsContext, self).__init__(parent, invokingState)
self.parser = parser
def observationExpressionOr(self):
return self.getTypedRuleContext(STIXPatternParser.ObservationExpressionOrContext,0)
def observationExpressions(self, i=None):
if i is None:
return self.getTypedRuleContexts(STIXPatternParser.ObservationExpressionsContext)
else:
return self.getTypedRuleContext(STIXPatternParser.ObservationExpressionsContext,i)
def FOLLOWEDBY(self):
return self.getToken(STIXPatternParser.FOLLOWEDBY, 0)
def getRuleIndex(self):
return STIXPatternParser.RULE_observationExpressions
def enterRule(self, listener):
if hasattr(listener, "enterObservationExpressions"):
listener.enterObservationExpressions(self)
def exitRule(self, listener):
if hasattr(listener, "exitObservationExpressions"):
listener.exitObservationExpressions(self)
def accept(self, visitor):
if hasattr(visitor, "visitObservationExpressions"):
return visitor.visitObservationExpressions(self)
else:
return visitor.visitChildren(self)
def observationExpressions(self, _p=0):
_parentctx = self._ctx
_parentState = self.state
localctx = STIXPatternParser.ObservationExpressionsContext(self, self._ctx, _parentState)
_prevctx = localctx
_startState = 2
self.enterRecursionRule(localctx, 2, self.RULE_observationExpressions, _p)
try:
self.enterOuterAlt(localctx, 1)
self.state = 40
self.observationExpressionOr(0)
self._ctx.stop = self._input.LT(-1)
self.state = 47
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,0,self._ctx)
while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
if _alt==1:
if self._parseListeners is not None:
self.triggerExitRuleEvent()
_prevctx = localctx
localctx = STIXPatternParser.ObservationExpressionsContext(self, _parentctx, _parentState)
self.pushNewRecursionContext(localctx, _startState, self.RULE_observationExpressions)
self.state = 42
if not self.precpred(self._ctx, 2):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 2)")
self.state = 43
self.match(STIXPatternParser.FOLLOWEDBY)
self.state = 44
self.observationExpressions(3)
self.state = 49
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,0,self._ctx)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.unrollRecursionContexts(_parentctx)
return localctx
class ObservationExpressionOrContext(ParserRuleContext):
def __init__(self, parser, parent=None, invokingState=-1):
super(STIXPatternParser.ObservationExpressionOrContext, self).__init__(parent, invokingState)
self.parser = parser
def observationExpressionAnd(self):
return self.getTypedRuleContext(STIXPatternParser.ObservationExpressionAndContext,0)
def observationExpressionOr(self, i=None):
if i is None:
return self.getTypedRuleContexts(STIXPatternParser.ObservationExpressionOrContext)
else:
return self.getTypedRuleContext(STIXPatternParser.ObservationExpressionOrContext,i)
def OR(self):
return self.getToken(STIXPatternParser.OR, 0)
def getRuleIndex(self):
return STIXPatternParser.RULE_observationExpressionOr
def enterRule(self, listener):
if hasattr(listener, "enterObservationExpressionOr"):
listener.enterObservationExpressionOr(self)
def exitRule(self, listener):
if hasattr(listener, "exitObservationExpressionOr"):
listener.exitObservationExpressionOr(self)
def accept(self, visitor):
if hasattr(visitor, "visitObservationExpressionOr"):
return visitor.visitObservationExpressionOr(self)
else:
return visitor.visitChildren(self)
def observationExpressionOr(self, _p=0):
_parentctx = self._ctx
_parentState = self.state
localctx = STIXPatternParser.ObservationExpressionOrContext(self, self._ctx, _parentState)
_prevctx = localctx
_startState = 4
self.enterRecursionRule(localctx, 4, self.RULE_observationExpressionOr, _p)
try:
self.enterOuterAlt(localctx, 1)
self.state = 51
self.observationExpressionAnd(0)
self._ctx.stop = self._input.LT(-1)
self.state = 58
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,1,self._ctx)
while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
if _alt==1:
if self._parseListeners is not None:
self.triggerExitRuleEvent()
_prevctx = localctx
localctx = STIXPatternParser.ObservationExpressionOrContext(self, _parentctx, _parentState)
self.pushNewRecursionContext(localctx, _startState, self.RULE_observationExpressionOr)
self.state = 53
if not self.precpred(self._ctx, 2):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 2)")
self.state = 54
self.match(STIXPatternParser.OR)
self.state = 55
self.observationExpressionOr(3)
self.state = 60
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,1,self._ctx)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.unrollRecursionContexts(_parentctx)
return localctx
class ObservationExpressionAndContext(ParserRuleContext):
def __init__(self, parser, parent=None, invokingState=-1):
super(STIXPatternParser.ObservationExpressionAndContext, self).__init__(parent, invokingState)
self.parser = parser
def observationExpression(self):
return self.getTypedRuleContext(STIXPatternParser.ObservationExpressionContext,0)
def observationExpressionAnd(self, i=None):
if i is None:
return self.getTypedRuleContexts(STIXPatternParser.ObservationExpressionAndContext)
else:
return self.getTypedRuleContext(STIXPatternParser.ObservationExpressionAndContext,i)
def AND(self):
return self.getToken(STIXPatternParser.AND, 0)
def getRuleIndex(self):
return STIXPatternParser.RULE_observationExpressionAnd
def enterRule(self, listener):
if hasattr(listener, "enterObservationExpressionAnd"):
listener.enterObservationExpressionAnd(self)
def exitRule(self, listener):
if hasattr(listener, "exitObservationExpressionAnd"):
listener.exitObservationExpressionAnd(self)
def accept(self, visitor):
if hasattr(visitor, "visitObservationExpressionAnd"):
return visitor.visitObservationExpressionAnd(self)
else:
return visitor.visitChildren(self)
def observationExpressionAnd(self, _p=0):
_parentctx = self._ctx
_parentState = self.state
localctx = STIXPatternParser.ObservationExpressionAndContext(self, self._ctx, _parentState)
_prevctx = localctx
_startState = 6
self.enterRecursionRule(localctx, 6, self.RULE_observationExpressionAnd, _p)
try:
self.enterOuterAlt(localctx, 1)
self.state = 62
self.observationExpression(0)
self._ctx.stop = self._input.LT(-1)
self.state = 69
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,2,self._ctx)
while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
if _alt==1:
if self._parseListeners is not None:
self.triggerExitRuleEvent()
_prevctx = localctx
localctx = STIXPatternParser.ObservationExpressionAndContext(self, _parentctx, _parentState)
self.pushNewRecursionContext(localctx, _startState, self.RULE_observationExpressionAnd)
self.state = 64
if not self.precpred(self._ctx, 2):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 2)")
self.state = 65
self.match(STIXPatternParser.AND)
self.state = 66
self.observationExpressionAnd(3)
self.state = 71
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,2,self._ctx)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.unrollRecursionContexts(_parentctx)
return localctx
class ObservationExpressionContext(ParserRuleContext):
def __init__(self, parser, parent=None, invokingState=-1):
super(STIXPatternParser.ObservationExpressionContext, self).__init__(parent, invokingState)
self.parser = parser
def getRuleIndex(self):
return STIXPatternParser.RULE_observationExpression
def copyFrom(self, ctx):
super(STIXPatternParser.ObservationExpressionContext, self).copyFrom(ctx)
class ObservationExpressionRepeatedContext(ObservationExpressionContext):
def __init__(self, parser, ctx): # actually a STIXPatternParser.ObservationExpressionContext)
super(STIXPatternParser.ObservationExpressionRepeatedContext, self).__init__(parser)
self.copyFrom(ctx)
def observationExpression(self):
return self.getTypedRuleContext(STIXPatternParser.ObservationExpressionContext,0)
def repeatedQualifier(self):
return self.getTypedRuleContext(STIXPatternParser.RepeatedQualifierContext,0)
def enterRule(self, listener):
if hasattr(listener, "enterObservationExpressionRepeated"):
listener.enterObservationExpressionRepeated(self)
def exitRule(self, listener):
if hasattr(listener, "exitObservationExpressionRepeated"):
listener.exitObservationExpressionRepeated(self)
def accept(self, visitor):
if hasattr(visitor, "visitObservationExpressionRepeated"):
return visitor.visitObservationExpressionRepeated(self)
else:
return visitor.visitChildren(self)
class ObservationExpressionSimpleContext(ObservationExpressionContext):
def __init__(self, parser, ctx): # actually a STIXPatternParser.ObservationExpressionContext)
super(STIXPatternParser.ObservationExpressionSimpleContext, self).__init__(parser)
self.copyFrom(ctx)
def LBRACK(self):
return self.getToken(STIXPatternParser.LBRACK, 0)
def comparisonExpression(self):
return self.getTypedRuleContext(STIXPatternParser.ComparisonExpressionContext,0)
def RBRACK(self):
return self.getToken(STIXPatternParser.RBRACK, 0)
def enterRule(self, listener):
if hasattr(listener, "enterObservationExpressionSimple"):
listener.enterObservationExpressionSimple(self)
def exitRule(self, listener):
if hasattr(listener, "exitObservationExpressionSimple"):
listener.exitObservationExpressionSimple(self)
def accept(self, visitor):
if hasattr(visitor, "visitObservationExpressionSimple"):
return visitor.visitObservationExpressionSimple(self)
else:
return visitor.visitChildren(self)
class ObservationExpressionCompoundContext(ObservationExpressionContext):
        def __init__(self, parser, ctx): # actually a STIXPatternParser.ObservationExpressionContext
super(STIXPatternParser.ObservationExpressionCompoundContext, self).__init__(parser)
self.copyFrom(ctx)
def LPAREN(self):
return self.getToken(STIXPatternParser.LPAREN, 0)
def observationExpressions(self):
return self.getTypedRuleContext(STIXPatternParser.ObservationExpressionsContext,0)
def RPAREN(self):
return self.getToken(STIXPatternParser.RPAREN, 0)
def enterRule(self, listener):
if hasattr(listener, "enterObservationExpressionCompound"):
listener.enterObservationExpressionCompound(self)
def exitRule(self, listener):
if hasattr(listener, "exitObservationExpressionCompound"):
listener.exitObservationExpressionCompound(self)
def accept(self, visitor):
if hasattr(visitor, "visitObservationExpressionCompound"):
return visitor.visitObservationExpressionCompound(self)
else:
return visitor.visitChildren(self)
class ObservationExpressionWithinContext(ObservationExpressionContext):
        def __init__(self, parser, ctx): # actually a STIXPatternParser.ObservationExpressionContext
super(STIXPatternParser.ObservationExpressionWithinContext, self).__init__(parser)
self.copyFrom(ctx)
def observationExpression(self):
return self.getTypedRuleContext(STIXPatternParser.ObservationExpressionContext,0)
def withinQualifier(self):
return self.getTypedRuleContext(STIXPatternParser.WithinQualifierContext,0)
def enterRule(self, listener):
if hasattr(listener, "enterObservationExpressionWithin"):
listener.enterObservationExpressionWithin(self)
def exitRule(self, listener):
if hasattr(listener, "exitObservationExpressionWithin"):
listener.exitObservationExpressionWithin(self)
def accept(self, visitor):
if hasattr(visitor, "visitObservationExpressionWithin"):
return visitor.visitObservationExpressionWithin(self)
else:
return visitor.visitChildren(self)
class ObservationExpressionStartStopContext(ObservationExpressionContext):
        def __init__(self, parser, ctx): # actually a STIXPatternParser.ObservationExpressionContext
super(STIXPatternParser.ObservationExpressionStartStopContext, self).__init__(parser)
self.copyFrom(ctx)
def observationExpression(self):
return self.getTypedRuleContext(STIXPatternParser.ObservationExpressionContext,0)
def startStopQualifier(self):
return self.getTypedRuleContext(STIXPatternParser.StartStopQualifierContext,0)
def enterRule(self, listener):
if hasattr(listener, "enterObservationExpressionStartStop"):
listener.enterObservationExpressionStartStop(self)
def exitRule(self, listener):
if hasattr(listener, "exitObservationExpressionStartStop"):
listener.exitObservationExpressionStartStop(self)
def accept(self, visitor):
if hasattr(visitor, "visitObservationExpressionStartStop"):
return visitor.visitObservationExpressionStartStop(self)
else:
return visitor.visitChildren(self)
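    # observationExpression dispatches on the lookahead token: '[' opens a
    # simple expression ("[ comparisonExpression ]") and '(' a compound one
    # ("( observationExpressions )").  The trailing loop then attaches any
    # postfix qualifiers (START/STOP, WITHIN, REPEATS) selected by the
    # precedence predicates below.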
def observationExpression(self, _p=0):
_parentctx = self._ctx
_parentState = self.state
localctx = STIXPatternParser.ObservationExpressionContext(self, self._ctx, _parentState)
_prevctx = localctx
_startState = 8
self.enterRecursionRule(localctx, 8, self.RULE_observationExpression, _p)
try:
self.enterOuterAlt(localctx, 1)
self.state = 81
self._errHandler.sync(self)
token = self._input.LA(1)
if token in [STIXPatternParser.LBRACK]:
localctx = STIXPatternParser.ObservationExpressionSimpleContext(self, localctx)
self._ctx = localctx
_prevctx = localctx
self.state = 73
self.match(STIXPatternParser.LBRACK)
self.state = 74
self.comparisonExpression(0)
self.state = 75
self.match(STIXPatternParser.RBRACK)
pass
elif token in [STIXPatternParser.LPAREN]:
localctx = STIXPatternParser.ObservationExpressionCompoundContext(self, localctx)
self._ctx = localctx
_prevctx = localctx
self.state = 77
self.match(STIXPatternParser.LPAREN)
self.state = 78
self.observationExpressions(0)
self.state = 79
self.match(STIXPatternParser.RPAREN)
pass
else:
raise NoViableAltException(self)
self._ctx.stop = self._input.LT(-1)
self.state = 91
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,5,self._ctx)
while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
if _alt==1:
if self._parseListeners is not None:
self.triggerExitRuleEvent()
_prevctx = localctx
self.state = 89
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,4,self._ctx)
if la_ == 1:
localctx = STIXPatternParser.ObservationExpressionStartStopContext(self, STIXPatternParser.ObservationExpressionContext(self, _parentctx, _parentState))
self.pushNewRecursionContext(localctx, _startState, self.RULE_observationExpression)
self.state = 83
if not self.precpred(self._ctx, 3):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 3)")
self.state = 84
self.startStopQualifier()
pass
elif la_ == 2:
localctx = STIXPatternParser.ObservationExpressionWithinContext(self, STIXPatternParser.ObservationExpressionContext(self, _parentctx, _parentState))
self.pushNewRecursionContext(localctx, _startState, self.RULE_observationExpression)
self.state = 85
if not self.precpred(self._ctx, 2):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 2)")
self.state = 86
self.withinQualifier()
pass
elif la_ == 3:
localctx = STIXPatternParser.ObservationExpressionRepeatedContext(self, STIXPatternParser.ObservationExpressionContext(self, _parentctx, _parentState))
self.pushNewRecursionContext(localctx, _startState, self.RULE_observationExpression)
self.state = 87
if not self.precpred(self._ctx, 1):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 1)")
self.state = 88
self.repeatedQualifier()
pass
self.state = 93
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,5,self._ctx)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.unrollRecursionContexts(_parentctx)
return localctx
class ComparisonExpressionContext(ParserRuleContext):
def __init__(self, parser, parent=None, invokingState=-1):
super(STIXPatternParser.ComparisonExpressionContext, self).__init__(parent, invokingState)
self.parser = parser
def comparisonExpressionAnd(self):
return self.getTypedRuleContext(STIXPatternParser.ComparisonExpressionAndContext,0)
def comparisonExpression(self, i=None):
if i is None:
return self.getTypedRuleContexts(STIXPatternParser.ComparisonExpressionContext)
else:
return self.getTypedRuleContext(STIXPatternParser.ComparisonExpressionContext,i)
def OR(self):
return self.getToken(STIXPatternParser.OR, 0)
def getRuleIndex(self):
return STIXPatternParser.RULE_comparisonExpression
def enterRule(self, listener):
if hasattr(listener, "enterComparisonExpression"):
listener.enterComparisonExpression(self)
def exitRule(self, listener):
if hasattr(listener, "exitComparisonExpression"):
listener.exitComparisonExpression(self)
def accept(self, visitor):
if hasattr(visitor, "visitComparisonExpression"):
return visitor.visitComparisonExpression(self)
else:
return visitor.visitChildren(self)
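    # comparisonExpression chains comparisonExpressionAnd operands with OR,
    # using the same left-recursion-elimination loop as the rules above.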
def comparisonExpression(self, _p=0):
_parentctx = self._ctx
_parentState = self.state
localctx = STIXPatternParser.ComparisonExpressionContext(self, self._ctx, _parentState)
_prevctx = localctx
_startState = 10
self.enterRecursionRule(localctx, 10, self.RULE_comparisonExpression, _p)
try:
self.enterOuterAlt(localctx, 1)
self.state = 95
self.comparisonExpressionAnd(0)
self._ctx.stop = self._input.LT(-1)
self.state = 102
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,6,self._ctx)
while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
if _alt==1:
if self._parseListeners is not None:
self.triggerExitRuleEvent()
_prevctx = localctx
localctx = STIXPatternParser.ComparisonExpressionContext(self, _parentctx, _parentState)
self.pushNewRecursionContext(localctx, _startState, self.RULE_comparisonExpression)
self.state = 97
if not self.precpred(self._ctx, 2):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 2)")
self.state = 98
self.match(STIXPatternParser.OR)
self.state = 99
self.comparisonExpression(3)
self.state = 104
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,6,self._ctx)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.unrollRecursionContexts(_parentctx)
return localctx
class ComparisonExpressionAndContext(ParserRuleContext):
def __init__(self, parser, parent=None, invokingState=-1):
super(STIXPatternParser.ComparisonExpressionAndContext, self).__init__(parent, invokingState)
self.parser = parser
def propTest(self):
return self.getTypedRuleContext(STIXPatternParser.PropTestContext,0)
def comparisonExpressionAnd(self, i=None):
if i is None:
return self.getTypedRuleContexts(STIXPatternParser.ComparisonExpressionAndContext)
else:
return self.getTypedRuleContext(STIXPatternParser.ComparisonExpressionAndContext,i)
def AND(self):
return self.getToken(STIXPatternParser.AND, 0)
def getRuleIndex(self):
return STIXPatternParser.RULE_comparisonExpressionAnd
def enterRule(self, listener):
if hasattr(listener, "enterComparisonExpressionAnd"):
listener.enterComparisonExpressionAnd(self)
def exitRule(self, listener):
if hasattr(listener, "exitComparisonExpressionAnd"):
listener.exitComparisonExpressionAnd(self)
def accept(self, visitor):
if hasattr(visitor, "visitComparisonExpressionAnd"):
return visitor.visitComparisonExpressionAnd(self)
else:
return visitor.visitChildren(self)
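    # comparisonExpressionAnd chains propTest terms with AND; together with
    # comparisonExpression this gives AND precedence over OR within an
    # observation expression.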
def comparisonExpressionAnd(self, _p=0):
_parentctx = self._ctx
_parentState = self.state
localctx = STIXPatternParser.ComparisonExpressionAndContext(self, self._ctx, _parentState)
_prevctx = localctx
_startState = 12
self.enterRecursionRule(localctx, 12, self.RULE_comparisonExpressionAnd, _p)
try:
self.enterOuterAlt(localctx, 1)
self.state = 106
self.propTest()
self._ctx.stop = self._input.LT(-1)
self.state = 113
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,7,self._ctx)
while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
if _alt==1:
if self._parseListeners is not None:
self.triggerExitRuleEvent()
_prevctx = localctx
localctx = STIXPatternParser.ComparisonExpressionAndContext(self, _parentctx, _parentState)
self.pushNewRecursionContext(localctx, _startState, self.RULE_comparisonExpressionAnd)
self.state = 108
if not self.precpred(self._ctx, 2):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 2)")
self.state = 109
self.match(STIXPatternParser.AND)
self.state = 110
self.comparisonExpressionAnd(3)
self.state = 115
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,7,self._ctx)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.unrollRecursionContexts(_parentctx)
return localctx
class PropTestContext(ParserRuleContext):
def __init__(self, parser, parent=None, invokingState=-1):
super(STIXPatternParser.PropTestContext, self).__init__(parent, invokingState)
self.parser = parser
def getRuleIndex(self):
return STIXPatternParser.RULE_propTest
def copyFrom(self, ctx):
super(STIXPatternParser.PropTestContext, self).copyFrom(ctx)
class PropTestRegexContext(PropTestContext):
        def __init__(self, parser, ctx): # actually a STIXPatternParser.PropTestContext
super(STIXPatternParser.PropTestRegexContext, self).__init__(parser)
self.copyFrom(ctx)
def objectPath(self):
return self.getTypedRuleContext(STIXPatternParser.ObjectPathContext,0)
def MATCHES(self):
return self.getToken(STIXPatternParser.MATCHES, 0)
def StringLiteral(self):
return self.getToken(STIXPatternParser.StringLiteral, 0)
def NOT(self):
return self.getToken(STIXPatternParser.NOT, 0)
def enterRule(self, listener):
if hasattr(listener, "enterPropTestRegex"):
listener.enterPropTestRegex(self)
def exitRule(self, listener):
if hasattr(listener, "exitPropTestRegex"):
listener.exitPropTestRegex(self)
def accept(self, visitor):
if hasattr(visitor, "visitPropTestRegex"):
return visitor.visitPropTestRegex(self)
else:
return visitor.visitChildren(self)
class PropTestOrderContext(PropTestContext):
        def __init__(self, parser, ctx): # actually a STIXPatternParser.PropTestContext
super(STIXPatternParser.PropTestOrderContext, self).__init__(parser)
self.copyFrom(ctx)
def objectPath(self):
return self.getTypedRuleContext(STIXPatternParser.ObjectPathContext,0)
def orderableLiteral(self):
return self.getTypedRuleContext(STIXPatternParser.OrderableLiteralContext,0)
def GT(self):
return self.getToken(STIXPatternParser.GT, 0)
def LT(self):
return self.getToken(STIXPatternParser.LT, 0)
def GE(self):
return self.getToken(STIXPatternParser.GE, 0)
def LE(self):
return self.getToken(STIXPatternParser.LE, 0)
def NOT(self):
return self.getToken(STIXPatternParser.NOT, 0)
def enterRule(self, listener):
if hasattr(listener, "enterPropTestOrder"):
listener.enterPropTestOrder(self)
def exitRule(self, listener):
if hasattr(listener, "exitPropTestOrder"):
listener.exitPropTestOrder(self)
def accept(self, visitor):
if hasattr(visitor, "visitPropTestOrder"):
return visitor.visitPropTestOrder(self)
else:
return visitor.visitChildren(self)
class PropTestLikeContext(PropTestContext):
        def __init__(self, parser, ctx): # actually a STIXPatternParser.PropTestContext
super(STIXPatternParser.PropTestLikeContext, self).__init__(parser)
self.copyFrom(ctx)
def objectPath(self):
return self.getTypedRuleContext(STIXPatternParser.ObjectPathContext,0)
def LIKE(self):
return self.getToken(STIXPatternParser.LIKE, 0)
def StringLiteral(self):
return self.getToken(STIXPatternParser.StringLiteral, 0)
def NOT(self):
return self.getToken(STIXPatternParser.NOT, 0)
def enterRule(self, listener):
if hasattr(listener, "enterPropTestLike"):
listener.enterPropTestLike(self)
def exitRule(self, listener):
if hasattr(listener, "exitPropTestLike"):
listener.exitPropTestLike(self)
def accept(self, visitor):
if hasattr(visitor, "visitPropTestLike"):
return visitor.visitPropTestLike(self)
else:
return visitor.visitChildren(self)
class PropTestEqualContext(PropTestContext):
        def __init__(self, parser, ctx): # actually a STIXPatternParser.PropTestContext
super(STIXPatternParser.PropTestEqualContext, self).__init__(parser)
self.copyFrom(ctx)
def objectPath(self):
return self.getTypedRuleContext(STIXPatternParser.ObjectPathContext,0)
def primitiveLiteral(self):
return self.getTypedRuleContext(STIXPatternParser.PrimitiveLiteralContext,0)
def EQ(self):
return self.getToken(STIXPatternParser.EQ, 0)
def NEQ(self):
return self.getToken(STIXPatternParser.NEQ, 0)
def NOT(self):
return self.getToken(STIXPatternParser.NOT, 0)
def enterRule(self, listener):
if hasattr(listener, "enterPropTestEqual"):
listener.enterPropTestEqual(self)
def exitRule(self, listener):
if hasattr(listener, "exitPropTestEqual"):
listener.exitPropTestEqual(self)
def accept(self, visitor):
if hasattr(visitor, "visitPropTestEqual"):
return visitor.visitPropTestEqual(self)
else:
return visitor.visitChildren(self)
class PropTestSetContext(PropTestContext):
        def __init__(self, parser, ctx): # actually a STIXPatternParser.PropTestContext
super(STIXPatternParser.PropTestSetContext, self).__init__(parser)
self.copyFrom(ctx)
def objectPath(self):
return self.getTypedRuleContext(STIXPatternParser.ObjectPathContext,0)
def IN(self):
return self.getToken(STIXPatternParser.IN, 0)
def setLiteral(self):
return self.getTypedRuleContext(STIXPatternParser.SetLiteralContext,0)
def NOT(self):
return self.getToken(STIXPatternParser.NOT, 0)
def enterRule(self, listener):
if hasattr(listener, "enterPropTestSet"):
listener.enterPropTestSet(self)
def exitRule(self, listener):
if hasattr(listener, "exitPropTestSet"):
listener.exitPropTestSet(self)
def accept(self, visitor):
if hasattr(visitor, "visitPropTestSet"):
return visitor.visitPropTestSet(self)
else:
return visitor.visitChildren(self)
class PropTestIsSubsetContext(PropTestContext):
        def __init__(self, parser, ctx): # actually a STIXPatternParser.PropTestContext
super(STIXPatternParser.PropTestIsSubsetContext, self).__init__(parser)
self.copyFrom(ctx)
def objectPath(self):
return self.getTypedRuleContext(STIXPatternParser.ObjectPathContext,0)
def ISSUBSET(self):
return self.getToken(STIXPatternParser.ISSUBSET, 0)
def StringLiteral(self):
return self.getToken(STIXPatternParser.StringLiteral, 0)
def NOT(self):
return self.getToken(STIXPatternParser.NOT, 0)
def enterRule(self, listener):
if hasattr(listener, "enterPropTestIsSubset"):
listener.enterPropTestIsSubset(self)
def exitRule(self, listener):
if hasattr(listener, "exitPropTestIsSubset"):
listener.exitPropTestIsSubset(self)
def accept(self, visitor):
if hasattr(visitor, "visitPropTestIsSubset"):
return visitor.visitPropTestIsSubset(self)
else:
return visitor.visitChildren(self)
class PropTestParenContext(PropTestContext):
        def __init__(self, parser, ctx): # actually a STIXPatternParser.PropTestContext
super(STIXPatternParser.PropTestParenContext, self).__init__(parser)
self.copyFrom(ctx)
def LPAREN(self):
return self.getToken(STIXPatternParser.LPAREN, 0)
def comparisonExpression(self):
return self.getTypedRuleContext(STIXPatternParser.ComparisonExpressionContext,0)
def RPAREN(self):
return self.getToken(STIXPatternParser.RPAREN, 0)
def enterRule(self, listener):
if hasattr(listener, "enterPropTestParen"):
listener.enterPropTestParen(self)
def exitRule(self, listener):
if hasattr(listener, "exitPropTestParen"):
listener.exitPropTestParen(self)
def accept(self, visitor):
if hasattr(visitor, "visitPropTestParen"):
return visitor.visitPropTestParen(self)
else:
return visitor.visitChildren(self)
class PropTestIsSupersetContext(PropTestContext):
        def __init__(self, parser, ctx): # actually a STIXPatternParser.PropTestContext
super(STIXPatternParser.PropTestIsSupersetContext, self).__init__(parser)
self.copyFrom(ctx)
def objectPath(self):
return self.getTypedRuleContext(STIXPatternParser.ObjectPathContext,0)
def ISSUPERSET(self):
return self.getToken(STIXPatternParser.ISSUPERSET, 0)
def StringLiteral(self):
return self.getToken(STIXPatternParser.StringLiteral, 0)
def NOT(self):
return self.getToken(STIXPatternParser.NOT, 0)
def enterRule(self, listener):
if hasattr(listener, "enterPropTestIsSuperset"):
listener.enterPropTestIsSuperset(self)
def exitRule(self, listener):
if hasattr(listener, "exitPropTestIsSuperset"):
listener.exitPropTestIsSuperset(self)
def accept(self, visitor):
if hasattr(visitor, "visitPropTestIsSuperset"):
return visitor.visitPropTestIsSuperset(self)
else:
return visitor.visitChildren(self)
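    # propTest selects among the eight property-test alternatives
    # (=/!=, ordering comparisons, IN, LIKE, MATCHES, ISSUBSET, ISSUPERSET,
    # or a parenthesized comparisonExpression), each optionally preceded by NOT.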
def propTest(self):
localctx = STIXPatternParser.PropTestContext(self, self._ctx, self.state)
self.enterRule(localctx, 14, self.RULE_propTest)
self._la = 0 # Token type
try:
self.state = 169
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,15,self._ctx)
if la_ == 1:
localctx = STIXPatternParser.PropTestEqualContext(self, localctx)
self.enterOuterAlt(localctx, 1)
self.state = 116
self.objectPath()
self.state = 118
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==STIXPatternParser.NOT:
self.state = 117
self.match(STIXPatternParser.NOT)
self.state = 120
_la = self._input.LA(1)
if not(_la==STIXPatternParser.EQ or _la==STIXPatternParser.NEQ):
self._errHandler.recoverInline(self)
else:
self._errHandler.reportMatch(self)
self.consume()
self.state = 121
self.primitiveLiteral()
pass
elif la_ == 2:
localctx = STIXPatternParser.PropTestOrderContext(self, localctx)
self.enterOuterAlt(localctx, 2)
self.state = 123
self.objectPath()
self.state = 125
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==STIXPatternParser.NOT:
self.state = 124
self.match(STIXPatternParser.NOT)
self.state = 127
_la = self._input.LA(1)
if not((((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << STIXPatternParser.LT) | (1 << STIXPatternParser.LE) | (1 << STIXPatternParser.GT) | (1 << STIXPatternParser.GE))) != 0)):
self._errHandler.recoverInline(self)
else:
self._errHandler.reportMatch(self)
self.consume()
self.state = 128
self.orderableLiteral()
pass
elif la_ == 3:
localctx = STIXPatternParser.PropTestSetContext(self, localctx)
self.enterOuterAlt(localctx, 3)
self.state = 130
self.objectPath()
self.state = 132
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==STIXPatternParser.NOT:
self.state = 131
self.match(STIXPatternParser.NOT)
self.state = 134
self.match(STIXPatternParser.IN)
self.state = 135
self.setLiteral()
pass
elif la_ == 4:
localctx = STIXPatternParser.PropTestLikeContext(self, localctx)
self.enterOuterAlt(localctx, 4)
self.state = 137
self.objectPath()
self.state = 139
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==STIXPatternParser.NOT:
self.state = 138
self.match(STIXPatternParser.NOT)
self.state = 141
self.match(STIXPatternParser.LIKE)
self.state = 142
self.match(STIXPatternParser.StringLiteral)
pass
elif la_ == 5:
localctx = STIXPatternParser.PropTestRegexContext(self, localctx)
self.enterOuterAlt(localctx, 5)
self.state = 144
self.objectPath()
self.state = 146
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==STIXPatternParser.NOT:
self.state = 145
self.match(STIXPatternParser.NOT)
self.state = 148
self.match(STIXPatternParser.MATCHES)
self.state = 149
self.match(STIXPatternParser.StringLiteral)
pass
elif la_ == 6:
localctx = STIXPatternParser.PropTestIsSubsetContext(self, localctx)
self.enterOuterAlt(localctx, 6)
self.state = 151
self.objectPath()
self.state = 153
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==STIXPatternParser.NOT:
self.state = 152
self.match(STIXPatternParser.NOT)
self.state = 155
self.match(STIXPatternParser.ISSUBSET)
self.state = 156
self.match(STIXPatternParser.StringLiteral)
pass
elif la_ == 7:
localctx = STIXPatternParser.PropTestIsSupersetContext(self, localctx)
self.enterOuterAlt(localctx, 7)
self.state = 158
self.objectPath()
self.state = 160
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==STIXPatternParser.NOT:
self.state = 159
self.match(STIXPatternParser.NOT)
self.state = 162
self.match(STIXPatternParser.ISSUPERSET)
self.state = 163
self.match(STIXPatternParser.StringLiteral)
pass
elif la_ == 8:
localctx = STIXPatternParser.PropTestParenContext(self, localctx)
self.enterOuterAlt(localctx, 8)
self.state = 165
self.match(STIXPatternParser.LPAREN)
self.state = 166
self.comparisonExpression(0)
self.state = 167
self.match(STIXPatternParser.RPAREN)
pass
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class StartStopQualifierContext(ParserRuleContext):
def __init__(self, parser, parent=None, invokingState=-1):
super(STIXPatternParser.StartStopQualifierContext, self).__init__(parent, invokingState)
self.parser = parser
def START(self):
return self.getToken(STIXPatternParser.START, 0)
def StringLiteral(self, i=None):
if i is None:
return self.getTokens(STIXPatternParser.StringLiteral)
else:
return self.getToken(STIXPatternParser.StringLiteral, i)
def STOP(self):
return self.getToken(STIXPatternParser.STOP, 0)
def getRuleIndex(self):
return STIXPatternParser.RULE_startStopQualifier
def enterRule(self, listener):
if hasattr(listener, "enterStartStopQualifier"):
listener.enterStartStopQualifier(self)
def exitRule(self, listener):
if hasattr(listener, "exitStartStopQualifier"):
listener.exitStartStopQualifier(self)
def accept(self, visitor):
if hasattr(visitor, "visitStartStopQualifier"):
return visitor.visitStartStopQualifier(self)
else:
return visitor.visitChildren(self)
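    # Matches the "START <StringLiteral> STOP <StringLiteral>" qualifier.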
def startStopQualifier(self):
localctx = STIXPatternParser.StartStopQualifierContext(self, self._ctx, self.state)
self.enterRule(localctx, 16, self.RULE_startStopQualifier)
try:
self.enterOuterAlt(localctx, 1)
self.state = 171
self.match(STIXPatternParser.START)
self.state = 172
self.match(STIXPatternParser.StringLiteral)
self.state = 173
self.match(STIXPatternParser.STOP)
self.state = 174
self.match(STIXPatternParser.StringLiteral)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class WithinQualifierContext(ParserRuleContext):
def __init__(self, parser, parent=None, invokingState=-1):
super(STIXPatternParser.WithinQualifierContext, self).__init__(parent, invokingState)
self.parser = parser
def WITHIN(self):
return self.getToken(STIXPatternParser.WITHIN, 0)
def SECONDS(self):
return self.getToken(STIXPatternParser.SECONDS, 0)
def IntPosLiteral(self):
return self.getToken(STIXPatternParser.IntPosLiteral, 0)
def FloatPosLiteral(self):
return self.getToken(STIXPatternParser.FloatPosLiteral, 0)
def getRuleIndex(self):
return STIXPatternParser.RULE_withinQualifier
def enterRule(self, listener):
if hasattr(listener, "enterWithinQualifier"):
listener.enterWithinQualifier(self)
def exitRule(self, listener):
if hasattr(listener, "exitWithinQualifier"):
listener.exitWithinQualifier(self)
def accept(self, visitor):
if hasattr(visitor, "visitWithinQualifier"):
return visitor.visitWithinQualifier(self)
else:
return visitor.visitChildren(self)
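    # Matches the "WITHIN <positive int or float> SECONDS" qualifier.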
def withinQualifier(self):
localctx = STIXPatternParser.WithinQualifierContext(self, self._ctx, self.state)
self.enterRule(localctx, 18, self.RULE_withinQualifier)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 176
self.match(STIXPatternParser.WITHIN)
self.state = 177
_la = self._input.LA(1)
if not(_la==STIXPatternParser.IntPosLiteral or _la==STIXPatternParser.FloatPosLiteral):
self._errHandler.recoverInline(self)
else:
self._errHandler.reportMatch(self)
self.consume()
self.state = 178
self.match(STIXPatternParser.SECONDS)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class RepeatedQualifierContext(ParserRuleContext):
def __init__(self, parser, parent=None, invokingState=-1):
super(STIXPatternParser.RepeatedQualifierContext, self).__init__(parent, invokingState)
self.parser = parser
def REPEATS(self):
return self.getToken(STIXPatternParser.REPEATS, 0)
def IntPosLiteral(self):
return self.getToken(STIXPatternParser.IntPosLiteral, 0)
def TIMES(self):
return self.getToken(STIXPatternParser.TIMES, 0)
def getRuleIndex(self):
return STIXPatternParser.RULE_repeatedQualifier
def enterRule(self, listener):
if hasattr(listener, "enterRepeatedQualifier"):
listener.enterRepeatedQualifier(self)
def exitRule(self, listener):
if hasattr(listener, "exitRepeatedQualifier"):
listener.exitRepeatedQualifier(self)
def accept(self, visitor):
if hasattr(visitor, "visitRepeatedQualifier"):
return visitor.visitRepeatedQualifier(self)
else:
return visitor.visitChildren(self)
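    # Matches the "REPEATS <positive int> TIMES" qualifier.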
def repeatedQualifier(self):
localctx = STIXPatternParser.RepeatedQualifierContext(self, self._ctx, self.state)
self.enterRule(localctx, 20, self.RULE_repeatedQualifier)
try:
self.enterOuterAlt(localctx, 1)
self.state = 180
self.match(STIXPatternParser.REPEATS)
self.state = 181
self.match(STIXPatternParser.IntPosLiteral)
self.state = 182
self.match(STIXPatternParser.TIMES)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class ObjectPathContext(ParserRuleContext):
def __init__(self, parser, parent=None, invokingState=-1):
super(STIXPatternParser.ObjectPathContext, self).__init__(parent, invokingState)
self.parser = parser
def objectType(self):
return self.getTypedRuleContext(STIXPatternParser.ObjectTypeContext,0)
def COLON(self):
return self.getToken(STIXPatternParser.COLON, 0)
def firstPathComponent(self):
return self.getTypedRuleContext(STIXPatternParser.FirstPathComponentContext,0)
def objectPathComponent(self):
return self.getTypedRuleContext(STIXPatternParser.ObjectPathComponentContext,0)
def getRuleIndex(self):
return STIXPatternParser.RULE_objectPath
def enterRule(self, listener):
if hasattr(listener, "enterObjectPath"):
listener.enterObjectPath(self)
def exitRule(self, listener):
if hasattr(listener, "exitObjectPath"):
listener.exitObjectPath(self)
def accept(self, visitor):
if hasattr(visitor, "visitObjectPath"):
return visitor.visitObjectPath(self)
else:
return visitor.visitChildren(self)
def objectPath(self):
localctx = STIXPatternParser.ObjectPathContext(self, self._ctx, self.state)
self.enterRule(localctx, 22, self.RULE_objectPath)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 184
self.objectType()
self.state = 185
self.match(STIXPatternParser.COLON)
self.state = 186
self.firstPathComponent()
self.state = 188
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==STIXPatternParser.DOT or _la==STIXPatternParser.LBRACK:
self.state = 187
self.objectPathComponent(0)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class ObjectTypeContext(ParserRuleContext):
def __init__(self, parser, parent=None, invokingState=-1):
super(STIXPatternParser.ObjectTypeContext, self).__init__(parent, invokingState)
self.parser = parser
def IdentifierWithoutHyphen(self):
return self.getToken(STIXPatternParser.IdentifierWithoutHyphen, 0)
def IdentifierWithHyphen(self):
return self.getToken(STIXPatternParser.IdentifierWithHyphen, 0)
def getRuleIndex(self):
return STIXPatternParser.RULE_objectType
def enterRule(self, listener):
if hasattr(listener, "enterObjectType"):
listener.enterObjectType(self)
def exitRule(self, listener):
if hasattr(listener, "exitObjectType"):
listener.exitObjectType(self)
def accept(self, visitor):
if hasattr(visitor, "visitObjectType"):
return visitor.visitObjectType(self)
else:
return visitor.visitChildren(self)
def objectType(self):
localctx = STIXPatternParser.ObjectTypeContext(self, self._ctx, self.state)
self.enterRule(localctx, 24, self.RULE_objectType)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 190
_la = self._input.LA(1)
if not(_la==STIXPatternParser.IdentifierWithoutHyphen or _la==STIXPatternParser.IdentifierWithHyphen):
self._errHandler.recoverInline(self)
else:
self._errHandler.reportMatch(self)
self.consume()
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class FirstPathComponentContext(ParserRuleContext):
def __init__(self, parser, parent=None, invokingState=-1):
super(STIXPatternParser.FirstPathComponentContext, self).__init__(parent, invokingState)
self.parser = parser
def IdentifierWithoutHyphen(self):
return self.getToken(STIXPatternParser.IdentifierWithoutHyphen, 0)
def StringLiteral(self):
return self.getToken(STIXPatternParser.StringLiteral, 0)
def getRuleIndex(self):
return STIXPatternParser.RULE_firstPathComponent
def enterRule(self, listener):
if hasattr(listener, "enterFirstPathComponent"):
listener.enterFirstPathComponent(self)
def exitRule(self, listener):
if hasattr(listener, "exitFirstPathComponent"):
listener.exitFirstPathComponent(self)
def accept(self, visitor):
if hasattr(visitor, "visitFirstPathComponent"):
return visitor.visitFirstPathComponent(self)
else:
return visitor.visitChildren(self)
def firstPathComponent(self):
localctx = STIXPatternParser.FirstPathComponentContext(self, self._ctx, self.state)
self.enterRule(localctx, 26, self.RULE_firstPathComponent)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 192
_la = self._input.LA(1)
if not(_la==STIXPatternParser.StringLiteral or _la==STIXPatternParser.IdentifierWithoutHyphen):
self._errHandler.recoverInline(self)
else:
self._errHandler.reportMatch(self)
self.consume()
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class ObjectPathComponentContext(ParserRuleContext):
def __init__(self, parser, parent=None, invokingState=-1):
super(STIXPatternParser.ObjectPathComponentContext, self).__init__(parent, invokingState)
self.parser = parser
def getRuleIndex(self):
return STIXPatternParser.RULE_objectPathComponent
def copyFrom(self, ctx):
super(STIXPatternParser.ObjectPathComponentContext, self).copyFrom(ctx)
class IndexPathStepContext(ObjectPathComponentContext):
        def __init__(self, parser, ctx): # actually a STIXPatternParser.ObjectPathComponentContext
super(STIXPatternParser.IndexPathStepContext, self).__init__(parser)
self.copyFrom(ctx)
def LBRACK(self):
return self.getToken(STIXPatternParser.LBRACK, 0)
def RBRACK(self):
return self.getToken(STIXPatternParser.RBRACK, 0)
def IntPosLiteral(self):
return self.getToken(STIXPatternParser.IntPosLiteral, 0)
def IntNegLiteral(self):
return self.getToken(STIXPatternParser.IntNegLiteral, 0)
def ASTERISK(self):
return self.getToken(STIXPatternParser.ASTERISK, 0)
def enterRule(self, listener):
if hasattr(listener, "enterIndexPathStep"):
listener.enterIndexPathStep(self)
def exitRule(self, listener):
if hasattr(listener, "exitIndexPathStep"):
listener.exitIndexPathStep(self)
def accept(self, visitor):
if hasattr(visitor, "visitIndexPathStep"):
return visitor.visitIndexPathStep(self)
else:
return visitor.visitChildren(self)
class PathStepContext(ObjectPathComponentContext):
        def __init__(self, parser, ctx): # actually a STIXPatternParser.ObjectPathComponentContext
super(STIXPatternParser.PathStepContext, self).__init__(parser)
self.copyFrom(ctx)
def objectPathComponent(self, i=None):
if i is None:
return self.getTypedRuleContexts(STIXPatternParser.ObjectPathComponentContext)
else:
return self.getTypedRuleContext(STIXPatternParser.ObjectPathComponentContext,i)
def enterRule(self, listener):
if hasattr(listener, "enterPathStep"):
listener.enterPathStep(self)
def exitRule(self, listener):
if hasattr(listener, "exitPathStep"):
listener.exitPathStep(self)
def accept(self, visitor):
if hasattr(visitor, "visitPathStep"):
return visitor.visitPathStep(self)
else:
return visitor.visitChildren(self)
class KeyPathStepContext(ObjectPathComponentContext):
        def __init__(self, parser, ctx): # actually a STIXPatternParser.ObjectPathComponentContext
super(STIXPatternParser.KeyPathStepContext, self).__init__(parser)
self.copyFrom(ctx)
def DOT(self):
return self.getToken(STIXPatternParser.DOT, 0)
def IdentifierWithoutHyphen(self):
return self.getToken(STIXPatternParser.IdentifierWithoutHyphen, 0)
def StringLiteral(self):
return self.getToken(STIXPatternParser.StringLiteral, 0)
def enterRule(self, listener):
if hasattr(listener, "enterKeyPathStep"):
listener.enterKeyPathStep(self)
def exitRule(self, listener):
if hasattr(listener, "exitKeyPathStep"):
listener.exitKeyPathStep(self)
def accept(self, visitor):
if hasattr(visitor, "visitKeyPathStep"):
return visitor.visitKeyPathStep(self)
else:
return visitor.visitChildren(self)
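    # objectPathComponent parses the tail of an object path: ".key" steps
    # (DOT plus an identifier or string literal) and "[index]" steps (a
    # positive/negative integer or '*'), chained left-recursively.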
def objectPathComponent(self, _p=0):
_parentctx = self._ctx
_parentState = self.state
localctx = STIXPatternParser.ObjectPathComponentContext(self, self._ctx, _parentState)
_prevctx = localctx
_startState = 28
self.enterRecursionRule(localctx, 28, self.RULE_objectPathComponent, _p)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 200
self._errHandler.sync(self)
token = self._input.LA(1)
if token in [STIXPatternParser.DOT]:
localctx = STIXPatternParser.KeyPathStepContext(self, localctx)
self._ctx = localctx
_prevctx = localctx
self.state = 195
self.match(STIXPatternParser.DOT)
self.state = 196
_la = self._input.LA(1)
if not(_la==STIXPatternParser.StringLiteral or _la==STIXPatternParser.IdentifierWithoutHyphen):
self._errHandler.recoverInline(self)
else:
self._errHandler.reportMatch(self)
self.consume()
pass
elif token in [STIXPatternParser.LBRACK]:
localctx = STIXPatternParser.IndexPathStepContext(self, localctx)
self._ctx = localctx
_prevctx = localctx
self.state = 197
self.match(STIXPatternParser.LBRACK)
self.state = 198
_la = self._input.LA(1)
if not((((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << STIXPatternParser.IntNegLiteral) | (1 << STIXPatternParser.IntPosLiteral) | (1 << STIXPatternParser.ASTERISK))) != 0)):
self._errHandler.recoverInline(self)
else:
self._errHandler.reportMatch(self)
self.consume()
self.state = 199
self.match(STIXPatternParser.RBRACK)
pass
else:
raise NoViableAltException(self)
self._ctx.stop = self._input.LT(-1)
self.state = 206
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,18,self._ctx)
while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
if _alt==1:
if self._parseListeners is not None:
self.triggerExitRuleEvent()
_prevctx = localctx
localctx = STIXPatternParser.PathStepContext(self, STIXPatternParser.ObjectPathComponentContext(self, _parentctx, _parentState))
self.pushNewRecursionContext(localctx, _startState, self.RULE_objectPathComponent)
self.state = 202
if not self.precpred(self._ctx, 3):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 3)")
self.state = 203
self.objectPathComponent(4)
self.state = 208
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,18,self._ctx)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.unrollRecursionContexts(_parentctx)
return localctx
class SetLiteralContext(ParserRuleContext):
def __init__(self, parser, parent=None, invokingState=-1):
super(STIXPatternParser.SetLiteralContext, self).__init__(parent, invokingState)
self.parser = parser
def LPAREN(self):
return self.getToken(STIXPatternParser.LPAREN, 0)
def RPAREN(self):
return self.getToken(STIXPatternParser.RPAREN, 0)
def primitiveLiteral(self, i=None):
if i is None:
return self.getTypedRuleContexts(STIXPatternParser.PrimitiveLiteralContext)
else:
return self.getTypedRuleContext(STIXPatternParser.PrimitiveLiteralContext,i)
def COMMA(self, i=None):
if i is None:
return self.getTokens(STIXPatternParser.COMMA)
else:
return self.getToken(STIXPatternParser.COMMA, i)
def getRuleIndex(self):
return STIXPatternParser.RULE_setLiteral
def enterRule(self, listener):
if hasattr(listener, "enterSetLiteral"):
listener.enterSetLiteral(self)
def exitRule(self, listener):
if hasattr(listener, "exitSetLiteral"):
listener.exitSetLiteral(self)
def accept(self, visitor):
if hasattr(visitor, "visitSetLiteral"):
return visitor.visitSetLiteral(self)
else:
return visitor.visitChildren(self)
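    # setLiteral is either an empty "()" or a parenthesized, comma-separated
    # list of primitive literals.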
def setLiteral(self):
localctx = STIXPatternParser.SetLiteralContext(self, self._ctx, self.state)
self.enterRule(localctx, 30, self.RULE_setLiteral)
self._la = 0 # Token type
try:
self.state = 222
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,20,self._ctx)
if la_ == 1:
self.enterOuterAlt(localctx, 1)
self.state = 209
self.match(STIXPatternParser.LPAREN)
self.state = 210
self.match(STIXPatternParser.RPAREN)
pass
elif la_ == 2:
self.enterOuterAlt(localctx, 2)
self.state = 211
self.match(STIXPatternParser.LPAREN)
self.state = 212
self.primitiveLiteral()
self.state = 217
self._errHandler.sync(self)
_la = self._input.LA(1)
while _la==STIXPatternParser.COMMA:
self.state = 213
self.match(STIXPatternParser.COMMA)
self.state = 214
self.primitiveLiteral()
self.state = 219
self._errHandler.sync(self)
_la = self._input.LA(1)
self.state = 220
self.match(STIXPatternParser.RPAREN)
pass
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class PrimitiveLiteralContext(ParserRuleContext):
def __init__(self, parser, parent=None, invokingState=-1):
super(STIXPatternParser.PrimitiveLiteralContext, self).__init__(parent, invokingState)
self.parser = parser
def orderableLiteral(self):
return self.getTypedRuleContext(STIXPatternParser.OrderableLiteralContext,0)
def BoolLiteral(self):
return self.getToken(STIXPatternParser.BoolLiteral, 0)
def getRuleIndex(self):
return STIXPatternParser.RULE_primitiveLiteral
def enterRule(self, listener):
if hasattr(listener, "enterPrimitiveLiteral"):
listener.enterPrimitiveLiteral(self)
def exitRule(self, listener):
if hasattr(listener, "exitPrimitiveLiteral"):
listener.exitPrimitiveLiteral(self)
def accept(self, visitor):
if hasattr(visitor, "visitPrimitiveLiteral"):
return visitor.visitPrimitiveLiteral(self)
else:
return visitor.visitChildren(self)
def primitiveLiteral(self):
localctx = STIXPatternParser.PrimitiveLiteralContext(self, self._ctx, self.state)
self.enterRule(localctx, 32, self.RULE_primitiveLiteral)
try:
self.state = 226
self._errHandler.sync(self)
token = self._input.LA(1)
if token in [STIXPatternParser.IntNegLiteral, STIXPatternParser.IntPosLiteral, STIXPatternParser.FloatNegLiteral, STIXPatternParser.FloatPosLiteral, STIXPatternParser.HexLiteral, STIXPatternParser.BinaryLiteral, STIXPatternParser.StringLiteral, STIXPatternParser.TimestampLiteral]:
self.enterOuterAlt(localctx, 1)
self.state = 224
self.orderableLiteral()
pass
elif token in [STIXPatternParser.BoolLiteral]:
self.enterOuterAlt(localctx, 2)
self.state = 225
self.match(STIXPatternParser.BoolLiteral)
pass
else:
raise NoViableAltException(self)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class OrderableLiteralContext(ParserRuleContext):
def __init__(self, parser, parent=None, invokingState=-1):
super(STIXPatternParser.OrderableLiteralContext, self).__init__(parent, invokingState)
self.parser = parser
def IntPosLiteral(self):
return self.getToken(STIXPatternParser.IntPosLiteral, 0)
def IntNegLiteral(self):
return self.getToken(STIXPatternParser.IntNegLiteral, 0)
def FloatPosLiteral(self):
return self.getToken(STIXPatternParser.FloatPosLiteral, 0)
def FloatNegLiteral(self):
return self.getToken(STIXPatternParser.FloatNegLiteral, 0)
def StringLiteral(self):
return self.getToken(STIXPatternParser.StringLiteral, 0)
def BinaryLiteral(self):
return self.getToken(STIXPatternParser.BinaryLiteral, 0)
def HexLiteral(self):
return self.getToken(STIXPatternParser.HexLiteral, 0)
def TimestampLiteral(self):
return self.getToken(STIXPatternParser.TimestampLiteral, 0)
def getRuleIndex(self):
return STIXPatternParser.RULE_orderableLiteral
def enterRule(self, listener):
if hasattr(listener, "enterOrderableLiteral"):
listener.enterOrderableLiteral(self)
def exitRule(self, listener):
if hasattr(listener, "exitOrderableLiteral"):
listener.exitOrderableLiteral(self)
def accept(self, visitor):
if hasattr(visitor, "visitOrderableLiteral"):
return visitor.visitOrderableLiteral(self)
else:
return visitor.visitChildren(self)
def orderableLiteral(self):
localctx = STIXPatternParser.OrderableLiteralContext(self, self._ctx, self.state)
self.enterRule(localctx, 34, self.RULE_orderableLiteral)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 228
_la = self._input.LA(1)
if not((((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << STIXPatternParser.IntNegLiteral) | (1 << STIXPatternParser.IntPosLiteral) | (1 << STIXPatternParser.FloatNegLiteral) | (1 << STIXPatternParser.FloatPosLiteral) | (1 << STIXPatternParser.HexLiteral) | (1 << STIXPatternParser.BinaryLiteral) | (1 << STIXPatternParser.StringLiteral) | (1 << STIXPatternParser.TimestampLiteral))) != 0)):
self._errHandler.recoverInline(self)
else:
self._errHandler.reportMatch(self)
self.consume()
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
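    # sempred() dispatches semantic-predicate checks by rule index; the
    # precpred() calls below encode operator precedence for the
    # left-recursive rules rewritten above.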
def sempred(self, localctx, ruleIndex, predIndex):
        if self._predicates is None:
self._predicates = dict()
self._predicates[1] = self.observationExpressions_sempred
self._predicates[2] = self.observationExpressionOr_sempred
self._predicates[3] = self.observationExpressionAnd_sempred
self._predicates[4] = self.observationExpression_sempred
self._predicates[5] = self.comparisonExpression_sempred
self._predicates[6] = self.comparisonExpressionAnd_sempred
self._predicates[14] = self.objectPathComponent_sempred
pred = self._predicates.get(ruleIndex, None)
if pred is None:
raise Exception("No predicate with index:" + str(ruleIndex))
else:
return pred(localctx, predIndex)
def observationExpressions_sempred(self, localctx, predIndex):
if predIndex == 0:
return self.precpred(self._ctx, 2)
def observationExpressionOr_sempred(self, localctx, predIndex):
if predIndex == 1:
return self.precpred(self._ctx, 2)
def observationExpressionAnd_sempred(self, localctx, predIndex):
if predIndex == 2:
return self.precpred(self._ctx, 2)
def observationExpression_sempred(self, localctx, predIndex):
if predIndex == 3:
return self.precpred(self._ctx, 3)
if predIndex == 4:
return self.precpred(self._ctx, 2)
if predIndex == 5:
return self.precpred(self._ctx, 1)
def comparisonExpression_sempred(self, localctx, predIndex):
if predIndex == 6:
return self.precpred(self._ctx, 2)
def comparisonExpressionAnd_sempred(self, localctx, predIndex):
if predIndex == 7:
return self.precpred(self._ctx, 2)
def objectPathComponent_sempred(self, localctx, predIndex):
if predIndex == 8:
return self.precpred(self._ctx, 3)
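
# Minimal usage sketch (an assumption, not part of the generated file: it
# presumes the antlr4 Python runtime and the companion generated
# STIXPatternLexer module are importable, and that the grammar's start rule
# is named `pattern`):
#
#   from antlr4 import InputStream, CommonTokenStream
#   from STIXPatternLexer import STIXPatternLexer
#
#   tokens = CommonTokenStream(STIXPatternLexer(InputStream(
#       "[ipv4-addr:value = '198.51.100.1']")))
#   tree = STIXPatternParser(tokens).pattern()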
| 39.504694 | 399 | 0.60621 |
20f6f8511b493f65d1edfb25d769e7fe98ade580 | 479 | py | Python | app/app/urls.py | pinussilvestrus/django-solid-app | de3f450a0e1c283e86e067ce4d78cbd22da7c394 | ["MIT"] | null | null | null | app/app/urls.py | pinussilvestrus/django-solid-app | de3f450a0e1c283e86e067ce4d78cbd22da7c394 | ["MIT"] | null | null | null | app/app/urls.py | pinussilvestrus/django-solid-app | de3f450a0e1c283e86e067ce4d78cbd22da7c394 | ["MIT"] | null | null | null |
from django.contrib import admin
from django.urls import path, include # add this
from rest_framework import routers # add this
from todo import views # add this
router = routers.DefaultRouter() # add this
router.register(r'todos', views.TodoView, 'todo') # add this
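# DefaultRouter auto-generates the list/detail routes for the registered
# ViewSet (e.g. todos/ and todos/<pk>/ once included under 'api/' below),
# plus a browsable API root view.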
urlpatterns = [
    path('admin/', admin.site.urls),
    path('api/', include(router.urls)),  # add this
] | 43.545455 | 105 | 0.565762 |
b213380ca7e6972830c5d13bc7b06bee5a640aed | 1,914 | py | Python | official/cv/MCNN/preprocess.py | mindspore-ai/models | 9127b128e2961fd698977e918861dadfad00a44c | ["Apache-2.0"] | 77 | 2021-10-15T08:32:37.000Z | 2022-03-30T13:09:11.000Z | official/cv/MCNN/preprocess.py | mindspore-ai/models | 9127b128e2961fd698977e918861dadfad00a44c | ["Apache-2.0"] | 3 | 2021-10-30T14:44:57.000Z | 2022-02-14T06:57:57.000Z | official/cv/MCNN/preprocess.py | mindspore-ai/models | 9127b128e2961fd698977e918861dadfad00a44c | ["Apache-2.0"] | 24 | 2021-10-15T08:32:45.000Z | 2022-03-24T18:45:20.000Z |
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""pre process for 310 inference"""
import os
import argparse
import cv2
import numpy as np
batch_size = 1
parser = argparse.ArgumentParser(description="mcnn preprocess data")
parser.add_argument("--dataset_path", type=str, default="./test_data/images/", help="dataset path.")
parser.add_argument("--output_path", type=str, default="./test_data/preprocess_data/", help="output path.")
args = parser.parse_args()
def save_mnist_to_jpg(data_path, output_path):
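    """Despite the legacy name, this neither reads MNIST nor writes JPEGs:
    each grayscale test image is resized so both sides are multiples of 4,
    centered on a zero-padded 1024x1024 canvas, and dumped as a float32
    .bin tensor for Ascend 310 inference."""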
    data_files = [filename for filename in os.listdir(data_path)
                  if os.path.isfile(os.path.join(data_path, filename))]
if not os.path.exists(output_path):
os.makedirs(output_path)
for fname in data_files:
img = cv2.imread(os.path.join(data_path, fname), 0)
img = img.astype(np.float32, copy=False)
ht = img.shape[0]
wd = img.shape[1]
ht_1 = (ht // 4) * 4
wd_1 = (wd // 4) * 4
img = cv2.resize(img, (wd_1, ht_1))
hang = (1024 - ht_1) // 2
lie = (1024 - wd_1) // 2
img = np.pad(img, ((hang, hang), (lie, lie)), 'constant')
img.tofile(os.path.join(output_path, fname+'.bin'))
if __name__ == '__main__':
save_mnist_to_jpg(args.dataset_path, args.output_path)
| 39.061224 | 107 | 0.655695 |
d94ba05c01e974d4d7cae916196ddc5bfbe664ca | 496 | py | Python | roles/diodonfrost.terraform/filter_plugins/sort_versions.py | justin-p/ansible-my-linux-workstation | 3f1173f859442f3d5717c839b737a4a13628ce54 | ["MIT"] | 8 | 2019-05-03T00:08:39.000Z | 2021-02-18T14:25:00.000Z | filter_plugins/sort_versions.py | diodonfrost/ansible-role-jetbrains-toolbox | e52bb48d991eb4ffa5097c860939937bb5be639d | ["Apache-2.0"] | 5 | 2020-10-08T19:25:41.000Z | 2020-10-08T19:26:55.000Z | filter_plugins/sort_versions.py | diodonfrost/ansible-role-jetbrains-toolbox | e52bb48d991eb4ffa5097c860939937bb5be639d | ["Apache-2.0"] | 6 | 2020-11-25T23:27:30.000Z | 2022-03-28T09:37:16.000Z |
"""Sort complex versions"""
from distutils.version import LooseVersion
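# Note: distutils (and LooseVersion with it) is deprecated since Python 3.10;
# packaging.version or natsort are the usual replacements.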
def filter_sort_versions(value):
"""
Ansible entrypoint function
"""
return sorted(value, key=LooseVersion)
class FilterModule(object):
"""
Sort complex versions like 0.10.2, 0.1.1, 0.10.12
"""
filter_sort = {
'sort_versions': filter_sort_versions,
}
def filters(self):
"""
Return the sorted values
"""
return self.filter_sort
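
# Usage sketch in a template/playbook (hypothetical values):
#   {{ ['0.10.2', '0.1.1', '0.10.12'] | sort_versions }}
#   -> ['0.1.1', '0.10.2', '0.10.12']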
| 19.076923 | 57 | 0.604839 |
5f1c9159d690df35b1299e502a552dd8aa7a672c | 6,889 | py | Python | vv_create.py | y-chan/amitaro-ita-corpus | 16b3a4473c9c4bf68d1eb30b980ccb1828668e40 | ["Unlicense"] | null | null | null | vv_create.py | y-chan/amitaro-ita-corpus | 16b3a4473c9c4bf68d1eb30b980ccb1828668e40 | ["Unlicense"] | null | null | null | vv_create.py | y-chan/amitaro-ita-corpus | 16b3a4473c9c4bf68d1eb30b980ccb1828668e40 | ["Unlicense"] | null | null | null |
import copy
import uuid
import json
vowel_phoneme_list = ["a", "i", "u", "e", "o", "N", "A", "I", "U", "E", "O", "cl"]
_mora_list_minimum = [
["ヴォ", "v", "o"],
["ヴェ", "v", "e"],
["ヴィ", "v", "i"],
["ヴァ", "v", "a"],
["ヴ", "v", "u"],
["ン", "", "N"],
["ワ", "w", "a"],
["ロ", "r", "o"],
["レ", "r", "e"],
["ル", "r", "u"],
["リョ", "ry", "o"],
["リュ", "ry", "u"],
["リャ", "ry", "a"],
["リェ", "ry", "e"],
["リ", "r", "i"],
["ラ", "r", "a"],
["ヨ", "y", "o"],
["ユ", "y", "u"],
["ヤ", "y", "a"],
["モ", "m", "o"],
["メ", "m", "e"],
["ム", "m", "u"],
["ミョ", "my", "o"],
["ミュ", "my", "u"],
["ミャ", "my", "a"],
["ミェ", "my", "e"],
["ミ", "m", "i"],
["マ", "m", "a"],
["ポ", "p", "o"],
["ボ", "b", "o"],
["ホ", "h", "o"],
["ペ", "p", "e"],
["ベ", "b", "e"],
["ヘ", "h", "e"],
["プ", "p", "u"],
["ブ", "b", "u"],
["フォ", "f", "o"],
["フェ", "f", "e"],
["フィ", "f", "i"],
["ファ", "f", "a"],
["フ", "f", "u"],
["ピョ", "py", "o"],
["ピュ", "py", "u"],
["ピャ", "py", "a"],
["ピェ", "py", "e"],
["ピ", "p", "i"],
["ビョ", "by", "o"],
["ビュ", "by", "u"],
["ビャ", "by", "a"],
["ビェ", "by", "e"],
["ビ", "b", "i"],
["ヒョ", "hy", "o"],
["ヒュ", "hy", "u"],
["ヒャ", "hy", "a"],
["ヒェ", "hy", "e"],
["ヒ", "h", "i"],
["パ", "p", "a"],
["バ", "b", "a"],
["ハ", "h", "a"],
["ノ", "n", "o"],
["ネ", "n", "e"],
["ヌ", "n", "u"],
["ニョ", "ny", "o"],
["ニュ", "ny", "u"],
["ニャ", "ny", "a"],
["ニェ", "ny", "e"],
["ニ", "n", "i"],
["ナ", "n", "a"],
["ドゥ", "d", "u"],
["ド", "d", "o"],
["トゥ", "t", "u"],
["ト", "t", "o"],
["デョ", "dy", "o"],
["デュ", "dy", "u"],
["デャ", "dy", "a"],
["デェ", "dy", "e"],
["ディ", "d", "i"],
["デ", "d", "e"],
["テョ", "ty", "o"],
["テュ", "ty", "u"],
["テャ", "ty", "a"],
["ティ", "t", "i"],
["テ", "t", "e"],
["ツォ", "ts", "o"],
["ツェ", "ts", "e"],
["ツィ", "ts", "i"],
["ツァ", "ts", "a"],
["ツ", "ts", "u"],
["ッ", "", "cl"],
["チョ", "ch", "o"],
["チュ", "ch", "u"],
["チャ", "ch", "a"],
["チェ", "ch", "e"],
["チ", "ch", "i"],
["ダ", "d", "a"],
["タ", "t", "a"],
["ゾ", "z", "o"],
["ソ", "s", "o"],
["ゼ", "z", "e"],
["セ", "s", "e"],
["ズィ", "z", "i"],
["ズ", "z", "u"],
["スィ", "s", "i"],
["ス", "s", "u"],
["ジョ", "j", "o"],
["ジュ", "j", "u"],
["ジャ", "j", "a"],
["ジェ", "j", "e"],
["ジ", "j", "i"],
["ショ", "sh", "o"],
["シュ", "sh", "u"],
["シャ", "sh", "a"],
["シェ", "sh", "e"],
["シ", "sh", "i"],
["ザ", "z", "a"],
["サ", "s", "a"],
["ゴ", "g", "o"],
["コ", "k", "o"],
["ゲ", "g", "e"],
["ケ", "k", "e"],
["グヮ", "gw", "a"],
["グ", "g", "u"],
["クヮ", "kw", "a"],
["ク", "k", "u"],
["ギョ", "gy", "o"],
["ギュ", "gy", "u"],
["ギャ", "gy", "a"],
["ギェ", "gy", "e"],
["ギ", "g", "i"],
["キョ", "ky", "o"],
["キュ", "ky", "u"],
["キャ", "ky", "a"],
["キェ", "ky", "e"],
["キ", "k", "i"],
["ガ", "g", "a"],
["カ", "k", "a"],
["オ", "", "o"],
["エ", "", "e"],
["ウォ", "w", "o"],
["ウェ", "w", "e"],
["ウィ", "w", "i"],
["ウ", "", "u"],
["イェ", "y", "e"],
["イ", "", "i"],
["ア", "", "a"],
]
openjtalk_mora2text = {
consonant + vowel: text for [text, consonant, vowel] in _mora_list_minimum
}
def mora_to_text(mora: str) -> str:
if mora[-1:] in ["A", "I", "U", "E", "O"]:
        # lowercase the devoiced (uppercase) vowel
mora = mora[:-1] + mora[-1].lower()
if mora in openjtalk_mora2text:
return openjtalk_mora2text[mora]
else:
return mora
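
# e.g. mora_to_text("kyA") lowercases the devoiced vowel to "kya" -> "キャ".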
with open("index.csv") as f:
labels = f.read().split("\n")
project_base = {
"appVersion": "0.10.4",
"audioKeys": [],
"audioItems": {}
}
query_base = {
"speedScale": 1,
"pitchScale": 0,
"intonationScale": 1,
"volumeScale": 1,
"prePhonemeLength": 0.1,
"postPhonemeLength": 0.1,
"outputSamplingRate":24000,
"outputStereo": False,
"kana": ""
}
with open("./transcript.txt", encoding="utf8") as ts:
transcript = [s.split(",")[0] for s in ts.read().split("\n")]
project = copy.deepcopy(project_base)
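
# `labels` holds two lines per utterance: the phoneme sequence on even
# indices and the matching accent marks on odd indices.  Utterances are
# flushed to .vvproj files in batches of 50 (i advances by 2 per item).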
for i in range(0, len(labels), 2):
if i != 0 and i % 100 == 0:
with open(f"project{(i//2) - 49}-{i//2}.vvproj", mode="w", encoding="utf8") as pjf:
pjf.write(json.dumps(project, ensure_ascii=False))
project = copy.deepcopy(project_base)
phonemes = labels[i].split()
    accents = labels[i+1].replace(", ", ",").split()  # str.split() already collapses whitespace runs, so the old space-for-space replaces were redundant
phonemes[0] = phonemes[0].split(",")[1]
accents[0] = accents[0].split(",")[1]
accent_phrases = []
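    # The loop below only inspects "]" (records the accent position), "#" and
    # "?" (end of an accent phrase, "?" marking it interrogative) and the
    # "pau" phoneme (pause mora); any other accent marks are passed over.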
accent_phrase = {}
count = 0
mora = {}
for j, phoneme in enumerate(phonemes):
if accent_phrase.get("moras") is None:
accent_phrase["moras"] = []
if phoneme == "pau":
if accent_phrase.get("accent") is None:
accent_phrase["accent"] = count
accent_phrase["pauseMora"] = {
"vowel": phoneme,
"vowelLength": 0,
"pitch": 0,
"text": "、"
}
accent_phrase["isInterrogative"] = False
accent_phrases.append(copy.deepcopy(accent_phrase))
accent_phrase = {}
count = 0
continue
if phoneme in vowel_phoneme_list:
mora["vowel"] = phoneme
mora["vowelLength"] = 0
if mora.get("consonant") is not None:
text = mora_to_text(mora["consonant"] + phoneme)
else:
text = mora_to_text(phoneme)
mora["text"] = text
mora["pitch"] = 0
count += 1
end = accents[j] in ["#", "?"]
if "]" == accents[j]:
accent_phrase["accent"] = count
elif end and accent_phrase.get("accent") is None:
accent_phrase["accent"] = count
accent_phrase["moras"].append(copy.deepcopy(mora))
if end:
accent_phrase["isInterrogative"] = accents[j] == "?"
accent_phrases.append(copy.deepcopy(accent_phrase))
accent_phrase = {}
count = 0
mora = {}
else:
mora["consonant"] = phoneme
mora["consonantLength"] = 0
query = copy.deepcopy(query_base)
query["accentPhrases"] = accent_phrases
key = str(uuid.uuid4())
project["audioKeys"].append(key)
project["audioItems"][key] = {
# "text": transcript[i].split(":")[1],
"text": transcript[i//2],
"styleId": 0,
"query": query
}
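
# Flush the final, partial batch; the "-23" start index below appears
# hard-coded to this corpus' leftover batch size rather than computed
# from len(labels).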
with open(f"project{(i//2) - 23}-{(i+2)//2}.vvproj", mode="w", encoding="utf8") as pjf:
pjf.write(json.dumps(project, ensure_ascii=False))
| 26.394636 | 91 | 0.367397 |
9938c8fac9e196a561aef5a58f306bf1da4cb7f5 | 9,188 | py | Python | pyPLANES/utils/io.py | matael/pyPLANES | 7f591090446303884c9a3d049e42233efae0b7f4 | ["MIT"] | null | null | null | pyPLANES/utils/io.py | matael/pyPLANES | 7f591090446303884c9a3d049e42233efae0b7f4 | ["MIT"] | null | null | null | pyPLANES/utils/io.py | matael/pyPLANES | 7f591090446303884c9a3d049e42233efae0b7f4 | ["MIT"] | 1 | 2020-12-15T16:24:08.000Z | 2020-12-15T16:24:08.000Z |
#! /usr/bin/env python
# -*- coding:utf8 -*-
#
# utils_io.py
#
# This file is part of pyPLANES, software distributed under the MIT license.
# For any question, please contact one of the authors cited below.
#
# Copyright (c) 2020
# Olivier Dazel <olivier.dazel@univ-lemans.fr>
# Mathieu Gaborit <gaborit@kth.se>
# Peter Göransson <pege@kth.se>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
import socket
import datetime
import time
import numpy as np
import pyvtk
from matplotlib import cm
import matplotlib.pyplot as plt
import matplotlib.tri as mtri
from pyPLANES.fem.fem_entities_surfacic import *
from pyPLANES.fem.fem_entities_volumic import *
# def initialisation_out_files_plain(self):
# pass
from pymls import from_yaml, Solver, Layer, backing
from mediapack import Air, Fluid
def load_material(mat):
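    """Return a pymls medium: plain air as an equivalent fluid, anything else loaded from materials/<name>.yaml."""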
if mat == "Air":
Air_mat = Air()
return Fluid(c=Air_mat.c,rho=Air_mat.rho)
else:
return from_yaml("materials/" + mat + ".yaml")
def result_pymls(**kwargs):
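    """Solve the layered medium with pymls over `frequencies` and return (freq, R, T); T is False for a rigid backing."""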
name_project = kwargs.get("name_project", "unnamed_project")
ml = kwargs.get("ml", False)
termination = kwargs.get("termination", "rigid")
theta_d = kwargs.get("theta_d", 45)
freq = kwargs.get("frequencies", np.array([440]))
plot_RT = kwargs.get("plot_RT", False)
solver = Solver()
for _l in ml:
mat = load_material(_l[0])
solver.layers.append(Layer(mat, _l[1]))
R = []
if termination in ["rigid", "Rigid", "Rigid Wall", "Wall"]:
solver.backing = backing.rigid
T = False
else:
T = []
solver.backing = backing.transmission
for _f in freq:
_ = solver.solve(_f, theta_d)
R.append(_["R"][0])
if termination == "transmission":
T.append(_["T"][0])
if plot_RT:
plt.figure(name_project + "/ Reflection coefficient")
        plt.plot(freq, [_.real for _ in R], 'r', label="Re(R) pymls")
        plt.plot(freq, [_.imag for _ in R], 'b', label="Im(R) pymls")
plt.legend()
if T is not False:
plt.figure(name_project + "/ Transmission coefficient")
            plt.plot(freq, [_.real for _ in T], 'r', label="Re(T) pymls")
            plt.plot(freq, [_.imag for _ in T], 'b', label="Im(T) pymls")
plt.legend()
return freq, R, T
def close_out_files(self):
duration = time.time()-self.start_time
self.info_file.write("Calculus ended at %s.\n"%(datetime.datetime.now()))
self.info_file.write("Total duration = {} s\n".format(duration))
self.info_file.write("duration / freq (averaged) = {} s\n".format(duration/len(self.frequencies)))
self.out_file.close()
self.info_file.close()
def print_entities(self):
for _ in self.entities:
print(_)
def print_elements(self):
for _ in self.elements[1:]:
print(_)
def print_vertices(self):
for _ in self.vertices[1:]:
print(_)
def print_edges(self):
for _ in self.edges:
print(_)
def print_faces(self):
for _ in self.faces:
print(_)
def print_model_entities(self):
for _ in self.model_entities:
print(_)
def print_reference_elements(self):
print(self.reference_elements)
def plot_fem_solution(self, kx=0.):
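    """Plot the FEM solution fields selected by the boolean flags in self.plot
    (solid displacements, pressure traces, 2D pressure map)."""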
if self.plot[5]: # Determination of the maximum value of the pressure
p_max = 0
p_min = 1e308
for _en in self.entities:
if isinstance(_en, FluidFem):
for _elem in _en.elements:
_, __, p_elem = _elem.display_sol(3)
_max = np.amax(np.abs(p_elem))
_min = np.amin(np.abs(p_elem))
                    if _max > p_max: p_max = _max
                    if _min < p_min: p_min = _min
if any(self.plot[3:]):
x, y, u_x, u_y, pr = [], [], [], [], []
for _en in self.entities:
if isinstance(_en, FluidFem):
if any(self.plot[2::3]): # Plot of pressure == True
for ie, _elem in enumerate(_en.elements):
# print(ie/len(_en.elements))
x_elem, y_elem, p_elem = _elem.display_sol(3)
p_elem = p_elem[:, 0]
p_elem *= np.exp(1j*kx*x_elem)
if self.plot[2]:
plt.figure("Pressure")
plt.plot(y_elem, np.abs(p_elem), 'r+')
plt.plot(y_elem, np.imag(p_elem), 'm.')
if self.plot[5]:
triang = mtri.Triangulation(x_elem, y_elem)
plt.figure("Pressure map")
# plt.tricontourf(triang, np.abs(p_elem), cmap=cm.jet, levels=np.linspace(p_min, p_max,40))
x.extend(list(x_elem))
y.extend(list(y_elem))
pr.extend(list(p_elem))
elif isinstance(_en, PemFem):
if any(self.plot): # Plot of pressure == True
for _elem in _en.elements:
x_elem, y_elem, f_elem = _elem.display_sol([0, 1, 3])
ux_elem = f_elem[:, 0]*np.exp(1j*kx*x_elem)
uy_elem = f_elem[:, 1]*np.exp(1j*kx*x_elem)
p_elem = f_elem[:, 2]*np.exp(1j*kx*x_elem)
if self.plot[0]:
plt.figure("Solid displacement along x")
plt.plot(y_elem, np.abs(ux_elem), 'r+')
plt.plot(y_elem, np.imag(ux_elem), 'm.')
if self.plot[1]:
plt.figure("Solid displacement along y")
plt.plot(y_elem, np.abs(uy_elem), 'r+')
plt.plot(y_elem, np.imag(uy_elem), 'm.')
if self.plot[2]:
plt.figure("Pressure")
plt.plot(y_elem, np.abs(p_elem), 'r+')
plt.plot(y_elem, np.imag(p_elem), 'm.')
if self.plot[5]:
x.extend(list(x_elem))
y.extend(list(y_elem))
pr.extend(list(p_elem))
elif isinstance(_en, ElasticFem):
if any(self.plot): # Plot of pressure == True
for _elem in _en.elements:
x_elem, y_elem, f_elem = _elem.display_sol([0, 1, 3])
ux_elem = f_elem[:, 0]*np.exp(1j*kx*x_elem)
uy_elem = f_elem[:, 1]*np.exp(1j*kx*x_elem)
if self.plot[0]:
plt.figure("Solid displacement along x")
plt.plot(y_elem, np.abs(ux_elem), 'r+')
plt.plot(y_elem, np.imag(ux_elem), 'm.')
if self.plot[1]:
plt.figure("Solid displacement along y")
plt.plot(y_elem, np.abs(uy_elem), 'r+')
plt.plot(y_elem, np.imag(uy_elem), 'm.')
if any(self.plot[3:]):
triang = mtri.Triangulation(x, y)
if self.plot[5]:
plt.figure("Pressure map")
plt.tricontourf(triang, np.abs(pr), 40, cmap=cm.jet)
self.display_mesh()
plt.colorbar()
plt.axis("off")
plt.axis('equal')
# plt.show()
def export_paraview(self):
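    """Write the mesh and the nodal pressure field to a VTK file for ParaView,
    numbering the output files by the export counter."""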
if self.export_paraview == 0:
self.vtk_points = [_v.coord for _v in self.vertices[1:]]
self.vtk_triangle = [[_e.vertices[0].tag-1, _e.vertices[1].tag-1, _e.vertices[2].tag-1] for _e in self.elements[1:] if _e.typ==2]
pressure = [np.abs(_v.sol[3]) for _v in self.vertices[1:]]
    # Hack to make the tower and the letters blink in IAGS 2021
pressure_max = max(pressure)
    light_on = self.export_paraview % 4
    if light_on < 2:
tower_on = 0
else:
tower_on = 1
for _ent in self.fem_entities:
        if _ent.dim == 2:
if _ent.mat.MEDIUM_TYPE == "eqf":
if _ent.mat.name == "tower":
for _elem in _ent.elements:
for _v in _elem.vertices:
_v.sol[3] = (1+(-1)**tower_on)*(pressure_max/2.)
if _ent.mat.name == "letter":
for _elem in _ent.elements:
for _v in _elem.vertices:
_v.sol[3] = (1+(-1)**(tower_on+1))*(pressure_max/2.)
pressure = [np.abs(_v.sol[3]) for _v in self.vertices[1:]]
vtk = pyvtk.VtkData(pyvtk.UnstructuredGrid(self.vtk_points,triangle=self.vtk_triangle), pyvtk.PointData(pyvtk.Scalars(pressure,name='Pressure')))
vtk.tofile("vtk/"+self.name_project + "-{}".format(self.export_paraview))
    self.export_paraview += 1
| 37.655738 | 149 | 0.556922 |
83db61cc5fb33cd485a1668387c1886716867da4 | 7,385 | py | Python | morpheus/app/utils.py | alextford11/morpheus | 74cd3482e06b66316847587dcdd124ac989f809d | [
"MIT"
] | null | null | null | morpheus/app/utils.py | alextford11/morpheus | 74cd3482e06b66316847587dcdd124ac989f809d | [
"MIT"
] | null | null | null | morpheus/app/utils.py | alextford11/morpheus | 74cd3482e06b66316847587dcdd124ac989f809d | [
"MIT"
] | null | null | null | from dataclasses import dataclass
import base64
import hashlib
import hmac
import re
import secrets
import ujson
from aiohttp.hdrs import METH_GET, METH_HEAD, METH_OPTIONS
from aiohttp.web import Application, Request, Response
from aiohttp.web_exceptions import HTTPException
from aiohttp_jinja2 import render_template
from arq import ArqRedis
from asyncio import shield
from atoolbox import JsonErrors
from atoolbox.json_tools import JSON_CONTENT_TYPE
from datetime import datetime, timezone
from functools import update_wrapper
from markupsafe import Markup
from pathlib import Path
from pydantic import BaseModel, ValidationError
from typing import Dict, Optional, Type, TypeVar
from .ext import ApiError
from .models import SendMethod
from .settings import Settings
THIS_DIR = Path(__file__).parent.resolve()
AModel = TypeVar('AModel', bound=BaseModel)
class Session(BaseModel):
company: str
expires: datetime
@dataclass
class PreResponse:
text: str = None
body: bytes = None
status: int = 200
content_type: str = 'text/plain'
headers: Dict[str, str] = None
class View:
headers = None
def __init__(self, request):
self.request: Request = request
self.app: Application = request.app
self.settings: Settings = self.app['settings']
self.session: Optional[Session] = None
self.redis: ArqRedis = self.app['redis']
def full_url(self, path=''):
return Markup(f'{self.request.scheme}://{self.request.host}{path}')
@classmethod
def view(cls):
async def view(request):
self = cls(request)
await self.authenticate(request)
return await self._raw_call(request)
view.view_class = cls
# take name and docstring from class
update_wrapper(view, cls, updated=())
# and possible attributes set by decorators
update_wrapper(view, cls.call, assigned=())
return view
async def _raw_call(self, request):
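        # Safe methods (GET/OPTIONS/HEAD) may be cancelled freely; mutating requests
        # are shielded so a client disconnect cannot abort them mid-write.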
try:
if request.method in {METH_GET, METH_OPTIONS, METH_HEAD}:
r = await self.call(request)
else:
r = await shield(self.call(request))
except HTTPException as e:
if self.headers:
e.headers.update(self.headers)
raise e
return self._modify_response(request, r)
@classmethod
def _modify_response(cls, request, response):
if isinstance(response, PreResponse):
if response.text:
body = response.text.encode()
elif response.body:
body = response.body
else:
                raise RuntimeError('either body or text is required on PreResponse')
response = Response(
body=body, status=response.status, headers=response.headers, content_type=response.content_type
)
if cls.headers:
response.headers.update(cls.headers)
return response
async def authenticate(self, request):
pass
async def call(self, request):
raise NotImplementedError()
async def request_data(self, model: Type[BaseModel]) -> AModel:
error_details = None
try:
data = await self.request.json()
except ValueError:
error_msg = 'Error decoding JSON'
else:
try:
return model.parse_obj(data)
except ValidationError as e:
error_msg = 'Invalid Data'
error_details = e.errors()
raise JsonErrors.HTTPBadRequest(message=error_msg, details=error_details)
def get_arg_int(self, name, default=None):
v = self.request.query.get(name)
if v is None:
return default
try:
return int(v)
except ValueError:
raise JsonErrors.HTTPBadRequest(f"invalid get argument '{name}': {v!r}")
@classmethod
def json_response(cls, *, status_=200, json_str_=None, headers_=None, **data):
if not json_str_:
json_str_ = ujson.dumps(data)
return Response(text=json_str_, status=status_, content_type=JSON_CONTENT_TYPE, headers=headers_)
class AuthView(View):
"""
token authentication with no "Token " prefix
"""
auth_token_field = None
async def authenticate(self, request):
auth_token = getattr(self.settings, self.auth_token_field)
if not secrets.compare_digest(auth_token, request.headers.get('Authorization', '')):
raise JsonErrors.HTTPForbidden('Invalid Authorization header')
class ServiceView(AuthView):
"""
Views used by services. Services are in charge and can be trusted to do "whatever they like".
"""
auth_token_field = 'auth_key'
class UserView(View):
"""
Views used by users via ajax
"""
headers = {'Access-Control-Allow-Origin': '*'}
async def authenticate(self, request):
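        # The query string carries company/expires plus an HMAC-SHA256 signature
        # computed over "company:expires" with the shared user_auth_key.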
company = request.query.get('company', None)
expires = request.query.get('expires', None)
body = f'{company}:{expires}'.encode()
expected_sig = hmac.new(self.settings.user_auth_key, body, hashlib.sha256).hexdigest()
signature = request.query.get('signature', '-')
if not secrets.compare_digest(expected_sig, signature):
raise JsonErrors.HTTPForbidden('Invalid token', headers=self.headers)
try:
self.session = Session(company=company, expires=expires)
except ValidationError as e:
raise JsonErrors.HTTPBadRequest(message='Invalid Data', details=e.errors(), headers=self.headers)
if self.session.expires < datetime.utcnow().replace(tzinfo=timezone.utc):
raise JsonErrors.HTTPForbidden('token expired', headers=self.headers)
class BasicAuthView(View):
"""
Views used by admin, applies basic auth.
"""
async def authenticate(self, request):
token = re.sub('^Basic *', '', request.headers.get('Authorization', '')) or 'x'
try:
_, password = base64.b64decode(token).decode().split(':', 1)
except (ValueError, UnicodeDecodeError):
password = ''
if not secrets.compare_digest(password, self.settings.admin_basic_auth_password):
raise JsonErrors.HTTPUnauthorized('Invalid basic auth', headers={'WWW-Authenticate': 'Basic'})
class TemplateView(View):
template = None
@classmethod
def _modify_response(cls, request, context):
status = context.pop('http_status_', None)
response = render_template(cls.template, request, context)
if status:
response.set_status(status)
return super()._modify_response(request, response)
class AdminView(TemplateView, BasicAuthView):
template = 'admin-list.jinja'
async def get_context(self, morpheus_api):
raise NotImplementedError()
async def call(self, request):
morpheus_api = self.app['morpheus_api']
ctx = dict(
methods=[m.value for m in SendMethod],
method=self.request.match_info.get(
'method', self.request.query.get('method', SendMethod.email_mandrill.value)
),
)
try:
ctx.update(await self.get_context(morpheus_api))
except ApiError as e:
raise JsonErrors.HTTPBadRequest(str(e))
return ctx
| 31.559829 | 111 | 0.64807 |
b486f76ee0d686b9aed85f8bba9f703f30c9de6d | 805 | py | Python | electrum_mona/scripts/get_history.py | david4neblio/electrum-mona | 2d13b066be2d6205aeaa7ca859884c3ec1b92e83 | [
"MIT"
] | 61 | 2017-08-06T08:51:49.000Z | 2021-12-28T06:25:36.000Z | electrum_mona/scripts/get_history.py | david4neblio/electrum-mona | 2d13b066be2d6205aeaa7ca859884c3ec1b92e83 | [
"MIT"
] | 15 | 2017-09-12T07:15:01.000Z | 2021-12-28T06:25:15.000Z | electrum_mona/scripts/get_history.py | david4neblio/electrum-mona | 2d13b066be2d6205aeaa7ca859884c3ec1b92e83 | [
"MIT"
] | 27 | 2017-08-18T19:40:30.000Z | 2021-03-01T11:16:02.000Z | #!/usr/bin/env python3
import sys
import asyncio
from electrum_mona import bitcoin
from electrum_mona.network import Network
from electrum_mona.util import json_encode, print_msg, create_and_start_event_loop, log_exceptions
from electrum_mona.simple_config import SimpleConfig
try:
addr = sys.argv[1]
except Exception:
print("usage: get_history <bitcoin_address>")
sys.exit(1)
config = SimpleConfig()
loop, stopping_fut, loop_thread = create_and_start_event_loop()
network = Network(config)
network.start()
@log_exceptions
async def f():
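    """Fetch the scripthash history for the address, then release the main thread."""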
try:
sh = bitcoin.address_to_scripthash(addr)
hist = await network.get_history_for_scripthash(sh)
print_msg(json_encode(hist))
finally:
stopping_fut.set_result(1)
asyncio.run_coroutine_threadsafe(f(), loop)
| 23.676471 | 98 | 0.761491 |
952ffcb57709b8f488f0edffb0695aea7ef16fc0 | 1,837 | py | Python | eval_main_local.py | googleinterns/humangps | ce279292adfbf3f7f700f3f5889014be64bbc544 | [
"Apache-2.0"
] | 37 | 2021-05-14T05:35:51.000Z | 2022-03-30T02:46:33.000Z | eval_main_local.py | googleinterns/humangps | ce279292adfbf3f7f700f3f5889014be64bbc544 | [
"Apache-2.0"
] | 6 | 2021-05-28T10:05:47.000Z | 2021-11-17T15:23:08.000Z | eval_main_local.py | googleinterns/humangps | ce279292adfbf3f7f700f3f5889014be64bbc544 | [
"Apache-2.0"
] | 5 | 2021-05-23T21:50:06.000Z | 2022-02-09T13:42:21.000Z | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A evaluation framework using distributed strategy."""
import os
from absl import app
from absl import flags
import gin.tf
import train_eval_lib_local
flags.DEFINE_enum('mode', None, ['cpu', 'gpu'],
'Distributed strategy approach.')
flags.DEFINE_string('base_folder', None, 'Path to checkpoints/summaries.')
flags.DEFINE_string('job_name', '', 'Name of the job.')
flags.DEFINE_multi_string('gin_bindings', None, 'Gin parameter bindings.')
flags.DEFINE_multi_string('gin_configs', None, 'Gin config files.')
FLAGS = flags.FLAGS
def main(argv):
del argv # Unused.
gin.parse_config_files_and_bindings(
config_files=FLAGS.gin_configs,
bindings=FLAGS.gin_bindings,
skip_unknown=True)
base_folder = FLAGS.base_folder
train_base_folder = os.path.join(base_folder, 'train')
eval_base_folder = os.path.join(base_folder, FLAGS.job_name)
train_eval_lib_local.eval_pipeline(
eval_mode=FLAGS.mode,
dataset_params=gin.REQUIRED,
train_base_folder=train_base_folder,
eval_base_folder=eval_base_folder,
batch_size=gin.REQUIRED,
eval_name=FLAGS.job_name)
if __name__ == '__main__':
flags.mark_flag_as_required('mode')
flags.mark_flag_as_required('base_folder')
app.run(main)
| 31.135593 | 74 | 0.74687 |
ae294e72120eaf1f6485a035710a2a4d7778c9a5 | 101 | py | Python | run.py | easystef/covid-webapp | a0c6a303a4a275c8fdfc169ce73f3ce8c94cbfa5 | [
"MIT"
] | null | null | null | run.py | easystef/covid-webapp | a0c6a303a4a275c8fdfc169ce73f3ce8c94cbfa5 | [
"MIT"
] | null | null | null | run.py | easystef/covid-webapp | a0c6a303a4a275c8fdfc169ce73f3ce8c94cbfa5 | [
"MIT"
] | null | null | null | from app import app
if __name__ == "__main__":
app.run(host="127.0.0.1", port=8080, debug=True)
| 20.2 | 52 | 0.663366 |
d5cfecfd726e8f6859348f743c6474fda8de6765 | 1,878 | py | Python | flask-jwt/jwt_util.py | hmtmcse/flask-exp | b3dd28b0f283ada7e1ea81f2627678e6ce4ce6a8 | [
"Apache-2.0"
] | 3 | 2021-02-16T10:05:40.000Z | 2022-03-08T12:45:50.000Z | flask-jwt/jwt_util.py | hmtmcse/flask-exp | b3dd28b0f283ada7e1ea81f2627678e6ce4ce6a8 | [
"Apache-2.0"
] | null | null | null | flask-jwt/jwt_util.py | hmtmcse/flask-exp | b3dd28b0f283ada7e1ea81f2627678e6ce4ce6a8 | [
"Apache-2.0"
] | 1 | 2021-02-16T10:05:55.000Z | 2021-02-16T10:05:55.000Z | import datetime
import jwt
class JWTUtil:
ALGORITHMS: str = "HS256"
JWT_ACCESS_TOKEN_VALIDITY_MIN = 1
JWT_REFRESH_TOKEN_VALIDITY_MIN = 60
SECRET = "SECRET____KEY"
def get_token(self, exp: datetime, payload: dict = None, iss=None):
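        """Build an HS256-signed JWT expiring at `exp`, optionally stamping an issuer claim."""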
if not payload:
payload = {}
payload["exp"] = exp
if iss:
payload["iss"] = iss
return jwt.encode(payload, self.SECRET, algorithm=self.ALGORITHMS)
def get_access_token(self, payload: dict = None, iss=None):
validity = self.get_access_token_validity()
return self.get_token(validity, payload=payload, iss=iss)
def get_refresh_token(self, payload: dict = None, iss=None):
validity = self.get_refresh_token_validity()
return self.get_token(validity, payload=payload, iss=iss)
def validate_token(self, token: str):
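        """Decode and verify a token, returning its claims, or None if it has expired."""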
try:
return jwt.decode(token, self.SECRET, algorithms=[self.ALGORITHMS])
except jwt.ExpiredSignatureError:
return None
def get_access_token_validity(self, minutes=None):
if not minutes:
minutes = self.JWT_ACCESS_TOKEN_VALIDITY_MIN
return self.get_token_validity(minutes)
def get_refresh_token_validity(self, minutes=None):
if not minutes:
minutes = self.JWT_REFRESH_TOKEN_VALIDITY_MIN
return self.get_token_validity(minutes)
def get_token_validity(self, minutes):
return datetime.datetime.now(tz=datetime.timezone.utc) + datetime.timedelta(minutes=minutes)
if __name__ == "__main__":
jwt_util = JWTUtil()
_validity = jwt_util.get_access_token_validity()
print(_validity)
print(jwt_util.get_access_token())
response = jwt_util.validate_token("eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJleHAiOjE2MzcyMzg1NTJ9.jCDlzb89Pyyc36BCV2JUcLTIB_OnKmnc-VSx4oXPdB8")
print(response)
| 34.777778 | 147 | 0.696486 |
1522c371cd2520dce444393c2e70a1f09d267ed9 | 3,922 | py | Python | jittermodel/tests/test_base.py | ryanpdwyer/jittermodel | de0c230e242517442b8a6d98512eaa15386b7ce5 | [
"MIT"
] | 1 | 2015-07-16T23:24:49.000Z | 2015-07-16T23:24:49.000Z | jittermodel/tests/test_base.py | ryanpdwyer/jittermodel | de0c230e242517442b8a6d98512eaa15386b7ce5 | [
"MIT"
] | null | null | null | jittermodel/tests/test_base.py | ryanpdwyer/jittermodel | de0c230e242517442b8a6d98512eaa15386b7ce5 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Test UnitBase Classes
=========================
2013-12-12
Ryan Dwyer
"""
from jittermodel.base import (u, SimpleCantilever, Cantilever,
Experiment, Transistor)
from jittermodel.tests import pint_assert_almost_equal
from nose.tools import assert_raises
from pint import DimensionalityError
import unittest
# ----- SimpleCantilever Tests --------------------------------------------
class TestSimpleCantilver(unittest.TestCase):
def test_SimpleCantilever_bad_init(self):
"""Test that SUnit Cantilever returns an error when
too few arguements are given."""
to_test = [{'f_c': -10 * u.Hz, 'Q': 39 * u.dimensionless},
{'Q': 39 * u.dimensionless, 'k_c': 1 * u.N / u.m},
{'f_c': 40 * u.kHz, 'k_c': 1 * u.N / u.m}]
for kwargs in to_test:
assert_raises(TypeError, SimpleCantilever, **kwargs)
# ---- Unit Cantilever Tests --------------------------------------------
class TestCantilever(unittest.TestCase):
def setUp(self):
self.c = Cantilever(f_c=50*u.kHz, k_c=3*u.N/u.m,
Q=20000*u.dimensionless)
def test_Cantilever_input(self):
"""Make sure that defining a Cantilever with an incorrect geometry, or
negative number raises a ValueError."""
to_test = [{'f_c': -10 * u.Hz},
{'Q': -39 * u.dimensionless},
{'f_c': 40 * u.kHz, 'R_tip': -0.023 * u.nm},
{'theta_tip': -10 * u.degrees},
{'theta_tip': 100 * u.degrees},
{'geometry_c': 'not perpendicular or parallel'}]
for kwargs in to_test:
assert_raises(ValueError, Cantilever, **kwargs)
def test_Cantilever_input_units(self):
to_test = [{'f_c': -10 * u.s},
{'Q': -39},
{'f_c': 40 * u.kHz, 'R_tip': -0.023 * u.C},
{'theta_tip': 12 * u.degK},
{'theta_tip': 100}]
for kwargs in to_test:
assert_raises(DimensionalityError, Cantilever, **kwargs)
def test_Cantilever_init(self):
"""Make sure the unit cantilever initializes properly."""
c1 = Cantilever(f_c=50 * u.kHz, k_c=3 * u.N/u.m,
Q=1000 * u.dimensionless)
assert c1.f_c == 50 * u.kHz
def test_F_min(self):
ex_F_min = 2.8125685411157023e-3 * u.pN
pint_assert_almost_equal(ex_F_min, self.c.F_min(300*u.K))
def test_Gamma_i(self):
c = self.c
ex_Gamma_i = 477.46482927568604 * u.pN * u.s / u.m
pint_assert_almost_equal(ex_Gamma_i, c.Gamma_i)
# ---- Unit Experiment Tests --------------------------------------------
class TestExperiment(unittest.TestCase):
def test_Experiment_init(self):
e1 = Experiment(jitter_f_f=4*u.Hz)
e2 = Experiment(d=0.3 * u.um, V_ts=10 * u.V)
assert e1.jitter_f_f == 4*u.Hz
assert e2.d == 0.3 * u.um
assert_raises(DimensionalityError, Experiment, **{'d': 4 * u.K})
assert_raises(ValueError, Experiment, **{'d': -1*u.nm})
# ----- Unit Transistor Tests --------------------------------------------
class TestTransistor(unittest.TestCase):
def test_Transistor_init_success(self):
samp1 = Transistor(semiconductor='TPD',
h=70 * u.nm, h_trans=1 * u.nm, h_i=300 * u.nm,
E_s1=3.5, E_s2=-0.0005, E_i1=4.65, E_i2=0,
mobility=3e-6 * u.cm ** 2 / u.V / u.s, T=298 * u.K,
V_g=10 * u.V, rho=None)
assert samp1.h == 70 * u.nm
assert samp1.mobility == 3e-6 * u.cm ** 2 / u.V / u.s
def test_Transistor_bad_init(self):
"""Try some things that shoud raise errors."""
assert_raises(ValueError, Transistor, **{'T': -23 * u.K})
assert_raises(DimensionalityError, Transistor, **{'h': 70 * u.s})
| 35.654545 | 78 | 0.539776 |
b26f454d0bec0cf0a48053621fd5f917a8cbace5 | 4,619 | py | Python | code/tasks/REGEX/misc/util.py | khanhptnk/iliad | 3eb4f11c1d3cdb6784fd2f78a83ce07f984d3825 | [
"MIT"
] | 7 | 2021-06-10T22:17:13.000Z | 2022-03-03T05:58:55.000Z | code/tasks/REGEX/misc/util.py | khanhptnk/iliad | 3eb4f11c1d3cdb6784fd2f78a83ce07f984d3825 | [
"MIT"
] | null | null | null | code/tasks/REGEX/misc/util.py | khanhptnk/iliad | 3eb4f11c1d3cdb6784fd2f78a83ce07f984d3825 | [
"MIT"
] | null | null | null | import re
import os
import math
import time
import logging
import sys
from datetime import datetime
class Struct:
def __init__(self, **entries):
rec_entries = {}
for k, v in entries.items():
if isinstance(v, dict):
rv = Struct(**v)
elif isinstance(v, list):
rv = []
for item in v:
if isinstance(item, dict):
rv.append(Struct(**item))
else:
rv.append(item)
else:
rv = v
rec_entries[k] = rv
self.__dict__.update(rec_entries)
def __str_helper(self, depth):
lines = []
for k, v in self.__dict__.items():
if isinstance(v, Struct):
v_str = v.__str_helper(depth + 1)
lines.append("%s:\n%s" % (k, v_str))
else:
lines.append("%s: %r" % (k, v))
indented_lines = [" " * depth + l for l in lines]
return "\n".join(indented_lines)
def __str__(self):
return "struct {\n%s\n}" % self.__str_helper(1)
def __repr__(self):
return "Struct(%r)" % self.__dict__
class Index:
def __init__(self):
self.contents = dict()
self.ordered_contents = []
self.reverse_contents = dict()
def __getitem__(self, item):
if item not in self.contents:
return None
return self.contents[item]
def index(self, item):
if item not in self.contents:
idx = len(self.contents) + 1
self.ordered_contents.append(item)
self.contents[item] = idx
self.reverse_contents[idx] = item
idx = self[item]
assert idx != 0
return idx
def get(self, idx):
if idx == 0:
return "*invalid*"
return self.reverse_contents[idx]
def __len__(self):
return len(self.contents) + 1
def __iter__(self):
return iter(self.ordered_contents)
class Vocab(Index):
def __init__(self):
super(Vocab, self).__init__()
self.index('<PAD>')
self.index('<UNK>')
def __getitem__(self, item):
if item not in self.contents:
return self.contents['<UNK>']
return self.contents[item]
class ElapsedFormatter():
def __init__(self):
self.start_time = datetime.now()
def format_time(self, t):
return str(t)[:-7]
def format(self, record):
elapsed_time = self.format_time(datetime.now() - self.start_time)
log_str = "%s %s: %s" % (elapsed_time,
record.levelname,
record.getMessage())
return log_str
def config_logging(log_file):
stream_handler = logging.StreamHandler()
stream_handler.setFormatter(ElapsedFormatter())
file_handler = logging.FileHandler(log_file)
file_handler.setFormatter(ElapsedFormatter())
logging.basicConfig(level=logging.INFO,
handlers=[stream_handler, file_handler])
def handler(type, value, tb):
logging.exception("Uncaught exception: %s", str(value))
logging.exception("\n".join(traceback.format_exception(type, value, tb)))
sys.excepthook = handler
def flatten(lol):
if isinstance(lol, tuple) or isinstance(lol, list):
return sum([flatten(l) for l in lol], [])
else:
return [lol]
def postorder(tree):
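    """Yield the nodes of a tuple-encoded tree in postorder: subtrees first, then the head label."""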
if isinstance(tree, tuple):
for subtree in tree[1:]:
for node in postorder(subtree):
yield node
yield tree[0]
else:
yield tree
def tree_map(function, tree):
if isinstance(tree, tuple):
head = function(tree)
tail = tuple(tree_map(function, subtree) for subtree in tree[1:])
return (head,) + tail
return function(tree)
def tree_zip(*trees):
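    """Zip several same-shaped trees into a single tree whose leaves are tuples of corresponding leaves."""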
if isinstance(trees[0], tuple):
zipped_children = [[t[i] for t in trees] for i in range(len(trees[0]))]
zipped_children_rec = [tree_zip(*z) for z in zipped_children]
return tuple(zipped_children_rec)
return trees
FEXP_RE = re.compile(r"(.*)\[(.*)\]")
def parse_fexp(fexp):
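    """Split a feature expression of the form "name[arg]" into the pair (name, arg)."""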
m = FEXP_RE.match(fexp)
return (m.group(1), m.group(2))
def as_minutes(s):
m = math.floor(s / 60)
s -= m * 60
return '%dm %ds' % (m, s)
def time_since(since, percent):
now = time.time()
s = now - since
es = s / (percent)
rs = es - s
return '%s (- %s)' % (as_minutes(s), as_minutes(rs))
def add_stat(a, b):
return (a[0] + sum(b), a[1] + len(b))
| 26.545977 | 81 | 0.560294 |
b9bae777c2dd900f4077cb59d1dc13a3880b3273 | 2,285 | py | Python | plugins/swap_pagefault.py | Book-smile-man/lmp | 02095421036bc48af06b4e6c63567cc7f5e2d92a | [
"Apache-2.0"
] | null | null | null | plugins/swap_pagefault.py | Book-smile-man/lmp | 02095421036bc48af06b4e6c63567cc7f5e2d92a | [
"Apache-2.0"
] | null | null | null | plugins/swap_pagefault.py | Book-smile-man/lmp | 02095421036bc48af06b4e6c63567cc7f5e2d92a | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# coding=utf-8
from __future__ import print_function
from bcc import BPF
from time import sleep, strftime
# for influxdb
from influxdb import InfluxDBClient
import lmp_influxdb as db
from db_modules import write2db
from datetime import datetime
DBNAME = 'lmp'
client = db.connect(DBNAME,user='root',passwd=123456)
b = BPF(text = '''
#include <uapi/linux/ptrace.h>
#include <linux/ktime.h>
BPF_HASH(timer, u32, ktime_t);
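/* timer: pid -> do_swap_page entry timestamp (ns); the kretprobe replaces it with the latency */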
int kprobe__do_swap_page(struct pt_regs *ctx)
{
u32 pid = bpf_get_current_pid_tgid();
ktime_t start = bpf_ktime_get_ns();
timer.update(&pid, &start);
return 0;
}
int kretprobe__do_swap_page(struct pt_regs *ctx)
{
ktime_t end = bpf_ktime_get_ns();
int ret = PT_REGS_RC(ctx);
u32 pid = bpf_get_current_pid_tgid();
    ktime_t delta = 0;
    ktime_t *tsp = timer.lookup(&pid);
    if ((ret >= 0) && (tsp != NULL)) {
        delta = end - *tsp;
        /* store the latency back so user space reads durations, not entry timestamps */
        timer.update(&pid, &delta);
        if (delta >= 10000000) { /* only report page faults slower than 10 ms */
            bpf_trace_printk("%lld\\n", delta);
        }
    }
    //bpf_trace_printk("%lld\\n", delta);
return 0;
}
''')
# data structure from template
class lmp_data(object):
    def __init__(self, a, b, c):
self.time = a
self.glob = b
self.duration = c
data_struct = {"measurement":'swap_pagefault',
"time":[],
"tags":['glob'],
"fields":['duration']}
timer = b.get_table("timer")
#print("%-6s%-6s%-6s%-6s" % ("CPU", "PID", "TGID", "TIME(us)"))
while (1):
try:
sleep(1)
for k, v in timer.items():
#print("%-6d%-6d%-6d%-6d" % (k.cpu, k.pid, k.tgid, v.value / 1000))
            test_data = lmp_data(datetime.now().isoformat(), 'glob', v.value / 1000)
write2db(data_struct, test_data, client)
#print("This is success")
timer.clear()
except KeyboardInterrupt:
exit()
| 24.308511 | 81 | 0.498031 |
1255212df4a1ff8e180a984ccf5dfbc1de5742b0 | 1,546 | py | Python | response/zoom/client.py | dmichau85/response | 672a675660400766286628c349f324bff56e986c | [
"MIT"
] | 2 | 2020-09-17T17:24:32.000Z | 2020-10-16T10:49:03.000Z | response/zoom/client.py | dmichau85/response | 672a675660400766286628c349f324bff56e986c | [
"MIT"
] | 39 | 2020-10-02T15:56:55.000Z | 2022-01-19T11:58:41.000Z | response/zoom/client.py | dmichau85/response | 672a675660400766286628c349f324bff56e986c | [
"MIT"
] | 3 | 2020-10-30T19:46:31.000Z | 2021-05-14T04:59:39.000Z | import json
import time
import requests
from jose import jwt
API_BASE_URI = "https://api.zoom.us/v2"
class ZoomClient:
"""Simple HTTP Client for Zoom Calls."""
def __init__(self, api_key, api_secret):
self.api_key = api_key
self.api_secret = api_secret
self.token = generate_jwt(self.api_key, self.api_secret)
self.headers = self._get_headers()
self.timeout = 15
def _get_headers(self):
headers = {}
headers["authorization"] = f"Bearer {self.token}"
headers["content-type"] = "application/json"
return headers
def get(self, path, params=None):
return requests.get(
"{}/{}".format(API_BASE_URI, path),
params=params,
headers=self.headers,
timeout=self.timeout,
)
def post(self, path, data):
return requests.post(
"{}/{}".format(API_BASE_URI, path),
data=json.dumps(data),
headers=self.headers,
timeout=self.timeout,
)
def delete(self, path, data=None, params=None):
return requests.delete(
"{}/{}".format(API_BASE_URI, path),
data=json.dumps(data),
params=params,
headers=self.headers,
timeout=self.timeout,
)
def generate_jwt(key, secret):
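    """Create a one-hour HS256 JWT for the Zoom API from an API key/secret pair."""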
header = {"alg": "HS256", "typ": "JWT"}
payload = {"iss": key, "exp": int(time.time() + 3600)}
token = jwt.encode(payload, secret, algorithm="HS256", headers=header)
return token
| 27.122807 | 74 | 0.578913 |
cf993c1477a1442fbe75449ff11b7f386a4dab66 | 14,969 | py | Python | python/paddle/fluid/transpiler/collective.py | laipaang/Paddle | d7f35434b761707a8479b75636546a624399369a | [
"Apache-2.0"
] | 10 | 2021-05-12T07:20:32.000Z | 2022-03-04T08:21:56.000Z | python/paddle/fluid/transpiler/collective.py | laipaang/Paddle | d7f35434b761707a8479b75636546a624399369a | [
"Apache-2.0"
] | 1 | 2021-01-07T11:00:58.000Z | 2021-01-07T11:00:58.000Z | python/paddle/fluid/transpiler/collective.py | laipaang/Paddle | d7f35434b761707a8479b75636546a624399369a | [
"Apache-2.0"
] | 18 | 2021-05-19T08:01:49.000Z | 2022-02-11T03:11:32.000Z | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import sys
import math
from functools import reduce
import collections
import six
import logging
import numpy as np
from .. import core, unique_name
from ..framework import Program, default_main_program, default_startup_program
from .details import wait_server_ready
__all__ = ['GradAllReduce', 'LocalSGD', 'MultiThread']
OpRole = core.op_proto_and_checker_maker.OpRole
class Collective(object):
'''
    '''Base transpiler that rewrites the startup and main programs for collective (NCCL) distributed training.'''
self.nrings = nrings
self.endpoints = None
self.current_endpoint = None
self.nranks = None
self.rank = None
self.startup_program = None
self.main_program = None
op_maker = core.op_proto_and_checker_maker
self.op_role_key = op_maker.kOpRoleAttrName()
self.op_role_var_key = op_maker.kOpRoleVarAttrName()
def transpile(self, startup_program, main_program, rank, endpoints,
current_endpoint, wait_port):
# in case of '127.0.0.1:6700,127.0.0.1:6701,...'
if isinstance(endpoints, str):
endpoints = endpoints.split(',')
self.startup_program = startup_program
if startup_program is None:
self.startup_program = default_startup_program()
self.main_program = main_program
if main_program is None:
self.main_program = default_main_program()
self.nranks = len(endpoints)
if self.nranks == 1 and self.mode != "single_process_multi_thread":
raise ValueError('the number of endpoints must > 1')
if rank < 0:
raise ValueError('rank must >= 0')
self.rank = rank
if current_endpoint not in endpoints:
            raise ValueError('current endpoint %s is not in %s' %
                             (current_endpoint, str(endpoints)))
self.endpoints = endpoints
self.current_endpoint = current_endpoint
self.wait_port = wait_port
self.startup_program._origin_program = self.startup_program.clone()
self._transpile_startup_program()
self.main_program._origin_program = self.main_program.clone()
self._transpile_main_program()
def _transpile_main_program(self):
        raise NotImplementedError('subclasses must override this method')
def _transpile_startup_program(self):
for ring_id in range(self.nrings):
self._init_communicator(self.startup_program, self.current_endpoint,
self.endpoints, self.rank, ring_id,
self.wait_port)
self._broadcast_params()
def _init_communicator(self,
program,
current_endpoint,
endpoints,
rank,
ring_id,
wait_port,
has_multitrainer=False):
nranks = len(endpoints)
other_endpoints = endpoints[:]
other_endpoints.remove(current_endpoint)
if rank == 0 and wait_port:
wait_server_ready(other_endpoints)
block = program.global_block()
nccl_id_var = block.create_var(
name=unique_name.generate('nccl_id'),
persistable=True,
type=core.VarDesc.VarType.RAW)
block.append_op(
type='c_gen_nccl_id',
inputs={},
outputs={'Out': nccl_id_var},
attrs={
'rank': rank,
'endpoint': current_endpoint,
'other_endpoints': other_endpoints,
self.op_role_key: OpRole.Forward
})
if not has_multitrainer:
block.append_op(
type='c_comm_init',
inputs={'X': nccl_id_var},
outputs={},
attrs={
'nranks': nranks,
'rank': rank,
'ring_id': ring_id,
self.op_role_key: OpRole.Forward
})
else:
block.append_op(
type='c_comm_init_multitrainer',
inputs={'X': nccl_id_var},
outputs={},
attrs={
'ntrainers': nranks,
'trainer_id': rank,
'ring_id': ring_id,
self.op_role_key: OpRole.Forward
})
def _broadcast_params(self):
block = self.startup_program.global_block()
ring_id = -1
for param in block.iter_parameters():
if param.is_distributed:
continue
ring_id = (ring_id + 1) % self.nrings
block.append_op(
type='c_broadcast',
inputs={'X': param},
outputs={'Out': param},
attrs={
'ring_id': ring_id,
'root': 0,
self.op_role_key: OpRole.Forward
})
for ring_id in range(self.nrings):
block.append_op(
type='c_sync_comm_stream',
inputs={'X': param},
outputs={'Out': param},
attrs={'ring_id': ring_id,
self.op_role_key: OpRole.Forward})
def _is_loss_grad_op(self, op):
if self.op_role_key not in op.attr_names:
return False
op_role = int(op.all_attrs()[self.op_role_key])
return op_role & int(OpRole.Backward) and op_role & int(OpRole.Loss)
def _is_backward_op(self, op):
return self.op_role_key in op.attr_names and \
int(op.all_attrs()[self.op_role_key]) & int(OpRole.Backward)
def _is_update_op(self, op):
return 'Param' in op.input_names and 'Grad' in op.input_names and \
"LearningRate" in op.input_names
def _is_optimizer_op(self, op):
return self.op_role_key in op.attr_names and \
int(op.all_attrs()[self.op_role_key]) & int(OpRole.Optimize)
class GradAllReduce(Collective):
    '''Transpiler that scales the loss gradient by 1/nranks and all-reduces every parameter gradient.'''
def __init__(self, nrings=2):
Collective.__init__(self, nrings)
self.mode = "grad_allreduce"
def _transpile_main_program(self):
self._insert_scale_loss_grad_ops()
self._insert_allreduce_ops()
def _insert_scale_loss_grad_ops(self):
'''
        To keep the effective learning rate consistent across different numbers of
        training workers, we divide the loss gradient by the number of workers.
'''
block = self.main_program.global_block()
for idx, op in reversed(list(enumerate(block.ops))):
if self._is_loss_grad_op(op):
loss_grad_var = block.vars[op.output_arg_names[0]]
block._insert_op(
idx + 1,
type='scale',
inputs={'X': loss_grad_var},
outputs={'Out': loss_grad_var},
attrs={
'scale': 1.0 / self.nranks,
self.op_role_key: OpRole.Backward
})
def _insert_allreduce_ops(self):
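        # Scan the backward ops in reverse; after each parameter gradient insert a
        # calc-stream sync followed by a ring all-reduce, then sync the comm streams
        # once before the first optimizer op.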
block = self.main_program.global_block()
ring_id = -1
grad = None
for idx, op in reversed(list(enumerate(block.ops))):
if self._is_backward_op(op) and \
self.op_role_var_key in op.attr_names:
op_role_var = op.all_attrs()[self.op_role_var_key]
if len(op_role_var) == 0:
continue
assert len(op_role_var) % 2 == 0
offset = idx
for i in range(0, len(op_role_var), 2):
param = block.vars[op_role_var[i]]
grad = block.vars[op_role_var[i + 1]]
if param.is_distributed:
continue
if offset == idx:
offset += 1
block._insert_op(
offset,
type='c_sync_calc_stream',
inputs={'X': grad},
outputs={'Out': grad},
attrs={self.op_role_key: OpRole.Backward})
offset += 1
                    # Since we scan the ops in reverse, insert c_allreduce_sum the
                    # same way so that the ring_id keeps alternating across gradients
ring_id = (ring_id + 1) % self.nrings
block._insert_op(
offset,
type='c_allreduce_sum',
inputs={'X': grad},
outputs={'Out': grad},
attrs={
'ring_id': ring_id,
self.op_role_key: OpRole.Backward
})
if grad is None:
return
for idx, op in enumerate(block.ops):
if self._is_optimizer_op(op):
for ring_id in range(self.nrings):
block._insert_op(
idx + ring_id,
type='c_sync_comm_stream',
inputs={'X': grad},
outputs={'Out': grad},
attrs={
'ring_id': ring_id,
self.op_role_key: OpRole.Backward
})
break
class LocalSGD(Collective):
    '''Local SGD transpiler: each worker trains locally and periodically averages parameters via snapshots.'''
def __init__(self, nrings=2):
Collective.__init__(self, nrings)
self.snapshot_key = '@SNAPSHOT'
self.mode = "local_sgd"
def _transpile_startup_program(self):
Collective._transpile_startup_program(self)
block = self.startup_program.global_block()
non_dist_params = []
for param in block.iter_parameters():
if not param.is_distributed:
non_dist_params.append(param)
for param in non_dist_params:
snapshot = block.create_var(
name=self.snapshot_name(param.name),
shape=param.shape,
persistable=True,
stop_gradient=True)
block.append_op(
type='assign',
inputs={'X': [param]},
outputs={'Out': [snapshot]},
attrs={self.op_role_key: OpRole.Forward})
def snapshot_name(self, param_name):
return param_name + self.snapshot_key
def _transpile_main_program(self):
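        # After each local update, all-reduce (snapshot - param), average it across
        # workers, apply it to param, and refresh the snapshot for the next round.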
block = self.main_program.global_block()
ordered_param_snapshot = []
ring_id = -1
for idx, op in reversed(list(enumerate(block.ops))):
if self._is_update_op(op):
param = block.vars[op.input('Param')[0]]
if param.is_distributed:
continue
snapshot = block.create_var(
name=self.snapshot_name(param.name),
shape=param.shape,
persistable=True,
stop_gradient=True,
dtype=param.dtype)
block._insert_op(
idx + 1,
type='elementwise_sub',
inputs={'X': [snapshot],
'Y': [param]},
outputs={'Out': [param]},
attrs={self.op_role_key: OpRole.Optimize})
block._insert_op(
idx + 2,
type='c_sync_calc_stream',
inputs={'X': param},
outputs={'Out': param},
attrs={self.op_role_key: OpRole.Optimize})
ring_id = (ring_id + 1) % self.nrings
block._insert_op(
idx + 3,
type='c_allreduce_sum',
inputs={'X': [param]},
outputs={'Out': [param]},
attrs={
'ring_id': ring_id,
self.op_role_key: OpRole.Optimize
})
ordered_param_snapshot.append((param, snapshot))
for ring_id in range(self.nrings):
block.append_op(
type='c_sync_comm_stream',
inputs={'X': param},
outputs={'Out': param},
attrs={'ring_id': ring_id,
self.op_role_key: OpRole.Optimize})
for param_snapshot in reversed(ordered_param_snapshot):
param = param_snapshot[0]
snapshot = param_snapshot[1]
block.append_op(
type='scale',
inputs={'X': [param]},
outputs={'Out': [param]},
attrs={
'scale': 1.0 / self.nranks,
self.op_role_key: OpRole.Optimize
})
block.append_op(
type='elementwise_sub',
inputs={'X': [snapshot],
'Y': [param]},
outputs={'Out': [param]},
attrs={self.op_role_key: OpRole.Optimize})
block.append_op(
type='assign',
inputs={'X': [param]},
outputs={'Out': [snapshot]},
attrs={self.op_role_key: OpRole.Optimize})
class MultiThread(GradAllReduce):
    '''GradAllReduce variant for single-process multi-thread (and multi-node multitrainer) setups.'''
def __init__(self, nrings=1):
GradAllReduce.__init__(self, nrings)
self.mode = "single_process_multi_thread"
def _transpile_startup_program(self):
if len(self.endpoints) > 1:
print("begin to _transpile_startup_program for multi-node")
print("current_endpoint: ", self.current_endpoint)
print("total endpoints: ", self.endpoints)
print("rank: %d, ring_id: %d" % (self.rank, self.nrings))
for ring_id in range(self.nrings):
self._init_communicator(
self.startup_program, self.current_endpoint, self.endpoints,
self.rank, ring_id, self.wait_port, True)
else:
print("begin to _transpile_startup_program for single-node")
block = self.startup_program.global_block()
block.append_op(type='c_comm_init_all', attrs={'ring_id': 0})
| 35.811005 | 83 | 0.523949 |
4f9a7cac0d7c3290c7f6ecd897f741a62d87167b | 15,454 | py | Python | tests/contrib/pymysql/test_pymysql.py | someboredkiddo/dd-trace-py | 2e1b1b43ffb60f3f26981eb6d9cfe7f26b9285cb | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | tests/contrib/pymysql/test_pymysql.py | someboredkiddo/dd-trace-py | 2e1b1b43ffb60f3f26981eb6d9cfe7f26b9285cb | [
"Apache-2.0",
"BSD-3-Clause"
] | 1 | 2020-12-22T16:56:55.000Z | 2020-12-22T16:56:55.000Z | tests/contrib/pymysql/test_pymysql.py | kenferrara/dd-trace-py | 12e52e0ab804061e72b0f76214f5e4bb475ae20f | [
"Apache-2.0",
"BSD-3-Clause"
] | 1 | 2020-12-22T16:54:02.000Z | 2020-12-22T16:54:02.000Z | # 3p
import pymysql
# project
from ddtrace import Pin
from ddtrace.constants import ANALYTICS_SAMPLE_RATE_KEY
from ddtrace.compat import PY2
from ddtrace.compat import stringify
from ddtrace.contrib.pymysql.patch import patch, unpatch
# testing
from tests.opentracer.utils import init_tracer
from ... import TracerTestCase, assert_is_measured, assert_dict_issuperset
from ...contrib.config import MYSQL_CONFIG
class PyMySQLCore(object):
"""PyMySQL test case reuses the connection across tests"""
conn = None
DB_INFO = {
'out.host': MYSQL_CONFIG.get('host'),
}
if PY2:
DB_INFO.update({
'db.user': MYSQL_CONFIG.get('user'),
'db.name': MYSQL_CONFIG.get('database')
})
else:
DB_INFO.update({
'db.user': stringify(bytes(MYSQL_CONFIG.get('user'), encoding='utf-8')),
'db.name': stringify(bytes(MYSQL_CONFIG.get('database'), encoding='utf-8'))
})
def setUp(self):
super(PyMySQLCore, self).setUp()
patch()
def tearDown(self):
super(PyMySQLCore, self).tearDown()
if self.conn and not self.conn._closed:
self.conn.close()
unpatch()
def _get_conn_tracer(self):
# implement me
pass
def test_simple_query(self):
conn, tracer = self._get_conn_tracer()
writer = tracer.writer
cursor = conn.cursor()
        # PyMySQL returns the rowcount directly instead of a cursor
rowcount = cursor.execute('SELECT 1')
assert rowcount == 1
rows = cursor.fetchall()
assert len(rows) == 1
spans = writer.pop()
assert len(spans) == 1
span = spans[0]
assert_is_measured(span)
assert span.service == "pymysql"
assert span.name == 'pymysql.query'
assert span.span_type == 'sql'
assert span.error == 0
assert span.get_metric('out.port') == MYSQL_CONFIG.get('port')
meta = {}
meta.update(self.DB_INFO)
assert_dict_issuperset(span.meta, meta)
def test_simple_query_fetchall(self):
with self.override_config('dbapi2', dict(trace_fetch_methods=True)):
conn, tracer = self._get_conn_tracer()
writer = tracer.writer
cursor = conn.cursor()
cursor.execute('SELECT 1')
rows = cursor.fetchall()
assert len(rows) == 1
spans = writer.pop()
assert len(spans) == 2
span = spans[0]
assert_is_measured(span)
assert span.service == "pymysql"
assert span.name == 'pymysql.query'
assert span.span_type == 'sql'
assert span.error == 0
assert span.get_metric('out.port') == MYSQL_CONFIG.get('port')
meta = {}
meta.update(self.DB_INFO)
assert_dict_issuperset(span.meta, meta)
fetch_span = spans[1]
assert fetch_span.name == 'pymysql.query.fetchall'
def test_query_with_several_rows(self):
conn, tracer = self._get_conn_tracer()
writer = tracer.writer
cursor = conn.cursor()
query = 'SELECT n FROM (SELECT 42 n UNION SELECT 421 UNION SELECT 4210) m'
cursor.execute(query)
rows = cursor.fetchall()
assert len(rows) == 3
spans = writer.pop()
assert len(spans) == 1
self.assertEqual(spans[0].name, 'pymysql.query')
def test_query_with_several_rows_fetchall(self):
with self.override_config('dbapi2', dict(trace_fetch_methods=True)):
conn, tracer = self._get_conn_tracer()
writer = tracer.writer
cursor = conn.cursor()
query = 'SELECT n FROM (SELECT 42 n UNION SELECT 421 UNION SELECT 4210) m'
cursor.execute(query)
rows = cursor.fetchall()
assert len(rows) == 3
spans = writer.pop()
assert len(spans) == 2
fetch_span = spans[1]
assert fetch_span.name == 'pymysql.query.fetchall'
def test_query_many(self):
# tests that the executemany method is correctly wrapped.
conn, tracer = self._get_conn_tracer()
writer = tracer.writer
tracer.enabled = False
cursor = conn.cursor()
cursor.execute("""
create table if not exists dummy (
dummy_key VARCHAR(32) PRIMARY KEY,
dummy_value TEXT NOT NULL)""")
tracer.enabled = True
stmt = 'INSERT INTO dummy (dummy_key, dummy_value) VALUES (%s, %s)'
data = [('foo', 'this is foo'),
('bar', 'this is bar')]
# PyMySQL `executemany()` returns the rowcount
rowcount = cursor.executemany(stmt, data)
assert rowcount == 2
query = 'SELECT dummy_key, dummy_value FROM dummy ORDER BY dummy_key'
cursor.execute(query)
rows = cursor.fetchall()
assert len(rows) == 2
assert rows[0][0] == 'bar'
assert rows[0][1] == 'this is bar'
assert rows[1][0] == 'foo'
assert rows[1][1] == 'this is foo'
spans = writer.pop()
assert len(spans) == 2
cursor.execute('drop table if exists dummy')
def test_query_many_fetchall(self):
with self.override_config('dbapi2', dict(trace_fetch_methods=True)):
# tests that the executemany method is correctly wrapped.
conn, tracer = self._get_conn_tracer()
writer = tracer.writer
tracer.enabled = False
cursor = conn.cursor()
cursor.execute("""
create table if not exists dummy (
dummy_key VARCHAR(32) PRIMARY KEY,
dummy_value TEXT NOT NULL)""")
tracer.enabled = True
stmt = 'INSERT INTO dummy (dummy_key, dummy_value) VALUES (%s, %s)'
data = [('foo', 'this is foo'),
('bar', 'this is bar')]
cursor.executemany(stmt, data)
query = 'SELECT dummy_key, dummy_value FROM dummy ORDER BY dummy_key'
cursor.execute(query)
rows = cursor.fetchall()
assert len(rows) == 2
assert rows[0][0] == 'bar'
assert rows[0][1] == 'this is bar'
assert rows[1][0] == 'foo'
assert rows[1][1] == 'this is foo'
spans = writer.pop()
assert len(spans) == 3
cursor.execute('drop table if exists dummy')
fetch_span = spans[2]
assert fetch_span.name == 'pymysql.query.fetchall'
def test_query_proc(self):
conn, tracer = self._get_conn_tracer()
writer = tracer.writer
# create a procedure
tracer.enabled = False
cursor = conn.cursor()
cursor.execute('DROP PROCEDURE IF EXISTS sp_sum')
cursor.execute("""
CREATE PROCEDURE sp_sum (IN p1 INTEGER, IN p2 INTEGER, OUT p3 INTEGER)
BEGIN
SET p3 := p1 + p2;
END;""")
tracer.enabled = True
proc = 'sp_sum'
data = (40, 2, None)
# spans[len(spans) - 2]
cursor.callproc(proc, data)
# spans[len(spans) - 1]
cursor.execute("""
SELECT @_sp_sum_0, @_sp_sum_1, @_sp_sum_2
""")
output = cursor.fetchone()
assert len(output) == 3
assert output[2] == 42
spans = writer.pop()
assert spans, spans
        # The number of spans depends on PyMySQL implementation details
        # (typically extra internal calls to execute), but at least we
        # can expect the last closed span to be our proc.
span = spans[len(spans) - 2]
assert_is_measured(span)
assert span.service == "pymysql"
assert span.name == 'pymysql.query'
assert span.span_type == 'sql'
assert span.error == 0
assert span.get_metric('out.port') == MYSQL_CONFIG.get('port')
meta = {}
meta.update(self.DB_INFO)
assert_dict_issuperset(span.meta, meta)
def test_simple_query_ot(self):
"""OpenTracing version of test_simple_query."""
conn, tracer = self._get_conn_tracer()
writer = tracer.writer
ot_tracer = init_tracer('mysql_svc', tracer)
with ot_tracer.start_active_span('mysql_op'):
cursor = conn.cursor()
cursor.execute('SELECT 1')
rows = cursor.fetchall()
assert len(rows) == 1
spans = writer.pop()
assert len(spans) == 2
ot_span, dd_span = spans
# confirm parenting
assert ot_span.parent_id is None
assert dd_span.parent_id == ot_span.span_id
assert ot_span.service == 'mysql_svc'
assert ot_span.name == 'mysql_op'
assert_is_measured(dd_span)
assert dd_span.service == "pymysql"
assert dd_span.name == 'pymysql.query'
assert dd_span.span_type == 'sql'
assert dd_span.error == 0
assert dd_span.get_metric('out.port') == MYSQL_CONFIG.get('port')
meta = {}
meta.update(self.DB_INFO)
assert_dict_issuperset(dd_span.meta, meta)
def test_simple_query_ot_fetchall(self):
"""OpenTracing version of test_simple_query."""
with self.override_config('dbapi2', dict(trace_fetch_methods=True)):
conn, tracer = self._get_conn_tracer()
writer = tracer.writer
ot_tracer = init_tracer('mysql_svc', tracer)
with ot_tracer.start_active_span('mysql_op'):
cursor = conn.cursor()
cursor.execute('SELECT 1')
rows = cursor.fetchall()
assert len(rows) == 1
spans = writer.pop()
assert len(spans) == 3
ot_span, dd_span, fetch_span = spans
# confirm parenting
assert ot_span.parent_id is None
assert dd_span.parent_id == ot_span.span_id
assert ot_span.service == 'mysql_svc'
assert ot_span.name == 'mysql_op'
assert_is_measured(dd_span)
assert dd_span.service == "pymysql"
assert dd_span.name == 'pymysql.query'
assert dd_span.span_type == 'sql'
assert dd_span.error == 0
assert dd_span.get_metric('out.port') == MYSQL_CONFIG.get('port')
meta = {}
meta.update(self.DB_INFO)
assert_dict_issuperset(dd_span.meta, meta)
assert fetch_span.name == 'pymysql.query.fetchall'
def test_commit(self):
conn, tracer = self._get_conn_tracer()
writer = tracer.writer
conn.commit()
spans = writer.pop()
assert len(spans) == 1
span = spans[0]
assert span.service == "pymysql"
assert span.name == 'pymysql.connection.commit'
def test_rollback(self):
conn, tracer = self._get_conn_tracer()
writer = tracer.writer
conn.rollback()
spans = writer.pop()
assert len(spans) == 1
span = spans[0]
assert span.service == "pymysql"
assert span.name == 'pymysql.connection.rollback'
def test_analytics_default(self):
conn, tracer = self._get_conn_tracer()
writer = tracer.writer
cursor = conn.cursor()
cursor.execute('SELECT 1')
rows = cursor.fetchall()
assert len(rows) == 1
spans = writer.pop()
self.assertEqual(len(spans), 1)
span = spans[0]
self.assertIsNone(span.get_metric(ANALYTICS_SAMPLE_RATE_KEY))
def test_analytics_with_rate(self):
with self.override_config(
'dbapi2',
dict(analytics_enabled=True, analytics_sample_rate=0.5)
):
conn, tracer = self._get_conn_tracer()
writer = tracer.writer
cursor = conn.cursor()
cursor.execute('SELECT 1')
rows = cursor.fetchall()
assert len(rows) == 1
spans = writer.pop()
self.assertEqual(len(spans), 1)
span = spans[0]
self.assertEqual(span.get_metric(ANALYTICS_SAMPLE_RATE_KEY), 0.5)
def test_analytics_without_rate(self):
with self.override_config(
'dbapi2',
dict(analytics_enabled=True)
):
conn, tracer = self._get_conn_tracer()
writer = tracer.writer
cursor = conn.cursor()
cursor.execute('SELECT 1')
rows = cursor.fetchall()
assert len(rows) == 1
spans = writer.pop()
self.assertEqual(len(spans), 1)
span = spans[0]
self.assertEqual(span.get_metric(ANALYTICS_SAMPLE_RATE_KEY), 1.0)
class TestPyMysqlPatch(PyMySQLCore, TracerTestCase):
def _get_conn_tracer(self):
if not self.conn:
self.conn = pymysql.connect(**MYSQL_CONFIG)
assert not self.conn._closed
# Ensure that the default pin is there, with its default value
pin = Pin.get_from(self.conn)
assert pin
# Customize the service
# we have to apply it on the existing one since new one won't inherit `app`
pin.clone(tracer=self.tracer).onto(self.conn)
return self.conn, self.tracer
def test_patch_unpatch(self):
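        # patching must be reversible: unpatched connections carry no Pin and emit no spans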
unpatch()
# assert we start unpatched
conn = pymysql.connect(**MYSQL_CONFIG)
assert not Pin.get_from(conn)
conn.close()
patch()
try:
writer = self.tracer.writer
conn = pymysql.connect(**MYSQL_CONFIG)
pin = Pin.get_from(conn)
assert pin
pin.clone(tracer=self.tracer).onto(conn)
assert not conn._closed
cursor = conn.cursor()
cursor.execute('SELECT 1')
rows = cursor.fetchall()
assert len(rows) == 1
spans = writer.pop()
assert len(spans) == 1
span = spans[0]
assert span.service == "pymysql"
assert span.name == 'pymysql.query'
assert span.span_type == 'sql'
assert span.error == 0
assert span.get_metric('out.port') == MYSQL_CONFIG.get('port')
meta = {}
meta.update(self.DB_INFO)
assert_dict_issuperset(span.meta, meta)
finally:
unpatch()
# assert we finish unpatched
conn = pymysql.connect(**MYSQL_CONFIG)
assert not Pin.get_from(conn)
conn.close()
patch()
def test_user_pin_override(self):
conn, tracer = self._get_conn_tracer()
pin = Pin.get_from(conn)
pin.clone(service="pin-svc", tracer=self.tracer).onto(conn)
cursor = conn.cursor()
cursor.execute("SELECT 1")
rows = cursor.fetchall()
assert len(rows) == 1
spans = tracer.writer.pop()
assert len(spans) == 1
span = spans[0]
assert span.service == "pin-svc"
@TracerTestCase.run_in_subprocess(env_overrides=dict(DD_PYMYSQL_SERVICE="mysvc"))
def test_user_specified_service_integration(self):
conn, tracer = self._get_conn_tracer()
writer = tracer.writer
conn.rollback()
spans = writer.pop()
assert len(spans) == 1
span = spans[0]
assert span.service == "mysvc"
| 33.964835 | 87 | 0.571891 |
5c267c09bbdd0e9bc1ccf9596bea7ec58d3c959e | 6,961 | py | Python | archivesspace_export/harvest_oai_eads.py | ndlib/mellon-manifest-pipeline | aa90494e73fbc30ce701771ac653d28d533217db | [
"Apache-2.0"
] | 1 | 2021-06-27T15:16:13.000Z | 2021-06-27T15:16:13.000Z | archivesspace_export/harvest_oai_eads.py | ndlib/marble-manifest-pipeline | abc036e4c81a8a5e938373a43153e2492a17cbf8 | [
"Apache-2.0"
] | 8 | 2019-11-05T18:58:23.000Z | 2021-09-03T14:54:42.000Z | archivesspace_export/harvest_oai_eads.py | ndlib/mellon-manifest-pipeline | aa90494e73fbc30ce701771ac653d28d533217db | [
"Apache-2.0"
] | null | null | null | from datetime import datetime
import os
import requests # noqa: E402
from xml.etree import ElementTree
from create_json_from_xml import createJsonFromXml
from dependencies.sentry_sdk import capture_exception
from dynamo_query_functions import get_item_record
class HarvestOaiEads():
""" This performs all EAD-related processing """
def __init__(self, config: dict):
self.config = config
self.base_oai_url = self.config['archive-space-server-base-url']
self.jsonFromXMLClass = createJsonFromXml()
self.temporary_local_path = '/tmp'
self.require_dao_flag = False
validate_json_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'dependencies', 'pipelineutilities', 'validate_json.py')
self.validate_json_modified_date = datetime.fromtimestamp(os.path.getmtime(validate_json_path)).isoformat()
local_control_file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'xml_to_json_translation_control_file.json')
self.local_control_file_modified_date = datetime.fromtimestamp(os.path.getmtime(local_control_file_path)).isoformat()
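        # timestamps of the transform code, presumably compared downstream to decide
        # when previously harvested records must be reprocessed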
def get_standard_json_from_archives_space_url(self, id_url: str) -> dict:
""" Retrieve one EAD xml record given the ArchivesSpace identifier """
oai_url = self._get_oai_url_given_id_url(id_url)
standard_json = {}
xml_string = self._get_xml_string_given_oai_url(oai_url)
if xml_string:
xml_tree = self._get_xml_tree_given_xml_string(xml_string, id_url)
xml_record = xml_tree.find('./GetRecord/record')
            if xml_record is not None:  # Occasionally, we get malformed results from ArchivesSpace. This protects against that.
item_id = self._get_id_from_xml_record(xml_record)
date_modified_in_source_system = self._get_modified_date_from_xml_record(xml_record)
process_record_flag = self._get_process_record_flag(item_id, date_modified_in_source_system)
if process_record_flag:
standard_json = self._process_record(oai_url, xml_record)
else:
print(item_id, 'already current in Dynamo. No need to reprocess', id_url)
return standard_json
def _get_oai_url_given_id_url(self, user_interface_url: str) -> str:
""" Define the ArchivesSpace URL to retrive a given identifier.
user will pass: https://archivesspace.library.nd.edu/repositories/3/resources/1644
we return: https://archivesspace.library.nd.edu/oai?verb=GetRecord&identifier=oai:und//repositories/3/resources/1644&metadataPrefix=oai_ead"""
url = user_interface_url.replace("/repositories", "/oai?verb=GetRecord&identifier=oai:und//repositories") + "&metadataPrefix=oai_ead"
url = url.replace("archivesspace.library.nd.edu", "archivesspace-api.library.nd.edu") # Added when we started hosting ArchivesSpace
return url
def _get_xml_string_given_oai_url(self, oai_url: str) -> str:
""" Given the oai url, return xml string, stripped of it's namespace information """
try:
xml_string = requests.get(oai_url, timeout=60).text
xml_string = self._strip_namespaces(xml_string)
except ConnectionError as e:
capture_exception(e)
xml_string = ""
print("ConnectionError calling " + oai_url)
except Exception as e:
capture_exception(e)
xml_string = ""
print("Error calling " + oai_url)
return xml_string
def _strip_namespaces(self, xml_string: str) -> str:
""" In order to simplify xml harvest, we must strip these namespaces """
namespaces_to_strip = ["ns4:", "ns3:", "ns2:", "ns1:", "ns0:",
"xlink:", "xmlns=\"http://www.openarchives.org/OAI/2.0/\"",
"xmlns=\"urn:isbn:1-931666-22-9\"",
"xmlns:ns1=\"http://www.w3.org/1999/xlink\"",
"xmlns:xlink=\"http://www.w3.org/1999/xlink\"",
"<i>", "</i>", '<emph render="italic">', '</emph>']
for string in namespaces_to_strip:
xml_string = xml_string.replace(string, "")
return xml_string
def _get_xml_tree_given_xml_string(self, xml_string: str, id_url: str) -> ElementTree:
""" translate the xml string into an ElementTree object for further use """
xml_tree = ElementTree.fromstring("<empty/>")
try:
xml_tree = ElementTree.fromstring(xml_string)
except ElementTree.ParseError as e:
print("Error converting to xml results of this url: " + id_url)
capture_exception(e)
return xml_tree
def _process_record(self, source_system_url: str, xml_record: ElementTree) -> dict:
""" Call a process to create ND.JSON from complex ArchivesSpace EAD xml """
standard_json = self.jsonFromXMLClass.get_standard_json_from_xml(xml_record)
return standard_json
def _get_modified_date_from_xml_record(self, xml_record: ElementTree) -> str:
""" Return modified date from xml_record """
return xml_record.find("./header/datestamp").text
def _get_id_from_xml_record(self, xml_record: ElementTree) -> str:
""" Return Item Id from xml_record """
return xml_record.find("./metadata/ead/eadheader/eadid").text
def _get_modified_date_from_dynamo(self, item_id: str) -> str:
""" Modified Date represents the date modified in the source system record stored in dynamo """
record_from_dynamo = get_item_record(self.config.get('website-metadata-tablename'), item_id)
return record_from_dynamo.get('modifiedDate', '')
def _get_process_record_flag(self, item_id: str, date_modified_in_source_system: str):
""" Process if forced, if no modifiedDate in source system, or if date in source system is newer than date (if any) in dynamo.
            Also process if the record does not exist in dynamo, or if either the validate_json file or the local control json file is newer than the date the dynamo record was last modified """
if self.config.get('forceSaveStandardJson', False):
return True
if not date_modified_in_source_system:
return True
record_from_dynamo = get_item_record(self.config.get('website-metadata-tablename'), item_id)
if not record_from_dynamo:
return True
modified_date_from_dynamo = record_from_dynamo.get('modifiedDate')
if date_modified_in_source_system > modified_date_from_dynamo:
return True
if self.validate_json_modified_date > record_from_dynamo.get('dateModifiedInDynamo'):
return True
if self.local_control_file_modified_date > record_from_dynamo.get('dateModifiedInDynamo'):
return True
return False
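# Illustrative usage below -- a minimal sketch, not part of the pipeline.
# The config keys are inferred from the lookups in this class, the table
# name is a hypothetical placeholder, and this assumes the repo's bundled
# dependencies are importable.
if __name__ == '__main__':
    example_config = {
        'archive-space-server-base-url': 'https://archivesspace-api.library.nd.edu',
        'website-metadata-tablename': 'exampleWebsiteMetadata',  # hypothetical
        'forceSaveStandardJson': False,
    }
    harvester = HarvestOaiEads(example_config)
    standard_json = harvester.get_standard_json_from_archives_space_url(
        'https://archivesspace.library.nd.edu/repositories/3/resources/1644')
    print(standard_json)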
| 57.057377 | 194 | 0.680075 |
a8b69e48e5e64fa478e412b0b6dc0f2d71868087 | 141 | py | Python | bestiary/context_processors.py | Itori/swarfarm | 7192e2d8bca093b4254023bbec42b6a2b1887547 | [
"Apache-2.0"
] | 66 | 2017-09-11T04:46:00.000Z | 2021-03-13T00:02:42.000Z | bestiary/context_processors.py | Itori/swarfarm | 7192e2d8bca093b4254023bbec42b6a2b1887547 | [
"Apache-2.0"
] | 133 | 2017-09-24T21:28:59.000Z | 2021-04-02T10:35:31.000Z | bestiary/context_processors.py | Itori/swarfarm | 7192e2d8bca093b4254023bbec42b6a2b1887547 | [
"Apache-2.0"
] | 28 | 2017-08-30T19:04:32.000Z | 2020-11-16T04:09:00.000Z | from .forms import BestiaryQuickSearchForm
def quick_search_form(request):
return {'bestiary_quick_search': BestiaryQuickSearchForm()}
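# Illustrative wiring (an assumption about this project's settings, which
# are not shown here): Django activates context processors through the
# TEMPLATES setting, e.g.
#
#     'OPTIONS': {
#         'context_processors': [
#             ...,
#             'bestiary.context_processors.quick_search_form',
#         ],
#     },
#
# after which any template can render {{ bestiary_quick_search }} directly.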
| 23.5 | 63 | 0.815603 |
e2a8579efd49a087eabe44b939399b1af6d15bfa | 1,555 | py | Python | we_do_secure/urls.py | Kou-Guandong/NYU_CS_6083_Database_Project_Part2 | 0e1bef0dd1d747c54e278363a773aac3826ab30b | [
"OML"
] | null | null | null | we_do_secure/urls.py | Kou-Guandong/NYU_CS_6083_Database_Project_Part2 | 0e1bef0dd1d747c54e278363a773aac3826ab30b | [
"OML"
] | null | null | null | we_do_secure/urls.py | Kou-Guandong/NYU_CS_6083_Database_Project_Part2 | 0e1bef0dd1d747c54e278363a773aac3826ab30b | [
"OML"
] | null | null | null | from django.contrib import admin
from django.conf import settings
from django.conf.urls.static import static
from rest_framework import routers
from insurance import views
from django.conf.urls import include, url
from django_registration.backends.one_step.views import RegistrationView
from django.urls import include, path
from django.contrib.auth import views as auth_views
# Routers provide an easy way of automatically determining the URL conf.
router = routers.DefaultRouter()
router.register(r'drivers', views.DriverViewSet)
router.register(r'customers', views.CustomerViewSet)
urlpatterns = \
[
path('', views.index, name='index'),
path('overview/', views.overview),
path('overviewAPI/', views.overview_api),
path('homes/', views.HomeList.as_view()),
path('homes/<int:pk>/', views.HomeDetail.as_view()),
path('customers/<int:pk>/', views.CustomerDetail.as_view()),
path('admin/', admin.site.urls),
path('accounts/register/',
RegistrationView.as_view(success_url='/accounts/profile/'),
name='django_registration_register'),
path('accounts/', include('django_registration.backends.one_step.urls')),
path('accounts/', include('django.contrib.auth.urls')),
path('accounts/profile/', views.user_profile),
path('api-auth/', include('rest_framework.urls')),
url(r'^', include(router.urls)),
] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
# urlpatterns = format_suffix_patterns(urlpatterns)
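# For reference: the DefaultRouter registrations above auto-generate the
# standard REST routes, e.g. GET /drivers/ (list) and GET /drivers/<pk>/
# (detail), and likewise for /customers/.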
| 40.921053 | 81 | 0.710611 |
15105d1c08d8e11e7aa68a516584b51cb578fc66 | 1,302 | py | Python | studies/SmartFitting/lasso.py | peterdsharpe/FastFlow3D | d02c8ff97fa84adcd9db988b09157695d9e2b318 | [
"MIT"
] | 1 | 2021-11-01T22:48:12.000Z | 2021-11-01T22:48:12.000Z | studies/SmartFitting/lasso.py | peterdsharpe/FastFlow3D | d02c8ff97fa84adcd9db988b09157695d9e2b318 | [
"MIT"
] | null | null | null | studies/SmartFitting/lasso.py | peterdsharpe/FastFlow3D | d02c8ff97fa84adcd9db988b09157695d9e2b318 | [
"MIT"
] | null | null | null | from data import x, y_data
import aerosandbox as asb
import aerosandbox.numpy as np
degree = 10
opti = asb.Opti()
coeffs = opti.variable(init_guess=np.zeros(degree + 1))
vandermonde = np.ones((len(x), degree + 1))
for j in range(1, degree + 1):
vandermonde[:, j] = vandermonde[:, j - 1] * x
y_model = vandermonde @ coeffs
error = np.sum((y_model - y_data) ** 2)
abs_coeffs = opti.variable(init_guess=np.zeros(degree + 1))
opti.subject_to([
abs_coeffs > coeffs,
abs_coeffs > -coeffs
])
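# The two constraints above are an epigraph reformulation of
# abs_coeffs >= |coeffs|: at the optimum, abs_coeffs equals |coeffs|, so
# penalizing np.sum(abs_coeffs) in the objective below acts as an L1
# (lasso) penalty that drives small polynomial coefficients toward zero.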
opti.minimize(
error + 1e-4 * np.sum(abs_coeffs)
)
sol = opti.solve(verbose=False)
if __name__ == '__main__':
import matplotlib.pyplot as plt
import seaborn as sns
sns.set(palette=sns.color_palette("husl"))
fig, ax = plt.subplots(1, 1, figsize=(6.4, 4.8), dpi=200)
x_plot = np.linspace(x[0], x[-1], 100)
vandermonde_plot = np.ones((len(x_plot), degree + 1))
for j in range(1, degree + 1):
vandermonde_plot[:, j] = vandermonde_plot[:, j - 1] * x_plot
y_plot = vandermonde_plot @ sol.value(coeffs)
plt.plot(x, y_data, ".")
plt.plot(x_plot, sol.value(y_plot), "-")
plt.show()
fig, ax = plt.subplots(1, 1, figsize=(6.4, 4.8), dpi=200)
plt.bar(
x=np.arange(degree + 1),
height=sol.value(coeffs)
)
plt.show() | 24.111111 | 68 | 0.631336 |
972c3f478f1ce14d8510c11a61807b1d39b6cdba | 114 | py | Python | nsxsdk/__init__.py | purple-dbu/nsx-python-sdk | 76f6dc3de874bd761f9eb07cbe5dede92fe91ecd | [
"MIT"
] | 7 | 2015-03-16T19:03:26.000Z | 2018-01-06T18:57:58.000Z | nsxsdk/__init__.py | rvichery/nsx-python-sdk | 76f6dc3de874bd761f9eb07cbe5dede92fe91ecd | [
"MIT"
] | null | null | null | nsxsdk/__init__.py | rvichery/nsx-python-sdk | 76f6dc3de874bd761f9eb07cbe5dede92fe91ecd | [
"MIT"
] | 5 | 2015-04-07T16:04:38.000Z | 2019-07-31T11:32:10.000Z | """NSX Python SDK"""
from . import edge
from . import utils
from . import firewall
from . import logicalswitches
| 16.285714 | 29 | 0.736842 |
8461cd26b275b1be82bebe649069e56e4c42e507 | 335 | py | Python | python/prob050.py | ChihoWon/Project-Euler | 5b645248ce79c1d7546243a1e4edf4b3d8b915e8 | [
"MIT"
] | 2 | 2017-06-29T11:42:18.000Z | 2019-04-07T23:20:31.000Z | python/prob050.py | ChihoWon/Project-Euler | 5b645248ce79c1d7546243a1e4edf4b3d8b915e8 | [
"MIT"
] | null | null | null | python/prob050.py | ChihoWon/Project-Euler | 5b645248ce79c1d7546243a1e4edf4b3d8b915e8 | [
"MIT"
] | null | null | null | from math import sqrt
def Sieve_of_Eratosthenes(n):
sieve = range(n + 1)
sieve[1] = 0
rn = int(round(sqrt(n)))
for x in range(2, rn + 1):
if sieve[x]:
sieve[x*x:n+1:x] = [0] * len(sieve[x*x:n+1:x])
return filter(lambda x: x != 0, sieve)
l = Sieve_of_Eratosthenes(100)
print sum(l[0:])
print l
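# Quick sanity check (Python 2, matching the code above):
# Sieve_of_Eratosthenes(10) returns [2, 3, 5, 7].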
| 22.333333 | 58 | 0.570149 |
82ce84c5ed3ca208f501c45dcd358fe2607c7ef6 | 50,194 | py | Python | src/sage/quivers/morphism.py | UCD4IDS/sage | 43474c96d533fd396fe29fe0782d44dc7f5164f7 | [
"BSL-1.0"
] | null | null | null | src/sage/quivers/morphism.py | UCD4IDS/sage | 43474c96d533fd396fe29fe0782d44dc7f5164f7 | [
"BSL-1.0"
] | null | null | null | src/sage/quivers/morphism.py | UCD4IDS/sage | 43474c96d533fd396fe29fe0782d44dc7f5164f7 | [
"BSL-1.0"
] | null | null | null | """
Quiver Morphisms
"""
# ****************************************************************************
# Copyright (C) 2012 Jim Stark <jstarx@gmail.com>
# 2013 Simon King <simon.king@uni-jena.de>
#
# Distributed under the terms of the GNU General Public License (GPL)
#
# This code is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty
# of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# See the GNU General Public License for more details; the full text
# is available at:
#
# https://www.gnu.org/licenses/
# ****************************************************************************
from sage.categories.morphism import CallMorphism
from sage.matrix.constructor import Matrix
class QuiverRepHom(CallMorphism):
r"""
A homomorphism of quiver representations (of one and the same quiver)
is given by specifying, for each vertex of the quiver, a homomorphism
of the spaces assigned to this vertex such that these homomorphisms
commute with the edge maps. The domain and codomain of the
homomorphism are required to be representations of the same quiver
over the same base ring.
INPUT:
- ``domain`` -- :class:`QuiverRep`, the domain of the homomorphism
- ``codomain`` -- :class:`QuiverRep`, the codomain of the homomorphism
    - ``data`` -- dict, list, or :class:`QuiverRepElement`
(default: empty dict),
with the following meaning:
- list: ``data`` can be a list of images for the generators of
the domain. "Generators" means the output of the ``gens()``
method. An error will be generated if the map so defined
is not equivariant with respect to the action of the quiver.
- dictionary: ``data`` can be a dictionary associating to each
vertex of the quiver either a homomorphism with domain and
codomain the spaces associated to this vertex in the domain
and codomain modules respectively, or a matrix defining such
a homomorphism, or an object that sage can construct such a
matrix from. Not all vertices must be specified, unspecified
vertices are assigned the zero map, and keys not corresponding
to vertices of the quiver are ignored. An error will be
generated if these maps do not commute with the edge maps of
the domain and codomain.
- :class:`QuiverRepElement`: if the domain is a
:class:`QuiverRep_with_path_basis` then ``data`` can be a single
:class:`QuiverRepElement` belonging to the codomain. The map is
then defined by sending each path, ``p``, in the basis to ``data*p``.
If ``data`` is not an element of the codomain or the domain is not a
:class:`QuiverRep_with_path_basis` then an error will be generated.
- :class:`QuiverRepHom`: the input can also be a map `f : D \to C` such
that there is a coercion from the domain of ``self`` to ``D``
and from ``C`` to the codomain of ``self``. The composition
of these maps is the result.
OUTPUT:
- :class:`QuiverRepHom`
EXAMPLES::
sage: Q = DiGraph({1:{2:['a', 'b']}, 2:{3:['c']}}).path_semigroup()
sage: spaces = {1: QQ^2, 2: QQ^2, 3:QQ^1}
sage: maps = {(1, 2, 'a'): [[1, 0], [0, 0]], (1, 2, 'b'): [[0, 0], [0, 1]], (2, 3, 'c'): [[1], [1]]}
sage: M = Q.representation(QQ, spaces, maps)
sage: spaces2 = {2: QQ^1, 3: QQ^1}
sage: S = Q.representation(QQ, spaces2)
With no additional data this creates the zero map::
sage: f = S.hom(M)
sage: f.is_zero()
True
We must specify maps at the vertices to get a nonzero homomorphism.
Note that if the dimensions of the spaces assigned to the domain and
codomain of a vertex are equal then Sage will construct the identity
matrix from ``1``::
sage: maps2 = {2:[1, -1], 3:1}
sage: g = S.hom(maps2, M)
Here we create the same map by specifying images for the generators::
sage: x = M({2: (1, -1)})
sage: y = M({3: (1,)})
sage: h = S.hom([x, y], M)
sage: g == h
True
If the domain is a module of type QuiverRep_with_path_basis (for example,
    the indecomposable projectives), we can create maps by specifying a single
image::
sage: Proj = Q.P(GF(7), 3)
sage: Simp = Q.S(GF(7), 3)
sage: im = Simp({3: (1,)})
sage: Proj.hom(im, Simp).is_surjective()
True
"""
###########################################################################
# #
# PRIVATE FUNCTIONS #
# These functions are not meant to be seen by the end user. #
# #
###########################################################################
def __init__(self, domain, codomain, data={}):
"""
Initialize ``self``. Type ``QuiverRepHom?`` for more information.
TESTS::
sage: Q = DiGraph({1:{2:['a', 'b']}, 2:{3:['c']}}).path_semigroup()
sage: spaces = {1: QQ^2, 2: QQ^2, 3:QQ^1}
sage: maps = {(1, 2, 'a'): [[1, 0], [0, 0]], (1, 2, 'b'): [[0, 0], [0, 1]], (2, 3, 'c'): [[1], [1]]}
sage: M = Q.representation(QQ, spaces, maps)
sage: spaces2 = {2: QQ^1, 3: QQ^1}
sage: S = Q.representation(QQ, spaces2)
sage: f = S.hom(M)
sage: f.is_zero()
True
sage: maps2 = {2:[1, -1], 3:1}
sage: g = S.hom(maps2, M)
sage: x = M({2: (1, -1)})
sage: y = M({3: (1,)})
sage: h = S.hom([x, y], M)
sage: g == h
True
sage: Proj = Q.P(GF(7), 3)
sage: Simp = Q.S(GF(7), 3)
sage: im = Simp({3: (1,)})
sage: Proj.hom(im, Simp).is_surjective()
True
::
sage: Q = DiGraph({1:{2:['a']}}).path_semigroup()
sage: H1 = Q.P(GF(3), 2).Hom(Q.S(GF(3), 2))
sage: H2 = Q.P(GF(3), 2).Hom(Q.S(GF(3), 1))
sage: H1.an_element() in H1 # indirect doctest
True
"""
# The data of a representation is held in the following private
# variables:
#
# * _quiver
# The quiver of the representation.
# * _base_ring
# The base ring of the representation.
# * _domain
# The QuiverRep object that is the domain of the homomorphism.
# * _codomain
# The QuiverRep object that is the codomain of the homomorphism.
# * _vector
# A vector in some free module over the base ring of a length such
# that each coordinate corresponds to an entry in the matrix of a
# homomorphism attached to a vertex.
#
# The variable data can also be a vector of appropriate length. When
# this is the case it will be loaded directly into _vector and then
# _assert_valid_hom is called.
from sage.quivers.representation import QuiverRepElement, QuiverRep_with_path_basis
self._domain = domain
self._codomain = codomain
self._quiver = domain._quiver
self._base_ring = domain.base_ring()
# Check that the quiver and base ring match
if codomain._quiver != self._quiver:
raise ValueError("the quivers of the domain and codomain must be equal")
if codomain.base_ring() != self._base_ring:
raise ValueError("the base ring of the domain and codomain must be equal")
# Get the dimensions of the spaces
mat_dims = {}
domain_dims = {}
codomain_dims = {}
for v in self._quiver:
domain_dims[v] = domain._spaces[v].dimension()
codomain_dims[v] = codomain._spaces[v].dimension()
mat_dims[v] = domain_dims[v] * codomain_dims[v]
total_dim = sum(mat_dims.values())
# Handle the case when data is a vector
if data in self._base_ring**total_dim:
self._vector = data
self._assert_valid_hom()
super(QuiverRepHom, self).__init__(domain.Hom(codomain))
return
# If data is not a dict, create one
if isinstance(data, dict):
maps_dict = data
else:
# If data is not a list create one, then create a dict from it
if isinstance(data, list):
im_list = data
else:
# If data is a QuiverRepHom, create a list from it
if isinstance(data, QuiverRepHom):
f = data._domain.coerce_map_from(domain)
g = self._codomain.coerce_map_from(data._codomain)
im_list = [g(data(f(x))) for x in domain.gens()]
# The only case left is that data is a QuiverRepElement
else:
if not isinstance(data, QuiverRepElement):
raise TypeError("input data must be dictionary, list, "
"QuiverRepElement or vector")
if not isinstance(domain, QuiverRep_with_path_basis):
raise TypeError("if data is a QuiverRepElement then domain "
"must be a QuiverRep_with_path_basis.")
if data not in codomain:
raise ValueError("if data is a QuiverRepElement then it must "
"be an element of codomain")
im_list = [codomain.right_edge_action(data, p) for v in domain._quiver for p in domain._bases[v]]
# WARNING: This code assumes that the function QuiverRep.gens() returns
# the generators ordered first by vertex and then by the order of the
# gens() method of the space associated to that vertex. In particular
# this is the order that corresponds to how maps are represented via
# matrices
# Get the gens of the domain and check that im_list is the right length
dom_gens = domain.gens()
if len(im_list) != len(dom_gens):
raise ValueError(("domain is dimension {} but only {} images"
" were supplied").format(len(dom_gens), len(im_list)))
# Get the matrices of the maps
start_index = 0
maps_dict = {}
for v in self._quiver:
maps_dict[v] = []
dim = domain._spaces[v].dimension()
for i in range(start_index, start_index + dim):
if im_list[i].support() and im_list[i].support() != [v]:
# If the element does not have the correct
# support raise an error here, otherwise we
# might create a valid hom that does not map
# the generators to the supplied images
txt = "generator supported at vertex {} cannot"
txt += " map to element with support {}"
raise ValueError(txt.format(v, im_list[i].support()))
else:
# If the support works out add the images coordinates
# as a row of the matrix
maps_dict[v].append(codomain._spaces[v].coordinates(im_list[i]._elems[v]))
start_index += dim
# Get the coordinates of the vector
from sage.categories.map import is_Map
vector = []
for v in self._quiver:
if v in maps_dict:
if is_Map(maps_dict[v]):
try:
m = maps_dict[v].matrix()
except (AttributeError, ValueError):
gens_images = [codomain._spaces[v].coordinate_vector(maps_dict[v](x))
for x in domain._spaces[v].gens()]
m = Matrix(self._base_ring, domain_dims[v], codomain_dims[v], gens_images)
else:
m = Matrix(self._base_ring, domain_dims[v], codomain_dims[v], maps_dict[v])
else:
m = Matrix(self._base_ring, domain_dims[v], codomain_dims[v])
for i in range(domain_dims[v]):
vector += list(m[i])
# Wrap as a vector, check it, and return
self._vector = (self._base_ring**total_dim)(vector)
self._assert_valid_hom()
super(QuiverRepHom, self).__init__(domain.Hom(codomain))
def _repr_(self):
"""
Default string representation.
TESTS::
sage: Q = DiGraph({1:{2:['a', 'b']}, 2:{3:['c']}}).path_semigroup()
sage: spaces = {1: QQ^2, 2: QQ^2, 3:QQ^1}
sage: maps = {(1, 2, 'a'): [[1, 0], [0, 0]], (1, 2, 'b'): [[0, 0], [0, 1]], (2, 3, 'c'): [[1], [1]]}
sage: M = Q.representation(QQ, spaces, maps)
sage: spaces2 = {2: QQ^1, 3: QQ^1}
sage: S = Q.representation(QQ, spaces2)
sage: S.hom(M) # indirect doctest
Homomorphism of representations of Multi-digraph on 3 vertices
"""
return "Homomorphism of representations of " + repr(self._quiver)
def _call_(self, x):
"""
TESTS::
sage: Q = DiGraph({1:{2:['a', 'b']}, 2:{3:['c']}}).path_semigroup()
sage: spaces = {1: QQ^2, 2: QQ^2, 3:QQ^1}
sage: maps = {(1, 2, 'a'): [[1, 0], [0, 0]], (1, 2, 'b'): [[0, 0], [0, 1]], (2, 3, 'c'): [[1], [1]]}
sage: M = Q.representation(QQ, spaces, maps)
sage: spaces2 = {2: QQ^1, 3: QQ^1}
sage: S = Q.representation(QQ, spaces2)
sage: x = M({2: (1, -1)})
sage: y = M({3: (1,)})
sage: h = S.hom([x, y], M)
sage: h(S.gens()[0]) == x
True
sage: h(S.gens()[1]) == y
True
The following was an issue during work on :trac:`12630`::
sage: Q = DiGraph({1: {}}).path_semigroup()
sage: M = Q.I(GF(3), 1)
sage: m = M.an_element()
sage: R = M.quotient(M)
sage: R(m)
Element of quiver representation
"""
from sage.quivers.representation import QuiverRepElement
# Check the input
if not isinstance(x, QuiverRepElement):
raise ValueError("QuiverRepHom can only be called on QuiverRepElement")
elements = dict((v, self.get_map(v)(x._elems[v])) for v in self._quiver)
return self._codomain(elements)
def __add__(left, right):
"""
This function overloads the ``+`` operator.
TESTS::
sage: Q = DiGraph({1:{2:['a', 'b']}, 2:{3:['c']}}).path_semigroup()
sage: spaces = {1: QQ^2, 2: QQ^2, 3:QQ^1}
sage: maps = {(1, 2, 'a'): [[1, 0], [0, 0]], (1, 2, 'b'): [[0, 0], [0, 1]], (2, 3, 'c'): [[1], [1]]}
sage: M = Q.representation(QQ, spaces, maps)
sage: spaces2 = {2: QQ^1, 3: QQ^1}
sage: S = Q.representation(QQ, spaces2)
sage: x = M({2: (1, -1)})
sage: z = M.zero()
sage: h = S.hom([x, z], M)
sage: g = S.hom([z, z], M)
sage: f = g + h
sage: f(S.gens()[0]) == x
True
sage: f(S.gens()[1]) == z
True
"""
new_vector = left._vector + right._vector
return left._domain.hom(new_vector, left._codomain)
def __iadd__(self, other):
"""
This function overloads the ``+=`` operator.
TESTS::
sage: Q = DiGraph({1:{2:['a', 'b']}, 2:{3:['c']}}).path_semigroup()
sage: spaces = {1: QQ^2, 2: QQ^2, 3:QQ^1}
sage: maps = {(1, 2, 'a'): [[1, 0], [0, 0]], (1, 2, 'b'): [[0, 0], [0, 1]], (2, 3, 'c'): [[1], [1]]}
sage: M = Q.representation(QQ, spaces, maps)
sage: spaces2 = {2: QQ^1, 3: QQ^1}
sage: S = Q.representation(QQ, spaces2)
sage: x = M({2: (1, -1)})
sage: z = M.zero()
sage: h = S.hom([x, z], M)
sage: g = S.hom([z, z], M)
sage: g += h
sage: g(S.gens()[0]) == x
True
sage: g(S.gens()[1]) == z
True
"""
self._vector += other._vector
return self
def __sub__(left, right):
"""
This function overloads the ``-`` operator.
TESTS::
sage: Q = DiGraph({1:{2:['a', 'b']}, 2:{3:['c']}}).path_semigroup()
sage: spaces = {1: QQ^2, 2: QQ^2, 3:QQ^1}
sage: maps = {(1, 2, 'a'): [[1, 0], [0, 0]], (1, 2, 'b'): [[0, 0], [0, 1]], (2, 3, 'c'): [[1], [1]]}
sage: M = Q.representation(QQ, spaces, maps)
sage: spaces2 = {2: QQ^1, 3: QQ^1}
sage: S = Q.representation(QQ, spaces2)
sage: x = M({2: (1, -1)})
sage: y = M({3: (1,)})
sage: z = M.zero()
sage: h = S.hom([x, z], M)
sage: g = S.hom([z, y], M)
sage: f = h - g
sage: f(S.gens()[0]) == x
True
sage: f(S.gens()[1]) == -y
True
"""
new_vector = left._vector - right._vector
return left._domain.hom(new_vector, left._codomain)
def __isub__(self, other):
"""
This function overloads the ``-=`` operator.
TESTS::
sage: Q = DiGraph({1:{2:['a', 'b']}, 2:{3:['c']}}).path_semigroup()
sage: spaces = {1: QQ^2, 2: QQ^2, 3:QQ^1}
sage: maps = {(1, 2, 'a'): [[1, 0], [0, 0]], (1, 2, 'b'): [[0, 0], [0, 1]], (2, 3, 'c'): [[1], [1]]}
sage: M = Q.representation(QQ, spaces, maps)
sage: spaces2 = {2: QQ^1, 3: QQ^1}
sage: S = Q.representation(QQ, spaces2)
sage: x = M({2: (1, -1)})
sage: y = M({3: (1,)})
sage: z = M.zero()
sage: h = S.hom([x, z], M)
sage: g = S.hom([z, y], M)
sage: h -= g
sage: h(S.gens()[0]) == x
True
sage: h(S.gens()[1]) == -y
True
"""
self._vector -= other._vector
return self
def __neg__(self):
"""
This function overrides the unary ``-`` operator
TESTS::
sage: Q = DiGraph({1:{2:['a', 'b']}, 2:{3:['c']}}).path_semigroup()
sage: spaces = {1: QQ^2, 2: QQ^2, 3:QQ^1}
sage: maps = {(1, 2, 'a'): [[1, 0], [0, 0]], (1, 2, 'b'): [[0, 0], [0, 1]], (2, 3, 'c'): [[1], [1]]}
sage: M = Q.representation(QQ, spaces, maps)
sage: spaces2 = {2: QQ^1, 3: QQ^1}
sage: S = Q.representation(QQ, spaces2)
sage: x = M({2: (1, -1)})
sage: y = M({3: (1,)})
sage: h = S.hom([x, y], M)
sage: g = -h
sage: g(S.gens()[0]) == -x
True
sage: g(S.gens()[1]) == -y
True
"""
return self._domain.hom(-self._vector, self._codomain)
def __pos__(self):
"""
This function overrides the unary ``+`` operator
TESTS::
sage: Q = DiGraph({1:{2:['a', 'b']}, 2:{3:['c']}}).path_semigroup()
sage: spaces = {1: QQ^2, 2: QQ^2, 3:QQ^1}
sage: maps = {(1, 2, 'a'): [[1, 0], [0, 0]], (1, 2, 'b'): [[0, 0], [0, 1]], (2, 3, 'c'): [[1], [1]]}
sage: M = Q.representation(QQ, spaces, maps)
sage: spaces2 = {2: QQ^1, 3: QQ^1}
sage: S = Q.representation(QQ, spaces2)
sage: x = M({2: (1, -1)})
sage: y = M({3: (1,)})
sage: h = S.hom([x, y], M)
sage: g = +h
sage: g == h
True
"""
return self
def __eq__(self, other):
"""
This function overrides the ``==`` operator
TESTS::
sage: Q = DiGraph({1:{2:['a', 'b']}, 2:{3:['c']}}).path_semigroup()
sage: spaces = {1: QQ^2, 2: QQ^2, 3:QQ^1}
sage: maps = {(1, 2, 'a'): [[1, 0], [0, 0]], (1, 2, 'b'): [[0, 0], [0, 1]], (2, 3, 'c'): [[1], [1]]}
sage: M = Q.representation(QQ, spaces, maps)
sage: spaces2 = {2: QQ^1, 3: QQ^1}
sage: S = Q.representation(QQ, spaces2)
sage: x = M({2: (1, -1)})
sage: y = M({3: (1,)})
sage: g = S.hom([x, y], M)
sage: h = S.hom([x, y], M)
sage: g == h
True
"""
# A homomorphism can only be equal to another homomorphism between the
# same domain and codomain
if not isinstance(other, QuiverRepHom) or self._domain != other._domain or self._codomain != other._codomain:
return False
# If all that holds just check the vectors
return self._vector == other._vector
def __hash__(self):
"""
Return the hash of ``self``.
EXAMPLES::
sage: Q = DiGraph({1:{2:['a', 'b']}, 2:{3:['c']}}).path_semigroup()
sage: spaces = {1: QQ^2, 2: QQ^2, 3:QQ^1}
sage: maps = {(1, 2, 'a'): [[1, 0], [0, 0]], (1, 2, 'b'): [[0, 0], [0, 1]], (2, 3, 'c'): [[1], [1]]}
sage: M = Q.representation(QQ, spaces, maps)
sage: spaces2 = {2: QQ^1, 3: QQ^1}
sage: S = Q.representation(QQ, spaces2)
sage: x = M({2: (1, -1)})
sage: y = M({3: (1,)})
sage: g = S.hom([x, y], M)
sage: H = hash(g)
"""
return hash(tuple(self._vector))
def __ne__(self, other):
"""
This function overrides the ``!=`` operator
TESTS::
sage: Q = DiGraph({1:{2:['a', 'b']}, 2:{3:['c']}}).path_semigroup()
sage: spaces = {1: QQ^2, 2: QQ^2, 3:QQ^1}
sage: maps = {(1, 2, 'a'): [[1, 0], [0, 0]], (1, 2, 'b'): [[0, 0], [0, 1]], (2, 3, 'c'): [[1], [1]]}
sage: M = Q.representation(QQ, spaces, maps)
sage: spaces2 = {2: QQ^1, 3: QQ^1}
sage: S = Q.representation(QQ, spaces2)
sage: x = M({2: (1, -1)})
sage: y = M({3: (1,)})
sage: z = M.zero()
sage: g = S.hom([x, y], M)
sage: h = S.hom([x, z], M)
sage: g != h
True
"""
# A homomorphism can only be equal to another homomorphism between the
# same domain and codomain
if not isinstance(other, QuiverRepHom) or self._domain != other._domain or self._codomain != other._codomain:
return True
# If all that holds just check the vectors
return self._vector != other._vector
def __bool__(self):
"""
Return whether ``self`` is the zero morphism.
EXAMPLES::
sage: Q = DiGraph({1:{2:['a', 'b']}, 2:{3:['c']}}).path_semigroup()
sage: spaces = {1: QQ^2, 2: QQ^2, 3:QQ^1}
sage: maps = {(1, 2, 'a'): [[1, 0], [0, 0]], (1, 2, 'b'): [[0, 0], [0, 1]], (2, 3, 'c'): [[1], [1]]}
sage: M = Q.representation(QQ, spaces, maps)
sage: spaces2 = {2: QQ^1, 3: QQ^1}
sage: S = Q.representation(QQ, spaces2)
sage: x = M({2: (1, -1)})
sage: y = M({3: (1,)})
sage: z = M.zero()
sage: g = S.hom([x, y], M)
sage: h = S.hom([z, z], M)
sage: bool(g)
True
sage: bool(h)
False
"""
return any(self._vector)
__nonzero__ = __bool__
def __mul__(self, other):
"""
This function overrides the ``*`` operator
TESTS::
sage: Q = DiGraph({1:{2:['a', 'b']}, 2:{3:['c']}}).path_semigroup()
sage: spaces2 = {2: QQ^1, 3: QQ^1}
sage: S = Q.representation(QQ, spaces2)
sage: x = S.gens()[0]
sage: y = S.gens()[1]
sage: g = S.hom([x, y], S)
sage: h = S.hom(S)
sage: (g*h).is_zero()
True
"""
maps = {v: other.get_matrix(v) * self.get_matrix(v)
for v in self._quiver}
return other._domain.hom(maps, self._codomain)
###########################################################################
# #
# WELL DEFINEDNESS FUNCTIONS #
# These functions test and assert well definedness of the #
# homomorphism. #
# #
###########################################################################
def _assert_valid_hom(self):
"""
Raise a ``ValueError`` if the homomorphism is not well defined.
Specifically it checks that the domain and codomains of the maps are
correct and that the edge diagrams commute.
EXAMPLES::
sage: Q = DiGraph({1:{2:['a', 'b']}, 2:{3:['c']}}).path_semigroup()
sage: spaces = {1: QQ^2, 2: QQ^2, 3:QQ^1}
sage: maps = {(1, 2, 'a'): [[1, 0], [0, 0]], (1, 2, 'b'): [[0, 0], [0, 1]], (2, 3, 'c'): [[1], [1]]}
sage: M = Q.representation(QQ, spaces, maps)
sage: spaces2 = {2: QQ^1, 3: QQ^1}
sage: S = Q.representation(QQ, spaces2)
sage: maps2 = {2:[1, -1], 3:1}
sage: g = S.hom(maps2, M) # indirect doctest
sage: f = S.hom(maps2, S) # indirect doctest
Traceback (most recent call last):
...
TypeError: unable to convert {2: [1, -1], 3: 1} to an element of Dimension 2 QuiverHomSpace
"""
# Check that the domain and codomains dimensions add correctly
totaldim = 0
for v in self._quiver:
totaldim += self._domain._spaces[v].dimension() * self._codomain._spaces[v].dimension()
if totaldim != len(self._vector):
raise ValueError("dimensions do not match domain and codomain")
# Check that the edge diagrams commute
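        # That is, for each edge e: u -> v we require
        # f_u * N_e == M_e * f_v, where M_e and N_e are the edge matrices of
        # the domain and codomain and f_u, f_v are this homomorphism's vertex
        # matrices (all acting on row vectors).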
for e in self._domain._semigroup._sorted_edges:
if self.get_matrix(e[0]) * self._codomain._maps[e].matrix() != self._domain._maps[e].matrix() * self.get_matrix(e[1]):
raise ValueError("the diagram of edge {} does not commute".format(e))
###########################################################################
# #
# ACCESS FUNCTIONS #
# These functions are used to view the homomorphism data. #
# #
###########################################################################
def domain(self):
"""
Return the domain of the homomorphism.
OUTPUT:
- :class:`QuiverRep`, the domain
EXAMPLES::
sage: Q = DiGraph({1:{2:['a', 'b']}, 2:{3:['c']}}).path_semigroup()
sage: spaces = {1: QQ^2, 2: QQ^2, 3:QQ^1}
sage: maps = {(1, 2, 'a'): [[1, 0], [0, 0]], (1, 2, 'b'): [[0, 0], [0, 1]], (2, 3, 'c'): [[1], [1]]}
sage: M = Q.representation(QQ, spaces, maps)
sage: S = Q.representation(QQ)
sage: g = M.hom(S)
sage: g.domain() is M
True
"""
return self._domain
def codomain(self):
"""
Return the codomain of the homomorphism.
OUTPUT:
- :class:`QuiverRep`, the codomain
EXAMPLES::
sage: Q = DiGraph({1:{2:['a', 'b']}, 2:{3:['c']}}).path_semigroup()
sage: spaces = {1: QQ^2, 2: QQ^2, 3:QQ^1}
sage: maps = {(1, 2, 'a'): [[1, 0], [0, 0]], (1, 2, 'b'): [[0, 0], [0, 1]], (2, 3, 'c'): [[1], [1]]}
sage: M = Q.representation(QQ, spaces, maps)
sage: S = Q.representation(QQ)
sage: g = S.hom(M)
sage: g.codomain() is M
True
"""
return self._codomain
def get_matrix(self, vertex):
"""
Return the matrix of the homomorphism attached to vertex
``vertex``.
INPUT:
- ``vertex`` -- integer, a vertex of the quiver
OUTPUT:
- matrix, the matrix representing the homomorphism associated to
the given vertex
EXAMPLES::
sage: Q = DiGraph({1:{2:['a', 'b']}, 2:{3:['c']}}).path_semigroup()
sage: I = Q.I(QQ, 3)
sage: M = I/I.radical()
sage: f = M.coerce_map_from(I)
sage: f.get_matrix(1)
[1 0]
[0 1]
"""
# Get dimensions
startdim = 0
for v in self._quiver:
if v == vertex:
break
startdim += self._domain._spaces[v].dimension() * self._codomain._spaces[v].dimension()
rows = self._domain._spaces[vertex].dimension()
cols = self._codomain._spaces[vertex].dimension()
# Slice out the matrix and return
return Matrix(self._base_ring, rows, cols,
self._vector.list()[startdim:startdim + rows * cols])
def get_map(self, vertex):
"""
Return the homomorphism at the given vertex ``vertex``.
INPUT:
- ``vertex`` -- integer, a vertex of the quiver
OUTPUT:
- homomorphism, the homomorphism associated to the given vertex
EXAMPLES::
sage: Q = DiGraph({1:{2:['a', 'b']}, 2:{3:['c']}}).path_semigroup()
sage: P = Q.P(QQ, 1)
sage: S = P/P.radical()
sage: f = S.coerce_map_from(P)
sage: f.get_map(1).is_bijective()
True
"""
return self._domain._spaces[vertex].hom(self.get_matrix(vertex), self._codomain._spaces[vertex])
def quiver(self):
"""
Return the quiver of the representations in the domain/codomain.
OUTPUT:
- :class:`DiGraph`, the quiver of the representations in the domain
and codomain
EXAMPLES::
sage: Q = DiGraph({1:{2:['a', 'b']}, 2:{3:['c']}}).path_semigroup()
sage: P = Q.P(QQ, 1)
sage: f = P.hom({1: 1, 2: 1, 3: 1}, P)
sage: f.quiver() is Q.quiver()
True
"""
return self._quiver
def base_ring(self):
"""
Return the base ring of the representation in the codomain.
OUTPUT:
- ring, the base ring of the codomain
EXAMPLES::
sage: Q = DiGraph({1:{2:['a', 'b']}, 2:{3:['c']}}).path_semigroup()
sage: P = Q.P(QQ, 1)
sage: f = P.hom({1: 1, 2: 1, 3: 1}, P)
sage: f.base_ring() is QQ
True
"""
return self._base_ring
###########################################################################
# #
# DATA FUNCTIONS #
# These functions return data collected from the homomorphism. #
# #
###########################################################################
def is_injective(self):
"""
Test whether the homomorphism is injective.
OUTPUT:
- bool, ``True`` if the homomorphism is injective, ``False`` otherwise
EXAMPLES::
sage: Q = DiGraph({1:{2:['a', 'b']}, 2:{3:['c']}}).path_semigroup()
sage: P = Q.P(QQ, 1)
sage: f = P.hom({1: 1, 2: 1, 3: 1}, P)
sage: f.is_injective()
True
sage: g = P.hom(P)
sage: g.is_injective()
False
"""
# The homomorphism is injective if and only if it is injective at every
# vertex
return not any(self.get_matrix(v).nullity() for v in self._quiver)
def is_surjective(self):
"""
Test whether the homomorphism is surjective.
OUTPUT:
- bool, ``True`` if the homomorphism is surjective, ``False`` otherwise
EXAMPLES::
sage: Q = DiGraph({1:{2:['a', 'b']}, 2:{3:['c']}}).path_semigroup()
sage: P = Q.P(QQ, 1)
sage: f = P.hom({1: 1, 2: 1, 3: 1}, P)
sage: f.is_surjective()
True
sage: g = P.hom(P)
sage: g.is_surjective()
False
"""
# The homomorphism is surjective if and only if it is surjective at
# every vertex
for v in self._quiver:
m = self.get_matrix(v)
if m.rank() != m.ncols():
return False
return True
def is_isomorphism(self):
"""
Test whether the homomorphism is an isomorphism.
OUTPUT:
- bool, ``True`` if the homomorphism is bijective, ``False``
otherwise
EXAMPLES::
sage: Q = DiGraph({1:{2:['a', 'b']}, 2:{3:['c']}}).path_semigroup()
sage: P = Q.P(QQ, 1)
sage: f = P.hom({1: 1, 2: 1, 3: 1}, P)
sage: f.is_isomorphism()
True
sage: g = P.hom(P)
sage: g.is_isomorphism()
False
"""
# It's an iso if and only if it's an iso at every vertex
return all(self.get_matrix(v).is_invertible() for v in self._quiver)
def is_zero(self):
"""
Test whether the homomorphism is the zero homomorphism.
OUTPUT:
- bool, ``True`` if the homomorphism is zero, ``False`` otherwise
EXAMPLES::
sage: Q = DiGraph({1:{2:['a', 'b']}, 2:{3:['c']}}).path_semigroup()
sage: P = Q.P(QQ, 1)
sage: f = P.hom({1: 1, 2: 1, 3: 1}, P)
sage: f.is_zero()
False
sage: g = P.hom(P)
sage: g.is_zero()
True
"""
# The homomorphism is zero if and only if it is zero at every vertex
return all(self.get_matrix(v).is_zero() for v in self._quiver)
def is_endomorphism(self):
"""
Test whether the homomorphism is an endomorphism.
OUTPUT:
- bool, ``True`` if the domain equals the codomain, ``False``
otherwise
EXAMPLES::
sage: Q = DiGraph({1:{2:['a', 'b']}, 2:{3:['c']}}).path_semigroup()
sage: P = Q.P(QQ, 1)
sage: f = P.hom({1: 1, 2: 1, 3: 1}, P)
sage: f.is_endomorphism()
True
sage: S = P/P.radical()
sage: g = S.coerce_map_from(P)
sage: g.is_endomorphism()
False
"""
return self._domain == self._codomain
def rank(self):
"""
Return the rank of the homomorphism ``self`` (as a `k`-linear
map).
OUTPUT:
- integer, the rank
EXAMPLES::
sage: Q = DiGraph({1:{2:['a', 'b']}, 2:{3:['c']}}).path_semigroup()
sage: P = Q.P(QQ, 1)
sage: S = P/P.radical()
sage: f = S.coerce_map_from(P)
sage: assert(f.rank() == 1)
"""
# The rank is the sum of the ranks at each vertex
return sum(self.get_matrix(v).rank() for v in self._quiver)
###########################################################################
# #
# CONSTRUCTION FUNCTIONS #
# These functions create new homomorphisms, representations, and #
# elements from the given homomorphism. #
# #
###########################################################################
def kernel(self):
"""
Return the kernel of ``self``.
OUTPUT:
- :class:`QuiverRep`, the kernel
.. NOTE::
To get the inclusion map of the kernel, ``K``, into the
domain, ``D``, use ``D.coerce_map_from(K)``.
EXAMPLES::
sage: Q = DiGraph({1:{2:['a', 'b']}, 2:{3:['c']}}).path_semigroup()
sage: spaces = {1: QQ^2, 2: QQ^2, 3:QQ^1}
sage: maps = {(1, 2, 'a'): [[1, 0], [0, 0]], (1, 2, 'b'): [[0, 0], [0, 1]], (2, 3, 'c'): [[1], [1]]}
sage: M = Q.representation(QQ, spaces, maps)
sage: spaces2 = {2: QQ^2, 3: QQ^1}
sage: N = Q.representation(QQ, spaces2, {(2, 3, 'c'): [[1], [0]]})
sage: maps2 = {2:[[1, 0], [0, 0]], 3:1}
sage: g = N.hom(maps2, M)
sage: g.kernel().dimension_vector()
(0, 1, 0)
"""
spaces = dict((v, self.get_map(v).kernel()) for v in self._quiver)
return self._domain._submodule(spaces)
def image(self):
"""
Return the image of ``self``.
OUTPUT:
- :class:`QuiverRep`, the image
.. NOTE::
To get the inclusion map of the image, ``I``, into the
codomain, ``C``, use ``C.coerce_map_from(I)``.
EXAMPLES::
sage: Q = DiGraph({1:{2:['a', 'b']}, 2:{3:['c']}}).path_semigroup()
sage: spaces = {1: QQ^2, 2: QQ^2, 3:QQ^1}
sage: maps = {(1, 2, 'a'): [[1, 0], [0, 0]], (1, 2, 'b'): [[0, 0], [0, 1]], (2, 3, 'c'): [[1], [1]]}
sage: M = Q.representation(QQ, spaces, maps)
sage: spaces2 = {2: QQ^2, 3: QQ^1}
sage: N = Q.representation(QQ, spaces2, {(2, 3, 'c'): [[1], [0]]})
sage: maps2 = {2:[[1, 0], [0, 0]], 3:1}
sage: g = N.hom(maps2, M)
sage: g.image().dimension_vector()
(0, 1, 1)
"""
spaces = dict((v, self.get_map(v).image()) for v in self._quiver)
return self._codomain._submodule(spaces)
def cokernel(self):
"""
Return the cokernel of ``self``.
OUTPUT:
- :class:`QuiverRep`, the cokernel
.. NOTE::
To get the factor map of the codomain, ``D``, onto the
cokernel, ``C``, use ``C.coerce_map_from(D)``.
EXAMPLES::
sage: Q = DiGraph({1:{2:['a', 'b']}, 2:{3:['c']}}).path_semigroup()
sage: spaces = {1: QQ^2, 2: QQ^2, 3:QQ^1}
sage: maps = {(1, 2, 'a'): [[1, 0], [0, 0]], (1, 2, 'b'): [[0, 0], [0, 1]], (2, 3, 'c'): [[1], [1]]}
sage: M = Q.representation(QQ, spaces, maps)
sage: spaces2 = {2: QQ^2, 3: QQ^1}
sage: N = Q.representation(QQ, spaces2, {(2, 3, 'c'): [[1], [0]]})
sage: maps2 = {2:[[1, 0], [0, 0]], 3:1}
sage: g = N.hom(maps2, M)
sage: g.cokernel().dimension_vector()
(2, 1, 0)
"""
return self._codomain.quotient(self.image())
def linear_dual(self):
r"""
Compute the linear dual `Df : DN \to DM` of
``self`` = `f : M \to N` where `D(-) = Hom_k(-, k)`.
OUTPUT:
- :class:`QuiverRepHom`, the map `Df : DN \to DM`
.. NOTE::
If `e` is an edge of the quiver `Q` and `g` is an element of
`Hom_k(N, k)` then we let `(ga)(m) = g(ma)`. This gives
`Hom_k(N, k)` its structure as a module over the opposite
quiver ``Q.reverse()``. The map `Hom_k(N, k) \to Hom_k(M, k)`
returned sends `g` to `gf`.
EXAMPLES::
sage: Q = DiGraph({1:{2:['a', 'b']}, 2:{3:['c']}}).path_semigroup()
sage: P = Q.P(QQ, 1)
sage: S = P/P.radical()
sage: f = S.coerce_map_from(P)
The dual of a surjective map is injective and vice versa::
sage: f.is_surjective()
True
sage: g = f.linear_dual()
sage: g.is_injective()
True
        The dual of a right module is a left module for the same quiver; Sage
represents this as a right module for the opposite quiver::
sage: g.quiver().path_semigroup() is Q.reverse()
True
The double dual of a map is the original representation::
sage: g.linear_dual() == f
True
"""
# The effect of the functor D is that it just transposes the matrix of
# a hom
maps = dict((v, self.get_matrix(v).transpose()) for v in self._quiver)
return self._codomain.linear_dual().hom(maps, self._domain.linear_dual())
def algebraic_dual(self):
r"""
Compute the algebraic dual `f^t : N^t \to M^t` of
``self`` = `f : M \to N` where `(-)^t = Hom_Q(-, kQ)`.
OUTPUT:
- :class:`QuiverRepHom`, the map `f^t : N^t \to M^t`
.. NOTE::
If `e` is an edge of the quiver `Q` and `g` is an element of
`Hom_Q(N, kQ)` then we let `(ge)(m) = eg(m)`. This gives
`Hom_Q(N, kQ)` its structure as a module over the opposite
quiver ``Q.reverse()``. The map
`Hom_Q(N, kQ) \to Hom_Q(M, kQ)` returned sends `g` to `gf`.
EXAMPLES::
sage: Q = DiGraph({1:{2:['a'], 3:['b','c','d']}, 2:{4:['e','f']}, 3:{4:['g']}, 5:{2:['h','i']}}).path_semigroup()
sage: P1 = Q.P(QQ, 4)
sage: P1.algebraic_dual()
Representation with dimension vector (5, 2, 1, 1, 4)
The algebraic dual of an indecomposable projective is the indecomposable
        projective of the same vertex in the opposite quiver::
sage: Q.reverse().P(QQ, 4)
Representation with dimension vector (5, 2, 1, 1, 4)
"""
# Get the domain, its basis, and the codomain
domain, domain_gens = self._codomain.algebraic_dual(True)
codomain, co_domain_gens = self._domain.algebraic_dual(True)
# Find the images in the domain and create the module
# H = QuiverHomSpace(self._domain, self._quiver.free_module(self._base_ring))
im_gens = [codomain({v: (g * self)._vector})
for v in self._quiver for g in domain_gens[v]]
return domain.hom(im_gens, codomain)
def direct_sum(self, maps, return_maps=False, pinch=None):
r"""
Return the direct sum of ``self`` with the maps in the list ``maps``.
INPUT:
- ``maps`` -- :class:`QuiverRepHom` or list of :class:`QuiverRepHom`'s
- ``return_maps`` -- bool (default: ``False``). If ``False``, then
the return value is a :class:`QuiverRepHom` which is the direct sum
of ``self`` with the :class:`QuiverRepHoms` in ``maps``.
If ``True``, then the return value is a tuple of length either 3
or 5. The first entry of the tuple is the QuiverRepHom giving
the direct sum. If ``pinch`` is either ``None`` or
``'codomain'`` then the next two entries in the tuple are lists
giving respectively the inclusion and the projection maps for
the factors of the direct sum. Summands are ordered as given
in maps with ``self`` as the zeroth summand. If ``pinch`` is
either ``None`` or ``'domain'`` then the next two entries in the
tuple are the inclusion and projection maps for the codomain.
Thus if ``pinch`` is ``None`` then the tuple will have length 5.
If ``pinch`` is either ``'domain'`` or ``'codomain'`` then the
tuple will have length 3.
- ``pinch`` -- string or ``None`` (default: ``None``). If this is
equal to ``'domain'``, then the domains of ``self`` and the
given maps must be equal. The direct sum of `f: A \to B` and
`g: A \to C` returned is then the map `A \to B \oplus C` defined
by sending `x` to `(f(x), g(x))`. If ``pinch`` equals
``'codomain'``, then the codomains of ``self`` and the given
maps must be equal. The direct sum of `f: A \to C` and
`g: B \to C` returned is then the map `A \oplus B \to C` defined
by sending `(x, y)` to `f(x) + g(y)`. Finally, if ``pinch`` is
anything other than ``'domain'`` or ``'codomain'``, then the
direct sum of `f: A \to B` and `g: C \to D` returned is the map
`A \oplus C \to B \oplus D` defined by sending `(x, y)` to
`(f(x), g(y))`.
OUTPUT:
- :class:`QuiverRepHom` or tuple
EXAMPLES::
sage: Q = DiGraph({1:{2:['a', 'b']}}).path_semigroup()
sage: P1 = Q.P(GF(3), 1)
sage: P2 = Q.P(GF(3), 2)
sage: S1 = P1/P1.radical()
sage: S2 = P2/P2.radical()
sage: pi1 = S1.coerce_map_from(P1)
sage: pi2 = S2.coerce_map_from(P2)
sage: f = pi1.direct_sum(pi2)
sage: f.domain().dimension_vector() == Q.free_module(GF(3)).dimension_vector()
True
sage: f.is_surjective()
True
sage: id = P1.Hom(P1).identity()
sage: g = pi1.direct_sum(id, pinch='domain')
sage: g.is_surjective()
False
"""
# Get the list of maps to be summed
if isinstance(maps, QuiverRepHom):
maplist = [self, maps]
else:
maplist = [self] + maps
# Check that the quivers/base rings are the same. If pinching also
# check that the domain/codomains are correct
for x in maplist:
if not isinstance(x, QuiverRepHom):
raise TypeError("maps must be a QuiverRepHom or list of QuiverRepHoms")
if self._quiver is not x._quiver:
raise ValueError("cannot direct sum maps from different quivers")
if self._base_ring is not x._base_ring:
raise ValueError("base rings must be identical")
if pinch == 'domain' and self._domain is not x._domain:
raise ValueError("cannot pinch maps, domains do not agree")
if pinch == 'codomain' and self._codomain is not x._codomain:
raise ValueError("cannot pinch maps, codomains do not agree")
# Get the sums and their maps
if pinch == 'domain':
domain = self._domain
else:
domain, d_incl, d_proj = self._domain.direct_sum([x._domain for x in maplist[1:]], return_maps=True)
if pinch == 'codomain':
codomain = self._codomain
else:
codomain, c_incl, c_proj = self._codomain.direct_sum([x._codomain for x in maplist[1:]], return_maps=True)
# Start with the zero map
result = domain.hom(codomain)
# Add each factor
for i in range(len(maplist)):
if pinch == 'domain':
result += c_incl[i] * maplist[i]
elif pinch == 'codomain':
result += maplist[i] * d_proj[i]
else:
result += c_incl[i] * maplist[i] * d_proj[i]
# Return the results
if return_maps:
if pinch == 'domain':
return (result, c_incl, c_proj)
elif pinch == 'codomain':
return (result, d_incl, d_proj)
else:
return (result, d_incl, d_proj, c_incl, c_proj)
else:
return result
def lift(self, x):
"""
Given an element `x` of the image, return an element of the domain
that maps onto it under ``self``.
INPUT:
- ``x`` -- :class:`QuiverRepElement`
OUTPUT:
- :class:`QuiverRepElement`
EXAMPLES::
sage: Q = DiGraph({1:{2:['a','b']}, 2:{3:['c','d']}}).path_semigroup()
sage: P = Q.P(QQ, 3)
sage: S = P/P.radical()
sage: proj = S.coerce_map_from(P)
sage: x = S.an_element()
sage: y = proj.lift(x)
sage: proj(y) == x
True
sage: zero = S.hom(S, {})
sage: zero.lift(x)
Traceback (most recent call last):
...
ValueError: element is not in the image
"""
# Lift at each vertex
elems = dict((v, self.get_map(v).lift(x._elems[v])) for v in self._quiver)
return self._domain(elems)
###########################################################################
# #
# ADDITIONAL OPERATIONS #
# These functions operations that are not implemented via binary #
# operators. #
# #
###########################################################################
def scalar_mult(self, scalar):
r"""
Return the result of the scalar multiplication ``scalar * self``,
where ``scalar`` is an element of the base ring `k`.
EXAMPLES::
sage: Q = DiGraph({1:{2:['a','b']}}).path_semigroup()
sage: M = Q.P(QQ, 1)
sage: f = M.Hom(M).an_element()
sage: x = M.an_element()
sage: g = f.scalar_mult(6)
sage: g(x) == 6*f(x)
True
"""
return self._domain.hom(scalar * self._vector, self._codomain)
def iscalar_mult(self, scalar):
"""
Multiply ``self`` by ``scalar`` in place.
EXAMPLES::
sage: Q = DiGraph({1:{2:['a','b']}}).path_semigroup()
sage: M = Q.P(QQ, 1)
sage: f = M.Hom(M).an_element()
sage: x = M.an_element()
sage: y = f(x)
sage: f.iscalar_mult(6)
sage: f(x) == 6*y
True
"""
self._vector *= scalar
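# Illustrative usage -- a sketch rather than a doctest, following the names
# used in the docstrings above (run inside a Sage session):
#
#   Q = DiGraph({1: {2: ['a', 'b']}}).path_semigroup()
#   P1 = Q.P(QQ, 1)
#   f = P1.Hom(P1).identity()
#   f.is_isomorphism()   # True: the identity is invertible at every vertex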
| 38.141337 | 130 | 0.474758 |
8e81f1ce69d881eb7628d1980132933650fd559b | 9,118 | py | Python | main.py | ktrips/InstaChange | 0e8834b3ef103f245602862d2ed2094d62519733 | [
"Apache-2.0"
] | null | null | null | main.py | ktrips/InstaChange | 0e8834b3ef103f245602862d2ed2094d62519733 | [
"Apache-2.0"
] | null | null | null | main.py | ktrips/InstaChange | 0e8834b3ef103f245602862d2ed2094d62519733 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import io, os, uuid
from flask import Flask, render_template, request, redirect, url_for
from flask_wtf.file import FileField
from pytz import timezone, utc
from wtforms import Form, validators, ValidationError, SelectField
from werkzeug.utils import secure_filename
from werkzeug.datastructures import CombinedMultiDict
import cloudstorage as gcs
from google.appengine.api import app_identity
from google.appengine.ext import ndb
from google.cloud import vision, translate
#from google.cloud.vision import types
import random
#client = vision.ImageAnnotatorClient()
app = Flask(__name__)
MAX_PHOTOS = 10
content_types = {'jpg': 'image/jpeg', 'jpeg': 'image/jpeg',
'png': 'image/png', 'gif': 'image/gif'}
extensions = sorted(content_types.keys())
bucket_name = app_identity.get_default_gcs_bucket_name()
storage_path = 'https://storage.cloud.google.com/%s' % bucket_name
tag_language = os.getenv('LANG_TAG', 'en')
timestamp_tz = os.getenv('TIMESTAMP_TZ', 'US/Pacific')
tz = timezone(timestamp_tz)
class Tags(ndb.Model):
timestamp = ndb.DateTimeProperty(auto_now_add=True)
count = ndb.IntegerProperty(required=True)
price = ndb.IntegerProperty(required=False)
calorie=ndb.IntegerProperty(required=False)
@classmethod
def all(cls):
user = ndb.Key('User', 'default')
return cls.query(ancestor=user)
class Photo(ndb.Model):
timestamp = ndb.DateTimeProperty(auto_now_add=True)
tags =ndb.StringProperty(repeated=True)
costs =ndb.IntegerProperty(required=False)
calories=ndb.IntegerProperty(required=False)
changes =ndb.IntegerProperty(required=False)
meals =ndb.StringProperty(required=False)
"""local_timestamp = utc.localize(timestamp).astimezone(tz)
month = local_timestamp.strftime("%m")
@classmethod
def month_filter(cls, month):
user = ndb.Key('User', 'default')
return cls.query(ancestor=user).filter(cls.months == month).order(
-cls.timestamp)"""
@classmethod
def tag_filter(cls, tag):
user = ndb.Key('User', 'default')
return cls.query(ancestor=user).filter(cls.tags == tag).order(
-cls.timestamp)
@classmethod
def all(cls):
user = ndb.Key('User', 'default')
return cls.query(ancestor=user).order(-cls.timestamp)
@app.template_filter('local_tz')
def local_tz_filter(timestamp):
local_timestamp = utc.localize(timestamp).astimezone(tz)
return local_timestamp.strftime("%m/%d %H:%M")
@app.template_filter('dates')
def date_filter(timestamp):
date_time = utc.localize(timestamp).astimezone(tz)
return date_time.strftime("%d-%b")
def is_image():
def _is_image(form, field):
if not field.data:
raise ValidationError()
elif field.data.filename.split('.')[-1] not in extensions:
raise ValidationError()
return _is_image
def get_labels(photo_file):
vision_client = vision.Client()
"""image = types.Image()
image.source.image_uri = 'gs://%s/%s' % (bucket_name, photo_file)
res_label= client.label_detection(image=image)
res_text = client.text_detection(image=image)
labels = res_label.label_annotations
texts = res_text.text_annotations"""
image = vision_client.image(
source_uri = 'gs://%s/%s' % (bucket_name, photo_file))
labels = image.detect_labels(limit=5)
return labels
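# Note: the commented-out lines above sketch the newer
# vision.ImageAnnotatorClient API, while the active path uses the older
# vision.Client interface from early google-cloud-vision releases.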
def translate_text(text):
if tag_language == 'en':
return text
translate_client = translate.Client()
result = translate_client.translate(text, target_language=tag_language)
return result['translatedText']
def is_happy(photo_file):
vision_client = vision.Client()
image = vision_client.image(
source_uri = 'gs://%s/%s' % (bucket_name, photo_file))
num_faces, joy_faces = 0, 0
for face in image.detect_faces(limit=10):
num_faces += 1
if face.joy == vision.likelihood.Likelihood.VERY_LIKELY:
joy_faces += 1
if joy_faces > num_faces * 0.5:
return True
return False
class PhotoForm(Form):
input_photo = FileField('',
#'Photo file (File extension should be: %s)' % ', '.join(extensions),
validators=[is_image()])
class TagForm(Form):
tag = SelectField('') #'Tag')
@app.route('/')
def index():
return render_template('index.html')
@app.route('/photos', methods=['GET', 'POST'])
def photos():
tag = '__all__'
if request.method == 'POST':
tag = request.form['tag']
if tag == '__all__':
photos = Photo.all().fetch(MAX_PHOTOS)
else:
photos = Photo.tag_filter(tag).fetch(MAX_PHOTOS)
photo_form = PhotoForm(request.form)
tag_form = TagForm(request.form, select=tag)
choices = [('__all__', 'Show All')]
for tag in Tags.all().fetch():
tag_id = unicode(tag.key.id(), 'utf8')
choices.append((tag_id, tag_id))
tag_form.tag.choices = choices
return render_template('photos.html', storage_path=storage_path,
photo_form=photo_form, tag_form=tag_form,
photos=photos, max_photos=MAX_PHOTOS)
@app.route('/delete', methods=['POST'])
def delete():
filename = request.form.keys()[0]
photo = ndb.Key('User', 'default', 'Photo', filename).get()
for tag in photo.tags:
entity = ndb.Key('User', 'default', 'Tags', tag).get()
if entity:
entity.count -= 1
if entity.count == 0:
entity.key.delete()
else:
entity.put()
photo.key.delete()
gcs.delete('/%s/%s' % (bucket_name, filename))
return redirect(url_for('photos'))
@app.route('/post', methods=['POST'])
def post():
form = PhotoForm(CombinedMultiDict((request.files, request.form)))
if request.method == 'POST' and form.validate():
filename = '%s.%s' % (str(uuid.uuid4()),
secure_filename(form.input_photo.data.filename))
content_type = content_types[filename.split('.')[-1]]
write_retry_params = gcs.RetryParams(backoff_factor=1.1)
gcs_file = gcs.open('/%s/%s' % (bucket_name, filename), 'w',
retry_params=write_retry_params,
content_type=content_type,
options={'x-goog-acl': 'authenticated-read'})
for _ in form.input_photo.data.stream:
gcs_file.write(_)
gcs_file.close()
labels = get_labels(filename)
faces = is_happy(filename)
print faces
tags = [label.description for label in labels] #[translate_text(label.description) for label in labels]
calories= 0
costs = 0
for tag in tags:
entity = ndb.Key('User', 'default', 'Tags', tag).get()
if entity:
entity.count += 1
else:
price = random.randint(4,10)
calorie= random.randint(100,300)
entity = Tags(count=1, id=tag, price=price, calorie=calorie,
parent=ndb.Key('User', 'default'))
entity.put()
if entity.price:
costs = costs + entity.price
if entity.calorie:
calories = calories + entity.calorie
changes = 0
if costs < 10:
changes = 10 - costs
elif costs >= 10 and costs < 20:
changes = 20 - costs
else:
changes = 50 - costs
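        # "changes" models the change returned when paying with the smallest
        # covering bill ($10, $20, or $50); with at most five labels priced
        # 4-10 each, costs never exceeds 50, so the result stays non-negative.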
"""date_time = utc.localize(timestamp).astimezone(tz)
hour = int(date_time.strftime("%H"))
Bmonth= date_time.strftime("%B")
if hour > 7 and hour < 11:
meals = "BF"
        elif hour >= 11 and hour < 15:
            meals = "lunch"
        elif hour >= 15 and hour < 17:
            meals = "snack"
        elif hour >= 17 and hour < 20:
meals = "dinner"
else:
meals = "other"
print(meals)"""
meals = "lunch"
entity = Photo(id=filename, tags=tags, costs=costs, calories=calories, changes=changes, meals=meals,
parent=ndb.Key('User', 'default'))
entity.put()
return render_template('post.html', storage_path=storage_path,
filename=filename, tags=tags, costs=costs, calories=calories, changes=changes, meals=meals)
else:
return redirect(url_for('photos'))
| 33.77037 | 122 | 0.621518 |
78963c0a67655bb281073e7478b971d3f41dc6f2 | 3,378 | py | Python | aea/cli/registry/fetch.py | marcofavorito/agents-aea | e520f2f5d076a193514e194d94aa76c6423ac5bc | ["Apache-2.0"] | null | null | null | aea/cli/registry/fetch.py | marcofavorito/agents-aea | e520f2f5d076a193514e194d94aa76c6423ac5bc | ["Apache-2.0"] | null | null | null | aea/cli/registry/fetch.py | marcofavorito/agents-aea | e520f2f5d076a193514e194d94aa76c6423ac5bc | ["Apache-2.0"] | null | null | null |
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2018-2019 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""Methods for CLI fetch functionality."""
import os
import shutil
from typing import Optional
import click
from aea.cli.add import add_item
from aea.cli.registry.utils import download_file, extract, request_api
from aea.cli.utils.config import try_to_load_agent_config
from aea.cli.utils.context import Context
from aea.cli.utils.decorators import clean_after
from aea.configurations.base import PublicId
from aea.configurations.constants import (
CONNECTION,
CONTRACT,
DEFAULT_AEA_CONFIG_FILE,
PROTOCOL,
SKILL,
)
@clean_after
def fetch_agent(
ctx: Context,
public_id: PublicId,
alias: Optional[str] = None,
target_dir: Optional[str] = None,
) -> None:
"""
Fetch Agent from Registry.
:param ctx: Context
:param public_id: str public ID of desirable agent.
:param alias: an optional alias.
:param target_dir: the target directory to which the agent is fetched.
:return: None
"""
author, name, version = public_id.author, public_id.name, public_id.version
api_path = f"/agents/{author}/{name}/{version}"
resp = request_api("GET", api_path)
file_url = resp["file"]
filepath = download_file(file_url, ctx.cwd)
folder_name = target_dir or (name if alias is None else alias)
aea_folder = os.path.join(ctx.cwd, folder_name)
ctx.clean_paths.append(aea_folder)
extract(filepath, ctx.cwd)
if alias or target_dir:
shutil.move(
os.path.join(ctx.cwd, name), aea_folder,
)
ctx.cwd = aea_folder
try_to_load_agent_config(ctx)
if alias is not None:
ctx.agent_config.agent_name = alias
with open(os.path.join(ctx.cwd, DEFAULT_AEA_CONFIG_FILE), "w") as fp:
ctx.agent_loader.dump(ctx.agent_config, fp)
click.echo("Fetching dependencies...")
for item_type in (CONNECTION, CONTRACT, SKILL, PROTOCOL):
item_type_plural = item_type + "s"
# initialize fetched agent with empty folders for custom packages
custom_items_folder = os.path.join(ctx.cwd, item_type_plural)
os.makedirs(custom_items_folder)
config = getattr(ctx.agent_config, item_type_plural)
for item_public_id in config:
try:
add_item(ctx, item_type, item_public_id)
except Exception as e:
raise click.ClickException(
f'Unable to fetch dependency for agent "{name}", aborting. {e}'
)
click.echo("Dependencies successfully fetched.")
click.echo(f"Agent {name} successfully fetched to {aea_folder}.")
| 33.445545 | 83 | 0.656898 |
e492ac628030b0d15392c27945fbd7070ac23423 | 2,203 | py | Python | modules/text/embedding/glove_twitter_target_word-word_dim50_en/module.py | tjy1985001/PaddleHub | 28a57e1649fcb2927c5c4f631a9f8a29ba17bc54 | ["Apache-2.0"] | 1 | 2021-01-27T09:15:58.000Z | 2021-01-27T09:15:58.000Z | modules/text/embedding/glove_twitter_target_word-word_dim50_en/module.py | chenyao821/PaddleHub | 12267027796103c53e869936fa24290615abbc9b | ["Apache-2.0"] | null | null | null | modules/text/embedding/glove_twitter_target_word-word_dim50_en/module.py | chenyao821/PaddleHub | 12267027796103c53e869936fa24290615abbc9b | ["Apache-2.0"] | null | null | null |
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List
from paddlenlp.embeddings import TokenEmbedding
from paddlehub.module.module import moduleinfo, serving
@moduleinfo(
name="glove_twitter_target_word-word_dim50_en",
version="1.0.0",
summary="",
author="paddlepaddle",
author_email="",
type="nlp/semantic_model")
class Embedding(TokenEmbedding):
"""
Embedding model
"""
def __init__(self, *args, **kwargs):
super(Embedding, self).__init__(embedding_name="glove.twitter.target.word-word.dim50.en", *args, **kwargs)
@serving
def calc_similarity(self, data: List[List[str]]):
"""
        Calculate similarities of given word pairs.
"""
results = []
for word_pair in data:
if len(word_pair) != 2:
raise RuntimeError(
f'The input must have two words, but got {len(word_pair)}. Please check your inputs.')
if not isinstance(word_pair[0], str) or not isinstance(word_pair[1], str):
raise RuntimeError(
f'The types of text pair must be (str, str), but got'
f' ({type(word_pair[0]).__name__}, {type(word_pair[1]).__name__}). Please check your inputs.')
for word in word_pair:
if self.get_idx_from_word(word) == \
self.get_idx_from_word(self.vocab.unk_token):
raise RuntimeError(
f'Word "{word}" is not in vocab. Please check your inputs.')
results.append(str(self.cosine_sim(*word_pair)))
return results
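# --- Editor's usage sketch (illustrative, not part of the original module) ---
# How the serving endpoint above might be exercised locally; instantiating the
# module downloads the pretrained embedding, so this stays commented out.
#
#     module = Embedding()
#     print(module.calc_similarity([["rain", "snow"], ["king", "queen"]]))
#     # -> one cosine-similarity value (as a string) per word pair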
| 39.339286 | 114 | 0.64503 |
f390a3e5e31d2ff280859557bbe2488e5499a1f4 | 7,103 | py | Python | trinity/components/eth2/beacon/component.py | dendisuhubdy/trinity | 001664781259c7dd0779a0ef6f822451b608ded4 | ["MIT"] | null | null | null | trinity/components/eth2/beacon/component.py | dendisuhubdy/trinity | 001664781259c7dd0779a0ef6f822451b608ded4 | ["MIT"] | null | null | null | trinity/components/eth2/beacon/component.py | dendisuhubdy/trinity | 001664781259c7dd0779a0ef6f822451b608ded4 | ["MIT"] | null | null | null |
from argparse import ArgumentParser, _SubParsersAction
import asyncio
import contextlib
import logging
from typing import Set, Tuple
from lahja import EndpointAPI
from libp2p.crypto.keys import KeyPair
from libp2p.crypto.secp256k1 import create_new_key_pair
from eth2.beacon.typing import SubnetId
from p2p.service import BaseService, run_service
from trinity.config import BeaconAppConfig, TrinityConfig
from trinity.db.beacon.chain import AsyncBeaconChainDB
from trinity.db.manager import DBClient
from trinity.extensibility import AsyncioIsolatedComponent
from trinity.http.apps.validator_api import ValidatorAPIHandler
from trinity.http.handlers.api_handler import APIHandler
from trinity.http.handlers.metrics_handler import MetricsHandler
from trinity.http.main import HTTPServer
from trinity.http.server import HTTPServer as HTTPAppServer
from trinity.protocol.bcc_libp2p.configs import ATTESTATION_SUBNET_COUNT
from trinity.protocol.bcc_libp2p.node import Node
from trinity.protocol.bcc_libp2p.servers import BCCReceiveServer
from trinity.sync.beacon.chain import BeaconChainSyncer
from trinity.sync.common.chain import SyncBlockImporter
from .chain_maintainer import ChainMaintainer
from .slot_ticker import SlotTicker
from .validator_handler import ValidatorHandler
def _load_secp256k1_key_pair_from(trinity_config: TrinityConfig) -> KeyPair:
return create_new_key_pair(trinity_config.nodekey.to_bytes())
class BeaconNodeComponent(AsyncioIsolatedComponent):
name = "Beacon Node"
logger = logging.getLogger("trinity.components.beacon.BeaconNode")
@classmethod
def configure_parser(
cls, arg_parser: ArgumentParser, subparser: _SubParsersAction
) -> None:
arg_parser.add_argument(
"--enable-metrics", action="store_true", help="Enables the Metrics Server"
)
arg_parser.add_argument(
"--metrics-port", type=int, help="Metrics server port", default=8008
)
arg_parser.add_argument(
"--debug-libp2p", action="store_true", help="Enable debug logging of libp2p"
)
arg_parser.add_argument(
"--enable-api", action="store_true", help="Enables the API Server"
)
arg_parser.add_argument(
"--api-port", type=int, help="API server port", default=5005
)
arg_parser.add_argument(
"--bn-only", action="store_true", help="Run with BeaconNode only mode"
)
@property
def is_enabled(self) -> bool:
return self._boot_info.trinity_config.has_app_config(BeaconAppConfig)
async def do_run(self, event_bus: EndpointAPI) -> None:
boot_info = self._boot_info
trinity_config = boot_info.trinity_config
key_pair = _load_secp256k1_key_pair_from(trinity_config)
beacon_app_config = trinity_config.get_app_config(BeaconAppConfig)
base_db = DBClient.connect(trinity_config.database_ipc_path)
if boot_info.args.debug_libp2p:
logging.getLogger("libp2p").setLevel(logging.DEBUG)
else:
logging.getLogger("libp2p").setLevel(logging.INFO)
with base_db:
chain_config = beacon_app_config.get_chain_config()
chain = chain_config.beacon_chain_class(
base_db, chain_config.genesis_config
)
            # TODO: To simplify, subscribe to all subnets
subnets: Set[SubnetId] = set(
SubnetId(subnet_id) for subnet_id in range(ATTESTATION_SUBNET_COUNT)
)
# TODO: Handle `bootstrap_nodes`.
libp2p_node = Node(
key_pair=key_pair,
listen_ip="0.0.0.0",
listen_port=boot_info.args.port,
preferred_nodes=trinity_config.preferred_nodes,
chain=chain,
subnets=subnets,
event_bus=event_bus,
)
receive_server = BCCReceiveServer(
chain=chain,
p2p_node=libp2p_node,
topic_msg_queues=libp2p_node.pubsub.my_topics,
subnets=subnets,
cancel_token=libp2p_node.cancel_token,
)
chain_maintainer = ChainMaintainer(
chain=chain, event_bus=event_bus, token=libp2p_node.cancel_token
)
validator_handler = ValidatorHandler(
chain=chain,
p2p_node=libp2p_node,
event_bus=event_bus,
get_ready_attestations_fn=receive_server.get_ready_attestations,
get_aggregatable_attestations_fn=receive_server.get_aggregatable_attestations,
import_attestation_fn=receive_server.import_attestation,
token=libp2p_node.cancel_token,
)
slot_ticker = SlotTicker(
genesis_slot=chain_config.genesis_config.GENESIS_SLOT,
genesis_time=chain_config.genesis_time,
seconds_per_slot=chain_config.genesis_config.SECONDS_PER_SLOT,
event_bus=event_bus,
token=libp2p_node.cancel_token,
)
syncer = BeaconChainSyncer(
chain_db=AsyncBeaconChainDB(base_db, chain_config.genesis_config),
peer_pool=libp2p_node.handshaked_peers,
block_importer=SyncBlockImporter(chain),
genesis_config=chain_config.genesis_config,
event_bus=event_bus,
token=libp2p_node.cancel_token,
)
metrics_server = HTTPServer(
handler=MetricsHandler.handle(chain)(event_bus),
port=boot_info.args.metrics_port,
)
# NOTE: this API server provides an interface into the beacon node
api_server = HTTPServer(
handler=APIHandler.handle(chain)(event_bus),
port=boot_info.args.api_port,
)
# NOTE: this API server provides an interface between the beacon node and
# any connected validator clients.
validator_api_handler = ValidatorAPIHandler(
chain, event_bus, chain_config.genesis_time
)
validator_api_server = HTTPAppServer(
routes=validator_api_handler.make_routes(), port=30303
)
services: Tuple[BaseService, ...] = (
libp2p_node,
receive_server,
slot_ticker,
syncer,
validator_api_server,
)
if boot_info.args.enable_metrics:
services += (metrics_server,)
if boot_info.args.enable_api:
services += (api_server,)
if boot_info.args.bn_only:
services += (chain_maintainer, validator_handler)
async with contextlib.AsyncExitStack() as stack:
for service in services:
await stack.enter_async_context(run_service(service))
await asyncio.gather(*(service.cancellation() for service in services))
| 39.243094 | 94 | 0.652823 |
1e318ca2924aa9bf233e5c3158b6bc40a82e018e | 151 | py | Python | Automation/Verify_Stripping.py | NeonOcean/Environment | ca658cf66e8fd6866c22a4a0136d415705b36d26 | ["CC-BY-4.0"] | 1 | 2021-05-20T19:33:37.000Z | 2021-05-20T19:33:37.000Z | Automation/Verify_Stripping.py | NeonOcean/Environment | ca658cf66e8fd6866c22a4a0136d415705b36d26 | ["CC-BY-4.0"] | null | null | null | Automation/Verify_Stripping.py | NeonOcean/Environment | ca658cf66e8fd6866c22a4a0136d415705b36d26 | ["CC-BY-4.0"] | null | null | null |
from Automation import Publishing
def Run () -> bool:
Publishing.VerifyStripping()
return True
if __name__ == "__main__":
if not Run():
		exit(1)
| 15.1 | 33 | 0.701987 |
868f553ad3ceda940d91dc597dd2125ef79932dd | 3,469 | py | Python | .history/my_classes/FirstClassFunctions/reducing_functions_20210707181058.py | minefarmer/deep-Dive-1 | b0675b853180c5b5781888266ea63a3793b8d855 | ["Unlicense"] | null | null | null | .history/my_classes/FirstClassFunctions/reducing_functions_20210707181058.py | minefarmer/deep-Dive-1 | b0675b853180c5b5781888266ea63a3793b8d855 | ["Unlicense"] | null | null | null | .history/my_classes/FirstClassFunctions/reducing_functions_20210707181058.py | minefarmer/deep-Dive-1 | b0675b853180c5b5781888266ea63a3793b8d855 | ["Unlicense"] | null | null | null |
"""Reducing Functions in Python
These are functions that recombine an iterable recursively, ending up with a single return value
Also called accumulators, aggregators, or folding functions
Example: Finding the maximum value in an iterable
a0, a1, a2, ..., aN-1
max(a, b) -> maximum of a and b
result = a0
result = max(result, a1)
result = max(result, a2)
...
result = max(result, aN-1)
# max value in a0, a1, a2, ..., aN-1
the special case of sequences
(i.e. we can use indexes to access elements in the sequence)
Using a loop
"""
l = [5, 8, 6, 10, 9]                           # result = 5
max_value = lambda a, b: a if a > b else b # result = max(5, 8) = 8
def max_sequence(sequence): # result = max(5, 6) = 8
result = sequence[0]
for e in sequence[1:]: # result = max(5, 10) = 10
result = max_value(result, e) # result = max(5, 10) = 10
return result # result -> 10
Notice the sequence of steps:
l = [5, 8, 6, 10, 9]                           # result = 5
max_value = lambda a, b: a if a > b else b # result = max(5, 8) = 8
def max_sequence(sequence): # result = max(5, 6) = 8
result = sequence[0]
for e in sequence[1:]: # result = max(5, 10) = 10
result = max_value(result, e) # result = max(5, 10) = 10
return result # result -> 10
l = [5, 8, 6, 10, 9]
    5
    max(5, 8)   -> 8
    max(8, 6)   -> 8
    max(8, 10)  -> 10
    max(10, 9)  -> 10
result -> 10
To calculate the min: # I just need to change (max) to (min)
l = [5, 8, 6, 10, 9]                           # result = 5
min_value = lambda a, b: a if a < b else b     # result = min(5, 8) = 5
def min_sequence(sequence):                    # result = min(5, 6) = 5
    result = sequence[0]
    for e in sequence[1:]:                     # result = min(5, 10) = 5
        result = min_value(result, e)          # result = min(5, 9) = 5
    return result                              # result -> 5
# I could just write:
def _reduce(fn, sequence):
    result = sequence[0]
    for x in sequence[1:]:
result = fn(result, x)
return result
_reduce(lambda a, b: a if a > b else b, l) # maximum
_reduce(lambda a, b: a if a < b else b, l) # minimum
# Adding all the elements to a list
add = lambda a, b: a+b
# result = 5
l = [5, 8, 6, 10, 9]
# result = add(5, 8) = 13
# result = add(13, 6) = 19
def _reduce(fn, sequence): # result = add(19, 10) = 29
result = sequence[0]
    for x in sequence[1:]:                     # result = add(29, 9) = 38
result = fn(result, x)
return result # result = 38
_reduce(add, l)  # -> 38
""" The functools module
Python implements a reduce function that will handle any iterable, but works similarly to what I just saw.
"""
from functools import reduce
l = [5, 8, 6, 10, 9]
reduce(lambda a, b: a if a > b else b, l) # max -> 10
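# Editor's addition -- a short, self-contained extension of the example above:
# `reduce` also accepts an optional initializer, which seeds the accumulator
# and makes empty iterables safe.
reduce(lambda a, b: a + b, l)         # 38, same as the hand-rolled _reduce
reduce(lambda a, b: a + b, l, 100)    # 138: folding starts from 100
reduce(lambda a, b: a + b, [], 0)     # 0: no TypeError on an empty iterable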
| 25.507353 | 105 | 0.473047 |
bb69a79a45a594fadb46ad18b38c594d39ff770e | 24,439 | py | Python | discord/voice_client.py | Alcamoru/discord.py | 670256b924324f91f19bf5e5c6234c72897cbb11 | ["MIT"] | 1 | 2021-08-31T11:28:42.000Z | 2021-08-31T11:28:42.000Z | discord/voice_client.py | Alcamoru/discord.py | 670256b924324f91f19bf5e5c6234c72897cbb11 | ["MIT"] | null | null | null | discord/voice_client.py | Alcamoru/discord.py | 670256b924324f91f19bf5e5c6234c72897cbb11 | ["MIT"] | null | null | null |
"""
The MIT License (MIT)
Copyright (c) 2015-present Rapptz
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
Some documentation to refer to:
- Our main web socket (mWS) sends opcode 4 with a guild ID and channel ID.
- The mWS receives VOICE_STATE_UPDATE and VOICE_SERVER_UPDATE.
- We pull the session_id from VOICE_STATE_UPDATE.
- We pull the token, endpoint and server_id from VOICE_SERVER_UPDATE.
- Then we initiate the voice web socket (vWS) pointing to the endpoint.
- We send opcode 0 with the user_id, server_id, session_id and token using the vWS.
- The vWS sends back opcode 2 with an ssrc, port, modes(array) and heartbeat_interval.
- We send a UDP discovery packet to endpoint:port and receive our IP and our port in LE.
- Then we send our IP and port via vWS with opcode 1.
- When that's all done, we receive opcode 4 from the vWS.
- Finally we can transmit data to endpoint:port.
"""
from __future__ import annotations
import asyncio
import logging
import socket
import struct
import threading
from typing import Any, Callable, List, Optional, TYPE_CHECKING, Tuple
from . import opus, utils
from .backoff import ExponentialBackoff
from .errors import ClientException, ConnectionClosed
from .gateway import *
from .player import AudioPlayer, AudioSource
from .utils import MISSING
if TYPE_CHECKING:
from .client import Client
from .guild import Guild
from .state import ConnectionState
from .user import ClientUser
from .opus import Encoder
from . import abc
from .types.voice import (
GuildVoiceState as GuildVoiceStatePayload,
VoiceServerUpdate as VoiceServerUpdatePayload,
SupportedModes,
)
has_nacl: bool
try:
import nacl.secret # type: ignore
has_nacl = True
except ImportError:
has_nacl = False
__all__ = (
'VoiceProtocol',
'VoiceClient',
)
_log = logging.getLogger(__name__)
class VoiceProtocol:
"""A class that represents the Discord voice protocol.
This is an abstract class. The library provides a concrete implementation
under :class:`VoiceClient`.
This class allows you to implement a protocol to allow for an external
method of sending voice, such as Lavalink_ or a native library implementation.
These classes are passed to :meth:`abc.Connectable.connect <VoiceChannel.connect>`.
.. _Lavalink: https://github.com/freyacodes/Lavalink
Parameters
------------
client: :class:`Client`
The client (or its subclasses) that started the connection request.
channel: :class:`abc.Connectable`
The voice channel that is being connected to.
"""
def __init__(self, client: Client, channel: abc.Connectable) -> None:
self.client: Client = client
self.channel: abc.Connectable = channel
async def on_voice_state_update(self, data: GuildVoiceStatePayload) -> None:
"""|coro|
An abstract method that is called when the client's voice state
has changed. This corresponds to ``VOICE_STATE_UPDATE``.
Parameters
------------
data: :class:`dict`
The raw `voice state payload`__.
.. _voice_state_update_payload: https://discord.com/developers/docs/resources/voice#voice-state-object
__ voice_state_update_payload_
"""
raise NotImplementedError
async def on_voice_server_update(self, data: VoiceServerUpdatePayload) -> None:
"""|coro|
An abstract method that is called when initially connecting to voice.
This corresponds to ``VOICE_SERVER_UPDATE``.
Parameters
------------
data: :class:`dict`
The raw `voice server update payload`__.
.. _voice_server_update_payload: https://discord.com/developers/docs/topics/gateway#voice-server-update-voice-server-update-event-fields
__ voice_server_update_payload_
"""
raise NotImplementedError
async def connect(self, *, timeout: float, reconnect: bool) -> None:
"""|coro|
An abstract method called when the client initiates the connection request.
When a connection is requested initially, the library calls the constructor
under ``__init__`` and then calls :meth:`connect`. If :meth:`connect` fails at
some point then :meth:`disconnect` is called.
Within this method, to start the voice connection flow it is recommended to
use :meth:`Guild.change_voice_state` to start the flow. After which,
:meth:`on_voice_server_update` and :meth:`on_voice_state_update` will be called.
The order that these two are called is unspecified.
Parameters
------------
timeout: :class:`float`
The timeout for the connection.
reconnect: :class:`bool`
Whether reconnection is expected.
"""
raise NotImplementedError
async def disconnect(self, *, force: bool) -> None:
"""|coro|
An abstract method called when the client terminates the connection.
See :meth:`cleanup`.
Parameters
------------
force: :class:`bool`
Whether the disconnection was forced.
"""
raise NotImplementedError
def cleanup(self) -> None:
"""This method *must* be called to ensure proper clean-up during a disconnect.
It is advisable to call this from within :meth:`disconnect` when you are
completely done with the voice protocol instance.
This method removes it from the internal state cache that keeps track of
currently alive voice clients. Failure to clean-up will cause subsequent
connections to report that it's still connected.
"""
key_id, _ = self.channel._get_voice_client_key()
self.client._connection._remove_voice_client(key_id)
class VoiceClient(VoiceProtocol):
"""Represents a Discord voice connection.
You do not create these, you typically get them from
e.g. :meth:`VoiceChannel.connect`.
Warning
--------
In order to use PCM based AudioSources, you must have the opus library
installed on your system and loaded through :func:`opus.load_opus`.
Otherwise, your AudioSources must be opus encoded (e.g. using :class:`FFmpegOpusAudio`)
or the library will not be able to transmit audio.
Attributes
-----------
session_id: :class:`str`
The voice connection session ID.
token: :class:`str`
The voice connection token.
endpoint: :class:`str`
The endpoint we are connecting to.
channel: :class:`abc.Connectable`
The voice channel connected to.
loop: :class:`asyncio.AbstractEventLoop`
The event loop that the voice client is running on.
"""
endpoint_ip: str
voice_port: int
secret_key: List[int]
ssrc: int
def __init__(self, client: Client, channel: abc.Connectable):
if not has_nacl:
raise RuntimeError("PyNaCl library needed in order to use voice")
super().__init__(client, channel)
state = client._connection
self.token: str = MISSING
self.socket = MISSING
self.loop: asyncio.AbstractEventLoop = state.loop
self._state: ConnectionState = state
# this will be used in the AudioPlayer thread
self._connected: threading.Event = threading.Event()
self._handshaking: bool = False
self._potentially_reconnecting: bool = False
self._voice_state_complete: asyncio.Event = asyncio.Event()
self._voice_server_complete: asyncio.Event = asyncio.Event()
self.mode: str = MISSING
self._connections: int = 0
self.sequence: int = 0
self.timestamp: int = 0
self.timeout: float = 0
self._runner: asyncio.Task = MISSING
self._player: Optional[AudioPlayer] = None
self.encoder: Encoder = MISSING
self._lite_nonce: int = 0
self.ws: DiscordVoiceWebSocket = MISSING
warn_nacl = not has_nacl
supported_modes: Tuple[SupportedModes, ...] = (
'xsalsa20_poly1305_lite',
'xsalsa20_poly1305_suffix',
'xsalsa20_poly1305',
)
@property
def guild(self) -> Optional[Guild]:
"""Optional[:class:`Guild`]: The guild we're connected to, if applicable."""
return getattr(self.channel, 'guild', None)
@property
def user(self) -> ClientUser:
""":class:`ClientUser`: The user connected to voice (i.e. ourselves)."""
return self._state.user
def checked_add(self, attr, value, limit):
val = getattr(self, attr)
if val + value > limit:
setattr(self, attr, 0)
else:
setattr(self, attr, val + value)
# connection related
async def on_voice_state_update(self, data: GuildVoiceStatePayload) -> None:
self.session_id = data['session_id']
channel_id = data['channel_id']
if not self._handshaking or self._potentially_reconnecting:
# If we're done handshaking then we just need to update ourselves
# If we're potentially reconnecting due to a 4014, then we need to differentiate
# a channel move and an actual force disconnect
if channel_id is None:
# We're being disconnected so cleanup
await self.disconnect()
else:
guild = self.guild
self.channel = channel_id and guild and guild.get_channel(int(channel_id)) # type: ignore
else:
self._voice_state_complete.set()
async def on_voice_server_update(self, data: VoiceServerUpdatePayload) -> None:
if self._voice_server_complete.is_set():
_log.info('Ignoring extraneous voice server update.')
return
self.token = data.get('token')
self.server_id = int(data['guild_id'])
endpoint = data.get('endpoint')
if endpoint is None or self.token is None:
            _log.warning('Awaiting endpoint... This requires waiting. '
                         'If a timeout occurs, consider raising the timeout and reconnecting.')
return
self.endpoint, _, _ = endpoint.rpartition(':')
if self.endpoint.startswith('wss://'):
# Just in case, strip it off since we're going to add it later
self.endpoint = self.endpoint[6:]
# This gets set later
self.endpoint_ip = MISSING
self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.socket.setblocking(False)
if not self._handshaking:
# If we're not handshaking then we need to terminate our previous connection in the websocket
await self.ws.close(4000)
return
self._voice_server_complete.set()
async def voice_connect(self) -> None:
await self.channel.guild.change_voice_state(channel=self.channel)
async def voice_disconnect(self) -> None:
_log.info('The voice handshake is being terminated for Channel ID %s (Guild ID %s)', self.channel.id,
self.guild.id)
await self.channel.guild.change_voice_state(channel=None)
def prepare_handshake(self) -> None:
self._voice_state_complete.clear()
self._voice_server_complete.clear()
self._handshaking = True
_log.info('Starting voice handshake... (connection attempt %d)', self._connections + 1)
self._connections += 1
def finish_handshake(self) -> None:
_log.info('Voice handshake complete. Endpoint found %s', self.endpoint)
self._handshaking = False
self._voice_server_complete.clear()
self._voice_state_complete.clear()
async def connect_websocket(self) -> DiscordVoiceWebSocket:
ws = await DiscordVoiceWebSocket.from_client(self)
self._connected.clear()
while ws.secret_key is None:
await ws.poll_event()
self._connected.set()
return ws
async def connect(self, *, reconnect: bool, timeout: float) -> None:
_log.info('Connecting to voice...')
self.timeout = timeout
for i in range(5):
self.prepare_handshake()
# This has to be created before we start the flow.
futures = [
self._voice_state_complete.wait(),
self._voice_server_complete.wait(),
]
# Start the connection flow
await self.voice_connect()
try:
await utils.sane_wait_for(futures, timeout=timeout)
except asyncio.TimeoutError:
await self.disconnect(force=True)
raise
self.finish_handshake()
try:
self.ws = await self.connect_websocket()
break
except (ConnectionClosed, asyncio.TimeoutError):
if reconnect:
_log.exception('Failed to connect to voice... Retrying...')
await asyncio.sleep(1 + i * 2.0)
await self.voice_disconnect()
continue
else:
raise
if self._runner is MISSING:
self._runner = self.loop.create_task(self.poll_voice_ws(reconnect))
async def potential_reconnect(self) -> bool:
# Attempt to stop the player thread from playing early
self._connected.clear()
self.prepare_handshake()
self._potentially_reconnecting = True
try:
# We only care about VOICE_SERVER_UPDATE since VOICE_STATE_UPDATE can come before we get disconnected
await asyncio.wait_for(self._voice_server_complete.wait(), timeout=self.timeout)
except asyncio.TimeoutError:
self._potentially_reconnecting = False
await self.disconnect(force=True)
return False
self.finish_handshake()
self._potentially_reconnecting = False
try:
self.ws = await self.connect_websocket()
except (ConnectionClosed, asyncio.TimeoutError):
return False
else:
return True
@property
def latency(self) -> float:
""":class:`float`: Latency between a HEARTBEAT and a HEARTBEAT_ACK in seconds.
This could be referred to as the Discord Voice WebSocket latency and is
an analogue of user's voice latencies as seen in the Discord client.
.. versionadded:: 1.4
"""
ws = self.ws
return float("inf") if not ws else ws.latency
@property
def average_latency(self) -> float:
""":class:`float`: Average of most recent 20 HEARTBEAT latencies in seconds.
.. versionadded:: 1.4
"""
ws = self.ws
return float("inf") if not ws else ws.average_latency
async def poll_voice_ws(self, reconnect: bool) -> None:
backoff = ExponentialBackoff()
while True:
try:
await self.ws.poll_event()
except (ConnectionClosed, asyncio.TimeoutError) as exc:
if isinstance(exc, ConnectionClosed):
# The following close codes are undocumented so I will document them here.
# 1000 - normal closure (obviously)
# 4014 - voice channel has been deleted.
# 4015 - voice server has crashed
if exc.code in (1000, 4015):
_log.info('Disconnecting from voice normally, close code %d.', exc.code)
await self.disconnect()
break
if exc.code == 4014:
_log.info('Disconnected from voice by force... potentially reconnecting.')
successful = await self.potential_reconnect()
if not successful:
_log.info('Reconnect was unsuccessful, disconnecting from voice normally...')
await self.disconnect()
break
else:
continue
if not reconnect:
await self.disconnect()
raise
retry = backoff.delay()
_log.exception('Disconnected from voice... Reconnecting in %.2fs.', retry)
self._connected.clear()
await asyncio.sleep(retry)
await self.voice_disconnect()
try:
await self.connect(reconnect=True, timeout=self.timeout)
except asyncio.TimeoutError:
# at this point we've retried 5 times... let's continue the loop.
_log.warning('Could not connect to voice... Retrying...')
continue
async def disconnect(self, *, force: bool = False) -> None:
"""|coro|
Disconnects this voice client from voice.
"""
if not force and not self.is_connected():
return
self.stop()
self._connected.clear()
try:
if self.ws:
await self.ws.close()
await self.voice_disconnect()
finally:
self.cleanup()
if self.socket:
self.socket.close()
async def move_to(self, channel: abc.Snowflake) -> None:
"""|coro|
Moves you to a different voice channel.
Parameters
-----------
channel: :class:`abc.Snowflake`
The channel to move to. Must be a voice channel.
"""
await self.channel.guild.change_voice_state(channel=channel)
def is_connected(self) -> bool:
"""Indicates if the voice client is connected to voice."""
return self._connected.is_set()
# audio related
def _get_voice_packet(self, data):
header = bytearray(12)
# Formulate rtp header
header[0] = 0x80
header[1] = 0x78
struct.pack_into('>H', header, 2, self.sequence)
struct.pack_into('>I', header, 4, self.timestamp)
struct.pack_into('>I', header, 8, self.ssrc)
encrypt_packet = getattr(self, '_encrypt_' + self.mode)
return encrypt_packet(header, data)
def _encrypt_xsalsa20_poly1305(self, header: bytes, data) -> bytes:
box = nacl.secret.SecretBox(bytes(self.secret_key))
nonce = bytearray(24)
nonce[:12] = header
return header + box.encrypt(bytes(data), bytes(nonce)).ciphertext
def _encrypt_xsalsa20_poly1305_suffix(self, header: bytes, data) -> bytes:
box = nacl.secret.SecretBox(bytes(self.secret_key))
nonce = nacl.utils.random(nacl.secret.SecretBox.NONCE_SIZE)
return header + box.encrypt(bytes(data), nonce).ciphertext + nonce
def _encrypt_xsalsa20_poly1305_lite(self, header: bytes, data) -> bytes:
box = nacl.secret.SecretBox(bytes(self.secret_key))
nonce = bytearray(24)
nonce[:4] = struct.pack('>I', self._lite_nonce)
self.checked_add('_lite_nonce', 1, 4294967295)
return header + box.encrypt(bytes(data), bytes(nonce)).ciphertext + nonce[:4]
    def play(self, source: AudioSource, *, after: Optional[Callable[[Optional[Exception]], Any]] = None) -> None:
"""Plays an :class:`AudioSource`.
The finalizer, ``after`` is called after the source has been exhausted
or an error occurred.
If an error happens while the audio player is running, the exception is
caught and the audio player is then stopped. If no after callback is
passed, any caught exception will be displayed as if it were raised.
Parameters
-----------
source: :class:`AudioSource`
The audio source we're reading from.
after: Callable[[Optional[:class:`Exception`]], Any]
The finalizer that is called after the stream is exhausted.
This function must have a single parameter, ``error``, that
denotes an optional exception that was raised during playing.
Raises
-------
ClientException
Already playing audio or not connected.
TypeError
Source is not a :class:`AudioSource` or after is not a callable.
OpusNotLoaded
Source is not opus encoded and opus is not loaded.
"""
if not self.is_connected():
raise ClientException('Not connected to voice.')
if self.is_playing():
raise ClientException('Already playing audio.')
if not isinstance(source, AudioSource):
raise TypeError(f'source must be an AudioSource not {source.__class__.__name__}')
if not self.encoder and not source.is_opus():
self.encoder = opus.Encoder()
self._player = AudioPlayer(source, self, after=after)
self._player.start()
def is_playing(self) -> bool:
"""Indicates if we're currently playing audio."""
return self._player is not None and self._player.is_playing()
def is_paused(self) -> bool:
"""Indicates if we're playing audio, but if we're paused."""
return self._player is not None and self._player.is_paused()
def stop(self) -> None:
"""Stops playing audio."""
if self._player:
self._player.stop()
self._player = None
def pause(self) -> None:
"""Pauses the audio playing."""
if self._player:
self._player.pause()
def resume(self) -> None:
"""Resumes the audio playing."""
if self._player:
self._player.resume()
@property
def source(self) -> Optional[AudioSource]:
"""Optional[:class:`AudioSource`]: The audio source being played, if playing.
This property can also be used to change the audio source currently being played.
"""
return self._player.source if self._player else None
@source.setter
def source(self, value: AudioSource) -> None:
if not isinstance(value, AudioSource):
raise TypeError(f'expected AudioSource not {value.__class__.__name__}.')
if self._player is None:
raise ValueError('Not playing anything.')
self._player._set_source(value)
def send_audio_packet(self, data: bytes, *, encode: bool = True) -> None:
"""Sends an audio packet composed of the data.
You must be connected to play audio.
Parameters
----------
data: :class:`bytes`
The :term:`py:bytes-like object` denoting PCM or Opus voice data.
encode: :class:`bool`
Indicates if ``data`` should be encoded into Opus.
Raises
-------
ClientException
You are not connected.
opus.OpusError
Encoding the data failed.
"""
self.checked_add('sequence', 1, 65535)
if encode:
encoded_data = self.encoder.encode(data, self.encoder.SAMPLES_PER_FRAME)
else:
encoded_data = data
packet = self._get_voice_packet(encoded_data)
try:
self.socket.sendto(packet, (self.endpoint_ip, self.voice_port))
except BlockingIOError:
_log.warning('A packet has been dropped (seq: %s, timestamp: %s)', self.sequence, self.timestamp)
self.checked_add('timestamp', opus.Encoder.SAMPLES_PER_FRAME, 4294967295)
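# --- Editor's usage sketch (illustrative, not part of the original file) ---
# A hedged example of driving VoiceClient from bot code; `channel` is assumed
# to be a guild voice channel object and "song.mp3" a local audio file.
#
#     vc = await channel.connect()                      # returns a VoiceClient
#     vc.play(discord.FFmpegPCMAudio("song.mp3"),
#             after=lambda err: print("finished:", err))
#     ...
#     await vc.disconnect()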
| 36.152367 | 148 | 0.629854 |
31c3eb2ba3182f248a73ce4415282f50c4d40ac5 | 222 | py | Python | backuppc_clone/__init__.py | SetBased/BackupPC-Clone | 73a71fcb8caefed7b0ea6f4d86b87863ec9e79a4 | ["MIT"] | 6 | 2019-04-17T12:16:46.000Z | 2022-02-08T17:39:24.000Z | backuppc_clone/__init__.py | SetBased/BackupPC-Clone | 73a71fcb8caefed7b0ea6f4d86b87863ec9e79a4 | ["MIT"] | null | null | null | backuppc_clone/__init__.py | SetBased/BackupPC-Clone | 73a71fcb8caefed7b0ea6f4d86b87863ec9e79a4 | ["MIT"] | null | null | null |
"""
BackupPC Clone
"""
from backuppc_clone.application.BackupPcCloneApplication import BackupPcCloneApplication
def main() -> int:
application = BackupPcCloneApplication()
ret = application.run()
return ret
| 18.5 | 88 | 0.752252 |
0da7a1bd42f1b29cee94598462b5fdffecd1fb37 | 3,531 | py | Python | cryptoquant/app/cta_strategy/strategies/boll_channel_strategy.py | studyquant/StudyQuant | 24790634ac320b25361672754558c3797f4fc9e3 | ["Apache-2.0"] | 74 | 2018-08-10T17:05:57.000Z | 2022-03-26T07:06:02.000Z | cryptoquant/app/cta_strategy/strategies/boll_channel_strategy.py | ezailwoo/studyquant | 24790634ac320b25361672754558c3797f4fc9e3 | ["Apache-2.0"] | 1 | 2022-03-24T06:42:00.000Z | 2022-03-24T06:42:00.000Z | cryptoquant/app/cta_strategy/strategies/boll_channel_strategy.py | ezailwoo/studyquant | 24790634ac320b25361672754558c3797f4fc9e3 | ["Apache-2.0"] | 18 | 2020-09-22T09:03:49.000Z | 2022-03-31T20:48:54.000Z |
from cryptoquant.app.cta_strategy import (
CtaTemplate,
StopOrder,
TickData,
BarData,
TradeData,
OrderData,
BarGenerator,
ArrayManager,
)
class BollChannelStrategy(CtaTemplate):
""""""
author = "用Python的交易员"
boll_window = 18
boll_dev = 3.4
cci_window = 10
atr_window = 30
sl_multiplier = 5.2
fixed_size = 1
boll_up = 0
boll_down = 0
cci_value = 0
atr_value = 0
intra_trade_high = 0
intra_trade_low = 0
long_stop = 0
short_stop = 0
parameters = [
"boll_window",
"boll_dev",
"cci_window",
"atr_window",
"sl_multiplier",
"fixed_size"
]
variables = [
"boll_up",
"boll_down",
"cci_value",
"atr_value",
"intra_trade_high",
"intra_trade_low",
"long_stop",
"short_stop"
]
def __init__(self, cta_engine, strategy_name, vt_symbol, setting):
""""""
super().__init__(cta_engine, strategy_name, vt_symbol, setting)
self.bg = BarGenerator(self.on_bar, 15, self.on_15min_bar)
self.am = ArrayManager()
def on_init(self):
"""
Callback when strategy is inited.
"""
self.write_log("策略初始化")
self.load_bar(10)
def on_start(self):
"""
Callback when strategy is started.
"""
self.write_log("策略启动")
def on_stop(self):
"""
Callback when strategy is stopped.
"""
self.write_log("策略停止")
def on_tick(self, tick: TickData):
"""
Callback of new tick data update.
"""
self.bg.update_tick(tick)
def on_bar(self, bar: BarData):
"""
Callback of new bar data update.
"""
self.bg.update_bar(bar)
def on_15min_bar(self, bar: BarData):
""""""
self.cancel_all()
am = self.am
am.update_bar(bar)
if not am.inited:
return
self.boll_up, self.boll_down = am.boll(self.boll_window, self.boll_dev)
self.cci_value = am.cci(self.cci_window)
self.atr_value = am.atr(self.atr_window)
if self.pos == 0:
self.intra_trade_high = bar.high_price
self.intra_trade_low = bar.low_price
if self.cci_value > 0:
self.buy(self.boll_up, self.fixed_size, True)
elif self.cci_value < 0:
self.short(self.boll_down, self.fixed_size, True)
elif self.pos > 0:
self.intra_trade_high = max(self.intra_trade_high, bar.high_price)
self.intra_trade_low = bar.low_price
self.long_stop = self.intra_trade_high - self.atr_value * self.sl_multiplier
self.sell(self.long_stop, abs(self.pos), True)
elif self.pos < 0:
self.intra_trade_high = bar.high_price
self.intra_trade_low = min(self.intra_trade_low, bar.low_price)
self.short_stop = self.intra_trade_low + self.atr_value * self.sl_multiplier
self.cover(self.short_stop, abs(self.pos), True)
self.put_event()
def on_order(self, order: OrderData):
"""
Callback of new order data update.
"""
pass
def on_trade(self, trade: TradeData):
"""
Callback of new trade data update.
"""
self.put_event()
def on_stop_order(self, stop_order: StopOrder):
"""
Callback of stop order update.
"""
pass
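# --- Editor's example (illustrative, not from the original strategy) ---
# The entries above hinge on Bollinger bands; this is a minimal pure-Python
# sketch of the same band computation (assuming a population standard
# deviation, as common BOLL implementations do).
import statistics
def boll_bands(closes, window, dev):
    sample = closes[-window:]
    mid = statistics.fmean(sample)
    spread = dev * statistics.pstdev(sample)
    return mid + spread, mid - spread  # (boll_up, boll_down)
# e.g. boll_bands([10.0, 11.0, 12.0, 11.0, 10.0], window=5, dev=3.4)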
| 24.020408 | 88 | 0.566129 |
dff190ae6c6588d95e2cae9bb6008e7bf4e708fd | 2,713 | py | Python | test/rules/resources/codepipeline/test_stages.py | KeyCore/cfn-python-lint | 342ac61db052224314ca1219a7a073b45841d78e | ["MIT-0"] | 1 | 2020-05-28T00:00:56.000Z | 2020-05-28T00:00:56.000Z | test/rules/resources/codepipeline/test_stages.py | eshack94/cfn-python-lint | 9ec44f41ae24b9d62576aed53efa888b00641e04 | ["MIT-0"] | null | null | null | test/rules/resources/codepipeline/test_stages.py | eshack94/cfn-python-lint | 9ec44f41ae24b9d62576aed53efa888b00641e04 | ["MIT-0"] | null | null | null |
"""
Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy of this
software and associated documentation files (the "Software"), to deal in the Software
without restriction, including without limitation the rights to use, copy, modify,
merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from cfnlint.rules.resources.codepipeline.CodepipelineStages import CodepipelineStages # pylint: disable=E0401
from ... import BaseRuleTestCase
class TestCodePipelineStages(BaseRuleTestCase):
"""Test CodePipeline Stages Configuration"""
def setUp(self):
"""Setup"""
super(TestCodePipelineStages, self).setUp()
self.collection.register(CodepipelineStages())
def test_file_positive(self):
"""Test Positive"""
self.helper_file_positive()
def test_file_negative_onestage(self):
"""Test failure"""
self.helper_file_negative('test/fixtures/templates/bad/resources/codepipeline/stages_one_stage.yaml', 2)
def test_file_negative_no_source(self):
"""Test failure"""
self.helper_file_negative('test/fixtures/templates/bad/resources/codepipeline/stages_no_source.yaml', 1)
def test_file_negative_second_stage(self):
"""Test failure"""
self.helper_file_negative('test/fixtures/templates/bad/resources/codepipeline/stages_second_stage.yaml', 1)
def test_file_negative_non_unique(self):
"""Test failure"""
self.helper_file_negative('test/fixtures/templates/bad/resources/codepipeline/stages_non_unique.yaml', 1)
def test_file_negative_only_source_types(self):
"""Test failure"""
self.helper_file_negative('test/fixtures/templates/bad/resources/codepipeline/stages_only_source.yaml', 2)
def test_scenario_format(self):
"""Test scenario formatting"""
rule = CodepipelineStages()
self.assertEqual(rule._format_error_message('Test.', {'Condition': True}), 'Test. When condition "Condition" is True')
self.assertEqual(rule._format_error_message('Test.', None), 'Test.')
| 46.775862 | 126 | 0.740877 |
68f1fcfa71d3303afe10d78d08c42025e38e8f59 | 3,231 | py | Python | src/djangoreactredux/settings/base.py | TehThreeMusketeers/happy-kowalevski | b82aed72a1fe3dec8dec64dabda6807dfa5f987a | ["MIT"] | null | null | null | src/djangoreactredux/settings/base.py | TehThreeMusketeers/happy-kowalevski | b82aed72a1fe3dec8dec64dabda6807dfa5f987a | ["MIT"] | 13 | 2018-01-28T15:39:32.000Z | 2018-02-23T16:05:18.000Z | src/djangoreactredux/settings/base.py | TehThreeMusketeers/happy-kowalevski | b82aed72a1fe3dec8dec64dabda6807dfa5f987a | ["MIT"] | null | null | null |
"""Django settings for djangoreactredux project."""
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(__file__))) # remove /sswmain/settings to get base folder
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'ajsdgas7&*kosdsa21[]jaksdhlka-;kmcv8l$#diepsm8&ah^'
DEBUG = True
ALLOWED_HOSTS = ['localhost','sccug-330-04.lancs.ac.uk']
APPEND_SLASH = False
# Application definition
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.staticfiles',
'django.contrib.messages',
'django.contrib.sessions',
'django.contrib.admin',
'requests',
'rest_framework',
'knox',
'django_extensions',
'accounts',
'base',
'devices',
)
MIDDLEWARE_CLASSES = (
'django.middleware.security.SecurityMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware',
'django.middleware.common.CommonMiddleware',
# 'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
)
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
ROOT_URLCONF = 'djangoreactredux.urls'
WSGI_APPLICATION = 'djangoreactredux.wsgi.application'
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
AUTH_USER_MODEL = 'accounts.User'
ACCOUNT_ACTIVATION_DAYS = 7 # days
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static_root')
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'static_dist'),
)
# store static files locally and serve with whitenoise
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
# ############# REST FRAMEWORK ###################
REST_FRAMEWORK = {
'DEFAULT_PERMISSION_CLASSES': (),
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.SessionAuthentication',
'rest_framework.authentication.BasicAuthentication',
),
'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.PageNumberPagination',
'PAGE_SIZE': 20,
'DEFAULT_PARSER_CLASSES': (
'rest_framework.parsers.JSONParser',
),
}
# ############ REST KNOX ########################
REST_KNOX = {
'SECURE_HASH_ALGORITHM': 'cryptography.hazmat.primitives.hashes.SHA512',
'AUTH_TOKEN_CHARACTER_LENGTH': 64,
'USER_SERIALIZER': 'knox.serializers.UserSerializer'
}
######## Particle Cloud Settings #############
PARTICLE_PRODUCT_ID = os.environ['PARTICLE_PRODUCT_ID']
PARTICLE_API_KEY = os.environ['PARTICLE_API_KEY']
PARTICLE_API_CLIENT = os.environ['PARTICLE_API_CLIENT']
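# --- Editor's note (hedged example, not part of the original settings) ---
# The hard-coded SECRET_KEY above should normally be overridden from the
# environment in production, e.g.:
#
#     SECRET_KEY = os.environ.get('DJANGO_SECRET_KEY', SECRET_KEY)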
| 27.615385 | 117 | 0.692974 |
aa4bc076b67f7d7b86ba2b8daf32413c8a007c47 | 316 | py | Python | tests/test_qt.py | ancasag/labelDetection_2 | 96dc7447e5fc27f27371e07dc9edb4fb7a4de94f | ["MIT"] | 1 | 2020-12-10T09:26:27.000Z | 2020-12-10T09:26:27.000Z | tests/test_qt.py | ancasag/labelDetection_2 | 96dc7447e5fc27f27371e07dc9edb4fb7a4de94f | ["MIT"] | 25 | 2020-09-25T22:33:07.000Z | 2022-03-12T00:15:27.000Z | tests/test_qt.py | ancasag/labelDetection_2 | 96dc7447e5fc27f27371e07dc9edb4fb7a4de94f | ["MIT"] | null | null | null |
from unittest import TestCase
from labelDetection import get_main_app
class TestMainWindow(TestCase):
app = None
win = None
def setUp(self):
self.app, self.win = get_main_app()
def tearDown(self):
self.win.close()
self.app.quit()
def test_noop(self):
pass
| 15.047619 | 43 | 0.629747 |
c830de705323715d7a28867e34943bdf06bc9b77 | 3,335 | py | Python | othello/tensorflow/OthelloNNet.py | mchaney22/alpha-zero-general | 8705c9ebff6a4af7eaf9229b3ab61c2305fa5b4e | ["MIT"] | null | null | null | othello/tensorflow/OthelloNNet.py | mchaney22/alpha-zero-general | 8705c9ebff6a4af7eaf9229b3ab61c2305fa5b4e | ["MIT"] | null | null | null | othello/tensorflow/OthelloNNet.py | mchaney22/alpha-zero-general | 8705c9ebff6a4af7eaf9229b3ab61c2305fa5b4e | ["MIT"] | null | null | null |
import sys
sys.path.append('..')
from utils import *
import tensorflow as tf
## Code based on OthelloNNet with minimal changes.
class OthelloNNet():
def __init__(self, game, args):
# game params
self.board_x, self.board_y = game.getBoardSize()
self.action_size = game.getActionSize()
self.args = args
# Renaming functions
Relu = tf.nn.relu
Tanh = tf.nn.tanh
BatchNormalization = tf.layers.batch_normalization
Dropout = tf.layers.dropout
Dense = tf.layers.dense
# Neural Net
self.graph = tf.Graph()
with self.graph.as_default():
self.input_boards = tf.placeholder(tf.float32, shape=[None, self.board_x, self.board_y]) # s: batch_size x board_x x board_y
self.dropout = tf.placeholder(tf.float32)
self.isTraining = tf.placeholder(tf.bool, name="is_training")
x_image = tf.reshape(self.input_boards, [-1, self.board_x, self.board_y, 1]) # batch_size x board_x x board_y x 1
h_conv1 = Relu(BatchNormalization(self.conv2d(x_image, args.num_channels, 'same'), axis=3, training=self.isTraining)) # batch_size x board_x x board_y x num_channels
h_conv2 = Relu(BatchNormalization(self.conv2d(h_conv1, args.num_channels, 'same'), axis=3, training=self.isTraining)) # batch_size x board_x x board_y x num_channels
h_conv3 = Relu(BatchNormalization(self.conv2d(h_conv2, args.num_channels, 'valid'), axis=3, training=self.isTraining)) # batch_size x (board_x-2) x (board_y-2) x num_channels
h_conv4 = Relu(BatchNormalization(self.conv2d(h_conv3, args.num_channels, 'valid'), axis=3, training=self.isTraining)) # batch_size x (board_x-4) x (board_y-4) x num_channels
h_conv4_flat = tf.reshape(h_conv4, [-1, args.num_channels * (self.board_x - 4) * (self.board_y - 4)])
s_fc1 = Dropout(Relu(BatchNormalization(Dense(h_conv4_flat, 1024), axis=1, training=self.isTraining)), rate=self.dropout) # batch_size x 1024
s_fc2 = Dropout(Relu(BatchNormalization(Dense(s_fc1, 512), axis=1, training=self.isTraining)), rate=self.dropout) # batch_size x 512
self.pi = Dense(s_fc2, self.action_size) # batch_size x self.action_size
self.prob = tf.nn.softmax(self.pi)
self.v = Tanh(Dense(s_fc2, 1)) # batch_size x 1
self.calculate_loss()
def conv2d(self, x, out_channels, padding):
return tf.layers.conv2d(x, out_channels, kernel_size=[3, 3], padding=padding)
def calculate_loss(self):
self.target_pis = tf.placeholder(tf.float32, shape=[None, self.action_size])
self.target_vs = tf.placeholder(tf.float32, shape=[None])
self.loss_pi = tf.losses.softmax_cross_entropy(self.target_pis, self.pi)
self.loss_v = tf.losses.mean_squared_error(self.target_vs, tf.reshape(self.v, shape=[-1, ]))
self.total_loss = self.loss_pi + self.loss_v
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
self.train_step = tf.train.AdamOptimizer(self.args.lr).minimize(self.total_loss)
| 58.508772 | 191 | 0.642879 |
54b2205673c0127fdd08b0ef62f65ebd331e6a91 | 280 | py | Python | automation/tincrepo/main/pxf/features/hdfs/readable/text/wildcard/runTest.py | lchx1010/pxf | f6e11f91fb8c01ed27fc829beb3800f3b253c209 | ["Apache-2.0"] | 46 | 2018-10-22T23:34:03.000Z | 2022-03-31T09:31:34.000Z | automation/tincrepo/main/pxf/features/hdfs/readable/text/wildcard/runTest.py | lchx1010/pxf | f6e11f91fb8c01ed27fc829beb3800f3b253c209 | ["Apache-2.0"] | 317 | 2018-10-05T23:51:48.000Z | 2022-03-22T17:38:52.000Z | automation/tincrepo/main/pxf/features/hdfs/readable/text/wildcard/runTest.py | lchx1010/pxf | f6e11f91fb8c01ed27fc829beb3800f3b253c209 | ["Apache-2.0"] | 46 | 2018-10-10T18:55:00.000Z | 2022-03-28T07:27:04.000Z |
from mpp.models import SQLTestCase
from mpp.models import SQLConcurrencyTestCase
class PxfHdfsReadWildcard(SQLConcurrencyTestCase):
"""
@db_name pxfautomation
@concurrency 1
@gpdiff True
"""
sql_dir = 'sql'
ans_dir = 'expected'
out_dir = 'output'
| 21.538462 | 50 | 0.703571 |
2779a35a208de817b112715049009cfd41d66547 | 895 | py | Python | tests/unit/v0x04/test_controller2switch/test_group_stats_request.py | josemauro/python-openflow | 0537c626f3aeb4c53995b65e0783f09ad5e63101 | ["MIT"] | 48 | 2016-06-09T14:36:22.000Z | 2021-11-11T16:05:19.000Z | tests/unit/v0x04/test_controller2switch/test_group_stats_request.py | josemauro/python-openflow | 0537c626f3aeb4c53995b65e0783f09ad5e63101 | ["MIT"] | 338 | 2016-05-06T18:42:36.000Z | 2021-04-29T17:57:09.000Z | tests/unit/v0x04/test_controller2switch/test_group_stats_request.py | josemauro/python-openflow | 0537c626f3aeb4c53995b65e0783f09ad5e63101 | ["MIT"] | 46 | 2016-05-24T15:32:56.000Z | 2021-06-01T12:16:17.000Z |
"""Group stats request message."""
from pyof.v0x04.controller2switch.common import MultipartType
from pyof.v0x04.controller2switch.multipart_request import (
GroupStatsRequest, MultipartRequest)
from tests.unit.test_struct import TestStruct
class TestGroupStatsRequest(TestStruct):
"""Group stats request message."""
@classmethod
def setUpClass(cls):
"""Configure raw file and its object in parent class (TestDump)."""
super().setUpClass()
super().set_raw_dump_file('v0x04', 'ofpt_group_stats_request')
super().set_raw_dump_object(MultipartRequest, xid=1,
multipart_type=MultipartType.OFPMP_GROUP,
flags=0, body=_get_body())
super().set_minimum_size(16)
def _get_body():
"""Return the body used by MultipartRequest message."""
return GroupStatsRequest()
| 35.8 | 77 | 0.67933 |
d03fe5fa3e29094a9bdd409b235485ed657f385d | 105 | py | Python | edge/model/__init__.py | Data-Science-in-Mechanical-Engineering/edge | 586eaba2f0957e75940f4f19fa774603f57eae89 | ["MIT"] | null | null | null | edge/model/__init__.py | Data-Science-in-Mechanical-Engineering/edge | 586eaba2f0957e75940f4f19fa774603f57eae89 | ["MIT"] | null | null | null | edge/model/__init__.py | Data-Science-in-Mechanical-Engineering/edge | 586eaba2f0957e75940f4f19fa774603f57eae89 | ["MIT"] | null | null | null |
from .models import Model, ContinuousModel, DiscreteModel, GPModel
from .ground_truth import GroundTruth
| 35 | 66 | 0.847619 |
01c8e83d55c886bf3ba688881bad87c697309987 | 5,419 | py | Python | benchmarks/ltl_maxplus/f3/maxplus_4_20.py | EnricoMagnago/F3 | c863215c318d7d5f258eb9be38c6962cf6863b52 | ["MIT"] | 3 | 2021-04-23T23:29:26.000Z | 2022-03-23T10:00:30.000Z | benchmarks/ltl_maxplus/f3/maxplus_4_20.py | EnricoMagnago/F3 | c863215c318d7d5f258eb9be38c6962cf6863b52 | ["MIT"] | null | null | null | benchmarks/ltl_maxplus/f3/maxplus_4_20.py | EnricoMagnago/F3 | c863215c318d7d5f258eb9be38c6962cf6863b52 | ["MIT"] | 1 | 2021-11-17T22:02:56.000Z | 2021-11-17T22:02:56.000Z |
from collections.abc import Iterable
from mathsat import msat_term, msat_env
from mathsat import msat_make_true, msat_make_false
from mathsat import msat_make_constant, msat_declare_function
from mathsat import msat_get_rational_type
from mathsat import msat_make_and as _msat_make_and
from mathsat import msat_make_or as _msat_make_or
from mathsat import msat_make_not
from mathsat import msat_make_leq, msat_make_equal
from mathsat import msat_make_number, msat_make_plus, msat_make_times
from ltl.ltl import TermMap, LTLEncoder
from utils import name_next
def msat_make_and(menv: msat_env, *args):
if len(args) == 0:
return msat_make_true(menv)
if len(args) == 1:
return args[0]
res = _msat_make_and(menv, args[0], args[1])
for arg in args[2:]:
res = _msat_make_and(menv, res, arg)
return res
def msat_make_or(menv: msat_env, *args):
if len(args) == 0:
return msat_make_false(menv)
if len(args) == 1:
return args[0]
res = _msat_make_or(menv, args[0], args[1])
for arg in args[2:]:
res = _msat_make_or(menv, res, arg)
return res
def msat_make_minus(menv: msat_env, arg0: msat_term, arg1: msat_term):
n_m1 = msat_make_number(menv, "-1")
arg1 = msat_make_times(menv, arg1, n_m1)
return msat_make_plus(menv, arg0, arg1)
def msat_make_lt(menv: msat_env, arg0: msat_term, arg1: msat_term):
geq = msat_make_geq(menv, arg0, arg1)
return msat_make_not(menv, geq)
def msat_make_geq(menv: msat_env, arg0: msat_term, arg1: msat_term):
return msat_make_leq(menv, arg1, arg0)
def msat_make_gt(menv: msat_env, arg0: msat_term, arg1: msat_term):
leq = msat_make_leq(menv, arg0, arg1)
return msat_make_not(menv, leq)
def msat_make_impl(menv: msat_env, arg0: msat_term, arg1: msat_term):
n_arg0 = msat_make_not(menv, arg0)
return msat_make_or(menv, n_arg0, arg1)
def check_ltl(menv: msat_env, enc: LTLEncoder) -> (Iterable, msat_term,
msat_term, msat_term):
assert menv
assert isinstance(menv, msat_env)
assert enc
assert isinstance(enc, LTLEncoder)
real_type = msat_get_rational_type(menv)
names = ["x_0", "x_1", "x_2", "x_3"]
xs = [msat_declare_function(menv, name, real_type)
for name in names]
xs = [msat_make_constant(menv, x) for x in xs]
x_xs = [msat_declare_function(menv, name_next(name), real_type)
for name in names]
x_xs = [msat_make_constant(menv, x_x) for x_x in x_xs]
curr2next = {x: x_x for x, x_x in zip(xs, x_xs)}
n_14_0 = msat_make_number(menv, "14.0")
n_16_0 = msat_make_number(menv, "16.0")
n_17_0 = msat_make_number(menv, "17.0")
n_18_0 = msat_make_number(menv, "18.0")
n_20_0 = msat_make_number(menv, "20.0")
n_2_0 = msat_make_number(menv, "2.0")
n_6_0 = msat_make_number(menv, "6.0")
n_8_0 = msat_make_number(menv, "8.0")
init = msat_make_true(menv)
trans = msat_make_true(menv)
# transitions
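    # Each block below encodes one max-plus update x_i' = max(x_j + c, x_k + d):
    # x_i' is constrained to be >= both operands and equal to at least one of them.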
expr0 = msat_make_plus(menv, xs[1], n_6_0)
expr1 = msat_make_plus(menv, xs[3], n_17_0)
_t = msat_make_and(menv,
msat_make_geq(menv, x_xs[0], expr0),
msat_make_geq(menv, x_xs[0], expr1),)
_t = msat_make_and(menv, _t,
msat_make_or(menv,
msat_make_equal(menv, x_xs[0], expr0),
msat_make_equal(menv, x_xs[0], expr1),))
trans = msat_make_and(menv, trans, _t)
expr0 = msat_make_plus(menv, xs[0], n_2_0)
expr1 = msat_make_plus(menv, xs[3], n_20_0)
_t = msat_make_and(menv,
msat_make_geq(menv, x_xs[1], expr0),
msat_make_geq(menv, x_xs[1], expr1),)
_t = msat_make_and(menv, _t,
msat_make_or(menv,
msat_make_equal(menv, x_xs[1], expr0),
msat_make_equal(menv, x_xs[1], expr1),))
trans = msat_make_and(menv, trans, _t)
expr0 = msat_make_plus(menv, xs[0], n_8_0)
expr1 = msat_make_plus(menv, xs[1], n_14_0)
_t = msat_make_and(menv,
msat_make_geq(menv, x_xs[2], expr0),
msat_make_geq(menv, x_xs[2], expr1),)
_t = msat_make_and(menv, _t,
msat_make_or(menv,
msat_make_equal(menv, x_xs[2], expr0),
msat_make_equal(menv, x_xs[2], expr1),))
trans = msat_make_and(menv, trans, _t)
expr0 = msat_make_plus(menv, xs[1], n_18_0)
expr1 = msat_make_plus(menv, xs[2], n_16_0)
_t = msat_make_and(menv,
msat_make_geq(menv, x_xs[3], expr0),
msat_make_geq(menv, x_xs[3], expr1),)
_t = msat_make_and(menv, _t,
msat_make_or(menv,
msat_make_equal(menv, x_xs[3], expr0),
msat_make_equal(menv, x_xs[3], expr1),))
trans = msat_make_and(menv, trans, _t)
# ltl property: ((x_2 - x_1 >= 1) | (X (F (x_2 - x_1 >= 9))))
ltl = msat_make_or(menv, msat_make_geq(menv, msat_make_minus(menv, xs[2], xs[1]), msat_make_number(menv, "1")), enc.make_X(enc.make_F(msat_make_geq(menv, msat_make_minus(menv, xs[2], xs[1]), msat_make_number(menv, "9")))))
return TermMap(curr2next), init, trans, ltl
| 37.372414 | 226 | 0.617826 |
aa2d8a019471397f0a10d3e4700dcf4f8e141a71 | 104 | py | Python | setup.py | adornetejr/manim | e0715ceeff4778d11ef4ac31f8f8f2b56a2187ad | [
"MIT"
] | 658 | 2018-04-06T19:14:03.000Z | 2022-03-31T14:48:39.000Z | setup.py | im-AMS/manim | 19e3c97589181ffd43ef14d9169af4e40e054664 | [
"MIT"
] | 783 | 2018-04-06T16:47:30.000Z | 2022-03-31T14:24:18.000Z | setup.py | im-AMS/manim | 19e3c97589181ffd43ef14d9169af4e40e054664 | [
"MIT"
] | 249 | 2018-04-06T16:44:34.000Z | 2022-03-28T10:26:19.000Z | #!/usr/bin/env python
from setuptools import setup
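# pbr fills in the package metadata from setup.cfg at build time.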
setup(
setup_requires=['pbr'],
pbr=True,
)
| 11.555556 | 28 | 0.663462 |
b96972bca1e42363e5dae94c9b13b464bebbc0bc | 7,533 | py | Python | diagnostics_tools/src/diagnostic_tools/periodic_event_status.py | ChrisScianna/ROS-Underwater-RnD | f928bcc6b19a830b98e2cc2aedd65ff35b887901 | [
"BSD-3-Clause"
] | null | null | null | diagnostics_tools/src/diagnostic_tools/periodic_event_status.py | ChrisScianna/ROS-Underwater-RnD | f928bcc6b19a830b98e2cc2aedd65ff35b887901 | [
"BSD-3-Clause"
] | 85 | 2020-10-05T11:44:46.000Z | 2021-09-08T14:31:07.000Z | diagnostics_tools/src/diagnostic_tools/periodic_event_status.py | ChrisScianna/ROS-Underwater-RnD | f928bcc6b19a830b98e2cc2aedd65ff35b887901 | [
"BSD-3-Clause"
] | 1 | 2021-11-04T13:18:17.000Z | 2021-11-04T13:18:17.000Z | """
* Software License Agreement (BSD License)
*
* Copyright (c) 2020, QinetiQ, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of QinetiQ nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
"""
import collections
import rospy
from diagnostic_tools.diagnostic import Diagnostic
from diagnostic_tools.periodic_diagnostic_task import PeriodicDiagnosticTask
from diagnostic_tools.sampled_statistics import SampledStatistics
class PeriodicEventStatus(PeriodicDiagnosticTask):
Diagnostics = collections.namedtuple('Diagnostics', 'normal abnormal stale')
Configuration = collections.namedtuple(
'Configuration',
' '.join([
'min_acceptable_period max_acceptable_period max_reasonable_period',
'short_term_avg_window long_term_avg_window diagnostics',
])
)
@staticmethod
def configure(
min_acceptable_period=-1,
max_acceptable_period=5,
max_reasonable_period=None,
short_term_avg_window=1,
long_term_avg_window=10000,
normal_diagnostic=Diagnostic.OK,
abnormal_diagnostic=Diagnostic.WARN,
stale_diagnostic=Diagnostic.STALE
):
normal_diagnostic = Diagnostic.build(normal_diagnostic)
if not normal_diagnostic.description:
normal_diagnostic.description = 'Rate within tolerance'
abnormal_diagnostic = Diagnostic.build(abnormal_diagnostic)
if not abnormal_diagnostic.description:
abnormal_diagnostic.description = 'Rate too high or too low'
stale_diagnostic = Diagnostic.build(stale_diagnostic)
if not stale_diagnostic.description:
stale_diagnostic.description = 'Not enough data since last update'
if max_reasonable_period is None:
max_reasonable_period = 5 * max_acceptable_period
return PeriodicEventStatus.Configuration(
min_acceptable_period=min_acceptable_period,
max_acceptable_period=max_acceptable_period,
max_reasonable_period=max_reasonable_period,
short_term_avg_window=short_term_avg_window,
long_term_avg_window=long_term_avg_window,
diagnostics=PeriodicEventStatus.Diagnostics(
normal=normal_diagnostic,
abnormal=abnormal_diagnostic,
stale=stale_diagnostic
)
)
def __init__(self, name='rate check', config=None, **kwargs):
PeriodicDiagnosticTask.__init__(self, name, **kwargs)
if config is None:
config = PeriodicEventStatus.configure()
self._config = config
self._last_time = rospy.Time(0)
self._start_time = rospy.Time.now()
self._short_term_period = SampledStatistics(
self._config.short_term_avg_window, dtype=float
)
self._long_term_period = SampledStatistics(
self._config.long_term_avg_window, dtype=float
)
def tick(self, time=None):
time = time or rospy.Time.now()
with self._lock:
if not self._last_time.is_zero():
if self._last_time <= time:
time_delta = (time - self._last_time).to_sec()
if time_delta <= self._config.max_reasonable_period:
self._short_term_period.update(time_delta)
self._long_term_period.update(time_delta)
else:
rospy.logdebug(
'Time delta %f too long, ignoring', time_delta
)
else:
rospy.logdebug(
'Time went backwards from %f to %f, ignoring',
self._last_time.to_sec(), time.to_sec()
)
self._last_time = time
def run(self, stat):
with self._lock:
diagnostic = self._config.diagnostics.normal
current_time = rospy.Time.now()
time_delta = (current_time - self._last_time).to_sec()
if self._short_term_period.sample_count > 0 and \
time_delta < self._config.max_reasonable_period:
if self._short_term_period.average < self._config.min_acceptable_period or \
self._short_term_period.average > self._config.max_acceptable_period:
diagnostic = self._config.diagnostics.abnormal
stat.summary(diagnostic.status, diagnostic.description)
if diagnostic.code is not None:
stat.add('Code', diagnostic.code)
stat.add(
'Average period (short term)', self._short_term_period.average)
stat.add(
'Minimum period (short term)', self._short_term_period.minimum)
stat.add(
'Maximum period (short term)', self._short_term_period.maximum)
else:
time_since_init = (current_time - self._start_time).to_sec()
if time_since_init <= self._config.max_reasonable_period:
# Likely still initializing, skip diagnostic checks
return
# Event is likely stale, reset estimate
self._short_term_period.reset()
diagnostic = self._config.diagnostics.stale
stat.summary(diagnostic.status, diagnostic.description)
if diagnostic.code is not None:
stat.add('Code', diagnostic.code)
if self._long_term_period.sample_count > 0:
stat.add(
'Average period (historic)', self._long_term_period.average)
stat.add(
'Minimum period (historic)', self._long_term_period.minimum)
stat.add(
'Maximum period (historic)', self._long_term_period.maximum)
stat.add('Minimum acceptable period', self._config.min_acceptable_period)
stat.add('Maximum acceptable period', self._config.max_acceptable_period)
return stat
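# Illustrative usage sketch (not part of the original file; names are
# hypothetical). A node creates the task, feeds it events via tick(), and a
# diagnostic updater periodically calls run(stat):
#     config = PeriodicEventStatus.configure(max_acceptable_period=0.5)
#     task = PeriodicEventStatus('imu rate check', config=config)
#     task.tick()  # call from the message callback on every event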
| 45.379518 | 92 | 0.646887 |
dffabe7fcee90488c421029df8799a35806fcfd1 | 15,450 | py | Python | lib-python/3/distutils/unixccompiler.py | olliemath/pypy | 8b873bd0b8bf76075aba3d915c260789f26f5788 | [
"Apache-2.0",
"OpenSSL"
] | null | null | null | lib-python/3/distutils/unixccompiler.py | olliemath/pypy | 8b873bd0b8bf76075aba3d915c260789f26f5788 | [
"Apache-2.0",
"OpenSSL"
] | null | null | null | lib-python/3/distutils/unixccompiler.py | olliemath/pypy | 8b873bd0b8bf76075aba3d915c260789f26f5788 | [
"Apache-2.0",
"OpenSSL"
] | null | null | null | """distutils.unixccompiler
Contains the UnixCCompiler class, a subclass of CCompiler that handles
the "typical" Unix-style command-line C compiler:
* macros defined with -Dname[=value]
* macros undefined with -Uname
* include search directories specified with -Idir
* libraries specified with -lllib
* library search directories specified with -Ldir
* compile handled by 'cc' (or similar) executable with -c option:
compiles .c to .o
* link static library handled by 'ar' command (possibly with 'ranlib')
* link shared library handled by 'cc -shared'
"""
import os, sys, re
from distutils import sysconfig
from distutils.dep_util import newer
from distutils.ccompiler import \
CCompiler, gen_preprocess_options, gen_lib_options
from distutils.errors import \
DistutilsExecError, CompileError, LibError, LinkError
from distutils import log
if sys.platform == 'darwin':
import _osx_support
# XXX Things not currently handled:
# * optimization/debug/warning flags; we just use whatever's in Python's
# Makefile and live with it. Is this adequate? If not, we might
# have to have a bunch of subclasses GNUCCompiler, SGICCompiler,
# SunCCompiler, and I suspect down that road lies madness.
# * even if we don't know a warning flag from an optimization flag,
# we need some way for outsiders to feed preprocessor/compiler/linker
# flags in to us -- eg. a sysadmin might want to mandate certain flags
# via a site config file, or a user might want to set something for
# compiling this module distribution only via the setup.py command
# line, whatever. As long as these options come from something on the
# current system, they can be as system-dependent as they like, and we
# should just happily stuff them into the preprocessor/compiler/linker
# options and carry on.
class UnixCCompiler(CCompiler):
compiler_type = 'unix'
# These are used by CCompiler in two places: the constructor sets
# instance attributes 'preprocessor', 'compiler', etc. from them, and
# 'set_executable()' allows any of these to be set. The defaults here
# are pretty generic; they will probably have to be set by an outsider
# (eg. using information discovered by the sysconfig about building
# Python extensions).
executables = {'preprocessor' : None,
'compiler' : ["cc"],
'compiler_so' : ["cc"],
'compiler_cxx' : ["c++"], # pypy: changed, 'cc' is bogus
'linker_so' : ["cc", "-shared"],
'linker_exe' : ["cc"],
'archiver' : ["ar", "-cr"],
'ranlib' : None,
}
if sys.platform[:6] == "darwin":
import platform
if platform.machine() == 'i386':
if platform.architecture()[0] == '32bit':
arch = 'i386'
else:
arch = 'x86_64'
else:
# just a guess
arch = platform.machine()
executables['ranlib'] = ["ranlib"]
executables['linker_so'] += ['-undefined', 'dynamic_lookup']
for k, v in executables.items():
if v and v[0] == 'cc':
v += ['-arch', arch]
# Needed for the filename generation methods provided by the base
# class, CCompiler. NB. whoever instantiates/uses a particular
# UnixCCompiler instance should set 'shared_lib_ext' -- we set a
# reasonable common default here, but it's not necessarily used on all
# Unices!
src_extensions = [".c",".C",".cc",".cxx",".cpp",".m"]
obj_extension = ".o"
static_lib_extension = ".a"
shared_lib_extension = ".so"
dylib_lib_extension = ".dylib"
xcode_stub_lib_extension = ".tbd"
static_lib_format = shared_lib_format = dylib_lib_format = "lib%s%s"
xcode_stub_lib_format = dylib_lib_format
if sys.platform == "cygwin":
exe_extension = ".exe"
def preprocess(self, source, output_file=None, macros=None,
include_dirs=None, extra_preargs=None, extra_postargs=None):
fixed_args = self._fix_compile_args(None, macros, include_dirs)
ignore, macros, include_dirs = fixed_args
pp_opts = gen_preprocess_options(macros, include_dirs)
pp_args = self.preprocessor + pp_opts
if output_file:
pp_args.extend(['-o', output_file])
if extra_preargs:
pp_args[:0] = extra_preargs
if extra_postargs:
pp_args.extend(extra_postargs)
pp_args.append(source)
# We need to preprocess: either we're being forced to, or we're
# generating output to stdout, or there's a target output file and
# the source file is newer than the target (or the target doesn't
# exist).
if self.force or output_file is None or newer(source, output_file):
if output_file:
self.mkpath(os.path.dirname(output_file))
try:
self.spawn(pp_args)
except DistutilsExecError as msg:
raise CompileError(msg)
def _compile(self, obj, src, ext, cc_args, extra_postargs, pp_opts):
compiler_so = self.compiler_so
if sys.platform == 'darwin':
compiler_so = _osx_support.compiler_fixup(compiler_so,
cc_args + extra_postargs)
try:
self.spawn(compiler_so + cc_args + [src, '-o', obj] +
extra_postargs)
except DistutilsExecError as msg:
raise CompileError(msg)
def create_static_lib(self, objects, output_libname,
output_dir=None, debug=0, target_lang=None):
objects, output_dir = self._fix_object_args(objects, output_dir)
output_filename = \
self.library_filename(output_libname, output_dir=output_dir)
if self._need_link(objects, output_filename):
self.mkpath(os.path.dirname(output_filename))
self.spawn(self.archiver +
[output_filename] +
objects + self.objects)
# Not many Unices required ranlib anymore -- SunOS 4.x is, I
# think the only major Unix that does. Maybe we need some
# platform intelligence here to skip ranlib if it's not
# needed -- or maybe Python's configure script took care of
# it for us, hence the check for leading colon.
if self.ranlib:
try:
self.spawn(self.ranlib + [output_filename])
except DistutilsExecError as msg:
raise LibError(msg)
else:
log.debug("skipping %s (up-to-date)", output_filename)
def link(self, target_desc, objects,
output_filename, output_dir=None, libraries=None,
library_dirs=None, runtime_library_dirs=None,
export_symbols=None, debug=0, extra_preargs=None,
extra_postargs=None, build_temp=None, target_lang=None):
objects, output_dir = self._fix_object_args(objects, output_dir)
fixed_args = self._fix_lib_args(libraries, library_dirs,
runtime_library_dirs)
libraries, library_dirs, runtime_library_dirs = fixed_args
lib_opts = gen_lib_options(self, library_dirs, runtime_library_dirs,
libraries)
if not isinstance(output_dir, (str, type(None))):
raise TypeError("'output_dir' must be a string or None")
if output_dir is not None:
output_filename = os.path.join(output_dir, output_filename)
if self._need_link(objects, output_filename):
ld_args = (objects + self.objects +
lib_opts + ['-o', output_filename])
if debug:
ld_args[:0] = ['-g']
if extra_preargs:
ld_args[:0] = extra_preargs
if extra_postargs:
ld_args.extend(extra_postargs)
self.mkpath(os.path.dirname(output_filename))
try:
if target_desc == CCompiler.EXECUTABLE:
linker = self.linker_exe[:]
else:
linker = self.linker_so[:]
if target_lang == "c++" and self.compiler_cxx:
# skip over environment variable settings if /usr/bin/env
# is used to set up the linker's environment.
# This is needed on OSX. Note: this assumes that the
# normal and C++ compiler have the same environment
# settings.
i = 0
if os.path.basename(linker[0]) == "env":
i = 1
while '=' in linker[i]:
i += 1
if os.path.basename(linker[i]) == 'ld_so_aix':
# AIX platforms prefix the compiler with the ld_so_aix
# script, so we need to adjust our linker index
offset = 1
else:
offset = 0
linker[i+offset] = self.compiler_cxx[i]
if sys.platform == 'darwin':
linker = _osx_support.compiler_fixup(linker, ld_args)
self.spawn(linker + ld_args)
except DistutilsExecError as msg:
raise LinkError(msg)
else:
log.debug("skipping %s (up-to-date)", output_filename)
# -- Miscellaneous methods -----------------------------------------
# These are all used by the 'gen_lib_options() function, in
# ccompiler.py.
def library_dir_option(self, dir):
return "-L" + dir
def _is_gcc(self, compiler_name):
if "__pypy__" in sys.builtin_module_names: # issue #2747
if (compiler_name.startswith('cc') or
compiler_name.startswith('c++')):
return True
return "gcc" in compiler_name or "g++" in compiler_name
def runtime_library_dir_option(self, dir):
# XXX Hackish, at the very least. See Python bug #445902:
# http://sourceforge.net/tracker/index.php
# ?func=detail&aid=445902&group_id=5470&atid=105470
# Linkers on different platforms need different options to
# specify that directories need to be added to the list of
# directories searched for dependencies when a dynamic library
# is sought. GCC on GNU systems (Linux, FreeBSD, ...) has to
# be told to pass the -R option through to the linker, whereas
# other compilers and gcc on other systems just know this.
# Other compilers may need something slightly different. At
# this time, there's no way to determine this information from
# the configuration data stored in the Python installation, so
# we use this hack.
compiler = os.path.basename(sysconfig.get_config_var("CC"))
if sys.platform[:6] == "darwin":
# MacOSX's linker doesn't understand the -R flag at all
return "-L" + dir
elif sys.platform[:7] == "freebsd":
return "-Wl,-rpath=" + dir
elif sys.platform[:5] == "hp-ux":
if self._is_gcc(compiler):
return ["-Wl,+s", "-L" + dir]
return ["+s", "-L" + dir]
else:
if self._is_gcc(compiler):
# gcc on non-GNU systems does not need -Wl, but can
# use it anyway. Since distutils has always passed in
# -Wl whenever gcc was used in the past it is probably
# safest to keep doing so.
if sysconfig.get_config_var("GNULD") == "yes":
# GNU ld needs an extra option to get a RUNPATH
# instead of just an RPATH.
return "-Wl,--enable-new-dtags,-R" + dir
else:
return "-Wl,-R" + dir
else:
# No idea how --enable-new-dtags would be passed on to
# ld if this system was using GNU ld. Don't know if a
# system like this even exists.
return "-R" + dir
def library_option(self, lib):
return "-l" + lib
def find_library_file(self, dirs, lib, debug=0):
shared_f = self.library_filename(lib, lib_type='shared')
dylib_f = self.library_filename(lib, lib_type='dylib')
xcode_stub_f = self.library_filename(lib, lib_type='xcode_stub')
static_f = self.library_filename(lib, lib_type='static')
if sys.platform == 'darwin':
# On OSX users can specify an alternate SDK using
# '-isysroot', calculate the SDK root if it is specified
# (and use it further on)
#
# Note that, as of Xcode 7, Apple SDKs may contain textual stub
# libraries with .tbd extensions rather than the normal .dylib
# shared libraries installed in /. The Apple compiler tool
# chain handles this transparently but it can cause problems
# for programs that are being built with an SDK and searching
# for specific libraries. Callers of find_library_file need to
# keep in mind that the base filename of the returned SDK library
# file might have a different extension from that of the library
# file installed on the running system, for example:
# /Applications/Xcode.app/Contents/Developer/Platforms/
# MacOSX.platform/Developer/SDKs/MacOSX10.11.sdk/
# usr/lib/libedit.tbd
# vs
# /usr/lib/libedit.dylib
cflags = sysconfig.get_config_var('CFLAGS') or ''
m = re.search(r'-isysroot\s*(\S+)', cflags)
if m is None:
sysroot = _osx_support._default_sysroot(sysconfig.get_config_var('CC'))
else:
sysroot = m.group(1)
for dir in dirs:
shared = os.path.join(dir, shared_f)
dylib = os.path.join(dir, dylib_f)
static = os.path.join(dir, static_f)
xcode_stub = os.path.join(dir, xcode_stub_f)
if sys.platform == 'darwin' and (
dir.startswith('/System/') or (
dir.startswith('/usr/') and not dir.startswith('/usr/local/'))):
shared = os.path.join(sysroot, dir[1:], shared_f)
dylib = os.path.join(sysroot, dir[1:], dylib_f)
static = os.path.join(sysroot, dir[1:], static_f)
xcode_stub = os.path.join(sysroot, dir[1:], xcode_stub_f)
# We're second-guessing the linker here, with not much hard
# data to go on: GCC seems to prefer the shared library, so I'm
# assuming that *all* Unix C compilers do. And of course I'm
# ignoring even GCC's "-static" option. So sue me.
if os.path.exists(dylib):
return dylib
elif os.path.exists(xcode_stub):
return xcode_stub
elif os.path.exists(shared):
return shared
elif os.path.exists(static):
return static
# Oops, didn't find it in *any* of 'dirs'
return None
| 44.396552 | 87 | 0.583689 |
09adff637390f1dab9df9472164a458a30b1f885 | 14,023 | py | Python | LightField.py | dscran/tango-lightfield | 4e519e9d3a7de9f5ddda24f2e60eb30f355ae7dc | [
"MIT"
] | null | null | null | LightField.py | dscran/tango-lightfield | 4e519e9d3a7de9f5ddda24f2e60eb30f355ae7dc | [
"MIT"
] | null | null | null | LightField.py | dscran/tango-lightfield | 4e519e9d3a7de9f5ddda24f2e60eb30f355ae7dc | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Thu May 21 10:31:21 2020
@author: Michael Schneider, Max Born Institut Berlin
"""
import ctypes
import sys
import os
import clr
sys.path.append(os.environ['LIGHTFIELD_ROOT'])
sys.path.append(os.environ['LIGHTFIELD_ROOT'] + '\\AddInViews')
clr.AddReference('PrincetonInstruments.LightFieldViewV5')
clr.AddReference('PrincetonInstruments.LightField.AutomationV5')
clr.AddReference('PrincetonInstruments.LightFieldAddInSupportServices')
clr.AddReference('System.Collections')
from PrincetonInstruments.LightField.Automation import Automation
from PrincetonInstruments.LightField.AddIns import CameraSettings as cs
from PrincetonInstruments.LightField.AddIns import ExperimentSettings as es
from PrincetonInstruments.LightField.AddIns import DeviceType
from PrincetonInstruments.LightField.AddIns import ImageDataFormat
from PrincetonInstruments.LightField.AddIns import RegionOfInterest
from System.Runtime.InteropServices import GCHandle, GCHandleType
from System import String
from System.Collections.Generic import List
import numpy as np
import tango
from tango import DevState, Attr, READ, READ_WRITE
from tango.server import Device, command, attribute
class LightFieldCamera(Device):
# list of simple scalar controls that can be created automatically
# required fields: name, access, dtype, lf
# `lf` is the LightField settingName
DYN_ATTRS = [
# camera settings
dict(name='temp_read', label='sensor temperature', access=READ,
dtype=tango.DevFloat, unit='degC', lf=cs.SensorTemperatureReading),
dict(name='temp_set', label='temperature setpoint', access=READ_WRITE,
dtype=tango.DevFloat, unit='degC', lf=cs.SensorTemperatureSetPoint),
# FIXME: this should be a DevEnum, which is currently bugged in
# dynamic creation: https://github.com/tango-controls/pytango/pull/348
dict(name='temp_status', label='temperature locked', access=READ,
dtype=tango.DevLong, lf=cs.SensorTemperatureStatus,
enum_labels=['invalid', 'unlocked', 'locked', 'fault']),
# FIXME: DevEnum
dict(name='shutter_mode', label='shutter mode', access=READ_WRITE,
dtype=tango.DevLong, lf=cs.ShutterTimingMode,
enum_labels=['invalid', 'normal', 'closed', 'open', 'trigger']),
dict(name='shutter_close', label='shutter closing time', access=READ_WRITE,
dtype=tango.DevFloat, unit='ms', lf=cs.ShutterTimingClosingDelay),
dict(name='exposure', label='exposure time', access=READ_WRITE,
dtype=tango.DevFloat, unit='ms', lf=cs.ShutterTimingExposureTime),
dict(name='n_ports', label='readout ports', access=READ_WRITE,
dtype=tango.DevLong, lf=cs.ReadoutControlPortsUsed),
dict(name='adc_speed', label='ADC speed', access=READ_WRITE,
dtype=tango.DevFloat, lf=cs.AdcSpeed, unit='MHz'),
# experiment settings
dict(name='accumulations', label='number of acquisitions per frame',
access=READ_WRITE, dtype=tango.DevLong,
lf=es.OnlineProcessingFrameCombinationFramesCombined),
dict(name='save_folder', label='data folder', access=READ_WRITE,
dtype=tango.DevString, lf=es.FileNameGenerationDirectory),
dict(name='save_base', label='base name', access=READ_WRITE,
dtype=tango.DevString, lf=es.FileNameGenerationBaseFileName),
dict(name='save_index', label='file index', access=READ_WRITE,
dtype=tango.DevLong, lf=es.FileNameGenerationIncrementNumber,
min_value='0'),
dict(name='save_digits', label='index length', access=READ_WRITE,
dtype=tango.DevLong, min_value='1', max_value='10',
lf=es.FileNameGenerationIncrementMinimumDigits),
        dict(name='orient_on', label='apply image orientation',
access=READ_WRITE, dtype=tango.DevBoolean,
lf=es.OnlineCorrectionsOrientationCorrectionEnabled),
dict(name='orient_hor', label='flip horizontally',
access=READ_WRITE, dtype=tango.DevBoolean,
lf=es.OnlineCorrectionsOrientationCorrectionFlipHorizontally),
dict(name='orient_ver', label='flip vertically',
access=READ_WRITE, dtype=tango.DevBoolean,
lf=es.OnlineCorrectionsOrientationCorrectionFlipVertically),
dict(name='orient_rot', label='rotate 90 degree',
access=READ_WRITE, dtype=tango.DevLong,
lf=es.OnlineCorrectionsOrientationCorrectionRotateClockwise),
]
attr_keys = {d['name']: d['lf'] for d in DYN_ATTRS}
image = attribute(name='image', label='CCD image', max_dim_x=4096,
max_dim_y=4096, dtype=((tango.DevFloat,),), access=READ)
def init_device(self):
Device.init_device(self)
self.set_change_event('image', True, False)
self.set_state(DevState.INIT)
self.lf = Automation(True, List[String]()) # starts LF instance
self.exp = self.lf.LightFieldApplication.Experiment
self.device = self.get_camera_device()
if self.device is not None:
self.set_state(DevState.ON)
name, model, sn, shape = self.get_sensor_info()
print('Connected:', model, name, sn, file=self.log_info)
self._image = np.zeros(shape)
self._accum = 0
self.register_events()
self.setup_file_save()
else:
print('No camera found.', file=self.log_error)
self.set_state(DevState.FAULT)
def initialize_dynamic_attributes(self):
for d in self.DYN_ATTRS:
self.make_attribute(d)
def make_attribute(self, attr_dict):
'''Dynamically generate simple attributes for LightField settings.'''
baseprops = ['name', 'dtype', 'access', 'lf']
name, dtype, access, lf = [attr_dict.pop(k) for k in baseprops]
if self.exp.Exists(lf):
print('making attribute', name, file=self.log_debug)
new_attr = Attr(name, dtype, access)
prop = tango.UserDefaultAttrProp()
for k, v in attr_dict.items():
try:
property_setter = getattr(prop, 'set_' + k)
property_setter(v)
except AttributeError:
print("error setting attribute property:", name, k, v,
file=self.log_error)
new_attr.set_default_properties(prop)
self.add_attribute(
new_attr,
r_meth=self.read_general,
w_meth=self.write_general,
)
else:
print(f'Skipping attribute {name}: Does not exist on this device',
file=self.log_warn)
def setup_file_save(self):
'''Make sure that file save options are correct.'''
self.lightfield_set(es.FileNameGenerationAttachDate, False)
self.lightfield_set(es.FileNameGenerationAttachTime, False)
self.lightfield_set(es.FileNameGenerationAttachIncrement, True)
return
def get_sensor_info(self):
'''Query the sensor name, model, serial number and active area.'''
width = self.lightfield_get(cs.SensorInformationActiveAreaWidth)
height = self.lightfield_get(cs.SensorInformationActiveAreaHeight)
name = self.lightfield_get(cs.SensorInformationSensorName)
model = self.device.Model
serial = self.device.SerialNumber
return name, model, serial, (height, width)
def get_camera_device(self):
'''Returns the first registered camera device.'''
for device in self.exp.ExperimentDevices:
if device.Type == DeviceType.Camera:
return device
return None
def lightfield_set(self, key, value):
if not self.exp.IsRunning:
if self.exp.IsValid(key, value):
self.exp.SetValue(key, value)
print(f'set {key} -> {value}', file=self.log_debug)
else:
print(f'invalid setting: {key}->{value}', file=self.log_error)
else:
print(f'Cannot set {key}: acquiring', file=self.log_warn)
def lightfield_get(self, key):
val = self.exp.GetValue(key)
return val
def read_general(self, attr):
key = self.attr_keys[attr.get_name()]
print('reading', key, file=self.log_debug)
attr.set_value(self.lightfield_get(key))
def write_general(self, attr):
key = self.attr_keys[attr.get_name()]
val = attr.get_write_value()
print('setting', key, '->', val, file=self.log_debug)
self.lightfield_set(key, val)
def next_file_exists(self):
'''Check whether the next file name is available.'''
folder = self.lightfield_get(self.attr_keys['save_folder'])
fname = self.lightfield_get(es.FileNameGenerationExampleFileName)
fpath = os.path.join(folder, fname + '.spe')
return os.path.exists(fpath)
def increment_to_next_free(self):
'''
        Make sure the next file name is available by incrementing the file index.
'''
while self.next_file_exists():
index = self.lightfield_get(self.attr_keys['save_index'])
print('file exists! Incrementing index.', file=self.log_warn)
self.lightfield_set(self.attr_keys['save_index'], index + 1)
def read_image(self):
return self._image
@command(dtype_in=int)
def set_binning(self, N):
'''Sets the camera to full chip binning mode.
        Use the `set_roi` command to set up a region of interest with binning.
'''
if not self.exp.IsRunning:
if N > 1:
self.exp.SetBinnedSensorRegion(N, N)
print(f'full chip binning {N}x{N}', file=self.log_debug)
else:
self.exp.SetFullSensorRegion()
print('full chip unbinned', file=self.log_debug)
@command(dtype_in=(int,), doc_in='list of ints [x0, x1, y0, y1, bin]',
dtype_out=bool, doc_out='True if successful')
def set_roi(self, roi):
'''Sets the camera to a (possibly binned) ROI.
input is a list of ints [x0, x1, y0, y1, binning]
'''
if not self.exp.IsRunning:
if len(roi) == 4:
x0, x1, y0, y1 = [roi[i] for i in range(4)]
N = 1
elif len(roi) > 4:
x0, x1, y0, y1, N = [roi[i] for i in range(5)]
else:
print('cannot understand ROI', file=self.log_error)
return False
region = RegionOfInterest(x0, y0, x1 - x0, y1 - y0, N, N)
self.exp.SetCustomRegions((region,))
print('set custom ROI', file=self.log_debug)
return True
else:
print('Cannot set ROI during acquisition', file=self.log_error)
return False
@command
def acquire(self):
self.increment_to_next_free()
if self.exp.IsReadyToRun:
self._image = 0
self._accum = 0
self._preview = False
self.exp.Acquire()
@command
def stop(self):
self.exp.Stop()
@command
def preview(self):
self.increment_to_next_free()
if self.exp.IsReadyToRun:
self._preview = True
self.exp.Preview()
def handler_new_data(self, sender, event_args):
data = event_args.ImageDataSet
if data.Frames > 0:
frame = data.GetFrame(0, 0)
im = imageframe_to_numpy(frame).astype(np.float32)
if not self._preview:
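                # running mean over the frames accumulated so far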
self._image = ((self._image * self._accum) + im) / (self._accum + 1)
self._accum += 1
else:
self._image = im
dim_x, dim_y = self._image.shape
print('new image:', self._image.shape, file=self.log_info)
self.push_change_event('image', self._image, dim_y, dim_x)
else:
print('no frames:', data.Frames, file=self.log_error)
def handler_acq_finished(self, sender, event_args):
self.set_state(DevState.ON)
def handler_acq_start(self, sender, event_args):
self.set_state(DevState.RUNNING)
def handler_lightfield_close(self, sender, event_args):
self.set_state(DevState.OFF)
def register_events(self):
self.exp.ExperimentStarted += self.handler_acq_start
self.exp.ExperimentCompleted += self.handler_acq_finished
self.lf.LightFieldClosed += self.handler_lightfield_close
self.exp.ImageDataSetReceived += self.handler_new_data
def imageframe_to_numpy(frame):
'''
Retrieve data from LightField DisplaySource.
Parameters
----------
frame :
LightField display source. Could be the live view or a loaded file.
Returns
-------
data
numpy array.
'''
buffer = frame.GetData()
image_format = frame.Format
src_hndl = GCHandle.Alloc(buffer, GCHandleType.Pinned)
try:
src_ptr = src_hndl.AddrOfPinnedObject().ToInt64()
# Possible data types returned from acquisition
dtypes = {ImageDataFormat.MonochromeUnsigned16: ctypes.c_ushort,
ImageDataFormat.MonochromeUnsigned32: ctypes.c_uint,
ImageDataFormat.MonochromeFloating32: ctypes.c_float}
buf_type = dtypes[image_format] * len(buffer)
cbuf = buf_type.from_address(src_ptr)
image = np.frombuffer(cbuf, dtype=cbuf._type_).copy()
image = np.rot90(image.reshape(frame.Height, frame.Width), -1).T
finally:
if src_hndl.IsAllocated:
src_hndl.Free()
return image
if __name__ == '__main__':
LightFieldCamera.run_server()
| 41.123167 | 84 | 0.628468 |
482331f120ee367b9690e4773d331b58739431be | 8,275 | py | Python | aawork/proj0/stage1/train.py | mayi140611/mayiutils | 5340d7bd4590e2a41afd5d02ffc569745d67c866 | [
"Apache-2.0"
] | null | null | null | aawork/proj0/stage1/train.py | mayi140611/mayiutils | 5340d7bd4590e2a41afd5d02ffc569745d67c866 | [
"Apache-2.0"
] | null | null | null | aawork/proj0/stage1/train.py | mayi140611/mayiutils | 5340d7bd4590e2a41afd5d02ffc569745d67c866 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
# encoding: utf-8
"""
@author: Ian
@file: train.py
@time: 2019-04-19 11:52
"""
import pandas as pd
import numpy as np
from mayiutils.file_io.pickle_wrapper import PickleWrapper as picklew
from mayiutils.algorithm.algorithmset.calcPearson import calcPearson
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import StratifiedKFold
from sklearn.metrics import classification_report, f1_score
import xgboost
import itertools
# from feature_selector import FeatureSelector
import lightgbm as lgb
if __name__ == '__main__':
mode = 9
df = picklew.loadFromFile('train_data2.pkl')
print(df.info())
# print(df.head())
X = df.values
print(X.shape)
y = picklew.loadFromFile('label.pkl')
# print(y.value_counts())
# y = y[:, np.newaxis]
# print(list(y))
y = np.array(list(y))
if mode == 9:
"""
        Evaluate saved prediction scores at several decision thresholds.
"""
pred = pd.read_csv('tt.csv', header=None)
# print(pred[:5])
df = pd.DataFrame()
df['score'] = pred.iloc[:, 1]
df['s0.4'] = 1
df.loc[df['score']<0.4, 's0.4']=0
print(df['s0.4'].value_counts())
print(classification_report(y, list(df['s0.4'])))
df['s0.5'] = 1
df.loc[df['score']<0.5, 's0.5']=0
print(df['s0.5'].value_counts())
print(classification_report(y, list(df['s0.5'])))
"""
0 421
1 141
Name: s0.5, dtype: int64
precision recall f1-score support
0 0.98 0.95 0.96 432
1 0.85 0.92 0.89 130
"""
df['s0.6'] = 1
df.loc[df['score']<0.6, 's0.6']=0
print(df['s0.6'].value_counts())
print(classification_report(y, list(df['s0.6'])))
df['s0.7'] = 1
df.loc[df['score']<0.7, 's0.7']=0
print(df['s0.7'].value_counts())
print(classification_report(y, list(df['s0.7'])))
if mode == 8:
"""
        Use LightGBM and output predicted probabilities.
"""
        ### convert data into LightGBM datasets
lgb_train = lgb.Dataset(X, y, free_raw_data=False)
lgb_eval = lgb.Dataset(X, y, reference=lgb_train, free_raw_data=False)
        print('setting parameters')
params = {
'boosting_type': 'gbdt',
'boosting': 'dart',
'objective': 'binary',
'metric': 'binary_logloss',
'learning_rate': 0.01,
'num_leaves': 25,
'max_depth': 3,
'max_bin': 10,
'min_data_in_leaf': 8,
'feature_fraction': 0.6,
'bagging_fraction': 1,
'bagging_freq': 0,
'lambda_l1': 0,
'lambda_l2': 0,
'min_split_gain': 0
}
print("开始训练")
        gbm = lgb.train(params, # parameter dict
                        lgb_train, # training set
                        num_boost_round = 2000, # number of boosting rounds
                        valid_sets = lgb_eval, # validation set
                        early_stopping_rounds = 30) # early-stopping rounds
        preds_offline = gbm.predict(X, num_iteration=gbm.best_iteration) # predicted probabilities
print(preds_offline)
pd.Series(preds_offline).to_csv('tt.csv')
if mode == 7:
"""
        Use feature-selector to compute feature importances.
"""
        from feature_selector import FeatureSelector  # local import; the module-level one is commented out above
        fs = FeatureSelector(data=df, labels=y)
fs.identify_collinear(correlation_threshold=0.975)
correlated_features = fs.ops['collinear']
print(correlated_features)
# fs.plot_collinear()
# fs.plot_collinear(plot_all=True)
print(fs.record_collinear)
# 4. Zero Importance Features
fs.identify_zero_importance(task='classification', eval_metric='auc',
n_iterations=10, early_stopping=True)
one_hot_features = fs.one_hot_features
base_features = fs.base_features
print('There are %d original features' % len(base_features))
print('There are %d one-hot features' % len(one_hot_features))
zero_importance_features = fs.ops['zero_importance']
print(zero_importance_features[:15])
# fs.plot_feature_importances(threshold=0.99, plot_n=12)
# print(fs.feature_importances)
# fs.feature_importances.to_csv('fs_rs.csv', encoding='gbk')
df_removed = fs.remove(methods=['collinear', 'zero_importance'])
print(df_removed.shape)
picklew.dump2File(df_removed, 'train_fs_removed.pkl')
if mode == 6:
"""
        Feature importances from a random forest.
"""
rfmodel = RandomForestClassifier(n_estimators=80)
rfmodel.fit(X, y)
rs = pd.Series(rfmodel.feature_importances_, index=df.columns).sort_values(ascending=False)
rs.to_csv('randomforest_rs.csv', encoding='gbk')
if mode == 5:
"""
        Compute the Pearson correlation coefficient between each feature and the label.
"""
r = np.apply_along_axis(lambda x: calcPearson(x, y), axis=0, arr=X)
print(r)
rs = pd.Series(r, index=df.columns).sort_values(ascending=False)
print(rs)
rs.to_csv('pearson_rs.csv', encoding='gbk')
if mode == 4:
"""
        Train XGBoost on the whole dataset and report feature importances.
"""
model = xgboost.XGBClassifier(learning_rate=0.05, n_estimators=80, max_depth=7)
model.fit(X, y)
prediction = model.predict(X)
# print(prediction)
print(classification_report(y, prediction))
# f1 = f1_score(y, prediction)
print(model.feature_importances_)
rs = pd.Series(model.feature_importances_, index=df.columns).sort_values(ascending=False)
print(rs)
rs.to_csv('xgboost_rs.csv', encoding='gbk')
if mode == 3:
"""
        Grid-search XGBoost hyperparameters with stratified 4-fold CV.
"""
skf = StratifiedKFold(n_splits=4)
lr = [0.05, 0.1, 0.2]
max_depth = [3, 5, 7]
n_estimators = [80, 100, 120]
# lr = [0.1, 0.12]
# max_depth = [5, 6, 7]
# n_estimators = [110, 120, 130]
for l, n, m in itertools.product(lr, n_estimators, max_depth):
print(l, n, m)
f1 = []
for train_index, test_index in skf.split(X, y):
X_train, X_test = X[train_index], X[test_index]
y_train, y_test = y[train_index], y[test_index]
model = xgboost.XGBClassifier(learning_rate=l, n_estimators=n, max_depth=m)
model.fit(X_train, y_train)
prediction = model.predict(X_test)
# print(prediction)
# print(classification_report(y_test, prediction))
f1 = f1_score(y_test, prediction)
print(np.mean(f1))
if mode == 2:
"""
        Train a decision tree on the whole dataset and report feature importances.
"""
clf = DecisionTreeClassifier(max_depth=4)
        # fit the model
clf.fit(X, y)
y_p = clf.predict(X)
print(classification_report(y, y_p))
print(clf.feature_importances_)
rs = pd.Series(clf.feature_importances_, index=df.columns).sort_values(ascending=False)
print(rs)
rs.to_csv('dt_rs.csv', encoding='gbk')
# dot_data = tree.export_graphviz(clf, out_file=None,
# feature_names=df.columns,
# # class_names=iris.target_names,
# filled=True, rounded=True,
# special_characters=True)
# graph = graphviz.Source(dot_data)
# graph.view()
if mode == 1:
"""
        Tune the decision-tree depth with stratified 4-fold CV.
"""
skf = StratifiedKFold(n_splits=4)
max_depths = [3, 6, 9]
"""
0.7333333333333334
0.5925925925925926
0.5384615384615384
"""
max_depths = [2, 3, 4, 5]
"""
0.6575342465753423
0.7333333333333334
0.7540983606557378
0.6181818181818182
"""
for max_depth in max_depths:
f1 = []
for train_index, test_index in skf.split(X, y):
X_train, X_test = X[train_index], X[test_index]
y_train, y_test = y[train_index], y[test_index]
            # build the model, limiting the maximum tree depth
            clf = DecisionTreeClassifier(max_depth=max_depth)
            # fit the model
clf.fit(X_train, y_train)
y_p = clf.predict(X_test)
# print(classification_report(y_test, y_p))
f1 = f1_score(y_test, y_p)
print(np.mean(f1))
| 34.479167 | 99 | 0.556133 |
d069cc25cf456921bd3959cd95bcb961faf21b30 | 2,400 | py | Python | research/ConsoleAppTest.py | nikhilrj/CARDS | 509815f23c11881e6444308fa1014aed6a1af358 | [
"MIT"
] | null | null | null | research/ConsoleAppTest.py | nikhilrj/CARDS | 509815f23c11881e6444308fa1014aed6a1af358 | [
"MIT"
] | null | null | null | research/ConsoleAppTest.py | nikhilrj/CARDS | 509815f23c11881e6444308fa1014aed6a1af358 | [
"MIT"
] | null | null | null | from selenium import webdriver
#import socketserver
import socket
import webbrowser
import os.path
portListen = 1337
testIP = '127.0.0.1'
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.bind((testIP, portListen))
server.listen(5)
size = 1024
conn, client_addr = server.accept()
driver = webdriver.Firefox()
driver.get("file:///" + os.path.abspath('ConsoleAppWebpage.html'))
pageTemplate = '''
<!DOCTYPE html>
<html>
<head>
<title>change picture</title>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1">
<link rel="stylesheet" href="http://maxcdn.bootstrapcdn.com/bootstrap/3.2.0/css/bootstrap.min.css">
<script src="https://ajax.googleapis.com/ajax/libs/jquery/1.11.1/jquery.min.js"></script>
<script src="http://maxcdn.bootstrapcdn.com/bootstrap/3.2.0/js/bootstrap.min.js"></script>
</head>
<body>
<img id="img1" src="image1.png" class="img-circle" alt="Cinque Terre" width="200" height="200" border="300" style="border:{border1}">
<img id="img2" src="image2.png" class="img-circle" alt="Cinque Terre" width="200" height="200" border="300" style="border:{border2}">
<img id="img3" src="image3.png" class="img-circle" alt="Cinque Terre" width="200" height="200" border="300" style="border:{border3}">
<img id="img4" src="image4.png" class="img-circle" alt="Cinque Terre" width="200" height="200" border="300" style="border:{border4}">
</body>
</html>
'''
def strToFile(text, filename):
"""Write a file with the given name and the given text."""
output = open(filename,"w")
output.write(text)
output.close()
def browseLocal(webpageText, filename='ConsoleAppWebpage.html'):
strToFile(webpageText, filename)
driver.refresh()
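# Main loop: each client message is an integer 1-4 selecting which image gets
# a black highlight border before the page is re-rendered.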
while(True):
    raw_msg = conn.recv(size)
    if not raw_msg:  # client closed the connection; avoid int('') crashing
        break
    str_msg = raw_msg.decode('utf-8')
    state_num = int(str_msg)
border1 = "none"
border2 = "none"
border3 = "none"
border4 = "none"
if state_num == 1:
border1 = "5px solid black"
elif state_num == 2:
border2 = "5px solid black"
elif state_num == 3:
border3 = "5px solid black"
elif state_num == 4:
border4 = "5px solid black"
contents = pageTemplate.format(**locals())
print(state_num)
browseLocal(contents)
| 31.168831 | 140 | 0.641667 |
639a4874d5fb7669e11620bfd712c207e2c604ae | 1,103 | py | Python | jtyoui/reflex/FucStr.py | Abcbaisan/Jtyoui | 96b8eebc3c42eaf337221d6485ba8395f07af821 | [
"MIT"
] | 2 | 2019-11-06T01:47:17.000Z | 2019-11-06T01:48:19.000Z | jtyoui/reflex/FucStr.py | yy1244/Jtyoui | d3c212ed9d6ffa6b37a8ca49098ab59c89216f09 | [
"MIT"
] | null | null | null | jtyoui/reflex/FucStr.py | yy1244/Jtyoui | d3c212ed9d6ffa6b37a8ca49098ab59c89216f09 | [
"MIT"
] | null | null | null | #!/usr/bin/python3.7
# -*- coding: utf-8 -*-
# @Time : 2019/8/16 17:45
# @Author: Jtyoui@qq.com
from types import FunctionType, ModuleType
from typing import Union
def string_function(module: Union[str, ModuleType], func: str, *args, **kwargs):
"""根据字符串方法名来进行调用模块方法
:param module: 模块名
:param func: 模块中的方法名字
:param args:方法里面的参数值
:param kwargs:方法里面的参数值
:return: 返回一个返回值
"""
if isinstance(module, ModuleType):
if hasattr(module, func):
func_or_var = getattr(module, func)
if isinstance(func_or_var, FunctionType):
return func_or_var(*args, **kwargs)
else:
return func_or_var
else:
            print(f'{func} is not a function')
elif isinstance(module, str):
m = __import__(module)
return string_function(m, func, *args, **kwargs)
else:
        print(f'{module} is not a module')
return None
if __name__ == '__main__':
print(string_function('jtyoui', 'BaiDuInfoSearch')('万绮雯').info())
print(list(string_function('jtyoui', 'f_to_j', str_='載')))
print(list(string_function('jtyoui', 'f_to_j', '載')))
| 29.810811 | 75 | 0.611061 |
ff1ce04e08bef0b027e8e120145e479b81ec538e | 39,803 | py | Python | pkgs/nltk-3.2-py27_0/lib/python2.7/site-packages/nltk/tgrep.py | wangyum/anaconda | 6e5a0dbead3327661d73a61e85414cf92aa52be6 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | pkgs/nltk-3.2-py27_0/lib/python2.7/site-packages/nltk/tgrep.py | wangyum/anaconda | 6e5a0dbead3327661d73a61e85414cf92aa52be6 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | pkgs/nltk-3.2-py27_0/lib/python2.7/site-packages/nltk/tgrep.py | wangyum/anaconda | 6e5a0dbead3327661d73a61e85414cf92aa52be6 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Natural Language Toolkit: TGrep search
#
# Copyright (C) 2001-2015 NLTK Project
# Author: Will Roberts <wildwilhelm@gmail.com>
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
'''
============================================
TGrep search implementation for NLTK trees
============================================
This module supports TGrep2 syntax for matching parts of NLTK Trees.
Note that many tgrep operators require the tree passed to be a
``ParentedTree``.
External links:
- `Tgrep tutorial <http://www.stanford.edu/dept/linguistics/corpora/cas-tut-tgrep.html>`_
- `Tgrep2 manual <http://tedlab.mit.edu/~dr/Tgrep2/tgrep2.pdf>`_
- `Tgrep2 source <http://tedlab.mit.edu/~dr/Tgrep2/>`_
Usage
=====
>>> from nltk.tree import ParentedTree
>>> from nltk.tgrep import tgrep_nodes, tgrep_positions
>>> tree = ParentedTree.fromstring('(S (NP (DT the) (JJ big) (NN dog)) (VP bit) (NP (DT a) (NN cat)))')
>>> list(tgrep_nodes('NN', [tree]))
[[ParentedTree('NN', ['dog']), ParentedTree('NN', ['cat'])]]
>>> list(tgrep_positions('NN', [tree]))
[[(0, 2), (2, 1)]]
>>> list(tgrep_nodes('DT', [tree]))
[[ParentedTree('DT', ['the']), ParentedTree('DT', ['a'])]]
>>> list(tgrep_nodes('DT $ JJ', [tree]))
[[ParentedTree('DT', ['the'])]]
This implementation adds syntax to select nodes based on their NLTK
tree position. This syntax is ``N`` plus a Python tuple representing
the tree position. For instance, ``N()``, ``N(0,)``, ``N(0,0)`` are
valid node selectors. Example:
>>> tree = ParentedTree.fromstring('(S (NP (DT the) (JJ big) (NN dog)) (VP bit) (NP (DT a) (NN cat)))')
>>> tree[0,0]
ParentedTree('DT', ['the'])
>>> tree[0,0].treeposition()
(0, 0)
>>> list(tgrep_nodes('N(0,0)', [tree]))
[[ParentedTree('DT', ['the'])]]
Caveats:
========
- Link modifiers: "?" and "=" are not implemented.
- Tgrep compatibility: Using "@" for "!", "{" for "<", "}" for ">" are
not implemented.
- The "=" and "~" links are not implemented.
Known Issues:
=============
- There are some issues with link relations involving leaf nodes
(which are represented as bare strings in NLTK trees). For
instance, consider the tree::
(S (A x))
The search string ``* !>> S`` should select all nodes which are not
dominated in some way by an ``S`` node (i.e., all nodes which are
not descendants of an ``S``). Clearly, in this tree, the only node
which fulfills this criterion is the top node (since it is not
dominated by anything). However, the code here will find both the
top node and the leaf node ``x``. This is because we cannot recover
the parent of the leaf, since it is stored as a bare string.
A possible workaround, when performing this kind of search, would be
to filter out all leaf nodes.
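  A sketch of that workaround (names here are illustrative)::
    hits = list(tgrep_nodes('* !>> S', [tree]))
    internal_only = [[n for n in match if isinstance(n, nltk.tree.Tree)]
                     for match in hits]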
Implementation notes
====================
This implementation is (somewhat awkwardly) based on lambda functions
which are predicates on a node. A predicate is a function which is
either True or False; using a predicate function, we can identify sets
of nodes with particular properties. A predicate function, could, for
instance, return True only if a particular node has a label matching a
particular regular expression, and has a daughter node which has no
sisters. Because tgrep2 search strings can do things statefully (such
as substituting in macros, and binding nodes with node labels), the
actual predicate function is declared with three arguments::
    pred = lambda n, m, l: True  # some logic here
``n``
is a node in a tree; this argument must always be given
``m``
contains a dictionary, mapping macro names onto predicate functions
``l``
is a dictionary to map node labels onto nodes in the tree
``m`` and ``l`` are declared to default to ``None``, and so need not be
specified in a call to a predicate. Predicates which call other
predicates must always pass the value of these arguments on. The
top-level predicate (constructed by ``_tgrep_exprs_action``) binds the
macro definitions to ``m`` and initialises ``l`` to an empty dictionary.
'''
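# Illustrative sketch of the predicate convention described above (not part of
# the module's API; the names below are hypothetical):
#     pred_nn = lambda n, m=None, l=None: _tgrep_node_literal_value(n) == 'NN'
#     # "n has an NN daughter", i.e. the '<' relation:
#     pred_parent_of_nn = lambda n, m=None, l=None: (
#         _istree(n) and any(pred_nn(x, m, l) for x in n))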
from __future__ import absolute_import, print_function, unicode_literals
from nltk.compat import binary_type, text_type
import functools
import nltk.tree
try:
import pyparsing
except ImportError:
    print('Warning: nltk.tgrep will not work without the `pyparsing` package '
          'installed.')
import re
class TgrepException(Exception):
'''Tgrep exception type.'''
pass
def ancestors(node):
'''
Returns the list of all nodes dominating the given tree node.
This method will not work with leaf nodes, since there is no way
to recover the parent.
'''
results = []
try:
current = node.parent()
except AttributeError:
# if node is a leaf, we cannot retrieve its parent
return results
while current:
results.append(current)
current = current.parent()
return results
def unique_ancestors(node):
'''
Returns the list of all nodes dominating the given node, where
there is only a single path of descent.
'''
results = []
try:
current = node.parent()
except AttributeError:
# if node is a leaf, we cannot retrieve its parent
return results
while current and len(current) == 1:
results.append(current)
current = current.parent()
return results
def _descendants(node):
'''
Returns the list of all nodes which are descended from the given
tree node in some way.
'''
try:
treepos = node.treepositions()
except AttributeError:
return []
return [node[x] for x in treepos[1:]]
def _leftmost_descendants(node):
'''
Returns the set of all nodes descended in some way through
left branches from this node.
'''
try:
treepos = node.treepositions()
except AttributeError:
return []
return [node[x] for x in treepos[1:] if all(y == 0 for y in x)]
def _rightmost_descendants(node):
'''
Returns the set of all nodes descended in some way through
right branches from this node.
'''
try:
rightmost_leaf = max(node.treepositions())
except AttributeError:
return []
return [node[rightmost_leaf[:i]] for i in range(1, len(rightmost_leaf) + 1)]
def _istree(obj):
'''Predicate to check whether `obj` is a nltk.tree.Tree.'''
return isinstance(obj, nltk.tree.Tree)
def _unique_descendants(node):
'''
Returns the list of all nodes descended from the given node, where
there is only a single path of descent.
'''
results = []
current = node
while current and _istree(current) and len(current) == 1:
current = current[0]
results.append(current)
return results
def _before(node):
'''
Returns the set of all nodes that are before the given node.
'''
try:
pos = node.treeposition()
tree = node.root()
except AttributeError:
return []
return [tree[x] for x in tree.treepositions()
if x[:len(pos)] < pos[:len(x)]]
def _immediately_before(node):
'''
Returns the set of all nodes that are immediately before the given
node.
Tree node A immediately precedes node B if the last terminal
symbol (word) produced by A immediately precedes the first
terminal symbol produced by B.
'''
try:
pos = node.treeposition()
tree = node.root()
except AttributeError:
return []
# go "upwards" from pos until there is a place we can go to the left
idx = len(pos) - 1
while 0 <= idx and pos[idx] == 0:
idx -= 1
if idx < 0:
return []
pos = list(pos[:idx + 1])
pos[-1] -= 1
before = tree[pos]
return [before] + _rightmost_descendants(before)
def _after(node):
'''
Returns the set of all nodes that are after the given node.
'''
try:
pos = node.treeposition()
tree = node.root()
except AttributeError:
return []
return [tree[x] for x in tree.treepositions()
if x[:len(pos)] > pos[:len(x)]]
def _immediately_after(node):
'''
Returns the set of all nodes that are immediately after the given
node.
Tree node A immediately follows node B if the first terminal
symbol (word) produced by A immediately follows the last
terminal symbol produced by B.
'''
try:
pos = node.treeposition()
tree = node.root()
current = node.parent()
except AttributeError:
return []
# go "upwards" from pos until there is a place we can go to the
# right
idx = len(pos) - 1
while 0 <= idx and pos[idx] == len(current) - 1:
idx -= 1
current = current.parent()
if idx < 0:
return []
pos = list(pos[:idx + 1])
pos[-1] += 1
after = tree[pos]
return [after] + _leftmost_descendants(after)
def _tgrep_node_literal_value(node):
'''
Gets the string value of a given parse tree node, for comparison
using the tgrep node literal predicates.
'''
return (node.label() if _istree(node) else text_type(node))
def _tgrep_macro_use_action(_s, _l, tokens):
'''
Builds a lambda function which looks up the macro name used.
'''
assert len(tokens) == 1
assert tokens[0][0] == '@'
macro_name = tokens[0][1:]
def macro_use(n, m=None, l=None):
if m is None or macro_name not in m:
raise TgrepException('macro {0} not defined'.format(macro_name))
return m[macro_name](n, m, l)
return macro_use
def _tgrep_node_action(_s, _l, tokens):
'''
Builds a lambda function representing a predicate on a tree node
depending on the name of its node.
'''
# print 'node tokens: ', tokens
if tokens[0] == "'":
# strip initial apostrophe (tgrep2 print command)
tokens = tokens[1:]
if len(tokens) > 1:
# disjunctive definition of a node name
assert list(set(tokens[1::2])) == ['|']
# recursively call self to interpret each node name definition
tokens = [_tgrep_node_action(None, None, [node])
for node in tokens[::2]]
# capture tokens and return the disjunction
return (lambda t: lambda n, m=None, l=None: any(f(n, m, l) for f in t))(tokens)
else:
if hasattr(tokens[0], '__call__'):
# this is a previously interpreted parenthetical node
# definition (lambda function)
return tokens[0]
elif tokens[0] == '*' or tokens[0] == '__':
return lambda n, m=None, l=None: True
elif tokens[0].startswith('"'):
assert tokens[0].endswith('"')
node_lit = tokens[0][1:-1].replace('\\"', '"').replace('\\\\', '\\')
return (lambda s: lambda n, m=None, l=None: _tgrep_node_literal_value(n) == s)(node_lit)
elif tokens[0].startswith('/'):
assert tokens[0].endswith('/')
node_lit = tokens[0][1:-1]
return (lambda r: lambda n, m=None, l=None:
r.search(_tgrep_node_literal_value(n)))(re.compile(node_lit))
elif tokens[0].startswith('i@'):
node_func = _tgrep_node_action(_s, _l, [tokens[0][2:].lower()])
return (lambda f: lambda n, m=None, l=None:
f(_tgrep_node_literal_value(n).lower()))(node_func)
else:
return (lambda s: lambda n, m=None, l=None:
_tgrep_node_literal_value(n) == s)(tokens[0])
def _tgrep_parens_action(_s, _l, tokens):
'''
Builds a lambda function representing a predicate on a tree node
from a parenthetical notation.
'''
# print 'parenthetical tokens: ', tokens
assert len(tokens) == 3
assert tokens[0] == '('
assert tokens[2] == ')'
return tokens[1]
def _tgrep_nltk_tree_pos_action(_s, _l, tokens):
'''
Builds a lambda function representing a predicate on a tree node
which returns true if the node is located at a specific tree
position.
'''
    # recover the tuple from the parsed string
node_tree_position = tuple(int(x) for x in tokens if x.isdigit())
# capture the node's tree position
return (lambda i: lambda n, m=None, l=None: (hasattr(n, 'treeposition') and
n.treeposition() == i))(node_tree_position)
def _tgrep_relation_action(_s, _l, tokens):
'''
Builds a lambda function representing a predicate on a tree node
depending on its relation to other nodes in the tree.
'''
# print 'relation tokens: ', tokens
# process negation first if needed
negated = False
if tokens[0] == '!':
negated = True
tokens = tokens[1:]
if tokens[0] == '[':
# process square-bracketed relation expressions
assert len(tokens) == 3
assert tokens[2] == ']'
retval = tokens[1]
else:
# process operator-node relation expressions
assert len(tokens) == 2
operator, predicate = tokens
# A < B A is the parent of (immediately dominates) B.
if operator == '<':
retval = lambda n, m=None, l=None: (_istree(n) and
any(predicate(x, m, l) for x in n))
# A > B A is the child of B.
elif operator == '>':
retval = lambda n, m=None, l=None: (hasattr(n, 'parent') and
bool(n.parent()) and
predicate(n.parent(), m, l))
# A <, B Synonymous with A <1 B.
elif operator == '<,' or operator == '<1':
retval = lambda n, m=None, l=None: (_istree(n) and
bool(list(n)) and
predicate(n[0], m, l))
# A >, B Synonymous with A >1 B.
elif operator == '>,' or operator == '>1':
retval = lambda n, m=None, l=None: (hasattr(n, 'parent') and
bool(n.parent()) and
(n is n.parent()[0]) and
predicate(n.parent(), m, l))
# A <N B B is the Nth child of A (the first child is <1).
elif operator[0] == '<' and operator[1:].isdigit():
idx = int(operator[1:])
# capture the index parameter
retval = (lambda i: lambda n, m=None, l=None: (_istree(n) and
bool(list(n)) and
0 <= i < len(n) and
predicate(n[i], m, l)))(idx - 1)
# A >N B A is the Nth child of B (the first child is >1).
elif operator[0] == '>' and operator[1:].isdigit():
idx = int(operator[1:])
# capture the index parameter
retval = (lambda i: lambda n, m=None, l=None: (hasattr(n, 'parent') and
bool(n.parent()) and
0 <= i < len(n.parent()) and
(n is n.parent()[i]) and
predicate(n.parent(), m, l)))(idx - 1)
# A <' B B is the last child of A (also synonymous with A <-1 B).
# A <- B B is the last child of A (synonymous with A <-1 B).
elif operator == '<\'' or operator == '<-' or operator == '<-1':
retval = lambda n, m=None, l=None: (_istree(n) and bool(list(n))
and predicate(n[-1], m, l))
# A >' B A is the last child of B (also synonymous with A >-1 B).
# A >- B A is the last child of B (synonymous with A >-1 B).
elif operator == '>\'' or operator == '>-' or operator == '>-1':
retval = lambda n, m=None, l=None: (hasattr(n, 'parent') and
bool(n.parent()) and
(n is n.parent()[-1]) and
predicate(n.parent(), m, l))
# A <-N B B is the N th-to-last child of A (the last child is <-1).
elif operator[:2] == '<-' and operator[2:].isdigit():
idx = -int(operator[2:])
# capture the index parameter
retval = (lambda i: lambda n, m=None, l=None: (_istree(n) and
bool(list(n)) and
0 <= (i + len(n)) < len(n) and
predicate(n[i + len(n)], m, l)))(idx)
# A >-N B A is the N th-to-last child of B (the last child is >-1).
elif operator[:2] == '>-' and operator[2:].isdigit():
idx = -int(operator[2:])
# capture the index parameter
retval = (lambda i: lambda n, m=None, l=None:
(hasattr(n, 'parent') and
bool(n.parent()) and
0 <= (i + len(n.parent())) < len(n.parent()) and
(n is n.parent()[i + len(n.parent())]) and
predicate(n.parent(), m, l)))(idx)
# A <: B B is the only child of A
elif operator == '<:':
retval = lambda n, m=None, l=None: (_istree(n) and
len(n) == 1 and
predicate(n[0], m, l))
# A >: B A is the only child of B.
elif operator == '>:':
retval = lambda n, m=None, l=None: (hasattr(n, 'parent') and
bool(n.parent()) and
len(n.parent()) == 1 and
predicate(n.parent(), m, l))
# A << B A dominates B (A is an ancestor of B).
elif operator == '<<':
retval = lambda n, m=None, l=None: (_istree(n) and
any(predicate(x, m, l) for x in _descendants(n)))
# A >> B A is dominated by B (A is a descendant of B).
elif operator == '>>':
retval = lambda n, m=None, l=None: any(predicate(x, m, l) for x in ancestors(n))
# A <<, B B is a left-most descendant of A.
elif operator == '<<,' or operator == '<<1':
retval = lambda n, m=None, l=None: (_istree(n) and
any(predicate(x, m, l)
for x in _leftmost_descendants(n)))
# A >>, B A is a left-most descendant of B.
elif operator == '>>,':
retval = lambda n, m=None, l=None: any((predicate(x, m, l) and
n in _leftmost_descendants(x))
for x in ancestors(n))
# A <<' B B is a right-most descendant of A.
elif operator == '<<\'':
retval = lambda n, m=None, l=None: (_istree(n) and
any(predicate(x, m, l)
for x in _rightmost_descendants(n)))
# A >>' B A is a right-most descendant of B.
elif operator == '>>\'':
retval = lambda n, m=None, l=None: any((predicate(x, m, l) and
n in _rightmost_descendants(x))
for x in ancestors(n))
# A <<: B There is a single path of descent from A and B is on it.
elif operator == '<<:':
retval = lambda n, m=None, l=None: (_istree(n) and
any(predicate(x, m, l)
for x in _unique_descendants(n)))
# A >>: B There is a single path of descent from B and A is on it.
elif operator == '>>:':
retval = lambda n, m=None, l=None: any(predicate(x, m, l) for x in unique_ancestors(n))
# A . B A immediately precedes B.
elif operator == '.':
retval = lambda n, m=None, l=None: any(predicate(x, m, l)
for x in _immediately_after(n))
# A , B A immediately follows B.
elif operator == ',':
retval = lambda n, m=None, l=None: any(predicate(x, m, l)
for x in _immediately_before(n))
# A .. B A precedes B.
elif operator == '..':
retval = lambda n, m=None, l=None: any(predicate(x, m, l) for x in _after(n))
# A ,, B A follows B.
elif operator == ',,':
retval = lambda n, m=None, l=None: any(predicate(x, m, l) for x in _before(n))
# A $ B A is a sister of B (and A != B).
elif operator == '$' or operator == '%':
retval = lambda n, m=None, l=None: (hasattr(n, 'parent') and
bool(n.parent()) and
any(predicate(x, m, l)
for x in n.parent() if x is not n))
# A $. B A is a sister of and immediately precedes B.
elif operator == '$.' or operator == '%.':
retval = lambda n, m=None, l=None: (hasattr(n, 'right_sibling') and
bool(n.right_sibling()) and
predicate(n.right_sibling(), m, l))
# A $, B A is a sister of and immediately follows B.
elif operator == '$,' or operator == '%,':
retval = lambda n, m=None, l=None: (hasattr(n, 'left_sibling') and
bool(n.left_sibling()) and
predicate(n.left_sibling(), m, l))
# A $.. B A is a sister of and precedes B.
elif operator == '$..' or operator == '%..':
retval = lambda n, m=None, l=None: (hasattr(n, 'parent') and
hasattr(n, 'parent_index') and
bool(n.parent()) and
any(predicate(x, m, l) for x in
n.parent()[n.parent_index() + 1:]))
# A $,, B A is a sister of and follows B.
elif operator == '$,,' or operator == '%,,':
retval = lambda n, m=None, l=None: (hasattr(n, 'parent') and
hasattr(n, 'parent_index') and
bool(n.parent()) and
any(predicate(x, m, l) for x in
n.parent()[:n.parent_index()]))
else:
raise TgrepException(
'cannot interpret tgrep operator "{0}"'.format(operator))
# now return the built function
if negated:
return (lambda r: (lambda n, m=None, l=None: not r(n, m, l)))(retval)
else:
return retval
def _tgrep_conjunction_action(_s, _l, tokens, join_char = '&'):
'''
Builds a lambda function representing a predicate on a tree node
from the conjunction of several other such lambda functions.
This is prototypically called for expressions like
(`tgrep_rel_conjunction`)::
< NP & < AP < VP
where tokens is a list of predicates representing the relations
(`< NP`, `< AP`, and `< VP`), possibly with the character `&`
included (as in the example here).
This is also called for expressions like (`tgrep_node_expr2`)::
NP < NN
S=s < /NP/=n : s < /VP/=v : n .. v
tokens[0] is a tgrep_expr predicate; tokens[1:] are an (optional)
list of segmented patterns (`tgrep_expr_labeled`, processed by
`_tgrep_segmented_pattern_action`).
'''
# filter out the ampersand
tokens = [x for x in tokens if x != join_char]
# print 'relation conjunction tokens: ', tokens
if len(tokens) == 1:
return tokens[0]
else:
return (lambda ts: lambda n, m=None, l=None: all(predicate(n, m, l)
for predicate in ts))(tokens)
def _tgrep_segmented_pattern_action(_s, _l, tokens):
'''
Builds a lambda function representing a segmented pattern.
Called for expressions like (`tgrep_expr_labeled`)::
=s .. =v < =n
This is a segmented pattern, a tgrep2 expression which begins with
a node label.
    The problem is that for segmented_pattern_action (': =v < =s'),
the first element (in this case, =v) is specifically selected by
virtue of matching a particular node in the tree; to retrieve
the node, we need the label, not a lambda function. For node
labels inside a tgrep_node_expr, we need a lambda function which
returns true if the node visited is the same as =v.
We solve this by creating two copies of a node_label_use in the
grammar; the label use inside a tgrep_expr_labeled has a separate
parse action to the pred use inside a node_expr. See
`_tgrep_node_label_use_action` and
`_tgrep_node_label_pred_use_action`.
'''
# tokens[0] is a string containing the node label
node_label = tokens[0]
# tokens[1:] is an (optional) list of predicates which must all
# hold of the bound node
reln_preds = tokens[1:]
def pattern_segment_pred(n, m=None, l=None):
'''This predicate function ignores its node argument.'''
# look up the bound node using its label
if l is None or node_label not in l:
raise TgrepException('node_label ={0} not bound in pattern'.format(
node_label))
node = l[node_label]
# match the relation predicates against the node
return all(pred(node, m, l) for pred in reln_preds)
return pattern_segment_pred
def _tgrep_node_label_use_action(_s, _l, tokens):
'''
Returns the node label used to begin a tgrep_expr_labeled. See
`_tgrep_segmented_pattern_action`.
Called for expressions like (`tgrep_node_label_use`)::
=s
when they appear as the first element of a `tgrep_expr_labeled`
expression (see `_tgrep_segmented_pattern_action`).
It returns the node label.
'''
assert len(tokens) == 1
assert tokens[0].startswith('=')
return tokens[0][1:]
def _tgrep_node_label_pred_use_action(_s, _l, tokens):
'''
Builds a lambda function representing a predicate on a tree node
which describes the use of a previously bound node label.
Called for expressions like (`tgrep_node_label_use_pred`)::
=s
when they appear inside a tgrep_node_expr (for example, inside a
relation). The predicate returns true if and only if its node
    argument is identical to the node looked up in the node label
dictionary using the node's label.
'''
assert len(tokens) == 1
assert tokens[0].startswith('=')
node_label = tokens[0][1:]
def node_label_use_pred(n, m=None, l=None):
# look up the bound node using its label
if l is None or node_label not in l:
raise TgrepException('node_label ={0} not bound in pattern'.format(
node_label))
node = l[node_label]
# truth means the given node is this node
return n is node
return node_label_use_pred
def _tgrep_bind_node_label_action(_s, _l, tokens):
'''
Builds a lambda function representing a predicate on a tree node
which can optionally bind a matching node into the tgrep2 string's
label_dict.
Called for expressions like (`tgrep_node_expr2`)::
/NP/
@NP=n
'''
# tokens[0] is a tgrep_node_expr
if len(tokens) == 1:
return tokens[0]
else:
# if present, tokens[1] is the character '=', and tokens[2] is
# a tgrep_node_label, a string value containing the node label
assert len(tokens) == 3
assert tokens[1] == '='
node_pred = tokens[0]
node_label = tokens[2]
def node_label_bind_pred(n, m=None, l=None):
if node_pred(n, m, l):
# bind `n` into the dictionary `l`
if l is None:
raise TgrepException(
'cannot bind node_label {0}: label_dict is None'.format(
node_label))
l[node_label] = n
return True
else:
return False
return node_label_bind_pred
def _tgrep_rel_disjunction_action(_s, _l, tokens):
'''
Builds a lambda function representing a predicate on a tree node
from the disjunction of several other such lambda functions.
'''
# filter out the pipe
tokens = [x for x in tokens if x != '|']
# print 'relation disjunction tokens: ', tokens
if len(tokens) == 1:
return tokens[0]
elif len(tokens) == 2:
return (lambda a, b: lambda n, m=None, l=None:
a(n, m, l) or b(n, m, l))(tokens[0], tokens[1])
def _macro_defn_action(_s, _l, tokens):
'''
Builds a dictionary structure which defines the given macro.
'''
assert len(tokens) == 3
assert tokens[0] == '@'
return {tokens[1]: tokens[2]}
def _tgrep_exprs_action(_s, _l, tokens):
'''
    This is the top-level node in a tgrep2 search string; the
predicate function it returns binds together all the state of a
tgrep2 search string.
Builds a lambda function representing a predicate on a tree node
from the disjunction of several tgrep expressions. Also handles
macro definitions and macro name binding, and node label
definitions and node label binding.
'''
if len(tokens) == 1:
return lambda n, m=None, l=None: tokens[0](n, None, {})
# filter out all the semicolons
tokens = [x for x in tokens if x != ';']
# collect all macro definitions
macro_dict = {}
macro_defs = [tok for tok in tokens if isinstance(tok, dict)]
for macro_def in macro_defs:
macro_dict.update(macro_def)
# collect all tgrep expressions
tgrep_exprs = [tok for tok in tokens if not isinstance(tok, dict)]
# create a new scope for the node label dictionary
def top_level_pred(n, m=macro_dict, l=None):
label_dict = {}
# bind macro definitions and OR together all tgrep_exprs
return any(predicate(n, m, label_dict) for predicate in tgrep_exprs)
return top_level_pred
def _build_tgrep_parser(set_parse_actions = True):
'''
Builds a pyparsing-based parser object for tokenizing and
interpreting tgrep search strings.
'''
tgrep_op = (pyparsing.Optional('!') +
pyparsing.Regex('[$%,.<>][%,.<>0-9-\':]*'))
tgrep_qstring = pyparsing.QuotedString(quoteChar='"', escChar='\\',
unquoteResults=False)
tgrep_node_regex = pyparsing.QuotedString(quoteChar='/', escChar='\\',
unquoteResults=False)
tgrep_qstring_icase = pyparsing.Regex(
'i@\\"(?:[^"\\n\\r\\\\]|(?:\\\\.))*\\"')
tgrep_node_regex_icase = pyparsing.Regex(
'i@\\/(?:[^/\\n\\r\\\\]|(?:\\\\.))*\\/')
tgrep_node_literal = pyparsing.Regex('[^][ \r\t\n;:.,&|<>()$!@%\'^=]+')
tgrep_expr = pyparsing.Forward()
tgrep_relations = pyparsing.Forward()
tgrep_parens = pyparsing.Literal('(') + tgrep_expr + ')'
tgrep_nltk_tree_pos = (
pyparsing.Literal('N(') +
pyparsing.Optional(pyparsing.Word(pyparsing.nums) + ',' +
pyparsing.Optional(pyparsing.delimitedList(
pyparsing.Word(pyparsing.nums), delim=',') +
pyparsing.Optional(','))) + ')')
tgrep_node_label = pyparsing.Regex('[A-Za-z0-9]+')
tgrep_node_label_use = pyparsing.Combine('=' + tgrep_node_label)
# see _tgrep_segmented_pattern_action
tgrep_node_label_use_pred = tgrep_node_label_use.copy()
macro_name = pyparsing.Regex('[^];:.,&|<>()[$!@%\'^=\r\t\n ]+')
macro_name.setWhitespaceChars('')
macro_use = pyparsing.Combine('@' + macro_name)
tgrep_node_expr = (tgrep_node_label_use_pred |
macro_use |
tgrep_nltk_tree_pos |
tgrep_qstring_icase |
tgrep_node_regex_icase |
tgrep_qstring |
tgrep_node_regex |
'*' |
tgrep_node_literal)
tgrep_node_expr2 = ((tgrep_node_expr +
pyparsing.Literal('=').setWhitespaceChars('') +
tgrep_node_label.copy().setWhitespaceChars('')) |
tgrep_node_expr)
tgrep_node = (tgrep_parens |
(pyparsing.Optional("'") +
tgrep_node_expr2 +
pyparsing.ZeroOrMore("|" + tgrep_node_expr)))
tgrep_brackets = pyparsing.Optional('!') + '[' + tgrep_relations + ']'
tgrep_relation = tgrep_brackets | (tgrep_op + tgrep_node)
tgrep_rel_conjunction = pyparsing.Forward()
tgrep_rel_conjunction << (tgrep_relation +
pyparsing.ZeroOrMore(pyparsing.Optional('&') +
tgrep_rel_conjunction))
tgrep_relations << tgrep_rel_conjunction + pyparsing.ZeroOrMore(
"|" + tgrep_relations)
tgrep_expr << tgrep_node + pyparsing.Optional(tgrep_relations)
tgrep_expr_labeled = tgrep_node_label_use + pyparsing.Optional(tgrep_relations)
tgrep_expr2 = tgrep_expr + pyparsing.ZeroOrMore(':' + tgrep_expr_labeled)
macro_defn = (pyparsing.Literal('@') +
pyparsing.White().suppress() +
macro_name +
tgrep_expr2)
tgrep_exprs = (pyparsing.Optional(macro_defn + pyparsing.ZeroOrMore(';' + macro_defn) + ';') +
tgrep_expr2 +
pyparsing.ZeroOrMore(';' + (macro_defn | tgrep_expr2)) +
pyparsing.ZeroOrMore(';').suppress())
if set_parse_actions:
tgrep_node_label_use.setParseAction(_tgrep_node_label_use_action)
tgrep_node_label_use_pred.setParseAction(_tgrep_node_label_pred_use_action)
macro_use.setParseAction(_tgrep_macro_use_action)
tgrep_node.setParseAction(_tgrep_node_action)
tgrep_node_expr2.setParseAction(_tgrep_bind_node_label_action)
tgrep_parens.setParseAction(_tgrep_parens_action)
tgrep_nltk_tree_pos.setParseAction(_tgrep_nltk_tree_pos_action)
tgrep_relation.setParseAction(_tgrep_relation_action)
tgrep_rel_conjunction.setParseAction(_tgrep_conjunction_action)
tgrep_relations.setParseAction(_tgrep_rel_disjunction_action)
macro_defn.setParseAction(_macro_defn_action)
# the whole expression is also the conjunction of two
# predicates: the first node predicate, and the remaining
# relation predicates
tgrep_expr.setParseAction(_tgrep_conjunction_action)
tgrep_expr_labeled.setParseAction(_tgrep_segmented_pattern_action)
tgrep_expr2.setParseAction(functools.partial(_tgrep_conjunction_action,
join_char = ':'))
tgrep_exprs.setParseAction(_tgrep_exprs_action)
return tgrep_exprs.ignore('#' + pyparsing.restOfLine)
def tgrep_tokenize(tgrep_string):
'''
Tokenizes a TGrep search string into separate tokens.
'''
parser = _build_tgrep_parser(False)
if isinstance(tgrep_string, binary_type):
tgrep_string = tgrep_string.decode()
return list(parser.parseString(tgrep_string))
def tgrep_compile(tgrep_string):
'''
Parses (and tokenizes, if necessary) a TGrep search string into a
lambda function.
'''
parser = _build_tgrep_parser(True)
if isinstance(tgrep_string, binary_type):
tgrep_string = tgrep_string.decode()
return list(parser.parseString(tgrep_string, parseAll=True))[0]
def treepositions_no_leaves(tree):
'''
Returns all the tree positions in the given tree which are not
leaf nodes.
'''
treepositions = tree.treepositions()
# leaves are treeposition tuples that are not prefixes of any
# other treeposition
prefixes = set()
for pos in treepositions:
for length in range(len(pos)):
prefixes.add(pos[:length])
return [pos for pos in treepositions if pos in prefixes]
def tgrep_positions(pattern, trees, search_leaves=True):
"""
Return the tree positions in the trees which match the given pattern.
:param pattern: a tgrep search pattern
:type pattern: str or output of tgrep_compile()
:param trees: a sequence of NLTK trees (usually ParentedTrees)
:type trees: iter(ParentedTree) or iter(Tree)
    :param search_leaves: whether to return matching leaf nodes
:type search_leaves: bool
:rtype: iter(tree positions)
"""
if isinstance(pattern, (binary_type, text_type)):
pattern = tgrep_compile(pattern)
for tree in trees:
try:
if search_leaves:
positions = tree.treepositions()
else:
positions = treepositions_no_leaves(tree)
yield [position for position in positions
if pattern(tree[position])]
except AttributeError:
yield []
def tgrep_nodes(pattern, trees, search_leaves=True):
"""
Return the tree nodes in the trees which match the given pattern.
:param pattern: a tgrep search pattern
:type pattern: str or output of tgrep_compile()
:param trees: a sequence of NLTK trees (usually ParentedTrees)
:type trees: iter(ParentedTree) or iter(Tree)
    :param search_leaves: whether to return matching leaf nodes
:type search_leaves: bool
:rtype: iter(tree nodes)
"""
if isinstance(pattern, (binary_type, text_type)):
pattern = tgrep_compile(pattern)
for tree in trees:
try:
if search_leaves:
positions = tree.treepositions()
else:
positions = treepositions_no_leaves(tree)
yield [tree[position] for position in positions
if pattern(tree[position])]
except AttributeError:
yield []
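# Example usage (a minimal sketch; the tree and patterns are illustrative and
# assume NLTK's ParentedTree is importable alongside this module):
#
#   from nltk.tree import ParentedTree
#   tree = ParentedTree.fromstring(
#       '(S (NP (DT the) (JJ big) (NN dog)) (VP (VBD barked)))')
#   list(tgrep_nodes('NN', [tree]))           # one list of matches per tree
#   list(tgrep_positions('NP < JJ', [tree]))  # -> [[(0,)]]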
# === tfidf_tokenizer/Tokenizer.py (repo: koaNam/YATA2, license: MIT) ===
import abc
class Tokenizer(abc.ABC):
    """Abstract tokenizer interface: fit on a corpus, transform input text,
    and save/load the fitted model."""
@abc.abstractmethod
def fit(self, corpus):
pass
@abc.abstractmethod
def transform(self, input_text, value_count):
return
@abc.abstractmethod
def save_model(self, filepath):
return
@abc.abstractmethod
def load_model(self, filepath):
return
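# A minimal concrete subclass sketch (illustrative only; the whitespace
# tokenizer and pickle-based persistence are assumptions, not part of this
# package):
#
#   import pickle
#
#   class WhitespaceTokenizer(Tokenizer):
#       def fit(self, corpus):
#           self.vocab = {tok for doc in corpus for tok in doc.split()}
#       def transform(self, input_text, value_count):
#           return input_text.split()[:value_count]
#       def save_model(self, filepath):
#           with open(filepath, 'wb') as f:
#               pickle.dump(self.vocab, f)
#       def load_model(self, filepath):
#           with open(filepath, 'rb') as f:
#               self.vocab = pickle.load(f)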
# === simpleeval.py (repo: bozokopic/simpleeval, license: MIT) ===
"""
SimpleEval - (C) 2013-2022 Daniel Fairhead
-------------------------------------
An short, easy to use, safe and reasonably extensible expression evaluator.
Designed for things like in a website where you want to allow the user to
generate a string, or a number from some other input, without allowing full
eval() or other unsafe or needlessly complex linguistics.
-------------------------------------
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
-------------------------------------
Initial idea copied from J.F. Sebastian on Stack Overflow
( http://stackoverflow.com/a/9558001/1973500 ) with
modifications and many improvements.
-------------------------------------
Contributors:
- corro (Robin Baumgartner) (py3k)
- dratchkov (David R) (nested dicts)
- marky1991 (Mark Young) (slicing)
- T045T (Nils Berg) (!=, py3kstr, obj.attributes)
- perkinslr (Logan Perkins) (.__globals__ or .func_ breakouts)
- impala2 (Kirill Stepanov) (massive _eval refactor)
- gk (ugik) (Other iterables than str can DOS too, and can be made)
- daveisfera (Dave Johansen) 'not' Boolean op, Pycharm, pep8, various other fixes
- xaled (Khalid Grandi) method chaining correctly, double-eval bugfix.
- EdwardBetts (Edward Betts) spelling correction.
- charlax (Charles-Axel Dein charlax) Makefile and cleanups
- mommothazaz123 (Andrew Zhu) f"string" support, Python 3.8 support
- lubieowoce (Uryga) various potential vulnerabilities
- JCavallo (Jean Cavallo) names dict shouldn't be modified
- Birne94 (Daniel Birnstiel) for fixing leaking generators.
- patricksurry (Patrick Surry) or should return last value, even if falsy.
- shughes-uk (Samantha Hughes) python w/o 'site' should not fail to import.
- KOLANICH packaging / deployment / setup help & << + >> ops
- graingert (Thomas Grainger) packaging / deployment / setup help
- bozokopic (Bozo Kopic) Memory leak fix
-------------------------------------
Basic Usage:
>>> s = SimpleEval()
>>> s.eval("20 + 30")
50
You can add your own functions easily too:
if file.txt contents is "11"
>>> def get_file():
... with open("file.txt", 'r') as f:
... return f.read()
>>> s.functions["get_file"] = get_file
>>> s.eval("int(get_file()) + 31")
42
For more information, see the full package documentation on pypi, or the github
repo.
-----------
If you don't need to re-use the evaluator (with its names, functions, etc.),
then you can use the simple_eval() function:
>>> simple_eval("21 + 19")
40
You can pass names, operators and functions to the simple_eval function as
well:
>>> simple_eval("40 + two", names={"two": 2})
42
"""
import ast
import collections.abc
import inspect
import operator as op
import sys
import warnings
import weakref
from random import random
PYTHON3 = sys.version_info[0] == 3
########################################
# Module wide 'globals'
MAX_STRING_LENGTH = 100000
MAX_COMPREHENSION_LENGTH = 10000
MAX_POWER = 4000000 # highest exponent
MAX_SHIFT = 10000 # highest << or >> (lshift / rshift)
DISALLOW_PREFIXES = ["_", "func_"]
DISALLOW_METHODS = ["format", "format_map", "mro"]
# Disallow functions:
# This, strictly speaking, is not necessary. These /should/ never be accessable anyway,
# if DISALLOW_PREFIXES and DISALLOW_METHODS are all right. This is here to try and help
# people not be stupid. Allowing these functions opens up all sorts of holes - if any of
# their functionality is required, then please wrap them up in a safe container. And think
# very hard about it first. And don't say I didn't warn you.
# builtins is a dict in python >3.6 but a module before
DISALLOW_FUNCTIONS = {type, isinstance, eval, getattr, setattr, repr, compile, open}
if hasattr(__builtins__, "help") or (
hasattr(__builtins__, "__contains__") and "help" in __builtins__
):
# PyInstaller environment doesn't include this module.
DISALLOW_FUNCTIONS.add(help)
# Use weak method references (eliminate cyclic references in SimpleEval)
USE_WEAK_METHOD_REF = True
if PYTHON3:
# exec is not a function in Python2...
exec("DISALLOW_FUNCTIONS.add(exec)") # pylint: disable=exec-used
########################################
# Exceptions:
class InvalidExpression(Exception):
"""Generic Exception"""
pass
class FunctionNotDefined(InvalidExpression):
"""sorry! That function isn't defined!"""
def __init__(self, func_name, expression):
self.message = "Function '{0}' not defined," " for expression '{1}'.".format(
func_name, expression
)
setattr(self, "func_name", func_name) # bypass 2to3 confusion.
self.expression = expression
super(InvalidExpression, self).__init__(self.message)
class NameNotDefined(InvalidExpression):
"""a name isn't defined."""
def __init__(self, name, expression):
self.name = name
self.message = "'{0}' is not defined for expression '{1}'".format(name, expression)
self.expression = expression
super(InvalidExpression, self).__init__(self.message)
class AttributeDoesNotExist(InvalidExpression):
"""attribute does not exist"""
def __init__(self, attr, expression):
self.message = "Attribute '{0}' does not exist in expression '{1}'".format(
attr, expression
)
self.attr = attr
self.expression = expression
super(InvalidExpression, self).__init__(self.message)
class FeatureNotAvailable(InvalidExpression):
"""What you're trying to do is not allowed."""
pass
class NumberTooHigh(InvalidExpression):
"""Sorry! That number is too high. I don't want to spend the
next 10 years evaluating this expression!"""
pass
class IterableTooLong(InvalidExpression):
"""That iterable is **way** too long, baby."""
pass
class AssignmentAttempted(UserWarning):
"""Assignment not allowed in SimpleEval"""
pass
########################################
# Default simple functions to include:
def random_int(top):
"""return a random int below <top>"""
return int(random() * top)
def safe_power(a, b): # pylint: disable=invalid-name
"""a limited exponent/to-the-power-of function, for safety reasons"""
if abs(a) > MAX_POWER or abs(b) > MAX_POWER:
raise NumberTooHigh("Sorry! I don't want to evaluate {0} ** {1}".format(a, b))
return a ** b
def safe_mult(a, b): # pylint: disable=invalid-name
"""limit the number of times an iterable can be repeated..."""
if hasattr(a, "__len__") and b * len(a) > MAX_STRING_LENGTH:
raise IterableTooLong("Sorry, I will not evalute something that long.")
if hasattr(b, "__len__") and a * len(b) > MAX_STRING_LENGTH:
raise IterableTooLong("Sorry, I will not evalute something that long.")
return a * b
def safe_add(a, b): # pylint: disable=invalid-name
"""iterable length limit again"""
if hasattr(a, "__len__") and hasattr(b, "__len__"):
if len(a) + len(b) > MAX_STRING_LENGTH:
raise IterableTooLong(
"Sorry, adding those two together would" " make something too long."
)
return a + b
def safe_rshift(a, b): # pylint: disable=invalid-name
"""rshift, but with the maximum"""
if abs(b) > MAX_SHIFT:
raise NumberTooHigh("Sorry! I don't want to evaluate {0} >> {1}".format(a, b))
return a >> b
def safe_lshift(a, b): # pylint: disable=invalid-name
"""lshift, but with the maximum"""
if abs(b) > MAX_SHIFT:
raise NumberTooHigh("Sorry! I don't want to evaluate {0} << {1}".format(a, b))
return a << b
########################################
# Defaults for the evaluator:
DEFAULT_OPERATORS = {
ast.Add: safe_add,
ast.Sub: op.sub,
ast.Mult: safe_mult,
ast.Div: op.truediv,
ast.FloorDiv: op.floordiv,
ast.RShift: safe_rshift,
ast.LShift: safe_lshift,
ast.Pow: safe_power,
ast.Mod: op.mod,
ast.Eq: op.eq,
ast.NotEq: op.ne,
ast.Gt: op.gt,
ast.Lt: op.lt,
ast.GtE: op.ge,
ast.LtE: op.le,
ast.Not: op.not_,
ast.USub: op.neg,
ast.UAdd: op.pos,
ast.In: lambda x, y: op.contains(y, x),
ast.NotIn: lambda x, y: not op.contains(y, x),
ast.Is: lambda x, y: x is y,
ast.IsNot: lambda x, y: x is not y,
}
DEFAULT_FUNCTIONS = {
"rand": random,
"randint": random_int,
"int": int,
"float": float,
"str": str if PYTHON3 else unicode,
}
DEFAULT_NAMES = {"True": True, "False": False, "None": None}
ATTR_INDEX_FALLBACK = True
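# Example of customising the defaults (a sketch; `ops` is a local name used
# for illustration only):
#
#   ops = DEFAULT_OPERATORS.copy()
#   del ops[ast.Pow]                          # forbid '**' entirely
#   simple_eval("2 + 3 * 4", operators=ops)   # -> 14
#   simple_eval("2 ** 3", operators=ops)      # raises KeyError on ast.Pow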
########################################
# Helper dict with weakref methods
class _WeakMethodValueDictionary(collections.abc.MutableMapping):
    def __init__(self, data=None):
        # avoid a mutable default argument; fall back to an empty mapping
        data = data if data is not None else {}
        self._data = {k: self._wrap_value(v) for k, v in data.items()}
def _wrap_value(self, value):
return (weakref.WeakMethod(value) if inspect.ismethod(value)
else lambda: value)
def __getitem__(self, key):
return self._data[key]()
def __setitem__(self, key, value):
self._data[key] = self._wrap_value(value)
def __delitem__(self, key):
del self._data[key]
def __iter__(self):
return iter(self._data)
def __len__(self):
return len(self._data)
########################################
# And the actual evaluator:
class SimpleEval(object): # pylint: disable=too-few-public-methods
"""A very simple expression parser.
>>> s = SimpleEval()
>>> s.eval("20 + 30 - ( 10 * 5)")
0
"""
expr = ""
def __init__(self, operators=None, functions=None, names=None):
"""
Create the evaluator instance. Set up valid operators (+,-, etc)
functions (add, random, get_val, whatever) and names."""
if not operators:
operators = DEFAULT_OPERATORS.copy()
if not functions:
functions = DEFAULT_FUNCTIONS.copy()
if not names:
names = DEFAULT_NAMES.copy()
self.operators = operators
self.functions = functions
self.names = names
self.nodes = {
ast.Expr: self._eval_expr,
ast.Assign: self._eval_assign,
ast.AugAssign: self._eval_aug_assign,
ast.Import: self._eval_import,
ast.Num: self._eval_num,
ast.Str: self._eval_str,
ast.Name: self._eval_name,
ast.UnaryOp: self._eval_unaryop,
ast.BinOp: self._eval_binop,
ast.BoolOp: self._eval_boolop,
ast.Compare: self._eval_compare,
ast.IfExp: self._eval_ifexp,
ast.Call: self._eval_call,
ast.keyword: self._eval_keyword,
ast.Subscript: self._eval_subscript,
ast.Attribute: self._eval_attribute,
ast.Index: self._eval_index,
ast.Slice: self._eval_slice,
}
if USE_WEAK_METHOD_REF:
self.nodes = _WeakMethodValueDictionary(self.nodes)
# py3k stuff:
if hasattr(ast, "NameConstant"):
self.nodes[ast.NameConstant] = self._eval_constant
# py3.6, f-strings
if hasattr(ast, "JoinedStr"):
self.nodes[ast.JoinedStr] = self._eval_joinedstr # f-string
self.nodes[
ast.FormattedValue
] = self._eval_formattedvalue # formatted value in f-string
# py3.8 uses ast.Constant instead of ast.Num, ast.Str, ast.NameConstant
if hasattr(ast, "Constant"):
self.nodes[ast.Constant] = self._eval_constant
# Defaults:
self.ATTR_INDEX_FALLBACK = ATTR_INDEX_FALLBACK
# Check for forbidden functions:
for f in self.functions.values():
if f in DISALLOW_FUNCTIONS:
raise FeatureNotAvailable("This function {} is a really bad idea.".format(f))
def eval(self, expr):
"""evaluate an expresssion, using the operators, functions and
names previously set up."""
# set a copy of the expression aside, so we can give nice errors...
self.expr = expr
# and evaluate:
return self._eval(ast.parse(expr.strip()).body[0])
def _eval(self, node):
"""The internal evaluator used on each node in the parsed tree."""
try:
handler = self.nodes[type(node)]
except KeyError:
raise FeatureNotAvailable(
"Sorry, {0} is not available in this " "evaluator".format(type(node).__name__)
)
return handler(node)
def _eval_expr(self, node):
return self._eval(node.value)
def _eval_assign(self, node):
warnings.warn(
"Assignment ({}) attempted, but this is ignored".format(self.expr), AssignmentAttempted
)
return self._eval(node.value)
def _eval_aug_assign(self, node):
warnings.warn(
"Assignment ({}) attempted, but this is ignored".format(self.expr), AssignmentAttempted
)
return self._eval(node.value)
def _eval_import(self, node):
raise FeatureNotAvailable("Sorry, 'import' is not allowed.")
@staticmethod
def _eval_num(node):
return node.n
@staticmethod
def _eval_str(node):
if len(node.s) > MAX_STRING_LENGTH:
raise IterableTooLong(
"String Literal in statement is too long!"
" ({0}, when {1} is max)".format(len(node.s), MAX_STRING_LENGTH)
)
return node.s
@staticmethod
def _eval_constant(node):
if hasattr(node.value, "__len__") and len(node.value) > MAX_STRING_LENGTH:
raise IterableTooLong(
"Literal in statement is too long!"
" ({0}, when {1} is max)".format(len(node.value), MAX_STRING_LENGTH)
)
return node.value
def _eval_unaryop(self, node):
return self.operators[type(node.op)](self._eval(node.operand))
def _eval_binop(self, node):
return self.operators[type(node.op)](self._eval(node.left), self._eval(node.right))
def _eval_boolop(self, node):
if isinstance(node.op, ast.And):
vout = False
for value in node.values:
vout = self._eval(value)
if not vout:
return vout
return vout
elif isinstance(node.op, ast.Or):
for value in node.values:
vout = self._eval(value)
if vout:
return vout
return vout
def _eval_compare(self, node):
right = self._eval(node.left)
to_return = True
for operation, comp in zip(node.ops, node.comparators):
if not to_return:
break
left = right
right = self._eval(comp)
to_return = self.operators[type(operation)](left, right)
return to_return
def _eval_ifexp(self, node):
return self._eval(node.body) if self._eval(node.test) else self._eval(node.orelse)
def _eval_call(self, node):
if isinstance(node.func, ast.Attribute):
func = self._eval(node.func)
else:
try:
func = self.functions[node.func.id]
except KeyError:
raise FunctionNotDefined(node.func.id, self.expr)
            except AttributeError:
raise FeatureNotAvailable("Lambda Functions not implemented")
if func in DISALLOW_FUNCTIONS:
raise FeatureNotAvailable("This function is forbidden")
return func(
*(self._eval(a) for a in node.args), **dict(self._eval(k) for k in node.keywords)
)
def _eval_keyword(self, node):
return node.arg, self._eval(node.value)
def _eval_name(self, node):
try:
# This happens at least for slicing
# This is a safe thing to do because it is impossible
            # that there is a true expression assigning to none
# (the compiler rejects it, so you can't even
# pass that to ast.parse)
if hasattr(self.names, "__getitem__"):
return self.names[node.id]
elif callable(self.names):
return self.names(node)
else:
raise InvalidExpression(
'Trying to use name (variable) "{0}"'
' when no "names" defined for'
" evaluator".format(node.id)
)
except KeyError:
if node.id in self.functions:
return self.functions[node.id]
raise NameNotDefined(node.id, self.expr)
def _eval_subscript(self, node):
container = self._eval(node.value)
key = self._eval(node.slice)
try:
return container[key]
except KeyError:
raise
def _eval_attribute(self, node):
for prefix in DISALLOW_PREFIXES:
if node.attr.startswith(prefix):
raise FeatureNotAvailable(
"Sorry, access to __attributes "
" or func_ attributes is not available. "
"({0})".format(node.attr)
)
if node.attr in DISALLOW_METHODS:
raise FeatureNotAvailable(
"Sorry, this method is not available. " "({0})".format(node.attr)
)
# eval node
node_evaluated = self._eval(node.value)
# Maybe the base object is an actual object, not just a dict
try:
return getattr(node_evaluated, node.attr)
except (AttributeError, TypeError):
pass
# TODO: is this a good idea? Try and look for [x] if .x doesn't work?
if self.ATTR_INDEX_FALLBACK:
try:
return node_evaluated[node.attr]
except (KeyError, TypeError):
pass
# If it is neither, raise an exception
raise AttributeDoesNotExist(node.attr, self.expr)
def _eval_index(self, node):
return self._eval(node.value)
def _eval_slice(self, node):
lower = upper = step = None
if node.lower is not None:
lower = self._eval(node.lower)
if node.upper is not None:
upper = self._eval(node.upper)
if node.step is not None:
step = self._eval(node.step)
return slice(lower, upper, step)
def _eval_joinedstr(self, node):
length = 0
evaluated_values = []
for n in node.values:
val = str(self._eval(n))
if len(val) + length > MAX_STRING_LENGTH:
raise IterableTooLong("Sorry, I will not evaluate something this long.")
evaluated_values.append(val)
return "".join(evaluated_values)
def _eval_formattedvalue(self, node):
if node.format_spec:
fmt = "{:" + self._eval(node.format_spec) + "}"
return fmt.format(self._eval(node.value))
return self._eval(node.value)
class EvalWithCompoundTypes(SimpleEval):
"""
SimpleEval with additional Compound Types, and their respective
function editions. (list, tuple, dict, set).
"""
def __init__(self, operators=None, functions=None, names=None):
super(EvalWithCompoundTypes, self).__init__(operators, functions, names)
self.functions.update(list=list, tuple=tuple, dict=dict, set=set)
self.nodes.update(
{
ast.Dict: self._eval_dict,
ast.Tuple: self._eval_tuple,
ast.List: self._eval_list,
ast.Set: self._eval_set,
ast.ListComp: self._eval_comprehension,
ast.GeneratorExp: self._eval_comprehension,
}
)
def eval(self, expr):
self._max_count = 0
return super(EvalWithCompoundTypes, self).eval(expr)
def _eval_dict(self, node):
return {self._eval(k): self._eval(v) for (k, v) in zip(node.keys, node.values)}
def _eval_tuple(self, node):
return tuple(self._eval(x) for x in node.elts)
def _eval_list(self, node):
return list(self._eval(x) for x in node.elts)
def _eval_set(self, node):
return set(self._eval(x) for x in node.elts)
def _eval_comprehension(self, node):
to_return = []
extra_names = {}
previous_name_evaller = self.nodes[ast.Name]
def eval_names_extra(node):
"""
Here we hide our extra scope for within this comprehension
"""
if node.id in extra_names:
return extra_names[node.id]
return previous_name_evaller(node)
self.nodes.update({ast.Name: eval_names_extra})
def recurse_targets(target, value):
"""
Recursively (enter, (into, (nested, name), unpacking)) = \
and, (assign, (values, to), each
"""
if isinstance(target, ast.Name):
extra_names[target.id] = value
else:
for t, v in zip(target.elts, value):
recurse_targets(t, v)
def do_generator(gi=0):
g = node.generators[gi]
for i in self._eval(g.iter):
self._max_count += 1
if self._max_count > MAX_COMPREHENSION_LENGTH:
raise IterableTooLong("Comprehension generates too many elements")
recurse_targets(g.target, i)
if all(self._eval(iff) for iff in g.ifs):
if len(node.generators) > gi + 1:
do_generator(gi + 1)
else:
to_return.append(self._eval(node.elt))
try:
do_generator()
finally:
self.nodes.update({ast.Name: previous_name_evaller})
return to_return
def simple_eval(expr, operators=None, functions=None, names=None):
"""Simply evaluate an expresssion"""
s = SimpleEval(operators=operators, functions=functions, names=names)
return s.eval(expr)
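# Example usage of the compound-type evaluator (illustrative):
#
#   s = EvalWithCompoundTypes(names={"xs": [1, 2, 3]})
#   s.eval("[x * 2 for x in xs]")      # -> [2, 4, 6]
#   s.eval("set(x * 2 for x in xs)")   # -> {2, 4, 6}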
# === eend/bin/infer.py (repo: PierreTsr/EEND, license: MIT) ===
#!/usr/bin/env python3
#
# Copyright 2019 Hitachi, Ltd. (author: Yusuke Fujita)
# Licensed under the MIT license.
#
import yamlargparse
from eend import system_info
parser = yamlargparse.ArgumentParser(description='decoding')
parser.add_argument('-c', '--config', help='config file path',
action=yamlargparse.ActionConfigFile)
parser.add_argument('data_dir',
help='kaldi-style data dir')
parser.add_argument('--train_dir',
help='kaldi-style data dir used for training.')
parser.add_argument('model_file',
help='best.nnet')
parser.add_argument('out_dir',
help='output directory.')
parser.add_argument('--backend', default='chainer',
choices=['chainer', 'pytorch'],
help='backend framework')
parser.add_argument('--model_type', default='LSTM', type=str)
parser.add_argument('--gpu', type=int, default=-1)
parser.add_argument('--num-speakers', type=int, default=4)
parser.add_argument('--hidden-size', default=256, type=int,
help='number of lstm output nodes')
parser.add_argument('--num-lstm-layers', default=1, type=int,
help='number of lstm layers')
parser.add_argument('--input-transform', default='',
choices=['', 'log', 'logmel',
'logmel23', 'logmel23_swn', 'logmel23_mn'],
help='input transform')
parser.add_argument('--lambda-loss', default=0.01, type=float)
parser.add_argument('--embedding-size', default=256, type=int)
parser.add_argument('--embedding-layers', default=2, type=int)
parser.add_argument('--chunk-size', default=2000, type=int,
help='input is chunked with this size')
parser.add_argument('--context-size', default=0, type=int,
help='frame splicing')
parser.add_argument('--subsampling', default=1, type=int)
parser.add_argument('--sampling-rate', default=16000, type=int,
help='sampling rate')
parser.add_argument('--frame-size', default=1024, type=int,
help='frame size')
parser.add_argument('--frame-shift', default=256, type=int,
help='frame shift')
parser.add_argument('--transformer-encoder-n-heads', default=4, type=int)
parser.add_argument('--transformer-encoder-n-layers', default=2, type=int)
parser.add_argument('--save-attention-weight', default=0, type=int)
attractor_args = parser.add_argument_group('attractor')
attractor_args.add_argument('--use-attractor', action='store_true',
help='Enable encoder-decoder attractor mode')
attractor_args.add_argument('--shuffle', action='store_true',
help='Shuffle the order in time-axis before input to the network')
attractor_args.add_argument('--attractor-loss-ratio', default=1.0, type=float,
help='weighting parameter')
attractor_args.add_argument('--attractor-encoder-dropout', default=0.1, type=float)
attractor_args.add_argument('--attractor-decoder-dropout', default=0.1, type=float)
attractor_args.add_argument('--attractor-threshold', default=0.5, type=float)
args = parser.parse_args()
system_info.print_system_info()
print(args)
if args.backend == 'chainer':
from eend.chainer_backend.infer import infer
infer(args)
elif args.backend == 'pytorch':
# TODO
# from eend.pytorch_backend.infer import infer
# infer(args)
raise NotImplementedError()
else:
raise ValueError()
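# Example invocation (illustrative; the config file, data directories and
# model paths are assumptions, not files shipped with this script):
#
#   python eend/bin/infer.py -c conf/infer.yaml data/eval exp/model/best.nnet \
#       exp/infer --backend chainer --num-speakers 2 --input-transform logmel23_mn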
# === src/infrastructure/clients/provider/mock/requests.py (repo: sdediego/forex-django-clean-architecture, license: MIT) ===
# coding: utf-8
import json
import random
from typing import List
from src.domain.exchange_rate import CurrencyEntity, CurrencyExchangeRateEntity
from src.infrastructure.clients.provider.utils import (
get_business_days, get_last_business_day)
def currencies() -> List[CurrencyEntity]:
with open('../xchange_api/currencies.json', 'r') as currencies_file:
data = json.load(currencies_file)
return [CurrencyEntity(**currency) for currency in data['availableCurrencies']]
def historical_rate(data: dict) -> CurrencyExchangeRateEntity:
return CurrencyExchangeRateEntity(
source_currency=data.get('source_currency'),
exchanged_currency=data.get('exchanged_currency'),
valuation_date=get_last_business_day(data.get('valuation_date')),
rate_value=round(random.uniform(0.5, 1.5), 6)
)
def timeseries_rates(data: dict) -> List[CurrencyExchangeRateEntity]:
source_currency = data.get('source_currency')
exchanged_currencies = data.get('exchanged_currency').split(',')
business_days = get_business_days(data.get('date_from'), data.get('date_to'))
return [
CurrencyExchangeRateEntity(
source_currency=source_currency,
exchanged_currency=currency,
valuation_date=business_day,
rate_value=round(random.uniform(0.5, 1.5), 6)
)
for business_day in business_days for currency in exchanged_currencies
]
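# Example call (illustrative; the date format is an assumption based on the
# helper names, and rate values are random by design):
#
#   timeseries_rates({
#       'source_currency': 'EUR',
#       'exchanged_currency': 'USD,GBP',
#       'date_from': '2021-11-01',
#       'date_to': '2021-11-03',
#   })
#   # -> one CurrencyExchangeRateEntity per (business day, currency) pair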
# === extensions/reactroles.py (repo: ooxxe04/Merely-Framework, license: MIT) ===
import nextcord, asyncio
from nextcord.ext import commands
class ReactRoles(commands.Cog):
"""allows admins to set up messages where reacting grants users roles"""
def __init__(self, bot:commands.Bot):
self.bot = bot
if not bot.config.getboolean('extensions', 'auth', fallback=False):
raise Exception("'auth' must be enabled to use 'reactroles'")
if not bot.config.getboolean('extensions', 'help', fallback=False):
print(Warning("'help' is a recommended extension for 'reactroles'"))
self.auth = bot.cogs['Auth']
# ensure config file has required data
if not bot.config.has_section('reactroles'):
bot.config.add_section('reactroles')
msglist = 'list[nextcord.abc.Messageable]'
        self.watching: msglist = []
#TODO: make it possible for admins to add more reaction roles or delete them later
#TODO: notice if the rr prompt is deleted during setup
@commands.Cog.listener("on_ready")
async def fetch_tracking_messages(self):
search = [k for k in self.bot.config['reactroles'].keys()]
for chid,msgid in set([(rr.split('_')[0], rr.split('_')[1]) for rr in search]):
try:
ch = await self.bot.fetch_channel(chid)
msg = await ch.fetch_message(msgid)
self.watching.append(msg)
except Exception as e:
print(f"failed to get reactionrole message {msgid} from channel {chid}. {e}")
await self.catchup()
@commands.Cog.listener("on_message_delete")
async def revoke_tracking_message(self, message):
if message in self.watching:
matches = [k for k in self.bot.config['reactroles'].keys() if k.split('_')[1] == str(message.id)]
[self.bot.config.remove_option('reactroles',k) for k in matches]
#TODO: (low priority) maybe remove deleted message from self.watching?
@commands.Cog.listener("on_raw_reaction_add")
async def reactrole_add(self, data:nextcord.RawReactionActionEvent):
if isinstance(data.member, nextcord.Member):
emojiid = data.emoji if data.emoji.is_unicode_emoji() else data.emoji.id
if f"{data.channel_id}_{data.message_id}_{emojiid}_roles" in self.bot.config['reactroles']:
channel = await self.bot.fetch_channel(data.channel_id)
roleids = [int(r) for r in self.bot.config['reactroles'][f"{data.channel_id}_{data.message_id}_{emojiid}_roles"].split(' ')]
roles = []
for roleid in roleids:
try:
roles.append(channel.guild.get_role(roleid))
except Exception as e:
print("failed to get role for reactrole: "+str(e))
await data.member.send(self.bot.babel((data.member.id, data.guild_id,), 'reactroles', 'role_granted', roles=', '.join([role.name for role in roles])))
await data.member.add_roles(*roles, reason='reactroles')
@commands.Cog.listener("on_raw_reaction_remove")
async def reactrole_remove(self, data:nextcord.RawReactionActionEvent):
if data.guild_id:
guild = await self.bot.fetch_guild(data.guild_id)
member = guild.get_member(data.user_id)
emojiid = data.emoji if data.emoji.is_unicode_emoji() else data.emoji.id
if f"{data.channel_id}_{data.message_id}_{emojiid}_roles" in self.bot.config['reactroles']:
channel = await self.bot.fetch_channel(data.channel_id)
roleids = [int(r) for r in self.bot.config['reactroles'][f"{data.channel_id}_{data.message_id}_{emojiid}_roles"].split(' ')]
roles = []
for roleid in roleids:
try:
roles.append(channel.guild.get_role(roleid))
except Exception as e:
print("failed to get role for reactrole: "+str(e))
await member.send(self.bot.babel((member.id, data.guild_id,), 'reactroles', 'role_taken', roles=', '.join([role.name for role in roles])))
await member.remove_roles(*roles, reason='reactroles')
async def catchup(self):
#TODO: give and take roles as needed to catch up to reality
pass
@commands.command(aliases=['reactionrole', 'rr', 'reactroles', 'reactionroles'])
@commands.guild_only()
async def reactrole(self, ctx:commands.Context, *, prompt:str):
"""react role setup interface"""
self.auth.admins(ctx)
target = await ctx.reply(prompt)
tmp = None
emojis = []
try:
while len(emojis) < 10:
tmp = await ctx.reply(self.bot.babel(ctx, 'reactroles', 'setup1', canstop=len(emojis) > 0))
reaction, _ = await self.bot.wait_for('reaction_add', check=lambda r, u: u==ctx.author and r.message == target, timeout=30)
if reaction.emoji not in emojis:
await target.add_reaction(reaction)
try:
await target.remove_reaction(reaction, ctx.author)
except:
pass
await tmp.delete()
tmp = await ctx.reply(self.bot.babel(ctx, 'reactroles', 'setup2', emoji=str(reaction.emoji)))
msg = await self.bot.wait_for('message', check=lambda m: m.channel == ctx.channel and m.author == ctx.author and len(m.role_mentions) > 0, timeout=30)
emojiid = reaction.emoji if isinstance(reaction.emoji, str) else str(reaction.emoji.id)
self.bot.config['reactroles'][f"{ctx.channel.id}_{target.id}_{emojiid}_roles"] = ' '.join([str(r.id) for r in msg.role_mentions])
await tmp.delete()
await msg.delete()
emojis.append(reaction)
else:
try:
await target.remove_reaction(reaction, ctx.author)
except:
pass
await tmp.delete()
tmp = await ctx.reply(self.bot.babel(ctx, 'reactroles', 'setup2_repeat'))
await asyncio.sleep(5)
await tmp.delete()
except asyncio.TimeoutError:
if len(emojis) == 0:
try:
await target.delete()
if tmp is not None: await tmp.delete()
except:
pass
await ctx.reply(self.bot.babel(ctx, 'reactroles', 'setup_cancel'))
else:
try:
await tmp.delete()
except:
pass
await ctx.reply(self.bot.babel(ctx, 'reactroles', 'setup_success'))
self.watching.append(target)
else:
await ctx.reply(self.bot.babel(ctx, 'reactroles', 'setup_success'))
self.watching.append(target)
self.bot.config.save()
def setup(bot):
    bot.add_cog(ReactRoles(bot))
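# For reference, the config entries written by the setup flow look like
# (all ids below are illustrative):
#
#   [reactroles]
#   123456789_987654321_1057_roles = 111111111 222222222
#
# i.e. "<channel_id>_<message_id>_<emoji_or_emoji_id>_roles" mapped to a
# space-separated list of role ids, matching the keys read in reactrole_add
# and reactrole_remove.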
# === tests/test_huggingfacemodel.py (repo: L-Net-1992/sahi, license: MIT) ===
# OBSS SAHI Tool
# Code written by Devrim Cavusoglu, 2022.
import unittest
import pybboxes.functional as pbf
from sahi.utils.cv import read_image
from sahi.utils.huggingface import HuggingfaceTestConstants
MODEL_DEVICE = "cpu"
CONFIDENCE_THRESHOLD = 0.3
IMAGE_SIZE = 320
class TestHuggingfaceDetectionModel(unittest.TestCase):
def test_load_model(self):
from sahi.model import HuggingfaceDetectionModel
huggingface_detection_model = HuggingfaceDetectionModel(
model_path=HuggingfaceTestConstants.YOLOS_TINY_MODEL_PATH,
confidence_threshold=CONFIDENCE_THRESHOLD,
device=MODEL_DEVICE,
category_remapping=None,
load_at_init=True,
)
self.assertNotEqual(huggingface_detection_model.model, None)
def test_set_model(self):
from transformers import AutoFeatureExtractor, AutoModelForObjectDetection
from sahi.model import HuggingfaceDetectionModel
huggingface_model = AutoModelForObjectDetection.from_pretrained(HuggingfaceTestConstants.YOLOS_TINY_MODEL_PATH)
huggingface_feature_extractor = AutoFeatureExtractor.from_pretrained(
HuggingfaceTestConstants.YOLOS_TINY_MODEL_PATH
)
huggingface_detection_model = HuggingfaceDetectionModel(
model=huggingface_model,
feature_extractor=huggingface_feature_extractor,
confidence_threshold=CONFIDENCE_THRESHOLD,
device=MODEL_DEVICE,
category_remapping=None,
load_at_init=True,
)
self.assertNotEqual(huggingface_detection_model.model, None)
def test_perform_inference(self):
from sahi.model import HuggingfaceDetectionModel
huggingface_detection_model = HuggingfaceDetectionModel(
model_path=HuggingfaceTestConstants.YOLOS_TINY_MODEL_PATH,
confidence_threshold=CONFIDENCE_THRESHOLD,
device=MODEL_DEVICE,
category_remapping=None,
load_at_init=True,
image_size=IMAGE_SIZE,
)
# prepare image
image_path = "tests/data/small-vehicles1.jpeg"
image = read_image(image_path)
# perform inference
huggingface_detection_model.perform_inference(image)
original_predictions = huggingface_detection_model.original_predictions
scores, cat_ids, boxes = huggingface_detection_model.get_valid_predictions(
logits=original_predictions.logits[0], pred_boxes=original_predictions.pred_boxes[0]
)
# find box of first car detection with conf greater than 0.5
for i, box in enumerate(boxes):
if huggingface_detection_model.category_mapping[cat_ids[i].item()] == "car": # if category car
break
image_height, image_width, _ = huggingface_detection_model.image_shapes[0]
box = list(
pbf.convert_bbox(
box.tolist(),
from_type="yolo",
to_type="voc",
image_size=(image_width, image_height),
return_values=True,
)
)
# compare
desired_bbox = [639, 198, 663, 218]
predicted_bbox = list(map(int, box[:4]))
margin = 2
for ind, point in enumerate(predicted_bbox):
assert point < desired_bbox[ind] + margin and point > desired_bbox[ind] - margin
for score in scores:
self.assertGreaterEqual(score.item(), CONFIDENCE_THRESHOLD)
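    # Note on the conversion above: pybboxes maps a normalized YOLO box
    # (x_center, y_center, width, height) to absolute VOC corners
    # (x_min, y_min, x_max, y_max); e.g. on a 100x100 image,
    # (0.5, 0.5, 0.2, 0.2) becomes (40, 40, 60, 60).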
def test_convert_original_predictions(self):
from sahi.model import HuggingfaceDetectionModel
huggingface_detection_model = HuggingfaceDetectionModel(
model_path=HuggingfaceTestConstants.YOLOS_TINY_MODEL_PATH,
confidence_threshold=CONFIDENCE_THRESHOLD,
device=MODEL_DEVICE,
category_remapping=None,
load_at_init=True,
image_size=IMAGE_SIZE,
)
# prepare image
image_path = "tests/data/small-vehicles1.jpeg"
image = read_image(image_path)
# perform inference
huggingface_detection_model.perform_inference(image)
# convert predictions to ObjectPrediction list
huggingface_detection_model.convert_original_predictions()
object_prediction_list = huggingface_detection_model.object_prediction_list
# compare
self.assertEqual(len(object_prediction_list), 46)
self.assertEqual(object_prediction_list[0].category.id, 3)
self.assertEqual(object_prediction_list[0].category.name, "car")
desired_bbox = [639, 198, 24, 20]
predicted_bbox = object_prediction_list[0].bbox.to_coco_bbox()
margin = 2
for ind, point in enumerate(predicted_bbox):
assert point < desired_bbox[ind] + margin and point > desired_bbox[ind] - margin
self.assertEqual(object_prediction_list[2].category.id, 3)
self.assertEqual(object_prediction_list[2].category.name, "car")
desired_bbox = [663, 187, 17, 16]
predicted_bbox = object_prediction_list[2].bbox.to_coco_bbox()
for ind, point in enumerate(predicted_bbox):
assert point < desired_bbox[ind] + margin and point > desired_bbox[ind] - margin
for object_prediction in object_prediction_list:
self.assertGreaterEqual(object_prediction.score.value, CONFIDENCE_THRESHOLD)
if __name__ == "__main__":
unittest.main()
# === apps/users/models.py (repo: Niracler/website_py, license: MIT) ===
from datetime import datetime
from django.db import models
from django.contrib.auth.models import AbstractUser
# Create your models here.
class UserProfile(AbstractUser):
"""
用户
"""
name = models.CharField(max_length=30, null=True, blank=True, verbose_name="姓名")
birthday = models.DateField(null=True, blank=True, verbose_name="出生年月")
gender = models.CharField(max_length=6, choices=(("male", u"男"), ("female", "女")), default="female", verbose_name="性别")
mobile = models.CharField(null=True, blank=True, max_length=11, verbose_name="电话")
email = models.EmailField(max_length=100, null=True, blank=True, verbose_name="邮箱")
class Meta:
verbose_name = "用户"
verbose_name_plural = verbose_name
def __str__(self):
return self.username
class VerifyCode(models.Model):
"""
    SMS verification code
"""
code = models.CharField(max_length=10, verbose_name="验证码")
mobile = models.CharField(max_length=11, verbose_name="电话")
add_time = models.DateTimeField(default=datetime.now, verbose_name="添加时间")
class Meta:
verbose_name = "短信验证码"
verbose_name_plural = verbose_name
def __str__(self):
        return self.code
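# Note: a custom user model such as UserProfile only takes effect when
# settings point at it (assuming this app is registered under the label
# "users"):
#
#   AUTH_USER_MODEL = "users.UserProfile"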
ccacb700822fb032e2a8b954c0ecd834c42fa5a9 | 83 | py | Python | 01-meet_python/fact.py | palmieric/Tecnologie_Web-Introduzione_a_Python | b10ce49a947b239ca2af1938248f7191937b2f89 | ["CC0-1.0"] | 3 | 2021-05-17T14:48:42.000Z | 2021-05-24T10:12:06.000Z | 01-meet_python/fact.py | palmieric/Tecnologie_Web-Introduzione_a_Python | b10ce49a947b239ca2af1938248f7191937b2f89 | ["CC0-1.0"] | null | null | null | 01-meet_python/fact.py | palmieric/Tecnologie_Web-Introduzione_a_Python | b10ce49a947b239ca2af1938248f7191937b2f89 | ["CC0-1.0"] | 2 | 2021-05-17T13:52:15.000Z | 2021-05-24T10:44:54.000Z | def fact(n):
if n == 0:
return 1
else:
return n * fact(n - 1)
print(fact(3))
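# Worked expansion (illustrative addition, not in the original file):
# fact(3) -> 3 * fact(2) -> 3 * 2 * fact(1) -> 3 * 2 * 1 * fact(0)
#         -> 3 * 2 * 1 * 1 = 6, so the call above prints 6.
assert fact(3) == 6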
| 10.375 | 25 | 0.554217 |
8e679365622092dff88d78a2e03607e87b21df3a | 1,228 | py | Python | roboticstoolbox/models/DH/Planar2.py | LabRobPL/robotics-toolbox-python | 4fe4d8a23bda77f5fde39c5d7b53dc953c2a07dd | ["MIT"] | 1 | 2021-07-02T09:08:06.000Z | 2021-07-02T09:08:06.000Z | roboticstoolbox/models/DH/Planar2.py | LabRobPL/robotics-toolbox-python | 4fe4d8a23bda77f5fde39c5d7b53dc953c2a07dd | ["MIT"] | null | null | null | roboticstoolbox/models/DH/Planar2.py | LabRobPL/robotics-toolbox-python | 4fe4d8a23bda77f5fde39c5d7b53dc953c2a07dd | ["MIT"] | 1 | 2022-02-02T20:27:58.000Z | 2022-02-02T20:27:58.000Z | """
@author: Luis Fernando Lara Tobar
@author: Peter Corke
@author: Samuel Drew
"""
from roboticstoolbox import DHRobot, RevoluteDH
from math import pi
class Planar2(DHRobot):
"""
Class that models a planar 2-link robot
``Planar2()`` is a class which models a 2-link planar robot and
describes its kinematic characteristics using standard DH
conventions.
.. runblock:: pycon
>>> import roboticstoolbox as rtb
>>> robot = rtb.models.DH.Planar2()
>>> print(robot)
Defined joint configurations are:
- qz, zero angles, all folded up
- q1, links are horizontal and vertical respectively
- q2, links are vertical and horizontal respectively
.. note::
- Robot has only 2 DoF.
.. codeauthor:: Peter Corke
"""
def __init__(self):
L = [
RevoluteDH(a=1),
RevoluteDH(a=1)
]
super().__init__(L, name='Planar 2 link', keywords=('planar',))
self.addconfiguration("qz", [0, 0])
self.addconfiguration("q1", [0, pi/2])
self.addconfiguration("q2", [pi/2, -pi/2])
if __name__ == '__main__': # pragma nocover
robot = Planar2()
print(robot)
| 22.327273 | 71 | 0.602606 |
8d8cf6cb899f4c35e4aea8552c7bdb91734b00ca | 9,158 | py | Python | lib/hmmlearn/tests/test_base.py | mvaliadis/hmmlearn | a401dfa854a111138f95e368cd0441dc266ca2c0 | ["BSD-3-Clause"] | null | null | null | lib/hmmlearn/tests/test_base.py | mvaliadis/hmmlearn | a401dfa854a111138f95e368cd0441dc266ca2c0 | ["BSD-3-Clause"] | null | null | null | lib/hmmlearn/tests/test_base.py | mvaliadis/hmmlearn | a401dfa854a111138f95e368cd0441dc266ca2c0 | ["BSD-3-Clause"] | null | null | null | import numpy as np
from numpy.testing import assert_allclose
import pytest
from scipy import special
from hmmlearn.base import BaseHMM, ConvergenceMonitor
class TestMonitor:
def test_converged_by_iterations(self):
m = ConvergenceMonitor(tol=1e-3, n_iter=2, verbose=False)
assert not m.converged
m.report(-0.01)
assert not m.converged
m.report(-0.1)
assert m.converged
def test_converged_by_log_prob(self):
m = ConvergenceMonitor(tol=1e-3, n_iter=10, verbose=False)
for log_prob in [-0.03, -0.02, -0.01]:
m.report(log_prob)
assert not m.converged
m.report(-0.0101)
assert m.converged
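# Note (illustrative): convergence here means the latest change in
# log-probability, (-0.0101) - (-0.01) = -1e-4, dropped below tol = 1e-3.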
def test_reset(self):
m = ConvergenceMonitor(tol=1e-3, n_iter=10, verbose=False)
m.iter = 1
m.history.append(-0.01)
m._reset()
assert m.iter == 0
assert not m.history
def test_report_first_iteration(self, capsys):
m = ConvergenceMonitor(tol=1e-3, n_iter=10, verbose=True)
m.report(-0.01)
out, err = capsys.readouterr()
assert not out
expected = m._template.format(iter=1, log_prob=-0.01, delta=np.nan)
assert err.splitlines() == [expected]
def test_report(self, capsys):
n_iter = 10
m = ConvergenceMonitor(tol=1e-3, n_iter=n_iter, verbose=True)
for i in reversed(range(n_iter)):
m.report(-0.01 * i)
out, err = capsys.readouterr()
assert not out
assert len(err.splitlines()) == n_iter
assert len(m.history) == n_iter
class StubHMM(BaseHMM):
"""An HMM with hardcoded observation probabilities."""
def _compute_log_likelihood(self, X):
return self.log_frameprob
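# Minimal sketch of how the stub is driven (values assumed for illustration):
#   h = StubHMM(2)
#   h.log_frameprob = np.log([[0.9, 0.2], [0.1, 0.8]])
# _compute_log_likelihood() then returns those log-probabilities verbatim,
# which is what lets the tests below hard-code the emission model.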
class TestBaseAgainstWikipedia:
def setup_method(self, method):
# Example from http://en.wikipedia.org/wiki/Forward-backward_algorithm
self.frameprob = np.asarray([[0.9, 0.2],
[0.9, 0.2],
[0.1, 0.8],
[0.9, 0.2],
[0.9, 0.2]])
self.log_frameprob = np.log(self.frameprob)
h = StubHMM(2)
h.transmat_ = [[0.7, 0.3], [0.3, 0.7]]
h.startprob_ = [0.5, 0.5]
h.log_frameprob = self.log_frameprob
h.frameprob = self.frameprob
self.hmm = h
def test_do_forward_scaling_pass(self):
log_prob, fwdlattice, scaling_factors = \
self.hmm._do_forward_scaling_pass(self.frameprob)
ref_log_prob = -3.3725
assert round(log_prob, 4) == ref_log_prob
reffwdlattice = np.exp([[0.4500, 0.1000],
[0.3105, 0.0410],
[0.0230, 0.0975],
[0.0408, 0.0150],
[0.0298, 0.0046]])
assert_allclose(np.exp(fwdlattice), reffwdlattice, 4)
def test_do_forward_pass(self):
log_prob, fwdlattice = \
self.hmm._do_forward_log_pass(self.log_frameprob)
ref_log_prob = -3.3725
assert round(log_prob, 4) == ref_log_prob
reffwdlattice = np.array([[0.4500, 0.1000],
[0.3105, 0.0410],
[0.0230, 0.0975],
[0.0408, 0.0150],
[0.0298, 0.0046]])
assert_allclose(np.exp(fwdlattice), reffwdlattice, 4)
def test_do_backward_scaling_pass(self):
log_prob, fwdlattice, scaling_factors = \
self.hmm._do_forward_scaling_pass(self.frameprob)
bwdlattice = self.hmm._do_backward_scaling_pass(
self.frameprob, scaling_factors)
refbwdlattice = np.array([[0.0661, 0.0455],
[0.0906, 0.1503],
[0.4593, 0.2437],
[0.6900, 0.4100],
[1.0000, 1.0000]])
scaling_factors = np.cumprod(scaling_factors[::-1])[::-1]
bwdlattice_scaled = bwdlattice / scaling_factors[:, None]
# Answer will be equivalent when the scaling factor is accounted for
assert_allclose(bwdlattice_scaled, refbwdlattice, 4)
def test_do_backward_log_pass(self):
bwdlattice = self.hmm._do_backward_log_pass(self.log_frameprob)
refbwdlattice = np.array([[0.0661, 0.0455],
[0.0906, 0.1503],
[0.4593, 0.2437],
[0.6900, 0.4100],
[1.0000, 1.0000]])
assert_allclose(np.exp(bwdlattice), refbwdlattice, 4)
def test_do_viterbi_pass(self):
log_prob, state_sequence = \
self.hmm._do_viterbi_pass(self.log_frameprob)
refstate_sequence = [0, 0, 1, 0, 0]
assert_allclose(state_sequence, refstate_sequence)
ref_log_prob = -4.4590
assert round(log_prob, 4) == ref_log_prob
def test_score_samples(self):
# ``StubHMM`` ignores the values in ``X``, so we just pass in an
# array of the appropriate shape.
log_prob, posteriors = self.hmm.score_samples(self.log_frameprob)
assert_allclose(posteriors.sum(axis=1), np.ones(len(posteriors)))
ref_log_prob = -3.3725
assert round(log_prob, 4) == ref_log_prob
refposteriors = np.array([[0.8673, 0.1327],
[0.8204, 0.1796],
[0.3075, 0.6925],
[0.8204, 0.1796],
[0.8673, 0.1327]])
assert_allclose(posteriors, refposteriors, atol=1e-4)
def test_generate_samples(self):
X0, Z0 = self.hmm.sample(n_samples=10)
X, Z = self.hmm.sample(n_samples=10, currstate=Z0[-1])
assert len(Z0) == len(Z) == 10 and Z[0] == Z0[-1]
class TestBaseConsistentWithGMM:
def setup_method(self, method):
n_components = 8
n_samples = 10
self.log_frameprob = np.log(
np.random.random((n_samples, n_components)))
h = StubHMM(n_components)
h.log_frameprob = self.log_frameprob
# If startprob and transmat are uniform across all states (the
# default), the transitions are uninformative - the model
# reduces to a GMM with uniform mixing weights (in terms of
# posteriors, not likelihoods).
h.startprob_ = np.ones(n_components) / n_components
h.transmat_ = np.ones((n_components, n_components)) / n_components
self.hmm = h
def test_score_samples(self):
log_prob, hmmposteriors = self.hmm.score_samples(self.log_frameprob)
n_samples, n_components = self.log_frameprob.shape
assert_allclose(hmmposteriors.sum(axis=1), np.ones(n_samples))
norm = special.logsumexp(self.log_frameprob, axis=1)[:, np.newaxis]
gmmposteriors = np.exp(self.log_frameprob
- np.tile(norm, (1, n_components)))
assert_allclose(hmmposteriors, gmmposteriors)
def test_decode(self):
_log_prob, state_sequence = self.hmm.decode(self.log_frameprob)
n_samples, n_components = self.log_frameprob.shape
norm = special.logsumexp(self.log_frameprob, axis=1)[:, np.newaxis]
gmmposteriors = np.exp(self.log_frameprob
- np.tile(norm, (1, n_components)))
gmmstate_sequence = gmmposteriors.argmax(axis=1)
assert_allclose(state_sequence, gmmstate_sequence)
def test_base_hmm_attributes():
n_components = 20
startprob = np.random.random(n_components)
startprob /= startprob.sum()
transmat = np.random.random((n_components, n_components))
transmat /= np.tile(transmat.sum(axis=1)[:, np.newaxis], (1, n_components))
h = StubHMM(n_components)
assert h.n_components == n_components
h.startprob_ = startprob
assert_allclose(h.startprob_, startprob)
with pytest.raises(ValueError):
h.startprob_ = 2 * startprob
h._check()
with pytest.raises(ValueError):
h.startprob_ = []
h._check()
with pytest.raises(ValueError):
h.startprob_ = np.zeros((n_components - 2, 2))
h._check()
h.startprob_ = startprob
h.transmat_ = transmat
assert_allclose(h.transmat_, transmat)
with pytest.raises(ValueError):
h.transmat_ = 2 * transmat
h._check()
with pytest.raises(ValueError):
h.transmat_ = []
h._check()
with pytest.raises(ValueError):
h.transmat_ = np.zeros((n_components - 2, n_components))
h._check()
def test_stationary_distribution():
n_components = 10
h = StubHMM(n_components)
transmat = np.random.random((n_components, n_components))
transmat /= np.tile(transmat.sum(axis=1)[:, np.newaxis], (1, n_components))
h.transmat_ = transmat
stationary = h.get_stationary_distribution()
assert stationary.dtype == float
assert np.dot(h.get_stationary_distribution().T, h.transmat_) \
== pytest.approx(stationary)
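# Note (illustrative): the assertion above is the defining fixed-point
# property of a stationary distribution -- pi.T @ P == pi.T for the
# transition matrix P.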
| 37.687243 | 79 | 0.586045 |
4a03abd6550d92cf8ff4230f0f29b12cb96f51ee | 34,484 | py | Python | arc/species/speciesTest.py | goldmanm/ARC | e2fd97942cb50e3ccbf80ee344c8c9ca83f195de | ["MIT"] | null | null | null | arc/species/speciesTest.py | goldmanm/ARC | e2fd97942cb50e3ccbf80ee344c8c9ca83f195de | ["MIT"] | null | null | null | arc/species/speciesTest.py | goldmanm/ARC | e2fd97942cb50e3ccbf80ee344c8c9ca83f195de | ["MIT"] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This module contains unit tests of the arc.species.species module
"""
from __future__ import (absolute_import, division, print_function, unicode_literals)
import unittest
import os
import shutil
from rmgpy.molecule.molecule import Molecule
from rmgpy.species import Species
from rmgpy.reaction import Reaction
from arc.species.species import ARCSpecies, TSGuess, get_min_energy_conformer,\
determine_rotor_type, determine_rotor_symmetry, check_species_xyz
from arc.species.converter import get_xyz_string, get_xyz_matrix, molecules_from_xyz
from arc.settings import arc_path, default_levels_of_theory
from arc.rmgdb import make_rmg_database_object
from arc.scheduler import Scheduler
################################################################################
class TestARCSpecies(unittest.TestCase):
"""
Contains unit tests for the ARCSpecies class
"""
@classmethod
def setUpClass(cls):
"""
A method that is run before all unit tests in this class.
"""
cls.maxDiff = None
# Method 1: RMG Species object (here by SMILES)
cls.spc1_rmg = Species(molecule=[Molecule().fromSMILES(str('C=C[O]'))]) # delocalized radical
cls.spc1_rmg.label = str('vinoxy')
cls.spc1 = ARCSpecies(rmg_species=cls.spc1_rmg)
# Method 2: ARCSpecies object by XYZ (also give SMILES for thermo BAC)
oh_xyz = str("""O 0.00000000 0.00000000 -0.12002167
H 0.00000000 0.00000000 0.85098324""")
cls.spc2 = ARCSpecies(label=str('OH'), xyz=oh_xyz, smiles=str('[OH]'), multiplicity=2, charge=0)
# Method 3: ARCSpecies object by SMILES
cls.spc3 = ARCSpecies(label=str('methylamine'), smiles=str('CN'), multiplicity=1, charge=0)
# Method 4: ARCSpecies object by RMG Molecule object
mol4 = Molecule().fromSMILES(str('C=CC'))
cls.spc4 = ARCSpecies(label=str('propene'), mol=mol4, multiplicity=1, charge=0)
# Method 5: ARCSpecies by AdjacencyList (to generate AdjLists, see https://rmg.mit.edu/molecule_search)
n2h4_adj = str("""1 N u0 p1 c0 {2,S} {3,S} {4,S}
2 N u0 p1 c0 {1,S} {5,S} {6,S}
3 H u0 p0 c0 {1,S}
4 H u0 p0 c0 {1,S}
5 H u0 p0 c0 {2,S}
6 H u0 p0 c0 {2,S}""")
cls.spc5 = ARCSpecies(label=str('N2H4'), adjlist=n2h4_adj, multiplicity=1, charge=0)
n3_xyz = str("""N -1.1997440839 -0.1610052059 0.0274738287
H -1.4016624407 -0.6229695533 -0.8487034080
H -0.0000018759 1.2861082773 0.5926077870
N 0.0000008520 0.5651072858 -0.1124621525
H -1.1294692206 -0.8709078271 0.7537518889
N 1.1997613019 -0.1609980472 0.0274604887
H 1.1294795781 -0.8708998550 0.7537444446
H 1.4015274689 -0.6230592706 -0.8487058662""")
cls.spc6 = ARCSpecies(label=str('N3'), xyz=n3_xyz, multiplicity=1, charge=0, smiles=str('NNN'))
xyz1 = os.path.join(arc_path, 'arc', 'testing', 'xyz', 'AIBN.gjf')
cls.spc7 = ARCSpecies(label='AIBN', smiles=str('N#CC(C)(C)N=NC(C)(C)C#N'), xyz=xyz1)
hso3_xyz = str("""S -0.12383700 0.10918200 -0.21334200
O 0.97332200 -0.98800100 0.31790100
O -1.41608500 -0.43976300 0.14487300
O 0.32370100 1.42850400 0.21585900
H 1.84477700 -0.57224200 0.35517700""")
cls.spc8 = ARCSpecies(label=str('HSO3'), xyz=hso3_xyz, multiplicity=2, charge=0, smiles=str('O[S](=O)=O'))
nh_s_adj = str("""1 N u0 p2 c0 {2,S}
2 H u0 p0 c0 {1,S}""")
nh_s_xyz = str("""N 0.50949998 0.00000000 0.00000000
H -0.50949998 0.00000000 0.00000000""")
cls.spc9 = ARCSpecies(label=str('NH2(S)'), adjlist=nh_s_adj, xyz=nh_s_xyz, multiplicity=1, charge=0)
def test_conformers(self):
"""Test conformer generation"""
self.spc1.generate_conformers() # vinoxy has two res. structures, each is assigned two conformers (RDKit/OpenBabel)
self.assertEqual(len(self.spc1.conformers), 4)
self.assertEqual(len(self.spc1.conformers), len(self.spc1.conformer_energies))
def test_rmg_species_conversion_into_arc_species(self):
"""Test the conversion of an RMG species into an ARCSpecies"""
self.spc1_rmg.label = None
self.spc = ARCSpecies(rmg_species=self.spc1_rmg, label=str('vinoxy'))
self.assertEqual(self.spc.label, str('vinoxy'))
self.assertEqual(self.spc.multiplicity, 2)
self.assertEqual(self.spc.charge, 0)
def test_determine_rotors(self):
"""Test determination of rotors in ARCSpecies"""
self.spc1.determine_rotors()
self.spc2.determine_rotors()
self.spc3.determine_rotors()
self.spc4.determine_rotors()
self.spc5.determine_rotors()
self.spc6.determine_rotors()
self.assertEqual(len(self.spc1.rotors_dict), 1)
self.assertEqual(len(self.spc2.rotors_dict), 0)
self.assertEqual(len(self.spc3.rotors_dict), 1)
self.assertEqual(len(self.spc4.rotors_dict), 1)
self.assertEqual(len(self.spc5.rotors_dict), 1)
self.assertEqual(len(self.spc6.rotors_dict), 2)
self.assertEqual(self.spc1.rotors_dict[0][str('pivots')], [2, 3])
self.assertEqual(self.spc1.rotors_dict[0][str('scan')], [4, 2, 3, 1])
self.assertTrue(all([t in [2, 4, 5] for t in self.spc1.rotors_dict[0][str('top')]]))
self.assertEqual(self.spc1.rotors_dict[0][str('times_dihedral_set')], 0)
self.assertEqual(self.spc3.rotors_dict[0][str('pivots')], [1, 2])
self.assertEqual(self.spc4.rotors_dict[0][str('pivots')], [1, 2])
self.assertEqual(self.spc5.rotors_dict[0][str('pivots')], [1, 2])
self.assertEqual(self.spc6.rotors_dict[0][str('pivots')], [1, 4])
self.assertEqual(self.spc6.rotors_dict[0][str('scan')], [2, 1, 4, 6])
self.assertEqual(len(self.spc6.rotors_dict[0][str('top')]), 3)
self.assertTrue(all([t in [1, 5, 2] for t in self.spc6.rotors_dict[0][str('top')]]))
self.assertEqual(self.spc6.rotors_dict[1][str('pivots')], [4, 6])
self.assertEqual(self.spc6.rotors_dict[1][str('scan')], [1, 4, 6, 7])
self.assertEqual(len(self.spc6.rotors_dict[1][str('top')]), 3)
self.assertTrue(all([t in [6, 7, 8] for t in self.spc6.rotors_dict[1][str('top')]]))
def test_symmetry(self):
"""Test external symmetry and chirality determination"""
allene = ARCSpecies(label=str('allene'), smiles=str('C=C=C'), multiplicity=1, charge=0)
allene.final_xyz = """C -1.01646 0.10640 -0.91445
H -1.39000 1.03728 -1.16672
C 0.00000 0.00000 0.00000
C 1.01653 -0.10640 0.91438
H -1.40975 -0.74420 -1.35206
H 0.79874 -0.20864 1.92036
H 2.00101 -0.08444 0.59842"""
allene.determine_symmetry()
self.assertEqual(allene.optical_isomers, 1)
self.assertEqual(allene.external_symmetry, 4)
ammonia = ARCSpecies(label=str('ammonia'), smiles=str('N'), multiplicity=1, charge=0)
ammonia.final_xyz = """N 0.06617 0.20024 0.13886
H -0.62578 -0.34119 0.63709
H -0.32018 0.51306 -0.74036
H 0.87976 -0.37219 -0.03564"""
ammonia.determine_symmetry()
self.assertEqual(ammonia.optical_isomers, 1)
self.assertEqual(ammonia.external_symmetry, 3)
methane = ARCSpecies(label=str('methane'), smiles=str('C'), multiplicity=1, charge=0)
methane.final_xyz = """C 0.00000 0.00000 0.00000
H -0.29717 0.97009 -0.39841
H 1.08773 -0.06879 0.01517
H -0.38523 -0.10991 1.01373
H -0.40533 -0.79140 -0.63049"""
methane.determine_symmetry()
self.assertEqual(methane.optical_isomers, 1)
self.assertEqual(methane.external_symmetry, 12)
chiral = ARCSpecies(label=str('chiral'), smiles=str('C(C)(O)(N)'), multiplicity=1, charge=0)
chiral.final_xyz = """C -0.49341625 0.37828349 0.00442108
H -1.56331545 0.39193350 0.01003359
N 0.01167132 1.06479568 1.20212111
H 1.01157784 1.05203730 1.19687531
H -0.30960193 2.01178202 1.20391932
O -0.03399634 -0.97590449 0.00184366
H -0.36384913 -1.42423238 -0.78033350
C 0.02253835 1.09779040 -1.25561654
H -0.34510997 0.59808430 -2.12741255
H -0.32122209 2.11106387 -1.25369100
H 1.09243518 1.08414066 -1.26122530"""
chiral.determine_symmetry()
self.assertEqual(chiral.optical_isomers, 2)
self.assertEqual(chiral.external_symmetry, 1)
s8 = ARCSpecies(label=str('s8'), smiles=str('S1SSSSSSS1'), multiplicity=1, charge=0)
s8.final_xyz = """S 2.38341 0.12608 0.09413
S 1.45489 1.88955 -0.13515
S -0.07226 2.09247 1.14966
S -1.81072 1.52327 0.32608
S -2.23488 -0.39181 0.74645
S -1.60342 -1.62383 -0.70542
S 0.22079 -2.35820 -0.30909
S 1.66220 -1.25754 -1.16665"""
s8.determine_symmetry()
self.assertEqual(s8.optical_isomers, 1)
self.assertEqual(s8.external_symmetry, 8)
water = ARCSpecies(label=str('H2O'), smiles=str('O'), multiplicity=1, charge=0)
water.final_xyz = """O 0.19927 0.29049 -0.11186
H 0.50770 -0.61852 -0.09124
H -0.70697 0.32803 0.20310"""
water.determine_symmetry()
self.assertEqual(water.optical_isomers, 1)
self.assertEqual(water.external_symmetry, 2)
def test_xyz_format_conversion(self):
"""Test conversions from string to list xyz formats"""
xyz_str0 = """N 2.24690600 -0.00006500 0.11597700
C -1.05654800 1.29155000 -0.02642500
C -1.05661400 -1.29150400 -0.02650600
C -0.30514100 0.00000200 0.00533200
C 1.08358900 -0.00003400 0.06558000
H -0.39168300 2.15448600 -0.00132500
H -1.67242600 1.35091400 -0.93175000
H -1.74185400 1.35367700 0.82742800
H -0.39187100 -2.15447800 0.00045500
H -1.74341400 -1.35278100 0.82619100
H -1.67091600 -1.35164600 -0.93286400
"""
xyz_list, atoms, x, y, z = get_xyz_matrix(xyz_str0)
# test all forms of input for get_xyz_string():
xyz_str1 = get_xyz_string(xyz_list, symbol=atoms)
xyz_str2 = get_xyz_string(xyz_list, number=[7, 6, 6, 6, 6, 1, 1, 1, 1, 1, 1])
mol, _ = molecules_from_xyz(xyz_str0)
xyz_str3 = get_xyz_string(xyz_list, mol=mol)
self.assertEqual(xyz_str0, xyz_str1)
self.assertEqual(xyz_str1, xyz_str2)
self.assertEqual(xyz_str2, xyz_str3)
self.assertEqual(atoms, ['N', 'C', 'C', 'C', 'C', 'H', 'H', 'H', 'H', 'H', 'H'])
self.assertEqual(x, [2.246906, -1.056548, -1.056614, -0.305141, 1.083589, -0.391683, -1.672426, -1.741854,
-0.391871, -1.743414, -1.670916])
self.assertEqual(y[1], 1.29155)
self.assertEqual(z[-1], -0.932864)
def test_is_linear(self):
"""Test determination of molecule linearity by xyz"""
xyz1 = """C 0.000000 0.000000 0.000000
O 0.000000 0.000000 1.159076
O 0.000000 0.000000 -1.159076""" # a trivial case
xyz2 = """S -0.06618943 -0.12360663 -0.07631983
O -0.79539707 0.86755487 1.02675668
O -0.68919931 0.25421823 -1.34830853
N 0.01546439 -1.54297548 0.44580391
C 1.59721519 0.47861334 0.00711000
H 1.94428095 0.40772394 1.03719428
H 2.20318015 -0.14715186 -0.64755729
H 1.59252246 1.51178950 -0.33908352
H -0.87856890 -2.02453514 0.38494433
H -1.34135876 1.49608206 0.53295071""" # a non linear molecule
xyz3 = """N 0.0000000000 0.0000000000 0.3146069129
O -1.0906813653 0.0000000000 -0.1376405244
O 1.0906813653 0.0000000000 -0.1376405244""" # a non linear 3-atom molecule
xyz4 = """N 0.0000000000 0.0000000000 0.1413439534
H -0.8031792912 0.0000000000 -0.4947038368
H 0.8031792912 0.0000000000 -0.4947038368""" # a non linear 3-atom molecule
xyz5 = """S -0.5417345330 0.8208150346 0.0000000000
O 0.9206183692 1.6432038228 0.0000000000
H -1.2739176462 1.9692549926 0.0000000000""" # a non linear 3-atom molecule
xyz6 = """N 1.18784533 0.98526702 0.00000000
C 0.04124533 0.98526702 0.00000000
H -1.02875467 0.98526702 0.00000000""" # linear
xyz7 = """C -4.02394116 0.56169428 0.00000000
H -5.09394116 0.56169428 0.00000000
C -2.82274116 0.56169428 0.00000000
H -1.75274116 0.56169428 0.00000000""" # linear
xyz8 = """C -1.02600933 2.12845307 0.00000000
C -0.77966935 0.95278385 0.00000000
H -1.23666197 3.17751246 0.00000000
H -0.56023545 -0.09447399 0.00000000""" # just 0.5 degree off from linearity, so NOT linear...
xyz9 = """O -1.1998 0.1610 0.0275
O -1.4021 0.6223 -0.8489
O -1.48302 0.80682 -1.19946""" # just 3 points in space on a straight line (not a physical molecule)
spc1 = ARCSpecies(label=str('test_spc'), xyz=xyz1, multiplicity=1, charge=0, smiles=str('O=C=O'))
spc2 = ARCSpecies(label=str('test_spc'), xyz=xyz2, multiplicity=1, charge=0, smiles=str('[NH-][S+](=O)(O)C'))
spc3 = ARCSpecies(label=str('test_spc'), xyz=xyz3, multiplicity=2, charge=0, smiles=str('[O]N=O'))
spc4 = ARCSpecies(label=str('test_spc'), xyz=xyz4, multiplicity=2, charge=0, smiles=str('[NH2]'))
spc5 = ARCSpecies(label=str('test_spc'), xyz=xyz5, multiplicity=2, charge=0, smiles=str('[O]S'))
spc6 = ARCSpecies(label=str('test_spc'), xyz=xyz6, multiplicity=1, charge=0, smiles=str('C#N'))
spc7 = ARCSpecies(label=str('test_spc'), xyz=xyz7, multiplicity=1, charge=0, smiles=str('C#C'))
spc8 = ARCSpecies(label=str('test_spc'), xyz=xyz8, multiplicity=1, charge=0, smiles=str('C#C'))
spc9 = ARCSpecies(label=str('test_spc'), xyz=xyz9, multiplicity=1, charge=0, smiles=str('[O-][O+]=O'))
self.assertTrue(spc1.is_linear())
self.assertTrue(spc6.is_linear())
self.assertTrue(spc7.is_linear())
self.assertTrue(spc9.is_linear())
self.assertFalse(spc2.is_linear())
self.assertFalse(spc3.is_linear())
self.assertFalse(spc4.is_linear())
self.assertFalse(spc5.is_linear())
self.assertFalse(spc8.is_linear())
def test_charge_and_multiplicity(self):
"""Test determination of molecule charge and multiplicity"""
spc1 = ARCSpecies(label='spc1', mol=Molecule(SMILES=str('C[CH]C')), generate_thermo=False)
spc2 = ARCSpecies(label='spc2', mol=Molecule(SMILES=str('CCC')), generate_thermo=False)
spc3 = ARCSpecies(label='spc3', smiles=str('N[NH]'), generate_thermo=False)
spc4 = ARCSpecies(label='spc4', smiles=str('NNN'), generate_thermo=False)
adj1 = """multiplicity 2
1 O u1 p2 c0 {2,S}
2 H u0 p0 c0 {1,S}
"""
adj2 = """1 C u0 p0 c0 {2,S} {4,S} {5,S} {6,S}
2 N u0 p1 c0 {1,S} {3,S} {7,S}
3 O u0 p2 c0 {2,S} {8,S}
4 H u0 p0 c0 {1,S}
5 H u0 p0 c0 {1,S}
6 H u0 p0 c0 {1,S}
7 H u0 p0 c0 {2,S}
8 H u0 p0 c0 {3,S}
"""
spc5 = ARCSpecies(label='spc5', adjlist=str(adj1), generate_thermo=False)
spc6 = ARCSpecies(label='spc6', adjlist=str(adj2), generate_thermo=False)
xyz1 = """O 0.00000000 0.00000000 -0.10796235
H 0.00000000 0.00000000 0.86318839"""
xyz2 = """N -0.74678912 -0.11808620 0.00000000
C 0.70509190 0.01713703 0.00000000
H 1.11547042 -0.48545356 0.87928385
H 1.11547042 -0.48545356 -0.87928385
H 1.07725194 1.05216961 0.00000000
H -1.15564250 0.32084669 0.81500594
H -1.15564250 0.32084669 -0.81500594"""
spc7 = ARCSpecies(label='spc7', xyz=xyz1, generate_thermo=False)
spc8 = ARCSpecies(label='spc8', xyz=xyz2, generate_thermo=False)
self.assertEqual(spc1.charge, 0)
self.assertEqual(spc2.charge, 0)
self.assertEqual(spc3.charge, 0)
self.assertEqual(spc4.charge, 0)
self.assertEqual(spc5.charge, 0)
self.assertEqual(spc6.charge, 0)
self.assertEqual(spc7.charge, 0)
self.assertEqual(spc8.charge, 0)
self.assertEqual(spc1.multiplicity, 2)
self.assertEqual(spc2.multiplicity, 1)
self.assertEqual(spc3.multiplicity, 2)
self.assertEqual(spc4.multiplicity, 1)
self.assertEqual(spc5.multiplicity, 2)
self.assertEqual(spc6.multiplicity, 1)
self.assertEqual(spc7.multiplicity, 2)
self.assertEqual(spc8.multiplicity, 1)
def test_as_dict(self):
"""Test Species.as_dict()"""
spc_dict = self.spc3.as_dict()
expected_dict = {'optical_isomers': None,
'number_of_rotors': 0,
'neg_freqs_trshed': [],
'external_symmetry': None,
'multiplicity': 1,
'arkane_file': None,
'E0': None,
'mol': """1 C u0 p0 c0 {2,S} {3,S} {4,S} {5,S}
2 N u0 p1 c0 {1,S} {6,S} {7,S}
3 H u0 p0 c0 {1,S}
4 H u0 p0 c0 {1,S}
5 H u0 p0 c0 {1,S}
6 H u0 p0 c0 {2,S}
7 H u0 p0 c0 {2,S}
""",
'generate_thermo': True,
'label': 'methylamine',
'long_thermo_description': spc_dict['long_thermo_description'],
'charge': 0,
'is_ts': False,
'final_xyz': '',
't1': None,
'bond_corrections': {'C-H': 3, 'C-N': 1, 'H-N': 2},
'rotors_dict': {}}
self.assertEqual(spc_dict, expected_dict)
def test_from_dict(self):
"""Test Species.from_dict()"""
species_dict = self.spc2.as_dict()
spc = ARCSpecies(species_dict=species_dict)
self.assertEqual(spc.multiplicity, 2)
self.assertEqual(spc.charge, 0)
self.assertEqual(spc.label, 'OH')
self.assertEqual(spc.mol.toSMILES(), '[OH]')
self.assertFalse(spc.is_ts)
def test_determine_rotor_type(self):
"""Test that we correctly determine whether a rotor is FreeRotor or HinderedRotor"""
free_path = os.path.join(arc_path, 'arc', 'testing', 'rotor_scans', 'CH3C(O)O_FreeRotor.out')
hindered_path = os.path.join(arc_path, 'arc', 'testing', 'rotor_scans', 'H2O2.out')
self.assertEqual(determine_rotor_type(free_path), 'FreeRotor')
self.assertEqual(determine_rotor_type(hindered_path), 'HinderedRotor')
def test_rotor_symmetry(self):
"""Test that ARC automatically determines a correct rotor symmetry"""
path1 = os.path.join(arc_path, 'arc', 'testing', 'rotor_scans', 'OOC1CCOCO1.out') # symmetry = 1; min at -10 degrees
path2 = os.path.join(arc_path, 'arc', 'testing', 'rotor_scans', 'H2O2.out') # symmetry = 1
path3 = os.path.join(arc_path, 'arc', 'testing', 'rotor_scans', 'N2O3.out') # symmetry = 2
path4 = os.path.join(arc_path, 'arc', 'testing', 'rotor_scans', 'sBuOH.out') # symmetry = 3
path5 = os.path.join(arc_path, 'arc', 'testing', 'rotor_scans', 'CH3C(O)O_FreeRotor.out') # symmetry = 6
symmetry1, _ = determine_rotor_symmetry(rotor_path=path1, label='label', pivots=[3,4])
symmetry2, _ = determine_rotor_symmetry(rotor_path=path2, label='label', pivots=[3,4])
symmetry3, _ = determine_rotor_symmetry(rotor_path=path3, label='label', pivots=[3,4])
symmetry4, _ = determine_rotor_symmetry(rotor_path=path4, label='label', pivots=[3,4])
symmetry5, _ = determine_rotor_symmetry(rotor_path=path5, label='label', pivots=[3,4])
self.assertEqual(symmetry1, 1)
self.assertEqual(symmetry2, 1)
self.assertEqual(symmetry3, 2)
self.assertEqual(symmetry4, 3)
self.assertEqual(symmetry5, 6)
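# Background note (illustrative): a rotor symmetry of N means the scan's
# energy profile repeats every 360/N degrees, e.g. the methyl-like rotor in
# sBuOH repeats every 120 degrees, hence symmetry 3 above.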
def test_xyz_from_file(self):
"""Test parsing xyz from a file and saving it in the .initial_xyz attribute"""
self.assertTrue(' N -2.36276900 2.14528400 -0.76917500' in self.spc7.initial_xyz)
def test_check_species_xyz(self):
"""Test the check_xyz() function"""
xyz = """
C -0.67567701 1.18507660 0.04672449
H -0.25592948 1.62415961 0.92757746
H -2.26870864 1.38030564 0.05865317
O -0.36671999 -0.21081064 0.01630374
H -0.73553821 -0.63718986 0.79332805
C -0.08400571 1.86907236 -1.19973252
H -0.50375517 1.42998100 -2.08057962
H -0.31518819 2.91354759 -1.17697025
H 0.97802159 1.73893214 -1.20769117
O -3.69788377 1.55609096 0.07050345
O -4.28667752 0.37487691 0.04916102
H -4.01978712 -0.12970163 0.82103635
"""
expected_xyz1 = """ C -0.67567701 1.18507660 0.04672449
H -0.25592948 1.62415961 0.92757746
H -2.26870864 1.38030564 0.05865317
O -0.36671999 -0.21081064 0.01630374
H -0.73553821 -0.63718986 0.79332805
C -0.08400571 1.86907236 -1.19973252
H -0.50375517 1.42998100 -2.08057962
H -0.31518819 2.91354759 -1.17697025
H 0.97802159 1.73893214 -1.20769117
O -3.69788377 1.55609096 0.07050345
O -4.28667752 0.37487691 0.04916102
H -4.01978712 -0.12970163 0.82103635"""
new_xyz1 = check_species_xyz(xyz)
self.assertEqual(new_xyz1, expected_xyz1)
xyz_path = os.path.join(arc_path, 'arc', 'testing', 'xyz', 'CH3C(O)O.xyz')
expected_xyz2 = """O -0.53466300 -1.24850800 -0.02156300
O -0.79314200 1.04818800 0.18134200
C -0.02397300 0.01171700 -0.37827400
C 1.40511900 0.21728200 0.07675200
H -0.09294500 0.02877800 -1.47163200
H 2.04132100 -0.57108600 -0.32806800
H 1.45535600 0.19295200 1.16972300
H 1.77484100 1.18704300 -0.25986700
H -0.43701200 -1.34990600 0.92900600
H -1.69944700 0.93441600 -0.11271200"""
new_xyz2 = check_species_xyz(xyz_path)
self.assertEqual(new_xyz2, expected_xyz2)
def test_get_min_energy_conformer(self):
"""Test that the xyz with the minimum specified energy is returned from get_min_energy_conformer()"""
xyzs = ['xyz1', 'xyz2', 'xyz3']
energies = [-5, -30, -1.5]
min_xyz = get_min_energy_conformer(xyzs, energies)
self.assertEqual(min_xyz, 'xyz2')
def test_mol_from_xyz_atom_id_1(self):
"""Test that atom ids are saved properly when loading both xyz and smiles."""
mol = self.spc6.mol
mol_list = self.spc6.mol_list
self.assertEqual(len(mol_list), 1)
res = mol_list[0]
self.assertTrue(mol.atomIDValid())
self.assertTrue(res.atomIDValid())
self.assertTrue(mol.isIsomorphic(res))
self.assertTrue(mol.isIdentical(res))
def test_mol_from_xyz_atom_id_2(self):
"""Test that atom ids are saved properly when loading both xyz and smiles."""
mol = self.spc8.mol
mol_list = self.spc8.mol_list
self.assertEqual(len(mol_list), 2)
res1, res2 = mol_list
self.assertTrue(mol.atomIDValid())
self.assertTrue(res1.atomIDValid())
self.assertTrue(res2.atomIDValid())
self.assertTrue(mol.isIsomorphic(res1))
self.assertTrue(mol.isIdentical(res1))
# Check that atom ordering is consistent, ignoring specific oxygen ordering
mol_ids = [(a.element.symbol, a.id) if a.element.symbol != 'O' else (a.element.symbol,) for a in mol.atoms]
res1_ids = [(a.element.symbol, a.id) if a.element.symbol != 'O' else (a.element.symbol,) for a in res1.atoms]
res2_ids = [(a.element.symbol, a.id) if a.element.symbol != 'O' else (a.element.symbol,) for a in res2.atoms]
self.assertEqual(mol_ids, res1_ids)
self.assertEqual(mol_ids, res2_ids)
def test_preserving_multiplicity(self):
"""Test that multiplicity is being preserved, especially when it is guessed differently from xyz"""
multiplicity_list = [2, 2, 1, 1, 1, 1, 1, 2, 1]
for i, spc in enumerate([self.spc1, self.spc2, self.spc3, self.spc4, self.spc5, self.spc6, self.spc7,
self.spc8, self.spc9]):
self.assertEqual(spc.multiplicity, multiplicity_list[i])
self.assertEqual(spc.mol.multiplicity, multiplicity_list[i])
self.assertTrue(all([structure.multiplicity == spc.multiplicity for structure in spc.mol_list]))
def test_append_conformers(self):
"""Test that ARC correctly parses its own conformer files"""
ess_settings = {'gaussian': 'server1', 'molpro': 'server2', 'qchem': 'server1', 'ssh': False}
project_directory = os.path.join(arc_path, 'Projects', 'arc_project_for_testing_delete_after_usage4')
spc1 = ARCSpecies(label=str('vinoxy'), smiles=str('C=C[O]'))
rmgdb = make_rmg_database_object()
sched1 = Scheduler(project='project_test', ess_settings=ess_settings, species_list=[spc1],
composite_method='', conformer_level=default_levels_of_theory['conformer'],
opt_level=default_levels_of_theory['opt'], freq_level=default_levels_of_theory['freq'],
sp_level=default_levels_of_theory['sp'], scan_level=default_levels_of_theory['scan'],
ts_guess_level=default_levels_of_theory['ts_guesses'], rmgdatabase=rmgdb,
project_directory=project_directory, generate_conformers=True, testing=True,
orbitals_level=default_levels_of_theory['orbitals'])
xyzs = ["""O 1.09068700 0.26516800 -0.16706300
C 2.92204100 -1.18335700 -0.38884900
C 2.27655500 -0.00373900 0.08543500
H 2.36544800 -1.88781000 -0.99914600
H 3.96112000 -1.38854500 -0.14958800
H 2.87813500 0.68828400 0.70399400
""",
"""O 1.19396100 -0.06003700 0.03890100
C 3.18797000 0.77061300 -0.87352700
C 2.43591200 -0.04439300 0.02171600
H 4.27370000 0.76090200 -0.86286100
H 2.66641700 1.41155700 -1.57757300
H 3.00398000 -0.68336800 0.72359800
""",
"""O 1.35241100 -1.02956000 -0.24056200
C -0.72084300 0.01308200 0.09573000
C 0.69217700 0.01185100 -0.09044300
H -1.25803800 -0.93018100 0.10926800
H -1.26861200 0.94177100 0.22420100
H 1.20290400 0.99303700 -0.09819400
""",
"""O -1.40102900 -0.98575100 -0.11588500
C 0.72457000 -0.01076700 0.06448800
C -0.69494600 0.03450000 -0.06206300
H 1.22539000 -0.97248000 0.11741200
H 1.31277400 0.90087100 0.10878400
H -1.16675800 1.03362600 -0.11273700"""]
energies = [0, 5, 5, 5] # J/mol
sched1.save_conformers_file(label='vinoxy', xyzs=xyzs)
self.assertTrue(os.path.isfile(os.path.join(project_directory, 'output', 'Species', 'vinoxy', 'geometry',
'conformers_before_optimization.txt')))
sched1.save_conformers_file(label='vinoxy', xyzs=xyzs, energies=energies)
self.assertTrue(os.path.isfile(os.path.join(project_directory, 'output', 'Species', 'vinoxy', 'geometry',
'conformers_after_optimization.txt')))
spc2 = ARCSpecies(label=str('vinoxy'), smiles=str('C=C[O]'), conformers_path=os.path.join(project_directory,
'output', 'Species', 'vinoxy', 'geometry', 'conformers_before_optimization.txt'))
spc3 = ARCSpecies(label=str('vinoxy'), smiles=str('C=C[O]'), conformers_path=os.path.join(project_directory,
'output', 'Species', 'vinoxy', 'geometry', 'conformers_after_optimization.txt'))
self.assertEqual(spc2.conformers[2], xyzs[2])
self.assertEqual(spc3.conformers[2], xyzs[2])
self.assertEqual(spc3.conformer_energies[2], energies[2])
def test_the_number_of_atoms_property(self):
"""Test that the number_of_atoms property functions correctly"""
self.assertEqual(self.spc1.number_of_atoms, 6)
self.assertEqual(self.spc2.number_of_atoms, 2)
self.assertEqual(self.spc3.number_of_atoms, 7)
self.assertEqual(self.spc4.number_of_atoms, 9)
self.assertEqual(self.spc5.number_of_atoms, 6)
self.assertEqual(self.spc6.number_of_atoms, 8)
self.assertEqual(self.spc7.number_of_atoms, 24)
self.assertEqual(self.spc8.number_of_atoms, 5)
self.assertEqual(self.spc9.number_of_atoms, 2)
xyz10 = """N 0.82269400 0.19834500 -0.33588000
C -0.57469800 -0.02442800 0.04618900
H -1.08412400 -0.56416500 -0.75831900
H -0.72300600 -0.58965300 0.98098100
H -1.07482500 0.94314300 0.15455500
H 1.31266200 -0.68161600 -0.46770200
H 1.32129900 0.71837500 0.38017700
"""
spc10 = ARCSpecies(label='spc10', xyz=xyz10)
self.assertEqual(spc10.number_of_atoms, 7)
@classmethod
def tearDownClass(cls):
"""
A function that is run ONCE after all unit tests in this class.
Delete all project directories created during these unit tests
"""
projects = ['arc_project_for_testing_delete_after_usage4']
for project in projects:
project_directory = os.path.join(arc_path, 'Projects', project)
shutil.rmtree(project_directory)
class TestTSGuess(unittest.TestCase):
"""
Contains unit tests for the TSGuess class
"""
@classmethod
def setUpClass(cls):
"""
A method that is run before all unit tests in this class.
"""
cls.maxDiff = None
spc1 = Species().fromSMILES(str('CON=O'))
spc1.label = str('CONO')
spc2 = Species().fromSMILES(str('C[N+](=O)[O-]'))
spc2.label = str('CNO2')
rmg_reaction = Reaction(reactants=[spc1], products=[spc2])
cls.tsg1 = TSGuess(rmg_reaction=rmg_reaction, method='AutoTST', family='H_Abstraction')
xyz = """N 0.9177905887 0.5194617797 0.0000000000
H 1.8140204898 1.0381941417 0.0000000000
H -0.4763167868 0.7509348722 0.0000000000
N 0.9992350860 -0.7048575683 0.0000000000
N -1.4430010939 0.0274543367 0.0000000000
H -0.6371484821 -0.7497769134 0.0000000000
H -2.0093636431 0.0331190314 -0.8327683174
H -2.0093636431 0.0331190314 0.8327683174"""
cls.tsg2 = TSGuess(xyz=xyz)
def test_as_dict(self):
"""Test TSGuess.as_dict()"""
tsg_dict = self.tsg1.as_dict()
expected_dict = {'method': u'autotst',
'energy': None,
'family': 'H_Abstraction',
'index': None,
'rmg_reaction': u'CON=O <=> [O-][N+](=O)C',
'success': None,
't0': None,
'execution_time': None}
self.assertEqual(tsg_dict, expected_dict)
def test_from_dict(self):
"""Test TSGuess.from_dict()
Also tests that the round trip to and from a dictionary ended in an RMG Reaction object"""
ts_dict = self.tsg1.as_dict()
tsg = TSGuess(ts_dict=ts_dict)
self.assertEqual(tsg.method, 'autotst')
self.assertTrue(isinstance(tsg.rmg_reaction, Reaction))
################################################################################
if __name__ == '__main__':
unittest.main(testRunner=unittest.TextTestRunner(verbosity=2))
| 51.855639 | 119 | 0.570699 |
44b658bf197d830cb682edb1f3d375c4e6a59e2f | 1,413 | py | Python | bert-ranker/experiment/sentence_embeddings/evaluation/evaluation_use_qa.py | UKPLab/emnlp2020-multicqa | 9d08a89dba5e51786bb3153ca3733b23006eb628 | ["Apache-2.0"] | 14 | 2020-09-23T11:47:05.000Z | 2022-01-10T07:18:55.000Z | bert-ranker/experiment/sentence_embeddings/evaluation/evaluation_use_qa.py | UKPLab/emnlp2020-multicqa | 9d08a89dba5e51786bb3153ca3733b23006eb628 | ["Apache-2.0"] | null | null | null | bert-ranker/experiment/sentence_embeddings/evaluation/evaluation_use_qa.py | UKPLab/emnlp2020-multicqa | 9d08a89dba5e51786bb3153ca3733b23006eb628 | ["Apache-2.0"] | 2 | 2021-03-05T05:19:10.000Z | 2021-09-13T08:26:39.000Z | import numpy as np
import tensorflow as tf
import tensorflow_hub as hub
from numpy.linalg import norm
from experiment.qa.evaluation import BasicQAEvaluation
class QAEvaluationUSEQA(BasicQAEvaluation):
def __init__(self, config, config_global, logger):
super(QAEvaluationUSEQA, self).__init__(config, config_global, logger)
self.batch_size = config["batchsize"]
self.module = hub.load('https://tfhub.dev/google/universal-sentence-encoder-qa/3')
self.i = 0
def start(self, model, data, valid_only=False):
return super(QAEvaluationUSEQA, self).start(model, data, valid_only)
def get_reps(self, qs, docs):
# encode the texts
repr_queries = self.module.signatures['question_encoder'](tf.constant(qs))['outputs'].numpy()
repr_docs = self.module.signatures['response_encoder'](
input=tf.constant(docs),
context=tf.constant(docs)
)['outputs'].numpy()
return repr_queries, repr_docs
def score(self, qa_pairs, model, data, tasks):
query_examples = [q.text[:4096] for (q, _, _) in qa_pairs]
doc_examples = [a.text[:4096] for (_, a, _) in qa_pairs]
q_reps, d_reps = self.get_reps(query_examples, doc_examples)
scores = (q_reps * d_reps).sum(axis=1) / (norm(q_reps, axis=1) * norm(d_reps, axis=1))
return scores, np.zeros(1)
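# The expression above is row-wise cosine similarity; a tiny sketch with
# assumed 2-d vectors:
#   q = np.array([[1.0, 0.0]]); d = np.array([[1.0, 1.0]])
#   (q * d).sum(axis=1) / (norm(q, axis=1) * norm(d, axis=1))  # -> [0.7071]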
component = QAEvaluationUSEQA
| 35.325 | 101 | 0.675867 |
86692b6ec6cd814bf04258dc9a3c24202e80cdd4 | 17,125 | py | Python | slybot/slybot/tests/test_multiple_item_extraction.py | coolkunal64/ht | b7c52d5604dd75ea4086a6ff92eaa2db85bb145c | ["BSD-3-Clause"] | null | null | null | slybot/slybot/tests/test_multiple_item_extraction.py | coolkunal64/ht | b7c52d5604dd75ea4086a6ff92eaa2db85bb145c | ["BSD-3-Clause"] | null | null | null | slybot/slybot/tests/test_multiple_item_extraction.py | coolkunal64/ht | b7c52d5604dd75ea4086a6ff92eaa2db85bb145c | ["BSD-3-Clause"] | null | null | null | # -*- coding: utf-8 -*-
import json
import re
from unittest import TestCase
from scrapy import Request, Item
from scrapy.settings import Settings
from scrapy.http.response.html import HtmlResponse
from slybot.plugins.scrapely_annotations.extraction import (
TemplatePageMultiItemExtractor,
SlybotIBLExtractor)
from slybot.plugins.scrapely_annotations.extraction.pageparsing import (
parse_template)
from slybot.plugins.scrapely_annotations.extraction.container_extractors import (
BaseContainerExtractor, ContainerExtractor, RepeatedContainerExtractor)
from slybot.plugins.scrapely_annotations.extraction.utils import group_tree
from slybot.extractors import add_extractors_to_descriptors
from slybot.item import create_slybot_item_descriptor
from slybot.plugins.scrapely_annotations.builder import (
apply_annotations, _clean_annotation_data
)
from slybot.spider import IblSpider
from slybot.spidermanager import SlybotSpiderManager
from scrapely.extraction.pageobjects import TokenDict
from scrapely.htmlpage import HtmlPage
from scrapely.extraction.regionextract import BasicTypeExtractor
from scrapely.extraction.pageparsing import parse_extraction_page
from scrapely.htmlpage import HtmlTagType
from .utils import (open_spec, open_sample_and_page, open_page, make_spider,
PATH, open_spider_page_and_results)
base_page = u"""<html><body>
<ul>{}</ul>
</body></html>""".format
item_template = u"""
<li>
<div><span>{rank}</span><h3><a href='/item/{idx}'>Item i</a></h3></div>
<div><p>Text {idx} Text {idx}</p><p>Text {idx} Text {idx}</p></div>
</li>
""".format
html = base_page('\n'.join(item_template(idx=i, rank=i if i % 2 else '')
for i in range(1, 11)))
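# Illustrative note (not in the original test): base_page and item_template
# are bound str.format methods, so item_template(idx=1, rank='') renders one
# <li> linking to '/item/1' with an empty rank <span> -- which is why the
# even-indexed items built above will later fail the required 'rank' field.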
annotations = _clean_annotation_data([{
'id': 'annotation1', 'selector': 'li > div > h3 > a',
'container_id': 'repeated_parent',
'data': {1: {'attribute': 'content', 'field': 'title', 'required': False,
'extractors': []},
2: {'attribute': 'href', 'field': 'url', 'required': False,
'extractors': ['1', '2']}}},
{'id': 'annotation2', 'selector': 'li > div > span',
'container_id': 'repeated_parent',
'data': {1: {'attribute': 'content', 'field': 'rank',
'required': True, 'extractors': []}}},
{'id': 'annotation3', 'selector': 'li > div:nth-child(2)',
'container_id': 'repeated_parent',
'data': {1: {'attribute': 'content', 'field': 'description',
'required': True, 'extractors': []}}},
{'id': 'parent', 'item_container': True, 'selector': 'ul'},
{'id': 'repeated_parent', 'item_container': True, 'container_id': 'parent',
'selector': 'li', 'repeated': True}])
schemas = {
'#default': {'name': 'default_item', 'fields': {}},
'data': {
'name': 'data_item',
'fields': {
'title': {'required': False, 'vary': False, 'type': 'text'},
'url': {'required': False, 'vary': False, 'type': 'url'},
'description': {'required': False, 'vary': False, 'type': 'text'},
'rank': {'required': False, 'vary': False, 'type': 'price'}}
}
}
simple_template = HtmlPage(url="http://www.test.com/a",
body=apply_annotations(annotations, html))
target1 = base_page('\n'.join(item_template(idx=i, rank=1)
for i in range(1, 11)))
target2 = base_page('\n'.join(item_template(idx=i, rank=i if i % 2 else '')
for i in range(1, 11)))
target1 = HtmlPage(url="http://www.test.com/a", body=target1)
target2 = HtmlPage(url="http://www.test.com/a", body=target2)
simple_descriptors = {k: create_slybot_item_descriptor(v)
for k, v in schemas.items()}
add_extractors_to_descriptors(simple_descriptors, {})
td = TokenDict()
html_page = HtmlPage(body=open_spec('stack_overflow.html').decode('utf-8'))
extraction_page = parse_extraction_page(td, html_page)
with open('%s/data/SampleProject/items.json' % PATH) as f:
items = json.load(f)
descriptors = {'#default': create_slybot_item_descriptor(items['default'],
'default')}
template = parse_template(td, html_page, descriptors)
unvalidated_template = parse_template(td, html_page, {})
unvalidated_template.id = u'stack_overflow_test'
basic_extractors = BasicTypeExtractor.create(template.annotations)
uncontained_annotation = basic_extractors[0]
root_container = basic_extractors[1]
child_container = basic_extractors[2]
child_annotations = basic_extractors[3:]
sample_411, page_411 = open_sample_and_page('411_list.json')
xceed_spider = open_spec('xceed.json')
def _annotation_tag_to_dict(tag):
return {attr: getattr(tag, attr, object())
for attr in ['annotation_text', 'end_index', 'metadata',
'start_index', 'surrounds_attribute',
'tag_attributes', 'variant_id']}
class ContainerExtractorTest(TestCase):
def test_get_container_info(self):
containers, annotations, remaining_annotations = \
BaseContainerExtractor._get_container_data(basic_extractors)
self.assertEqual(remaining_annotations, [uncontained_annotation])
self.assertEqual(containers, {'root': root_container,
'child': child_container})
self.assertEqual(annotations, {'child': child_annotations,
'root': [child_container]})
# TODO: test template with missing referenced container
def test_build_extraction_tree(self):
containers = {'root': root_container, 'child': child_container}
tree = BaseContainerExtractor._build_extraction_tree(containers)
self.assertEqual([['root'], ['root', 'child']], tree)
# TODO: test cyclical tree
def test_group_tree(self):
annotations = {'child': child_annotations, 'root': [child_container]}
tree1 = [['root']]
self.assertEqual(group_tree(tree1, annotations),
{'root': [child_container]})
tree2 = [['root'], ['root', 'child']]
self.assertEqual(group_tree(tree2, annotations),
{'root': {'child': child_annotations}})
def test_find_annotation(self):
bce = BaseContainerExtractor(basic_extractors, template)
a2d = _annotation_tag_to_dict
self.assertEqual(a2d(bce._find_annotation(template, 'child')),
a2d(child_container.annotation))
self.assertIsNone(bce._find_annotation(template, 'non_existant'))
def test_validate_and_adapt_item(self):
bce = BaseContainerExtractor(basic_extractors, template)
data = {'price': ['10']}
data['_type'] = 'skip_checks'
result = bce._validate_and_adapt_item(data, template).dump()
self.assertEqual(result,
{'price': ['10'], '_type': 'skip_checks'})
data = {
'price': ['10'],
u'description': [u'It can do everything except make calls'],
u'name': ['Smartphone 6']
}
result = data.copy()
result['_type'] = 'default'
extracted = bce._validate_and_adapt_item(data, template).dump()
self.assertEqual(extracted,
result)
data['pid'] = ['13532']
result = data.copy()
result['_type'] = 'default'
extracted = bce._validate_and_adapt_item(data, template).dump()
self.assertEqual(extracted, result)
bce.extra_requires = ['pid', '_sticky1']
data['_sticky1'] = True
extracted = bce._validate_and_adapt_item(data, template).dump()
self.assertEqual(extracted, result)
def test_find_tokens(self):
htt = HtmlTagType
s = RepeatedContainerExtractor._find_tokens(template.page_tokens[::-1],
(htt.OPEN_TAG,),
template)
self.assertEqual(s, [16777216, 16777217, 16777218])
e = RepeatedContainerExtractor._find_tokens(template.page_tokens,
(htt.CLOSE_TAG,),
template)
self.assertEqual(e, [33554432, 33554439, 33554438])
def test_extract(self):
extractors = ContainerExtractor.apply(unvalidated_template,
basic_extractors)
ibl_extractor = TemplatePageMultiItemExtractor(unvalidated_template,
extractors)
data = [i.dump() for i in ibl_extractor.extract(extraction_page)]
self.assertEqual(len(data), 96)
self.assertEqual(
{tuple(sorted(i.keys())) for i in data},
{(u'_index', u'_template', u'date', u'text', u'title', u'url')})
self.assertDictEqual(data[0], {
u'_index': 1,
u'_template': u'stack_overflow_test',
u'date': [u'2015-08-07 10:09:32Z'],
u'text': [u"Bootstrap navbar doesn't open - mobile view"],
u'title': [u'I have a sticky nav with this code (Which is not mine'
u')\n\n// Create a clone of the menu, right next to '
u'original.\n...'],
u'url': [u'https://stackoverflow.com/questions/31875193/bootstrap-'
u'navbar-doesnt-open-mobile-view']
})
self.assertDictEqual(data[50], {
u'_index': 51,
u'_template': u'stack_overflow_test',
u'date': [u'2015-08-07 10:01:03Z'],
u'text': [u'Rails in production with Apache+passenger error'],
u'title': [u"Last days i'm trying to put my rails app in "
u"production with apache and passenger(no rvm), but "
u"still nothing. In my browser i get an error like "
u"this:\n\nWe're sorry, but something went wrong."
u"\nWe've been ..."],
u'url': [u'https://stackoverflow.com/questions/31874997/rails-in-'
u'production-with-apachepassenger-error']
})
self.assertDictEqual(data[-1], {
u'_index': 96,
u'_template': u'stack_overflow_test',
u'date': [u'2015-08-07 08:16:43Z'],
u'text': [u'iPython + Spark + Cassandra - Py4JJavaError and How to'
u' connect to Cassandra from Spark?'],
u'title': [u"How can I connect to Cassandra from Spark with "
u"iPython?\n\nI have followed the code from here and "
u"modified it,\n\nimport os\nimport sys\n\n# Path for "
u"spark source folder\nos.environ['SPARK_HOME'] = ..."],
u'url': [u'https://stackoverflow.com/questions/31872831/ipython-'
u'spark-cassandra-py4jjavaerror-and-how-to-connect-to-'
u'cassandra-from']
})
def test_extract_single_attribute_to_multiple_fields(self):
extractors = {'1': {'regular_expression': '(.*)\s'},
'2': {'regular_expression': '\s(.*)'}}
descriptors = {'#default': create_slybot_item_descriptor({'fields': {
'full_name': {'type': 'text', 'required': False, 'vary': False},
'first_name': {'type': 'text', 'required': False, 'vary': False,
'name': u'prénom'},
'last_name': {'type': 'text', 'required': False, 'vary': False,
'name': 'nom'},
'address': {'type': 'text', 'required': False, 'vary': False}}})}
add_extractors_to_descriptors(descriptors, extractors)
extractor = SlybotIBLExtractor([(sample_411, descriptors, '0.13.0')])
data = extractor.extract(page_411)[0][1]
self.assertEqual(data['full_name'], [u'Joe Smith'])
self.assertEqual(data[u'prénom'], [u'Joe'])
self.assertEqual(data['nom'], [u'Smith'])
def test_extract_missing_schema(self):
extractor = SlybotIBLExtractor([(sample_411, {}, '0.13.0')])
data = extractor.extract(page_411)[0][1]
raw_html = ('<span itemprop="name"><span itemprop="givenName">Joe'
'</span> <span itemprop="familyName">Smith</span></span>')
self.assertEqual(data['full_name'], [raw_html])
self.assertEqual(data['first_name'], [raw_html])
self.assertEqual(data['last_name'], [raw_html])
def test_extract_multiple_item_types(self):
spider = IblSpider('xceed', xceed_spider, xceed_spider['items'], {},
Settings())
data = list(spider.parse(
HtmlResponse('http://url',
body=xceed_spider['templates'][0]['original_body'],
encoding='utf-8')
))
items = [d for d in data if not isinstance(d, Request)]
self.assertEqual(items, xceed_spider['results'])
def test_extract_repeated_field(self):
sample = {
'plugins': {'annotations-plugin': {}},
'url': 'https://stackoverflow.com',
'original_body': re.sub(
'data-scrapy-annotate=".*"', '', html_page._body),
'scrapes': 'default',
'page_id': '507f520c3bf361f4c5cd55c44307a271bccb2218',
'version': '0.13.0'
}
data = open_spec('so_annotations.json')
annos, items, results = data['annos'], data['items'], data['results']
sample['plugins']['annotations-plugin']['extracts'] = annos
spider = IblSpider('so', make_spider(sample=sample),
items, {}, Settings())
page = HtmlResponse('http://url', body=sample['original_body'],
encoding='utf-8')
items = [i for i in spider.parse(page) if not isinstance(i, Request)]
keys = {(u'_index', u'_template', u'_type', u'answered', u'tags',
u'title', 'url')}
self.assertEqual({tuple(sorted(i.keys())) for i in items}, keys)
self.assertEqual([items[0], items[52], items[-1]], results)
self.assertEqual(len(items), 96)
spider, page, results = open_spider_page_and_results('autoevolution.json')
items = [i for i in spider.parse(page) if not isinstance(i, Request)]
self.assertEqual(items, results)
def test_item_merging_in_container(self):
spider, page, results = open_spider_page_and_results('autoevolution2.json')
items = [i for i in spider.parse(page) if not isinstance(i, Request)]
self.assertEqual(items, results)
def test_extracted_items_are_scrapy_items(self):
spider, page, results = open_spider_page_and_results('autoevolution2.json')
items = [i for i in spider.parse(page) if not isinstance(i, Request)]
self.assertTrue(len(items) > 0)
self.assertTrue(all(isinstance(i, Item) for i in items))
def test_required_annotation(self):
ibl_extractor = SlybotIBLExtractor([
(simple_template, simple_descriptors, '0.13.0')
])
data, _ = ibl_extractor.extract(target1)
self.assertEqual(len(data), 10)
self.assertTrue(all('rank' in item and item['rank'] for item in data))
self.assertTrue(all('description' in item and item['description']
for item in data))
data, _ = ibl_extractor.extract(target2)
self.assertEqual(len(data), 5)
self.assertTrue(all('rank' in item and item['rank'] for item in data))
self.assertTrue(all('description' in item and item['description']
for item in data))
def test_missing_selectors(self):
spider, page, results = open_spider_page_and_results('cars.com.json')
items = [i for i in spider.parse(page) if not isinstance(i, Request)]
self.assertEqual(items, results)
def test_against_false_positive(self):
page = open_page('autoevolution.html')
spider, _, _ = open_spider_page_and_results('autoevolution2.json')
items = [i for i in spider.parse(page) if not isinstance(i, Request)]
self.assertEqual(items, [])
def test_nested_items(self):
smanager = SlybotSpiderManager("%s/data/SampleProject" % PATH)
name = 'books.toscrape.com'
spider = smanager.create(name)
spec = smanager._specs["spiders"][name]
t = [t for t in spec["templates"] if t['page_id'] == '3617-44af-a2f0'][0]
response = HtmlResponse(t['url'], body=t['original_body'].encode('utf-8'))
results = [i for i in spider.parse(response)
if hasattr(i, '__getitem__')]
self.assertEqual(results, t['results'])
def test_nested_items_without_nested_structure(self):
spider, page, results = open_spider_page_and_results(
'cars.com_nested.json')
items = [i for i in spider.parse(page) if not isinstance(i, Request)]
self.assertEqual(items, results)
| 48.103933 | 83 | 0.60146 |
8aaac2f1ac6285129e1478d009a7f06189fbfe1d | 413 | py | Python | extra/matplotlib-bare-minimum/hists_plot.py | cookieblues/cookieblues.github.io | 9b570d83887eb2d6f92cfaa927a1adf136124a90 | ["MIT"] | null | null | null | extra/matplotlib-bare-minimum/hists_plot.py | cookieblues/cookieblues.github.io | 9b570d83887eb2d6f92cfaa927a1adf136124a90 | ["MIT"] | 2 | 2020-03-30T14:58:30.000Z | 2020-12-10T15:15:06.000Z | extra/matplotlib-bare-minimum/hists_plot.py | cookieblues/cookieblues.github.io | 9b570d83887eb2d6f92cfaa927a1adf136124a90 | ["MIT"] | null | null | null | import matplotlib.pyplot as plt
import numpy as np
x1 = np.random.randn(10000)-1
x2 = np.random.randn(10000)+1
fig = plt.figure(figsize=(8, 5))
ax = fig.add_subplot()
ax.hist(x1, color='turquoise', edgecolor='none', bins=50, alpha=0.5, density=True)
ax.hist(x2, color='magenta', edgecolor='none', bins=200, alpha=0.5, density=True)
plt.tight_layout()
plt.savefig('hists.svg', bbox_inches='tight')
plt.show()
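# Note (illustrative): density=True normalises each histogram to unit area,
# so the two translucent distributions (centred at -1 and +1) stay directly
# comparable even though they use different bin counts (50 vs 200).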
| 24.294118 | 82 | 0.711864 |
3684ac7c4b8b21a49e00f52668a30d6d57639388 | 8,474 | py | Python | setup.py | prisae/asv | 57c386d7cc27f91ecd8daf1ad2e0413f2efdd39c | ["BSD-3-Clause"] | 2 | 2019-08-18T11:05:25.000Z | 2019-11-17T02:07:18.000Z | setup.py | prisae/asv | 57c386d7cc27f91ecd8daf1ad2e0413f2efdd39c | ["BSD-3-Clause"] | 1 | 2019-02-19T17:11:38.000Z | 2019-02-19T17:11:38.000Z | setup.py | prisae/asv | 57c386d7cc27f91ecd8daf1ad2e0413f2efdd39c | ["BSD-3-Clause"] | null | null | null | #!/usr/bin/env python
import ez_setup
ez_setup.use_setuptools()
from setuptools import setup, Extension, Command
from setuptools.command.test import test as TestCommand
from setuptools.command.sdist import sdist
from setuptools.command.build_ext import build_ext
from distutils.errors import CCompilerError, DistutilsExecError, DistutilsPlatformError
import os
import subprocess
import sys
import ast
# A py.test test command
class PyTest(TestCommand):
user_options = [('pytest-args=', 'a', "Arguments to pass to py.test"),
('coverage', 'c', "Generate coverage report")]
def initialize_options(self):
TestCommand.initialize_options(self)
self.pytest_args = ''
self.coverage = False
def finalize_options(self):
TestCommand.finalize_options(self)
# The following is required for setuptools<18.4
try:
self.test_args = []
except AttributeError:
# fails on setuptools>=18.4
pass
self.test_suite = 'unused'
def run_tests(self):
import pytest
test_args = ['test']
if self.pytest_args:
test_args += self.pytest_args.split()
if self.coverage:
test_args += ['--cov', os.path.abspath('asv')]
errno = pytest.main(test_args)
sys.exit(errno)
class sdist_checked(sdist):
"""Check git submodules on sdist to prevent incomplete tarballs"""
def run(self):
self.__check_submodules()
sdist.run(self)
def __check_submodules(self):
"""
Verify that the submodules are checked out and clean.
"""
if not os.path.exists('.git'):
return
with open('.gitmodules') as f:
for l in f:
if 'path' in l:
p = l.split('=')[-1].strip()
if not os.path.exists(p):
raise ValueError('Submodule %s missing' % p)
proc = subprocess.Popen(['git', 'submodule', 'status'],
stdout=subprocess.PIPE)
status, _ = proc.communicate()
status = status.decode("ascii", "replace")
for line in status.splitlines():
if line.startswith('-') or line.startswith('+'):
raise ValueError('Submodule not clean: %s' % line)
basedir = os.path.abspath(os.path.dirname(__file__))
def get_version():
"""Parse current version number from __init__.py"""
# Grab the first assignment to __version__
version = None
init_py = os.path.join(os.path.dirname(__file__),
'asv', '__init__.py')
with open(init_py, 'r') as f:
source = f.read()
tree = ast.parse(source)
for statement in tree.body:
if (isinstance(statement, ast.Assign) and
len(statement.targets) == 1 and
statement.targets[0].id == '__version__'):
version = statement.value.s
break
if not version:
raise RuntimeError("Failed to parse version from {}".format(init_py))
if 'dev' in version and not version.endswith('.dev'):
raise RuntimeError("Dev version string in {} doesn't end in .dev".format(
init_py))
return version
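# Illustrative sketch (assumed contents of asv/__init__.py):
#   __version__ = '0.4.dev'
# get_version() then returns '0.4.dev'; run_setup() below appends
# '<revision count>+<short githash>', giving e.g. '0.4.dev123+0a1b2c3d'.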
def get_git_hash():
"""
    Get the git commit hash of the current checkout ("" outside a git repo).
"""
# Obtain git revision
githash = ""
if os.path.isdir(os.path.join(basedir, '.git')):
try:
proc = subprocess.Popen(
['git', '-C', basedir, 'rev-parse', 'HEAD'],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
rev, err = proc.communicate()
if proc.returncode == 0:
githash = rev.strip().decode('ascii')
except OSError:
pass
return githash
def get_git_revision():
"""
Get the number of revisions since the beginning.
"""
revision = "0"
if os.path.isdir(os.path.join(basedir, '.git')):
try:
proc = subprocess.Popen(
['git', '-C', basedir, 'rev-list', '--count', 'HEAD'],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
rev, err = proc.communicate()
if proc.returncode == 0:
revision = rev.strip().decode('ascii')
except OSError:
pass
return revision
def write_version_file(filename, suffix, githash):
# Write revision file (only if it needs to be changed)
content = ('__suffix__ = "{0}"\n'
'__githash__ = "{1}"\n'.format(suffix, githash))
if not githash.strip():
# Not in git repository; probably in sdist, so keep old
# version file
return
old_content = None
if os.path.isfile(filename):
with open(filename, 'r') as f:
old_content = f.read()
if content != old_content:
with open(filename, 'w') as f:
f.write(content)
class BuildFailed(Exception):
pass
class optional_build_ext(build_ext):
def run(self):
try:
build_ext.run(self)
except DistutilsPlatformError:
raise BuildFailed()
def build_extension(self, ext):
try:
build_ext.build_extension(self, ext)
except (CCompilerError, DistutilsExecError, DistutilsPlatformError,
IOError, ValueError):
raise BuildFailed()
def run_setup(build_binary=False):
version = get_version()
git_hash = get_git_hash()
if version.endswith('.dev'):
suffix = '{0}+{1}'.format(get_git_revision(), git_hash[:8])
version += suffix
else:
suffix = ''
write_version_file(os.path.join(basedir, 'asv', '_version.py'),
suffix, git_hash)
# Install entry points for making releases with zest.releaser
entry_points = {}
for hook in [('releaser', 'middle'), ('postreleaser', 'before')]:
hook_ep = 'zest.releaser.' + '.'.join(hook)
hook_name = 'asv.release.' + '.'.join(hook)
hook_func = 'asv._release:' + '_'.join(hook)
entry_points[hook_ep] = ['%s = %s' % (hook_name, hook_func)]
entry_points['console_scripts'] = ['asv = asv.main:main']
if build_binary:
ext_modules = [Extension("asv._rangemedian", ["asv/_rangemedian.cpp"])]
else:
ext_modules = []
with open('README.rst', 'r') as f:
long_description = f.read()
setup(
name="asv",
version=version,
packages=['asv',
'asv.commands',
'asv.plugins',
'asv.extern',
'asv._release'],
entry_points=entry_points,
        ext_modules=ext_modules,
install_requires=[
str('six>=1.4')
],
extras_require={
str('hg'): ["python-hglib>=1.5"]
},
package_data={
str('asv'): [
'www/*.html',
'www/*.js',
'www/*.css',
'www/*.png',
'www/*.ico',
'www/flot/*.js',
'www/vendor/*.css',
'www/vendor/*.js',
'template/__init__.py',
'template/asv.conf.json',
'template/benchmarks/*.py'
]
},
python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*',
zip_safe=False,
# py.test testing
tests_require=['pytest'],
cmdclass={'test': PyTest,
'build_ext': optional_build_ext,
'sdist': sdist_checked},
author="Michael Droettboom",
author_email="mdroe@stsci.edu",
description="Airspeed Velocity: A simple Python history benchmarking tool",
license="BSD",
url="https://github.com/airspeed-velocity/asv",
long_description=long_description,
classifiers=[
'Environment :: Console',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
'Topic :: Software Development :: Testing',
]
)
if __name__ == "__main__":
try:
run_setup(build_binary=True)
except BuildFailed:
print("Compiling asv._rangemedian failed -- continuing without it")
run_setup(build_binary=False)
| 29.838028 | 87 | 0.559712 |
6b2b0945ad4f9ce7d6f435f6ff27cb8b8c6d4fc0 | 14,170 | py | Python | brownie/network/account.py | Skyge/brownie | 01785c803155d340fd0ea1bcfdf1b5e2670470d5 | [
"MIT"
] | null | null | null | brownie/network/account.py | Skyge/brownie | 01785c803155d340fd0ea1bcfdf1b5e2670470d5 | [
"MIT"
] | 156 | 2020-07-20T21:23:47.000Z | 2021-07-27T21:21:46.000Z | brownie/network/account.py | Skyge/brownie | 01785c803155d340fd0ea1bcfdf1b5e2670470d5 | [
"MIT"
] | null | null | null | #!/usr/bin/python3
import json
import os
import threading
from getpass import getpass
from pathlib import Path
from typing import Any, Dict, Iterator, List, Optional, Tuple, Union
import eth_keys
from eth_hash.auto import keccak
from hexbytes import HexBytes
from brownie._config import CONFIG, _get_data_folder
from brownie._singleton import _Singleton
from brownie.convert import Wei, to_address
from brownie.exceptions import IncompatibleEVMVersion, UnknownAccount, VirtualMachineError
from .rpc import Rpc, _revert_register
from .transaction import TransactionReceipt
from .web3 import _resolve_address, web3
__tracebackhide__ = True
rpc = Rpc()
class Accounts(metaclass=_Singleton):
"""
List-like container that holds all available `Account` objects.
Attributes
----------
default : Account, optional
Default account to broadcast transactions from.
"""
def __init__(self) -> None:
self.default = None
self._accounts: List = []
# prevent private keys from being stored in readline history
self.add.__dict__["_private"] = True
_revert_register(self)
self._reset()
def _reset(self) -> None:
self._accounts.clear()
try:
self._accounts = [Account(i) for i in web3.eth.accounts]
except Exception:
pass
if self.default not in self._accounts:
self.default = None
def _revert(self, height: int) -> None:
# must exist for rpc registry callback
pass
def __contains__(self, address: str) -> bool:
try:
address = to_address(address)
return address in self._accounts
except ValueError:
return False
def __repr__(self) -> str:
return str(self._accounts)
def __iter__(self) -> Iterator:
return iter(self._accounts)
def __getitem__(self, key: int) -> Any:
return self._accounts[key]
def __delitem__(self, key: int) -> None:
del self._accounts[key]
def __len__(self) -> int:
return len(self._accounts)
def add(self, priv_key: Union[int, bytes, str] = None) -> "LocalAccount":
"""Creates a new ``LocalAccount`` instance and appends it to the container.
Args:
priv_key: Private key of the account. If none is given, one is
randomly generated.
Returns:
Account instance."""
private_key: Union[int, bytes, str]
if not priv_key:
private_key = "0x" + keccak(os.urandom(8192)).hex()
else:
private_key = priv_key
w3account = web3.eth.account.from_key(private_key)
if w3account.address in self._accounts:
return self.at(w3account.address)
account = LocalAccount(w3account.address, w3account, private_key)
self._accounts.append(account)
return account
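    # Hedged usage sketch (interactive brownie console assumed):
    #   acct = accounts.add()               # generate a fresh random key
    #   acct = accounts.add('0x<hex key>')  # or import an existing private key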
def load(self, filename: str = None) -> Union[List, "LocalAccount"]:
"""Loads a local account from a keystore file.
Args:
filename: Keystore filename. If none is given, returns a list of
available keystores.
Returns:
Account instance."""
project_path = _get_data_folder().joinpath("accounts")
if not filename:
return [i.stem for i in project_path.glob("*.json")]
filename = str(filename)
if not filename.endswith(".json"):
filename += ".json"
json_file = Path(filename).expanduser()
if not json_file.exists():
json_file = project_path.joinpath(filename)
if not json_file.exists():
raise FileNotFoundError(f"Cannot find {json_file}")
with json_file.open() as fp:
priv_key = web3.eth.account.decrypt(
json.load(fp), getpass("Enter the password to unlock this account: ")
)
return self.add(priv_key)
def at(self, address: str) -> "LocalAccount":
"""Retrieves an Account instance from the address string. Raises
ValueError if the account cannot be found.
Args:
address: string of the account address.
Returns:
Account instance.
"""
address = _resolve_address(address)
try:
return next(i for i in self._accounts if i == address)
except StopIteration:
raise UnknownAccount(f"No account exists for {address}")
def remove(self, address: str) -> None:
"""Removes an account instance from the container.
Args:
address: Account instance or address string of account to remove."""
address = _resolve_address(address)
try:
self._accounts.remove(address)
except ValueError:
raise UnknownAccount(f"No account exists for {address}")
def clear(self) -> None:
"""Empties the container."""
self._accounts.clear()
class PublicKeyAccount:
"""Class for interacting with an Ethereum account where you do not control
the private key. Can be used to check the balance or nonce, and to send ether to."""
def __init__(self, addr: str) -> None:
self.address = _resolve_address(addr)
def __repr__(self) -> str:
return f"<{type(self).__name__} '{self.address}'>"
def __hash__(self) -> int:
return hash(self.address)
def __str__(self) -> str:
return self.address
def __eq__(self, other: Union[object, str]) -> bool:
if isinstance(other, str):
try:
address = _resolve_address(other)
return address == self.address
except ValueError:
return False
if isinstance(other, PublicKeyAccount):
return other.address == self.address
return super().__eq__(other)
def balance(self) -> Wei:
"""Returns the current balance at the address, in wei."""
balance = web3.eth.getBalance(self.address)
return Wei(balance)
@property
def nonce(self) -> int:
return web3.eth.getTransactionCount(self.address)
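    # Illustrative sketch (address is a placeholder): watch-only accounts can be
    # queried but cannot sign:
    #   watched = PublicKeyAccount('0x...')  # ENS names also resolve here
    #   watched.balance()                    # -> Wei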
class _PrivateKeyAccount(PublicKeyAccount):
"""Base class for Account and LocalAccount"""
def _gas_limit(self, to: Union[str, "Accounts"], amount: Optional[int], data: str = "") -> int:
gas_limit = CONFIG.active_network["settings"]["gas_limit"]
if isinstance(gas_limit, bool) or gas_limit in (None, "auto"):
return self.estimate_gas(to, amount, data)
return Wei(gas_limit)
def _gas_price(self) -> Wei:
gas_price = CONFIG.active_network["settings"]["gas_price"]
if isinstance(gas_price, bool) or gas_price in (None, "auto"):
return web3.eth.gasPrice
return Wei(gas_price)
def _check_for_revert(self, tx: Dict) -> None:
if not CONFIG.active_network["settings"]["reverting_tx_gas_limit"]:
try:
web3.eth.call(dict((k, v) for k, v in tx.items() if v))
except ValueError as e:
raise VirtualMachineError(e) from None
def deploy(
self,
contract: Any,
*args: Tuple,
amount: Optional[int] = None,
gas_limit: Optional[int] = None,
gas_price: Optional[int] = None,
) -> Any:
"""Deploys a contract.
Args:
contract: ContractContainer instance.
*args: Constructor arguments. The last argument may optionally be
a dictionary of transaction values.
Kwargs:
amount: Amount of ether to send with transaction, in wei.
gas_limit: Gas limit of the transaction.
gas_price: Gas price of the transaction.
Returns:
* Contract instance if the transaction confirms
* TransactionReceipt if the transaction is pending or reverts"""
evm = contract._build["compiler"]["evm_version"]
if rpc.is_active() and not rpc.evm_compatible(evm):
raise IncompatibleEVMVersion(
f"Local RPC using '{rpc.evm_version()}' but contract was compiled for '{evm}'"
)
data = contract.deploy.encode_input(*args)
try:
txid = self._transact( # type: ignore
{
"from": self.address,
"value": Wei(amount),
"nonce": self.nonce,
"gasPrice": Wei(gas_price) or self._gas_price(),
"gas": Wei(gas_limit) or self._gas_limit("", amount, data),
"data": HexBytes(data),
}
)
revert_data = None
except ValueError as e:
txid, revert_data = _raise_or_return_tx(e)
tx = TransactionReceipt(
txid, self, name=contract._name + ".constructor", revert_data=revert_data
)
add_thread = threading.Thread(target=contract._add_from_tx, args=(tx,), daemon=True)
add_thread.start()
if tx.status != 1:
return tx
add_thread.join()
return contract.at(tx.contract_address)
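    # Hedged usage sketch (assumes a compiled project exposing a Token container):
    #   token = accounts[0].deploy(Token, "My Token", "TKN", 18, 1e21)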
def estimate_gas(
self, to: Union[str, "Accounts"], amount: Optional[int], data: str = ""
) -> int:
"""Estimates the gas cost for a transaction. Raises VirtualMachineError
if the transaction would revert.
Args:
to: Account instance or address string of transaction recipient.
amount: Amount of ether to send in wei.
data: Transaction data hexstring.
Returns:
Estimated gas value in wei."""
try:
return web3.eth.estimateGas(
{"from": self.address, "to": str(to), "value": Wei(amount), "data": HexBytes(data)}
)
except ValueError:
if CONFIG.active_network["settings"]["reverting_tx_gas_limit"]:
return CONFIG.active_network["settings"]["reverting_tx_gas_limit"]
raise
def transfer(
self,
to: "Accounts",
amount: int,
gas_limit: int = None,
gas_price: int = None,
data: str = "",
) -> "TransactionReceipt":
"""Transfers ether from this account.
Args:
to: Account instance or address string to transfer to.
amount: Amount of ether to send, in wei.
Kwargs:
gas_limit: Gas limit of the transaction.
gas_price: Gas price of the transaction.
data: Hexstring of data to include in transaction.
Returns:
TransactionReceipt object"""
try:
txid = self._transact( # type: ignore
{
"from": self.address,
"to": to_address(str(to)),
"value": Wei(amount),
"nonce": self.nonce,
"gasPrice": Wei(gas_price) if gas_price is not None else self._gas_price(),
"gas": Wei(gas_limit) or self._gas_limit(to, amount, data),
"data": HexBytes(data),
}
)
revert_data = None
except ValueError as e:
txid, revert_data = _raise_or_return_tx(e)
return TransactionReceipt(txid, self, revert_data=revert_data)
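    # Hedged usage sketch (funded development accounts assumed):
    #   tx = accounts[0].transfer(accounts[1], 10 ** 18)  # 1 ether, in wei
    #   tx.status  # 1 on success, 0 on revert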
class Account(_PrivateKeyAccount):
"""Class for interacting with an Ethereum account.
Attributes:
address: Public address of the account.
nonce: Current nonce of the account."""
def _transact(self, tx: Dict) -> Any:
self._check_for_revert(tx)
return web3.eth.sendTransaction(tx)
class LocalAccount(_PrivateKeyAccount):
"""Class for interacting with an Ethereum account.
Attributes:
address: Public address of the account.
nonce: Current nonce of the account.
private_key: Account private key.
public_key: Account public key."""
def __init__(self, address: str, account: Account, priv_key: Union[int, bytes, str]) -> None:
self._acct = account
self.private_key = priv_key
self.public_key = eth_keys.keys.PrivateKey(HexBytes(priv_key)).public_key
super().__init__(address)
def save(self, filename: str, overwrite: bool = False) -> str:
"""Encrypts the private key and saves it in a keystore json.
Attributes:
filename: path to keystore file. If no folder is given, saved in
~/.brownie/accounts
overwrite: if True, will overwrite an existing file.
Returns the absolute path to the keystore file as a string.
"""
path = _get_data_folder().joinpath("accounts")
path.mkdir(exist_ok=True)
filename = str(filename)
if not filename.endswith(".json"):
filename += ".json"
if not any(i in r"\/" for i in filename):
json_file = path.joinpath(filename).resolve()
else:
json_file = Path(filename).expanduser().resolve()
if not overwrite and json_file.exists():
raise FileExistsError("Account with this identifier already exists")
encrypted = web3.eth.account.encrypt(
self.private_key, getpass("Enter the password to encrypt this account with: ")
)
with json_file.open("w") as fp:
json.dump(encrypted, fp)
return str(json_file)
def _transact(self, tx: Dict) -> None:
self._check_for_revert(tx)
signed_tx = self._acct.sign_transaction(tx).rawTransaction # type: ignore
return web3.eth.sendRawTransaction(signed_tx)
def _raise_or_return_tx(exc: ValueError) -> Any:
try:
data = eval(str(exc))["data"]
txid = next(i for i in data.keys() if i[:2] == "0x")
reason = data[txid]["reason"] if "reason" in data[txid] else None
pc = data[txid]["program_counter"]
revert_type = data[txid]["error"]
if revert_type == "revert":
pc -= 1
return txid, [reason, pc, revert_type]
except SyntaxError:
raise exc
except Exception:
raise VirtualMachineError(exc) from None
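# Editorial note (assumption about the client payload): the ValueError raised by
# ganache-style RPCs embeds a dict repr whose "data" key maps each txid to
# {reason, program_counter, error}; eval() above re-parses that repr, and any
# message that doesn't match falls through to VirtualMachineError.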
| 34.309927 | 99 | 0.600071 |
33660a3b30ef8d36b7d9062f1604032212d0b3f7 | 4,663 | py | Python | projects/self_feeding/scripts/convert_rated_to_polarized.py | min942773/parlai_wandb | 1d9ba1a0df2199d0247cee8c4929a2598ac7e41a | [
"MIT"
] | 2 | 2017-09-20T21:49:51.000Z | 2018-08-12T06:58:10.000Z | projects/self_feeding/scripts/convert_rated_to_polarized.py | min942773/parlai_wandb | 1d9ba1a0df2199d0247cee8c4929a2598ac7e41a | [
"MIT"
] | 7 | 2021-01-12T01:07:03.000Z | 2022-03-12T00:50:45.000Z | projects/self_feeding/scripts/convert_rated_to_polarized.py | min942773/parlai_wandb | 1d9ba1a0df2199d0247cee8c4929a2598ac7e41a | [
"MIT"
] | 2 | 2020-10-29T18:14:33.000Z | 2020-11-07T09:46:23.000Z | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from argparse import ArgumentParser
import json
from parlai.projects.self_feeding.utils import (
Parley,
extract_parlai_episodes,
add_person_tokens,
)
from parlai.mturk.tasks.self_feeding.rating.worlds import (
NEW_TOPIC_REQUEST,
SUGGESTION_REQUEST,
)
from parlai.utils.io import PathManager
# Initial prompts vary due to the random nouns, but all will start this way
INITIAL_PROMPT = "start a conversation"
REPORT_DIR = '/private/home/bhancock/metadialog/mturk/reports'
DATA_DIR = '/private/home/bhancock/metadialog/data'
DEFAULT_IN = REPORT_DIR + '/20181105/pilot_1.txt'
DEFAULT_OUT = DATA_DIR + '/feedback_classifier/temp.txt'
def setup_args():
parser = ArgumentParser()
parser.add_argument('-if', '--infile', type=str, default=DEFAULT_IN)
parser.add_argument('-of', '--outfile', type=str, default=DEFAULT_OUT)
parser.add_argument(
'-histsz',
'--history-size',
type=int,
default=-1,
help="The number of turns to include in the prompt.",
)
parser.add_argument(
'-pos',
'--positives',
type=str,
default='3,4,5',
help="A comma-separated list of ratings with positive label",
)
parser.add_argument(
'-neg',
'--negatives',
type=str,
default='1',
help="A comma-separated list of ratings with negative label",
)
config = vars(parser.parse_args())
return config
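# Hypothetical invocation (paths illustrative only):
#   python convert_rated_to_polarized.py -if pilot_1.txt -of polarized.txt \
#       -histsz 2 -pos 4,5 -neg 1,2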
def main(config):
"""
Extracts training data for the negative response classifier (NRC) from Mturk logs.
input: file of logs (in ParlaiDialog format) from Mturk task 1 with turn-by-turn
quality ratings 1-5
output: file of episodes (self-feeding format) w/ +1/-1 ratings indicating
positive/negative example
"""
examples = []
positives = config['positives'].split(',')
negatives = config['negatives'].split(',')
assert len(set(positives).intersection(set(negatives))) == 0
num_episodes = 0
num_parleys = 0
for episode in extract_parlai_episodes(config['infile']):
num_episodes += 1
history = []
for parley in episode:
num_parleys += 1
# Update history (not including stock control flow responses)
if parley.context.startswith(INITIAL_PROMPT.lower()):
# Conversation prompt, first utterance
history = [parley.response]
elif parley.context.startswith(SUGGESTION_REQUEST.lower()):
# Asked for y_exp, got y_exp
pass
elif parley.context.startswith(NEW_TOPIC_REQUEST.lower()):
# Asked for new topic, got a first utterance
history = [parley.response]
else:
history.append(parley.context)
history.append(parley.response)
# Only create a new example if this parley's rating is relevant
if parley.reward in (positives + negatives):
# Concatenate history and add speaker tokens as necessary
                # history_size refers to the total number of utterances kept,
                # including the response that's being classified
                # (history_size == 0 means predict sentiment from '__null__';
                # history_size == 1 classifies the response w/o extra context).
# Note that the response being classified should always be preceded by
# __p1__ (the human), not __p2__ (the bot).
if config['history_size'] < 0:
utterances = history
elif config['history_size'] == 0:
utterances = ['__null__']
else:
utterances = history[-config['history_size'] :]
context = add_person_tokens(utterances, last_speaker=1)
label = 1 if parley.reward in positives else -1
example = Parley(context, label)
examples.append(example)
with PathManager.open(config['outfile'], 'w') as outfile:
for ex in examples:
outfile.write(json.dumps(ex.to_dict()) + '\n')
print(
f"Extracted {len(examples)} ratings out of {num_episodes} episodes "
f"({num_parleys} parleys) and wrote them to {config['outfile']} with "
f"histsz == {config['history_size']}."
)
if __name__ == '__main__':
config = setup_args()
main(config)
| 34.798507 | 86 | 0.620416 |
a56ed868570dc21397155a34a2e3aea272b13c20 | 3,194 | py | Python | openpyxl/tests/test_named_range.py | Zhaoxun/openpyxl-1 | a483787c1bf3b167582e97bdbd5b695b402367da | [
"MIT"
] | 76 | 2015-08-01T06:06:39.000Z | 2021-12-09T15:14:39.000Z | openpyxl/tests/test_named_range.py | Zhaoxun/openpyxl-1 | a483787c1bf3b167582e97bdbd5b695b402367da | [
"MIT"
] | 68 | 2015-08-20T04:30:14.000Z | 2021-08-02T21:07:40.000Z | openpyxl/tests/test_named_range.py | Zhaoxun/openpyxl-1 | a483787c1bf3b167582e97bdbd5b695b402367da | [
"MIT"
] | 29 | 2015-08-26T18:35:20.000Z | 2022-03-30T06:21:28.000Z | # file openpyxl/tests/test_named_range.py
# Copyright (c) 2010 openpyxl
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# @license: http://www.opensource.org/licenses/mit-license.php
# @author: Eric Gazoni
# Python stdlib imports
from __future__ import with_statement
import os.path
# 3rd-party imports
from nose.tools import eq_, assert_raises
# package imports
from openpyxl.tests.helper import DATADIR
from openpyxl.namedrange import split_named_range
from openpyxl.reader.workbook import read_named_ranges
from openpyxl.shared.exc import NamedRangeException
from openpyxl.reader.excel import load_workbook
def test_split():
eq_([('My Sheet', '$D$8'), ], split_named_range("'My Sheet'!$D$8"))
def test_split_no_quotes():
eq_([('HYPOTHESES', '$B$3:$L$3'), ], split_named_range('HYPOTHESES!$B$3:$L$3'))
def test_bad_range_name():
assert_raises(NamedRangeException, split_named_range, 'HYPOTHESES$B$3')
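# Editorial note: together these three tests pin down split_named_range's
# contract -- quoted and unquoted sheet names both parse to (sheet, range)
# pairs, and a reference missing the '!' separator raises NamedRangeException.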
def test_read_named_ranges():
class DummyWs(object):
title = 'My Sheeet'
def __str__(self):
return self.title
class DummyWB(object):
def get_sheet_by_name(self, name):
return DummyWs()
with open(os.path.join(DATADIR, 'reader', 'workbook.xml')) as handle:
content = handle.read()
named_ranges = read_named_ranges(content, DummyWB())
eq_(["My Sheeet!$D$8"], [str(range) for range in named_ranges])
def test_oddly_shaped_named_ranges():
ranges_counts = ((4, 'TEST_RANGE'),
(3, 'TRAP_1'),
(13, 'TRAP_2'))
def check_ranges(ws, count, range_name):
eq_(count, len(ws.range(range_name)))
wb = load_workbook(os.path.join(DATADIR, 'genuine', 'merge_range.xlsx'),
use_iterators = False)
ws = wb.worksheets[0]
for count, range_name in ranges_counts:
yield check_ranges, ws, count, range_name
def test_merged_cells_named_range():
wb = load_workbook(os.path.join(DATADIR, 'genuine', 'merge_range.xlsx'),
use_iterators = False)
ws = wb.worksheets[0]
cell = ws.range('TRAP_3')
eq_('B15', cell.get_coordinate())
eq_(10, cell.value)
| 31.009709 | 83 | 0.706324 |
7eac52c4193e92b031e59d37ce8dad7c0423290c | 700 | py | Python | colors.py | Kobemeka/lambda-clock | bc0c8f5cbe4dc87664bd4667e73a8c11f2b85bb0 | [
"MIT"
] | null | null | null | colors.py | Kobemeka/lambda-clock | bc0c8f5cbe4dc87664bd4667e73a8c11f2b85bb0 | [
"MIT"
] | null | null | null | colors.py | Kobemeka/lambda-clock | bc0c8f5cbe4dc87664bd4667e73a8c11f2b85bb0 | [
"MIT"
] | null | null | null | Stop = '\033[0m'
colors = {
"Black":'\033[0;30m',
"Red":'\033[0;31m',
"Green":'\033[0;32m',
"Yellow":'\033[0;33m',
"Blue":'\033[0;34m',
"Purple":'\033[0;35m',
"Cyan":'\033[0;36m',
"White":'\033[0;37m',
# custom rgb colors
"LightBlue": '\033[38;2;69;80;230m',
"Pink": '\033[38;2;252;105;255m',
"Orange":'\033[38;2;252;111;3m',
"LightYellow": '\033[38;2;158;255;105m'
}
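# Minimal usage sketch (assumption, not part of the original module):
def colorize(text, color_name):
    """Wrap text in the named ANSI colour code and reset it afterwards."""
    return colors[color_name] + text + Stop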
character_colors = {
# TODO: background color
# Test colors
"1": "Red",
"2": "Blue",
"3": "Green",
"4": "Yellow",
"5": "LightYellow",
"6": "Cyan",
"7": "Purple",
"8": "Orange",
"9": "LightBlue",
"0": "Pink",
":": "White"
} | 20 | 43 | 0.474286 |
a8b6c46a5e78b65b106b41e1fda0fc07139e86cc | 82 | py | Python | dj_highcharts/views.py | oscarmcm/django-highcharts | f6b3b36113a8569b168f9f394873af2f87048ff0 | [
"MIT"
] | 1 | 2016-12-30T21:32:32.000Z | 2016-12-30T21:32:32.000Z | dj_highcharts/views.py | oscarmcm/django-highcharts | f6b3b36113a8569b168f9f394873af2f87048ff0 | [
"MIT"
] | null | null | null | dj_highcharts/views.py | oscarmcm/django-highcharts | f6b3b36113a8569b168f9f394873af2f87048ff0 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from .models import (
Chart,
Data,
Config,
)
| 10.25 | 23 | 0.5 |