# File: finder.py | Repo: wilsontayar/unused-file-finder | License: MIT
import sys
import os
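# Scans a web project tree for view/asset files (Razor views, images, scripts,
# styles) that are never referenced by any source file in the same tree.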
LOOKUP_EXTENSIONS = [".cshtml", ".gif", ".jpg", ".png", ".js", ".css"]
FILES_TO_SEARCH = [".cshtml", ".cs", ".css", ".less", ".js"]
def main(argv):
directory = argv[0]
files_to_look_for = find_view_files_in_directory(directory)
print_break()
print("Loading files...")
print_break()
print("files to look for: {0}".format(len(files_to_look_for)))
print_break()
print("Looking for unused files...")
print_break()
results = {'using': [], 'not_using': []}
for file_name in files_to_look_for:
references, looked_at = find_references_for_view_file(directory, file_name)
if references:
results['using'].append(file_name)
else:
results['not_using'].append(file_name)
print("USING: {0} files".format(len(results['using'])))
print("NOT USING: {0} files".format(len(results['not_using'])))
for file in results['not_using']:
print(file)
def print_break():
print("-" * 45)
def prepare_file_name_to_look_for(file_name):
if ".cshtml" in file_name:
return file_name.replace(".cshtml", "")
return file_name
def find_references_for_view_file(directory, file_name):
using = []
looking_in = []
for root, directories, files in os.walk(directory):
for filename in [f for f in files if any([f.endswith(ext) for ext in FILES_TO_SEARCH])]:
looking_in.append(os.path.join(root, filename))
with open(os.path.join(root, filename), 'r', encoding="ISO-8859-1") as searchfile:
content = searchfile.read()
if prepare_file_name_to_look_for(file_name) in content:
using.append(filename)
return (using, looking_in)
def find_view_files_in_directory(directory):
views = []
for root, directories, files in os.walk(directory):
for ext in LOOKUP_EXTENSIONS:
for filename in [f for f in files if f.endswith(ext)]:
views.append(filename)
return views
if __name__ == "__main__":
if len(sys.argv) == 1:
sys.exit("Argument required: application path")
main(sys.argv[1:])
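# Minimal usage sketch (the path below is a made-up example, not taken from this
# repo): pass the root of the web project to scan as the only argument, e.g.
#
#     python finder.py /path/to/my-webapp
#
# The script then prints how many files are referenced and lists every file that
# no source file mentions.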
# File: saleor/order/__init__.py | Repo: ibutiti/saleor | License: BSD-3-Clause
from dataclasses import dataclass
from typing import TYPE_CHECKING, Optional
if TYPE_CHECKING:
from ..product.models import ProductVariant
from .models import FulfillmentLine, OrderLine
class OrderStatus:
DRAFT = "draft" # fully editable, not finalized order created by staff users
UNCONFIRMED = (
"unconfirmed" # order created by customers when confirmation is required
)
UNFULFILLED = "unfulfilled" # order with no items marked as fulfilled
PARTIALLY_FULFILLED = (
"partially fulfilled" # order with some items marked as fulfilled
)
FULFILLED = "fulfilled" # order with all items marked as fulfilled
PARTIALLY_RETURNED = (
"partially_returned" # order with some items marked as returned
)
RETURNED = "returned" # order with all items marked as returned
CANCELED = "canceled" # permanently canceled order
CHOICES = [
(DRAFT, "Draft"),
(UNCONFIRMED, "Unconfirmed"),
(UNFULFILLED, "Unfulfilled"),
(PARTIALLY_FULFILLED, "Partially fulfilled"),
(PARTIALLY_RETURNED, "Partially returned"),
(RETURNED, "Returned"),
(FULFILLED, "Fulfilled"),
(CANCELED, "Canceled"),
]
class OrderOrigin:
CHECKOUT = "checkout" # order created from checkout
DRAFT = "draft" # order created from draft order
REISSUE = "reissue" # order created from reissue existing one
CHOICES = [
(CHECKOUT, "Checkout"),
(DRAFT, "Draft"),
(REISSUE, "Reissue"),
]
class FulfillmentStatus:
FULFILLED = "fulfilled" # group of products in an order marked as fulfilled
REFUNDED = "refunded" # group of refunded products
RETURNED = "returned" # group of returned products
REFUNDED_AND_RETURNED = (
"refunded_and_returned" # group of returned and replaced products
)
REPLACED = "replaced" # group of replaced products
CANCELED = "canceled" # fulfilled group of products in an order marked as canceled
CHOICES = [
(FULFILLED, "Fulfilled"),
(REFUNDED, "Refunded"),
(RETURNED, "Returned"),
(REPLACED, "Replaced"),
(REFUNDED_AND_RETURNED, "Refunded and returned"),
(CANCELED, "Canceled"),
]
class OrderEvents:
"""The different order event types."""
CONFIRMED = "confirmed"
DRAFT_CREATED = "draft_created"
DRAFT_CREATED_FROM_REPLACE = "draft_created_from_replace"
ADDED_PRODUCTS = "added_products"
REMOVED_PRODUCTS = "removed_products"
PLACED = "placed"
PLACED_FROM_DRAFT = "placed_from_draft"
OVERSOLD_ITEMS = "oversold_items"
CANCELED = "canceled"
ORDER_MARKED_AS_PAID = "order_marked_as_paid"
ORDER_FULLY_PAID = "order_fully_paid"
ORDER_REPLACEMENT_CREATED = "order_replacement_created"
ORDER_DISCOUNT_ADDED = "order_discount_added"
ORDER_DISCOUNT_AUTOMATICALLY_UPDATED = "order_discount_automatically_updated"
ORDER_DISCOUNT_UPDATED = "order_discount_updated"
ORDER_DISCOUNT_DELETED = "order_discount_deleted"
ORDER_LINE_DISCOUNT_UPDATED = "order_line_discount_updated"
ORDER_LINE_DISCOUNT_REMOVED = "order_line_discount_removed"
UPDATED_ADDRESS = "updated_address"
EMAIL_SENT = "email_sent"
PAYMENT_AUTHORIZED = "payment_authorized"
PAYMENT_CAPTURED = "payment_captured"
PAYMENT_REFUNDED = "payment_refunded"
PAYMENT_VOIDED = "payment_voided"
PAYMENT_FAILED = "payment_failed"
EXTERNAL_SERVICE_NOTIFICATION = "external_service_notification"
INVOICE_REQUESTED = "invoice_requested"
INVOICE_GENERATED = "invoice_generated"
INVOICE_UPDATED = "invoice_updated"
INVOICE_SENT = "invoice_sent"
FULFILLMENT_CANCELED = "fulfillment_canceled"
FULFILLMENT_RESTOCKED_ITEMS = "fulfillment_restocked_items"
FULFILLMENT_FULFILLED_ITEMS = "fulfillment_fulfilled_items"
FULFILLMENT_REFUNDED = "fulfillment_refunded"
FULFILLMENT_RETURNED = "fulfillment_returned"
FULFILLMENT_REPLACED = "fulfillment_replaced"
TRACKING_UPDATED = "tracking_updated"
NOTE_ADDED = "note_added"
# Used mostly for importing legacy data from before Enum-based events
OTHER = "other"
CHOICES = [
(DRAFT_CREATED, "The draft order was created"),
(DRAFT_CREATED_FROM_REPLACE, "The draft order with replace lines was created"),
(ADDED_PRODUCTS, "Some products were added to the order"),
(REMOVED_PRODUCTS, "Some products were removed from the order"),
(PLACED, "The order was placed"),
(PLACED_FROM_DRAFT, "The draft order was placed"),
(OVERSOLD_ITEMS, "The draft order was placed with oversold items"),
(CANCELED, "The order was canceled"),
(ORDER_MARKED_AS_PAID, "The order was manually marked as fully paid"),
(ORDER_FULLY_PAID, "The order was fully paid"),
(ORDER_REPLACEMENT_CREATED, "The draft order was created based on this order."),
(ORDER_DISCOUNT_ADDED, "New order discount applied to this order."),
(
ORDER_DISCOUNT_AUTOMATICALLY_UPDATED,
"Order discount was automatically updated after the changes in order.",
),
(ORDER_DISCOUNT_UPDATED, "Order discount was updated for this order."),
(ORDER_DISCOUNT_DELETED, "Order discount was deleted for this order."),
(ORDER_LINE_DISCOUNT_UPDATED, "Order line was discounted."),
(ORDER_LINE_DISCOUNT_REMOVED, "The discount for order line was removed."),
(UPDATED_ADDRESS, "The address from the placed order was updated"),
(EMAIL_SENT, "The email was sent"),
(CONFIRMED, "Order was confirmed"),
(PAYMENT_AUTHORIZED, "The payment was authorized"),
(PAYMENT_CAPTURED, "The payment was captured"),
(EXTERNAL_SERVICE_NOTIFICATION, "Notification from external service"),
(PAYMENT_REFUNDED, "The payment was refunded"),
(PAYMENT_VOIDED, "The payment was voided"),
(PAYMENT_FAILED, "The payment was failed"),
(INVOICE_REQUESTED, "An invoice was requested"),
(INVOICE_GENERATED, "An invoice was generated"),
(INVOICE_UPDATED, "An invoice was updated"),
(INVOICE_SENT, "An invoice was sent"),
(FULFILLMENT_CANCELED, "A fulfillment was canceled"),
(FULFILLMENT_RESTOCKED_ITEMS, "The items of the fulfillment were restocked"),
(FULFILLMENT_FULFILLED_ITEMS, "Some items were fulfilled"),
(FULFILLMENT_REFUNDED, "Some items were refunded"),
(FULFILLMENT_RETURNED, "Some items were returned"),
(FULFILLMENT_REPLACED, "Some items were replaced"),
(TRACKING_UPDATED, "The fulfillment's tracking code was updated"),
(NOTE_ADDED, "A note was added to the order"),
(OTHER, "An unknown order event containing a message"),
]
class OrderEventsEmails:
"""The different order emails event types."""
CONFIRMED = "confirmed"
PAYMENT = "payment_confirmation"
SHIPPING = "shipping_confirmation"
TRACKING_UPDATED = "tracking_updated"
ORDER_CONFIRMATION = "order_confirmation"
ORDER_CANCEL = "order_cancel"
ORDER_REFUND = "order_refund"
FULFILLMENT = "fulfillment_confirmation"
DIGITAL_LINKS = "digital_links"
CHOICES = [
(PAYMENT, "The payment confirmation email was sent"),
(CONFIRMED, "The order confirmed email was sent"),
(SHIPPING, "The shipping confirmation email was sent"),
(TRACKING_UPDATED, "The fulfillment tracking code email was sent"),
(ORDER_CONFIRMATION, "The order placement confirmation email was sent"),
(ORDER_CANCEL, "The order cancel confirmation email was sent"),
(ORDER_REFUND, "The order refund confirmation email was sent"),
(FULFILLMENT, "The fulfillment confirmation email was sent"),
(DIGITAL_LINKS, "The email containing the digital links was sent"),
]
@dataclass
class OrderLineData:
line: "OrderLine"
quantity: int
variant: Optional["ProductVariant"] = None
replace: bool = False
warehouse_pk: Optional[str] = None
@dataclass
class FulfillmentLineData:
line: "FulfillmentLine"
quantity: int
replace: bool = False
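# Illustrative sketch only (not part of this module): the CHOICES lists above are
# plain (value, label) pairs in the standard Django "choices" format, so a model
# elsewhere in the project would typically consume them along these lines (the
# field name and max_length are assumptions for illustration):
#
#     status = models.CharField(
#         max_length=32,
#         default=OrderStatus.UNFULFILLED,
#         choices=OrderStatus.CHOICES,
#     )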
# File: themark/firstgo.py | Repo: bcaplins/NIST_APT_TOOLS | License: Unlicense
# standard imports
import numpy as np
import matplotlib.pyplot as plt
# Add parent directory to path
import sys
import os
parent_path = '..\\nistapttools'
if parent_path not in sys.path:
sys.path.append(os.path.abspath(parent_path))
# custom imports
import apt_fileio
import m2q_calib
import plotting_stuff
import initElements_P3
import histogram_functions
import peak_param_determination as ppd
from histogram_functions import bin_dat
import voltage_and_bowl
from voltage_and_bowl import do_voltage_and_bowl
from voltage_and_bowl import mod_full_vb_correction
import colorcet as cc
def create_histogram(xs, ys, x_roi=None, delta_x=0.1, y_roi=None, delta_y=0.1):
"""Create a 2d histogram of the data, specifying the bin intensity, region
of interest (on the y-axis), and the spacing of the y bins"""
# even number
num_x = int(np.ceil((x_roi[1]-x_roi[0])/delta_x))
num_y = int(np.ceil((y_roi[1]-y_roi[0])/delta_y))
return np.histogram2d(xs, ys, bins=[num_x, num_y],
range=[x_roi, y_roi],
density=False)
def _extents(f):
"""Helper function to determine axis extents based off of the bin edges"""
delta = f[1] - f[0]
return [f[0] - delta/2, f[-1] + delta/2]
def plot_2d_histo(ax, N, x_edges, y_edges, scale='log'):
    """Helper function to plot a histogram on an axis"""
    if scale=='log':
        dat = np.log10(1+N)
    elif scale=='lin':
        dat = N
ax.imshow(np.transpose(dat), aspect='auto',
extent=_extents(x_edges) + _extents(y_edges),
origin='lower', cmap=cc.cm.CET_L8,
interpolation='antialiased')
def corrhist(epos, delta=1, roi=None):
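    # Build a symmetric 2-D histogram of time-of-flight pairs drawn from
    # multi-hit events (ipp > 1), binned with width `delta` over `roi`.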
dat = epos['tof']
if roi is None:
roi = [0, 1000]
N = int(np.ceil((roi[1]-roi[0])/delta))
corrhist = np.zeros([N,N], dtype=int)
multi_idxs = np.where(epos['ipp']>1)[0]
for multi_idx in multi_idxs:
n_hits = epos['ipp'][multi_idx]
cluster = dat[multi_idx:multi_idx+n_hits]
idx1 = -1
idx2 = -1
for i in range(n_hits):
for j in range(i+1,n_hits):
idx1 = int(np.floor(cluster[i]/delta))
idx2 = int(np.floor(cluster[j]/delta))
if idx1 < N and idx1>=0 and idx2 < N and idx2>=0:
corrhist[idx1,idx2] += 1
edges = np.arange(roi[0],roi[1]+delta,delta)
assert edges.size-1 == N
return (edges, corrhist+corrhist.T-np.diag(np.diag(corrhist)))
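# t0 below is the per-pair time offset chosen so that, once the voltage and bowl
# correction factors are applied, the two corrected times of a double hit sum to
# the requested `sigma`.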
def calc_t0(tof,tof_vcorr_fac,tof_bcorr_fac,sigma):
BB = tof_bcorr_fac[0::2]+tof_bcorr_fac[1::2]
t0 = ((tof_bcorr_fac[0::2]*tof[0::2]+tof_bcorr_fac[1::2]*tof[1::2]) - sigma/(tof_vcorr_fac[0::2]))/BB
t0 = np.ravel(np.column_stack((t0,t0)))
return t0
def create_sigma_delta_histogram(raw_tof, tof_vcorr_fac, tof_bcorr_fac, sigmas=None, delta_range=None, delta_step=0.5):
# Must be a doubles only epos...
# scan through a range of sigmas and compute the corrected data
if sigmas is None:
sigmas = np.linspace(0,2000,2**7)
if delta_range is None:
delta_range = [0,700]
    delta_n_bins = int((delta_range[1]-delta_range[0])/delta_step)
# print('delta_n_bins = '+str(delta_n_bins))
res_dat = np.zeros((sigmas.size,delta_n_bins))
for sigma_idx in np.arange(sigmas.size):
t0 = calc_t0(raw_tof, tof_vcorr_fac, tof_bcorr_fac, sigmas[sigma_idx])
tof_corr = tof_vcorr_fac*tof_bcorr_fac*(raw_tof-t0)
dts = np.abs(tof_corr[:-1:2]-tof_corr[1::2])
N, delta_edges = np.histogram(dts, bins=delta_n_bins, range=delta_range)
res_dat[sigma_idx,:] = N
if np.mod(sigma_idx,10)==0:
print("Loop index "+str(sigma_idx+1)+" of "+str(sigmas.size))
delta_centers = 0.5*(delta_edges[:-1]+delta_edges[1:])
return (res_dat, sigmas, delta_centers)
def interleave(a,b):
return np.ravel(np.column_stack((a,b)))
def calc_slope_and_intercept(raw_tof, volt_coeff, bowl_coeff):
A = volt_coeff[0::2]
B_alpha = bowl_coeff[0::2]
B_beta = bowl_coeff[1::2]
tof_alpha = raw_tof[0::2]
tof_beta = raw_tof[1::2]
intercept = 2*A*B_alpha*B_beta*(tof_beta-tof_alpha)/(B_alpha+B_beta)
slope = (B_beta-B_alpha)/(B_beta+B_alpha)
return (slope, intercept)
# Note that x is sums and y is diffs
def compute_dist_to_line(slope, intercept, x, y):
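    # Perpendicular distance from each point (x, y) to the line
    # y = slope*x + intercept: |intercept + slope*x - y| / sqrt(1 + slope^2).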
return np.abs(intercept+slope*x-y)/np.sqrt(1+slope**2)
def calc_parametric_line(raw_tof, volt_coeff, bowl_coeff, n=2):
if n>0:
t = raw_tof.reshape(-1,n)
v = volt_coeff.reshape(-1,n)
b = bowl_coeff.reshape(-1,n)
else:
t = raw_tof
v = volt_coeff
b = bowl_coeff
r0 = v*b*(t-np.sum(b*t,axis=1)[:,np.newaxis]/np.sum(b,axis=1)[:,np.newaxis])
r1 = b/np.sum(b,axis=1)[:,np.newaxis]
return (r0, r1)
def compute_dist_to_parametric_line(r0, r1, q):
# q is n_pts by n_dim
sigma = (np.dot(r1,q.T)-np.sum(r0*r1,axis=1)[:,np.newaxis])/np.sum(r1**2,axis=1)[:,np.newaxis]
d = np.sqrt(np.sum(((r0[:,np.newaxis,:]+np.einsum("np,nd->npd",sigma,r1))-q[np.newaxis,...])**2, axis=-1))
return d, sigma
#from itertools import combinations, chain
#from scipy.special import comb
#def comb_index(n, k):
# count = comb(n, k, exact=True)
# index = np.fromiter(chain.from_iterable(combinations(range(n), k)),
# int, count=count*k)
# return index.reshape(-1, k)
def cartesian_product(arrays):
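    # Every combination of one element from each input array, returned as one
    # combination per row of an (N, ndim) array.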
ndim = len(arrays)
return np.stack(np.meshgrid(*arrays), axis=-1).reshape(-1, ndim)
plt.close('all')
fn = r'C:\Users\capli\Google Drive\NIST\pos_and_epos_files\themark\R20_18140-v02.epos'
epos = apt_fileio.read_epos_numpy(fn)
import sys
sys.exit(0)
# voltage and bowl correct ToF data.
p_volt = np.array([])
p_bowl = np.array([ 0.89964083, -0.43114144, -0.27484715, -0.25883824])
tof_corr, p_volt, p_bowl = do_voltage_and_bowl(epos,p_volt,p_bowl)
roi = [31, 33]
idxs = np.where((epos['tof']>roi[0]) & (epos['tof']<roi[1]))[0]
fig = plt.figure(num=10)
fig.clf()
ax = fig.gca()
ax.scatter(epos['x_det'][idxs],epos['y_det'][idxs],alpha=0.1,s=0.1)
ax.axis('equal')
ax.axis('square')
ax.set_xlabel('x')
ax.set_ylabel('y')
roi = [1000, 3000]
idxs = np.where((epos['tof']>roi[0]) & (epos['tof']<roi[1]))[0]
fig = plt.figure(num=10)
fig.clf()
ax = fig.gca()
ax.scatter(epos['x_det'][idxs],epos['y_det'][idxs],alpha=0.1,s=0.1, c=epos['v_dc'][idxs]/epos['v_dc'].max())
ax.axis('equal')
ax.axis('square')
ax.set_xlabel('x')
ax.set_ylabel('y')
vc = np.sqrt(5000)/np.sqrt(epos['v_dc'])
plotting_stuff.plot_TOF_vs_time(epos['tof']/vc,epos,1,user_ylim=[0,5000])
# Find transform to m/z space
epos_R44['m2q'], p_m2q_R44 = m2q_calib.align_m2q_to_ref_m2q(ref_epos['m2q'],mod_full_vb_correction(epos_R44,p_volt_R44,p_bowl_R44))
epos_R20['m2q'], p_m2q_R20 = m2q_calib.align_m2q_to_ref_m2q(ref_epos['m2q'],mod_full_vb_correction(epos_R20,p_volt_R20,p_bowl_R20))
plotting_stuff.plot_histo(epos_R44['m2q'],fig_idx=1,user_label='R44',scale_factor=1/epos_R44.size)
plotting_stuff.plot_histo(epos_R20['m2q'],fig_idx=1,user_label='R20',clearFigure=False,scale_factor=1/epos_R20.size)
# Make a correlation histogram of doubles
epos_vb_R44 = epos_R44.copy()
epos_vb_R44['tof'] = np.sqrt(1e4*epos_R44['m2q'])
edges, ch = corrhist(epos_vb_R44,roi = [0, 1000], delta=1)
centers = (edges[1:]+edges[:-1])/2.0
fig2 = plt.figure(num=3)
fig2.clf()
ax2 = fig2.gca()
plot_2d_histo(ax2, ch, edges, edges, scale='log')
ax2.axis('equal')
ax2.axis('square')
ax2.set_xlabel('ns')
ax2.set_ylabel('ns')
ax2.set_title('R44')
epos_vb_R20 = epos_R20.copy()
epos_vb_R20['tof'] = np.sqrt(1e4*epos_R20['m2q'])
edges, ch = corrhist(epos_vb_R20,roi = [0, 1000], delta=1)
centers = (edges[1:]+edges[:-1])/2.0
fig2 = plt.figure(num=4)
fig2.clf()
ax2 = fig2.gca()
plot_2d_histo(ax2, ch, edges, edges, scale='log')
ax2.axis('equal')
ax2.axis('square')
ax2.set_xlabel('ns')
ax2.set_ylabel('ns')
ax2.set_title('R20')
plotting_stuff.plot_histo(epos_vb_R44['tof'],fig_idx=5,user_label='R44',scale_factor=1/epos_R44.size,user_xlim=[0, 1000],user_bin_width=.1)
plotting_stuff.plot_histo(epos_vb_R20['tof'],fig_idx=5,user_label='R20',clearFigure=False,scale_factor=1/epos_R20.size,user_xlim=[0, 1000],user_bin_width=0.1)
plotting_stuff.plot_histo(epos_vb_R44['tof'][epos_vb_R44['ipp']==1],fig_idx=7,user_label='R44 s',scale_factor=1/epos_R44.size,user_xlim=[0, 1000],user_bin_width=.1)
plotting_stuff.plot_histo(epos_vb_R20['tof'][epos_vb_R20['ipp']==1],fig_idx=7,user_label='R20 s',clearFigure=False,scale_factor=1/epos_R20.size,user_xlim=[0, 1000],user_bin_width=0.1)
plotting_stuff.plot_histo(epos_vb_R44['tof'][epos_vb_R44['ipp']!=1],fig_idx=9,user_label='R44 m',scale_factor=1/epos_R44.size,user_xlim=[0, 1000],user_bin_width=.1)
plotting_stuff.plot_histo(epos_vb_R20['tof'][epos_vb_R20['ipp']!=1],fig_idx=9,user_label='R20 m',clearFigure=False,scale_factor=1/epos_R20.size,user_xlim=[0, 1000],user_bin_width=0.1)
plotting_stuff.plot_histo(epos_vb_R20['tof'][epos_vb_R20['ipp']==1],fig_idx=101,user_label='R20 s',clearFigure=True,scale_factor=1/epos_R20.size,user_xlim=[0, 1000],user_bin_width=0.1)
plotting_stuff.plot_histo(epos_vb_R20['tof'][epos_vb_R20['ipp']!=1],fig_idx=101,user_label='R20 m',clearFigure=False,scale_factor=1/epos_R20.size,user_xlim=[0, 1000],user_bin_width=0.1)
plotting_stuff.plot_histo(epos_vb_R44['tof'][epos_vb_R44['ipp']==1],fig_idx=103,user_label='R44 s',clearFigure=True,scale_factor=1/epos_R44.size,user_xlim=[0, 1000],user_bin_width=.1)
plotting_stuff.plot_histo(epos_vb_R44['tof'][epos_vb_R44['ipp']!=1],fig_idx=103,user_label='R44 m',clearFigure=False,scale_factor=1/epos_R44.size,user_xlim=[0, 1000],user_bin_width=.1)
lhs_roi = [489.3, 491]
rhs_roi = [491, 492.7]
#lhs_roi = [345.7, 346.5]
#
#rhs_roi = [346.5, 348]
lhs_idxs = np.where((epos_vb_R44['tof']>lhs_roi[0]) & (epos_vb_R44['tof']<lhs_roi[1]))[0]
rhs_idxs = np.where((epos_vb_R44['tof']>rhs_roi[0]) & (epos_vb_R44['tof']<rhs_roi[1]))[0]
fig = plt.figure(num=400)
fig.clf()
ax = fig.gca()
ax.scatter(epos_vb_R44['x_det'][lhs_idxs],epos_vb_R44['y_det'][lhs_idxs],alpha=0.1,s=3)
ax.axis('equal')
ax.axis('square')
ax.set_xlabel('x')
ax.set_ylabel('y')
fig = plt.figure(num=401)
fig.clf()
ax = fig.gca()
ax.scatter(epos_vb_R44['x_det'][rhs_idxs],epos_vb_R44['y_det'][rhs_idxs],alpha=0.1,s=3)
ax.axis('equal')
ax.axis('square')
ax.set_xlabel('x')
ax.set_ylabel('y')
plotting_stuff.plot_bowl_slices(
epos_vb_R44['tof'],
epos_vb_R44,
1000,
clearFigure=True,
user_ylim=[0, 1000],
)
lhs_roi = [489., 490]
rhs_roi = [490, 491]
lhs_idxs = np.where((epos_vb_R20['tof']>lhs_roi[0]) & (epos_vb_R20['tof']<lhs_roi[1]))[0]
rhs_idxs = np.where((epos_vb_R20['tof']>rhs_roi[0]) & (epos_vb_R20['tof']<rhs_roi[1]))[0]
fig = plt.figure(num=500)
fig.clf()
ax = fig.gca()
ax.scatter(epos_vb_R20['x_det'][lhs_idxs],epos_vb_R20['y_det'][lhs_idxs],alpha=0.1,s=3)
ax.axis('equal')
ax.axis('square')
ax.set_xlabel('x')
ax.set_ylabel('y')
fig = plt.figure(num=501)
fig.clf()
ax = fig.gca()
ax.scatter(epos_vb_R20['x_det'][rhs_idxs],epos_vb_R20['y_det'][rhs_idxs],alpha=0.1,s=3)
ax.axis('equal')
ax.axis('square')
ax.set_xlabel('x')
ax.set_ylabel('y')
plotting_stuff.plot_bowl_slices(
epos_vb_R20['tof'],
epos_vb_R20,
1000,
clearFigure=True,
user_ylim=[0, 1000],
)
# File: python/singa/data.py | Repo: pavan87/Machine-learning | License: Apache-2.0
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
'''
This module includes classes for loading and prefetching data batches.
Example usage::
import image_tool
from PIL import Image
tool = image_tool.ImageTool()
def image_transform(img_path):
global tool
return tool.load(img_path).resize_by_range(
(112, 128)).random_crop(
(96, 96)).flip().get()
data = ImageBatchIter('train.txt', 3,
image_transform, shuffle=True, delimiter=',',
image_folder='images/',
capacity=10)
data.start()
# imgs is a numpy array for a batch of images,
# shape: batch_size, 3 (RGB), height, width
imgs, labels = data.next()
# convert numpy array back into images
for idx in range(imgs.shape[0]):
img = Image.fromarray(imgs[idx].astype(np.uint8).transpose(1, 2, 0),
'RGB')
img.save('img%d.png' % idx)
data.end()
'''
import os
import random
import time
from multiprocessing import Process, Queue
import numpy as np
class ImageBatchIter:
'''Utility for iterating over an image dataset to get mini-batches.
Args:
img_list_file(str): name of the file containing image meta data; each
line consists of image_path_suffix delimiter meta_info,
where meta info could be label index or label strings, etc.
meta_info should not contain the delimiter. If the meta_info
of each image is just the label index, then we will parse the
label index into a numpy array with length=batchsize
(for compatibility); otherwise, we return a list of meta_info;
            if meta info is not available, we return a list of None.
batch_size(int): num of samples in one mini-batch
image_transform: a function for image augmentation; it accepts the full
image path and outputs a list of augmented images.
shuffle(boolean): True for shuffling images in the list
delimiter(char): delimiter between image_path_suffix and label, e.g.,
space or comma
        image_folder(str): prefix of the image path
capacity(int): the max num of mini-batches in the internal queue.
'''
def __init__(self, img_list_file, batch_size, image_transform,
shuffle=True, delimiter=' ', image_folder=None, capacity=10):
self.img_list_file = img_list_file
self.queue = Queue(capacity)
self.batch_size = batch_size
self.image_transform = image_transform
self.shuffle = shuffle
self.delimiter = delimiter
self.image_folder = image_folder
self.stop = False
self.p = None
with open(img_list_file, 'r') as fd:
self.num_samples = len(fd.readlines())
def start(self):
self.p = Process(target=self.run)
self.p.start()
return
def next(self):
assert self.p is not None, 'call start before next'
while self.queue.empty():
time.sleep(0.1)
x, y = self.queue.get() # dequeue one mini-batch
return x, y
def stop(self):
        self.end()
def end(self):
if self.p is not None:
self.stop = True
time.sleep(0.1)
self.p.terminate()
def run(self):
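        # Producer loop: parse the image list once, then keep assembling
        # mini-batches (applying the user-supplied augmentation) and pushing
        # them onto the queue until end() flips the stop flag.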
img_list = []
is_label_index = True
for line in open(self.img_list_file, 'r'):
item = line.strip('\n').split(self.delimiter)
if len(item) < 2:
is_label_index = False
img_list.append((item[0].strip(), None))
else:
if not item[1].strip().isdigit():
# the meta info is not label index
is_label_index = False
img_list.append((item[0].strip(), item[1].strip()))
index = 0 # index for the image
if self.shuffle:
random.shuffle(img_list)
while not self.stop:
if not self.queue.full():
x, y = [], []
i = 0
while i < self.batch_size:
img_path, img_meta = img_list[index]
aug_images = self.image_transform(
os.path.join(self.image_folder, img_path))
assert i + len(aug_images) <= self.batch_size, \
'too many images (%d) in a batch (%d)' % \
(i + len(aug_images), self.batch_size)
for img in aug_images:
ary = np.asarray(img.convert('RGB'), dtype=np.float32)
x.append(ary.transpose(2, 0, 1))
if is_label_index:
y.append(int(img_meta))
else:
y.append(img_meta)
i += 1
index += 1
if index == self.num_samples:
index = 0 # reset to the first image
if self.shuffle:
random.shuffle(img_list)
# enqueue one mini-batch
if is_label_index:
self.queue.put((np.asarray(x), np.asarray(y, dtype=np.int32)))
else:
self.queue.put((np.asarray(x), y))
else:
time.sleep(0.1)
return
if __name__ == '__main__':
import image_tool
from PIL import Image
tool = image_tool.ImageTool()
def image_transform(img_path):
global tool
return tool.load(img_path).resize_by_range(
(112, 128)).random_crop(
(96, 96)).flip().get()
data = ImageBatchIter('train.txt', 3,
image_transform, shuffle=False, delimiter=',',
image_folder='images/',
capacity=10)
data.start()
imgs, labels = data.next()
print labels
for idx in range(imgs.shape[0]):
img = Image.fromarray(imgs[idx].astype(np.uint8).transpose(1, 2, 0),
'RGB')
img.save('img%d.png' % idx)
data.end()
# File: src/efficient_det/hyper_parameter_search.py | Repo: zeynep68/object-detection-efficientdet | License: Apache-2.0
from ray import tune
from ray.tune import CLIReporter
from ray.tune.schedulers import ASHAScheduler
from efficient_det.train import distributed_training
def hyper_param_search(config, num_tries=10, gpus_per_trial=1,
cpus_per_trial=4):
"""Hyper parameter sweep. Automatically manages resources so all GPUs
are used. Includes early stopping.
Args:
config:
num_tries: Number of combinations to try
gpus_per_trial (float): Number of GPU(s) for each trial. Can be 0.5
cpus_per_trial (int): Number of CPUs for each trial. Defaults to 4.
"""
# START RAY TUNE IMPORT
import tensorflow as tf
# END OF RAY TUNE IMPORTS
run_prefix = 'small_space_522_'
strategy = tf.distribute.MirroredStrategy()
with strategy.scope():
# Asynchronous Hyperband parameter search algorithm
# A SYSTEM FOR MASSIVELY PARALLEL HYPERPARAMETER TUNING
# https://arxiv.org/pdf/1810.05934.pdf
if config['evaluation']:
scheduler = ASHAScheduler(metric="mAP", mode="max", grace_period=1,
reduction_factor=2,
max_t=config['num_epochs'])
score_attr = "min-mAP"
else:
scheduler = ASHAScheduler(metric="val_loss", mode="min",
grace_period=1, reduction_factor=2,
max_t=config['num_epochs'])
score_attr = "max-val_loss"
# Reports progress as terminal output
reporter = CLIReporter(
parameter_columns=["learning_rate", "batch_size", "activations",
"optimizer"],
metric_columns=["train_loss", 'train_classification_loss',
'train_regression_loss', "training_iteration"])
result = tune.run(distributed_training,
resources_per_trial={"cpu": cpus_per_trial,
"gpu": gpus_per_trial},
config=config, num_samples=num_tries, name=run_prefix,
checkpoint_score_attr=score_attr, scheduler=scheduler,
progress_reporter=reporter)
if config['evaluation']:
best_trial = result.get_best_trial("mAP", "max", "last")
print(f"Best trial config: {best_trial.config}")
print(f"Best trial final mAP score:{best_trial.last_result['mAP']}")
else:
best_trial = result.get_best_trial("val_loss", "min", "last")
print(f"Best trial config: {best_trial.config}")
print(f"Best trial final val loss:"
f"{best_trial.last_result['val_loss']}")
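# Hypothetical usage sketch: the search-space values below are assumptions, but the
# keys are ones this module actually reads ("evaluation", "num_epochs") or reports
# on ("learning_rate", "batch_size", "activations", "optimizer").
#
#     config = {
#         "learning_rate": tune.loguniform(1e-5, 1e-2),
#         "batch_size": tune.choice([4, 8, 16]),
#         "activations": tune.choice(["relu", "swish"]),
#         "optimizer": tune.choice(["adam", "sgd"]),
#         "num_epochs": 50,
#         "evaluation": False,
#     }
#     hyper_param_search(config, num_tries=20, gpus_per_trial=1, cpus_per_trial=4)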
# File: reman/tests/tests_reman_views.py | Repo: Nels885/csd_dashboard | License: MIT
from django.urls import reverse
from django.contrib.messages import get_messages
from django.utils.translation import ugettext as _
from dashboard.tests.base import UnitTest
from reman.models import Repair, SparePart, Batch, EcuModel, EcuRefBase, EcuType, Default
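# View tests for the Reman app: the typical pattern is to check that an anonymous
# request redirects to the login page, then grant the needed permission(s), log in
# and check the authorised response.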
class RemanTestCase(UnitTest):
def setUp(self):
super(RemanTestCase, self).setUp()
self.psaBarcode = '9612345678'
spare_part = SparePart.objects.create(code_produit='test HW_9876543210')
ecu_type = EcuType.objects.create(hw_reference='9876543210', technical_data='test', spare_part=spare_part)
ref_base = EcuRefBase.objects.create(reman_reference='1234567890', ecu_type=ecu_type)
ecu = EcuModel.objects.create(oe_raw_reference='1699999999', ecu_type=ecu_type, psa_barcode=self.psaBarcode)
self.batch = Batch.objects.create(year="C", number=1, quantity=10, created_by=self.user, ecu_ref_base=ref_base)
self.repair = Repair.objects.create(
batch=self.batch, identify_number="C001010001", created_by=self.user, status="Réparé", quality_control=True)
self.authError = {"detail": "Informations d'authentification non fournies."}
Default.objects.create(code='TEST1', description='Ceci est le test 1')
def test_repair_table_page(self):
url = reverse('reman:repair_table')
response = self.client.get(url)
self.assertRedirects(response, self.nextLoginUrl + url, status_code=302)
# Test if connected with permissions
self.add_perms_user(Repair, 'view_repair')
self.login()
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
def test_spare_part_table_page(self):
url = reverse('reman:part_table')
response = self.client.get(url)
self.assertRedirects(response, self.nextLoginUrl + url, status_code=302)
self.add_perms_user(SparePart, 'view_sparepart')
self.login()
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
def test_repair_pages(self):
urls_perms = [
(reverse('reman:create_repair'), 'add_repair'),
(reverse('reman:edit_repair', kwargs={'pk': self.repair.pk}), 'change_repair'),
(reverse('reman:close_repair', kwargs={'pk': self.repair.pk}), 'change_repair'),
(reverse('reman:detail_repair', kwargs={'pk': self.repair.pk}), 'view_repair'),
]
for url, perm in urls_perms:
response = self.client.get(url)
self.assertRedirects(response, self.nextLoginUrl + url, status_code=302)
self.add_perms_user(Repair, perm)
self.login()
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.client.logout()
def test_batch_table(self):
url = reverse('reman:batch_table')
response = self.client.get(url)
self.assertRedirects(response, self.nextLoginUrl + url, status_code=302)
self.add_perms_user(Batch, 'view_batch')
self.login()
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
def test_out_table(self):
url = reverse('reman:out_table') + '?filter=' + self.batch.batch_number
response = self.client.get(url)
self.assertRedirects(response, self.nextLoginUrl + url, status_code=302)
self.add_perms_user(Repair, 'close_repair')
self.login()
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
# Invalid form
response = self.client.post(url, {'identify_number': ''})
self.assertFormError(response, 'form', 'identify_number', _('This field is required.'))
for identify_number in ['C001010001', 'C001010002R']:
response = self.client.post(url, {'identify_number': identify_number})
self.assertFormError(response, 'form', 'identify_number', "N° d'identification invalide")
Repair.objects.create(batch=self.batch, identify_number="C001010002", created_by=self.user, status="Réparé")
response = self.client.post(url, {'identify_number': 'C001010002R'})
self.assertFormError(response, 'form', 'identify_number', "Contrôle qualité non validé, voir avec Atelier.")
def test_check_part(self):
url = reverse('reman:part_check')
response = self.client.get(url)
self.assertRedirects(response, self.nextLoginUrl + url, status_code=302)
self.add_perms_user(EcuModel, 'check_ecumodel', 'add_ecumodel')
self.login()
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
# Invalid form
response = self.client.post(url, {'psa_barcode': ''})
self.assertFormError(response, 'form', 'psa_barcode', _('This field is required.'))
for barcode in ['123456789', '96123']:
response = self.client.post(url, {'psa_barcode': barcode})
self.assertFormError(response, 'form', 'psa_barcode', _('The barcode is invalid'))
# Valid form
barcode_list = [
('9600000000', '9600000000'), ('9687654321', '9687654321'), ('9800000000', '9800000000'),
('9887654321', '9887654321'), ('96876543210000000000', '9687654321'), ('89661-0H390', '89661-0H390')
]
for barcode in barcode_list:
response = self.client.post(url, {'psa_barcode': barcode[0]})
self.assertRedirects(
response, reverse('reman:part_create', kwargs={'psa_barcode': barcode[1]}), status_code=302)
response = self.client.post(url, {'psa_barcode': self.psaBarcode})
ecu = EcuModel.objects.get(psa_barcode=self.psaBarcode)
self.assertEquals(response.context['ecu'], ecu)
def test_new_part_email(self):
url = reverse('reman:part_email', kwargs={'psa_barcode': self.psaBarcode})
response = self.client.get(url)
self.assertRedirects(response, self.nextLoginUrl + url, status_code=302)
self.add_perms_user(EcuModel, 'check_ecumodel')
self.login()
response = self.client.get(url)
messages = list(get_messages(response.wsgi_request))
self.assertEqual(len(messages), 1)
self.assertEqual(str(messages[0]), _('Success: The email has been sent.'))
self.assertRedirects(response, reverse('reman:part_check'), status_code=302)
def test_base_ref_table(self):
url = reverse('reman:base_ref_table')
response = self.client.get(url)
self.assertRedirects(response, self.nextLoginUrl + url, status_code=302)
self.add_perms_user(EcuRefBase, 'view_ecurefbase')
self.login()
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
def test_ecu_hw_table(self):
url = reverse('reman:ecu_hw_table')
response = self.client.get(url)
self.assertRedirects(response, self.nextLoginUrl + url, status_code=302)
self.login()
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
def test_ecu_hw_generate_view(self):
url = reverse('reman:ecu_hw_generate')
response = self.client.get(url)
self.assertRedirects(response, self.nextLoginUrl + url, status_code=302)
self.login()
response = self.client.get(url)
self.assertRedirects(response, reverse('reman:ecu_hw_table'), status_code=302)
def test_ecu_dump_table(self):
url = reverse('reman:ecu_dump_table')
response = self.client.get(url)
self.assertRedirects(response, self.nextLoginUrl + url, status_code=302)
self.login()
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
def test_default_table(self):
        url = reverse('reman:default_table')
response = self.client.get(url)
self.assertRedirects(response, self.nextLoginUrl + url, status_code=302)
self.add_perms_user(Default, 'view_default')
self.login()
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
def test_repair_view_set_is_disconnected(self):
response = self.client.get(reverse('reman:api_repair-list'), format='json')
self.assertEqual(response.status_code, 403)
self.assertEqual(response.data, self.authError)
def test_part_create_view(self):
psa_barcode = '9676543210'
url = reverse('reman:part_create', kwargs={'psa_barcode': psa_barcode})
response = self.client.get(url)
self.assertRedirects(response, self.nextLoginUrl + url, status_code=302)
self.add_perms_user(EcuModel, 'check_ecumodel')
self.login()
for nb in range(2):
response = self.client.get(url + f"?next={nb}")
if nb == 2:
self.assertEqual(response.status_code, 404)
else:
self.assertEqual(response.status_code, 200)
def test_ref_base_edit_view(self):
url = reverse('reman:edit_ref_base', kwargs={'psa_barcode': self.psaBarcode})
response = self.client.get(url)
self.assertRedirects(response, self.nextLoginUrl + url, status_code=302)
self.add_perms_user(EcuModel, 'change_ecumodel')
self.login()
for nb in range(2):
response = self.client.get(url + f"?next={nb}")
self.assertEqual(response.status_code, 200)
def test_batch_pdf_generate(self):
url = reverse('reman:batch_pdf', kwargs={'pk': self.batch.pk})
response = self.client.get(url)
self.assertRedirects(response, self.nextLoginUrl + url, status_code=302)
self.add_perms_user(Batch, 'pdfgen_batch')
self.login()
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
def test_create_default(self):
"""
Create Default through CreateView.
"""
self.add_perms_user(Default, 'add_default')
self.login()
# First post request = ajax request checking if form in view is valid
response = self.client.post(
reverse('reman:create_default'),
data={
'code': '',
'description': '',
},
HTTP_X_REQUESTED_WITH='XMLHttpRequest'
)
# Form has errors
self.assertTrue(response.context_data['form'].errors)
# No redirection
self.assertEqual(response.status_code, 200)
# Object is not created
defaults = Default.objects.all()
self.assertEqual(defaults.count(), 1)
# Second post request = non-ajax request creating an object
response = self.client.post(
reverse('reman:create_default'),
data={
'code': 'TEST2',
'description': 'Ceci est le test 2',
},
)
# redirection
self.assertEqual(response.status_code, 302)
# Object is not created
defaults = Default.objects.all()
self.assertEqual(defaults.count(), 2)
def test_update_default(self):
"""
Update Default throught UpdateView.
"""
self.add_perms_user(Default, 'change_default')
self.login()
# Update object through BSModalUpdateView
default = Default.objects.first()
response = self.client.post(
reverse('reman:update_default', kwargs={'pk': default.pk}),
data={
'code': 'TEST3',
'description': 'Ceci est le test 3',
}
)
# redirection
self.assertEqual(response.status_code, 302)
# Object is updated
default = Default.objects.first()
self.assertEqual(default.code, 'TEST3')
# File: tests/cli/client/test_args.py | Repo: tnoff/hathor | License: BSD-2-Clause-FreeBSD
from hathor import client, settings, utils
from hathor.cli import client as cli
from hathor.exc import CLIException
from tests import utils as test_utils
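# Log-level choices accepted by both the file (-ll) and console (-cll) logging flags.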
LOG_LEVELS = [
'debug',
'info',
'warn',
'error',
]
class TestGlobalArgs(test_utils.TestHelper):
def setUp(self):
# common function to test globals
self.func = ['podcast', 'list']
def test_defaults(self):
args = cli.parse_args(self.func)
self.assertEqual(args.pop('settings'), settings.DEFAULT_SETTINGS_FILE)
self.assertEqual(args.pop('module'), 'podcast')
self.assertEqual(args.pop('command'), 'list')
self.assertEqual(args.pop('column_limit'), settings.COLUMN_LIMIT_DEFAULT)
self.assertEqual(args.pop('console_logging'), False)
self.assertEqual(args.pop('reverse_sort'), False)
null_args = ['database_file', 'logging_file', 'google_api_key',
'podcast_directory', 'keys', 'sort_key',
'datetime_output_format', 'console_logging_level',
'logging_file_level', 'soundcloud_client_id',]
for key in null_args:
self.assertEqual(args.pop(key), None)
self.assert_length(args.keys(), 0)
def test_settings(self):
settings_file = utils.random_string(prefix='/home/foo/')
args = cli.parse_args(['-s', settings_file] + self.func)
self.assertEqual(args['settings'], settings_file)
args = cli.parse_args(['--settings', settings_file] + self.func)
self.assertEqual(args['settings'], settings_file)
def test_database(self):
database_file = utils.random_string(prefix='/home/bar/')
args = cli.parse_args(['-d', database_file] + self.func)
self.assertEqual(args['database_file'], database_file)
args = cli.parse_args(['--database', database_file] + self.func)
self.assertEqual(args['database_file'], database_file)
def test_log_file(self):
log_file = utils.random_string(prefix='/home/derp/')
args = cli.parse_args(['-l', log_file] + self.func)
self.assertEqual(args['logging_file'], log_file)
args = cli.parse_args(['--log-file', log_file] + self.func)
self.assertEqual(args['logging_file'], log_file)
def test_log_file_level(self):
with self.assertRaises(CLIException) as error:
cli.parse_args(['-ll', 'foo'])
self.check_error_message("argument -ll/--log-file-level: invalid choice: "
"'foo' (choose from 'debug', 'error', 'info', 'warn')",
error)
for level in LOG_LEVELS:
args = cli.parse_args(['-ll', level] + self.func)
self.assertEqual(args['logging_file_level'], level)
args = cli.parse_args(['--log-file-level', level] + self.func)
self.assertEqual(args['logging_file_level'], level)
def test_datetime_output_format(self):
df = utils.random_string()
args = cli.parse_args(['-df', df] + self.func)
self.assertEqual(args['datetime_output_format'], df)
args = cli.parse_args(['--datetime-format', df] + self.func)
self.assertEqual(args['datetime_output_format'], df)
def test_soundcloud_client_id(self):
sc = utils.random_string()
args = cli.parse_args(['-sc', sc] + self.func)
self.assertEqual(args['soundcloud_client_id'], sc)
args = cli.parse_args(['--soundcloud', sc] + self.func)
self.assertEqual(args['soundcloud_client_id'], sc)
def test_google_api_key(self):
ga = utils.random_string()
args = cli.parse_args(['-g', ga] + self.func)
self.assertEqual(args['google_api_key'], ga)
args = cli.parse_args(['--google', ga] + self.func)
self.assertEqual(args['google_api_key'], ga)
def test_podcast_directory(self):
pd = utils.random_string(prefix='/home/derp/')
args = cli.parse_args(['-p', pd] + self.func)
self.assertEqual(args['podcast_directory'], pd)
args = cli.parse_args(['--podcast-dir', pd] + self.func)
self.assertEqual(args['podcast_directory'], pd)
def test_column_limit(self):
with self.assertRaises(CLIException) as error:
cli.parse_args(['-c', 'foo'] + self.func)
self.check_error_message("argument -c/--column-limit: invalid int value: 'foo'",
error)
def test_console_log(self):
args = cli.parse_args(['-cl'] + self.func)
self.assertTrue(args['console_logging'])
args = cli.parse_args(['-cl'] + self.func)
self.assertTrue(args['console_logging'])
def test_console_log_level(self):
with self.assertRaises(CLIException) as error:
cli.parse_args(['-cll', 'foo'])
self.check_error_message("argument -cll/--console-log-level: invalid choice: "
"'foo' (choose from 'debug', 'error', 'info', 'warn')",
error)
for level in LOG_LEVELS:
args = cli.parse_args(['-cll', level] + self.func)
self.assertEqual(args['console_logging_level'], level)
args = cli.parse_args(['--console-log-level', level] + self.func)
self.assertEqual(args['console_logging_level'], level)
def test_keys(self):
key = utils.random_string()
args = cli.parse_args(['-k', key] + self.func)
self.assertEqual(args['keys'], key)
args = cli.parse_args(['--keys', key] + self.func)
self.assertEqual(args['keys'], key)
# make sure comma seperated works
args = cli.parse_args(['--keys', 'foo,foo2'] + self.func)
self.assertEqual(args['keys'], 'foo,foo2')
def test_sort_key(self):
key = utils.random_string()
args = cli.parse_args(['-sk', key] + self.func)
self.assertEqual(args['sort_key'], key)
args = cli.parse_args(['--sort-key', key] + self.func)
self.assertEqual(args['sort_key'], key)
class TestFilterArgs(test_utils.TestHelper):
def test_filter_list(self):
args = cli.parse_args(['filter', 'list'])
self.assertEqual(args['module'], 'filter')
self.assertEqual(args['command'], 'list')
with self.assertRaises(CLIException) as error:
cli.parse_args(['filter', 'list', '-i', 'foo'])
self.check_error_message("argument -i/--include-podcasts: invalid"
" int value: 'foo'", error)
with self.assertRaises(CLIException) as error:
cli.parse_args(['filter', 'list', '-e', 'foo'])
self.check_error_message("argument -e/--exclude-podcasts: invalid"
" int value: 'foo'", error)
args = cli.parse_args(['filter', 'list', '-i', '5'])
self.assertEqual(args['include_podcasts'], [5])
args = cli.parse_args(['filter', 'list', '-i', '5', '7'])
self.assertEqual(args['include_podcasts'], [5, 7])
args = cli.parse_args(['filter', 'list', '-e', '5'])
self.assertEqual(args['exclude_podcasts'], [5])
args = cli.parse_args(['filter', 'list', '-e', '5', '7'])
self.assertEqual(args['exclude_podcasts'], [5, 7])
def test_filter_create(self):
regex = utils.random_string()
expected = {
'module' : 'filter',
'command' : 'create',
'podcast_id' : 1,
'regex_string' : regex,
}
args = cli.parse_args(['filter', 'create', '1', regex])
for key, value in expected.items():
self.assertEqual(args[key], value)
with self.assertRaises(CLIException) as error:
cli.parse_args(['filter', 'create', 'foo', 'foo'])
self.check_error_message("argument podcast_id: invalid"
" int value: 'foo'", error)
def test_filter_delete(self):
expected = {
'module' : 'filter',
'command' : 'delete',
'filter_input' : [1],
}
args = cli.parse_args(['filter', 'delete', '1',])
for key, value in expected.items():
self.assertEqual(args[key], value)
with self.assertRaises(CLIException) as error:
cli.parse_args(['filter', 'delete', 'foo'])
self.check_error_message("argument filter_input: invalid"
" int value: 'foo'", error)
# make sure multiple works
args = cli.parse_args(['filter', 'delete', '2', '6'])
self.assertEqual(args['filter_input'], [2, 6])
class TestPodcastArgs(test_utils.TestHelper):
def test_podcast_create(self):
expected = {
'module' : 'podcast',
'command' : 'create',
'file_location' : None,
'artist_name' : None,
'automatic_download' : True,
'max_allowed' : None,
'podcast_name' : utils.random_string(),
'archive_type' : 'rss',
'broadcast_id' : utils.random_string(),
}
common_args = [
'podcast',
'create',
expected['podcast_name'],
expected['archive_type'],
expected['broadcast_id'],
]
args = cli.parse_args(common_args)
for key, value in expected.items():
self.assertEqual(args[key], value)
with self.assertRaises(CLIException) as error:
cli.parse_args(['podcast', 'create', 'foo',
'bar', 'foo'])
self.check_error_message("argument archive_type: invalid choice:"
" 'bar' (choose from 'rss', 'soundcloud', 'youtube')", error)
for choice in client.ARCHIVE_TYPES:
args = cli.parse_args(['podcast', 'create', 'foo',
choice, 'bar'])
self.assertEqual(args['archive_type'], choice)
with self.assertRaises(CLIException) as error:
cli.parse_args(common_args + ['--max-allowed', 'foo'])
self.check_error_message("argument --max-allowed: invalid"
" int value: 'foo'", error)
args = cli.parse_args(common_args + ['--max-allowed', '8'])
self.assertEqual(args['max_allowed'], 8)
file_location = utils.random_string()
args = cli.parse_args(common_args + ['--file-location', file_location])
self.assertEqual(args['file_location'], file_location)
artist = utils.random_string()
args = cli.parse_args(common_args + ['--artist-name', artist])
self.assertEqual(args['artist_name'], artist)
args = cli.parse_args(common_args + ['--no-auto-download'])
self.assertFalse(args['automatic_download'])
def test_podcast_list(self):
expected = {
'module' : 'podcast',
'command' : 'list',
}
args = cli.parse_args(['podcast', 'list'])
for key, value in expected.items():
self.assertEqual(args[key], value)
def test_podcast_show(self):
expected = {
'module' : 'podcast',
'command' : 'show',
'podcast_input' : [5],
}
args = cli.parse_args(['podcast', 'show', '5'])
for key, value in expected.items():
self.assertEqual(args[key], value)
with self.assertRaises(CLIException) as error:
cli.parse_args(['podcast', 'show', 'foo'])
self.check_error_message("argument podcast_input:"
" invalid int value: 'foo'", error)
args = cli.parse_args(['podcast', 'show', '5', '10'])
self.assertEqual(args['podcast_input'], [5, 10])
def test_podcast_delete(self):
expected = {
'module' : 'podcast',
'command' : 'delete',
'podcast_input' : [5],
'delete_files' : True,
}
args = cli.parse_args(['podcast', 'delete', '5'])
for key, value in expected.items():
self.assertEqual(args[key], value)
with self.assertRaises(CLIException) as error:
cli.parse_args(['podcast', 'delete', 'foo'])
self.check_error_message("argument podcast_input:"
" invalid int value: 'foo'", error)
args = cli.parse_args(['podcast', 'delete', '5', '10'])
self.assertEqual(args['podcast_input'], [5, 10])
args = cli.parse_args(['podcast', 'delete', '--keep-files', '5'])
self.assertFalse(args['delete_files'])
def test_podcast_update(self):
expected = {
'module' : 'podcast',
'command' : 'update',
'podcast_id' : 5,
}
args = cli.parse_args(['podcast', 'update', '5'])
for key, value in expected.items():
self.assertEqual(args[key], value)
with self.assertRaises(CLIException) as error:
cli.parse_args(['podcast', 'update', 'foo'])
self.check_error_message("argument podcast_id:"
" invalid int value: 'foo'", error)
common_args = ['podcast', 'update', '5']
pod_name = utils.random_string()
args = cli.parse_args(common_args + ['--podcast-name', pod_name])
self.assertEqual(args['podcast_name'], pod_name)
broadcast_id = utils.random_string()
args = cli.parse_args(common_args + ['--broadcast-id', broadcast_id])
self.assertEqual(args['broadcast_id'], broadcast_id)
artist_name = utils.random_string()
args = cli.parse_args(common_args + ['--artist-name', artist_name])
self.assertEqual(args['artist_name'], artist_name)
with self.assertRaises(CLIException) as error:
cli.parse_args(common_args + ['--max-allowed', 'foo'])
self.check_error_message("argument --max-allowed:"
" invalid int value: 'foo'", error)
args = cli.parse_args(common_args + ['--max-allowed', '11'])
self.assertEqual(args['max_allowed'], 11)
args = cli.parse_args(common_args + ['--auto-download'])
self.assertTrue(args['automatic_download'])
args = cli.parse_args(common_args + ['--no-auto-download'])
self.assertFalse(args['automatic_download'])
args = cli.parse_args(common_args)
self.assert_none(args['automatic_download'])
with self.assertRaises(CLIException) as error:
cli.parse_args(common_args + ['--auto-download', '--no-auto-download'])
self.check_error_message("argument --no-auto-download: not allowed"
" with argument --auto-download", error)
def test_podcast_update_file_location(self):
expected = {
'module' : 'podcast',
'command' : 'update-file-location',
'podcast_id' : 5,
'file_location' : 'foo',
'move_files' : True,
}
args = cli.parse_args(['podcast', 'update-file-location', '5', 'foo'])
for key, value in expected.items():
self.assertEqual(args[key], value)
with self.assertRaises(CLIException) as error:
cli.parse_args(['podcast', 'update-file-location', 'foo', 'foo'])
self.check_error_message("argument podcast_id:"
" invalid int value: 'foo'", error)
args = cli.parse_args(['podcast', 'update-file-location',
'2', 'foo', '--no-move'])
self.assertFalse(args['move_files'])
def test_podcast_sync(self):
expected = {
'module' : 'podcast',
'command' : 'sync',
'sync_web_episodes' : True,
'download_episodes' : True,
}
args = cli.parse_args(['podcast', 'sync'])
for key, value in expected.items():
self.assertEqual(args[key], value)
common_args = ['podcast', 'sync']
args = cli.parse_args(common_args + ['--no-web-sync'])
self.assertFalse(args['sync_web_episodes'])
args = cli.parse_args(common_args + ['--no-download'])
self.assertFalse(args['download_episodes'])
with self.assertRaises(CLIException) as error:
cli.parse_args(common_args + ['-i', 'foo'])
self.check_error_message("argument -i/--include-podcasts:"
" invalid int value: 'foo'", error)
with self.assertRaises(CLIException) as error:
cli.parse_args(common_args + ['-e', 'foo'])
self.check_error_message("argument -e/--exclude-podcasts:"
" invalid int value: 'foo'", error)
args = cli.parse_args(common_args + ['-i', '5', '10'])
self.assertEqual(args['include_podcasts'], [5, 10])
args = cli.parse_args(common_args + ['--include-podcasts', '5', '10'])
self.assertEqual(args['include_podcasts'], [5, 10])
args = cli.parse_args(common_args + ['-e', '5', '10'])
self.assertEqual(args['exclude_podcasts'], [5, 10])
args = cli.parse_args(common_args + ['--exclude-podcasts', '5', '10'])
self.assertEqual(args['exclude_podcasts'], [5, 10])
class TestEpisodeArgs(test_utils.TestHelper):
def test_episode_list(self):
expected = {
'module' : 'episode',
'command' : 'list',
'only_files' : True,
}
args = cli.parse_args(['episode', 'list'])
for key, value in expected.items():
self.assertEqual(args[key], value)
common_args = ['episode', 'list']
with self.assertRaises(CLIException) as error:
cli.parse_args(common_args + ['-i', 'foo'])
self.check_error_message("argument -i/--include-podcasts:"
" invalid int value: 'foo'", error)
with self.assertRaises(CLIException) as error:
cli.parse_args(common_args + ['-e', 'foo'])
self.check_error_message("argument -e/--exclude-podcasts:"
" invalid int value: 'foo'", error)
args = cli.parse_args(common_args + ['-i', '5', '10'])
self.assertEqual(args['include_podcasts'], [5, 10])
args = cli.parse_args(common_args + ['--include-podcasts', '5', '10'])
self.assertEqual(args['include_podcasts'], [5, 10])
args = cli.parse_args(common_args + ['-e', '5', '10'])
self.assertEqual(args['exclude_podcasts'], [5, 10])
args = cli.parse_args(common_args + ['--exclude-podcasts', '5', '10'])
self.assertEqual(args['exclude_podcasts'], [5, 10])
args = cli.parse_args(common_args + ['--all'])
self.assertFalse(args['only_files'])
def test_episode_show(self):
expected = {
'module' : 'episode',
'command' : 'show',
'episode_input' : [5],
}
args = cli.parse_args(['episode', 'show', '5'])
for key, value in expected.items():
self.assertEqual(args[key], value)
with self.assertRaises(CLIException) as error:
cli.parse_args(['episode', 'show', 'bar'])
self.check_error_message("argument episode_input:"
" invalid int value: 'bar'", error)
args = cli.parse_args(['episode', 'show', '6', '12'])
self.assertEqual(args['episode_input'], [6, 12])
    def test_episode_delete(self):
expected = {
'module' : 'episode',
'command' : 'delete',
'episode_input' : [5],
'delete_files' : True,
}
args = cli.parse_args(['episode', 'delete', '5'])
for key, value in expected.items():
self.assertEqual(args[key], value)
with self.assertRaises(CLIException) as error:
cli.parse_args(['episode', 'delete', 'bar'])
self.check_error_message("argument episode_input:"
" invalid int value: 'bar'", error)
args = cli.parse_args(['episode', 'delete', '6', '12'])
self.assertEqual(args['episode_input'], [6, 12])
args = cli.parse_args(['episode', 'delete', '--keep-files', '6'])
self.assertFalse(args['delete_files'])
def test_episode_delete_file(self):
expected = {
'module' : 'episode',
'command' : 'delete-file',
'episode_input' : [5],
}
args = cli.parse_args(['episode', 'delete-file', '5'])
for key, value in expected.items():
self.assertEqual(args[key], value)
with self.assertRaises(CLIException) as error:
cli.parse_args(['episode', 'delete-file', 'bar'])
self.check_error_message("argument episode_input:"
" invalid int value: 'bar'", error)
args = cli.parse_args(['episode', 'delete-file', '6', '12'])
self.assertEqual(args['episode_input'], [6, 12])
def test_episode_download(self):
expected = {
'module' : 'episode',
'command' : 'download',
'episode_input' : [5],
}
args = cli.parse_args(['episode', 'download', '5'])
for key, value in expected.items():
self.assertEqual(args[key], value)
with self.assertRaises(CLIException) as error:
cli.parse_args(['episode', 'download', 'bar'])
self.check_error_message("argument episode_input:"
" invalid int value: 'bar'", error)
args = cli.parse_args(['episode', 'download', '6', '12'])
self.assertEqual(args['episode_input'], [6, 12])
def test_episode_update(self):
expected = {
'module' : 'episode',
'command' : 'update',
'episode_id' : 5,
}
args = cli.parse_args(['episode', 'update', '5'])
for key, value in expected.items():
self.assertEqual(args[key], value)
with self.assertRaises(CLIException) as error:
cli.parse_args(['episode', 'update', '5', '--prevent-delete',
'--allow-delete'])
self.check_error_message("argument --allow-delete: not allowed"
" with argument --prevent-delete", error)
args = cli.parse_args(['episode', 'update', '5', '--prevent-delete'])
self.assertTrue(args['prevent_delete'])
args = cli.parse_args(['episode', 'update', '5', '--allow-delete'])
self.assertFalse(args['prevent_delete'])
args = cli.parse_args(['episode', 'update', '5'])
self.assert_none(args['prevent_delete'])
    def test_episode_update_file_path(self):
expected = {
'module' : 'episode',
'command' : 'update-file-path',
'episode_id' : 5,
'file_path' : 'foo'
}
args = cli.parse_args(['episode', 'update-file-path', '5', 'foo'])
for key, value in expected.items():
self.assertEqual(args[key], value)
def test_episode_sync(self):
expected = {
'module' : 'episode',
'command' : 'sync',
}
args = cli.parse_args(['episode', 'sync'])
for key, value in expected.items():
self.assertEqual(args[key], value)
with self.assertRaises(CLIException) as error:
cli.parse_args(['episode', 'sync', '--sync-num', 'bar'])
self.check_error_message("argument --sync-num:"
" invalid int value: 'bar'", error)
with self.assertRaises(CLIException) as error:
cli.parse_args(['episode', 'sync', '-i', 'foo'])
self.check_error_message("argument -i/--include-podcasts:"
" invalid int value: 'foo'", error)
with self.assertRaises(CLIException) as error:
cli.parse_args(['episode', 'sync', '-e', 'foo'])
self.check_error_message("argument -e/--exclude-podcasts:"
" invalid int value: 'foo'", error)
common_args = ['episode', 'sync']
args = cli.parse_args(common_args + ['--sync-num', '5'])
self.assertEqual(args['max_episode_sync'], 5)
args = cli.parse_args(common_args + ['-i', '5', '10'])
self.assertEqual(args['include_podcasts'], [5, 10])
args = cli.parse_args(common_args + ['-e', '5', '10'])
self.assertEqual(args['exclude_podcasts'], [5, 10])
args = cli.parse_args(common_args + ['--include-podcasts', '5', '10'])
self.assertEqual(args['include_podcasts'], [5, 10])
args = cli.parse_args(common_args + ['--exclude-podcasts', '5', '10'])
self.assertEqual(args['exclude_podcasts'], [5, 10])
def test_episode_cleanup(self):
expected = {
'module' : 'episode',
'command' : 'cleanup',
}
args = cli.parse_args(['episode', 'cleanup'])
for key, value in expected.items():
self.assertEqual(args[key], value)
| 41.839465 | 94 | 0.564828 |
a220216ac3c1d533435314c622b65e4530018f47 | 6,823 | py | Python | ArticleRecommendationProject/Recommendation/CollabContentBased.py | Newsrecommender/newsrecommender | f6d2b720833d7cbca0d905812860193824b4e657 | ["MIT"] | 7 | 2017-07-08T11:22:39.000Z | 2020-04-12T01:44:58.000Z | ArticleRecommendationProject/Recommendation/CollabContentBased.py | Newsrecommender/newsrecommender | f6d2b720833d7cbca0d905812860193824b4e657 | ["MIT"] | null | null | null | ArticleRecommendationProject/Recommendation/CollabContentBased.py | Newsrecommender/newsrecommender | f6d2b720833d7cbca0d905812860193824b4e657 | ["MIT"] | 5 | 2017-07-09T09:38:59.000Z | 2018-10-23T07:41:08.000Z | import yaml
import pandas as pd
import numpy as np
import sys
import os
from math import sqrt
import matplotlib
import matplotlib.pyplot as plot
import networkx as nx
class Collabcontentbased(object):
def __init__(self):
self.object = object
def get_script_directory(self):
"""
        This function returns the directory of the script when run in script mode.
        In interactive mode it returns the interpreter name.
        """
path = os.path.realpath(sys.argv[0])
if os.path.isdir(path):
return path
else:
return os.path.dirname(path)
def similarity_score(self, Article1, Article2):
"""
        This function calculates a similarity score based on the Euclidean distance between two objects
"""
both_viewed = {}
for item in self.dataset[Article1]:
if item in self.dataset[Article2]:
both_viewed[item] = 1
# The Conditions to check if they both have common rating items
if len(both_viewed) == 0:
return 0
# Finding Euclidean distance
sum_of_euclidean_distance = []
for item in self.dataset[Article1]:
if item in self.dataset[Article2]:
sum_of_euclidean_distance.append(pow(self.dataset[Article1][item] - self.dataset[Article2][item], 2))
sum_of_euclidean_distance = sum(sum_of_euclidean_distance)
#print (sum_of_euclidean_distance)
return 1/(1+sqrt(sum_of_euclidean_distance))
def pearson_correlation(self, Article1, Article2):
"""
This function calculates Pearson correlation between two vectors
"""
both_rated = {}
for item in self.dataset[Article1]:
if item in self.dataset[Article2]:
both_rated[item] = 1
number_of_ratings = len(both_rated)
# Checking for number of ratings in common
if number_of_ratings == 0:
return 0
# Add up all the preferences of each user
person1_preferences_sum = sum([self.dataset[Article1][item] for item in both_rated])
person2_preferences_sum = sum([self.dataset[Article2][item] for item in both_rated])
# Sum up the squares of preferences of each user
person1_square_preferences_sum = sum([pow(self.dataset[Article1][item],2) for item in both_rated])
person2_square_preferences_sum = sum([pow(self.dataset[Article2][item],2) for item in both_rated])
# Sum up the product value of both preferences for each item
product_sum_of_both_users = sum([self.dataset[Article1][item] * self.dataset[Article2][item] for item in both_rated])
# Calculate the pearson score
numerator_value = product_sum_of_both_users - (person1_preferences_sum*person2_preferences_sum/number_of_ratings)
denominator_value = sqrt((person1_square_preferences_sum - pow(person1_preferences_sum,2)/number_of_ratings) * (person2_square_preferences_sum -pow(person2_preferences_sum,2)/number_of_ratings))
if denominator_value == 0:
return 0
else:
r = numerator_value/denominator_value
return r
def find_most_similar_objects(self, Article1, number_of_users):
        # Rank the other objects by Pearson correlation and return the single most similar one.
scores = [(self.pearson_correlation(Article1,other_person),other_person) for other_person in self.dataset if other_person != Article1 ]
        # Sort so that the highest-scoring objects appear first
scores.sort()
scores.reverse()
return (scores[0:number_of_users][0][1])
def get_recommendations(self, objects, no_of_recommendations):
"""
This function generates recommendations for specified object
"""
recommended_articles = []
input_articles = []
for article in objects:
# print (article, find_most_similar_objects(article,2)[0][1], find_most_similar_objects(article,2)[1][1])
input_articles.append(article)
recommended_articles.append(self.find_most_similar_objects(article,no_of_recommendations))
return input_articles,recommended_articles
def run(self):
# 0. Find the path of script and set wd
path = self.get_script_directory()
os.chdir(path)
print ('Script is located at {}'.format(path))
# import config files
print("Reading configuration")
with open("config.yml", 'r') as ymlfile:
            cfg = yaml.safe_load(ymlfile)
user_ratings_files_path = cfg['project_test_conf']['ratings_file_path']
user_ratings_csv_filename = cfg['project_test_conf']['ratings_file_name']
articles_files_path = cfg['project_test_conf']['articles_file_path']
articles_csv_filename = cfg['project_test_conf']['articles_file_name']
ratings_index = cfg['project_test_conf']['ratings_index_column']
output_file_path = cfg['project_test_conf']['output_path']
output_file_name = cfg['project_test_conf']['output_file_name']
ratings_file = os.path.join(user_ratings_files_path, user_ratings_csv_filename)
articles_file = os.path.join(articles_files_path, articles_csv_filename)
Output_Recommendations = os.path.join(output_file_path, output_file_name)
print("Configuration loaded successfully")
print ('Reading ratings from file {}'.format(ratings_file))
user_ratings = pd.read_csv(ratings_file, index_col=ratings_index)
articles_db = pd.read_csv(articles_file, index_col=ratings_index)
objects_list = list(user_ratings.index)
user_ratings_T = user_ratings.transpose()
self.dataset = user_ratings_T.to_dict()
# Get recommendations
print('Calculations in progress...')
Article, recommended_article = self.get_recommendations(objects_list, 5)
print('Calculations completed.')
# Create output files
print('Creating output file')
recommended_article_title = []
for content in recommended_article:
recommended_article_title.append(articles_db.Title[content])
input_article_title = []
for content in Article:
input_article_title.append(articles_db.Title[content])
df = pd.DataFrame()
df['Article'] = Article
df['Recommendation'] = recommended_article
df['News'] = input_article_title
df['Recommended_News'] = recommended_article_title
df = df.set_index('Article', drop=True, append=False, inplace=False, verify_integrity=False)
df.to_csv(Output_Recommendations)
print('Output file created.')
print('Check output files at {}'.format(Output_Recommendations))
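# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# Shows how the Pearson-correlation similarity defined above behaves on a tiny,
# invented ratings dictionary.  The article and user names are placeholders; the
# real data is loaded by run() from the CSV files named in config.yml.
def _pearson_demo():
    demo = Collabcontentbased()
    demo.dataset = {
        'article_a': {'user1': 5, 'user2': 3, 'user3': 4},
        'article_b': {'user1': 4, 'user2': 2, 'user3': 5},
    }
    # Returns a value in [-1, 1]; larger means the two articles were rated more similarly.
    return demo.pearson_correlation('article_a', 'article_b')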
if __name__ == "__main__":
Collabcontentbased().run()
| 43.183544 | 202 | 0.678001 |
d4f35bed741e094da299267b64cfa2820f50cdd3 | 22,610 | py | Python | Code/GraphMol/SubstructLibrary/Wrap/rough_test.py | nbehrnd/rdkit | 3262b498149f8dcdabe8d74b6d3882c685bbc252 | ["BSD-3-Clause"] | 1 | 2017-08-18T02:26:16.000Z | 2017-08-18T02:26:16.000Z | Code/GraphMol/SubstructLibrary/Wrap/rough_test.py | nbehrnd/rdkit | 3262b498149f8dcdabe8d74b6d3882c685bbc252 | ["BSD-3-Clause"] | 9 | 2016-08-08T13:53:40.000Z | 2020-03-08T05:52:07.000Z | Code/GraphMol/SubstructLibrary/Wrap/rough_test.py | bp-kelley/rdkit | e0de7c9622ce73894b1e7d9568532f6d5638058a | ["BSD-3-Clause"] | null | null | null | # Copyright (C) 2017-2021 Novartis Institute of BioMedical Research
# and other RDKit contributors
#
# All Rights Reserved
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Novartis Institutes for BioMedical Research Inc.
# nor the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
""" This is a rough coverage test of the python wrapper for the SubstructLibrary
it is intended to be shallow but broad.
"""
import doctest, unittest, os, sys
from rdkit import RDConfig, RDLogger
from rdkit.RDLogger import logger
logger = logger()
from rdkit import Chem
from rdkit.Chem import rdSubstructLibrary
import time
import pickle
import tempfile
def load_tests(loader, tests, ignore):
tests.addTests(doctest.DocTestSuite(rdSubstructLibrary))
return tests
def makeStereoExamples():
el = "NO"
mols = []
for e in el:
for e2 in el:
if e != e2:
smi = "C1CCO[C@@](%s)(%s)1"%(e,e2)
m = Chem.MolFromSmiles(smi)
if m:
mols.append(m)
smi = "C1CCO[C@](%s)(%s)1"%(e,e2)
m = Chem.MolFromSmiles(smi)
if m:
mols.append(m)
return mols
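# --- Hedged sketch (added for illustration; not part of the original tests) ---
# A condensed end-to-end use of SubstructLibrary with the molecules produced by
# makeStereoExamples(); the unit tests below exercise the same calls in detail.
# This helper is not invoked by the test suite.
def _example_library_search():
    holder = rdSubstructLibrary.CachedSmilesMolHolder()
    for mol in makeStereoExamples():
        holder.AddSmiles(Chem.MolToSmiles(mol, isomericSmiles=True))
    library = rdSubstructLibrary.SubstructLibrary(holder)
    query = Chem.MolFromSmarts("C-1-C-C-O-C(-*)(-*)1")
    # Indices of library entries containing the substituted tetrahydrofuran core.
    return list(library.GetMatches(query))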
class TestCase(unittest.TestCase):
def setUp(self):
pass
def test0SubstructLibrary(self):
for fpholderCls in [None, rdSubstructLibrary.PatternHolder]:
for holder in [rdSubstructLibrary.MolHolder(), rdSubstructLibrary.CachedMolHolder(),
rdSubstructLibrary.CachedSmilesMolHolder()]:
if fpholderCls: fpholder = fpholderCls()
else: fpholder = None
slib_ = rdSubstructLibrary.SubstructLibrary(holder, fpholder)
for i in range(100):
m = Chem.MolFromSmiles("c1ccccc1")
self.assertEqual(slib_.AddMol(m), i)
libs = [slib_]
if rdSubstructLibrary.SubstructLibraryCanSerialize():
serialized1 = pickle.loads(pickle.dumps(slib_))
serialized2 = rdSubstructLibrary.SubstructLibrary(slib_.Serialize())
libs.append(serialized1)
libs.append(serialized2)
for slib in libs:
res = slib.GetMatches(m)
t2 = time.time()
self.assertTrue(len(res) == 100)
res = slib.GetMatches(m)
self.assertEqual(len(res), 100)
self.assertTrue(set(res) == set(list(range(100))))
res = slib.GetMatches(m, maxResults=100);
self.assertEqual(len(res), 100)
self.assertEqual(len(slib.GetMatches(m, startIdx=0, endIdx=100)), 100)
self.assertTrue(slib.HasMatch(m))
self.assertEqual(slib.CountMatches(m), 100)
def test1SubstructLibrary(self):
for fpholderCls in [None, rdSubstructLibrary.PatternHolder]:
for holder in [rdSubstructLibrary.MolHolder(), rdSubstructLibrary.CachedMolHolder(),
rdSubstructLibrary.CachedSmilesMolHolder()]:
if fpholderCls: fpholder = fpholderCls()
else: fpholder = None
slib_ = rdSubstructLibrary.SubstructLibrary(holder, fpholder)
mols = []
for i in range(100):
m = Chem.MolFromSmiles("c1ccccc1")
self.assertEqual(slib_.AddMol(m), i*2)
mols.append(m)
m2 = Chem.MolFromSmiles("CCCC")
self.assertEqual(slib_.AddMol(m2), i*2+1)
mols.append(m2)
libs = [slib_]
if rdSubstructLibrary.SubstructLibraryCanSerialize():
serialized1 = pickle.loads(pickle.dumps(slib_))
serialized2 = rdSubstructLibrary.SubstructLibrary(slib_.Serialize())
libs.append(serialized1)
libs.append(serialized2)
for slib in libs:
res = slib.GetMatches(m)
self.assertEqual(len(res), 100)
self.assertEqual(set(res), set(list(range(0,200,2))))
res = slib.GetMatches(m2)
self.assertEqual(len(res), 100)
self.assertTrue(set(res) == set(list(range(1,200,2))))
res = slib.GetMatches(m)
self.assertEqual(len(res), 100)
res = slib.GetMatches(m, maxResults=100);
self.assertEqual(len(res), 100)
self.assertEqual(len(slib.GetMatches(m, startIdx=0, endIdx=50*2)), 50)
self.assertEqual(len(slib.GetMatches(m2, startIdx=1, endIdx=50*2+1)), 50)
self.assertTrue(slib.HasMatch(m))
self.assertTrue(slib.HasMatch(m2))
self.assertEqual(slib.CountMatches(m), 100)
self.assertEqual(slib.CountMatches(m2), 100)
def testOptions(self):
mols = makeStereoExamples() * 10
for holderCls in [
rdSubstructLibrary.MolHolder,
rdSubstructLibrary.CachedMolHolder,
rdSubstructLibrary.CachedSmilesMolHolder,
rdSubstructLibrary.CachedTrustedSmilesMolHolder,
]:
holder = holderCls()
slib_ = rdSubstructLibrary.SubstructLibrary(holder, None)
for mol in mols:
slib_.AddMol(mol)
libs = [slib_]
if rdSubstructLibrary.SubstructLibraryCanSerialize():
serialized1 = pickle.loads(pickle.dumps(slib_))
serialized2 = rdSubstructLibrary.SubstructLibrary(slib_.Serialize())
libs.append(serialized1)
libs.append(serialized2)
for slib in libs:
core = Chem.MolFromSmarts("C-1-C-C-O-C(-*)(-*)1")
res = slib.GetMatches(core)
self.assertEqual(len(res),
len([x for x in mols if x.HasSubstructMatch(core, useChirality=True)]))
core = Chem.MolFromSmarts("C-1-C-C-O-C(-[O])(-[N])1")
core.SetProp("core", "core")
res = slib.GetMatches(core, useChirality=False)
self.assertEqual(len(res),
len([x for x in mols if x.HasSubstructMatch(core, useChirality=False)]))
core = Chem.MolFromSmarts("C-1-C-C-O-[C@@](-[O])(-[N])1")
res = slib.GetMatches(core, useChirality=False)
self.assertEqual(len(res),
len([x for x in mols if x.HasSubstructMatch(core, useChirality=False)]))
core = Chem.MolFromSmarts("C-1-C-C-O-[C@@](-[O])(-[N])1")
res = slib.GetMatches(core)
self.assertEqual(len(res),
len([x for x in mols if x.HasSubstructMatch(core, useChirality=True)]))
def testSmilesCache(self):
mols = makeStereoExamples() * 10
holder = rdSubstructLibrary.CachedSmilesMolHolder()
slib_ = rdSubstructLibrary.SubstructLibrary(holder, None)
for mol in mols:
holder.AddSmiles(Chem.MolToSmiles(mol, isomericSmiles=True))
libs = [slib_]
if rdSubstructLibrary.SubstructLibraryCanSerialize():
serialized1 = pickle.loads(pickle.dumps(slib_))
serialized2 = rdSubstructLibrary.SubstructLibrary(slib_.Serialize())
libs.append(serialized1)
libs.append(serialized2)
for slib in libs:
core = Chem.MolFromSmarts("C-1-C-C-O-C(-*)(-*)1")
res = slib.GetMatches(core)
self.assertEqual(len(res),
len([x for x in mols if x.HasSubstructMatch(core, useChirality=True)]))
core = Chem.MolFromSmarts("C-1-C-C-O-C(-[O])(-[N])1")
core.SetProp("core", "core")
res = slib.GetMatches(core, useChirality=False)
self.assertEqual(len(res),
len([x for x in mols if x.HasSubstructMatch(core, useChirality=False)]))
core = Chem.MolFromSmarts("C-1-C-C-O-[C@@](-[O])(-[N])1")
res = slib.GetMatches(core, useChirality=False)
self.assertEqual(len(res),
len([x for x in mols if x.HasSubstructMatch(core, useChirality=False)]))
core = Chem.MolFromSmarts("C-1-C-C-O-[C@@](-[O])(-[N])1")
res = slib.GetMatches(core)
self.assertEqual(len(res),
len([x for x in mols if x.HasSubstructMatch(core, useChirality=True)]))
def testTrustedSmilesCache(self):
mols = makeStereoExamples() * 10
holder = rdSubstructLibrary.CachedTrustedSmilesMolHolder()
slib_ = rdSubstructLibrary.SubstructLibrary(holder, None)
for mol in mols:
holder.AddSmiles(Chem.MolToSmiles(mol, isomericSmiles=True))
libs = [slib_]
if rdSubstructLibrary.SubstructLibraryCanSerialize():
serialized1 = pickle.loads(pickle.dumps(slib_))
serialized2 = rdSubstructLibrary.SubstructLibrary(slib_.Serialize())
libs.append(serialized1)
libs.append(serialized2)
for slib in libs:
core = Chem.MolFromSmarts("C-1-C-C-O-C(-*)(-*)1")
res = slib.GetMatches(core)
self.assertEqual(len(res),
len([x for x in mols if x.HasSubstructMatch(core, useChirality=True)]))
core = Chem.MolFromSmarts("C-1-C-C-O-C(-[O])(-[N])1")
core.SetProp("core", "core")
res = slib.GetMatches(core, useChirality=False)
self.assertEqual(len(res),
len([x for x in mols if x.HasSubstructMatch(core, useChirality=False)]))
core = Chem.MolFromSmarts("C-1-C-C-O-[C@@](-[O])(-[N])1")
res = slib.GetMatches(core, useChirality=False)
self.assertEqual(len(res),
len([x for x in mols if x.HasSubstructMatch(core, useChirality=False)]))
core = Chem.MolFromSmarts("C-1-C-C-O-[C@@](-[O])(-[N])1")
res = slib.GetMatches(core)
self.assertEqual(len(res),
len([x for x in mols if x.HasSubstructMatch(core, useChirality=True)]))
def testBinaryCache(self):
mols = makeStereoExamples() * 10
holder = rdSubstructLibrary.CachedMolHolder()
slib_ = rdSubstructLibrary.SubstructLibrary(holder, None)
for mol in mols:
holder.AddBinary(mol.ToBinary())
libs = [slib_]
if rdSubstructLibrary.SubstructLibraryCanSerialize():
serialized1 = pickle.loads(pickle.dumps(slib_))
serialized2 = rdSubstructLibrary.SubstructLibrary(slib_.Serialize())
libs.append(serialized1)
libs.append(serialized2)
for slib in libs:
core = Chem.MolFromSmarts("C-1-C-C-O-C(-*)(-*)1")
res = slib.GetMatches(core)
self.assertEqual(len(res),
len([x for x in mols if x.HasSubstructMatch(core, useChirality=True)]))
core = Chem.MolFromSmarts("C-1-C-C-O-C(-[O])(-[N])1")
core.SetProp("core", "core")
res = slib.GetMatches(core, useChirality=False)
self.assertEqual(len(res),
len([x for x in mols if x.HasSubstructMatch(core, useChirality=False)]))
core = Chem.MolFromSmarts("C-1-C-C-O-[C@@](-[O])(-[N])1")
res = slib.GetMatches(core, useChirality=False)
self.assertEqual(len(res),
len([x for x in mols if x.HasSubstructMatch(core, useChirality=False)]))
core = Chem.MolFromSmarts("C-1-C-C-O-[C@@](-[O])(-[N])1")
res = slib.GetMatches(core)
self.assertEqual(len(res),
len([x for x in mols if x.HasSubstructMatch(core, useChirality=True)]))
def testRingSmartsWithTrustedSmiles(self):
pat = Chem.MolFromSmarts("[C&R1]")
pat2 = Chem.MolFromSmarts("C@C") # ring bond
holder = rdSubstructLibrary.CachedTrustedSmilesMolHolder()
lib = rdSubstructLibrary.SubstructLibrary(holder)
lib.AddMol(Chem.MolFromSmiles("C1CC1"))
# make sure we can get an unsanitized molecule that fails (no ring info)
print("Testing atom rings")
with self.assertRaises(RuntimeError):
holder.GetMol(0).HasSubstructMatch(pat)
print("testing bond rings")
with self.assertRaises(RuntimeError):
holder.GetMol(0).HasSubstructMatch(pat2)
# shouldn't throw
print("searching atom rings")
self.assertEqual(len(lib.GetMatches(pat)), 1)
self.assertEqual(lib.CountMatches(pat), 1)
print("searching bond rings")
self.assertEqual(len(lib.GetMatches(pat2)), 1)
self.assertEqual(lib.CountMatches(pat2), 1)
print("done")
def test_init_from_and_to_stream(self):
mols = makeStereoExamples() * 10
holder = rdSubstructLibrary.CachedSmilesMolHolder()
# one day I'll fix this, but we need to write text but read binary
# grrr.... something about the python_streambuf handler.
slib = rdSubstructLibrary.SubstructLibrary(holder, None)
for mol in mols:
holder.AddSmiles(Chem.MolToSmiles(mol, isomericSmiles=True))
if rdSubstructLibrary.SubstructLibraryCanSerialize():
fd, path = tempfile.mkstemp()
with open(path, 'w') as file:
slib.ToStream(file)
with open(path, 'rb') as file:
slib2 = rdSubstructLibrary.SubstructLibrary()
slib2.InitFromStream(file)
self.assertEqual(len(slib), len(slib2))
from io import StringIO, BytesIO
s = StringIO()
slib.ToStream(s)
sb = BytesIO(s.getvalue().encode("ascii"))
self.assertTrue(len(sb.getvalue()) > 0)
slib3 = rdSubstructLibrary.SubstructLibrary()
slib3.InitFromStream(sb)
self.assertEqual(len(slib), len(slib2))
def test_addpatterns(self):
pdb_ligands = [
"CCS(=O)(=O)c1ccc(OC)c(Nc2ncc(-c3cccc(-c4ccccn4)c3)o2)c1",
"COc1ccc(S(=O)(=O)NCC2CC2)cc1Nc1ncc(-c2cccc(-c3cccnc3)c2)o1",
"COc1ccc(-c2oc3ncnc(N)c3c2-c2ccc(NC(=O)Nc3cc(C(F)(F)F)ccc3F)cc2)cc1",
"COC(=O)Nc1nc2ccc(Oc3ccc(NC(=O)Nc4cc(C(F)(F)F)ccc4F)cc3)cc2[nH]1",
"COc1cc(Nc2ncnc(-c3cccnc3Nc3ccccc3)n2)cc(OC)c1OC",
"O=C(Nc1ccc(Oc2ccccc2)cc1)c1cccnc1NCc1ccncc1",
"O=C(Nc1ccc(Oc2ccccc2)cc1)c1cccnc1NCc1ccncc1",
"CNC(=O)c1cc(Oc2ccc3[nH]c(Nc4ccc(Cl)c(C(F)(F)F)c4)nc3c2)ccn1",
"CNC(=O)c1cc(Oc2ccc3oc(Nc4ccc(Cl)c(OCC5CCC[NH+]5C)c4)nc3c2)ccn1",
"CNC(=O)c1cc(Oc2ccc3oc(Nc4ccc(Cl)c(OCC5CCC[NH+]5C)c4)nc3c2)ccn1",
"COc1cc2nccc(Oc3ccc4c(c3)OCCN4C(=O)Nc3ccc(Cl)cc3)c2cc1OC",
"CNC(=O)c1c(C)oc2cc(Oc3cc[nH+]c4cc(OCCN5CCOCC5)ccc34)ccc12",
"COc1cc2[nH+]ccc(Oc3ccc4c(C(=O)Nc5ccc(Cl)cc5)cccc4c3)c2cc1OC",
"COc1cc2[nH+]ccc(Oc3ccc4c(C(=O)Nc5ccc(Cl)cc5)cccc4c3)c2cc1OC",
"COc1cc2[nH+]ccc(Oc3ccc4c(C(=O)NC5CC5)cccc4c3)c2cc1OC",
"COc1cc2[nH+]ccc(Oc3ccc4c(C(=O)NC5CC5)cccc4c3)c2cc1OC",
"Cc1ccc(C(=O)Nc2cc(CCC[NH+](C)C)cc(C(F)(F)F)c2)cc1Nc1ncccc1-c1ccncn1",
"COc1cc(Nc2nccc(Nc3ccc4c(C)n[nH]c4c3)n2)cc(OC)c1OC",
"COc1cc(Nc2nccc(N(C)c3ccc4c(C)n[nH]c4c3)n2)cc(OC)c1OC",
"Cc1ccn(-c2ccc3c(c2)NCC3(C)C)c(=O)c1-c1ccc2nc(N)ncc2c1",
"Cc1ccn(-c2ccc3c(c2)NCC3(C)C)c(=O)c1-c1ccc2nc(N)ncc2c1",
"Cc1ccc(C(=O)NCCC2CCCC2)cc1C(=O)Nc1ccc(N)nc1",
"Cc1ccc(C(=O)NCCC2CCCC2)cc1C(=O)Nc1ccc(N)nc1",
"Cc1ccn(-c2cccc(C(F)(F)F)c2)c(=O)c1-c1ccc2nc(N)ncc2c1",
"Cc1ccn(-c2cccc(C(F)(F)F)c2)c(=O)c1-c1ccc2nc(N)ncc2c1",
"O=C(Nc1cncnc1)c1c(Cl)ccc2c(Nc3cccc(C(F)(F)F)c3)noc12",
"O=C(Nc1cncnc1)c1c(Cl)ccc2c(Nc3cccc(C(F)(F)F)c3)noc12",
"CC1(C)CNc2cc(NC(=O)c3cccnc3NCc3ccncc3)ccc21",
"CC1(C)CNc2cc(NC(=O)c3cccnc3NCc3ccncc3)ccc21"
]
for patterns in [rdSubstructLibrary.PatternHolder(), rdSubstructLibrary.TautomerPatternHolder()]:
mols = [Chem.MolFromSmiles(smi) for smi in pdb_ligands]
holder = rdSubstructLibrary.CachedMolHolder()
slib_with_patterns = rdSubstructLibrary.SubstructLibrary(holder, patterns)
for mol in mols:
slib_with_patterns.AddMol(mol)
for nthreads in [1, 2, 0]:
slib_without_patterns = rdSubstructLibrary.SubstructLibrary(holder, None)
rdSubstructLibrary.AddPatterns(slib_without_patterns, nthreads)
# check for seg fault
# were the fingerprints really created
slib_without_patterns.GetFpHolder().GetFingerprint(0)
for mol in mols:
l1 = slib_with_patterns.CountMatches(mol)
l2 = slib_without_patterns.CountMatches(mol)
self.assertTrue(l1)
self.assertEqual(l1,l2)
def test_basic_addpatterns(self):
# add mols
pdb_ligands = [
"CCS(=O)(=O)c1ccc(OC)c(Nc2ncc(-c3cccc(-c4ccccn4)c3)o2)c1",
"COc1ccc(S(=O)(=O)NCC2CC2)cc1Nc1ncc(-c2cccc(-c3cccnc3)c2)o1",
"COc1ccc(-c2oc3ncnc(N)c3c2-c2ccc(NC(=O)Nc3cc(C(F)(F)F)ccc3F)cc2)cc1",
"COC(=O)Nc1nc2ccc(Oc3ccc(NC(=O)Nc4cc(C(F)(F)F)ccc4F)cc3)cc2[nH]1",
"COc1cc(Nc2ncnc(-c3cccnc3Nc3ccccc3)n2)cc(OC)c1OC",
"O=C(Nc1ccc(Oc2ccccc2)cc1)c1cccnc1NCc1ccncc1",
"O=C(Nc1ccc(Oc2ccccc2)cc1)c1cccnc1NCc1ccncc1",
"CNC(=O)c1cc(Oc2ccc3[nH]c(Nc4ccc(Cl)c(C(F)(F)F)c4)nc3c2)ccn1",
"CNC(=O)c1cc(Oc2ccc3oc(Nc4ccc(Cl)c(OCC5CCC[NH+]5C)c4)nc3c2)ccn1",
"CNC(=O)c1cc(Oc2ccc3oc(Nc4ccc(Cl)c(OCC5CCC[NH+]5C)c4)nc3c2)ccn1",
"COc1cc2nccc(Oc3ccc4c(c3)OCCN4C(=O)Nc3ccc(Cl)cc3)c2cc1OC",
"CNC(=O)c1c(C)oc2cc(Oc3cc[nH+]c4cc(OCCN5CCOCC5)ccc34)ccc12",
"COc1cc2[nH+]ccc(Oc3ccc4c(C(=O)Nc5ccc(Cl)cc5)cccc4c3)c2cc1OC",
"COc1cc2[nH+]ccc(Oc3ccc4c(C(=O)Nc5ccc(Cl)cc5)cccc4c3)c2cc1OC",
"COc1cc2[nH+]ccc(Oc3ccc4c(C(=O)NC5CC5)cccc4c3)c2cc1OC",
"COc1cc2[nH+]ccc(Oc3ccc4c(C(=O)NC5CC5)cccc4c3)c2cc1OC",
"Cc1ccc(C(=O)Nc2cc(CCC[NH+](C)C)cc(C(F)(F)F)c2)cc1Nc1ncccc1-c1ccncn1",
"COc1cc(Nc2nccc(Nc3ccc4c(C)n[nH]c4c3)n2)cc(OC)c1OC",
"COc1cc(Nc2nccc(N(C)c3ccc4c(C)n[nH]c4c3)n2)cc(OC)c1OC",
"Cc1ccn(-c2ccc3c(c2)NCC3(C)C)c(=O)c1-c1ccc2nc(N)ncc2c1",
"Cc1ccn(-c2ccc3c(c2)NCC3(C)C)c(=O)c1-c1ccc2nc(N)ncc2c1",
"Cc1ccc(C(=O)NCCC2CCCC2)cc1C(=O)Nc1ccc(N)nc1",
"Cc1ccc(C(=O)NCCC2CCCC2)cc1C(=O)Nc1ccc(N)nc1",
"Cc1ccn(-c2cccc(C(F)(F)F)c2)c(=O)c1-c1ccc2nc(N)ncc2c1",
"Cc1ccn(-c2cccc(C(F)(F)F)c2)c(=O)c1-c1ccc2nc(N)ncc2c1",
"O=C(Nc1cncnc1)c1c(Cl)ccc2c(Nc3cccc(C(F)(F)F)c3)noc12",
"O=C(Nc1cncnc1)c1c(Cl)ccc2c(Nc3cccc(C(F)(F)F)c3)noc12",
"CC1(C)CNc2cc(NC(=O)c3cccnc3NCc3ccncc3)ccc21",
"CC1(C)CNc2cc(NC(=O)c3cccnc3NCc3ccncc3)ccc21"
]
for holder in [rdSubstructLibrary.CachedSmilesMolHolder(),
rdSubstructLibrary.CachedTrustedSmilesMolHolder()]:
for smi in pdb_ligands:
holder.AddSmiles(smi)
for patttern in [None, rdSubstructLibrary.PatternHolder(), rdSubstructLibrary.TautomerPatternHolder()]:
lib = rdSubstructLibrary.SubstructLibrary(holder)
rdSubstructLibrary.AddPatterns(lib, numThreads=-1)
self.assertEqual(len(lib.GetMolHolder()), len(lib.GetFpHolder()))
for smi in pdb_ligands:
self.assertTrue( lib.CountMatches(Chem.MolFromSmiles(smi)) )
def test_PatternHolder(self):
for holder in [rdSubstructLibrary.PatternHolder,
rdSubstructLibrary.TautomerPatternHolder]:
fname = os.path.join(os.environ["RDBASE"], "Data", "NCI", "first_5K.smi")
suppl = Chem.SmilesMolSupplier(fname, delimiter="\t", titleLine=False)
mols1 = rdSubstructLibrary.CachedTrustedSmilesMolHolder()
fps1 = holder(2048)
ssslib1 = rdSubstructLibrary.SubstructLibrary(mols1, fps1)
mols2 = rdSubstructLibrary.CachedTrustedSmilesMolHolder()
fps2 = holder()
ssslib2 = rdSubstructLibrary.SubstructLibrary(mols2, fps2)
RDLogger.DisableLog('rdApp.error')
for i in range(0, 1000, 10):
try:
mol = suppl[i]
except Exception:
continue
if (not mol):
continue
mols1.AddSmiles(Chem.MolToSmiles(mol))
fps1.AddFingerprint(fps1.MakeFingerprint(mol))
ssslib2.AddMol(mol)
RDLogger.EnableLog('rdApp.error')
query = Chem.MolFromSmarts("N")
self.assertIsNotNone(query)
matches1 = sorted(ssslib1.GetMatches(query))
matches2 = sorted(ssslib2.GetMatches(query))
self.assertEqual(len(matches1), len(matches2))
self.assertTrue(all([m1 == matches2[i] for i, m1 in enumerate(matches1)]))
def testMolBundles(self):
ssl = rdSubstructLibrary.SubstructLibrary()
for smi in ('CCOC','CCNC','COOCOO','CCNC','CCCC'):
ssl.AddMol(Chem.MolFromSmiles(smi))
bndl = Chem.MolBundle()
for smi in ('COC', 'CCC'):
bndl.AddMol(Chem.MolFromSmiles(smi))
self.assertEqual(list(ssl.GetMatches(bndl)),[0, 4])
bndl.AddMol(Chem.MolFromSmiles('CN'))
self.assertEqual(list(sorted(ssl.GetMatches(bndl))), [0, 1, 3, 4])
def testSubstructParameters(self):
ssl = rdSubstructLibrary.SubstructLibrary()
for smi in ('C[C@H](F)Cl','C[C@@H](F)Cl','CC(F)Cl'):
ssl.AddMol(Chem.MolFromSmiles(smi))
bndl = Chem.MolBundle()
for smi in ('C[C@H](F)Cl',):
bndl.AddMol(Chem.MolFromSmiles(smi))
params = Chem.SubstructMatchParameters()
self.assertEqual(list(sorted(ssl.GetMatches(bndl,params))), [0, 1, 2])
params.useChirality = True
self.assertEqual(list(sorted(ssl.GetMatches(bndl,params))), [0])
def testSearchOrder(self):
ssl = rdSubstructLibrary.SubstructLibrary()
for smi in ("CCCOC", "CCCCOCC", "CCOC", "COC", "CCCCCOC"):
ssl.AddMol(Chem.MolFromSmiles(smi))
ssl.SetSearchOrder((3,2,0,1,4))
self.assertEqual(ssl.GetSearchOrder(),(3,2,0,1,4))
qm = Chem.MolFromSmiles('COC')
self.assertEqual(list(ssl.GetMatches(qm,maxResults=2)),[3,2])
def testSearchOrder2(self):
ssl = rdSubstructLibrary.SubstructLibrary()
for smi in ("CCCOC", "CCCCOCC", "CCOC", "COC", "CCCCCOC"):
ssl.AddMol(Chem.MolFromSmiles(smi))
def setSearchSmallestFirst(sslib):
searchOrder = list(range(len(sslib)))
holder = sslib.GetMolHolder()
searchOrder.sort(key=lambda x,holder=holder:holder.GetMol(x).GetNumAtoms())
sslib.SetSearchOrder(searchOrder)
setSearchSmallestFirst(ssl)
qm = Chem.MolFromSmiles('COC')
self.assertEqual(list(ssl.GetMatches(qm)),[3, 2, 0, 1, 4])
if __name__ == '__main__':
unittest.main()
| 40.447227 | 109 | 0.657497 |
b24c13b68a700125579f4922aa4ee6f57d020a98 | 16,872 | py | Python | setup.py | venkatalolla/ose3.9 | 6ad78c11b7b76f1e05bce5b49601cf1f1d95812b | ["Apache-2.0"] | 1 | 2018-11-14T01:47:19.000Z | 2018-11-14T01:47:19.000Z | setup.py | venkatalolla/ose3.9 | 6ad78c11b7b76f1e05bce5b49601cf1f1d95812b | ["Apache-2.0"] | null | null | null | setup.py | venkatalolla/ose3.9 | 6ad78c11b7b76f1e05bce5b49601cf1f1d95812b | ["Apache-2.0"] | 4 | 2018-07-30T02:40:11.000Z | 2019-11-12T06:59:02.000Z | """A setuptools based setup module.
"""
from __future__ import print_function
import os
import fnmatch
import re
import sys
import subprocess
import yaml
# Always prefer setuptools over distutils
from setuptools import setup, Command
from setuptools_lint.setuptools_command import PylintCommand
from six import string_types
from six.moves import reload_module
from yamllint.config import YamlLintConfig
from yamllint.cli import Format
from yamllint import linter
def find_files(base_dir, exclude_dirs, include_dirs, file_regex):
''' find files matching file_regex '''
found = []
exclude_regex = ''
include_regex = ''
if exclude_dirs is not None:
exclude_regex = r'|'.join([fnmatch.translate(x) for x in exclude_dirs]) or r'$.'
# Don't use include_dirs, it is broken
if include_dirs is not None:
include_regex = r'|'.join([fnmatch.translate(x) for x in include_dirs]) or r'$.'
for root, dirs, files in os.walk(base_dir):
if exclude_dirs is not None:
# filter out excludes for dirs
dirs[:] = [d for d in dirs if not re.match(exclude_regex, d)]
if include_dirs is not None:
# filter for includes for dirs
dirs[:] = [d for d in dirs if re.match(include_regex, d)]
matches = [os.path.join(root, f) for f in files if re.search(file_regex, f) is not None]
found.extend(matches)
return found
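# --- Hedged usage example (added; not part of the original file) ---
# find_files() walks a directory tree and returns the paths whose file names
# match a regex, e.g. collecting YAML files while skipping the .tox directory:
#
#     yaml_files = find_files(os.getcwd(), ('.tox',), None, r'^[^\.].*\.ya?ml$')
#
# The lint, yamllint and Ansible syntax-check commands below all rely on it.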
def recursive_search(search_list, field):
"""
Takes a list with nested dicts, and searches all dicts for a key of the
field provided. If the items in the list are not dicts, the items are not
processed.
"""
fields_found = []
for item in search_list:
if isinstance(item, dict):
for key, value in item.items():
if key == field:
fields_found.append(value)
elif isinstance(value, list):
results = recursive_search(value, field)
for result in results:
fields_found.append(result)
return fields_found
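# --- Hedged example (added; not part of the original file) ---
# recursive_search() collects every value stored under a given key anywhere in a
# nested task structure, e.g. (invented data):
#
#     tasks = [{'block': [{'when': 'foo is defined'}]}, {'when': ['bar', 'baz']}]
#     recursive_search(tasks, 'when')   # -> ['foo is defined', ['bar', 'baz']]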
def find_playbooks():
''' find Ansible playbooks'''
all_playbooks = set()
included_playbooks = set()
exclude_dirs = ('adhoc', 'tasks')
for yaml_file in find_files(
os.path.join(os.getcwd(), 'playbooks'),
exclude_dirs, None, r'^[^\.].*\.ya?ml$'):
with open(yaml_file, 'r') as contents:
for task in yaml.safe_load(contents) or {}:
if not isinstance(task, dict):
# Skip yaml files which are not a dictionary of tasks
continue
if 'include' in task or 'import_playbook' in task:
# Add the playbook and capture included playbooks
all_playbooks.add(yaml_file)
if 'include' in task:
directive = task['include']
else:
directive = task['import_playbook']
included_file_name = directive.split()[0]
included_file = os.path.normpath(
os.path.join(os.path.dirname(yaml_file),
included_file_name))
included_playbooks.add(included_file)
elif 'hosts' in task:
all_playbooks.add(yaml_file)
return all_playbooks, included_playbooks
class OpenShiftAnsibleYamlLint(Command):
''' Command to run yamllint '''
description = "Run yamllint tests"
user_options = [
('excludes=', 'e', 'directories to exclude'),
('config-file=', 'c', 'config file to use'),
('format=', 'f', 'format to use (standard, parsable)'),
]
def initialize_options(self):
''' initialize_options '''
# Reason: Defining these attributes as a part of initialize_options is
# consistent with upstream usage
# Status: permanently disabled
# pylint: disable=attribute-defined-outside-init
self.excludes = None
self.config_file = None
self.format = None
def finalize_options(self):
''' finalize_options '''
# Reason: These attributes are defined in initialize_options and this
        # usage is consistent with upstream usage
# Status: permanently disabled
# pylint: disable=attribute-defined-outside-init
if isinstance(self.excludes, string_types):
self.excludes = self.excludes.split(',')
if self.format is None:
self.format = 'standard'
assert (self.format in ['standard', 'parsable']), (
'unknown format {0}.'.format(self.format))
if self.config_file is None:
self.config_file = '.yamllint'
assert os.path.isfile(self.config_file), (
'yamllint config file {0} does not exist.'.format(self.config_file))
def run(self):
''' run command '''
if self.excludes is not None:
print("Excludes:\n{0}".format(yaml.dump(self.excludes, default_flow_style=False)))
config = YamlLintConfig(file=self.config_file)
has_errors = False
has_warnings = False
if self.format == 'parsable':
format_method = Format.parsable
else:
format_method = Format.standard_color
for yaml_file in find_files(os.getcwd(), self.excludes, None, r'^[^\.].*\.ya?ml$'):
first = True
with open(yaml_file, 'r') as contents:
for problem in linter.run(contents, config):
if first and self.format != 'parsable':
print('\n{0}:'.format(os.path.relpath(yaml_file)))
first = False
print(format_method(problem, yaml_file))
if problem.level == linter.PROBLEM_LEVELS[2]:
has_errors = True
elif problem.level == linter.PROBLEM_LEVELS[1]:
has_warnings = True
if has_errors or has_warnings:
print('yamllint issues found')
raise SystemExit(1)
class OpenShiftAnsiblePylint(PylintCommand):
''' Class to override the default behavior of PylintCommand '''
# Reason: This method needs to be an instance method to conform to the
# overridden method's signature
# Status: permanently disabled
# pylint: disable=no-self-use
def find_all_modules(self):
''' find all python files to test '''
exclude_dirs = ('.tox', 'utils', 'test', 'tests', 'git')
modules = []
for match in find_files(os.getcwd(), exclude_dirs, None, r'\.py$'):
package = os.path.basename(match).replace('.py', '')
modules.append(('openshift_ansible', package, match))
return modules
def get_finalized_command(self, cmd):
''' override get_finalized_command to ensure we use our
find_all_modules method '''
if cmd == 'build_py':
return self
# Reason: This method needs to be an instance method to conform to the
# overridden method's signature
# Status: permanently disabled
# pylint: disable=no-self-use
def with_project_on_sys_path(self, func, func_args, func_kwargs):
''' override behavior, since we don't need to build '''
return func(*func_args, **func_kwargs)
class OpenShiftAnsibleGenerateValidation(Command):
''' Command to run generated module validation'''
description = "Run generated module validation"
user_options = []
def initialize_options(self):
''' initialize_options '''
pass
def finalize_options(self):
''' finalize_options '''
pass
# self isn't used but I believe is required when it is called.
# pylint: disable=no-self-use
def run(self):
''' run command '''
# find the files that call generate
generate_files = find_files('roles',
['inventory',
'test',
'playbooks',
'utils'],
None,
'generate.py$')
if len(generate_files) < 1:
print('Did not find any code generation. Please verify module code generation.') # noqa: E501
raise SystemExit(1)
errors = False
for gen in generate_files:
print('Checking generated module code: {0}'.format(gen))
try:
sys.path.insert(0, os.path.dirname(gen))
# we are importing dynamically. This isn't in
# the python path.
# pylint: disable=import-error
import generate
reload_module(generate)
generate.verify()
except generate.GenerateAnsibleException as gae:
print(gae.args)
errors = True
if errors:
print('Found errors while generating module code.')
raise SystemExit(1)
print('\nAll generate scripts passed.\n')
class OpenShiftAnsibleSyntaxCheck(Command):
''' Command to run Ansible syntax check'''
description = "Run Ansible syntax check"
user_options = []
# Colors
FAIL = '\033[31m' # Red
ENDC = '\033[0m' # Reset
def initialize_options(self):
''' initialize_options '''
pass
def finalize_options(self):
''' finalize_options '''
pass
def deprecate_jinja2_in_when(self, yaml_contents, yaml_file):
''' Check for Jinja2 templating delimiters in when conditions '''
test_result = False
failed_items = []
search_results = recursive_search(yaml_contents, 'when')
for item in search_results:
if isinstance(item, str):
if '{{' in item or '{%' in item:
failed_items.append(item)
else:
for sub_item in item:
if '{{' in sub_item or '{%' in sub_item:
failed_items.append(sub_item)
if len(failed_items) > 0:
print('{}Error: Usage of Jinja2 templating delimiters in when '
'conditions is deprecated in Ansible 2.3.\n'
' File: {}'.format(self.FAIL, yaml_file))
for item in failed_items:
print(' Found: "{}"'.format(item))
print(self.ENDC)
test_result = True
return test_result
def deprecate_include(self, yaml_contents, yaml_file):
''' Check for usage of include directive '''
test_result = False
search_results = recursive_search(yaml_contents, 'include')
if len(search_results) > 0:
print('{}Error: The `include` directive is deprecated in Ansible 2.4.\n'
'https://github.com/ansible/ansible/blob/devel/CHANGELOG.md\n'
' File: {}'.format(self.FAIL, yaml_file))
for item in search_results:
print(' Found: "include: {}"'.format(item))
print(self.ENDC)
test_result = True
return test_result
def run(self):
''' run command '''
has_errors = False
print('#' * 60)
print('Ansible Deprecation Checks')
exclude_dirs = ('adhoc', 'files', 'meta', 'vars', 'defaults', '.tox')
for yaml_file in find_files(
os.getcwd(), exclude_dirs, None, r'^[^\.].*\.ya?ml$'):
with open(yaml_file, 'r') as contents:
yaml_contents = yaml.safe_load(contents)
if not isinstance(yaml_contents, list):
continue
# Check for Jinja2 templating delimiters in when conditions
result = self.deprecate_jinja2_in_when(yaml_contents, yaml_file)
has_errors = result or has_errors
# Check for usage of include: directive
result = self.deprecate_include(yaml_contents, yaml_file)
has_errors = result or has_errors
if not has_errors:
print('...PASSED')
all_playbooks, included_playbooks = find_playbooks()
print('#' * 60)
print('Invalid Playbook Include Checks')
invalid_include = []
for playbook in included_playbooks:
# Ignore imported playbooks in 'common', 'private' and 'init'. It is
# expected that these locations would be imported by entry point
# playbooks.
# Ignore playbooks in 'aws', 'azure', 'gcp' and 'openstack' because these
# playbooks do not follow the same component entry point structure.
# Ignore deploy_cluster.yml and prerequisites.yml because these are
# entry point playbooks but are imported by playbooks in the cloud
# provisioning playbooks.
ignored = ('common', 'private', 'init',
'aws', 'azure', 'gcp', 'openstack',
'deploy_cluster.yml', 'prerequisites.yml')
if any(x in playbook for x in ignored):
continue
invalid_include.append(playbook)
if invalid_include:
print('{}Invalid included playbook(s) found. Please ensure'
' component entry point playbooks are not included{}'.format(self.FAIL, self.ENDC))
invalid_include.sort()
for playbook in invalid_include:
print('{}{}{}'.format(self.FAIL, playbook, self.ENDC))
has_errors = True
if not has_errors:
print('...PASSED')
print('#' * 60)
print('Ansible Playbook Entry Point Syntax Checks')
# Evaluate the difference between all playbooks and included playbooks
entrypoint_playbooks = sorted(all_playbooks.difference(included_playbooks))
print('Entry point playbook count: {}'.format(len(entrypoint_playbooks)))
for playbook in entrypoint_playbooks:
print('-' * 60)
print('Syntax checking playbook: {}'.format(playbook))
# Error on any entry points in 'common' or 'private'
invalid_entry_point = ('common', 'private')
if any(x in playbook for x in invalid_entry_point):
print('{}Invalid entry point playbook or orphaned file. Entry'
' point playbooks are not allowed in \'common\' or'
' \'private\' directories{}'.format(self.FAIL, self.ENDC))
has_errors = True
# --syntax-check each entry point playbook
try:
# Create a host group list to avoid WARNING on unmatched host patterns
tox_ansible_inv = os.environ['TOX_ANSIBLE_INV_PATH']
subprocess.check_output(
['ansible-playbook', '-i', tox_ansible_inv,
'--syntax-check', playbook, '-e', '@{}_extras'.format(tox_ansible_inv)]
)
except subprocess.CalledProcessError as cpe:
print('{}Execution failed: {}{}'.format(
self.FAIL, cpe, self.ENDC))
has_errors = True
if has_errors:
raise SystemExit(1)
class UnsupportedCommand(Command):
''' Basic Command to override unsupported commands '''
user_options = []
# Reason: This method needs to be an instance method to conform to the
# overridden method's signature
# Status: permanently disabled
# pylint: disable=no-self-use
def initialize_options(self):
''' initialize_options '''
pass
# Reason: This method needs to be an instance method to conform to the
# overridden method's signature
# Status: permanently disabled
# pylint: disable=no-self-use
def finalize_options(self):
        ''' finalize_options '''
pass
# Reason: This method needs to be an instance method to conform to the
# overridden method's signature
# Status: permanently disabled
# pylint: disable=no-self-use
def run(self):
''' run command '''
print("Unsupported command for openshift-ansible")
setup(
name='openshift-ansible',
license="Apache 2.0",
cmdclass={
'install': UnsupportedCommand,
'develop': UnsupportedCommand,
'build': UnsupportedCommand,
'build_py': UnsupportedCommand,
'build_ext': UnsupportedCommand,
'egg_info': UnsupportedCommand,
'sdist': UnsupportedCommand,
'lint': OpenShiftAnsiblePylint,
'yamllint': OpenShiftAnsibleYamlLint,
'generate_validation': OpenShiftAnsibleGenerateValidation,
'ansible_syntax': OpenShiftAnsibleSyntaxCheck,
},
packages=[],
)
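# --- Usage note (added; not part of the original file) ---
# With the cmdclass mapping above, the custom checks are run through setuptools,
# for example:
#
#     python setup.py lint
#     python setup.py yamllint
#     python setup.py ansible_syntax
#     python setup.py generate_validation
#
# The standard build/install commands are deliberately stubbed out by
# UnsupportedCommand.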
| 37.162996 | 107 | 0.584993 |
66dee474a0314024685f63625b81822e9865ac19 | 9,705 | py | Python | common/src/stack/ws/restapi/views.py | kmcm0/stacki | eb9dff1b45d5725b4986e567876bf61707fec28f | ["BSD-3-Clause"] | null | null | null | common/src/stack/ws/restapi/views.py | kmcm0/stacki | eb9dff1b45d5725b4986e567876bf61707fec28f | ["BSD-3-Clause"] | null | null | null | common/src/stack/ws/restapi/views.py | kmcm0/stacki | eb9dff1b45d5725b4986e567876bf61707fec28f | ["BSD-3-Clause"] | 4 | 2018-07-30T02:40:11.000Z | 2019-11-12T06:59:02.000Z | #
# @copyright@
# Copyright (c) 2006 - 2019 Teradata
# All rights reserved. Stacki(r) v5.x stacki.com
# https://github.com/Teradata/stacki/blob/master/LICENSE.txt
# @copyright@
#
from django.views.generic import View
from django.http import HttpResponse, HttpResponseForbidden
from django.utils.decorators import method_decorator
from django.views.decorators.csrf import ensure_csrf_cookie
from django.contrib.auth import authenticate, login, logout
from stack.restapi.models import BlackList
from stack.restapi.models import SudoList
from stack.exception import CommandError
import stack.commands
from stack.commands import get_mysql_connection
import pymysql
import json
import os
import sys
import subprocess
import tempfile
import logging
import shlex
import re
import traceback
# Start a logger
log = logging.getLogger("SWS")
# MySQL Access Denied Error Codes.
# from: https://mariadb.com/kb/en/mariadb/mariadb-error-codes/
MYSQL_EX = [1044, 1045, 1142, 1143, 1227]
class StackWS(View):
# Decorator Function to check if a user is logged in
def _check_login_(func):
def runner(inst, *args, **kwargs):
request = args[0]
if request.user.is_authenticated():
return func(inst, *args, **kwargs)
else:
j = json.dumps({'logged_in': False})
return HttpResponseForbidden(str(j),
content_type="application/json")
return runner
# HTTP Response to GET. Sole purpose of this function
# is to return a CSRF Cookie
@method_decorator(ensure_csrf_cookie)
def get(self, request):
return HttpResponse('{}')
# Main POST Function. Runs the actual Stacki Command Line
@_check_login_
def post(self, request):
body = json.loads(request.body)
# Get the command being used
cmd = str(body['cmd'])
args = shlex.split(cmd)
# Log to file
log.info("Session Starting")
# filter out all the params
params = [p for p in filter(lambda x: len(x.split('=')) >= 2, args)]
# Filter out all the args
args = [a for a in filter(lambda x: len(x.split('=')) == 1, args)]
done = False
command = None
cmd_arg_list = []
# Get the command module to execute
mod_name = '.'.join(args)
log.info(f'user {request.user.username} called "{mod_name}" {params}')
# Check if command is blacklisted
if self._blacklisted(mod_name):
return HttpResponseForbidden(f'Blacklisted Command: Command {mod_name} is not permitted',
content_type="text/plain")
# Check if user has permission to run
# command module
if not request.user.has_perm(mod_name):
return HttpResponseForbidden('Unauthorized Command: User %s is not allowed to run %s' %
(request.user.username, mod_name),
content_type="text/plain")
try:
# Get command module class
if request.user.is_superuser:
connection = get_mysql_connection()
else:
connection = get_mysql_connection('nobody', '')
while not done:
try:
if len(args) == 0:
done = True
else:
mod_name = '.'.join(args)
mod = 'stack.commands.%s' % mod_name
__import__(mod)
m = eval(mod)
if hasattr(m, 'Command'):
command = m.Command(connection)
# Flush the cache, we do this
# since multiple threads may be
# running and there is no
# mechanism for one thread to
# invalidate the cache of
# another thread.
command.db.clearCache()
done = True
else:
cmd_arg_list.append(args.pop())
except ImportError:
cmd_arg_list.append(args.pop())
except:
log.error("%s" % sys.exc_info()[1])
# If command does not exist, return Not Found
if not command:
output = {"API Error": "Command Not Found"}
return HttpResponse(str(json.dumps(output)),
content_type="application/json",
status=404)
# Handle case where json data is posted (currently only for `stack load`)
with tempfile.NamedTemporaryFile(mode='w') as json_file:
# Parse the command out. A couple of special
# cases are called out.
cmd_arg_list.reverse()
data = body.get('data')
if data:
json.dump(data, json_file)
json_file.flush()
cmd_arg_list.append(json_file.name)
cmd_arg_list.extend(params)
cmd_arg_list.append("output-format=json")
cmd_module = str('.'.join(mod.split('.')[2:]))
# Don't allow "run host" commands. This opens
# the door for arbitrary command executions
if cmd_module == "run.host":
return HttpResponseForbidden(
"'run host' command is not permitted",
content_type="text/plain",
)
# If command is a sync/load command, run
# it with sudo, as the command will require
# some root privileges. However, if the user
# isn't a django superuser (with admin privileges)
# don't allow command to run.
elif self._isSudoCommand(cmd_module):
if not request.user.is_superuser:
cmd_str = cmd_module.replace('.', ' ')
return HttpResponseForbidden(
f"Command \"{cmd_str}\" requires Admin Privileges" ,
content_type="text/plain",
)
# Run the sync command using sudo
else:
c = [
"/usr/bin/sudo",
"/opt/stack/bin/stack",
]
c.extend(cmd_module.split('.'))
c.extend(cmd_arg_list)
log.info(f'{c}')
p = subprocess.Popen(
c,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
encoding='utf-8',
)
output, error = p.communicate()
rc = p.wait()
if rc:
j = {"API Error": error, "Output": output}
return HttpResponse(
str(json.dumps(j)),
content_type="application/json",
status=500,
)
else:
if not output:
output = {}
# Check to see if text is json
try:
j = json.loads(output)
except:
j = {"Output": output}
return HttpResponse(
str(json.dumps(j)),
content_type="application/json",
status=200,
)
# If it's not the sync command, run the
# command module wrapper directly.
else:
try:
rc = command.runWrapper(cmd_module, cmd_arg_list)
# If we hit a database error, check if it's an access
# denied error. If so, sanitize the error message, and
# don't expose database access.
except pymysql.OperationalError as e:
errortext = str(sys.exc_info()[1])
log.error(errortext)
if int(e.args[0]) in MYSQL_EX:
errortext = "Database Permission Denied. Admin privileges required"
status_code = 403
else:
status_code = 500
return HttpResponse(
json.dumps({'API Error': errortext}),
content_type='application/json',
status=status_code,
)
except CommandError as e:
# Get output from command
text = command.getText()
if not text:
text = {}
return HttpResponse(
json.dumps({'API Error': '%s' % e, 'Output': text}),
content_type='application/json',
status=500,
)
# Any other type of error, simply forward it
# to the client
except:
errortext = str(traceback.format_exc())
log.error(errortext)
return HttpResponse(
json.dumps({'API Error': errortext}),
content_type='application/json',
status=500,
)
# Get output from command
text = command.getText()
if not text:
text = {}
# Check to see if text is json
try:
j = json.loads(text)
except:
j = {"Output": text}
return HttpResponse(
str(json.dumps(j)),
content_type="application/json",
)
finally:
if connection:
connection.close()
# Check if command is blacklisted
def _blacklisted(self, mod):
# Get all blacklisted commands
c = BlackList.objects.values("command")
# Get actual command values
bl = map(lambda x: x['command'], c)
for cmd in bl:
# Make sure to match the module names
cmd = re.sub('[ \t]+', '.', cmd)
r = re.compile(str(cmd))
m = r.match(mod)
if m:
# Match the exact blacklisted command
if m.group() == mod:
return True
return False
def _isSudoCommand(self, mod):
# Get a list of all sudo commands
c = SudoList.objects.values("command")
sl = [ i['command'] for i in c ]
for cmd in sl:
cmd = re.sub('[ \t]+', '.', cmd)
r = re.compile(str(cmd))
m = r.match(mod)
if m and m.group() == mod:
return True
return False
# Function to log in the user
def log_in(request):
username = request.POST['USERNAME']
password = request.POST['PASSWORD']
user = authenticate(username=username, password=password)
if user is not None:
login(request, user)
s = {'login': 'True'}
return HttpResponse(str(json.dumps(s)),
content_type="application/json")
else:
s = {'login': 'False'}
return HttpResponse(str(json.dumps(s)),
status=401,
content_type="application/json")
# Function to log out
def log_out(request):
logout(request)
s = {'logout': 'True'}
return HttpResponse(str(json.dumps(s)),
content_type="application/json")
# Function to check log in user
def check_user(request):
if request.user.is_authenticated():
s = {'user': request.user.username}
else:
s = {'user': 'None'}
return HttpResponse(str(json.dumps(s)),
content_type="application/json")
# Upload Function. Uploads a file to the provided location
def upload(request):
ufile = request.FILES["csvFile"]
csv_dir = '/tmp/' + ufile.name
d = open(csv_dir, 'wb+')
chunk = ufile.read()
d.write(chunk)
d.close()
f = open(csv_dir, 'r')
text = f.read()
f.close()
s = {'csv': text, 'name': ufile.name, 'dir': csv_dir}
return HttpResponse(str(json.dumps(s)),
content_type="application/json")
| 25.81117 | 92 | 0.643174 |
2609f4bc19cf76159251c67fb2ad3cc3561c7186 | 4,042 | py | Python | facebook_business/adobjects/audiocopyright.py | s-nez/facebook-python-business-sdk | 4766644c7585d2e262463862f8aae26d5bea2615 | ["CNRI-Python"] | 1 | 2020-05-10T20:53:02.000Z | 2020-05-10T20:53:02.000Z | facebook_business/adobjects/audiocopyright.py | s-nez/facebook-python-business-sdk | 4766644c7585d2e262463862f8aae26d5bea2615 | ["CNRI-Python"] | null | null | null | facebook_business/adobjects/audiocopyright.py | s-nez/facebook-python-business-sdk | 4766644c7585d2e262463862f8aae26d5bea2615 | ["CNRI-Python"] | 1 | 2018-09-24T14:04:48.000Z | 2018-09-24T14:04:48.000Z | # Copyright 2014 Facebook, Inc.
# You are hereby granted a non-exclusive, worldwide, royalty-free license to
# use, copy, modify, and distribute this software in source code or binary
# form for use in connection with the web services and APIs provided by
# Facebook.
# As with any software that integrates with the Facebook platform, your use
# of this software is subject to the Facebook Developer Principles and
# Policies [http://developers.facebook.com/policy/]. This copyright notice
# shall be included in all copies or substantial portions of the software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
from facebook_business.adobjects.abstractobject import AbstractObject
from facebook_business.adobjects.abstractcrudobject import AbstractCrudObject
from facebook_business.adobjects.objectparser import ObjectParser
from facebook_business.api import FacebookRequest
from facebook_business.typechecker import TypeChecker
"""
This class is auto-generated.
For any issues or feature requests related to this class, please let us know on
github and we'll fix in our codegen framework. We'll not be able to accept
pull request for this class.
"""
class AudioCopyright(
AbstractCrudObject,
):
def __init__(self, fbid=None, parent_id=None, api=None):
self._isAudioCopyright = True
super(AudioCopyright, self).__init__(fbid, parent_id, api)
class Field(AbstractObject.Field):
creation_time = 'creation_time'
displayed_matches_count = 'displayed_matches_count'
id = 'id'
in_conflict = 'in_conflict'
isrc = 'isrc'
match_rule = 'match_rule'
ownership_countries = 'ownership_countries'
reference_file_status = 'reference_file_status'
ridge_monitoring_status = 'ridge_monitoring_status'
update_time = 'update_time'
whitelisted_fb_users = 'whitelisted_fb_users'
whitelisted_ig_users = 'whitelisted_ig_users'
def api_get(self, fields=None, params=None, batch=None, success=None, failure=None, pending=False):
from facebook_business.utils import api_utils
if batch is None and (success is not None or failure is not None):
api_utils.warning('`success` and `failure` callback only work for batch call.')
param_types = {
}
enums = {
}
request = FacebookRequest(
node_id=self['id'],
method='GET',
endpoint='/',
api=self._api,
param_checker=TypeChecker(param_types, enums),
target_class=AudioCopyright,
api_type='NODE',
response_parser=ObjectParser(reuse_object=self),
)
request.add_params(params)
request.add_fields(fields)
if batch is not None:
request.add_to_batch(batch, success=success, failure=failure)
return request
elif pending:
return request
else:
self.assure_call()
return request.execute()
_field_types = {
'creation_time': 'datetime',
'displayed_matches_count': 'int',
'id': 'string',
'in_conflict': 'bool',
'isrc': 'string',
'match_rule': 'VideoCopyrightRule',
'ownership_countries': 'list<string>',
'reference_file_status': 'string',
'ridge_monitoring_status': 'string',
'update_time': 'datetime',
'whitelisted_fb_users': 'list<Object>',
'whitelisted_ig_users': 'list<string>',
}
@classmethod
def _get_field_enum_info(cls):
field_enum_info = {}
return field_enum_info
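# Illustrative usage sketch (not part of the generated file). It relies only on the
# api_get() signature defined above; the node ID and the presence of an initialized
# FacebookAdsApi session are assumptions:
#
#     copyright_node = AudioCopyright(fbid='<AUDIO_COPYRIGHT_ID>')
#     copyright_node.api_get(fields=[AudioCopyright.Field.isrc, AudioCopyright.Field.in_conflict])
#     print(copyright_node[AudioCopyright.Field.isrc])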
| 37.775701 | 103 | 0.68951 |
71afc0e93148ea4fd42b32e133043608786f3c7a | 523 | py | Python | packages/pyre/algebraic/Leaf.py | lijun99/pyre | 004dfd4c06489b4ba5b32877338ca6440f2d523b | [
"BSD-3-Clause"
] | 3 | 2019-08-02T21:02:47.000Z | 2021-09-08T13:59:43.000Z | packages/pyre/algebraic/Leaf.py | lijun99/pyre | 004dfd4c06489b4ba5b32877338ca6440f2d523b | [
"BSD-3-Clause"
] | null | null | null | packages/pyre/algebraic/Leaf.py | lijun99/pyre | 004dfd4c06489b4ba5b32877338ca6440f2d523b | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
#
# michael a.g. aïvázis
# orthologue
# (c) 1998-2019 all rights reserved
#
class Leaf:
"""
Mix-in class that provides an implementation of the subset of the interface of {Node} that
requires traversals of the expression graph rooted at leaf nodes.
"""
# interface
@property
def span(self):
"""
Traverse my subgraph and yield all its nodes
"""
# just myself
yield self
# and nothing else
return
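    # Illustrative sketch (not part of pyre): any node class that mixes in Leaf gains the
    # trivial traversal above, e.g.
    #
    #     class Literal(Leaf): pass
    #     node = Literal()
    #     assert list(node.span) == [node]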
# end of file
| 18.034483 | 94 | 0.596558 |
621180752abeaa1151b56d2604b139435c29a535 | 1,124 | py | Python | Algorithms/Dutch_National_Flag_Algorithm.py | Pratik110/Python | 033ade0dff3dc3bee91eefb53d7eb87a4f4f003d | [
"MIT"
] | null | null | null | Algorithms/Dutch_National_Flag_Algorithm.py | Pratik110/Python | 033ade0dff3dc3bee91eefb53d7eb87a4f4f003d | [
"MIT"
] | null | null | null | Algorithms/Dutch_National_Flag_Algorithm.py | Pratik110/Python | 033ade0dff3dc3bee91eefb53d7eb87a4f4f003d | [
"MIT"
] | null | null | null | Algorithm = "Dutch National Flag Algorithm"
Link = "https://leetcode.com/problems/sort-colors/"
Description = "Given an array nums with n objects colored red, white, or blue, sort them in-place so that objects of" \
"the same color are adjacent, with the colors in the order red, white, and blue." \
"We will use the integers 0, 1, and 2 to represent the color red, white, and blue, respectively."
Example = "Input: nums = [2,0,2,1,1,0]" \
"Output: [0,0,1,1,2,2]"
nums = [0,1,1,2,0,1,2,1,1,0]
class Solution:
def sortColors(self,nums):
low = 0
mid = 0
high = len(nums)-1
while mid <= high:
if nums[mid] == 0:
temp = nums[low]
nums[low] = nums[mid]
nums[mid] = temp
low+=1
mid+=1
elif nums[mid] == 1:
mid+=1
elif nums[mid] == 2:
temp = nums[mid]
nums[mid] = nums[high]
nums[high] = temp
high-=1
return nums
print(Solution().sortColors(nums))
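# Quick sanity check (illustrative): the single-pass three-pointer partition should agree
# with a full sort for any mix of 0s, 1s and 2s.
for sample in ([2, 0, 2, 1, 1, 0], [0], [2, 2, 1, 1, 0, 0]):
    assert Solution().sortColors(list(sample)) == sorted(sample)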
| 32.114286 | 119 | 0.508897 |
e9b80c052294c96eda02518562b3049778980752 | 19,147 | py | Python | built-in/MindSpore/Official/cv/detection/CenterFace_for_MindSpore/train.py | Huawei-Ascend/modelzoo | df51ed9c1d6dbde1deef63f2a037a369f8554406 | [
"Apache-2.0"
] | null | null | null | built-in/MindSpore/Official/cv/detection/CenterFace_for_MindSpore/train.py | Huawei-Ascend/modelzoo | df51ed9c1d6dbde1deef63f2a037a369f8554406 | [
"Apache-2.0"
] | 3 | 2021-03-31T20:15:40.000Z | 2022-02-09T23:50:46.000Z | built-in/MindSpore/Official/cv/detection/CenterFace_for_MindSpore/train.py | Huawei-Ascend/modelzoo | df51ed9c1d6dbde1deef63f2a037a369f8554406 | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
######################## train centerface example ########################
train centerface and get network model files (.ckpt):
it is better not to use this file directly; use the training scripts in the 'script' folder instead
"""
import os
from mindspore import context
devid = int(os.getenv('DEVICE_ID'))
# sigmoid needs to be fp32, so enable_auto_mixed_precision=True should not be used
context.set_context(mode=context.GRAPH_MODE, enable_auto_mixed_precision=False,
device_target="Davinci", save_graphs=True, device_id=devid, reserve_class_name_in_scope=False)
#context.set_context(variable_memory_max_size="3GB") # depends on the user's hardware
import time
import argparse
import datetime
import numpy as np
try:
from mindspore.train import ParallelMode
except ImportError:
from mindspore.context import ParallelMode
from mindspore.nn.optim.adam import Adam
from mindspore.nn.optim.momentum import Momentum
from mindspore.nn.optim.sgd import SGD
from mindspore import Tensor
import mindspore.nn as nn
from mindspore.common import dtype as mstype
from mindspore.communication.management import init, get_rank, get_group_size
from mindspore.train.callback import ModelCheckpoint, RunContext
from mindspore.train.callback import _InternalCallbackParam, CheckpointConfig, Callback
import mindspore as ms
from mindspore.train.serialization import load_checkpoint, load_param_into_net
from src.utils import get_logger
from src.utils import AverageMeter
from src.lr_scheduler import warmup_step_lr
from src.lr_scheduler import warmup_cosine_annealing_lr, \
warmup_cosine_annealing_lr_V2, warmup_cosine_annealing_lr_sample
from src.lr_scheduler import MultiStepLR
from src.var_init import default_recurisive_init
from src.centerface import centerface_mobilev2
from src.utils import load_backbone, get_param_groups
from src.config import Config_centerface
from src.centerface import CenterFaceWithLossCell, TrainingWrapper
try:
from src.dataset import get_dataLoader
except ImportError:
from src.dependency.train.dataset import get_dataLoader
def parse_args(cloud_args={}):
parser = argparse.ArgumentParser('mindspore coco training')
# dataset related
parser.add_argument('--data_dir', type=str, default='', help='train data dir')
parser.add_argument('--annot_path', type=str, default='', help='train data annotation path')
parser.add_argument('--img_dir', type=str, default='', help='train data img dir')
parser.add_argument('--per_batch_size', default=32, type=int, help='batch size for per gpu')
# network related
parser.add_argument('--pretrained_backbone', default='', type=str, help='model_path, local pretrained backbone'
' model to load')
parser.add_argument('--resume', default='', type=str, help='path of pretrained centerface_model')
# optimizer and lr related
parser.add_argument('--lr_scheduler', default='multistep', type=str,
help='lr-scheduler, option type: exponential, cosine_annealing')
parser.add_argument('--lr', default=0.001, type=float, help='learning rate of the training')
parser.add_argument('--lr_epochs', type=str, default='220,250', help='epoch of lr changing')
parser.add_argument('--lr_gamma', type=float, default=0.1,
help='decrease lr by a factor of exponential lr_scheduler')
parser.add_argument('--eta_min', type=float, default=0., help='eta_min in cosine_annealing scheduler')
parser.add_argument('--T_max', type=int, default=280, help='T-max in cosine_annealing scheduler')
parser.add_argument('--max_epoch', type=int, default=280, help='max epoch num to train the model')
parser.add_argument('--warmup_epochs', default=0, type=float, help='warmup epoch')
parser.add_argument('--weight_decay', type=float, default=0.0005, help='weight decay')
parser.add_argument('--momentum', type=float, default=0.9, help='momentum')
parser.add_argument('--optimizer', default='adam', type=str,
help='optimizer type, default: adam')
# loss related
parser.add_argument('--loss_scale', type=int, default=1024, help='static loss scale')
parser.add_argument('--label_smooth', type=int, default=0, help='whether to use label smooth in CE')
parser.add_argument('--label_smooth_factor', type=float, default=0.1, help='smooth strength of original one-hot')
# logging related
parser.add_argument('--log_interval', type=int, default=100, help='logging interval')
parser.add_argument('--ckpt_path', type=str, default='outputs/', help='checkpoint save location')
parser.add_argument('--ckpt_interval', type=int, default=None, help='ckpt_interval')
parser.add_argument('--is_save_on_master', type=int, default=1, help='save ckpt on master or all rank')
# distributed related
parser.add_argument('--is_distributed', type=int, default=1, help='if multi device')
parser.add_argument('--rank', type=int, default=0, help='local rank of distributed')
parser.add_argument('--group_size', type=int, default=1, help='world size of distributed')
# roma obs
parser.add_argument('--train_url', type=str, default="", help='train url')
    # profiler init: enable only when debugging; do not enable for training runs since it costs memory and disk space
parser.add_argument('--need_profiler', type=int, default=0, help='whether use profiler')
# reset default config
parser.add_argument('--training_shape', type=str, default="", help='fix training shape')
parser.add_argument('--resize_rate', type=int, default=None, help='resize rate for multi-scale training')
args, _ = parser.parse_known_args()
args = merge_args(args, cloud_args)
if args.lr_scheduler == 'cosine_annealing' and args.max_epoch > args.T_max:
args.T_max = args.max_epoch
args.lr_epochs = list(map(int, args.lr_epochs.split(',')))
return args
def merge_args(args, cloud_args):
args_dict = vars(args)
if isinstance(cloud_args, dict):
for key in cloud_args.keys():
val = cloud_args[key]
if key in args_dict and val:
arg_type = type(args_dict[key])
if arg_type is not type(None):
val = arg_type(val)
args_dict[key] = val
return args
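# Illustrative note (hypothetical values): merge_args lets cloud-side overrides win while
# preserving the argparse types, e.g. merge_args(args, {'lr': '0.01', 'max_epoch': '10'})
# coerces the strings back to float/int because each value is cast with the type of the
# existing argparse default.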
def conver_training_shape(args):
training_shape = [int(args.training_shape), int(args.training_shape)]
return training_shape
def train(cloud_args={}):
args = parse_args(cloud_args)
# init distributed
if args.is_distributed:
init()
args.rank = get_rank()
args.group_size = get_group_size()
    # select whether only the master rank or all ranks save ckpt; compatible with model parallel
args.rank_save_ckpt_flag = 0
if args.is_save_on_master:
if args.rank == 0:
args.rank_save_ckpt_flag = 1
else:
args.rank_save_ckpt_flag = 1
# logger
args.outputs_dir = os.path.join(args.ckpt_path,
datetime.datetime.now().strftime('%Y-%m-%d_time_%H_%M_%S'))
args.logger = get_logger(args.outputs_dir, args.rank)
args.logger.save_args(args)
if args.need_profiler:
from mindspore.profiler.profiling import Profiler
profiler = Profiler(output_path=args.outputs_dir, is_detail=True, is_show_op_path=True)
loss_meter = AverageMeter('loss')
context.reset_auto_parallel_context()
if args.is_distributed:
parallel_mode = ParallelMode.DATA_PARALLEL
degree = get_group_size()
else:
parallel_mode = ParallelMode.STAND_ALONE
degree = 1
# context.set_auto_parallel_context(parallel_mode=parallel_mode, device_num=degree, parameter_broadcast=True, gradients_mean=True)
    # Notice: parameter_broadcast should be supported, but the current version has bugs, so it has been disabled.
    # To make sure the initial weights are the same on every NPU, we set a static seed in default_recurisive_init during weight initialization.
context.set_auto_parallel_context(parallel_mode=parallel_mode, gradients_mean=True, device_num=degree)
network = centerface_mobilev2()
    # init: to avoid overflow, the std of some weights should be small enough
default_recurisive_init(network)
if args.pretrained_backbone:
network = load_backbone(network, args.pretrained_backbone, args)
args.logger.info('load pre-trained backbone {} into network'.format(args.pretrained_backbone))
else:
args.logger.info('Not load pre-trained backbone, please be careful')
if os.path.isfile(args.resume):
param_dict = load_checkpoint(args.resume)
param_dict_new = {}
for key, values in param_dict.items():
if key.startswith('moments.') or key.startswith('moment1.') or key.startswith('moment2.'):
continue
elif key.startswith('centerface_network.'):
param_dict_new[key[19:]] = values
else:
param_dict_new[key] = values
load_param_into_net(network, param_dict_new)
args.logger.info('load_model {} success'.format(args.resume))
else:
args.logger.info('{} not set/exists or not a pre-trained file'.format(args.resume))
network = CenterFaceWithLossCell(network)
args.logger.info('finish get network')
config = Config_centerface()
config.data_dir = args.data_dir
config.annot_path = args.annot_path
config.img_dir = args.img_dir
config.label_smooth = args.label_smooth
config.label_smooth_factor = args.label_smooth_factor
# -------------reset config-----------------
if args.training_shape:
config.multi_scale = [conver_training_shape(args)]
if args.resize_rate:
config.resize_rate = args.resize_rate
# data loader
data_loader, train_sampler = get_dataLoader(config, args)
args.steps_per_epoch = len(data_loader)#data_size
args.logger.info('Finish loading dataset')
if not args.ckpt_interval:
args.ckpt_interval = args.steps_per_epoch
# lr scheduler
if args.lr_scheduler == 'multistep':
lr_fun = MultiStepLR(args.lr, args.lr_epochs, args.lr_gamma, args.steps_per_epoch, args.max_epoch, args.warmup_epochs)
lr = lr_fun.get_lr()
elif args.lr_scheduler == 'exponential':
lr = warmup_step_lr(args.lr,
args.lr_epochs,
args.steps_per_epoch,
args.warmup_epochs,
args.max_epoch,
gamma=args.lr_gamma
)
elif args.lr_scheduler == 'cosine_annealing':
lr = warmup_cosine_annealing_lr(args.lr,
args.steps_per_epoch,
args.warmup_epochs,
args.max_epoch,
args.T_max,
args.eta_min)
elif args.lr_scheduler == 'cosine_annealing_V2':
lr = warmup_cosine_annealing_lr_V2(args.lr,
args.steps_per_epoch,
args.warmup_epochs,
args.max_epoch,
args.T_max,
args.eta_min)
elif args.lr_scheduler == 'cosine_annealing_sample':
lr = warmup_cosine_annealing_lr_sample(args.lr,
args.steps_per_epoch,
args.warmup_epochs,
args.max_epoch,
args.T_max,
args.eta_min)
else:
raise NotImplementedError(args.lr_scheduler)
if args.optimizer == "adam":
opt = Adam(params=get_param_groups(network),
learning_rate=Tensor(lr),
weight_decay=args.weight_decay,
loss_scale=args.loss_scale)
args.logger.info("use adam optimizer")
elif args.optimizer == "sgd":
opt = SGD(params=get_param_groups(network),
learning_rate=Tensor(lr),
momentum=args.momentum,
weight_decay=args.weight_decay,
loss_scale=args.loss_scale)
else:
opt = Momentum(params=get_param_groups(network),
learning_rate=Tensor(lr),
momentum=args.momentum,
weight_decay=args.weight_decay,
loss_scale=args.loss_scale)
network = TrainingWrapper(network, opt, sens=args.loss_scale)
network.set_train()
ckpt_history = []
if args.rank_save_ckpt_flag:
# checkpoint save
ckpt_max_num = args.max_epoch * args.steps_per_epoch // args.ckpt_interval
ckpt_config = CheckpointConfig(save_checkpoint_steps=args.ckpt_interval,
keep_checkpoint_max=ckpt_max_num)
ckpt_cb = ModelCheckpoint(config=ckpt_config,
directory=args.outputs_dir,
prefix='{}'.format(args.rank))
cb_params = _InternalCallbackParam()
cb_params.train_network = network
cb_params.epoch_num = ckpt_max_num
cb_params.cur_epoch_num = 1
run_context = RunContext(cb_params)
ckpt_cb.begin(run_context)
args.logger.info('args.steps_per_epoch = {} args.ckpt_interval ={}'.format(args.steps_per_epoch, args.ckpt_interval))
old_progress = -1
t_end = time.time()
#feed_mode + pytorch dataloader
start_epoch = 0
for epoch in range(start_epoch + 1, args.max_epoch + 1):
for i, batch_load in enumerate(data_loader):
            batch = {}
batch['input'] = batch_load['input'].detach().cpu().numpy()
batch['hm'] = batch_load['hm'].detach().cpu().numpy()
batch['reg_mask'] = batch_load['reg_mask'].detach().cpu().numpy()
batch['ind'] = batch_load['ind'].detach().cpu().numpy()
batch['wh'] = batch_load['wh'].detach().cpu().numpy()
batch['landmarks'] = batch_load['landmarks'].detach().cpu().numpy()
batch['hps_mask'] = batch_load['hps_mask'].detach().cpu().numpy()
batch['wight_mask'] = batch_load['wight_mask'].detach().cpu().numpy()
batch['hm_offset'] = batch_load['hm_offset'].detach().cpu().numpy()
images = batch['input']
hm = batch['hm']
reg_mask = batch['reg_mask']
ind_origin = batch['ind']
wh_origin = batch['wh']
wight_mask_origin = batch['wight_mask']
hm_offset_origin = batch['hm_offset']
hps_mask_origin = batch['hps_mask']
landmarks_origin = batch['landmarks']
batch_size = args.per_batch_size #8
output_res = config.output_res
wh = np.zeros((batch_size, output_res, output_res, 2), dtype=np.float32)
hm_offset = np.zeros((batch_size, output_res, output_res, 2), dtype=np.float32) # reg
ind = np.zeros((batch_size, output_res, output_res), dtype=np.float32)
landmarks = np.zeros((batch_size, output_res, output_res, config.num_joints * 2), dtype=np.float32) # kps
hps_mask = np.zeros((batch_size, output_res, output_res, config.num_joints * 2), dtype=np.float32) # kps_mask
wight_mask = np.zeros((batch_size, output_res, output_res, 2), dtype=np.float32)
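            # The dataset provides sparse per-object ground truth (one row per face, indexed by a
            # flattened heat-map position in `ind`). The loop below scatters those rows into dense
            # (batch, output_res, output_res, C) maps so the loss can be computed per output pixel:
            # `ind` becomes a 0/1 mask and the wh/offset/landmark targets are written at each
            # object's center cell.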
for i_1 in range(batch_size):
batch_ind_origin = ind_origin[i_1]
for k in range(len(batch_ind_origin)):
if batch_ind_origin[k] > 0:
ct_int = [0, 0]
ct_int[0] = batch_ind_origin[k] % output_res
ct_int[1] = batch_ind_origin[k] // output_res
wh[i_1, ct_int[1], ct_int[0], :] = wh_origin[i_1, k, : ]
hm_offset[i_1, ct_int[1], ct_int[0], :] = hm_offset_origin[i_1, k, : ]
ind[i_1, ct_int[1], ct_int[0]] = 1.0
landmarks[i_1, ct_int[1], ct_int[0], : ] = landmarks_origin[i_1, k, : ]
hps_mask[i_1, ct_int[1], ct_int[0], : ] = hps_mask_origin[i_1, k, : ]
wight_mask[i_1, ct_int[1], ct_int[0], 0] = wight_mask_origin[i_1, k]
wight_mask[i_1, ct_int[1], ct_int[0], 1] = wight_mask_origin[i_1, k]
images = Tensor(images)
hm = Tensor(hm)
reg_mask = Tensor(reg_mask)
ind = Tensor(ind)
wh = Tensor(wh)
wight_mask = Tensor(wight_mask)
hm_offset = Tensor(hm_offset)
hps_mask = Tensor(hps_mask)
landmarks = Tensor(landmarks)
loss, overflow, scaling = network(images, hm, reg_mask, ind, wh, wight_mask, hm_offset, hps_mask, landmarks)
# Tensor to numpy
overflow = np.all(overflow.asnumpy())
loss = loss.asnumpy()
loss_meter.update(loss)
args.logger.info('epoch:{}, iter:{}, average_loss:{}, loss:{}, overflow:{}, loss_scale:{}'.format(epoch, i, loss_meter, loss, overflow, scaling.asnumpy()))
end = time.time()
if args.rank_save_ckpt_flag:
# ckpt progress
cb_params.cur_epoch_num = epoch
cb_params.cur_step_num = i + 1 + (epoch-1)*args.steps_per_epoch
cb_params.batch_num = i + 2 + (epoch-1)*args.steps_per_epoch
ckpt_cb.step_end(run_context)
time_used = time.time() - t_end
fps = args.per_batch_size * args.steps_per_epoch * args.group_size / time_used
if args.rank == 0:
args.logger.info('epoch[{}], {}, {:.2f} imgs/sec, lr:{}'.format(epoch, loss_meter, fps, lr[i + (epoch-1)*args.steps_per_epoch]))
t_end = time.time()
loss_meter.reset()
args.logger.info('==========end epoch===============')
        # reset shuffle seed, important to improve performance
train_sampler.set_epoch(epoch)
if args.need_profiler:
profiler.analyse()
args.logger.info('==========end training===============')
if __name__ == "__main__":
train()
| 44.527907 | 167 | 0.629811 |
ddfd44f2d155c982d19c953dfd5fc3ab97262800 | 3,621 | py | Python | intake/source/tests/test_text.py | gramhagen/intake | de4cbb5df78881dc166b1f02743d22067f2bbd78 | [
"BSD-2-Clause"
] | null | null | null | intake/source/tests/test_text.py | gramhagen/intake | de4cbb5df78881dc166b1f02743d22067f2bbd78 | [
"BSD-2-Clause"
] | null | null | null | intake/source/tests/test_text.py | gramhagen/intake | de4cbb5df78881dc166b1f02743d22067f2bbd78 | [
"BSD-2-Clause"
] | null | null | null | #-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2018, Anaconda, Inc. and Intake contributors
# All rights reserved.
#
# The full license is in the LICENSE file, distributed with this software.
#-----------------------------------------------------------------------------
import os
import pytest
import intake
from intake.source.textfiles import TextFilesSource
from intake.source import import_name
from dask.bytes import open_files
here = os.path.abspath(os.path.dirname(__file__))
def test_textfiles(tempdir):
open(os.path.join(tempdir, '1.txt'), 'wt').write('hello\nworld')
open(os.path.join(tempdir, '2.txt'), 'wt').write('hello\nworld')
path = os.path.join(tempdir, '*.txt')
t = TextFilesSource(path)
t.discover()
assert t.npartitions == 2
assert t._get_partition(0) == t.to_dask().to_delayed()[0].compute()
out = t.read()
assert isinstance(out, list)
assert out[0] == 'hello\n'
@pytest.mark.parametrize('comp', [None, 'gzip', 'bz2'])
def test_complex_text(tempdir, comp):
dump, load, read = 'json.dumps', 'json.loads', True
dump = import_name(dump)
data = [{'something': 'simple', 'and': 0}] * 2
for f in ['1.out', '2.out']:
fn = os.path.join(tempdir, f)
with open_files([fn], mode='wt', compression=comp)[0] as fo:
if read:
fo.write(dump(data))
else:
dump(data, fo)
# that was all setup
path = os.path.join(tempdir, '*.out')
t = TextFilesSource(path, text_mode=True, compression=comp,
decoder=load)
t.discover()
assert t.npartitions == 2
assert t._get_partition(0) == t.to_dask().to_delayed()[0].compute()
out = t.read()
assert isinstance(out, list)
assert out[0] == data[0]
@pytest.mark.parametrize('comp', [None, 'gzip', 'bz2'])
@pytest.mark.parametrize('pars', [['msgpack.pack', 'msgpack.unpack', False],
['msgpack.packb', 'msgpack.unpackb', True],
['pickle.dump', 'pickle.load', False],
['pickle.dumps', 'pickle.loads', True]])
def test_complex_bytes(tempdir, comp, pars):
dump, load, read = pars
dump = import_name(dump)
# using bytestrings means not needing extra en/decode argument to msgpack
data = [{b'something': b'simple', b'and': 0}] * 2
for f in ['1.out', '2.out']:
fn = os.path.join(tempdir, f)
with open_files([fn], mode='wb', compression=comp)[0] as fo:
if read:
fo.write(dump(data))
else:
dump(data, fo)
# that was all setup
path = os.path.join(tempdir, '*.out')
t = TextFilesSource(path, text_mode=False, compression=comp,
decoder=load, read=read)
t.discover()
assert t.npartitions == 2
assert t._get_partition(0) == t.to_dask().to_delayed()[0].compute()
out = t.read()
assert isinstance(out, list)
assert out[0] == data[0]
def test_text_persist(temp_cache):
cat = intake.open_catalog(os.path.join(here, 'sources.yaml'))
s = cat.sometext()
s2 = s.persist()
assert s.read() == s2.read()
def test_text_export(temp_cache):
import tempfile
outdir = tempfile.mkdtemp()
cat = intake.open_catalog(os.path.join(here, 'sources.yaml'))
s = cat.sometext()
out = s.export(outdir)
fn = os.path.join(outdir, 'cat.yaml')
with open(fn, 'w') as f:
f.write(out)
cat = intake.open_catalog(fn)
s2 = cat[s.name]()
assert s.read() == s2.read()
| 34.817308 | 78 | 0.573598 |
235f09e4cb837545a38b4a1952ecfa35ee4191db | 1,457 | py | Python | arjuna-samples/workspace/arjex/tests/modules/s01unitee_engine/ep14_my_config/ex01_central.py | test-mile/arjuna | 21880b41e061e11bac2e600a3614684f8af75b2f | [
"Apache-2.0"
] | 9 | 2018-11-15T10:09:17.000Z | 2021-01-12T05:59:19.000Z | arjuna-samples/workspace/arjex/tests/modules/s01unitee_engine/ep14_my_config/ex01_central.py | test-mile/arjuna | 21880b41e061e11bac2e600a3614684f8af75b2f | [
"Apache-2.0"
] | 2 | 2019-07-01T15:33:46.000Z | 2019-07-12T13:04:08.000Z | arjuna-samples/workspace/arjex/tests/modules/s01unitee_engine/ep14_my_config/ex01_central.py | test-mile/arjuna | 21880b41e061e11bac2e600a3614684f8af75b2f | [
"Apache-2.0"
] | 4 | 2018-12-02T15:14:04.000Z | 2020-05-28T12:57:24.000Z | '''
This file is a part of Test Mile Arjuna
Copyright 2018 Test Mile Software Testing Pvt Ltd
Website: www.TestMile.com
Email: support [at] testmile.com
Creator: Rahul Verma
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
from arjuna.tpi.markup import *
from arjuna.tpi.helpers import *
from arjuna.tpi.enums import ArjunaOption
@init_module
def setup_module(my):
console.display(my.context.get_config().get_browser_type())
my.context.ConfigBuilder().chrome().build("chrome_config")
console.display(my.context.get_config("chrome_config").get_browser_type())
@test_function
def test_config_from_central(my):
console.display(my.context.get_config("chrome_config").get_browser_type())
console.display(my.context.get_config().get_browser_type())
my.context.ConfigBuilder().firefox().build("firefox_config")
console.display(my.context.get_config().get_browser_type())
console.display(my.context.get_config("firefox_config").get_browser_type())
| 34.690476 | 79 | 0.780371 |
b0794cfce39bec6671b2b59a773b07d6b360613c | 9,625 | py | Python | simpleplots/utils.py | a-maliarov/simplegraphs | bba3fb9420ce136da4f3d5096a9caff970eba87e | [
"MIT"
] | 2 | 2022-02-09T03:33:00.000Z | 2022-02-14T11:14:21.000Z | simpleplots/utils.py | a-maliarov/simplegraphs | bba3fb9420ce136da4f3d5096a9caff970eba87e | [
"MIT"
] | 2 | 2022-02-06T17:39:39.000Z | 2022-03-31T19:00:13.000Z | simpleplots/utils.py | a-maliarov/simpleplots | bba3fb9420ce136da4f3d5096a9caff970eba87e | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
simpleplots.utils
~~~~~~~~~~~~~~~~~
This module contains simpleplots' utilities.
"""
__all__ = ('get_font', 'get_text_dimensions', 'normalize_float', 'find_gcd',
            'decimals', 'isint', 'normalize_values', 'frange', 'smartrange',
'get_indices_of_values_in_list', 'choose_locator')
from .base import Theme, Size
from .ticker import Locator, AutoLocator, AutoFormatter
from .dates import AutoDateLocator, AutoDateFormatter
from typing import List, Iterable
from numpy.typing import ArrayLike
from numbers import Number
from PIL import ImageFont
from dateutil.relativedelta import relativedelta
from datetime import datetime, timedelta
from functools import reduce
from decimal import *
import numpy as np
import math
import os
getcontext().prec = 6
#-------------------------------------------------------------------------------
DISPLAYABLE: int = 15360 # maximum number of elements per axis
INT_DTYPES: List[str] = ['int8', 'int16', 'int32', 'int64']
FLOAT_DTYPES: List[str] = ['float16', 'float32', 'float64', 'float96', 'float128']
DATE_DTYPE: str = 'datetime64'
#-------------------------------------------------------------------------------
def get_font(type_: str, theme: Theme, image_width: int) -> ImageFont:
"""Return ImageFont based theme, type and image width."""
package_directory_path = os.path.abspath(os.path.dirname(__file__))
fonts_folder = os.path.join(package_directory_path, 'fonts')
if type_ == 'tick_label':
return ImageFont.truetype(
os.path.join(fonts_folder, theme.tick_label_font),
int(image_width * theme.tick_label_size_perc)
)
elif type_ == 'title':
return ImageFont.truetype(
os.path.join(fonts_folder, theme.title_font),
int(image_width * theme.title_size_perc)
)
elif type_ == 'legend':
return ImageFont.truetype(
os.path.join(fonts_folder, theme.legend_font),
int(image_width * theme.legend_size_perc)
)
def get_text_dimensions(text_string: str, font: ImageFont) -> Size:
"""Calculates size of a given text string using given font."""
ascent, descent = font.getmetrics()
text_width = font.getmask(text_string).getbbox()[2]
text_height = font.getmask(text_string).getbbox()[3] + descent
return (text_width, text_height)
#-------------------------------------------------------------------------------
def normalize_float(n: float) -> float:
"""Normalize floats like '1.230000000003' to just '1.23'."""
return float(Decimal(n).normalize())
def find_gcd(lst: List[int]) -> int:
"""Find GCD of a list."""
return reduce(math.gcd, lst)
def decimals(n: float) -> int:
"""Get the number of decimals after comma."""
if 'e' in str(n):
return int(str(n).split('e')[1][1:])
return len(str(n).split('.')[1]) if len(str(n).split('.')) == 2 else 0
def isint(n: Number) -> bool:
"""Check if number is integer even if type if float."""
return isinstance(n, int) or n.is_integer()
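# Illustrative examples of the helpers above (values are what the current implementations
# are expected to return):
#   normalize_float(1.230000000003) -> 1.23
#   decimals(0.125) -> 3
#   isint(4.0) -> True, isint(4.2) -> False
#   find_gcd([4, 8, 6]) -> 2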
#-------------------------------------------------------------------------------
def get_indices_of_values_in_list(values: np.ndarray, lst: np.ndarray) -> np.ndarray:
"""Get indices of values in list A from list B."""
sorter = np.argsort(lst)
ind = sorter[np.searchsorted(lst, values, sorter=sorter)]
return ind
#-------------------------------------------------------------------------------
def normalize_values(values: ArrayLike) -> np.ndarray:
"""Check input values before trying to plot them."""
values = np.asarray(values)
if values.dtype in INT_DTYPES:
step = find_gcd(values)
max_value = np.max(values)
if max_value / step / DISPLAYABLE > 3:
round_to = int(math.log10(int(max_value / step / DISPLAYABLE))) + 1
values = np.around(values, decimals=-round_to)
return values
elif values.dtype in FLOAT_DTYPES:
scale = max([decimals(normalize_float(n)) for n in values])
step = 1 * (10 ** -scale)
max_value = normalize_float(np.max(values))
if max_value / step / DISPLAYABLE > 3:
round_to = int(math.log10(int(max_value / step / DISPLAYABLE))) + 1
values = np.around(values, decimals=round_to)
return np.asarray([normalize_float(n) for n in values])
elif all([isinstance(e, datetime) for e in values]):
values = values.astype('datetime64[s]')
return values
elif DATE_DTYPE in str(values.dtype):
values = values.astype('datetime64[s]')
return values
else:
raise TypeError('unknown input datatype')
#-------------------------------------------------------------------------------
def choose_locator(values: np.ndarray) -> Locator:
"""Returns tick locator based on datatype."""
if values.dtype in INT_DTYPES or values.dtype in FLOAT_DTYPES:
return AutoLocator()
elif DATE_DTYPE in str(values.dtype):
return AutoDateLocator()
else:
raise TypeError('unknown input datatype')
def choose_formatter(values: np.ndarray) -> Locator:
"""Returns label formatter based on datatype."""
if values.dtype in INT_DTYPES or values.dtype in FLOAT_DTYPES:
return AutoFormatter()
elif DATE_DTYPE in str(values.dtype):
return AutoDateFormatter()
else:
raise TypeError('unknown input datatype')
#-------------------------------------------------------------------------------
def frange(start: float, stop: float, step: float = None) -> Iterable[float]:
"""Generates a range between float numbers."""
start, stop = float(start), float(stop)
if not step:
start_scale = len(str(start).split('.')[1])
stop_scale = len(str(stop).split('.')[1])
scale = max(start_scale, stop_scale)
step = 1 * (10 ** -scale)
start, stop = Decimal(start).normalize(), Decimal(stop).normalize()
while start <= stop:
yield float(start)
start += Decimal(step).normalize()
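# Illustrative usage (approximate; the implicit step is derived from the decimal scale of the inputs):
#   list(frange(0.1, 0.5))   -> [0.1, 0.2, 0.3, 0.4, 0.5]
#   list(frange(1, 2, 0.25)) -> [1.0, 1.25, 1.5, 1.75, 2.0]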
#-------------------------------------------------------------------------------
def find_min_timedelta(values: np.ndarray) -> np.timedelta64:
    """Given an array of dates, finds the smallest time unit in which they differ."""
datetime_values = [d.astype(datetime) for d in values]
changes = {
'seconds': 0,
'minutes': 0,
'hours': 0,
'days': 0,
'months': 0,
'years': 0
}
if (any(d.second for d in datetime_values) and not
all(d.second == datetime_values[0].second for d in datetime_values)):
changes['seconds'] = 1
if (any(d.minute for d in datetime_values) and not
all(d.minute == datetime_values[0].minute for d in datetime_values)):
changes['minutes'] = 1
if (any(d.hour for d in datetime_values) and not
all(d.hour == datetime_values[0].hour for d in datetime_values)):
changes['hours'] = 1
if (any(d.day for d in datetime_values) and not
all(d.day == datetime_values[0].day for d in datetime_values)):
changes['days'] = 1
if (any(d.month for d in datetime_values) and not
all(d.month == datetime_values[0].month for d in datetime_values)):
changes['months'] = 1
if (any(d.year for d in datetime_values) and not
all(d.year == datetime_values[0].year for d in datetime_values)):
changes['years'] = 1
if changes['years'] or changes['months']:
return np.timedelta64(1, 'D')
elif changes['days']:
return np.timedelta64(1, 'h')
elif changes['hours']:
return np.timedelta64(1, 'm')
elif changes['minutes']:
return np.timedelta64(1, 's')
#-------------------------------------------------------------------------------
def smartrange(vmin: Number, vmax: Number, origin_values: np.ndarray) -> np.ndarray:
"""Fills gaps between vmin and vmax based on input type."""
if vmin == vmax:
return origin_values
if isinstance(vmin, (float, int)) and isinstance(vmax, (float, int)):
if (isint(vmin) and isint(vmax) and origin_values.dtype in INT_DTYPES):
all_values = np.append(origin_values, [int(vmin), int(vmax)])
step = find_gcd(all_values)
n_range = np.arange(int(vmin), int(vmax) + 1, step)
#-------------------------------------------------------------------
if max([abs(n) for n in n_range]) <= 10 and len(n_range) <= 5:
return np.asarray([i for i in frange(vmin, vmax, 0.1)])
#-------------------------------------------------------------------
return n_range
else:
start, stop = normalize_float(vmin), normalize_float(vmax)
start_scale, stop_scale = decimals(start), decimals(stop)
origin_values = np.asarray([float(n) for n in origin_values])
origin_scale = max([decimals(normalize_float(n)) for n in origin_values])
scale = max(start_scale, stop_scale, origin_scale)
step = 1 * (10 ** -scale)
return np.asarray([i for i in frange(vmin, vmax, step)])
elif DATE_DTYPE in str(origin_values.dtype):
delta = find_min_timedelta(origin_values)
dmin, dmax = np.min(origin_values), np.max(origin_values)
if np.datetime64(vmax) > dmax:
dmax = np.datetime64(vmax)
values = np.arange(dmin, dmax, delta, dtype='datetime64[s]')
values = np.append(values, dmax)
return values
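# Illustrative behaviour (integer branch, hypothetical values): with origin_values = np.array([10, 20, 40]),
# vmin=10 and vmax=40, the common step is the GCD (10), so smartrange is expected to return
# array([10, 20, 30, 40]); float and datetime inputs follow the corresponding branches above.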
#-------------------------------------------------------------------------------
| 35.780669 | 85 | 0.576104 |
74a1980e3f0bb77293f31079ba0b1454e4d68069 | 23,115 | py | Python | tests/test_tables.py | bdemchak/PyCy3 | 4058e41689c78304812b1e6fd8371d797cbb6b5b | [
"MIT"
] | 1 | 2020-02-10T12:50:35.000Z | 2020-02-10T12:50:35.000Z | tests/test_tables.py | bdemchak/PyCy3 | 4058e41689c78304812b1e6fd8371d797cbb6b5b | [
"MIT"
] | 2 | 2020-02-14T21:19:27.000Z | 2020-04-21T21:30:26.000Z | tests/test_tables.py | bdemchak/PyCy3 | 4058e41689c78304812b1e6fd8371d797cbb6b5b | [
"MIT"
] | 1 | 2020-02-10T17:16:17.000Z | 2020-02-10T17:16:17.000Z | # -*- coding: utf-8 -*-
""" Test functions in tables.py.
"""
import numpy as np
"""License:
Copyright 2020 The Cytoscape Consortium
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions
of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import unittest
import pandas as df
from requests import HTTPError
from test_utils import *
class TablesTests(unittest.TestCase):
def setUp(self):
try:
close_session(False)
# delete_all_networks()
except:
pass
def tearDown(self):
pass
@print_entry_exit
def test_delete_table_column(self):
# Initialization
load_test_session()
def check_delete(table, column):
columns = set(get_table_column_names(table=table))
self.assertEqual(delete_table_column(table=table, column=column), '')
columns.discard(column)
fewer_columns = set(get_table_column_names(table=table))
self.assertSetEqual(set(columns), set(fewer_columns))
check_delete('node', 'BetweennessCentrality')
check_delete('edge', 'EdgeBetweenness')
check_delete('node', 'boguscolumn')
self.assertRaises(CyError, delete_table_column, table='bogustable', column='boguscolumn')
self.assertRaises(CyError, get_table_column_names, network='bogus')
@print_entry_exit
def test_get_table_columns(self):
# Initialization
load_test_session()
# Verify that an empty column list returns all columns, and all columns have at least one non-nan value
df = get_table_columns()
self.assertSetEqual(set(df.columns),
{'BetweennessCentrality', 'gal1RGexp', 'Eccentricity', 'Stress', 'NumberOfDirectedEdges',
'NeighborhoodConnectivity', 'NumberOfUndirectedEdges', 'selected', 'gal4RGsig', 'Degree',
'gal80Rsig', 'SUID', 'gal80Rexp', 'TopologicalCoefficient', 'ClusteringCoefficient',
'Radiality', 'gal4RGexp', 'gal1RGsig', 'name', 'degree.layout', 'ClosenessCentrality',
'COMMON', 'AverageShortestPathLength', 'shared name', 'PartnerOfMultiEdgedNodePairs',
'SelfLoops', 'isExcludedFromPaths', 'IsSingleNode'})
self.assertEqual(len(df.index), get_node_count())
self.assertNotIn(False, [True in list(df[col].notnull()) for col in df.columns])
        # Verify that an explicit column list returns exactly those columns, and each has at least one non-nan value
df = get_table_columns(columns=['gal1RGexp', 'Eccentricity', 'Stress'])
self.assertSetEqual(set(df.columns), {'gal1RGexp', 'Eccentricity', 'Stress'})
self.assertEqual(len(df.index), get_node_count())
self.assertNotIn(False, [True in list(df[col].notnull()) for col in df.columns])
# Verify that a column list as a comma-separated string returns exact columns, and each has at least one non-nan value
df = get_table_columns(columns='Stress, NumberOfDirectedEdges')
self.assertSetEqual(set(df.columns), {'Stress', 'NumberOfDirectedEdges'})
self.assertEqual(len(df.index), get_node_count())
self.assertNotIn(False, [True in list(df[col].notnull()) for col in df.columns])
# Verify that a bogus column name still returns a column, though it must be all nan
df = get_table_columns(columns='Stress, bogus')
self.assertSetEqual(set(df.columns), {'Stress', 'bogus'})
self.assertEqual(len(df.index), get_node_count())
self.assertTrue(True in list(df['Stress'].notnull()))
self.assertFalse(False in df['bogus'].isnull())
# Verify that an empty column list returns all columns for edges, too
df = get_table_columns(table='edge')
self.assertSetEqual(set(df.columns),
{'SUID', 'shared name', 'shared interaction', 'name', 'selected', 'interaction',
'EdgeBetweenness'})
self.assertEqual(len(df.index), get_edge_count())
self.assertRaises(CyError, get_table_columns, table='bogustable', columns='boguscolumn')
self.assertRaises(CyError, get_table_columns, network='bogus')
@print_entry_exit
def test_get_table_value(self):
# Initialization
load_test_session()
self.assertEqual(get_table_value('node', 'YDL194W', 'gal1RGexp'), 0.139)
self.assertEqual(get_table_value('node', 'YDL194W', 'Degree'), 1)
self.assertFalse(get_table_value('node', 'YDL194W', 'IsSingleNode'))
self.assertEqual(get_table_value('node', 'YDL194W', 'COMMON'), 'SNF3')
self.assertEqual(get_table_value('edge', 'YLR197W (pp) YOR310C', 'EdgeBetweenness'), 2.0)
self.assertEqual(get_table_value('network', 'galFiltered.sif', 'publication'),
'Integrated Genomic and Proteomic Analyses of a Systematically Perturbed Metabolic Network\n'
'Trey Ideker, Vesteinn Thorsson, Jeffrey A. Ranish, Rowan Christmas, Jeremy Buhler, Jimmy K. Eng, Roger Bumgarner, David R. Goodlett, Ruedi Aebersold, and Leroy Hood\n'
'Science 4 May 2001: 292 (5518), 929-934. [DOI:10.1126/science.292.5518.929]')
# TODO: Fetching a None number raises an error, but should really return a None ... can this be changed?
# TODO: Find out if a null string in Cytoscape is the same thing as a None
self.assertRaises(CyError, get_table_value, 'node', 'YER056CA', 'gal1RGexp')
self.assertIsNone(get_table_value('node', 'YER056CA', 'COMMON'))
self.assertRaises(CyError, get_table_value, 'node', 'YDL194W', 'gal1RGexp', network='bogus')
@print_entry_exit
def test_get_table_column_names(self):
# Initialization
load_test_session()
self.assertSetEqual(set(get_table_column_names()),
{'SUID', 'shared name', 'name', 'selected', 'AverageShortestPathLength',
'BetweennessCentrality', 'ClosenessCentrality', 'ClusteringCoefficient', 'Degree',
'Eccentricity', 'IsSingleNode', 'NeighborhoodConnectivity', 'NumberOfDirectedEdges',
'NumberOfUndirectedEdges', 'PartnerOfMultiEdgedNodePairs', 'Radiality', 'SelfLoops',
'Stress', 'TopologicalCoefficient', 'degree.layout', 'COMMON', 'gal1RGexp', 'gal4RGexp',
'gal80Rexp', 'gal1RGsig', 'gal4RGsig', 'gal80Rsig', 'isExcludedFromPaths'})
self.assertSetEqual(set(get_table_column_names('edge')),
{'SUID', 'shared name', 'shared interaction', 'name', 'selected', 'interaction',
'EdgeBetweenness'})
self.assertSetEqual(set(get_table_column_names('network')),
{'SUID', 'shared name', 'name', 'selected', '__Annotations', 'publication', 'Dataset Name',
'Dataset URL'})
self.assertRaises(CyError, get_table_column_names, 'library')
self.assertRaises(CyError, get_table_column_names, network='bogus')
@print_entry_exit
def test_get_table_column_types(self):
# Initialization
load_test_session()
self.assertDictEqual(get_table_column_types(),
{'SUID': 'Long', 'shared name': 'String', 'name': 'String', 'selected': 'Boolean',
'AverageShortestPathLength': 'Double', 'BetweennessCentrality': 'Double',
'ClosenessCentrality': 'Double', 'ClusteringCoefficient': 'Double', 'Degree': 'Integer',
'Eccentricity': 'Integer', 'IsSingleNode': 'Boolean',
'NeighborhoodConnectivity': 'Double', 'NumberOfDirectedEdges': 'Integer',
'NumberOfUndirectedEdges': 'Integer', 'PartnerOfMultiEdgedNodePairs': 'Integer',
'Radiality': 'Double', 'SelfLoops': 'Integer', 'Stress': 'Long',
'TopologicalCoefficient': 'Double', 'degree.layout': 'Integer', 'COMMON': 'String',
'gal1RGexp': 'Double', 'gal4RGexp': 'Double', 'gal80Rexp': 'Double',
'gal1RGsig': 'Double', 'gal4RGsig': 'Double', 'gal80Rsig': 'Double',
'isExcludedFromPaths': 'Boolean'})
self.assertDictEqual(get_table_column_types('edge'),
{'SUID': 'Long', 'shared name': 'String', 'shared interaction': 'String', 'name': 'String',
'selected': 'Boolean', 'interaction': 'String', 'EdgeBetweenness': 'Double'})
self.assertDictEqual(get_table_column_types('network'),
{'SUID': 'Long', 'shared name': 'String', 'name': 'String', 'selected': 'Boolean',
'__Annotations': 'List', 'publication': 'String', 'Dataset Name': 'String',
'Dataset URL': 'String'})
self.assertRaises(CyError, get_table_column_types, 'library')
self.assertRaises(CyError, get_table_column_types, 'edge', network='bogus')
@print_entry_exit
def test_load_table_data_from_file(self):
def check_table(original_columns, new_column_name, key_values, table_name='node'):
# Make sure we get exactly the expected columns
self.assertSetEqual(set(get_table_column_names(table=table_name)), original_columns | {new_column_name})
# Make sure we get exactly the expected number of values in the new column
table = get_table_columns(table=table_name, columns=['name', new_column_name])
table.dropna(inplace=True)
table.set_index('name', inplace=True)
self.assertEqual(len(table.index), len(key_values))
# Make sure the new column values are as expected
for key, val in key_values:
self.assertEqual(table[new_column_name][key], val)
# Initialization
load_test_session()
node_column_names = set(get_table_column_names())
edge_column_names = set(get_table_column_names(table='edge'))
# Verify that a table with column headers can be loaded into the node table
res = load_table_data_from_file('data/defaultnode_table.tsv', first_row_as_column_names=True)
check_table(node_column_names, 'newcol', [('YDR277C', 2), ('YDL194W', 1), ('YBR043C', 3)])
# Verify that a table with column headers can be loaded into the edge table
res = load_table_data_from_file('data/defaultedge_table.tsv', first_row_as_column_names=True, table='edge')
check_table(edge_column_names, 'newcol_e', [('YDR277C (pp) YDL194W', 1000), ('YDR277C (pp) YJR022W', 2000), ('YPR145W (pp) YMR117C', 3000)], table_name='edge')
# Verify that a spreadsheet with column headers can be loaded into the node table
load_test_session()
res = load_table_data_from_file('data/defaultnode_table.xlsx', first_row_as_column_names=True)
check_table(node_column_names, 'newcol', [('YDR277C', 2), ('YDL194W', 1), ('YBR043C', 3)])
# Verify that a table with no header can be loaded into the node table
load_test_session()
res = load_table_data_from_file('data/defaultnode_table.no-header.tsv', first_row_as_column_names=False)
check_table(node_column_names, '1', [('YDR277C', 2), ('YDL194W', 1), ('YBR043C', 3)])
# Verify that a table with extra lines at the beginning can be loaded into the node table
load_test_session()
res = load_table_data_from_file('data/defaultnode_table.extra-lines.tsv', first_row_as_column_names=False, start_load_row=4)
check_table(node_column_names, '1', [('YDR277C', 2), ('YDL194W', 1), ('YBR043C', 3)])
# Verify that a table with different field delimiters can be loaded into the node table
load_test_session()
res = load_table_data_from_file('data/defaultnode_table.semi-delimiter.txt', first_row_as_column_names=False, delimiters=' ,;')
check_table(node_column_names, '1', [('YDR277C', 2), ('YDL194W', 1), ('YBR043C', 3)])
# Verify that a table with values in a different order can be loaded into the node table
load_test_session()
res = load_table_data_from_file('data/defaultnode_table.backwards.tsv', first_row_as_column_names=True, data_key_column_index=2)
check_table(node_column_names, 'newcol', [('YDR277C', 2), ('YDL194W', 1), ('YBR043C', 3)])
# Verify that a table with indexing on a different table column can be loaded into the node table
load_test_session()
res = load_table_data_from_file('data/defaultnode_table.COMMON.tsv', first_row_as_column_names=True, table_key_column='COMMON')
check_table(node_column_names, 'newcol', [('YDR277C', 2), ('YDL194W', 1), ('YBR043C', 3)])
self.assertRaises(CyError, load_table_data_from_file, 'bogus file name')
self.assertRaises(CyError, load_table_data_from_file, 'data/defaultnode_table.tsv', start_load_row=-1)
self.assertRaises(CyError, load_table_data_from_file, 'data/defaultnode_table.tsv', delimiters='bogus')
self.assertRaises(CyError, load_table_data_from_file, 'data/defaultnode_table.tsv', data_key_column_index='newcol')
self.assertRaises(CyError, load_table_data_from_file, 'data/defaultnode_table.tsv', data_key_column_index=-1)
self.assertRaises(CyError, load_table_data_from_file, 'data/defaultnode_table.tsv', table_key_column='bogus column')
@print_entry_exit
def test_load_table_data(self):
def check_values_added(table_column_names, table_key_name, test_data, data_key_name, data_value_name, table='node'):
data = get_table_columns(table=table)
self.assertEqual(len(table_column_names) + 2, len(data.columns))
self.assertIn(data_key_name, data.columns)
self.assertIn(data_value_name, data.columns)
added_data = data[data[table_key_name] == data[data_key_name]]
self.assertEqual(len(test_data.index), len(added_data.index))
verify_each_newcol_value = [added_data[added_data[data_key_name] == row[data_key_name]].iloc[0][data_value_name] == row[data_value_name]
for row_index, row in test_data.iterrows()]
self.assertNotIn(False, verify_each_newcol_value)
# Initialization
load_test_session()
# Verify that adding into rows that don't exist fails
unrelated_data = df.DataFrame(data={'id': ['New1', 'New2', 'New3'], 'newcol': [1, 2, 3]})
self.assertRaises(CyError, load_table_data, unrelated_data, data_key_column='id', table='node', table_key_column='name')
# Verify that adding into node table rows that do exist succeeds ... checks that string-keys work
column_names_string_keyed = get_table_column_names()
test_data_string_keyed = df.DataFrame(data={'id': ['YDL194W', 'YDR277C', 'YBR043C'], 'newcol': [1, 2, 3]})
res = load_table_data(test_data_string_keyed, data_key_column='id', table='node', table_key_column='name')
self.assertEqual(res, 'Success: Data loaded in defaultnode table')
# Verify that ID column and newcol were added, and that the newcols have values only for the named nodes
check_values_added(column_names_string_keyed, 'name', test_data_string_keyed, 'id', 'newcol')
# Given newcol values, use them as non-string keys to add yet another column
column_names_int_keyed = get_table_column_names()
test_data_int_keyed = df.DataFrame(data={'newcol_val': [1, 2, 3], 'derived': [100, 200, 300]})
res = load_table_data(test_data_int_keyed, data_key_column='newcol_val', table='node', table_key_column='newcol')
self.assertEqual(res, 'Success: Data loaded in defaultnode table')
# Verify that newcol_val column and derived were added, and that derived has values only for the newcol nodes
check_values_added(column_names_int_keyed, 'newcol', test_data_int_keyed, 'newcol_val', 'derived')
# Verify that adding data into edge table rows that do exist succeeds
column_names_string_keyed = get_table_column_names(table='edge')
test_data_string_keyed = df.DataFrame(data={'id_e': ['YDR277C (pp) YDL194W', 'YDR277C (pp) YJR022W', 'YPR145W (pp) YMR117C'], 'newcol_e': [1000, 2000, 3000]})
res = load_table_data(test_data_string_keyed, data_key_column='id_e', table='edge', table_key_column='name')
self.assertEqual(res, 'Success: Data loaded in defaultedge table')
# Verify that newcol_val column and derived were added, and that derived has values only for the newcol nodes
check_values_added(column_names_string_keyed, 'name', test_data_string_keyed, 'id_e', 'newcol_e', table='edge')
# Verify that adding a column with a null works properly, and that adding columns of different types does, too
# While we're at it, eyeball the running time to see that it's not crazy slow
test_data_suid_name = get_table_columns(columns=['SUID', 'name'])
test_data_suid_name['IntCol'] = test_data_suid_name['SUID']
test_data_suid_name['StrCol'] = test_data_suid_name['SUID']
test_data_suid_name['FloatCol'] = test_data_suid_name['SUID']
        test_data_suid_name = test_data_suid_name.astype({'IntCol': np.int64, 'StrCol': str, 'FloatCol': float})
suid_YBL079W = test_data_suid_name.index[test_data_suid_name.name == 'YBL079W'][0]
del test_data_suid_name['name']
test_data_suid_name.at[suid_YBL079W, 'FloatCol'] = np.nan # used to be set_value, but it was deprecated
# test_data_suid_name.set_value(suid_YBL079W, 'FloatCol', np.nan)
res = load_table_data(test_data_suid_name, data_key_column='SUID', table_key_column='SUID')
self.assertEqual(res, 'Success: Data loaded in defaultnode table')
# Make sure that Cytoscape got all of the column types and values right, including the NAN
t = get_table_columns(columns=['SUID', 'IntCol', 'StrCol', 'FloatCol'])
for suid, intcol, strcol, floatcol in zip(t['SUID'], t['IntCol'], t['StrCol'], t['FloatCol']):
str_suid = str(suid)
self.assertEqual(str_suid, str(intcol))
self.assertEqual(str_suid, strcol)
if suid == suid_YBL079W:
self.assertTrue(np.isnan(floatcol))
else:
self.assertEqual(str_suid, str(int(floatcol)))
data = get_table_columns()
self.assertRaises(CyError, load_table_data, data, table='bogus')
self.assertRaises(CyError, load_table_data, data, namespace='bogus')
self.assertRaises(CyError, load_table_data, data, network='bogus')
@print_entry_exit
def test_map_table_column(self):
# Initialization
load_test_session()
# Verify that mapping Yeast from Ensembl to SGD produces a complete (name, SGD) mapping, though
# the number of unmapped symbols depends on the mapping database used ... we can't know this
df = map_table_column('name', 'Yeast', 'Ensembl', 'SGD')
self.assertSetEqual({'name', 'SGD'}, set(df.columns))
self.assertEqual(get_node_count(), len(df.index))
self.assertSetEqual(set(df['name']), set(get_table_columns('node', ['name'])['name']))
empty_mapping = df[df['SGD'].isnull()]
self.assertTrue(0 < len(empty_mapping.index) <= len(df.index))
# Verify that mapping a non-existent column and other bad parameters are caught
self.assertRaises(CyError, map_table_column, 'bogusname', 'Yeast', 'Ensembl', 'SGD')
self.assertRaises(CyError, map_table_column, 'name', 'bogus', 'Ensembl', 'SGD')
self.assertRaises(CyError, map_table_column, 'name', 'Yeast', 'bogus', 'SGD')
self.assertRaises(CyError, map_table_column, 'name', 'Yeast', 'Ensembl', 'bogus')
self.assertRaises(CyError, map_table_column, 'name', 'Yeast', 'Ensembl', 'SGD', table='bogus')
self.assertRaises(CyError, map_table_column, 'name', 'Yeast', 'Ensembl', 'SGD', namespace='bogus')
self.assertRaises(CyError, map_table_column, 'name', 'Yeast', 'Ensembl', 'SGD', network='bogus')
@print_entry_exit
def test_rename_table_column(self):
# Initialization
load_test_session()
# Verify that the rename reports OK and the column name is actually changed
orig_columns = set(get_table_column_names())
expected_columns = orig_columns.copy()
expected_columns.discard('AverageShortestPathLength')
expected_columns.add('xAveragex')
self.assertEqual(rename_table_column('AverageShortestPathLength', 'xAveragex'), '')
self.assertSetEqual(set(get_table_column_names()), expected_columns)
# Verify that invalid parameters raise exceptions
self.assertRaises(CyError, rename_table_column, 'bogus', 'xAveragex')
self.assertRaises(CyError, rename_table_column, '', 'xAveragex')
self.assertRaises(CyError, rename_table_column, None, 'xAveragex')
self.assertRaises(CyError, rename_table_column, 'xAveragex', '')
# self.assertRaises(HTTPError, rename_table_column, 'xAveragex', None) # This should fail, but doesn't
# TODO: CyREST shouldn't allow change of name to None ... it shows up as null in Cytoscape
self.assertRaises(CyError, rename_table_column, 'xAveragex', 'name')
self.assertRaises(CyError, rename_table_column, 'AverageShortestPathLength', 'xAveragex',
network='bogus')
self.assertRaises(CyError, rename_table_column, 'AverageShortestPathLength', 'xAveragex',
namespace='bogus')
self.assertRaises(CyError, rename_table_column, 'AverageShortestPathLength', 'xAveragex',
table='bogus')
if __name__ == '__main__':
unittest.main()
| 59.117647 | 193 | 0.664114 |
630cdbbbaabe9712ec5bd6e0f19e766ff8c98cc0 | 479 | py | Python | ScienceCruiseDataManagement/main/migrations/0026_auto_20170113_0520.py | Swiss-Polar-Institute/science-cruise-data-management | 67721a0f4a1255b8ac43e530ed95a8c324239c7c | [
"MIT"
] | 6 | 2017-10-06T09:18:04.000Z | 2022-02-10T08:54:56.000Z | ScienceCruiseDataManagement/main/migrations/0026_auto_20170113_0520.py | Swiss-Polar-Institute/science-cruise-data-management | 67721a0f4a1255b8ac43e530ed95a8c324239c7c | [
"MIT"
] | 12 | 2020-02-27T09:24:50.000Z | 2021-09-22T17:39:55.000Z | ScienceCruiseDataManagement/main/migrations/0026_auto_20170113_0520.py | Swiss-Polar-Institute/science-cruise-data-management | 67721a0f4a1255b8ac43e530ed95a8c324239c7c | [
"MIT"
] | 1 | 2017-10-16T13:49:33.000Z | 2017-10-16T13:49:33.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2017-01-13 03:11
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import smart_selects.db_fields
class Migration(migrations.Migration):
dependencies = [
('main', '0025_auto_20170113_0451'),
]
operations = [
migrations.RenameModel(
old_name='ParentDevice',
new_name='SamplingMethod',
),
]
| 20.826087 | 48 | 0.659708 |
4b78df008cc4ebb4fb7cf0f8c524fdc3fc0e2ed6 | 25,929 | py | Python | arcade/sprite.py | ercasta/arcade | 4e664d578344ff845fc18f0b5c1cd6f1ee2028ba | [
"MIT"
] | null | null | null | arcade/sprite.py | ercasta/arcade | 4e664d578344ff845fc18f0b5c1cd6f1ee2028ba | [
"MIT"
] | null | null | null | arcade/sprite.py | ercasta/arcade | 4e664d578344ff845fc18f0b5c1cd6f1ee2028ba | [
"MIT"
] | null | null | null | """
This module manages all of the code around Sprites.
For information on Spatial Hash Maps, see:
https://www.gamedev.net/articles/programming/general-and-gameplay-programming/spatial-hashing-r2697/
"""
import math
from arcade.draw_commands import load_texture
from arcade.draw_commands import draw_texture_rectangle
from arcade.draw_commands import Texture
from arcade.draw_commands import rotate_point
from arcade.arcade_types import RGB
from typing import Sequence
from typing import Tuple
FACE_RIGHT = 1
FACE_LEFT = 2
FACE_UP = 3
FACE_DOWN = 4
class Sprite:
"""
Class that represents a 'sprite' on-screen.
Attributes:
:alpha: Transparency of sprite. 0 is invisible, 255 is opaque.
:angle: Rotation angle in degrees.
:bottom: Set/query the sprite location by using the bottom coordinate. \
This will be the 'y' of the bottom of the sprite.
:boundary_left: Used in movement. Left boundary of moving sprite.
:boundary_right: Used in movement. Right boundary of moving sprite.
:boundary_top: Used in movement. Top boundary of moving sprite.
:boundary_bottom: Used in movement. Bottom boundary of moving sprite.
:center_x: X location of the center of the sprite
:center_y: Y location of the center of the sprite
:change_x: Movement vector, in the x direction.
:change_y: Movement vector, in the y direction.
:change_angle: Change in rotation.
:color: Color tint the sprite
:collision_radius: Used as a fast-check to see if this item is close \
enough to another item. If this check works, we do a slower more accurate check.
:cur_texture_index: Index of current texture being used.
:guid: Unique identifier for the sprite. Useful when debugging.
:height: Height of the sprite.
:force: Force being applied to the sprite. Useful when used with Pymunk \
for physics.
:left: Set/query the sprite location by using the left coordinate. This \
will be the 'x' of the left of the sprite.
:points: Points, in relation to the center of the sprite, that are used \
for collision detection. Arcade defaults to creating points for a rectangle \
that encompass the image. If you are creating a ramp or making better \
hit-boxes, you can custom-set these.
:position: A list with the (x, y) of where the sprite is.
:repeat_count_x:
:repeat_count_y:
:right: Set/query the sprite location by using the right coordinate. \
        This will be the 'x' of the right of the sprite.
:sprite_lists: List of all the sprite lists this sprite is part of.
:texture: `Texture` class with the current texture.
:textures: List of textures associated with this sprite.
:top: Set/query the sprite location by using the top coordinate. This \
will be the 'y' of the top of the sprite.
:scale: Scale the image up or down. Scale of 1.0 is original size, 0.5 \
is 1/2 height and width.
:velocity: Change in x, y expressed as a list. (0, 0) would be not moving.
:width: Width of the sprite
It is common to over-ride the `update` method and provide mechanics on
movement or other sprite updates.
    :Example: see the commented usage sketch immediately below this docstring.
"""
def __init__(self,
filename: str=None,
scale: float=1,
image_x: float=0, image_y: float=0,
image_width: float=0, image_height: float=0,
center_x: float=0, center_y: float=0,
repeat_count_x=1, repeat_count_y=1):
"""
Create a new sprite.
Args:
filename (str): Filename of an image that represents the sprite.
scale (float): Scale the image up or down. Scale of 1.0 is none.
image_x (float): Scale the image up or down. Scale of 1.0 is none.
image_y (float): Scale the image up or down. Scale of 1.0 is none.
image_width (float): Width of the sprite
image_height (float): Height of the sprite
center_x (float): Location of the sprite
center_y (float): Location of the sprite
"""
if image_width < 0:
raise ValueError("Width of image can't be less than zero.")
if image_height < 0:
raise ValueError("Height entered is less than zero. Height must be a positive float.")
if image_width == 0 and image_height != 0:
raise ValueError("Width can't be zero.")
if image_height == 0 and image_width != 0:
raise ValueError("Height can't be zero.")
self.sprite_lists = []
if filename is not None:
self.texture = load_texture(filename, image_x, image_y,
image_width, image_height)
self.textures = [self.texture]
self._width = self.texture.width * scale
self._height = self.texture.height * scale
else:
self.textures = []
self._texture = None
self._width = 0
self._height = 0
self.cur_texture_index = 0
self.scale = scale
self._position = [center_x, center_y]
self._angle = 0.0
self.velocity = [0, 0]
self.change_angle = 0
self.boundary_left = None
self.boundary_right = None
self.boundary_top = None
self.boundary_bottom = None
self._alpha = 255
self._collision_radius = None
self._color = (255, 255, 255)
self._points = None
self._point_list_cache = None
self.force = [0, 0]
self.guid = None
self.repeat_count_x = repeat_count_x
self.repeat_count_y = repeat_count_y
def append_texture(self, texture: Texture):
"""
Appends a new texture to the list of textures that can be
applied to this sprite.
"""
self.textures.append(texture)
    def _get_position(self) -> Tuple[float, float]:
        """ Get the (x, y) position of the sprite. """
return (self._position[0], self._position[1])
    def _set_position(self, new_value: Tuple[float, float]):
        """ Set the (x, y) position of the sprite. """
self.clear_spatial_hashes()
self._point_list_cache = None
self._position[0] = new_value[0]
self._position[1] = new_value[1]
self.add_spatial_hashes()
for sprite_list in self.sprite_lists:
sprite_list.update_location(self)
position = property(_get_position, _set_position)
def set_position(self, center_x: float, center_y: float):
"""
Set a sprite's position
"""
if center_x != self._position[0] or center_y != self._position[1]:
from arcade.sprite_list import SpriteList
self.clear_spatial_hashes()
self._point_list_cache = None
self._position[0] = center_x
self._position[1] = center_y
self.add_spatial_hashes()
for sprite_list in self.sprite_lists:
sprite_list.update_location(self)
def set_points(self, points: Sequence[Sequence[float]]):
"""
        Set the points, relative to the sprite's center, that are used for collision detection.
"""
self._points = points
    def get_points(self) -> Tuple[Tuple[float, float], ...]:
"""
Get the corner points for the rect that makes up the sprite.
"""
if self._point_list_cache is not None:
return self._point_list_cache
if self._points is not None:
point_list = []
for point in range(len(self._points)):
point = (self._points[point][0] + self.center_x,
self._points[point][1] + self.center_y)
point_list.append(point)
self._point_list_cache = tuple(point_list)
else:
x1, y1 = rotate_point(self.center_x - self.width / 2,
self.center_y - self.height / 2,
self.center_x,
self.center_y,
self.angle)
x2, y2 = rotate_point(self.center_x + self.width / 2,
self.center_y - self.height / 2,
self.center_x,
self.center_y,
self.angle)
x3, y3 = rotate_point(self.center_x + self.width / 2,
self.center_y + self.height / 2,
self.center_x,
self.center_y,
self.angle)
x4, y4 = rotate_point(self.center_x - self.width / 2,
self.center_y + self.height / 2,
self.center_x,
self.center_y,
self.angle)
self._point_list_cache = ((x1, y1), (x2, y2), (x3, y3), (x4, y4))
return self._point_list_cache
points = property(get_points, set_points)
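    # Worked example of the corner math above (illustrative numbers): a 10x10
    # sprite centered at (0, 0) with angle 0 caches the corners
    # (-5, -5), (5, -5), (5, 5), (-5, 5); after setting angle = 45 each corner
    # is rotated about the center by rotate_point, so e.g. (5, 5) lands on a
    # coordinate axis at distance sqrt(50) ~= 7.07 from the center, and the
    # point list is recomputed because _point_list_cache was cleared.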
def _set_collision_radius(self, collision_radius):
"""
Set the collision radius.
Note: Final collision checking is done via geometry that was
set in get_points/set_points. These points are used in the
check_for_collision function. This collision_radius variable
is used as a "pre-check." We do a super-fast check with
collision_radius and see if the sprites are close. If they are,
then we look at the geometry and figure if they really are colliding.
"""
self._collision_radius = collision_radius
def _get_collision_radius(self):
"""
Get the collision radius.
Note: Final collision checking is done via geometry that was
set in get_points/set_points. These points are used in the
check_for_collision function. This collision_radius variable
is used as a "pre-check." We do a super-fast check with
collision_radius and see if the sprites are close. If they are,
then we look at the geometry and figure if they really are colliding.
"""
if not self._collision_radius:
self._collision_radius = max(self.width, self.height)
return self._collision_radius
collision_radius = property(_get_collision_radius, _set_collision_radius)
def __lt__(self, other):
return self.texture.texture_id.value < other.texture.texture_id.value
def clear_spatial_hashes(self):
for sprite_list in self.sprite_lists:
if sprite_list.use_spatial_hash and sprite_list.spatial_hash is not None:
try:
sprite_list.spatial_hash.remove_object(self)
except ValueError:
print("Warning, attempt to remove item from spatial hash that doesn't exist in the hash.")
def add_spatial_hashes(self):
for sprite_list in self.sprite_lists:
if sprite_list.use_spatial_hash:
sprite_list.spatial_hash.insert_object_for_box(self)
def _get_bottom(self) -> float:
"""
Return the y coordinate of the bottom of the sprite.
"""
points = self.get_points()
my_min = points[0][1]
for point in range(1, len(points)):
my_min = min(my_min, points[point][1])
return my_min
def _set_bottom(self, amount: float):
"""
Set the location of the sprite based on the bottom y coordinate.
"""
lowest = self._get_bottom()
diff = lowest - amount
self.center_y -= diff
bottom = property(_get_bottom, _set_bottom)
def _get_top(self) -> float:
"""
Return the y coordinate of the top of the sprite.
"""
points = self.get_points()
my_max = points[0][1]
for i in range(1, len(points)):
my_max = max(my_max, points[i][1])
return my_max
def _set_top(self, amount: float):
""" The highest y coordinate. """
highest = self._get_top()
diff = highest - amount
self.center_y -= diff
top = property(_get_top, _set_top)
def _get_width(self) -> float:
""" Get the center x coordinate of the sprite. """
return self._width
def _set_width(self, new_value: float):
""" Set the center x coordinate of the sprite. """
if new_value != self._width:
self.clear_spatial_hashes()
self._point_list_cache = None
self._width = new_value
self.add_spatial_hashes()
for sprite_list in self.sprite_lists:
sprite_list.update_position(self)
width = property(_get_width, _set_width)
def _get_height(self) -> float:
""" Get the center x coordinate of the sprite. """
return self._height
def _set_height(self, new_value: float):
""" Set the center x coordinate of the sprite. """
if new_value != self._height:
self.clear_spatial_hashes()
self._point_list_cache = None
self._height = new_value
self.add_spatial_hashes()
for sprite_list in self.sprite_lists:
sprite_list.update_position(self)
height = property(_get_height, _set_height)
def _get_center_x(self) -> float:
""" Get the center x coordinate of the sprite. """
return self._position[0]
def _set_center_x(self, new_value: float):
""" Set the center x coordinate of the sprite. """
if new_value != self._position[0]:
self.clear_spatial_hashes()
self._point_list_cache = None
self._position[0] = new_value
self.add_spatial_hashes()
for sprite_list in self.sprite_lists:
sprite_list.update_position(self)
center_x = property(_get_center_x, _set_center_x)
def _get_center_y(self) -> float:
""" Get the center y coordinate of the sprite. """
return self._position[1]
def _set_center_y(self, new_value: float):
""" Set the center y coordinate of the sprite. """
if new_value != self._position[1]:
self.clear_spatial_hashes()
self._point_list_cache = None
self._position[1] = new_value
self.add_spatial_hashes()
for sprite_list in self.sprite_lists:
sprite_list.update_position(self)
center_y = property(_get_center_y, _set_center_y)
def _get_change_x(self) -> float:
""" Get the velocity in the x plane of the sprite. """
return self.velocity[0]
def _set_change_x(self, new_value: float):
""" Set the velocity in the x plane of the sprite. """
self.velocity[0] = new_value
change_x = property(_get_change_x, _set_change_x)
def _get_change_y(self) -> float:
""" Get the velocity in the y plane of the sprite. """
return self.velocity[1]
def _set_change_y(self, new_value: float):
""" Set the velocity in the y plane of the sprite. """
self.velocity[1] = new_value
change_y = property(_get_change_y, _set_change_y)
def _get_angle(self) -> float:
""" Get the angle of the sprite's rotation. """
return self._angle
def _set_angle(self, new_value: float):
""" Set the angle of the sprite's rotation. """
if new_value != self._angle:
self.clear_spatial_hashes()
self._angle = new_value
self._point_list_cache = None
self.add_spatial_hashes()
for sprite_list in self.sprite_lists:
sprite_list.update_angle(self)
angle = property(_get_angle, _set_angle)
def _get_left(self) -> float:
"""
Left-most coordinate.
"""
points = self.get_points()
my_min = points[0][0]
for i in range(1, len(points)):
my_min = min(my_min, points[i][0])
return my_min
def _set_left(self, amount: float):
""" The left most x coordinate. """
leftmost = self._get_left()
diff = amount - leftmost
self.center_x += diff
left = property(_get_left, _set_left)
def _get_right(self) -> float:
"""
Return the x coordinate of the right-side of the sprite.
"""
points = self.get_points()
my_max = points[0][0]
for point in range(1, len(points)):
my_max = max(my_max, points[point][0])
return my_max
def _set_right(self, amount: float):
""" The right most x coordinate. """
rightmost = self._get_right()
diff = rightmost - amount
self.center_x -= diff
right = property(_get_right, _set_right)
def get_texture(self) -> Texture:
"""
Return the texture that the sprite uses.
"""
return self._texture
def set_texture(self, texture: Texture):
"""
Set the current sprite texture.
"""
if isinstance(texture, Texture):
self.clear_spatial_hashes()
self._point_list_cache = None
self._texture = texture
self._width = texture.width * texture.scale
self._height = texture.height * texture.scale
self.add_spatial_hashes()
for sprite_list in self.sprite_lists:
sprite_list.update_texture(self)
else:
raise SystemError("Can't set the texture to something that is " +
"not an instance of the Texture class.")
texture = property(get_texture, set_texture)
def _get_color(self) -> RGB:
"""
Return the RGB color associated with the sprite.
"""
return self._color
def _set_color(self, color: RGB):
"""
Set the current sprite color as a RGB value
"""
self._color = color
for sprite_list in self.sprite_lists:
sprite_list.update_position(self)
color = property(_get_color, _set_color)
    def _get_alpha(self) -> int:
        """
        Return the alpha (transparency) value of the sprite, 0-255.
        """
return self._alpha
    def _set_alpha(self, alpha: int):
        """
        Set the alpha (transparency) value of the sprite, 0-255.
        """
self._alpha = alpha
for sprite_list in self.sprite_lists:
sprite_list.update_position(self)
alpha = property(_get_alpha, _set_alpha)
def register_sprite_list(self, new_list):
"""
Register this sprite as belonging to a list. We will automatically
        remove ourselves from the list when kill() is called.
"""
self.sprite_lists.append(new_list)
def draw(self):
""" Draw the sprite. """
if self._alpha != 255:
transparent = False
else:
transparent = True
draw_texture_rectangle(self.center_x, self.center_y,
self.width, self.height,
self.texture, self.angle, self.alpha, # TODO: review this function
repeat_count_x=self.repeat_count_x,
repeat_count_y=self.repeat_count_y)
def update(self):
"""
Update the sprite.
"""
self.set_position(self.center_x + self.change_x, self.center_y + self.change_y)
self.angle += self.change_angle
def update_animation(self):
"""
Override this to add code that will change
what image is shown, so the sprite can be
animated.
"""
pass
def remove_from_sprite_lists(self):
"""
Remove the sprite from all sprite lists.
"""
for sprite_list in self.sprite_lists:
if self in sprite_list:
sprite_list.remove(self)
self.sprite_lists.clear()
def kill(self):
"""
Alias of `remove_from_sprite_lists`
"""
self.remove_from_sprite_lists()
class AnimatedTimeSprite(Sprite):
"""
Sprite for platformer games that supports animations.
"""
def __init__(self, scale: float=1,
image_x: float=0, image_y: float=0,
center_x: float=0, center_y: float=0):
super().__init__(scale=scale, image_x=image_x, image_y=image_y,
center_x=center_x, center_y=center_y)
self.state = FACE_RIGHT
self.cur_texture_index = 0
self.texture_change_frames = 5
self.frame = 0
def update_animation(self):
"""
Logic for selecting the proper texture to use.
"""
if self.frame % self.texture_change_frames == 0:
self.cur_texture_index += 1
if self.cur_texture_index >= len(self.textures):
self.cur_texture_index = 0
            # set_texture() expects a Texture instance, so look the next frame up by index
            self.texture = self.textures[self.cur_texture_index]
self.frame += 1
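# A hedged setup sketch for AnimatedTimeSprite (the texture file names below
# are placeholders, not files shipped with arcade):
#
#     walker = AnimatedTimeSprite(scale=1.0, center_x=50, center_y=50)
#     walker.textures.append(load_texture("walk_1.png"))
#     walker.textures.append(load_texture("walk_2.png"))
#     walker.texture_change_frames = 10
#     walker.update_animation()   # call once per frame; texture advances every 10th frame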
class AnimatedWalkingSprite(Sprite):
"""
Sprite for platformer games that supports animations.
"""
def __init__(self, scale: float=1,
image_x: float=0, image_y: float=0,
center_x: float=0, center_y: float=0):
super().__init__(scale=scale, image_x=image_x, image_y=image_y,
center_x=center_x, center_y=center_y)
self.state = FACE_RIGHT
self.stand_right_textures = None
self.stand_left_textures = None
self.walk_left_textures = None
self.walk_right_textures = None
self.walk_up_textures = None
self.walk_down_textures = None
self.cur_texture_index = 0
self.texture_change_distance = 20
self.last_texture_change_center_x = 0
self.last_texture_change_center_y = 0
def update_animation(self):
"""
Logic for selecting the proper texture to use.
"""
x1 = self.center_x
x2 = self.last_texture_change_center_x
y1 = self.center_y
y2 = self.last_texture_change_center_y
distance = math.sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2)
texture_list = []
change_direction = False
if self.change_x > 0 \
and self.change_y == 0 \
and self.state != FACE_RIGHT \
and self.walk_right_textures \
and len(self.walk_right_textures) > 0:
self.state = FACE_RIGHT
change_direction = True
elif self.change_x < 0 and self.change_y == 0 and self.state != FACE_LEFT \
and self.walk_left_textures and len(self.walk_left_textures) > 0:
self.state = FACE_LEFT
change_direction = True
elif self.change_y < 0 and self.change_x == 0 and self.state != FACE_DOWN \
and self.walk_down_textures and len(self.walk_down_textures) > 0:
self.state = FACE_DOWN
change_direction = True
elif self.change_y > 0 and self.change_x == 0 and self.state != FACE_UP \
and self.walk_up_textures and len(self.walk_up_textures) > 0:
self.state = FACE_UP
change_direction = True
if self.change_x == 0 and self.change_y == 0:
if self.state == FACE_LEFT:
self.texture = self.stand_left_textures[0]
elif self.state == FACE_RIGHT:
self.texture = self.stand_right_textures[0]
elif self.state == FACE_UP:
self.texture = self.walk_up_textures[0]
elif self.state == FACE_DOWN:
self.texture = self.walk_down_textures[0]
elif change_direction or distance >= self.texture_change_distance:
self.last_texture_change_center_x = self.center_x
self.last_texture_change_center_y = self.center_y
if self.state == FACE_LEFT:
texture_list = self.walk_left_textures
if texture_list is None or len(texture_list) == 0:
raise RuntimeError("update_animation was called on a sprite that doesn't have a "
"list of walk left textures.")
elif self.state == FACE_RIGHT:
texture_list = self.walk_right_textures
if texture_list is None or len(texture_list) == 0:
raise RuntimeError("update_animation was called on a sprite that doesn't have a list of "
"walk right textures.")
elif self.state == FACE_UP:
texture_list = self.walk_up_textures
if texture_list is None or len(texture_list) == 0:
raise RuntimeError("update_animation was called on a sprite that doesn't have a list of "
"walk up textures.")
elif self.state == FACE_DOWN:
texture_list = self.walk_down_textures
if texture_list is None or len(texture_list) == 0:
raise RuntimeError(
"update_animation was called on a sprite that doesn't have a list of walk down textures.")
self.cur_texture_index += 1
if self.cur_texture_index >= len(texture_list):
self.cur_texture_index = 0
self.texture = texture_list[self.cur_texture_index]
self.width = self.texture.width * self.scale
self.height = self.texture.height * self.scale
def get_distance_between_sprites(sprite1: Sprite, sprite2: Sprite) -> float:
"""
Returns the distance between the two given sprites
"""
distance = math.sqrt((sprite1.center_x - sprite2.center_x) ** 2 + (sprite1.center_y - sprite2.center_y) ** 2)
return distance
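# Quick check of the helper above (illustrative values): two sprites centered
# at (0, 0) and (3, 4) form a 3-4-5 right triangle, so
# get_distance_between_sprites(a, b) returns 5.0.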
| 36.571227 | 114 | 0.594585 |
22ab7ec10b242599583b3010e8c37e57050f61d6 | 9,315 | py | Python | selfdrive/car/honda/carcontroller.py | reddn/openpilothack | f11b2cfef0cfe8fe9e2f2cbc4348f61e83ff27a1 | [
"MIT"
] | 1 | 2021-11-10T05:23:50.000Z | 2021-11-10T05:23:50.000Z | selfdrive/car/honda/carcontroller.py | reddn/openpilothack | f11b2cfef0cfe8fe9e2f2cbc4348f61e83ff27a1 | [
"MIT"
] | null | null | null | selfdrive/car/honda/carcontroller.py | reddn/openpilothack | f11b2cfef0cfe8fe9e2f2cbc4348f61e83ff27a1 | [
"MIT"
] | 1 | 2021-11-10T05:23:52.000Z | 2021-11-10T05:23:52.000Z | from cereal import car
from collections import namedtuple
from selfdrive.boardd.boardd import can_list_to_can_capnp
from selfdrive.controls.lib.drive_helpers import rate_limit
from common.numpy_fast import clip
from selfdrive.car.honda import hondacan
from selfdrive.car.honda.values import AH, CruiseButtons, CAR
from selfdrive.can.packer import CANPacker
import zmq
def actuator_hystereses(brake, braking, brake_steady, v_ego, car_fingerprint):
# hyst params... TODO: move these to VehicleParams
brake_hyst_on = 0.02 # to activate brakes exceed this value
brake_hyst_off = 0.005 # to deactivate brakes below this value
  brake_hyst_gap = 0.01    # don't change brake command for small oscillations within this value
  #*** hysteresis logic to avoid brake blinking. go above 0.1 to trigger
if (brake < brake_hyst_on and not braking) or brake < brake_hyst_off:
brake = 0.
braking = brake > 0.
# for small brake oscillations within brake_hyst_gap, don't change the brake command
if brake == 0.:
brake_steady = 0.
elif brake > brake_steady + brake_hyst_gap:
brake_steady = brake - brake_hyst_gap
elif brake < brake_steady - brake_hyst_gap:
brake_steady = brake + brake_hyst_gap
brake = brake_steady
if (car_fingerprint in (CAR.ACURA_ILX, CAR.CRV)) and brake > 0.0:
brake += 0.15
return brake, braking, brake_steady
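# Worked example of the hysteresis above (illustrative numbers): starting from
# braking=False and brake_steady=0.0, a request of brake=0.008 is below
# brake_hyst_on (0.02) and is zeroed; a request of 0.05 turns braking on and
# sets brake_steady to 0.04 (0.05 - brake_hyst_gap); a following request of
# 0.045 is inside the 0.01 gap around 0.04, so the command holds at 0.04
# instead of chattering.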
def process_hud_alert(hud_alert):
# initialize to no alert
fcw_display = 0
steer_required = 0
acc_alert = 0
if hud_alert == AH.NONE: # no alert
pass
elif hud_alert == AH.FCW: # FCW
fcw_display = hud_alert[1]
elif hud_alert == AH.STEER: # STEER
steer_required = hud_alert[1]
else: # any other ACC alert
acc_alert = hud_alert[1]
return fcw_display, steer_required, acc_alert
HUDData = namedtuple("HUDData",
["pcm_accel", "v_cruise", "mini_car", "car", "X4",
"lanes", "beep", "chime", "fcw", "acc_alert", "steer_required"])
class CarController(object):
def __init__(self, dbc_name, enable_camera=True):
self.braking = False
self.brake_steady = 0.
self.brake_last = 0.
self.enable_camera = enable_camera
self.packer = CANPacker(dbc_name)
self.new_radar_config = False
self.counter = 0
self.lincontext = zmq.Context()
self.linsocket = self.lincontext.socket(zmq.PUB)
self.linsocket.bind("tcp://127.0.0.1:8099")
self.linsocket.send(bytearray([0xFF,0xFF,0xFF,0xFF])) #initializes the LIN pin at 9600 with even parity
self.lkas_active_prev = False
self.lkas_signal_changed_timeout = 10 #use counts of frames at 10hz
def update(self, sendcan, enabled, CS, frame, actuators, \
pcm_speed, pcm_override, pcm_cancel_cmd, pcm_accel, \
radar_error, hud_v_cruise, hud_show_lanes, hud_show_car, \
hud_alert, snd_beep, snd_chime):
""" Controls thread """
if not self.enable_camera:
return
# *** apply brake hysteresis ***
brake, self.braking, self.brake_steady = actuator_hystereses(actuators.brake, self.braking, self.brake_steady, CS.v_ego, CS.CP.carFingerprint)
# *** no output if not enabled ***
#hacked # if not enabled and CS.pcm_acc_status:
# # send pcm acc cancel cmd if drive is disabled but pcm is still on, or if the system can't be activated
# pcm_cancel_cmd = True
# *** rate limit after the enable check ***
self.brake_last = rate_limit(brake, self.brake_last, -2., 1./100) #hacked
# vehicle hud display, wait for one update from 10Hz 0x304 msg
if hud_show_lanes:
hud_lanes = 1
else:
hud_lanes = 0
if enabled:
if hud_show_car:
hud_car = 2
else:
hud_car = 1
else:
hud_car = 0
# For lateral control-only, send chimes as a beep since we don't send 0x1fa
if CS.CP.radarOffCan:
      snd_beep = snd_beep if snd_beep != 0 else snd_chime
#print chime, alert_id, hud_alert
fcw_display, steer_required, acc_alert = process_hud_alert(hud_alert)
hud = HUDData(int(pcm_accel), int(round(hud_v_cruise)), 1, hud_car,
0xc1, hud_lanes, int(snd_beep), snd_chime, fcw_display, acc_alert, steer_required)
if not all(isinstance(x, int) and 0 <= x < 256 for x in hud):
      hud = HUDData(0xc6, 255, 64, 0xc0, 209, 0x40, 0, 0, 0, 0, 0)  # one value per HUDData field (11 total)
# **** process the car messages ****
# *** compute control surfaces ***
BRAKE_MAX = 1024/4
if CS.CP.carFingerprint in (CAR.ACURA_ILX):
STEER_MAX = 0xF00
elif CS.CP.carFingerprint in (CAR.CRV, CAR.ACURA_RDX):
STEER_MAX = 0x3e8 # CR-V only uses 12-bits and requires a lower value (max value from energee)
elif CS.CP.carFingerprint in (CAR.ACCORD_2016):
STEER_MAX = 0x7f
else:
STEER_MAX = 0x1000
# steer torque is converted back to CAN reference (positive when steering right)
apply_gas = clip(actuators.gas, 0., 1.)
apply_brake = int(clip(self.brake_last * BRAKE_MAX, 0, BRAKE_MAX - 1))
apply_steer = int(clip(-actuators.steer * STEER_MAX, -STEER_MAX, STEER_MAX))
# any other cp.vl[0x18F]['STEER_STATUS'] is common and can happen during user override. sending 0 torque to avoid EPS sending error 5
lkas_active = enabled and not CS.steer_not_allowed
if lkas_active != self.lkas_active_prev:
if lkas_active:
self.lkas_signal_changed_timeout = 30
else:
self.lkas_signal_changed_timeout = 50
self.lkas_active_prev = lkas_active
# Send CAN commands.
can_sends = []
# Send steering command.
if CS.CP.carFingerprint in (CAR.ACCORD_2016):
if lkas_active:
lkas_on = 64
lkas_off = 0
chksm_off = 0
big_steer = (apply_steer >> 5) & 0xF
little_steer = apply_steer - (big_steer << 5)
# steer starts from 0, goes to 15, drops to -16 then up to -1
dashed_lanes = 0
if little_steer > 15:
little_steer = little_steer - 32
else:
chksm_on = 0
lkas_on = 0
lkas_off = 64
big_steer = 0
little_steer = 0
dashed_lanes = 1
# accord serial has a 1 bit counter, flipping every refresh
if self.counter == 0:
self.counter = 32
else:
self.counter = 0
# can_sends.append(hondacan.create_steering_control_serial(self.packer, self.counter, big_steer, lkas_on, little_steer, lkas_off, chksm))
self.linsocket.send(hondacan.create_steering_control_serial_lin2linrepeater(frame, big_steer, lkas_on, little_steer))
# self.linsocket.send(hondacan.create_steering_control_serial(frame, self.counter, big_steer, lkas_on, little_steer, lkas_off))
# if ((frame) % 50) == 0:
# can_sends.append(hondacan.create_steering_control_serial_candata(self.packer, self.counter, big_steer, lkas_on, little_steer, lkas_off, chksm, apply_steer, int(clip(actuators.steer * 100,0,100))))
else: # for if CAR.ACCORD_2016
idx = frame % 4
can_sends.append(hondacan.create_steering_control(self.packer, apply_steer, lkas_active, CS.CP.carFingerprint, idx))
# above is commented bc it should not happen on this branch
# Send dashboard UI commands.
if (frame % 10) == 0:
idx = (frame/10) % 4 #create_ui_commands is hacked
if self.lkas_signal_changed_timeout > 0:
signal_changed = 1
self.lkas_signal_changed_timeout -= 1
else:
signal_changed = 0
can_sends.extend(hondacan.create_ui_commands(self.packer, pcm_speed, hud, CS.CP.carFingerprint, idx, dashed_lanes, signal_changed))
# #hack if CS.CP.radarOffCan:
# # If using stock ACC, spam cancel command to kill gas when OP disengages.
# if pcm_cancel_cmd:
# can_sends.append(hondacan.spam_buttons_command(self.packer, CruiseButtons.CANCEL, idx))
# elif CS.stopped:
# can_sends.append(hondacan.spam_buttons_command(self.packer, CruiseButtons.RES_ACCEL, idx))
# else:
# # Send gas and brake commands.
# if (frame % 2) == 0:
# idx = (frame / 2) % 4
# can_sends.append(
# hondacan.create_brake_command(self.packer, apply_brake, pcm_override,
# pcm_cancel_cmd, hud.chime, hud.fcw, idx))
# if CS.CP.enableGasInterceptor:
# # send exactly zero if apply_gas is zero. Interceptor will send the max between read value and apply_gas.
# # This prevents unexpected pedal range rescaling
# can_sends.append(hondacan.create_gas_command(self.packer, apply_gas, idx))
#
# # radar at 20Hz, but these msgs need to be sent at 50Hz on ilx (seems like an Acura bug)
# if CS.CP.carFingerprint == CAR.ACURA_ILX:
# radar_send_step = 2
# else:
# radar_send_step = 5
#
# if (frame % radar_send_step) == 0: #removed radarmod
# idx = (frame/radar_send_step) % 4
# if not self.new_radar_config: # only change state once
# self.new_radar_config = car.RadarState.Error.wrongConfig in radar_error
# can_sends.extend(hondacan.create_radar_commands(self.packer, CS.v_ego, CS.CP.carFingerprint, self.new_radar_config, idx))
sendcan.send(can_list_to_can_capnp(can_sends, msgtype='sendcan').to_bytes())
| 39.978541 | 205 | 0.672678 |
ec8c55df94f2d8baee2f8927737aa128fc29489c | 619 | py | Python | mitmirror/main/adapters/request_adapter.py | Claayton/mitmirror-api | a78ec3aa84aa3685a26bfaf5e1ba2a3f0f8405d1 | [
"MIT"
] | null | null | null | mitmirror/main/adapters/request_adapter.py | Claayton/mitmirror-api | a78ec3aa84aa3685a26bfaf5e1ba2a3f0f8405d1 | [
"MIT"
] | 1 | 2021-10-09T20:42:03.000Z | 2021-10-09T20:42:03.000Z | mitmirror/main/adapters/request_adapter.py | Claayton/mitmirror-api | a78ec3aa84aa3685a26bfaf5e1ba2a3f0f8405d1 | [
"MIT"
] | null | null | null | """Adaptando requisicos para o FastAPI"""
from typing import Callable
from fastapi import Request as RequestFastApi
from mitmirror.presenters.helpers import HttpRequest
async def request_adapter(
request: RequestFastApi, callback: Callable, user_id: int = None
):
"""Adaptador de requisicoes para FastApi"""
body = None
try:
body = await request.json()
except: # pylint: disable=W0702
pass
http_request = HttpRequest(
headers=request.headers, body=body, query=request.query_params
)
http_response = callback(user_id, http_request)
return http_response
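# A hedged wiring sketch (the route path, app object and controller callback
# are illustrative, not defined in this module). Because request_adapter is a
# coroutine, callers must await it:
#
#     @app.get("/users/{user_id}")
#     async def get_user(request: RequestFastApi, user_id: int):
#         return await request_adapter(request, get_user_controller, user_id)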
| 24.76 | 70 | 0.71567 |
2b6a2791dc1b7a8bb8d486e6bb402578ab601b8e | 279 | py | Python | tests/test.py | pfriesch/neural-pipeline | 2df4f7467a721b1fbd93f4439086c6dcee5dac2c | [
"MIT"
] | null | null | null | tests/test.py | pfriesch/neural-pipeline | 2df4f7467a721b1fbd93f4439086c6dcee5dac2c | [
"MIT"
] | null | null | null | tests/test.py | pfriesch/neural-pipeline | 2df4f7467a721b1fbd93f4439086c6dcee5dac2c | [
"MIT"
] | null | null | null | import unittest
from .data_processor_test import *
from .data_producer_test import *
from .train_config_test import *
from .utils_test import *
from .train_test import *
from .predict_test import *
from .monitoring_test import *
if __name__ == '__main__':
unittest.main()
| 19.928571 | 34 | 0.770609 |
43d9f6a83a93b4a0da94df83672385573bdfe775 | 2,940 | py | Python | setup/settings/settings.py | scpaes/jungledevs-challenge | 188bc7a4aacbb278a9486c57685db53be0477d51 | [
"MIT"
] | null | null | null | setup/settings/settings.py | scpaes/jungledevs-challenge | 188bc7a4aacbb278a9486c57685db53be0477d51 | [
"MIT"
] | 6 | 2021-08-10T02:19:35.000Z | 2021-08-10T02:24:05.000Z | setup/settings/settings.py | scpaes/jungledevs-challenge | 188bc7a4aacbb278a9486c57685db53be0477d51 | [
"MIT"
] | null | null | null | from pathlib import Path
import os
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework.authtoken',
'rest_framework_swagger',
'drf_yasg',
'django_filters',
'admin_honeypot',
'rest_framework',
'challengenews'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'setup.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'setup.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
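# Hedged example: to point this project at PostgreSQL instead of the bundled
# SQLite file, the 'default' entry would typically be swapped for something
# like the following (database name and credentials are placeholders):
#
# DATABASES = {
#     'default': {
#         'ENGINE': 'django.db.backends.postgresql',
#         'NAME': 'jungledevs',
#         'USER': 'postgres',
#         'PASSWORD': 'change-me',
#         'HOST': 'localhost',
#         'PORT': '5432',
#     }
# }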
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'pt-br'
TIME_ZONE = 'America/Sao_Paulo'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media_root')
MEDIA_URL = '/media/'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
| 25.344828 | 91 | 0.690476 |
83aab244baacd2ecc9fdca56698c2f070ba52ba2 | 22,498 | py | Python | google/cloud/dialogflow_v2/__init__.py | rkdfc93/python-dialogflow | a59cff0298ef18674c0b4133ef0a6ab82e288920 | [
"Apache-2.0"
] | 171 | 2018-09-19T21:16:18.000Z | 2020-12-07T17:41:10.000Z | google/cloud/dialogflow_v2/__init__.py | rkdfc93/python-dialogflow | a59cff0298ef18674c0b4133ef0a6ab82e288920 | [
"Apache-2.0"
] | 150 | 2018-09-25T14:04:28.000Z | 2020-12-09T21:45:43.000Z | google/cloud/dialogflow_v2/__init__.py | rkdfc93/python-dialogflow | a59cff0298ef18674c0b4133ef0a6ab82e288920 | [
"Apache-2.0"
] | 75 | 2018-09-22T14:12:18.000Z | 2020-12-08T07:12:12.000Z | # -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from .services.agents import AgentsClient
from .services.agents import AgentsAsyncClient
from .services.answer_records import AnswerRecordsClient
from .services.answer_records import AnswerRecordsAsyncClient
from .services.contexts import ContextsClient
from .services.contexts import ContextsAsyncClient
from .services.conversation_datasets import ConversationDatasetsClient
from .services.conversation_datasets import ConversationDatasetsAsyncClient
from .services.conversation_models import ConversationModelsClient
from .services.conversation_models import ConversationModelsAsyncClient
from .services.conversation_profiles import ConversationProfilesClient
from .services.conversation_profiles import ConversationProfilesAsyncClient
from .services.conversations import ConversationsClient
from .services.conversations import ConversationsAsyncClient
from .services.documents import DocumentsClient
from .services.documents import DocumentsAsyncClient
from .services.entity_types import EntityTypesClient
from .services.entity_types import EntityTypesAsyncClient
from .services.environments import EnvironmentsClient
from .services.environments import EnvironmentsAsyncClient
from .services.fulfillments import FulfillmentsClient
from .services.fulfillments import FulfillmentsAsyncClient
from .services.intents import IntentsClient
from .services.intents import IntentsAsyncClient
from .services.knowledge_bases import KnowledgeBasesClient
from .services.knowledge_bases import KnowledgeBasesAsyncClient
from .services.participants import ParticipantsClient
from .services.participants import ParticipantsAsyncClient
from .services.session_entity_types import SessionEntityTypesClient
from .services.session_entity_types import SessionEntityTypesAsyncClient
from .services.sessions import SessionsClient
from .services.sessions import SessionsAsyncClient
from .services.versions import VersionsClient
from .services.versions import VersionsAsyncClient
from .types.agent import Agent
from .types.agent import DeleteAgentRequest
from .types.agent import ExportAgentRequest
from .types.agent import ExportAgentResponse
from .types.agent import GetAgentRequest
from .types.agent import GetValidationResultRequest
from .types.agent import ImportAgentRequest
from .types.agent import RestoreAgentRequest
from .types.agent import SearchAgentsRequest
from .types.agent import SearchAgentsResponse
from .types.agent import SetAgentRequest
from .types.agent import TrainAgentRequest
from .types.answer_record import AgentAssistantFeedback
from .types.answer_record import AgentAssistantRecord
from .types.answer_record import AnswerFeedback
from .types.answer_record import AnswerRecord
from .types.answer_record import ListAnswerRecordsRequest
from .types.answer_record import ListAnswerRecordsResponse
from .types.answer_record import UpdateAnswerRecordRequest
from .types.audio_config import InputAudioConfig
from .types.audio_config import OutputAudioConfig
from .types.audio_config import SpeechContext
from .types.audio_config import SpeechToTextConfig
from .types.audio_config import SpeechWordInfo
from .types.audio_config import SynthesizeSpeechConfig
from .types.audio_config import VoiceSelectionParams
from .types.audio_config import AudioEncoding
from .types.audio_config import OutputAudioEncoding
from .types.audio_config import SpeechModelVariant
from .types.audio_config import SsmlVoiceGender
from .types.context import Context
from .types.context import CreateContextRequest
from .types.context import DeleteAllContextsRequest
from .types.context import DeleteContextRequest
from .types.context import GetContextRequest
from .types.context import ListContextsRequest
from .types.context import ListContextsResponse
from .types.context import UpdateContextRequest
from .types.conversation import CompleteConversationRequest
from .types.conversation import Conversation
from .types.conversation import ConversationPhoneNumber
from .types.conversation import CreateConversationRequest
from .types.conversation import GetConversationRequest
from .types.conversation import ListConversationsRequest
from .types.conversation import ListConversationsResponse
from .types.conversation import ListMessagesRequest
from .types.conversation import ListMessagesResponse
from .types.conversation_dataset import ConversationDataset
from .types.conversation_dataset import ConversationInfo
from .types.conversation_dataset import CreateConversationDatasetOperationMetadata
from .types.conversation_dataset import CreateConversationDatasetRequest
from .types.conversation_dataset import DeleteConversationDatasetOperationMetadata
from .types.conversation_dataset import DeleteConversationDatasetRequest
from .types.conversation_dataset import GetConversationDatasetRequest
from .types.conversation_dataset import ImportConversationDataOperationMetadata
from .types.conversation_dataset import ImportConversationDataOperationResponse
from .types.conversation_dataset import ImportConversationDataRequest
from .types.conversation_dataset import InputConfig
from .types.conversation_dataset import ListConversationDatasetsRequest
from .types.conversation_dataset import ListConversationDatasetsResponse
from .types.conversation_event import ConversationEvent
from .types.conversation_model import ArticleSuggestionModelMetadata
from .types.conversation_model import ConversationModel
from .types.conversation_model import ConversationModelEvaluation
from .types.conversation_model import CreateConversationModelEvaluationOperationMetadata
from .types.conversation_model import CreateConversationModelEvaluationRequest
from .types.conversation_model import CreateConversationModelOperationMetadata
from .types.conversation_model import CreateConversationModelRequest
from .types.conversation_model import DeleteConversationModelOperationMetadata
from .types.conversation_model import DeleteConversationModelRequest
from .types.conversation_model import DeployConversationModelOperationMetadata
from .types.conversation_model import DeployConversationModelRequest
from .types.conversation_model import EvaluationConfig
from .types.conversation_model import GetConversationModelEvaluationRequest
from .types.conversation_model import GetConversationModelRequest
from .types.conversation_model import InputDataset
from .types.conversation_model import ListConversationModelEvaluationsRequest
from .types.conversation_model import ListConversationModelEvaluationsResponse
from .types.conversation_model import ListConversationModelsRequest
from .types.conversation_model import ListConversationModelsResponse
from .types.conversation_model import SmartReplyMetrics
from .types.conversation_model import SmartReplyModelMetadata
from .types.conversation_model import UndeployConversationModelOperationMetadata
from .types.conversation_model import UndeployConversationModelRequest
from .types.conversation_profile import AutomatedAgentConfig
from .types.conversation_profile import ClearSuggestionFeatureConfigOperationMetadata
from .types.conversation_profile import ClearSuggestionFeatureConfigRequest
from .types.conversation_profile import ConversationProfile
from .types.conversation_profile import CreateConversationProfileRequest
from .types.conversation_profile import DeleteConversationProfileRequest
from .types.conversation_profile import GetConversationProfileRequest
from .types.conversation_profile import HumanAgentAssistantConfig
from .types.conversation_profile import HumanAgentHandoffConfig
from .types.conversation_profile import ListConversationProfilesRequest
from .types.conversation_profile import ListConversationProfilesResponse
from .types.conversation_profile import LoggingConfig
from .types.conversation_profile import NotificationConfig
from .types.conversation_profile import SetSuggestionFeatureConfigOperationMetadata
from .types.conversation_profile import SetSuggestionFeatureConfigRequest
from .types.conversation_profile import SuggestionFeature
from .types.conversation_profile import UpdateConversationProfileRequest
from .types.document import CreateDocumentRequest
from .types.document import DeleteDocumentRequest
from .types.document import Document
from .types.document import ExportDocumentRequest
from .types.document import ExportOperationMetadata
from .types.document import GetDocumentRequest
from .types.document import ImportDocumentsRequest
from .types.document import ImportDocumentsResponse
from .types.document import ImportDocumentTemplate
from .types.document import KnowledgeOperationMetadata
from .types.document import ListDocumentsRequest
from .types.document import ListDocumentsResponse
from .types.document import ReloadDocumentRequest
from .types.document import UpdateDocumentRequest
from .types.entity_type import BatchCreateEntitiesRequest
from .types.entity_type import BatchDeleteEntitiesRequest
from .types.entity_type import BatchDeleteEntityTypesRequest
from .types.entity_type import BatchUpdateEntitiesRequest
from .types.entity_type import BatchUpdateEntityTypesRequest
from .types.entity_type import BatchUpdateEntityTypesResponse
from .types.entity_type import CreateEntityTypeRequest
from .types.entity_type import DeleteEntityTypeRequest
from .types.entity_type import EntityType
from .types.entity_type import EntityTypeBatch
from .types.entity_type import GetEntityTypeRequest
from .types.entity_type import ListEntityTypesRequest
from .types.entity_type import ListEntityTypesResponse
from .types.entity_type import UpdateEntityTypeRequest
from .types.environment import CreateEnvironmentRequest
from .types.environment import DeleteEnvironmentRequest
from .types.environment import Environment
from .types.environment import EnvironmentHistory
from .types.environment import GetEnvironmentHistoryRequest
from .types.environment import GetEnvironmentRequest
from .types.environment import ListEnvironmentsRequest
from .types.environment import ListEnvironmentsResponse
from .types.environment import TextToSpeechSettings
from .types.environment import UpdateEnvironmentRequest
from .types.fulfillment import Fulfillment
from .types.fulfillment import GetFulfillmentRequest
from .types.fulfillment import UpdateFulfillmentRequest
from .types.gcs import GcsDestination
from .types.gcs import GcsSources
from .types.human_agent_assistant_event import HumanAgentAssistantEvent
from .types.intent import BatchDeleteIntentsRequest
from .types.intent import BatchUpdateIntentsRequest
from .types.intent import BatchUpdateIntentsResponse
from .types.intent import CreateIntentRequest
from .types.intent import DeleteIntentRequest
from .types.intent import GetIntentRequest
from .types.intent import Intent
from .types.intent import IntentBatch
from .types.intent import ListIntentsRequest
from .types.intent import ListIntentsResponse
from .types.intent import UpdateIntentRequest
from .types.intent import IntentView
from .types.knowledge_base import CreateKnowledgeBaseRequest
from .types.knowledge_base import DeleteKnowledgeBaseRequest
from .types.knowledge_base import GetKnowledgeBaseRequest
from .types.knowledge_base import KnowledgeBase
from .types.knowledge_base import ListKnowledgeBasesRequest
from .types.knowledge_base import ListKnowledgeBasesResponse
from .types.knowledge_base import UpdateKnowledgeBaseRequest
from .types.participant import AnalyzeContentRequest
from .types.participant import AnalyzeContentResponse
from .types.participant import AnnotatedMessagePart
from .types.participant import ArticleAnswer
from .types.participant import AssistQueryParameters
from .types.participant import AutomatedAgentReply
from .types.participant import CreateParticipantRequest
from .types.participant import DtmfParameters
from .types.participant import FaqAnswer
from .types.participant import GetParticipantRequest
from .types.participant import ListParticipantsRequest
from .types.participant import ListParticipantsResponse
from .types.participant import Message
from .types.participant import MessageAnnotation
from .types.participant import OutputAudio
from .types.participant import Participant
from .types.participant import SmartReplyAnswer
from .types.participant import SuggestArticlesRequest
from .types.participant import SuggestArticlesResponse
from .types.participant import SuggestFaqAnswersRequest
from .types.participant import SuggestFaqAnswersResponse
from .types.participant import SuggestionResult
from .types.participant import SuggestSmartRepliesRequest
from .types.participant import SuggestSmartRepliesResponse
from .types.participant import UpdateParticipantRequest
from .types.session import DetectIntentRequest
from .types.session import DetectIntentResponse
from .types.session import EventInput
from .types.session import QueryInput
from .types.session import QueryParameters
from .types.session import QueryResult
from .types.session import Sentiment
from .types.session import SentimentAnalysisRequestConfig
from .types.session import SentimentAnalysisResult
from .types.session import StreamingDetectIntentRequest
from .types.session import StreamingDetectIntentResponse
from .types.session import StreamingRecognitionResult
from .types.session import TextInput
from .types.session_entity_type import CreateSessionEntityTypeRequest
from .types.session_entity_type import DeleteSessionEntityTypeRequest
from .types.session_entity_type import GetSessionEntityTypeRequest
from .types.session_entity_type import ListSessionEntityTypesRequest
from .types.session_entity_type import ListSessionEntityTypesResponse
from .types.session_entity_type import SessionEntityType
from .types.session_entity_type import UpdateSessionEntityTypeRequest
from .types.validation_result import ValidationError
from .types.validation_result import ValidationResult
from .types.version import CreateVersionRequest
from .types.version import DeleteVersionRequest
from .types.version import GetVersionRequest
from .types.version import ListVersionsRequest
from .types.version import ListVersionsResponse
from .types.version import UpdateVersionRequest
from .types.version import Version
from .types.webhook import OriginalDetectIntentRequest
from .types.webhook import WebhookRequest
from .types.webhook import WebhookResponse
__all__ = (
"AgentsAsyncClient",
"AnswerRecordsAsyncClient",
"ContextsAsyncClient",
"ConversationDatasetsAsyncClient",
"ConversationModelsAsyncClient",
"ConversationProfilesAsyncClient",
"ConversationsAsyncClient",
"DocumentsAsyncClient",
"EntityTypesAsyncClient",
"EnvironmentsAsyncClient",
"FulfillmentsAsyncClient",
"IntentsAsyncClient",
"KnowledgeBasesAsyncClient",
"ParticipantsAsyncClient",
"SessionEntityTypesAsyncClient",
"SessionsAsyncClient",
"VersionsAsyncClient",
"Agent",
"AgentAssistantFeedback",
"AgentAssistantRecord",
"AgentsClient",
"AnalyzeContentRequest",
"AnalyzeContentResponse",
"AnnotatedMessagePart",
"AnswerFeedback",
"AnswerRecord",
"AnswerRecordsClient",
"ArticleAnswer",
"ArticleSuggestionModelMetadata",
"AssistQueryParameters",
"AudioEncoding",
"AutomatedAgentConfig",
"AutomatedAgentReply",
"BatchCreateEntitiesRequest",
"BatchDeleteEntitiesRequest",
"BatchDeleteEntityTypesRequest",
"BatchDeleteIntentsRequest",
"BatchUpdateEntitiesRequest",
"BatchUpdateEntityTypesRequest",
"BatchUpdateEntityTypesResponse",
"BatchUpdateIntentsRequest",
"BatchUpdateIntentsResponse",
"ClearSuggestionFeatureConfigOperationMetadata",
"ClearSuggestionFeatureConfigRequest",
"CompleteConversationRequest",
"Context",
"ContextsClient",
"Conversation",
"ConversationDataset",
"ConversationDatasetsClient",
"ConversationEvent",
"ConversationInfo",
"ConversationModel",
"ConversationModelEvaluation",
"ConversationModelsClient",
"ConversationPhoneNumber",
"ConversationProfile",
"ConversationProfilesClient",
"ConversationsClient",
"CreateContextRequest",
"CreateConversationDatasetOperationMetadata",
"CreateConversationDatasetRequest",
"CreateConversationModelEvaluationOperationMetadata",
"CreateConversationModelEvaluationRequest",
"CreateConversationModelOperationMetadata",
"CreateConversationModelRequest",
"CreateConversationProfileRequest",
"CreateConversationRequest",
"CreateDocumentRequest",
"CreateEntityTypeRequest",
"CreateEnvironmentRequest",
"CreateIntentRequest",
"CreateKnowledgeBaseRequest",
"CreateParticipantRequest",
"CreateSessionEntityTypeRequest",
"CreateVersionRequest",
"DeleteAgentRequest",
"DeleteAllContextsRequest",
"DeleteContextRequest",
"DeleteConversationDatasetOperationMetadata",
"DeleteConversationDatasetRequest",
"DeleteConversationModelOperationMetadata",
"DeleteConversationModelRequest",
"DeleteConversationProfileRequest",
"DeleteDocumentRequest",
"DeleteEntityTypeRequest",
"DeleteEnvironmentRequest",
"DeleteIntentRequest",
"DeleteKnowledgeBaseRequest",
"DeleteSessionEntityTypeRequest",
"DeleteVersionRequest",
"DeployConversationModelOperationMetadata",
"DeployConversationModelRequest",
"DetectIntentRequest",
"DetectIntentResponse",
"Document",
"DocumentsClient",
"DtmfParameters",
"EntityType",
"EntityTypeBatch",
"EntityTypesClient",
"Environment",
"EnvironmentHistory",
"EnvironmentsClient",
"EvaluationConfig",
"EventInput",
"ExportAgentRequest",
"ExportAgentResponse",
"ExportDocumentRequest",
"ExportOperationMetadata",
"FaqAnswer",
"Fulfillment",
"FulfillmentsClient",
"GcsDestination",
"GcsSources",
"GetAgentRequest",
"GetContextRequest",
"GetConversationDatasetRequest",
"GetConversationModelEvaluationRequest",
"GetConversationModelRequest",
"GetConversationProfileRequest",
"GetConversationRequest",
"GetDocumentRequest",
"GetEntityTypeRequest",
"GetEnvironmentHistoryRequest",
"GetEnvironmentRequest",
"GetFulfillmentRequest",
"GetIntentRequest",
"GetKnowledgeBaseRequest",
"GetParticipantRequest",
"GetSessionEntityTypeRequest",
"GetValidationResultRequest",
"GetVersionRequest",
"HumanAgentAssistantConfig",
"HumanAgentAssistantEvent",
"HumanAgentHandoffConfig",
"ImportAgentRequest",
"ImportConversationDataOperationMetadata",
"ImportConversationDataOperationResponse",
"ImportConversationDataRequest",
"ImportDocumentTemplate",
"ImportDocumentsRequest",
"ImportDocumentsResponse",
"InputAudioConfig",
"InputConfig",
"InputDataset",
"Intent",
"IntentBatch",
"IntentView",
"IntentsClient",
"KnowledgeBase",
"KnowledgeBasesClient",
"KnowledgeOperationMetadata",
"ListAnswerRecordsRequest",
"ListAnswerRecordsResponse",
"ListContextsRequest",
"ListContextsResponse",
"ListConversationDatasetsRequest",
"ListConversationDatasetsResponse",
"ListConversationModelEvaluationsRequest",
"ListConversationModelEvaluationsResponse",
"ListConversationModelsRequest",
"ListConversationModelsResponse",
"ListConversationProfilesRequest",
"ListConversationProfilesResponse",
"ListConversationsRequest",
"ListConversationsResponse",
"ListDocumentsRequest",
"ListDocumentsResponse",
"ListEntityTypesRequest",
"ListEntityTypesResponse",
"ListEnvironmentsRequest",
"ListEnvironmentsResponse",
"ListIntentsRequest",
"ListIntentsResponse",
"ListKnowledgeBasesRequest",
"ListKnowledgeBasesResponse",
"ListMessagesRequest",
"ListMessagesResponse",
"ListParticipantsRequest",
"ListParticipantsResponse",
"ListSessionEntityTypesRequest",
"ListSessionEntityTypesResponse",
"ListVersionsRequest",
"ListVersionsResponse",
"LoggingConfig",
"Message",
"MessageAnnotation",
"NotificationConfig",
"OriginalDetectIntentRequest",
"OutputAudio",
"OutputAudioConfig",
"OutputAudioEncoding",
"Participant",
"ParticipantsClient",
"QueryInput",
"QueryParameters",
"QueryResult",
"ReloadDocumentRequest",
"RestoreAgentRequest",
"SearchAgentsRequest",
"SearchAgentsResponse",
"Sentiment",
"SentimentAnalysisRequestConfig",
"SentimentAnalysisResult",
"SessionEntityType",
"SessionEntityTypesClient",
"SessionsClient",
"SetAgentRequest",
"SetSuggestionFeatureConfigOperationMetadata",
"SetSuggestionFeatureConfigRequest",
"SmartReplyAnswer",
"SmartReplyMetrics",
"SmartReplyModelMetadata",
"SpeechContext",
"SpeechModelVariant",
"SpeechToTextConfig",
"SpeechWordInfo",
"SsmlVoiceGender",
"StreamingDetectIntentRequest",
"StreamingDetectIntentResponse",
"StreamingRecognitionResult",
"SuggestArticlesRequest",
"SuggestArticlesResponse",
"SuggestFaqAnswersRequest",
"SuggestFaqAnswersResponse",
"SuggestSmartRepliesRequest",
"SuggestSmartRepliesResponse",
"SuggestionFeature",
"SuggestionResult",
"SynthesizeSpeechConfig",
"TextInput",
"TextToSpeechSettings",
"TrainAgentRequest",
"UndeployConversationModelOperationMetadata",
"UndeployConversationModelRequest",
"UpdateAnswerRecordRequest",
"UpdateContextRequest",
"UpdateConversationProfileRequest",
"UpdateDocumentRequest",
"UpdateEntityTypeRequest",
"UpdateEnvironmentRequest",
"UpdateFulfillmentRequest",
"UpdateIntentRequest",
"UpdateKnowledgeBaseRequest",
"UpdateParticipantRequest",
"UpdateSessionEntityTypeRequest",
"UpdateVersionRequest",
"ValidationError",
"ValidationResult",
"Version",
"VersionsClient",
"VoiceSelectionParams",
"WebhookRequest",
"WebhookResponse",
)
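# A hedged usage sketch of the re-exported v2 clients (project and session IDs
# are placeholders; the call shapes follow the public google-cloud-dialogflow
# API and are not defined in this file):
#
#     from google.cloud import dialogflow_v2
#
#     client = dialogflow_v2.SessionsClient()
#     session = client.session_path("my-project", "my-session-id")
#     text_input = dialogflow_v2.TextInput(text="hi", language_code="en-US")
#     query_input = dialogflow_v2.QueryInput(text=text_input)
#     response = client.detect_intent(
#         request={"session": session, "query_input": query_input}
#     )
#     print(response.query_result.fulfillment_text)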
| 42.369115 | 88 | 0.834119 |
67cacd89124fd68874302e8ab9d12be22b7b8722 | 2,384 | py | Python | configs/selfsup/simclr/r50_bs256_ep200_mocov2_neck.py | neuroailab/OpenSelfSup | 37709c326173a981292fed12c336c82d3356cab2 | [
"Apache-2.0"
] | 2 | 2020-07-01T02:46:25.000Z | 2021-02-21T03:56:10.000Z | configs/selfsup/simclr/r50_bs256_ep200_mocov2_neck.py | neuroailab/OpenSelfSup | 37709c326173a981292fed12c336c82d3356cab2 | [
"Apache-2.0"
] | null | null | null | configs/selfsup/simclr/r50_bs256_ep200_mocov2_neck.py | neuroailab/OpenSelfSup | 37709c326173a981292fed12c336c82d3356cab2 | [
"Apache-2.0"
] | null | null | null | _base_ = '../../base.py'
# model settings
model = dict(
type='SimCLR',
pretrained=None,
backbone=dict(
type='ResNet',
depth=50,
in_channels=3,
out_indices=[4], # 0: conv-1, x: stage-x
norm_cfg=dict(type='SyncBN')),
neck=dict(
type='NonLinearNeckV1', # simple fc-relu-fc neck in MoCo v2
in_channels=2048,
hid_channels=2048,
out_channels=128,
with_avg_pool=True),
head=dict(type='ContrastiveHead', temperature=0.1))
# dataset settings
data_source_cfg = dict(
type='ImageNet',
memcached=True,
mclient_path='/mnt/lustre/share/memcached_client')
data_train_list = 'data/imagenet/meta/train.txt'
data_train_root = 'data/imagenet/train'
dataset_type = 'ContrastiveDataset'
img_norm_cfg = dict(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
train_pipeline = [
dict(type='RandomResizedCrop', size=224),
dict(type='RandomHorizontalFlip'),
dict(
type='RandomAppliedTrans',
transforms=[
dict(
type='ColorJitter',
brightness=0.8,
contrast=0.8,
saturation=0.8,
hue=0.2)
],
p=0.8),
dict(type='RandomGrayscale', p=0.2),
dict(
type='RandomAppliedTrans',
transforms=[
dict(
type='GaussianBlur',
sigma_min=0.1,
sigma_max=2.0,
kernel_size=23)
],
p=0.5),
dict(type='ToTensor'),
dict(type='Normalize', **img_norm_cfg),
]
data = dict(
imgs_per_gpu=32, # total 32*8
workers_per_gpu=4,
train=dict(
type=dataset_type,
data_source=dict(
list_file=data_train_list, root=data_train_root,
**data_source_cfg),
pipeline=train_pipeline))
# optimizer
optimizer = dict(type='LARS', lr=0.3, weight_decay=0.000001, momentum=0.9,
paramwise_options={
'(bn|gn)(\d+)?.(weight|bias)': dict(weight_decay=0., lars_exclude=True),
'bias': dict(weight_decay=0., lars_exclude=True)})
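# Note (descriptive): the paramwise_options above exclude BatchNorm/GroupNorm
# parameters and bias terms from weight decay and from LARS adaptation, a
# common practice when training contrastive models with the LARS optimizer.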
# learning policy
lr_config = dict(
policy='CosineAnealing',
min_lr=0.,
warmup='linear',
warmup_iters=10,
warmup_ratio=0.0001,
warmup_by_epoch=True)
checkpoint_config = dict(interval=10)
# runtime settings
total_epochs = 200
| 29.432099 | 92 | 0.585151 |
5cbe7559ff5c9d72eee6f8abf6cbf23660b4c06a | 1,999 | py | Python | atcoder/python/manhattan.py | yskang/AlgorithmPracticeWithPython | f7129bd1924a7961489198f0ee052d2cd1e9cf40 | [
"MIT"
] | null | null | null | atcoder/python/manhattan.py | yskang/AlgorithmPracticeWithPython | f7129bd1924a7961489198f0ee052d2cd1e9cf40 | [
"MIT"
] | null | null | null | atcoder/python/manhattan.py | yskang/AlgorithmPracticeWithPython | f7129bd1924a7961489198f0ee052d2cd1e9cf40 | [
"MIT"
] | null | null | null | # Title: Manhattan Crepe Cart
# Link: https://codingcompetitions.withgoogle.com/codejam/round/0000000000051706/000000000012295c
import sys
import bisect
sys.setrecursionlimit(10 ** 6)
read_single_int = lambda: int(sys.stdin.readline().strip())
read_list_int = lambda: list(map(int, sys.stdin.readline().strip().split(' ')))
read_list_words = lambda: sys.stdin.readline().strip().split(' ')
def solution(p: int, q: int, persons: list):
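    # Approach (descriptive note): people facing N/S constrain the y coordinate,
    # people facing E/W constrain x.  Only 0 and "coordinate + 1" can be optimal
    # candidates, so each candidate is scored with bisect: a person facing N (or E)
    # is satisfied when their coordinate is below the candidate, a person facing
    # S (or W) when their coordinate is above it.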
norths = []
souths = []
easts = []
wests = []
for x, y, d in persons:
if d == 'N':
norths.append(y)
elif d == 'S':
souths.append(y)
elif d == 'E':
easts.append(x)
elif d == 'W':
wests.append(x)
norths = sorted(norths)
souths = sorted(souths)
ys = set(norths)
ys.update(souths)
ys = sorted(ys)
ys = list(map(lambda k: k+1, ys))
max_y_value = -99999
max_y = -99999
for y in [0] + ys:
v = bisect.bisect_left(norths, y)
v += (len(souths) - bisect.bisect_right(souths, y))
if v > max_y_value:
max_y_value = v
max_y = y
wests = sorted(wests)
easts = sorted(easts)
xs = set(wests)
xs.update(easts)
xs = sorted(xs)
xs = list(map(lambda k: k+1, xs))
max_x_value = -9999
max_x = -9999
for x in [0] + xs:
v = bisect.bisect_left(easts, x)
v += (len(wests)-bisect.bisect_right(wests, x))
if v > max_x_value:
max_x_value = v
max_x = x
return '{} {}'.format(max_x, max_y)
def main():
t = read_single_int()
for case in range(1, t+1):
p, q = read_list_int()
persons = []
for _ in range(p):
x, y, d = read_list_words()
persons.append((int(x), int(y), d))
print('Case #{}: {}'.format(case, solution(p, q, persons)))
if __name__ == '__main__':
main() | 26.302632 | 99 | 0.528764 |
059e2589ae4d045f29a95059b303da34f3fbd7d4 | 9,335 | py | Python | huaweicloud-sdk-smn/huaweicloudsdksmn/v2/model/list_message_template_details_response.py | githubmilesma/huaweicloud-sdk-python-v3 | 9d9449ed68a609ca65f0aa50b5b2a1c28445bf03 | [
"Apache-2.0"
] | 1 | 2021-04-16T07:59:28.000Z | 2021-04-16T07:59:28.000Z | huaweicloud-sdk-smn/huaweicloudsdksmn/v2/model/list_message_template_details_response.py | Lencof/huaweicloud-sdk-python-v3 | d13dc4e2830a83e295be6e4de021999b3376e34e | [
"Apache-2.0"
] | null | null | null | huaweicloud-sdk-smn/huaweicloudsdksmn/v2/model/list_message_template_details_response.py | Lencof/huaweicloud-sdk-python-v3 | d13dc4e2830a83e295be6e4de021999b3376e34e | [
"Apache-2.0"
] | 1 | 2022-01-17T02:24:18.000Z | 2022-01-17T02:24:18.000Z | # coding: utf-8
import pprint
import re
import six
from huaweicloudsdkcore.sdk_response import SdkResponse
class ListMessageTemplateDetailsResponse(SdkResponse):
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'message_template_id': 'str',
'message_template_name': 'str',
'protocol': 'str',
'tag_names': 'list[str]',
'create_time': 'str',
'update_time': 'str',
'content': 'str',
'request_id': 'str'
}
attribute_map = {
'message_template_id': 'message_template_id',
'message_template_name': 'message_template_name',
'protocol': 'protocol',
'tag_names': 'tag_names',
'create_time': 'create_time',
'update_time': 'update_time',
'content': 'content',
'request_id': 'request_id'
}
def __init__(self, message_template_id=None, message_template_name=None, protocol=None, tag_names=None, create_time=None, update_time=None, content=None, request_id=None):
"""ListMessageTemplateDetailsResponse - a model defined in huaweicloud sdk"""
super().__init__()
self._message_template_id = None
self._message_template_name = None
self._protocol = None
self._tag_names = None
self._create_time = None
self._update_time = None
self._content = None
self._request_id = None
self.discriminator = None
if message_template_id is not None:
self.message_template_id = message_template_id
if message_template_name is not None:
self.message_template_name = message_template_name
if protocol is not None:
self.protocol = protocol
if tag_names is not None:
self.tag_names = tag_names
if create_time is not None:
self.create_time = create_time
if update_time is not None:
self.update_time = update_time
if content is not None:
self.content = content
if request_id is not None:
self.request_id = request_id
@property
def message_template_id(self):
"""Gets the message_template_id of this ListMessageTemplateDetailsResponse.
        Template ID.
:return: The message_template_id of this ListMessageTemplateDetailsResponse.
:rtype: str
"""
return self._message_template_id
@message_template_id.setter
def message_template_id(self, message_template_id):
"""Sets the message_template_id of this ListMessageTemplateDetailsResponse.
        Template ID.
:param message_template_id: The message_template_id of this ListMessageTemplateDetailsResponse.
:type: str
"""
self._message_template_id = message_template_id
@property
def message_template_name(self):
"""Gets the message_template_name of this ListMessageTemplateDetailsResponse.
        Template name.
:return: The message_template_name of this ListMessageTemplateDetailsResponse.
:rtype: str
"""
return self._message_template_name
@message_template_name.setter
def message_template_name(self, message_template_name):
"""Sets the message_template_name of this ListMessageTemplateDetailsResponse.
        Template name.
:param message_template_name: The message_template_name of this ListMessageTemplateDetailsResponse.
:type: str
"""
self._message_template_name = message_template_name
@property
def protocol(self):
"""Gets the protocol of this ListMessageTemplateDetailsResponse.
        Protocol types supported by the template. Currently supported protocols include: "email": email transport protocol. "default": "sms": SMS transport protocol. "functionstage": FunctionGraph (function) transport protocol. "functiongraph": FunctionGraph (workflow) transport protocol. "dms": DMS transport protocol. "http", "https": HTTP/HTTPS transport protocols.
:return: The protocol of this ListMessageTemplateDetailsResponse.
:rtype: str
"""
return self._protocol
@protocol.setter
def protocol(self, protocol):
"""Sets the protocol of this ListMessageTemplateDetailsResponse.
        Protocol types supported by the template. Currently supported protocols include: "email": email transport protocol. "default": "sms": SMS transport protocol. "functionstage": FunctionGraph (function) transport protocol. "functiongraph": FunctionGraph (workflow) transport protocol. "dms": DMS transport protocol. "http", "https": HTTP/HTTPS transport protocols.
:param protocol: The protocol of this ListMessageTemplateDetailsResponse.
:type: str
"""
self._protocol = protocol
@property
def tag_names(self):
"""Gets the tag_names of this ListMessageTemplateDetailsResponse.
        List of template tags. These are the fields inside "{}" in the message template; when the template is actually used, values can be assigned to these fields as needed.
:return: The tag_names of this ListMessageTemplateDetailsResponse.
:rtype: list[str]
"""
return self._tag_names
@tag_names.setter
def tag_names(self, tag_names):
"""Sets the tag_names of this ListMessageTemplateDetailsResponse.
        List of template tags. These are the fields inside "{}" in the message template; when the template is actually used, values can be assigned to these fields as needed.
:param tag_names: The tag_names of this ListMessageTemplateDetailsResponse.
:type: list[str]
"""
self._tag_names = tag_names
@property
def create_time(self):
"""Gets the create_time of this ListMessageTemplateDetailsResponse.
        Template creation time. The time is in UTC format: YYYY-MM-DDTHH:MM:SSZ.
:return: The create_time of this ListMessageTemplateDetailsResponse.
:rtype: str
"""
return self._create_time
@create_time.setter
def create_time(self, create_time):
"""Sets the create_time of this ListMessageTemplateDetailsResponse.
        Template creation time. The time is in UTC format: YYYY-MM-DDTHH:MM:SSZ.
:param create_time: The create_time of this ListMessageTemplateDetailsResponse.
:type: str
"""
self._create_time = create_time
@property
def update_time(self):
"""Gets the update_time of this ListMessageTemplateDetailsResponse.
        Time the template was last updated. The time is in UTC format: YYYY-MM-DDTHH:MM:SSZ.
:return: The update_time of this ListMessageTemplateDetailsResponse.
:rtype: str
"""
return self._update_time
@update_time.setter
def update_time(self, update_time):
"""Sets the update_time of this ListMessageTemplateDetailsResponse.
        Time the template was last updated. The time is in UTC format: YYYY-MM-DDTHH:MM:SSZ.
:param update_time: The update_time of this ListMessageTemplateDetailsResponse.
:type: str
"""
self._update_time = update_time
@property
def content(self):
"""Gets the content of this ListMessageTemplateDetailsResponse.
        Template content.
:return: The content of this ListMessageTemplateDetailsResponse.
:rtype: str
"""
return self._content
@content.setter
def content(self, content):
"""Sets the content of this ListMessageTemplateDetailsResponse.
        Template content.
:param content: The content of this ListMessageTemplateDetailsResponse.
:type: str
"""
self._content = content
@property
def request_id(self):
"""Gets the request_id of this ListMessageTemplateDetailsResponse.
        Unique ID that identifies the request.
:return: The request_id of this ListMessageTemplateDetailsResponse.
:rtype: str
"""
return self._request_id
@request_id.setter
def request_id(self, request_id):
"""Sets the request_id of this ListMessageTemplateDetailsResponse.
        Unique ID that identifies the request.
:param request_id: The request_id of this ListMessageTemplateDetailsResponse.
:type: str
"""
self._request_id = request_id
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ListMessageTemplateDetailsResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 31.116667 | 203 | 0.633637 |
a42c246a551c8421ed31b5b78c59629ddbd58d67 | 446 | py | Python | Heap/heapq.py | liu-yunpeng/algorithms | 1bbe34b4a3d2f090007faec25220ac8535213da7 | [
"MIT"
] | 3 | 2021-04-24T01:20:27.000Z | 2021-04-24T20:39:27.000Z | Heap/heapq.py | liu-yunpeng/algorithms | 1bbe34b4a3d2f090007faec25220ac8535213da7 | [
"MIT"
] | null | null | null | Heap/heapq.py | liu-yunpeng/algorithms | 1bbe34b4a3d2f090007faec25220ac8535213da7 | [
"MIT"
] | null | null | null | import heapq
lst = [6, 7, 9, 4, 3, 5, 8, 10, 1]
# using heapify() to convert list into heap
heapq.heapify(lst)
# get the largest k elements
print(heapq.nlargest(3, lst))
# get the smallest k elements
print(heapq.nsmallest(3, lst))
heapq.heappush(lst, 2)
heapq.heappush(lst, 12)
print(lst)
heapq.heappop(lst)
heapq.heappop(lst)
print(lst)
# pop first, then push
heapq.heapreplace(lst, 15)
# push first, then pop
heapq.heappushpop(lst, 5)
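# Illustrative addition (not part of the original snippet): heapq provides a
# min-heap; a max-heap can be simulated by pushing negated values.
max_heap = [-x for x in [6, 7, 9, 4, 3]]
heapq.heapify(max_heap)
print(-heapq.heappop(max_heap))  # prints 9, the largest element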
| 14.866667 | 43 | 0.704036 |
0cfecda054a672cc32a062436e6a5cfeae36e263 | 21,736 | py | Python | rescene/utility.py | zordtk/pyReScene | fe20ee6c72dcbd62e92d53678016321705795871 | [
"MIT"
] | null | null | null | rescene/utility.py | zordtk/pyReScene | fe20ee6c72dcbd62e92d53678016321705795871 | [
"MIT"
] | null | null | null | rescene/utility.py | zordtk/pyReScene | fe20ee6c72dcbd62e92d53678016321705795871 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2008-2010 ReScene.com
# Copyright (c) 2011-2015 pyReScene
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
# Port based on the these files: (MIT license)
# RarFileNameComparer.cs, RarFileNameFinder.cs, SfvReader.cs
# Everything else: MIT license
import re
import sys
import difflib
import mmap
import warnings
import locale
import os
import shutil
import time
import zlib
from io import BytesIO, TextIOBase, TextIOWrapper
from tempfile import mktemp
try:
import win32api
win32api_available = True
except ImportError:
win32api_available = False
# on Windows:
# SET NAME=True configure
# ECHO %NAME% check
# SET NAME= clear
# SETX NAME VALUE set environment variables permanently
_DEBUG = bool(os.environ.get("RESCENE_DEBUG")) # leave empty for False
# disables the spinner from showing while doing some processing
_SPINNER = not bool(os.environ.get("RESCENE_NO_SPINNER"))
# disables offset information to be printed out in srr -e output
# this way the output become more easy to compare
_OFFSETS = not bool(os.environ.get("RESCENE_NO_OFFSETS"))
# provides the temporary directory location to places where it would be a mess
# to pass it as parameter (fingerprint calculation)
temporary_directory = None
def deprecated(func):
"""This is a decorator which can be used to mark functions
	as deprecated. It will result in a warning being emitted
when the function is used."""
def newFunc(*args, **kwargs):
warnings.warn("Call to deprecated function %s." % func.__name__,
category=DeprecationWarning)
return func(*args, **kwargs)
newFunc.__name__ = func.__name__
newFunc.__doc__ = func.__doc__
newFunc.__dict__.update(func.__dict__)
return newFunc
# compatibility with 2.x
if sys.hexversion < 0x3000000:
# prefer 3.x behaviour
range = xrange # @ReservedAssignment
str = unicode # @ReservedAssignment
unicode = unicode # @ReservedAssignment # Export to other modules
def fsunicode(path):
"""Converts a file system "str" object to Unicode"""
if isinstance(path, unicode):
return path
encoding = sys.getfilesystemencoding()
return path.decode(encoding or sys.getdefaultencoding())
else:
unicode = str # @ReservedAssignment
def fsunicode(path):
return path
# Python BUG: http://bugs.python.org/issue1927
try: # Python < 3
raw_input = raw_input # @ReservedAssignment
except NameError: # Python 3
raw_input = input # @ReservedAssignment
try: # Python < 3
basestring = basestring # @ReservedAssignment
except NameError: # Python 3
basestring = str # @ReservedAssignment
class FileType(object):
"""File types in use to create SRS files for"""
MKV, AVI, MP4, WMV, FLAC, MP3, STREAM, M2TS, Unknown = (
"MKV", "AVI", "MP4", "WMV", "FLAC", "MP3",
"STREAM", "M2TS", "Unknown")
# the extensions that are supported
# .m4v is used for some non scene samples, xxx samples and music releases
# It is the same file format as MP4
# VA-Anjunabeats_Vol_7__Mixed_By_Above_And_Beyond-(ANJCD014D)-2CD-2009-TT/
# 301-va-anjunabeats_vol_7__bonus_dvd-tt.m4v
# Gothic_3_Soundtrack-Promo-CD-2006-XARDAS/
# 05_g3_makingofst-xardas.wmv
# 06_g3_makingofst-xardas.m4v
# Her-Sweet-Hand.11.01.15.Alex.Shy.Definitely.1.Time.Only.XXX.720p.M4V-OHRLY
# Sample/ohrly-hsh115asd1to.sample.m4v
# System_Of_A_Down-Aerials-svcd-wcs
# system_of_a_down-aerials-svcd-wcs.m2p
# System_Of_A_Down-Aerials-svcd-wcs
# system_of_a_down-aerials-svcd-wcs.m2p
StreamExtensions = ('.vob', '.m2ts', '.ts',
'.mpeg', '.mpg', '.m2v', '.m2p')
VideoExtensions = ('.mp4', '.m4v', # M4V: used for some XXX releases
'.avi', '.mkv', '.wmv') + StreamExtensions
AudioExtensions = ('.mp3', '.flac') # TODO: mp2?
def __init__(self, file_type, archived_file):
self.file_type = file_type
self.archived_file = archived_file
def __str__(self, *args, **kwargs):
return self.file_type
class SfvEntry(object):
"""Represents a record from a .sfv file."""
def __init__(self, file_name, crc32="00000000"):
self.file_name = file_name.strip('"')
self.crc32 = crc32
def get_crc_32(self):
return self.__crc32
def set_crc_32(self, value):
if not bool(re.match("^[\dA-F]{1,8}$", value, re.IGNORECASE)):
raise ValueError(value + " is not a CRC32 hash.")
# Baywatch.S11E11.DVDRiP.XViD-NODLABS.srr CRC is missing a zero
self.__crc32 = value.rjust(8, "0")
	crc32 = property(get_crc_32, set_crc_32, doc="The crc32 hash.")
def __lt__(self, other):
"""The sort routines are guaranteed to use __lt__ when making
comparisons between two objects."""
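		# Illustrative ordering produced by this comparison (hypothetical names):
		#   "x.rar" < "x.r00" < "x.r01" < ... < "x.r99" < "x.s00"
		#   "x.part1.rar" < "x.part2.rar", "x.001" < "x.002"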
ext_self = self.file_name[-4:].lower()
ext_other = other.file_name[-4:].lower()
same_base = self.file_name[:-4].lower() == other.file_name[:-4].lower()
if same_base and ext_self != ext_other:
if ext_self == ".rar":
if bool(re.match("\.[r-z]\d{2}$", ext_other)):
return True
else:
return self.file_name < other.file_name # .rar < .r00
elif ext_other == ".rar":
if bool(re.match("\.[r-z]\d{2}$", ext_self)):
return False
else:
return self.file_name < other.file_name # .r00 > .rar
# .part1.rar < .part2.rar, r99 < s00, 001 < 002
return self.file_name < other.file_name
def __repr__(self):
return self.file_name + " " + self.crc32
def __eq__(self, other):
if type(other) is type(self):
return (self.file_name.lower() == other.file_name.lower() and
self.crc32.lower() == other.crc32.lower())
return False
def __ne__(self, other):
return not self.__eq__(other)
__hash__ = None # Avoid DeprecationWarning in Python < 3
def parse_sfv_data(file_data):
"""Returns a tuple of three lists: (entries, comments, errors).
Accepts SFV file data as a byte string.
The "comments" and "errors" lists contain
lines decoded from the file as text strings.
File names must be iso-8859-1 aka latin1.
http://en.wikipedia.org/wiki/Extended_ASCII
Other text is decoded from latin1 using the "replace" error handler.
"""
entries = [] # SfvEntry objects
comments = []
errors = [] # unrecognized stuff
# .sfv files without any \n line breaks exist
# e.g. Need_for_Speed_Underground_2_JPN_NGC-WRG (\r\r instead)
# (are those made like that or altered on transfer?)
file_data = file_data.replace(b"\r", b"\n")
for line in file_data.split(b"\n"):
if not line.strip():
# ignore blank lines in parsed result
pass
elif line.lstrip().startswith(b";"):
# comment lines must start with ;
line = line.decode("latin-1", "replace")
comments.append(line)
else:
# actual data or parsing errors
line = line.rstrip()
try:
text = line.decode("latin-1")
text = text.replace("\t", " ") # convert tabs
index = text.rindex(" ") # ValueError: substring not found
filename = text[:index].strip()
# A SFV can contain multiple white spaces
crc = text[index + 1:].lstrip()
# ValueError: bad CRC e.g. char > F
entries.append(SfvEntry(filename, crc))
except ValueError:
line = line.decode("latin-1", "replace")
errors.append(line)
return entries, comments, errors
def parse_sfv_file(sfv_file):
"""Parses an SFV file with parse_sfv_data().
Accepts an open binary file object or a file name."""
try:
sfv_file.seek(0) # start at the beginning of the stream
sfv_data = sfv_file.read()
except AttributeError:
try:
with open(sfv_file, mode='rb') as fsock:
sfv_data = fsock.read()
except IOError:
if not isinstance(sfv_file, basestring):
raise
with open("\\\\?\\" + sfv_file, mode='rb') as fsock:
sfv_data = fsock.read()
return parse_sfv_data(sfv_data)
def filter_sfv_duplicates(entries):
"""Accepts the entries list of the parse functions above.
The result will be sorted."""
result = list()
previous = None
for entry in sorted(entries):
if previous is None or not entry.__eq__(previous):
result.append(entry)
previous = entry
return result
def same_sfv(one, two):
"""Only based on actual content, not comments."""
onec, _, _ = parse_sfv_file(one)
twoc, _, _ = parse_sfv_file(two)
onec.sort()
twoc.sort()
return onec == twoc
def next_archive(rfile, is_old=False):
"""Returns the name of the next possible RAR archive.
Behaviour undefined for *.part99.rar, *.rrar,...
It must never occur.
is_old: When enabled, makes sure the first .rar file is detected as
old style volume naming. It makes '.part02.r00' possible.
e.g. Doctor.Who.The.Enemy.Of.The.World.S05E17.DVDRip.x264-PFa
"""
def inc(extension):
# create an array of a string so we can manipulate it
extension = list(extension)
i = len(extension) - 1 # last element
while extension[i] == "9":
extension[i] = "0"
i -= 1 # go a character back
else: # also works with "rstuv"
extension[i] = chr(ord(extension[i]) + 1)
return "".join(extension) # array back to string
if re.match(".*\.part\d*.rar$", rfile, re.IGNORECASE) and not is_old:
return inc(rfile[:-4]) + rfile[-4:]
elif re.match(".*\.rar$", rfile, re.IGNORECASE):
return rfile[:-4] + ".r00"
elif not is_rar(rfile):
		raise AttributeError("The extension must be one from a RAR archive.")
else:
return inc(rfile)
def is_rar(file_name):
"""True if file_name is a correctly named RAR file.
Checks only based on the file name.
Legal extensions:
- .rar
- .r00 - r99, s00 - v99 rar cmd creates beyond this limit
- .000 - .999 001 for Accepted.DVDRip.XViD-ALLiANCE
Not valid:
- .cbr
- .exe TODO: SFX support
"""
return bool(re.match(".*\.(rar|[r-z]\d{2}|\d{3})$", file_name, re.I))
def first_rars(file_iter):
"""Tries to pick the first RAR file based on file name."""
# group 3: when there is a digit before .rar e.g. test3.rar
fre = ".*((\.part0*1\.rar|(?<!\d)\.rar)|((^|[^\d])(?<!part)(\d+\.rar)))$"
def is_first(rar):
if re.match(fre, rar, re.IGNORECASE):
return True
return rar.endswith((".000", ".001"))
def is_dotrar(rar):
return rar.lower().endswith(".rar")
# all items will need to be checked at least once: full generator run
input_files = list(file_iter)
firsts = list(filter(is_first, input_files))
# .000? then no .001
for first in filter(lambda x: x.endswith(".000"), firsts):
try:
firsts.remove(first[:-1] + "1")
except ValueError:
# the release consists of a a single .000 file only
# e.g. Ys_6_The_Ark_of_Napishtim_USA_FIX_READNFO_PSP-REBORN
pass
# list still empty? A .part2.r00 situation might be the case.
if not len(firsts):
firsts = list(filter(is_dotrar, input_files))
have_r00_follower = []
for first in firsts:
if first[:-3] + "r00" in input_files:
have_r00_follower.append(first)
if len(have_r00_follower):
firsts = have_r00_follower
elif len(input_files) > 1:
firsts = [] # probably incomplete, so detect nothing
# else: empty list firsts or
# there is only a single .rar file provided with a weird name
# e.g. name.part3.rar (and it gets detected)
return firsts
def is_good_srr(filepath):
"""Tests whether the file path only contains / and none
of the other illegal characters: \/:*?"<>| in Windows.
Stored files in SRRs contain forward slashes.
RAR uses backward slashes."""
ILLEGAL_WINDOWS_CHARACTERS = """\:*?"<>|"""
for char in ILLEGAL_WINDOWS_CHARACTERS:
if char in filepath:
return False
return True
def joinpath(path, start=""):
"""Validates and joins a sequence of path elements into an OS path
Each path element is an individual directory, subdirectory or file
name. Raises ValueError if an element name is not supported by the
OS."""
illegal_names = frozenset(
("", os.path.curdir, os.path.pardir, os.path.devnull))
for elem in path:
if os.path.dirname(elem) or elem in illegal_names:
fmt = "Path element not supported by OS: {0!r}"
raise ValueError(fmt.format(elem))
return os.path.join(start, *path)
def sep(number, loc=''):
"""Adds a thousands separator to the number.
The function is locale aware."""
locale.setlocale(locale.LC_ALL, loc)
return locale.format('%d', number, True)
def show_spinner(amount):
"""amount: a number"""
if _SPINNER:
sys.stdout.write("\b%s" % ['|', '/', '-', '\\'][amount % 4])
def remove_spinner():
"""removes spinner with the backspace char"""
if _SPINNER:
sys.stdout.write("\b")
def empty_folder(folder_path):
if os.name == "nt" and win32api_available:
folder_path = win32api.GetShortPathName(folder_path)
for file_object in os.listdir(folder_path):
file_object_path = os.path.join(folder_path, file_object)
if os.name == "nt" and win32api_available:
file_object_path = win32api.GetShortPathName(file_object_path)
if os.path.isfile(file_object_path):
os.unlink(file_object_path)
else:
try:
shutil.rmtree(file_object_path)
except OSError:
remove_folder(file_object_path)
def remove_folder(path):
"""Recursively delete a directory tree."""
if os.name == "nt" and win32api_available:
path = win32api.GetShortPathName(path)
names = os.listdir(path)
for name in names:
fullname = os.path.join(path, name)
if os.path.isdir(fullname):
remove_folder(fullname)
else:
try:
os.remove(fullname)
except OSError:
try:
os.remove("\\\\?\\" + fullname)
except OSError: # it's a dir?
remove_folder(fullname)
try:
os.rmdir(path)
except OSError:
os.rmdir("\\\\?\\" + path)
def create_temp_file_name(output_file):
"""Creates file name for the temporary file based on the name of the
output file to create/overwrite later.
output_file must be an absolute path.
Used to prevent overwriting good files with a broken one later on."""
dirname = os.path.dirname(output_file)
prefix = os.path.basename(output_file)
tmpfile = mktemp(".tmp", prefix + "-", dirname)
# Windows long path support
if os.name == "nt":
tmpfile = "\\\\?\\" + os.path.abspath(tmpfile)
assert not os.path.exists(tmpfile), "Temp file must not exist yet"
return tmpfile
def replace_result(src, dest):
"""Replaces the destination file with the source file.
Will not do anything when the source file doesn't exist.
Used to prevent overwriting good files with a broken one later on."""
if not src.startswith(dest):
# Windows long path support
if os.name == "nt":
dest = "\\\\?\\" + os.path.abspath(dest)
# it must come from the above method (create_temp_file_name)
assert src.startswith(dest), "src and dest not at same location"
# it is possible a temporary source file was never created
# (.srr question for replacement is false)
if os.path.isfile(src):
# delete previous file if it exists: user allowed to overwrite it
if os.path.isfile(dest):
try:
os.unlink(dest)
except OSError as delete_error:
print("Two processes are now trying to delete the same file!")
print(delete_error)
if _DEBUG:
print(" Destination: {0}".format(dest))
# TODO: work in progress missing srs files
# assert False
#
# # wait 5 seconds for the file to disappear
# for _ in range(0, 5):
# if os.path.isfile(dest):
# time.sleep(1)
# else:
# break
# else:
# print("Destination file still not deleted!")
# concurrency issue: it can fail here with a
# WindowsError/OSError when the other process made the file
try:
os.rename(src, dest)
except OSError:
print("Two processes are now trying to output the same file!")
if _DEBUG:
print(" Source: {0}".format(src))
print(" Destination: {0}".format(dest))
print("This one lost... deleting temp file.")
os.unlink(src)
raise
def calculate_crc32(file_name):
"""Calculates crc32 for a given file and show a spinner."""
crc = 0
count = 0
with open(file_name, "rb") as f:
x = f.read(65536)
while x:
count += 1
show_spinner(count)
crc = zlib.crc32(x, crc)
x = f.read(65536)
remove_spinner()
return crc & 0xFFFFFFFF
def capitalized_fn(afile):
"""
	Checks the provided file against the file on disk and returns the input with
its exact capitalization on disk. In the second value the capitalization
is preserved if it was available.
Returns tuple: (exact, capitals)
exact: what's on disk
	capitals: the name with capitals (preservation purposes)
"""
exact = capitals = afile
# 1) find the proper file on disk
# on Windows it will be found despite capitalization
# on Linux it could not when the capitals don't match (file name from sfv)
inputfn = os.path.basename(afile)
inputdir = os.path.dirname(afile) or os.curdir
for cfile in os.listdir(inputdir):
if (cfile.lower() == inputfn.lower() and
os.path.isfile(os.path.join(inputdir, cfile))):
exact = os.path.join(inputdir, cfile)
break
# 2) use proper capitalization on both OSes
# - choose the one with capitals
# - not conclusive? use original file name
actualfn = os.path.basename(exact)
if actualfn.lower() == actualfn:
# use file name of SFV either way (no difference is possible)
cpath = inputdir
capitals = os.path.join(cpath, inputfn)
elif inputfn.lower() == inputfn:
# actualfn has capitals and input none
cpath = inputdir
capitals = os.path.join(cpath, actualfn)
return exact, capitals
###############################################################################
def diff_lists(one, two):
"""Accepts two lists."""
# d = difflib.Differ() #linejunk=ignore_newline)
# oneclean = []
# twoclean = []
# for line in one:
# oneclean.append(line.encode('ascii', 'replace'))
# for line in two:
# twoclean.append(line.encode('ascii', 'replace'))
# #a = d.compare(oneclean, twoclean)
# print("\n".join(list(a)))
#
# TODO: remove empty lines?
a = difflib.ndiff(one, two, cleanlines)
(pos, neg, no) = (0, 0, 0)
res = []
for line in a:
if line[:1] in "+":
pos += 1
res.append("+")
elif line[:1] in "-":
neg += 1
res.append("-")
else: # ? or space
no += 1
res.append(" ")
# print(res)
return pos, neg, no
def cleanlines(line):
length = len(line.strip().replace("\r", "").replace("\n", ""))
return length == 0
def same_nfo(one, two):
with open(one, "rt") as f:
		onec = f.read()
with open(two, "rt") as f:
		twoc = f.read()
if len(onec) != len(twoc):
return False
else:
_pos, _neg, no = diff_lists(onec, twoc)
		return no == len(onec)
###############################################################################
def encodeerrors(text, textio, errors="replace"):
"""Prepare a string with a fallback encoding error handler
If the string is not encodable to the output stream,
the string is passed through a codec error handler."""
encoding = getattr(textio, "encoding", None)
if encoding is None:
if isinstance(textio, TextIOBase):
# TextIOBase, and therefore StringIO, etc,
# have an "encoding" attribute,
# despite not doing any encoding
return text
# Otherwise assume semantics like Python 2's "file" object
encoding = sys.getdefaultencoding()
try:
text.encode(encoding, textio.errors or "strict")
except UnicodeEncodeError:
text = text.encode(encoding, errors).decode(encoding)
return text
def decodetext(tbytes, *pos, **kw):
"""Decode a string using TextIOWrapper"""
return TextIOWrapper(BytesIO(tbytes), *pos, **kw).read()
"""Example SFV files:
; Generated by WIN-SFV32 v1 with MP3-RELEASER [Version:2000.2.2.1] by MorGoTH on 3/11/01 9:42:54 AM (px`hehxW)
; For more information visit http://morgoth.smartftp.com
;
;!SFV_COMMENT_START
; °
; ²
; ÞÛ²
; ÛÛÛ²
; ²ÛÛÛ²Ý
; ²ÛÛÛÛ²±
; ° ²ÛÛÛÛ²²±
; ÜÜÜܱ ÞÛÛÛÛÛ²²² ° ° ÜÜÜÜ
; ßÛÛ²ÛÛÜÜÜ ° °ÛÛÛÛÛÛ²²²Ý ± °ÜÜÜÛÛÛÛ²ß
; ßÛÛÛÛÛÛÛ²Ûܲ± ÞÛÛÛÛ²Û²²²Ý ± ÛÛ²²ßÛÛÛß
; ²²²²² ßß²ßß ²±Û²²ÜÜÜÜ ß²ßßÜÜÜÜ ²²ÛÛ ÜܲÛÛß
; ±±±±±ÛÜ ÜÛßßÛÛÜßÞÛ ÛÛܲ ÛÛ ßÛÛÜ ÛÛ ÛÛßß °
; ÛÛÛÛÛÛ ÜÛß ßßÛÛ ÛÛ ÞÛ ÛÛ ÛÛ ÜÜÜÜ ±
; ÛÛÛÛÛÛÛ ÛÛ Û²²²Ü ßÛÛÜÛßß ÛÛÜÜÜ ÛÛ ÛÛÝ°²ÛÛÛÛÛÛÛÛÛ²ÜÜÜÜÜ
; ÛÛÛÛÛÛÛÜ ÛÛÜ ßßÛÛÜ ßÛÛÜÜÛÛßß ßÛÛßß ÛÝ ßÜÜÜÜÛÛÛ²ßßßß
; ÛÛÛÛÛÛÜÜßß ÜÛÜ ßß ßßÛÛÜßß Ûݲ²²²ßßßß
; ßÛÛÛÛÛÛ ÜÛ²ÛÛß° Ü°Ü ßßßÛÛÛÜÜ
; ßÛÛÛÛÛÜ ÛÛß² ±ÜßÛ±Ü ßÛ²²² eu
; ßßÛÛÛ ± ßß ß² ²°Ü
; Ü ° 4 r e a l ± Ü °
; ° p r e s e n t s ß±
; ß ßß
;
; Created with MorGoTH's MP3 Releaser
;!SFV_COMMENT_END
01-olav_basoski_-_live_at_slam_fm_10-03-01-1real.mp3 2DEA959E
; sfv created by SFV Checker
;
"gh-flow.subs.rar" 83a20923
;
; Total 1 File(s) Combined CRC32 Checksum: 83a20923
"""
| 32.345238 | 110 | 0.673261 |
62ed9b3f37a9b7612bf899204cfcf17aa9f1873b | 844 | py | Python | contrib/workflow/SpiffWorkflow/src/SpiffWorkflow/Operators/Attrib.py | gonicus/clacks | da579f0acc4e48cf2e9451417ac6792282cf7ab6 | [
"ZPL-2.1"
] | 2 | 2015-01-26T07:15:19.000Z | 2015-11-09T13:42:11.000Z | contrib/workflow/SpiffWorkflow/src/SpiffWorkflow/Operators/Attrib.py | gonicus/clacks | da579f0acc4e48cf2e9451417ac6792282cf7ab6 | [
"ZPL-2.1"
] | null | null | null | contrib/workflow/SpiffWorkflow/src/SpiffWorkflow/Operators/Attrib.py | gonicus/clacks | da579f0acc4e48cf2e9451417ac6792282cf7ab6 | [
"ZPL-2.1"
] | null | null | null | # Copyright (C) 2007 Samuel Abels
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
class Attrib(object):
def __init__(self, name):
self.name = name
| 42.2 | 80 | 0.75237 |
50d72e2c72d89a9b49bc4c7895dd1e7d73cf6df5 | 21,199 | py | Python | vspk/v4_0/nuzfbrequest.py | mohaimenhasan/vspk-python | 4c7b297427048340b250cc3c74d9214dc0d4bde1 | [
"BSD-3-Clause"
] | null | null | null | vspk/v4_0/nuzfbrequest.py | mohaimenhasan/vspk-python | 4c7b297427048340b250cc3c74d9214dc0d4bde1 | [
"BSD-3-Clause"
] | null | null | null | vspk/v4_0/nuzfbrequest.py | mohaimenhasan/vspk-python | 4c7b297427048340b250cc3c74d9214dc0d4bde1 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Copyright (c) 2015, Alcatel-Lucent Inc, 2017 Nokia
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from .fetchers import NUMetadatasFetcher
from .fetchers import NUGlobalMetadatasFetcher
from .fetchers import NUJobsFetcher
from bambou import NURESTObject
class NUZFBRequest(NURESTObject):
""" Represents a ZFBRequest in the VSD
Notes:
A ZFB Request from an NSG
"""
__rest_name__ = "zfbrequest"
__resource_name__ = "zfbrequests"
## Constants
CONST_ZFB_APPROVAL_STATUS_DENIED = "DENIED"
CONST_ENTITY_SCOPE_GLOBAL = "GLOBAL"
CONST_ENTITY_SCOPE_ENTERPRISE = "ENTERPRISE"
CONST_ZFB_APPROVAL_STATUS_UNASSIGNED = "UNASSIGNED"
CONST_ZFB_APPROVAL_STATUS_APPROVED = "APPROVED"
CONST_ZFB_APPROVAL_STATUS_ASSIGNED = "ASSIGNED"
def __init__(self, **kwargs):
""" Initializes a ZFBRequest instance
Notes:
You can specify all parameters while calling this methods.
A special argument named `data` will enable you to load the
object from a Python dictionary
Examples:
>>> zfbrequest = NUZFBRequest(id=u'xxxx-xxx-xxx-xxx', name=u'ZFBRequest')
>>> zfbrequest = NUZFBRequest(data=my_dict)
"""
super(NUZFBRequest, self).__init__()
# Read/Write Attributes
self._mac_address = None
self._zfb_approval_status = None
self._zfb_bootstrap_enabled = None
self._zfb_info = None
self._zfb_request_retry_timer = None
self._sku = None
self._ip_address = None
self._cpu_type = None
self._nsg_version = None
self._uuid = None
self._family = None
self._last_connected_time = None
self._last_updated_by = None
self._serial_number = None
self._entity_scope = None
self._hostname = None
self._associated_enterprise_id = None
self._associated_enterprise_name = None
self._associated_ns_gateway_id = None
self._associated_ns_gateway_name = None
self._status_string = None
self._external_id = None
self.expose_attribute(local_name="mac_address", remote_name="MACAddress", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="zfb_approval_status", remote_name="ZFBApprovalStatus", attribute_type=str, is_required=False, is_unique=False, choices=[u'APPROVED', u'ASSIGNED', u'DENIED', u'UNASSIGNED'])
self.expose_attribute(local_name="zfb_bootstrap_enabled", remote_name="ZFBBootstrapEnabled", attribute_type=bool, is_required=False, is_unique=False)
self.expose_attribute(local_name="zfb_info", remote_name="ZFBInfo", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="zfb_request_retry_timer", remote_name="ZFBRequestRetryTimer", attribute_type=int, is_required=False, is_unique=False)
self.expose_attribute(local_name="sku", remote_name="SKU", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="ip_address", remote_name="IPAddress", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="cpu_type", remote_name="CPUType", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="nsg_version", remote_name="NSGVersion", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="uuid", remote_name="UUID", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="family", remote_name="family", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="last_connected_time", remote_name="lastConnectedTime", attribute_type=float, is_required=False, is_unique=False)
self.expose_attribute(local_name="last_updated_by", remote_name="lastUpdatedBy", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="serial_number", remote_name="serialNumber", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="entity_scope", remote_name="entityScope", attribute_type=str, is_required=False, is_unique=False, choices=[u'ENTERPRISE', u'GLOBAL'])
self.expose_attribute(local_name="hostname", remote_name="hostname", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="associated_enterprise_id", remote_name="associatedEnterpriseID", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="associated_enterprise_name", remote_name="associatedEnterpriseName", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="associated_ns_gateway_id", remote_name="associatedNSGatewayID", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="associated_ns_gateway_name", remote_name="associatedNSGatewayName", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="status_string", remote_name="statusString", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="external_id", remote_name="externalID", attribute_type=str, is_required=False, is_unique=True)
# Fetchers
self.metadatas = NUMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.global_metadatas = NUGlobalMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.jobs = NUJobsFetcher.fetcher_with_object(parent_object=self, relationship="child")
self._compute_args(**kwargs)
# Properties
@property
def mac_address(self):
""" Get mac_address value.
Notes:
MAC Address fo the NSG Port1 interface
This attribute is named `MACAddress` in VSD API.
"""
return self._mac_address
@mac_address.setter
def mac_address(self, value):
""" Set mac_address value.
Notes:
MAC Address fo the NSG Port1 interface
This attribute is named `MACAddress` in VSD API.
"""
self._mac_address = value
@property
def zfb_approval_status(self):
""" Get zfb_approval_status value.
Notes:
the status of the request
This attribute is named `ZFBApprovalStatus` in VSD API.
"""
return self._zfb_approval_status
@zfb_approval_status.setter
def zfb_approval_status(self, value):
""" Set zfb_approval_status value.
Notes:
the status of the request
This attribute is named `ZFBApprovalStatus` in VSD API.
"""
self._zfb_approval_status = value
@property
def zfb_bootstrap_enabled(self):
""" Get zfb_bootstrap_enabled value.
Notes:
whether the NSG should bootstrap, or just simulate bootstrap. Set from System Config
This attribute is named `ZFBBootstrapEnabled` in VSD API.
"""
return self._zfb_bootstrap_enabled
@zfb_bootstrap_enabled.setter
def zfb_bootstrap_enabled(self, value):
""" Set zfb_bootstrap_enabled value.
Notes:
whether the NSG should bootstrap, or just simulate bootstrap. Set from System Config
This attribute is named `ZFBBootstrapEnabled` in VSD API.
"""
self._zfb_bootstrap_enabled = value
@property
def zfb_info(self):
""" Get zfb_info value.
Notes:
The Base64 encoded JSON string of ZFB Attributes
This attribute is named `ZFBInfo` in VSD API.
"""
return self._zfb_info
@zfb_info.setter
def zfb_info(self, value):
""" Set zfb_info value.
Notes:
The Base64 encoded JSON string of ZFB Attributes
This attribute is named `ZFBInfo` in VSD API.
"""
self._zfb_info = value
@property
def zfb_request_retry_timer(self):
""" Get zfb_request_retry_timer value.
Notes:
ZFB Request retry timer on NSG. Set from System Config
This attribute is named `ZFBRequestRetryTimer` in VSD API.
"""
return self._zfb_request_retry_timer
@zfb_request_retry_timer.setter
def zfb_request_retry_timer(self, value):
""" Set zfb_request_retry_timer value.
Notes:
ZFB Request retry timer on NSG. Set from System Config
This attribute is named `ZFBRequestRetryTimer` in VSD API.
"""
self._zfb_request_retry_timer = value
@property
def sku(self):
""" Get sku value.
Notes:
The part number of the NSG
This attribute is named `SKU` in VSD API.
"""
return self._sku
@sku.setter
def sku(self, value):
""" Set sku value.
Notes:
The part number of the NSG
This attribute is named `SKU` in VSD API.
"""
self._sku = value
@property
def ip_address(self):
""" Get ip_address value.
Notes:
IP Address of the NSG
This attribute is named `IPAddress` in VSD API.
"""
return self._ip_address
@ip_address.setter
def ip_address(self, value):
""" Set ip_address value.
Notes:
IP Address of the NSG
This attribute is named `IPAddress` in VSD API.
"""
self._ip_address = value
@property
def cpu_type(self):
""" Get cpu_type value.
Notes:
Processor Type
This attribute is named `CPUType` in VSD API.
"""
return self._cpu_type
@cpu_type.setter
def cpu_type(self, value):
""" Set cpu_type value.
Notes:
Processor Type
This attribute is named `CPUType` in VSD API.
"""
self._cpu_type = value
@property
def nsg_version(self):
""" Get nsg_version value.
Notes:
The Nuage NSG Version
This attribute is named `NSGVersion` in VSD API.
"""
return self._nsg_version
@nsg_version.setter
def nsg_version(self, value):
""" Set nsg_version value.
Notes:
The Nuage NSG Version
This attribute is named `NSGVersion` in VSD API.
"""
self._nsg_version = value
@property
def uuid(self):
""" Get uuid value.
Notes:
Redhat UUID
This attribute is named `UUID` in VSD API.
"""
return self._uuid
@uuid.setter
def uuid(self, value):
""" Set uuid value.
Notes:
Redhat UUID
This attribute is named `UUID` in VSD API.
"""
self._uuid = value
@property
def family(self):
""" Get family value.
Notes:
NSG Type
"""
return self._family
@family.setter
def family(self, value):
""" Set family value.
Notes:
NSG Type
"""
self._family = value
@property
def last_connected_time(self):
""" Get last_connected_time value.
Notes:
the time in which the last GET was made from the NSG
This attribute is named `lastConnectedTime` in VSD API.
"""
return self._last_connected_time
@last_connected_time.setter
def last_connected_time(self, value):
""" Set last_connected_time value.
Notes:
the time in which the last GET was made from the NSG
This attribute is named `lastConnectedTime` in VSD API.
"""
self._last_connected_time = value
@property
def last_updated_by(self):
""" Get last_updated_by value.
Notes:
ID of the user who last updated the object.
This attribute is named `lastUpdatedBy` in VSD API.
"""
return self._last_updated_by
@last_updated_by.setter
def last_updated_by(self, value):
""" Set last_updated_by value.
Notes:
ID of the user who last updated the object.
This attribute is named `lastUpdatedBy` in VSD API.
"""
self._last_updated_by = value
@property
def serial_number(self):
""" Get serial_number value.
Notes:
The NSG's Serial Number
This attribute is named `serialNumber` in VSD API.
"""
return self._serial_number
@serial_number.setter
def serial_number(self, value):
""" Set serial_number value.
Notes:
The NSG's Serial Number
This attribute is named `serialNumber` in VSD API.
"""
self._serial_number = value
@property
def entity_scope(self):
""" Get entity_scope value.
Notes:
Specify if scope of entity is Data center or Enterprise level
This attribute is named `entityScope` in VSD API.
"""
return self._entity_scope
@entity_scope.setter
def entity_scope(self, value):
""" Set entity_scope value.
Notes:
Specify if scope of entity is Data center or Enterprise level
This attribute is named `entityScope` in VSD API.
"""
self._entity_scope = value
@property
def hostname(self):
""" Get hostname value.
Notes:
hostname of the NSG
"""
return self._hostname
@hostname.setter
def hostname(self, value):
""" Set hostname value.
Notes:
hostname of the NSG
"""
self._hostname = value
@property
def associated_enterprise_id(self):
""" Get associated_enterprise_id value.
Notes:
                the ID of the associated enterprise
This attribute is named `associatedEnterpriseID` in VSD API.
"""
return self._associated_enterprise_id
@associated_enterprise_id.setter
def associated_enterprise_id(self, value):
""" Set associated_enterprise_id value.
Notes:
                the ID of the associated enterprise
This attribute is named `associatedEnterpriseID` in VSD API.
"""
self._associated_enterprise_id = value
@property
def associated_enterprise_name(self):
""" Get associated_enterprise_name value.
Notes:
name of the associated Enterprise
This attribute is named `associatedEnterpriseName` in VSD API.
"""
return self._associated_enterprise_name
@associated_enterprise_name.setter
def associated_enterprise_name(self, value):
""" Set associated_enterprise_name value.
Notes:
name of the associated Enterprise
This attribute is named `associatedEnterpriseName` in VSD API.
"""
self._associated_enterprise_name = value
@property
def associated_ns_gateway_id(self):
""" Get associated_ns_gateway_id value.
Notes:
ID of the assigned NSG
This attribute is named `associatedNSGatewayID` in VSD API.
"""
return self._associated_ns_gateway_id
@associated_ns_gateway_id.setter
def associated_ns_gateway_id(self, value):
""" Set associated_ns_gateway_id value.
Notes:
ID of the assigned NSG
This attribute is named `associatedNSGatewayID` in VSD API.
"""
self._associated_ns_gateway_id = value
@property
def associated_ns_gateway_name(self):
""" Get associated_ns_gateway_name value.
Notes:
name of the associated NSG
This attribute is named `associatedNSGatewayName` in VSD API.
"""
return self._associated_ns_gateway_name
@associated_ns_gateway_name.setter
def associated_ns_gateway_name(self, value):
""" Set associated_ns_gateway_name value.
Notes:
name of the associated NSG
This attribute is named `associatedNSGatewayName` in VSD API.
"""
self._associated_ns_gateway_name = value
@property
def status_string(self):
""" Get status_string value.
Notes:
Extra status info
This attribute is named `statusString` in VSD API.
"""
return self._status_string
@status_string.setter
def status_string(self, value):
""" Set status_string value.
Notes:
Extra status info
This attribute is named `statusString` in VSD API.
"""
self._status_string = value
@property
def external_id(self):
""" Get external_id value.
Notes:
External object ID. Used for integration with third party systems
This attribute is named `externalID` in VSD API.
"""
return self._external_id
@external_id.setter
def external_id(self, value):
""" Set external_id value.
Notes:
External object ID. Used for integration with third party systems
This attribute is named `externalID` in VSD API.
"""
self._external_id = value
| 28.802989 | 214 | 0.583046 |
1c5292b9f47e3e88d43a8c4d34058765d7c0a84f | 6,183 | py | Python | pint/measurement.py | Suseko/pint | 4d0f842d8cd15e6d1763a63a2d1b4a8150847a45 | [
"BSD-3-Clause"
] | 1,545 | 2015-01-02T17:08:07.000Z | 2022-03-30T15:46:42.000Z | pint/measurement.py | Suseko/pint | 4d0f842d8cd15e6d1763a63a2d1b4a8150847a45 | [
"BSD-3-Clause"
] | 1,253 | 2015-01-02T22:45:45.000Z | 2022-03-31T21:05:47.000Z | pint/measurement.py | Suseko/pint | 4d0f842d8cd15e6d1763a63a2d1b4a8150847a45 | [
"BSD-3-Clause"
] | 425 | 2015-01-18T16:59:57.000Z | 2022-03-30T15:21:07.000Z | """
pint.measurement
~~~~~~~~~~~~~~~~
:copyright: 2016 by Pint Authors, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import re
from .compat import ufloat
from .formatting import _FORMATS, extract_custom_flags, siunitx_format_unit
from .quantity import Quantity
MISSING = object()
class Measurement(Quantity):
"""Implements a class to describe a quantity with uncertainty.
Parameters
----------
value : pint.Quantity or any numeric type
The expected value of the measurement
error : pint.Quantity or any numeric type
The error or uncertainty of the measurement
Returns
-------
"""
def __new__(cls, value, error, units=MISSING):
if units is MISSING:
try:
value, units = value.magnitude, value.units
except AttributeError:
# if called with two arguments and the first looks like a ufloat
# then assume the second argument is the units, keep value intact
if hasattr(value, "nominal_value"):
units = error
error = MISSING # used for check below
else:
units = ""
try:
error = error.to(units).magnitude
except AttributeError:
pass
if error is MISSING:
mag = value
elif error < 0:
raise ValueError("The magnitude of the error cannot be negative")
else:
mag = ufloat(value, error)
inst = super().__new__(cls, mag, units)
return inst
@property
def value(self):
return self._REGISTRY.Quantity(self.magnitude.nominal_value, self.units)
@property
def error(self):
return self._REGISTRY.Quantity(self.magnitude.std_dev, self.units)
@property
def rel(self):
return abs(self.magnitude.std_dev / self.magnitude.nominal_value)
def __reduce__(self):
# See notes in Quantity.__reduce__
from . import _unpickle_measurement
return _unpickle_measurement, (Measurement, self.magnitude, self._units)
def __repr__(self):
return "<Measurement({}, {}, {})>".format(
self.magnitude.nominal_value, self.magnitude.std_dev, self.units
)
def __str__(self):
return "{}".format(self)
def __format__(self, spec):
# special cases
if "Lx" in spec: # the LaTeX siunitx code
# the uncertainties module supports formatting
# numbers in value(unc) notation (i.e. 1.23(45) instead of 1.23 +/- 0.45),
# using type code 'S', which siunitx actually accepts as input.
# However, the implementation is incompatible with siunitx.
# Uncertainties will do 9.1(1.1), which is invalid, should be 9.1(11).
# TODO: add support for extracting options
#
# Get rid of this code, we'll deal with it here
spec = spec.replace("Lx", "")
# The most compatible format from uncertainties is the default format,
# but even this requires fixups.
# For one, SIUnitx does not except some formats that unc does, like 'P',
# and 'S' is broken as stated, so...
spec = spec.replace("S", "").replace("P", "")
# get SIunitx options
# TODO: allow user to set this value, somehow
opts = _FORMATS["Lx"]["siopts"]
if opts != "":
opts = r"[" + opts + r"]"
# SI requires space between "+-" (or "\pm") and the nominal value
# and uncertainty, and doesn't accept "+/-", so this setting
# selects the desired replacement.
pm_fmt = _FORMATS["Lx"]["pm_fmt"]
mstr = format(self.magnitude, spec).replace(r"+/-", pm_fmt)
# Also, SIunitx doesn't accept parentheses, which uncs uses with
# scientific notation ('e' or 'E' and sometimes 'g' or 'G').
mstr = mstr.replace("(", "").replace(")", " ")
ustr = siunitx_format_unit(self.units._units, self._REGISTRY)
return r"\SI%s{%s}{%s}" % (opts, mstr, ustr)
# standard cases
if "L" in spec:
newpm = pm = r" \pm "
pars = _FORMATS["L"]["parentheses_fmt"]
elif "P" in spec:
newpm = pm = "±"
pars = _FORMATS["P"]["parentheses_fmt"]
else:
newpm = pm = "+/-"
pars = _FORMATS[""]["parentheses_fmt"]
if "C" in spec:
sp = ""
newspec = spec.replace("C", "")
pars = _FORMATS["C"]["parentheses_fmt"]
else:
sp = " "
newspec = spec
if "H" in spec:
newpm = "±"
newspec = spec.replace("H", "")
pars = _FORMATS["H"]["parentheses_fmt"]
mag = format(self.magnitude, newspec).replace(pm, sp + newpm + sp)
if "(" in mag:
# Exponential format has its own parentheses
pars = "{}"
if "L" in newspec and "S" in newspec:
mag = mag.replace("(", r"\left(").replace(")", r"\right)")
if "L" in newspec:
space = r"\ "
else:
space = " "
uspec = extract_custom_flags(spec)
ustr = format(self.units, uspec)
if not ("uS" in newspec or "ue" in newspec or "u%" in newspec):
mag = pars.format(mag)
if "H" in spec:
# Fix exponential format
mag = re.sub(r"\)e\+0?(\d+)", r")×10<sup>\1</sup>", mag)
mag = re.sub(r"\)e-0?(\d+)", r")×10<sup>-\1</sup>", mag)
return mag + space + ustr
_Measurement = Measurement
def build_measurement_class(registry):
if ufloat is None:
class Measurement:
_REGISTRY = registry
def __init__(self, *args):
raise RuntimeError(
"Pint requires the 'uncertainties' package to create a Measurement object."
)
else:
class Measurement(_Measurement):
_REGISTRY = registry
return Measurement
| 32.714286 | 95 | 0.547954 |
b79604a062f0b3fad35e3b5e278d56edab4de024 | 6,135 | py | Python | classset.py | Daedo/Otter | b82a74e908c196838a55abc1e001a30256e87542 | [
"MIT"
] | null | null | null | classset.py | Daedo/Otter | b82a74e908c196838a55abc1e001a30256e87542 | [
"MIT"
] | null | null | null | classset.py | Daedo/Otter | b82a74e908c196838a55abc1e001a30256e87542 | [
"MIT"
] | null | null | null | from typing import Set, List, Any
from testclass import *
import operator
class ClassSet:
def __init__(self, classes: List[TestClass]=[]):
self._classes = classes # type: List[TestClass]
# Cache Unique Elements
self._uniques = None # type: List[Set[Any]]
self._level = None
self._domain = None
def get_classes(self):
return self._classes
def set_classes(self, classes):
self._classes = classes
self._uniques = None # type: List[Set[Any]]
self._level = None
self._domain = None
classes = property(get_classes, set_classes)
def get_unique_elements(self) -> List[Set[any]]:
# assume the classes are subsetfree
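        # Descriptive note: an element is "unique" to a class when it appears in
        # no other class of this set.  Each class starts with its full domain and
        # the domains of all other classes are subtracted pairwise; the result is
        # cached in self._uniques.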
if self._uniques is None:
self._uniques = [] # type: list[set[tuple[bool]]]
for elem in self.classes:
self._uniques.append(set(elem.domain))
for index_a in range(len(self.classes)):
for index_b in range(index_a+1, len(self.classes)):
self._uniques[index_a] -= self.classes[index_b].domain
self._uniques[index_b] -= self.classes[index_a].domain
return self._uniques
uniques = property(get_unique_elements)
    def split(self, index: int) -> Tuple['ClassSet', 'ClassSet']:
positive = []
negative = []
for test_class in self.classes:
if test_class.tup[index] == 0:
negative.append(test_class)
elif test_class.tup[index] == 1:
positive.append(test_class)
else:
pos, neg = test_class.split(index)
positive.append(pos)
negative.append(neg)
pos = ClassSet(positive)
pos._clean_split()
neg = ClassSet(negative)
neg._clean_split()
        return pos, neg
def get_level_of_uniqueness(self) -> int:
if self._level is None:
# Compute Unique Elements
self._level = 0
for unique_set in self.uniques:
if len(unique_set) > 0:
self._level += 1
return self._level
level_of_uniqueness = property(get_level_of_uniqueness)
def get_domain(self) -> Set[any]:
if self._domain is None:
result = set()
for dom_class in self.classes:
result |= dom_class.domain
self._domain = result
return self._domain
domain = property(get_domain)
def _clean_split(self):
self._filter_subsets()
keep = []
part_domain = set()
for index, unique in enumerate(self.uniques):
if len(unique) > 0:
keep.append(self.classes[index])
part_domain |= self.classes[index].domain
if part_domain == self.domain:
self.classes = keep
return
missing = self.domain - part_domain
for index, miss_class in enumerate(self.classes):
if not miss_class.domain.isdisjoint(missing):
keep.append(self.classes[index])
self.classes = keep
def _filter_subsets(self):
rem = set()
for i, class_a in enumerate(self.classes):
if class_a in rem:
continue
for j, class_b in enumerate(self.classes):
if i == j:
continue
if class_a.is_subset(class_b):
rem.add(class_a)
break
for rem_class in rem:
self._classes.remove(rem_class)
def __len__(self):
return len(self._classes)
class SplitClassSet:
def __init__(self, positive: ClassSet, negative: ClassSet):
self.pos_class = positive
self.neg_class = negative
len_pos = len(self.pos_class)
len_neg = len(self.neg_class)
self.total_len = len_neg + len_pos
        self.pure = (len_pos == 0 or len_neg == 0 or (len_pos == 1 and len_neg == 1))
self._level = None
def level_of_uniqueness(self) -> int:
if self._level is None:
self._level = self.pos_class.level_of_uniqueness
self._level += self.neg_class.level_of_uniqueness
return self._level
def split(self, index):
pos_left, pos_right = self.pos_class.split(index)
neg_left, neg_right = self.neg_class.split(index)
return SplitClassSet(pos_left, neg_left), SplitClassSet(pos_right, neg_right)
def is_pure(self):
return self.pure
def __repr__(self):
out = "Pos:\n"
for cl in self.pos_class.classes:
out += str(cl) + "=> 1\n"
out += "Neg:\n"
for cl in self.neg_class.classes:
out += str(cl) + "=> 0\n"
return out
def __len__(self):
return self.total_len
def get_heuristic_counts(self):
star_count = None
one_count = None
zero_count = None
classes = set(self.pos_class.classes) | set(self.neg_class.classes)
for t_class in classes:
tup = t_class.tup
if star_count is None:
star_count = [0] * len(tup)
one_count = [0] * len(tup)
zero_count = [0] * len(tup)
for i in range(len(tup)):
if tup[i] is True:
one_count[i] += 1
elif tup[i] is False:
zero_count[i] += 1
else:
star_count[i] += 1
return list(zip(zero_count, one_count, star_count))
def get_split_order(self):
count = self.get_heuristic_counts()
# Add index
order = list(map(lambda x: [x[0]] + list(x[1]), list(enumerate(count))))
# Remove all entries that have no stars or that are pure
order = list(filter(lambda x: x[3] != 0 or (x[2] != 0 and x[1] != 0), order))
order.sort(key=operator.itemgetter(3))
return list(map(lambda x: x[0], order))
def get_split_class_set(positive: List[TestClass], negative: List[TestClass]):
return SplitClassSet(ClassSet(positive), ClassSet(negative)) | 32.807487 | 86 | 0.567237 |
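# Illustration (hypothetical data, not from the original project): the heuristic
# above counts, for every tuple position, how many classes carry False, True or a
# "don't care" value, and positions with fewer "don't care" entries are preferred
# split points. The same counting over two plain 3-tuples:
#
#     tuples = [(True, None, False), (True, True, None)]
#     counts = [(sum(t[i] is False for t in tuples),
#                sum(t[i] is True for t in tuples),
#                sum(t[i] is None for t in tuples)) for i in range(3)]
#     # counts == [(0, 2, 0), (0, 1, 1), (1, 0, 1)]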
79be78a385ff2cb8dd98c233b5e97484c82faab1 | 3,314 | py | Python | test/functional/test_framework/address.py | BitPalnet/BitPal | 0cce7aea31a2e45fecec0e2ef42728aa26355704 | [
"MIT"
] | 1 | 2021-03-09T22:25:20.000Z | 2021-03-09T22:25:20.000Z | test/functional/test_framework/address.py | BitPalnet/BitPal | 0cce7aea31a2e45fecec0e2ef42728aa26355704 | [
"MIT"
] | null | null | null | test/functional/test_framework/address.py | BitPalnet/BitPal | 0cce7aea31a2e45fecec0e2ef42728aa26355704 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2016-2019 The BitPal Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Encode and decode BASE58, P2PKH and P2SH addresses."""
import enum
from .script import hash256, hash160, sha256, CScript, OP_0
from .util import hex_str_to_bytes
from . import segwit_addr
ADDRESS_BCRT1_UNSPENDABLE = 'bcrt1qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqq3xueyj'
ADDRESS_BCRT1_UNSPENDABLE_DESCRIPTOR = 'addr(bcrt1qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqq3xueyj)#juyq9d97'
# Coins sent to this address can be spent with a witness stack of just OP_TRUE
ADDRESS_BCRT1_P2WSH_OP_TRUE = 'bcrt1qft5p2uhsdcdc3l2ua4ap5qqfg4pjaqlp250x7us7a8qqhrxrxfsqseac85'
class AddressType(enum.Enum):
bech32 = 'bech32'
p2sh_segwit = 'p2sh-segwit'
legacy = 'legacy' # P2PKH
chars = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
def byte_to_base58(b, version):
result = ''
str = b.hex()
str = chr(version).encode('latin-1').hex() + str
checksum = hash256(hex_str_to_bytes(str)).hex()
str += checksum[:8]
value = int('0x'+str,0)
while value > 0:
result = chars[value % 58] + result
value //= 58
while (str[:2] == '00'):
result = chars[0] + result
str = str[2:]
return result
# TODO: def base58_decode
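# Usage sketch (illustrative values only): byte_to_base58 prepends the version
# byte, appends the first four bytes of the double-SHA256 checksum and
# base58-encodes the payload, so a 20-byte key hash with mainnet version 0 yields
# a classic '1...' P2PKH address:
#
#     keyhash = bytes(20)                  # hypothetical all-zero hash160
#     addr = byte_to_base58(keyhash, 0)
#     assert addr.startswith('1')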
def keyhash_to_p2pkh(hash, main = False):
assert len(hash) == 20
version = 0 if main else 111
return byte_to_base58(hash, version)
def scripthash_to_p2sh(hash, main = False):
assert len(hash) == 20
version = 5 if main else 196
return byte_to_base58(hash, version)
def key_to_p2pkh(key, main = False):
key = check_key(key)
return keyhash_to_p2pkh(hash160(key), main)
def script_to_p2sh(script, main = False):
script = check_script(script)
return scripthash_to_p2sh(hash160(script), main)
def key_to_p2sh_p2wpkh(key, main = False):
key = check_key(key)
p2shscript = CScript([OP_0, hash160(key)])
return script_to_p2sh(p2shscript, main)
def program_to_witness(version, program, main = False):
if (type(program) is str):
program = hex_str_to_bytes(program)
assert 0 <= version <= 16
assert 2 <= len(program) <= 40
assert version > 0 or len(program) in [20, 32]
return segwit_addr.encode("bc" if main else "bcrt", version, program)
def script_to_p2wsh(script, main = False):
script = check_script(script)
return program_to_witness(0, sha256(script), main)
def key_to_p2wpkh(key, main = False):
key = check_key(key)
return program_to_witness(0, hash160(key), main)
def script_to_p2sh_p2wsh(script, main = False):
script = check_script(script)
p2shscript = CScript([OP_0, sha256(script)])
return script_to_p2sh(p2shscript, main)
def check_key(key):
if (type(key) is str):
key = hex_str_to_bytes(key) # Assuming this is hex string
if (type(key) is bytes and (len(key) == 33 or len(key) == 65)):
return key
assert False
def check_script(script):
if (type(script) is str):
script = hex_str_to_bytes(script) # Assuming this is hex string
if (type(script) is bytes or type(script) is CScript):
return script
assert False
| 32.174757 | 120 | 0.706397 |
880612d37103389da74af713a9701b440f4aa3eb | 32 | py | Python | build/lib/simple-default-backpropagation-ann-package/__init__.py | andairka/Simple-default-Backpropagation-ANN | 995de1471e2b132af721b2babbec034f29228640 | [
"MIT"
] | null | null | null | build/lib/simple-default-backpropagation-ann-package/__init__.py | andairka/Simple-default-Backpropagation-ANN | 995de1471e2b132af721b2babbec034f29228640 | [
"MIT"
] | null | null | null | build/lib/simple-default-backpropagation-ann-package/__init__.py | andairka/Simple-default-Backpropagation-ANN | 995de1471e2b132af721b2babbec034f29228640 | [
"MIT"
] | null | null | null | from GUI import GUI
gui = GUI() | 10.666667 | 19 | 0.6875 |
0f8edfb439ffc22a93490d3c53d776761d1dd462 | 15,499 | py | Python | maskrcnn_benchmark/data/datasets/evaluation/vg/vg_eval.py | SkrighYZ/scene_graph_benchmark | b17e831a031e11c7b56d12dd092e8f476e48e3d4 | [
"MIT"
] | 210 | 2021-04-13T09:16:33.000Z | 2022-03-29T03:13:18.000Z | maskrcnn_benchmark/data/datasets/evaluation/vg/vg_eval.py | SkrighYZ/scene_graph_benchmark | b17e831a031e11c7b56d12dd092e8f476e48e3d4 | [
"MIT"
] | 72 | 2021-04-13T18:21:37.000Z | 2022-03-25T13:45:55.000Z | maskrcnn_benchmark/data/datasets/evaluation/vg/vg_eval.py | SkrighYZ/scene_graph_benchmark | b17e831a031e11c7b56d12dd092e8f476e48e3d4 | [
"MIT"
] | 55 | 2021-04-13T09:16:37.000Z | 2022-03-29T08:31:04.000Z | # A modification version from chainercv repository.
# (See https://github.com/chainer/chainercv/blob/master/chainercv/evaluations/eval_detection_voc.py)
from __future__ import division
import os
import numpy as np
import torch
from maskrcnn_benchmark.structures.bounding_box import BoxList
from maskrcnn_benchmark.data.datasets.evaluation.utils import evaluate_box_proposals
def do_vg_evaluation(dataset, predictions, output_folder, box_only, eval_attributes, logger, save_predictions=True):
# TODO need to make the use_07_metric format available
# for the user to choose
# we use int for box_only. 0: False, 1: box for RPN, 2: box for object detection,
if box_only:
if box_only == 1:
limits = [100, 1000]
elif box_only == 2:
limits = [36, 99]
else:
raise ValueError("box_only can be either 0/1/2, but get {0}".format(box_only))
areas = {"all": "", "small": "s", "medium": "m", "large": "l"}
result = {}
for area, suffix in areas.items():
for limit in limits:
logger.info("Evaluating bbox proposals@{:d}".format(limit))
stats = evaluate_box_proposals(
predictions, dataset, area=area, limit=limit
)
key_ar = "AR{}@{:d}".format(suffix, limit)
key_num_pos = "num_pos{}@{:d}".format(suffix, limit)
result[key_num_pos] = stats["num_pos"]
result[key_ar] = stats["ar"].item()
key_recalls = "Recalls{}@{:d}".format(suffix, limit)
# result[key_recalls] = stats["recalls"]
print(key_recalls, stats["recalls"])
print(key_ar, "ar={:.4f}".format(result[key_ar]))
print(key_num_pos, "num_pos={:d}".format(result[key_num_pos]))
logger.info(result)
logger.info(result)
# check_expected_results(result, expected_results, expected_results_sigma_tol)
if output_folder and save_predictions:
if box_only == 1:
torch.save(result, os.path.join(output_folder, "rpn_proposals.pth"))
elif box_only == 2:
torch.save(result, os.path.join(output_folder, "box_proposals.pth"))
else:
raise ValueError("box_only can be either 0/1/2, but get {0}".format(box_only))
return {"box_proposal": result}
pred_boxlists = []
gt_boxlists = []
for image_id, prediction in sorted(predictions.items()):
img_info = dataset.get_img_info(image_id)
if len(prediction) == 0:
continue
image_width = img_info["width"]
image_height = img_info["height"]
prediction = prediction.resize((image_width, image_height))
pred_boxlists.append(prediction)
gt_boxlist = dataset.get_groundtruth(image_id)
gt_boxlists.append(gt_boxlist)
if eval_attributes:
classes = dataset.attributes
else:
classes = dataset.classes
result = eval_detection_voc(
pred_boxlists=pred_boxlists,
gt_boxlists=gt_boxlists,
classes=classes,
iou_thresh=0.5,
eval_attributes=eval_attributes,
use_07_metric=False,
)
result_str = "mAP: {:.4f}\n".format(result["map"])
for i, ap in enumerate(result["ap"]):
# if i == 0: # skip background
# continue
# we skipped background in result['ap'], so we need to use i+1
if eval_attributes:
result_str += "{:<16}: {:.4f}\n".format(
dataset.map_attribute_id_to_attribute_name(i+1), ap
)
else:
result_str += "{:<16}: {:.4f}\n".format(
dataset.map_class_id_to_class_name(i+1), ap
)
logger.info(result_str)
# return mAP and weighted mAP
if eval_attributes:
if output_folder and save_predictions:
with open(os.path.join(output_folder, "result_attr.txt"), "w") as fid:
fid.write(result_str)
return {"attr": {"map": result["map"], "weighted map": result["weighted map"]}}
else:
if output_folder and save_predictions:
with open(os.path.join(output_folder, "result_obj.txt"), "w") as fid:
fid.write(result_str)
return {"obj": {"map": result["map"], "weighted map": result["weighted map"]}}
def eval_detection_voc(pred_boxlists, gt_boxlists, classes, iou_thresh=0.5, eval_attributes=False, use_07_metric=False):
"""Evaluate on voc dataset.
Args:
pred_boxlists(list[BoxList]): pred boxlist, has labels and scores fields.
gt_boxlists(list[BoxList]): ground truth boxlist, has labels field.
iou_thresh: iou thresh
use_07_metric: boolean
Returns:
dict represents the results
"""
assert len(gt_boxlists) == len(
pred_boxlists
), "Length of gt and pred lists need to be same."
aps = []
nposs = []
thresh = []
for i, classname in enumerate(classes):
if classname == "__background__" or classname == "__no_attribute__":
continue
rec, prec, ap, scores, npos = calc_detection_voc_prec_rec(pred_boxlists=pred_boxlists, gt_boxlists=gt_boxlists, \
classindex=i, iou_thresh=iou_thresh,
eval_attributes=eval_attributes,
use_07_metric=use_07_metric)
# Determine per class detection thresholds that maximise f score
# if npos > 1:
        if npos > 1 and not isinstance(scores, int):
f = np.nan_to_num((prec * rec) / (prec + rec))
thresh += [scores[np.argmax(f)]]
else:
thresh += [0]
aps += [ap]
nposs += [float(npos)]
print('AP for {} = {:.4f} (npos={:,})'.format(classname, ap, npos))
# if pickle:
# with open(os.path.join(output_dir, cls + '_pr.pkl'), 'w') as f:
# cPickle.dump({'rec': rec, 'prec': prec, 'ap': ap,
# 'scores': scores, 'npos':npos}, f)
# Set thresh to mean for classes with poor results
thresh = np.array(thresh)
avg_thresh = np.mean(thresh[thresh != 0])
thresh[thresh == 0] = avg_thresh
# if eval_attributes:
# filename = 'attribute_thresholds_' + self._image_set + '.txt'
# else:
# filename = 'object_thresholds_' + self._image_set + '.txt'
# path = os.path.join(output_dir, filename)
# with open(path, 'wt') as f:
# for i, cls in enumerate(classes[1:]):
# f.write('{:s} {:.3f}\n'.format(cls, thresh[i]))
weights = np.array(nposs)
weights /= weights.sum()
print('Mean AP = {:.4f}'.format(np.mean(aps)))
print('Weighted Mean AP = {:.4f}'.format(np.average(aps, weights=weights)))
print('Mean Detection Threshold = {:.3f}'.format(avg_thresh))
print('~~~~~~~~')
print('Results:')
for ap, npos in zip(aps, nposs):
print('{:.3f}\t{:.3f}'.format(ap, npos))
print('{:.3f}'.format(np.mean(aps)))
print('~~~~~~~~')
print('')
print('--------------------------------------------------------------')
print('Results computed with the **unofficial** PASCAL VOC Python eval code.')
print('--------------------------------------------------------------')
# pdb.set_trace()
return {"ap": aps, "map": np.mean(aps), "weighted map": np.average(aps, weights=weights)}
def calc_detection_voc_prec_rec(pred_boxlists, gt_boxlists, classindex, iou_thresh=0.5, eval_attributes=False,
use_07_metric=False):
"""Calculate precision and recall based on evaluation code of PASCAL VOC.
This function calculates precision and recall of
predicted bounding boxes obtained from a dataset which has :math:`N`
images.
The code is based on the evaluation code used in PASCAL VOC Challenge.
"""
class_recs = {}
npos = 0
image_ids = []
confidence = []
BB = []
for image_index, (gt_boxlist, pred_boxlist) in enumerate(zip(gt_boxlists, pred_boxlists)):
pred_bbox = pred_boxlist.bbox.numpy()
gt_bbox = gt_boxlist.bbox.numpy()
if eval_attributes:
gt_label = gt_boxlist.get_field("attributes").numpy()
pred_label = pred_boxlist.get_field("attr_labels").numpy()
pred_score = pred_boxlist.get_field("attr_scores").numpy()
else:
gt_label = gt_boxlist.get_field("labels").numpy()
pred_label = pred_boxlist.get_field("labels").numpy()
pred_score = pred_boxlist.get_field("scores").numpy()
# get the ground truth information for this class
if eval_attributes:
gt_mask_l = np.array([classindex in i for i in gt_label])
else:
gt_mask_l = gt_label == classindex
gt_bbox_l = gt_bbox[gt_mask_l]
gt_difficult_l = np.zeros(gt_bbox_l.shape[0], dtype=bool)
det = [False] * gt_bbox_l.shape[0]
npos = npos + sum(~gt_difficult_l)
class_recs[image_index] = {'bbox': gt_bbox_l,
'difficult': gt_difficult_l,
'det': det}
# prediction output for each class
# pdb.set_trace()
if eval_attributes:
pred_mask_l = np.logical_and(pred_label == classindex, np.not_equal(pred_score, 0.0)).nonzero()
pred_bbox_l = pred_bbox[pred_mask_l[0]]
pred_score_l = pred_score[pred_mask_l]
else:
pred_mask_l = pred_label == classindex
pred_bbox_l = pred_bbox[pred_mask_l]
pred_score_l = pred_score[pred_mask_l]
for bbox_tmp, score_tmp in zip(pred_bbox_l, pred_score_l):
image_ids.append(image_index)
confidence.append(float(score_tmp))
BB.append([float(z) for z in bbox_tmp])
if npos == 0:
# No ground truth examples
return 0, 0, 0, 0, npos
if len(confidence) == 0:
# No detection examples
return 0, 0, 0, 0, npos
confidence = np.array(confidence)
BB = np.array(BB)
# sort by confidence
sorted_ind = np.argsort(-confidence)
sorted_scores = -np.sort(-confidence)
BB = BB[sorted_ind, :]
image_ids = [image_ids[x] for x in sorted_ind]
# go down dets and mark TPs and FPs
nd = len(image_ids)
tp = np.zeros(nd)
fp = np.zeros(nd)
for d in range(nd):
R = class_recs[image_ids[d]]
bb = BB[d, :].astype(float)
ovmax = -np.inf
BBGT = R['bbox'].astype(float)
if BBGT.size > 0:
# compute overlaps
# intersection
ixmin = np.maximum(BBGT[:, 0], bb[0])
iymin = np.maximum(BBGT[:, 1], bb[1])
ixmax = np.minimum(BBGT[:, 2], bb[2])
iymax = np.minimum(BBGT[:, 3], bb[3])
iw = np.maximum(ixmax - ixmin + 1., 0.)
ih = np.maximum(iymax - iymin + 1., 0.)
inters = iw * ih
# union
uni = ((bb[2] - bb[0] + 1.) * (bb[3] - bb[1] + 1.) +
(BBGT[:, 2] - BBGT[:, 0] + 1.) *
(BBGT[:, 3] - BBGT[:, 1] + 1.) - inters)
overlaps = inters / uni
ovmax = np.max(overlaps)
jmax = np.argmax(overlaps)
if ovmax > iou_thresh:
if not R['difficult'][jmax]:
if not R['det'][jmax]:
tp[d] = 1.
R['det'][jmax] = 1
else:
fp[d] = 1.
else:
fp[d] = 1.
# compute precision recall
fp = np.cumsum(fp)
tp = np.cumsum(tp)
rec = tp / float(npos)
# avoid divide by zero in case the first detection matches a difficult
# ground truth
prec = tp / np.maximum(tp + fp, np.finfo(np.float64).eps)
ap = voc_ap(rec, prec, use_07_metric)
return rec, prec, ap, sorted_scores, npos
def voc_ap(rec, prec, use_07_metric=False):
""" ap = voc_ap(rec, prec, [use_07_metric])
Compute VOC AP given precision and recall.
If use_07_metric is true, uses the
VOC 07 11 point method (default:False).
"""
if use_07_metric:
# 11 point metric
ap = 0.
for t in np.arange(0., 1.1, 0.1):
if np.sum(rec >= t) == 0:
p = 0
else:
p = np.max(prec[rec >= t])
ap = ap + p / 11.
else:
# correct AP calculation
# first append sentinel values at the end
mrec = np.concatenate(([0.], rec, [1.]))
mpre = np.concatenate(([0.], prec, [0.]))
# compute the precision envelope
for i in range(mpre.size - 1, 0, -1):
mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])
# to calculate area under PR curve, look for points
# where X axis (recall) changes value
i = np.where(mrec[1:] != mrec[:-1])[0]
# and sum (\Delta recall) * prec
ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])
return ap
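# Worked example (illustrative numbers): with rec = [0.5, 1.0] and prec = [1.0, 0.5],
# the non-VOC07 branch pads to mrec = [0, 0.5, 1.0, 1.0], the precision envelope gives
# mpre = [1.0, 1.0, 0.5, 0.0], recall changes at the first two steps, and therefore
# ap = 0.5 * 1.0 + 0.5 * 0.5 = 0.75:
#
#     assert abs(voc_ap([0.5, 1.0], [1.0, 0.5]) - 0.75) < 1e-9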
def calc_detection_voc_ap(prec, rec, use_07_metric=False):
"""Calculate average precisions based on evaluation code of PASCAL VOC.
This function calculates average precisions
from given precisions and recalls.
The code is based on the evaluation code used in PASCAL VOC Challenge.
Args:
prec (list of numpy.array): A list of arrays.
:obj:`prec[l]` indicates precision for class :math:`l`.
If :obj:`prec[l]` is :obj:`None`, this function returns
:obj:`numpy.nan` for class :math:`l`.
rec (list of numpy.array): A list of arrays.
:obj:`rec[l]` indicates recall for class :math:`l`.
If :obj:`rec[l]` is :obj:`None`, this function returns
:obj:`numpy.nan` for class :math:`l`.
use_07_metric (bool): Whether to use PASCAL VOC 2007 evaluation metric
for calculating average precision. The default value is
:obj:`False`.
Returns:
~numpy.ndarray:
This function returns an array of average precisions.
The :math:`l`-th value corresponds to the average precision
for class :math:`l`. If :obj:`prec[l]` or :obj:`rec[l]` is
:obj:`None`, the corresponding value is set to :obj:`numpy.nan`.
"""
n_fg_class = len(prec)
ap = np.empty(n_fg_class)
for l in range(n_fg_class):
if prec[l] is None or rec[l] is None:
ap[l] = np.nan
continue
if use_07_metric:
# 11 point metric
ap[l] = 0
for t in np.arange(0.0, 1.1, 0.1):
if np.sum(rec[l] >= t) == 0:
p = 0
else:
p = np.max(np.nan_to_num(prec[l])[rec[l] >= t])
ap[l] += p / 11
else:
# correct AP calculation
# first append sentinel values at the end
mpre = np.concatenate(([0], np.nan_to_num(prec[l]), [0]))
mrec = np.concatenate(([0], rec[l], [1]))
mpre = np.maximum.accumulate(mpre[::-1])[::-1]
# to calculate area under PR curve, look for points
# where X axis (recall) changes value
i = np.where(mrec[1:] != mrec[:-1])[0]
# and sum (\Delta recall) * prec
ap[l] = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])
return ap
| 39.538265 | 121 | 0.561004 |
50666b561825d43a37197be46479ad0e99d7cbf6 | 12,508 | py | Python | thirdparty/DOTA_devkit/dota_evaluation_task1_.py | Artcs1/RotationDetection | 095be17345ee9984d8de8f24eb6b5a0b2d764a06 | [
"Apache-2.0"
] | 850 | 2020-10-27T08:51:54.000Z | 2022-03-30T15:12:06.000Z | thirdparty/DOTA_devkit/dota_evaluation_task1_.py | Artcs1/RotationDetection | 095be17345ee9984d8de8f24eb6b5a0b2d764a06 | [
"Apache-2.0"
] | 94 | 2020-12-01T02:18:47.000Z | 2022-03-30T08:14:27.000Z | thirdparty/DOTA_devkit/dota_evaluation_task1_.py | Artcs1/RotationDetection | 095be17345ee9984d8de8f24eb6b5a0b2d764a06 | [
"Apache-2.0"
] | 149 | 2020-10-29T03:30:32.000Z | 2022-03-29T09:53:23.000Z | # --------------------------------------------------------
# dota_evaluation_task1
# Licensed under The MIT License [see LICENSE for details]
# Written by Jian Ding, based on code from Bharath Hariharan
# --------------------------------------------------------
"""
To use the code, users should to config detpath, annopath and imagesetfile
detpath is the path for 15 result files, for the format, you can refer to "http://captain.whu.edu.cn/DOTAweb/tasks.html"
search for PATH_TO_BE_CONFIGURED to config the paths
Note, the evaluation is on the large scale images
"""
import numpy as np
import polyiou
import sys
sys.path.append('../..')
from libs.configs import cfgs
from libs.utils.coordinate_convert import backward_convert
def parse_gt(filename):
"""
:param filename: ground truth file to parse
:return: all instances in a picture
"""
objects = []
with open(filename, 'r') as f:
while True:
line = f.readline()
if line:
splitlines = line.strip().split(' ')
object_struct = {}
if (len(splitlines) < 9):
continue
object_struct['name'] = splitlines[8]
if (len(splitlines) == 9):
object_struct['difficult'] = 0
elif (len(splitlines) == 10):
object_struct['difficult'] = int(splitlines[9])
object_struct['bbox'] = [float(splitlines[0]),
float(splitlines[1]),
float(splitlines[2]),
float(splitlines[3]),
float(splitlines[4]),
float(splitlines[5]),
float(splitlines[6]),
float(splitlines[7])]
objects.append(object_struct)
else:
break
return objects
def voc_ap(rec, prec, use_07_metric=False):
""" ap = voc_ap(rec, prec, [use_07_metric])
Compute VOC AP given precision and recall.
If use_07_metric is true, uses the
VOC 07 11 point method (default:False).
"""
if use_07_metric:
# 11 point metric
ap = 0.
for t in np.arange(0., 1.1, 0.1):
if np.sum(rec >= t) == 0:
p = 0
else:
p = np.max(prec[rec >= t])
ap = ap + p / 11.
else:
# correct AP calculation
# first append sentinel values at the end
mrec = np.concatenate(([0.], rec, [1.]))
mpre = np.concatenate(([0.], prec, [0.]))
# compute the precision envelope
for i in range(mpre.size - 1, 0, -1):
mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])
# to calculate area under PR curve, look for points
# where X axis (recall) changes value
i = np.where(mrec[1:] != mrec[:-1])[0]
# and sum (\Delta recall) * prec
ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])
return ap
def voc_eval(detpath,
annopath,
imagesetfile,
classname,
# cachedir,
ovthresh=0.5,
use_07_metric=False):
"""rec, prec, ap = voc_eval(detpath,
annopath,
imagesetfile,
classname,
[ovthresh],
[use_07_metric])
Top level function that does the PASCAL VOC evaluation.
detpath: Path to detections
detpath.format(classname) should produce the detection results file.
annopath: Path to annotations
annopath.format(imagename) should be the xml annotations file.
imagesetfile: Text file containing the list of images, one image per line.
classname: Category name (duh)
cachedir: Directory for caching the annotations
[ovthresh]: Overlap threshold (default = 0.5)
[use_07_metric]: Whether to use VOC07's 11 point AP computation
(default False)
"""
# assumes detections are in detpath.format(classname)
# assumes annotations are in annopath.format(imagename)
# assumes imagesetfile is a text file with each line an image name
# cachedir caches the annotations in a pickle file
# first load gt
#if not os.path.isdir(cachedir):
# os.mkdir(cachedir)
#cachefile = os.path.join(cachedir, 'annots.pkl')
# read list of images
with open(imagesetfile, 'r') as f:
lines = f.readlines()
imagenames = [x.strip() for x in lines]
#print('imagenames: ', imagenames)
#if not os.path.isfile(cachefile):
# load annots
recs = {}
for i, imagename in enumerate(imagenames):
#print('parse_files name: ', annopath.format(imagename))
recs[imagename] = parse_gt(annopath.format(imagename))
#if i % 100 == 0:
# print ('Reading annotation for {:d}/{:d}'.format(
# i + 1, len(imagenames)) )
# save
#print ('Saving cached annotations to {:s}'.format(cachefile))
#with open(cachefile, 'w') as f:
# cPickle.dump(recs, f)
#else:
# load
#with open(cachefile, 'r') as f:
# recs = cPickle.load(f)
# extract gt objects for this class
class_recs = {}
npos = 0
for imagename in imagenames:
R = [obj for obj in recs[imagename] if obj['name'] == classname]
bbox = np.array([x['bbox'] for x in R])
        difficult = np.array([x['difficult'] for x in R]).astype(bool)
det = [False] * len(R)
npos = npos + sum(~difficult)
class_recs[imagename] = {'bbox': bbox,
'difficult': difficult,
'det': det}
# read dets from Task1* files
detfile = detpath.format(classname)
with open(detfile, 'r') as f:
lines = f.readlines()
splitlines = [x.strip().split(' ') for x in lines]
image_ids = [x[0] for x in splitlines]
confidence = np.array([float(x[1]) for x in splitlines])
#print('check confidence: ', confidence)
BB = np.array([[float(z) for z in x[2:]] for x in splitlines])
# sort by confidence
sorted_ind = np.argsort(-confidence)
sorted_scores = np.sort(-confidence)
#print('check sorted_scores: ', sorted_scores)
#print('check sorted_ind: ', sorted_ind)
## note the usage only in numpy not for list
BB = BB[sorted_ind, :]
image_ids = [image_ids[x] for x in sorted_ind]
#print('check imge_ids: ', image_ids)
#print('imge_ids len:', len(image_ids))
# go down dets and mark TPs and FPs
nd = len(image_ids)
tp = np.zeros(nd)
fp = np.zeros(nd)
ae, iou, tp_num = 0, 0, 0
for d in range(nd):
R = class_recs[image_ids[d]]
bb = BB[d, :].astype(float)
ovmax = -np.inf
BBGT = R['bbox'].astype(float)
## compute det bb with each BBGT
if BBGT.size > 0:
# compute overlaps
# intersection
# 1. calculate the overlaps between hbbs, if the iou between hbbs are 0, the iou between obbs are 0, too.
# pdb.set_trace()
BBGT_xmin = np.min(BBGT[:, 0::2], axis=1)
BBGT_ymin = np.min(BBGT[:, 1::2], axis=1)
BBGT_xmax = np.max(BBGT[:, 0::2], axis=1)
BBGT_ymax = np.max(BBGT[:, 1::2], axis=1)
bb_xmin = np.min(bb[0::2])
bb_ymin = np.min(bb[1::2])
bb_xmax = np.max(bb[0::2])
bb_ymax = np.max(bb[1::2])
ixmin = np.maximum(BBGT_xmin, bb_xmin)
iymin = np.maximum(BBGT_ymin, bb_ymin)
ixmax = np.minimum(BBGT_xmax, bb_xmax)
iymax = np.minimum(BBGT_ymax, bb_ymax)
iw = np.maximum(ixmax - ixmin + 1., 0.)
ih = np.maximum(iymax - iymin + 1., 0.)
inters = iw * ih
# union
uni = ((bb_xmax - bb_xmin + 1.) * (bb_ymax - bb_ymin + 1.) +
(BBGT_xmax - BBGT_xmin + 1.) *
(BBGT_ymax - BBGT_ymin + 1.) - inters)
overlaps = inters / uni
BBGT_keep_mask = overlaps > 0
BBGT_keep = BBGT[BBGT_keep_mask, :]
BBGT_keep_index = np.where(overlaps > 0)[0]
# pdb.set_trace()
def calcoverlaps(BBGT_keep, bb):
overlaps = []
for index, GT in enumerate(BBGT_keep):
overlap = polyiou.iou_poly(polyiou.VectorDouble(BBGT_keep[index]), polyiou.VectorDouble(bb))
overlaps.append(overlap)
return overlaps
if len(BBGT_keep) > 0:
overlaps = calcoverlaps(BBGT_keep, bb)
ovmax = np.max(overlaps)
jmax = np.argmax(overlaps)
# pdb.set_trace()
jmax = BBGT_keep_index[jmax]
if ovmax > ovthresh:
if not R['difficult'][jmax]:
if not R['det'][jmax]:
tp[d] = 1.
R['det'][jmax] = 1
bb_5p = backward_convert(np.reshape(bb, [-1, 8]), False)
gt_5p = backward_convert(np.reshape(BBGT[jmax], [-1, 8]), False)
angle_error = abs(bb_5p[0][-1]-gt_5p[0][-1]) % cfgs.ANGLE_RANGE
ae += min(angle_error, abs(cfgs.ANGLE_RANGE-angle_error))
iou += polyiou.iou_poly(polyiou.VectorDouble(BBGT[jmax]), polyiou.VectorDouble(bb))
tp_num += 1
else:
fp[d] = 1.
else:
fp[d] = 1.
# compute precision recall
# print('check fp:', fp)
# print('check tp', tp)
print('npos num:', npos)
fp = np.cumsum(fp)
tp = np.cumsum(tp)
rec = tp / float(npos)
# avoid divide by zero in case the first detection matches a difficult
# ground truth
prec = tp / np.maximum(tp + fp, np.finfo(np.float64).eps)
ap = voc_ap(rec, prec, use_07_metric)
return rec, prec, ap, (ae / tp_num, iou / tp_num)
def main():
detpath = r'../../tools/fcos/test_dota/%s/dota_res/Task1_{:s}.txt' % cfgs.VERSION
# change the directory to the path of val/labelTxt, if you want to do evaluation on the valset
annopath = r'/data/dataset/DOTA/val/labelTxt/labelTxt/{:s}.txt'
imagesetfile = r'../../dataloader/dataset/DOTA/val_set.txt'
# For DOTA-v1.5
# classnames = ['plane', 'baseball-diamond', 'bridge', 'ground-track-field', 'small-vehicle', 'large-vehicle', 'ship', 'tennis-court',
# 'basketball-court', 'storage-tank', 'soccer-ball-field', 'roundabout', 'harbor', 'swimming-pool', 'helicopter', 'container-crane']
# For DOTA-v1.0
classnames = ['plane', 'baseball-diamond', 'bridge', 'ground-track-field', 'small-vehicle', 'large-vehicle', 'ship',
'tennis-court', 'basketball-court', 'storage-tank', 'soccer-ball-field', 'roundabout', 'harbor',
'swimming-pool', 'helicopter']
ovthresh = 0.5
classaps, classaes, classious = [], [], []
map, mae, miou = 0, 0, 0
for classname in classnames:
print('classname:', classname)
rec, prec, ap, ae_iou = voc_eval(detpath,
annopath,
imagesetfile,
classname,
ovthresh=ovthresh,
use_07_metric=True)
map += ap
mae += ae_iou[0]
miou += ae_iou[1]
# print('rec: ', rec, 'prec: ', prec, 'ap: ', ap)
print('ap: {}, ae: {}, iou: {}'.format(ap, ae_iou[0], ae_iou[1]))
classaps.append(ap)
classaes.append(ae_iou[0])
classious.append(ae_iou[1])
# uncomment to show p-r curve of each category
# plt.figure(figsize=(8,4))
# plt.xlabel('recall')
# plt.ylabel('precision')
# plt.plot(rec, prec)
# plt.show()
map /= len(classnames)
mae /= len(classnames)
miou /= len(classnames)
print('************************' * 2)
print('map: {}, mae: {}, miou: {}'.format(map, mae, miou))
print('classaps: {}'.format(100 * np.array(classaps)))
print('classaes: {}'.format(classaes))
print('classious: {}'.format(classious))
if __name__ == '__main__':
main() | 37.449102 | 149 | 0.529341 |
7101183ccdb4121f3893bcbc1898ec763bbda397 | 266 | py | Python | tests/artificial/transf_Integration/trend_Lag1Trend/cycle_12/ar_/test_artificial_128_Integration_Lag1Trend_12__0.py | shaido987/pyaf | b9afd089557bed6b90b246d3712c481ae26a1957 | [
"BSD-3-Clause"
] | 377 | 2016-10-13T20:52:44.000Z | 2022-03-29T18:04:14.000Z | tests/artificial/transf_Integration/trend_Lag1Trend/cycle_12/ar_/test_artificial_128_Integration_Lag1Trend_12__0.py | ysdede/pyaf | b5541b8249d5a1cfdc01f27fdfd99b6580ed680b | [
"BSD-3-Clause"
] | 160 | 2016-10-13T16:11:53.000Z | 2022-03-28T04:21:34.000Z | tests/artificial/transf_Integration/trend_Lag1Trend/cycle_12/ar_/test_artificial_128_Integration_Lag1Trend_12__0.py | ysdede/pyaf | b5541b8249d5a1cfdc01f27fdfd99b6580ed680b | [
"BSD-3-Clause"
] | 63 | 2017-03-09T14:51:18.000Z | 2022-03-27T20:52:57.000Z | import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 128 , FREQ = 'D', seed = 0, trendtype = "Lag1Trend", cycle_length = 12, transform = "Integration", sigma = 0.0, exog_count = 0, ar_order = 0); | 38 | 166 | 0.733083 |
5fa561e301f6927210036fa24166045ed28233be | 1,847 | py | Python | cpl_media/__init__.py | matham/cpl_media | 4e01b61648562a76f05c5ff696f43972924e9101 | [
"MIT"
] | null | null | null | cpl_media/__init__.py | matham/cpl_media | 4e01b61648562a76f05c5ff696f43972924e9101 | [
"MIT"
] | null | null | null | cpl_media/__init__.py | matham/cpl_media | 4e01b61648562a76f05c5ff696f43972924e9101 | [
"MIT"
] | 1 | 2020-01-13T19:47:26.000Z | 2020-01-13T19:47:26.000Z | """cpl_media
=================
A library providing kivy support for playing and recording from
various cameras.
"""
from functools import wraps
import traceback
import sys
import pathlib
import os
__all__ = ('error_guard', 'error_callback', 'get_pyinstaller_datas')
__version__ = '0.1.3.dev0'
def _error_callback(e, exc_info=None, threaded=False):
pass
error_callback = _error_callback
"""When set, care must be taken to handle errors from secondary threads.
It's signature is ``error_callback(e, exc_info=None, threaded=False)``.
"""
def error_guard(error_func):
"""A decorator which wraps the function in `try...except` and calls
:func:`error_callback` if a exception is raised.
E.g.::
@error_guard
def do_something():
do_something_interesting
"""
@wraps(error_func)
def safe_func(*largs, **kwargs):
try:
return error_func(*largs, **kwargs)
except Exception as e:
exc_info = sys.exc_info()
stack = traceback.extract_stack()
tb = traceback.extract_tb(exc_info[2])
full_tb = stack[:-1] + tb
exc_line = traceback.format_exception_only(*exc_info[:2])
err = 'Traceback (most recent call last):'
err += "".join(traceback.format_list(full_tb))
err += "".join(exc_line)
error_callback(e, exc_info=err, threaded=True)
return safe_func
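# Wiring sketch (standard library only): `error_callback` is looked up at call
# time inside `safe_func`, so assigning a new handler is enough for `error_guard`
# to start reporting through it instead of silently dropping errors.
#
#     import logging
#     import cpl_media
#
#     def log_error(e, exc_info=None, threaded=False):
#         logging.getLogger('cpl_media').error('%s\n%s', e, exc_info or '')
#
#     cpl_media.error_callback = log_error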
def get_pyinstaller_datas():
"""Returns the ``datas`` list required by PyInstaller to be able to package
:mod:`cpl_media` in a application.
"""
root = pathlib.Path(os.path.dirname(sys.modules[__name__].__file__))
datas = []
for pat in ('**/*.kv', '*.kv'):
for f in root.glob(pat):
datas.append((str(f), str(f.relative_to(root.parent).parent)))
return datas
| 26.385714 | 79 | 0.636708 |
b2dfcec42ac92e5d3c8ec2ec76590e97fee75e9d | 3,913 | py | Python | dimsdk/cpu/__init__.py | dimchat/sdk-py | 94223b5e6fa7f56eacc523ea3d803086f8b6c64d | [
"MIT"
] | null | null | null | dimsdk/cpu/__init__.py | dimchat/sdk-py | 94223b5e6fa7f56eacc523ea3d803086f8b6c64d | [
"MIT"
] | null | null | null | dimsdk/cpu/__init__.py | dimchat/sdk-py | 94223b5e6fa7f56eacc523ea3d803086f8b6c64d | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
#
# DIM-SDK : Decentralized Instant Messaging Software Development Kit
#
# Written in 2019 by Moky <albert.moky@gmail.com>
#
# ==============================================================================
# MIT License
#
# Copyright (c) 2019 Albert Moky
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ==============================================================================
"""
Content/Command Processing Units
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
from dimp import *
from .content import ContentProcessor
from .forward import ForwardContentProcessor
from .command import CommandProcessor
from .history import HistoryCommandProcessor, GroupCommandProcessor
from .grp_invite import InviteCommandProcessor
from .grp_expel import ExpelCommandProcessor
from .grp_quit import QuitCommandProcessor
from .grp_reset import ResetCommandProcessor
from .grp_query import QueryCommandProcessor
from .meta import MetaCommandProcessor
from .document import DocumentCommandProcessor
def register_content_processors():
# contents
ContentProcessor.register(content_type=0, cpu=ContentProcessor()) # default
ContentProcessor.register(content_type=ContentType.FORWARD, cpu=ForwardContentProcessor())
# commands
ContentProcessor.register(content_type=ContentType.COMMAND, cpu=CommandProcessor())
ContentProcessor.register(content_type=ContentType.HISTORY, cpu=HistoryCommandProcessor())
def register_command_processors():
# meta
CommandProcessor.register(command=Command.META, cpu=MetaCommandProcessor())
# document
dpu = DocumentCommandProcessor()
CommandProcessor.register(command=Command.DOCUMENT, cpu=dpu)
CommandProcessor.register(command='profile', cpu=dpu)
CommandProcessor.register(command='visa', cpu=dpu)
CommandProcessor.register(command='bulletin', cpu=dpu)
# group commands
CommandProcessor.register(command='group', cpu=GroupCommandProcessor())
CommandProcessor.register(command=GroupCommand.INVITE, cpu=InviteCommandProcessor())
CommandProcessor.register(command=GroupCommand.EXPEL, cpu=ExpelCommandProcessor())
CommandProcessor.register(command=GroupCommand.QUIT, cpu=QuitCommandProcessor())
CommandProcessor.register(command=GroupCommand.QUERY, cpu=QueryCommandProcessor())
CommandProcessor.register(command=GroupCommand.RESET, cpu=ResetCommandProcessor())
def register_all_processors():
register_content_processors()
register_command_processors()
register_all_processors()
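# Extension sketch (hypothetical command name): application code can plug in extra
# processors through the same registration hook used above, for example re-using
# the generic CommandProcessor for a custom 'ping' command:
#
#     CommandProcessor.register(command='ping', cpu=CommandProcessor())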
__all__ = [
'ContentProcessor',
'ForwardContentProcessor',
'CommandProcessor',
'HistoryCommandProcessor',
'GroupCommandProcessor',
'InviteCommandProcessor', 'ExpelCommandProcessor', 'QuitCommandProcessor',
'ResetCommandProcessor', 'QueryCommandProcessor',
'MetaCommandProcessor',
'DocumentCommandProcessor',
]
| 37.625 | 94 | 0.740864 |
f25f43664e262b25473557c5f11dae91e697e3f6 | 2,115 | py | Python | youtube_dl/extractor/infoq.py | Logmytech/youtube-dl-QT | 1497297719a95c4f70fbfa32e0fa4e38cdd475dc | [
"MIT"
] | 1 | 2015-02-19T13:13:47.000Z | 2015-02-19T13:13:47.000Z | youtube_dl/extractor/infoq.py | Logmytech/youtube-dl-QT | 1497297719a95c4f70fbfa32e0fa4e38cdd475dc | [
"MIT"
] | 2 | 2019-05-20T12:46:30.000Z | 2020-11-07T12:50:32.000Z | youtube_dl/extractor/infoq.py | Logmytech/youtube-dl-QT | 1497297719a95c4f70fbfa32e0fa4e38cdd475dc | [
"MIT"
] | 5 | 2020-10-25T09:18:58.000Z | 2021-05-23T22:57:55.000Z | from __future__ import unicode_literals
import base64
from .common import InfoExtractor
from ..compat import (
compat_urllib_parse,
)
class InfoQIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?infoq\.com/[^/]+/(?P<id>[^/]+)$'
_TEST = {
'url': 'http://www.infoq.com/presentations/A-Few-of-My-Favorite-Python-Things',
'md5': 'b5ca0e0a8c1fed93b0e65e48e462f9a2',
'info_dict': {
'id': '12-jan-pythonthings',
'ext': 'mp4',
'description': 'Mike Pirnat presents some tips and tricks, standard libraries and third party packages that make programming in Python a richer experience.',
'title': 'A Few of My Favorite [Python] Things',
},
}
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
video_title = self._html_search_regex(r'<title>(.*?)</title>', webpage, 'title')
video_description = self._html_search_meta('description', webpage, 'description')
# The server URL is hardcoded
video_url = 'rtmpe://video.infoq.com/cfx/st/'
# Extract video URL
encoded_id = self._search_regex(
r"jsclassref\s*=\s*'([^']*)'", webpage, 'encoded id')
real_id = compat_urllib_parse.unquote(base64.b64decode(encoded_id.encode('ascii')).decode('utf-8'))
playpath = 'mp4:' + real_id
video_filename = playpath.split('/')[-1]
video_id, extension = video_filename.split('.')
http_base = self._search_regex(
r'EXPRESSINSTALL_SWF\s*=\s*"(https?://[^/"]+/)', webpage,
'HTTP base URL')
formats = [{
'format_id': 'rtmp',
'url': video_url,
'ext': extension,
'play_path': playpath,
}, {
'format_id': 'http',
'url': http_base + real_id,
}]
self._sort_formats(formats)
return {
'id': video_id,
'title': video_title,
'description': video_description,
'formats': formats,
}
| 32.538462 | 169 | 0.575887 |
aee6804b55465fa9bcd9d8ef5cc9b1fb8fbeb87e | 360 | py | Python | libhdhomerun/common/lineup.py | fugf123d/astrilchukj | 966e6f85789b9eac554a93320a62d98d517015ea | [
"MIT"
] | null | null | null | libhdhomerun/common/lineup.py | fugf123d/astrilchukj | 966e6f85789b9eac554a93320a62d98d517015ea | [
"MIT"
] | null | null | null | libhdhomerun/common/lineup.py | fugf123d/astrilchukj | 966e6f85789b9eac554a93320a62d98d517015ea | [
"MIT"
] | null | null | null | from channel import Channel
class Lineup(list):
def __init__(self, *args, **kwargs):
super(Lineup, self).__init__(*args, **kwargs)
def get_channel_numbers(self):
return [channel.guide_number for channel in self]
@classmethod
def from_iterable(cls, iterable):
return cls([Channel.from_dict(dct) for dct in iterable])
| 25.714286 | 64 | 0.683333 |
7ab8d78ee794b7954c350e09c63c3cd3b7a14d41 | 748 | py | Python | xlsxwriter/test/vml/test_write_stroke.py | yxwlr995/-Python-Pandas-XlsxWriter | cd28c1b968795b67f3013c49a0e02ffda5898163 | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | xlsxwriter/test/vml/test_write_stroke.py | yxwlr995/-Python-Pandas-XlsxWriter | cd28c1b968795b67f3013c49a0e02ffda5898163 | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | xlsxwriter/test/vml/test_write_stroke.py | yxwlr995/-Python-Pandas-XlsxWriter | cd28c1b968795b67f3013c49a0e02ffda5898163 | [
"BSD-2-Clause-FreeBSD"
] | 1 | 2020-04-12T16:44:58.000Z | 2020-04-12T16:44:58.000Z | ###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013, John McNamara, jmcnamara@cpan.org
#
import unittest
from ..compatibility import StringIO
from ...vml import Vml
class TestWriteVstroke(unittest.TestCase):
"""
Test the Vml _write_stroke() method.
"""
def setUp(self):
self.fh = StringIO()
self.vml = Vml()
self.vml._set_filehandle(self.fh)
def test_write_stroke(self):
"""Test the _write_stroke() method"""
self.vml._write_stroke()
exp = """<v:stroke joinstyle="miter"/>"""
got = self.fh.getvalue()
self.assertEqual(got, exp)
if __name__ == '__main__':
unittest.main()
| 20.216216 | 79 | 0.552139 |
eb0425ab38bd578e179ec39d41ae07428735d050 | 10,955 | py | Python | isi_sdk_9_0_0/isi_sdk_9_0_0/models/drives_drive_firmware_node_drive.py | mohitjain97/isilon_sdk_python | a371f438f542568edb8cda35e929e6b300b1177c | [
"Unlicense"
] | 24 | 2018-06-22T14:13:23.000Z | 2022-03-23T01:21:26.000Z | isi_sdk_9_0_0/isi_sdk_9_0_0/models/drives_drive_firmware_node_drive.py | mohitjain97/isilon_sdk_python | a371f438f542568edb8cda35e929e6b300b1177c | [
"Unlicense"
] | 46 | 2018-04-30T13:28:22.000Z | 2022-03-21T21:11:07.000Z | isi_sdk_9_0_0/isi_sdk_9_0_0/models/drives_drive_firmware_node_drive.py | mohitjain97/isilon_sdk_python | a371f438f542568edb8cda35e929e6b300b1177c | [
"Unlicense"
] | 29 | 2018-06-19T00:14:04.000Z | 2022-02-08T17:51:19.000Z | # coding: utf-8
"""
Isilon SDK
Isilon SDK - Language bindings for the OneFS API # noqa: E501
OpenAPI spec version: 10
Contact: sdk@isilon.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class DrivesDriveFirmwareNodeDrive(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'baynum': 'int',
'current_firmware': 'str',
'desired_firmware': 'str',
'devname': 'str',
'lnum': 'int',
'locnstr': 'str',
'model': 'str'
}
attribute_map = {
'baynum': 'baynum',
'current_firmware': 'current_firmware',
'desired_firmware': 'desired_firmware',
'devname': 'devname',
'lnum': 'lnum',
'locnstr': 'locnstr',
'model': 'model'
}
def __init__(self, baynum=None, current_firmware=None, desired_firmware=None, devname=None, lnum=None, locnstr=None, model=None): # noqa: E501
"""DrivesDriveFirmwareNodeDrive - a model defined in Swagger""" # noqa: E501
self._baynum = None
self._current_firmware = None
self._desired_firmware = None
self._devname = None
self._lnum = None
self._locnstr = None
self._model = None
self.discriminator = None
if baynum is not None:
self.baynum = baynum
if current_firmware is not None:
self.current_firmware = current_firmware
if desired_firmware is not None:
self.desired_firmware = desired_firmware
if devname is not None:
self.devname = devname
if lnum is not None:
self.lnum = lnum
if locnstr is not None:
self.locnstr = locnstr
if model is not None:
self.model = model
@property
def baynum(self):
"""Gets the baynum of this DrivesDriveFirmwareNodeDrive. # noqa: E501
Numerical representation of this drive's bay. # noqa: E501
:return: The baynum of this DrivesDriveFirmwareNodeDrive. # noqa: E501
:rtype: int
"""
return self._baynum
@baynum.setter
def baynum(self, baynum):
"""Sets the baynum of this DrivesDriveFirmwareNodeDrive.
Numerical representation of this drive's bay. # noqa: E501
:param baynum: The baynum of this DrivesDriveFirmwareNodeDrive. # noqa: E501
:type: int
"""
if baynum is not None and baynum > 9223372036854775807: # noqa: E501
raise ValueError("Invalid value for `baynum`, must be a value less than or equal to `9223372036854775807`") # noqa: E501
if baynum is not None and baynum < 0: # noqa: E501
raise ValueError("Invalid value for `baynum`, must be a value greater than or equal to `0`") # noqa: E501
self._baynum = baynum
@property
def current_firmware(self):
"""Gets the current_firmware of this DrivesDriveFirmwareNodeDrive. # noqa: E501
This drive's current firmware revision # noqa: E501
:return: The current_firmware of this DrivesDriveFirmwareNodeDrive. # noqa: E501
:rtype: str
"""
return self._current_firmware
@current_firmware.setter
def current_firmware(self, current_firmware):
"""Sets the current_firmware of this DrivesDriveFirmwareNodeDrive.
This drive's current firmware revision # noqa: E501
:param current_firmware: The current_firmware of this DrivesDriveFirmwareNodeDrive. # noqa: E501
:type: str
"""
if current_firmware is not None and len(current_firmware) > 255:
raise ValueError("Invalid value for `current_firmware`, length must be less than or equal to `255`") # noqa: E501
if current_firmware is not None and len(current_firmware) < 0:
raise ValueError("Invalid value for `current_firmware`, length must be greater than or equal to `0`") # noqa: E501
self._current_firmware = current_firmware
@property
def desired_firmware(self):
"""Gets the desired_firmware of this DrivesDriveFirmwareNodeDrive. # noqa: E501
This drive's desired firmware revision. # noqa: E501
:return: The desired_firmware of this DrivesDriveFirmwareNodeDrive. # noqa: E501
:rtype: str
"""
return self._desired_firmware
@desired_firmware.setter
def desired_firmware(self, desired_firmware):
"""Sets the desired_firmware of this DrivesDriveFirmwareNodeDrive.
This drive's desired firmware revision. # noqa: E501
:param desired_firmware: The desired_firmware of this DrivesDriveFirmwareNodeDrive. # noqa: E501
:type: str
"""
if desired_firmware is not None and len(desired_firmware) > 255:
raise ValueError("Invalid value for `desired_firmware`, length must be less than or equal to `255`") # noqa: E501
if desired_firmware is not None and len(desired_firmware) < 0:
raise ValueError("Invalid value for `desired_firmware`, length must be greater than or equal to `0`") # noqa: E501
self._desired_firmware = desired_firmware
@property
def devname(self):
"""Gets the devname of this DrivesDriveFirmwareNodeDrive. # noqa: E501
This drive's device name. # noqa: E501
:return: The devname of this DrivesDriveFirmwareNodeDrive. # noqa: E501
:rtype: str
"""
return self._devname
@devname.setter
def devname(self, devname):
"""Sets the devname of this DrivesDriveFirmwareNodeDrive.
This drive's device name. # noqa: E501
:param devname: The devname of this DrivesDriveFirmwareNodeDrive. # noqa: E501
:type: str
"""
if devname is not None and len(devname) > 255:
raise ValueError("Invalid value for `devname`, length must be less than or equal to `255`") # noqa: E501
if devname is not None and len(devname) < 0:
raise ValueError("Invalid value for `devname`, length must be greater than or equal to `0`") # noqa: E501
self._devname = devname
@property
def lnum(self):
"""Gets the lnum of this DrivesDriveFirmwareNodeDrive. # noqa: E501
This drive's logical drive number in IFS. # noqa: E501
:return: The lnum of this DrivesDriveFirmwareNodeDrive. # noqa: E501
:rtype: int
"""
return self._lnum
@lnum.setter
def lnum(self, lnum):
"""Sets the lnum of this DrivesDriveFirmwareNodeDrive.
This drive's logical drive number in IFS. # noqa: E501
:param lnum: The lnum of this DrivesDriveFirmwareNodeDrive. # noqa: E501
:type: int
"""
if lnum is not None and lnum > 9223372036854775807: # noqa: E501
raise ValueError("Invalid value for `lnum`, must be a value less than or equal to `9223372036854775807`") # noqa: E501
if lnum is not None and lnum < 0: # noqa: E501
raise ValueError("Invalid value for `lnum`, must be a value greater than or equal to `0`") # noqa: E501
self._lnum = lnum
@property
def locnstr(self):
"""Gets the locnstr of this DrivesDriveFirmwareNodeDrive. # noqa: E501
String representation of this drive's physical location. # noqa: E501
:return: The locnstr of this DrivesDriveFirmwareNodeDrive. # noqa: E501
:rtype: str
"""
return self._locnstr
@locnstr.setter
def locnstr(self, locnstr):
"""Sets the locnstr of this DrivesDriveFirmwareNodeDrive.
String representation of this drive's physical location. # noqa: E501
:param locnstr: The locnstr of this DrivesDriveFirmwareNodeDrive. # noqa: E501
:type: str
"""
if locnstr is not None and len(locnstr) > 255:
raise ValueError("Invalid value for `locnstr`, length must be less than or equal to `255`") # noqa: E501
if locnstr is not None and len(locnstr) < 0:
raise ValueError("Invalid value for `locnstr`, length must be greater than or equal to `0`") # noqa: E501
self._locnstr = locnstr
@property
def model(self):
"""Gets the model of this DrivesDriveFirmwareNodeDrive. # noqa: E501
This drive's manufacturer and model. # noqa: E501
:return: The model of this DrivesDriveFirmwareNodeDrive. # noqa: E501
:rtype: str
"""
return self._model
@model.setter
def model(self, model):
"""Sets the model of this DrivesDriveFirmwareNodeDrive.
This drive's manufacturer and model. # noqa: E501
:param model: The model of this DrivesDriveFirmwareNodeDrive. # noqa: E501
:type: str
"""
if model is not None and len(model) > 255:
raise ValueError("Invalid value for `model`, length must be less than or equal to `255`") # noqa: E501
if model is not None and len(model) < 0:
raise ValueError("Invalid value for `model`, length must be greater than or equal to `0`") # noqa: E501
self._model = model
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, DrivesDriveFirmwareNodeDrive):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 35.22508 | 147 | 0.618622 |
da984d6838d2fa8f4c7a751bb666e363a16e1e86 | 717 | py | Python | kombu/utils/__init__.py | nlundquist/kombu | 483cadced77d82a6ecd0be553b91ce92f04f9617 | [
"BSD-3-Clause"
] | 5,079 | 2015-01-01T03:39:46.000Z | 2022-03-31T07:38:22.000Z | desktop/core/ext-py/kombu-4.3.0/kombu/utils/__init__.py | zks888/hue | 93a8c370713e70b216c428caa2f75185ef809deb | [
"Apache-2.0"
] | 1,623 | 2015-01-01T08:06:24.000Z | 2022-03-30T19:48:52.000Z | desktop/core/ext-py/kombu-4.3.0/kombu/utils/__init__.py | zks888/hue | 93a8c370713e70b216c428caa2f75185ef809deb | [
"Apache-2.0"
] | 2,033 | 2015-01-04T07:18:02.000Z | 2022-03-28T19:55:47.000Z | """DEPRECATED - Import from modules below."""
from __future__ import absolute_import, print_function, unicode_literals
from .collections import EqualityDict
from .compat import fileno, maybe_fileno, nested, register_after_fork
from .div import emergency_dump_state
from .functional import (
fxrange, fxrangemax, maybe_list, reprcall, retry_over_time,
)
from .imports import symbol_by_name
from .objects import cached_property
from .uuid import uuid
__all__ = (
'EqualityDict', 'uuid', 'maybe_list',
'fxrange', 'fxrangemax', 'retry_over_time',
'emergency_dump_state', 'cached_property',
'register_after_fork', 'reprkwargs', 'reprcall',
'symbol_by_name', 'nested', 'fileno', 'maybe_fileno',
)
| 34.142857 | 72 | 0.764296 |
8f8fa5f495a91d013c4df279f146ce1be31c98fa | 1,767 | py | Python | tests/book/ch02/dat.py | xchaoinfo/pyhanlp | d61829a7aa35965e34ca0e220ee7f83bd3e8c5ed | [
"Apache-2.0"
] | 2,792 | 2018-03-19T10:24:23.000Z | 2022-03-29T07:46:53.000Z | pyhanlp-master/tests/book/ch02/dat.py | tianmingl/maincode | 724c60d5281ba3911ca065d9e144bb1b09e8257f | [
"MIT"
] | 50 | 2018-03-19T11:08:37.000Z | 2022-01-11T16:34:00.000Z | pyhanlp-master/tests/book/ch02/dat.py | tianmingl/maincode | 724c60d5281ba3911ca065d9e144bb1b09e8257f | [
"MIT"
] | 802 | 2018-03-19T11:08:49.000Z | 2022-03-22T07:20:26.000Z | # -*- coding:utf-8 -*-
# Author:hankcs
# Date: 2018-05-26 21:16
# 《自然语言处理入门》 ("Introduction to Natural Language Processing"), section 2.5: Double-Array Trie
# Companion book: http://nlp.hankcs.com/book.php
# Discussion and Q&A: https://bbs.hankcs.com/
from pyhanlp import *
class DoubleArrayTrie(object):
def __init__(self, dic: dict) -> None:
m = JClass('java.util.TreeMap')()
for k, v in dic.items():
m[k] = v
DoubleArrayTrie = JClass('com.hankcs.hanlp.collection.trie.DoubleArrayTrie')
dat = DoubleArrayTrie(m)
self.base = dat.getBase()
self.check = dat.getCheck()
self.value = dat.getValueArray([''])
@staticmethod
def char_hash(c) -> int:
return JClass('java.lang.Character')(c).hashCode()
def transition(self, c, b) -> int:
"""
状态转移
:param c: 字符
:param b: 初始状态
:return: 转移后的状态,-1表示失败
"""
p = self.base[b] + self.char_hash(c) + 1
if self.base[b] == self.check[p]:
return p
else:
return -1
def __getitem__(self, key: str):
b = 0
        for i in range(0, len(key)):  # len(key) state transitions
p = self.transition(key[i], b)
            if p != -1:
b = p
else:
return None
        p = self.base[b]  # state transition on the terminal character '\0'
        n = self.base[p]  # look up base
        if p == self.check[p] and n < 0:  # transition succeeded and the state ends a word
            index = -n - 1  # recover the word's index in the sorted dictionary
return self.value[index]
return None
if __name__ == '__main__':
dic = {'自然': 'nature', '自然人': 'human', '自然语言': 'language', '自语': 'talk to oneself', '入门': 'introduction'}
dat = DoubleArrayTrie(dic)
assert dat['自然'] == 'nature'
assert dat['自然语言'] == 'language'
assert dat['不存在'] is None
assert dat['自然\0在'] is None
| 28.047619 | 109 | 0.526882 |
7a4a0cabee4e9cb1e23f1acf37c6c01b81024d93 | 11,636 | py | Python | backend/data/client/lunarcrush.py | huenique/gibme-crypto | 970f2ab072682c4533a348783c29c40a8fcdd26e | [
"MIT"
] | 2 | 2021-05-23T23:29:56.000Z | 2021-05-24T00:16:30.000Z | backend/data/client/lunarcrush.py | huenique/gibme-crypto | 970f2ab072682c4533a348783c29c40a8fcdd26e | [
"MIT"
] | 6 | 2021-05-20T14:57:19.000Z | 2021-05-26T19:45:01.000Z | backend/data/client/lunarcrush.py | huenique/gibme-crypto | 970f2ab072682c4533a348783c29c40a8fcdd26e | [
"MIT"
] | null | null | null | """
Lunarcrush client.
HTTP API-endpoint connections to lunarCRUSH.
Visit `https://lunarcrush.com/developers/docs#` for more information.
"""
from __future__ import annotations
import string
from typing import Optional
from . import utils
BASE_URL = 'https://api.lunarcrush.com/v2?'
class QueryFormatter(string.Formatter):
def __init__(self, missing='', bad_fmt=''):
self.missing = missing
self.bad_fmt = bad_fmt
self.fields = {}
@staticmethod
def cleanup(fmtd):
if fmtd[-1:] == '&':
return fmtd[:-1]
else:
return fmtd
def get_field(self, field_name, args, kwargs):
try:
pairs = super().get_field(field_name, args, kwargs)
key = pairs[0]
if key:
self.fields[key] = field_name
except (KeyError, AttributeError):
pairs = None, field_name
return pairs
def format_field(self, value, spec):
if value == None:
return self.missing
try:
if len(self.fields) == 1:
return super().format_field(value, spec)
else:
return super().format_field(f'{self.fields[value]}={value}&',
spec)
except ValueError:
if self.bad_fmt is not None:
return self.bad_fmt
else:
raise
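# Illustrative sketch (not part of the original client): QueryFormatter turns
# each named field into a "name=value&" pair, silently drops fields whose value
# is None, and `cleanup` strips the trailing "&". Assuming a made-up key "abc":
#
#     qf = QueryFormatter()
#     url = qf.cleanup(qf.format(
#         '{base_url}{data}{key}{symbol}{interval}',
#         base_url=BASE_URL, data='assets', key='abc', symbol='LTC',
#         interval=None))
#     # url == 'https://api.lunarcrush.com/v2?data=assets&key=abc&symbol=LTC'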
class AssetsEndpoint(QueryFormatter):
"""Details, overall metrics, and time series metrics for one or multiple
cryptocurrencies.
Usage:
- https://api.lunarcrush.com/v2?data=assets?
- https://api.lunarcrush.com/v2?data=assets&key=<key>&symbol=LTC
"""
async def fetch_assets(
self,
loop,
key: str,
symbol: str,
interval: Optional[str] = None,
time_series_indicators: Optional[str] = None,
change: Optional[str] = None,
data_points: Optional[int] = None,
start: Optional[int] = None,
end: Optional[int] = None,
output: Optional[str] = None,
) -> utils.objects.Response:
url_args = {
'base_url': BASE_URL,
'data': 'assets',
'key': key,
'symbol': symbol,
'interval': interval,
'time_series_indicators': time_series_indicators,
'change': change,
'data_points': data_points,
'start': start,
'end': end
}
fmt = '{base_url}{data}{key}{symbol}{interval}{time_series_indicators}{change}{data_points}{start}{end}{output}'
url_query = await loop.run_in_executor(None, self.cleanup,
self.format(fmt, **url_args))
return await utils.get(uri=url_query, output=output)
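# Example call (illustrative only; requires a valid LunarCRUSH API key and a
# running asyncio event loop, neither of which is part of this module):
#
#     import asyncio
#     loop = asyncio.get_event_loop()
#     response = loop.run_until_complete(
#         AssetsEndpoint().fetch_assets(loop, key='your-api-key', symbol='LTC'))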
class MarketPairsEndpoint(QueryFormatter):
"""Provides the exchange information for cryptocurrencies and the other
cryptocurrencies they are being traded for.
Usage:
- https://api.lunarcrush.com/v2?data=market-pairs?
"""
    async def fetch_market_pairs(
self,
loop,
key: str,
symbol: str,
limit: Optional[int] = None,
page: Optional[int] = None,
output: Optional[str] = None,
):
url_args = {
'base_url': BASE_URL,
'data': 'market-pairs',
'key': key,
'symbol': symbol,
'limit': limit,
'page': page,
}
url_query = self.cleanup(
self.format(
                '{base_url}{data}{key}{symbol}{limit}{page}',
**url_args))
return await utils.get(url_query)
class MarketEndpoint(QueryFormatter):
"""Summary information for all supported cryptocurrencies including 5
recent time series values for some metrics.
"""
async def fetch_market(
self,
loop,
key: str,
type: Optional[int] = None,
page: Optional[int] = None,
sort: Optional[str] = None,
desc: Optional[bool] = None,
output: Optional[str] = None,
):
url_args = {
'base_url': BASE_URL,
'data': 'market',
'key': key,
'type': type,
'page': page,
'sort': sort,
'desc': desc,
}
url_query = self.cleanup(
self.format(
                '{base_url}{data}{key}{type}{page}{sort}{desc}',
**url_args))
return await utils.get(url_query)
class GlobalEndpoint(QueryFormatter):
"""Overall aggregated metrics for all supported cryptocurrencies.
"""
async def fetch_global(
self,
loop,
key: str,
interval: Optional[str] = None,
change: Optional[str] = None,
data_points: Optional[int] = None,
output: Optional[str] = None,
):
url_args = {
'base_url': BASE_URL,
'data': 'global',
'key': key,
'interval': interval,
'change': change,
'data_points': data_points,
}
url_query = self.cleanup(
self.format(
                '{base_url}{data}{key}{interval}{change}{data_points}',
**url_args))
return await utils.get(url_query)
class MetaEndpoint(QueryFormatter):
"""Meta information for all supported assets
"""
async def fetch_meta(
self,
loop,
key: str,
type: Optional[str] = None,
output: Optional[str] = None,
):
url_args = {
'base_url': BASE_URL,
'data': 'meta',
'key': key,
'type': type,
}
url_query = self.cleanup(
self.format(
                '{base_url}{data}{key}{type}',
**url_args))
return await utils.get(url_query)
class ExchangesEndpoint(QueryFormatter):
"""Meta information for all trackable exchanges.
"""
async def fetch_exchanges(
self,
loop,
key: str,
output: Optional[str] = None,
):
url_args = {
'base_url': BASE_URL,
'data': 'exchanges',
'key': key,
}
url_query = self.cleanup(
self.format(
                '{base_url}{data}{key}',
**url_args))
return await utils.get(url_query)
class ExchangeEndpoint(QueryFormatter):
"""Meta information and market pairs for a single exchange.
"""
async def fetch_exchange(
self,
loop,
key: str,
exchange: str,
output: Optional[str] = None,
):
url_args = {
'base_url': BASE_URL,
'data': 'exchange',
'key': key,
'exchange': exchange,
}
url_query = self.cleanup(
self.format(
                '{base_url}{data}{key}{exchange}',
**url_args))
return await utils.get(url_query)
class CoinOfTheDayEndpoint(QueryFormatter):
"""The current coin of the day
"""
async def fetch_cod(
self,
loop,
key: str,
output: Optional[str] = None,
):
url_args = {
'base_url': BASE_URL,
'data': 'coinoftheday',
'key': key,
}
url_query = self.cleanup(
self.format(
                '{base_url}{data}{key}',
**url_args))
return await utils.get(url_query)
class CoinOfTheDayInfoEndpoint(QueryFormatter):
"""Provides the history of the coin of the day on LunarCRUSH when it was
last changed, and when each coin was last coin of the day
"""
async def fetch_cod_info(
self,
loop,
key: str,
output: Optional[str] = None,
):
url_args = {
'base_url': BASE_URL,
'data': 'coinoftheday_info',
'key': key,
}
url_query = self.cleanup(
self.format(
                '{base_url}{data}{key}',
**url_args))
return await utils.get(url_query)
class FeedsEndpoint(QueryFormatter):
"""Social posts, news, and shared links for one or multiple coins.
"""
async def fetch_feeds(
self,
loop,
key: str,
symbol: Optional[str] = None,
sources: Optional[str] = None,
limit: Optional[int] = None,
page: Optional[int] = None,
type: Optional[str] = None,
start: Optional[int] = None,
end: Optional[int] = None,
output: Optional[str] = None,
):
url_args = {
'base_url': BASE_URL,
'data': 'feeds',
'key': key,
'symbol': symbol,
'sources': sources,
'limit': limit,
'page': page,
'type': type,
'start': start,
'end': end,
}
url_query = self.cleanup(
self.format(
                '{base_url}{data}{key}{symbol}{sources}{limit}{page}{type}{start}{end}',
**url_args))
return await utils.get(url_query)
class InfluencersEndpoint(QueryFormatter):
"""List of social accounts that have the most influence on different assets
    based on number of followers, engagements, and volume of posts.
"""
async def fetch_influencers(
self,
loop,
key: str,
symbol: Optional[str] = None,
days: Optional[int] = None,
num_days: Optional[int] = None,
limit: Optional[int] = None,
order_by: Optional[str] = None,
output: Optional[str] = None,
):
url_args = {
'base_url': BASE_URL,
'data': 'influencers',
'key': key,
'symbol': symbol,
'days': days,
'num_days': num_days,
'limit': limit,
'order_by': order_by,
}
url_query = self.cleanup(
self.format(
                '{base_url}{data}{key}{symbol}{days}{num_days}{limit}{order_by}',
**url_args))
return await utils.get(url_query)
class InfluencerEndpoint(QueryFormatter):
"""Individual influencer details including actual posts.
"""
async def fetch_influencer(
self,
loop,
key: str,
id: Optional[str] = None,
screen_name: Optional[str] = None,
days: Optional[str] = None,
limit: Optional[str] = None,
page: Optional[str] = None,
output: Optional[str] = None,
):
url_args = {
'base_url': BASE_URL,
'data': 'influencer',
'key': key,
'id': id,
'screen_name': screen_name,
'days': days,
'limit': limit,
'page': page,
}
url_query = self.cleanup(
self.format(
                '{base_url}{data}{key}{id}{screen_name}{days}{limit}{page}',
**url_args))
return await utils.get(url_query)
| 29.835897 | 120 | 0.536954 |
f84b896c41d98ac7a0aa6b43f0ba7d373bf473ff | 34,148 | py | Python | utils/data.py | mstrise/dep2label-eye-tracking-data | ad83d75f0953d8562e157dbbe4cfe22b8ca516cd | [
"MIT"
] | 1 | 2019-09-02T18:36:38.000Z | 2019-09-02T18:36:38.000Z | utils/data.py | mstrise/dep2label-eye-tracking-data | ad83d75f0953d8562e157dbbe4cfe22b8ca516cd | [
"MIT"
] | null | null | null | utils/data.py | mstrise/dep2label-eye-tracking-data | ad83d75f0953d8562e157dbbe4cfe22b8ca516cd | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# @Author: Jie
# @Date: 2017-06-14 17:34:32
# @Last Modified by: Jie Yang, Contact: jieynlp@gmail.com
# @Last Modified time: 2018-04-26 13:58:10
import sys
import numpy as np
from alphabet import Alphabet
from functions import *
import cPickle as pickle
import itertools
from ast import literal_eval as make_tuple
from collections import OrderedDict
START = "</s>"
UNKNOWN = "</unk>"
PADDING = "</pad>"
class Data:
def __init__(self):
self.MAX_SENTENCE_LENGTH = 250
self.MAX_WORD_LENGTH = -1
self.number_normalized = True
self.norm_word_emb = False
self.norm_char_emb = False
self.word_alphabet = Alphabet('word')
self.char_alphabet = Alphabet('character')
self.feature_name = []
self.feature_alphabets = []
self.feature_num = len(self.feature_alphabets)
self.feat_config = None
self.label_alphabet = {0: Alphabet('label',True)}
self.tagScheme = "NoSeg" ## BMES/BIO
self.seg = True
### I/O
self.train_dir = None
self.dev_dir = None
self.test_dir = None
self.raw_dir = None
self.decode_dir = None
self.dset_dir = None ## data vocabulary related file
self.model_dir = None ## model save file
self.load_model_dir = None ## model load file
self.word_emb_dir = None
self.char_emb_dir = None
self.feature_emb_dirs = []
self.train_texts = []
self.dev_texts = []
self.test_texts = []
self.raw_texts = []
self.train_Ids = []
self.dev_Ids = []
self.test_Ids = []
self.raw_Ids = []
self.pretrain_word_embedding = None
self.pretrain_char_embedding = None
self.pretrain_feature_embeddings = []
#Added for pretraining
self.PRETRAINED_ALL = "all"
self.PRETRAINED_LSTMS = "lstms"
self.pretrained_model = None
self.pretrained_part = None
self.label_size = 0
self.word_alphabet_size = 0
self.char_alphabet_size = 0
#self.label_alphabet_size = 0
self.label_alphabet_sizes = {0:0}
self.feature_alphabet_sizes = []
self.feature_emb_dims = []
self.norm_feature_embs = []
self.word_emb_dim = 50
self.char_emb_dim = 30
###Networks
self.word_feature_extractor = "LSTM" ## "LSTM"/"CNN"/"GRU"/
self.use_char = True
self.char_feature_extractor = "CNN" ## "LSTM"/"CNN"/"GRU"/None
self.use_crf = True
self.nbest = None
## Training
self.average_batch_loss = False
self.optimizer = "SGD" ## "SGD"/"AdaGrad"/"AdaDelta"/"RMSProp"/"Adam"
self.status = "train"
### Hyperparameters
self.HP_cnn_layer = 4
self.HP_iteration = 100
self.HP_batch_size = 10
self.HP_char_hidden_dim = 50
self.HP_hidden_dim = 200
self.HP_feature_default_size = 20
self.HP_dropout = 0.5
self.HP_lstm_layer = 1
self.HP_bilstm = True
self.HP_gpu = False
self.HP_lr = 0.015
self.HP_lr_decay = 0.05
self.HP_clip = None
self.HP_momentum = 0
self.HP_l2 = 1e-8
#D: The number of tasks to be solved
self.HP_tasks = 1
self.HP_main_tasks = self.HP_tasks
self.HP_tasks_weights = [1]
self.optimize_with_evalb = False
self.optimize_with_las = False
self.offset = False
self.choice_of_best_model="avg"
self.language="English"
# self.HP_tasks_inputs = [self.LSTMOUT]
#Policy Gradient
self.No_samples = 8
self.pg_variance_reduce = True
self.variance_reduce_burn_in = 999
self.pg_valsteps = 1000
self.entropy_regularisation = True
self.entropy_reg_coeff = 0.01
#Hyper-parameters for disjoint training
self.train_task_ids = []
self.dev_task_ids = []
self.test_task_ids = []
self.raw_task_ids = []
self.disjoint = True
self.datasets = {}
self.tasks_metrics = {}
self.HP_tasks_weight_decays = [0]
def show_data_summary(self):
print("++"*50)
print("DATA SUMMARY START:")
print(" I/O:")
print(" Tag scheme: %s"%(self.tagScheme))
print(" MAX SENTENCE LENGTH: %s"%(self.MAX_SENTENCE_LENGTH))
print(" MAX WORD LENGTH: %s"%(self.MAX_WORD_LENGTH))
print(" Number normalized: %s"%(self.number_normalized))
print(" Word alphabet size: %s"%(self.word_alphabet_size))
print(" Char alphabet size: %s"%(self.char_alphabet_size))
for idtask in self.label_alphabet:
print (" Label alphabet size for task %s: %s"%(idtask,self.label_alphabet_sizes[idtask]))
#print(" Label alphabet size: %s"%(self.label_alphabet_size))
print(" Word embedding dir: %s"%(self.word_emb_dir))
print(" Char embedding dir: %s"%(self.char_emb_dir))
print(" Word embedding size: %s"%(self.word_emb_dim))
print(" Char embedding size: %s"%(self.char_emb_dim))
print(" Norm word emb: %s"%(self.norm_word_emb))
print(" Norm char emb: %s"%(self.norm_char_emb))
print(" Train file directory: %s"%(self.train_dir))
print(" Dev file directory: %s"%(self.dev_dir))
print(" Test file directory: %s"%(self.test_dir))
print(" Raw file directory: %s"%(self.raw_dir))
print(" Dset file directory: %s"%(self.dset_dir))
print(" Model file directory: %s"%(self.model_dir))
print(" Pretrained model : %s"%(self.pretrained_model))
print(" Pretrained part : %s"%(self.pretrained_part))
print(" Loadmodel directory: %s"%(self.load_model_dir))
print(" Decode file directory: %s"%(self.decode_dir))
print(" Train instance number: %s"%(len(self.train_texts)))
print(" Dev instance number: %s"%(len(self.dev_texts)))
print(" Test instance number: %s"%(len(self.test_texts)))
print(" Raw instance number: %s"%(len(self.raw_texts)))
print(" FEATURE num: %s"%(self.feature_num))
for idx in range(self.feature_num):
print(" Fe: %s alphabet size: %s"%(self.feature_alphabets[idx].name, self.feature_alphabet_sizes[idx]))
print(" Fe: %s embedding dir: %s"%(self.feature_alphabets[idx].name, self.feature_emb_dirs[idx]))
print(" Fe: %s embedding size: %s"%(self.feature_alphabets[idx].name, self.feature_emb_dims[idx]))
print(" Fe: %s norm emb: %s"%(self.feature_alphabets[idx].name, self.norm_feature_embs[idx]))
print(" "+"++"*20)
print(" Model Network:")
print(" Model use_crf: %s"%(self.use_crf))
print(" Model word extractor: %s"%(self.word_feature_extractor))
print(" Model use_char: %s"%(self.use_char))
if self.use_char:
print(" Model char extractor: %s"%(self.char_feature_extractor))
print(" Model char_hidden_dim: %s"%(self.HP_char_hidden_dim))
print(" "+"++"*20)
print(" Training:")
print(" Optimizer: %s"%(self.optimizer))
print(" Iteration: %s"%(self.HP_iteration))
print(" BatchSize: %s"%(self.HP_batch_size))
print(" Average batch loss: %s"%(self.average_batch_loss))
print(" "+"++"*20)
print(" Hyperparameters:")
print(" Hyper lr: %s"%(self.HP_lr))
print(" Hyper lr_decay: %s"%(self.HP_lr_decay))
print(" Hyper HP_clip: %s"%(self.HP_clip))
print(" Hyper momentum: %s"%(self.HP_momentum))
print(" Hyper l2: %s"%(self.HP_l2))
print(" Hyper hidden_dim: %s"%(self.HP_hidden_dim))
print(" Hyper dropout: %s"%(self.HP_dropout))
print(" Hyper lstm_layer: %s"%(self.HP_lstm_layer))
print(" Hyper bilstm: %s"%(self.HP_bilstm))
print(" Hyper GPU: %s"%(self.HP_gpu))
print(" Hyper number of tasks: %s"%(self.HP_tasks))
print("DATA SUMMARY END.")
print("++"*50)
sys.stdout.flush()
def initial_feature_alphabets(self):
for l in open(self.train_dir,'r').readlines():
if not l.startswith("#") and not l.startswith("-BOS-"):
items = l.strip("\n").split()
break
total_column = len(items)
if total_column > 2:
for idx in range(1, total_column-1):
feature_prefix = items[idx].split(']',1)[0]+"]"
self.feature_alphabets.append(Alphabet(feature_prefix))
self.feature_name.append(feature_prefix)
print "Find feature: ", feature_prefix
self.feature_num = len(self.feature_alphabets)
self.pretrain_feature_embeddings = [None]*self.feature_num
self.feature_emb_dims = [self.HP_feature_default_size]*self.feature_num
#self.feature_emb_dims = [20]*self.feature_num
self.feature_emb_dirs = [None]*self.feature_num
self.norm_feature_embs = [False]*self.feature_num
self.feature_alphabet_sizes = [0]*self.feature_num
if self.feat_config:
for idx in range(self.feature_num):
if self.feature_name[idx] in self.feat_config:
self.feature_emb_dims[idx] = self.feat_config[self.feature_name[idx]]['emb_size']
self.feature_emb_dirs[idx] = self.feat_config[self.feature_name[idx]]['emb_dir']
self.norm_feature_embs[idx] = self.feat_config[self.feature_name[idx]]['emb_norm']
def build_alphabet(self, input_file):
sample_corpus = None
in_lines = open(input_file,'r').readlines()
for line in in_lines:
if line.upper().startswith(TREEBANK_LINE):#Check the treebank this sentence comes from
sample_corpus = "["+line.upper().replace(TREEBANK_LINE,"").strip()+"]"
elif len(line) > 2:
pairs = line.strip().split()
word = pairs[0].decode('utf-8')
if self.number_normalized:
word = normalize_word(word)
label = pairs[-1]
if self.HP_tasks > 1 or not self.disjoint: #self.task_config[sample_corpus]["nb_tasks"] > 1:
label = parse_multitask_label(label)
else:
label = [label]
if len(label) != len(self.label_alphabet) and not self.disjoint:
raise ValueError("The number of tasks and the number of labels in the output column do not match")
init_label_alp_index = 0 if not self.disjoint else self.task_config[sample_corpus]["idstask"]
for idtask, l in enumerate(label,init_label_alp_index):
#for idtask, l in enumerate(label):
self.label_alphabet[idtask].add(l)
self.word_alphabet.add(word)
for idx in range(self.feature_num):
feat_idx = pairs[idx+1].split(']',1)[-1]
self.feature_alphabets[idx].add(feat_idx)
for char in word:
self.char_alphabet.add(char)
self.word_alphabet_size = self.word_alphabet.size()
self.char_alphabet_size = self.char_alphabet.size()
for idtask in self.label_alphabet:
self.label_alphabet_sizes[idtask] = self.label_alphabet[idtask].size()
for idx in range(self.feature_num):
self.feature_alphabet_sizes[idx] = self.feature_alphabets[idx].size()
for idtask in self.label_alphabet:
startS = False
startB = False
for label,_ in self.label_alphabet[idtask].iteritems():
if "S-" in label.upper():
startS = True
elif "B-" in label.upper():
startB = True
if startB:
if startS:
self.tagScheme = "BMES"
else:
self.tagScheme = "BIO"
def fix_alphabet(self):
self.word_alphabet.close()
self.char_alphabet.close()
for idtask in self.label_alphabet:
self.label_alphabet[idtask].close()
for idx in range(self.feature_num):
self.feature_alphabets[idx].close()
def build_pretrain_emb(self):
if self.word_emb_dir:
print("Load pretrained word embedding, norm: %s, dir: %s"%(self.norm_word_emb, self.word_emb_dir))
self.pretrain_word_embedding, self.word_emb_dim = build_pretrain_embedding(self.word_emb_dir, self.word_alphabet, self.word_emb_dim, self.norm_word_emb)
if self.char_emb_dir:
print("Load pretrained char embedding, norm: %s, dir: %s"%(self.norm_char_emb, self.char_emb_dir))
self.pretrain_char_embedding, self.char_emb_dim = build_pretrain_embedding(self.char_emb_dir, self.char_alphabet, self.char_emb_dim, self.norm_char_emb)
for idx in range(self.feature_num):
if self.feature_emb_dirs[idx]:
print("Load pretrained feature %s embedding:, norm: %s, dir: %s"%(self.feature_name[idx], self.norm_feature_embs[idx], self.feature_emb_dirs[idx]))
self.pretrain_feature_embeddings[idx], self.feature_emb_dims[idx] = build_pretrain_embedding(self.feature_emb_dirs[idx], self.feature_alphabets[idx], self.feature_emb_dims[idx], self.norm_feature_embs[idx])
def generate_instance(self, name):
self.fix_alphabet()
if name == "train":
self.train_texts, self.train_Ids = read_instance(self.train_dir, self.word_alphabet, self.char_alphabet,
self.feature_alphabets, self.label_alphabet, self.number_normalized, self.MAX_SENTENCE_LENGTH,
self.task_config if self.disjoint else None)
elif name == "dev":
self.dev_texts, self.dev_Ids = read_instance(self.dev_dir, self.word_alphabet, self.char_alphabet,
self.feature_alphabets, self.label_alphabet, self.number_normalized, self.MAX_SENTENCE_LENGTH,
self.task_config if self.disjoint else None)
elif name == "test":
self.test_texts, self.test_Ids = read_instance(self.test_dir, self.word_alphabet, self.char_alphabet,
self.feature_alphabets, self.label_alphabet, self.number_normalized, self.MAX_SENTENCE_LENGTH,
self.task_config if self.disjoint else None)
elif name == "raw":
self.raw_texts, self.raw_Ids = read_instance(self.raw_dir, self.word_alphabet, self.char_alphabet,
self.feature_alphabets, self.label_alphabet, self.number_normalized, self.MAX_SENTENCE_LENGTH,
self.task_config if self.disjoint else None)
else:
print("Error: you can only generate train/dev/test instance! Illegal input:%s"%(name))
def write_decoded_results(self, predict_results, name, indexes =None):
fout = open(self.decode_dir,'w')
content_list = []
if name == 'raw':
content_list = self.raw_texts
elif name == 'test':
content_list = self.test_texts
elif name == 'dev':
content_list = self.dev_texts
elif name == 'train':
content_list = self.train_texts
else:
print("Error: illegal name during writing predict result, name should be within train/dev/test/raw !")
for task_predict_results in predict_results:
sent_num = len(task_predict_results)
assert(sent_num == len(content_list))
for idx in range(sent_num):
if indexes is not None and idx not in indexes:
continue
sent_length = len(predict_results[0][idx]) #Index 0 to know the length of the input sentence
for idy in range(sent_length):
## content_list[idx] is a list with [word, char, label]
inputs = []
for id_input in range(len(content_list[idx])-2):
if content_list[idx][id_input][0] != []:
if type(content_list[idx][id_input][idy]) == type([]):
for feature in content_list[idx][id_input][idy]:
inputs.append(feature.encode('utf-8'))
else:
inputs.append(content_list[idx][id_input][idy].encode('utf-8'))
outputs = []
for task in predict_results:
outputs.append(task[idx][idy])
fout.write( "\t".join(inputs) + "\t" + "{}".join(outputs) + '\n')
fout.write('\n')
fout.close()
print("Predict %s result has been written into file. %s"%(name, self.decode_dir))
def load(self,data_file):
f = open(data_file, 'rb')
tmp_dict = pickle.load(f)
f.close()
self.__dict__.update(tmp_dict)
def save(self,save_file):
f = open(save_file, 'wb')
pickle.dump(self.__dict__, f, 2)
f.close()
def write_nbest_decoded_results(self, predict_results, pred_scores, name):
fout = open(self.decode_dir,'w')
sent_num = len(predict_results)
content_list = []
if name == 'raw':
content_list = self.raw_texts
elif name == 'test':
content_list = self.test_texts
elif name == 'dev':
content_list = self.dev_texts
elif name == 'train':
content_list = self.train_texts
else:
print("Error: illegal name during writing predict result, name should be within train/dev/test/raw !")
for idtask_predict_results, task_predict_results in enumerate(predict_results):
sent_num = len(task_predict_results)
assert(sent_num == len(content_list))
for idx in range(sent_num):
score_string = "# "
for idtask_predict_results, task_predict_results in enumerate(predict_results):
sent_length = len(task_predict_results[idx][0])
nbest = len(task_predict_results[0])
#Printing the probabilities
for idz in range(nbest):
score_string += format(pred_scores[idtask_predict_results][idx][idz], '.4f')+" "
fout.write(score_string.strip() + "\t")
fout.write("\n")
for idy in range(sent_length):
label_string = content_list[idx][0][idy].encode('utf-8') + "\t"
for ifeat in range(len(content_list[idx][1][idy])):
label_string += content_list[idx][1][idy][ifeat].encode('utf-8') + "\t"
for idtask_predict_results, task_predict_results in enumerate(predict_results):
for idz in range(nbest):
label_string += task_predict_results[idx][idz][idy]+","
label_string = label_string.strip().strip(",") + "{}"
fout.write(label_string)
fout.write('\n')
fout.write('\n')
fout.close()
print("Predict %s %s-best result has been written into file. %s"%(name,nbest, self.decode_dir))
def read_config(self,config_file):
config = config_file_to_dict(config_file)
## read data:
the_item = 'train_dir'
if the_item in config:
self.train_dir = config[the_item]
the_item = 'dev_dir'
if the_item in config:
self.dev_dir = config[the_item]
the_item = 'test_dir'
if the_item in config:
self.test_dir = config[the_item]
the_item = 'raw_dir'
if the_item in config:
self.raw_dir = config[the_item]
the_item = 'decode_dir'
if the_item in config:
self.decode_dir = config[the_item]
the_item = 'dset_dir'
if the_item in config:
self.dset_dir = config[the_item]
the_item = 'model_dir'
if the_item in config:
self.model_dir = config[the_item]
the_item = 'load_model_dir'
if the_item in config:
self.load_model_dir = config[the_item]
the_item = 'word_emb_dir'
if the_item in config:
self.word_emb_dir = config[the_item]
the_item = 'char_emb_dir'
if the_item in config:
self.char_emb_dir = config[the_item]
the_item = 'MAX_SENTENCE_LENGTH'
if the_item in config:
self.MAX_SENTENCE_LENGTH = int(config[the_item])
the_item = 'MAX_WORD_LENGTH'
if the_item in config:
self.MAX_WORD_LENGTH = int(config[the_item])
the_item = 'norm_word_emb'
if the_item in config:
self.norm_word_emb = str2bool(config[the_item])
the_item = 'norm_char_emb'
if the_item in config:
self.norm_char_emb = str2bool(config[the_item])
the_item = 'number_normalized'
if the_item in config:
self.number_normalized = str2bool(config[the_item])
the_item = 'seg'
if the_item in config:
self.seg = str2bool(config[the_item])
the_item = 'word_emb_dim'
if the_item in config:
self.word_emb_dim = int(config[the_item])
the_item = 'char_emb_dim'
if the_item in config:
self.char_emb_dim = int(config[the_item])
## read network:
the_item = 'use_crf'
if the_item in config:
self.use_crf = str2bool(config[the_item])
the_item = 'use_char'
if the_item in config:
self.use_char = str2bool(config[the_item])
the_item = 'word_seq_feature'
if the_item in config:
self.word_feature_extractor = config[the_item]
the_item = 'char_seq_feature'
if the_item in config:
self.char_feature_extractor = config[the_item]
the_item = 'nbest'
if the_item in config:
self.nbest = int(config[the_item])
the_item = 'feature'
if the_item in config:
self.feat_config = config[the_item] ## feat_config is a dict
the_item = 'feature_default_size'
if the_item in config:
self.HP_feature_default_size = int(config[the_item])
## read training setting:
the_item = 'optimizer'
if the_item in config:
self.optimizer = config[the_item]
the_item = 'ave_batch_loss'
if the_item in config:
self.average_batch_loss = str2bool(config[the_item])
the_item = 'status'
if the_item in config:
self.status = config[the_item]
## read Hyperparameters:
the_item = 'cnn_layer'
if the_item in config:
self.HP_cnn_layer = int(config[the_item])
the_item = 'iteration'
if the_item in config:
self.HP_iteration = int(config[the_item])
the_item = 'batch_size'
if the_item in config:
self.HP_batch_size = int(config[the_item])
the_item = 'char_hidden_dim'
if the_item in config:
self.HP_char_hidden_dim = int(config[the_item])
the_item = 'hidden_dim'
if the_item in config:
self.HP_hidden_dim = int(config[the_item])
the_item = 'dropout'
if the_item in config:
self.HP_dropout = float(config[the_item])
the_item = 'lstm_layer'
if the_item in config:
self.HP_lstm_layer = int(config[the_item])
the_item = 'bilstm'
if the_item in config:
self.HP_bilstm = str2bool(config[the_item])
the_item = 'gpu'
if the_item in config:
self.HP_gpu = str2bool(config[the_item])
the_item = 'learning_rate'
if the_item in config:
self.HP_lr = float(config[the_item])
the_item = 'lr_decay'
if the_item in config:
self.HP_lr_decay = float(config[the_item])
the_item = 'clip'
if the_item in config:
self.HP_clip = float(config[the_item])
the_item = 'momentum'
if the_item in config:
self.HP_momentum = float(config[the_item])
the_item = 'l2'
if the_item in config:
self.HP_l2 = float(config[the_item])
#Hyperparameters for auxiliary tasks over the same treebank
the_item = 'disjoint'
if the_item in config:
self.disjoint=str2bool(config[the_item])
if not self.disjoint:
the_item = 'tasks'
if the_item in config:
self.HP_tasks = int(config[the_item])
if self.HP_tasks > 1:
self.label_alphabet = {idtask: Alphabet('label',True) for idtask in range(self.HP_tasks)}
self.label_alphabet_sizes = {idtask: self.label_alphabet[idtask].size() for idtask in range(self.HP_tasks)}
the_item = "main_tasks"
if the_item in config:
self.HP_main_tasks = int(config[the_item])
print self.HP_main_tasks, self.HP_tasks
if self.HP_main_tasks > self.HP_tasks:
raise ValueError("HP_main_tasks cannot be greater than HP_tasks")
the_item = 'tasks_weights'
if the_item in config:
self.HP_tasks_weights = map(float,config[the_item].split("|"))
else:
#Hyperparameters for auxiliary tasks over a different treebank
the_item = 'dataset'
if the_item in config:
self.task_config = config[the_item] ## feat_config is a dict
self.HP_tasks = sum([self.task_config[idtask]["nb_tasks"]
for idtask in self.task_config])
self.HP_main_tasks = sum([self.task_config[idtask]["nb_tasks"]
for idtask in self.task_config
if self.task_config[idtask]["main"]])
self.label_alphabet = {idtask: Alphabet('label',True) for idtask in range(self.HP_tasks)}
self.label_alphabet_sizes = {idtask: self.label_alphabet[idtask].size() for idtask in range(self.HP_tasks)}
self.HP_tasks_weights = []
self.HP_tasks_weight_decays = []
for idtask in self.task_config:
for weight in self.task_config[idtask]["weight"]:
self.HP_tasks_weights.append(weight)
if "weight_decay" in self.task_config[idtask]:
for weight_decay in self.task_config[idtask]["weight_decay"]:
self.HP_tasks_weight_decays.append(weight_decay)
else:
for j in range(self.task_config[idtask]["nb_tasks"]):
self.HP_tasks_weight_decays.append(0)
self.dataset_ids = {treebank:range(self.task_config[treebank]["idstask"],
self.task_config[treebank]["idstask"]+self.task_config[treebank]["nb_tasks"])
for id,treebank in enumerate(self.task_config)}
self.ignore_after_epoch = {treebank:self.task_config[treebank]["ignore_after_epoch"]
if "ignore_after_epoch" in self.task_config[treebank]
else self.HP_iteration+1
for treebank in self.task_config}
self.inv_dataset_ids = {}
for tb in self.dataset_ids:
for subtask in self.dataset_ids[tb]:
self.inv_dataset_ids[subtask] = tb
self.task_metric = {}
for dataset in self.task_config:
for i in range(self.task_config[dataset]["idstask"],
self.task_config[dataset]["idstask"]+self.task_config[dataset]["nb_tasks"]) :
if "metric" in self.task_config[dataset]:
self.task_metric[i] = self.task_config[dataset]["metric"]
the_item = 'evaluate'
if the_item in config:
self.evaluate = config[the_item]
the_item = "gold_dev_trees"
if the_item in config:
self.gold_dev_trees = config[the_item]
the_item = "gold_dev_dep"
if the_item in config:
self.gold_dev_dep= config[the_item]
the_item = "combine_dependency_offset"
if the_item in config:
self.offset= str2bool(config[the_item])
the_item = "pretrained_model"
if the_item in config:
self.pretrained_model = config[the_item]
the_item = "pretrained_part"
if the_item in config:
if config[the_item].lower() not in [self.PRETRAINED_ALL, self.PRETRAINED_LSTMS]:
raise ValueError("Invalidad value for pretrained_part (must be 'all' or 'lstms' ")
self.pretrained_part = config[the_item]
the_item = "optimize_with_las"
if the_item in config:
self.optimize_with_las = str2bool(config[the_item])
the_item = "gold_train_trees"
if the_item in config:
self.gold_train_trees = config[the_item]
def config_file_to_dict(input_file):
config = {}
fins = open(input_file,'r').readlines()
idstask = 0 #Needed for training with disjoint treebanks
for line in fins:
if len(line) > 0 and line[0] == "#":
continue
if "=" in line:
pair = line.strip().split('#',1)[0].split('=',1)
item = pair[0]
if item=="feature":
if item not in config:
feat_dict = {}
config[item]= feat_dict
feat_dict = config[item]
new_pair = pair[-1].split()
feat_name = new_pair[0]
one_dict = {}
one_dict["emb_dir"] = None
one_dict["emb_size"] = 10
one_dict["emb_norm"] = False
if len(new_pair) > 1:
for idx in range(1,len(new_pair)):
conf_pair = new_pair[idx].split('=')
if conf_pair[0] == "emb_dir":
one_dict["emb_dir"]=conf_pair[-1]
elif conf_pair[0] == "emb_size":
one_dict["emb_size"]=int(conf_pair[-1])
elif conf_pair[0] == "emb_norm":
one_dict["emb_norm"]=str2bool(conf_pair[-1])
feat_dict[feat_name] = one_dict
elif item=="dataset":
if item not in config:
task_dict = OrderedDict()
config[item] = task_dict
task_dict = config[item]
new_pair = pair[-1].split()
task_name = new_pair[0]
one_dict = {}
one_dict["nb_tasks"] = None
one_dict["main"] = None
one_dict["idstask"] = idstask
if len(new_pair) > 1:
for idx in range(1, len(new_pair)):
print line, one_dict
conf_pair = new_pair[idx].split("=")
if conf_pair[0] == "nb_tasks":
one_dict["nb_tasks"]=int(conf_pair[-1])
idstask+=int(conf_pair[-1])
if conf_pair[0] == "main":
one_dict["main"] = str2bool(conf_pair[-1])
if conf_pair[0] == "weight":
one_dict["weight"] = [float(w) for w in conf_pair[-1].split("|")]
if conf_pair[0] == "weight_decay":
one_dict["weight_decay"] = [float(w) for w in conf_pair[-1].split("|")]
if conf_pair[0] == "metric":
one_dict["metric"] = conf_pair[-1]
if conf_pair[0] == "ignore_after_epoch":
one_dict["ignore_after_epoch"] = int(conf_pair[-1])
task_dict[task_name] = one_dict
else:
if item in config:
print("Warning: duplicated config item found: %s, updated."%(pair[0]))
config[item] = pair[-1]
return config
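# Illustrative sketch of the configuration syntax parsed above (the paths and
# treebank names are invented for the example):
#
#   train_dir=sample_data/train.seq
#   word_emb_dim=100
#   use_crf=False
#   feature=[POS] emb_size=25
#   dataset=[PTB] nb_tasks=2 main=True weight=1|0.5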
def str2bool(string):
if string == "True" or string == "true" or string == "TRUE":
return True
else:
return False
class Task(object):
def __init__(self, name, n_task, main):
self.name = name
self.n_task = n_task
self.main = main
| 42.685 | 222 | 0.552887 |
42cf25b9f49f3148f87d8431524cfcc27b0cd78a | 1,970 | py | Python | bili/base.py | wyapx/bili-api | 65160b01a413710ed31588f1bfff9e5ed14d08be | [
"MIT"
] | null | null | null | bili/base.py | wyapx/bili-api | 65160b01a413710ed31588f1bfff9e5ed14d08be | [
"MIT"
] | null | null | null | bili/base.py | wyapx/bili-api | 65160b01a413710ed31588f1bfff9e5ed14d08be | [
"MIT"
] | null | null | null | from typing import Dict
import aiohttp
from .utils import assert_success
class Network:
def __init__(self, cookies: Dict[str, str] = None) -> None:
cookiejar = aiohttp.CookieJar()
if cookies:
cookiejar.update_cookies(cookies)
self._session = aiohttp.ClientSession(cookie_jar=cookiejar)
async def get(self, url: str, params=None) -> dict:
async with self._session.get(url, params=params) as resp:
if resp.status != 200:
raise ConnectionError(resp.status, await resp.read())
return await resp.json()
async def post(self, url: str, data=None) -> dict:
async with self._session.post(url, data=data) as resp:
if resp.status != 200:
raise ConnectionError(resp.status, await resp.read())
return await resp.json()
async def websocket(self, host: str, path="/", port=443, wss=True) -> aiohttp.ClientWebSocketResponse:
return await self._session.ws_connect(f"{'wss' if wss else 'ws'}://"
f"{host}:{port}{path}")
class APIBase:
base = None
def __init__(self, network: Network) -> None:
if not self.base:
raise AttributeError("base url not set")
self._network = network
self._verified = None
def _join_url(self, path: str) -> str:
return self.base + path
async def verify_auth(self):
if self._verified is None:
assert_success(
await self._network.get("https://api.bilibili.com/x/space/myinfo")
)
self._verified = True
async def _get(self, path: str, params=None) -> dict:
return assert_success(
await self._network.get(self._join_url(path), params)
)
async def _post(self, path: str, data=None) -> dict:
return assert_success(
await self._network.post(self._join_url(path), data)
)
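# Minimal usage sketch (illustrative, not part of the original module): concrete
# endpoint groups subclass APIBase, set `base`, and wrap `_get`/`_post`.
#
#     class SpaceAPI(APIBase):
#         base = "https://api.bilibili.com/x/space"
#
#         async def myinfo(self) -> dict:
#             return await self._get("/myinfo")
#
#     # api = SpaceAPI(Network(cookies={...}))
#     # info = await api.myinfo()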
| 32.833333 | 106 | 0.59797 |
43d67fd6650762365dcf2a075dacfc884ffdc203 | 1,600 | py | Python | src/wallabag/commands/add.py | artur-shaik/wallabag-client | 6c03a3beebcf27f51076e0eb11bb99f618f8daa3 | [
"MIT"
] | 16 | 2020-09-30T23:08:45.000Z | 2022-03-30T02:34:17.000Z | src/wallabag/commands/add.py | artur-shaik/wallabag-client | 6c03a3beebcf27f51076e0eb11bb99f618f8daa3 | [
"MIT"
] | 15 | 2020-11-05T09:22:38.000Z | 2022-03-11T16:56:18.000Z | src/wallabag/commands/add.py | artur-shaik/wallabag-client | 6c03a3beebcf27f51076e0eb11bb99f618f8daa3 | [
"MIT"
] | 1 | 2021-04-02T11:00:57.000Z | 2021-04-02T11:00:57.000Z | # -*- coding: utf-8 -*-
from colorama import Fore
from wallabag.api.add_entry import AddEntry, Params as AddEntryParams
from wallabag.api.entry_exists import EntryExists
from wallabag.commands.command import Command
from wallabag.commands.tags_param import TagsParam
from wallabag.commands.params import Params
from wallabag.entry import Entry
class AddCommandParams(Params, TagsParam):
target_url = None
title = None
starred = None
read = None
tags = None
def __init__(self, target_url, title=None,
starred=None, read=None, tags=None):
self.target_url = target_url
self.title = title
self.starred = starred
self.read = read
self.tags = tags
def validate(self):
return self._validate_tags()
class AddCommand(Command):
def __init__(self, config, params=None):
Command.__init__(self)
self.config = config
self.params = params
def _run(self):
params = self.params
api = EntryExists(self.config, params.target_url)
if api.request().response['exists']:
return True, "The url was already saved."
entry = Entry(AddEntry(self.config, params.target_url, {
AddEntryParams.TITLE: params.title,
AddEntryParams.READ: params.read,
AddEntryParams.STARRED: params.starred,
AddEntryParams.TAGS: params.tags
}).request().response)
return True, (
"Entry successfully added:\n\n"
f"\t{Fore.GREEN}{entry.entry_id}. {entry.title}{Fore.RESET}\n")
| 29.62963 | 79 | 0.64875 |
e87d16d0d0474d51c83be2f03958f2c58f1b88d0 | 27,057 | py | Python | rest_framework/renderers.py | marctc/django-rest-framework | 06fd63dade20e1a19276b7414a54b9f5d2ef8329 | [
"Unlicense"
] | 1 | 2015-12-31T10:24:25.000Z | 2015-12-31T10:24:25.000Z | rest_framework/renderers.py | marctc/django-rest-framework | 06fd63dade20e1a19276b7414a54b9f5d2ef8329 | [
"Unlicense"
] | null | null | null | rest_framework/renderers.py | marctc/django-rest-framework | 06fd63dade20e1a19276b7414a54b9f5d2ef8329 | [
"Unlicense"
] | null | null | null | """
Renderers are used to serialize a response into specific media types.
They give us a generic way of being able to handle various media types
on the response, such as JSON encoded data or HTML output.
REST framework also provides an HTML renderer that renders the browsable API.
"""
from __future__ import unicode_literals
import json
import django
from django import forms
from django.core.exceptions import ImproperlyConfigured
from django.http.multipartparser import parse_header
from django.template import Context, RequestContext, loader, Template
from django.test.client import encode_multipart
from django.utils import six
from django.utils.xmlutils import SimplerXMLGenerator
from rest_framework import exceptions, serializers, status, VERSION
from rest_framework.compat import (
SHORT_SEPARATORS, LONG_SEPARATORS, StringIO, smart_text, yaml
)
from rest_framework.exceptions import ParseError
from rest_framework.settings import api_settings
from rest_framework.request import is_form_media_type, override_method
from rest_framework.utils import encoders
from rest_framework.utils.breadcrumbs import get_breadcrumbs
from rest_framework.utils.field_mapping import ClassLookupDict
def zero_as_none(value):
return None if value == 0 else value
class BaseRenderer(object):
"""
All renderers should extend this class, setting the `media_type`
and `format` attributes, and override the `.render()` method.
"""
media_type = None
format = None
charset = 'utf-8'
render_style = 'text'
def render(self, data, accepted_media_type=None, renderer_context=None):
        raise NotImplementedError('Renderer class requires .render() to be implemented')
class JSONRenderer(BaseRenderer):
"""
Renderer which serializes to JSON.
"""
media_type = 'application/json'
format = 'json'
encoder_class = encoders.JSONEncoder
ensure_ascii = not api_settings.UNICODE_JSON
compact = api_settings.COMPACT_JSON
# We don't set a charset because JSON is a binary encoding,
# that can be encoded as utf-8, utf-16 or utf-32.
# See: http://www.ietf.org/rfc/rfc4627.txt
# Also: http://lucumr.pocoo.org/2013/7/19/application-mimetypes-and-encodings/
charset = None
def get_indent(self, accepted_media_type, renderer_context):
if accepted_media_type:
# If the media type looks like 'application/json; indent=4',
# then pretty print the result.
# Note that we coerce `indent=0` into `indent=None`.
base_media_type, params = parse_header(accepted_media_type.encode('ascii'))
try:
return zero_as_none(max(min(int(params['indent']), 8), 0))
except (KeyError, ValueError, TypeError):
pass
# If 'indent' is provided in the context, then pretty print the result.
# E.g. If we're being called by the BrowsableAPIRenderer.
return renderer_context.get('indent', None)
def render(self, data, accepted_media_type=None, renderer_context=None):
"""
Render `data` into JSON, returning a bytestring.
"""
if data is None:
return bytes()
renderer_context = renderer_context or {}
indent = self.get_indent(accepted_media_type, renderer_context)
separators = SHORT_SEPARATORS if (indent is None and self.compact) else LONG_SEPARATORS
ret = json.dumps(
data, cls=self.encoder_class,
indent=indent, ensure_ascii=self.ensure_ascii,
separators=separators
)
# On python 2.x json.dumps() returns bytestrings if ensure_ascii=True,
# but if ensure_ascii=False, the return type is underspecified,
# and may (or may not) be unicode.
# On python 3.x json.dumps() returns unicode strings.
if isinstance(ret, six.text_type):
return bytes(ret.encode('utf-8'))
return ret
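# Illustrative example (not part of the original module): the browsable API
# passes `indent` through `renderer_context` to pretty-print the payload.
#
#     JSONRenderer().render({'id': 1}, renderer_context={'indent': 4})
#     # -> b'{\n    "id": 1\n}'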
class JSONPRenderer(JSONRenderer):
"""
Renderer which serializes to json,
wrapping the json output in a callback function.
"""
media_type = 'application/javascript'
format = 'jsonp'
callback_parameter = 'callback'
default_callback = 'callback'
charset = 'utf-8'
def get_callback(self, renderer_context):
"""
Determine the name of the callback to wrap around the json output.
"""
request = renderer_context.get('request', None)
params = request and request.query_params or {}
return params.get(self.callback_parameter, self.default_callback)
def render(self, data, accepted_media_type=None, renderer_context=None):
"""
Renders into jsonp, wrapping the json output in a callback function.
Clients may set the callback function name using a query parameter
on the URL, for example: ?callback=exampleCallbackName
"""
renderer_context = renderer_context or {}
callback = self.get_callback(renderer_context)
json = super(JSONPRenderer, self).render(data, accepted_media_type,
renderer_context)
return callback.encode(self.charset) + b'(' + json + b');'
class XMLRenderer(BaseRenderer):
"""
Renderer which serializes to XML.
"""
media_type = 'application/xml'
format = 'xml'
charset = 'utf-8'
def render(self, data, accepted_media_type=None, renderer_context=None):
"""
Renders `data` into serialized XML.
"""
if data is None:
return ''
stream = StringIO()
xml = SimplerXMLGenerator(stream, self.charset)
xml.startDocument()
xml.startElement("root", {})
self._to_xml(xml, data)
xml.endElement("root")
xml.endDocument()
return stream.getvalue()
def _to_xml(self, xml, data):
if isinstance(data, (list, tuple)):
for item in data:
xml.startElement("list-item", {})
self._to_xml(xml, item)
xml.endElement("list-item")
elif isinstance(data, dict):
for key, value in six.iteritems(data):
xml.startElement(key, {})
self._to_xml(xml, value)
xml.endElement(key)
elif data is None:
# Don't output any value
pass
else:
xml.characters(smart_text(data))
class YAMLRenderer(BaseRenderer):
"""
Renderer which serializes to YAML.
"""
media_type = 'application/yaml'
format = 'yaml'
encoder = encoders.SafeDumper
charset = 'utf-8'
ensure_ascii = False
def render(self, data, accepted_media_type=None, renderer_context=None):
"""
Renders `data` into serialized YAML.
"""
assert yaml, 'YAMLRenderer requires pyyaml to be installed'
if data is None:
return ''
return yaml.dump(data, stream=None, encoding=self.charset, Dumper=self.encoder, allow_unicode=not self.ensure_ascii)
class TemplateHTMLRenderer(BaseRenderer):
"""
An HTML renderer for use with templates.
The data supplied to the Response object should be a dictionary that will
be used as context for the template.
The template name is determined by (in order of preference):
1. An explicit `.template_name` attribute set on the response.
2. An explicit `.template_name` attribute set on this class.
3. The return result of calling `view.get_template_names()`.
For example:
data = {'users': User.objects.all()}
return Response(data, template_name='users.html')
For pre-rendered HTML, see StaticHTMLRenderer.
"""
media_type = 'text/html'
format = 'html'
template_name = None
exception_template_names = [
'%(status_code)s.html',
'api_exception.html'
]
charset = 'utf-8'
def render(self, data, accepted_media_type=None, renderer_context=None):
"""
Renders data to HTML, using Django's standard template rendering.
The template name is determined by (in order of preference):
1. An explicit .template_name set on the response.
2. An explicit .template_name set on this class.
3. The return result of calling view.get_template_names().
"""
renderer_context = renderer_context or {}
view = renderer_context['view']
request = renderer_context['request']
response = renderer_context['response']
if response.exception:
template = self.get_exception_template(response)
else:
template_names = self.get_template_names(response, view)
template = self.resolve_template(template_names)
context = self.resolve_context(data, request, response)
return template.render(context)
def resolve_template(self, template_names):
return loader.select_template(template_names)
def resolve_context(self, data, request, response):
if response.exception:
data['status_code'] = response.status_code
return RequestContext(request, data)
def get_template_names(self, response, view):
if response.template_name:
return [response.template_name]
elif self.template_name:
return [self.template_name]
elif hasattr(view, 'get_template_names'):
return view.get_template_names()
elif hasattr(view, 'template_name'):
return [view.template_name]
raise ImproperlyConfigured('Returned a template response with no `template_name` attribute set on either the view or response')
def get_exception_template(self, response):
template_names = [name % {'status_code': response.status_code}
for name in self.exception_template_names]
try:
# Try to find an appropriate error template
return self.resolve_template(template_names)
except Exception:
# Fall back to using eg '404 Not Found'
return Template('%d %s' % (response.status_code,
response.status_text.title()))
# Note, subclass TemplateHTMLRenderer simply for the exception behavior
class StaticHTMLRenderer(TemplateHTMLRenderer):
"""
An HTML renderer class that simply returns pre-rendered HTML.
The data supplied to the Response object should be a string representing
the pre-rendered HTML content.
For example:
data = '<html><body>example</body></html>'
return Response(data)
For template rendered HTML, see TemplateHTMLRenderer.
"""
media_type = 'text/html'
format = 'html'
charset = 'utf-8'
def render(self, data, accepted_media_type=None, renderer_context=None):
renderer_context = renderer_context or {}
response = renderer_context['response']
if response and response.exception:
request = renderer_context['request']
template = self.get_exception_template(response)
context = self.resolve_context(data, request, response)
return template.render(context)
return data
class HTMLFormRenderer(BaseRenderer):
"""
    Renders serializer data into an HTML form.
If the serializer was instantiated without an object then this will
return an HTML form not bound to any object,
otherwise it will return an HTML form with the appropriate initial data
populated from the object.
Note that rendering of field and form errors is not currently supported.
"""
media_type = 'text/html'
format = 'form'
charset = 'utf-8'
template_pack = 'rest_framework/horizontal/'
base_template = 'form.html'
default_style = ClassLookupDict({
serializers.Field: {
'base_template': 'input.html',
'input_type': 'text'
},
serializers.EmailField: {
'base_template': 'input.html',
'input_type': 'email'
},
serializers.URLField: {
'base_template': 'input.html',
'input_type': 'url'
},
serializers.IntegerField: {
'base_template': 'input.html',
'input_type': 'number'
},
serializers.DateTimeField: {
'base_template': 'input.html',
'input_type': 'datetime-local'
},
serializers.DateField: {
'base_template': 'input.html',
'input_type': 'date'
},
serializers.TimeField: {
'base_template': 'input.html',
'input_type': 'time'
},
serializers.BooleanField: {
'base_template': 'checkbox.html'
},
serializers.ChoiceField: {
'base_template': 'select.html', # Also valid: 'radio.html'
},
serializers.MultipleChoiceField: {
'base_template': 'select_multiple.html', # Also valid: 'checkbox_multiple.html'
},
serializers.RelatedField: {
'base_template': 'select.html', # Also valid: 'radio.html'
},
serializers.ManyRelatedField: {
'base_template': 'select_multiple.html', # Also valid: 'checkbox_multiple.html'
},
serializers.Serializer: {
'base_template': 'fieldset.html'
},
serializers.ListSerializer: {
'base_template': 'list_fieldset.html'
}
})
def render_field(self, field, parent_style):
style = dict(self.default_style[field])
style.update(field.style)
if 'template_pack' not in style:
style['template_pack'] = parent_style.get('template_pack', self.template_pack)
style['renderer'] = self
if style.get('input_type') == 'datetime-local' and isinstance(field.value, six.text_type):
field.value = field.value.rstrip('Z')
if 'template' in style:
template_name = style['template']
else:
template_name = style['template_pack'].strip('/') + '/' + style['base_template']
template = loader.get_template(template_name)
context = Context({'field': field, 'style': style})
return template.render(context)
def render(self, data, accepted_media_type=None, renderer_context=None):
"""
Render serializer data and return an HTML form, as a string.
"""
form = data.serializer
meta = getattr(form, 'Meta', None)
style = getattr(meta, 'style', {})
if 'template_pack' not in style:
style['template_pack'] = self.template_pack
if 'base_template' not in style:
style['base_template'] = self.base_template
style['renderer'] = self
# This API needs to be finessed and finalized for 3.1
if 'template' in renderer_context:
template_name = renderer_context['template']
elif 'template' in style:
template_name = style['template']
else:
template_name = style['template_pack'].strip('/') + '/' + style['base_template']
renderer_context = renderer_context or {}
request = renderer_context['request']
template = loader.get_template(template_name)
context = RequestContext(request, {
'form': form,
'style': style
})
return template.render(context)
class BrowsableAPIRenderer(BaseRenderer):
"""
HTML renderer used to self-document the API.
"""
media_type = 'text/html'
format = 'api'
template = 'rest_framework/api.html'
charset = 'utf-8'
form_renderer_class = HTMLFormRenderer
def get_default_renderer(self, view):
"""
Return an instance of the first valid renderer.
(Don't use another documenting renderer.)
"""
renderers = [renderer for renderer in view.renderer_classes
if not issubclass(renderer, BrowsableAPIRenderer)]
non_template_renderers = [renderer for renderer in renderers
if not hasattr(renderer, 'get_template_names')]
if not renderers:
return None
elif non_template_renderers:
return non_template_renderers[0]()
return renderers[0]()
def get_content(self, renderer, data,
accepted_media_type, renderer_context):
"""
Get the content as if it had been rendered by the default
non-documenting renderer.
"""
if not renderer:
return '[No renderers were found]'
renderer_context['indent'] = 4
content = renderer.render(data, accepted_media_type, renderer_context)
render_style = getattr(renderer, 'render_style', 'text')
assert render_style in ['text', 'binary'], 'Expected .render_style ' \
'"text" or "binary", but got "%s"' % render_style
if render_style == 'binary':
return '[%d bytes of binary content]' % len(content)
return content
def show_form_for_method(self, view, method, request, obj):
"""
Returns True if a form should be shown for this method.
"""
if method not in view.allowed_methods:
return # Not a valid method
if not api_settings.FORM_METHOD_OVERRIDE:
return # Cannot use form overloading
try:
view.check_permissions(request)
if obj is not None:
view.check_object_permissions(request, obj)
except exceptions.APIException:
return False # Doesn't have permissions
return True
def get_rendered_html_form(self, data, view, method, request):
"""
Return a string representing a rendered HTML form, possibly bound to
either the input or output data.
In the absence of the View having an associated form then return None.
"""
# See issue #2089 for refactoring this.
serializer = getattr(data, 'serializer', None)
if serializer and not getattr(serializer, 'many', False):
instance = getattr(serializer, 'instance', None)
else:
instance = None
# If this is valid serializer data, and the form is for the same
# HTTP method as was used in the request then use the existing
# serializer instance, rather than dynamically creating a new one.
if request.method == method and serializer is not None:
try:
data = request.data
except ParseError:
data = None
existing_serializer = serializer
else:
data = None
existing_serializer = None
with override_method(view, request, method) as request:
if not self.show_form_for_method(view, method, request, instance):
return
if method in ('DELETE', 'OPTIONS'):
return True # Don't actually need to return a form
if (
not getattr(view, 'get_serializer', None)
or not any(is_form_media_type(parser.media_type) for parser in view.parser_classes)
):
return
if existing_serializer is not None:
serializer = existing_serializer
else:
if method in ('PUT', 'PATCH'):
serializer = view.get_serializer(instance=instance, data=data)
else:
serializer = view.get_serializer(data=data)
if data is not None:
serializer.is_valid()
form_renderer = self.form_renderer_class()
return form_renderer.render(
serializer.data,
self.accepted_media_type,
dict(
list(self.renderer_context.items()) +
[('template', 'rest_framework/api_form.html')]
)
)
def get_raw_data_form(self, data, view, method, request):
"""
Returns a form that allows for arbitrary content types to be tunneled
via standard HTML forms.
(Which are typically application/x-www-form-urlencoded)
"""
# See issue #2089 for refactoring this.
serializer = getattr(data, 'serializer', None)
if serializer and not getattr(serializer, 'many', False):
instance = getattr(serializer, 'instance', None)
else:
instance = None
with override_method(view, request, method) as request:
# If we're not using content overloading there's no point in
# supplying a generic form, as the view won't treat the form's
# value as the content of the request.
if not (api_settings.FORM_CONTENT_OVERRIDE
and api_settings.FORM_CONTENTTYPE_OVERRIDE):
return None
# Check permissions
if not self.show_form_for_method(view, method, request, instance):
return
# If possible, serialize the initial content for the generic form
default_parser = view.parser_classes[0]
renderer_class = getattr(default_parser, 'renderer_class', None)
if (hasattr(view, 'get_serializer') and renderer_class):
# View has a serializer defined and parser class has a
# corresponding renderer that can be used to render the data.
if method in ('PUT', 'PATCH'):
serializer = view.get_serializer(instance=instance)
else:
serializer = view.get_serializer()
# Render the raw data content
renderer = renderer_class()
accepted = self.accepted_media_type
context = self.renderer_context.copy()
context['indent'] = 4
content = renderer.render(serializer.data, accepted, context)
else:
content = None
# Generate a generic form that includes a content type field,
# and a content field.
content_type_field = api_settings.FORM_CONTENTTYPE_OVERRIDE
content_field = api_settings.FORM_CONTENT_OVERRIDE
media_types = [parser.media_type for parser in view.parser_classes]
choices = [(media_type, media_type) for media_type in media_types]
initial = media_types[0]
# NB. http://jacobian.org/writing/dynamic-form-generation/
class GenericContentForm(forms.Form):
def __init__(self):
super(GenericContentForm, self).__init__()
self.fields[content_type_field] = forms.ChoiceField(
label='Media type',
choices=choices,
initial=initial
)
self.fields[content_field] = forms.CharField(
label='Content',
widget=forms.Textarea,
initial=content
)
return GenericContentForm()
def get_name(self, view):
return view.get_view_name()
def get_description(self, view):
return view.get_view_description(html=True)
def get_breadcrumbs(self, request):
return get_breadcrumbs(request.path)
def get_context(self, data, accepted_media_type, renderer_context):
"""
Returns the context used to render.
"""
view = renderer_context['view']
request = renderer_context['request']
response = renderer_context['response']
renderer = self.get_default_renderer(view)
raw_data_post_form = self.get_raw_data_form(data, view, 'POST', request)
raw_data_put_form = self.get_raw_data_form(data, view, 'PUT', request)
raw_data_patch_form = self.get_raw_data_form(data, view, 'PATCH', request)
raw_data_put_or_patch_form = raw_data_put_form or raw_data_patch_form
response_headers = dict(response.items())
renderer_content_type = ''
if renderer:
renderer_content_type = '%s' % renderer.media_type
if renderer.charset:
renderer_content_type += ' ;%s' % renderer.charset
response_headers['Content-Type'] = renderer_content_type
context = {
'content': self.get_content(renderer, data, accepted_media_type, renderer_context),
'view': view,
'request': request,
'response': response,
'description': self.get_description(view),
'name': self.get_name(view),
'version': VERSION,
'breadcrumblist': self.get_breadcrumbs(request),
'allowed_methods': view.allowed_methods,
'available_formats': [renderer_cls.format for renderer_cls in view.renderer_classes],
'response_headers': response_headers,
'put_form': self.get_rendered_html_form(data, view, 'PUT', request),
'post_form': self.get_rendered_html_form(data, view, 'POST', request),
'delete_form': self.get_rendered_html_form(data, view, 'DELETE', request),
'options_form': self.get_rendered_html_form(data, view, 'OPTIONS', request),
'raw_data_put_form': raw_data_put_form,
'raw_data_post_form': raw_data_post_form,
'raw_data_patch_form': raw_data_patch_form,
'raw_data_put_or_patch_form': raw_data_put_or_patch_form,
'display_edit_forms': bool(response.status_code != 403),
'api_settings': api_settings
}
return context
def render(self, data, accepted_media_type=None, renderer_context=None):
"""
Render the HTML for the browsable API representation.
"""
self.accepted_media_type = accepted_media_type or ''
self.renderer_context = renderer_context or {}
template = loader.get_template(self.template)
context = self.get_context(data, accepted_media_type, renderer_context)
context = RequestContext(renderer_context['request'], context)
ret = template.render(context)
# Munge DELETE Response code to allow us to return content
# (Do this *after* we've rendered the template so that we include
# the normal deletion response code in the output)
response = renderer_context['response']
if response.status_code == status.HTTP_204_NO_CONTENT:
response.status_code = status.HTTP_200_OK
return ret
class MultiPartRenderer(BaseRenderer):
media_type = 'multipart/form-data; boundary=BoUnDaRyStRiNg'
format = 'multipart'
charset = 'utf-8'
BOUNDARY = 'BoUnDaRyStRiNg' if django.VERSION >= (1, 5) else b'BoUnDaRyStRiNg'
def render(self, data, accepted_media_type=None, renderer_context=None):
return encode_multipart(self.BOUNDARY, data)
| 36.712347 | 135 | 0.625679 |
4699e759fc9c02fe51f52775f52306f7bef8abfb | 3,550 | py | Python | test_array.py | sebi06/czi_demos | b3f7801f46de0138a8a1ac245e9c80787e0a3f17 | ["MIT"] | 3 | 2020-12-31T10:06:42.000Z | 2021-11-09T13:44:16.000Z | test_array.py | sebi06/czi_demos | b3f7801f46de0138a8a1ac245e9c80787e0a3f17 | ["MIT"] | null | null | null | test_array.py | sebi06/czi_demos | b3f7801f46de0138a8a1ac245e9c80787e0a3f17 | ["MIT"] | null | null | null |
import numpy as np
import imgfileutils as imf
from aicspylibczi import CziFile
def get_dimorder(dimstring):
"""Get the order of dimensions from dimension string
:param dimstring: string containing the dimensions
:type dimstring: str
:return: dims_dict - dictionary with the dimensions and its positions
:rtype: dict
:return: dimindex_list - list with indices of dimensions
:rtype: list
:return: numvalid_dims - number of valid dimensions
:rtype: integer
"""
dimindex_list = []
dims = ['R', 'I', 'M', 'H', 'V', 'B', 'S', 'T', 'C', 'Z', 'Y', 'X', '0']
dims_dict = {}
for d in dims:
dims_dict[d] = dimstring.find(d)
dimindex_list.append(dimstring.find(d))
numvalid_dims = sum(i > 0 for i in dimindex_list)
return dims_dict, dimindex_list, numvalid_dims
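# Worked example (a sketch of the expected values for the dimorder 'STCYX' used below):
# dims_dict would be {'R': -1, 'I': -1, 'M': -1, 'H': -1, 'V': -1, 'B': -1,
#                     'S': 0, 'T': 1, 'C': 2, 'Z': -1, 'Y': 3, 'X': 4, '0': -1}
# and numvalid_dims would be 4, since the `i > 0` test skips the dimension found at index 0 ('S').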
filename = r"C:\Temp\input\DTScan_ID4.czi"
md, addmd = imf.get_metadata(filename)
czi = CziFile(filename)
# Get the shape of the data, the coordinate pairs are (start index, size)
dimensions = czi.dims_shape()
print(dimensions)
print(czi.dims)
print(czi.size)
print(czi.is_mosaic()) # True
# Mosaic files ignore the S dimension and use an internal mIndex to reconstruct, the scale factor allows one to generate a manageable image
mosaic_data = czi.read_mosaic(C=0, scale_factor=1)
print('CZI Mosaic Data Shape : ', mosaic_data.shape)
md = {}
md['SizeS'] = 1
md['SizeT'] = 3
md['SizeZ'] = 5
md['SizeC'] = 2
md['SizeY'] = 100
md['SizeX'] = 200
dimorder = 'STCYX'
dims_dict, dimindex_list, numvalid_dims = get_dimorder(dimorder)
new = {k: v for k, v in dims_dict.items() if v != -1}
new = {value: key for key, value in new.items()}
print(new)
ar = np.array(np.zeros([md['SizeY'], md['SizeX']]))
out = np.resize(3, (ar.shape[0], ar.shape[1]))
# in case of 2 dimensions
if dimorder == 'YX':
ar = np.array(np.zeros([md['SizeY'], md['SizeX']]))
# in case of 3 dimensions
if dimorder == 'SYX':
ar = np.array(np.zeros([md['SizeS'], md['SizeY'], md['SizeX']]))
if dimorder == 'TYX':
ar = np.array(np.zeros([md['SizeT'], md['SizeY'], md['SizeX']]))
if dimorder == 'ZYX':
ar = np.array(np.zeros([md['SizeZ'], md['SizeY'], md['SizeX']]))
if dimorder == 'CYX':
ar = np.array(np.zeros([md['SizeC'], md['SizeY'], md['SizeX']]))
# in case of 4 dimensions
if dimorder == 'SCYX':
ar = np.array(np.zeros([md['SizeS'], md['SizeC'], md['SizeY'], md['SizeX']]))
if dimorder == 'STYX':
    ar = np.array(np.zeros([md['SizeS'], md['SizeT'], md['SizeY'], md['SizeX']]))
if dimorder == 'SZYX':
    ar = np.array(np.zeros([md['SizeS'], md['SizeZ'], md['SizeY'], md['SizeX']]))
if dimorder == 'TCYX':
    ar = np.array(np.zeros([md['SizeT'], md['SizeC'], md['SizeY'], md['SizeX']]))
if dimorder == 'TZYX':
    ar = np.array(np.zeros([md['SizeT'], md['SizeZ'], md['SizeY'], md['SizeX']]))
if dimorder == 'ZCYX':
    ar = np.array(np.zeros([md['SizeZ'], md['SizeC'], md['SizeY'], md['SizeX']]))
if dimorder == 'ZTYX':
    ar = np.array(np.zeros([md['SizeZ'], md['SizeT'], md['SizeY'], md['SizeX']]))
if dimorder == 'CTYX':
    ar = np.array(np.zeros([md['SizeC'], md['SizeT'], md['SizeY'], md['SizeX']]))
if dimorder == 'CZYX':
    ar = np.array(np.zeros([md['SizeC'], md['SizeZ'], md['SizeY'], md['SizeX']]))
ar = np.array(np.zeros([md['SizeY'], md['SizeX']]))
print(dims_dict)
for d in range(0, len(dimorder)):
dim2search = dimorder[d]
print(dim2search, dims_dict[dim2search])
| 26.691729 | 139 | 0.62 |
38f48316fe6a5de705232180e386303792707117 | 652 | py | Python | charades_preprocess/preprocess.py | hudaAlamri/visdial-bert | 77fee9e49c629bff314397d7ff029389a08a4ec0 | ["BSD-3-Clause"] | null | null | null | charades_preprocess/preprocess.py | hudaAlamri/visdial-bert | 77fee9e49c629bff314397d7ff029389a08a4ec0 | ["BSD-3-Clause"] | null | null | null | charades_preprocess/preprocess.py | hudaAlamri/visdial-bert | 77fee9e49c629bff314397d7ff029389a08a4ec0 | ["BSD-3-Clause"] | 1 | 2022-03-07T15:48:19.000Z | 2022-03-07T15:48:19.000Z |
# parse charades datasets to get the actions set
import csv
data_all = {}
input_file = csv.DictReader(open("../data/Charades/Charades_v1_train.csv"))
actions_list = {}
for row in input_file:
id = row['id']
data_all[id] = row
action_list = [v.split(' ')[0] for v in row['actions'].split(';')]
actions_list[id] = action_list
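# Illustrative sketch (the 'actions' field layout is assumed from the Charades annotation format):
# a value like "c092 11.90 21.20;c147 0.00 12.60" is split on ';' and only the first token of each
# chunk is kept, so action_list becomes ['c092', 'c147'] for that video id.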
with open("../data/Charades/Charades_v1_classes.txt") as f:
actions_names = dict(x.rstrip().split(None,1) for x in f)
# given a video_id, retrieve the set of actions and their temporal locations:
vid_id = '9GS13'
l_names = [actions_names[each] for each in actions_list[vid_id]]
for each in l_names:
print(each)
| 24.148148 | 77 | 0.711656 |
247d152f2b69d9f8a6dff2a79fff958d1b6684c0 | 3,098 | py | Python | apps/gsekit/configfile/handlers/tests.py | iSecloud/bk-process-config-manager | f44c01b7a28dd9328cce6e6066eae42d5365070d | ["MIT"] | 8 | 2021-07-08T06:53:57.000Z | 2022-03-14T04:05:27.000Z | apps/gsekit/configfile/handlers/tests.py | iSecloud/bk-process-config-manager | f44c01b7a28dd9328cce6e6066eae42d5365070d | ["MIT"] | 107 | 2021-07-22T02:20:07.000Z | 2022-03-14T08:37:23.000Z | apps/gsekit/configfile/handlers/tests.py | iSecloud/bk-process-config-manager | f44c01b7a28dd9328cce6e6066eae42d5365070d | ["MIT"] | 12 | 2021-07-09T08:59:01.000Z | 2022-03-08T13:40:41.000Z |
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸 (Blueking) available.
Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at https://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and limitations under the License.
"""
# -*- coding: utf-8 -*-
from django.test import TestCase
from apps.gsekit.configfile.handlers.config_template import ConfigTemplateHandler
from apps.gsekit.process.models import Process
class TestConfigTemplateHandlers(TestCase):
"""
    Tests for the config template handler
"""
def test_bind_template_to_process(self):
        # Create new config template bindings
process_object_list = [
{"process_object_type": Process.ProcessObjectType.INSTANCE, "process_object_id": 1},
{"process_object_type": Process.ProcessObjectType.INSTANCE, "process_object_id": 2},
{"process_object_type": Process.ProcessObjectType.TEMPLATE, "process_object_id": 1},
{"process_object_type": Process.ProcessObjectType.TEMPLATE, "process_object_id": 2},
]
count = ConfigTemplateHandler(config_template_id=1).bind_template_to_process(process_object_list)
self.assertEqual(count["deleted_relations_count"], 0)
self.assertEqual(count["created_relations_count"], 4)
        # Change the bindings
process_object_list = [
{"process_object_type": Process.ProcessObjectType.INSTANCE, "process_object_id": 1},
{"process_object_type": Process.ProcessObjectType.TEMPLATE, "process_object_id": 1},
{"process_object_type": Process.ProcessObjectType.TEMPLATE, "process_object_id": 3},
]
count = ConfigTemplateHandler(config_template_id=1).bind_template_to_process(process_object_list)
self.assertEqual(count["deleted_relations_count"], 2)
self.assertEqual(count["created_relations_count"], 1)
def test_bind_process_to_template(self):
self.test_bind_template_to_process()
        # Create new config template bindings
process_object_id = 3
process_object_type = Process.ProcessObjectType.TEMPLATE
config_template_id_list = [1, 2, 3]
count = ConfigTemplateHandler.bind_process_to_template(
process_object_type, process_object_id, config_template_id_list
)
self.assertEqual(count["deleted_relations_count"], 0)
self.assertEqual(count["created_relations_count"], 2)
config_template_id_list = []
count = ConfigTemplateHandler.bind_process_to_template(
process_object_type, process_object_id, config_template_id_list
)
self.assertEqual(count["deleted_relations_count"], 3)
self.assertEqual(count["created_relations_count"], 0)
| 48.40625 | 115 | 0.727243 |
7ada18f15ad6980558f5c11aa42a0784c7a372a0 | 589 | py | Python | apps/about/migrations/0001_initial.py | StepicOrg/stepik-apps | 5825bc9b2444ad4690681964d1bed172706f8796 | ["Apache-2.0"] | 5 | 2017-03-17T10:01:25.000Z | 2018-03-23T05:56:25.000Z | apps/about/migrations/0001_initial.py | StepicOrg/stepik-apps | 5825bc9b2444ad4690681964d1bed172706f8796 | ["Apache-2.0"] | 4 | 2020-06-05T17:34:05.000Z | 2021-04-19T12:58:48.000Z | apps/about/migrations/0001_initial.py | StepicOrg/stepik-apps | 5825bc9b2444ad4690681964d1bed172706f8796 | ["Apache-2.0"] | 2 | 2017-03-21T13:01:28.000Z | 2017-04-27T14:33:20.000Z |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-04-22 08:26
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='About',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('text', models.TextField()),
('pub_date', models.DateTimeField()),
],
),
]
| 24.541667 | 114 | 0.575552 |
d56fdb697519c82350da541bfb1e56a4f610ce7d | 860 | py | Python | gif_manger.py | Rix565/PygaMone | 58879a01b5427328824ab4558f6ea3916f1b844a | ["MIT"] | null | null | null | gif_manger.py | Rix565/PygaMone | 58879a01b5427328824ab4558f6ea3916f1b844a | ["MIT"] | null | null | null | gif_manger.py | Rix565/PygaMone | 58879a01b5427328824ab4558f6ea3916f1b844a | ["MIT"] | null | null | null |
from typing import Optional
import pygame_gif
__all__: list['Gif'] = []
class Gif(object):
def __init__(self, path: str):
__all__.append(self)
self.path = path
self.gif: Optional['pygame_gif.PygameGif'] = None
def get(self):
if self.gif is None:
self.load()
return self.gif
def load(self):
if not self.gif:
self.gif = pygame_gif.PygameGif(self.path)
def un_load(self):
del self.gif
self.gif = None
    def __str__(self):
        return "Gif : {}".format(self.path)
EMBER = Gif('./assets/textures/ability/ember.gif')
SMALL_EMBER = Gif('./assets/textures/ability/small_ember.gif')
CONTACT = Gif('./assets/textures/ability/contact.gif')
BIDE = Gif('./assets/textures/ability/bide.gif')
def unload_all():
for g in __all__:
g.un_load()
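# Minimal usage sketch (hypothetical call sites, assuming a pygame display is already set up):
# frame_source = EMBER.get()   # lazily loads the gif on first access
# unload_all()                 # releases every loaded gif, e.g. when leaving a scene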
| 22.051282 | 62 | 0.615116 |
68173bb18efddfc4c162bf6f1f9a2c185b68f123 | 12,026 | py | Python | snakebite/minicluster.py | shenfe/snakebite | 880fffd177a5fa5bbda08ea6da5e5c43a87e1c70 | ["Apache-2.0"] | null | null | null | snakebite/minicluster.py | shenfe/snakebite | 880fffd177a5fa5bbda08ea6da5e5c43a87e1c70 | ["Apache-2.0"] | null | null | null | snakebite/minicluster.py | shenfe/snakebite | 880fffd177a5fa5bbda08ea6da5e5c43a87e1c70 | ["Apache-2.0"] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright (c) 2013 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
import os
import subprocess
import select
import re
import datetime
try:
long # Python 2
except NameError:
long = int # Python 3
class MiniCluster(object):
    ''' Class that spawns a hadoop mini cluster and wraps hadoop functionality
This class requires the ``HADOOP_HOME`` environment variable to be set to run the ``hadoop`` command.
It will search ``HADOOP_HOME`` for ``hadoop-mapreduce-client-jobclient<version>-tests.jar``, but the
location of this jar can also be supplied by the ``HADOOP_JOBCLIENT_JAR`` environment variable.
    Since the current minicluster interface doesn't provide for specifying the namenode port number, and
    chooses a random one, this class parses the output from the minicluster to find the port number.
All supplied methods (like :py:func:`put`, :py:func:`ls`, etc) use the hadoop command to perform operations, and not
the snakebite client, since this is used for testing snakebite itself.
All methods return a list of maps that are snakebite compatible.
Example without :mod:`snakebite.client <client>`
>>> from snakebite.minicluster import MiniCluster
>>> cluster = MiniCluster("/path/to/test/files")
>>> ls_output = cluster.ls(["/"])
Example with :mod:`snakebite.client <client>`
>>> from snakebite.minicluster import MiniCluster
>>> from snakebite.client import Client
>>> cluster = MiniCluster("/path/to/test/files")
>>> client = Client('localhost', cluster.port)
>>> ls_output = client.ls(["/"])
Just as the snakebite client, the cluster methods take a list of strings as paths. Wherever a method
takes ``extra_args``, normal hadoop command arguments can be given (like -r, -f, etc).
More info can be found at http://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-common/CLIMiniCluster.html
.. note:: A minicluster will be started at instantiation
.. note:: Not all hadoop commands have been implemented, only the ones that
were necessary for testing the snakebite client, but please feel free to add them
'''
def __init__(self, testfiles_path, start_cluster=True, nnport=None):
'''
:param testfiles_path: Local path where test files can be found. Mainly used for ``put()``
:type testfiles_path: string
:param start_cluster: start a MiniCluster on initialization. If False, this class will act as an interface to the ``hadoop fs`` command
:type start_cluster: boolean
'''
self._testfiles_path = testfiles_path
self._hadoop_home = os.environ['HADOOP_HOME']
self._jobclient_jar = os.environ.get('HADOOP_JOBCLIENT_JAR')
self._hadoop_cmd = "%s/bin/hadoop" % self._hadoop_home
if start_cluster:
self._start_mini_cluster(nnport)
self.host = "localhost"
self.port = self._get_namenode_port()
self.hdfs_url = "hdfs://%s:%d" % (self.host, self.port)
else:
self.hdfs_url = "hdfs://"
def terminate(self):
''' Terminate the cluster
        Since the minicluster is started as a subprocess, this method has to be called explicitly when
your program ends.
'''
self.hdfs.terminate()
def put(self, src, dst):
'''Upload a file to HDFS
        This will take a file from the ``testfiles_path`` supplied in the constructor.
'''
src = "%s%s" % (self._testfiles_path, src)
return self._getStdOutCmd([self._hadoop_cmd, 'fs', '-put', src, self._full_hdfs_path(dst)], True)
def put_subprocess(self, src, dst, block_size=134217728, text=True): # This is used for testing with large files.
block_size_flag = "-Ddfs.block.size=%s" % str(block_size)
cmd = [self._hadoop_cmd, 'fs', block_size_flag, '-put', src, self._full_hdfs_path(dst)]
return subprocess.Popen(cmd, bufsize=0, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE, universal_newlines=text)
def exists(self, path):
"""Return True if <src> exists, False if doesn't"""
return self._getReturnCodeCmd([self._hadoop_cmd, 'fs', '-test', '-e', path]) == 0
def is_directory(self, path):
"""Return True if <path> is a directory, False if it's NOT a directory"""
return self._getReturnCodeCmd([self._hadoop_cmd, 'fs', '-test', '-d', self._full_hdfs_path(path)]) == 0
def is_files(self, path):
"""Return True if <path> is a file, False if it's NOT a file"""
return self._getReturnCodeCmd([self._hadoop_cmd, 'fs', '-test', '-f', self._full_hdfs_path(path)]) == 0
def is_greater_then_zero_bytes(self, path):
"""Return True if file <path> is greater than zero bytes in size, False otherwise"""
return self._getReturnCodeCmd([self._hadoop_cmd, 'fs', '-test', '-s', self._full_hdfs_path(path)]) == 0
def is_zero_bytes_file(self, path):
"""Return True if file <path> is zero bytes in size, else return False"""
return self._getReturnCodeCmd([self._hadoop_cmd, 'fs', '-test', '-z', self._full_hdfs_path(path)]) == 0
def ls(self, src, extra_args=[]):
'''List files in a directory'''
src = [self._full_hdfs_path(x) for x in src]
output = self._getStdOutCmd([self._hadoop_cmd, 'fs', '-ls'] + extra_args + src, True)
return self._transform_ls_output(output, self.hdfs_url)
def mkdir(self, src, extra_args=[]):
'''Create a directory'''
return self._getStdOutCmd([self._hadoop_cmd, 'fs', '-mkdir'] + extra_args + [self._full_hdfs_path(src)], True)
def df(self, src):
'''Perform ``df`` on a path'''
return self._getStdOutCmd([self._hadoop_cmd, 'fs', '-df', self._full_hdfs_path(src)], True)
def du(self, src, extra_args=[]):
'''Perform ``du`` on a path'''
src = [self._full_hdfs_path(x) for x in src]
return self._transform_du_output(self._getStdOutCmd([self._hadoop_cmd, 'fs', '-du'] + extra_args + src, True), self.hdfs_url)
def count(self, src):
'''Perform ``count`` on a path'''
src = [self._full_hdfs_path(x) for x in src]
return self._transform_count_output(self._getStdOutCmd([self._hadoop_cmd, 'fs', '-count'] + src, True), self.hdfs_url)
def cat(self, src, extra_args=[], text=False):
return self._getStdOutCmd([self._hadoop_cmd, 'fs', '-cat'] + extra_args + [self._full_hdfs_path(src)], text)
def copyToLocal(self, src, dst, extra_args=[]):
return self._getStdOutCmd([self._hadoop_cmd, 'fs', '-copyToLocal'] + extra_args + [self._full_hdfs_path(src), dst], True)
def getmerge(self, src, dst, extra_args=[]):
return self._getStdOutCmd([self._hadoop_cmd, 'fs', '-getmerge'] + extra_args + [self._full_hdfs_path(src), dst], True)
def tail(self, src, extra_args=[], text=False):
return self._getStdOutCmd([self._hadoop_cmd, 'fs', '-tail'] + extra_args + [self._full_hdfs_path(src)], text)
def text(self, src):
return self._getStdOutCmd([self._hadoop_cmd, 'fs', '-text', self._full_hdfs_path(src)], True)
def _getReturnCodeCmd(self, cmd):
proc = self._getCmdProcess(cmd, True)
print(proc.communicate())
return proc.wait()
def _getCmdProcess(self, cmd, text=False):
return subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=text)
def _getStdOutCmd(self, cmd, text=False):
return self._getCmdProcess(cmd, text).communicate()[0]
def _full_hdfs_path(self, src):
return "%s%s" % (self.hdfs_url, src)
def _find_mini_cluster_jar(self, path):
for dirpath, dirnames, filenames in os.walk(path):
for files in filenames:
if re.match(".*hadoop-mapreduce-client-jobclient.+-tests.jar", files):
return os.path.join(dirpath, files)
def _start_mini_cluster(self, nnport=None):
if self._jobclient_jar:
hadoop_jar = self._jobclient_jar
else:
hadoop_jar = self._find_mini_cluster_jar(self._hadoop_home)
if not hadoop_jar:
raise Exception("No hadoop jobclient test jar found")
cmd = [self._hadoop_cmd, 'jar', hadoop_jar,
'minicluster', '-nomr', '-format']
if nnport:
cmd.extend(['-nnport', "%s" % nnport])
self.hdfs = subprocess.Popen(cmd, bufsize=0, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, universal_newlines=True)
def _get_namenode_port(self):
while self.hdfs.poll() is None:
rlist, wlist, xlist = select.select([self.hdfs.stderr, self.hdfs.stdout], [], [])
for f in rlist:
line = f.readline()
print((line,))
m = re.match(".*Started MiniDFSCluster -- namenode on port (\d+).*", line)
if m:
return int(m.group(1))
def _transform_ls_output(self, i, base_path):
result = []
for line in i.split("\n"):
if not line or line.startswith("Found"):
continue
(perms, replication, owner, group, length, date, time, path) = re.split("\s+", line)
node = {}
if replication == '-':
replication = 0
node['permission'] = self._perms_to_int(perms)
node['block_replication'] = int(replication)
node['owner'] = owner
node['group'] = group
node['length'] = int(length)
dt = "%s %s" % (date, time)
node['modification_time'] = long(datetime.datetime.strptime(dt, '%Y-%m-%d %H:%M').strftime('%s'))
node['path'] = path.replace(base_path, '')
node['file_type'] = self._get_file_type(perms[0])
result.append(node)
return result
def _transform_du_output(self, i, base_path):
result = []
for line in i.split("\n"):
if line:
fields = re.split("\s+", line)
if len(fields) == 3:
(length, space_consumed, path) = re.split("\s+", line)
elif len(fields) == 2:
(length, path) = re.split("\s+", line)
else:
raise ValueError("Result of du operation should contain 2"
" or 3 field, but there's %d fields"
% len(fields))
result.append({"path": path.replace(base_path, ""),
"length": long(length)})
return result
def _transform_count_output(self, i, base_path):
result = []
for line in i.split("\n"):
if line:
(_, dir_count, file_count, length, path) = re.split("\s+", line)
result.append({"path": path.replace(base_path, ""), "length": long(length),
"directoryCount": long(dir_count), "fileCount": long(file_count)})
return result
def _get_file_type(self, i):
if i == "-":
return "f"
else:
return i
def _perms_to_int(self, perms):
s = ""
for x in perms[1:]:
if x == "-":
s += "0"
else:
s += "1"
octal = "%d%d%d" % (int(s[0:3], 2), int(s[3:6], 2), int(s[6:9], 2))
return int(octal, 8)
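    # Worked example (sketch): for an `ls` permission string "-rw-r--r--", perms[1:] becomes the
    # bit string "110100100", the octal string is "644", and _perms_to_int returns int("644", 8),
    # i.e. 420; _get_file_type("-") returns "f" for the same entry.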
| 43.890511 | 143 | 0.614585 |
a902575ca6c390b725d06233d630c874110e7a6c | 2,514 | py | Python | test/test_unit/test_plugins.py | zlatsic/aimm | 166e56178773ea1333c5d842d10ec60df501f121 | ["Apache-2.0"] | 6 | 2021-06-16T10:57:58.000Z | 2022-02-01T13:43:17.000Z | test/test_unit/test_plugins.py | zlatsic/aimm | 166e56178773ea1333c5d842d10ec60df501f121 | ["Apache-2.0"] | null | null | null | test/test_unit/test_plugins.py | zlatsic/aimm | 166e56178773ea1333c5d842d10ec60df501f121 | ["Apache-2.0"] | 1 | 2021-05-30T23:21:57.000Z | 2021-05-30T23:21:57.000Z |
from aimm import plugins
def test_instantiate(plugin_teardown):
@plugins.instantiate('test', state_cb_arg_name='state_cb')
def instantiate(state_cb):
return state_cb
assert plugins.exec_instantiate('test', 'state_cb') == 'state_cb'
def test_data_access(plugin_teardown):
@plugins.data_access('test', state_cb_arg_name='state_cb')
def data_access(state_cb):
return state_cb
assert plugins.exec_data_access('test', 'state_cb') == 'state_cb'
def test_fit(plugin_teardown):
@plugins.fit(['test'], state_cb_arg_name='state_cb',
instance_arg_name='instance')
def fit(state_cb, instance):
return (state_cb, instance)
result = plugins.exec_fit('test', 'instance', 'state_cb')
assert result == ('state_cb', 'instance')
def test_predict(plugin_teardown):
@plugins.predict(['test'], state_cb_arg_name='state_cb',
instance_arg_name='instance')
def predict(state_cb, instance):
return (state_cb, instance)
assert (plugins.exec_predict('test', 'instance', 'state_cb')
== ('state_cb', 'instance'))
def test_serialize(plugin_teardown):
@plugins.serialize(['test'])
def serialize(instance):
return instance
assert plugins.exec_serialize('test', 'instance') == 'instance'
def test_deserialize(plugin_teardown):
@plugins.deserialize(['test'])
def deserialize(instance_bytes):
return instance_bytes
assert (plugins.exec_deserialize('test', 'instance_bytes')
== 'instance_bytes')
def test_model(plugin_teardown):
@plugins.model
class Model1(plugins.Model):
def __init__(self, *args, **kwargs):
self.args = args
self.kwargs = kwargs
def fit(self, *args, **kwargs):
self.fit_args = args
self.fit_kwargs = kwargs
return self
def predict(self, *args, **kwargs):
return args, kwargs
def serialize(self):
return bytes()
@classmethod
def deserialize(cls):
return Model1()
model_type = 'test_plugins.Model1'
model = plugins.exec_instantiate(model_type, None,
'a1', 'a2', k1='1', k2='2')
assert model.args == ('a1', 'a2')
assert model.kwargs == {'k1': '1', 'k2': '2'}
plugins.exec_fit(model_type, model, None, 'fit_a1', fit_k1='1')
assert model.fit_args == ('fit_a1', )
assert model.fit_kwargs == {'fit_k1': '1'}
| 27.933333 | 69 | 0.625298 |
7a50997d76dfa105f7852cc8c3b8151131394b8e | 21,812 | py | Python | homeassistant/components/fan/__init__.py | tbarbette/core | 8e58c3aa7bc8d2c2b09b6bd329daa1c092d52d3c | ["Apache-2.0"] | 1 | 2017-05-30T22:21:05.000Z | 2017-05-30T22:21:05.000Z | homeassistant/components/fan/__init__.py | tbarbette/core | 8e58c3aa7bc8d2c2b09b6bd329daa1c092d52d3c | ["Apache-2.0"] | 46 | 2020-12-18T07:15:15.000Z | 2022-03-31T06:04:00.000Z | homeassistant/components/fan/__init__.py | tbarbette/core | 8e58c3aa7bc8d2c2b09b6bd329daa1c092d52d3c | ["Apache-2.0"] | 2 | 2021-03-22T21:42:48.000Z | 2021-04-12T12:26:39.000Z |
"""Provides functionality to interact with fans."""
from datetime import timedelta
import functools as ft
import logging
import math
from typing import List, Optional
import voluptuous as vol
from homeassistant.const import (
SERVICE_TOGGLE,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
STATE_ON,
)
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.config_validation import ( # noqa: F401
PLATFORM_SCHEMA,
PLATFORM_SCHEMA_BASE,
)
from homeassistant.helpers.entity import ToggleEntity
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.loader import bind_hass
from homeassistant.util.percentage import (
ordered_list_item_to_percentage,
percentage_to_ordered_list_item,
percentage_to_ranged_value,
ranged_value_to_percentage,
)
_LOGGER = logging.getLogger(__name__)
DOMAIN = "fan"
SCAN_INTERVAL = timedelta(seconds=30)
ENTITY_ID_FORMAT = DOMAIN + ".{}"
# Bitfield of features supported by the fan entity
SUPPORT_SET_SPEED = 1
SUPPORT_OSCILLATE = 2
SUPPORT_DIRECTION = 4
SUPPORT_PRESET_MODE = 8
SERVICE_SET_SPEED = "set_speed"
SERVICE_INCREASE_SPEED = "increase_speed"
SERVICE_DECREASE_SPEED = "decrease_speed"
SERVICE_OSCILLATE = "oscillate"
SERVICE_SET_DIRECTION = "set_direction"
SERVICE_SET_PERCENTAGE = "set_percentage"
SERVICE_SET_PRESET_MODE = "set_preset_mode"
SPEED_OFF = "off"
SPEED_LOW = "low"
SPEED_MEDIUM = "medium"
SPEED_HIGH = "high"
DIRECTION_FORWARD = "forward"
DIRECTION_REVERSE = "reverse"
ATTR_SPEED = "speed"
ATTR_PERCENTAGE = "percentage"
ATTR_PERCENTAGE_STEP = "percentage_step"
ATTR_SPEED_LIST = "speed_list"
ATTR_OSCILLATING = "oscillating"
ATTR_DIRECTION = "direction"
ATTR_PRESET_MODE = "preset_mode"
ATTR_PRESET_MODES = "preset_modes"
# Invalid speeds do not conform to the entity model, but have crept
# into core integrations at some point so we are temporarily
# accommodating them in the transition to percentages.
_NOT_SPEED_OFF = "off"
_NOT_SPEED_ON = "on"
_NOT_SPEED_AUTO = "auto"
_NOT_SPEED_SMART = "smart"
_NOT_SPEED_INTERVAL = "interval"
_NOT_SPEED_IDLE = "idle"
_NOT_SPEED_FAVORITE = "favorite"
_NOT_SPEED_SLEEP = "sleep"
_NOT_SPEED_SILENT = "silent"
_NOT_SPEEDS_FILTER = {
_NOT_SPEED_OFF,
_NOT_SPEED_ON,
_NOT_SPEED_AUTO,
_NOT_SPEED_SMART,
_NOT_SPEED_INTERVAL,
_NOT_SPEED_IDLE,
_NOT_SPEED_SILENT,
_NOT_SPEED_SLEEP,
_NOT_SPEED_FAVORITE,
}
_FAN_NATIVE = "_fan_native"
OFF_SPEED_VALUES = [SPEED_OFF, None]
LEGACY_SPEED_LIST = [SPEED_LOW, SPEED_MEDIUM, SPEED_HIGH]
class NoValidSpeedsError(ValueError):
"""Exception class when there are no valid speeds."""
class NotValidSpeedError(ValueError):
    """Exception class when the speed is not in the speed list."""
class NotValidPresetModeError(ValueError):
    """Exception class when the preset_mode is not in the preset_modes list."""
@bind_hass
def is_on(hass, entity_id: str) -> bool:
"""Return if the fans are on based on the statemachine."""
state = hass.states.get(entity_id)
if ATTR_SPEED in state.attributes:
return state.attributes[ATTR_SPEED] not in OFF_SPEED_VALUES
return state.state == STATE_ON
async def async_setup(hass, config: dict):
"""Expose fan control via statemachine and services."""
component = hass.data[DOMAIN] = EntityComponent(
_LOGGER, DOMAIN, hass, SCAN_INTERVAL
)
await component.async_setup(config)
# After the transition to percentage and preset_modes concludes,
# switch this back to async_turn_on and remove async_turn_on_compat
component.async_register_entity_service(
SERVICE_TURN_ON,
{
vol.Optional(ATTR_SPEED): cv.string,
vol.Optional(ATTR_PERCENTAGE): vol.All(
vol.Coerce(int), vol.Range(min=0, max=100)
),
vol.Optional(ATTR_PRESET_MODE): cv.string,
},
"async_turn_on_compat",
)
component.async_register_entity_service(SERVICE_TURN_OFF, {}, "async_turn_off")
component.async_register_entity_service(SERVICE_TOGGLE, {}, "async_toggle")
# After the transition to percentage and preset_modes concludes,
# remove this service
component.async_register_entity_service(
SERVICE_SET_SPEED,
{vol.Required(ATTR_SPEED): cv.string},
"async_set_speed_deprecated",
[SUPPORT_SET_SPEED],
)
component.async_register_entity_service(
SERVICE_INCREASE_SPEED,
{
vol.Optional(ATTR_PERCENTAGE_STEP): vol.All(
vol.Coerce(int), vol.Range(min=0, max=100)
)
},
"async_increase_speed",
[SUPPORT_SET_SPEED],
)
component.async_register_entity_service(
SERVICE_DECREASE_SPEED,
{
vol.Optional(ATTR_PERCENTAGE_STEP): vol.All(
vol.Coerce(int), vol.Range(min=0, max=100)
)
},
"async_decrease_speed",
[SUPPORT_SET_SPEED],
)
component.async_register_entity_service(
SERVICE_OSCILLATE,
{vol.Required(ATTR_OSCILLATING): cv.boolean},
"async_oscillate",
[SUPPORT_OSCILLATE],
)
component.async_register_entity_service(
SERVICE_SET_DIRECTION,
{vol.Optional(ATTR_DIRECTION): cv.string},
"async_set_direction",
[SUPPORT_DIRECTION],
)
component.async_register_entity_service(
SERVICE_SET_PERCENTAGE,
{
vol.Required(ATTR_PERCENTAGE): vol.All(
vol.Coerce(int), vol.Range(min=0, max=100)
)
},
"async_set_percentage",
[SUPPORT_SET_SPEED],
)
component.async_register_entity_service(
SERVICE_SET_PRESET_MODE,
{vol.Required(ATTR_PRESET_MODE): cv.string},
"async_set_preset_mode",
[SUPPORT_SET_SPEED, SUPPORT_PRESET_MODE],
)
return True
async def async_setup_entry(hass, entry):
"""Set up a config entry."""
return await hass.data[DOMAIN].async_setup_entry(entry)
async def async_unload_entry(hass, entry):
"""Unload a config entry."""
return await hass.data[DOMAIN].async_unload_entry(entry)
def _fan_native(method):
"""Native fan method not overridden."""
setattr(method, _FAN_NATIVE, True)
return method
class FanEntity(ToggleEntity):
"""Representation of a fan."""
@_fan_native
def set_speed(self, speed: str) -> None:
"""Set the speed of the fan."""
raise NotImplementedError()
async def async_set_speed_deprecated(self, speed: str):
"""Set the speed of the fan."""
_LOGGER.warning(
"fan.set_speed is deprecated, use fan.set_percentage or fan.set_preset_mode instead."
)
await self.async_set_speed(speed)
@_fan_native
async def async_set_speed(self, speed: str):
"""Set the speed of the fan."""
if speed == SPEED_OFF:
await self.async_turn_off()
return
if speed in self.preset_modes:
if not hasattr(self.async_set_preset_mode, _FAN_NATIVE):
await self.async_set_preset_mode(speed)
return
if not hasattr(self.set_preset_mode, _FAN_NATIVE):
await self.hass.async_add_executor_job(self.set_preset_mode, speed)
return
else:
if not hasattr(self.async_set_percentage, _FAN_NATIVE):
await self.async_set_percentage(self.speed_to_percentage(speed))
return
if not hasattr(self.set_percentage, _FAN_NATIVE):
await self.hass.async_add_executor_job(
self.set_percentage, self.speed_to_percentage(speed)
)
return
await self.hass.async_add_executor_job(self.set_speed, speed)
@_fan_native
def set_percentage(self, percentage: int) -> None:
"""Set the speed of the fan, as a percentage."""
raise NotImplementedError()
@_fan_native
async def async_set_percentage(self, percentage: int) -> None:
"""Set the speed of the fan, as a percentage."""
if percentage == 0:
await self.async_turn_off()
elif not hasattr(self.set_percentage, _FAN_NATIVE):
await self.hass.async_add_executor_job(self.set_percentage, percentage)
else:
await self.async_set_speed(self.percentage_to_speed(percentage))
async def async_increase_speed(self, percentage_step: Optional[int] = None) -> None:
"""Increase the speed of the fan."""
await self._async_adjust_speed(1, percentage_step)
async def async_decrease_speed(self, percentage_step: Optional[int] = None) -> None:
"""Decrease the speed of the fan."""
await self._async_adjust_speed(-1, percentage_step)
async def _async_adjust_speed(
self, modifier: int, percentage_step: Optional[int]
) -> None:
"""Increase or decrease the speed of the fan."""
current_percentage = self.percentage or 0
if percentage_step is not None:
new_percentage = current_percentage + (percentage_step * modifier)
else:
speed_range = (1, self.speed_count)
speed_index = math.ceil(
percentage_to_ranged_value(speed_range, current_percentage)
)
new_percentage = ranged_value_to_percentage(
speed_range, speed_index + modifier
)
new_percentage = max(0, min(100, new_percentage))
await self.async_set_percentage(new_percentage)
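    # Worked example (a sketch, assuming homeassistant.util.percentage semantics): with
    # speed_count == 3 the range is (1, 3); at 33 % the current index is ceil(0.99) == 1, so one
    # increase step lands on index 2, i.e. roughly 66 % — the low -> medium transition.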
@_fan_native
def set_preset_mode(self, preset_mode: str) -> None:
"""Set new preset mode."""
self._valid_preset_mode_or_raise(preset_mode)
self.set_speed(preset_mode)
@_fan_native
async def async_set_preset_mode(self, preset_mode: str) -> None:
"""Set new preset mode."""
if not hasattr(self.set_preset_mode, _FAN_NATIVE):
await self.hass.async_add_executor_job(self.set_preset_mode, preset_mode)
return
self._valid_preset_mode_or_raise(preset_mode)
await self.async_set_speed(preset_mode)
def _valid_preset_mode_or_raise(self, preset_mode):
"""Raise NotValidPresetModeError on invalid preset_mode."""
preset_modes = self.preset_modes
if preset_mode not in preset_modes:
raise NotValidPresetModeError(
f"The preset_mode {preset_mode} is not a valid preset_mode: {preset_modes}"
)
def set_direction(self, direction: str) -> None:
"""Set the direction of the fan."""
raise NotImplementedError()
async def async_set_direction(self, direction: str):
"""Set the direction of the fan."""
await self.hass.async_add_executor_job(self.set_direction, direction)
# pylint: disable=arguments-differ
def turn_on(
self,
speed: Optional[str] = None,
percentage: Optional[int] = None,
preset_mode: Optional[str] = None,
**kwargs,
) -> None:
"""Turn on the fan."""
raise NotImplementedError()
async def async_turn_on_compat(
self,
speed: Optional[str] = None,
percentage: Optional[int] = None,
preset_mode: Optional[str] = None,
**kwargs,
) -> None:
"""Turn on the fan.
This _compat version wraps async_turn_on with
backwards and forward compatibility.
After the transition to percentage and preset_modes concludes, it
should be removed.
"""
if preset_mode is not None:
self._valid_preset_mode_or_raise(preset_mode)
speed = preset_mode
percentage = None
elif speed is not None:
_LOGGER.warning(
"Calling fan.turn_on with the speed argument is deprecated, use percentage or preset_mode instead."
)
if speed in self.preset_modes:
preset_mode = speed
percentage = None
else:
percentage = self.speed_to_percentage(speed)
elif percentage is not None:
speed = self.percentage_to_speed(percentage)
await self.async_turn_on(
speed=speed,
percentage=percentage,
preset_mode=preset_mode,
**kwargs,
)
# pylint: disable=arguments-differ
async def async_turn_on(
self,
speed: Optional[str] = None,
percentage: Optional[int] = None,
preset_mode: Optional[str] = None,
**kwargs,
) -> None:
"""Turn on the fan."""
if speed == SPEED_OFF:
await self.async_turn_off()
else:
await self.hass.async_add_executor_job(
ft.partial(
self.turn_on,
speed=speed,
percentage=percentage,
preset_mode=preset_mode,
**kwargs,
)
)
def oscillate(self, oscillating: bool) -> None:
"""Oscillate the fan."""
raise NotImplementedError()
async def async_oscillate(self, oscillating: bool):
"""Oscillate the fan."""
await self.hass.async_add_executor_job(self.oscillate, oscillating)
@property
def is_on(self):
"""Return true if the entity is on."""
return self.speed not in [SPEED_OFF, None]
@property
def _implemented_percentage(self):
"""Return true if percentage has been implemented."""
return not hasattr(self.set_percentage, _FAN_NATIVE) or not hasattr(
self.async_set_percentage, _FAN_NATIVE
)
@property
def _implemented_preset_mode(self):
"""Return true if preset_mode has been implemented."""
return not hasattr(self.set_preset_mode, _FAN_NATIVE) or not hasattr(
self.async_set_preset_mode, _FAN_NATIVE
)
@property
def _implemented_speed(self):
"""Return true if speed has been implemented."""
return not hasattr(self.set_speed, _FAN_NATIVE) or not hasattr(
self.async_set_speed, _FAN_NATIVE
)
@property
def speed(self) -> Optional[str]:
"""Return the current speed."""
if self._implemented_preset_mode:
preset_mode = self.preset_mode
if preset_mode:
return preset_mode
if self._implemented_percentage:
percentage = self.percentage
if percentage is None:
return None
return self.percentage_to_speed(percentage)
return None
@property
def percentage(self) -> Optional[int]:
"""Return the current speed as a percentage."""
if not self._implemented_preset_mode:
if self.speed in self.preset_modes:
return None
if not self._implemented_percentage:
return self.speed_to_percentage(self.speed)
return 0
@property
def speed_count(self) -> int:
"""Return the number of speeds the fan supports."""
speed_list = speed_list_without_preset_modes(self.speed_list)
if speed_list:
return len(speed_list)
return 100
@property
def percentage_step(self) -> float:
"""Return the step size for percentage."""
return 100 / self.speed_count
@property
def speed_list(self) -> list:
"""Get the list of available speeds."""
speeds = []
if self._implemented_percentage:
speeds += [SPEED_OFF, *LEGACY_SPEED_LIST]
if self._implemented_preset_mode:
speeds += self.preset_modes
return speeds
@property
def current_direction(self) -> Optional[str]:
"""Return the current direction of the fan."""
return None
@property
def oscillating(self):
"""Return whether or not the fan is currently oscillating."""
return None
@property
def capability_attributes(self):
"""Return capability attributes."""
attrs = {}
if self.supported_features & SUPPORT_SET_SPEED:
attrs[ATTR_SPEED_LIST] = self.speed_list
if (
self.supported_features & SUPPORT_SET_SPEED
or self.supported_features & SUPPORT_PRESET_MODE
):
attrs[ATTR_PRESET_MODES] = self.preset_modes
return attrs
@property
def _speed_list_without_preset_modes(self) -> list:
"""Return the speed list without preset modes.
This property provides forward and backwards
compatibility for conversion to percentage speeds.
"""
if not self._implemented_speed:
return LEGACY_SPEED_LIST
return speed_list_without_preset_modes(self.speed_list)
def speed_to_percentage(self, speed: str) -> int:
"""
Map a speed to a percentage.
Officially this should only have to deal with the 4 pre-defined speeds:
return {
SPEED_OFF: 0,
SPEED_LOW: 33,
SPEED_MEDIUM: 66,
SPEED_HIGH: 100,
}[speed]
Unfortunately lots of fans make up their own speeds. So the default
mapping is more dynamic.
"""
if speed in OFF_SPEED_VALUES:
return 0
speed_list = self._speed_list_without_preset_modes
if speed_list and speed not in speed_list:
raise NotValidSpeedError(f"The speed {speed} is not a valid speed.")
try:
return ordered_list_item_to_percentage(speed_list, speed)
except ValueError as ex:
raise NoValidSpeedsError(
f"The speed_list {speed_list} does not contain any valid speeds."
) from ex
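    # Worked example (sketch): for a fan whose speed_list is ["off", "low", "medium", "high", "auto"],
    # the filtered list is ["low", "medium", "high"], so speed_to_percentage("medium") yields 66.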
def percentage_to_speed(self, percentage: int) -> str:
"""
Map a percentage onto self.speed_list.
Officially, this should only have to deal with 4 pre-defined speeds.
if value == 0:
return SPEED_OFF
elif value <= 33:
return SPEED_LOW
elif value <= 66:
return SPEED_MEDIUM
else:
return SPEED_HIGH
        Unfortunately there is currently a high degree of non-conformance.
Until fans have been corrected a more complicated and dynamic
mapping is used.
"""
if percentage == 0:
return SPEED_OFF
speed_list = self._speed_list_without_preset_modes
try:
return percentage_to_ordered_list_item(speed_list, percentage)
except ValueError as ex:
raise NoValidSpeedsError(
f"The speed_list {speed_list} does not contain any valid speeds."
) from ex
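    # Worked example (sketch): with the same three-entry list, percentage_to_speed(50) falls in
    # the middle band and returns "medium", while percentage_to_speed(100) returns "high".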
@property
def state_attributes(self) -> dict:
"""Return optional state attributes."""
data = {}
supported_features = self.supported_features
if supported_features & SUPPORT_DIRECTION:
data[ATTR_DIRECTION] = self.current_direction
if supported_features & SUPPORT_OSCILLATE:
data[ATTR_OSCILLATING] = self.oscillating
if supported_features & SUPPORT_SET_SPEED:
data[ATTR_SPEED] = self.speed
data[ATTR_PERCENTAGE] = self.percentage
data[ATTR_PERCENTAGE_STEP] = self.percentage_step
if (
supported_features & SUPPORT_PRESET_MODE
or supported_features & SUPPORT_SET_SPEED
):
data[ATTR_PRESET_MODE] = self.preset_mode
return data
@property
def supported_features(self) -> int:
"""Flag supported features."""
return 0
@property
def preset_mode(self) -> Optional[str]:
"""Return the current preset mode, e.g., auto, smart, interval, favorite.
Requires SUPPORT_SET_SPEED.
"""
speed = self.speed
if speed in self.preset_modes:
return speed
return None
@property
def preset_modes(self) -> Optional[List[str]]:
"""Return a list of available preset modes.
Requires SUPPORT_SET_SPEED.
"""
return preset_modes_from_speed_list(self.speed_list)
def speed_list_without_preset_modes(speed_list: List):
"""Filter out non-speeds from the speed list.
The goal is to get the speeds in a list from lowest to
highest by removing speeds that are not valid or out of order
so we can map them to percentages.
Examples:
input: ["off", "low", "low-medium", "medium", "medium-high", "high", "auto"]
output: ["low", "low-medium", "medium", "medium-high", "high"]
input: ["off", "auto", "low", "medium", "high"]
output: ["low", "medium", "high"]
input: ["off", "1", "2", "3", "4", "5", "6", "7", "smart"]
output: ["1", "2", "3", "4", "5", "6", "7"]
input: ["Auto", "Silent", "Favorite", "Idle", "Medium", "High", "Strong"]
output: ["Medium", "High", "Strong"]
"""
return [speed for speed in speed_list if speed.lower() not in _NOT_SPEEDS_FILTER]
def preset_modes_from_speed_list(speed_list: List):
"""Filter out non-preset modes from the speed list.
The goal is to return only preset modes.
Examples:
input: ["off", "low", "low-medium", "medium", "medium-high", "high", "auto"]
output: ["auto"]
input: ["off", "auto", "low", "medium", "high"]
output: ["auto"]
input: ["off", "1", "2", "3", "4", "5", "6", "7", "smart"]
output: ["smart"]
input: ["Auto", "Silent", "Favorite", "Idle", "Medium", "High", "Strong"]
output: ["Auto", "Silent", "Favorite", "Idle"]
"""
return [
speed
for speed in speed_list
if speed.lower() in _NOT_SPEEDS_FILTER and speed.lower() != SPEED_OFF
]
| 31.795918 | 115 | 0.639052 |
ec7bdaf82920b92a3cdd49c5863a7e7cbd5f1d55 | 12,673 | py | Python | train.py | yzyouzhang/AIR-ASVspoof | b26830d56d4baa6247a72955292ad9d2a336c6f6 | ["MIT"] | 40 | 2020-10-30T20:41:58.000Z | 2022-03-14T05:36:47.000Z | train.py | AirLabUR/AIR-ASVspoof | b26830d56d4baa6247a72955292ad9d2a336c6f6 | ["MIT"] | 13 | 2020-11-01T16:58:12.000Z | 2021-12-29T16:49:36.000Z | train.py | AirLabUR/AIR-ASVspoof | b26830d56d4baa6247a72955292ad9d2a336c6f6 | ["MIT"] | 18 | 2020-12-23T09:03:12.000Z | 2022-03-30T10:20:33.000Z |
import argparse
import os
import json
import shutil
from resnet import setup_seed, ResNet
from loss import *
from dataset import ASVspoof2019
from collections import defaultdict
from tqdm import tqdm
import eval_metrics as em
import numpy as np
import torch
from torch.utils.data import DataLoader
torch.set_default_tensor_type(torch.FloatTensor)
def initParams():
parser = argparse.ArgumentParser(description=__doc__)
# Data folder prepare
parser.add_argument("-a", "--access_type", type=str, help="LA or PA", default='LA')
parser.add_argument("-f", "--path_to_features", type=str, help="features path",
default='/dataNVME/neil/ASVspoof2019LAFeatures/')
parser.add_argument("-p", "--path_to_protocol", type=str, help="protocol path",
default='/data/neil/DS_10283_3336/LA/ASVspoof2019_LA_cm_protocols/')
parser.add_argument("-o", "--out_fold", type=str, help="output folder", required=True, default='./models/try/')
# Dataset prepare
parser.add_argument("--feat_len", type=int, help="features length", default=750)
parser.add_argument('--padding', type=str, default='repeat', choices=['zero', 'repeat'],
help="how to pad short utterance")
parser.add_argument("--enc_dim", type=int, help="encoding dimension", default=256)
# Training hyperparameters
parser.add_argument('--num_epochs', type=int, default=100, help="Number of epochs for training")
parser.add_argument('--batch_size', type=int, default=64, help="Mini batch size for training")
parser.add_argument('--lr', type=float, default=0.0003, help="learning rate")
parser.add_argument('--lr_decay', type=float, default=0.5, help="decay learning rate")
parser.add_argument('--interval', type=int, default=10, help="interval to decay lr")
parser.add_argument('--beta_1', type=float, default=0.9, help="bata_1 for Adam")
parser.add_argument('--beta_2', type=float, default=0.999, help="beta_2 for Adam")
parser.add_argument('--eps', type=float, default=1e-8, help="epsilon for Adam")
parser.add_argument("--gpu", type=str, help="GPU index", default="1")
parser.add_argument('--num_workers', type=int, default=0, help="number of workers")
parser.add_argument('--seed', type=int, help="random number seed", default=598)
parser.add_argument('--add_loss', type=str, default="ocsoftmax",
choices=["softmax", 'amsoftmax', 'ocsoftmax'], help="loss for one-class training")
parser.add_argument('--weight_loss', type=float, default=1, help="weight for other loss")
parser.add_argument('--r_real', type=float, default=0.9, help="r_real for ocsoftmax")
parser.add_argument('--r_fake', type=float, default=0.2, help="r_fake for ocsoftmax")
parser.add_argument('--alpha', type=float, default=20, help="scale factor for ocsoftmax")
parser.add_argument('--continue_training', action='store_true', help="continue training with previously trained model")
args = parser.parse_args()
# Change this to specify GPU
os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
# Set seeds
setup_seed(args.seed)
if args.continue_training:
assert os.path.exists(args.out_fold)
else:
# Path for output data
if not os.path.exists(args.out_fold):
os.makedirs(args.out_fold)
else:
shutil.rmtree(args.out_fold)
os.mkdir(args.out_fold)
# Folder for intermediate results
if not os.path.exists(os.path.join(args.out_fold, 'checkpoint')):
os.makedirs(os.path.join(args.out_fold, 'checkpoint'))
else:
shutil.rmtree(os.path.join(args.out_fold, 'checkpoint'))
os.mkdir(os.path.join(args.out_fold, 'checkpoint'))
# Path for input data
assert os.path.exists(args.path_to_features)
# Save training arguments
with open(os.path.join(args.out_fold, 'args.json'), 'w') as file:
file.write(json.dumps(vars(args), sort_keys=True, separators=('\n', ':')))
with open(os.path.join(args.out_fold, 'train_loss.log'), 'w') as file:
file.write("Start recording training loss ...\n")
with open(os.path.join(args.out_fold, 'dev_loss.log'), 'w') as file:
file.write("Start recording validation loss ...\n")
# assign device
args.cuda = torch.cuda.is_available()
print('Cuda device available: ', args.cuda)
args.device = torch.device("cuda" if args.cuda else "cpu")
return args
def adjust_learning_rate(args, optimizer, epoch_num):
lr = args.lr * (args.lr_decay ** (epoch_num // args.interval))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
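# Worked example (sketch, using the defaults above): with lr=0.0003, lr_decay=0.5 and interval=10,
# epochs 0-9 train at 3e-4, epochs 10-19 at 1.5e-4, and epoch 25 uses 0.0003 * 0.5 ** 2 = 7.5e-5.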
def train(args):
torch.set_default_tensor_type(torch.FloatTensor)
# initialize model
lfcc_model = ResNet(3, args.enc_dim, resnet_type='18', nclasses=2).to(args.device)
if args.continue_training:
lfcc_model = torch.load(os.path.join(args.out_fold, 'anti-spoofing_lfcc_model.pt')).to(args.device)
lfcc_optimizer = torch.optim.Adam(lfcc_model.parameters(), lr=args.lr,
betas=(args.beta_1, args.beta_2), eps=args.eps, weight_decay=0.0005)
training_set = ASVspoof2019(args.access_type, args.path_to_features, args.path_to_protocol, 'train',
'LFCC', feat_len=args.feat_len, padding=args.padding)
validation_set = ASVspoof2019(args.access_type, args.path_to_features, args.path_to_protocol, 'dev',
'LFCC', feat_len=args.feat_len, padding=args.padding)
trainDataLoader = DataLoader(training_set, batch_size=args.batch_size, shuffle=True, num_workers=args.num_workers,
collate_fn=training_set.collate_fn)
valDataLoader = DataLoader(validation_set, batch_size=args.batch_size, shuffle=True, num_workers=args.num_workers,
collate_fn=validation_set.collate_fn)
feat, _, _, _ = training_set[29]
print("Feature shape", feat.shape)
criterion = nn.CrossEntropyLoss()
if args.add_loss == "amsoftmax":
amsoftmax_loss = AMSoftmax(2, args.enc_dim, s=args.alpha, m=args.r_real).to(args.device)
amsoftmax_loss.train()
amsoftmax_optimzer = torch.optim.SGD(amsoftmax_loss.parameters(), lr=0.01)
if args.add_loss == "ocsoftmax":
ocsoftmax = OCSoftmax(args.enc_dim, r_real=args.r_real, r_fake=args.r_fake, alpha=args.alpha).to(args.device)
ocsoftmax.train()
ocsoftmax_optimzer = torch.optim.SGD(ocsoftmax.parameters(), lr=args.lr)
early_stop_cnt = 0
prev_eer = 1e8
monitor_loss = args.add_loss
for epoch_num in tqdm(range(args.num_epochs)):
lfcc_model.train()
trainlossDict = defaultdict(list)
devlossDict = defaultdict(list)
adjust_learning_rate(args, lfcc_optimizer, epoch_num)
if args.add_loss == "ocsoftmax":
adjust_learning_rate(args, ocsoftmax_optimzer, epoch_num)
elif args.add_loss == "amsoftmax":
adjust_learning_rate(args, amsoftmax_optimzer, epoch_num)
print('\nEpoch: %d ' % (epoch_num + 1))
for i, (lfcc, audio_fn, tags, labels) in enumerate(tqdm(trainDataLoader)):
lfcc = lfcc.unsqueeze(1).float().to(args.device)
labels = labels.to(args.device)
feats, lfcc_outputs = lfcc_model(lfcc)
lfcc_loss = criterion(lfcc_outputs, labels)
if args.add_loss == "softmax":
lfcc_optimizer.zero_grad()
trainlossDict[args.add_loss].append(lfcc_loss.item())
lfcc_loss.backward()
lfcc_optimizer.step()
if args.add_loss == "ocsoftmax":
ocsoftmaxloss, _ = ocsoftmax(feats, labels)
lfcc_loss = ocsoftmaxloss * args.weight_loss
lfcc_optimizer.zero_grad()
ocsoftmax_optimzer.zero_grad()
trainlossDict[args.add_loss].append(ocsoftmaxloss.item())
lfcc_loss.backward()
lfcc_optimizer.step()
ocsoftmax_optimzer.step()
if args.add_loss == "amsoftmax":
outputs, moutputs = amsoftmax_loss(feats, labels)
lfcc_loss = criterion(moutputs, labels)
trainlossDict[args.add_loss].append(lfcc_loss.item())
lfcc_optimizer.zero_grad()
amsoftmax_optimzer.zero_grad()
lfcc_loss.backward()
lfcc_optimizer.step()
amsoftmax_optimzer.step()
with open(os.path.join(args.out_fold, "train_loss.log"), "a") as log:
log.write(str(epoch_num) + "\t" + str(i) + "\t" +
str(np.nanmean(trainlossDict[monitor_loss])) + "\n")
# Val the model
lfcc_model.eval()
with torch.no_grad():
idx_loader, score_loader = [], []
for i, (lfcc, audio_fn, tags, labels) in enumerate(tqdm(valDataLoader)):
lfcc = lfcc.unsqueeze(1).float().to(args.device)
labels = labels.to(args.device)
feats, lfcc_outputs = lfcc_model(lfcc)
lfcc_loss = criterion(lfcc_outputs, labels)
score = F.softmax(lfcc_outputs, dim=1)[:, 0]
if args.add_loss == "softmax":
devlossDict["softmax"].append(lfcc_loss.item())
elif args.add_loss == "amsoftmax":
outputs, moutputs = amsoftmax_loss(feats, labels)
lfcc_loss = criterion(moutputs, labels)
score = F.softmax(outputs, dim=1)[:, 0]
devlossDict[args.add_loss].append(lfcc_loss.item())
elif args.add_loss == "ocsoftmax":
ocsoftmaxloss, score = ocsoftmax(feats, labels)
devlossDict[args.add_loss].append(ocsoftmaxloss.item())
idx_loader.append(labels)
score_loader.append(score)
scores = torch.cat(score_loader, 0).data.cpu().numpy()
labels = torch.cat(idx_loader, 0).data.cpu().numpy()
val_eer = em.compute_eer(scores[labels == 0], scores[labels == 1])[0]
other_val_eer = em.compute_eer(-scores[labels == 0], -scores[labels == 1])[0]
val_eer = min(val_eer, other_val_eer)
with open(os.path.join(args.out_fold, "dev_loss.log"), "a") as log:
log.write(str(epoch_num) + "\t" + str(np.nanmean(devlossDict[monitor_loss])) + "\t" + str(val_eer) +"\n")
print("Val EER: {}".format(val_eer))
torch.save(lfcc_model, os.path.join(args.out_fold, 'checkpoint',
'anti-spoofing_lfcc_model_%d.pt' % (epoch_num + 1)))
if args.add_loss == "ocsoftmax":
loss_model = ocsoftmax
torch.save(loss_model, os.path.join(args.out_fold, 'checkpoint',
'anti-spoofing_loss_model_%d.pt' % (epoch_num + 1)))
elif args.add_loss == "amsoftmax":
loss_model = amsoftmax_loss
torch.save(loss_model, os.path.join(args.out_fold, 'checkpoint',
'anti-spoofing_loss_model_%d.pt' % (epoch_num + 1)))
else:
loss_model = None
if val_eer < prev_eer:
# Save the model checkpoint
torch.save(lfcc_model, os.path.join(args.out_fold, 'anti-spoofing_lfcc_model.pt'))
if args.add_loss == "ocsoftmax":
loss_model = ocsoftmax
torch.save(loss_model, os.path.join(args.out_fold, 'anti-spoofing_loss_model.pt'))
elif args.add_loss == "amsoftmax":
loss_model = amsoftmax_loss
torch.save(loss_model, os.path.join(args.out_fold, 'anti-spoofing_loss_model.pt'))
else:
loss_model = None
prev_eer = val_eer
early_stop_cnt = 0
else:
early_stop_cnt += 1
if early_stop_cnt == 100:
with open(os.path.join(args.out_fold, 'args.json'), 'a') as res_file:
res_file.write('\nTrained Epochs: %d\n' % (epoch_num - 19))
break
return lfcc_model, loss_model
if __name__ == "__main__":
args = initParams()
_, _ = train(args)
model = torch.load(os.path.join(args.out_fold, 'anti-spoofing_lfcc_model.pt'))
if args.add_loss == "softmax":
loss_model = None
else:
loss_model = torch.load(os.path.join(args.out_fold, 'anti-spoofing_loss_model.pt'))
| 46.421245 | 123 | 0.62211 |
56a01a5bad0bb4df8209525caf7cb74226bc5ee2 | 1,558 | py | Python | nf_common_source/code/services/file_system_service/objects/file_system_objects.py | boro-alpha/nf_common | 66d6844d9ae9a86a3e5b461f92e1ba0ec15e85ef | ["MIT"] | null | null | null | nf_common_source/code/services/file_system_service/objects/file_system_objects.py | boro-alpha/nf_common | 66d6844d9ae9a86a3e5b461f92e1ba0ec15e85ef | ["MIT"] | null | null | null | nf_common_source/code/services/file_system_service/objects/file_system_objects.py | boro-alpha/nf_common | 66d6844d9ae9a86a3e5b461f92e1ba0ec15e85ef | ["MIT"] | null | null | null |
from nf_common_source.code.services.file_system_service.objects.wrappers.absolute_path_wrappers import \
AbsolutePathWrappers
from nf_common_source.code.services.identification_services.uuid_service.uuid_helpers.uuid_factory import create_new_uuid
class FileSystemObjects:
def __init__(
self,
absolute_path_string: str):
self.uuid = \
create_new_uuid()
self.__path = \
AbsolutePathWrappers(
absolute_path_string)
@property
def base_name(
self) \
-> str:
return \
self.__path.base_name
@property
def absolute_path_string(
self) \
-> str:
return \
self.__path.absolute_path_string
@property
def absolute_level(
self) \
-> int:
return \
self.__path.absolute_level
@property
def parent_absolute_path_string(
self) \
-> str:
return \
str(self.__path.parent)
def extend_path(
self,
path_extension: str) \
-> str:
return \
self.__path.extend_path(
path_extension)
def exists(
self) \
-> bool:
return \
self.__path.exists()
def list_of_components(self):
return \
self.__path.list_of_components()
def item_count(
self) \
-> int:
return \
self.__path.item_count()
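# --- Illustrative sketch (editor's addition, not from the original module) ---
# FileSystemObjects wraps an absolute path string and exposes path metadata through
# AbsolutePathWrappers. Assuming the nf_common dependencies are importable, usage
# might look like this; '/tmp/example' and 'child_folder' are arbitrary placeholders.
def _sketch_file_system_objects_usage():
    file_system_object = \
        FileSystemObjects(
            '/tmp/example')
    print(file_system_object.base_name)
    print(file_system_object.absolute_level)
    print(file_system_object.extend_path('child_folder'))
    print(file_system_object.exists())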
| 22.57971 | 121 | 0.539153 |
ab892bc52494c25bf5b46f92ca862965b4d99e5c | 8,367 | py | Python | Lib/lib2to3/fixes/fix_urllib.py | shawwn/cpython | 0ff8a3b374286d2218fc18f47556a5ace202dad3 | [
"0BSD"
] | 52,316 | 2015-01-01T15:56:25.000Z | 2022-03-31T23:19:01.000Z | Lib/lib2to3/fixes/fix_urllib.py | shawwn/cpython | 0ff8a3b374286d2218fc18f47556a5ace202dad3 | [
"0BSD"
] | 25,286 | 2015-03-03T23:18:02.000Z | 2022-03-31T23:17:27.000Z | Lib/lib2to3/fixes/fix_urllib.py | shawwn/cpython | 0ff8a3b374286d2218fc18f47556a5ace202dad3 | [
"0BSD"
] | 31,623 | 2015-01-01T13:29:37.000Z | 2022-03-31T19:55:06.000Z | """Fix changes imports of urllib which are now incompatible.
This is rather similar to fix_imports, but because of the more
complex nature of the fixing for urllib, it has its own fixer.
"""
# Author: Nick Edds
# Local imports
from lib2to3.fixes.fix_imports import alternates, FixImports
from lib2to3.fixer_util import (Name, Comma, FromImport, Newline,
find_indentation, Node, syms)
MAPPING = {"urllib": [
("urllib.request",
["URLopener", "FancyURLopener", "urlretrieve",
"_urlopener", "urlopen", "urlcleanup",
"pathname2url", "url2pathname", "getproxies"]),
("urllib.parse",
["quote", "quote_plus", "unquote", "unquote_plus",
"urlencode", "splitattr", "splithost", "splitnport",
"splitpasswd", "splitport", "splitquery", "splittag",
"splittype", "splituser", "splitvalue", ]),
("urllib.error",
["ContentTooShortError"])],
"urllib2" : [
("urllib.request",
["urlopen", "install_opener", "build_opener",
"Request", "OpenerDirector", "BaseHandler",
"HTTPDefaultErrorHandler", "HTTPRedirectHandler",
"HTTPCookieProcessor", "ProxyHandler",
"HTTPPasswordMgr",
"HTTPPasswordMgrWithDefaultRealm",
"AbstractBasicAuthHandler",
"HTTPBasicAuthHandler", "ProxyBasicAuthHandler",
"AbstractDigestAuthHandler",
"HTTPDigestAuthHandler", "ProxyDigestAuthHandler",
"HTTPHandler", "HTTPSHandler", "FileHandler",
"FTPHandler", "CacheFTPHandler",
"UnknownHandler"]),
("urllib.error",
["URLError", "HTTPError"]),
]
}
# Duplicate the url parsing functions for urllib2.
MAPPING["urllib2"].append(MAPPING["urllib"][1])
def build_pattern():
bare = set()
for old_module, changes in MAPPING.items():
for change in changes:
new_module, members = change
members = alternates(members)
yield """import_name< 'import' (module=%r
| dotted_as_names< any* module=%r any* >) >
""" % (old_module, old_module)
yield """import_from< 'from' mod_member=%r 'import'
( member=%s | import_as_name< member=%s 'as' any > |
import_as_names< members=any* >) >
""" % (old_module, members, members)
yield """import_from< 'from' module_star=%r 'import' star='*' >
""" % old_module
yield """import_name< 'import'
dotted_as_name< module_as=%r 'as' any > >
""" % old_module
# bare_with_attr has a special significance for FixImports.match().
yield """power< bare_with_attr=%r trailer< '.' member=%s > any* >
""" % (old_module, members)
class FixUrllib(FixImports):
def build_pattern(self):
return "|".join(build_pattern())
def transform_import(self, node, results):
"""Transform for the basic import case. Replaces the old
import name with a comma separated list of its
replacements.
"""
import_mod = results.get("module")
pref = import_mod.prefix
names = []
# create a Node list of the replacement modules
for name in MAPPING[import_mod.value][:-1]:
names.extend([Name(name[0], prefix=pref), Comma()])
names.append(Name(MAPPING[import_mod.value][-1][0], prefix=pref))
import_mod.replace(names)
def transform_member(self, node, results):
"""Transform for imports of specific module elements. Replaces
the module to be imported from with the appropriate new
module.
"""
mod_member = results.get("mod_member")
pref = mod_member.prefix
member = results.get("member")
# Simple case with only a single member being imported
if member:
# this may be a list of length one, or just a node
if isinstance(member, list):
member = member[0]
new_name = None
for change in MAPPING[mod_member.value]:
if member.value in change[1]:
new_name = change[0]
break
if new_name:
mod_member.replace(Name(new_name, prefix=pref))
else:
self.cannot_convert(node, "This is an invalid module element")
# Multiple members being imported
else:
# a dictionary for replacements, order matters
modules = []
mod_dict = {}
members = results["members"]
for member in members:
# we only care about the actual members
if member.type == syms.import_as_name:
as_name = member.children[2].value
member_name = member.children[0].value
else:
member_name = member.value
as_name = None
if member_name != ",":
for change in MAPPING[mod_member.value]:
if member_name in change[1]:
if change[0] not in mod_dict:
modules.append(change[0])
mod_dict.setdefault(change[0], []).append(member)
new_nodes = []
indentation = find_indentation(node)
first = True
def handle_name(name, prefix):
if name.type == syms.import_as_name:
kids = [Name(name.children[0].value, prefix=prefix),
name.children[1].clone(),
name.children[2].clone()]
return [Node(syms.import_as_name, kids)]
return [Name(name.value, prefix=prefix)]
for module in modules:
elts = mod_dict[module]
names = []
for elt in elts[:-1]:
names.extend(handle_name(elt, pref))
names.append(Comma())
names.extend(handle_name(elts[-1], pref))
new = FromImport(module, names)
if not first or node.parent.prefix.endswith(indentation):
new.prefix = indentation
new_nodes.append(new)
first = False
if new_nodes:
nodes = []
for new_node in new_nodes[:-1]:
nodes.extend([new_node, Newline()])
nodes.append(new_nodes[-1])
node.replace(nodes)
else:
self.cannot_convert(node, "All module elements are invalid")
def transform_dot(self, node, results):
"""Transform for calls to module members in code."""
module_dot = results.get("bare_with_attr")
member = results.get("member")
new_name = None
if isinstance(member, list):
member = member[0]
for change in MAPPING[module_dot.value]:
if member.value in change[1]:
new_name = change[0]
break
if new_name:
module_dot.replace(Name(new_name,
prefix=module_dot.prefix))
else:
self.cannot_convert(node, "This is an invalid module element")
def transform(self, node, results):
if results.get("module"):
self.transform_import(node, results)
elif results.get("mod_member"):
self.transform_member(node, results)
elif results.get("bare_with_attr"):
self.transform_dot(node, results)
# Renaming and star imports are not supported for these modules.
elif results.get("module_star"):
self.cannot_convert(node, "Cannot handle star imports.")
elif results.get("module_as"):
self.cannot_convert(node, "This module is now multiple modules")
| 42.472081 | 79 | 0.528385 |
99a72fa14a3725e55fe0bf7367bf268d879f6653 | 1,128 | py | Python | setup.py | eldrin/aarms | bdd5455ac8dcfc1fe91a12fdd132b74e6c37609d | [
"MIT"
] | null | null | null | setup.py | eldrin/aarms | bdd5455ac8dcfc1fe91a12fdd132b74e6c37609d | [
"MIT"
] | 3 | 2020-11-05T08:44:46.000Z | 2020-11-10T17:25:15.000Z | setup.py | eldrin/aarms | bdd5455ac8dcfc1fe91a12fdd132b74e6c37609d | [
"MIT"
] | null | null | null | from setuptools import setup, find_packages
def readme():
with open('README.md') as f:
return f.read()
def requirements():
with open('requirements.txt') as f:
return [line.strip() for line in f]
setup(name='aarms',
version='0.0.1',
description='Attribute-Aware Recommender ModelS',
long_description=readme(),
classifiers=[
'Development Status :: 3 - Alpha',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Topic :: Scientific/Engineering :: Information Analysis'
],
keywords='Attribute-Aware Recommender ModelS',
url='http://github.com/eldrin/aarms',
author='Jaehun Kim',
author_email='j.h.kim@tudelft.nl',
license='MIT',
# packages=['aarms'],
packages=find_packages('.'),
install_requires=requirements(),
extras_require={
'dev': [
'scikit-learn>=0.23.2',
]
},
test_suite='tests',
zip_safe=False)
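# --- Illustrative note (editor's addition, not from the original setup script) ---
# With this setup.py at the project root, a typical editable install that also pulls
# in the optional 'dev' extra declared above would be:
#
#   pip install -e ".[dev]"
#
# and the declared test suite can be run with:
#
#   python setup.py test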
| 28.2 | 65 | 0.583333 |
84b602af98a3c2471e0a0aa1b4d01c65b5ff9883 | 7,291 | py | Python | qa/rpc-tests/walletbackup.py | votcoin/votcoin | f4e594afe2b7b4a04325f5c304329a675decec6a | [
"MIT"
] | null | null | null | qa/rpc-tests/walletbackup.py | votcoin/votcoin | f4e594afe2b7b4a04325f5c304329a675decec6a | [
"MIT"
] | null | null | null | qa/rpc-tests/walletbackup.py | votcoin/votcoin | f4e594afe2b7b4a04325f5c304329a675decec6a | [
"MIT"
] | null | null | null | #!/usr/bin/env python2
# Copyright (c) 2014 The Bitcoin Core developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""
Exercise the wallet backup code. Ported from walletbackup.sh.
Test case is:
4 nodes. 1 2 and 3 send transactions between each other,
fourth node is a miner.
1 2 3 each mine a block to start, then
Miner creates 100 blocks so 1 2 3 each have 50 mature
coins to spend.
Then 5 iterations of 1/2/3 sending coins amongst
themselves to get transactions in the wallets,
and the miner mining one block.
Wallets are backed up using dumpwallet/backupwallet.
Then 5 more iterations of transactions and mining a block.
Miner then generates 101 more blocks, so any
transaction fees paid mature.
Sanity check:
Sum(1,2,3,4 balances) == 114*50
1/2/3 are shutdown, and their wallets erased.
Then restore using wallet.dat backup. And
confirm 1/2/3/4 balances are same as before.
Shutdown again, restore using importwallet,
and confirm again balances are correct.
"""
from test_framework import VotcoinTestFramework
from util import *
from random import randint
import logging
logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.INFO)
class WalletBackupTest(VotcoinTestFramework):
def setup_chain(self):
logging.info("Initializing test directory "+self.options.tmpdir)
initialize_chain_clean(self.options.tmpdir, 4)
# This mirrors how the network was setup in the bash test
def setup_network(self, split=False):
# nodes 1, 2,3 are spenders, let's give them a keypool=100
extra_args = [["-keypool=100"], ["-keypool=100"], ["-keypool=100"], []]
self.nodes = start_nodes(4, self.options.tmpdir, extra_args)
connect_nodes(self.nodes[0], 3)
connect_nodes(self.nodes[1], 3)
connect_nodes(self.nodes[2], 3)
connect_nodes(self.nodes[2], 0)
self.is_network_split=False
self.sync_all()
def one_send(self, from_node, to_address):
if (randint(1,2) == 1):
amount = Decimal(randint(1,10)) / Decimal(10)
self.nodes[from_node].sendtoaddress(to_address, amount)
def do_one_round(self):
a0 = self.nodes[0].getnewaddress()
a1 = self.nodes[1].getnewaddress()
a2 = self.nodes[2].getnewaddress()
self.one_send(0, a1)
self.one_send(0, a2)
self.one_send(1, a0)
self.one_send(1, a2)
self.one_send(2, a0)
self.one_send(2, a1)
# Have the miner (node3) mine a block.
# Must sync mempools before mining.
sync_mempools(self.nodes)
self.nodes[3].setgenerate(True, 1)
# As above, this mirrors the original bash test.
def start_three(self):
self.nodes[0] = start_node(0, self.options.tmpdir)
self.nodes[1] = start_node(1, self.options.tmpdir)
self.nodes[2] = start_node(2, self.options.tmpdir)
connect_nodes(self.nodes[0], 3)
connect_nodes(self.nodes[1], 3)
connect_nodes(self.nodes[2], 3)
connect_nodes(self.nodes[2], 0)
def stop_three(self):
stop_node(self.nodes[0], 0)
stop_node(self.nodes[1], 1)
stop_node(self.nodes[2], 2)
def erase_three(self):
os.remove(self.options.tmpdir + "/node0/regtest/wallet.dat")
os.remove(self.options.tmpdir + "/node1/regtest/wallet.dat")
os.remove(self.options.tmpdir + "/node2/regtest/wallet.dat")
def run_test(self):
logging.info("Generating initial blockchain")
self.nodes[0].setgenerate(True, 1)
sync_blocks(self.nodes)
self.nodes[1].setgenerate(True, 1)
sync_blocks(self.nodes)
self.nodes[2].setgenerate(True, 1)
sync_blocks(self.nodes)
self.nodes[3].setgenerate(True, 100)
sync_blocks(self.nodes)
assert_equal(self.nodes[0].getbalance(), 50)
assert_equal(self.nodes[1].getbalance(), 50)
assert_equal(self.nodes[2].getbalance(), 50)
assert_equal(self.nodes[3].getbalance(), 0)
logging.info("Creating transactions")
# Five rounds of sending each other transactions.
for i in range(5):
self.do_one_round()
logging.info("Backing up")
tmpdir = self.options.tmpdir
self.nodes[0].backupwallet(tmpdir + "/node0/wallet.bak")
self.nodes[0].dumpwallet(tmpdir + "/node0/wallet.dump")
self.nodes[1].backupwallet(tmpdir + "/node1/wallet.bak")
self.nodes[1].dumpwallet(tmpdir + "/node1/wallet.dump")
self.nodes[2].backupwallet(tmpdir + "/node2/wallet.bak")
self.nodes[2].dumpwallet(tmpdir + "/node2/wallet.dump")
logging.info("More transactions")
for i in range(5):
self.do_one_round()
# Generate 101 more blocks, so any fees paid mature
self.nodes[3].setgenerate(True, 101)
self.sync_all()
balance0 = self.nodes[0].getbalance()
balance1 = self.nodes[1].getbalance()
balance2 = self.nodes[2].getbalance()
balance3 = self.nodes[3].getbalance()
total = balance0 + balance1 + balance2 + balance3
# At this point, there are 214 blocks (103 for setup, then 10 rounds, then 101.)
# 114 are mature, so the sum of all wallets should be 114 * 50 = 5700.
assert_equal(total, 5700)
##
# Test restoring spender wallets from backups
##
logging.info("Restoring using wallet.dat")
self.stop_three()
self.erase_three()
# Start node2 with no chain
shutil.rmtree(self.options.tmpdir + "/node2/regtest/blocks")
shutil.rmtree(self.options.tmpdir + "/node2/regtest/chainstate")
# Restore wallets from backup
shutil.copyfile(tmpdir + "/node0/wallet.bak", tmpdir + "/node0/regtest/wallet.dat")
shutil.copyfile(tmpdir + "/node1/wallet.bak", tmpdir + "/node1/regtest/wallet.dat")
shutil.copyfile(tmpdir + "/node2/wallet.bak", tmpdir + "/node2/regtest/wallet.dat")
logging.info("Re-starting nodes")
self.start_three()
sync_blocks(self.nodes)
assert_equal(self.nodes[0].getbalance(), balance0)
assert_equal(self.nodes[1].getbalance(), balance1)
assert_equal(self.nodes[2].getbalance(), balance2)
logging.info("Restoring using dumped wallet")
self.stop_three()
self.erase_three()
#start node2 with no chain
shutil.rmtree(self.options.tmpdir + "/node2/regtest/blocks")
shutil.rmtree(self.options.tmpdir + "/node2/regtest/chainstate")
self.start_three()
assert_equal(self.nodes[0].getbalance(), 0)
assert_equal(self.nodes[1].getbalance(), 0)
assert_equal(self.nodes[2].getbalance(), 0)
self.nodes[0].importwallet(tmpdir + "/node0/wallet.dump")
self.nodes[1].importwallet(tmpdir + "/node1/wallet.dump")
self.nodes[2].importwallet(tmpdir + "/node2/wallet.dump")
sync_blocks(self.nodes)
assert_equal(self.nodes[0].getbalance(), balance0)
assert_equal(self.nodes[1].getbalance(), balance1)
assert_equal(self.nodes[2].getbalance(), balance2)
if __name__ == '__main__':
WalletBackupTest().main()
| 36.273632 | 91 | 0.654917 |
9631b19078e2a3a5d6db3b09073a548b65302888 | 6,234 | py | Python | src/python/grpcio/grpc/framework/interfaces/links/links.py | DiracResearch/grpc | 1c6e2b4790aa24d91338c4d8c16e5808cbfe6d2a | [
"BSD-3-Clause"
] | 1 | 2017-09-14T23:59:05.000Z | 2017-09-14T23:59:05.000Z | src/python/grpcio/grpc/framework/interfaces/links/links.py | DiracResearch/grpc | 1c6e2b4790aa24d91338c4d8c16e5808cbfe6d2a | [
"BSD-3-Clause"
] | 1 | 2016-10-19T02:43:04.000Z | 2016-10-31T14:53:06.000Z | src/python/grpcio/grpc/framework/interfaces/links/links.py | DiracResearch/grpc | 1c6e2b4790aa24d91338c4d8c16e5808cbfe6d2a | [
"BSD-3-Clause"
] | 8 | 2016-10-23T00:50:02.000Z | 2019-04-21T11:11:57.000Z | # Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""The low-level ticket-exchanging-links interface of RPC Framework."""
import abc
import collections
import enum
import six
class Protocol(collections.namedtuple('Protocol', ('kind', 'value',))):
"""A sum type for handles to a system that transmits tickets.
Attributes:
kind: A Kind value identifying the kind of value being passed.
value: The value being passed between the high-level application and the
system affording ticket transport.
"""
@enum.unique
class Kind(enum.Enum):
CALL_OPTION = 'call option'
SERVICER_CONTEXT = 'servicer context'
INVOCATION_CONTEXT = 'invocation context'
class Ticket(
collections.namedtuple(
'Ticket',
('operation_id', 'sequence_number', 'group', 'method', 'subscription',
'timeout', 'allowance', 'initial_metadata', 'payload',
'terminal_metadata', 'code', 'message', 'termination', 'protocol',))):
"""A sum type for all values sent from a front to a back.
Attributes:
operation_id: A unique-with-respect-to-equality hashable object identifying
a particular operation.
sequence_number: A zero-indexed integer sequence number identifying the
ticket's place in the stream of tickets sent in one direction for the
particular operation.
group: The group to which the method of the operation belongs. Must be
present in the first ticket from invocation side to service side. Ignored
for all other tickets exchanged during the operation.
method: The name of an operation. Must be present in the first ticket from
invocation side to service side. Ignored for all other tickets exchanged
during the operation.
subscription: A Subscription value describing the interest one side has in
receiving information from the other side. Must be present in the first
ticket from either side. Ignored for all other tickets exchanged during
the operation.
timeout: A nonzero length of time (measured from the beginning of the
operation) to allow for the entire operation. Must be present in the first
ticket from invocation side to service side. Optional for all other
tickets exchanged during the operation. Receipt of a value from the other
side of the operation indicates the value in use by that side. Setting a
value on a later ticket allows either side to request time extensions (or
even time reductions!) on in-progress operations.
allowance: A positive integer granting permission for a number of payloads
to be transmitted to the communicating side of the operation, or None if
no additional allowance is being granted with this ticket.
initial_metadata: An optional metadata value communicated from one side to
the other at the beginning of the operation. May be non-None in at most
one ticket from each side. Any non-None value must appear no later than
the first payload value.
payload: A customer payload object. May be None.
terminal_metadata: A metadata value communicated from one side to the other
at the end of the operation. May be non-None in the same ticket as
the code and message, but must be None for all earlier tickets.
code: A value communicated at operation completion. May be None.
message: A value communicated at operation completion. May be None.
termination: A Termination value describing the end of the operation, or
None if the operation has not yet terminated. If set, no further tickets
may be sent in the same direction.
protocol: A Protocol value or None, with further semantics being a matter
between high-level application and underlying ticket transport.
"""
@enum.unique
class Subscription(enum.Enum):
"""Identifies the level of subscription of a side of an operation."""
NONE = 'none'
TERMINATION = 'termination'
FULL = 'full'
@enum.unique
class Termination(enum.Enum):
"""Identifies the termination of an operation."""
COMPLETION = 'completion'
CANCELLATION = 'cancellation'
EXPIRATION = 'expiration'
SHUTDOWN = 'shutdown'
RECEPTION_FAILURE = 'reception failure'
TRANSMISSION_FAILURE = 'transmission failure'
LOCAL_FAILURE = 'local failure'
REMOTE_FAILURE = 'remote failure'
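# --- Illustrative sketch (editor's addition, not from the original interface module) ---
# A first invocation-side ticket for an operation might be assembled as follows; every
# field value below is an arbitrary placeholder chosen to match the attribute
# descriptions in the docstring above.
def _sketch_first_ticket():
    return Ticket(
        operation_id=object(),                   # unique, hashable per operation
        sequence_number=0,                       # first ticket in this direction
        group='example.Group',
        method='ExampleMethod',
        subscription=Ticket.Subscription.FULL,
        timeout=30,
        allowance=None,
        initial_metadata=None,
        payload=None,
        terminal_metadata=None,
        code=None,
        message=None,
        termination=None,
        protocol=None)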
class Link(six.with_metaclass(abc.ABCMeta)):
"""Accepts and emits tickets."""
@abc.abstractmethod
def accept_ticket(self, ticket):
"""Accept a Ticket.
Args:
ticket: Any Ticket.
"""
raise NotImplementedError()
@abc.abstractmethod
def join_link(self, link):
"""Mates this object with a peer with which it will exchange tickets."""
raise NotImplementedError()
| 43.291667 | 80 | 0.737889 |
e10e772a5892dcbc36987c6f2c2c452a3cf02b2e | 878 | py | Python | modeli.py | AnejJereb/Trg_kriptovalut | 349d219a52bb1bb5b65007166f542b3b50ea77a8 | [
"MIT"
] | null | null | null | modeli.py | AnejJereb/Trg_kriptovalut | 349d219a52bb1bb5b65007166f542b3b50ea77a8 | [
"MIT"
] | null | null | null | modeli.py | AnejJereb/Trg_kriptovalut | 349d219a52bb1bb5b65007166f542b3b50ea77a8 | [
"MIT"
] | null | null | null | import sqlite3
conn = sqlite3.connect('kriptovalute.db')
conn.execute("PRAGMA foreign_keys = ON")
def commit(fun):
"""
Decorator that creates a cursor, passes it to the decorated function,
and then commits the changes to the database.
The original function is available under the nocommit attribute.
"""
def funkcija(*largs, **kwargs):
ret = fun(conn.cursor(), *largs, **kwargs)
conn.commit()
return ret
funkcija.__doc__ = fun.__doc__
funkcija.__name__ = fun.__name__
funkcija.__qualname__ = fun.__qualname__
fun.__qualname__ += '.nocommit'
funkcija.nocommit = fun
return funkcija
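# --- Illustrative sketch (editor's addition, not from the original module) ---
# The commit decorator above is meant for write operations: the wrapped function
# receives a fresh cursor and the changes are committed afterwards. The table columns
# used below (oznaka, ime) are assumptions made purely for illustration.
@commit
def dodaj_valuto(cur, oznaka, ime):
    cur.execute(
        "INSERT INTO kriptovaluta (oznaka, ime) VALUES (?, ?)",
        [oznaka, ime])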
def mozne_valute():
"""
Function that returns all possible currencies.
"""
poizvedba = """
SELECT *
FROM kriptovaluta
"""
print(conn.execute(poizvedba).fetchall())
return conn.execute(poizvedba).fetchall()
#mozne_valute() | 25.823529 | 62 | 0.65262 |
3ddd6f90c8d993b01673b6ece741499b6a678098 | 10,550 | py | Python | trove/tests/util/__init__.py | viettelidc-oss/trove | 2d301d0a21863c6c0fbb9e854c7eb8ad8f19bbc1 | [
"Apache-2.0"
] | 1 | 2020-04-08T07:42:19.000Z | 2020-04-08T07:42:19.000Z | trove/tests/util/__init__.py | phunv-bka/trove | 2d301d0a21863c6c0fbb9e854c7eb8ad8f19bbc1 | [
"Apache-2.0"
] | null | null | null | trove/tests/util/__init__.py | phunv-bka/trove | 2d301d0a21863c6c0fbb9e854c7eb8ad8f19bbc1 | [
"Apache-2.0"
] | 1 | 2020-04-08T07:45:25.000Z | 2020-04-08T07:45:25.000Z | # Copyright (c) 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
:mod:`tests` -- Utility methods for tests.
===================================
.. automodule:: utils
:platform: Unix
:synopsis: Tests for Nova.
"""
import subprocess
try:
EVENT_AVAILABLE = True
except ImportError:
EVENT_AVAILABLE = False
import glanceclient
from keystoneauth1.identity import v3
from keystoneauth1 import session
from neutronclient.v2_0 import client as neutron_client
from novaclient import client as nova_client
from proboscis.asserts import assert_true
from proboscis.asserts import Check
from proboscis.asserts import fail
from proboscis import SkipTest
from six.moves.urllib.parse import unquote
from sqlalchemy import create_engine
from sqlalchemy.sql.expression import text
import tenacity
from troveclient.compat import Dbaas
from trove.common import cfg
from trove.common.utils import import_class
from trove.common.utils import import_object
from trove.tests.config import CONFIG as test_config
from trove.tests.util.client import TestClient
from trove.tests.util import mysql
from trove.tests.util import test_config as CONFIG
from trove.tests.util.users import Requirements
WHITE_BOX = test_config.white_box
FLUSH = text("FLUSH PRIVILEGES;")
CONF = cfg.CONF
def create_client(*args, **kwargs):
"""
Using the User Requirements as arguments, finds a user and grabs a new
DBAAS client.
"""
reqs = Requirements(*args, **kwargs)
user = test_config.users.find_user(reqs)
return create_dbaas_client(user)
def create_dbaas_client(user):
"""Creates a rich client for the Trove API using the test config."""
auth_strategy = None
kwargs = {
'service_type': 'database',
'insecure': test_config.values['trove_client_insecure'],
}
def set_optional(kwargs_name, test_conf_name):
value = test_config.values.get(test_conf_name, None)
if value is not None:
kwargs[kwargs_name] = value
force_url = 'override_trove_api_url' in test_config.values
service_url = test_config.get('override_trove_api_url', None)
if user.requirements.is_admin:
service_url = test_config.get('override_admin_trove_api_url',
service_url)
if service_url:
kwargs['service_url'] = service_url
auth_strategy = None
if user.requirements.is_admin:
auth_strategy = test_config.get('admin_auth_strategy',
test_config.auth_strategy)
else:
auth_strategy = test_config.auth_strategy
set_optional('region_name', 'trove_client_region_name')
if test_config.values.get('override_trove_api_url_append_tenant',
False):
kwargs['service_url'] += "/" + user.tenant
if auth_strategy == 'fake':
from troveclient.compat import auth
class FakeAuth(auth.Authenticator):
def authenticate(self):
class FakeCatalog(object):
def __init__(self, auth):
self.auth = auth
def get_public_url(self):
return "%s/%s" % (test_config.dbaas_url,
self.auth.tenant)
def get_token(self):
return self.auth.tenant
return FakeCatalog(self)
auth_strategy = FakeAuth
if auth_strategy:
kwargs['auth_strategy'] = auth_strategy
if not user.requirements.is_admin:
auth_url = test_config.trove_auth_url
else:
auth_url = test_config.values.get('trove_admin_auth_url',
test_config.trove_auth_url)
if test_config.values.get('trove_client_cls'):
cls_name = test_config.trove_client_cls
kwargs['client_cls'] = import_class(cls_name)
dbaas = Dbaas(user.auth_user, user.auth_key, tenant=user.tenant,
auth_url=auth_url, **kwargs)
dbaas.authenticate()
with Check() as check:
check.is_not_none(dbaas.client.auth_token, "Auth token not set!")
if not force_url and user.requirements.is_admin:
expected_prefix = test_config.dbaas_url
actual = dbaas.client.service_url
msg = "Dbaas management url was expected to start with %s, but " \
"was %s." % (expected_prefix, actual)
check.true(actual.startswith(expected_prefix), msg)
return TestClient(dbaas)
def create_keystone_session(user):
auth = v3.Password(username=user.auth_user,
password=user.auth_key,
project_id=user.tenant_id,
user_domain_name='Default',
project_domain_name='Default',
auth_url=test_config.auth_url)
return session.Session(auth=auth)
def create_nova_client(user, service_type=None):
if not service_type:
service_type = CONF.nova_compute_service_type
openstack = nova_client.Client(
CONF.nova_client_version,
username=user.auth_user,
password=user.auth_key,
user_domain_name='Default',
project_id=user.tenant_id,
auth_url=CONFIG.auth_url,
service_type=service_type, os_cache=False,
cacert=test_config.values.get('cacert', None)
)
return TestClient(openstack)
def create_neutron_client(user):
sess = create_keystone_session(user)
client = neutron_client.Client(
session=sess,
service_type=CONF.neutron_service_type,
region_name=CONFIG.trove_client_region_name,
insecure=CONF.neutron_api_insecure,
endpoint_type=CONF.neutron_endpoint_type
)
return TestClient(client)
def create_glance_client(user):
sess = create_keystone_session(user)
glance = glanceclient.Client(CONF.glance_client_version, session=sess)
return TestClient(glance)
def dns_checker(mgmt_instance):
"""Given a MGMT instance, ensures DNS provisioning worked.
Uses a helper class which, given a mgmt instance (returned by the mgmt
API) can confirm that the DNS record provisioned correctly.
"""
if CONFIG.values.get('trove_dns_checker') is not None:
checker = import_class(CONFIG.trove_dns_checker)
checker()(mgmt_instance)
else:
raise SkipTest("Can't access DNS system to check if DNS provisioned.")
def process(cmd):
output = subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
return output
def string_in_list(str, substr_list):
"""Returns True if the string appears in the list."""
return any([str.find(x) >= 0 for x in substr_list])
def unquote_user_host(user_hostname):
unquoted = unquote(user_hostname)
if '@' not in unquoted:
return unquoted, '%'
if unquoted.endswith('@'):
return unquoted, '%'
splitup = unquoted.split('@')
host = splitup[-1]
user = '@'.join(splitup[:-1])
return user, host
def iso_time(time_string):
"""Return a iso formated datetime: 2013-04-15T19:50:23Z."""
ts = time_string.replace(' ', 'T')
try:
micro = ts.rindex('.')
ts = ts[:micro]
except ValueError:
pass
return '%sZ' % ts
def assert_contains(exception_message, substrings):
for substring in substrings:
assert_true(substring in exception_message,
message="'%s' not in '%s'"
% (substring, exception_message))
# TODO(dukhlov): Still required by trove integration
# Should be removed after trove integration fix
# https://bugs.launchpad.net/trove-integration/+bug/1228306
# TODO(cp16net): DO NOT USE needs to be removed
def mysql_connection():
cls = CONFIG.get('mysql_connection',
"local.MySqlConnection")
if cls == "local.MySqlConnection":
return MySqlConnection()
return import_object(cls)()
class MySqlConnection(object):
def assert_fails(self, ip, user_name, password):
try:
with mysql.create_mysql_connection(ip, user_name, password):
pass
fail("Should have failed to connect: mysql --host %s -u %s -p%s"
% (ip, user_name, password))
except mysql.MySqlPermissionsFailure:
return # Good, this is what we wanted.
except mysql.MySqlConnectionFailure as mcf:
fail("Expected to see permissions failure. Instead got message:"
"%s" % mcf.message)
@tenacity.retry(
wait=tenacity.wait_fixed(3),
stop=tenacity.stop_after_attempt(5),
reraise=True
)
def create(self, ip, user_name, password):
print("Connecting mysql, host: %s, user: %s, password: %s" %
(ip, user_name, password))
return mysql.create_mysql_connection(ip, user_name, password)
class LocalSqlClient(object):
"""A sqlalchemy wrapper to manage transactions."""
def __init__(self, engine, use_flush=True):
self.engine = engine
self.use_flush = use_flush
def __enter__(self):
self.conn = self.engine.connect()
self.trans = self.conn.begin()
return self.conn
def __exit__(self, type, value, traceback):
if self.trans:
if type is not None: # An error occurred
self.trans.rollback()
else:
if self.use_flush:
self.conn.execute(FLUSH)
self.trans.commit()
self.conn.close()
def execute(self, t, **kwargs):
try:
return self.conn.execute(t, kwargs)
except Exception:
self.trans.rollback()
self.trans = None
raise
@staticmethod
def init_engine(user, password, host):
return create_engine("mysql+pymysql://%s:%s@%s:3306" %
(user, password, host),
pool_recycle=1800, echo=True)
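# --- Illustrative sketch (editor's addition, not from the original helpers) ---
# LocalSqlClient is meant to be used as a context manager so that several statements
# share one transaction, which is flushed and committed on exit. The credentials and
# host below are placeholders.
def _sketch_local_sql_client_usage():
    engine = LocalSqlClient.init_engine('root', 'password', '10.0.0.1')
    with LocalSqlClient(engine) as conn:      # __enter__ returns the raw connection
        conn.execute(text("SELECT 1;"))       # FLUSHed and committed on __exit__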
| 32.461538 | 79 | 0.648246 |
108b5cf6c6a7b64ca7b81974ef52a7004042edf4 | 14,833 | py | Python | applications/cli/util/system.py | nparkstar/nauta | 1bda575a01f782d1dc2cd5221122651f184f7167 | [
"Apache-2.0"
] | 390 | 2019-01-23T09:07:00.000Z | 2022-02-20T04:03:34.000Z | applications/cli/util/system.py | nparkstar/nauta | 1bda575a01f782d1dc2cd5221122651f184f7167 | [
"Apache-2.0"
] | 52 | 2019-01-31T12:17:30.000Z | 2022-02-10T00:01:39.000Z | applications/cli/util/system.py | nparkstar/nauta | 1bda575a01f782d1dc2cd5221122651f184f7167 | [
"Apache-2.0"
] | 66 | 2019-01-23T18:59:39.000Z | 2020-10-18T15:24:00.000Z | #
# Copyright (c) 2019 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import time
from enum import Enum
import os
import subprocess
import sys
from typing import List, Tuple
import errno
import socket
import dateutil.tz
import dateutil.parser
from datetime import timedelta
import signal
import platform
from distutils.version import LooseVersion
import click
import distro
from kubernetes.client import V1Pod
from tabulate import tabulate
from util.config import Config
from util.k8s.k8s_info import get_namespaced_pods
from util.logger import initialize_logger
from cli_text_consts import UtilSystemTexts as Texts, VERBOSE_RERUN_MSG
log = initialize_logger('util.system')
WINDOWS_EDITIONS = {
0: "undefined",
1: "ultimate",
2: "home_basic",
3: "home_premium",
4: "enterprise",
5: "home_basic_n",
6: "business",
7: "standard_server",
8: "datacenter_server",
9: "small_business_server",
10: "enterprise_server",
11: "starter",
12: "datacenter_server_core",
13: "standard_server_core",
14: "enterprise_server_core",
15: "enterprise_server_for_itanium_based_systems",
16: "business_n",
17: "web_server",
18: "cluster_server",
19: "home_server",
20: "storage_express_server",
21: "storage_standard_server",
22: "storage_workgroup_server",
23: "storage_enterprise_server",
24: "server_for_small_business",
25: "small_business_server_premium",
29: "web_server_server_core",
39: "datacenter_edition_without_hyperv_server_core",
40: "standard_edition_without_hyperv_server_core",
41: "enterprise_edition_without_hyperv_server_core",
42: "hyperv_server",
48: "pro"
}
class ExternalCliCommand:
def __init__(self, cmd: List[str], env: dict = None, cwd: str = None, timeout: int = None):
"""
:param cmd: List of strings which define a command that will be executed, e.g. ['git', 'clone']
:param env: Dictionary containing environment variables that will be used during command execution
:param cwd: Path to working directory
:param timeout: Timeout in seconds
"""
self.cmd = cmd
self.env = env
self.cwd = cwd
self.timeout = timeout
def __call__(self, *args, **kwargs) -> Tuple[str, int, str]:
"""
Call command.
:param args: Positional arguments will be passed unchanged to the executed command
:param kwargs: Keyword arguments will be passed as long parameters, e.g. passing `foo=bar` will add
`--foo`, 'bar' to the executed command. If keyword argument has binary value, it will be treated as a flag,
e.g. passing `foo=True` argument will add `--foo` to the executed command and passing `foo=False` will not add
anything new to the command. Underscores in keyword argument names will be replaced with hyphens
:return: output, exit code and formatted output of called command
"""
cmd = self.cmd
env = kwargs.get('_env') or self.env
cwd = kwargs.get('_cwd') or self.cwd
for arg in args:
cmd.append(arg)
for kwarg_name, kwarg_value in kwargs.items():
if not kwarg_name.startswith('_'): # kwargs that have name starting with '_' are reserved
option_name = kwarg_name.replace('_', '-')
if kwarg_value is False: # Handle flags
continue
elif kwarg_value is True:
cmd.append(f'--{option_name}')
else: # Standard options
cmd.append(f'--{option_name}')
cmd.append(kwarg_value)
output, exit_code, log_output = execute_system_command(command=cmd, env=env, cwd=cwd, timeout=self.timeout)
if exit_code != 0:
log.error(log_output)
raise RuntimeError(f'Failed to execute command: {self.cmd}')
else:
return output, exit_code, log_output
class ExternalCliClient:
"""
This class allows to easily create a wrapper for external CLI. Usage example:
git_client = ExternalCliClient('git')
git_client.clone('https://repo.git', quiet=True)
git_client.add('-u')
git_client.commit(message='test')
"""
def __init__(self, executable: str, env: dict = None, cwd: str = None, timeout: int = None):
"""
:param executable: Name of external CLI executable e.g. 'git' or 'helm'
:param env: Dictionary containing environment variables that will be used by the client
:param cwd: Path to working directory
:param timeout: Timeout in seconds for commands executed by the client
"""
self.executable = executable
self.env = env
self.cwd = cwd
self.timeout = timeout
def __getattr__(self, item):
return self._make_command(item)
def _make_command(self, name: str):
return ExternalCliCommand(env=self.env, cwd=self.cwd, cmd=[self.executable, name], timeout=self.timeout)
def execute_system_command(command: List[str],
timeout: int = None,
stdin=None,
env=None,
cwd=None,
logs_size: int = 0) -> Tuple[str, int, str]:
"""
Executes a system command
:param command: command to be executed
:param timeout: timeout of execution; when the timeout passes, the command is interrupted
:param stdin: stream with input data for command
:param env: environment within which command is run
:param cwd: command working directory
:param logs_size: if other than 0 - the last logs_size characters of the output are sent to the logger
:return: output - output of the command
exit_code - exit code returned by the command
log_output - output that should be passed to logs. If the real output contains
special characters that are not present in the current system's encoding, this
attribute contains information about the need to change the system's encoding
"""
try:
output = subprocess.check_output( # type: ignore
command,
timeout=timeout,
stderr=subprocess.STDOUT,
universal_newlines=True,
stdin=stdin,
env=env,
cwd=cwd,
encoding='utf-8')
encoded_output = output[-logs_size:].encode('utf-8')
log.debug(f'COMMAND: {command} RESULT: {encoded_output}'.replace('\n', '\\n'))
except subprocess.CalledProcessError as ex:
log.exception(f'COMMAND: {command} RESULT: {ex.output}'.replace('\n', '\\n'))
return ex.output, ex.returncode, ex.output
else:
return output, 0, encoded_output
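# --- Illustrative sketch (editor's addition, not from the original utilities) ---
# Typical call pattern for execute_system_command(); the echo command below is an
# arbitrary illustration.
def _sketch_run_echo():
    output, exit_code, log_output = execute_system_command(['echo', 'hello'])
    if exit_code != 0:
        raise RuntimeError(log_output)
    return output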
def execute_subprocess_command(command: List[str],
stdin=None,
env=None,
cwd=None,
shell=False) -> subprocess.Popen:
log.debug(f'executing COMMAND in subprocess: {str(command)}')
process = subprocess.Popen(
args=command,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
universal_newlines=True,
stdin=stdin,
env=env,
cwd=cwd,
encoding='utf-8',
shell=shell)
if process.poll() is not None:
log.error(f'{command} execution FAIL: {command}')
out, err = process.communicate()
log.error(f'{command} stdout: {out}')
log.error(f'{command} stderr: {err}')
raise RuntimeError(
Texts.COMMAND_EXE_FAIL_ERROR_MSG.format(command=command))
return process
class OS(Enum):
WINDOWS = "win"
MACOS = "darwin"
LINUX = "linux"
@classmethod
def all_str(cls):
return ''.join([e.value for e in cls])
def get_current_os() -> OS:
sys_platform = sys.platform # type: str
if sys_platform.startswith(OS.WINDOWS.value):
return OS.WINDOWS
elif sys_platform.startswith(OS.LINUX.value):
return OS.LINUX
elif sys_platform.startswith(OS.MACOS.value):
return OS.MACOS
raise RuntimeError(
Texts.UNSUPPORTED_PLATFORM_ERROR_MSG.format(
sys_platform=sys_platform, supported_os=OS.all_str()))
def check_port_availability(port: int) -> bool:
"""
Checks whether a port given as a parameter is available for the application.
:param port: port to be checked
:return: True if the port is available, False otherwise
In case of any problems it throws an exception
"""
ret_value = True
try:
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.bind(("127.0.0.1", port))
except socket.error as e:
if e.errno == errno.EADDRINUSE:
log.debug(f"Port {port} is occupied.")
ret_value = False
else:
# something else raised the socket.error exception
error_msg = Texts.PORT_AVAILABILITY_CHECK_ERROR_MSG
log.exception(error_msg)
raise RuntimeError(error_msg) from e
return ret_value
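# --- Illustrative sketch (editor's addition, not from the original utilities) ---
# check_port_availability() can be used to pick the first free port in a range; the
# port numbers below are arbitrary.
def _sketch_find_free_port(start: int = 3000, end: int = 3010):
    for port in range(start, end):
        if check_port_availability(port):
            return port
    return None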
def format_timestamp_for_cli(timestamp: str) -> str:
"""
Change timestamp from e.g. "2018-10-11T20:20:30Z" to "2018-10-11 9:20:30 PM"
(assuming that local timezone is +01:00).
:param timestamp: timestamp which will be converted
:return: formatted version of the timestamp
"""
cli_timestamp = dateutil.parser.parse(timestamp).astimezone(
dateutil.tz.tzlocal()).strftime("%Y-%m-%d %I:%M:%S %p")
return cli_timestamp
def format_duration_for_cli(timedelta: timedelta) -> str:
days = timedelta.days
hours, remainder = divmod(timedelta.seconds, 3600)
minutes, seconds = divmod(remainder, 60)
return '{}d {}h {}m {}s'.format(days, hours, minutes, seconds)
def wait_for_ctrl_c():
""" Waits for pressing Ctrl-C key by a user. If Ctrl-C key has been pressed - finishes execution """
continue_loop = True
def signal_handler(signal, frame):
nonlocal continue_loop
continue_loop = False
signal.signal(signal.SIGINT, signal_handler)
signal.signal(signal.SIGTERM, signal_handler)
if get_current_os() in (OS.LINUX, OS.MACOS):
os.system("setterm --cursor on")
while continue_loop:
time.sleep(0.1)
def handle_error(logger=None,
log_msg: str = None,
user_msg: str = None,
add_verbosity_msg: bool = False):
"""
Handle error in cli. Log message may be printed. User message may be printed or not, with or without verbosity
usage info. Execution may end with an exit code. Each combination of these 3 possibilities is achievable by
specifying correct arguments. Default behaviour is exit with code 1, log nothing and print nothing.
:param logger: logger which will handle log message. If None, then no message is logged.
:param log_msg: message to be shown in log. If None, then no message is logged.
:param user_msg: message to be shown to the user. If None, then no message is shown.
:param add_verbosity_msg: whether to add information about -v usage or not.
:return:
"""
if logger is not None and log_msg is not None:
logger.exception(log_msg)
# Internationalization can be plugged in here.
if user_msg is not None:
click.echo(user_msg +
(" " + VERBOSE_RERUN_MSG if add_verbosity_msg else ""))
try:
check_nauta_pods()
except Exception:
logger.exception("Failed to get logs of pods in nauta namespace.")
def get_windows_edition():
windows_edition_number, _, _ = execute_system_command([
"powershell.exe",
"(Get-WmiObject Win32_OperatingSystem).OperatingSystemSKU"
])
return WINDOWS_EDITIONS[int(windows_edition_number)]
def get_os_version() -> Tuple[str, LooseVersion]:
system_str = platform.system()
if system_str == "Darwin":
return "macos", LooseVersion(platform.mac_ver()[0])
elif system_str == "Windows":
if LooseVersion(platform.release()) >= LooseVersion("10"):
return "windows" + "_" + get_windows_edition(), LooseVersion(
platform.release())
else:
return "windows", LooseVersion(platform.release())
elif system_str == "Linux":
os_info = distro.info()
return os_info["id"], LooseVersion(os_info["version"])
return "", LooseVersion("0")
def get_pod_logs(pod: V1Pod, namespace: str, tail: int = None):
"""
Get logs of all the containers of a given pod.
:param pod:
:param namespace:
:param tail:
:return: list of container logs of a given pod - List[List[str]]
"""
outputs: List[str] = []
for container in pod.status.container_statuses:
try:
command = ['kubectl', 'logs', '-n', namespace, pod.metadata.name, '-c', container.name]
if tail:
command.append(f'tail={tail}')
output, _, _ = execute_system_command(command=command)
log.debug(output)
outputs.append(output)
except Exception:
log.exception(f'Failed to get {pod.metadata.name} pod logs.')
return outputs
def check_nauta_pods():
"""
Check if there are failed pods. If there are any, display a list of their names and
save logs in logs directory.
"""
pods = get_namespaced_pods(label_selector=None, namespace='nauta')
failed_pods = [pod for pod in pods if pod.status.phase == 'Failed']
if failed_pods:
click.echo("Following nauta components have failed:")
tabulate([pod.metadata.name for pod in failed_pods], headers=["Pod name"])
conf_path = Config().config_path
for pod in failed_pods:
logs = get_pod_logs(pod=pod, namespace='nauta', tail=1000)
for i, log in enumerate(logs):
pod_name = pod.metadata.name
container_name = pod.status.container_statuses[i].name
with open(f'{conf_path}/logs/{pod_name}_{container_name}.log', mode='w') as log_file:
log_file.writelines(log)
click.echo('Contact Nauta administrator.')
click.echo(f'Check logs folder in your config directory({conf_path}) to get more information.')
| 37.0825 | 119 | 0.644779 |
6fc842bbf51dfb56aea3dfc6f6d239308e1e4142 | 9,436 | py | Python | src/HTMLgen/JpegImagePluginH.py | marinier/soardoc | be4023c8abd6ca93545d75e7edb4328f236ee82c | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | src/HTMLgen/JpegImagePluginH.py | marinier/soardoc | be4023c8abd6ca93545d75e7edb4328f236ee82c | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | src/HTMLgen/JpegImagePluginH.py | marinier/soardoc | be4023c8abd6ca93545d75e7edb4328f236ee82c | [
"BSD-2-Clause-FreeBSD"
] | 1 | 2021-02-03T14:17:42.000Z | 2021-02-03T14:17:42.000Z | #
# The Python Imaging Library.
# $Id: JpegImagePluginH.py 2666 2003-10-23 22:14:14Z rmarinie $
#
# JPEG (JFIF) file handling
#
# See "Digital Compression and Coding of Continous-Tone Still Images,
# Part 1, Requirements and Guidelines" (CCITT T.81 / ISO 10918-1)
#
# History:
# 95-09-09 fl Created
# 95-09-13 fl Added full parser
# 96-03-25 fl Added hack to use the IJG command line utilities
# 96-05-05 fl Workaround Photoshop 2.5 CMYK polarity bug
# 0.1 96-05-28 fl Added draft support, JFIF version
# 0.2 96-12-30 fl Added encoder options, added progression property
# 0.3 97-08-27 fl Save mode 1 images as BW
#
# Copyright (c) Secret Labs AB 1997.
# Copyright (c) Fredrik Lundh 1995-96.
#
# See the README file for information on usage and redistribution.
#
__version__ = "0.3"
import array, string
import ImageH, ImageFileH
def i16(c):
return ord(c[1]) + (ord(c[0])<<8)
def i32(c):
return ord(c[3]) + (ord(c[2])<<8) + (ord(c[1])<<16) + (ord(c[0])<<24)
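# --- Illustrative note (editor's addition, not from the original plugin) ---
# i16/i32 decode big-endian integers from (Python 2) byte strings, e.g. the two-byte
# length field that follows every variable-length JPEG marker:
#   i16('\x00\x10') == 16           (a 16-byte segment, length field included)
#   i32('\x00\x00\x01\x00') == 256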
#
# Parser
def Skip(self, marker):
self.fp.read(i16(self.fp.read(2))-2)
def APP(self, marker):
#
# Application marker. Store these in the APP dictionary.
# Also look for well-known application markers.
s = self.fp.read(i16(self.fp.read(2))-2)
self.app["APP%d" % (marker&15)] = s
if marker == 0xFFE0 and s[:4] == "JFIF":
self.info["jfif"] = i16(s[5:])
if marker == 0xFFEE and s[:5] == "Adobe":
self.info["adobe"] = i16(s[5:])
self.info["adobe_transform"] = ord(s[11])
def SOF(self, marker):
#
# Start of frame marker. Defines the size and mode of the
# image. JPEG is colour blind, so we use some simple
# heuristics to map the number of layers to an appropriate
# mode. Note that this could be made a bit brighter, by
# looking for JFIF and Adobe APP markers.
s = self.fp.read(i16(self.fp.read(2))-2)
self.size = i16(s[3:]), i16(s[1:])
self.bits = ord(s[0])
if self.bits != 8:
raise SyntaxError, "cannot handle %d-bit layers" % self.bits
self.layers = ord(s[5])
if self.layers == 1:
self.mode = "L"
elif self.layers == 3:
self.mode = "RGB"
elif self.layers == 4:
self.mode = "CMYK"
else:
raise SyntaxError, "cannot handle %d-layer images" % self.layers
if marker in [0xFFC2, 0xFFC6, 0xFFCA, 0xFFCE]:
self.info["progression"] = 1
for i in range(6, len(s), 3):
t = s[i:i+3]
# 4-tuples: id, vsamp, hsamp, qtable
self.layer.append(t[0], ord(t[1])/16, ord(t[1])&15, ord(t[2]))
def DQT(self, marker):
#
# Define quantization table. Support baseline 8-bit tables
# only. Note that there might be more than one table in
# each marker.
# FIXME: The quantization tables can be used to estimate the
# compression quality.
s = self.fp.read(i16(self.fp.read(2))-2)
while len(s):
if len(s) < 65:
raise SyntaxError, "bad quantization table marker"
v = ord(s[0])
if v/16 == 0:
self.quantization[v&15] = array.array("b", s[1:65])
s = s[65:]
else:
pass
# raise SyntaxError, "bad quantization table element size"
#
# JPEG marker table
MARKER = {
0xFFC0: ("SOF0", "Baseline DCT", SOF),
0xFFC1: ("SOF1", "Extended Sequential DCT", SOF),
0xFFC2: ("SOF2", "Progressive DCT", SOF),
0xFFC3: ("SOF3", "Spatial lossless", SOF),
0xFFC4: ("DHT", "Define Huffman table", Skip),
0xFFC5: ("SOF5", "Differential sequential DCT", SOF),
0xFFC6: ("SOF6", "Differential progressive DCT", SOF),
0xFFC7: ("SOF7", "Differential spatial", SOF),
0xFFC8: ("JPG", "Extension", None),
0xFFC9: ("SOF9", "Extended sequential DCT (AC)", SOF),
0xFFCA: ("SOF10", "Progressive DCT (AC)", SOF),
0xFFCB: ("SOF11", "Spatial lossless DCT (AC)", SOF),
0xFFCC: ("DAC", "Define arithmetic coding conditioning", Skip),
0xFFCD: ("SOF13", "Differential sequential DCT (AC)", SOF),
0xFFCE: ("SOF14", "Differential progressive DCT (AC)", SOF),
0xFFCF: ("SOF15", "Differential spatial (AC)", SOF),
0xFFD0: ("RST0", "Restart 0", None),
0xFFD1: ("RST1", "Restart 1", None),
0xFFD2: ("RST2", "Restart 2", None),
0xFFD3: ("RST3", "Restart 3", None),
0xFFD4: ("RST4", "Restart 4", None),
0xFFD5: ("RST5", "Restart 5", None),
0xFFD6: ("RST6", "Restart 6", None),
0xFFD7: ("RST7", "Restart 7", None),
0xFFD8: ("SOI", "Start of image", None),
0xFFD9: ("EOI", "End of image", None),
0xFFDA: ("SOS", "Start of scan", Skip),
0xFFDB: ("DQT", "Define quantization table", DQT),
0xFFDC: ("DNL", "Define number of lines", Skip),
0xFFDD: ("DRI", "Define restart interval", Skip),
0xFFDE: ("DHP", "Define hierarchical progression", SOF),
0xFFDF: ("EXP", "Expand reference component", Skip),
0xFFE0: ("APP0", "Application segment 0", APP),
0xFFE1: ("APP1", "Application segment 1", APP),
0xFFE2: ("APP2", "Application segment 2", APP),
0xFFE3: ("APP3", "Application segment 3", APP),
0xFFE4: ("APP4", "Application segment 4", APP),
0xFFE5: ("APP5", "Application segment 5", APP),
0xFFE6: ("APP6", "Application segment 6", APP),
0xFFE7: ("APP7", "Application segment 7", APP),
0xFFE8: ("APP8", "Application segment 8", APP),
0xFFE9: ("APP9", "Application segment 9", APP),
0xFFEA: ("APP10", "Application segment 10", APP),
0xFFEB: ("APP11", "Application segment 11", APP),
0xFFEC: ("APP12", "Application segment 12", APP),
0xFFED: ("APP13", "Application segment 13", APP),
0xFFEE: ("APP14", "Application segment 14", APP),
0xFFEF: ("APP15", "Application segment 15", APP),
0xFFF0: ("JPG0", "Extension 0", None),
0xFFF1: ("JPG1", "Extension 1", None),
0xFFF2: ("JPG2", "Extension 2", None),
0xFFF3: ("JPG3", "Extension 3", None),
0xFFF4: ("JPG4", "Extension 4", None),
0xFFF5: ("JPG5", "Extension 5", None),
0xFFF6: ("JPG6", "Extension 6", None),
0xFFF7: ("JPG7", "Extension 7", None),
0xFFF8: ("JPG8", "Extension 8", None),
0xFFF9: ("JPG9", "Extension 9", None),
0xFFFA: ("JPG10", "Extension 10", None),
0xFFFB: ("JPG11", "Extension 11", None),
0xFFFC: ("JPG12", "Extension 12", None),
0xFFFD: ("JPG13", "Extension 13", None),
0xFFFE: ("COM", "Comment", Skip)
}
def _accept(prefix):
return prefix[0] == "\377"
class JpegImageFile(ImageFileH.ImageFile):
format = "JPEG"
format_description = "JPEG (ISO 10918)"
def _open(self):
s = self.fp.read(1)
if ord(s[0]) != 255:
raise SyntaxError, "not an JPEG file"
# Create attributes
self.bits = self.layers = 0
# JPEG specifics (internal)
self.layer = []
self.huffman_dc = {}
self.huffman_ac = {}
self.quantization = {}
self.app = {}
while 1:
s = s + self.fp.read(1)
i = i16(s)
if MARKER.has_key(i):
name, description, handler = MARKER[i]
# print hex(i), name, description
if handler != None:
handler(self, i)
if i == 0xFFDA: # start of scan
rawmode = self.mode
if self.mode == "CMYK" and self.info.has_key("adobe"):
rawmode = "CMYK;I" # Photoshop 2.5 is broken!
self.tile = [("jpeg", (0,0) + self.size, 0, (rawmode, ""))]
# self.offset = self.fp.tell()
break
s = self.fp.read(1)
else:
raise SyntaxError, "no marker found"
def draft(self, mode, size):
if len(self.tile) != 1:
return
d, e, o, a = self.tile[0]
scale = 0
if a == "RGB" and mode in ["L", "YCC"]:
self.mode = a = mode
if size:
scale = max(self.size[0] / size[0], self.size[1] / size[1])
for s in [8, 4, 2, 1]:
if scale >= s:
break
e = e[0], e[1], (e[2]-e[0]+s-1)/s+e[0], (e[3]-e[1]+s-1)/s+e[1]
self.size = ((self.size[0]+s-1)/s, (self.size[1]+s-1)/s)
scale = s
self.tile = [(d, e, o, a)]
self.decoderconfig = (scale, 1)
return self
def load_hack(self):
# ALTERNATIVE: handle JPEGs via the IJG command line utilities
import tempfile, os
file = tempfile.mktemp()
os.system("djpeg %s >%s" % (self.filename, file))
try:
self.im = ImageH.core.open_ppm(file)
finally:
try: os.unlink(file)
except: pass
self.mode = self.im.mode
self.size = self.im.size
self.tile = []
def _fetch(dict, key, default = 0):
try:
return dict[key]
except KeyError:
return default
RAWMODE = {
"1": "L",
"L": "L",
"RGB": "RGB",
"RGBA": "RGB",
"CMYK": "CMYK",
}
def _save(im, fp, filename):
try:
rawmode = RAWMODE[im.mode]
except KeyError:
raise IOError, "cannot write mode %s as JPEG" % im.mode
# get keyword arguments
im.encoderconfig = (_fetch(im.encoderinfo, "quality", 0),
im.encoderinfo.has_key("progressive"),
_fetch(im.encoderinfo, "smooth", 0),
im.encoderinfo.has_key("optimize"),
_fetch(im.encoderinfo, "streamtype", 0))
ImageFileH._save(im, fp, [("jpeg", (0,0)+im.size, 0, rawmode)])
def _save_hack(im, fp, filename):
# ALTERNATIVE: handle JPEGs via the IJG command line utilities.
import os
file = im._dump()
os.system("cjpeg %s >%s" % (file, filename))
try: os.unlink(file)
except: pass
# -------------------------------------------------------------------q-
# Registry stuff
ImageH.register_open("JPEG", JpegImageFile, _accept)
ImageH.register_save("JPEG", _save)
ImageH.register_extension("JPEG", ".jfif")
ImageH.register_extension("JPEG", ".jpe")
ImageH.register_extension("JPEG", ".jpg")
ImageH.register_extension("JPEG", ".jpeg")
ImageH.register_mime("JPEG", "image/jpeg")
| 29.395639 | 73 | 0.609262 |
5e695c5f937c34672964f4c256e9a919e9ffc3dc | 73,761 | py | Python | omtk/modules/rigFaceAvarGrps.py | CDufour909/omtk_unreal | 64ae76a7b0a3f73a4b32d3b330f3174d02c54234 | [
"MIT"
] | null | null | null | omtk/modules/rigFaceAvarGrps.py | CDufour909/omtk_unreal | 64ae76a7b0a3f73a4b32d3b330f3174d02c54234 | [
"MIT"
] | null | null | null | omtk/modules/rigFaceAvarGrps.py | CDufour909/omtk_unreal | 64ae76a7b0a3f73a4b32d3b330f3174d02c54234 | [
"MIT"
] | null | null | null | import copy
import itertools
import logging
import pymel.core as pymel
from collections import defaultdict
from omtk.core.utils import decorator_uiexpose
from omtk.libs import libCtrlShapes
from omtk.libs import libPymel
from omtk.libs import libPython
from omtk.libs import libRigging
from omtk.libs.libRigging import get_average_pos_between_nodes
from omtk.models import model_ctrl_linear, model_avar_linear
from omtk.modules import rigFaceAvar
log = logging.getLogger('omtk')
def _find_mid_avar(avars):
jnts = [avar.jnt for avar in avars]
nearest_jnt = get_average_pos_between_nodes(jnts)
return avars[jnts.index(nearest_jnt)] if nearest_jnt else None
#
# Ctrls
#
class BaseCtrlUpp(rigFaceAvar.BaseCtrlFace):
"""
Deprecated, defined for backward compatibility (so libSerialization recognizes it and we can access the ctrl shapes)
"""
pass
class BaseCtrlLow(rigFaceAvar.BaseCtrlFace):
"""
Deprecated, defined for backward compatibility (so libSerialization recognizes it and we can access the ctrl shapes)
"""
pass
class CtrlFaceUpp(rigFaceAvar.BaseCtrlFace):
"""
Base controller class for an avar controlling the top portion of an AvarGrp.
"""
def __createNode__(self, size=1.0, **kwargs):
return libCtrlShapes.create_triangle_upp(size=size)
class CtrlFaceLow(rigFaceAvar.BaseCtrlFace):
"""
Base controller class for an avar controlling the bottom portion of an AvarGrp.
"""
def __createNode__(self, size=1.0, **kwargs):
return libCtrlShapes.create_triangle_low(size=size)
class CtrlFaceAll(rigFaceAvar.BaseCtrlFace):
ATTR_NAME_GLOBAL_SCALE = 'globalScale'
"""
Base controller class for an avar controlling all the avars of an AvarGrp.
"""
def __createNode__(self, size=1.0, **kwargs):
# todo: find the best shape
transform, _ = libCtrlShapes.create_shape_circle(size=size, normal=(0, 0, 1))
return transform
class CtrlFaceHorizontal(rigFaceAvar.BaseCtrlFace):
"""
Base controller class for an avar controlling the left or right porsion of an AvarGrp.
"""
def __createNode__(self, size=1.0, **kwargs):
return libCtrlShapes.create_triangle_left(size=size)
class CtrlFaceMacroL(rigFaceAvar.BaseCtrlFace):
def __createNode__(self, size=1.0, **kwargs):
return libCtrlShapes.create_triangle_left(size=size)
class CtrlFaceMacroR(rigFaceAvar.BaseCtrlFace):
def __createNode__(self, size=1.0, **kwargs):
return libCtrlShapes.create_triangle_right(size=size)
#
# Models
#
class ModelMicroAvarCtrl(model_ctrl_linear.ModelCtrlLinear):
def connect(self, avar, avar_grp, ud=True, fb=True, lr=True, yw=True, pt=True, rl=True, sx=True, sy=True, sz=True):
avar_tweak = avar_grp._get_micro_tweak_avars_dict().get(avar, None)
if avar_tweak:
super(ModelMicroAvarCtrl, self).connect(avar, avar_grp, ud=ud, fb=fb, lr=lr, yw=False, pt=False, rl=False,
sx=False, sy=False, sz=False)
super(ModelMicroAvarCtrl, self).connect(avar_tweak, avar_grp, ud=False, fb=False, lr=False, yw=yw, pt=pt,
rl=rl, sx=sx, sy=sy, sz=sz)
else:
super(ModelMicroAvarCtrl, self).connect(avar, avar_grp, ud=ud, fb=fb, lr=lr, yw=yw, pt=pt, rl=rl, sx=sx,
sy=sy, sz=sz)
class ModelCtrlMacroAll(model_ctrl_linear.ModelCtrlLinear):
def connect(self, avar, avar_grp, ud=True, fb=True, lr=True, yw=True, pt=True, rl=True, sx=True, sy=True, sz=True):
super(ModelCtrlMacroAll, self).connect(avar, avar_grp, ud=True, fb=True, lr=True, yw=True, pt=True, rl=True,
sx=True, sy=True, sz=True)
# #
# # Compute the calibration automatically
# #
#
# nomenclature_rig = self.get_nomenclature_rig()
#
# # Compute the calibration automatically
# attr_calibration_lr = libRigging.create_utility_node(
# 'multiplyDivide',
# name=nomenclature_rig.resolve('getCalibrationLr'),
# input1X=avar.attr_multiplier_lr,
# input2X=avar._attr_length_u
# ).outputX
# attr_calibration_ud = libRigging.create_utility_node(
# 'multiplyDivide',
# name=nomenclature_rig.resolve('getCalibrationUd'),
# input1X=avar.attr_multiplier_ud,
# input2X=avar._attr_length_v
# ).outputX
# attr_calibration_fb = libRigging.create_utility_node(
# 'multiplyDivide',
# name=nomenclature_rig.resolve('getCalibrationFb'),
# input1X=avar.attr_multiplier_fb,
# input2X=avar._attr_length_u
# ).outputX
#
# pymel.connectAttr(attr_calibration_lr, self.attr_sensitivity_tx)
# pymel.connectAttr(attr_calibration_ud, self.attr_sensitivity_ty)
# pymel.connectAttr(attr_calibration_fb, self.attr_sensitivity_tz)
def build(self, avar, parent_pos=None, parent_rot=None, **kwargs):
parent_pos = avar._grp_output
# parent_rot = avar._grp_output
super(ModelCtrlMacroAll, self).build(
avar,
parent_pos=parent_pos,
parent_rot=parent_rot,
**kwargs)
def calibrate(self, **kwargs):
"""
Since the avar_all macro follow directly the surface, we don't need to calibrate it.
"""
pass
#
# Models
#
class AvarGrp(
rigFaceAvar.AbstractAvar): # todo: why do we inherit from AbstractAvar exactly? Is inheriting from module more logical?
"""
Base class for a group of 'avars' that share the same properties.
"""
# Define the class to use for all avars.
_CLS_AVAR = rigFaceAvar.AvarSimple
_CLS_CTRL_MICRO = rigFaceAvar.CtrlFaceMicro
_CLS_CTRL_TWEAK = None # In our case we hide the tweak avars by default since they are controlled using their parent controller.
_CLS_MODEL_CTRL_MICRO = ModelMicroAvarCtrl
_CLS_MODEL_CTRL_TWEAK = None
SHOW_IN_UI = True
# Enable this flag if the module contain only one influence.
# ex: The FaceJaw module can accept two objects. The jaw and the jaw_end. However we consider the jaw_end as extra information for the positioning.
# TODO: Find a generic way to get the InteractiveCtrl follicle position.
SINGLE_INFLUENCE = False
# Set this flag to false if each avars need to have an individual parent.
# Please note that this have not been tested when used with 'tweak' avars.
# This flag have been added to diminish the chances of breaking something in production (see Task #70413),
# however we should check if it is possible to always have this behavior by default.
# todo: Find a generic way.
SINGLE_PARENT = True
def __init__(self, *args, **kwargs):
super(AvarGrp, self).__init__(*args, **kwargs)
        # This property contains all the MICRO Avars.
        # Micro Avars directly drive the input influences of the Module.
        # Macro Avars don't directly drive anything by themselves but are generally connected to Micro Avars.
        # It is really important that you implement Macro Avars in properties other than this one.
self.avars = []
self.preDeform = False
self._grp_anm_avars_macro = None
self._grp_anm_avars_micro = None
self._grp_rig_avars_macro = None
self._grp_rig_avars_micro = None
#
# Avar properties
    # Note that these are only accessible after the avars have been built.
#
def _iter_all_avars(self):
"""
Generator that return all avars, macro and micros.
Override this method if your module implement new avars.
:return: An iterator that yield avars.
"""
for avar in self.avars:
yield avar
def get_all_avars(self):
"""
:return: All macro and micro avars of the module.
This is mainly used to automate the handling of avars and remove the need to abuse class inheritance.
"""
return list(self._iter_all_avars())
def get_avars_upp(self):
"""
:return: All the upper section avars (micro and macros).
"""
return self.get_avars_micro_upp()
@libPython.memoized_instancemethod
def get_avars_micro_upp(self):
"""
Return all the avars controlling the AvarGrp upper area.
ex: For lips, this will return the upper lip influences (without any corners).
:return: A list of Avar instances.
"""
# TODO: Find a better way
fnFilter = lambda avar: 'upp' in avar.name.lower()
return filter(fnFilter, self.avars)
def get_avars_low(self):
"""
:return: All the lower section avars (micro and macros).
"""
return self.get_avars_micro_low()
@libPython.memoized_instancemethod
def get_avars_micro_low(self):
"""
Return all the avars controlling the AvarGrp lower area.
ex: For the lips, this will return the lower lip influences (without any corners).
        :return: A list of Avar instances.
"""
# TODO: Find a better way
fnFilter = lambda avar: 'low' in avar.name.lower()
return filter(fnFilter, self.avars)
#
# Influence properties
#
@libPython.cached_property()
def jnts(self):
        fn_is_joint = lambda obj: libPymel.isinstance_of_transform(obj, pymel.nodetypes.Joint)
        return filter(fn_is_joint, self.input)
@libPython.memoized_instancemethod
def _get_absolute_parent_level_by_influences(self):
result = defaultdict(list)
for jnt in self.jnts:
level = libPymel.get_num_parents(jnt)
result[level].append(jnt)
return dict(result)
# todo: implement Tree datatype
def _get_highest_absolute_parent_level(self):
return min(self._get_absolute_parent_level_by_influences().keys())
def _get_hierarchy_depth(self):
return max(self._get_relative_parent_level_by_influences().keys())
def _can_create_tweak_avars(self):
# If the hierarchy depth is of only 1, the avar_all have priority.
# This is because there's a potential for ambiguity between the all_avar and tweak avars.
lowest_relative_parent_level = self._get_hierarchy_depth()
if lowest_relative_parent_level == 1 and self.get_influence_all():
return False
return True
@libPython.memoized_instancemethod
def _get_relative_parent_level_by_influences(self):
result = defaultdict(list)
objs_by_absolute_parent_level = self._get_absolute_parent_level_by_influences()
top_level = self._get_highest_absolute_parent_level()
for parent_level, objs in objs_by_absolute_parent_level.iteritems():
result[parent_level - top_level] = objs
return dict(result)
@libPython.memoized_instancemethod
def get_influence_all(self):
"""
If the rigger provided a global parent for the influences in the module,
it will be considered as an influence for the 'all' macro avar.
"""
# If there's only one influence, we'll handle it as a simple avar.
if len(self.jnts) <= 1:
return None
objs_by_absolute_parent_level = self._get_absolute_parent_level_by_influences()
top_level = self._get_highest_absolute_parent_level()
root_objs = objs_by_absolute_parent_level[top_level]
if len(root_objs) == 1:
return root_objs[0]
return None
@libPython.memoized_instancemethod
def get_influence_micros(self):
"""
:return: Only the influence used in micro avars.
"""
result = set()
for avar in self.avars:
if self._is_tweak_avar(avar):
continue
result.update(avar.jnts)
return list(result)
@libPython.memoized_instancemethod
def _get_micro_avar_by_influence(self, influence):
for avar in self.avars:
if influence in avar.input:
return avar
@libPython.memoized_instancemethod
def _get_micro_tweak_avars_dict(self):
result = {}
influences_by_parent_level = self._get_relative_parent_level_by_influences()
top_level = self._get_hierarchy_depth()
for influence in influences_by_parent_level[top_level]:
parent_influence = influence.getParent()
avar = self._get_micro_avar_by_influence(influence)
avar_parent = self._get_micro_avar_by_influence(parent_influence)
if avar and avar_parent:
result[avar_parent] = avar
return result
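    # Example mapping (hypothetical names): with a hierarchy like jnt_avar_01 -> jnt_avar_01_tweak,
    # the dict above maps the micro avar of jnt_avar_01 to the tweak avar of jnt_avar_01_tweak.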
def _is_tweak_avar(self, avar):
return avar in self._get_micro_tweak_avars_dict().values()
#
# Avar methods
#
def get_multiplier_u(self):
return 1.0
def get_multiplier_v(self):
return 1.0
def _get_default_ctrl_size(self, jnts=None, max_ctrl_size=None, epsilon=0.001):
"""
        Resolve the desired ctrl size.
        One thing we are sure of is that ctrls should not overlap,
        so we'll cap their radius at half of the shortest distance between influences.
        The radius also cannot be bigger than 5% of the head length (see the 0.05 factor below).
        :param epsilon: Prevent the ctrl from disappearing if two influences share the same location.
"""
result = 1
# Resolve maximum ctrl size from head joint
head_jnt = self.get_head_jnt()
try:
head_length = self.rig.get_head_length(head_jnt)
except Exception, e:
head_length = None
self.warning(str(e))
if head_length:
max_ctrl_size = head_length * 0.05
if jnts is None:
# Use only the micro influence as reference since the distance
# between micro and tweak avars can be very small.
jnts = self.get_influence_micros()
if len(jnts) > 1:
distances = [libPymel.distance_between_nodes(jnt_src, jnt_dst) for jnt_src, jnt_dst in
itertools.permutations(jnts, 2)]
distances = filter(lambda x: x > epsilon, distances)
if distances:
result = min(distances) / 2.0
if max_ctrl_size is not None and result > max_ctrl_size:
self.debug("Limiting ctrl size to {}".format(max_ctrl_size))
result = max_ctrl_size
else:
self.debug("Not enough ctrls to guess size. Using default {}".format(result))
return result
def _get_avars_influences(self):
"""
        Return the influences that need to have avars associated with them.
        Normally for 3 influences, we create 3 avars.
        However if the SINGLE_INFLUENCE flag is up, only the first influence will be rigged; the others
        might be handled upstream (ex: FaceJaw).
"""
if self.SINGLE_INFLUENCE:
return [self.jnt]
else:
            return copy.copy(self.jnts) # copy to prevent modifying the cache accidentally by reference.
def validate(self):
"""
Ensure all influences are influencing a geometry.
        This lets us warn the user at validation time instead of letting them find out at build time.
"""
super(AvarGrp, self).validate()
# Try to resolve the head joint.
# With strict=True, an exception will be raised if nothing is found.
if self.get_head_jnt(strict=False) is None:
raise Exception("Can't resolve the head. Please create a Head module.")
def _create_micro_avars(self):
"""
For each influence, create it's associated avar instance.
"""
        # For various reasons, we may have a mismatch between the stored Avars and the number of influences.
        # The best way to deal with this is to check each existing Avar and see if we need to re-create it or keep it.
avar_influences = self._get_avars_influences()
if not avar_influences:
raise Exception("Found no avars!")
new_avars = []
for avar in self.avars:
            # Any existing Avar that we don't recognize will be deleted.
            # Be aware that the .avars property only stores MICRO Avars. Macro Avars need to be implemented in their own properties.
if avar.jnt not in avar_influences:
self.warning("Unexpected Avar {0} will be deleted.".format(avar.name))
            # Any existing Avar that doesn't have the desired datatype will be re-created.
            # However the old value will be passed along so the factory method can handle specific tricky cases.
else:
new_avar = self._init_avar(
self._CLS_AVAR,
avar,
ref=avar.jnt
)
new_avars.append(new_avar)
for influence in avar_influences:
if not any(True for avar in new_avars if influence == avar.jnt):
new_avar = self._init_avar(
self._CLS_AVAR,
None, # no previous value
ref=influence
)
new_avars.append(new_avar)
return new_avars
def _create_avars(self):
"""
Create the avars objects if they were never created (generally on first build).
"""
# Create avars if needed (this will get skipped if the module have already been built once)
self.avars = self._create_micro_avars()
def _build_avars(self, parent=None, connect_global_scale=None, create_ctrls=True, constraint=True, **kwargs):
if parent is None:
parent = not self.preDeform
if connect_global_scale is None:
connect_global_scale = self.preDeform
# Resolve the U and V modifiers.
# Note that this only applies to avars on a surface.
# TODO: Move to AvarGrpOnSurface
mult_u = self.get_multiplier_u() if self.surface else None
mult_v = self.get_multiplier_v() if self.surface else None
# Build avars and connect them to global avars
avar_influences = self._get_avars_influences()
for jnt, avar in zip(avar_influences, self.avars):
self.configure_avar(avar)
self._build_avar_micro(avar,
create_ctrl=create_ctrls,
constraint=constraint,
mult_u=mult_u,
mult_v=mult_v,
connect_global_scale=connect_global_scale,
**kwargs
)
# Connect 'tweak' avars to their equivalent.
for avar_micro, avar_tweak in self._get_micro_tweak_avars_dict().iteritems():
libRigging.connectAttr_withBlendWeighted(avar_micro.attr_lr, avar_tweak.attr_lr)
libRigging.connectAttr_withBlendWeighted(avar_micro.attr_ud, avar_tweak.attr_ud)
libRigging.connectAttr_withBlendWeighted(avar_micro.attr_fb, avar_tweak.attr_fb)
def _build_avar(self, avar, **kwargs):
# HACK: Validate avars at runtime
try:
avar.validate()
except Exception, e:
self.warning("Can't build avar {0}, failed validation: {1}".format(
avar.name,
e
))
return None
avar.build(**kwargs)
def _build_avar_micro(self, avar, **kwargs):
self._build_avar(avar, **kwargs)
if libPymel.is_valid_PyNode(avar.grp_anm):
if self._grp_anm_avars_micro:
avar.grp_anm.setParent(self._grp_anm_avars_micro)
else:
avar.grp_anm.setParent(self.grp_anm)
if libPymel.is_valid_PyNode(avar.grp_rig):
if self._grp_rig_avars_micro:
avar.grp_rig.setParent(self._grp_rig_avars_micro)
else:
avar.grp_rig.setParent(self.grp_rig) # todo: raise warning?
def _build_avar_macro(self, cls_ctrl, avar, constraint=False, **kwargs):
"""
        Factory method that creates an avar that is not affiliated with any influence and is only used for connections.
:param cls_ctrl: The class definition to use for the ctrl.
:param avar: The Avar class instance to use.
        :param constraint: By default, a macro Avar doesn't affect its influence (directly), so this is False by default.
:param kwargs: Any additional keyword arguments will be sent to the avar build method.
:return:
"""
if cls_ctrl:
avar._CLS_CTRL = cls_ctrl # Hack, find a more elegant way.
self._build_avar(avar,
constraint=constraint,
**kwargs
)
if libPymel.is_valid_PyNode(avar.grp_anm):
if self._grp_anm_avars_macro:
avar.grp_anm.setParent(self._grp_anm_avars_macro)
else:
avar.grp_anm.setParent(self.grp_anm)
if libPymel.is_valid_PyNode(avar.grp_rig):
if self._grp_rig_avars_macro:
avar.grp_rig.setParent(self._grp_rig_avars_macro)
else:
avar.grp_rig.setParent(self.grp_rig) # todo: raise warning?
return avar
def _get_parent_adjustment_tm(self, avar):
"""
Return an attribute containing the additional contribution on the parent matrix.
"""
if not self.avar_all or self._is_tweak_avar(avar):
return
attr_avar_all_stack_result_tm = self.avar_all._stack.node.worldMatrix
return libRigging.create_utility_node(
'multMatrix',
matrixIn=(
self.avar_all.grp_offset.inverseMatrix, # enter avar_all space
attr_avar_all_stack_result_tm, # apply avar_all contribution
self.avar_all.grp_offset.matrix, # exit avar_all space
)
).matrixSum
def _parent_avar(self, avar, parent_tm):
"""
Connect the 'parent' group.
        This allows the avar's resulting transform to be affected by something (ex: Head_Jnt).
:param avar:
:param parent_tm:
:return:
"""
if avar.model_infl:
libRigging.connect_matrix_to_node(parent_tm, avar._grp_parent)
# avar.model_infl.connect_parent(parent_tm)
if avar.model_ctrl:
pymel.connectAttr(parent_tm, avar.model_ctrl._attr_inn_parent_tm)
# pymel.connectAttr(u.outputTranslate, avar._grp_parent.translate)
# pymel.connectAttr(u.outputRotate, avar._grp_parent.rotate)
# pymel.connectAttr(u.outputScale, avar._grp_parent.scale)
def _get_parent_identity_tm(self, parent):
print(parent)
        # To correctly support non-uniform scaling, we need to rely on something more than a scaleConstraint.
        # For this reason we'll need to store the bindpose of our parent ourselves.
grp_parent_bind = pymel.createNode(
'transform',
name=(parent.name() + '_BindPose')
)
grp_parent_bind.setParent(self.grp_rig)
grp_parent_bind.setMatrix(parent.getMatrix(worldSpace=True))
attr_get_parent_tm = libRigging.create_utility_node(
'multMatrix',
matrixIn=(
grp_parent_bind.worldInverseMatrix,
parent.worldMatrix,
),
).matrixSum
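        # The multMatrix above yields the parent's delta from its bind pose
        # (bindpose world inverse * current world matrix), so it stays at identity
        # as long as the parent hasn't moved since the rig was built.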
return attr_get_parent_tm
def _parent_avars(self):
"""
Parent each avars to their associated parent.
:return:
"""
# If the deformation order is set to post (aka the deformer is in the final skinCluster)
# we will want the offset node to follow it's original parent (ex: the head)
nomenclature_rig = self.get_nomenclature_rig()
attr_parent_tm_by_parent = {}
for avar in self.get_all_avars():
avar_parent = None
if self.SINGLE_PARENT:
if avar.jnt:
avar_parent = avar.jnt.getParent()
else:
                    # If we asked for a single parent for each avar but encounter an avar that doesn't have any influences, fall back to the module parent.
avar_parent = self.parent
else:
avar_parent = self.parent
# Hack: If the parent is the 'all' influence, we want to skip it since the 'all' influence is used in the stack.
# Otherwise this will result in double-transformation.
all_influence = self.get_influence_all()
if avar_parent and avar_parent == all_influence:
avar_parent = avar_parent.getParent()
if avar_parent:
attr_parent_tm = attr_parent_tm_by_parent.get(avar_parent)
if not attr_parent_tm:
attr_parent_tm = attr_parent_tm_by_parent[avar_parent] = self._get_parent_identity_tm(avar_parent)
self._parent_avar(avar, attr_parent_tm)
def _create_avars_ctrls(self, connect=False, **kwargs):
for avar in self.avars:
if self._is_tweak_avar(avar):
if self._CLS_CTRL_TWEAK:
avar._CLS_MODEL_CTRL = self._CLS_MODEL_CTRL_TWEAK
avar._CLS_CTRL = self._CLS_CTRL_TWEAK
avar.create_ctrl(self, **kwargs)
else:
if self._CLS_CTRL_MICRO:
avar._CLS_MODEL_CTRL = self._CLS_MODEL_CTRL_MICRO
avar._CLS_CTRL = self._CLS_CTRL_MICRO
avar.create_ctrl(self, **kwargs)
def handle_surface(self):
"""
Create the surface that the follicle will slide on if necessary.
:return:
"""
# Hack: Provide backward compatibility for when surface was provided as an input.
if not libPymel.isinstance_of_shape(self.surface, pymel.nodetypes.NurbsSurface):
fn_is_nurbsSurface = lambda obj: libPymel.isinstance_of_shape(obj, pymel.nodetypes.NurbsSurface)
surface = next(iter(filter(fn_is_nurbsSurface, self.input)), None)
if surface:
self.input.remove(surface)
self.surface = surface
return True
# Create surface if it doesn't exist.
self.warning("Can't find surface for {0}, creating one...".format(self))
self.surface = self.create_surface()
def build(self, connect_global_scale=None, create_ctrls=True, parent=True, constraint=True,
create_grp_rig_macro=True, create_grp_rig_micro=True, create_grp_anm_macro=True,
create_grp_anm_micro=True, calibrate=True, **kwargs):
self.handle_surface()
super(AvarGrp, self).build(connect_global_scale=connect_global_scale, parent=parent, **kwargs)
# We group the avars in 'micro' and 'macro' groups to make it easier for the animator to differentiate them.
nomenclature_anm = self.get_nomenclature_anm_grp()
if create_grp_anm_macro:
name_grp_macro = nomenclature_anm.resolve('macro')
self._grp_anm_avars_macro = pymel.createNode('transform', name=name_grp_macro)
self._grp_anm_avars_macro.setParent(self.grp_anm)
if create_grp_anm_micro:
name_grp_micro = nomenclature_anm.resolve('micro')
self._grp_anm_avars_micro = pymel.createNode('transform', name=name_grp_micro)
self._grp_anm_avars_micro.setParent(self.grp_anm)
# We group the avars in 'micro' and 'macro' groups to make it easier for the rigger to differentiate them.
nomenclature_rig = self.get_nomenclature_rig_grp()
if create_grp_rig_macro:
name_grp_macro = nomenclature_rig.resolve('macro')
self._grp_rig_avars_macro = pymel.createNode('transform', name=name_grp_macro)
self._grp_rig_avars_macro.setParent(self.grp_rig)
if create_grp_rig_micro:
name_grp_micro = nomenclature_rig.resolve('micro')
self._grp_rig_avars_micro = pymel.createNode('transform', name=name_grp_micro)
self._grp_rig_avars_micro.setParent(self.grp_rig)
self._create_avars()
self._build_avars(parent=parent, connect_global_scale=connect_global_scale, constraint=constraint)
if create_ctrls:
ctrl_size = self._get_default_ctrl_size()
self._create_avars_ctrls(ctrl_size=ctrl_size)
if parent:
self._parent_avars()
if calibrate:
self.calibrate()
def unbuild(self):
for avar in self.avars:
avar.unbuild()
super(AvarGrp, self).unbuild()
def iter_ctrls(self):
for ctrl in super(AvarGrp, self).iter_ctrls():
yield ctrl
for avar in self._iter_all_avars():
for ctrl in avar.iter_ctrls():
yield ctrl
@decorator_uiexpose()
def calibrate(self):
for avar in self.avars:
if not self._is_tweak_avar(avar): # tweak avar have no ctrl and should not be calibrated
avar.calibrate()
def _init_avar(self, cls, inst, ref=None, cls_ctrl=None, cls_ctrl_model=None, cls_infl_model=None, name=None, suffix=None):
"""
        Factory method that initializes an avar instance only if necessary.
        If the instance has already been initialized in a previous build, its current value will be preserved.
        This also handles the following checks:
        - Preserve ctrl information if we need to re-create the avar because of a type mismatch.
        - Ensure that the avar always has a surface. # todo: implement this only on AvarGrpOnSurface.
:param cls: The desired class.
:param inst: The current value. This should always exist since defined in the module constructor.
:param ref:
:param cls_ctrl: The desired ctrl class. We might want to remove this for simplicity
:param cls_ctrl_model: The desired controller model class.
:param cls_infl_model: The desired influence model class.
:return: The initialized instance. If the instance was already fine, it is returned as is.
"""
# Hack: Ensure ref is a list.
# todo: fix upstream
result_inputs = [ref] if ref else []
        result_inputs.extend(self.get_meshes()) # ensure avars propagate the mesh to their AvarCtrlModel
# todo: remove this call when we know it is safe.
if cls is None:
self.warning("No avar class specified for {0}, using default.".format(self))
cls = rigFaceAvar.AvarSimple
result = self.init_module(cls, inst, inputs=result_inputs, suffix=suffix)
# It is possible that the old avar type don't match the desired one.
# When this happen, we'll try at least to save the ctrl instance so the shapes match.
if inst and result != inst:
result.ctrl = inst.ctrl
result.avar_network = inst.avar_network
        # Ensure the result instance always has the same surface as its parent.
result.surface = self.surface
# Apply cls_ctrl override if specified
if cls_ctrl:
result._CLS_CTRL = cls_ctrl
# Apply cls_ctrl_model override if specified
if cls_ctrl_model:
result._CLS_MODEL_CTRL = cls_ctrl_model
# Apply cls_infl_model override if specified
if cls_infl_model:
result._CLS_MODEL_INFL = cls_infl_model
# Apply name override if specified
if name:
result.name = name
else:
ref = result.jnt
if ref:
result.name = (
self.get_nomenclature() + self.rig.nomenclature(ref.stripNamespace().nodeName())).resolve()
# Keep a reference to the module parent.
# todo: implement a generic mechanism for all modules?
result._parent_module = self
return result
def configure_avar(self, avar):
"""
This method is called as soon as we access or create an avar.
Use it to configure the avar automatically.
"""
if avar.surface is None and self.surface:
avar.surface = self.surface
def _need_to_connect_macro_avar(self, avar):
"""
Macro avars are made to control micro avars.
        In the first build, it is necessary to create default connections so the rigger gets something that works.
        However with time it is normal that a rigger removes this connection or replaces it with another type of connection.
        This call checks if the avar is connected to at least one other avar. If True, no connection is needed.
"""
def _is_obj_avar(obj):
            return obj.hasAttr('avar_lr') # ugly but it works
attr_holder = avar.grp_rig
for hist in attr_holder.listHistory(future=False):
if isinstance(hist, pymel.nodetypes.Transform) and hist != attr_holder and _is_obj_avar(hist):
return False
return True
# todo: deprecate this class in favor of composition
class AvarGrpOnSurface(AvarGrp):
"""
Highest-level surface-based AvarGrp module.
With additional features like:
- Horizontal macro avars (avar_l, avar_r)
- Vertical macro avars (avar_upp, avar_low)
- Global macro avar (avar_all)
- Ability to have 'tweak' avars that follow their parent only in translation.
Especially useful to have different falloff on translation than on rotation.
    Here are examples of the types of hierarchy that the rigger can provide:
--------------------------------------------------------------------------------------------------------------------
| NAME | AVAR_ALL | AVAR_L | AVAR_R | AVAR_UPP | AVAR_LOW | NOTES
--------------------------------------------------------------------------------------------------------------------
ex #1:
| jnt_avar_01 | YES | NO | NO | NO | NO |
| jnt_avar_02 | YES | NO | NO | NO | NO |
| jnt_avar_03 | YES | NO | NO | NO | NO |
ex #2:
| jnt_root | YES | NO | NO | NO | NO | Affected by avar_all only.
| jnt_avar_01 | YES | NO | NO | NO | NO |
| jnt_avar_02 | YES | NO | NO | NO | NO |
| jnt_avar_upp | YES | NO | NO | YES | NO | Affected by avar_upp because of the 'upp' token.
| jnt_avar_low | YES | NO | NO | NO | YES | Affected by avar_low because of the 'low' token.
| l_jnt_avar | YES | YES | NO | NO | NO | Affected by avar_l because of the 'l' token.
| r_jnt_avar | YES | NO | YES | NO | NO | Affected by avar_r because of the 'r' token.
ex #3:
| jnt_root | YES | NO | NO | NO | NO | Affected by avar_all only.
| jnt_avar_01 | YES | NO | NO | NO | NO |
| jnt_avar_01_tweak | NO | NO | NO | NO | NO | Affected by jnt_avar_01 in translation only.
"""
_CLS_AVAR = rigFaceAvar.AvarFollicle
_CLS_AVAR_MACRO = rigFaceAvar.AvarFollicle # Macro avars are always abstract (except the all macro which can potentially drive something)
def __init__(self, *args, **kwargs):
super(AvarGrpOnSurface, self).__init__(*args, **kwargs)
self.surface = None
self.create_macro_horizontal = self.CREATE_MACRO_AVAR_HORIZONTAL
self.create_macro_vertical = self.CREATE_MACRO_AVAR_VERTICAL
self.create_macro_all = self.CREATE_MACRO_AVAR_ALL
self.avar_all = None
self.avar_l = None
self.avar_r = None
self.avar_upp = None
self.avar_low = None
@decorator_uiexpose()
def create_surface(self, *args, **kwargs):
"""
Expose the function in the ui, using the decorator.
"""
return super(AvarGrpOnSurface, self).create_surface(*args, **kwargs)
_CLS_CTRL_LFT = CtrlFaceMacroL
_CLS_CTRL_RGT = CtrlFaceMacroR
_CLS_CTRL_UPP = CtrlFaceUpp
_CLS_CTRL_LOW = CtrlFaceLow
_CLS_CTRL_ALL = CtrlFaceAll
_CLS_MODEL_CTRL_ALL = ModelCtrlMacroAll
# We always want a linear avar-influence model for the 'all' macro avar.
# For example if eyelids are following a round surface, we still want the 'all' macro avar to be linear.
# However we might want to define the range from the shared surface.
_CLS_MODEL_INFL_ALL = model_avar_linear.AvarLinearModel
SHOW_IN_UI = True
UI_DISPLAY_NAME = 'AvarGrp'
CREATE_MACRO_AVAR_HORIZONTAL = True
CREATE_MACRO_AVAR_VERTICAL = True
CREATE_MACRO_AVAR_ALL = True
def validate(self):
super(AvarGrpOnSurface, self).validate()
        # Ensure that we support the hierarchy of the influences.
        influence_hierarchy_depth = max(self._get_relative_parent_level_by_influences().keys())
        if influence_hierarchy_depth > 2:
raise Exception("Unsupported hierarchy depth! Please revise your inputs hierarchy.")
# Ensure that we have a mesh to follow.
if not self.get_meshes():
raise Exception("Please provide one reference mesh to follow.")
# Ensure that if we are building macro avars, we have reference for all of them.
# If some are missing we won't be able to build.
if self.create_macro_horizontal:
if not self.get_jnt_l_mid():
raise Exception("Cannot find a reference input for the lft horizontal macro avar.")
if not self.get_jnt_r_mid():
raise Exception("Cannot find a reference input for the rgt horizontal macro avar.")
if self.create_macro_vertical:
if not self.get_jnt_upp_mid():
raise Exception("Cannot find a reference input for the upp macro avar.")
if not self.get_jnt_low_mid():
raise Exception("Cannot find a reference input for the dwn macro avar.")
#
# Influence getter functions.
#
@libPython.memoized_instancemethod
def get_jnts_upp(self):
"""
:return: The upper section influences.
"""
# TODO: Find a better way
fnFilter = lambda jnt: 'upp' in jnt.stripNamespace().nodeName().lower()
return filter(fnFilter, self.jnts)
@libPython.memoized_instancemethod
def get_jnt_upp_mid(self):
"""
:return: The middle influence of the upper section.
"""
return get_average_pos_between_nodes(self.get_jnts_upp())
@libPython.memoized_instancemethod
def get_jnts_low(self):
"""
        :return: The lower section influences.
"""
# TODO: Find a better way
fnFilter = lambda jnt: 'low' in jnt.stripNamespace().nodeName().lower()
return filter(fnFilter, self.jnts)
@libPython.memoized_instancemethod
def get_jnt_low_mid(self):
"""
:return: The middle influence of the lower section.
"""
return get_average_pos_between_nodes(self.get_jnts_low())
@libPython.memoized_instancemethod
def get_jnts_l(self):
"""
:return: All the left side influences.
# TODO: Use the nomenclature instead of the position?
"""
middle = self.get_pos_all_middle()
        jnt_all = self.get_influence_all() # ignore the 'all' influence, it has no side
def _filter(jnt):
if jnt == jnt_all:
return False
return jnt.getTranslation(space='world').x >= middle.x
return filter(_filter, self.jnts)
@libPython.memoized_instancemethod
def get_jnts_r(self):
"""
:return: All the right side influences.
# TODO: Use the nomenclature instead of the position?
"""
middle = self.get_pos_all_middle()
jnt_all = self.get_influence_all()
def _filter(jnt):
if jnt == jnt_all:
return False
return jnt.getTranslation(space='world').x < middle.x
return filter(_filter, self.jnts)
@libPython.memoized_instancemethod
def get_jnt_l_mid(self):
"""
:return: The left most influence (highest positive distance in x)
"""
fn_get_pos_x = lambda x: x.getTranslation(space='world').x
return next(iter(reversed(sorted(self.get_jnts_l(), key=fn_get_pos_x))), None)
@libPython.memoized_instancemethod
def get_jnt_r_mid(self):
"""
:return: The right most influence (highest negative distance in x)
"""
fn_get_pos_x = lambda x: x.getTranslation(space='world').x
return next(iter(sorted(self.get_jnts_r(), key=fn_get_pos_x)), None)
#
# Avars getter functions
#
@libPython.memoized_instancemethod
def get_avar_mid(self):
return _find_mid_avar(self.avars)
@libPython.memoized_instancemethod
def get_avars_micro_l(self):
"""
Resolve all micro avars on the left side of the face that would be affected by a left macro avar.
        Note that we explicitly ignore any middle avars since no 'side' macro can affect the 'middle' avars.
:return: A list of avar instances.
"""
middle = self.get_pos_all_middle()
avar_corner_upp = self.get_avar_upp_corner()
avar_corner_low = self.get_avar_low_corner()
def fn_filter(avar):
# Ignore any vertical corner avars.
if avar_corner_upp and avar is avar_corner_upp:
return False
if avar_corner_low and avar is avar_corner_low:
return False
# Ignore right-sided avars.
pos = avar.jnt.getTranslation(space='world')
if pos.x < middle.x:
return False
return True
return [avar for avar in self.avars if avar and fn_filter(avar)]
@libPython.memoized_instancemethod
def get_avars_micro_r(self):
"""
Resolve all micro avars on the right side of the face that would be affected by a right macro avar.
        Note that we explicitly ignore any middle avars since no 'side' macro can affect the 'middle' avars.
:return: A list of avar instances.
"""
middle = self.get_pos_all_middle()
avar_corner_upp = self.get_avar_upp_corner()
avar_corner_low = self.get_avar_low_corner()
def fn_filter(avar):
# Ignore any vertical corner avars.
if avar_corner_upp and avar is avar_corner_upp:
return False
if avar_corner_low and avar is avar_corner_low:
return False
            # Ignore left-sided avars.
pos = avar.jnt.getTranslation(space='world')
if pos.x > middle.x:
return False
return True
return [avar for avar in self.avars if avar and fn_filter(avar)]
@libPython.memoized_instancemethod
def get_avar_l_corner(self):
"""
:return: The farthest avar in the positive X axis.
"""
fn_get_avar_pos_x = lambda avar: avar.jnt.getTranslation(space='world').x
return next(iter(reversed(sorted(self.get_avars_micro_l(), key=fn_get_avar_pos_x))), None)
@libPython.memoized_instancemethod
def get_avar_r_corner(self):
"""
:return: The farthest avar in the negative X axis.
"""
fn_get_avar_pos_x = lambda avar: avar.jnt.getTranslation(space='world').x
return next(iter(sorted(self.get_avars_micro_r(), key=fn_get_avar_pos_x)), None)
@libPython.memoized_instancemethod
def get_avar_upp_corner(self):
"""
:return: The middle upp micro avar.
"""
avars = self.get_avars_micro_upp()
middle = self.get_pos_upp_middle()
def get_distance(avar):
return abs(avar.jnt.getTranslation(space='world').x - middle.x)
avars = sorted(avars, key=get_distance)
return next(iter(avars), None)
@libPython.memoized_instancemethod
def get_avar_low_corner(self):
"""
:return: The middle low micro avar.
"""
avars = self.get_avars_micro_low()
middle = self.get_pos_low_middle()
def get_distance(avar):
return abs(avar.jnt.getTranslation(space='world').x - middle.x)
avars = sorted(avars, key=get_distance)
return next(iter(avars), None)
@libPython.memoized_instancemethod
def get_pos_all_middle(self):
# type () -> pymel.datatypes.Vector
"""
:return: The average position using all the influences.
"""
return libRigging.get_average_pos_between_vectors(self.jnts)
@libPython.memoized_instancemethod
def get_pos_upp_middle(self):
# type () -> pymel.datatypes.Vector
"""
:return: The average position using all the upper section influences.
"""
return libRigging.get_average_pos_between_vectors([avar.jnt for avar in self.get_avars_micro_upp()])
@libPython.memoized_instancemethod
def get_pos_low_middle(self):
# type () -> pymel.datatypes.Vector
"""
:return: The average position using all the lower section influences.
"""
return libRigging.get_average_pos_between_vectors([avar.jnt for avar in self.get_avars_micro_low()])
def _iter_all_avars(self):
for avar in super(AvarGrpOnSurface, self)._iter_all_avars():
yield avar
if self.create_macro_horizontal:
if self.avar_l:
yield self.avar_l
if self.avar_r:
yield self.avar_r
if self.create_macro_vertical:
if self.avar_upp:
yield self.avar_upp
if self.avar_low:
yield self.avar_low
if self.create_macro_all:
if self.avar_all:
yield self.avar_all
def add_avars(self, attr_holder):
"""
        An AvarGrp doesn't create any avar by default.
It is the responsibility of the inherited module to implement it if necessary.
"""
pass
def get_multiplier_u(self):
"""
        Since we are using the same plane for the eyebrows, we want to attenuate the relation between the LR avar
and the plane V coordinates.
In the best case scenario, at LR -1, the V coordinates of the BrowInn are 0.5 both.
"""
base_u, base_v = self.get_base_uv()
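        # Worked example (assuming get_base_uv() returns a base U of 0.75):
        # abs(0.75 - 0.5) * 2.0 = 0.5, so the multiplier shrinks toward 0.0 as the base U nears the middle (0.5)
        # and grows toward 1.0 at the borders of the surface.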
return abs(base_u - 0.5) * 2.0
def _get_avars_influences(self):
"""
If the rigger provided an influence for the 'all' Avar, don't create an Avar for it. We will handle it manually.
:return:
"""
influences = super(AvarGrpOnSurface, self)._get_avars_influences()
influence_all = self.get_influence_all()
if influence_all and influence_all in influences:
influences.remove(influence_all)
return influences
@libPython.memoized_instancemethod
def get_influences_tweak(self):
return self._get_relative_parent_level_by_influences().get(2, [])
def _create_avars(self):
super(AvarGrpOnSurface, self)._create_avars()
# todo: for horizontal and vertical avars, is ref really necessary? they are always abstract avars
middle = self.get_head_jnt().getTranslation(space='world')
# Create horizontal macro avars
if self.create_macro_horizontal:
# Create avar_l if necessary
ref_l = self.get_jnt_l_mid()
if not ref_l:
self.info("Cannot create macro avar 'L', found no matching influence.")
else:
# Resolve name
nomenclature = self.rig.nomenclature(self.get_module_name())
nomenclature.add_tokens('macro')
if self.IS_SIDE_SPECIFIC:
side = ref_l.getTranslation(space='world').x > middle.x
if side: # left
nomenclature.side = nomenclature.SIDE_L
nomenclature.add_tokens('out')
else:
nomenclature.side = nomenclature.SIDE_R
nomenclature.add_tokens('inn')
else:
nomenclature.side = nomenclature.SIDE_L
avar_macro_l_name = nomenclature.resolve()
# avar_macro_l_name = 'L_{0}'.format(self.get_module_name())
self.avar_l = self._init_avar(
self._CLS_AVAR_MACRO,
self.avar_l,
ref=ref_l,
cls_ctrl=self._CLS_CTRL_LFT,
name=avar_macro_l_name
)
# Create avar_r if necessary
ref_r = self.get_jnt_r_mid()
if not ref_r:
self.info("Cannot create macro avar 'L', found no matching influence.")
else:
# Resolve name
nomenclature = self.rig.nomenclature(self.get_module_name())
nomenclature.add_tokens('macro')
if self.IS_SIDE_SPECIFIC:
side = ref_r.getTranslation(space='world').x > middle.x
if side: # left
nomenclature.side = nomenclature.SIDE_L
nomenclature.add_tokens('inn')
else:
nomenclature.side = nomenclature.SIDE_R
nomenclature.add_tokens('out')
else:
nomenclature.side = nomenclature.SIDE_R
avar_macro_r_name = nomenclature.resolve()
# avar_macro_r_name = 'R_{0}'.format(self.get_module_name())
self.avar_r = self._init_avar(
self._CLS_AVAR_MACRO,
self.avar_r,
ref=ref_r,
cls_ctrl=self._CLS_CTRL_RGT,
name=avar_macro_r_name
)
# Create vertical macro avars
if self.create_macro_vertical:
# Create avar_upp if necessary
ref_upp = self.get_jnt_upp_mid()
if not ref_upp:
self.info(
"Cannot create macro avar '{}', found no matching influence.".format(self.rig.AVAR_NAME_UPP))
else:
# Resolve avar name
avar_upp_name = self.get_nomenclature().resolve('macro', self.rig.AVAR_NAME_UPP)
self.avar_upp = self._init_avar(
self._CLS_AVAR_MACRO,
self.avar_upp,
ref=ref_upp,
cls_ctrl=self._CLS_CTRL_UPP,
name=avar_upp_name
)
# Create avar_low if necessary
ref_low = self.get_jnt_low_mid()
if not ref_low:
self.info(
"Cannot create macro avar '{}', found no matching influence.".format(self.rig.AVAR_NAME_LOW))
else:
# Resolve avar name
avar_low_name = self.get_nomenclature().resolve('macro', self.rig.AVAR_NAME_LOW)
self.avar_low = self._init_avar(
self._CLS_AVAR_MACRO,
self.avar_low,
ref=ref_low,
cls_ctrl=self._CLS_CTRL_LOW,
name=avar_low_name
)
# Create all macro avar
# Note that the all macro avar can drive an influence or not, both are supported.
        # This allows the rigger to provide an additional falloff in case the whole section is moved.
if self.create_macro_all:
avar_all_ref = self.get_influence_all()
nomenclature = self.get_nomenclature_anm().copy()
nomenclature.add_tokens('macro', self.rig.AVAR_NAME_ALL)
avar_all_name = nomenclature.resolve()
self.avar_all = self._init_avar(
self._CLS_AVAR_MACRO,
self.avar_all,
ref=avar_all_ref,
cls_ctrl=self._CLS_CTRL_UPP,
cls_ctrl_model=self._CLS_MODEL_CTRL_ALL,
cls_infl_model=self._CLS_MODEL_INFL_ALL,
name=avar_all_name
)
self.avar_all.name = avar_all_name
# The avar_all is special since it CAN drive an influence.
old_ref_all = self.avar_all.jnt
if old_ref_all != avar_all_ref:
self.warning(
"Unexpected influence for avar {0}, expected {1}, got {2}. Will update the influence.".format(
self.avar_all.name, avar_all_ref, old_ref_all
))
self.avar_all.input = [avar_all_ref if inf == old_ref_all else inf for inf in self.avar_all.input]
# Hack: Delete all cache since it may have used the old inputs.
try:
del self.avar_all._cache
except AttributeError:
pass
def _build_avar_macro_horizontal(self, avar_parent, avar_middle, avar_children, cls_ctrl, connect_ud=True,
connect_lr=True, connect_fb=True, **kwargs):
self._build_avar_macro(
cls_ctrl,
avar_parent,
**kwargs
)
def _connect_avar_macro_horizontal(self, avar_parent, avar_children, connect_ud=True, connect_lr=True,
connect_fb=True):
for child_avar in avar_children:
if connect_ud:
libRigging.connectAttr_withLinearDrivenKeys(avar_parent.attr_ud, child_avar.attr_ud)
if connect_lr:
libRigging.connectAttr_withLinearDrivenKeys(avar_parent.attr_lr, child_avar.attr_lr)
if connect_fb:
libRigging.connectAttr_withLinearDrivenKeys(avar_parent.attr_fb, child_avar.attr_fb)
def _build_avar_macro_vertical(self, avar_parent, avar_middle, avar_children, cls_ctrl, **kwargs):
self._build_avar_macro(
cls_ctrl,
avar_parent,
**kwargs
)
def _connect_avar_macro_vertical(self, avar_parent, avar_children, connect_ud=True, connect_lr=True,
connect_fb=True):
for child_avar in avar_children:
if connect_ud:
libRigging.connectAttr_withLinearDrivenKeys(avar_parent.attr_ud, child_avar.attr_ud)
if connect_lr:
libRigging.connectAttr_withLinearDrivenKeys(avar_parent.attr_lr, child_avar.attr_lr)
if connect_fb:
libRigging.connectAttr_withLinearDrivenKeys(avar_parent.attr_fb, child_avar.attr_fb)
def _build_avar_macro_l(self, **kwargs):
# Create left avar if necessary
ref = self.get_jnt_l_mid()
if self.create_macro_horizontal and ref:
self._build_avar_macro_horizontal(self.avar_l, self.get_avar_mid(), self.get_avars_micro_l(),
self._CLS_CTRL_LFT, **kwargs)
def _connect_avar_macro_l(self, avar, child_avars):
self._connect_avar_macro_horizontal(avar, child_avars)
def _build_avar_macro_r(self, **kwargs):
# Create right avar if necessary
ref = self.get_jnt_r_mid()
if self.create_macro_horizontal and ref:
self._build_avar_macro_horizontal(self.avar_r, self.get_avar_mid(), self.get_avars_micro_r(),
self._CLS_CTRL_RGT, **kwargs)
def _connect_avar_macro_r(self, avar, child_avars):
self._connect_avar_macro_horizontal(avar, child_avars)
def _build_avar_macro_upp(self, **kwargs):
# Create upp avar if necessary
ref = self.get_jnt_upp_mid()
if self.create_macro_vertical and ref:
self._build_avar_macro_vertical(self.avar_upp, self.get_avar_mid(), self.get_avars_micro_upp(),
self._CLS_CTRL_UPP, **kwargs)
def _connect_avar_macro_upp(self, avar, child_avar):
self._connect_avar_macro_vertical(avar, child_avar)
def _build_avar_macro_low(self, **kwargs):
# Create low avar if necessary
ref = self.get_jnt_low_mid()
if self.create_macro_vertical and ref:
self._build_avar_macro_vertical(self.avar_low, self.get_avar_mid(), self.get_avars_micro_low(),
self._CLS_CTRL_LOW, **kwargs)
def _connect_avar_macro_low(self, avar, child_avars):
self._connect_avar_macro_vertical(avar, child_avars)
def _connect_avar_macro_all(self, connect_ud=True, connect_lr=True, connect_fb=True):
"""
        Connect the avar_all to its micro equivalents.
        The avar_all is special as it supports rotation and scale as if the micro avars were parented to it.
:param connect_ud: If True, will connect the avar_ud.
:param connect_lr: If True, will connect the avar_lr
:param connect_fb: If True, will connect the avar_fb.
:return:
"""
influence_all = self.get_influence_all()
def _can_connect_avar_scale(avar):
"""
Note that we don't connect the scale on the all_influence.
            Since the all_influence contains an additional falloff (ie. for when we move the mouth),
            it generally gives better results if it is not scaled.
"""
if influence_all and avar.jnt == influence_all:
return False
return True
for avar_child in self.avars:
# Hack: Tweak avars are affected by their parent avar which is already affected by the all influence.
if self._is_tweak_avar(avar_child):
continue
# # Connect macro_all ctrl to each avar_child.
# # Since the movement is 'absolute', we'll only do a simple transform at the beginning of the stack.
# # Using the rotate/scalePivot functionality, we are able to save some nodes.
# attr_get_pivot_tm = libRigging.create_utility_node(
# 'multMatrix',
# matrixIn=(
# self.avar_all._stack.node.worldMatrix,
# avar_child._grp_offset.worldInverseMatrix
# )
# ).matrixSum
#
# layer_parent = avar_child._stack.prepend_layer(name='globalInfluence')
# layer_parent.t.set(0, 0, 0) # Hack: why?
#
# attr_get_all_stack_tm = libRigging.create_utility_node(
# 'multMatrix',
# matrixIn=(
# self.avar_all._stack.node.worldMatrix,
# self.avar_all._grp_offset.inverseMatrix
# )
# ).matrixSum
#
# attr_global_tm = libRigging.create_utility_node(
# 'multMatrix',
# matrixIn=(
# avar_child._grp_offset.matrix,
# self.avar_all._grp_offset.inverseMatrix,
# attr_get_all_stack_tm,
# self.avar_all._grp_offset.matrix,
# avar_child._grp_offset.inverseMatrix
# )
# ).matrixSum
#
# util_decompose_global_tm = libRigging.create_utility_node(
# 'decomposeMatrix',
# inputMatrix=attr_global_tm
# )
#
# pymel.connectAttr(util_decompose_global_tm.outputTranslateX, layer_parent.tx)
# pymel.connectAttr(util_decompose_global_tm.outputTranslateY, layer_parent.ty)
# pymel.connectAttr(util_decompose_global_tm.outputTranslateZ, layer_parent.tz)
# pymel.connectAttr(util_decompose_global_tm.outputRotateX, layer_parent.rx)
# pymel.connectAttr(util_decompose_global_tm.outputRotateY, layer_parent.ry)
# pymel.connectAttr(util_decompose_global_tm.outputRotateZ, layer_parent.rz)
# pymel.connectAttr(util_decompose_global_tm.outputScaleX, layer_parent.sx)
# pymel.connectAttr(util_decompose_global_tm.outputScaleY, layer_parent.sy)
# pymel.connectAttr(util_decompose_global_tm.outputScaleZ, layer_parent.sz)
@libPython.memoized_instancemethod
def _get_avar_macro_all_influence_tm(self):
"""
Return the pivot matrix of the influence controller by the 'all' macro avar.
:return: A pymel.datatypes.Matrix instance.
"""
influence_all = self.get_influence_all()
if influence_all:
pos = influence_all.getTranslation(space='world')
elif self.surface:
            # We'll always want the macro avar to be positioned at the center of the plane.
pos = libRigging.get_point_on_surface_from_uv(self.surface, 0.5, 0.5)
else:
            # If we are not controlling a specific influence and no surface exists, take our chances and use the first influence.
pos = self.jnt.getTranslation(space='world')
jnt_tm = pymel.datatypes.Matrix(
1, 0, 0, 0,
0, 1, 0, 0,
0, 0, 1, 0,
pos.x, pos.y, pos.z, 1
)
        # By default, we expect all joints from the right side of the face to be mirrored in 'behavior'.
# Since we are creating a new transformation matrix that didn't exist before, we'll need to follow the same rules.
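        # Concretely, the matrix below negates the Y and Z axes (a 180 degree flip around X),
        # which is what 'behavior' mirroring amounts to for the right-side joints.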
if pos.x < 0:
jnt_tm = pymel.datatypes.Matrix(
1.0, 0.0, 0.0, 0.0,
0.0, -1.0, 0.0, 0.0,
0.0, 0.0, -1.0, 0.0,
0.0, 0.0, 0.0, 1.0) * jnt_tm
return jnt_tm
def _get_avar_macro_all_ctrl_tm(self):
"""
:return: The default ctrl matrix for the avar_all ctrl.
"""
# todo: move this logic in the model
tm = self._get_avar_macro_all_influence_tm()
pos = tm.translate
dir = pymel.datatypes.Point(0, 0, 1)
raycast_result = self.rig.raycast_farthest(pos, dir)
if raycast_result:
pos = raycast_result
        # Ensure that the ctrl sits away from the head.
        # Resolve the offset distance from the head joint length.
offset_z = 0
head_jnt = self.get_head_jnt()
try:
head_length = self.rig.get_head_length(head_jnt)
except Exception, e:
head_length = None
self.warning(str(e))
if head_length:
offset_z = head_length * 0.05
if pos.x >= 0:
jnt_tm = pymel.datatypes.Matrix(
1, 0, 0, 0,
0, 1, 0, 0,
0, 0, 1, 0,
pos.x, pos.y, pos.z + offset_z, 1
)
else:
jnt_tm = pymel.datatypes.Matrix(
1, 0, 0, 0,
0, 1, 0, 0,
0, 0, 1, 0,
pos.x, pos.y, pos.z + offset_z, 1
)
jnt_tm = pymel.datatypes.Matrix(
1.0, 0.0, 0.0, 0.0,
0.0, -1.0, 0.0, 0.0,
0.0, 0.0, -1.0, 0.0,
0.0, 0.0, 0.0, 1.0) * jnt_tm
return jnt_tm
def _build_avar_macro_all(self, connect_ud=True, connect_lr=True, connect_fb=True, constraint=False):
# Create all avar if necessary
        # Note that the user can provide an influence.
# If no influence was found, we'll create an 'abstract' avar that doesn't move anything.
if self.create_macro_all:
            # We'll always want the macro avar to be positioned at the center of the plane.
jnt_tm = self._get_avar_macro_all_influence_tm()
constraint = True if self.get_influence_all() else False
self._build_avar_macro(self._CLS_CTRL_ALL, self.avar_all, jnt_tm=jnt_tm, constraint=constraint)
# self._connect_avar_macro_all(connect_ud=connect_ud, connect_lr=connect_lr, connect_fb=connect_fb)
def _build_avars(self, **kwargs):
# TODO: Some calls might need to be move
super(AvarGrpOnSurface, self)._build_avars(**kwargs)
self._build_avar_macro_l()
self._build_avar_macro_r()
self._build_avar_macro_upp()
self._build_avar_macro_low()
self._build_avar_macro_all()
self._patch_avars()
def _patch_avars(self):
"""
After all the avars are created, we might want to add custom logic for them
:return:
"""
# For each avars with an avar logic, add the 'all' macro avar contribution before.
# If there's no 'all' macro avar, skip this step.
if self.create_macro_all:
for avar in self._iter_all_avars():
if avar == self.avar_all:
continue
                # If we are dealing with a 'tweak' avar, it already inherits its parent transform.
if self._is_tweak_avar(avar):
continue
if avar.model_infl:
self._add_macro_all_avar_contribution(avar)
def _add_macro_all_avar_contribution(self, avar):
attr_avar_all_stack_result_tm = self.avar_all.model_infl._attr_out_tm
# attr_avar_all_offset_tm = self.avar_all._grp_offset.matrix
# attr_avar_all_offset_tm_inv = self.avar_all._grp_offset.inverseMatrix
attr_avar_all_offset_tm = self.avar_all._stack_post.worldMatrix
attr_avar_all_offset_tm_inv = self.avar_all._stack_post.worldInverseMatrix
new_layer = avar._stack_post.append_layer(name='macroAvarAll')
attr_tm = libRigging.create_utility_node(
'multMatrix',
matrixIn=(
                avar.grp_offset.matrix, # enter local space, note that this is a hack, our parent contains the local space already...
attr_avar_all_offset_tm_inv, # enter avar_all space
attr_avar_all_stack_result_tm, # apply avar_all contribution
attr_avar_all_offset_tm, # exit avar_all space
avar.grp_offset.inverseMatrix, # exit local space (return to avar space)
)
).matrixSum
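        # The chain above is a change-of-basis sandwich: offset * all_offset_inv * all_result * all_offset * offset_inv,
        # i.e. the 'all' avar contribution is evaluated in its own space and then brought back into this avar's local space.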
util_decompose_tm = libRigging.create_utility_node(
'decomposeMatrix',
inputMatrix=attr_tm,
)
pymel.connectAttr(util_decompose_tm.outputTranslate, new_layer.translate)
pymel.connectAttr(util_decompose_tm.outputRotate, new_layer.rotate)
pymel.connectAttr(util_decompose_tm.outputScale, new_layer.scale)
# u = libRigging.create_utility_node(
# 'multMatrix',
# matrixIn=(
# attr_avar_all_offset_tm,
# avar._grp_offset.inverseMatrix
# )
# ).matrixSum
# u2 = libRigging.create_utility_node(
# 'decomposeMatrix',
# inputMatrix=u
# )
# pymel.connectAttr(u2.outputTranslate, new_layer.rotatePivot)
# pymel.connectAttr(u2.outputTranslate, new_layer.scalePivot)
def _create_avar_macro_all_ctrls(self, parent_pos=None, parent_rot=None, ctrl_tm=None, **kwargs):
# Note: Since the avar_all might not have any influence, we resolve the ctrl_tm outside of the model.
# todo: resolve ctrl_tm inside of the model?
ctrl_tm = self._get_avar_macro_all_ctrl_tm()
parent_pos = self.avar_all._grp_output
# parent_rot=self.avar_all._grp_output
parent_rot = None
self.avar_all.create_ctrl(
self,
ctrl_tm=ctrl_tm,
follow_mesh=False,
parent_pos=parent_pos,
parent_rot=parent_rot,
**kwargs
)
def _create_avar_macro_l_ctrls(self, **kwargs):
self.avar_l.create_ctrl(self, **kwargs)
def _create_avar_macro_r_ctrls(self, **kwargs):
self.avar_r.create_ctrl(self, **kwargs)
def _create_avar_macro_upp_ctrls(self, **kwargs):
self.avar_upp.create_ctrl(self, **kwargs)
def create_avar_macro_low_ctrls(self, **kwargs):
self.avar_low.create_ctrl(self, **kwargs)
def _create_avars_ctrls(self, parent_rot=None, parent_scl=None, **kwargs):
parent_rot = self.get_head_jnt()
parent_scl = None
# Since micro avars ctrls can be constraint to macro avars ctrls, we create the macro first.
if self.create_macro_all:
self._create_avar_macro_all_ctrls(
parent_rot=parent_rot,
parent_scl=parent_scl,
**kwargs
)
self._connect_avar_macro_all()
# parent_rot = self.avar_all.model_ctrl._stack.get_stack_end()
parent_rot = self.avar_all._grp_output
parent_scl = self.avar_all.ctrl
unconnected_micro_avars = set(avar for avar in self.avars if self._need_to_connect_macro_avar(avar))
if self.create_macro_horizontal:
if self.avar_l:
self._create_avar_macro_l_ctrls(
parent_rot=parent_rot,
parent_scl=parent_scl,
**kwargs
)
child_avar_l = set(self.get_avars_micro_l()) & unconnected_micro_avars
if child_avar_l:
self._connect_avar_macro_l(self.avar_l, child_avar_l)
if self.avar_r:
self._create_avar_macro_r_ctrls(
parent_rot=parent_rot,
parent_scl=parent_scl,
**kwargs
)
child_avar_r = set(self.get_avars_micro_r()) & unconnected_micro_avars
self._connect_avar_macro_r(self.avar_r, child_avar_r)
if self.create_macro_vertical:
if self.avar_upp:
self._create_avar_macro_upp_ctrls(
parent_rot=parent_rot,
parent_scl=parent_scl,
**kwargs
)
child_avar_upp = set(self.get_avars_micro_upp()) & unconnected_micro_avars
self._connect_avar_macro_upp(self.avar_upp, child_avar_upp)
if self.avar_low:
self.create_avar_macro_low_ctrls(
parent_rot=parent_rot,
parent_scl=parent_scl,
**kwargs
)
child_avar_low = set(self.get_avars_micro_low()) & unconnected_micro_avars
self._connect_avar_macro_low(self.avar_low, child_avar_low)
super(AvarGrpOnSurface, self)._create_avars_ctrls(parent_rot=parent_rot, parent_scl=parent_scl, **kwargs)
def unbuild(self):
if self.avar_l:
self.avar_l.unbuild()
if self.avar_r:
self.avar_r.unbuild()
if self.avar_upp:
self.avar_upp.unbuild()
if self.avar_low:
self.avar_low.unbuild()
if self.avar_all:
self.avar_all.unbuild()
super(AvarGrpOnSurface, self).unbuild()
@decorator_uiexpose()
def calibrate(self):
"""
Ensure macro avars are correctly calibrated.
This override might not be necessary if the design was better.
"""
super(AvarGrpOnSurface, self).calibrate()
if self.avar_l:
self.avar_l.calibrate()
if self.avar_r:
self.avar_r.calibrate()
if self.avar_upp:
self.avar_upp.calibrate()
if self.avar_low:
self.avar_low.calibrate()
if self.avar_all:
self.avar_all.calibrate()
def get_avars_upp(self, macro=True):
result = super(AvarGrpOnSurface, self).get_avars_upp()
if macro and self.avar_upp:
result.append(self.avar_upp)
return result
def get_avars_low(self, macro=True):
result = super(AvarGrpOnSurface, self).get_avars_low()
if macro and self.avar_low:
result.append(self.avar_low)
return result
def _get_default_ctrl_size(self, jnts=None, max_ctrl_size=None, epsilon=0.001):
if self.CREATE_MACRO_AVAR_VERTICAL:
jnts_upp = [avar.jnt for avar in self.get_avars_micro_upp()]
default_ctrl_size_upp = super(AvarGrpOnSurface, self)._get_default_ctrl_size(jnts=jnts_upp,
max_ctrl_size=max_ctrl_size,
epsilon=epsilon)
jnts_low = [avar.jnt for avar in self.get_avars_micro_low()]
default_ctrl_size_low = super(AvarGrpOnSurface, self)._get_default_ctrl_size(jnts=jnts_low,
max_ctrl_size=max_ctrl_size,
epsilon=epsilon)
return max(default_ctrl_size_upp, default_ctrl_size_low)
else:
return super(AvarGrpOnSurface, self)._get_default_ctrl_size(jnts=None, max_ctrl_size=None, epsilon=epsilon)
def register_plugin():
return AvarGrpOnSurface
| 40.797013 | 152 | 0.614634 |
be8cb842253b64fbe471b433084dd2ba57b54e61 | 6,256 | py | Python | sdk/python/pulumi_aws/gamelift/alias.py | mdop-wh/pulumi-aws | 05bb32e9d694dde1c3b76d440fd2cd0344d23376 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_aws/gamelift/alias.py | mdop-wh/pulumi-aws | 05bb32e9d694dde1c3b76d440fd2cd0344d23376 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_aws/gamelift/alias.py | mdop-wh/pulumi-aws | 05bb32e9d694dde1c3b76d440fd2cd0344d23376 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Dict, List, Mapping, Optional, Tuple, Union
from .. import _utilities, _tables
from . import outputs
from ._inputs import *
__all__ = ['Alias']
class Alias(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
description: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
routing_strategy: Optional[pulumi.Input[pulumi.InputType['AliasRoutingStrategyArgs']]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
Provides a Gamelift Alias resource.
## Example Usage
```python
import pulumi
import pulumi_aws as aws
example = aws.gamelift.Alias("example",
description="Example Description",
routing_strategy=aws.gamelift.AliasRoutingStrategyArgs(
message="Example Message",
type="TERMINAL",
))
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] description: Description of the alias.
:param pulumi.Input[str] name: Name of the alias.
:param pulumi.Input[pulumi.InputType['AliasRoutingStrategyArgs']] routing_strategy: Specifies the fleet and/or routing type to use for the alias.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Key-value map of resource tags
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['description'] = description
__props__['name'] = name
if routing_strategy is None:
raise TypeError("Missing required property 'routing_strategy'")
__props__['routing_strategy'] = routing_strategy
__props__['tags'] = tags
__props__['arn'] = None
super(Alias, __self__).__init__(
'aws:gamelift/alias:Alias',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
arn: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
routing_strategy: Optional[pulumi.Input[pulumi.InputType['AliasRoutingStrategyArgs']]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None) -> 'Alias':
"""
Get an existing Alias resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] arn: Alias ARN.
:param pulumi.Input[str] description: Description of the alias.
:param pulumi.Input[str] name: Name of the alias.
:param pulumi.Input[pulumi.InputType['AliasRoutingStrategyArgs']] routing_strategy: Specifies the fleet and/or routing type to use for the alias.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Key-value map of resource tags
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["arn"] = arn
__props__["description"] = description
__props__["name"] = name
__props__["routing_strategy"] = routing_strategy
__props__["tags"] = tags
return Alias(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def arn(self) -> pulumi.Output[str]:
"""
Alias ARN.
"""
return pulumi.get(self, "arn")
@property
@pulumi.getter
def description(self) -> pulumi.Output[Optional[str]]:
"""
Description of the alias.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Name of the alias.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="routingStrategy")
def routing_strategy(self) -> pulumi.Output['outputs.AliasRoutingStrategy']:
"""
Specifies the fleet and/or routing type to use for the alias.
"""
return pulumi.get(self, "routing_strategy")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
Key-value map of resource tags
"""
return pulumi.get(self, "tags")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| 39.1 | 153 | 0.626119 |
e520df72cf1a91619b43b65d589f057a0b0a83e8 | 39,396 | py | Python | kiali_qe/tests/test_istio_config_mtls.py | Hawkular-QE/kiali-qe-python | 24e058def1efd0a509a2b599901f4179dbf37583 | [
"Apache-2.0"
] | null | null | null | kiali_qe/tests/test_istio_config_mtls.py | Hawkular-QE/kiali-qe-python | 24e058def1efd0a509a2b599901f4179dbf37583 | [
"Apache-2.0"
] | 3 | 2018-03-28T17:11:13.000Z | 2018-03-28T17:55:08.000Z | kiali_qe/tests/test_istio_config_mtls.py | Hawkular-QE/kiali-qe-python | 24e058def1efd0a509a2b599901f4179dbf37583 | [
"Apache-2.0"
] | 2 | 2018-02-13T10:56:03.000Z | 2018-03-20T14:07:51.000Z | import pytest
from kiali_qe.tests import ValidationsTest, ConfigValidationObject, NamespaceTLSObject
from kiali_qe.utils.path import istio_objects_mtls_path
from kiali_qe.components.enums import MeshWideTLSType
from kiali_qe.components.error_codes import (
KIA0207,
KIA0501,
KIA0208,
KIA0205,
KIA0401,
KIA0206,
KIA0505,
KIA0506
)
'''
Tests are divided into groups using different services and namespaces. This way the group of tests
can be run in parallel.
'''
BOOKINFO = 'bookinfo'
ISTIO_SYSTEM = 'istio-system'
SCENARIO_1 = "scenario1.yaml"
SCENARIO_2 = "scenario2.yaml"
SCENARIO_3 = "scenario3.yaml"
SCENARIO_4 = "scenario4.yaml"
SCENARIO_5 = "scenario5.yaml"
SCENARIO_6 = "scenario6.yaml"
SCENARIO_7 = "scenario7.yaml"
SCENARIO_8 = "scenario8.yaml"
SCENARIO_9 = "scenario9.yaml"
SCENARIO_10 = "scenario10.yaml"
SCENARIO_11 = "scenario11.yaml"
SCENARIO_12 = "scenario12.yaml"
SCENARIO_13 = "scenario13.yaml"
SCENARIO_14 = "scenario14.yaml"
SCENARIO_15 = "scenario15.yaml"
SCENARIO_16 = "scenario16.yaml"
SCENARIO_17 = "scenario17.yaml"
SCENARIO_18 = "scenario18.yaml"
SCENARIO_19 = "scenario19.yaml"
SCENARIO_20 = "scenario20.yaml"
SCENARIO_21 = "scenario21.yaml"
SCENARIO_22 = "scenario22.yaml"
SCENARIO_23 = "scenario23.yaml"
SCENARIO_24 = "scenario24.yaml"
SCENARIO_25 = "scenario25.yaml"
SCENARIO_26 = "scenario26.yaml"
SCENARIO_27 = "scenario27.yaml"
SCENARIO_28 = "scenario28.yaml"
SCENARIO_29 = "scenario29.yaml"
@pytest.mark.p_group_last
def test_scenario1(kiali_client, openshift_client, browser):
""" PeerAuthentication is in permissive mode, it allows mTLS connections """
tests = ValidationsTest(
kiali_client=kiali_client, openshift_client=openshift_client, browser=browser,
objects_path=istio_objects_mtls_path.strpath)
tests.test_istio_objects(SCENARIO_1,
namespace=BOOKINFO,
config_validation_objects=[
ConfigValidationObject(
'DestinationRule', 'disable-mtls', namespace=BOOKINFO,
error_messages=[]),
ConfigValidationObject(
'PeerAuthentication', 'default', namespace=BOOKINFO,
error_messages=[])
],
tls_type=MeshWideTLSType.DISABLED,
namespace_tls_objects=[
NamespaceTLSObject(
'bookinfo',
MeshWideTLSType.PARTLY_ENABLED),
NamespaceTLSObject(
'istio-system',
MeshWideTLSType.DISABLED),
NamespaceTLSObject(
'default',
MeshWideTLSType.DISABLED or MeshWideTLSType.PARTLY_ENABLED)
])
@pytest.mark.p_group_last
def test_scenario2(kiali_client, openshift_client, browser):
""" PeerAuthentication explicitly asks for mTLS connections
but DestinationRule disables workload mtls connections
"""
tests = ValidationsTest(
kiali_client=kiali_client, openshift_client=openshift_client, browser=browser,
objects_path=istio_objects_mtls_path.strpath)
tests.test_istio_objects(
SCENARIO_2, namespace=BOOKINFO,
config_validation_objects=[
ConfigValidationObject(
'DestinationRule', 'disable-mtls',
namespace=BOOKINFO,
error_messages=[KIA0207]),
ConfigValidationObject(
'PeerAuthentication', 'default',
namespace=BOOKINFO,
error_messages=[KIA0501])
],
tls_type=MeshWideTLSType.DISABLED,
namespace_tls_objects=[
NamespaceTLSObject(
'bookinfo',
MeshWideTLSType.PARTLY_ENABLED),
NamespaceTLSObject(
'istio-system',
MeshWideTLSType.DISABLED),
NamespaceTLSObject(
'default',
MeshWideTLSType.DISABLED or MeshWideTLSType.PARTLY_ENABLED)
])
@pytest.mark.p_group_last
def test_scenario3(kiali_client, openshift_client, browser):
""" PeerAuthentication explicitly ask for mTLS connections
"""
tests = ValidationsTest(
kiali_client=kiali_client, openshift_client=openshift_client, browser=browser,
objects_path=istio_objects_mtls_path.strpath)
tests.test_istio_objects(
SCENARIO_3, namespace=None,
config_validation_objects=[
ConfigValidationObject(
'DestinationRule', 'disable-mtls',
namespace=BOOKINFO,
error_messages=[KIA0208]),
ConfigValidationObject(
'PeerAuthentication', 'default',
namespace='istio-system', error_messages=[]),
ConfigValidationObject(
'DestinationRule', 'default',
namespace='istio-system', error_messages=[])
],
tls_type=MeshWideTLSType.ENABLED,
namespace_tls_objects=[
NamespaceTLSObject(
'bookinfo',
MeshWideTLSType.PARTLY_ENABLED),
NamespaceTLSObject(
'istio-system',
MeshWideTLSType.ENABLED),
NamespaceTLSObject(
'default',
MeshWideTLSType.ENABLED)
])
@pytest.mark.p_group_last
def test_scenario4(kiali_client, openshift_client, browser):
""" PeerAuthentication allows non-mTLS connections in the service mesh
"""
tests = ValidationsTest(
kiali_client=kiali_client, openshift_client=openshift_client, browser=browser,
objects_path=istio_objects_mtls_path.strpath)
tests.test_istio_objects(
SCENARIO_4, namespace=None,
config_validation_objects=[
ConfigValidationObject(
'DestinationRule', 'disable-mtls',
namespace=BOOKINFO, error_messages=[]),
ConfigValidationObject(
'PeerAuthentication', 'default',
namespace='istio-system', error_messages=[]),
ConfigValidationObject(
'DestinationRule', 'default',
namespace='istio-system',
error_messages=[])
],
tls_type=MeshWideTLSType.PARTLY_ENABLED,
namespace_tls_objects=[
NamespaceTLSObject(
'bookinfo',
MeshWideTLSType.PARTLY_ENABLED),
NamespaceTLSObject(
'istio-system',
MeshWideTLSType.DISABLED),
NamespaceTLSObject(
'default',
MeshWideTLSType.DISABLED)
])
@pytest.mark.p_group_last
def test_scenario5(kiali_client, openshift_client, browser):
""" There aren't any PeerAuthentication defining mTLS settings
"""
tests = ValidationsTest(
kiali_client=kiali_client, openshift_client=openshift_client, browser=browser,
objects_path=istio_objects_mtls_path.strpath)
tests.test_istio_objects(
SCENARIO_5, namespace=None,
config_validation_objects=[
ConfigValidationObject(
'DestinationRule', 'disable-mtls',
namespace=BOOKINFO, error_messages=[])
],
tls_type=MeshWideTLSType.DISABLED,
namespace_tls_objects=[
NamespaceTLSObject(
'bookinfo',
MeshWideTLSType.PARTLY_ENABLED),
NamespaceTLSObject(
'istio-system',
MeshWideTLSType.DISABLED),
NamespaceTLSObject(
'default',
MeshWideTLSType.DISABLED)
])
@pytest.mark.p_group_last
def test_scenario6(kiali_client, openshift_client, browser):
""" Destination Rule valid: it doesn't define any mTLS setting
"""
tests = ValidationsTest(
kiali_client=kiali_client, openshift_client=openshift_client, browser=browser,
objects_path=istio_objects_mtls_path.strpath)
tests.test_istio_objects(
SCENARIO_6,
namespace=BOOKINFO,
config_validation_objects=[
ConfigValidationObject(
'DestinationRule', 'reviews', namespace=BOOKINFO,
error_messages=[]),
ConfigValidationObject(
'PeerAuthentication', 'default',
namespace=BOOKINFO,
error_messages=[KIA0501])
],
tls_type=MeshWideTLSType.DISABLED,
namespace_tls_objects=[
NamespaceTLSObject(
'bookinfo',
(MeshWideTLSType.PARTLY_ENABLED if not openshift_client.is_auto_mtls()
else MeshWideTLSType.ENABLED)),
NamespaceTLSObject(
'istio-system',
MeshWideTLSType.DISABLED),
NamespaceTLSObject(
'default',
MeshWideTLSType.DISABLED)
])
@pytest.mark.p_group_last
def test_scenario7(kiali_client, openshift_client, browser):
""" classic ns-wide mTLS config
"""
tests = ValidationsTest(
kiali_client=kiali_client, openshift_client=openshift_client, browser=browser,
objects_path=istio_objects_mtls_path.strpath)
tests.test_istio_objects(
SCENARIO_7, namespace=None,
config_validation_objects=[
ConfigValidationObject(
'DestinationRule',
'enable-mtls', namespace=BOOKINFO, error_messages=[]),
ConfigValidationObject(
'PeerAuthentication', 'default',
namespace=BOOKINFO, error_messages=[])
],
tls_type=MeshWideTLSType.DISABLED,
namespace_tls_objects=[
NamespaceTLSObject(
'bookinfo',
MeshWideTLSType.ENABLED),
NamespaceTLSObject(
'istio-system',
MeshWideTLSType.DISABLED),
NamespaceTLSObject(
'default',
MeshWideTLSType.DISABLED)
])
@pytest.mark.p_group_last
def test_scenario8(kiali_client, openshift_client, browser):
""" DR mesh-wide enables clients start mTLS connections
"""
tests = ValidationsTest(
kiali_client=kiali_client, openshift_client=openshift_client, browser=browser,
objects_path=istio_objects_mtls_path.strpath)
tests.test_istio_objects(
SCENARIO_8,
namespace=BOOKINFO,
config_validation_objects=[
ConfigValidationObject(
'DestinationRule', 'enable-mtls',
namespace=BOOKINFO,
error_messages=[KIA0205]),
ConfigValidationObject(
'PeerAuthentication', 'default', namespace=BOOKINFO,
error_messages=[])
],
tls_type=(MeshWideTLSType.PARTLY_ENABLED if not openshift_client.is_auto_mtls()
else MeshWideTLSType.ENABLED))
@pytest.mark.p_group_last
def test_scenario9(kiali_client, openshift_client, browser):
""" there isn't any Destination Rule enabling services start mTLS connection
"""
tests = ValidationsTest(
kiali_client=kiali_client, openshift_client=openshift_client, browser=browser,
objects_path=istio_objects_mtls_path.strpath)
tests.test_istio_objects(
SCENARIO_9,
namespace=BOOKINFO,
config_validation_objects=[
ConfigValidationObject(
'PeerAuthentication', 'default',
namespace=BOOKINFO,
error_messages=[KIA0501])
])
@pytest.mark.p_group_last
def test_scenario10(kiali_client, openshift_client, browser):
""" Permissive mode allow mTLS connections to services
"""
tests = ValidationsTest(
kiali_client=kiali_client, openshift_client=openshift_client, browser=browser,
objects_path=istio_objects_mtls_path.strpath)
tests.test_istio_objects(
SCENARIO_10, namespace=None,
config_validation_objects=[
ConfigValidationObject(
'DestinationRule', 'enable-mtls',
namespace=BOOKINFO, error_messages=[]),
ConfigValidationObject(
'PeerAuthentication', 'default',
namespace=BOOKINFO, error_messages=[])
])
@pytest.mark.p_group_last
def test_scenario11(kiali_client, openshift_client, browser):
""" STRICT mode allow only mTLS connections to services
"""
tests = ValidationsTest(
kiali_client=kiali_client, openshift_client=openshift_client, browser=browser,
objects_path=istio_objects_mtls_path.strpath)
tests.test_istio_objects(
SCENARIO_11, namespace=None,
config_validation_objects=[
ConfigValidationObject(
'DestinationRule', 'enable-mtls',
namespace=BOOKINFO, error_messages=[]),
ConfigValidationObject(
'PeerAuthentication', 'default',
namespace=BOOKINFO, error_messages=[])
])
@pytest.mark.p_group_last
def test_scenario12(kiali_client, openshift_client, browser):
""" PERMISSIVE mode allow mTLS connections to services to the whole mesh
"""
tests = ValidationsTest(
kiali_client=kiali_client, openshift_client=openshift_client, browser=browser,
objects_path=istio_objects_mtls_path.strpath)
tests.test_istio_objects(
SCENARIO_12, namespace=None,
config_validation_objects=[
ConfigValidationObject(
'DestinationRule', 'enable-mtls',
namespace=BOOKINFO, error_messages=[]),
ConfigValidationObject(
'PeerAuthentication', 'default',
namespace='istio-system', error_messages=[])
])
@pytest.mark.p_group_last
def test_scenario13(kiali_client, openshift_client, browser):
""" STRICT mode allow only mTLS connections to services to the whole service mesh
"""
tests = ValidationsTest(
kiali_client=kiali_client, openshift_client=openshift_client, browser=browser,
objects_path=istio_objects_mtls_path.strpath)
tests.test_istio_objects(
SCENARIO_13, namespace=None,
config_validation_objects=[
ConfigValidationObject(
'DestinationRule', 'enable-mtls',
namespace=BOOKINFO, error_messages=[]),
ConfigValidationObject(
'PeerAuthentication', 'default',
namespace='istio-system',
error_messages=[KIA0401])
],
tls_type=(MeshWideTLSType.PARTLY_ENABLED if not openshift_client.is_auto_mtls()
else MeshWideTLSType.ENABLED))
@pytest.mark.p_group_last
def test_scenario14(kiali_client, openshift_client, browser):
""" there isn't any policy enabling mTLS on service clients
"""
tests = ValidationsTest(
kiali_client=kiali_client, openshift_client=openshift_client, browser=browser,
objects_path=istio_objects_mtls_path.strpath)
tests.test_istio_objects(
SCENARIO_14, namespace=None,
config_validation_objects=[
ConfigValidationObject(
'DestinationRule', 'enable-mtls',
namespace=BOOKINFO,
error_messages=[KIA0206])
])
@pytest.mark.p_group_last
def test_scenario15(kiali_client, openshift_client, browser):
""" PeerAuthentication in STRICT mode + DestinationRule enabling mTLS mesh-wide (classic scenario)
PeerAuthentication ns-level in PERMISSIVE mode + DR disabling mTLS ns-wide.
"""
tests = ValidationsTest(
kiali_client=kiali_client, openshift_client=openshift_client, browser=browser,
objects_path=istio_objects_mtls_path.strpath)
tests.test_istio_objects(
SCENARIO_15, namespace=None,
config_validation_objects=[
ConfigValidationObject(
'DestinationRule', 'default',
namespace=BOOKINFO,
error_messages=[]),
ConfigValidationObject(
'PeerAuthentication', 'default',
namespace=BOOKINFO,
error_messages=[])
])
@pytest.mark.p_group_last
def test_scenario16(kiali_client, openshift_client, browser):
""" PeerAuthentication OK
"""
tests = ValidationsTest(
kiali_client=kiali_client, openshift_client=openshift_client, browser=browser,
objects_path=istio_objects_mtls_path.strpath)
tests.test_istio_objects(
SCENARIO_16, namespace=None,
config_validation_objects=[
],
tls_type=MeshWideTLSType.ENABLED,
namespace_tls_objects=[
NamespaceTLSObject(
'bookinfo',
MeshWideTLSType.ENABLED),
NamespaceTLSObject(
'istio-system',
MeshWideTLSType.ENABLED),
NamespaceTLSObject(
'default',
MeshWideTLSType.ENABLED)
])
@pytest.mark.p_group_last
def test_scenario17(kiali_client, openshift_client, browser):
""" Destination Rule valid: it doesn't define any mTLS setting
PeerAuth: STRICT
"""
tests = ValidationsTest(
kiali_client=kiali_client, openshift_client=openshift_client, browser=browser,
objects_path=istio_objects_mtls_path.strpath)
tests.test_istio_objects(
SCENARIO_17,
namespace=BOOKINFO,
config_validation_objects=[
ConfigValidationObject(
'DestinationRule', 'reviews', namespace=BOOKINFO,
error_messages=[]),
ConfigValidationObject(
'PeerAuthentication', 'default',
namespace=BOOKINFO,
error_messages=[KIA0501])
],
tls_type=MeshWideTLSType.DISABLED,
namespace_tls_objects=[
NamespaceTLSObject(
'bookinfo',
(MeshWideTLSType.PARTLY_ENABLED if not openshift_client.is_auto_mtls()
else MeshWideTLSType.ENABLED)),
NamespaceTLSObject(
'istio-system',
MeshWideTLSType.DISABLED),
NamespaceTLSObject(
'default',
MeshWideTLSType.DISABLED)
])
@pytest.mark.p_group_last
def test_scenario18(kiali_client, openshift_client, browser):
""" Destination Rule valid: ISTIO_MUTUAL
PeerAuth: PERMISSIVE
"""
tests = ValidationsTest(
kiali_client=kiali_client, openshift_client=openshift_client, browser=browser,
objects_path=istio_objects_mtls_path.strpath)
tests.test_istio_objects(
SCENARIO_18,
namespace=BOOKINFO,
config_validation_objects=[
ConfigValidationObject(
'DestinationRule', 'enable-mtls',
namespace=BOOKINFO,
error_messages=[]),
ConfigValidationObject(
'PeerAuthentication', 'default',
namespace=BOOKINFO,
error_messages=[])
],
tls_type=MeshWideTLSType.DISABLED,
namespace_tls_objects=[
NamespaceTLSObject(
'bookinfo',
MeshWideTLSType.PARTLY_ENABLED),
NamespaceTLSObject(
'istio-system',
MeshWideTLSType.DISABLED),
NamespaceTLSObject(
'default',
MeshWideTLSType.DISABLED)
])
@pytest.mark.p_group_last
def test_scenario19(kiali_client, openshift_client, browser):
""" Destination Rule valid: Empty
PeerAuth: DISABLE
"""
tests = ValidationsTest(
kiali_client=kiali_client, openshift_client=openshift_client, browser=browser,
objects_path=istio_objects_mtls_path.strpath)
tests.test_istio_objects(
SCENARIO_19,
namespace=BOOKINFO,
config_validation_objects=[
ConfigValidationObject(
'DestinationRule', 'reviews',
namespace=BOOKINFO,
error_messages=[]),
ConfigValidationObject(
'PeerAuthentication', 'default',
namespace=BOOKINFO,
error_messages=[])
],
tls_type=MeshWideTLSType.DISABLED,
namespace_tls_objects=[
NamespaceTLSObject(
'bookinfo',
(MeshWideTLSType.PARTLY_ENABLED if not openshift_client.is_auto_mtls()
else MeshWideTLSType.DISABLED)),
NamespaceTLSObject(
'istio-system',
MeshWideTLSType.DISABLED),
NamespaceTLSObject(
'default',
MeshWideTLSType.DISABLED)
])
@pytest.mark.p_group_last
def test_scenario20(kiali_client, openshift_client, browser):
""" Destination Rule valid: ISTIO_MUTUAL
PeerAuth: DISABLE
"""
tests = ValidationsTest(
kiali_client=kiali_client, openshift_client=openshift_client, browser=browser,
objects_path=istio_objects_mtls_path.strpath)
tests.test_istio_objects(
SCENARIO_20,
namespace=ISTIO_SYSTEM,
config_validation_objects=[
ConfigValidationObject(
'DestinationRule', 'default',
namespace=ISTIO_SYSTEM,
error_messages=[]),
ConfigValidationObject(
'PeerAuthentication', 'default',
namespace=ISTIO_SYSTEM,
error_messages=[])
],
tls_type=(MeshWideTLSType.PARTLY_ENABLED if not openshift_client.is_auto_mtls()
else MeshWideTLSType.DISABLED),
namespace_tls_objects=[
NamespaceTLSObject(
'bookinfo',
MeshWideTLSType.PARTLY_ENABLED),
NamespaceTLSObject(
'istio-system',
MeshWideTLSType.DISABLED),
NamespaceTLSObject(
'default',
MeshWideTLSType.DISABLED)
])
@pytest.mark.p_group_last
def test_scenario21(kiali_client, openshift_client, browser):
""" PeerAuthentication is DISABLE
DestinationRule is DISABLE
"""
tests = ValidationsTest(
kiali_client=kiali_client, openshift_client=openshift_client, browser=browser,
objects_path=istio_objects_mtls_path.strpath)
tests.test_istio_objects(SCENARIO_21,
namespace=BOOKINFO,
config_validation_objects=[
ConfigValidationObject(
'DestinationRule', 'disable-mtls', namespace=BOOKINFO,
error_messages=[]),
ConfigValidationObject(
'PeerAuthentication', 'default', namespace=BOOKINFO,
error_messages=[])
],
tls_type=MeshWideTLSType.DISABLED,
namespace_tls_objects=[
NamespaceTLSObject(
'bookinfo',
MeshWideTLSType.DISABLED),
NamespaceTLSObject(
'istio-system',
MeshWideTLSType.DISABLED),
NamespaceTLSObject(
'default',
MeshWideTLSType.DISABLED)
])
@pytest.mark.p_group_last
def test_scenario22(kiali_client, openshift_client, browser):
""" PeerAuthentication is DISABLE in namespace level
"""
tests = ValidationsTest(
kiali_client=kiali_client, openshift_client=openshift_client, browser=browser,
objects_path=istio_objects_mtls_path.strpath)
tests.test_istio_objects(SCENARIO_22,
config_validation_objects=[
ConfigValidationObject(
'DestinationRule', 'bookinfo-enable-mtls',
namespace=BOOKINFO,
error_messages=[KIA0206]),
ConfigValidationObject(
'PeerAuthentication', 'disable-mtls-bookinfo',
namespace=BOOKINFO,
error_messages=[KIA0505])
],
tls_type=MeshWideTLSType.DISABLED,
namespace_tls_objects=[
NamespaceTLSObject(
'bookinfo',
MeshWideTLSType.PARTLY_ENABLED),
NamespaceTLSObject(
'istio-system',
MeshWideTLSType.DISABLED),
NamespaceTLSObject(
'default',
MeshWideTLSType.DISABLED)
])
@pytest.mark.p_group_last
def test_scenario23(kiali_client, openshift_client, browser):
""" PeerAuthentication is DISABLE in mesh level
"""
tests = ValidationsTest(
kiali_client=kiali_client, openshift_client=openshift_client, browser=browser,
objects_path=istio_objects_mtls_path.strpath)
tests.test_istio_objects(SCENARIO_23,
config_validation_objects=[
ConfigValidationObject(
'DestinationRule', 'enable-mesh-mtls',
namespace=BOOKINFO,
error_messages=[KIA0205]),
ConfigValidationObject(
'PeerAuthentication', 'disable-mesh-mtls',
namespace=ISTIO_SYSTEM,
error_messages=[KIA0506])
],
tls_type=MeshWideTLSType.PARTLY_ENABLED,
namespace_tls_objects=[
NamespaceTLSObject(
'bookinfo',
MeshWideTLSType.DISABLED),
NamespaceTLSObject(
'istio-system',
MeshWideTLSType.DISABLED),
NamespaceTLSObject(
'default',
MeshWideTLSType.DISABLED)
])
@pytest.mark.p_group_last
def test_scenario24(kiali_client, openshift_client, browser):
""" DestinationRule: DISABLED at mesh-level
"""
tests = ValidationsTest(
kiali_client=kiali_client, openshift_client=openshift_client, browser=browser,
objects_path=istio_objects_mtls_path.strpath)
tests.test_istio_objects(SCENARIO_24,
config_validation_objects=[
ConfigValidationObject(
'DestinationRule', 'disable-mesh-mtls',
namespace=BOOKINFO,
error_messages=[KIA0208]),
ConfigValidationObject(
'PeerAuthentication', 'disable-mesh-mtls',
namespace=ISTIO_SYSTEM,
error_messages=[KIA0401])
],
tls_type=MeshWideTLSType.PARTLY_ENABLED,
namespace_tls_objects=[
NamespaceTLSObject(
'bookinfo',
MeshWideTLSType.DISABLED),
NamespaceTLSObject(
'istio-system',
MeshWideTLSType.DISABLED),
NamespaceTLSObject(
'default',
MeshWideTLSType.DISABLED)
])
@pytest.mark.p_group_last
def test_scenario25(kiali_client, openshift_client, browser):
""" PeerAuthentication is STRICT in mesh level but DISABLED in port level
"""
tests = ValidationsTest(
kiali_client=kiali_client, openshift_client=openshift_client, browser=browser,
objects_path=istio_objects_mtls_path.strpath)
tests.test_istio_objects(SCENARIO_25,
config_validation_objects=[
ConfigValidationObject(
'DestinationRule', 'enable-mesh-mtls',
namespace=BOOKINFO,
error_messages=[]),
ConfigValidationObject(
'PeerAuthentication', 'strict-mesh-mtls',
namespace=ISTIO_SYSTEM,
error_messages=[]),
ConfigValidationObject(
'PeerAuthentication', 'grafana-ports-mtls-disabled',
namespace=ISTIO_SYSTEM,
error_messages=[])
],
tls_type=MeshWideTLSType.ENABLED,
namespace_tls_objects=[
NamespaceTLSObject(
'bookinfo',
MeshWideTLSType.ENABLED),
NamespaceTLSObject(
'istio-system',
MeshWideTLSType.ENABLED),
NamespaceTLSObject(
'default',
MeshWideTLSType.ENABLED)
])
@pytest.mark.p_group_last
def test_scenario26(kiali_client, openshift_client, browser):
""" PeerAuthentication is PERMISSIVE in mesh level but STRICT in port level
"""
tests = ValidationsTest(
kiali_client=kiali_client, openshift_client=openshift_client, browser=browser,
objects_path=istio_objects_mtls_path.strpath)
tests.test_istio_objects(SCENARIO_26,
config_validation_objects=[
ConfigValidationObject(
'DestinationRule', 'enable-mesh-mtls',
namespace=BOOKINFO,
error_messages=[]),
ConfigValidationObject(
'PeerAuthentication', 'permissive-mesh-mtls',
namespace=ISTIO_SYSTEM,
error_messages=[]),
ConfigValidationObject(
'PeerAuthentication', 'grafana-ports-mtls-strict',
namespace=ISTIO_SYSTEM,
error_messages=[])
],
tls_type=MeshWideTLSType.PARTLY_ENABLED,
namespace_tls_objects=[
NamespaceTLSObject(
'bookinfo',
MeshWideTLSType.DISABLED),
NamespaceTLSObject(
'istio-system',
MeshWideTLSType.DISABLED),
NamespaceTLSObject(
'default',
MeshWideTLSType.DISABLED)
])
@pytest.mark.p_group_last
def test_scenario27(kiali_client, openshift_client, browser):
""" PeerAuthentication is PERMISSIVE in mesh level, Grafana UNSET but DISABLE in port level
"""
tests = ValidationsTest(
kiali_client=kiali_client, openshift_client=openshift_client, browser=browser,
objects_path=istio_objects_mtls_path.strpath)
tests.test_istio_objects(SCENARIO_27,
config_validation_objects=[
ConfigValidationObject(
'DestinationRule', 'enable-mesh-mtls',
namespace=BOOKINFO,
error_messages=[]),
ConfigValidationObject(
'PeerAuthentication', 'permissive-mesh-mtls',
namespace=ISTIO_SYSTEM,
error_messages=[]),
ConfigValidationObject(
'PeerAuthentication', 'grafana-unset-ports-mtls-disabled',
namespace=ISTIO_SYSTEM,
error_messages=[])
],
tls_type=MeshWideTLSType.PARTLY_ENABLED,
namespace_tls_objects=[
NamespaceTLSObject(
'bookinfo',
MeshWideTLSType.DISABLED),
NamespaceTLSObject(
'istio-system',
MeshWideTLSType.DISABLED),
NamespaceTLSObject(
'default',
MeshWideTLSType.DISABLED)
])
@pytest.mark.p_group_last
def test_scenario28(kiali_client, openshift_client, browser):
""" PeerAuthentication is set to STRICT at the workload level,
but set to PERMISSIVE at the mesh and namespace level
KIA0105 should not be displayed
"""
tests = ValidationsTest(
kiali_client=kiali_client, openshift_client=openshift_client, browser=browser,
objects_path=istio_objects_mtls_path.strpath)
tests.test_istio_objects(SCENARIO_28,
config_validation_objects=[
ConfigValidationObject(
'DestinationRule', 'details-mtls',
namespace=BOOKINFO,
error_messages=[]),
ConfigValidationObject(
'DestinationRule', 'ratings-mtls',
namespace=BOOKINFO,
error_messages=[]),
ConfigValidationObject(
'PeerAuthentication', 'default',
namespace=ISTIO_SYSTEM,
error_messages=[]),
ConfigValidationObject(
'PeerAuthentication', 'default-policy',
namespace=BOOKINFO,
error_messages=[]),
ConfigValidationObject(
'PeerAuthentication', 'details-policy',
namespace=BOOKINFO,
error_messages=[]),
ConfigValidationObject(
'PeerAuthentication', 'ratings-policy',
namespace=BOOKINFO,
error_messages=[]),
ConfigValidationObject(
'AuthorizationPolicy', 'ratings',
namespace=BOOKINFO,
error_messages=[])
],
tls_type=MeshWideTLSType.DISABLED,
namespace_tls_objects=[
NamespaceTLSObject(
'bookinfo',
(MeshWideTLSType.PARTLY_ENABLED if not
openshift_client.is_auto_mtls()
else MeshWideTLSType.DISABLED)),
NamespaceTLSObject(
'istio-system',
MeshWideTLSType.DISABLED)
])
@pytest.mark.p_group_last
def test_scenario29(kiali_client, openshift_client, browser):
""" Enable mtls at mesh-level (PeerAuthn + DR)
Disable mtls at ns-level (PA + DR)
No validations for DR/PA at NS-level
"""
tests = ValidationsTest(
kiali_client=kiali_client, openshift_client=openshift_client, browser=browser,
objects_path=istio_objects_mtls_path.strpath)
tests.test_istio_objects(SCENARIO_29,
config_validation_objects=[
ConfigValidationObject(
'DestinationRule', 'enable-mesh-mtls',
namespace=ISTIO_SYSTEM,
error_messages=[]),
ConfigValidationObject(
'DestinationRule', 'bookinfo-disable-mtls',
namespace=BOOKINFO,
error_messages=[]),
ConfigValidationObject(
'PeerAuthentication', 'disable-mtls-bookinfo',
namespace=BOOKINFO,
error_messages=[]),
ConfigValidationObject(
'PeerAuthentication', 'mtls-mesh',
namespace=ISTIO_SYSTEM,
error_messages=[])
],
tls_type=MeshWideTLSType.ENABLED,
namespace_tls_objects=[
NamespaceTLSObject(
'bookinfo', MeshWideTLSType.DISABLED),
NamespaceTLSObject(
'istio-system',
MeshWideTLSType.ENABLED)
])
| 40.909657 | 102 | 0.533201 |
abc0fa290cb953e3237f04d5240ffe2a79e49b93 | 3,883 | py | Python | Lib/fontTools/pens/reverseContourPen.py | anntzer/fonttools | 726cd67549956b985bbbe83e26fb0af9da59ddf7 | [
"MIT",
"BSD-3-Clause"
] | 2 | 2021-04-07T16:47:04.000Z | 2022-01-15T04:01:01.000Z | Lib/fontTools/pens/reverseContourPen.py | anntzer/fonttools | 726cd67549956b985bbbe83e26fb0af9da59ddf7 | [
"MIT",
"BSD-3-Clause"
] | 1 | 2020-05-22T09:01:31.000Z | 2020-05-22T09:47:18.000Z | Lib/fontTools/pens/reverseContourPen.py | anntzer/fonttools | 726cd67549956b985bbbe83e26fb0af9da59ddf7 | [
"MIT",
"BSD-3-Clause"
] | null | null | null | from fontTools.misc.py23 import *
from fontTools.misc.arrayTools import pairwise
from fontTools.pens.filterPen import ContourFilterPen
__all__ = ["reversedContour", "ReverseContourPen"]
class ReverseContourPen(ContourFilterPen):
"""Filter pen that passes outline data to another pen, but reversing
the winding direction of all contours. Components are simply passed
through unchanged.
Closed contours are reversed in such a way that the first point remains
the first point.
"""
def filterContour(self, contour):
return reversedContour(contour)
def reversedContour(contour):
""" Generator that takes a list of pen's (operator, operands) tuples,
and yields them with the winding direction reversed.
"""
if not contour:
return # nothing to do, stop iteration
# valid contours must have at least a starting and ending command,
# can't have one without the other
assert len(contour) > 1, "invalid contour"
# the type of the last command determines if the contour is closed
contourType = contour.pop()[0]
assert contourType in ("endPath", "closePath")
closed = contourType == "closePath"
firstType, firstPts = contour.pop(0)
assert firstType in ("moveTo", "qCurveTo"), (
"invalid initial segment type: %r" % firstType)
firstOnCurve = firstPts[-1]
if firstType == "qCurveTo":
        # special case for TrueType paths containing only off-curve points
assert firstOnCurve is None, (
"off-curve only paths must end with 'None'")
assert not contour, (
"only one qCurveTo allowed per off-curve path")
firstPts = ((firstPts[0],) + tuple(reversed(firstPts[1:-1])) +
(None,))
if not contour:
# contour contains only one segment, nothing to reverse
if firstType == "moveTo":
closed = False # single-point paths can't be closed
else:
closed = True # off-curve paths are closed by definition
yield firstType, firstPts
else:
lastType, lastPts = contour[-1]
lastOnCurve = lastPts[-1]
if closed:
# for closed paths, we keep the starting point
yield firstType, firstPts
if firstOnCurve != lastOnCurve:
# emit an implied line between the last and first points
yield "lineTo", (lastOnCurve,)
contour[-1] = (lastType,
tuple(lastPts[:-1]) + (firstOnCurve,))
if len(contour) > 1:
secondType, secondPts = contour[0]
else:
# contour has only two points, the second and last are the same
secondType, secondPts = lastType, lastPts
# if a lineTo follows the initial moveTo, after reversing it
# will be implied by the closePath, so we don't emit one;
# unless the lineTo and moveTo overlap, in which case we keep the
# duplicate points
if secondType == "lineTo" and firstPts != secondPts:
del contour[0]
if contour:
contour[-1] = (lastType,
tuple(lastPts[:-1]) + secondPts)
else:
# for open paths, the last point will become the first
yield firstType, (lastOnCurve,)
contour[-1] = (lastType, tuple(lastPts[:-1]) + (firstOnCurve,))
# we iterate over all segment pairs in reverse order, and yield
# each one with the off-curve points reversed (if any), and
# with the on-curve point of the following segment
for (curType, curPts), (_, nextPts) in pairwise(
contour, reverse=True):
yield curType, tuple(reversed(curPts[:-1])) + (nextPts[-1],)
yield "closePath" if closed else "endPath", ()
| 40.030928 | 79 | 0.611383 |
ab4f9c06d53a1b23da73796aa6f140cd7e9cbf4f | 688 | py | Python | netbox/extras/tests/test_registry.py | sourcery-ai-bot/netbox | 454971874004356654c7c6d07ce52b4498468b2d | [
"Apache-2.0"
] | null | null | null | netbox/extras/tests/test_registry.py | sourcery-ai-bot/netbox | 454971874004356654c7c6d07ce52b4498468b2d | [
"Apache-2.0"
] | null | null | null | netbox/extras/tests/test_registry.py | sourcery-ai-bot/netbox | 454971874004356654c7c6d07ce52b4498468b2d | [
"Apache-2.0"
] | null | null | null | from django.test import TestCase
from extras.registry import Registry
class RegistryTest(TestCase):
def test_add_store(self):
reg = Registry()
reg['foo'] = 123
self.assertEqual(reg['foo'], 123)
def test_manipulate_store(self):
reg = Registry()
reg['foo'] = [1, 2, 3]
self.assertListEqual(reg['foo'], [1, 2, 3])
def test_overwrite_store(self):
reg = Registry()
reg['foo'] = 123
with self.assertRaises(KeyError):
reg['foo'] = 456
def test_delete_store(self):
reg = Registry()
reg['foo'] = 123
with self.assertRaises(TypeError):
del(reg['foo'])
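# Illustrative sketch (an assumption, not the actual extras.registry
# implementation): a registry consistent with the tests above accepts a key
# once, refuses overwrites with KeyError, and refuses deletion with TypeError.
class _ExampleRegistry(dict):

    def __setitem__(self, key, value):
        if key in self:
            raise KeyError("Key '{}' is already defined".format(key))
        super(_ExampleRegistry, self).__setitem__(key, value)

    def __delitem__(self, key):
        raise TypeError("Deleting items from the registry is not permitted")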
| 21.5 | 51 | 0.569767 |
16b7938bf4e917bb4b59f3bf7d4bc0201f7f1f56 | 617 | py | Python | tools/dockerize/webportal/usr/lib/python2.7/site-packages/oslo/rootwrap/wrapper.py | foruy/openflow-multiopenstack | 74140b041ac25ed83898ff3998e8dcbed35572bb | [
"Apache-2.0"
] | 1 | 2019-09-11T11:56:19.000Z | 2019-09-11T11:56:19.000Z | tools/dockerize/webportal/usr/lib/python2.7/site-packages/oslo/rootwrap/wrapper.py | foruy/openflow-multiopenstack | 74140b041ac25ed83898ff3998e8dcbed35572bb | [
"Apache-2.0"
] | null | null | null | tools/dockerize/webportal/usr/lib/python2.7/site-packages/oslo/rootwrap/wrapper.py | foruy/openflow-multiopenstack | 74140b041ac25ed83898ff3998e8dcbed35572bb | [
"Apache-2.0"
] | null | null | null | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_rootwrap.wrapper import * # noqa
| 44.071429 | 78 | 0.729335 |
a0ab285e30f2da90ab3b8491915db1e00e209086 | 4,466 | py | Python | source/python/neuropod/backends/caffe/packager.py | judgeeeeee/neuropod | 6a614e2f09abdd9ccb73c46ecce16994c66c9e32 | [
"Apache-2.0"
] | null | null | null | source/python/neuropod/backends/caffe/packager.py | judgeeeeee/neuropod | 6a614e2f09abdd9ccb73c46ecce16994c66c9e32 | [
"Apache-2.0"
] | 8 | 2020-09-07T07:44:59.000Z | 2021-01-04T08:22:19.000Z | source/python/neuropod/backends/caffe/packager.py | judgeeeeee/neuropod | 6a614e2f09abdd9ccb73c46ecce16994c66c9e32 | [
"Apache-2.0"
] | 1 | 2020-09-18T01:42:25.000Z | 2020-09-18T01:42:25.000Z | # Copyright (c) 2020 UATC, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import json
import shutil
import caffe
from neuropod.utils.packaging_utils import packager
@packager(platform="caffe")
def create_caffe_neuropod(neuropod_path,
input_spec,
output_spec,
node_name_mapping,
prototxt,
caffemodel=None,
code_path_spec=None,
**kwargs):
"""
Packages a caffe model as a neuropod package.
{common_doc_pre}
:param node_name_mapping: Mapping from a neuropod input/output name to a blob in caffe.
!!! note ""
***Example***:
```
{
"x": "input0",
"y": "input1",
"out": "output",
}
```
:param code_path_spec: The folder paths of all the code that will be packaged. Note that
*.pyc files are ignored.
!!! note ""
This is specified as follows:
```
[{
"python_root": "/some/path/to/a/python/root",
"dirs_to_package": ["relative/path/to/package"]
}, ...]
```
    :param prototxt: Path to the deploy.prototxt file, which describes the network architecture used at deployment (not training) time
    :param caffemodel: Path to the serialized model weights as a binary protocol buffer (.caffemodel) file
{common_doc_post}
"""
# Make sure the inputs are valid
# Create a folder to store the model
neuropod_data_path = os.path.join(neuropod_path, "0", "data")
os.makedirs(neuropod_data_path)
# Copy the specified source code while preserving package paths
if code_path_spec is not None:
neuropod_code_path = os.path.join(neuropod_path, "0", "code")
for copy_spec in code_path_spec:
python_root = copy_spec["python_root"]
for dir_to_package in copy_spec["dirs_to_package"]:
if len(dir_to_package) == 0: continue
shutil.copytree(
os.path.join(python_root, dir_to_package),
os.path.join(neuropod_code_path, dir_to_package),
)
# Add the model to the neuropod
prototxt_path = os.path.join(neuropod_data_path, "model.prototxt")
caffemodel_path = os.path.join(neuropod_data_path, "model.caffemodel")
shutil.copyfile(prototxt, prototxt_path)
if caffemodel is not None:
shutil.copyfile(caffemodel, caffemodel_path)
# Make sure we have mappings for everything in the spec
expected_keys = set()
for spec in [input_spec, output_spec]:
for tensor in spec:
expected_keys.add(tensor["name"])
actual_keys = set(node_name_mapping.keys())
missing_keys = expected_keys - actual_keys
if len(missing_keys) > 0:
raise ValueError(
"Expected an item in `node_name_mapping` for every tensor in input_spec and output_spec. Missing: `{}`"
.format(missing_keys))
# We also need to save the node name mapping so we know how to run the model
    # This is caffe-specific config so it's not saved in the overall neuropod config
with open(os.path.join(neuropod_path, "0", "config.json"),
"w") as config_file:
json.dump(
{
"node_name_mapping": node_name_mapping,
},
config_file,
)
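# Illustrative call sketch (not part of the original module); the paths, blob
# names and tensor specs below are made-up, and the @packager decorator may
# expect additional common keyword arguments (e.g. a model name) not shown:
#
#     create_caffe_neuropod(
#         neuropod_path="/tmp/example_caffe_neuropod",
#         input_spec=[
#             {"name": "x", "dtype": "float32", "shape": (1, 10)},
#             {"name": "y", "dtype": "float32", "shape": (1, 10)},
#         ],
#         output_spec=[{"name": "out", "dtype": "float32", "shape": (1, 10)}],
#         node_name_mapping={"x": "input0", "y": "input1", "out": "output"},
#         prototxt="/path/to/deploy.prototxt",
#         caffemodel="/path/to/model.caffemodel",
#     )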
| 38.17094 | 121 | 0.549037 |
0b57db9baffacd576e110e0b8170e8f56293340f | 2,014 | py | Python | accounts/forms.py | Gareth-1987/mvp_landing_page | a2bfce0928d15f15f5cc3782848bda54d44903da | [
"MIT"
] | null | null | null | accounts/forms.py | Gareth-1987/mvp_landing_page | a2bfce0928d15f15f5cc3782848bda54d44903da | [
"MIT"
] | null | null | null | accounts/forms.py | Gareth-1987/mvp_landing_page | a2bfce0928d15f15f5cc3782848bda54d44903da | [
"MIT"
] | null | null | null | from django import forms
from django.contrib.auth import get_user_model
User = get_user_model()
class LoginForm(forms.Form):
username = forms.CharField(label='', widget=forms.TextInput(
attrs={'placeholder': 'Your username'}))
password = forms.CharField(label='', widget=forms.PasswordInput(
attrs={'placeholder': 'Your password'}))
# CONIDER DOING A CHECK USING #
# def clean_username(self):
# username = self.cleaned_data.get("username")
# # check username
# return username
class RegisterForm(forms.ModelForm):
username = forms.CharField(widget=forms.TextInput(
attrs={'placeholder': 'Your username'}))
email = forms.EmailField(widget=forms.EmailInput(
attrs={'placeholder': 'Your email'}))
password = forms.CharField(widget=forms.PasswordInput(
attrs={'placeholder': 'Your password'}))
password2 = forms.CharField(label='Confirm Password', widget=forms.PasswordInput(
attrs={'placeholder': 'Your password'}))
class Meta:
model = User
fields = [
'username',
'email'
]
def clean(self):
data = self.cleaned_data
password_1 = data.get("password")
password_2 = data.get("password2")
if password_1 != password_2:
# assign to non_field_error
#raise forms.ValidationError("Passwords must match.")
self.add_error("password", "Passwords must match.")
return data
def clean_username(self):
username = self.cleaned_data.get("username")
qs = User.objects.filter(username__iexact=username)
if qs.exists():
raise forms.ValidationError(f"{username} is taken. Try again")
return username
def clean_email(self):
email = self.cleaned_data.get("email")
qs = User.objects.filter(email__iexact=email)
if qs.exists():
raise forms.ValidationError(f"{email} is taken. Try again")
return email
| 33.566667 | 85 | 0.635055 |
ce0f8b7b9e65687806086113f865b9a50b1a3d65 | 750 | py | Python | foodplanapp/migrations/0020_auto_20220321_1354.py | 949027/food-plan | 382791059469058614bfd028d674ba1f69b25e2b | [
"MIT"
] | null | null | null | foodplanapp/migrations/0020_auto_20220321_1354.py | 949027/food-plan | 382791059469058614bfd028d674ba1f69b25e2b | [
"MIT"
] | null | null | null | foodplanapp/migrations/0020_auto_20220321_1354.py | 949027/food-plan | 382791059469058614bfd028d674ba1f69b25e2b | [
"MIT"
] | 1 | 2022-03-22T02:04:59.000Z | 2022-03-22T02:04:59.000Z | # Generated by Django 4.0.3 on 2022-03-21 10:54
import random
from django.db import migrations
class Migration(migrations.Migration):
def set_menutype_for_dishes(apps, schema_editor):
menu_types = [
"classic",
"low_calorie",
"vegan",
"keto",
]
Dishes = apps.get_model("foodplanapp", "Dish")
dishes_set = Dishes.objects.all()
for dish in dishes_set.iterator():
dish.menu_type = random.choice(menu_types)
dish.save()
dependencies = [
(
"foodplanapp",
"0019_remove_order_allergy1_remove_order_allergy2_and_more",
),
]
operations = [migrations.RunPython(set_menutype_for_dishes)]
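    # Illustrative note (not in the original migration): RunPython also accepts
    # a reverse callable, so the operation could be made explicitly reversible:
    #     migrations.RunPython(set_menutype_for_dishes, migrations.RunPython.noop)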
| 25 | 72 | 0.598667 |
dd98d9db322a1fed2abe71697a6f73e3820f0d22 | 7,306 | py | Python | scripts/sequence_clusters/expansion/compare_clusters.py | mahajrod/MAVR | 4db74dff7376a2ffe4426db720b241de9198f329 | [
"MIT"
] | 10 | 2015-04-28T14:15:04.000Z | 2021-03-15T00:07:38.000Z | scripts/sequence_clusters/expansion/compare_clusters.py | mahajrod/MAVR | 4db74dff7376a2ffe4426db720b241de9198f329 | [
"MIT"
] | null | null | null | scripts/sequence_clusters/expansion/compare_clusters.py | mahajrod/MAVR | 4db74dff7376a2ffe4426db720b241de9198f329 | [
"MIT"
] | 6 | 2017-03-16T22:38:41.000Z | 2021-08-11T00:22:52.000Z | #!/usr/bin/env python
__author__ = 'Sergei F. Kliver'
import argparse
from collections import OrderedDict
from RouToolPa.GeneralRoutines import FileRoutines
def read_cluster_file(filename, with_counts=False):
cluster_dict = OrderedDict()
noname_family_index = 1
with open(filename, "r") as in_fd:
for line in in_fd:
tmp = line.strip().split("\t")
if with_counts:
cluster_dict[tmp[0]] = (int(tmp[1]), set(tmp[2].split(",")))
else:
try:
tmp[1] = set(tmp[1].split(","))
except IndexError:
tmp = ["Noname_fam_%i" % noname_family_index, set(tmp[0].split(","))]
noname_family_index += 1
cluster_dict[tmp[0]] = (len(tmp[1]), tmp[1])
return cluster_dict
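# Illustrative note (not in the original script): based on the parsing above,
# each line of a cluster file is expected to look like one of the following.
# Without counts (with_counts=False):
#     FAM0001<TAB>geneA,geneB,geneC
# With counts (with_counts=True):
#     FAM0001<TAB>3<TAB>geneA,geneB,geneC
# A line holding only a comma-separated member list (no family id) is assigned
# a generated id of the form "Noname_fam_<n>".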
parser = argparse.ArgumentParser()
parser.add_argument("-r", "--reference_file", action="store", dest="ref_file", required=True,
help="Input file with reference clusters.")
parser.add_argument("-c", "--file_to_check", action="store", dest="file_to_check", required=True,
help="File with clusters to compare with reference")
parser.add_argument("-u", "--ref_file_contains_gene_counts", action="store_true", dest="ref_with_counts",
default=False,
help="Reference file contains gene counts")
parser.add_argument("-n", "--check_file_contains_gene_counts", action="store_true", dest="check_with_counts",
default=False,
help="File to check contains gene counts")
parser.add_argument("-o", "--out_dir", action="store", dest="out_dir", default="compare_dir",
help="Output directory")
args = parser.parse_args()
FileRoutines.safe_mkdir(args.out_dir)
ref_clusters_dict = read_cluster_file(args.ref_file, with_counts=args.ref_with_counts)
check_clusters_dict = read_cluster_file(args.file_to_check, with_counts=args.check_with_counts)
totally_in_ref = len(ref_clusters_dict)
totally = len(check_clusters_dict)
synonym_file = "synonym.t"
contained_fully_in_file = "contained_fully_in.t"
contained_in_file = "contained_in.t"
include_file = "include.t"
all_file = "all.t"
synonym_dict = OrderedDict()
contained_fully_in_dict = OrderedDict()
contained_in_dict = OrderedDict()
include_dict = OrderedDict()
index = 1
for ref_cluster_id in ref_clusters_dict:
for check_cluster_id in check_clusters_dict.keys():
common_clusters = ref_clusters_dict[ref_cluster_id][1] & check_clusters_dict[check_cluster_id][1]
common_clusters_len = len(common_clusters)
if not common_clusters:
continue
if (common_clusters_len == ref_clusters_dict[ref_cluster_id][0]) and (common_clusters_len == check_clusters_dict[check_cluster_id][0]):
synonym_dict[ref_cluster_id] = [check_cluster_id, ref_clusters_dict[ref_cluster_id][0], check_clusters_dict[check_cluster_id][0]]
check_clusters_dict.pop(check_cluster_id, None) # remove check_cluster_id from corresponding dict
break
if len(common_clusters) == ref_clusters_dict[ref_cluster_id][0]:
# reference family is fully contained in checked cluster
contained_fully_in_dict[ref_cluster_id] = [check_cluster_id, ref_clusters_dict[ref_cluster_id][0], check_clusters_dict[check_cluster_id][0]]
break
if len(common_clusters) == check_clusters_dict[check_cluster_id][0]:
# reference family includes checked cluster
if ref_cluster_id not in include_dict:
include_dict[ref_cluster_id] = [check_cluster_id]
else:
include_dict[ref_cluster_id].append(check_cluster_id)
continue
# reference family is contained in checked clusters
if ref_cluster_id not in contained_in_dict:
contained_in_dict[ref_cluster_id] = [check_cluster_id]
else:
contained_in_dict[ref_cluster_id].append(check_cluster_id)
else:
if ref_cluster_id in include_dict:
            # handles the case where part of the genes from the reference cluster were not included in the analysis
if len(include_dict[ref_cluster_id]) == 1 and (ref_cluster_id not in contained_in_dict):
#print ref_cluster_id
synonym_dict[ref_cluster_id] = include_dict.pop(ref_cluster_id)
#print synonym_dict[ref_cluster_id]
number_of_common_families = len(synonym_dict)
contained_fully_in_number = len(contained_fully_in_dict)
contained_in_number = len(contained_in_dict)
include_number = len(include_dict)
with open("%s/%s" % (args.out_dir, synonym_file), "w") as syn_fd:
for fam_id in synonym_dict:
#syn_fd.write("%s\t%s\t%i\t%i\n" % (fam_id, synonym_dict[fam_id][0], synonym_dict[fam_id][1], synonym_dict[fam_id][2]))
syn_fd.write("%s\t%s\n" % (fam_id, synonym_dict[fam_id][0]))
with open("%s/%s" % (args.out_dir, all_file), "w") as syn_fd:
for fam_id in ref_clusters_dict:
#syn_fd.write("%s\t%s\t%i\t%i\n" % (fam_id, synonym_dict[fam_id][0], synonym_dict[fam_id][1], synonym_dict[fam_id][2]))
#value = -1
if fam_id in synonym_dict:
value = synonym_dict[fam_id][0]
elif fam_id in contained_fully_in_dict:
# reference families fully contained in check families
value = "C_%s" % contained_fully_in_dict[fam_id][0]
elif fam_id in include_dict:
            # reference families that include several whole check families and, in some cases, parts of check families
value = "I_%s" % ",".join(include_dict[fam_id])
if fam_id in contained_in_dict:
value += ";M_%s" % ",".join(contained_in_dict[fam_id])
elif fam_id in contained_in_dict:
value = "M_%s" % ",".join(contained_in_dict[fam_id])
#if value == -1:
# value = "NF"
syn_fd.write("%s\t%s\n" % (fam_id, value))
with open("%s/%s" % (args.out_dir, contained_fully_in_file), "w") as syn_fd:
for fam_id in contained_fully_in_dict:
#syn_fd.write("%s\t%s\t%i\t%i\n" % (fam_id, contained_fully_in_dict[fam_id][0], contained_fully_in_dict[fam_id][1], contained_fully_in_dict[fam_id][2]))
syn_fd.write("%s\t%s\n" % (fam_id, contained_fully_in_dict[fam_id][0]))
with open("%s/%s" % (args.out_dir, contained_in_file), "w") as syn_fd:
for fam_id in contained_in_dict:
syn_fd.write("%s\t%s\n" % (fam_id, ",".join(contained_in_dict[fam_id])))
with open("%s/%s" % (args.out_dir, include_file), "w") as syn_fd:
for fam_id in include_dict:
syn_fd.write("%s\t%s\n" % (fam_id, ",".join(include_dict[fam_id])))
with open("%s/%s" % (args.out_dir, "stat.t"), "w") as syn_fd:
syn_fd.write("Total_ref\t%i\n" % totally_in_ref)
syn_fd.write("Total\t%i\n" % totally)
syn_fd.write("Synonyms\t%i\nContains_fully_in\t%i\nContains_in\t%i\nIncludes_fully\t%i\n" % (number_of_common_families,
contained_fully_in_number,
contained_in_number,
include_number)) | 50.041096 | 160 | 0.648782 |
b69e542b01316f996bce327cc511fa1bd7837beb | 2,151 | py | Python | app/entity/TrafficLight.py | ia2067/TRAPP | a4a7a6b04fda3c781c10d023e1fc7408ea522194 | [
"MIT"
] | null | null | null | app/entity/TrafficLight.py | ia2067/TRAPP | a4a7a6b04fda3c781c10d023e1fc7408ea522194 | [
"MIT"
] | null | null | null | app/entity/TrafficLight.py | ia2067/TRAPP | a4a7a6b04fda3c781c10d023e1fc7408ea522194 | [
"MIT"
] | null | null | null | import traci
from LogicWrapper import LogicWrapper
class TrafficLight:
""" a class to represent an individual traffic light """
def __init__(self, id, incLanes, intLanes, shape, x, y):
# the string id
self.id = id
self.incLanes = incLanes
self.intLanes = intLanes
self.shape = shape
self.x = x
self.y = y
def getControlledLanes(self):
""" wrapper method to get the lanes controlled by this traffic light"""
try:
return traci.trafficlight.getControlledLanes(self.id)
except:
return None
def getControlledLinks(self):
""" wrapper method to get the links controlled by this traffic light"""
try:
return traci.trafficlight.getControlledLinks(self.id)
except Exception as ex:
print(ex)
return None
def getProgram(self):
""" DO NOT USE THIS ONE"""
""" wrapper method to get the ID of the program currently running"""
try:
pID = traci.trafficlight.getProgram(self.id)
currPhaseIndex = traci.trafficlight.getPhase(self.id)
prog = traci.trafficlight.Logic(pID, None, currPhaseIndex)
return prog
except:
return None
def getAllProgramLogic(self):
""" wrapper method to get all the programs running on this TL"""
try:
progs = traci.trafficlight.getAllProgramLogics(self.id)
return progs
except Exception as ex:
print(ex)
return None
def setProgramLogic(self, logic):
""" wrapper method to set the program running on the TL"""
try:
traci.trafficlight.setProgramLogic(self.id, logic.logic)
except Exception as ex:
print(ex)
return False
return True
def getProgramLogic(self):
""" get just the first 'program' of the TL"""
progs = self.getAllProgramLogic()
if progs is not None and len(progs) > 0:
return LogicWrapper(progs[0], self.getControlledLanes())
else:
return None | 31.632353 | 79 | 0.595072 |
09e16d656b53c224f6c6292b4f86b1cc9a59314b | 632 | py | Python | tests/settings.py | dropseedlabs/django-test-recorder | 1bde4053a2a35997dd2c86e6c495d0bea35dc2bb | [
"MIT"
] | null | null | null | tests/settings.py | dropseedlabs/django-test-recorder | 1bde4053a2a35997dd2c86e6c495d0bea35dc2bb | [
"MIT"
] | null | null | null | tests/settings.py | dropseedlabs/django-test-recorder | 1bde4053a2a35997dd2c86e6c495d0bea35dc2bb | [
"MIT"
] | null | null | null | # -*- coding: utf-8
from __future__ import unicode_literals, absolute_import
import django
DEBUG = True
USE_TZ = True
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = "77777777777777777777777777777777777777777777777777"
DATABASES = {
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": ":memory:",
}
}
ROOT_URLCONF = "tests.urls"
INSTALLED_APPS = [
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sites",
# "test_recorder",
]
SITE_ID = 1
if django.VERSION >= (1, 10):
MIDDLEWARE = ()
else:
MIDDLEWARE_CLASSES = ()
| 18.588235 | 66 | 0.674051 |
63a0052f65c1422cef6ef1c3816392c4f05a01d0 | 12,061 | py | Python | herald/base.py | jproffitt/django-herald | 3a5aaa395c64c71ffff524b29928b835de66debe | [
"MIT"
] | null | null | null | herald/base.py | jproffitt/django-herald | 3a5aaa395c64c71ffff524b29928b835de66debe | [
"MIT"
] | null | null | null | herald/base.py | jproffitt/django-herald | 3a5aaa395c64c71ffff524b29928b835de66debe | [
"MIT"
] | null | null | null | """
Base notification classes
"""
import json
from email.mime.base import MIMEBase
from mimetypes import guess_type
import jsonpickle
import re
import six
from django.conf import settings
from django.contrib.sites.models import Site
from django.core.mail import EmailMultiAlternatives
from django.template import TemplateDoesNotExist
from django.template.loader import render_to_string
from django.utils import timezone
from django.core.files import File
from .models import SentNotification
class NotificationBase(object):
"""
base class for sending notifications
"""
render_types = []
template_name = None
context = None
user = None
can_disable = True
verbose_name = None
def get_context_data(self):
"""
:return: the context data for rendering the email or text template
"""
context = self.context or {}
site = Site.objects.get_current()
context['base_url'] = 'http://' + site.domain
return context
@classmethod
def get_verbose_name(cls):
if cls.verbose_name:
return cls.verbose_name
else:
return re.sub(
r'((?<=[a-z])[A-Z]|(?<!\A)[A-Z](?=[a-z]))',
r' \1',
cls.__name__
)
@classmethod
def get_class_path(cls):
return '{}.{}'.format(cls.__module__, cls.__name__)
def send(self, raise_exception=False, user=None):
"""
Handles the preparing the notification for sending. Called to trigger the send from code.
If raise_exception is True, it will raise any exceptions rather than simply logging them.
returns boolean whether or not the notification was sent successfully
"""
context = self.get_context_data()
recipients = self.get_recipients()
if 'text' in self.render_types:
text_content = self.render('text', context)
else:
text_content = None
if 'html' in self.render_types:
html_content = self.render('html', context)
else:
html_content = None
sent_from = self.get_sent_from()
subject = self.get_subject()
extra_data = self.get_extra_data()
sent_notification = SentNotification(
recipients=','.join(recipients),
text_content=text_content,
html_content=html_content,
sent_from=sent_from,
subject=subject,
extra_data=json.dumps(extra_data) if extra_data else None,
notification_class=self.get_class_path(),
attachments=self._get_encoded_attachments(),
user=user,
)
return self.resend(sent_notification, raise_exception=raise_exception)
def _get_encoded_attachments(self):
attachments = self.get_attachments()
new_attachments = []
for attachment in attachments or []:
if isinstance(attachment, File):
# cannot do with attachment.open() since django 1.11 doesn't support that
attachment.open()
new_attachments.append((attachment.name, attachment.read(), guess_type(attachment.name)[0]))
attachment.close()
else:
new_attachments.append(attachment)
return jsonpickle.dumps(new_attachments)
@staticmethod
def _delete_expired_notifications():
"""
This deletes any notifications that have passed the retention time setting
"""
retention_time = getattr(settings, 'HERALD_NOTIFICATION_RETENTION_TIME', None)
if not retention_time:
return
cutoff_date = timezone.now() - retention_time
notifications = SentNotification.objects.filter(date_sent__lt=cutoff_date)
count = notifications.delete()
print('Deleted {} expired notifications.'.format(count))
def get_recipients(self):
"""
        Returns a list of recipients; the subclass defines what these are (emails, phone numbers, etc.).
"""
raise NotImplementedError('Must implement get_recipients.')
def get_extra_data(self):
"""
Returns a dictionary of extra data to be stored, and used for sending.
MUST BE JSON SERIALIZABLE
"""
return {}
def get_sent_from(self):
"""
Returns a "sent from" string. However the subclass defines that. (email, phone number, etc)
"""
        raise NotImplementedError('Must implement get_sent_from.')
def get_subject(self):
"""
Returns a subject string. Optional.
"""
return None
def get_attachments(self):
"""
Return a list of attachments or None.
This only works with email.
"""
return None
def render(self, render_type, context):
"""
Renders the template
:param render_type: the content type to render
:param context: context data dictionary
:return: the rendered content
"""
assert render_type in self.render_types, 'Invalid Render Type'
try:
content = render_to_string('herald/{}/{}.{}'.format(
render_type,
self.template_name,
'txt' if render_type == 'text' else render_type
), context)
except TemplateDoesNotExist:
content = None
if settings.DEBUG:
raise
return content
@staticmethod
def get_demo_args():
"""
Returns iterable of arguments needed to initialize notification for demo purposes
Usually you want to generate dummy data here for testing
"""
return []
@classmethod
def resend(cls, sent_notification, raise_exception=False):
"""
Takes a saved sent_notification and sends it again.
        Returns a boolean indicating whether or not the notification was sent successfully.
"""
# handle skipping a notification based on user preference
if hasattr(sent_notification.user, 'usernotification'):
notifications = sent_notification.user.usernotification
if notifications.disabled_notifications.filter(notification_class=cls.get_class_path()).exists():
sent_notification.date_sent = timezone.now()
sent_notification.status = sent_notification.STATUS_USER_DISABLED
sent_notification.save()
return True
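        # attempt the actual send; failures are recorded on the SentNotification instead of propagating (unless raise_exception)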
try:
cls._send(
sent_notification.get_recipients(),
sent_notification.text_content,
sent_notification.html_content,
sent_notification.sent_from,
sent_notification.subject,
sent_notification.get_extra_data(),
sent_notification.get_attachments(),
)
sent_notification.status = sent_notification.STATUS_SUCCESS
except Exception as exc: # pylint: disable=W0703
# we want to handle any exception whatsoever
sent_notification.status = sent_notification.STATUS_FAILED
sent_notification.error_message = six.text_type(exc)
if raise_exception:
raise exc
sent_notification.date_sent = timezone.now()
sent_notification.save()
cls._delete_expired_notifications()
return sent_notification.status == sent_notification.STATUS_SUCCESS
@staticmethod
def _send(recipients, text_content=None, html_content=None, sent_from=None, subject=None, extra_data=None,
attachments=None):
"""
Handles the actual sending of the notification. Sub classes must override this
"""
        raise NotImplementedError('Must implement _send.')
class EmailNotification(NotificationBase):
"""
Base class for email notifications
"""
render_types = ['text', 'html']
from_email = None
subject = None
to_emails = None
bcc = None
cc = None # pylint: disable=C0103
headers = None
reply_to = None
attachments = None
def get_context_data(self):
context = super(EmailNotification, self).get_context_data()
context['subject'] = self.subject
return context
def get_recipients(self):
return self.to_emails
def get_sent_from(self):
from_email = self.from_email
if not from_email:
from_email = settings.DEFAULT_FROM_EMAIL
return from_email
def get_subject(self):
return self.subject
def get_extra_data(self):
extra_data = {}
if self.bcc:
extra_data['bcc'] = self.bcc
if self.cc:
extra_data['cc'] = self.cc
if self.headers:
extra_data['headers'] = self.headers
if self.reply_to:
extra_data['reply_to'] = self.reply_to
return extra_data
def get_attachments(self):
"""
Return a list of attachments or None.
This only works with email.
"""
return self.attachments
@staticmethod
def _send(recipients, text_content=None, html_content=None, sent_from=None, subject=None, extra_data=None,
attachments=None):
extra_data = extra_data or {}
mail = EmailMultiAlternatives(
subject=subject,
body=text_content,
from_email=sent_from,
to=recipients,
bcc=extra_data.get('bcc', None),
headers=extra_data.get('headers', None),
cc=extra_data.get('cc', None),
reply_to=extra_data.get('reply_to', None),
)
if html_content:
mail.attach_alternative(html_content, 'text/html')
for attachment in (attachments or []):
# All mimebase attachments must have a Content-ID or Content-Disposition header
            # or they will show up as unnamed attachments
if isinstance(attachment, MIMEBase):
if attachment.get('Content-ID', False):
# if you are sending attachment with content id,
# subtype must be 'related'.
mail.mixed_subtype = 'related'
mail.attach(attachment)
else:
mail.attach(*attachment)
mail.send()
class TwilioTextNotification(NotificationBase):
"""
Base class for text notifications.
Uses twilio
"""
render_types = ['text']
from_number = None
to_number = None
def get_recipients(self):
return [self.to_number]
def get_sent_from(self):
from_number = self.from_number
if not from_number:
try:
from_number = settings.TWILIO_DEFAULT_FROM_NUMBER
except AttributeError:
raise Exception(
'TWILIO_DEFAULT_FROM_NUMBER setting is required for sending a TwilioTextNotification'
)
return from_number
@staticmethod
def _send(recipients, text_content=None, html_content=None, sent_from=None, subject=None, extra_data=None,
attachments=None):
try:
# twilio version 6
from twilio.rest import Client
except ImportError:
try:
                # twilio version < 6
from twilio.rest import TwilioRestClient as Client
except ImportError:
raise Exception('Twilio is required for sending a TwilioTextNotification.')
try:
account_sid = settings.TWILIO_ACCOUNT_SID
auth_token = settings.TWILIO_AUTH_TOKEN
except AttributeError:
raise Exception(
'TWILIO_ACCOUNT_SID and TWILIO_AUTH_TOKEN settings are required for sending a TwilioTextNotification'
)
client = Client(account_sid, auth_token)
client.messages.create(
body=text_content,
to=recipients[0],
from_=sent_from
)
| 29.92804 | 117 | 0.613879 |
a323944f67399fcbe7e1ebb0167183883de0dd6b | 219 | py | Python | leetCode/algorithms/easy/shuffle_string.py | ferhatelmas/algo | a7149c7a605708bc01a5cd30bf5455644cefd04d | [
"WTFPL"
] | 25 | 2015-01-21T16:39:18.000Z | 2021-05-24T07:01:24.000Z | leetCode/algorithms/easy/shuffle_string.py | gauravsingh58/algo | 397859a53429e7a585e5f6964ad24146c6261326 | [
"WTFPL"
] | 2 | 2020-09-30T19:39:36.000Z | 2020-10-01T17:15:16.000Z | leetCode/algorithms/easy/shuffle_string.py | ferhatelmas/algo | a7149c7a605708bc01a5cd30bf5455644cefd04d | [
"WTFPL"
] | 15 | 2015-01-21T16:39:27.000Z | 2020-10-01T17:00:22.000Z | from operator import itemgetter
from typing import List
class Solution:
def restoreString(self, s: str, indices: List[int]) -> str:
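        # sort the (char, target_index) pairs by target index and join the characters in that order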
return "".join(e for e, i in sorted(zip(s, indices), key=itemgetter(1)))
| 27.375 | 80 | 0.689498 |
9813fb8afddc4fc777c5e503736a44d6d39fb09e | 2,014 | py | Python | util/util.py | giakou4/MNIST_classification | fdffb36e59a3a90cd06ef3db8feadf6d0d71b03d | [
"MIT"
] | 5 | 2021-11-20T18:40:14.000Z | 2022-02-22T19:57:19.000Z | util/util.py | giakou4/MNIST_classification | fdffb36e59a3a90cd06ef3db8feadf6d0d71b03d | [
"MIT"
] | 1 | 2021-11-22T08:35:19.000Z | 2021-11-23T11:41:17.000Z | util/util.py | giakou4/MNIST_classification | fdffb36e59a3a90cd06ef3db8feadf6d0d71b03d | [
"MIT"
] | null | null | null | from __future__ import print_function
import math
import numpy as np
import torch
import torch.optim as optim
class TwoCropTransform:
"""Create two crops of the same image"""
def __init__(self, transform):
self.transform = transform
def __call__(self, x):
return [self.transform(x), self.transform(x)]
def accuracy(output, target, topk=(1,)):
"""Computes the accuracy over the k top predictions for the specified values of k"""
with torch.no_grad():
maxk = max(topk)
batch_size = target.size(0)
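        # take the indices of the top-k predictions and mark which of them match the targets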
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
def adjust_learning_rate(args, optimizer, epoch):
lr = args.learning_rate
if args.cosine:
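        # cosine annealing: decay the lr smoothly from the base value towards eta_min over the training epochs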
eta_min = lr * (args.lr_decay_rate ** 3)
lr = eta_min + (lr - eta_min) * (
1 + math.cos(math.pi * epoch / args.num_epochs)) / 2
else:
steps = np.sum(epoch > np.asarray(args.lr_decay_epochs))
if steps > 0:
lr = lr * (args.lr_decay_rate ** steps)
for param_group in optimizer.param_groups:
param_group['lr'] = lr
def warmup_learning_rate(args, epoch, batch_id, total_batches, optimizer):
if args.warm and epoch <= args.warm_epochs:
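        # linear warm-up: interpolate the lr from warmup_from to warmup_to over the first warm_epochs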
p = (batch_id + (epoch - 1) * total_batches) / \
(args.warm_epochs * total_batches)
lr = args.warmup_from + p * (args.warmup_to - args.warmup_from)
for param_group in optimizer.param_groups:
param_group['lr'] = lr
def save_model(model, optimizer, epoch, save_file):
print('\n==> Saving...')
state = {
'model': model.state_dict(),
'optimizer': optimizer.state_dict(),
'epoch': epoch,
}
torch.save(state, save_file)
del state
| 30.059701 | 88 | 0.612711 |
d3e36ce817401f46adccf705ce7e65ca3970a8aa | 783 | py | Python | hacktheback/forms/managers.py | hackthevalley/hack-the-back | a418f2d2751656fed76d0b8c95c8e2a060525e78 | [
"MIT"
] | null | null | null | hacktheback/forms/managers.py | hackthevalley/hack-the-back | a418f2d2751656fed76d0b8c95c8e2a060525e78 | [
"MIT"
] | null | null | null | hacktheback/forms/managers.py | hackthevalley/hack-the-back | a418f2d2751656fed76d0b8c95c8e2a060525e78 | [
"MIT"
] | null | null | null | from django.db import models
from django.utils import timezone
class FormManager(models.Manager):
def miscellaneous(self):
return self.filter(type=self.model.FormType.MISCELLANEOUS)
def hacker_application(self):
return self.filter(type=self.model.FormType.HACKER_APPLICATION)
def viewable_hacker_application(self):
"""
Hacker application form that is viewable.
"""
return self.hacker_application().filter(
is_draft=False,
)
def open_hacker_application(self):
"""
Hacker application form that is open to submissions.
"""
return self.viewable_hacker_application().filter(
start_at__lte=timezone.now(),
end_at__gte=timezone.now(),
)
| 27.964286 | 71 | 0.652618 |
6e726ec5639807f12559a135648343687c408c44 | 654 | py | Python | hw3/wuclient.py | kirk-walker/csc321 | 6f8b25e68923fc897c0de05d79d8e13ca448822e | [
"MIT"
] | null | null | null | hw3/wuclient.py | kirk-walker/csc321 | 6f8b25e68923fc897c0de05d79d8e13ca448822e | [
"MIT"
] | null | null | null | hw3/wuclient.py | kirk-walker/csc321 | 6f8b25e68923fc897c0de05d79d8e13ca448822e | [
"MIT"
] | null | null | null | import sys
import zmq
# Socket to talk to server
context = zmq.Context()
socket = context.socket(zmq.SUB)
print("Collecting updates from weather server...")
socket.connect("tcp://node00:5556")
# Subscribe to zipcode, default is NYC, 10001
zip_filter = sys.argv[1] if len(sys.argv) > 1 else "10001"
socket.setsockopt_string(zmq.SUBSCRIBE, zip_filter)
# Process 5 updates
total_temp = 0
for update_nbr in range(5):
string = socket.recv_string()
zipcode, temperature, relhumidity = string.split()
total_temp += int(temperature)
print((f"Average temperature for zipcode "
f"'{zip_filter}' was {total_temp / (update_nbr+1)} F")) | 27.25 | 62 | 0.717125 |
c8b8d37f17cfbbe6116712cc2218fd7bfad0d596 | 4,401 | py | Python | sideboard/lib/_threads.py | EliAndrewC/sideboard | 81f0099f4c03e7abb5856e046539aa033ecf04f9 | [
"BSD-3-Clause"
] | null | null | null | sideboard/lib/_threads.py | EliAndrewC/sideboard | 81f0099f4c03e7abb5856e046539aa033ecf04f9 | [
"BSD-3-Clause"
] | null | null | null | sideboard/lib/_threads.py | EliAndrewC/sideboard | 81f0099f4c03e7abb5856e046539aa033ecf04f9 | [
"BSD-3-Clause"
] | null | null | null | from __future__ import unicode_literals
import time
import heapq
from warnings import warn
from threading import Thread, Timer, Event, Lock
from six.moves.queue import Queue, Empty
from sideboard.lib import log, on_startup, on_shutdown
class DaemonTask(object):
def __init__(self, func, interval=0.1, threads=1):
self.lock = Lock()
self.threads = []
self.stopped = Event()
self.func, self.interval, self.thread_count = func, interval, threads
on_startup(self.start)
on_shutdown(self.stop)
@property
def running(self):
return any(t.is_alive() for t in self.threads)
def run(self):
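        # keep invoking the wrapped function until the stop event is set, waiting self.interval seconds between calls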
while not self.stopped.is_set():
try:
self.func()
except:
log.error('unexpected error', exc_info=True)
if self.interval:
self.stopped.wait(self.interval)
def start(self):
with self.lock:
if not self.running:
self.stopped.clear()
del self.threads[:]
for i in range(self.thread_count):
t = Thread(target = self.run)
t.name = '{}-{}'.format(self.func.__name__, i + 1)
t.daemon = True
t.start()
self.threads.append(t)
def stop(self):
with self.lock:
if self.running:
self.stopped.set()
for i in range(50):
self.threads[:] = [t for t in self.threads if t.is_alive()]
if self.threads:
time.sleep(0.1)
else:
break
else:
log.warning('not all daemons have been joined: {}', self.threads)
del self.threads[:]
class TimeDelayQueue(Queue):
def __init__(self, maxsize=0):
self.delayed = []
Queue.__init__(self, maxsize)
self.task = DaemonTask(self._put_and_notify)
def put(self, item, block=True, timeout=None, delay=0):
Queue.put(self, (delay, item), block, timeout)
def _put(self, item):
delay, item = item
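        # delayed items are parked on a heap; the background task moves them into the queue once their scheduled time arrives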
if delay:
if self.task.running:
heapq.heappush(self.delayed, (time.time() + delay, item))
else:
message = 'TimeDelayQueue.put called with a delay parameter without background task having been started'
log.warning(message)
warn(message)
else:
Queue._put(self, item)
def _put_and_notify(self):
with self.not_empty:
while self.delayed:
when, item = heapq.heappop(self.delayed)
if when <= time.time():
Queue._put(self, item)
self.not_empty.notify()
else:
heapq.heappush(self.delayed, (when, item))
break
class Caller(DaemonTask):
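    """Daemon task that repeatedly calls a single function with queued (optionally delayed) arguments."""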
def __init__(self, func, interval=0, threads=1):
self.q = TimeDelayQueue()
DaemonTask.__init__(self, self.call, interval=interval, threads=threads)
self.callee = func
def call(self):
try:
args, kwargs = self.q.get(timeout = 0.1)
self.callee(*args, **kwargs)
except Empty:
pass
def start(self):
self.q.task.start()
DaemonTask.start(self)
def stop(self):
self.q.task.stop()
DaemonTask.stop(self)
def defer(self, *args, **kwargs):
self.q.put([args, kwargs])
def delayed(self, delay, *args, **kwargs):
self.q.put([args, kwargs], delay=delay)
class GenericCaller(DaemonTask):
def __init__(self, interval=0, threads=1):
DaemonTask.__init__(self, self.call, interval=interval, threads=threads)
self.q = TimeDelayQueue()
def call(self):
try:
func, args, kwargs = self.q.get(timeout = 0.1)
func(*args, **kwargs)
except Empty:
pass
def start(self):
self.q.task.start()
DaemonTask.start(self)
def stop(self):
self.q.task.stop()
DaemonTask.stop(self)
def defer(self, func, *args, **kwargs):
self.q.put([func, args, kwargs])
def delayed(self, delay, func, *args, **kwargs):
self.q.put([func, args, kwargs], delay=delay)
| 29.736486 | 120 | 0.540786 |
e7668a534c69e87ad7e109b382224ba6bcb6277f | 1,758 | py | Python | Lib/site-packages/graphviz/__init__.py | caiyongji/tf2.3.1-py3.7.9-full-built | ace4efcbf05b2b494388739718a18c13eab83c71 | [
"CNRI-Python-GPL-Compatible"
] | null | null | null | Lib/site-packages/graphviz/__init__.py | caiyongji/tf2.3.1-py3.7.9-full-built | ace4efcbf05b2b494388739718a18c13eab83c71 | [
"CNRI-Python-GPL-Compatible"
] | null | null | null | Lib/site-packages/graphviz/__init__.py | caiyongji/tf2.3.1-py3.7.9-full-built | ace4efcbf05b2b494388739718a18c13eab83c71 | [
"CNRI-Python-GPL-Compatible"
] | null | null | null | # graphviz - create dot, save, render, view
"""Assemble DOT source code and render it with Graphviz.
>>> dot = Digraph(comment='The Round Table')
>>> dot.node('A', 'King Arthur')
>>> dot.node('B', 'Sir Bedevere the Wise')
>>> dot.node('L', 'Sir Lancelot the Brave')
>>> dot.edges(['AB', 'AL'])
>>> dot.edge('B', 'L', constraint='false')
>>> print(dot) #doctest: +NORMALIZE_WHITESPACE
// The Round Table
digraph {
A [label="King Arthur"]
B [label="Sir Bedevere the Wise"]
L [label="Sir Lancelot the Brave"]
A -> B
A -> L
B -> L [constraint=false]
}
"""
from .dot import Graph, Digraph
from .files import Source
from .lang import escape, nohtml
from .backend import (render, pipe, version, view,
ENGINES, FORMATS, RENDERERS, FORMATTERS,
ExecutableNotFound, RequiredArgumentError)
__all__ = [
'Graph', 'Digraph',
'Source',
'escape', 'nohtml',
'render', 'pipe', 'version', 'view',
'ENGINES', 'FORMATS', 'RENDERERS', 'FORMATTERS',
'ExecutableNotFound', 'RequiredArgumentError',
]
__title__ = 'graphviz'
__version__ = '0.15'
__author__ = 'Sebastian Bank <sebastian.bank@uni-leipzig.de>'
__license__ = 'MIT, see LICENSE.txt'
__copyright__ = 'Copyright (c) 2013-2020 Sebastian Bank'
#: Set of known layout commands used for rendering (``'dot'``, ``'neato'``, ...)
ENGINES = ENGINES
#: Set of known output formats for rendering (``'pdf'``, ``'png'``, ...)
FORMATS = FORMATS
#: Set of known output formatters for rendering (``'cairo'``, ``'gd'``, ...)
FORMATTERS = FORMATTERS
#: Set of known output renderers for rendering (``'cairo'``, ``'gd'``, ...)
RENDERERS = RENDERERS
ExecutableNotFound = ExecutableNotFound
RequiredArgumentError = RequiredArgumentError
| 27.46875 | 80 | 0.646758 |
0255d3720ffb06977a6ab1b927fd479410e3e991 | 6,805 | py | Python | test/continuous/test_sac_with_il.py | edieson/tianshou | 679a0ce9ad2f7090b5a642c71dd7d9babf318fb8 | [
"MIT"
] | null | null | null | test/continuous/test_sac_with_il.py | edieson/tianshou | 679a0ce9ad2f7090b5a642c71dd7d9babf318fb8 | [
"MIT"
] | null | null | null | test/continuous/test_sac_with_il.py | edieson/tianshou | 679a0ce9ad2f7090b5a642c71dd7d9babf318fb8 | [
"MIT"
] | null | null | null | import os
import gym
import torch
import pprint
import argparse
import numpy as np
from torch.utils.tensorboard import SummaryWriter
from tianshou.env import VectorEnv, SubprocVectorEnv
from tianshou.trainer import offpolicy_trainer
from tianshou.data import Collector, ReplayBuffer
from tianshou.policy import SACPolicy, ImitationPolicy
if __name__ == '__main__':
from net import Actor, ActorProb, Critic
else: # pytest
from test.continuous.net import Actor, ActorProb, Critic
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument('--task', type=str, default='Pendulum-v0')
parser.add_argument('--seed', type=int, default=0)
parser.add_argument('--buffer-size', type=int, default=20000)
parser.add_argument('--actor-lr', type=float, default=3e-4)
parser.add_argument('--critic-lr', type=float, default=1e-3)
parser.add_argument('--il-lr', type=float, default=1e-3)
parser.add_argument('--gamma', type=float, default=0.99)
parser.add_argument('--tau', type=float, default=0.005)
parser.add_argument('--alpha', type=float, default=0.2)
parser.add_argument('--epoch', type=int, default=2000)
parser.add_argument('--step-per-epoch', type=int, default=2400)
parser.add_argument('--collect-per-step', type=int, default=10)
parser.add_argument('--batch-size', type=int, default=128)
parser.add_argument('--layer-num', type=int, default=1)
parser.add_argument('--training-num', type=int, default=8)
parser.add_argument('--test-num', type=int, default=10)
parser.add_argument('--logdir', type=str, default='log')
parser.add_argument('--render', type=float, default=0.)
parser.add_argument('--rew-norm', type=bool, default=True)
parser.add_argument(
'--device', type=str,
default='cuda' if torch.cuda.is_available() else 'cpu')
args = parser.parse_known_args()[0]
return args
from gym.wrappers import TransformReward
class BipedalWrapper(gym.Wrapper):
def __init__(self, env, action_repeat=3):
super(BipedalWrapper, self).__init__(env)
self.action_repeat = action_repeat
def step(self, action):
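        # add exploration noise to the action and repeat it several times, accumulating the reward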
act_noise = 0.3 * (np.random.random(action.shape) * 2 - 1)
action += act_noise
r = 0.0
obs_, reward_, done_, info_ = self.env.step(action)
for i in range(self.action_repeat - 1):
obs_, reward_, done_, info_ = self.env.step(action)
r = r + reward_
if done_:
return obs_, 0.0, done_, info_
return obs_, r, done_, info_
def test_sac_with_il(args=get_args()):
    torch.set_num_threads(1) # we only need one thread for NN
env = gym.make(args.task)
if args.task == 'Pendulum-v0':
env.spec.reward_threshold = -250
args.state_shape = env.observation_space.shape or env.observation_space.n
args.action_shape = env.action_space.shape or env.action_space.n
args.max_action = env.action_space.high[0]
# you can also use tianshou.env.SubprocVectorEnv
# train_envs = gym.make(args.task)
train_envs = SubprocVectorEnv([
lambda: TransformReward(BipedalWrapper(gym.make(args.task)), lambda reward: 5 * reward)
for _ in range(args.training_num)
])
# test_envs = gym.make(args.task)
test_envs = SubprocVectorEnv([lambda: gym.make(args.task) for _ in range(args.test_num)])
# seed
np.random.seed(args.seed)
torch.manual_seed(args.seed)
train_envs.seed(args.seed)
test_envs.seed(args.seed)
# model
actor = ActorProb(
args.layer_num, args.state_shape, args.action_shape,
args.max_action, args.device
).to(args.device)
actor_optim = torch.optim.Adam(actor.parameters(), lr=args.actor_lr)
critic1 = Critic(
args.layer_num, args.state_shape, args.action_shape, args.device
).to(args.device)
critic1_optim = torch.optim.Adam(critic1.parameters(), lr=args.critic_lr)
critic2 = Critic(
args.layer_num, args.state_shape, args.action_shape, args.device
).to(args.device)
critic2_optim = torch.optim.Adam(critic2.parameters(), lr=args.critic_lr)
policy = SACPolicy(
actor, actor_optim, critic1, critic1_optim, critic2, critic2_optim, env.action_space,
args.tau, args.gamma, args.alpha,
reward_normalization=args.rew_norm, ignore_done=True)
# collector
train_collector = Collector(
policy, train_envs, ReplayBuffer(args.buffer_size))
train_collector.collect(10000, sampling=True)
test_collector = Collector(policy, test_envs)
# train_collector.collect(n_step=args.buffer_size)
# log
log_path = os.path.join(args.logdir, args.task, 'sac')
writer = SummaryWriter(log_path)
def save_fn(policy):
torch.save(policy.state_dict(), os.path.join(log_path, 'policy.pth'))
def stop_fn(x):
return x >= env.spec.reward_threshold
# trainer
result = offpolicy_trainer(
policy, train_collector, test_collector, args.epoch,
args.step_per_epoch, args.collect_per_step, args.test_num,
args.batch_size, stop_fn=stop_fn, save_fn=save_fn, writer=writer)
assert stop_fn(result['best_reward'])
# test_collector.close()
if __name__ == '__main__':
pprint.pprint(result)
# Let's watch its performance!
env = gym.make(args.task)
collector = Collector(policy, env)
result = collector.collect(n_episode=1, render=args.render)
print(f'Final reward: {result["ep/reward"]}, length: {result["ep/len"]}')
collector.close()
# here we define an imitation collector with a trivial policy
if args.task == 'Pendulum-v0':
env.spec.reward_threshold = -300 # lower the goal
net = Actor(1, args.state_shape, args.action_shape,
args.max_action, args.device).to(args.device)
optim = torch.optim.Adam(net.parameters(), lr=args.il_lr)
il_policy = ImitationPolicy(net, optim, mode='continuous')
il_test_collector = Collector(il_policy, test_envs)
train_collector.reset()
result = offpolicy_trainer(
il_policy, train_collector, il_test_collector, args.epoch,
args.step_per_epoch, args.collect_per_step, args.test_num,
args.batch_size, stop_fn=stop_fn, save_fn=save_fn, writer=writer)
assert stop_fn(result['best_reward'])
train_collector.close()
il_test_collector.close()
if __name__ == '__main__':
pprint.pprint(result)
# Let's watch its performance!
env = gym.make(args.task)
collector = Collector(il_policy, env)
result = collector.collect(n_episode=1, render=args.render)
print(f'Final reward: {result["ep/reward"]}, length: {result["ep/len"]}')
collector.close()
if __name__ == '__main__':
test_sac_with_il()
| 41.493902 | 95 | 0.685966 |
041060360ebbe2030bd62e581bb5abb2c040d1b9 | 24,426 | py | Python | src/hpatterns.py | sahitpj/syntaxnet_triplets | 4932247a60228722b61381e95a03e6ce66d102ab | [
"MIT"
] | 2 | 2019-06-06T21:59:14.000Z | 2020-03-17T05:35:24.000Z | src/hpatterns.py | sahitpj/SyntaxNet-triplets | 4932247a60228722b61381e95a03e6ce66d102ab | [
"MIT"
] | null | null | null | src/hpatterns.py | sahitpj/SyntaxNet-triplets | 4932247a60228722b61381e95a03e6ce66d102ab | [
"MIT"
] | null | null | null | import sys
sys.path.append("../../..")
import re
import string
import spacy
from .conllu.conllu import parse_single, TokenList
from .hpatternUtils import create_default, create_greedy, create_semi
class HearstPatterns(object):
"""
Contains two methods. One which uses the .conllu file to develop a tokentree which can then
be converted into a tagged sentence to be able to extract hearst pattern,
the second one uses Spacy to derive the tagged sentence to be able to extract hearst patterns.
For tagged sentences, check out the get_noun_chunks functions.
"""
def __init__(self, extended = False, greedy = False, same_sentence = False, semi = False):
self.__adj_stopwords = ['able', 'available', 'brief', 'certain', 'different', 'due', 'enough', 'especially','few', 'fifth', 'former', 'his', 'howbeit', 'immediate', 'important', 'inc', 'its', 'last', 'latter', 'least', 'less', 'likely', 'little', 'many', 'ml', 'more', 'most', 'much', 'my', 'necessary', 'new', 'next', 'non', 'old', 'other', 'our', 'ours', 'own', 'particular', 'past', 'possible', 'present', 'proud', 'recent', 'same', 'several', 'significant', 'similar', 'such', 'sup', 'sure']
# now define the Hearst patterns
# format is <hearst-pattern>, <general-term>
        # so, what this means is that if you apply the first pattern, the first Noun Phrase (NP)
# is the general one, and the rest are specific NPs
self.__hearst_patterns = [
('(NP_\\w+ (, )?such as (NP_\\w+ ? (, )?(and |or )?)+)', 'first', 'typeOf', 0),
('(such NP_\\w+ (, )?as (NP_\\w+ ?(, )?(and |or )?)+)', 'first', 'typeOf', 0),
('((NP_\\w+ ?(, )?)+(and |or )?other NP_\\w+)', 'last', 'typeOf', 0),
('(NP_\\w+ (, )?include (NP_\\w+ ?(, )?(and |or )?)+)', 'first', 'typeOf', 0),
('(NP_\\w+ (, )?especially (NP_\\w+ ?(, )?(and |or )?)+)', 'first', 'typeOf', 0),
(r'NP_(\w+).*?born.*on.* CD_(\d+)? (\w+) CD_(\d+)? ', 'last', 'bornOn', 4),
(r'NP_(\w+).*?(died|passed away).*?on.*?CD_(\d+)? (\w+) CD_(\d+)? ', 'last', 'diedOn', 4),
(r'NP_(\w+).*?(born|developed|made|established|published).*?(in|at).*?CD_(\w+)', 'last', 'madeIn', 4),
(r'NP_(\w+).*?(present|found).*?in.*?NP_(\w+)', 'last', 'foundIn', 3),
(r'NP_(\w+).*?(member).*?of.*?NP_(\w+)', 'last', 'memberOf', 3),
(r'NP_(\w+).*?(developed|made|published|established).*?by.*?NP_(\w+)', 'last', 'madeBy', 3),
(r'NP_(\w+).*?(composed).*?of.*?NP_(\w+)', 'last', 'composedOf', 3),
(r'NP_(\w+).*?also known as.*?NP_(\w+)', 'last', 'also known as', 2),
(r'NP_(\w+).*?located.*?(in|on).*?NP_(\w+)', 'last', 'locatedIn|On', 3),
(r'NP_(\w+).*?(was|is) a.*?NP_(\w+)', 'first', 'attribute', 3),
(r'NP_(\w+).*?(comparable|related) to.*?NP_(\w+)', 'last', 'comparable to', 3),
('(NP_\\w+ (, )?made of (NP_\\w+ ? (, )?(and |or )?)+)', 'first', 'madeOf_multiple', 0),
('(NP_\\w+ (, )?(was|is) a (NP_\\w+ ? (, )?(and |or )?)+)', 'first', 'attribute|profession_multiple', 0),
(r'NP_(\w+).*?(was|is).*?published.*?(in|on).*?CD_(\w+)', 'first', 'publishedIn', 3),
(r'NP_(\w+).*?represent.*?NP_(\w+)', 'first', 'representedBy', 2),
(r'NP_(\w+).*?used.*?(by|in|as).*?NP_(\w+)', 'first', 'used_', 3),
(r'NP_(\w+).*?made.*?of.*?NP_(\w+)', 'first', 'madeOf', 2),
(r'NP_(\w+).*?form.*?of.*?NP_(\w+)', 'first', 'formOf', 2),
(r'NP_(\w+).*?(leader|ruler|king|head).*?of.*?NP_(\w+)', 'first', 'leaderOf', 3),
(r'NP_(\w+).*?famous.*?for.*?NP_(\w+)', 'first', 'famousFor', 2),
('(NP_\\w+ (, )?famous for (NP_\\w+ ? (, )?(and |or )?)+)', 'first', 'FamousFor_multiple', 0),
] + create_default()
self.__hearst_patterns_greedy = [
('(NP_\\w+ (, )?such as (NP_\\w+ ? (, )?(and |or )?)+)', 'first', 'typeOf', 0),
('(such NP_\\w+ (, )?as (NP_\\w+ ?(, )?(and |or )?)+)', 'first', 'typeOf', 0),
('((NP_\\w+ ?(, )?)+(and |or )?other NP_\\w+)', 'last', 'typeOf', 0),
('(NP_\\w+ (, )?include (NP_\\w+ ?(, )?(and |or )?)+)', 'first', 'typeOf', 0),
('(NP_\\w+ (, )?especially (NP_\\w+ ?(, )?(and |or )?)+)', 'first', 'typeOf', 0),
(r'.*NP_(\w+).*?born.*on.* CD_(\d+)? (\w+) CD_(\d+)? ', 'last', 'bornOn', 4),
(r'.*NP_(\w+).*?(died|passed away).*?on.*?CD_(\d+)? (\w+) CD_(\d+)? ', 'last', 'diedOn', 4),
(r'.*NP_(\w+).*?(born|developed|made|established|published).*?(in|at).*?CD_(\w+)', 'last', 'madeIn', 4),
(r'.*NP_(\w+).*?(present|found).*?in.*?NP_(\w+)', 'last', 'foundIn', 3),
(r'.*NP_(\w+).*?(member).*?of.*?NP_(\w+)', 'last', 'memberOf', 3),
(r'.*NP_(\w+).*?(developed|made|published|established).*?by.*?NP_(\w+)', 'last', 'madeBy', 3),
(r'.*NP_(\w+).*?(composed).*?of.*?NP_(\w+)', 'last', 'composedOf', 3),
(r'.*NP_(\w+).*?also known as.*?NP_(\w+)', 'last', 'also known as', 2),
(r'.*NP_(\w+).*?located.*?(in|on).*?NP_(\w+)', 'last', 'locatedIn|On', 3),
(r'.*NP_(\w+).*?(was|is) a.*?NP_(\w+)', 'first', 'attribute', 3),
(r'.*NP_(\w+).*?(comparable|related) to.*?NP_(\w+)', 'last', 'comparable to', 3),
('(NP_\\w+ (, )?made of (NP_\\w+ ? (, )?(and |or )?)+)', 'first', 'madeOf_multiple', 0),
('(NP_\\w+ (, )?(was|is) a (NP_\\w+ ? (, )?(and |or )?)+)', 'first', 'attribute|profession_multiple', 0),
(r'.*NP_(\w+) (was|is).*?published.*?(in|on).*?CD_(\w+)', 'first', 'publishedIn', 3),
(r'.*NP_(\w+).*?represent.*?NP_(\w+)', 'first', 'representedBy', 2),
(r'.*NP_(\w+).*?used.*?(by|in|as).*?NP_(\w+)', 'first', 'used_', 3),
(r'.*NP_(\w+).*?made.*?of.*?NP_(\w+)', 'first', 'madeOf', 2),
(r'.*NP_(\w+).*?form.*?of.*?NP_(\w+)', 'first', 'formOf', 2),
(r'.*NP_(\w+).*?(leader|ruler|king|head) .*?of.*?NP_(\w+)', 'first', 'leaderOf', 3),
(r'.*NP_(\w+).*?famous.*?for.*?NP_(\w+)', 'first', 'famousFor', 2),
('(NP_\\w+ (, )?famous for (NP_\\w+ ? (, )?(and |or )?)+)', 'first', 'FamousFor_multiple', 0),
]
self.__hearst_patterns_semigreedy = [
('(NP_\\w+ (, )?such as (NP_\\w+ ? (, )?(and |or )?)+)', 'first', 'typeOf', 0),
('(such NP_\\w+ (, )?as (NP_\\w+ ?(, )?(and |or )?)+)', 'first', 'typeOf', 0),
('((NP_\\w+ ?(, )?)+(and |or )?other NP_\\w+)', 'last', 'typeOf', 0),
('(NP_\\w+ (, )?include (NP_\\w+ ?(, )?(and |or )?)+)', 'first', 'typeOf', 0),
('(NP_\\w+ (, )?especially (NP_\\w+ ?(, )?(and |or )?)+)', 'first', 'typeOf', 0),
(r'.*?NP_(\w+).*?born.*on.* CD_(\d+)? (\w+) CD_(\d+)? ', 'last', 'bornOn', 4),
(r'.*?NP_(\w+).*?(died|passed away).*?on.*?CD_(\d+)? (\w+) CD_(\d+)? ', 'last', 'diedOn', 4),
(r'.*?NP_(\w+).*?(born|developed|made|established|published).*?(in|at).*?CD_(\w+)', 'last', 'madeIn', 4),
(r'.*?NP_(\w+).*?(present|found).*?in.*?NP_(\w+)', 'last', 'foundIn', 3),
(r'.*?NP_(\w+).*?(member).*?of.*?NP_(\w+)', 'last', 'memberOf', 3),
(r'.*?NP_(\w+).*?(developed|made|published|established).*?by.*?NP_(\w+)', 'last', 'madeBy', 3),
(r'.*?NP_(\w+).*?(composed).*?of.*?NP_(\w+)', 'last', 'composedOf', 3),
(r'.*?NP_(\w+).*?also known as.*?NP_(\w+)', 'last', 'also known as', 2),
(r'.*?NP_(\w+).*?located.*?(in|on).*?NP_(\w+)', 'last', 'locatedIn|On', 3),
(r'.*?NP_(\w+).*?(was|is) a.*?NP_(\w+)', 'first', 'attribute', 3),
(r'.*?NP_(\w+).*?(comparable|related) to.*?NP_(\w+)', 'last', 'comparable to', 3),
('(NP_\\w+ (, )?made of (NP_\\w+ ? (, )?(and |or )?)+)', 'first', 'madeOf_multiple', 0),
('(NP_\\w+ (, )?(was|is) a (NP_\\w+ ? (, )?(and |or )?)+)', 'first', 'attribute|profession_multiple', 0),
(r'.*?NP_(\w+) (was|is).*?published.*?(in|on).*?CD_(\w+)', 'first', 'publishedIn', 3),
(r'.*?NP_(\w+).*?represent.*?NP_(\w+)', 'first', 'representedBy', 2),
(r'.*?NP_(\w+).*?used.*?(by|in|as).*?NP_(\w+)', 'first', 'used_', 3),
(r'.*?NP_(\w+).*?made.*?of.*?NP_(\w+)', 'first', 'madeOf', 2),
(r'.*?NP_(\w+).*?form.*?of.*?NP_(\w+)', 'first', 'formOf', 2),
(r'.*?NP_(\w+).*?(leader|ruler|king|head).*?of.*?NP_(\w+)', 'first', 'leaderOf', 3),
(r'.*?NP_(\w+).*?famous.*?for.*?NP_(\w+)', 'first', 'famousFor', 2),
('(NP_\\w+ (, )?famous for (NP_\\w+ ? (, )?(and |or )?)+)', 'first', 'FamousFor_multiple', 0),
]
self.__hearst_patterns_ss = [
('(NP_\\w+ (, )?such as (NP_\\w+ ? (, )?(and |or )?)+)', 'first', 'typeOf', 0),
('(such NP_\\w+ (, )?as (NP_\\w+ ?(, )?(and |or )?)+)', 'first', 'typeOf', 0),
('((NP_\\w+ ?(, )?)+(and |or )?other NP_\\w+)', 'last', 'typeOf', 0),
('(NP_\\w+ (, )?include (NP_\\w+ ?(, )?(and |or )?)+)', 'first', 'typeOf', 0),
('(NP_\\w+ (, )?especially (NP_\\w+ ?(, )?(and |or )?)+)', 'first', 'typeOf', 0),
(r'NP_(\w+)[^.]*?born[^.]*on[^.]* CD_(\d+)? (\w+) CD_(\d+)? ', 'last', 'bornOn', 4),
(r'NP_(\w+)[^.]*?(died|passed away)[^.]*?on[^.]*?CD_(\d+)? (\w+) CD_(\d+)? ', 'last', 'diedOn', 4),
(r'NP_(\w+)[^.]*?(born|developed|made|established|published)[^.]*?(in|at)[^.]*?CD_(\w+)', 'last', 'madeIn', 4),
(r'NP_(\w+)[^.]*?(present|found)[^.]*?in[^.]*?NP_(\w+)', 'last', 'foundIn', 3),
(r'NP_(\w+)[^.]*?(member)[^.]*?of[^.]*?NP_(\w+)', 'last', 'memberOf', 3),
(r'NP_(\w+)[^.]*?(developed|made|published|established)[^.]*?by[^.]*?NP_(\w+)', 'last', 'madeBy', 3),
(r'NP_(\w+)[^.]*?(composed)[^.]*?of[^.]*?NP_(\w+)', 'last', 'composedOf', 3),
(r'NP_(\w+)[^.]*?also known as[^.]*?NP_(\w+)', 'last', 'also known as', 2),
(r'NP_(\w+)[^.]*?located[^.]*?(in|on)[^.]*?NP_(\w+)', 'last', 'locatedIn|On', 3),
(r'NP_(\w+)[^.]*?(was|is) a[^.]*?NP_(\w+)', 'first', 'attribute', 3),
(r'NP_(\w+)[^.]*?(comparable|related) to[^.]*?NP_(\w+)', 'last', 'comparable to', 3),
('(NP_\\w+ (, )?made of (NP_\\w+ ? (, )?(and |or )?)+)', 'first', 'madeOf_multiple', 0),
('(NP_\\w+ (, )?(was|is) a (NP_\\w+ ? (, )?(and |or )?)+)', 'first', 'attribute|profession_multiple', 0),
(r'NP_(\w+) (was|is)[^.]*?published[^.]*?(in|on)[^.]*?CD_(\w+)', 'first', 'publishedIn', 3),
(r'NP_(\w+)[^.]*?represent[^.]*?NP_(\w+)', 'first', 'representedBy', 2),
(r'NP_(\w+)[^.]*?used[^.]*?(by|in|as)[^.]*?NP_(\w+)', 'first', 'used_', 3),
(r'NP_(\w+)[^.]*?made[^.]*?of[^.]*?NP_(\w+)', 'first', 'madeOf', 2),
(r'NP_(\w+)[^.]*?form[^.]*?of[^.]*?NP_(\w+)', 'first', 'formOf', 2),
(r'NP_(\w+)[^.]*?(leader|ruler|king|head)[^.]*?of[^.]*?NP_(\w+)', 'first', 'leaderOf', 3),
(r'NP_(\w+)[^.]*?famous[^.]*?for[^.]*?NP_(\w+)', 'first', 'famousFor', 2),
('(NP_\\w+ (, )?famous for (NP_\\w+ ? (, )?(and |or )?)+)', 'first', 'Famousfor_multiple', 0),
]
if extended:
self.__hearst_patterns.extend([
('((NP_\\w+ ?(, )?)+(and |or )?any other NP_\\w+)', 'last', 'typeOf', 0),
('((NP_\\w+ ?(, )?)+(and |or )?some other NP_\\w+)', 'last', 'typeOf', 0),
('((NP_\\w+ ?(, )?)+(and |or )?be a NP_\\w+)', 'last', 'typeOf', 0),
('(NP_\\w+ (, )?like (NP_\\w+ ? (, )?(and |or )?)+)', 'first', 'typeOf', 0),
('such (NP_\\w+ (, )?as (NP_\\w+ ? (, )?(and |or )?)+)', 'first', 'typeOf', 0),
('((NP_\\w+ ?(, )?)+(and |or )?like other NP_\\w+)', 'last', 'typeOf', 0),
('((NP_\\w+ ?(, )?)+(and |or )?one of the NP_\\w+)', 'last', 'typeOf', 0),
('((NP_\\w+ ?(, )?)+(and |or )?one of these NP_\\w+)', 'last', 'typeOf', 0),
('((NP_\\w+ ?(, )?)+(and |or )?one of those NP_\\w+)', 'last', 'typeOf', 0),
('example of (NP_\\w+ (, )?be (NP_\\w+ ? (, )?(and |or )?)+)', 'first', 'typeOf', 0),
('((NP_\\w+ ?(, )?)+(and |or )?be example of NP_\\w+)', 'last', 'typeOf', 0),
('(NP_\\w+ (, )?for example (NP_\\w+ ? (, )?(and |or )?)+)', 'first', 'typeOf', 0),
('((NP_\\w+ ?(, )?)+(and |or )?wich be call NP_\\w+)', 'last', 'typeOf', 0),
('((NP_\\w+ ?(, )?)+(and |or )?which be name NP_\\w+)', 'last', 'typeOf', 0),
('(NP_\\w+ (, )?mainly (NP_\\w+ ? (, )?(and |or )?)+)', 'first', 'typeOf', 0),
('(NP_\\w+ (, )?mostly (NP_\\w+ ? (, )?(and |or )?)+)', 'first', 'typeOf', 0),
('(NP_\\w+ (, )?notably (NP_\\w+ ? (, )?(and |or )?)+)', 'first', 'typeOf', 0),
('(NP_\\w+ (, )?particularly (NP_\\w+ ? (, )?(and |or )?)+)', 'first', 'typeOf', 0),
('(NP_\\w+ (, )?principally (NP_\\w+ ? (, )?(and |or )?)+)', 'first', 'typeOf', 0),
('(NP_\\w+ (, )?in particular (NP_\\w+ ? (, )?(and |or )?)+)', 'first', 'typeOf', 0),
('(NP_\\w+ (, )?except (NP_\\w+ ? (, )?(and |or )?)+)', 'first', 'typeOf', 0),
('(NP_\\w+ (, )?other than (NP_\\w+ ? (, )?(and |or )?)+)', 'first', 'typeOf', 0),
('(NP_\\w+ (, )?e.g. (NP_\\w+ ? (, )?(and |or )?)+)', 'first', 'typeOf', 0),
('(NP_\\w+ (, )?i.e. (NP_\\w+ ? (, )?(and |or )?)+)', 'first', 'typeOf', 0),
('((NP_\\w+ ?(, )?)+(and |or )?a kind of NP_\\w+)', 'last', 'typeOf', 0),
('((NP_\\w+ ?(, )?)+(and |or )?kind of NP_\\w+)', 'last', 'typeOf', 0),
('((NP_\\w+ ?(, )?)+(and |or )?form of NP_\\w+)', 'last', 'typeOf', 0),
('((NP_\\w+ ?(, )?)+(and |or )?which look like NP_\\w+)', 'last', 'typeOf', 0),
('((NP_\\w+ ?(, )?)+(and |or )?which sound like NP_\\w+)', 'last', 'typeOf', 0),
('(NP_\\w+ (, )?which be similar to (NP_\\w+ ? (, )?(and |or )?)+)', 'first', 'typeOf', 0),
('(NP_\\w+ (, )?example of this be (NP_\\w+ ? (, )?(and |or )?)+)', 'first', 'typeOf', 0),
('(NP_\\w+ (, )?type (NP_\\w+ ? (, )?(and |or )?)+)', 'first', 'typeOf', 0),
('((NP_\\w+ ?(, )?)+(and |or )? NP_\\w+ type)', 'last', 'typeOf', 0),
('(NP_\\w+ (, )?whether (NP_\\w+ ? (, )?(and |or )?)+)', 'first', 'typeOf', 0),
('(compare (NP_\\w+ ?(, )?)+(and |or )?with NP_\\w+)', 'last', 'typeOf', 0),
('(NP_\\w+ (, )?compare to (NP_\\w+ ? (, )?(and |or )?)+)', 'first', 'typeOf', 0),
('(NP_\\w+ (, )?among -PRON- (NP_\\w+ ? (, )?(and |or )?)+)', 'first', 'typeOf', 0),
('((NP_\\w+ ?(, )?)+(and |or )?as NP_\\w+)', 'last', 'typeOf', 0),
('(NP_\\w+ (, )? (NP_\\w+ ? (, )?(and |or )?)+ for instance)', 'first', 'typeOf', 0),
('((NP_\\w+ ?(, )?)+(and |or )?sort of NP_\\w+)', 'last', 'typeOf', 0)
])
self.__spacy_nlp = spacy.load('en')
if greedy:
self.__hearst_patterns = self.__hearst_patterns_greedy + create_greedy()
if same_sentence:
self.__hearst_patterns = self.__hearst_patterns_ss
if semi:
self.__hearst_patterns = self.__hearst_patterns_semigreedy + create_semi()
def chunk(self, rawtext):
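        # replace every noun chunk in each sentence with a single NP_-prefixed token so the Hearst pattern regexes can match on it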
STOP_TOKENS = ["the", "a", "an"]
doc = self.__spacy_nlp(rawtext)
chunks = []
for sentence in doc.sents:
sentence_text = sentence.text.lower()
for chunk in sentence.noun_chunks:
chunk_arr = []
replace_arr = []
for token in chunk:
if token.text not in STOP_TOKENS:
chunk_arr.append(token.text)
# Remove punctuation and stopword adjectives (generally quantifiers of plurals)
if token.lemma_.isalnum() and token.lemma_ not in self.__adj_stopwords and token.text not in STOP_TOKENS:
replace_arr.append(token.lemma_)
elif not token.lemma_.isalnum() and token.text not in STOP_TOKENS:
if token.lemma_ != '-PRON-':
replace_arr.append(''.join(char for char in token.lemma_ if char.isalnum()))
else:
replace_arr.append(token.text)
chunk_lemma = ' '.join(chunk_arr).lower()
replacement_value = 'NP_' + '_'.join(replace_arr).lower()
if chunk_lemma:
sentence_text = re.sub(r'\b%s\b' % re.escape(chunk_lemma),
r'%s' % replacement_value,
sentence_text)
chunks.append(sentence_text)
return chunks
def chunk_root(self, rawtext):
doc = self.__spacy_nlp(rawtext)
chunks = []
for sentence in doc.sents:
sentence_text = sentence.lemma_
for chunk in sentence.noun_chunks:
chunk_arr = []
replace_arr = []
for token in chunk:
chunk_arr.append(token.lemma_)
# Remove punctuation and stopword adjectives (generally quantifiers of plurals)
if token.lemma_.isalnum() and token.lemma_ not in self.__adj_stopwords:
replace_arr.append(token.lemma_)
elif not token.lemma_.isalnum():
replace_arr.append(''.join(char for char in token.lemma_ if char.isalnum()))
chunk_lemma = ' '.join(chunk_arr)
replacement_value = 'NP_' + '_'.join(replace_arr)
if chunk_lemma:
sentence_text = re.sub(r'\b%s\b' % re.escape(chunk_lemma),
r'%s' % replacement_value,
sentence_text)
chunks.append(sentence_text)
return chunks
"""
This is the main entry point for this code.
It takes as input the rawtext to process and returns a list of tuples (specific-term, general-term)
where each tuple represents a hypernym pair.
"""
def find_hearstpatterns(self, filepath_to_conll, subject):
data_file = open(filepath_to_conll, "r", encoding="utf-8")
tokenList = parse_single(data_file)
sentence_tokenList = tokenList[0]
hearst_patterns = []
# np_tagged_sentences = self.chunk(rawtext)
np_tagged_sentences = sentence_tokenList.get_noun_chunks(subject)
# for sentence in np_tagged_sentences:
# two or more NPs next to each other should be merged into a single NP, it's a chunk error
for (hearst_pattern, parser, hearst_type, process_type) in self.__hearst_patterns:
matches = re.search(hearst_pattern, np_tagged_sentences)
if matches:
match_str = matches.group(0)
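                # process_type == 0 marks the multi-NP patterns: every NP_ token in the matched string is part of the relation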
if process_type == 0:
nps = [a for a in match_str.split() if a.startswith("NP_")]
if parser == "first":
general = nps[0]
specifics = nps[1:]
else:
general = nps[-1]
specifics = nps[:-1]
for i in range(len(specifics)):
#print("%s, %s %s" % (specifics[i], general, hearst_type))
hearst_patterns.append((self.clean_hyponym_term(specifics[i]), self.clean_hyponym_term(general), hearst_type))
else:
if parser == "first":
general = matches.group(1)
specifics = [matches.group(i) for i in range(2,process_type+1)]
else:
general = matches.group(process_type)
specifics = [matches.group(i) for i in range(1,process_type)]
#print("%s, %s %s" % (specifics[i], general, hearst_type))
hearst_patterns.append((specifics, general, hearst_type))
return hearst_patterns
def find_hearstpatterns_spacy(self, rawtext):
hearst_patterns = []
np_tagged_sentences = self.chunk(rawtext)
for sentence in np_tagged_sentences:
# two or more NPs next to each other should be merged into a single NP, it's a chunk error
for (hearst_pattern, parser, hearst_type, process_type) in self.__hearst_patterns[:-1]:
matches = re.search(hearst_pattern, sentence)
if matches:
match_str = matches.group(0)
if process_type == 0:
nps = [a for a in match_str.split() if a.startswith("NP_")]
if parser == "first":
general = nps[0]
specifics = nps[1:]
else:
general = nps[-1]
specifics = nps[:-1]
for i in range(len(specifics)):
#print("%s, %s %s" % (specifics[i], general, hearst_type))
hearst_patterns.append((self.clean_hyponym_term(specifics[i]), self.clean_hyponym_term(general), hearst_type))
else:
if parser == "first":
general = matches.group(1)
specifics = [matches.group(i) for i in range(2,process_type+1)]
else:
general = matches.group(process_type)
specifics = [matches.group(i) for i in range(1,process_type)]
#print("%s, %s %s" % (specifics[i], general, hearst_type))
hearst_patterns.append((specifics, general, hearst_type, parser))
return hearst_patterns
def find_hearstpatterns_spacy_root(self, rawtext):
hearst_patterns = []
np_tagged_sentences = self.chunk_root(rawtext)
for sentence in np_tagged_sentences:
# two or more NPs next to each other should be merged into a single NP, it's a chunk error
for (hearst_pattern, parser, hearst_type, process_type) in self.__hearst_patterns:
matches = re.search(hearst_pattern, sentence)
if matches:
match_str = matches.group(0)
if process_type == 0:
nps = [a for a in match_str.split() if a.startswith("NP_")]
if parser == "first":
general = nps[0]
specifics = nps[1:]
else:
general = nps[-1]
specifics = nps[:-1]
for i in range(len(specifics)):
#print("%s, %s %s" % (specifics[i], general, hearst_type))
hearst_patterns.append((self.clean_hyponym_term(specifics[i]), self.clean_hyponym_term(general), hearst_type))
else:
if parser == "first":
general = matches.group(1)
specifics = [matches.group(i) for i in range(2,process_type+1)]
else:
general = matches.group(process_type)
specifics = [matches.group(i) for i in range(1,process_type)]
#print("%s, %s %s" % (specifics[i], general, hearst_type))
hearst_patterns.append((specifics, general, hearst_type, parser))
return hearst_patterns
def add_patterns(self, patterns, t):
if t == 'Default':
self.__hearst_patterns.extend(patterns)
elif t == 'Non-greedy':
self.__hearst_patterns_greedy.extend(patterns)
else:
self.__hearst_patterns_semigreedy.extend(patterns)
def clean_hyponym_term(self, term):
# good point to do the stemming or lemmatization
return term.replace("NP_","").replace("_", " ")
| 62.470588 | 503 | 0.456972 |
c2e0702dd3bb18f64332997c837e7069b65b843d | 69 | py | Python | abc/128/a.py | wotsushi/competitive-programming | 17ec8fd5e1c23aee626aee70b1c0da8d7f8b8c86 | [
"MIT"
] | 3 | 2019-06-25T06:17:38.000Z | 2019-07-13T15:18:51.000Z | abc/128/a.py | wotsushi/competitive-programming | 17ec8fd5e1c23aee626aee70b1c0da8d7f8b8c86 | [
"MIT"
] | null | null | null | abc/128/a.py | wotsushi/competitive-programming | 17ec8fd5e1c23aee626aee70b1c0da8d7f8b8c86 | [
"MIT"
] | null | null | null | A, P = map(int, input().split())
ans = (3 * A + P) // 2
print(ans)
| 11.5 | 32 | 0.478261 |
6f01fa2283577d1f8b28081df66fd468f8b05b00 | 15,710 | py | Python | main.py | ishine/HTS-Audio-Transformer | 356521f5dbb1893082c449a4993977fd624905f0 | [
"MIT"
] | 24 | 2022-02-02T10:19:31.000Z | 2022-03-23T16:59:19.000Z | main.py | ishine/HTS-Audio-Transformer | 356521f5dbb1893082c449a4993977fd624905f0 | [
"MIT"
] | 3 | 2022-02-23T02:31:36.000Z | 2022-03-11T06:29:25.000Z | main.py | ishine/HTS-Audio-Transformer | 356521f5dbb1893082c449a4993977fd624905f0 | [
"MIT"
] | 9 | 2022-02-02T17:42:11.000Z | 2022-03-25T07:12:13.000Z | # Ke Chen
# knutchen@ucsd.edu
# HTS-AT: A HIERARCHICAL TOKEN-SEMANTIC AUDIO TRANSFORMER FOR SOUND CLASSIFICATION AND DETECTION
# The main code for training and evaluating HTSAT
import os
from re import A, S
import sys
import librosa
import numpy as np
import argparse
import h5py
import math
import time
import logging
import pickle
import random
from datetime import datetime
import torch
import torch.optim as optim
from torch.utils.data import DataLoader, sampler
from torch.utils.data.distributed import DistributedSampler
from utils import create_folder, dump_config, process_idc, prepprocess_audio, init_hier_head
import config
from sed_model import SEDWrapper, Ensemble_SEDWrapper
from models import Cnn14_DecisionLevelMax
from data_generator import SEDDataset, DESED_Dataset, ESC_Dataset, SCV2_Dataset
from model.htsat import HTSAT_Swin_Transformer
import pytorch_lightning as pl
from pytorch_lightning.callbacks import ModelCheckpoint
import warnings
warnings.filterwarnings("ignore")
class data_prep(pl.LightningDataModule):
def __init__(self, train_dataset, eval_dataset, device_num):
super().__init__()
self.train_dataset = train_dataset
self.eval_dataset = eval_dataset
self.device_num = device_num
def train_dataloader(self):
train_sampler = DistributedSampler(self.train_dataset, shuffle = False) if self.device_num > 1 else None
train_loader = DataLoader(
dataset = self.train_dataset,
num_workers = config.num_workers,
batch_size = config.batch_size // self.device_num,
shuffle = False,
sampler = train_sampler
)
return train_loader
def val_dataloader(self):
eval_sampler = DistributedSampler(self.eval_dataset, shuffle = False) if self.device_num > 1 else None
eval_loader = DataLoader(
dataset = self.eval_dataset,
num_workers = config.num_workers,
batch_size = config.batch_size // self.device_num,
shuffle = False,
sampler = eval_sampler
)
return eval_loader
def test_dataloader(self):
test_sampler = DistributedSampler(self.eval_dataset, shuffle = False) if self.device_num > 1 else None
test_loader = DataLoader(
dataset = self.eval_dataset,
num_workers = config.num_workers,
batch_size = config.batch_size // self.device_num,
shuffle = False,
sampler = test_sampler
)
return test_loader
def save_idc():
train_index_path = os.path.join(config.dataset_path, "hdf5s", "indexes", config.index_type + ".h5")
eval_index_path = os.path.join(config.dataset_path,"hdf5s", "indexes", "eval.h5")
process_idc(train_index_path, config.classes_num, config.index_type + "_idc.npy")
process_idc(eval_index_path, config.classes_num, "eval_idc.npy")
def weight_average():
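    # average the weights of every checkpoint found in wa_folder into a single model file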
model_ckpt = []
model_files = os.listdir(config.wa_folder)
wa_ckpt = {
"state_dict": {}
}
for model_file in model_files:
model_file = os.path.join(config.wa_folder, model_file)
model_ckpt.append(torch.load(model_file, map_location="cpu")["state_dict"])
keys = model_ckpt[0].keys()
for key in keys:
model_ckpt_key = torch.cat([d[key].float().unsqueeze(0) for d in model_ckpt])
model_ckpt_key = torch.mean(model_ckpt_key, dim = 0)
        assert model_ckpt_key.shape == model_ckpt[0][key].shape, "the shape is unmatched %s vs. %s" % (str(model_ckpt_key.shape), str(model_ckpt[0][key].shape))
wa_ckpt["state_dict"][key] = model_ckpt_key
torch.save(wa_ckpt, config.wa_model_path)
def esm_test():
device_num = torch.cuda.device_count()
print("each batch size:", config.batch_size // device_num)
if config.fl_local:
fl_npy = np.load(config.fl_dataset, allow_pickle = True)
# import dataset SEDDataset
eval_dataset = DESED_Dataset(
dataset = fl_npy,
config = config
)
else:
# dataset file pathes
eval_index_path = os.path.join(config.dataset_path,"hdf5s", "indexes", "eval.h5")
eval_idc = np.load("eval_idc.npy", allow_pickle = True)
# import dataset SEDDataset
eval_dataset = SEDDataset(
index_path=eval_index_path,
idc = eval_idc,
config = config,
eval_mode = True
)
audioset_data = data_prep(eval_dataset, eval_dataset, device_num)
trainer = pl.Trainer(
deterministic=True,
gpus = device_num,
max_epochs = config.max_epoch,
auto_lr_find = True,
sync_batchnorm = True,
checkpoint_callback = False,
accelerator = "ddp" if device_num > 1 else None,
num_sanity_val_steps = 0,
# resume_from_checkpoint = config.resume_checkpoint,
replace_sampler_ddp = False,
gradient_clip_val=1.0
)
sed_models = []
for esm_model_path in config.esm_model_pathes:
sed_model = HTSAT_Swin_Transformer(
spec_size=config.htsat_spec_size,
patch_size=config.htsat_patch_size,
in_chans=1,
num_classes=config.classes_num,
window_size=config.htsat_window_size,
config = config,
depths = config.htsat_depth,
embed_dim = config.htsat_dim,
patch_stride=config.htsat_stride,
num_heads=config.htsat_num_head
)
sed_wrapper = SEDWrapper(
sed_model = sed_model,
config = config,
dataset = eval_dataset
)
ckpt = torch.load(esm_model_path, map_location="cpu")
ckpt["state_dict"].pop("sed_model.head.weight")
ckpt["state_dict"].pop("sed_model.head.bias")
sed_wrapper.load_state_dict(ckpt["state_dict"], strict=False)
sed_models.append(sed_wrapper)
model = Ensemble_SEDWrapper(
sed_models = sed_models,
config = config,
dataset = eval_dataset
)
trainer.test(model, datamodule=audioset_data)
def test():
device_num = torch.cuda.device_count()
print("each batch size:", config.batch_size // device_num)
# dataset file pathes
if config.fl_local:
fl_npy = np.load(config.fl_dataset, allow_pickle = True)
# import dataset SEDDataset
eval_dataset = DESED_Dataset(
dataset = fl_npy,
config = config
)
else:
if config.dataset_type == "audioset":
eval_index_path = os.path.join(config.dataset_path,"hdf5s", "indexes", "eval.h5")
eval_idc = np.load("eval_idc.npy", allow_pickle = True)
eval_dataset = SEDDataset(
index_path=eval_index_path,
idc = eval_idc,
config = config,
eval_mode = True
)
elif config.dataset_type == "esc-50":
full_dataset = np.load(os.path.join(config.dataset_path, "esc-50-data.npy"), allow_pickle = True)
eval_dataset = ESC_Dataset(
dataset = full_dataset,
config = config,
eval_mode = True
)
elif config.dataset_type == "scv2":
test_set = np.load(os.path.join(config.dataset_path, "scv2_test.npy"), allow_pickle = True)
eval_dataset = SCV2_Dataset(
dataset = test_set,
config = config,
eval_mode = True
)
# import dataset SEDDataset
audioset_data = data_prep(eval_dataset, eval_dataset, device_num)
trainer = pl.Trainer(
deterministic=True,
gpus = device_num,
max_epochs = config.max_epoch,
auto_lr_find = True,
sync_batchnorm = True,
checkpoint_callback = False,
accelerator = "ddp" if device_num > 1 else None,
num_sanity_val_steps = 0,
# resume_from_checkpoint = config.resume_checkpoint,
replace_sampler_ddp = False,
gradient_clip_val=1.0
)
sed_model = HTSAT_Swin_Transformer(
spec_size=config.htsat_spec_size,
patch_size=config.htsat_patch_size,
in_chans=1,
num_classes=config.classes_num,
window_size=config.htsat_window_size,
config = config,
depths = config.htsat_depth,
embed_dim = config.htsat_dim,
patch_stride=config.htsat_stride,
num_heads=config.htsat_num_head
)
model = SEDWrapper(
sed_model = sed_model,
config = config,
dataset = eval_dataset
)
if config.resume_checkpoint is not None:
ckpt = torch.load(config.resume_checkpoint, map_location="cpu")
ckpt["state_dict"].pop("sed_model.head.weight")
ckpt["state_dict"].pop("sed_model.head.bias")
model.load_state_dict(ckpt["state_dict"], strict=False)
trainer.test(model, datamodule=audioset_data)
def train():
device_num = torch.cuda.device_count()
print("each batch size:", config.batch_size // device_num)
# dataset file pathes
if config.dataset_type == "audioset":
train_index_path = os.path.join(config.dataset_path, "hdf5s","indexes", config.index_type + ".h5")
eval_index_path = os.path.join(config.dataset_path,"hdf5s", "indexes", "eval.h5")
train_idc = np.load(config.index_type + "_idc.npy", allow_pickle = True)
eval_idc = np.load("eval_idc.npy", allow_pickle = True)
elif config.dataset_type == "esc-50":
full_dataset = np.load(os.path.join(config.dataset_path, "esc-50-data.npy"), allow_pickle = True)
elif config.dataset_type == "scv2":
train_set = np.load(os.path.join(config.dataset_path, "scv2_train.npy"), allow_pickle = True)
test_set = np.load(os.path.join(config.dataset_path, "scv2_test.npy"), allow_pickle = True)
# set exp folder
exp_dir = os.path.join(config.workspace, "results", config.exp_name)
checkpoint_dir = os.path.join(config.workspace, "results", config.exp_name, "checkpoint")
if not config.debug:
create_folder(os.path.join(config.workspace, "results"))
create_folder(exp_dir)
create_folder(checkpoint_dir)
dump_config(config, os.path.join(exp_dir, config.exp_name), False)
# import dataset SEDDataset
if config.dataset_type == "audioset":
print("Using Audioset")
dataset = SEDDataset(
index_path=train_index_path,
idc = train_idc,
config = config
)
eval_dataset = SEDDataset(
index_path=eval_index_path,
idc = eval_idc,
config = config,
eval_mode = True
)
elif config.dataset_type == "esc-50":
print("Using ESC")
dataset = ESC_Dataset(
dataset = full_dataset,
config = config,
eval_mode = False
)
eval_dataset = ESC_Dataset(
dataset = full_dataset,
config = config,
eval_mode = True
)
elif config.dataset_type == "scv2":
print("Using SCV2")
dataset = SCV2_Dataset(
dataset = train_set,
config = config,
eval_mode = False
)
eval_dataset = SCV2_Dataset(
dataset = test_set,
config = config,
eval_mode = True
)
audioset_data = data_prep(dataset, eval_dataset, device_num)
if config.dataset_type == "audioset":
checkpoint_callback = ModelCheckpoint(
monitor = "mAP",
filename='l-{epoch:d}-{mAP:.3f}-{mAUC:.3f}',
save_top_k = 20,
mode = "max"
)
else:
checkpoint_callback = ModelCheckpoint(
monitor = "acc",
filename='l-{epoch:d}-{acc:.3f}',
save_top_k = 20,
mode = "max"
)
trainer = pl.Trainer(
deterministic=True,
default_root_dir = checkpoint_dir,
gpus = device_num,
val_check_interval = 0.1,
max_epochs = config.max_epoch,
auto_lr_find = True,
sync_batchnorm = True,
callbacks = [checkpoint_callback],
accelerator = "ddp" if device_num > 1 else None,
num_sanity_val_steps = 0,
resume_from_checkpoint = None,
replace_sampler_ddp = False,
gradient_clip_val=1.0
)
sed_model = HTSAT_Swin_Transformer(
spec_size=config.htsat_spec_size,
patch_size=config.htsat_patch_size,
in_chans=1,
num_classes=config.classes_num,
window_size=config.htsat_window_size,
config = config,
depths = config.htsat_depth,
embed_dim = config.htsat_dim,
patch_stride=config.htsat_stride,
num_heads=config.htsat_num_head
)
model = SEDWrapper(
sed_model = sed_model,
config = config,
dataset = dataset
)
if config.resume_checkpoint is not None:
ckpt = torch.load(config.resume_checkpoint, map_location="cpu")
ckpt["state_dict"].pop("sed_model.head.weight")
ckpt["state_dict"].pop("sed_model.head.bias")
# finetune on the esc and spv2 dataset
ckpt["state_dict"].pop("sed_model.tscam_conv.weight")
ckpt["state_dict"].pop("sed_model.tscam_conv.bias")
model.load_state_dict(ckpt["state_dict"], strict=False)
elif config.swin_pretrain_path is not None: # train with pretrained model
ckpt = torch.load(config.swin_pretrain_path, map_location="cpu")
# load pretrain model
ckpt = ckpt["model"]
found_parameters = []
unfound_parameters = []
model_params = dict(model.state_dict())
for key in model_params:
m_key = key.replace("sed_model.", "")
if m_key in ckpt:
if m_key == "patch_embed.proj.weight":
ckpt[m_key] = torch.mean(ckpt[m_key], dim = 1, keepdim = True)
if m_key == "head.weight" or m_key == "head.bias":
ckpt.pop(m_key)
unfound_parameters.append(key)
continue
assert model_params[key].shape==ckpt[m_key].shape, "%s is not match, %s vs. %s" %(key, str(model_params[key].shape), str(ckpt[m_key].shape))
found_parameters.append(key)
ckpt[key] = ckpt.pop(m_key)
else:
unfound_parameters.append(key)
print("pretrain param num: %d \t wrapper param num: %d"%(len(found_parameters), len(ckpt.keys())))
print("unfound parameters: ", unfound_parameters)
model.load_state_dict(ckpt, strict = False)
model_params = dict(model.named_parameters())
trainer.fit(model, audioset_data)
def main():
parser = argparse.ArgumentParser(description="HTS-AT")
subparsers = parser.add_subparsers(dest = "mode")
parser_train = subparsers.add_parser("train")
parser_test = subparsers.add_parser("test")
parser_esm_test = subparsers.add_parser("esm_test")
parser_saveidc = subparsers.add_parser("save_idc")
parser_wa = subparsers.add_parser("weight_average")
args = parser.parse_args()
# default settings
logging.basicConfig(level=logging.INFO)
pl.utilities.seed.seed_everything(seed = config.random_seed)
if args.mode == "train":
train()
elif args.mode == "test":
test()
elif args.mode == "esm_test":
esm_test()
elif args.mode == "save_idc":
save_idc()
elif args.mode == "weight_average":
weight_average()
else:
raise Exception("Error Mode!")
if __name__ == '__main__':
main()
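# Example invocations (a sketch, not taken from the original source; the file
# name "main.py" and the settings pulled from the accompanying config module
# are assumptions):
#   python main.py train             # train HTS-AT with the configured dataset
#   python main.py test              # evaluate a saved checkpoint
#   python main.py weight_average    # average saved checkpoints before testing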
| 36.450116 | 156 | 0.629408 |
a149982a6c8f4906806fbff87d44aeb36ed91296 | 740 | py | Python | dbtest/blog/migrations/0002_userinfo.py | marktiu7/demo | 07df5d706b17b2d9a6275a96d803914b32ab1630 | [
"Apache-2.0"
] | null | null | null | dbtest/blog/migrations/0002_userinfo.py | marktiu7/demo | 07df5d706b17b2d9a6275a96d803914b32ab1630 | [
"Apache-2.0"
] | null | null | null | dbtest/blog/migrations/0002_userinfo.py | marktiu7/demo | 07df5d706b17b2d9a6275a96d803914b32ab1630 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2016-09-28 07:22
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blog', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='userinfo',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=30)),
('email', models.EmailField(max_length=254)),
('memo', models.TextField()),
],
options={
'indexes': [],
},
),
]
| 26.428571 | 114 | 0.537838 |
a3f2415e55adc1079228ea945610fe73a36926e2 | 319 | py | Python | 2-Medium/numRescueBoats.py | Sma-Das/Leetcode | 6f9b8f069e2ef198408abd6780fd0697a8bebada | [
"MIT"
] | null | null | null | 2-Medium/numRescueBoats.py | Sma-Das/Leetcode | 6f9b8f069e2ef198408abd6780fd0697a8bebada | [
"MIT"
] | null | null | null | 2-Medium/numRescueBoats.py | Sma-Das/Leetcode | 6f9b8f069e2ef198408abd6780fd0697a8bebada | [
"MIT"
] | null | null | null | class Solution:
def numRescueBoats(self, people: list[int], limit: int) -> int:
people.sort(reverse=True)
l, r = 0, len(people)-1
        while l <= r:  # always ship the heaviest remaining person first
            if people[l] + people[r] <= limit:
                r -= 1  # the lightest person fits in the same boat
            l += 1  # the heaviest person departs, alone or paired
return l
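# A minimal usage sketch (not part of the original solution): the two-pointer
# greedy above always ships the heaviest remaining person, adding the lightest
# as a passenger whenever the pair fits under the limit.
if __name__ == "__main__":
    # people=[3, 2, 2, 1], limit=3 -> boats (3), (2, 1), (2), so 3 boats
    print(Solution().numRescueBoats([3, 2, 2, 1], 3))  # expected: 3
    print(Solution().numRescueBoats([1, 2], 3))        # expected: 1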
| 31.9 | 67 | 0.501567 |
7d956410d3e9a54e2db9cee46f9a7fd4935f9735 | 2,540 | py | Python | demo/dirichlet_poisson2D.py | liqihao2000/shenfun | 2164596ccf906242779d9ec361168246ee6214d8 | [
"BSD-2-Clause"
] | 1 | 2021-10-09T06:48:30.000Z | 2021-10-09T06:48:30.000Z | demo/dirichlet_poisson2D.py | liqihao2000/shenfun | 2164596ccf906242779d9ec361168246ee6214d8 | [
"BSD-2-Clause"
] | null | null | null | demo/dirichlet_poisson2D.py | liqihao2000/shenfun | 2164596ccf906242779d9ec361168246ee6214d8 | [
"BSD-2-Clause"
] | null | null | null | r"""
Solve Poisson equation in 2D with periodic bcs in one direction
and homogeneous Dirichlet in the other
\nabla^2 u = f,
Use Fourier basis for the periodic direction and Shen's Dirichlet basis for the
non-periodic direction.
The equation to solve is
(\nabla^2 u, v) = (f, v)
"""
import sys
import os
import importlib
from sympy import symbols, cos, sin
import numpy as np
from shenfun import inner, div, grad, TestFunction, TrialFunction, \
Array, Function, FunctionSpace, TensorProductSpace, comm
assert len(sys.argv) == 3, "Call with two command-line arguments"
assert sys.argv[-1].lower() in ('legendre', 'chebyshev', 'jacobi')
assert isinstance(int(sys.argv[-2]), int)
# Collect solver
family = sys.argv[-1].lower()
base = importlib.import_module('.'.join(('shenfun', family)))
Solver = base.la.Helmholtz
# Use sympy to compute a rhs, given an analytical solution
a = 1
b = -1
if family == 'jacobi':
a = 0
b = 0
x, y = symbols("x,y")
ue = (cos(4*x) + sin(2*y))*(1 - x**2) + a*(1 - x)/2 + b*(1 + x)/2
fe = ue.diff(x, 2) + ue.diff(y, 2)
# Size of discretization
N = (int(sys.argv[-2]), int(sys.argv[-2])+1)
SD = FunctionSpace(N[0], family=family, scaled=True, bc=(a, b))
K1 = FunctionSpace(N[1], family='F', dtype='d', domain=(-2*np.pi, 2*np.pi))
T = TensorProductSpace(comm, (SD, K1), axes=(0, 1))
u = TrialFunction(T)
v = TestFunction(T)
# Get f on quad points
fj = Array(T, buffer=fe)
# Compute right hand side of Poisson equation
f_hat = Function(T)
f_hat = inner(v, fj, output_array=f_hat)
# Get left hand side of Poisson equation
matrices = inner(v, div(grad(u)))
# Create Helmholtz linear algebra solver
H = Solver(*matrices)
# Solve and transform to real space
u_hat = Function(T) # Solution spectral space
u_hat = H(u_hat, f_hat) # Solve
uq = u_hat.backward()
uh = uq.forward()
# Compare with analytical solution
uj = Array(T, buffer=ue)
assert np.allclose(uj, uq)
if 'pytest' not in os.environ:
import matplotlib.pyplot as plt
plt.figure()
X = T.local_mesh(True)
plt.contourf(X[0], X[1], uq)
plt.colorbar()
plt.figure()
plt.contourf(X[0], X[1], uj)
plt.colorbar()
plt.figure()
plt.contourf(X[0], X[1], uq-uj)
plt.colorbar()
plt.title('Error')
plt.figure()
X = T.local_mesh()
for xj in np.squeeze(X[0]):
plt.plot((xj, xj), (np.squeeze(X[1])[0], np.squeeze(X[1])[-1]), 'k')
for yj in np.squeeze(X[1]):
plt.plot((np.squeeze(X[0])[0], np.squeeze(X[0])[-1]), (yj, yj), 'k')
#plt.show()
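# Example invocation (a sketch; requires shenfun, sympy and matplotlib):
#   python dirichlet_poisson2D.py 32 chebyshev
# where 32 is the resolution N and the last argument picks the basis family.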
| 25.4 | 79 | 0.645276 |
4c85fd3963648e556fc1ffca330ce49808418565 | 3,835 | py | Python | kapsoya/models.py | Chebichii-Lab/Kapsoya-Estate | ce10224f30a6823396a51d3edd017b286e399acd | [
"MIT"
] | null | null | null | kapsoya/models.py | Chebichii-Lab/Kapsoya-Estate | ce10224f30a6823396a51d3edd017b286e399acd | [
"MIT"
] | null | null | null | kapsoya/models.py | Chebichii-Lab/Kapsoya-Estate | ce10224f30a6823396a51d3edd017b286e399acd | [
"MIT"
] | 1 | 2021-09-30T19:35:45.000Z | 2021-09-30T19:35:45.000Z | from django.db import models
from django.contrib.auth.models import User
from cloudinary.models import CloudinaryField
from django.db.models.signals import post_save
from django.dispatch import receiver
# Create your models here.
#class Neighbourhood
class Neighbourhood(models.Model):
neighbourhood_name = models.CharField(max_length=200)
neighbourhood_location = models.CharField(max_length=200)
neighbourhood_description = models.TextField(max_length=500, blank=True)
neighbourhood_photo = CloudinaryField('photo', default='photo')
admin = models.ForeignKey(User, on_delete=models.CASCADE, related_name='admin')
def __str__(self):
return self.neighbourhood_name
def create_neighbourhood(self):
self.save()
def save_neighbourhood(self):
self.save()
def delete_neighbourhood(self):
self.delete()
@classmethod
def find_hood(cls, hood_id):
return cls.objects.filter(id=hood_id)
def update_hood(self):
neighbourhood_name = self.neighbourhood_name
self.neighbourhood_name = neighbourhood_name
class Profile(models.Model):
user = models.OneToOneField(User, on_delete=models.CASCADE)
id_number = models.IntegerField(default=0)
email = models.CharField(max_length=30, blank=True)
profile_picture = CloudinaryField('profile')
bio = models.TextField(max_length=500, blank=True)
neighbourhood = models.ForeignKey(Neighbourhood, on_delete=models.CASCADE, blank=True, null=True)
def __str__(self):
return self.user.username
@receiver(post_save, sender=User)
def update_user_profile(sender, instance, created, **kwargs):
if created:
Profile.objects.create(user=instance)
instance.profile.save()
def save_profile(self):
self.save()
def delete_profile(self):
self.delete()
    @classmethod
    def update_profile(cls, id):
        return cls.objects.get(user_id=id)
#class Business
class Business(models.Model):
business_name = models.CharField(max_length=100,blank=False)
user = models.ForeignKey(Profile, on_delete=models.CASCADE, related_name='business_owner')
neighbourhood_id = models.ForeignKey(Neighbourhood, on_delete=models.CASCADE, related_name='business', blank=True, null=True)
business_email = models.CharField(max_length=150,blank=False)
business_description = models.TextField()
def __str__(self):
return self.business_name
def save_business(self):
self.save()
def create_business(self):
self.save()
def delete_business(self):
self.delete()
@classmethod
def find_business(cls,business_id):
business = cls.objects.get(id = business_id)
return business
@classmethod
def search_by_name(cls,search_term):
businesses = cls.objects.filter(business_name__icontains=search_term)
return businesses
def update_business(self):
name = self.business_name
self.business_name = name
class Post(models.Model):
CHOICES = (
('1', 'Security'),
('2', 'Health Emergency'),
('3', 'Entertainment'),
('4', 'Fire Breakouts'),
('5', 'Playground'),
('6', 'Death'),
('7', 'Gym'),
)
category = models.CharField(max_length=120, choices=CHOICES)
title = models.CharField(max_length=100, null=True)
post = models.TextField()
date_posted = models.DateTimeField(auto_now_add=True)
user = models.ForeignKey(Profile, on_delete=models.CASCADE, related_name='post_owner')
neighbourhood = models.ForeignKey(Neighbourhood, on_delete=models.CASCADE, related_name='neighbourhood_post')
def __str__(self):
return f'{self.title} Post'
def save_post(self):
self.save()
def delete_post(self):
self.delete()
| 31.178862 | 129 | 0.695698 |
9b6934e67fa20a6e188120791e2d052823c1d689 | 165 | py | Python | api_digital_certificate/digital_api/urls.py | slarda/Web-Scrapping-Bots-For-Crawling-Docs | aa8ce3c72bfbe2111d16655ffc3a6759a825946e | [
"Apache-2.0"
] | 1 | 2020-12-17T11:21:01.000Z | 2020-12-17T11:21:01.000Z | api_digital_certificate/digital_api/urls.py | soft-super/Web-Scrapping-Bots-For-Crawling-Docs | aa8ce3c72bfbe2111d16655ffc3a6759a825946e | [
"Apache-2.0"
] | 5 | 2021-03-19T01:48:07.000Z | 2021-06-09T18:26:31.000Z | api_digital_certificate/digital_api/urls.py | tiny-1996/Web-Scrapping-Bots-For-Crawling-Docs | aa8ce3c72bfbe2111d16655ffc3a6759a825946e | [
"Apache-2.0"
] | null | null | null | from django.urls import path
from digital_api.views import DigitalCertificateView
urlpatterns = [
path('get_signed_key/', DigitalCertificateView.as_view()),
]
| 20.625 | 62 | 0.787879 |
21cba58902c7717ee527d9b8aeb59e40ee44792b | 6,617 | py | Python | InplusTrader/dataEngine/drawDK.py | zhengwsh/InplusTrader_Linux | 5f7eb17004da0b76ceafb93cb314de7a6009cd04 | [
"MIT"
] | 17 | 2017-04-20T05:17:25.000Z | 2020-09-30T08:58:03.000Z | InplusTrader/dataEngine/drawDK.py | vladhj38/InplusTrader_Linux | 5f7eb17004da0b76ceafb93cb314de7a6009cd04 | [
"MIT"
] | 1 | 2017-11-12T01:24:06.000Z | 2019-09-19T08:50:38.000Z | InplusTrader/dataEngine/drawDK.py | vladhj38/InplusTrader_Linux | 5f7eb17004da0b76ceafb93cb314de7a6009cd04 | [
"MIT"
] | 17 | 2017-04-17T08:17:00.000Z | 2020-10-25T01:56:49.000Z | # -*- coding: utf-8 -*-
"""
Create on 2017/02/18
@author: vinson zheng
@group: inpluslab
@contact: 1530820222@qq.com
"""
import sys, os
import datetime
import numpy as np
import pymongo
from pymongo import MongoClient
import talib as ta
import plot as iplot
import matplotlib.colors as colors
import matplotlib.dates as mdates
from matplotlib.dates import date2num
import matplotlib.ticker as mticker
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
import matplotlib.font_manager as font_manager
from matplotlib.collections import LineCollection, PolyCollection
from matplotlib.lines import Line2D, TICKLEFT, TICKRIGHT
from matplotlib.patches import Rectangle
from matplotlib.transforms import Affine2D
class DrawDailyK(object):
"""docstring for DrawDailyK"""
def __init__(self):
self.conn = MongoClient('172.18.181.134', 27017)
self.Symbol_Db = self.conn.ifTrader_Symbol_Db
self.Daily_Db = self.conn.ifTrader_Daily_Db
#self.OneMin_Db = self.conn.ifTrader_1Min_Db
#self.Tick_Db = self.conn.ifTrader_Tick_Db
def fetch_data(self, ticker, start=None, end=None):
self.ticker = ticker
self.today = datetime.date.today()
self.end = self.today.strftime("%Y-%m-%d") if end is None else end
self.start = (self.today-datetime.timedelta(days=180)).strftime("%Y-%m-%d") if start is None else start
self.startdate = datetime.datetime.strptime(self.start, "%Y-%m-%d")
self.enddate = datetime.datetime.strptime(self.end, "%Y-%m-%d")
flt = {'date' : {'$gte':self.start, '$lt':self.end}}
self.r = self.Daily_Db[self.ticker].find(flt).sort("date",pymongo.ASCENDING)
def draw(self):
plt.rc('axes', grid=True)
plt.rc('grid', color='0.75', linestyle='-', linewidth=0.5)
#fig, (ax2, ax3, ax1) = plt.subplots(3, 1)
fig, ax2 = plt.subplots(1, 1)
fig.set_facecolor('gray')
ax2.set_facecolor('#d5d5d5')
ax2t = ax2.twinx()
#set(gca, 'Units', 'normalized', 'Position', [0.505 0.505 0.495 0.495])
plt.subplots_adjust(0.05, 0.05, 0.95, 0.95)
#-----------------------------------
kwidth = 0.4
OFFSET = kwidth / 2.0
alpha = 1.0
lines = []
patches = []
colorup = 'r'
colordown = 'g'
dateL = []
openL = []
highL = []
lowL = []
closeL = []
volumeL = []
amountL = []
for daybar in self.r:
date = datetime.datetime.strptime(daybar['date'], "%Y-%m-%d")
t = date2num(date)
open = float(daybar['open'])
high = float(daybar['high'])
low = float(daybar['low'])
close = float(daybar['close'])
volume = float(daybar['volume'])
amount = float(daybar['amount'])
dateL.append(t)
closeL.append(close)
volumeL.append(volume)
if close >= open:
color = colorup
lower = open
height = close - open
else:
color = colordown
lower = close
height = open - close
vline = Line2D(
xdata=(t, t), ydata=(low, high),
color=color,
linewidth=0.5,
antialiased=True,
)
rect = Rectangle(
xy=(t - OFFSET, lower),
width=kwidth,
height=height,
facecolor=color,
edgecolor=color,
)
rect.set_alpha(alpha)
lines.append(vline)
patches.append(rect)
ax2.add_line(vline)
ax2.add_patch(rect)
ax2.autoscale_view()
ax2.xaxis_date()
ax2.set_title('%s DAILY K-LINE' % self.ticker)
dateL = np.array(dateL)
openL = np.array(openL)
highL = np.array(highL)
lowL = np.array(lowL)
closeL = np.array(closeL)
volumeL = np.array(volumeL)
amountL = np.array(amountL)
vmax = volumeL.max()
poly = ax2t.fill_between(dateL, volumeL, 0, label='Volume', facecolor='darkgoldenrod', edgecolor='darkgoldenrod')
ax2t.xaxis_date()
ax2t.set_ylim(0, 5*vmax)
ax2t.set_yticks([])
ma5 = ta.SMA(closeL, 5)
ma30 = ta.SMA(closeL, 30)
linema5, = ax2.plot(dateL, ma5, color='blue', lw=2, label='SMA (5)')
linema30, = ax2.plot(dateL, ma30, color='red', lw=2, label='SMA (30)')
props = font_manager.FontProperties(size=10)
leg = ax2.legend(loc='upper right', shadow=True, fancybox=True, prop=props)
leg.get_frame().set_alpha(0.5)
plt.show()
'''
s = '%s O:%1.2f H:%1.2f L:%1.2f C:%1.2f, V:%1.1fM Chg:%+1.2f' % (
dateL[-1],
openL[-1], lastL[-1],
lowL[-1], closeL[-1],
volumeL[-1],
closeL[-1] - openL[-1])
t4 = ax2.text(0.3, 0.9, s, transform=ax2.transAxes, fontsize=textsize)
'''
'''
#ax3.set_yticks([])
# turn off upper axis tick labels, rotate the lower ones, etc
for ax in ax1, ax2, ax2t, ax3:
if ax != ax3:
for label in ax.get_xticklabels():
label.set_visible(False)
else:
for label in ax.get_xticklabels():
label.set_rotation(30)
label.set_horizontalalignment('right')
ax.fmt_xdata = mdates.DateFormatter('%Y-%m-%d')
class MyLocator(mticker.MaxNLocator):
def __init__(self, *args, **kwargs):
mticker.MaxNLocator.__init__(self, *args, **kwargs)
def __call__(self, *args, **kwargs):
return mticker.MaxNLocator.__call__(self, *args, **kwargs)
# at most 5 ticks, pruning the upper and lower so they don't overlap
# with other ticks
#ax2.yaxis.set_major_locator(mticker.MaxNLocator(5, prune='both'))
#ax3.yaxis.set_major_locator(mticker.MaxNLocator(5, prune='both'))
ax2.yaxis.set_major_locator(MyLocator(5, prune='both'))
ax3.yaxis.set_major_locator(MyLocator(5, prune='both'))
'''
# setup application
if __name__ == '__main__':
dt = DrawDailyK()
if len(sys.argv) > 2:
dt.fetch_data(sys.argv[1], sys.argv[2], sys.argv[3])
else:
dt.fetch_data(sys.argv[1])
dt.draw()
# python drawDK.py 600048 2017-01-01 2017-02-20
| 31.509524 | 121 | 0.552818 |