code: string (lengths 10 to 805k)
def_use_chains: sequence (lengths 0 to 667)
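Each data row below pairs a flattened Python source string (code) with its def-use chains. A minimal sketch of how such a row could be consumed, assuming each chain is a list of [start, end] character offsets into the code string, with the first span marking the definition site and the remaining spans marking its uses (this layout is inferred from the rows below and is not documented in the dump itself); the row variable and the iter_def_use helper are illustrative names, not part of the dataset:

def iter_def_use(code, def_use_chains):
    """Yield (defined name, list of use snippets) for one dataset row."""
    for chain in def_use_chains:
        definition, *uses = chain                 # first span = definition site
        name = code[definition[0]:definition[1]]  # slice the defined identifier
        yield name, [code[start:end] for start, end in uses]

# Illustrative usage on a row loaded elsewhere (e.g. via json.loads):
# for name, uses in iter_def_use(row["code"], row["def_use_chains"]):
#     print(name, "used", len(uses), "times")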
"""Platform for sensor integration.""" from __future__ import annotations import homeassistant.helpers.config_validation as cv import requests import voluptuous as vol from homeassistant.components.sensor import SensorEntity, PLATFORM_SCHEMA, SensorStateClass, SensorDeviceClass from homeassistant.const import CONF_USERNAME, CONF_PASSWORD, CONF_API_TOKEN from homeassistant.core import HomeAssistant from homeassistant.helpers.entity_platform import AddEntitiesCallback from homeassistant.helpers.typing import ConfigType, DiscoveryInfoType from requests.auth import HTTPBasicAuth PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({ vol.Required(CONF_USERNAME): cv.string, vol.Required(CONF_PASSWORD): cv.string, vol.Required(CONF_API_TOKEN): cv.string, }) def setup_platform( hass: HomeAssistant, config: ConfigType, add_entities: AddEntitiesCallback, discovery_info: DiscoveryInfoType | None = None ) -> None: """Set up the sensor platform.""" url = "https://secure.kontomierz.pl/k4/user_accounts.json?api_key=" + config.get(CONF_API_TOKEN) payload = {} headers = { 'Content-Type': 'application/json', 'Accept': 'application/json', } response = requests.get(url, auth=HTTPBasicAuth(config.get(CONF_USERNAME), config.get(CONF_PASSWORD)), headers=headers, data=payload) response_json = response.json() for x in response_json: account = x.get('user_account') add_entities( [KontomierzSensor(hass, config, account.get('bank_name') + " - " + account.get('display_name'), account.get('iban'))]) class KontomierzSensor(SensorEntity): """Representation of a Sensor.""" def __init__(self, hass, config: dict, entity_name: string, iban: string) -> None: self._attr_device_class = SensorDeviceClass.MONETARY self._attr_state_class = SensorStateClass.MEASUREMENT self._state = None self.hass = hass self.username = config.get(CONF_USERNAME) self.password = config.get(CONF_PASSWORD) self.apiToken = config.get(CONF_API_TOKEN) self.entity_name = entity_name self.iban = iban @property def unique_id(self) -> str | None: return "kontomierz_sensor" + self.entity_name @property def name(self) -> str: return self.entity_name @property def state(self): """Return the state of the sensor.""" return self._state def update(self) -> None: """Fetch new state data for the sensor. This is the only method that should fetch new data for Home Assistant. """ url = "https://secure.kontomierz.pl/k4/user_accounts.json?api_key=" + self.apiToken response = requests.get(url, auth=HTTPBasicAuth(self.username, self.password), headers={ 'Content-Type': 'application/json', 'Accept': 'application/json', }, data={}) response_json = response.json() result = 0.0 for x in response_json: user_account = x.get('user_account') if self.iban == user_account.get('iban'): result = float(user_account.get('balance')) self._attr_native_unit_of_measurement = user_account.get('currency_name') self._state = result
[ [ [ 62, 73 ] ], [ [ 82, 127 ], [ 660, 662 ], [ 704, 706 ], [ 749, 751 ] ], [ [ 135, 143 ], [ 1227, 1235 ], [ 2795, 2803 ] ], [ [ 151, 168 ], [ 631, 634 ], [ 675, 678 ], [ 719, 722 ] ], [ [ 213, 225 ], [ 1690, 1702 ] ], [ [ 227, 242 ], [ 602, 617 ] ], [ [ 244, 260 ], [ 1925, 1941 ] ], [ [ 262, 279 ], [ 1865, 1882 ] ], [ [ 312, 325 ], [ 644, 657 ], [ 1275, 1288 ], [ 2041, 2054 ] ], [ [ 327, 340 ], [ 688, 701 ], [ 1302, 1315 ], [ 2091, 2104 ] ], [ [ 342, 356 ], [ 732, 746 ], [ 1075, 1089 ], [ 2141, 2155 ] ], [ [ 388, 401 ], [ 799, 812 ] ], [ [ 452, 471 ], [ 864, 883 ] ], [ [ 513, 523 ], [ 830, 840 ] ], [ [ 525, 542 ], [ 909, 926 ] ], [ [ 569, 582 ], [ 1250, 1263 ], [ 2818, 2831 ] ], [ [ 584, 599 ] ], [ [ 769, 783 ] ], [ [ 1673, 1689 ], [ 1517, 1533 ] ] ]
import torch import numpy as np def get_sigmas(config): if config.model.sigma_dist == 'geometric': sigmas = torch.tensor( np.exp(np.linspace(np.log(config.model.sigma_begin), np.log(config.model.sigma_end), config.model.num_classes))).float().to(config.device) elif config.model.sigma_dist == 'uniform': sigmas = torch.tensor( np.linspace(config.model.sigma_begin, config.model.sigma_end, config.model.num_classes) ).float().to(config.device) else: raise NotImplementedError('sigma distribution not supported') return sigmas @torch.no_grad() def anneal_Langevin_dynamics(x_mod, scorenet, sigmas, n_steps_each=200, step_lr=0.000008, final_only=False, verbose=False, denoise=True, add_noise=True): images = [] with torch.no_grad(): for c, sigma in enumerate(sigmas): labels = torch.ones(x_mod.shape[0], device=x_mod.device) * c #dummy target 1...T depending on iteration labels = labels.long() step_size = step_lr * (sigma / sigmas[-1]) ** 2 for s in range(n_steps_each): grad = scorenet(x_mod, labels) #choose whether to add random noise during each gradient ascent step if add_noise: noise = torch.randn_like(x_mod) else: noise = torch.zeros_like(x_mod) #calculate l2 norms of gradient (score) and the additive noise for logging grad_norm = torch.norm(grad.view(grad.shape[0], -1), dim=-1).mean() noise_norm = torch.norm(noise.view(noise.shape[0], -1), dim=-1).mean() x_mod = x_mod + step_size * grad + noise * np.sqrt(step_size * 2) #core Langevin step #calc l2 norm of iterate variable for logging image_norm = torch.norm(x_mod.view(x_mod.shape[0], -1), dim=-1).mean() #calc snr as scaled version of [||s(x, \sigma_i)|| / ||z_t||] and mean of score for logging snr = np.sqrt(step_size / 2.) * grad_norm / noise_norm grad_mean_norm = torch.norm(grad.mean(dim=0).view(-1)) ** 2 * sigma ** 2 if not final_only: images.append(x_mod.to('cpu')) if verbose: print("level: {}, step_size: {}, grad_norm: {}, image_norm: {}, snr: {}, grad_mean_norm: {}".format( c, step_size, grad_norm.item(), image_norm.item(), snr.item(), grad_mean_norm.item())) #final denoising step if desired - removes the very last additive z_L if denoise: last_noise = (len(sigmas) - 1) * torch.ones(x_mod.shape[0], device=x_mod.device) last_noise = last_noise.long() x_mod = x_mod + sigmas[-1] ** 2 * scorenet(x_mod, last_noise) images.append(x_mod.to('cpu')) if final_only: return [x_mod.to('cpu')] else: return images @torch.no_grad() def langevin_Inverse(x_mod, y, A, scorenet, sigmas, n_steps_each=200, step_lr=0.000008, final_only=False, verbose=False, denoise=True, add_noise=True, decimate_sigma=None, mode=None, true_x=None): images = [] #if desired, decimate the number of noise scales to speed up inference if decimate_sigma is not None: sigmas_temp = sigmas[0:-1:decimate_sigma].tolist() #grab every decimate_sigma'th value except the last one sigmas_temp.append(sigmas[-1]) #add the last sigma value back to the list # num_sigmas = sigmas.shape[0] // decimate_sigma # sigmas_temp = [] # for i in range(num_sigmas): # sigmas_temp.append(sigmas[-1]) sigmas = sigmas_temp #swap the new decimated sigma list for the main one mse = torch.nn.MSELoss() N, C, H, W = x_mod.shape steps = np.geomspace(start=5, stop=1, num=len(sigmas)) c2 = 1 with torch.no_grad(): #outer loop over noise scales for c, sigma in enumerate(sigmas): #dummy target 1...T depending on iteration labels = torch.ones(x_mod.shape[0], device=x_mod.device) * c labels = labels.long() #step_size = step_lr * (sigma / sigmas[-1]) ** 2 step_size = steps[c] #Inner loop over T for s in range(n_steps_each): #s(x_t) 
~= \grad_x log p(x) -- THE PRIOR grad = scorenet(x_mod, labels) prior_norm = torch.norm(grad.view(grad.shape[0], -1), dim=-1).mean() #prior_mean_norm = torch.norm(grad.mean(dim=0).view(-1)) ** 2 * sigma ** 2 #calculate the maximum likelihood gradient - i.e. MSE gradient #A should be [N, m, C * H * W], x should be [N, C, H, W], y should be [N, m, 1] if mode=='denoising': Axt = x_mod mle_grad = (Axt - y) * (1 / N) #for denoising, y has same dimension as x else: Axt = torch.matmul(A, x_mod.view(N, -1, 1)) mle_grad = torch.matmul(torch.transpose(A, -2, -1), Axt - y).view(N, C, H, W) * c2 #MSE gradient #mle_grad = torch.matmul(torch.transpose(A, -2, -1), torch.sign(Axt - y)).view(N, C, H, W) * (1 / N) #L1 error gradient likelihood_norm = torch.norm(mle_grad.view(mle_grad.shape[0], -1), dim=-1).mean() #likelihood_mean_norm = torch.norm(mle_grad.mean(dim=0).view(-1)) ** 2 if c == 0 and s == 0: c2 = prior_norm.item() / likelihood_norm.item() mle_grad = mle_grad * c2 #MSE gradient likelihood_norm = torch.norm(mle_grad.view(mle_grad.shape[0], -1), dim=-1).mean() #The final gradient grad = grad - mle_grad grad_norm = torch.norm(grad.view(grad.shape[0], -1), dim=-1).mean() #grad_mean_norm = torch.norm(grad.mean(dim=0).view(-1)) ** 2 #choose whether to add random noise during each gradient ascent step if add_noise: noise = torch.randn_like(x_mod) else: noise = torch.zeros_like(x_mod) x_mod = x_mod + step_size * grad + noise * np.sqrt(step_size * 2) #core Langevin step #calc l2 norm of iterate variable for logging image_norm = torch.norm(x_mod.view(x_mod.shape[0], -1), dim=-1).mean() noise_norm = torch.norm(noise.view(noise.shape[0], -1), dim=-1).mean() snr = np.sqrt(step_size / 2.) * prior_norm / noise_norm mse_iter = mse(Axt, y) if true_x is not None: mse_true = mse(true_x, x_mod) if not final_only: images.append(x_mod.to('cpu')) if verbose: print("\nlevel: {}, step_size: {:.4f}, prior_norm: {:.4f}, likelihood_norm: {:.4f}, grad_norm: {:.4f} \ image_norm: {:.4f}, train_mse: {:.4f}".format( \ c, step_size, prior_norm.item(), likelihood_norm.item(), grad_norm.item(), image_norm.item(), \ mse_iter.item())) if true_x is not None: print("true_mse: {:.4f}".format(mse_true.item())) #final denoising step if desired - removes the very last additive z_L if denoise: last_noise = (len(sigmas) - 1) * torch.ones(x_mod.shape[0], device=x_mod.device) last_noise = last_noise.long() x_mod = x_mod + sigmas[-1] ** 2 * scorenet(x_mod, last_noise) images.append(x_mod.to('cpu')) if final_only: return [x_mod.to('cpu')] else: return images @torch.no_grad() def inverse_solver(x_mod, y, A, scorenet, sigmas, lr = [5, 1], c1=1, c2=1, auto_c2=True, final_only=False, verbose=False, likelihood_every=1, decimate_sigma=None, mode=None, true_x=None, sigma_type = 'subsample', likelihood_type="l2"): images = [] #if desired, decimate the number of noise scales to speed up inference if decimate_sigma is not None: if sigma_type == 'subsample': #grab equally-spaced sigma values sigmas_temp = sigmas[0:-1:decimate_sigma].tolist() sigmas_temp.append(sigmas[-1]) elif sigma_type == 'last': #grab just the last sigma value multiple times num_sigmas = sigmas.shape[0] // decimate_sigma sigmas_temp = [] for i in range(num_sigmas): sigmas_temp.append(sigmas[-1]) else: sigmas_temp = sigmas sigmas = sigmas_temp mse = torch.nn.MSELoss() N, C, H, W = x_mod.shape steps = np.geomspace(start=lr[0], stop=lr[1], num=len(sigmas)) likelihood_norm = 0 with torch.no_grad(): if sigma_type == 'last': labels = torch.ones(x_mod.shape[0], device=x_mod.device) * 1099 
labels = labels.long() for c, sigma in enumerate(sigmas): if sigma_type == 'subsample': labels = torch.ones(x_mod.shape[0], device=x_mod.device) * decimate_sigma * c labels = labels.long() elif sigma_type != 'last': labels = torch.ones(x_mod.shape[0], device=x_mod.device) * c labels = labels.long() step_size = steps[c] #s(x_t) ~= \grad_x log p(x) -- THE PRIOR grad = scorenet(x_mod, labels) * c1 prior_norm = torch.norm(grad.view(grad.shape[0], -1), dim=-1).mean() if c % likelihood_every == 0: #\grad_x log p(y | x) -- LIKELIHOOD if mode=='denoising': Axt = x_mod if likelihood_type == "l2": mle_grad = (Axt - y) * c2 elif likelihood_type == "l1": mle_grad = torch.sign(Axt - y) * c2 else: Axt = torch.matmul(A, x_mod.view(N, -1, 1)) if likelihood_type == "l2": mle_grad = torch.matmul(torch.transpose(A, -2, -1), Axt - y).view(N, C, H, W) * c2 elif likelihood_type == "l1": mle_grad = torch.matmul(torch.transpose(A, -2, -1), torch.sign(Axt - y)).view(N, C, H, W) * c2 likelihood_norm = torch.norm(mle_grad.view(mle_grad.shape[0], -1), dim=-1).mean() if auto_c2 and c == 0: c2 = prior_norm.item() / likelihood_norm.item() mle_grad = mle_grad * c2 #MSE gradient likelihood_norm = torch.norm(mle_grad.view(mle_grad.shape[0], -1), dim=-1).mean() grad = grad - mle_grad grad_norm = torch.norm(grad.view(grad.shape[0], -1), dim=-1).mean() x_mod = x_mod + step_size * grad #x_mod = torch.clamp(x_mod, 0.0, 1.0) #calc l2 norm of iterate variable for logging image_norm = torch.norm(x_mod.view(x_mod.shape[0], -1), dim=-1).mean() mse_iter = mse(Axt, y) if true_x is not None: mse_true = mse(true_x, x_mod) if not final_only: images.append(x_mod.cpu()) if verbose: print("\n iteration: {}, sigma: {:.4f}, step_size: {:.4f}, prior_norm: {:.4f}, likelihood_norm: {:.4f}, grad_norm: {:.4f} \ image_norm: {:.4f}, train_mse: {:.4f}".format( \ c, sigma, step_size, prior_norm.item(), likelihood_norm.item(), grad_norm.item(), image_norm.item(), \ mse_iter.item())) if true_x is not None: print("true_mse: {:.4f}".format(mse_true.item())) if final_only: return [x_mod.to('cpu')] else: return images @torch.no_grad() def anneal_Langevin_dynamics_inpainting(x_mod, refer_image, scorenet, sigmas, image_size, n_steps_each=100, step_lr=0.000008): """ Currently only good for 32x32 images. Assuming the right half is missing. """ images = [] #refer_image is the untainted x (?) 
#right now this only works with 3-channel images refer_image = refer_image.unsqueeze(1).expand(-1, x_mod.shape[1], -1, -1, -1) refer_image = refer_image.contiguous().view(-1, 3, image_size, image_size) x_mod = x_mod.view(-1, 3, image_size, image_size) cols = image_size // 2 half_refer_image = refer_image[..., :cols] with torch.no_grad(): for c, sigma in enumerate(sigmas): labels = torch.ones(x_mod.shape[0], device=x_mod.device) * c labels = labels.long() step_size = step_lr * (sigma / sigmas[-1]) ** 2 for s in range(n_steps_each): images.append(x_mod.to('cpu')) corrupted_half_image = half_refer_image + torch.randn_like(half_refer_image) * sigma x_mod[:, :, :, :cols] = corrupted_half_image noise = torch.randn_like(x_mod) * np.sqrt(step_size * 2) grad = scorenet(x_mod, labels) x_mod = x_mod + step_size * grad + noise print("class: {}, step_size: {}, mean {}, max {}".format(c, step_size, grad.abs().mean(), grad.abs().max())) return images @torch.no_grad() def anneal_Langevin_dynamics_interpolation(x_mod, scorenet, sigmas, n_interpolations, n_steps_each=200, step_lr=0.000008, final_only=False, verbose=False): images = [] n_rows = x_mod.shape[0] x_mod = x_mod[:, None, ...].repeat(1, n_interpolations, 1, 1, 1) x_mod = x_mod.reshape(-1, *x_mod.shape[2:]) for c, sigma in enumerate(sigmas): labels = torch.ones(x_mod.shape[0], device=x_mod.device) * c labels = labels.long() step_size = step_lr * (sigma / sigmas[-1]) ** 2 for s in range(n_steps_each): grad = scorenet(x_mod, labels) noise_p = torch.randn(n_rows, x_mod.shape[1], x_mod.shape[2], x_mod.shape[3], device=x_mod.device) noise_q = torch.randn(n_rows, x_mod.shape[1], x_mod.shape[2], x_mod.shape[3], device=x_mod.device) angles = torch.linspace(0, np.pi / 2., n_interpolations, device=x_mod.device) noise = noise_p[:, None, ...] * torch.cos(angles)[None, :, None, None, None] + \ noise_q[:, None, ...] * torch.sin(angles)[None, :, None, None, None] noise = noise.reshape(-1, *noise.shape[2:]) grad_norm = torch.norm(grad.view(grad.shape[0], -1), dim=-1).mean() noise_norm = torch.norm(noise.view(noise.shape[0], -1), dim=-1).mean() image_norm = torch.norm(x_mod.view(x_mod.shape[0], -1), dim=-1).mean() x_mod = x_mod + step_size * grad + noise * np.sqrt(step_size * 2) snr = np.sqrt(step_size / 2.) * grad_norm / noise_norm if not final_only: images.append(x_mod.to('cpu')) if verbose: print( "level: {}, step_size: {}, image_norm: {}, grad_norm: {}, snr: {}".format( c, step_size, image_norm.item(), grad_norm.item(), snr.item())) if final_only: return [x_mod.to('cpu')] else: return images
[ [ [ 7, 12 ], [ 633, 638 ], [ 3049, 3054 ], [ 7977, 7982 ], [ 12160, 12165 ], [ 13732, 13737 ], [ 121, 126 ], [ 381, 386 ], [ 858, 863 ], [ 939, 944 ], [ 1362, 1367 ], [ 1437, 1442 ], [ 1581, 1586 ], [ 1666, 1671 ], [ 1919, 1924 ], [ 2190, 2195 ], [ 2738, 2743 ], [ 3903, 3908 ], [ 4034, 4039 ], [ 4208, 4213 ], [ 4599, 4604 ], [ 5134, 5139 ], [ 5203, 5208 ], [ 5216, 5221 ], [ 5464, 5469 ], [ 5819, 5824 ], [ 5989, 5994 ], [ 6266, 6271 ], [ 6341, 6346 ], [ 6560, 6565 ], [ 6647, 6652 ], [ 7666, 7671 ], [ 8922, 8927 ], [ 9074, 9079 ], [ 9145, 9150 ], [ 9346, 9351 ], [ 9518, 9523 ], [ 9771, 9776 ], [ 10176, 10181 ], [ 10250, 10255 ], [ 10372, 10377 ], [ 10385, 10390 ], [ 10530, 10535 ], [ 10543, 10548 ], [ 10571, 10576 ], [ 10650, 10655 ], [ 10919, 10924 ], [ 11048, 11053 ], [ 11284, 11289 ], [ 12852, 12857 ], [ 12933, 12938 ], [ 13228, 13233 ], [ 13356, 13361 ], [ 14153, 14158 ], [ 14396, 14401 ], [ 14541, 14546 ], [ 14685, 14690 ], [ 14799, 14804 ], [ 14896, 14901 ], [ 15022, 15027 ], [ 15103, 15108 ], [ 15186, 15191 ] ], [ [ 20, 31 ], [ 147, 149 ], [ 154, 156 ], [ 166, 168 ], [ 200, 202 ], [ 407, 409 ], [ 1784, 1786 ], [ 2108, 2110 ], [ 3965, 3967 ], [ 6425, 6427 ], [ 6727, 6729 ], [ 8984, 8986 ], [ 13382, 13384 ], [ 14703, 14705 ], [ 15300, 15302 ], [ 15342, 15344 ] ], [ [ 37, 47 ] ], [ [ 653, 677 ] ], [ [ 3069, 3085 ] ], [ [ 7997, 8011 ] ], [ [ 12180, 12215 ] ], [ [ 13752, 13790 ] ] ]
""" User Animation Card =================== Copyright (c) 2019 Ivanov Yuri For suggestions and questions: <kivydevelopment@gmail.com> This file is distributed under the terms of the same license, as the Kivy framework. Example ------- from kivymd.app import MDApp from kivy.lang import Builder from kivy.factory import Factory from kivymd.toast import toast from kivymd.theming import ThemeManager from kivymd.uix.useranimationcard import MDUserAnimationCard from kivymd.uix.button import MDIconButton from kivymd.uix.list import ILeftBodyTouch # Your content for a contact card. Builder.load_string(''' #:import get_hex_from_color kivy.utils.get_hex_from_color <TestAnimationCard@BoxLayout> orientation: 'vertical' padding: dp(10) spacing: dp(10) size_hint_y: None height: self.minimum_height BoxLayout: size_hint_y: None height: self.minimum_height Widget: MDRoundFlatButton: text: "Free call" Widget: MDRoundFlatButton: text: "Free message" Widget: OneLineIconListItem: text: "Video call" IconLeftSampleWidget: icon: 'camera-front-variant' TwoLineIconListItem: text: "Call Viber Out" secondary_text: "[color=%s]Advantageous rates for calls[/color]" % get_hex_from_color(app.theme_cls.primary_color) IconLeftSampleWidget: icon: 'phone' TwoLineIconListItem: text: "Call over mobile network" secondary_text: "[color=%s]Operator's tariffs apply[/color]" % get_hex_from_color(app.theme_cls.primary_color) IconLeftSampleWidget: icon: 'remote' ''') class IconLeftSampleWidget(ILeftBodyTouch, MDIconButton): pass class Example(MDApp): title = "Example Animation Card" def __init__(self, **kwargs): super().__init__(**kwargs) self.user_animation_card = None def build(self): def main_back_callback(): toast('Close card') if not self.user_animation_card: self.user_animation_card = MDUserAnimationCard( user_name="Lion Lion", path_to_avatar="./assets/african-lion-951778_1280.jpg", callback=main_back_callback) self.user_animation_card.box_content.add_widget( Factory.TestAnimationCard()) self.user_animation_card.open() Example().run() """ from kivy.clock import Clock from kivy.animation import Animation from kivy.core.window import Window from kivy.metrics import dp, sp from kivy.properties import ObjectProperty, StringProperty, ListProperty from kivy.lang import Builder from kivy.uix.boxlayout import BoxLayout from kivy.uix.floatlayout import FloatLayout from kivy.uix.modalview import ModalView from kivymd.uix.behaviors import SpecificBackgroundColorBehavior from kivymd.uix.button import MDIconButton from kivymd.theming import ThemableBehavior Builder.load_string( """ #:import Window kivy.core.window.Window #:import StiffScrollEffect kivymd.stiffscroll.StiffScrollEffect <ModifiedToolbar> size_hint_y: None height: root.theme_cls.standard_increment padding: [root.theme_cls.horizontal_margins - dp(12), 0] BoxLayout: id: left_actions orientation: 'horizontal' size_hint_x: None padding: [0, (self.height - dp(48))/2] BoxLayout: padding: dp(12), 0 MDLabel: font_style: 'H6' opposite_colors: root.opposite_colors theme_text_color: 'Custom' text_color: root.specific_text_color text: root.title shorten: True shorten_from: 'right' BoxLayout: id: right_actions orientation: 'horizontal' size_hint_x: None padding: [0, (self.height - dp(48))/2] <UserAnimationCard> canvas: Color: rgba: root.theme_cls.bg_dark \ if root.theme_cls.theme_style == 'Dark' \ else root.theme_cls.bg_light Rectangle: size: self.size pos: self.pos FitImage: id: image source: root.path_to_avatar size_hint: 1, None height: Window.height * 40 // 100 y: Window.height - 
self.height allow_stretch: True keep_ratio: False canvas.after: Color: rgba: root._primary_color Rectangle: size: self.size pos: self.pos MDLabel: id: user_name font_style: 'H4' theme_text_color: 'Custom' color: 1, 1, 1, 1 shorten: True shorten_from: 'right' text: root.user_name size_hint_y: None height: self.texture_size[1] ModifiedToolbar: id: toolbar md_bg_color: 0, 0, 0, 0 left_action_items: [['arrow-left', lambda x: root._callback_back()]] y: Window.height - self.height ScrollView: id: scroll y: -image.height effect_cls: StiffScrollEffect scroll_distance: 100 GridLayout: id: box_content size_hint_y: None height: self.minimum_height cols: 1 canvas: Color: rgba: root.theme_cls.bg_dark \ if root.theme_cls.theme_style == 'Dark' \ else root.theme_cls.bg_light Rectangle: size: self.size pos: self.pos """ ) class MDUserAnimationCard(ThemableBehavior, ModalView): user_name = StringProperty() path_to_avatar = StringProperty() box_content = ObjectProperty() callback = ObjectProperty() _anim_bottom = True def __init__(self, **kwargs): super().__init__(**kwargs) self._primary_color = self.theme_cls.primary_color self._primary_color[3] = 0 self.user_animation_card = UserAnimationCard( user_name=self.user_name, path_to_avatar=self.path_to_avatar, _callback_back=self._callback_back, _primary_color=self._primary_color, ) self.user_animation_card.ids.user_name.pos = ( dp(15), Window.height - self.user_animation_card.ids.image.height, ) self.box_content = self.user_animation_card.ids.box_content self.add_widget(self.user_animation_card) self._obj_avatar = self.user_animation_card.ids.image self._obj_user_name = self.user_animation_card.ids.user_name self._obj_toolbar = self.user_animation_card.ids.toolbar self._obj_scroll = self.user_animation_card.ids.scroll self._set_current_pos_objects() def _callback_back(self): self.dismiss() if self.callback: self.callback() def on_open(self): self._primary_color = self.theme_cls.primary_color self._primary_color[3] = 0 self.user_animation_card._primary_color = self._primary_color def _set_current_pos_objects(self): self._avatar_y = self._obj_avatar.y self._toolbar_y = self._obj_toolbar.y self._user_name_y = self._obj_user_name.y self._scroll_y = self._obj_scroll.y def on_touch_move(self, touch): if touch.ud["swipe_begin"] < touch.y: if self._anim_bottom: self._anim_bottom = False self.animation_to_top() else: if not self._anim_bottom: self._anim_bottom = True self.animation_to_bottom() def on_touch_down(self, touch): touch.ud["swipe_begin"] = touch.y return super().on_touch_down(touch) def on_touch_up(self, touch): touch.ud["swipe_begin"] = 0 def animation_to_bottom(self): Animation(y=self._scroll_y, d=0.4, t="in_out_cubic").start( self._obj_scroll ) Animation(y=self._user_name_y, d=0.5, x=dp(15), t="in_out_cubic").start( self._obj_user_name ) Animation(font_size=sp(36), d=0.3, t="in_out_cubic").start( self._obj_user_name ) Animation(_primary_color=[0, 0, 0, 0], d=0.3, t="in_out_cubic").start( self.user_animation_card ) Animation(y=self._avatar_y, d=0.4, t="in_out_cubic").start( self._obj_avatar ) def animation_to_top(self): user_name_y = ( Window.height - self._obj_toolbar.height + (self.theme_cls.standard_increment // 2 - dp(12)) ) user_name_x = self.theme_cls.horizontal_margins + dp(12) * 5 Animation(y=-self._obj_toolbar.height, d=0.4, t="in_out_cubic").start( self._obj_scroll ) Animation(y=user_name_y, d=0.3, x=user_name_x, t="in_out_cubic").start( self._obj_user_name ) Animation(font_size=sp(20), d=0.3, t="in_out_cubic").start( self._obj_user_name ) 
Animation( _primary_color=self.theme_cls.primary_color, d=0.3, t="in_out_cubic" ).start(self.user_animation_card) Animation(y=self._obj_avatar.y + 30, d=0.4, t="in_out_cubic").start( self._obj_avatar ) class UserAnimationCard(ThemableBehavior, FloatLayout): user_name = StringProperty() path_to_avatar = StringProperty() _callback_back = ObjectProperty() _primary_color = ListProperty() class ModifiedToolbar( ThemableBehavior, SpecificBackgroundColorBehavior, BoxLayout ): left_action_items = ListProperty() title = StringProperty() def __init__(self, **kwargs): super().__init__(**kwargs) self.bind(specific_text_color=self.update_action_bar_text_colors) Clock.schedule_once( lambda x: self.on_left_action_items(0, self.left_action_items) ) def on_left_action_items(self, instance, value): self.update_action_bar(self.ids["left_actions"], value) def update_action_bar(self, action_bar, action_bar_items): action_bar.clear_widgets() new_width = 0 for item in action_bar_items: new_width += dp(48) action_bar.add_widget( MDIconButton( icon=item[0], on_release=item[1], opposite_colors=True, text_color=self.specific_text_color, theme_text_color="Custom", ) ) action_bar.width = new_width def update_action_bar_text_colors(self, instance, value): for child in self.ids["left_actions"].children: child.text_color = self.specific_text_color
[ [ [ 2454, 2459 ], [ 9846, 9851 ] ], [ [ 2487, 2496 ], [ 7891, 7900 ], [ 7998, 8007 ], [ 8121, 8130 ], [ 8231, 8240 ], [ 8357, 8366 ], [ 8730, 8739 ], [ 8848, 8857 ], [ 8970, 8979 ], [ 9080, 9089 ], [ 9222, 9231 ] ], [ [ 2526, 2532 ], [ 6310, 6316 ], [ 8525, 8531 ] ], [ [ 2558, 2560 ], [ 6290, 6292 ], [ 8038, 8040 ], [ 8634, 8636 ], [ 8710, 8712 ], [ 10254, 10256 ] ], [ [ 2562, 2564 ], [ 8141, 8143 ], [ 8990, 8992 ] ], [ [ 2593, 2607 ], [ 5740, 5754 ], [ 5772, 5786 ], [ 9480, 9494 ] ], [ [ 2609, 2623 ], [ 5667, 5681 ], [ 5705, 5719 ], [ 9404, 9418 ], [ 9442, 9456 ], [ 9677, 9691 ] ], [ [ 2625, 2637 ], [ 9518, 9530 ], [ 9650, 9662 ] ], [ [ 2660, 2667 ], [ 2949, 2956 ] ], [ [ 2699, 2708 ], [ 9613, 9622 ] ], [ [ 2742, 2753 ], [ 9374, 9385 ] ], [ [ 2785, 2794 ], [ 5639, 5648 ] ], [ [ 2829, 2860 ], [ 9580, 9611 ] ], [ [ 2891, 2903 ], [ 10312, 10324 ] ], [ [ 2931, 2947 ], [ 5621, 5637 ], [ 9356, 9372 ], [ 9562, 9578 ] ], [ [ 5601, 5620 ] ], [ [ 9338, 9355 ], [ 6012, 6029 ] ], [ [ 9541, 9556 ] ] ]
# Module defining the API validators
from marshmallow import Schema, fields, validate

fields.Email.default_error_messages['required'] = 'Email jest wymagany'
fields.Email.default_error_messages['invalid'] = 'Niepoprawny adres email'


class VUser(Schema):
    # Registration validator
    nick = fields.String(
        required=True,
        validate=validate.Length(min=4, max=30, error='Login musi mieć 4 - 30 znaków'))
    email = fields.Email(required=True)
    password = fields.String(
        required=True,
        validate=validate.Length(min=8, max=30, error='Hasło musi mieć 8 - 30 znakow'))


class VUserLogin(Schema):
    # Login validator
    email = fields.Email(required=True)
    password = fields.String(
        required=True,
        validate=validate.Length(min=8, max=30, error='Hasło jest wymagane'))


class VEmail(Schema):
    # Email address validator
    email = fields.Email(required=True)


class VUserPatch(Schema):
    # Validator for a request to change fields in a user record
    field = fields.String(required=True, validate=validate.OneOf(['nick']))
    value = fields.String(required=True)


class VEntry(Schema):
    # Diary entry validator
    value = fields.Number(required=True)
    description = fields.String()


class VDiary(Schema):
    # Diary validator
    name = fields.String(required=True)
    max = fields.Number(required=True)
    date = fields.Number()
    color = fields.String(validate=validate.Regexp("#[0-9a-fA-F]{6}"))
    entries = fields.List(fields.Nested(VEntry), required=True)


class VJson(Schema):
    # JSON data validator
    diaries = fields.List(fields.Nested(VDiary))


class VDiaryIndex(Schema):
    # Diary index validator
    index = fields.Integer(required=True)
[ [ [ 60, 66 ], [ 248, 254 ], [ 606, 612 ], [ 819, 825 ], [ 916, 922 ], [ 1120, 1126 ], [ 1253, 1259 ], [ 1543, 1549 ], [ 1649, 1655 ] ], [ [ 68, 74 ], [ 87, 93 ], [ 159, 165 ], [ 296, 302 ], [ 426, 432 ], [ 469, 475 ], [ 653, 659 ], [ 696, 702 ], [ 869, 875 ], [ 1000, 1006 ], [ 1076, 1082 ], [ 1175, 1181 ], [ 1222, 1228 ], [ 1299, 1305 ], [ 1338, 1344 ], [ 1378, 1384 ], [ 1406, 1412 ], [ 1479, 1485 ], [ 1491, 1497 ], [ 1594, 1600 ], [ 1606, 1612 ], [ 1703, 1709 ] ], [ [ 76, 84 ], [ 343, 351 ], [ 516, 524 ], [ 743, 751 ], [ 1038, 1046 ], [ 1429, 1437 ] ], [ [ 242, 247 ] ], [ [ 595, 605 ] ], [ [ 812, 818 ] ], [ [ 905, 915 ] ], [ [ 1113, 1119 ], [ 1505, 1511 ] ], [ [ 1246, 1252 ], [ 1620, 1626 ] ], [ [ 1537, 1542 ] ], [ [ 1637, 1648 ] ] ]
class Option:
    def __init__(self, option_info):
        self.option_info = option_info
        self.flag = option_info['flag']

    def mkdir(self):
        if not self.flag:
            return False
        return self.option_info['mkdir']

    def dir_name(self, problem):
        if not self.flag:
            return ''
        if not self.mkdir():
            return ''
        return self.replace_name(self.option_info['dir_name'], problem) + '/'

    def source_name(self, problem):
        if not self.flag:
            return problem['problem_id']
        return self.replace_name(self.option_info['source_name'], problem)

    def replace_name(self, value, problem):
        value = value.replace('[NO]', problem['problem_id'])
        value = value.replace('[TITLE]', problem['problem_title'])
        return value

    def get_ext(self, language):
        extensions = {
            'C': '.c',
            'C++': '.cpp',
            'C++11': '.cpp',
            'C++14': '.cpp',
            'C++17': '.cpp',
            'Java': '.java',
            'Java (OpenJDK)': '.java',
            'C11': '.c',
            'Python 2': '.py',
            'Python 3': '.py',
            'PyPy2': '.py',
            'PyPy3': '.py',
            'Ruby2.5': '.rb',
            'Kotlin': '.kt',
            'Swift': '.swift',
            'C# 6.0': '.cs',
            'Text': '.txt',
            'node.js': '.js',
            'Go': '.go',
            'F#': '.fs',
            'PHP': '.php',
            'Pascal': '.pas',
            'Lua': '.lua',
            'Perl': '.pl',
            'Objective-C': '.m',
            'Objective-C++': '.mm',
            'C (Clang)': '.c',
            'C++11 (Clang)': '.cpp',
            'C++14 (Clang)': '.cpp',
            'C++17 (Clang)': '.cpp',
            'Golfscript': '.gs',
            'Bash': '.sh',
            'Fortran': '.f95',
            'Scheme': '.scm',
            'Ada': '.ada',
            'awk': '.awk',
            'OCaml': '.ml',
            'Brainfuck': '.bf',
            'Whitespace': '.ws',
            'Tcl': '.tcl',
            'Assembly (32bit)': '.asm',
            'D': '.d',
            'Clojure': '.clj',
            'Rhino': '.js',
            'Cobol': '.cob',
            'SpiderMonkey': '.js',
            'Pike': '.pike',
            'sed': '.sed',
            'Rust': '.rs',
            'Boo': '.boo',
            'Intercal': '.i',
            'bc': '.bc',
            'Nemerle': '.n',
            'Cobra': '.cobra',
            'Algol 68': '.a68',
            'Befunge': '.bf',
            'Haxe': '.hx',
            'LOLCODE': '.lol',
            'VB.NET 4.0': '.vb',
            '아희': '.aheui',
        }
        if language not in extensions:
            return True, 'Unknown extension'
        return False, extensions[language]
[ [ [ 6, 12 ] ] ]
import copy

import torch.nn as nn

from torch.quantization.fuser_method_mappings import get_fuser_method
# for backward compatibility
from torch.quantization.fuser_method_mappings import fuse_conv_bn  # noqa: F401
from torch.quantization.fuser_method_mappings import fuse_conv_bn_relu  # noqa: F401

from typing import List, Optional


# Generalization of getattr
def _get_module(model, submodule_key):
    tokens = submodule_key.split('.')
    cur_mod = model
    for s in tokens:
        cur_mod = getattr(cur_mod, s)
    return cur_mod

# Generalization of setattr
def _set_module(model, submodule_key, module):
    tokens = submodule_key.split('.')
    sub_tokens = tokens[:-1]
    cur_mod = model
    for s in sub_tokens:
        cur_mod = getattr(cur_mod, s)
    setattr(cur_mod, tokens[-1], module)

def fuse_known_modules(mod_list, additional_fuser_method_mapping=None):
    r"""Returns a list of modules that fuses the operations specified
    in the input module list.

    Fuses only the following sequence of modules:
    conv, bn
    conv, bn, relu
    conv, relu
    linear, bn
    linear, relu
    For these sequences, the first element in the output module list performs
    the fused operation. The rest of the elements are set to nn.Identity()
    """
    types = tuple(type(m) for m in mod_list)
    fuser_method = get_fuser_method(types, additional_fuser_method_mapping)
    if fuser_method is None:
        raise NotImplementedError("Cannot fuse modules: {}".format(types))
    new_mod: List[Optional[nn.Module]] = [None] * len(mod_list)
    fused = fuser_method(*mod_list)
    # NOTE: forward hooks not processed in the two following for loops will be lost after the fusion
    # Move pre forward hooks of the base module to resulting fused module
    for handle_id, pre_hook_fn in mod_list[0]._forward_pre_hooks.items():
        fused.register_forward_pre_hook(pre_hook_fn)
        del mod_list[0]._forward_pre_hooks[handle_id]
    # Move post forward hooks of the last module to resulting fused module
    for handle_id, hook_fn in mod_list[-1]._forward_hooks.items():
        fused.register_forward_hook(hook_fn)
        del mod_list[-1]._forward_hooks[handle_id]
    new_mod[0] = fused

    for i in range(1, len(mod_list)):
        identity = nn.Identity()
        identity.training = mod_list[0].training
        new_mod[i] = identity

    return new_mod

def _fuse_modules(model, modules_to_fuse, fuser_func=fuse_known_modules, fuse_custom_config_dict=None):
    if fuse_custom_config_dict is None:
        fuse_custom_config_dict = {}
    additional_fuser_method_mapping = fuse_custom_config_dict.get("additional_fuser_method_mapping", {})
    mod_list = []
    for item in modules_to_fuse:
        mod_list.append(_get_module(model, item))

    # Fuse list of modules
    new_mod_list = fuser_func(mod_list, additional_fuser_method_mapping)

    # Replace original module list with fused module list
    for i, item in enumerate(modules_to_fuse):
        _set_module(model, item, new_mod_list[i])

def fuse_modules(model, modules_to_fuse, inplace=False, fuser_func=fuse_known_modules, fuse_custom_config_dict=None):
    r"""Fuses a list of modules into a single module

    Fuses only the following sequence of modules:
    conv, bn
    conv, bn, relu
    conv, relu
    linear, relu
    bn, relu
    All other sequences are left unchanged.
    For these sequences, replaces the first item in the list
    with the fused module, replacing the rest of the modules
    with identity.

    Args:
        model: Model containing the modules to be fused
        modules_to_fuse: list of list of module names to fuse. Can also be a list
                         of strings if there is only a single list of modules to fuse.
        inplace: bool specifying if fusion happens in place on the model, by default
                 a new model is returned
        fuser_func: Function that takes in a list of modules and outputs a list of fused modules
                    of the same length. For example,
                    fuser_func([convModule, BNModule]) returns the list [ConvBNModule, nn.Identity()]
                    Defaults to torch.quantization.fuse_known_modules
        `fuse_custom_config_dict`: custom configuration for fusion

    .. code-block:: python

       # Example of fuse_custom_config_dict
       fuse_custom_config_dict = {
           # Additional fuser_method mapping
           "additional_fuser_method_mapping": {
               (torch.nn.Conv2d, torch.nn.BatchNorm2d): fuse_conv_bn
           },
       }

    Returns:
        model with fused modules. A new copy is created if inplace=False.

    Examples::

            >>> m = myModel()
            >>> # m is a module containing the sub-modules below
            >>> modules_to_fuse = [ ['conv1', 'bn1', 'relu1'], ['submodule.conv', 'submodule.relu']]
            >>> fused_m = torch.ao.quantization.fuse_modules(m, modules_to_fuse)
            >>> output = fused_m(input)

            >>> m = myModel()
            >>> # Alternately provide a single list of modules to fuse
            >>> modules_to_fuse = ['conv1', 'bn1', 'relu1']
            >>> fused_m = torch.ao.quantization.fuse_modules(m, modules_to_fuse)
            >>> output = fused_m(input)

    """
    if not inplace:
        model = copy.deepcopy(model)

    if all(isinstance(module_element, str) for module_element in modules_to_fuse):
        # Handle case of modules_to_fuse being a list
        _fuse_modules(model, modules_to_fuse, fuser_func, fuse_custom_config_dict)
    else:
        # Handle case of modules_to_fuse being a list of lists
        for module_list in modules_to_fuse:
            _fuse_modules(model, module_list, fuser_func, fuse_custom_config_dict)
    return model
[ [ [ 8, 12 ], [ 5310, 5314 ] ], [ [ 21, 35 ], [ 1525, 1527 ], [ 2273, 2275 ] ], [ [ 90, 106 ], [ 1336, 1352 ] ], [ [ 188, 200 ] ], [ [ 268, 285 ] ], [ [ 320, 324 ], [ 1511, 1515 ] ], [ [ 326, 334 ], [ 1516, 1524 ] ], [ [ 368, 379 ], [ 2748, 2759 ] ], [ [ 572, 583 ], [ 2989, 3000 ] ], [ [ 812, 830 ], [ 2440, 2458 ], [ 3099, 3117 ] ], [ [ 2391, 2404 ], [ 5477, 5490 ], [ 5681, 5694 ] ], [ [ 3036, 3048 ] ] ]
################################################################################# # The Institute for the Design of Advanced Energy Systems Integrated Platform # Framework (IDAES IP) was produced under the DOE Institute for the # Design of Advanced Energy Systems (IDAES), and is copyright (c) 2018-2021 # by the software owners: The Regents of the University of California, through # Lawrence Berkeley National Laboratory, National Technology & Engineering # Solutions of Sandia, LLC, Carnegie Mellon University, West Virginia University # Research Corporation, et al. All rights reserved. # # Please see the files COPYRIGHT.md and LICENSE.md for full copyright and # license information. ################################################################################# """ # Institute for the Design of Advanced Energy Systems Process Systems # Engineering Framework (IDAES PSE Framework) Copyright (c) 2018, by the # software owners: The Regents of the University of California, through # Lawrence Berkeley National Laboratory, National Technology & Engineering # Solutions of Sandia, LLC, Carnegie Mellon University, West Virginia # University Research Corporation, et al. All rights reserved. # # Please see the files COPYRIGHT.txt and LICENSE.txt for full copyright and # license information, respectively. Both files are also available online # at the URL "https://github.com/IDAES/idaes". """ __all__ = [ "ripemodel", "ems", "rspace", "sharedata", "debug", "powerlawp5", "powerlaw2", "powerlaw3", "powerlaw4", "avrami2", "avrami3", "avrami4", "avrami5", "randomnuc", "ptompkins", "jander", "antijander", "valensi", "parabolic", "gb3d", "zlt", "grain", # PYLINT-TODO-FIX: this seems to be a genuine error since "massact" is not imported from .mechs "massact", # pylint: disable=undefined-all-variable "massactm", "getmechs", ] from .main import ripemodel, ripewrite, print_results # noqa: F401 from .shared import rspace, sharedata, debug # noqa: F401 from .atermconstruct import ( makeaterm, formatinputs, checkargs, normalizefeatures, ) # noqa: F401 from .kinforms import lin, linjac, arr, arrjac, refarr, refarrjac # noqa: F401 from .mechs import ( powerlawp5, powerlaw2, powerlaw3, powerlaw4, avrami2, avrami3, avrami4, avrami5, randomnuc, ptompkins, jander, antijander, valensi, parabolic, gb3d, zlt, grain, getmechs, massactm, ) # noqa: F401 from .genpyomo import ripeomo # noqa: F401 from .targets import ( doalamo, dopwalamo, gentargets, sstargets, dynamictargets, ) # noqa: F401 from .confinv import confinv # noqa: F401 from .emsampling import constructmodel, ems # noqa: F401 from .checkoptions import checkoptions # noqa: F401 from .bounds import stoich_cons, count_neg, get_bounds # noqa: F401
[ [ [ 1405, 1412 ] ], [ [ 1962, 1971 ] ], [ [ 1973, 1982 ] ], [ [ 1984, 1997 ] ], [ [ 2032, 2038 ] ], [ [ 2040, 2049 ] ], [ [ 2051, 2056 ] ], [ [ 2105, 2114 ] ], [ [ 2120, 2132 ] ], [ [ 2138, 2147 ] ], [ [ 2153, 2170 ] ], [ [ 2210, 2213 ] ], [ [ 2215, 2221 ] ], [ [ 2223, 2226 ] ], [ [ 2228, 2234 ] ], [ [ 2236, 2242 ] ], [ [ 2244, 2253 ] ], [ [ 2293, 2303 ] ], [ [ 2309, 2318 ] ], [ [ 2324, 2333 ] ], [ [ 2339, 2348 ] ], [ [ 2354, 2361 ] ], [ [ 2367, 2374 ] ], [ [ 2380, 2387 ] ], [ [ 2393, 2400 ] ], [ [ 2406, 2415 ] ], [ [ 2421, 2430 ] ], [ [ 2436, 2442 ] ], [ [ 2448, 2458 ] ], [ [ 2464, 2471 ] ], [ [ 2477, 2486 ] ], [ [ 2492, 2496 ] ], [ [ 2502, 2505 ] ], [ [ 2511, 2516 ] ], [ [ 2522, 2530 ] ], [ [ 2536, 2544 ] ], [ [ 2584, 2591 ] ], [ [ 2633, 2640 ] ], [ [ 2646, 2655 ] ], [ [ 2661, 2671 ] ], [ [ 2677, 2686 ] ], [ [ 2692, 2706 ] ], [ [ 2745, 2752 ] ], [ [ 2791, 2805 ] ], [ [ 2807, 2810 ] ], [ [ 2851, 2863 ] ], [ [ 2898, 2909 ] ], [ [ 2911, 2920 ] ], [ [ 2922, 2932 ] ] ]
"""Test component helpers.""" # pylint: disable=protected-access from collections import OrderedDict import unittest from homeassistant import helpers from tests.common import get_test_home_assistant class TestHelpers(unittest.TestCase): """Tests homeassistant.helpers module.""" # pylint: disable=invalid-name def setUp(self): """Init needed objects.""" self.hass = get_test_home_assistant() # pylint: disable=invalid-name def tearDown(self): """Stop everything that was started.""" self.hass.stop() def test_extract_domain_configs(self): """Test the extraction of domain configuration.""" config = { 'zone': None, 'zoner': None, 'zone ': None, 'zone Hallo': None, 'zone 100': None, } self.assertEqual(set(['zone', 'zone Hallo', 'zone 100']), set(helpers.extract_domain_configs(config, 'zone'))) def test_config_per_platform(self): """Test config per platform method.""" config = OrderedDict([ ('zone', {'platform': 'hello'}), ('zoner', None), ('zone Hallo', [1, {'platform': 'hello 2'}]), ('zone 100', None), ]) assert [ ('hello', config['zone']), (None, 1), ('hello 2', config['zone Hallo'][1]), ] == list(helpers.config_per_platform(config, 'zone'))
[ [ [ 89, 100 ], [ 1083, 1094 ] ], [ [ 108, 116 ], [ 222, 230 ] ], [ [ 144, 151 ], [ 929, 936 ], [ 1420, 1427 ] ], [ [ 178, 201 ], [ 400, 423 ] ], [ [ 210, 221 ] ] ]
import pandas as pd import numpy as np def top_time(ind=None, gs=None): """ Selects the location (by coordinates) which was visited for the longest period during given time interval :param ind: user id :param gs: GeoDataFrame from groupby execution containing all the data in the given time interval :return: user id (if given) and the data for the longest visited location """ aggregated = [] for tstamp, g in gs: # for each record in the GeoDataFrame if len(g) > 1: # if there is more than one record diff_places = (g['geometry'].shift(-1) != g['geometry']).iloc[:-1] # checks when coordinates change if diff_places.any(): # if there is change in locations g_res = g.reset_index() # drop index diffs = g_res.shift(-1)['datetime'] - g_res['datetime'] # find time differences (spent in location) joined_dfs = g_res.join(diffs, rsuffix='a') # add them to locations joined_dfs['geometry'] = g_res['geometry'].astype(str) # copy geometry as string point_max = joined_dfs.groupby('geometry')['datetimea'].sum().idxmax() # grouping locations find the longest time sum selected = g[g['geometry'].astype(str) == point_max] # select the location with the highest total time else: selected = g # if one location visited - copy GeoDataFrame else: selected = g aggregated.append(selected) if ind is None: return pd.concat(aggregated) else: return ind, pd.concat(aggregated) def mode_geoseries(ind, gs): """ Calculates mode for GeoSeries :param ind: identifier :param gs: GeoSeries :return: identifier and a mode for GeoSeries """ aggregated = [] for g in gs: if g[1].empty: aggregated.append(None) else: selected = g[1].mode() selected = selected.set_index(g[1].index) aggregated.append(selected) return ind, pd.concat(aggregated) def rowwise_average(gs, row_count=None): """ Calculates an average for each row in each group - rowwise. :param gs: GeoSeries :param row_count: defines how much rows should be considered :return: averaged GeoSeries rowwise """ if row_count is None: row_count = gs.groupby(level=0).size().max() return pd.Series([gs.groupby(level=0).nth(n).mean() for n in range(row_count)]) def groupwise_average(gs): """ Calculates an average from each group of GeoSeries :param gs: GeoSeries :return: averaged GeoSeries """ return gs.groupby(level=0).mean() def groupwise_normalise(gs): """ Normalises each group of GeoSeries :param gs: GeoSeries :return: normalised GeoSeries """ return gs.groupby(level=0).apply(lambda x: x / x.sum()) def groupwise_expansion(gs): """ Calculates expanding mean for each group of GeoSeries :param gs: GeoSeries :return: averaged GeoSeries """ return gs.groupby(level=0).expanding().mean() def total_normalise(gs): """ Performs complete normalisation of GeoSeries :param gs: GeoSeries :return: normalised GeoSeries """ return gs / gs.sum() def start_end(trajectories_frame): """ Compresses stops in TrajectoriesFrame by adding start and end of visits in locations :param trajectories_frame: TrajectoriesFrame object class :return: compressed TrajectoriesFrame """ to_concat = [] if 'date' not in trajectories_frame.columns: trajectories_frame['date'] = trajectories_frame.index.get_level_values(1) for gs in trajectories_frame.groupby(level=0): firsts = gs[1][gs[1]['geometry'].shift() != gs[1]['geometry']] lasts = gs[1][gs[1]['geometry'].shift(-1) != gs[1]['geometry']] firsts.loc[:, 'start'] = firsts['date'] lasts = lasts.set_index(firsts.index) firsts.loc[:, 'end'] = lasts['date'] firsts = firsts[firsts['start'] != firsts['end']] to_concat.append(firsts) return pd.concat(to_concat)
[ [ [ 7, 19 ], [ 1371, 1373 ], [ 1414, 1416 ], [ 1798, 1800 ], [ 2133, 2135 ], [ 3678, 3680 ] ], [ [ 27, 38 ] ], [ [ 45, 53 ] ], [ [ 1442, 1456 ] ], [ [ 1826, 1841 ] ], [ [ 2212, 2229 ] ], [ [ 2389, 2408 ] ], [ [ 2576, 2595 ] ], [ [ 2770, 2785 ] ], [ [ 2928, 2937 ] ] ]
import numpy as np

np.show_config()
[ [ [ 7, 18 ], [ 20, 22 ] ] ]
from PIL import Image as im
import numpy as np
from io import BytesIO
import csv


class outputResponse():
    def __init__(self, reponse):
        self.response = reponse

    def retrieveResult(response, returntype):
        if (returntype == "image/png" or returntype == "image/jpeg"):
            img_arr = np.array(im.open(BytesIO(response.content)))
            data = im.fromarray(img_arr)
            data.show()
        elif (returntype == "text/csv"):
            response = response.content.decode('utf-8')
            my_list = response.split(",")
            with open('x.csv', 'w') as file:
                writer = csv.writer(file, delimiter=',')
                writer.writerow(my_list)
        elif (returntype == 1 or returntype == 0):
            print(response.content)
        else:
            response = response.content.decode('utf-8')
            print(response)
[ [ [ 16, 27 ], [ 319, 321 ], [ 374, 376 ] ], [ [ 36, 47 ], [ 310, 312 ] ], [ [ 63, 70 ], [ 327, 334 ] ], [ [ 78, 81 ], [ 634, 637 ] ], [ [ 89, 103 ] ] ]
import pytest
import os
from machaon.types.file import TextFile
from machaon.types.shell import Path
from machaon.core.invocation import instant_return_test, instant_context


def test_construct(tmp_path):
    FILEPATH = Path(__file__)

    context = instant_context()
    context.define_type(TextFile)

    f = instant_return_test(context, FILEPATH, "TextFile").value
    assert isinstance(f, TextFile)
    assert isinstance(f.path(), Path)
    assert f.pathstr == FILEPATH.get()

    p = Path(tmp_path) / "hello.txt"
    f = instant_return_test(context, p, "TextFile").value
    f.set_encoding("utf-8")
    assert f.encoding() == "utf-8"

    with f.open("w"):
        f.stream.write("HELLO\n")
        f.stream.write("WORLD")

    assert f.text() == "HELLO\nWORLD"
[ [ [ 7, 13 ] ], [ [ 21, 23 ] ], [ [ 56, 64 ], [ 292, 300 ], [ 392, 400 ] ], [ [ 97, 101 ], [ 221, 225 ], [ 434, 438 ], [ 488, 492 ] ], [ [ 138, 157 ], [ 310, 329 ], [ 525, 544 ] ], [ [ 159, 174 ], [ 250, 265 ] ], [ [ 180, 194 ] ] ]
import re

# match whole string
data1 = "aaab"
data2 = "aaaba"
pattern = r"\Aa+b\Z"
match1 = re.match(pattern, data1)
print(match1)
match2 = re.match(pattern, data2)
print(match2)

# regular expression options
data = "AaaA\n\raaaA"
pattern = r"^(a+)$"
match = re.match(pattern, data, re.I | re.M)
print(match)
print(match.group())

# search all matches
data = "Pi = 3.14, exponent = 2.718"
pattern = r"(\d+\.\d+)"
matches = re.findall(pattern, data)
print(matches)

# replacement of the match (with catch group)
data = re.sub(pattern, r'<f>\1</f>', data)
print(data)

# search for a match
match = re.search(pattern, data)
if match:
    print(match.group())
    print(float(match.group()))
[ [ [ 7, 9 ], [ 95, 97 ], [ 143, 145 ], [ 263, 265 ], [ 287, 289 ], [ 294, 296 ], [ 427, 429 ], [ 522, 524 ], [ 600, 602 ] ], [ [ 33, 38 ], [ 113, 118 ] ], [ [ 48, 53 ], [ 161, 166 ] ], [ [ 64, 71 ], [ 104, 111 ], [ 152, 159 ] ], [ [ 86, 92 ], [ 126, 132 ] ], [ [ 134, 140 ], [ 174, 180 ] ], [ [ 213, 217 ], [ 281, 285 ] ], [ [ 235, 242 ], [ 272, 279 ] ], [ [ 255, 260 ], [ 306, 311 ], [ 319, 324 ] ], [ [ 356, 360 ], [ 447, 451 ], [ 552, 556 ] ], [ [ 393, 400 ], [ 438, 445 ], [ 529, 536 ], [ 610, 617 ] ], [ [ 417, 424 ], [ 459, 466 ] ], [ [ 515, 519 ], [ 564, 568 ], [ 619, 623 ] ], [ [ 592, 597 ], [ 628, 633 ], [ 645, 650 ], [ 676, 681 ] ] ]
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import models, migrations


class Migration(migrations.Migration):

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Rate',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('rate', models.DecimalField(null=True, verbose_name=b'Exchange rate', max_digits=8, decimal_places=4, blank=True)),
                ('date', models.DateField(db_index=True)),
                ('currency', models.CharField(default=b'USD', max_length=3, db_index=True, choices=[(b'CHF', b'CHF'), (b'EUR', b'EUR'), (b'GBP', b'GBP'), (b'USD', b'USD')])),
            ],
            options={
                'ordering': ['-date', 'currency'],
            },
            bases=(models.Model,),
        ),
        migrations.AlterUniqueTogether(
            name='rate',
            unique_together=set([('date', 'currency')]),
        ),
    ]
[ [ [ 47, 63 ] ], [ [ 87, 93 ], [ 296, 302 ], [ 413, 419 ], [ 546, 552 ], [ 609, 615 ], [ 877, 883 ] ], [ [ 95, 105 ], [ 124, 134 ], [ 203, 213 ], [ 912, 922 ] ], [ [ 114, 123 ] ] ]
DEBUG = True

ALLOWED_HOSTS = ['*', ]
[ [ [ 0, 5 ] ], [ [ 14, 27 ] ] ]
import logging import logging.handlers import sys import os import json import sqlite3 import signal import threading import time import difflib import vk_api from vk_api.longpoll import VkLongPoll, VkEventType import requests.exceptions cwd = os.path.dirname(os.path.abspath(__file__)) logging.basicConfig( format='%(asctime)s - %(levelname)s - %(message)s', stream=sys.stdout, level=logging.WARNING ) logger = logging.getLogger(__name__) logger.setLevel(logging.INFO) handler = logging.handlers.RotatingFileHandler( os.path.join(cwd, 'log.txt'), maxBytes=102400 ) handler.setFormatter(logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')) logger.addHandler(handler) logger.info("Запуск...") def handle_exception(exc_type, exc_value, exc_traceback): if issubclass(exc_type, requests.exceptions.RequestException): return elif issubclass(exc_type, KeyboardInterrupt): sys.__excepthook__(exc_type, exc_value, exc_traceback) return logger.error("Непойманное исключение.", exc_info=(exc_type, exc_value, exc_traceback)) sys.excepthook = handle_exception defaultConfig = { "ACCESS_TOKEN": "", "createIndex": False, "maxCacheAge": 86400, "preloadMessages": False, "customActions": False, "disableMessagesLogging": False, 'enableFlaskWebServer': False, 'useAuth': False, 'users': { 'admin':'password' }, 'port': 8080, 'https': False, 'httpsPort': 8443, 'cert': [ os.path.join(cwd, "cert.pem"), os.path.join(cwd, "key.pem") ] } def grab_token_from_args(): if len(sys.argv) > 1: defaultConfig['ACCESS_TOKEN'] = sys.argv[1] elif defaultConfig['ACCESS_TOKEN'] == "": raise Exception("Не задан ACCESS_TOKEN") if not os.path.exists(os.path.join(cwd, "config.json")): with open(os.path.join(cwd, "config.json"), 'w') as conf: grab_token_from_args() json.dump(defaultConfig, conf, indent=4) config = defaultConfig del defaultConfig else: with open(os.path.join(cwd, "config.json"), 'r') as conf: config = json.load(conf) for i in config: if i in defaultConfig: defaultConfig[i] = config[i] grab_token_from_args() if len(set(config)) - len(set(defaultConfig)) != 0: with open(os.path.join(cwd, "config.json"), 'w') as conf: json.dump(defaultConfig, conf, indent=4) config = defaultConfig del defaultConfig stop_mutex = threading.Lock() def run_flask_server(): port = config['httpsPort'] if config['https'] else config['port'] import socket ip = socket.gethostbyname(socket.gethostname()) del socket while True: try: if config['https']: logger.info("Trying to run on https://%s:%s/", ip, port) app.run( host='0.0.0.0', port=port, ssl_context=( config['cert'][0], config['cert'][1] ) ) else: logger.info("Trying to run on http://%s:%s/", ip, port) app.run(host='0.0.0.0', port=port) except OSError: port += 1 if config['enableFlaskWebServer']: from flaskWebServer import app threading.Thread(target=run_flask_server).start() if config['createIndex']: from updateIndex import indexUpdater indexUpdater() def tryAgainIfFailed(func, *args, maxRetries=5, **kwargs): c = maxRetries delay = 1 while True: try: return func(*args, **kwargs) except vk_api.exceptions.ApiError: if str(sys.exc_info()[1]).find("User authorization failed") != -1: logger.warning("Токен недействителен.") interrupt_handler(0, None) raise Warning except requests.exceptions.RequestException: if delay < 32: delay*=2 time.sleep(delay) continue except BaseException: if maxRetries == 0: logger.exception("После %s попыток %s(%s%s) завершился с ошибкой.", c, func.__name__, args, kwargs) raise Warning logger.warning("Перезапуск %s(%s%s) через %s секунд...", func.__name__, args, kwargs, delay) 
if delay < 32: delay*=2 time.sleep(delay) if maxRetries > 0: maxRetries -= 1 continue vk_session = vk_api.VkApi(token=config['ACCESS_TOKEN'],api_version='5.130') longpoll = VkLongPoll(vk_session, wait=60, mode=2) vk = vk_session.get_api() account_id = tryAgainIfFailed(vk.users.get)[0]['id'] if not config['disableMessagesLogging']: if not os.path.exists( os.path.join( cwd, "mesAct" ) ): os.makedirs( os.path.join( cwd, "mesAct" ) ) f = open( os.path.join( cwd, "mesAct", "vkGetVideoLink.html" ), 'w', encoding='utf-8' ) f.write("""<!DOCTYPE html> <html> <head> <meta charset="utf-8"> <style> html,body,iframe{ width: 100%; height: 100%; } </style> </head> <body> <p>Если видео не проигрывается, прямую ссылку можно получить через api:</p> <script> function embedLink(id) { var link = document.createElement('a'); link.href = "https://vk.com/dev/video.get?params[videos]=0_0," + id + "&params[count]=1&params[offset]=1"; link.innerText = id; link.setAttribute('target', '_blank') document.getElementsByTagName("body")[0].appendChild(link); } function embedPlayer(link) { var frame = document.createElement('iframe'); frame.src = link; frame.style = "width:100%;height:100%;"; frame.setAttribute('allowFullScreen', '') document.getElementsByTagName("body")[0].appendChild(frame); } function splitArgs(){ var args = document.location.search; var lastAmpersand = args.lastIndexOf('&'); return [args.slice(1, lastAmpersand), args.slice(lastAmpersand + 1)]; } var args = splitArgs(); embedLink(args[1]); embedPlayer(args[0]); </script> </body> </html>""") f.close() if not os.path.exists( os.path.join( cwd, "messages.db" ) ): conn = sqlite3.connect( os.path.join( cwd, "messages.db" ), check_same_thread=False, isolation_level=None, timeout=15.0 ) cursor = conn.cursor() cursor.execute("""CREATE TABLE "messages" ( "peer_id" INTEGER NOT NULL, "user_id" INTEGER NOT NULL, "message_id" INTEGER NOT NULL UNIQUE, "message" TEXT, "attachments" TEXT, "timestamp" INTEGER NOT NULL, "fwd_messages" TEXT )""") cursor.execute("""CREATE TABLE "chats_cache" ( "chat_id" INTEGER NOT NULL UNIQUE, "chat_name" TEXT NOT NULL )""") cursor.execute("""CREATE TABLE "users_cache" ( "user_id" INTEGER NOT NULL UNIQUE, "user_name" TEXT NOT NULL )""") account_name = tryAgainIfFailed( vk.users.get, user_id=account_id )[0] account_name = f"{account_name['first_name']} {account_name['last_name']}" cursor.execute( """INSERT INTO users_cache (user_id,user_name) VALUES (?,?)""", (account_id, account_name,) ) conn.commit() else: conn = sqlite3.connect( os.path.join(cwd, "messages.db"), check_same_thread=False, timeout=15.0 ) cursor = conn.cursor() if not os.path.exists( os.path.join( cwd, "mesAct", "bootstrap.css" ) ): f = open( os.path.join( cwd, "mesAct", "bootstrap.css" ), 'w', encoding='utf-8' ) f.write(':root{--blue:#007bff;--indigo:#6610f2;--purple:#6f42c1;--pink:#e83e8c;--red:#dc3545;--orange:#fd7e14;--yellow:#ffc107;--green:#28a745;--teal:#20c997;--cyan:#17a2b8;--white:#fff;--gray:#6c757d;--gray-dark:#343a40;--primary:#007bff;--secondary:#6c757d;--success:#28a745;--info:#17a2b8;--warning:#ffc107;--danger:#dc3545;--light:#f8f9fa;--dark:#343a40;--breakpoint-xs:0;--breakpoint-sm:576px;--breakpoint-md:768px;--breakpoint-lg:992px;--breakpoint-xl:1200px;--font-family-sans-serif:-apple-system,BlinkMacSystemFont,"Segoe UI",Roboto,"Helvetica Neue",Arial,"Noto Sans",sans-serif,"Apple Color Emoji","Segoe UI Emoji","Segoe UI Symbol","Noto Color Emoji";--font-family-monospace:SFMono-Regular,Menlo,Monaco,Consolas,"Liberation Mono","Courier 
New",monospace}*,::after,::before{box-sizing:border-box}html{font-family:sans-serif;line-height:1.15;-webkit-text-size-adjust:100%;-webkit-tap-highlight-color:transparent}body{margin:0;font-family:-apple-system,BlinkMacSystemFont,"Segoe UI",Roboto,"Helvetica Neue",Arial,"Noto Sans",sans-serif,"Apple Color Emoji","Segoe UI Emoji","Segoe UI Symbol","Noto Color Emoji";font-size:1rem;font-weight:400;line-height:1.5;color:#212529;text-align:left;background-color:#fff}dl,ol,ul{margin-top:0;margin-bottom:1rem}b,strong{font-weight:bolder}a{color:#007bff;text-decoration:none;background-color:transparent}img{vertical-align:middle;border-style:none}table{border-collapse:collapse}.table{width:100%;margin-bottom:1rem;color:#212529}.table td,.table th{padding:.75rem;vertical-align:top;border-top:1px solid #dee2e6}.table-sm td,.table-sm th{padding:.3rem}.table-bordered{border:1px solid #dee2e6}.table-bordered td,.table-bordered th{border:1px solid #dee2e6}.list-group{display:-ms-flexbox;display:flex;-ms-flex-direction:column;flex-direction:column;padding-left:0;margin-bottom:0;border-radius:.25rem}.list-group-item{position:relative;display:block;padding:.75rem 1.25rem;background-color:#fff;border:1px solid rgba(0,0,0,.125)}.list-group-item:first-child{border-top-left-radius:inherit;border-top-right-radius:inherit}.list-group-item:last-child{border-bottom-right-radius:inherit;border-bottom-left-radius:inherit}.list-group-item+.list-group-item{border-top-width:0}.stretched-link::after{position:absolute;top:0;right:0;bottom:0;left:0;z-index:1;pointer-events:auto;content:"";background-color:rgba(0,0,0,0)}.mes{word-break:break-all}img,a,audio{display:block}img{max-width:100%}') f.close() if config['customActions']: from customActions import customActions cust = customActions(vk, conn, cursor) def bgWatcher(): while True: maxCacheAge = config['maxCacheAge'] with stop_mutex: logger.info("Обслуживание БД...") try: showMessagesWithDeletedAttachments() except BaseException: logger.exception("Ошибка при поиске удаленных фото") try: if maxCacheAge != -1: cursor.execute( """DELETE FROM messages WHERE timestamp < ?""", (time.time() - maxCacheAge,) ) conn.commit() cursor.execute("VACUUM") else: maxCacheAge = 86400 except BaseException: logger.exception("Ошибка при очистке базы данных") logger.info("Обслуживание БД завершено.") time.sleep(maxCacheAge) def interrupt_handler(signum, frame): conn.commit() cursor.close() try: tableWatcher.cancel() except AttributeError: pass logger.info("Завершение...") os._exit(0) signal.signal(signal.SIGINT, interrupt_handler) signal.signal(signal.SIGTERM, interrupt_handler) def eventWorker_predefinedDisabled(): global events while True: flag.wait() event = events.pop(0) with stop_mutex: try: cust.act(event) except BaseException: logger.exception("Ошибка в customActions. \n %s", vars(event)) if len(events) == 0: flag.clear() def eventWorker_customDisabled(): global events while True: flag.wait() event = events.pop(0) with stop_mutex: predefinedActions(event) if len(events) == 0: flag.clear() conn.commit() def eventWorker(): global events while True: flag.wait() event = events.pop(0) with stop_mutex: try: cust.act(event) except BaseException: logger.exception("Ошибка в customActions. 
\n %s", vars(event)) predefinedActions(event) if len(events) == 0: flag.clear() conn.commit() def predefinedActions(event): try: if event.type == VkEventType.MESSAGE_NEW: cursor.execute( """INSERT INTO messages(peer_id,user_id,message_id,message,attachments,timestamp,fwd_messages) VALUES (?,?,?,?,?,?,?)""", (event.peer_id, event.user_id, event.message_id, event.message, event.message_data[1], event.timestamp, event.message_data[2],) ) conn.commit() elif event.type == VkEventType.MESSAGE_EDIT: if event.message_data[0]: activityReport(event.message_id, event.peer_id, event.user_id, event.timestamp, True, event.message_data[1], event.message_data[2], event.text) cursor.execute( """INSERT or REPLACE INTO messages(peer_id,user_id,message_id,message,attachments,timestamp,fwd_messages) VALUES (?,?,?,?,?,?,?)""", (event.peer_id, event.user_id, event.message_id, event.message, event.message_data[1], event.timestamp, event.message_data[2],) ) conn.commit() elif event.type == VkEventType.MESSAGE_FLAGS_SET: try: activityReport(event.message_id) cursor.execute( """DELETE FROM messages WHERE message_id = ?""", (event.message_id,) ) conn.commit() except TypeError: logger.info("Удаление невозможно, сообщение отсутствует в БД.") except sqlite3.IntegrityError: logger.warning("Запущено несколько копий программы, завершение...") interrupt_handler(0, None) except Warning: pass except BaseException: logger.exception("Ошибка при сохранении сообщения. \n %s", vars(event)) def main(): logger.info("Запущен основной цикл.") global events for event in longpoll.listen(): try: if event.raw[0] == 4 or event.raw[0] == 5: if event.attachments != {}: event.message_data = getAttachments(event) else: event.message_data = True, None, None if event.from_user and event.raw[2] & 2: event.user_id = account_id elif event.from_group: if event.from_me: event.user_id = account_id else: event.user_id = event.peer_id if not event.message: event.message = None events.append(event) flag.set() elif event.raw[0] == 2 and (event.raw[2] & 131072 or event.raw[2] & 128): events.append(event) flag.set() except Warning: pass except BaseException: logger.exception("Ошибка при добавлении события в очередь. 
\n %s", vars(event)) def showMessagesWithDeletedAttachments(): cursor.execute("""SELECT message_id, attachments FROM messages WHERE attachments IS NOT NULL""") fetch_attachments = [[str(i[0]), json.loads(i[1])] for i in cursor.fetchall()] cursor.execute("""SELECT message_id, fwd_messages FROM messages WHERE fwd_messages IS NOT NULL""") fetch_fwd = [[str(i[0]), json.loads(i[1])] for i in cursor.fetchall()] c = 0 for i in range(len(fetch_attachments)): for j in fetch_attachments[i - c][1]: if j['type'] == 'photo' or j['type'] == 'video' or j['type'] == 'doc': break else: del fetch_attachments[i - c] c += 1 messages_attachments = [] messages_fwd = [] for i in [[j[0] for j in fetch_attachments[i:i + 100]] for i in range(0, len(fetch_attachments), 100)]: messages_attachments.extend(tryAgainIfFailed( vk.messages.getById, message_ids=','.join(i))['items'] ) for i in [[j[0] for j in fetch_fwd[i:i + 100]] for i in range(0, len(fetch_fwd), 100)]: messages_fwd.extend(tryAgainIfFailed( vk.messages.getById, message_ids=','.join(i))['items'] ) c = 0 for i in range(len(fetch_attachments)): if compareAttachments(messages_attachments[i - c]['attachments'], fetch_attachments[i - c][1]): del fetch_attachments[i - c] del messages_attachments[i - c] c += 1 for i in range(len(fetch_attachments)): activityReport(fetch_attachments[i][0]) if messages_attachments[i]['attachments'] == []: cursor.execute( """UPDATE messages SET attachments = ? WHERE message_id = ?""", (None, fetch_attachments[i][0],) ) else: cursor.execute( """UPDATE messages SET attachments = ? WHERE message_id = ?""", ( json.dumps(messages_attachments[i]['attachments']), fetch_attachments[i][0], ) ) c = 0 for i in range(len(fetch_fwd)): if compareFwd( messages_fwd[i - c], { 'fwd_messages': fetch_fwd[i - c][1] } ): del fetch_fwd[i - c] del messages_fwd[i - c] c += 1 for i in range(len(fetch_fwd)): activityReport(fetch_fwd[i][0]) if messages_fwd[i]['fwd_messages'] == []: cursor.execute( """UPDATE messages SET fwd_messages = ? WHERE message_id = ?""", (None, fetch_fwd[i][0],) ) else: cursor.execute( """UPDATE messages SET fwd_messages = ? 
WHERE message_id = ?""", ( json.dumps(messages_fwd[i]['fwd_messages']), fetch_fwd[i][0], ) ) conn.commit() def compareFwd(new, old): if 'reply_message' in new: new['fwd_messages'] = [new['reply_message']] if 'reply_message' in old: old['fwd_messages'] = [old['reply_message']] for i in range(len(old['fwd_messages'])): if 'fwd_messages' in old['fwd_messages'][i] and 'fwd_messages' in new['fwd_messages'][i]: if not compareFwd( new['fwd_messages'][i], old['fwd_messages'][i] ): return False if not compareAttachments( new['fwd_messages'][i]['attachments'], old['fwd_messages'][i]['attachments'] ): return False return True def compareAttachments(new, old): if len(new) < len(old): return False return True def attachmentsParse(urls): if urls is None: return "" html = """<div> """ for i in urls: urlSplit = i.split(',') if i.find('vk.com/sticker/') != -1: html += """ <img src="{}" /> """.format(i) elif i.find('.jpg') != -1 and i.find(',') == -1: html += """ <img src="{}" /> """.format(i) elif i.find('.mp3') != -1: html += """ <audio src="{}" controls></audio> """.format(i) elif i.find('https://vk.com/audio') != -1: html += """ <a href="{}" target="_blank"> {} </a> """.format(i, i[23:-11].replace('%20', ' ')) elif i.find('@') != -1: i = i.rsplit('@', 1) html += """ <a href="{}" target="_blank"> {} </a> """.format(i[1], i[0]) elif len(urlSplit) == 3: html += """ <a href="{}" target="_blank"> Видео <img src="{}"/> </a> """.format(f"./vkGetVideoLink.html?{urlSplit[1]}&{urlSplit[2]}", urlSplit[0]) else: html += """ <a href="{0}" target="_blank"> {0} </a> """.format(i) html += """</div>""" return html def getAttachments(event): message_id = event.message_id fullLoadUnNeeded = not (event.raw[0] == 5 or 'fwd' in event.attachments) count = 0 if fullLoadUnNeeded: for i in range(1,11): if f'attach{i}_type' in event.attachments: if event.attachments[f'attach{i}_type'] not in ('sticker', 'link'): fullLoadUnNeeded = False else: count = i break if fullLoadUnNeeded: attachments = [] for i in range(1,count): if event.attachments[f'attach{i}_type'] == 'sticker': attachments.append({'type':'sticker','sticker':{'images':[{'height':64,'url':f'https://vk.com/sticker/1-{event.attachments[f"attach{i}"]}-64'}]}}) else: if f'attach{i}_title' in event.attachments: title = event.attachments[f'attach{i}_title'] else: title = event.attachments[f'attach{i}_url'] attachments.append({'type':'link','link':{'title':title,'url':event.attachments[f'attach{i}_url']}}) return False, json.dumps(attachments, ensure_ascii=False,), None mes = tryAgainIfFailed( vk.messages.getById, message_ids=message_id )['items'] if not len(mes): logger.info("Не удалось запросить вложения для сообщения, message_id = %i.", event.message_id) return False, "[]", "[]" else: mes = mes[0] hasUpdateTime = 'update_time' in mes fwd_messages = None if 'reply_message' in mes: fwd_messages = json.dumps([mes['reply_message']], ensure_ascii=False,) elif mes['fwd_messages'] != []: fwd_messages = json.dumps(mes['fwd_messages'], ensure_ascii=False,) if mes['attachments'] == []: attachments = None else: attachments = json.dumps(mes['attachments'], ensure_ascii=False,) return hasUpdateTime, attachments, fwd_messages def parseUrls(attachments): urls = [] for i in attachments: if i['type'] == 'photo': maxHeight = 0 maxUrl = "" for j in i['photo']['sizes']: if j['height'] > maxHeight: maxHeight = j['height'] maxUrl = j['url'] urls.append(maxUrl) elif i['type'] == 'audio_message': urls.append(i['audio_message']['link_mp3']) elif i['type'] == 'sticker': 
urls.append(i['sticker']['images'][0]['url']) elif i['type'] == 'gift': urls.append(i['gift']['thumb_48']) elif i['type'] == 'link': urls.append(f"Ссылка: {i['link']['title']}@{i['link']['url']}") elif i['type'] == 'video': urls.append(f"{i['video']['image'][0]['url']},{i['video']['player']},{i['video']['owner_id']}_{i['video']['id']}_{i['video']['access_key']}") elif i['type'] == 'wall': urls.append(f"Пост: {i['wall']['text'][:25]}@https://vk.com/wall{i['wall']['from_id']}_{i['wall']['id']}") elif i['type'] == 'wall_reply': urls.append(f"Комментарий: {i['wall_reply']['text'][:25]}@https://vk.com/wall{i['wall_reply']['owner_id']}_{i['wall_reply']['post_id']}?reply={i['wall_reply']['id']}") elif i['type'] == 'audio': urls.append(f"https://vk.com/audio?q={i['audio']['artist'].replace(' ', '%20')}%20-%20{i['audio']['title'].replace(' ', '%20')}&tab=global") elif i['type'] == 'audio_playlist': urls.append(f"Плейлист: {i['audio_playlist']['title']}@https://vk.com/music?z=audio_playlist{i['audio_playlist']['owner_id']}_{i['audio_playlist']['id']}/{i['audio_playlist']['access_key']}") elif i['type'] == 'market': urls.append(f"https://vk.com/market?w=product{i['market']['owner_id']}_{i['market']['id']}") elif i['type'] == 'poll': urls.append(f"Голосование: {i['poll']['question'][:25]}@https://vk.com/poll{i['poll']['owner_id']}_{i['poll']['id']}") elif i['type'] == 'doc': urls.append(f"Документ: {i['doc']['title']}@{i['doc']['url']}") else: if 'url' in i[i['type']]: urls.append(i[i['type']]['url']) if urls == []: return None return urls def getPeerName(id): if id > 2000000000: cursor.execute("""SELECT chat_name FROM chats_cache WHERE chat_id = ?""", (id,)) fetch = cursor.fetchone() if fetch is None: try: name = tryAgainIfFailed( vk.messages.getChat, chat_id=id-2000000000 )['title'] cursor.execute("""INSERT INTO chats_cache (chat_id,chat_name) VALUES (?,?)""", (id, name,)) conn.commit() except Warning: name = "Секретный чат, используйте токен другого приложения" else: name = fetch[0] elif id < 0: cursor.execute("""SELECT user_name FROM users_cache WHERE user_id = ?""", (id,)) fetch = cursor.fetchone() if fetch is None: name = tryAgainIfFailed( vk.groups.getById, group_id=-id )[0]['name'] cursor.execute("""INSERT INTO users_cache (user_id,user_name) VALUES (?,?)""", (id, name,)) conn.commit() else: name = fetch[0] else: cursor.execute("""SELECT user_name FROM users_cache WHERE user_id = ?""", (id,)) fetch = cursor.fetchone() if fetch is None: name = tryAgainIfFailed( vk.users.get, user_id=id )[0] name = f"{name['first_name']} {name['last_name']}" cursor.execute("""INSERT INTO users_cache (user_id,user_name) VALUES (?,?)""", (id, name,)) conn.commit() else: name = fetch[0] return name def fwdParse(fwd): html = """<table class="table table-sm table-bordered"> """ for i in fwd: user_name = getPeerName(i['from_id']) if i['from_id'] < 0: html += """ <tr> <td> <a href='https://vk.com/public{}' target="_blank"> {} </a> </td> </tr> """.format(-i['from_id'], user_name) else: html += """ <tr> <td> <a href='https://vk.com/id{}' target="_blank"> {} </a> </td> </tr> """.format(i['from_id'], user_name) if i['text'] != "": html += """ <tr> <td> <div class='mes'> {} </div> """.format(xssFilter(i['text'])) else: html += """ <tr> <td> """ if i['attachments'] != []: html += attachmentsParse(parseUrls(i['attachments'])) if 'fwd_messages' in i: html += fwdParse(i['fwd_messages']) elif 'reply_message' in i: html += fwdParse([i['reply_message']]) html += """ </td> </tr> <tr> <td> {} </td> </tr> 
""".format(time.strftime('%H:%M:%S %d.%m.%y', time.localtime(i['date']))) html += "</table>" return html def xssFilter(s): return s\ .replace('<', '&lt;')\ .replace('>', '&gt;')\ .replace('\n', '<br />') def compareStrings(a, b): aCounter = 0 bCounter = 0 for i in difflib.SequenceMatcher(None, a, b).get_opcodes(): if i[0] == 'insert': b = f"{b[: i[3]+bCounter]}<ins>{b[i[3]+bCounter : i[4]+bCounter]}</ins>{b[i[4]+bCounter:]}" bCounter += 11 elif i[0] == 'delete': a = f"{a[: i[1]+aCounter]}<ins>{a[i[1]+aCounter : i[2]+aCounter]}</ins>{a[i[2]+aCounter:]}" aCounter += 11 elif i[0] == 'replace': a = f"{a[: i[1]+aCounter]}<ins>{a[i[1]+aCounter : i[2]+aCounter]}</ins>{a[i[2]+aCounter:]}" b = f"{b[: i[3]+bCounter]}<ins>{b[i[3]+bCounter : i[4]+bCounter]}</ins>{b[i[4]+bCounter:]}" aCounter += 11 bCounter += 11 return a, b def activityReport(message_id, peer_id=None, user_id=None, timestamp=None, isEdited=False, attachments=None, fwd=None, message=None): try: peer_name = user_name = oldMessage = oldAttachments = date = oldFwd = None cursor.execute("""SELECT * FROM messages WHERE message_id = ?""", (message_id,)) fetch = cursor.fetchone() if attachments is not None: attachments = parseUrls(json.loads(attachments)) if fwd is not None: fwd = json.loads(fwd) if fetch is None: if isEdited: logger.info("Изменение сообщения, отсутствующего в БД, message_id = %i.", message_id) fetch = [0]*7 peer_name = getPeerName(peer_id) user_name = getPeerName(user_id) oldMessage = f"⚠️ {message}" oldAttachments = attachments oldFwd = fwd date = f"<b>Доб:</b>&nbsp;{time.strftime('%H:%M:%S&nbsp;%d.%m', time.localtime(timestamp))}<br /><b>Изм:</b>&nbsp;{time.strftime('%H:%M:%S&nbsp;%d.%m', time.localtime())}" else: raise TypeError else: if fetch[3] is not None: oldMessage = str(fetch[3]) if fetch[4] is not None: oldAttachments = parseUrls(json.loads(fetch[4])) if fetch[6] is not None: oldFwd = json.loads(fetch[6]) peer_name = getPeerName(fetch[0]) user_name = getPeerName(fetch[1]) date = f"<b>Доб:</b>&nbsp;{time.strftime('%H:%M:%S&nbsp;%d.%m', time.localtime(fetch[5]))}<br /><b>Изм:</b>&nbsp;{time.strftime('%H:%M:%S&nbsp;%d.%m', time.localtime())}" peer_id = fetch[0] user_id = fetch[1] del fetch row = """ <tr><!-- {} --> <td>{} </td> <td>{} </td> {} <td> {} </td> </tr> """ messageBlock = """ <div class='mes'> {} </div>""" attachmentsBlock = """ <div> <b>Вложения</b><br /> {} </div>""" fwdBlock = """ <div> <b>Пересланное</b><br /> {} </div>""" if peer_id > 2000000000: peer_id = """ <a href='https://vk.com/im?sel=c{}' target='_blank'> {} </a>""".format(str(peer_id-2000000000), peer_name) elif peer_id < 0: peer_id = """ <a href='https://vk.com/public{}' target='_blank'> {} </a>""".format(str(-peer_id), peer_name) else: peer_id = """ <a href='https://vk.com/id{}' target='_blank'> {} </a>""".format(str(peer_id), peer_name) if user_id < 0: user_id = """ <a href='https://vk.com/public{}' target='_blank'> {} </a>""".format(str(-user_id), user_name) else: user_id = """ <a href='https://vk.com/id{}' target='_blank'> {} </a>""".format(str(user_id), user_name) if isEdited: if not (oldMessage is None or message is None): message = xssFilter(message) oldMessage = xssFilter(oldMessage) message, oldMessage = compareStrings(message, oldMessage) oldMessage = messageBlock.format(oldMessage) message = messageBlock.format(message) elif oldMessage is None: oldMessage = "" message = messageBlock.format(xssFilter(message)) else: oldMessage = messageBlock.format(xssFilter(oldMessage)) message = "" if oldAttachments is not None: oldAttachments = 
attachmentsBlock.format(attachmentsParse(oldAttachments)) else: oldAttachments = "" if oldFwd is not None: oldFwd = fwdBlock.format(fwdParse(oldFwd)) else: oldFwd = "" if attachments is not None: attachments = attachmentsBlock.format(attachmentsParse(attachments)) else: attachments = "" if fwd is not None: fwd = fwdBlock.format(fwdParse(fwd)) else: fwd = "" messageBlock = """<td width='50%'> <b>Старое</b><br />{} </td> <td width='50%'> <b>Новое</b><br />{} </td>""".format(oldMessage+oldAttachments+oldFwd, message+attachments+fwd) else: if oldMessage is not None: oldMessage = messageBlock.format(xssFilter(oldMessage)) else: oldMessage = "" if oldAttachments is not None: oldAttachments = attachmentsBlock.format(attachmentsParse(oldAttachments)) else: oldAttachments = "" if oldFwd is not None: oldFwd = fwdBlock.format(fwdParse(oldFwd)) else: oldFwd = "" messageBlock = """<td width='100%' colspan='2'> <b>Удалено</b><br />{} </td>""".format(oldMessage+oldAttachments+oldFwd) row = row.format(message_id, peer_id, user_id, messageBlock, date) if os.path.exists( os.path.join( cwd, "mesAct", f"messages_{time.strftime('%d%m%y', time.localtime())}.html" ) ): messagesActivities = open( os.path.join( cwd, "mesAct", f"messages_{time.strftime('%d%m%y',time.localtime())}.html" ), 'r', encoding='utf-8' ) messagesDump = messagesActivities.read() messagesActivities.close() messagesActivities = open( os.path.join( cwd, "mesAct", f"messages_{time.strftime('%d%m%y',time.localtime())}.html" ), 'w', encoding='utf-8' ) else: messagesDump = template messagesActivities = open( os.path.join( cwd, "mesAct", f"messages_{time.strftime('%d%m%y',time.localtime())}.html" ), 'w', encoding='utf-8' ) messagesDump = messagesDump[:offset]+row+messagesDump[offset:] messagesActivities.write(messagesDump) messagesActivities.close() except TypeError: raise TypeError except BaseException: logger.exception("Ошибка при логгировании изменений.") if not config['disableMessagesLogging']: tableWatcher = threading.Thread(target=bgWatcher) tableWatcher.start() template = """<!DOCTYPE html> <html> <head> <meta charset="utf-8"> <link rel="stylesheet" href="./bootstrap.css"> </head> <body> <table class="table table-sm"> </table> </body> </html>""" offset = template.index(""" </table>""") events = [] flag = threading.Event() def preloadMessages(): logger.info("Предзагрузка сообщений...") offset = 0 peer_ids = [] messages = [] shouldContinue = True try: while shouldContinue: shouldContinue = False dialogs = tryAgainIfFailed(vk.messages.getConversations, offset=offset, count=20) for i in range(0,len(dialogs['items'])): if dialogs['items'][i]['last_message']['date'] >= time.time() - config['maxCacheAge']: peer_ids.append(dialogs['items'][i]['conversation']['peer']['id']) if i == len(dialogs['items']) - 1: shouldContinue = True offset+=20 for i in peer_ids: offset = 0 if i > 2000000000: count = 200 else: count = 50 shouldContinue = True while shouldContinue: shouldContinue = False mes = vk.messages.getHistory(offset=offset, count=count, peer_id=i)['items'] if mes[-1]['date']>= time.time() - config['maxCacheAge']: shouldContinue = True offset+=count for j in mes: if j['date'] >= time.time() - config['maxCacheAge']: messages.append(j) for i in messages: message_id = i['id'] with stop_mutex: cursor.execute("""SELECT message_id FROM messages WHERE message_id = ?""", (message_id,)) if cursor.fetchone() is not None: continue peer_id = i['peer_id'] user_id = i['from_id'] message = i['text'] timestamp = i['date'] fwd_messages = None if 'reply_message' in i: 
fwd_messages = json.dumps([i['reply_message']], ensure_ascii=False,) elif i['fwd_messages'] != []: fwd_messages = json.dumps(i['fwd_messages'], ensure_ascii=False,) if i['attachments'] == []: attachments = None else: attachments = json.dumps(i['attachments'], ensure_ascii=False,) with stop_mutex: cursor.execute( """INSERT INTO messages(peer_id,user_id,message_id,message,attachments,timestamp,fwd_messages) VALUES (?,?,?,?,?,?,?)""", (peer_id, user_id, message_id, message, attachments, timestamp, fwd_messages,) ) conn.commit() except BaseException: logger.exception("Ошибка во время предзагрузки сообщений") logger.info("Предзагрузка сообщений завершена.") if config['customActions'] and config['disableMessagesLogging']: threading.Thread(target=eventWorker_predefinedDisabled).start() elif not config['disableMessagesLogging'] and not config['customActions']: threading.Thread(target=eventWorker_customDisabled).start() else: threading.Thread(target=eventWorker).start() if config['preloadMessages']: threading.Thread(target=preloadMessages).start() try: tryAgainIfFailed( main, maxRetries=-1 ) except Warning: pass
[ [ [ 7, 14 ] ], [ [ 22, 38 ], [ 288, 295 ], [ 398, 405 ], [ 425, 432 ], [ 469, 476 ], [ 493, 500 ], [ 608, 615 ] ], [ [ 46, 49 ], [ 376, 379 ], [ 1084, 1087 ], [ 923, 926 ], [ 1613, 1616 ], [ 1668, 1671 ], [ 3697, 3700 ] ], [ [ 57, 59 ], [ 245, 247 ], [ 261, 263 ], [ 535, 537 ], [ 1497, 1499 ], [ 1536, 1538 ], [ 1783, 1785 ], [ 1798, 1800 ], [ 1847, 1849 ], [ 2052, 2054 ], [ 2327, 2329 ], [ 4777, 4779 ], [ 4801, 4803 ], [ 4878, 4880 ], [ 4903, 4905 ], [ 5009, 5011 ], [ 6798, 6800 ], [ 6822, 6824 ], [ 6940, 6942 ], [ 8148, 8150 ], [ 8296, 8298 ], [ 8320, 8322 ], [ 8448, 8450 ], [ 12254, 12256 ], [ 36207, 36209 ], [ 36235, 36237 ], [ 36453, 36455 ], [ 36836, 36838 ], [ 37177, 37179 ] ], [ [ 67, 71 ], [ 1934, 1938 ], [ 2117, 2121 ], [ 2387, 2391 ], [ 16470, 16474 ], [ 16648, 16652 ], [ 18245, 18249 ], [ 19106, 19110 ], [ 22737, 22741 ], [ 23198, 23202 ], [ 23313, 23317 ], [ 23458, 23462 ], [ 30844, 30848 ], [ 30915, 30919 ], [ 31743, 31747 ], [ 31827, 31831 ], [ 40004, 40008 ], [ 40131, 40135 ], [ 40304, 40308 ] ], [ [ 79, 86 ], [ 6911, 6918 ], [ 8119, 8126 ], [ 14883, 14890 ] ], [ [ 94, 100 ], [ 12267, 12273 ], [ 12281, 12287 ], [ 12315, 12321 ], [ 12329, 12335 ] ], [ [ 108, 117 ], [ 2491, 2500 ], [ 3335, 3344 ], [ 37762, 37771 ], [ 38133, 38142 ], [ 40928, 40937 ], [ 41071, 41080 ], [ 41141, 41150 ], [ 41221, 41230 ] ], [ [ 125, 129 ], [ 3999, 4003 ], [ 4415, 4419 ], [ 11688, 11692 ], [ 12038, 12042 ], [ 29435, 29439 ], [ 29470, 29474 ], [ 31377, 31381 ], [ 31414, 31418 ], [ 31468, 31472 ], [ 31505, 31509 ], [ 31982, 31986 ], [ 32019, 32023 ], [ 32072, 32076 ], [ 32109, 32113 ], [ 36324, 36328 ], [ 36348, 36352 ], [ 36554, 36558 ], [ 36577, 36581 ], [ 36937, 36941 ], [ 36960, 36964 ], [ 37278, 37282 ], [ 37301, 37305 ], [ 38584, 38588 ], [ 39235, 39239 ], [ 39414, 39418 ] ], [ [ 137, 144 ], [ 29739, 29746 ] ], [ [ 152, 158 ], [ 4531, 4537 ], [ 3650, 3656 ] ], [ [ 187, 197 ], [ 4605, 4615 ] ], [ [ 199, 210 ], [ 13462, 13473 ], [ 13864, 13875 ], [ 14476, 14487 ] ], [ [ 218, 237 ], [ 811, 819 ], [ 3897, 3905 ] ], [ [ 239, 242 ], [ 548, 551 ], [ 1510, 1513 ], [ 1549, 1552 ], [ 1811, 1814 ], [ 1860, 1863 ], [ 2065, 2068 ], [ 2340, 2343 ], [ 4827, 4830 ], [ 4933, 4936 ], [ 5035, 5038 ], [ 6848, 6851 ], [ 6970, 6973 ], [ 8161, 8164 ], [ 8346, 8349 ], [ 8474, 8477 ], [ 36265, 36268 ], [ 36487, 36490 ], [ 36870, 36873 ], [ 37211, 37214 ] ], [ [ 416, 422 ], [ 453, 459 ], [ 672, 678 ], [ 699, 705 ], [ 997, 1003 ], [ 2765, 2771 ], [ 3107, 3113 ], [ 3773, 3779 ], [ 4116, 4122 ], [ 4258, 4264 ], [ 11293, 11299 ], [ 11447, 11453 ], [ 11929, 11935 ], [ 11988, 11994 ], [ 12221, 12227 ], [ 12611, 12617 ], [ 13217, 13223 ], [ 14808, 14814 ], [ 14915, 14921 ], [ 15085, 15091 ], [ 15174, 15180 ], [ 16209, 16215 ], [ 22920, 22926 ], [ 30998, 31004 ], [ 37646, 37652 ], [ 38179, 38185 ], [ 40746, 40752 ], [ 40809, 40815 ] ], [ [ 483, 490 ], [ 587, 594 ], [ 690, 697 ] ], [ [ 729, 745 ], [ 1101, 1117 ] ], [ [ 1119, 1132 ], [ 1944, 1957 ], [ 1992, 2005 ], [ 2018, 2031 ], [ 2170, 2183 ], [ 2197, 2210 ], [ 2287, 2300 ], [ 2397, 2410 ], [ 2441, 2454 ], [ 2463, 2476 ], [ 1636, 1649 ], [ 1689, 1702 ] ], [ [ 1578, 1598 ], [ 1903, 1923 ], [ 2230, 2250 ] ], [ [ 1889, 1893 ], [ 1959, 1963 ] ], [ [ 1983, 1989 ], [ 3264, 3270 ], [ 3389, 3395 ], [ 4550, 4556 ], [ 4732, 4738 ], [ 11066, 11072 ], [ 37709, 37715 ], [ 40862, 40868 ], [ 40890, 40896 ], [ 41001, 41007 ], [ 41042, 41048 ], [ 41190, 41196 ], [ 2567, 2573 ], [ 2544, 2550 ], [ 2588, 2594 ], [ 2732, 2738 ], [ 2972, 2978 ], [ 3015, 3021 ], [ 11234, 11240 ], [ 38598, 38604 
], [ 39249, 39255 ], [ 39428, 39434 ] ], [ [ 2094, 2098 ], [ 2127, 2131 ] ], [ [ 2108, 2114 ], [ 2146, 2152 ], [ 2216, 2222 ], [ 2268, 2274 ] ], [ [ 2141, 2142 ], [ 2165, 2166 ], [ 2223, 2224 ], [ 2211, 2212 ] ], [ [ 2369, 2373 ], [ 2412, 2416 ] ], [ [ 2432, 2438 ], [ 3264, 3270 ], [ 3389, 3395 ], [ 4550, 4556 ], [ 4732, 4738 ], [ 11066, 11072 ], [ 37709, 37715 ], [ 40862, 40868 ], [ 40890, 40896 ], [ 41001, 41007 ], [ 41042, 41048 ], [ 41190, 41196 ], [ 2567, 2573 ], [ 2544, 2550 ], [ 2588, 2594 ], [ 2732, 2738 ], [ 2972, 2978 ], [ 3015, 3021 ], [ 11234, 11240 ], [ 38598, 38604 ], [ 39249, 39255 ], [ 39428, 39434 ] ], [ [ 2478, 2488 ], [ 11269, 11279 ], [ 12500, 12510 ], [ 12860, 12870 ], [ 13106, 13116 ], [ 39571, 39581 ], [ 40371, 40381 ] ], [ [ 2513, 2529 ], [ 3359, 3375 ] ], [ [ 3327, 3330 ], [ 2838, 2841 ], [ 3179, 3182 ] ], [ [ 3440, 3452 ], [ 3457, 3469 ] ], [ [ 3477, 3493 ], [ 4684, 4700 ], [ 7751, 7767 ], [ 41280, 41296 ], [ 17169, 17185 ], [ 17396, 17412 ], [ 22798, 22814 ], [ 26103, 26119 ], [ 26701, 26717 ], [ 27158, 27174 ], [ 38393, 38409 ] ], [ [ 4518, 4528 ], [ 4616, 4626 ], [ 4650, 4660 ] ], [ [ 4594, 4602 ], [ 15247, 15255 ] ], [ [ 4645, 4647 ], [ 4701, 4703 ], [ 7781, 7783 ], [ 11160, 11162 ], [ 17199, 17201 ], [ 17426, 17428 ], [ 22824, 22826 ], [ 26141, 26143 ], [ 26735, 26737 ], [ 27192, 27194 ], [ 38410, 38412 ], [ 39127, 39129 ] ], [ [ 4671, 4681 ], [ 7815, 7825 ], [ 8035, 8045 ], [ 15614, 15624 ], [ 15742, 15752 ] ], [ [ 4991, 4992 ], [ 5155, 5156 ], [ 6777, 6778 ] ], [ [ 6904, 6908 ], [ 7143, 7147 ], [ 8080, 8084 ], [ 11164, 11168 ], [ 11758, 11762 ], [ 12105, 12109 ], [ 12975, 12979 ], [ 13383, 13387 ], [ 13823, 13827 ], [ 14435, 14439 ], [ 14748, 14752 ], [ 19224, 19228 ], [ 26355, 26359 ], [ 26924, 26928 ], [ 27429, 27433 ], [ 40698, 40702 ] ], [ [ 7134, 7140 ], [ 7165, 7171 ], [ 7460, 7466 ], [ 7598, 7604 ], [ 7930, 7936 ], [ 11170, 11176 ], [ 11575, 11581 ], [ 11792, 11798 ], [ 12123, 12129 ], [ 13499, 13505 ], [ 14100, 14106 ], [ 14589, 14595 ], [ 16336, 16342 ], [ 16497, 16503 ], [ 16520, 16526 ], [ 16675, 16681 ], [ 17926, 17932 ], [ 18111, 18117 ], [ 18793, 18799 ], [ 18971, 18977 ], [ 25922, 25928 ], [ 26019, 26025 ], [ 26247, 26253 ], [ 26541, 26547 ], [ 26638, 26644 ], [ 26820, 26826 ], [ 26998, 27004 ], [ 27095, 27101 ], [ 27325, 27331 ], [ 30657, 30663 ], [ 30754, 30760 ], [ 39599, 39605 ], [ 39708, 39714 ], [ 40399, 40405 ] ], [ [ 7736, 7748 ], [ 7865, 7877 ], [ 7894, 7906 ] ], [ [ 7847, 7859 ], [ 8047, 8059 ] ], [ [ 8112, 8116 ], [ 8271, 8275 ], [ 11164, 11168 ], [ 11758, 11762 ], [ 12105, 12109 ], [ 12975, 12979 ], [ 13383, 13387 ], [ 13823, 13827 ], [ 14435, 14439 ], [ 14748, 14752 ], [ 19224, 19228 ], [ 26355, 26359 ], [ 26924, 26928 ], [ 27429, 27433 ], [ 40698, 40702 ] ], [ [ 8262, 8268 ], [ 11170, 11176 ], [ 11575, 11581 ], [ 11792, 11798 ], [ 12123, 12129 ], [ 13499, 13505 ], [ 14100, 14106 ], [ 14589, 14595 ], [ 16336, 16342 ], [ 16497, 16503 ], [ 16520, 16526 ], [ 16675, 16681 ], [ 17926, 17932 ], [ 18111, 18117 ], [ 18793, 18799 ], [ 18971, 18977 ], [ 25922, 25928 ], [ 26019, 26025 ], [ 26247, 26253 ], [ 26541, 26547 ], [ 26638, 26644 ], [ 26820, 26826 ], [ 26998, 27004 ], [ 27095, 27101 ], [ 27325, 27331 ], [ 30657, 30663 ], [ 30754, 30760 ], [ 39599, 39605 ], [ 39708, 39714 ], [ 40399, 40405 ] ], [ [ 8426, 8427 ], [ 8608, 8609 ], [ 11052, 11053 ] ], [ [ 11121, 11134 ], [ 11146, 11159 ] ], [ [ 11139, 11143 ], [ 12545, 12549 ], [ 13151, 13155 ] ], [ [ 11183, 11192 ], [ 37786, 37795 ] ], [ [ 12067, 12084 ], [ 12296, 12313 ], [ 12345, 
12362 ], [ 3829, 3846 ], [ 14991, 15008 ] ], [ [ 12369, 12399 ], [ 40952, 40982 ] ], [ [ 12733, 12759 ], [ 41095, 41121 ] ], [ [ 12994, 13005 ], [ 41165, 41176 ] ], [ [ 13402, 13419 ], [ 12884, 12901 ], [ 13292, 13309 ] ], [ [ 15162, 15166 ], [ 41306, 41310 ] ], [ [ 16294, 16328 ], [ 11360, 11394 ] ], [ [ 19243, 19253 ], [ 18431, 18441 ], [ 19596, 19606 ] ], [ [ 19924, 19942 ], [ 17568, 17586 ], [ 19746, 19764 ] ], [ [ 20024, 20040 ], [ 28915, 28931 ], [ 34641, 34657 ], [ 34963, 34979 ], [ 35724, 35740 ] ], [ [ 21603, 21617 ], [ 15419, 15433 ] ], [ [ 23567, 23576 ], [ 28932, 28941 ], [ 30834, 30843 ], [ 31733, 31742 ] ], [ [ 25873, 25884 ], [ 27647, 27658 ], [ 31142, 31153 ], [ 31191, 31202 ], [ 31872, 31883 ], [ 31918, 31929 ] ], [ [ 27506, 27514 ], [ 29013, 29021 ], [ 29096, 29104 ], [ 34805, 34813 ], [ 35115, 35123 ], [ 35888, 35896 ] ], [ [ 29542, 29551 ], [ 28727, 28736 ], [ 34027, 34036 ], [ 34075, 34084 ], [ 34402, 34411 ], [ 34489, 34498 ], [ 35551, 35560 ] ], [ [ 29670, 29684 ], [ 34135, 34149 ] ], [ [ 30427, 30441 ], [ 13944, 13958 ], [ 14540, 14554 ], [ 17817, 17831 ], [ 18699, 18713 ] ], [ [ 37747, 37759 ], [ 37801, 37813 ], [ 12155, 12167 ] ], [ [ 37826, 37834 ], [ 38075, 38083 ], [ 37113, 37121 ] ], [ [ 38066, 38072 ], [ 37450, 37456 ], [ 37475, 37481 ] ], [ [ 38114, 38120 ], [ 12473, 12479 ], [ 12689, 12695 ], [ 12833, 12839 ], [ 12924, 12930 ], [ 13079, 13085 ], [ 13332, 13338 ], [ 15928, 15934 ], [ 16078, 16084 ] ], [ [ 38126, 38130 ], [ 12445, 12449 ], [ 12715, 12719 ], [ 12805, 12809 ], [ 12950, 12954 ], [ 13051, 13055 ], [ 13358, 13362 ], [ 15965, 15969 ], [ 16115, 16119 ] ], [ [ 38156, 38171 ], [ 41245, 41260 ] ] ]
""" Definition of events. """ from abc import ABC EVENT_LOG = 'eLog' #Log Event EVENT_MARKETDATA = 'eMarketData' #Pushing MarketData Event EVENT_TRADE = 'eTrade' #Trade Event EVENT_BUY = 'eBuy' #Buy Event EVENT_SELL = 'eSell' #Sell Event EVENT_CANCEL = 'eCancel' #Cancel Event EVENT_POSITION = 'ePosition' #Position Query Event EVENT_STATUS = 'eStatus' #Order Status Event EVENT_ACCOUNT = 'eAccount' #Account Query Event EVENT_PROFIT_CHANGED = 'eProfitChanged' #Profit Event class StrategyEvent: def __init__(self, type_=None, even_param_=None): self.type_ = type_ self.even_param_ = even_param_ def clear(self): """ Delete unreferenced source. """ self.even_param_.clear() class EventEngine(ABC): pass
[ [ [ 48, 51 ], [ 931, 934 ] ], [ [ 54, 63 ] ], [ [ 109, 125 ] ], [ [ 178, 189 ] ], [ [ 235, 244 ] ], [ [ 290, 300 ] ], [ [ 346, 358 ] ], [ [ 404, 418 ] ], [ [ 469, 481 ] ], [ [ 532, 545 ] ], [ [ 596, 616 ] ], [ [ 661, 674 ] ], [ [ 919, 930 ] ] ]
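A minimal usage sketch for the event module above: wrap a payload in a StrategyEvent tagged with one of the event-type constants. The `events` import path is an assumption, since the file name is not shown.

from events import StrategyEvent, EVENT_TRADE  # assumed module name

event = StrategyEvent(type_=EVENT_TRADE, even_param_={"symbol": "AAPL", "qty": 100})

if event.type_ == EVENT_TRADE:
    print("trade event:", event.even_param_)

event.clear()                    # empties the payload dict in place
assert event.even_param_ == {}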
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Business Applications # Copyright (C) 2004-2012 OpenERP S.A. (<http://openerp.com>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## import openerp from openerp import SUPERUSER_ID from openerp.osv import fields, osv from openerp.tools.translate import _ class sale_configuration(osv.osv_memory): _inherit = 'sale.config.settings' _columns = { 'group_invoice_deli_orders': fields.boolean('Generate invoices after and based on delivery orders', implied_group='sale_stock.group_invoice_deli_orders', help="To allow your salesman to make invoices for Delivery Orders using the menu 'Deliveries to Invoice'."), 'task_work': fields.boolean("Prepare invoices based on task's activities", help='Lets you transfer the entries under tasks defined for Project Management to ' 'the Timesheet line entries for particular date and particular user with the effect of creating, editing and deleting either ways ' 'and to automatically creates project tasks from procurement lines.\n' '-This installs the modules project_timesheet and sale_service.'), 'default_order_policy': fields.selection( [('manual', 'Invoice based on sales orders'), ('picking', 'Invoice based on deliveries')], 'The default invoicing method is', default_model='sale.order', help="You can generate invoices based on sales orders or based on shippings."), 'module_delivery': fields.boolean('Allow adding shipping costs', help='Allows you to add delivery methods in sales orders and delivery orders.\n' 'You can define your own carrier and delivery grids for prices.\n' '-This installs the module delivery.'), 'default_picking_policy' : fields.boolean("Deliver all at once when all products are available.", help = "Sales order by default will be configured to deliver all products at once instead of delivering each product when it is available. This may have an impact on the shipping price."), 'group_mrp_properties': fields.boolean('Product properties on order lines', implied_group='sale.group_mrp_properties', help="Allows you to tag sales order lines with properties."), 'module_project_timesheet': fields.boolean("Project Timesheet"), 'module_sale_service': fields.boolean("Sale Service"), 'group_route_so_lines': fields.boolean('Choose MTO, drop shipping,... 
on sales order lines', implied_group='sale_stock.group_route_so_lines', help="Allows you to choose a delivery route on sales order lines"), } _defaults = { 'default_order_policy': 'manual', } def default_get(self, cr, uid, fields, context=None): res = super(sale_configuration, self).default_get(cr, uid, fields, context) # task_work, time_unit depend on other fields res['task_work'] = res.get('module_sale_service') and res.get('module_project_timesheet') return res def get_default_sale_config(self, cr, uid, ids, context=None): ir_values = self.pool.get('ir.values') default_picking_policy = ir_values.get_default(cr, uid, 'sale.order', 'picking_policy') return { 'default_picking_policy': default_picking_policy == 'one', } def set_sale_defaults(self, cr, uid, ids, context=None): if uid != SUPERUSER_ID and not self.pool['res.users'].has_group(cr, uid, 'base.group_erp_manager'): raise openerp.exceptions.AccessError(_("Only administrators can change the settings")) ir_values = self.pool.get('ir.values') wizard = self.browse(cr, uid, ids)[0] default_picking_policy = 'one' if wizard.default_picking_policy else 'direct' ir_values.set_default(cr, SUPERUSER_ID, 'sale.order', 'picking_policy', default_picking_policy) res = super(sale_configuration, self).set_sale_defaults(cr, uid, ids, context) return res def onchange_invoice_methods(self, cr, uid, ids, group_invoice_so_lines, group_invoice_deli_orders, context=None): if not group_invoice_deli_orders: return {'value': {'default_order_policy': 'manual'}} if not group_invoice_so_lines: return {'value': {'default_order_policy': 'picking'}} return {}
[ [ [ 995, 1002 ], [ 4414, 4421 ] ], [ [ 1023, 1035 ], [ 4306, 4318 ], [ 4709, 4721 ] ], [ [ 1060, 1066 ], [ 1246, 1252 ], [ 1525, 1531 ], [ 2037, 2043 ], [ 2352, 2358 ], [ 2667, 2673 ], [ 2971, 2977 ], [ 3188, 3194 ], [ 3256, 3262 ], [ 3320, 3326 ] ], [ [ 1068, 1071 ], [ 1136, 1139 ] ], [ [ 1108, 1109 ], [ 4445, 4446 ] ], [ [ 1117, 1135 ], [ 3682, 3700 ], [ 4799, 4817 ] ] ]
import pytest
import io

from cite_seq_count import preprocessing


@pytest.fixture
def data():
    from collections import OrderedDict
    from itertools import islice

    # Test file paths
    pytest.correct_whitelist_path = 'tests/test_data/whitelists/correct.csv'
    pytest.correct_tags_path = 'tests/test_data/tags/correct.csv'
    pytest.correct_R1_path = 'tests/test_data/fastq/correct_R1.fastq.gz'
    pytest.correct_R2_path = 'tests/test_data/fastq/correct_R2.fastq.gz'
    pytest.corrupt_R1_path = 'tests/test_data/fastq/corrupted_R1.fastq.gz'
    pytest.corrupt_R2_path = 'tests/test_data/fastq/corrupted_R2.fastq.gz'

    # Create some variables to compare to
    pytest.correct_whitelist = set(['ACTGTTTTATTGGCCT','TTCATAAGGTAGGGAT'])
    pytest.correct_tags = {
        'AGGACCATCCAA':'CITE_LEN_12_1',
        'ACATGTTACCGT':'CITE_LEN_12_2',
        'AGCTTACTATCC':'CITE_LEN_12_3',
        'TCGATAATGCGAGTACAA':'CITE_LEN_18_1',
        'GAGGCTGAGCTAGCTAGT':'CITE_LEN_18_2',
        'GGCTGATGCTGACTGCTA':'CITE_LEN_18_3',
        'TGTGACGTATTGCTAGCTAG':'CITE_LEN_20_1',
        'ACTGTCTAACGGGTCAGTGC':'CITE_LEN_20_2',
        'TATCACATCGGTGGATCCAT':'CITE_LEN_20_3'}
    pytest.correct_ordered_tags = OrderedDict({
        'TGTGACGTATTGCTAGCTAG':'CITE_LEN_20_1-TGTGACGTATTGCTAGCTAG',
        'ACTGTCTAACGGGTCAGTGC':'CITE_LEN_20_2-ACTGTCTAACGGGTCAGTGC',
        'TATCACATCGGTGGATCCAT':'CITE_LEN_20_3-TATCACATCGGTGGATCCAT',
        'TCGATAATGCGAGTACAA':'CITE_LEN_18_1-TCGATAATGCGAGTACAA',
        'GAGGCTGAGCTAGCTAGT':'CITE_LEN_18_2-GAGGCTGAGCTAGCTAGT',
        'GGCTGATGCTGACTGCTA':'CITE_LEN_18_3-GGCTGATGCTGACTGCTA',
        'AGGACCATCCAA':'CITE_LEN_12_1-AGGACCATCCAA',
        'ACATGTTACCGT':'CITE_LEN_12_2-ACATGTTACCGT',
        'AGCTTACTATCC':'CITE_LEN_12_3-AGCTTACTATCC'})
    pytest.barcode_slice = slice(0, 16)
    pytest.umi_slice = slice(16, 26)
    pytest.barcode_umi_length = 26


@pytest.mark.dependency()
def test_parse_whitelist_csv(data):
    assert preprocessing.parse_whitelist_csv(pytest.correct_whitelist_path, 16, 1) == (pytest.correct_whitelist,1)


@pytest.mark.dependency()
def test_parse_tags_csv(data):
    assert preprocessing.parse_tags_csv(pytest.correct_tags_path) == pytest.correct_tags


@pytest.mark.dependency(depends=['test_parse_tags_csv'])
def test_check_tags(data):
    assert preprocessing.check_tags(pytest.correct_tags, 5) == pytest.correct_ordered_tags


@pytest.mark.dependency(depends=['test_check_tags'])
def test_check_distance_too_big_between_tags(data):
    with pytest.raises(SystemExit):
        preprocessing.check_tags(pytest.correct_tags, 8)


@pytest.mark.dependency(depends=['test_parse_whitelist_csv'])
def test_check_barcodes_lengths(data):
    assert preprocessing.check_barcodes_lengths(26, 1, 16, 17, 26) == (pytest.barcode_slice, pytest.umi_slice, pytest.barcode_umi_length)


@pytest.mark.dependency()
def test_get_n_lines(data):
    assert preprocessing.get_n_lines(pytest.correct_R1_path) == (200 * 4)


@pytest.mark.dependency(depends=['test_get_n_lines'])
def test_get_n_lines_not_multiple_of_4(data):
    with pytest.raises(SystemExit):
        preprocessing.get_n_lines(pytest.corrupt_R1_path)
[ [ [ 7, 13 ], [ 68, 74 ], [ 1911, 1917 ], [ 2089, 2095 ], [ 2236, 2242 ], [ 2412, 2418 ], [ 2611, 2617 ], [ 2851, 2857 ], [ 2978, 2984 ], [ 199, 205 ], [ 276, 282 ], [ 342, 348 ], [ 415, 421 ], [ 488, 494 ], [ 563, 569 ], [ 685, 691 ], [ 761, 767 ], [ 1191, 1197 ], [ 1801, 1807 ], [ 1841, 1847 ], [ 1878, 1884 ], [ 2017, 2023 ], [ 2059, 2065 ], [ 2185, 2191 ], [ 2214, 2220 ], [ 2355, 2361 ], [ 2382, 2388 ], [ 2525, 2531 ], [ 2585, 2591 ], [ 2782, 2788 ], [ 2804, 2810 ], [ 2822, 2828 ], [ 2939, 2945 ], [ 3084, 3090 ], [ 3141, 3147 ] ], [ [ 21, 23 ] ], [ [ 51, 64 ], [ 1983, 1996 ], [ 2156, 2169 ], [ 2330, 2343 ], [ 2560, 2573 ], [ 2722, 2735 ], [ 2913, 2926 ], [ 3115, 3128 ] ], [ [ 87, 91 ] ], [ [ 1940, 1964 ] ], [ [ 2118, 2137 ] ], [ [ 2296, 2311 ] ], [ [ 2468, 2508 ] ], [ [ 2676, 2703 ] ], [ [ 2880, 2896 ] ], [ [ 3035, 3069 ] ] ]
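An illustrative sketch of the call signatures exercised by the tests above; the argument meanings (barcode length, collapsing threshold, slice bounds) are inferred from the assertions, not taken from cite_seq_count's documentation.

from cite_seq_count import preprocessing

# Parse a 16-bp cell barcode whitelist, allowing 1 collapsing error.
whitelist, _ = preprocessing.parse_whitelist_csv(
    'tests/test_data/whitelists/correct.csv', 16, 1)

# Load the tag sequences and order them after a distance check.
tags = preprocessing.parse_tags_csv('tests/test_data/tags/correct.csv')
ordered_tags = preprocessing.check_tags(tags, 5)

# Derive barcode/UMI slices for a 26-bp read (16 bp barcode + 10 bp UMI).
barcode_slice, umi_slice, barcode_umi_length = \
    preprocessing.check_barcodes_lengths(26, 1, 16, 17, 26)

# A FASTQ file has 4 lines per read.
n_reads = preprocessing.get_n_lines('tests/test_data/fastq/correct_R1.fastq.gz') // 4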
from game_data import * from hosting import ServerHandler, ClientHandler import json board = [ ["R", "K", "B", "Q", "E", "B", "K", "R"], ["P", "P", "P", "P", "P", "P", "P", "P"], [" ", " ", " ", " ", " ", " ", " ", " "], [" ", " ", " ", " ", " ", " ", " ", " "], [" ", " ", " ", " ", " ", " ", " ", " "], [" ", " ", " ", " ", " ", " ", " ", " "], ["P", "P", "P", "P", "P", "P", "P", "P"], ["R", "K", "B", "Q", "E", "B", "K", "R"] ] pieces = Initiator() pos_handler = PositionHandler(pieces[0]+pieces[1]) p1 = Player("white", pieces[0]) p2 = Player("black", pieces[1]) player_handler = PlayerHandler(p1, p2) end = False win_team = None checkmate = False try: try: net = eval(input("Enter Server IP, Port to Host: ")) except KeyboardInterrupt: exit() if type(net[0]) == str and net[1] > 5000 and net[1] < 65000: server = ServerHandler(*net) DisplayBoard(board) while True: error_msg = "" if player_handler.current.team == "white": if checkmate: error_msg = "You're in Checkmate" print(player_handler.current.give_pieces_position()) try: piece_pos = eval(input("Position of Piece: ")) piece_to_go = eval(input("Position To Go: ")) except KeyboardInterrupt: break if PositionChecks(piece_pos) and PositionChecks(piece_to_go): piece = pos_handler.get_piece(piece_pos) if piece == False or piece.team != player_handler.current.team: error_msg = "Piece Position is Incorrect" else: check, piece, n_board = player_handler.play_piece(piece, piece_to_go, board, pos_handler) if check: board = n_board if piece != " ": pieces[2].append(piece) player_handler.remove_piece(piece) pos_handler = PositionHandler(player_handler.player1.pieces + player_handler.player2.pieces) end, lose_player = player_handler.game_end() checkmate = player_handler.checkmate(board, pos_handler) player_handler.change_player() else: error_msg = "Bad Position" else: error_msg = "Bad Position" clear_screen() DisplayBoard(board) print(error_msg) if end: break win_team = "white" if lose_player.team == "black" else "black" else: if checkmate: server.send_state(server.encode_state("", "", "You're in Checkmate")) server.send_state(server.encode_state(board, player_handler.current.give_pieces_position(), "")) server.send_state("input") pos_data = server.recv_inputs() try: pos_data = json.loads(pos_data) print(pos_data) piece_pos = tuple(pos_data["piece_pos"]) piece_to_go = tuple(pos_data["piece_to_go"]) if PositionChecks(piece_pos) and PositionChecks(piece_to_go): piece = pos_handler.get_piece(piece_pos) print(piece) if piece == False or piece.team != player_handler.current.team: server.send_state(server.encode_state("", "", "Piece Position is Incorrect")) else: check, piece, n_board = player_handler.play_piece(piece, piece_to_go, board, pos_handler) if check: board = n_board if piece != " ": pieces[2].append(piece) player_handler.remove_piece(piece) pos_handler = PositionHandler(player_handler.player1.pieces + player_handler.player2.pieces) end, lose_player = player_handler.game_end() checkmate = player_handler.checkmate(board, pos_handler) player_handler.change_player() server.send_state(server.encode_state(board, "", "")) else: server.send_state(server.encode_state("", "", "Bad Position")) else: server.send_state(server.encode_state("", "", "Bad Position")) # clear_screen() if end: win_team = "white" if lose_player.team == "black" else "black" break clear_screen() DisplayBoard(board) except json.decoder.JSONDecodeError: pass server.send_state(server.encode_state("", "", f"{win_team} Won The Match")) server.close_conn("end") else: print("[-] IP/Port is not 
Correctly Specified as rules.") print("[-] Ip should be like \"127.0.0.1\" and Port Should be Between 5000 and 65000") print("[-] Enter both like this \"127.0.0.1\", 9999") print("[-] Do It Correctly Next Time Bitch :]") except ConnectionResetError: print("Client Disconnected") except SyntaxError: server.close_conn("end") print("Syntax Error")
[ [ [ 22, 23 ], [ 476, 485 ], [ 502, 517 ], [ 544, 550 ], [ 576, 582 ], [ 620, 633 ], [ 885, 897 ], [ 1257, 1271 ], [ 1287, 1301 ], [ 1745, 1760 ], [ 2070, 2082 ], [ 2089, 2101 ], [ 2662, 2676 ], [ 2692, 2706 ], [ 3216, 3231 ], [ 3799, 3811 ], [ 3819, 3831 ] ], [ [ 44, 57 ], [ 863, 876 ] ], [ [ 59, 72 ] ], [ [ 80, 84 ], [ 2516, 2520 ], [ 3850, 3854 ] ], [ [ 86, 91 ], [ 898, 903 ], [ 1566, 1571 ], [ 2102, 2107 ], [ 2365, 2370 ], [ 3031, 3036 ], [ 3832, 3837 ] ], [ [ 467, 473 ], [ 518, 524 ], [ 528, 534 ], [ 560, 566 ], [ 592, 598 ], [ 1657, 1663 ], [ 3126, 3132 ] ], [ [ 488, 499 ], [ 1329, 1340 ], [ 1573, 1584 ], [ 2735, 2746 ], [ 3038, 3049 ] ], [ [ 539, 541 ], [ 634, 636 ] ], [ [ 571, 573 ], [ 638, 640 ] ], [ [ 603, 617 ], [ 943, 957 ], [ 1050, 1064 ], [ 1402, 1416 ], [ 1520, 1534 ], [ 1689, 1703 ], [ 1761, 1775 ], [ 1793, 1807 ], [ 1850, 1864 ], [ 1895, 1909 ], [ 1947, 1961 ], [ 2372, 2386 ], [ 2828, 2842 ], [ 2985, 2999 ], [ 3159, 3173 ], [ 3232, 3246 ], [ 3264, 3278 ], [ 3322, 3336 ], [ 3368, 3382 ], [ 3421, 3435 ] ], [ [ 642, 645 ], [ 2137, 2140 ], [ 3708, 3711 ] ], [ [ 654, 662 ], [ 3941, 3949 ] ], [ [ 670, 679 ], [ 990, 999 ], [ 2237, 2246 ] ], [ [ 701, 704 ], [ 799, 802 ], [ 818, 821 ], [ 836, 839 ], [ 878, 881 ] ], [ [ 854, 860 ], [ 2253, 2259 ], [ 2271, 2277 ], [ 2327, 2333 ], [ 2345, 2351 ], [ 2428, 2434 ], [ 2470, 2476 ], [ 2864, 2870 ], [ 2882, 2888 ], [ 3460, 3466 ], [ 3478, 3484 ], [ 3535, 3541 ], [ 3553, 3559 ], [ 3615, 3621 ], [ 3633, 3639 ], [ 3892, 3898 ], [ 3910, 3916 ], [ 3970, 3976 ], [ 4337, 4343 ] ], [ [ 922, 931 ], [ 2119, 2128 ] ], [ [ 1006, 1015 ], [ 2119, 2128 ] ], [ [ 1111, 1120 ], [ 1272, 1281 ], [ 1351, 1360 ] ], [ [ 1163, 1174 ], [ 1302, 1313 ], [ 1553, 1564 ] ], [ [ 1321, 1326 ], [ 1370, 1375 ], [ 1388, 1393 ], [ 1546, 1551 ] ], [ [ 1437, 1446 ], [ 2119, 2128 ] ], [ [ 1496, 1501 ], [ 1595, 1600 ] ], [ [ 1503, 1508 ], [ 1635, 1640 ], [ 1674, 1679 ], [ 1717, 1722 ] ], [ [ 1510, 1517 ], [ 1617, 1624 ] ], [ [ 1609, 1614 ], [ 1920, 1925 ], [ 2102, 2107 ], [ 1566, 1571 ], [ 2365, 2370 ], [ 3031, 3036 ], [ 3832, 3837 ] ], [ [ 1731, 1742 ], [ 1927, 1938 ], [ 1329, 1340 ], [ 1573, 1584 ], [ 2735, 2746 ], [ 3038, 3049 ] ], [ [ 1831, 1834 ], [ 2137, 2140 ], [ 3708, 3711 ] ], [ [ 1836, 1847 ], [ 2180, 2191 ], [ 3741, 3752 ] ], [ [ 1883, 1892 ], [ 990, 999 ], [ 2237, 2246 ] ], [ [ 1997, 2006 ], [ 2119, 2128 ] ], [ [ 2039, 2048 ], [ 2119, 2128 ] ], [ [ 2158, 2166 ] ], [ [ 2459, 2467 ], [ 2527, 2535 ] ], [ [ 2505, 2513 ], [ 2548, 2556 ], [ 2581, 2589 ], [ 2629, 2637 ] ], [ [ 2563, 2572 ], [ 2677, 2686 ], [ 2757, 2766 ], [ 1272, 1281 ], [ 1351, 1360 ] ], [ [ 2609, 2620 ], [ 2707, 2718 ], [ 3018, 3029 ], [ 1302, 1313 ], [ 1553, 1564 ] ], [ [ 2727, 2732 ], [ 2780, 2785 ], [ 2796, 2801 ], [ 2814, 2819 ], [ 3011, 3016 ] ], [ [ 2961, 2966 ], [ 3061, 3066 ] ], [ [ 2968, 2973 ], [ 3103, 3108 ], [ 3143, 3148 ], [ 3187, 3192 ] ], [ [ 2975, 2982 ], [ 3084, 3091 ] ], [ [ 3076, 3081 ], [ 3393, 3398 ], [ 3498, 3503 ], [ 3832, 3837 ], [ 1566, 1571 ], [ 2102, 2107 ], [ 2365, 2370 ], [ 3031, 3036 ] ], [ [ 3202, 3213 ], [ 3400, 3411 ], [ 1329, 1340 ], [ 1573, 1584 ], [ 2735, 2746 ], [ 3038, 3049 ] ], [ [ 3303, 3306 ], [ 3708, 3711 ], [ 2137, 2140 ] ], [ [ 3308, 3319 ], [ 3741, 3752 ], [ 2180, 2191 ] ], [ [ 3356, 3365 ], [ 990, 999 ], [ 2237, 2246 ] ], [ [ 3719, 3727 ], [ 3941, 3949 ] ] ]
# Pydifact - a python edifact library # # Copyright (c) 2019 Christian González # # Permission is hereby granted, free of charge, to any person obtaining a copy of # this software and associated documentation files (the "Software"), to deal in # the Software without restriction, including without limitation the rights to # use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. from typing import Union, List from pydifact.api import EDISyntaxError, PluginMount from pydifact.control import Characters class SegmentProvider(metaclass=PluginMount): """This is a plugin mount point for Segment plugins which represent a certain EDIFACT Segment. Classes implementing this PluginMount should provide the following attributes: """ def __str__(self): """Returns the user readable text representation of this segment.""" def validate(self) -> bool: """Validates the Segment.""" class Segment(SegmentProvider): """Represents a low-level segment of an EDI interchange. This class is used internally. read-world implementations of specialized should subclass Segment and provide the `tag` and `validate` attributes. """ # tag is not a class attribute in this case, as each Segment instance could have another tag. __omitted__ = True def __init__(self, tag: str, *elements: Union[str, List[str]]): """Create a new Segment instance. :param str tag: The code/tag of the segment. Must not be empty. :param list elements: The data elements for this segment, as (possibly empty) list. """ self.tag = tag # The data elements for this segment. # this is converted to a list (due to the fact that python creates a tuple # when passing a variable arguments list to a method) self.elements = list(elements) def __str__(self) -> str: """Returns the Segment in Python list printout""" return "'{tag}' EDI segment: {elements}".format( tag=self.tag, elements=str(self.elements) ) def __repr__(self) -> str: return "{} segment: {}".format(self.tag, str(self.elements)) def __eq__(self, other) -> bool: # FIXME the other way round too? isinstance(other, type(self))? return ( isinstance(self, type(other)) and self.tag == other.tag and list(self.elements) == list(other.elements) ) def __getitem__(self, key): return self.elements[key] def __setitem__(self, key, value): self.elements[key] = value def validate(self) -> bool: """ Segment validation. The Segment class is part of the lower level interfaces of pydifact. So it assumes that the given parameters are correct, there is no validation done here. However, in segments derived from this class, there should be validation. :return: bool True if given tag and elements are a valid EDIFACT segment, False if not. """ # FIXME: there should be a way of returning an error message - WHICH kind of validation failed. 
if not self.tag: return False return True class EDIenergySegment(Segment): def __init__(self, tag: str, *elements: Union[str, List[str]]): super().__init__(tag, *elements) def validate(self) -> bool: if not super().validate(): return False else: # TODO add validation method for EDI@Energy pass class SegmentFactory: """Factory for producing segments.""" characters = None @staticmethod def create_segment( name: str, *elements: Union[str, List[str]], validate: bool = True ) -> Segment: """Create a new instance of the relevant class type. :param name: The name of the segment :param elements: The data elements for this segment :param validate: bool if True, the created segment is validated before return """ if not SegmentFactory.characters: SegmentFactory.characters = Characters() # Basic segment type validation is done here. # The more special validation must be done in the corresponding Segment if not name: raise EDISyntaxError("The tag of a segment must not be empty.") if type(name) != str: raise EDISyntaxError( "The tag name of a segment must be a str, but is a {}: {}".format( type(name), name ) ) if not name.isalnum(): raise EDISyntaxError( "Tag '{}': A tag name must only contain alphanumeric characters.".format( name ) ) for Plugin in SegmentProvider.plugins: if getattr(Plugin, "tag", "") == name: s = Plugin(name, *elements) break else: # we don't support this kind of EDIFACT segment (yet), so # just create a generic Segment() s = Segment(name, *elements) if validate: if not s.validate(): raise EDISyntaxError( "could not create '{}' Segment. Validation failed.".format(name) ) # FIXME: characters is not used! return s
[ [ [ 1156, 1161 ], [ 2097, 2102 ], [ 4017, 4022 ], [ 4423, 4428 ] ], [ [ 1163, 1167 ], [ 2108, 2112 ], [ 4028, 4032 ], [ 4434, 4438 ] ], [ [ 1194, 1208 ], [ 5021, 5035 ], [ 5128, 5142 ], [ 5346, 5360 ], [ 5922, 5936 ] ], [ [ 1210, 1221 ], [ 1296, 1307 ] ], [ [ 1251, 1261 ], [ 4833, 4843 ] ], [ [ 1270, 1285 ], [ 1688, 1703 ], [ 5532, 5547 ] ], [ [ 1680, 1687 ], [ 3962, 3969 ], [ 4477, 4484 ], [ 5820, 5827 ] ], [ [ 3945, 3961 ] ], [ [ 4269, 4283 ], [ 4766, 4780 ], [ 4805, 4819 ] ] ]
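A brief sketch of how the classes above are typically used; the `pydifact.segments` import path is assumed from the package layout implied by the imports.

from pydifact.segments import Segment, SegmentFactory  # assumed module path

# A low-level segment built directly: a tag plus (possibly nested) data elements.
nad = Segment("NAD", "BY", ["1234567890123", "", "9"])
print(nad)            # 'NAD' EDI segment: ['BY', ['1234567890123', '', '9']]
assert nad.validate()  # base validation only checks for a non-empty tag

# The factory dispatches to a SegmentProvider plugin whose `tag` matches the
# requested name and falls back to a generic Segment otherwise.
unh = SegmentFactory.create_segment("UNH", "1", ["ORDERS", "D", "96A", "UN"])
assert unh.tag == "UNH"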
"""Utilities for reading real time clocks and keeping soft real time constraints.""" import gc import os import time import multiprocessing from common.clock import sec_since_boot # pylint: disable=no-name-in-module, import-error from selfdrive.hardware import PC, TICI # time step for each process DT_CTRL = 0.01 # controlsd DT_MDL = 0.05 # model DT_TRML = 0.5 # thermald and manager # driver monitoring if TICI: DT_DMON = 0.05 else: DT_DMON = 0.1 class Priority: # CORE 2 # - modeld = 55 # - camerad = 54 CTRL_LOW = 51 # plannerd & radard # CORE 3 # - boardd = 55 CTRL_HIGH = 53 def set_realtime_priority(level): if not PC: os.sched_setscheduler(0, os.SCHED_FIFO, os.sched_param(level)) def set_core_affinity(core): if not PC: os.sched_setaffinity(0, [core,]) def config_realtime_process(core, priority): gc.disable() set_realtime_priority(priority) set_core_affinity(core) class Ratekeeper(): def __init__(self, rate, print_delay_threshold=0.): """Rate in Hz for ratekeeping. print_delay_threshold must be nonnegative.""" self._interval = 1. / rate self._next_frame_time = sec_since_boot() + self._interval self._print_delay_threshold = print_delay_threshold self._frame = 0 self._remaining = 0 self._process_name = multiprocessing.current_process().name @property def frame(self): return self._frame @property def remaining(self): return self._remaining # Maintain loop rate by calling this at the end of each loop def keep_time(self): lagged = self.monitor_time() if self._remaining > 0: time.sleep(self._remaining) return lagged # this only monitor the cumulative lag, but does not enforce a rate def monitor_time(self): lagged = False remaining = self._next_frame_time - sec_since_boot() self._next_frame_time += self._interval if self._print_delay_threshold is not None and remaining < -self._print_delay_threshold: print("%s lagging by %.2f ms" % (self._process_name, -remaining * 1000)) lagged = True self._frame += 1 self._remaining = remaining return lagged
[ [ [ 92, 94 ], [ 856, 858 ] ], [ [ 102, 104 ], [ 663, 665 ], [ 688, 690 ], [ 703, 705 ], [ 774, 776 ] ], [ [ 112, 116 ], [ 1615, 1619 ] ], [ [ 124, 139 ], [ 1304, 1319 ] ], [ [ 166, 180 ], [ 1145, 1159 ], [ 1817, 1831 ] ], [ [ 263, 265 ], [ 655, 657 ], [ 766, 768 ] ], [ [ 267, 271 ], [ 416, 420 ] ], [ [ 303, 310 ] ], [ [ 331, 337 ] ], [ [ 354, 361 ] ], [ [ 424, 431 ] ], [ [ 447, 454 ] ], [ [ 469, 477 ] ], [ [ 616, 637 ], [ 871, 892 ] ], [ [ 732, 749 ], [ 905, 922 ] ], [ [ 813, 836 ] ], [ [ 937, 947 ] ] ]
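A small sketch of driving a loop with the Ratekeeper above; the `common.realtime` import path and the 2 ms workload are assumptions for illustration.

import time
from common.realtime import Ratekeeper, DT_CTRL  # assumed import path

def do_work():
    time.sleep(0.002)  # stand-in for a real control-step workload

rk = Ratekeeper(1. / DT_CTRL, print_delay_threshold=0.01)  # 100 Hz loop
for _ in range(500):
    do_work()
    lagged = rk.keep_time()  # sleeps off whatever is left of the 10 ms slot
    if lagged:
        print("frame %d overran its budget" % rk.frame)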
import os, json import shutil, logging import click from pyspark.sql.functions import lit, udf, explode, array, to_json from pyspark.sql.types import ArrayType, StringType, IntegerType, MapType, StructType, StructField from luna.common.CodeTimer import CodeTimer from luna.common.config import ConfigSet from luna.common.custom_logger import init_logger from luna.common.sparksession import SparkConfig from luna.common.utils import get_absolute_path from luna.pathology.common.slideviewer_client import fetch_slide_ids import luna.common.constants as const os.environ['OPENBLAS_NUM_THREADS'] = '1' def download_point_annotation(slideviewer_url, slideviewer_path, project_id, user): """Downloads point-click nuclear annotations using slideviewer API Args: slideviewer_url (string): slideviewer base url e.g. https://slideviewer-url.com slideviewer_path (string): slide path in slideviewer project_id (string): slideviewer project id user (string): username used to create the expert annotation Returns: json: point-click nuclear annotations """ from slideviewer_client import download_sv_point_annotation print (f" >>>>>>> Processing [{slideviewer_path}] <<<<<<<<") url = slideviewer_url + "/slides/" + str(user) + "@mskcc.org/projects;" + \ str(project_id) + ';' + slideviewer_path + "/getSVGLabels/nucleus" print(url) return download_sv_point_annotation(url) @click.command() @click.option('-d', '--data_config_file', default=None, type=click.Path(exists=True), help="path to yaml file containing data input and output parameters. " "See data_config.yaml.template") @click.option('-a', '--app_config_file', default='config.yaml', type=click.Path(exists=True), help="path to yaml file containing application runtime parameters. " "See config.yaml.template") def cli(data_config_file, app_config_file): """This module generates a parquet table of point-click nuclear annotation jsons. The configuration files are copied to your project/configs/table_name folder to persist the metadata used to generate the proxy table. INPUT PARAMETERS app_config_file - path to yaml file containing application runtime parameters. See config.yaml.template data_config_file - path to yaml file containing data input and output parameters. See data_config.yaml.template - ROOT_PATH: path to output data - DATA_TYPE: data type used in table name e.g. POINT_RAW_JSON - PROJECT: your project name. used in table path - DATASET_NAME: optional, dataset name to version your table - PROJECT_ID: Slideviewer project id - USERS: list of users that provide expert annotations for this project - SLIDEVIEWER_CSV_FILE: an optional path to a SlideViewer csv file to use that lists the names of the whole slide images and for which the regional annotation proxy table generator should download point annotations. If this field is left blank, then the regional annotation proxy table generator will download this file from SlideViewer. TABLE SCHEMA - slideviewer_path: path to original slide image in slideviewer platform - slide_id: id for the slide. synonymous with image_id - sv_project_id: same as the PROJECT_ID from data_config_file, refers to the SlideViewer project number. - sv_json: json annotation file downloaded from slideviewer. 
- user: username of the annotator for a given annotation - sv_json_record_uuid: hash of raw json annotation file from slideviewer, format: SVPTJSON-{json_hash} """ logger = init_logger() with CodeTimer(logger, 'generate POINT_RAW_JSON table'): logger.info('data config file: ' + data_config_file) logger.info('app config file: ' + app_config_file) # load configs cfg = ConfigSet(name=const.DATA_CFG, config_file=data_config_file) cfg = ConfigSet(name=const.APP_CFG, config_file=app_config_file) # copy app and data configuration to destination config dir config_location = const.CONFIG_LOCATION(cfg) os.makedirs(config_location, exist_ok=True) shutil.copy(app_config_file, os.path.join(config_location, "app_config.yaml")) shutil.copy(data_config_file, os.path.join(config_location, "data_config.yaml")) logger.info("config files copied to %s", config_location) create_proxy_table() def create_proxy_table(): """Create a proxy table of point annotation json files downloaded from the SlideViewer API Each row of the table is a point annotation json created by a user for a slide. Returns: None """ cfg = ConfigSet() logger = logging.getLogger(__name__) spark = SparkConfig().spark_session(config_name=const.APP_CFG, app_name="luna.pathology.point_annotation.proxy_table.generate") # load paths from configs point_table_path = const.TABLE_LOCATION(cfg) PROJECT_ID = cfg.get_value(path=const.DATA_CFG+'::PROJECT_ID') SLIDEVIEWER_URL = cfg.get_value(path=const.DATA_CFG+'::SLIDEVIEWER_URL') # Get slide list to use # Download CSV file in the project configs dir slides = fetch_slide_ids(SLIDEVIEWER_URL, PROJECT_ID, const.CONFIG_LOCATION(cfg), cfg.get_value(path=const.DATA_CFG+'::SLIDEVIEWER_CSV_FILE')) logger.info(slides) schema = StructType([StructField("slideviewer_path", StringType()), StructField("slide_id", StringType()), StructField("sv_project_id", IntegerType()) ]) df = spark.createDataFrame(slides, schema) # populate columns df = df.withColumn("users", array([lit(user) for user in cfg.get_value(const.DATA_CFG+'::USERS')])) df = df.select("slideviewer_path", "slide_id", "sv_project_id", explode("users").alias("user")) # download slide point annotation jsons # example point json: # [{"project_id":"8","image_id":"123.svs","label_type":"nucleus","x":"1440","y":"747","class":"0","classname":"Tissue 1"},{"project_id":"8","image_id":"123.svs","label_type":"nucleus","x":"1424","y":"774","class":"3","classname":"Tissue 4"}] point_json_struct = ArrayType( MapType(StringType(), StringType()) ) spark.sparkContext.addPyFile(get_absolute_path(__file__, "../../common/slideviewer_client.py")) download_point_annotation_udf = udf(download_point_annotation, point_json_struct) df = df.withColumn("sv_json", download_point_annotation_udf(lit(SLIDEVIEWER_URL), "slideviewer_path", "sv_project_id", "user"))\ .cache() # drop empty jsons that may have been created df = df.dropna(subset=["sv_json"]) # populate "date_added", "date_updated","latest", "sv_json_record_uuid" spark.sparkContext.addPyFile(get_absolute_path(__file__, "../../common/EnsureByteContext.py")) spark.sparkContext.addPyFile(get_absolute_path(__file__, "../../common/utils.py")) from luna.common.utils import generate_uuid_dict sv_json_record_uuid_udf = udf(generate_uuid_dict, StringType()) df = df.withColumn("sv_json_record_uuid", sv_json_record_uuid_udf(to_json("sv_json"), array(lit("SVPTJSON")))) df.show(10, False) df.write.format("parquet").mode("overwrite").save(point_table_path) if __name__ == "__main__": cli()
[ [ [ 8, 10 ], [ 562, 564 ], [ 4163, 4165 ], [ 4245, 4247 ], [ 4333, 4335 ] ], [ [ 12, 16 ] ], [ [ 24, 30 ], [ 4216, 4222 ], [ 4303, 4309 ] ], [ [ 32, 39 ], [ 4759, 4766 ] ], [ [ 48, 53 ], [ 1461, 1466 ], [ 1478, 1483 ], [ 1538, 1543 ], [ 1701, 1706 ], [ 1769, 1774 ] ], [ [ 88, 91 ], [ 5768, 5771 ], [ 6610, 6613 ], [ 7266, 7269 ] ], [ [ 93, 96 ], [ 6471, 6474 ], [ 7131, 7134 ] ], [ [ 98, 105 ], [ 5901, 5908 ] ], [ [ 107, 112 ], [ 5761, 5766 ], [ 7260, 7265 ] ], [ [ 114, 121 ], [ 7240, 7247 ] ], [ [ 152, 161 ], [ 6274, 6283 ] ], [ [ 163, 173 ], [ 5483, 5493 ], [ 5547, 5557 ], [ 6301, 6311 ], [ 6315, 6325 ], [ 7155, 7165 ] ], [ [ 175, 186 ], [ 5616, 5627 ] ], [ [ 188, 195 ], [ 6293, 6300 ] ], [ [ 197, 207 ], [ 5439, 5449 ] ], [ [ 209, 220 ], [ 5451, 5462 ], [ 5523, 5534 ], [ 5587, 5598 ] ], [ [ 256, 265 ], [ 3688, 3697 ] ], [ [ 297, 306 ], [ 3898, 3907 ], [ 3973, 3982 ], [ 4734, 4743 ] ], [ [ 345, 356 ], [ 3664, 3675 ] ], [ [ 394, 405 ], [ 4800, 4811 ] ], [ [ 436, 453 ], [ 6368, 6385 ], [ 6895, 6912 ], [ 6994, 7011 ] ], [ [ 507, 522 ], [ 5238, 5253 ] ], [ [ 530, 560 ], [ 3913, 3918 ], [ 3988, 3993 ], [ 4128, 4133 ], [ 4840, 4845 ], [ 4974, 4979 ], [ 5037, 5042 ], [ 5109, 5114 ], [ 5283, 5288 ], [ 5359, 5364 ], [ 5804, 5809 ] ], [ [ 609, 634 ], [ 6475, 6500 ] ], [ [ 1928, 1931 ], [ 7414, 7417 ] ], [ [ 4486, 4504 ], [ 4459, 4477 ] ] ]
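For context on the sv_json column described in the docstring above: the "example point json" comment in the module shows what a point-click nucleus annotation looks like once downloaded from SlideViewer. The short sketch below parses such a payload with plain Python to count points per class; the sample string is copied from that comment, and the count_by_class helper is purely illustrative, not part of the luna codebase.

import json
from collections import Counter

# Sample payload copied from the "example point json" comment above (two nucleus points).
raw_sv_json = (
    '[{"project_id":"8","image_id":"123.svs","label_type":"nucleus",'
    '"x":"1440","y":"747","class":"0","classname":"Tissue 1"},'
    '{"project_id":"8","image_id":"123.svs","label_type":"nucleus",'
    '"x":"1424","y":"774","class":"3","classname":"Tissue 4"}]'
)

def count_by_class(point_json):
    # Illustrative helper: tally annotated points per classname.
    return Counter(point["classname"] for point in json.loads(point_json))

print(count_by_class(raw_sv_json))  # Counter({'Tissue 1': 1, 'Tissue 4': 1})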
# Copyright 2021 Supun Nakandala. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== import os import sys import numpy as np import tensorflow as tf import pandas as pd import random import math import argparse sys.path.append('./') from commons import cnn_bi_lstm_model, input_iterator # Setting random seeds tf.random.set_random_seed(2019) random.seed(2019) np.random.seed(2019) def get_train_ops(y, logits, learning_rate, n_classes, class_weights): y = tf.reshape(y, [-1]) logits = tf.reshape(logits, [-1, n_classes]) balanced_accuracy, update_op = tf.metrics.mean_per_class_accuracy(y, tf.argmax(logits, 1), n_classes) y = tf.reshape(tf.one_hot(y, depth=n_classes, axis=1), [-1, n_classes]) loss = tf.reduce_sum(tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits, labels=y) * tf.reduce_sum(tf.constant(class_weights, dtype=tf.float32) * y, axis=1)) optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate) train_op = optimizer.minimize(loss) return train_op, update_op, balanced_accuracy, loss def window_generator(data_root, win_size_10s, subject_ids): x_segments = []; y_segments = [] for subject_id in subject_ids: for x_seq, _, y_seq in input_iterator(data_root, subject_id, train=True): x_window = []; y_window = [] for x,y in zip(x_seq, y_seq): x_window.append(x) y_window.append(y) if len(y_window) == win_size_10s: yield np.stack(x_window, axis=0), np.stack(y_window, axis=0) x_window = []; y_window = [] if __name__ == "__main__": parser = argparse.ArgumentParser(description='Argument parser for training CNN model.') optional_arguments = parser._action_groups.pop() required_arguments = parser.add_argument_group('required arguments') required_arguments.add_argument('--pre-processed-dir', help='Pre-processed data directory', required=True) optional_arguments.add_argument('--transfer-learning-model', help='Transfer learning model name (default: CHAP_ALL_ADULTS)', default=None, required=False, choices=['CHAP_ALL_ADULTS']) optional_arguments.add_argument('--learning-rate', help='Learning rate for training the model (default: 0.0001)', default=1e-4, type=float, required=False) optional_arguments.add_argument('--num-epochs', help='Number of epochs to train the model (default: 15)', default=15, type=int, required=False) optional_arguments.add_argument('--batch-size', help='Training batch size (default: 16)', default=16, type=int, required=False) optional_arguments.add_argument('--amp-factor', help='Factor to increase the number of neurons in the CNN layers (default: 2)', default=2, type=int, required=False) optional_arguments.add_argument('--cnn-window-size', help='CNN window size in seconds on which the predictions to be made (default: 10)', default=10, type=int, required=False) optional_arguments.add_argument('--bi-lstm-window-size', help='BiLSTM window size in minutes on which the predictions to be smoothed (default: 7)', default=7, type=int, required=False) 
optional_arguments.add_argument('--shuffle-buffer-size', help='Training data shuffle buffer size in terms of number of records (default: 10000)', default=10000, type=int, required=False) optional_arguments.add_argument('--training-data-fraction', help='Percentage of subjects to be used for training (default: 60)', default=60, type=int, required=False) optional_arguments.add_argument('--validation-data-fraction', help='Percentage of subjects to be used for validation (default: 20)', default=20, type=int, required=False) optional_arguments.add_argument('--testing-data-fraction', help='Percentage of subjects to be used for testing (default: 20)', default=20, type=int, required=False) optional_arguments.add_argument('--model-checkpoint-path', help='Path where the trained model will be saved (default: ./model-checkpoint)', default='./model-checkpoint', required=False) optional_arguments.add_argument('--num-classes', help='Number of classes in the training dataset (default: 2)', default=2, type=int, required=False) optional_arguments.add_argument('--class-weights', help='Class weights for loss aggregation (default: [1.0, 1.0])', default='[1.0, 1.0]', required=False) optional_arguments.add_argument('--down-sample-frequency', help='Downsample frequency in Hz for GT3X data (default: 10)', default=10, type=int, required=False) optional_arguments.add_argument('--silent', help='Whether to hide info messages', default=False, required=False, action='store_true') parser._action_groups.append(optional_arguments) args = parser.parse_args() if os.path.exists(args.model_checkpoint_path): raise Exception('Model checkpoint: {} already exists.'.format(args.model_checkpoint_path)) if args.transfer_learning_model: if args.transfer_learning_model == 'CHAP_ALL_ADULTS': args.amp_factor = 2 args.cnn_window_size = 10 args.bi_lstm_win_size = 7 else: raise Exception('Unsupported transfer learning model: {}'.format(args.transfer_learning_model)) assert (args.training_data_fraction + args.validation_data_fraction + args.testing_data_fraction) == 100, 'Train, validation,test split fractions should add up to 100%' subject_ids = [fname.split('.')[0] for fname in os.listdir(args.pre_processed_dir)] random.shuffle(subject_ids) n_train_subjects = int(math.ceil(len(subject_ids) * args.training_data_fraction / 100.)) train_subjects = subject_ids[:n_train_subjects] subject_ids = subject_ids[n_train_subjects:] test_frac = args.testing_data_fraction / (100.0 - args.training_data_fraction) * 100 n_test_subjects = int(math.ceil(len(subject_ids) * test_frac / 100.)) test_subjects = subject_ids[:n_test_subjects] valid_subjects = subject_ids[n_test_subjects:] output_shapes = ((args.bi_lstm_window_size*(60//args.cnn_window_size), args.cnn_window_size*args.down_sample_frequency, 3), (args.bi_lstm_window_size*(60//args.cnn_window_size))) bi_lstm_win_size = 60//args.down_sample_frequency * args.bi_lstm_window_size train_dataset = tf.data.Dataset.from_generator(lambda: window_generator(args.pre_processed_dir, bi_lstm_win_size, train_subjects),output_types=(tf.float32, tf.int32), output_shapes=output_shapes).shuffle(args.shuffle_buffer_size).batch(args.batch_size).prefetch(10) valid_dataset = tf.data.Dataset.from_generator(lambda: window_generator(args.pre_processed_dir, bi_lstm_win_size, valid_subjects),output_types=(tf.float32, tf.int32), output_shapes=output_shapes).batch(args.batch_size).prefetch(10) test_dataset = tf.data.Dataset.from_generator(lambda: window_generator(args.pre_processed_dir, bi_lstm_win_size, 
test_subjects),output_types=(tf.float32, tf.int32), output_shapes=output_shapes).batch(args.batch_size).prefetch(10) iterator = tf.data.Iterator.from_structure(train_dataset.output_types, train_dataset.output_shapes) train_init_op = iterator.make_initializer(train_dataset) valid_init_op = iterator.make_initializer(valid_dataset) test_init_op = iterator.make_initializer(test_dataset) x, y = iterator.get_next() x = tf.reshape(x, [-1, args.cnn_window_size*args.down_sample_frequency, 3, 1]) x = tf.identity(x, name='input') y = tf.reshape(y, [-1, bi_lstm_win_size]) learning_rate = tf.placeholder(tf.float32) logits = cnn_bi_lstm_model(x, args.amp_factor, bi_lstm_win_size, args.num_classes) output = tf.argmax(tf.reshape(logits, [-1, args.num_classes]), axis=1, name='output') prediction = tf.identity(tf.argmax(logits, axis=1), name='prediction') class_weights = eval(args.class_weights) train_op, update_op, balanced_accuracy, loss = get_train_ops(y, logits, learning_rate, args.num_classes, class_weights) with tf.Session() as sess: sess.run(tf.global_variables_initializer()) if args.transfer_learning_model: ckpt_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'pre-trained-models', '{}_CKPT'.format(args.transfer_learning_model), 'model') # Weights for the final classification layer (dense) are ignored variables = [v for v in tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES) if not v.name.startswith('dense/')] restorer = tf.train.Saver(variables) restorer.restore(sess, ckpt_path) if not args.silent: print('Training subjects: {}'.format(train_subjects)) print('Validation subjects: {}'.format(valid_subjects)) print('Testing subjects: {}'.format(test_subjects)) for epoch in range(args.num_epochs): for label, init_op, subjects in zip(["Train", "Validation", "Test"], [train_init_op, valid_init_op, test_init_op], [train_subjects, valid_subjects, test_subjects]): sess.run(tf.local_variables_initializer()) sess.run(init_op) losses = [] while True: try: if label == "Train": _, _, l = sess.run([train_op, update_op, loss], feed_dict={learning_rate: args.learning_rate}) elif label == "Validation": _, l = sess.run([update_op, loss]) elif label == "Test": _, l = sess.run([update_op, loss]) losses.append(l) except tf.errors.OutOfRangeError: if not args.silent: ba = sess.run(balanced_accuracy) print("Epoch: %d, %s Loss: %f, Balanced Accuracy: %f" %(epoch, label, sum(losses), ba)) break if not os.path.exists(args.model_checkpoint_path): os.makedirs(args.model_checkpoint_path) tf.saved_model.simple_save(sess, os.path.join(args.model_checkpoint_path, 'CUSTOM_MODEL'), inputs={"input": x}, outputs={"output": output}) if not args.silent: print('Model saved in path: {}'.format(args.model_checkpoint_path))
[ [ [ 690, 692 ], [ 5338, 5340 ], [ 6046, 6048 ], [ 8749, 8751 ], [ 8762, 8764 ], [ 8778, 8780 ], [ 10581, 10583 ], [ 10637, 10639 ], [ 10719, 10721 ] ], [ [ 700, 703 ], [ 810, 813 ] ], [ [ 711, 722 ], [ 960, 962 ], [ 2093, 2095 ], [ 2121, 2123 ] ], [ [ 730, 746 ], [ 910, 912 ], [ 6863, 6865 ], [ 6991, 6993 ], [ 7003, 7005 ], [ 7149, 7151 ], [ 7277, 7279 ], [ 7289, 7291 ], [ 7400, 7402 ], [ 7527, 7529 ], [ 7539, 7541 ], [ 7652, 7654 ], [ 7967, 7969 ], [ 8050, 8052 ], [ 8087, 8089 ], [ 8146, 8148 ], [ 8161, 8163 ], [ 8273, 8275 ], [ 8283, 8285 ], [ 8367, 8369 ], [ 8379, 8381 ], [ 8609, 8611 ], [ 8648, 8650 ], [ 8999, 9001 ], [ 9017, 9019 ], [ 9107, 9109 ], [ 9678, 9680 ], [ 10287, 10289 ], [ 10686, 10688 ], [ 1061, 1063 ], [ 1094, 1096 ], [ 1165, 1167 ], [ 1203, 1205 ], [ 1244, 1246 ], [ 1255, 1257 ], [ 1324, 1326 ], [ 1338, 1340 ], [ 1408, 1410 ], [ 1422, 1424 ], [ 1455, 1457 ], [ 1498, 1500 ] ], [ [ 754, 766 ] ], [ [ 774, 780 ], [ 942, 948 ], [ 6086, 6092 ] ], [ [ 788, 792 ], [ 6142, 6146 ], [ 6425, 6429 ] ], [ [ 800, 808 ], [ 2239, 2247 ] ], [ [ 852, 869 ], [ 8186, 8203 ] ], [ [ 871, 885 ], [ 1812, 1826 ] ], [ [ 986, 999 ], [ 8526, 8539 ] ], [ [ 1653, 1669 ], [ 6902, 6918 ], [ 7188, 7204 ], [ 7439, 7455 ] ], [ [ 2230, 2236 ], [ 2343, 2349 ], [ 2396, 2402 ], [ 5250, 5256 ], [ 5310, 5316 ] ], [ [ 2322, 2340 ], [ 2560, 2578 ], [ 2748, 2766 ], [ 2908, 2926 ], [ 3056, 3074 ], [ 3193, 3211 ], [ 3362, 3380 ], [ 3542, 3560 ], [ 3736, 3754 ], [ 3927, 3945 ], [ 4098, 4116 ], [ 4273, 4291 ], [ 4442, 4460 ], [ 4637, 4655 ], [ 4790, 4808 ], [ 4948, 4966 ], [ 5112, 5130 ], [ 5279, 5297 ] ], [ [ 2375, 2393 ], [ 2448, 2466 ] ], [ [ 5303, 5307 ], [ 5353, 5357 ], [ 5452, 5456 ], [ 5489, 5493 ], [ 5530, 5534 ], [ 5593, 5597 ], [ 5625, 5629 ], [ 5663, 5667 ], [ 5780, 5784 ], [ 5828, 5832 ], [ 5858, 5862 ], [ 5890, 5894 ], [ 6057, 6061 ], [ 6171, 6175 ], [ 6326, 6330 ], [ 6364, 6368 ], [ 6601, 6605 ], [ 6631, 6635 ], [ 6654, 6658 ], [ 6675, 6679 ], [ 6708, 6712 ], [ 6738, 6742 ], [ 6789, 6793 ], [ 6818, 6822 ], [ 7067, 7071 ], [ 7099, 7103 ], [ 7351, 7355 ], [ 7601, 7605 ], [ 7986, 7990 ], [ 8007, 8011 ], [ 8207, 8211 ], [ 8242, 8246 ], [ 8307, 8311 ], [ 8451, 8455 ], [ 8566, 8570 ], [ 8695, 8699 ], [ 8846, 8850 ], [ 9203, 9207 ], [ 9442, 9446 ], [ 9974, 9978 ], [ 10345, 10349 ], [ 10596, 10600 ], [ 10649, 10653 ], [ 10732, 10736 ], [ 10842, 10846 ], [ 10906, 10910 ], [ 6919, 6923 ], [ 7205, 7209 ], [ 7456, 7460 ] ], [ [ 5998, 6009 ], [ 6101, 6112 ], [ 6156, 6167 ], [ 6229, 6240 ], [ 6278, 6289 ] ], [ [ 6119, 6135 ], [ 6242, 6258 ], [ 6290, 6306 ] ], [ [ 6212, 6226 ], [ 9265, 9279 ], [ 9604, 9618 ], [ 6961, 6975 ] ], [ [ 6264, 6275 ], [ 6439, 6450 ], [ 6493, 6504 ], [ 6544, 6555 ] ], [ [ 6314, 6323 ], [ 6454, 6463 ] ], [ [ 6403, 6418 ], [ 6506, 6521 ], [ 6556, 6571 ] ], [ [ 6477, 6490 ], [ 9398, 9411 ], [ 9636, 9649 ], [ 7498, 7511 ] ], [ [ 6527, 6541 ], [ 9333, 9347 ], [ 9620, 9634 ], [ 7247, 7261 ] ], [ [ 6583, 6596 ], [ 7044, 7057 ], [ 7330, 7343 ], [ 7580, 7593 ] ], [ [ 6766, 6782 ], [ 8106, 8122 ], [ 8224, 8240 ], [ 6943, 6959 ], [ 7229, 7245 ], [ 7480, 7496 ] ], [ [ 6847, 6860 ], [ 7684, 7697 ], [ 7712, 7725 ], [ 7788, 7801 ] ], [ [ 7133, 7146 ], [ 7849, 7862 ] ], [ [ 7385, 7397 ], [ 7909, 7921 ] ], [ [ 7640, 7648 ], [ 7762, 7770 ], [ 7823, 7831 ], [ 7883, 7891 ], [ 7934, 7942 ] ], [ [ 7746, 7759 ], [ 9558, 9571 ] ], [ [ 7807, 7820 ], [ 9573, 9586 ] ], [ [ 7868, 7880 ], [ 9588, 9600 ] ], [ [ 7927, 7928 ], [ 7978, 7979 ] ], [ [ 7930, 7931 ], [ 8098, 8099 ] ], [ [ 7963, 7964 ], [ 8062, 8063 ] ], [ [ 
8046, 8047 ], [ 8204, 8205 ], [ 10794, 10795 ] ], [ [ 8083, 8084 ], [ 8540, 8541 ] ], [ [ 8130, 8143 ], [ 8551, 8564 ], [ 9959, 9972 ] ], [ [ 8177, 8183 ], [ 8294, 8300 ], [ 8389, 8395 ], [ 8543, 8549 ] ], [ [ 8264, 8270 ], [ 10817, 10823 ] ], [ [ 8354, 8364 ] ], [ [ 8430, 8443 ], [ 8584, 8597 ] ], [ [ 8479, 8487 ], [ 9920, 9928 ] ], [ [ 8489, 8498 ], [ 9930, 9939 ], [ 10092, 10101 ], [ 10201, 10210 ] ], [ [ 8500, 8517 ], [ 10400, 10417 ] ], [ [ 8519, 8523 ], [ 9941, 9945 ], [ 10103, 10107 ], [ 10212, 10216 ] ], [ [ 8625, 8629 ], [ 8639, 8643 ], [ 9162, 9166 ], [ 9669, 9673 ], [ 9728, 9732 ], [ 9910, 9914 ], [ 10082, 10086 ], [ 10191, 10195 ], [ 10391, 10395 ], [ 10713, 10717 ] ], [ [ 8737, 8746 ], [ 9168, 9177 ] ], [ [ 8975, 8984 ], [ 9122, 9131 ] ], [ [ 9096, 9104 ], [ 9145, 9153 ] ], [ [ 9427, 9432 ], [ 10503, 10508 ] ], [ [ 9476, 9481 ], [ 9854, 9859 ], [ 10024, 10029 ], [ 10139, 10144 ], [ 10510, 10515 ] ], [ [ 9483, 9490 ], [ 9737, 9744 ] ], [ [ 9492, 9500 ] ], [ [ 9762, 9768 ], [ 10243, 10249 ], [ 10521, 10527 ] ], [ [ 9900, 9901 ] ], [ [ 9903, 9904 ] ], [ [ 9906, 9907 ], [ 10257, 10258 ] ], [ [ 10075, 10076 ] ], [ [ 10078, 10079 ], [ 10257, 10258 ] ], [ [ 10184, 10185 ] ], [ [ 10187, 10188 ], [ 10257, 10258 ] ], [ [ 10386, 10388 ], [ 10530, 10532 ] ] ]
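A note on the windowing arithmetic in the training script above: output_shapes is derived from three CLI options, and the numbers are easier to follow with the defaults plugged in. The sketch below recomputes the shapes with the default values (10 s CNN windows, 10 Hz downsampled data, 7 min BiLSTM windows); it is only a worked example of that expression, not part of the training code.

# Defaults taken from the argparse definitions in the script above (assumed unchanged).
cnn_window_size = 10        # seconds covered by one CNN prediction window
down_sample_frequency = 10  # Hz of the downsampled accelerometer stream
bi_lstm_window_size = 7     # minutes smoothed by one BiLSTM window

windows_per_sequence = bi_lstm_window_size * (60 // cnn_window_size)  # 7 * 6 = 42 CNN windows
samples_per_window = cnn_window_size * down_sample_frequency          # 10 * 10 = 100 samples

# Mirrors the output_shapes tuple passed to tf.data.Dataset.from_generator:
x_shape = (windows_per_sequence, samples_per_window, 3)  # (42, 100, 3) tri-axial input
y_shape = (windows_per_sequence,)                        # (42,) one label per CNN window
print(x_shape, y_shape)  # (42, 100, 3) (42,)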
# labplus mPython-box library # MIT license; Copyright (c) 2018 labplus # mpython-box buildin periphers drivers # history: # V1.0 zhaohuijiang from machine import Pin, UART import time import ujson from time import sleep_ms, sleep_us, sleep # touchpad class BS8112A(object): """ """ def __init__(self, i2c): self.addr = 80 # config self._i2c = i2c self.config = [0xB0, 0x00, 0x00, 0x83, 0xf3, 0x98, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x00] checksum = 0 for i in range(1, 19): checksum += self.config[i] checksum &= 0xff self.config[18] = checksum # print(self.config[18]) retry = 0 if (retry < 5): try: self._i2c.writeto(self.addr, bytearray(self.config), True) return except: retry = retry + 1 else: raise Exception("bs8112a i2c read/write error!") # i2c.writeto(self.addr, b'\xB0', False) # time.sleep_ms(10) # print(i2c.readfrom(self.addr, 17, True)) # key map: # value bit7 bit6 bit5 bit4 bit3 bit2 bit1 bit0 # bs8112a key Key8 Key7 Key6 Key5 Key4 Key3 Key2 Key1 # mpython key N O H T Y P def key_value(self): retry = 0 if (retry < 5): try: self._i2c.writeto(self.addr, b'\x08', False) time.sleep_ms(10) value = self._i2c.readfrom(self.addr, 1, True) time.sleep_ms(10) return value except: retry = retry + 1 else: raise Exception("bs8112a i2c read/write error!") class Codec_mode(): ES_MODULE_ADC_DAC = 0x00 ES_MODULE_DAC = 0x01 ES_MODULE_ADC = 0x02 class Es8388(): """ """ def __init__(self, i2c, adc_volume=0, dac_volume=0, volume=65): self._i2c = i2c self.addr = 16 self.adc_volume = adc_volume self.dac_volume = dac_volume self.volume = volume self.set_voice_mute(1) retry = 0 if (retry < 5): try: # i2c.writeto(self.addr, bytearray([0x19, 0x04])) # ES8388_DACCONTROL3 0x04 mute/0x00 unmute&ramp;DAC unmute and disabled digital volume control soft ramp # Chip Control and Power Management self._i2c.writeto(self.addr, bytearray( [0x01, 0x50])) # ES8388_CONTROL2 0x40? # ES8388_CHIPPOWER normal all and power up all self._i2c.writeto(self.addr, bytearray([0x02, 0x00])) # ES8388_MASTERMODE CODEC IN I2S SLAVE MODE 0x00: slave self._i2c.writeto(self.addr, bytearray([0x08, 0x00])) # dac setup # ES8388_DACPOWER . disable DAC and disable Lout/Rout/1/2 self._i2c.writeto(self.addr, bytearray([0x04, 0xC0])) # ES8388_CONTROL1. 
Enfr=0,Play&Record Mode,(0x17-both of mic&paly) self._i2c.writeto(self.addr, bytearray([0x00, 0x12])) # ES8388_DACCONTROL1 1a 0x18:16bit iis , 0x00:24 self._i2c.writeto(self.addr, bytearray([0x17, 0x18])) # ES8388_DACCONTROL2 DACFsMode,SINGLE SPEED; DACFsRatio,256 self._i2c.writeto(self.addr, bytearray([0x18, 0x02])) # ES8388_DACCONTROL16 0x00 audio on LIN1&RIN1, 0x09 LIN2&RIN2 self._i2c.writeto(self.addr, bytearray([0x26, 0x00])) # ES8388_DACCONTROL17 only left DAC to left mixer enable 0db self._i2c.writeto(self.addr, bytearray([0x27, 0x90])) # ES8388_DACCONTROL20 only right DAC to right mixer enable 0db self._i2c.writeto(self.addr, bytearray([0x2a, 0x90])) # ES8388_DACCONTROL21 set internal ADC and DAC use the same LRCK clock, ADC LRCK as internal LRCK self._i2c.writeto(self.addr, bytearray([0x2b, 0x80])) # ES8388_DACCONTROL23 vroi=0 self._i2c.writeto(self.addr, bytearray([0x2d, 0x00])) self.set_adc_dac_volume( Codec_mode.ES_MODULE_DAC, self.dac_volume, 0) # 0db # ES8388_DACPOWER 0x3c Enable DAC and Enable Lout/Rout/1/2 self._i2c.writeto(self.addr, bytearray([0x04, 0x3c])) # adc setup self._i2c.writeto(self.addr, bytearray( [0x03, 0xff])) # ES8388_ADCPOWER # ES8388_ADCCONTROL1 MIC Left and Right channel PGA gain self._i2c.writeto(self.addr, bytearray([0x09, 0xbb])) # ES8388_ADCCONTROL2 0x00 LINSEL & RINSEL, LIN1/RIN1 as ADC Input; DSSEL,use one DS Reg11; DSR, LINPUT1-RINPUT1 self._i2c.writeto(self.addr, bytearray([0x0a, 0x00])) # ES8388_ADCCONTROL3 clock input self._i2c.writeto(self.addr, bytearray([0x0b, 0x02])) # ES8388_ADCCONTROL4 Left/Right data, Left/Right justified mode, Bits length 16bit, I2S format 0x0c? self._i2c.writeto(self.addr, bytearray([0x0c, 0x0c])) # ES8388_ADCCONTROL5 ADCFsMode,singel SPEED,RATIO=256 self._i2c.writeto(self.addr, bytearray([0x0d, 0x02])) # ALC for Microphone self.set_adc_dac_volume( Codec_mode.ES_MODULE_ADC, self.adc_volume, 0) # 0db # ES8388_ADCPOWER Power on ADC, Enable LIN&RIN, Power off MICBIAS, set int1lp to low power mode self._i2c.writeto(self.addr, bytearray([0x03, 0x09])) # set volume self.set_volume(self.volume) self.set_voice_mute(0) # test # for i in range(0, 52): # i2c.writeto(self.addr, bytearray([i])) # print("%d: %d" % (i, i2c.readfrom(self.addr, 1)[0])) return except: retry = retry + 1 else: raise Exception("es8388 i2c read/write error!") def deinit(self): retry = 0 if (retry < 5): try: # ES8388_CHIPPOWER reset and stop es838 self._i2c.writeto(self.addr, bytearray([0x02, 0xff])) return except: retry = retry + 1 else: raise Exception("bs8112a i2c read/write error!") def set_adc_dac_volume(self, mode, volume, dot): _volume = volume if (_volume < -96): _volume = -96 else: _volume = 0 _dot = 0 if dot >= 5: _dot = 1 _volume = (-_volume << 1) + _dot retry = 0 if (retry < 5): try: if (mode == Codec_mode.ES_MODULE_ADC or mode == Codec_mode.ES_MODULE_ADC_DAC): self._i2c.writeto(self.addr, bytearray( [0x10, _volume])) # ES8388_ADCCONTROL8 self._i2c.writeto(self.addr, bytearray( [0x11, _volume])) # ES8388_ADCCONTROL9 if (mode == Codec_mode.ES_MODULE_DAC or mode == Codec_mode.ES_MODULE_ADC_DAC): self._i2c.writeto(self.addr, bytearray( [0x1b, _volume])) # ES8388_DACCONTROL5 self._i2c.writeto(self.addr, bytearray( [0x1a, _volume])) # ES8388_DACCONTROL4 return except: retry = retry + 1 else: raise Exception("bs8112a i2c read/write error!") def set_volume(self, volume): self.volume = volume if (self.volume < 0): self.volume = 0 elif (self.volume > 100): self.volume = 100 retry = 0 if (retry < 5): try: self._i2c.writeto(self.addr, bytearray( [0x2e, 
self.volume//3]))  # ES8388_DACCONTROL24
                self._i2c.writeto(self.addr, bytearray(
                    [0x2f, self.volume//3]))  # ES8388_DACCONTROL25
                self._i2c.writeto(self.addr, bytearray(
                    [0x30, 0]))  # ES8388_DACCONTROL26
                self._i2c.writeto(self.addr, bytearray(
                    [0x31, 0]))  # ES8388_DACCONTROL27
                # print("volume L: %d" % (self.volume//3))
                return
            except:
                retry = retry + 1
        else:
            raise Exception("bs8112a i2c read/write error!")

    def get_volume(self):
        return self.volume

    def set_voice_mute(self, mute):
        retry = 0
        if (retry < 5):
            try:
                self._i2c.writeto(self.addr, b'\x19')
                dac_ctr3 = self._i2c.readfrom(self.addr, 1)[0]
                if(mute):
                    dac_ctr3 |= 0x04
                else:
                    dac_ctr3 &= 0xFB
                self._i2c.writeto(self.addr, bytearray([0x19, dac_ctr3]))
            except:
                retry = retry + 1
        else:
            raise Exception("bs8112a i2c read/write error!")


uart2 = UART(2, baudrate=1152000, rx=Pin.P8, tx=Pin.P23, timeout=50, timeout_char=1024, rxbuf=2048, txbuf=2048)


class K210Error(Exception):
    """K210 exception class."""
    pass


class blob():
    def __init__(self,*args):
        self.dict = args[0]
    def __repr__(self):
        return self.dict
    def x(self):
        return self.dict['x']
    def y(self):
        return self.dict['y']
    def w(self):
        return self.dict['w']
    def h(self):
        return self.dict['h']
    def rect(self):
        return(self.dict['x'], self.dict['y'], self.dict['w'], self.dict['h'])
    def pixels(self):
        return self.dict['pixels']
    def cx(self):
        return self.dict['cx']
    def cy(self):
        return self.dict['cy']
    def rotation(self):
        return self.dict['rotation']
    def code(self):
        return self.dict['code']
    def count(self):
        return self.dict['count']


class K210():
    def __init__(self):
        t1 = time.ticks_ms()
        while (time.ticks_diff(time.ticks_ms(), t1) < 10000):
            # test whether the K210 has initialized by sending a GET_KEYS command
            rsp = self.send_cmd({'GET_KEYS': 0})
            if rsp is not None:
                return
        raise K210Error("K210 init failed!")

    def send_cmd(self, command, wait=True, timeout=200):
        json_stream = ujson.dumps(command)
        uart2.write(json_stream + '\n')
        # print("UART_Send:%s" % (json_stream + '\n'))
        t1 = time.ticks_ms()
        while wait:
            if uart2.any() > 0:
                r=None
                r = uart2.readline()
                r= r.strip()
                while uart2.readline():
                    pass
                # print("UART_Recv:%s" % r)
                try:
                    rsp = ujson.loads(r)
                except Exception as e:
                    print(e)
                    break
                else:
                    if rsp and isinstance(rsp, dict):
                        for key, value in rsp.items():
                            if key == 'ERROR':
                                raise K210Error(value)
                            if key == 'RESP':
                                return value
            if time.ticks_diff(time.ticks_ms(), t1) > timeout:
                # raise K210Error("k210 not responding!")
                return None

    def get_key(self):
        return self.send_cmd({'GET_KEYS': 0})

    def get_distance(self):
        resp = self.send_cmd({'GET_DISTANCE': 0})
        if resp is None:
            resp = 340
        return resp

    def set_cam_led(self, on_off):
        return self.send_cmd({'SET_CAM_LED': on_off})

    def set_motor(self, speed):
        return self.send_cmd({'SET_MOTOR': speed})

    def file_open(self, *args):
        return self.send_cmd({'FILE_OPEN': args})

    def file_read(self, *args):
        return self.send_cmd({'FILE_READ': args[0]},timeout=300)

    def file_write(self, *args):
        return self.send_cmd({'FILE_WRITE': args[0]},timeout=300)

    def file_close(self):
        return self.send_cmd({'FILE_CLOSE': 0})

    def reset(self):
        self.send_cmd({'RESET': 0},False)

    def select_model(self, *args):
        self.send_cmd({'SELE_MOD': args[0]}, timeout=3000)

    def load_model(self, **kws):
        self.send_cmd({'LOD_MOD': kws}, timeout=3000)

    def detect_yolo(self):
        return self.send_cmd({'DET_YO': 0})

    def predict_net(self):
        return self.send_cmd({'PRE_NET': 0})

    def deinit_yolo(self):
        return self.send_cmd({'DINT_YO': 0})

    def deinit_net(self):
        return self.send_cmd({'DINT_NET': 0})

    def camera_snapshot(self):
        return self.send_cmd({'SNAPSHOT': 0})

    def camera_reset(self):
        return
self.send_cmd({'CAM_RST': 0},timeout=3000) def camera_run(self, *arg): return self.send_cmd({'CAM_RUN': arg[0]}) def camera_set_pixformat(self, *arg): return self.send_cmd({'CAM_SET_PF': arg[0]}) def camera_set_contrast(self, *arg): return self.send_cmd({'CAM_SET_CRA': arg[0]}) def camera_set_brightness(self, *arg): return self.send_cmd({'CAM_SET_BRG': arg[0]}) def camera_set_saturation(self, *arg): return self.send_cmd({'CAM_SET_SAT': arg[0]}) def camera_set_auto_gain(self, *arg, **kw): return self.send_cmd({'CAM_AUTO_GAIN': [arg, kw]}) def camera_set_auto_whitebal(self, *arg): return self.send_cmd({'CAM_AUTO_WBAL': arg[0]}) def camera_set_windowing(self, *arg): return self.send_cmd({'CAM_SET_WIN': arg[0]}) def camera_set_hmirror(self, *arg): return self.send_cmd({'CAM_SET_HM': arg[0]}) def camera_set_vflip(self, *arg): return self.send_cmd({'CAM_SET_VF': arg[0]}) def camera_skip_frames(self, *arg, **kw): return self.send_cmd({'CAM_SKIP_FRM': [arg, kw]}) def lcd_init(self, *args, **kws): return self.send_cmd({'LCD_INT': [args, kws]},timeout=5000) def lcd_display(self, **kws): return self.send_cmd({'LCD_DISP': kws}) def lcd_clear(self, **kws): return self.send_cmd({'LCD_CLR': kws}) def lcd_draw_string(self, *args): return self.send_cmd({'LCD_STR': args}) def image_load(self, *args, **kws): self.send_cmd({'IMG_LOD': [args, kws]}) time.sleep_ms(200) def image_width(self): return self.send_cmd({'IMG_WID': 0}) def image_hight(self): return self.send_cmd({'IMG_HIG': 0}) def image_format(self): return self.send_cmd({'IMG_FRM': 0}) def image_size(self): return self.send_cmd({'IMG_SIZE': 0}) def image_get_pixel(self, *args, **kws): return self.send_cmd({'IMG_GET_PIX': [args, kws]}) def image_set_pixel(self, *args, **kws): self.send_cmd({'IMG_SET_PIX': [args, kws]}) def image_mean_pool(self, *args, **kws): self.send_cmd({'IMG_MEAN_P': [args, kws]}) def image_to_grayscale(self): self.send_cmd({'IMG_TO_GRAY': 0}) def image_to_rainbow(self): self.send_cmd({'IMG_TO_RB': 0}) def image_copy(self, *args, **kws): self.send_cmd({'IMG_CPY': [args, kws]}) def image_save(self, *args, **kws): self.send_cmd({'IMG_SAVE': [args, kws]}) time.sleep_ms(200) def image_clear(self): self.send_cmd({'IMG_CLR': 0}) def image_draw_line(self, *args, **kws): self.send_cmd({'IMG_DRW_LN': [args, kws]}) def image_draw_rectangle(self, *args, **kws): self.send_cmd({'IMG_DRW_RECTANG': [args, kws]}) def image_draw_circle(self, *args, **kws): self.send_cmd({'IMG_DRW_CIR': [args, kws]}) def image_draw_string(self, *args, **kws): self.send_cmd({'IMG_DRW_STR': [args, kws]}) def image_draw_cross(self, *args, **kws): self.send_cmd({'IMG_DRW_CRS': [args, kws]}) def image_draw_arrow(self, *args, **kws): self.send_cmd({'IMG_DRW_ARR': [args, kws]}) def image_draw_image(self, *args, **kws): self.send_cmd({'IMG_DRW_IMG': [args, kws]}) def image_binary(self, *args, **kws): self.send_cmd({'IMG_BINARY': [args, kws]}) def image_invert(self): self.send_cmd({'IMG_INVERT': 0}) def image_erode(self, *args, **kws): self.send_cmd({'IMG_ERODE': [args, kws]}) def image_dilate(self, *args, **kws): self.send_cmd({'IMG_DIL': [args, kws]}) def image_negate(self, *args, **kws): self.send_cmd({'IMG_NEG': [args, kws]}) def image_mean(self, *args, **kws): self.send_cmd({'IMG_MEAN': [args, kws]}) def image_mode(self, *args, **kws): self.send_cmd({'IMG_MODE': [args, kws]}) def image_median(self, *args, **kws): self.send_cmd({'IMG_MEDIAN': [args, kws]}) def image_midpoint(self, *args, **kws): self.send_cmd({'IMG_MIDP': [args, kws]}) def image_cartoon(self, *args, 
**kws): self.send_cmd({'IMG_CART': [args, kws]}) def image_conv3(self, *args, **kws): self.send_cmd({'IMG_CONV': [args, kws]}) def image_gaussian(self, *args, **kws): self.send_cmd({'IMG_GAUS': [args, kws]}) def image_bilateral(self, *args, **kws): self.send_cmd({'IMG_BIL': [args, kws]}) def image_linpolar(self, *args, **kws): self.send_cmd({'IMG_LINP': [args, kws]}) def image_logpolar(self, *args, **kws): self.send_cmd({'IMG_LOGP': [args, kws]}) def image_rotation_corr(self, *args, **kws): self.send_cmd({'IMG_ROT_COR': [args, kws]}) def image_find_blobs(self, *args, **kws): return [blob(i) for i in self.send_cmd({'IMG_FID_BLOB': [args, kws]})]
[ [ [ 166, 169 ], [ 8650, 8653 ], [ 8661, 8664 ] ], [ [ 171, 175 ], [ 8621, 8625 ] ], [ [ 183, 187 ], [ 1419, 1423 ], [ 1508, 1512 ], [ 9561, 9565 ], [ 9592, 9596 ], [ 9608, 9612 ], [ 10015, 10019 ], [ 10729, 10733 ], [ 10745, 10749 ], [ 13719, 13723 ], [ 14634, 14638 ] ], [ [ 195, 200 ], [ 9892, 9897 ], [ 10313, 10318 ] ], [ [ 218, 226 ] ], [ [ 228, 236 ] ], [ [ 238, 243 ] ], [ [ 264, 271 ] ], [ [ 1679, 1689 ], [ 3999, 4009 ], [ 5148, 5158 ], [ 6455, 6465 ], [ 6491, 6501 ], [ 6774, 6784 ], [ 6810, 6820 ] ], [ [ 1777, 1783 ] ], [ [ 8613, 8618 ], [ 9919, 9924 ], [ 10061, 10066 ], [ 10119, 10124 ], [ 10185, 10190 ] ], [ [ 8746, 8755 ], [ 9784, 9793 ], [ 10618, 10627 ] ], [ [ 8800, 8804 ], [ 16882, 16886 ] ], [ [ 9517, 9521 ] ] ]
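The key-map comment in the BS8112A driver above documents which bit of the byte returned by key_value() corresponds to which touch key on the mPython board. A small decoding sketch follows; the bit assignments are assumed from that comment (P on bit 0 up to N on bit 5, bits 6-7 unused), and decode_touchpad is an illustrative helper rather than part of the driver. Note that key_value() returns a one-byte buffer, so the integer to decode is the first byte.

# Bit layout assumed from the key-map comment in the BS8112A driver:
# bit0=P, bit1=Y, bit2=T, bit3=H, bit4=O, bit5=N (bits 6-7 unused on mPython).
KEY_BITS = {0: 'P', 1: 'Y', 2: 'T', 3: 'H', 4: 'O', 5: 'N'}

def decode_touchpad(raw):
    # Illustrative helper: list the touch keys whose bits are set in the raw byte.
    value = raw[0] if isinstance(raw, (bytes, bytearray)) else raw
    return [name for bit, name in KEY_BITS.items() if value & (1 << bit)]

print(decode_touchpad(b'\x05'))  # bits 0 and 2 set -> ['P', 'T']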
import fodmc # output_mode: PyFLOSIC, NRLMOL # output_name: NameOfMolecule.xyz (for PyFLOSIC only) output_mode = ['NRLMOL','PyFLOSIC'][1] output_name = ['', 'test.xyz'][1] fodmc.fodmc_mod.get_guess(output_mode,output_name)
[ [ [ 7, 12 ], [ 178, 183 ] ], [ [ 100, 111 ], [ 204, 215 ] ], [ [ 139, 150 ], [ 216, 227 ] ] ]
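The fodmc snippet above selects its output flavour purely through the two list-index lines. For the NRLMOL variant the same call is made with index 0 in both lists; the sketch below shows that configuration under the assumption, taken from the comments, that the output name is only used for PyFLOSIC and can stay empty here.

import fodmc

# NRLMOL output: pick index 0; output_name stays '' because it is only used for PyFLOSIC.
output_mode = ['NRLMOL', 'PyFLOSIC'][0]
output_name = ['', 'test.xyz'][0]
fodmc.fodmc_mod.get_guess(output_mode, output_name)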
# -*- coding: utf-8 -*- # # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import unittest import mock from parameterized import parameterized from airflow.gcp.hooks.cloud_storage_transfer_service import GcpTransferOperationStatus from airflow.gcp.sensors.cloud_storage_transfer_service import CloudDataTransferServiceJobStatusSensor class TestGcpStorageTransferOperationWaitForJobStatusSensor(unittest.TestCase): @mock.patch('airflow.gcp.sensors.cloud_storage_transfer_service.CloudDataTransferServiceHook') def test_wait_for_status_success(self, mock_tool): operations = [{'metadata': {'status': GcpTransferOperationStatus.SUCCESS}}] mock_tool.return_value.list_transfer_operations.return_value = operations mock_tool.operations_contain_expected_statuses.return_value = True op = CloudDataTransferServiceJobStatusSensor( task_id='task-id', job_name='job-name', project_id='project-id', expected_statuses=GcpTransferOperationStatus.SUCCESS, ) context = {'ti': (mock.Mock(**{'xcom_push.return_value': None}))} result = op.poke(context) mock_tool.return_value.list_transfer_operations.assert_called_once_with( request_filter={'project_id': 'project-id', 'job_names': ['job-name']} ) mock_tool.operations_contain_expected_statuses.assert_called_once_with( operations=operations, expected_statuses={GcpTransferOperationStatus.SUCCESS} ) self.assertTrue(result) @mock.patch('airflow.gcp.sensors.cloud_storage_transfer_service.CloudDataTransferServiceHook') def test_wait_for_status_success_default_expected_status(self, mock_tool): op = CloudDataTransferServiceJobStatusSensor( task_id='task-id', job_name='job-name', project_id='project-id', expected_statuses=GcpTransferOperationStatus.SUCCESS, ) context = {'ti': (mock.Mock(**{'xcom_push.return_value': None}))} result = op.poke(context) mock_tool.operations_contain_expected_statuses.assert_called_once_with( operations=mock.ANY, expected_statuses={GcpTransferOperationStatus.SUCCESS} ) self.assertTrue(result) @mock.patch('airflow.gcp.sensors.cloud_storage_transfer_service.CloudDataTransferServiceHook') def test_wait_for_status_after_retry(self, mock_tool): operations_set = [ [{'metadata': {'status': GcpTransferOperationStatus.SUCCESS}}], [{'metadata': {'status': GcpTransferOperationStatus.SUCCESS}}], ] mock_tool.return_value.list_transfer_operations.side_effect = operations_set mock_tool.operations_contain_expected_statuses.side_effect = [False, True] op = CloudDataTransferServiceJobStatusSensor( task_id='task-id', job_name='job-name', project_id='project-id', expected_statuses=GcpTransferOperationStatus.SUCCESS, ) context = {'ti': (mock.Mock(**{'xcom_push.return_value': None}))} result = op.poke(context) self.assertFalse(result) mock_tool.operations_contain_expected_statuses.assert_called_once_with( operations=operations_set[0], 
expected_statuses={GcpTransferOperationStatus.SUCCESS} ) mock_tool.operations_contain_expected_statuses.reset_mock() result = op.poke(context) self.assertTrue(result) mock_tool.operations_contain_expected_statuses.assert_called_once_with( operations=operations_set[1], expected_statuses={GcpTransferOperationStatus.SUCCESS} ) @parameterized.expand( [ (GcpTransferOperationStatus.SUCCESS, {GcpTransferOperationStatus.SUCCESS}), ({GcpTransferOperationStatus.SUCCESS}, {GcpTransferOperationStatus.SUCCESS}), ( {GcpTransferOperationStatus.SUCCESS, GcpTransferOperationStatus.SUCCESS}, {GcpTransferOperationStatus.SUCCESS, GcpTransferOperationStatus.SUCCESS}, ), ] ) @mock.patch('airflow.gcp.sensors.cloud_storage_transfer_service.CloudDataTransferServiceHook') def test_wait_for_status_normalize_status(self, expected_status, received_status, mock_tool): operations = [{'metadata': {'status': GcpTransferOperationStatus.SUCCESS}}] mock_tool.return_value.list_transfer_operations.return_value = operations mock_tool.operations_contain_expected_statuses.side_effect = [False, True] op = CloudDataTransferServiceJobStatusSensor( task_id='task-id', job_name='job-name', project_id='project-id', expected_statuses=expected_status, ) context = {'ti': (mock.Mock(**{'xcom_push.return_value': None}))} result = op.poke(context) self.assertFalse(result) mock_tool.operations_contain_expected_statuses.assert_called_once_with( operations=operations, expected_statuses=received_status )
[ [ [ 818, 826 ], [ 1134, 1142 ] ], [ [ 835, 839 ], [ 1159, 1163 ], [ 2283, 2287 ], [ 3015, 3019 ], [ 4858, 4862 ], [ 1808, 1812 ], [ 2715, 2719 ], [ 2902, 2906 ], [ 3785, 3789 ], [ 5540, 5544 ] ], [ [ 866, 879 ], [ 4418, 4431 ] ], [ [ 942, 968 ], [ 4463, 4489 ], [ 4500, 4526 ], [ 4552, 4578 ], [ 4590, 4616 ], [ 4659, 4685 ], [ 4695, 4721 ], [ 4749, 4775 ], [ 4785, 4811 ], [ 1354, 1380 ], [ 1735, 1761 ], [ 2199, 2225 ], [ 2642, 2668 ], [ 2931, 2957 ], [ 3232, 3258 ], [ 3308, 3334 ], [ 3712, 3738 ], [ 4043, 4069 ], [ 4366, 4392 ], [ 5096, 5122 ] ], [ [ 1032, 1071 ], [ 1563, 1602 ], [ 2470, 2509 ], [ 3540, 3579 ], [ 5314, 5353 ] ], [ [ 1080, 1133 ] ] ]
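The tests above drive CloudDataTransferServiceJobStatusSensor.poke() directly with mocked hooks. For orientation, here is a minimal sketch of how the same sensor could be declared inside a DAG; the DAG id, job name and project id are placeholders, and the import paths are the Airflow 1.10-era ones used by this test module.

from airflow import DAG
from airflow.utils.dates import days_ago
from airflow.gcp.hooks.cloud_storage_transfer_service import GcpTransferOperationStatus
from airflow.gcp.sensors.cloud_storage_transfer_service import (
    CloudDataTransferServiceJobStatusSensor,
)

with DAG(dag_id="example_transfer_sensor", start_date=days_ago(1), schedule_interval=None) as dag:
    wait_for_transfer = CloudDataTransferServiceJobStatusSensor(
        task_id="wait-for-transfer-job",
        job_name="transferJobs/example-job",   # placeholder job name
        project_id="example-project",          # placeholder GCP project id
        expected_statuses={GcpTransferOperationStatus.SUCCESS},
    )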
# Generated by Django 3.1.1 on 2020-10-19 16:09 import datetime from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('maps', '0011_auto_20201019_1839'), ] operations = [ migrations.AlterField( model_name='trafficsignal', name='timer', field=models.DateTimeField(default=datetime.datetime(2020, 10, 19, 21, 39, 12, 862273)), ), ]
[ [ [ 56, 64 ], [ 384, 392 ] ], [ [ 87, 97 ], [ 124, 134 ], [ 248, 258 ] ], [ [ 99, 105 ], [ 355, 361 ] ], [ [ 114, 123 ] ] ]
# -*- coding: utf-8 -*- # Generated by Django 1.9.1 on 2016-07-25 13:13 from __future__ import unicode_literals from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('band', '0002_auto_20160725_1313'), ] operations = [ migrations.RemoveField( model_name='personal', name='id', ), migrations.AlterField( model_name='personal', name='username', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, primary_key=True, serialize=False, to='band.Account'), ), ]
[ [ [ 95, 111 ] ], [ [ 135, 145 ], [ 205, 215 ], [ 329, 339 ], [ 430, 440 ] ], [ [ 147, 153 ], [ 535, 541 ] ], [ [ 161, 186 ], [ 563, 569 ] ], [ [ 195, 204 ] ] ]
""" Augmenters that somehow change the size of the images. List of augmenters: * :class:`Resize` * :class:`CropAndPad` * :class:`Crop` * :class:`Pad` * :class:`PadToFixedSize` * :class:`CenterPadToFixedSize` * :class:`CropToFixedSize` * :class:`CenterCropToFixedSize` * :class:`CropToMultiplesOf` * :class:`CenterCropToMultiplesOf` * :class:`PadToMultiplesOf` * :class:`CenterPadToMultiplesOf` * :class:`CropToPowersOf` * :class:`CenterCropToPowersOf` * :class:`PadToPowersOf` * :class:`CenterPadToPowersOf` * :class:`CropToAspectRatio` * :class:`CenterCropToAspectRatio` * :class:`PadToAspectRatio` * :class:`CenterPadToAspectRatio` * :class:`CropToSquare` * :class:`CenterCropToSquare` * :class:`PadToSquare` * :class:`CenterPadToSquare` * :class:`KeepSizeByResize` """ from __future__ import print_function, division, absolute_import import re import functools import numpy as np import cv2 import imgaug as ia from imgaug.imgaug import _normalize_cv2_input_arr_ from . import meta from .. import parameters as iap def _crop_trbl_to_xyxy(shape, top, right, bottom, left, prevent_zero_size=True): if prevent_zero_size: top, right, bottom, left = _crop_prevent_zero_size( shape[0], shape[1], top, right, bottom, left) height, width = shape[0:2] x1 = left x2 = width - right y1 = top y2 = height - bottom # these steps prevent negative sizes # if x2==x1 or y2==y1 then the output arr has size 0 for the respective axis # note that if height/width of arr is zero, then y2==y1 or x2==x1, which # is still valid, even if height/width is zero and results in a zero-sized # axis x2 = max(x2, x1) y2 = max(y2, y1) return x1, y1, x2, y2 def _crop_arr_(arr, top, right, bottom, left, prevent_zero_size=True): x1, y1, x2, y2 = _crop_trbl_to_xyxy(arr.shape, top, right, bottom, left, prevent_zero_size=prevent_zero_size) return arr[y1:y2, x1:x2, ...] def _crop_and_pad_arr(arr, croppings, paddings, pad_mode="constant", pad_cval=0, keep_size=False): height, width = arr.shape[0:2] image_cr = _crop_arr_(arr, *croppings) image_cr_pa = pad( image_cr, top=paddings[0], right=paddings[1], bottom=paddings[2], left=paddings[3], mode=pad_mode, cval=pad_cval) if keep_size: image_cr_pa = ia.imresize_single_image(image_cr_pa, (height, width)) return image_cr_pa def _crop_and_pad_heatmap_(heatmap, croppings_img, paddings_img, pad_mode="constant", pad_cval=0.0, keep_size=False): return _crop_and_pad_hms_or_segmaps_(heatmap, croppings_img, paddings_img, pad_mode, pad_cval, keep_size) def _crop_and_pad_segmap_(segmap, croppings_img, paddings_img, pad_mode="constant", pad_cval=0, keep_size=False): return _crop_and_pad_hms_or_segmaps_(segmap, croppings_img, paddings_img, pad_mode, pad_cval, keep_size) def _crop_and_pad_hms_or_segmaps_(augmentable, croppings_img, paddings_img, pad_mode="constant", pad_cval=None, keep_size=False): if isinstance(augmentable, ia.HeatmapsOnImage): arr_attr_name = "arr_0to1" pad_cval = pad_cval if pad_cval is not None else 0.0 else: assert isinstance(augmentable, ia.SegmentationMapsOnImage), ( "Expected HeatmapsOnImage or SegmentationMapsOnImage, got %s." 
% ( type(augmentable))) arr_attr_name = "arr" pad_cval = pad_cval if pad_cval is not None else 0 arr = getattr(augmentable, arr_attr_name) arr_shape_orig = arr.shape augm_shape = augmentable.shape croppings_proj = _project_size_changes(croppings_img, augm_shape, arr.shape) paddings_proj = _project_size_changes(paddings_img, augm_shape, arr.shape) croppings_proj = _crop_prevent_zero_size(arr.shape[0], arr.shape[1], *croppings_proj) arr_cr = _crop_arr_(arr, croppings_proj[0], croppings_proj[1], croppings_proj[2], croppings_proj[3]) arr_cr_pa = pad( arr_cr, top=paddings_proj[0], right=paddings_proj[1], bottom=paddings_proj[2], left=paddings_proj[3], mode=pad_mode, cval=pad_cval) setattr(augmentable, arr_attr_name, arr_cr_pa) if keep_size: augmentable = augmentable.resize(arr_shape_orig[0:2]) else: augmentable.shape = _compute_shape_after_crop_and_pad( augmentable.shape, croppings_img, paddings_img) return augmentable def _crop_and_pad_kpsoi_(kpsoi, croppings_img, paddings_img, keep_size): # using the trbl function instead of croppings_img has the advantage # of incorporating prevent_zero_size, dealing with zero-sized input image # axis and dealing the negative crop amounts x1, y1, _x2, _y2 = _crop_trbl_to_xyxy(kpsoi.shape, *croppings_img) crop_left = x1 crop_top = y1 shape_orig = kpsoi.shape shifted = kpsoi.shift_( x=-crop_left+paddings_img[3], y=-crop_top+paddings_img[0]) shifted.shape = _compute_shape_after_crop_and_pad( shape_orig, croppings_img, paddings_img) if keep_size: shifted = shifted.on_(shape_orig) return shifted def _compute_shape_after_crop_and_pad(old_shape, croppings, paddings): x1, y1, x2, y2 = _crop_trbl_to_xyxy(old_shape, *croppings) new_shape = list(old_shape) new_shape[0] = y2 - y1 + paddings[0] + paddings[2] new_shape[1] = x2 - x1 + paddings[1] + paddings[3] return tuple(new_shape) def _crop_prevent_zero_size(height, width, crop_top, crop_right, crop_bottom, crop_left): remaining_height = height - (crop_top + crop_bottom) remaining_width = width - (crop_left + crop_right) if remaining_height < 1: regain = abs(remaining_height) + 1 regain_top = regain // 2 regain_bottom = regain // 2 if regain_top + regain_bottom < regain: regain_top += 1 if regain_top > crop_top: diff = regain_top - crop_top regain_top = crop_top regain_bottom += diff elif regain_bottom > crop_bottom: diff = regain_bottom - crop_bottom regain_bottom = crop_bottom regain_top += diff crop_top = crop_top - regain_top crop_bottom = crop_bottom - regain_bottom if remaining_width < 1: regain = abs(remaining_width) + 1 regain_right = regain // 2 regain_left = regain // 2 if regain_right + regain_left < regain: regain_right += 1 if regain_right > crop_right: diff = regain_right - crop_right regain_right = crop_right regain_left += diff elif regain_left > crop_left: diff = regain_left - crop_left regain_left = crop_left regain_right += diff crop_right = crop_right - regain_right crop_left = crop_left - regain_left return ( max(crop_top, 0), max(crop_right, 0), max(crop_bottom, 0), max(crop_left, 0)) def _project_size_changes(trbl, from_shape, to_shape): if from_shape[0:2] == to_shape[0:2]: return trbl height_to = to_shape[0] width_to = to_shape[1] height_from = from_shape[0] width_from = from_shape[1] top = trbl[0] right = trbl[1] bottom = trbl[2] left = trbl[3] # Adding/subtracting 1e-4 here helps for the case where a heatmap/segmap # is exactly half the size of an image and the size change on an axis is # an odd value. 
Then the projected value would end up being <something>.5 # and the rounding would always round up to the next integer. If both # sides then have the same change, they are both rounded up, resulting # in more change than expected. # E.g. image height is 8, map height is 4, change is 3 at the top and 3 at # the bottom. The changes are projected to 4*(3/8) = 1.5 and both rounded # up to 2.0. Hence, the maps are changed by 4 (100% of the map height, # vs. 6 for images, which is 75% of the image height). top = _int_r(height_to * (top/height_from) - 1e-4) right = _int_r(width_to * (right/width_from) + 1e-4) bottom = _int_r(height_to * (bottom/height_from) + 1e-4) left = _int_r(width_to * (left/width_from) - 1e-4) return top, right, bottom, left def _int_r(value): return int(np.round(value)) # TODO somehow integrate this with pad() def _handle_pad_mode_param(pad_mode): pad_modes_available = { "constant", "edge", "linear_ramp", "maximum", "mean", "median", "minimum", "reflect", "symmetric", "wrap"} if pad_mode == ia.ALL: return iap.Choice(list(pad_modes_available)) if ia.is_string(pad_mode): assert pad_mode in pad_modes_available, ( "Value '%s' is not a valid pad mode. Valid pad modes are: %s." % ( pad_mode, ", ".join(pad_modes_available))) return iap.Deterministic(pad_mode) if isinstance(pad_mode, list): assert all([v in pad_modes_available for v in pad_mode]), ( "At least one in list %s is not a valid pad mode. Valid pad " "modes are: %s." % (str(pad_mode), ", ".join(pad_modes_available))) return iap.Choice(pad_mode) if isinstance(pad_mode, iap.StochasticParameter): return pad_mode raise Exception( "Expected pad_mode to be ia.ALL or string or list of strings or " "StochasticParameter, got %s." % (type(pad_mode),)) def _handle_position_parameter(position): if position == "uniform": return iap.Uniform(0.0, 1.0), iap.Uniform(0.0, 1.0) if position == "normal": return ( iap.Clip(iap.Normal(loc=0.5, scale=0.35 / 2), minval=0.0, maxval=1.0), iap.Clip(iap.Normal(loc=0.5, scale=0.35 / 2), minval=0.0, maxval=1.0) ) if position == "center": return iap.Deterministic(0.5), iap.Deterministic(0.5) if (ia.is_string(position) and re.match(r"^(left|center|right)-(top|center|bottom)$", position)): mapping = {"top": 0.0, "center": 0.5, "bottom": 1.0, "left": 0.0, "right": 1.0} return ( iap.Deterministic(mapping[position.split("-")[0]]), iap.Deterministic(mapping[position.split("-")[1]]) ) if isinstance(position, iap.StochasticParameter): return position if isinstance(position, tuple): assert len(position) == 2, ( "Expected tuple with two entries as position parameter. " "Got %d entries with types %s.." % ( len(position), str([type(item) for item in position]))) for item in position: if ia.is_single_number(item) and (item < 0 or item > 1.0): raise Exception( "Both position values must be within the value range " "[0.0, 1.0]. Got type %s with value %.8f." % ( type(item), item,)) position = [iap.Deterministic(item) if ia.is_single_number(item) else item for item in position] only_sparams = all([isinstance(item, iap.StochasticParameter) for item in position]) assert only_sparams, ( "Expected tuple with two entries that are both either " "StochasticParameter or float/int. Got types %s." 
% ( str([type(item) for item in position]) )) return tuple(position) raise Exception( "Expected one of the following as position parameter: string " "'uniform', string 'normal', string 'center', a string matching " "regex ^(left|center|right)-(top|center|bottom)$, a single " "StochasticParameter or a tuple of two entries, both being either " "StochasticParameter or floats or int. Got instead type %s with " "content '%s'." % ( type(position), (str(position) if len(str(position)) < 20 else str(position)[0:20] + "...") ) ) # TODO this is the same as in imgaug.py, make DRY def _assert_two_or_three_dims(shape): if hasattr(shape, "shape"): shape = shape.shape assert len(shape) in [2, 3], ( "Expected image with two or three dimensions, but got %d dimensions " "and shape %s." % (len(shape), shape)) def pad(arr, top=0, right=0, bottom=0, left=0, mode="constant", cval=0): """Pad an image-like array on its top/right/bottom/left side. This function is a wrapper around :func:`numpy.pad`. Supported dtypes ---------------- * ``uint8``: yes; fully tested (1) * ``uint16``: yes; fully tested (1) * ``uint32``: yes; fully tested (2) (3) * ``uint64``: yes; fully tested (2) (3) * ``int8``: yes; fully tested (1) * ``int16``: yes; fully tested (1) * ``int32``: yes; fully tested (1) * ``int64``: yes; fully tested (2) (3) * ``float16``: yes; fully tested (2) (3) * ``float32``: yes; fully tested (1) * ``float64``: yes; fully tested (1) * ``float128``: yes; fully tested (2) (3) * ``bool``: yes; tested (2) (3) - (1) Uses ``cv2`` if `mode` is one of: ``"constant"``, ``"edge"``, ``"reflect"``, ``"symmetric"``. Otherwise uses ``numpy``. - (2) Uses ``numpy``. - (3) Rejected by ``cv2``. Parameters ---------- arr : (H,W) ndarray or (H,W,C) ndarray Image-like array to pad. top : int, optional Amount of pixels to add to the top side of the image. Must be ``0`` or greater. right : int, optional Amount of pixels to add to the right side of the image. Must be ``0`` or greater. bottom : int, optional Amount of pixels to add to the bottom side of the image. Must be ``0`` or greater. left : int, optional Amount of pixels to add to the left side of the image. Must be ``0`` or greater. mode : str, optional Padding mode to use. See :func:`numpy.pad` for details. In case of mode ``constant``, the parameter `cval` will be used as the ``constant_values`` parameter to :func:`numpy.pad`. In case of mode ``linear_ramp``, the parameter `cval` will be used as the ``end_values`` parameter to :func:`numpy.pad`. cval : number or iterable of number, optional Value to use for padding if `mode` is ``constant``. See :func:`numpy.pad` for details. The cval is expected to match the input array's dtype and value range. If an iterable is used, it is expected to contain one value per channel. The number of values and number of channels are expected to match. Returns ------- (H',W') ndarray or (H',W',C) ndarray Padded array with height ``H'=H+top+bottom`` and width ``W'=W+left+right``. """ import imgaug.dtypes as iadt _assert_two_or_three_dims(arr) assert all([v >= 0 for v in [top, right, bottom, left]]), ( "Expected padding amounts that are >=0, but got %d, %d, %d, %d " "(top, right, bottom, left)" % (top, right, bottom, left)) is_multi_cval = ia.is_iterable(cval) if top > 0 or right > 0 or bottom > 0 or left > 0: min_value, _, max_value = iadt.get_value_range_of_dtype(arr.dtype) # without the if here there are crashes for float128, e.g. 
if # cval is an int (just using float(cval) seems to not be accurate # enough) if arr.dtype.name == "float128": cval = np.float128(cval) # pylint: disable=no-member if is_multi_cval: cval = np.clip(cval, min_value, max_value) else: cval = max(min(cval, max_value), min_value) # Note that copyMakeBorder() hangs/runs endlessly if arr has an # axis of size 0 and mode is "reflect". # Numpy also complains in these cases if mode is not "constant". has_zero_sized_axis = any([axis == 0 for axis in arr.shape]) if has_zero_sized_axis: mode = "constant" mapping_mode_np_to_cv2 = { "constant": cv2.BORDER_CONSTANT, "edge": cv2.BORDER_REPLICATE, "linear_ramp": None, "maximum": None, "mean": None, "median": None, "minimum": None, "reflect": cv2.BORDER_REFLECT_101, "symmetric": cv2.BORDER_REFLECT, "wrap": None, cv2.BORDER_CONSTANT: cv2.BORDER_CONSTANT, cv2.BORDER_REPLICATE: cv2.BORDER_REPLICATE, cv2.BORDER_REFLECT_101: cv2.BORDER_REFLECT_101, cv2.BORDER_REFLECT: cv2.BORDER_REFLECT } bad_mode_cv2 = mapping_mode_np_to_cv2.get(mode, None) is None # these datatypes all simply generate a "TypeError: src data type = X # is not supported" error bad_datatype_cv2 = ( arr.dtype.name in ["uint32", "uint64", "int64", "float16", "float128", "bool"] ) # OpenCV turns the channel axis for arrays with 0 channels to 512 # TODO add direct test for this. indirectly tested via Pad bad_shape_cv2 = (arr.ndim == 3 and arr.shape[-1] == 0) if not bad_datatype_cv2 and not bad_mode_cv2 and not bad_shape_cv2: # convert cval to expected type, as otherwise we get TypeError # for np inputs kind = arr.dtype.kind if is_multi_cval: cval = [float(cval_c) if kind == "f" else int(cval_c) for cval_c in cval] else: cval = float(cval) if kind == "f" else int(cval) if arr.ndim == 2 or arr.shape[2] <= 4: # without this, only the first channel is padded with the cval, # all following channels with 0 if arr.ndim == 3 and not is_multi_cval: cval = tuple([cval] * arr.shape[2]) arr_pad = cv2.copyMakeBorder( _normalize_cv2_input_arr_(arr), top=top, bottom=bottom, left=left, right=right, borderType=mapping_mode_np_to_cv2[mode], value=cval) if arr.ndim == 3 and arr_pad.ndim == 2: arr_pad = arr_pad[..., np.newaxis] else: result = [] channel_start_idx = 0 cval = cval if is_multi_cval else tuple([cval] * arr.shape[2]) while channel_start_idx < arr.shape[2]: arr_c = arr[..., channel_start_idx:channel_start_idx+4] cval_c = cval[channel_start_idx:channel_start_idx+4] arr_pad_c = cv2.copyMakeBorder( _normalize_cv2_input_arr_(arr_c), top=top, bottom=bottom, left=left, right=right, borderType=mapping_mode_np_to_cv2[mode], value=cval_c) arr_pad_c = np.atleast_3d(arr_pad_c) result.append(arr_pad_c) channel_start_idx += 4 arr_pad = np.concatenate(result, axis=2) else: # paddings for 2d case paddings_np = [(top, bottom), (left, right)] # add paddings for 3d case if arr.ndim == 3: paddings_np.append((0, 0)) if mode == "constant": if arr.ndim > 2 and is_multi_cval: arr_pad_chans = [ np.pad(arr[..., c], paddings_np[0:2], mode=mode, constant_values=cval[c]) for c in np.arange(arr.shape[2])] arr_pad = np.stack(arr_pad_chans, axis=-1) else: arr_pad = np.pad(arr, paddings_np, mode=mode, constant_values=cval) elif mode == "linear_ramp": if arr.ndim > 2 and is_multi_cval: arr_pad_chans = [ np.pad(arr[..., c], paddings_np[0:2], mode=mode, end_values=cval[c]) for c in np.arange(arr.shape[2])] arr_pad = np.stack(arr_pad_chans, axis=-1) else: arr_pad = np.pad(arr, paddings_np, mode=mode, end_values=cval) else: arr_pad = np.pad(arr, paddings_np, mode=mode) return arr_pad return 
np.copy(arr) def pad_to_aspect_ratio(arr, aspect_ratio, mode="constant", cval=0, return_pad_amounts=False): """Pad an image array on its sides so that it matches a target aspect ratio. See :func:`~imgaug.imgaug.compute_paddings_for_aspect_ratio` for an explanation of how the required padding amounts are distributed per image axis. Supported dtypes ---------------- See :func:`~imgaug.augmenters.size.pad`. Parameters ---------- arr : (H,W) ndarray or (H,W,C) ndarray Image-like array to pad. aspect_ratio : float Target aspect ratio, given as width/height. E.g. ``2.0`` denotes the image having twice as much width as height. mode : str, optional Padding mode to use. See :func:`~imgaug.imgaug.pad` for details. cval : number, optional Value to use for padding if `mode` is ``constant``. See :func:`numpy.pad` for details. return_pad_amounts : bool, optional If ``False``, then only the padded image will be returned. If ``True``, a ``tuple`` with two entries will be returned, where the first entry is the padded image and the second entry are the amounts by which each image side was padded. These amounts are again a ``tuple`` of the form ``(top, right, bottom, left)``, with each value being an ``int``. Returns ------- (H',W') ndarray or (H',W',C) ndarray Padded image as ``(H',W')`` or ``(H',W',C)`` ndarray, fulfilling the given `aspect_ratio`. tuple of int Amounts by which the image was padded on each side, given as a ``tuple`` ``(top, right, bottom, left)``. This ``tuple`` is only returned if `return_pad_amounts` was set to ``True``. """ pad_top, pad_right, pad_bottom, pad_left = \ compute_paddings_to_reach_aspect_ratio(arr, aspect_ratio) arr_padded = pad( arr, top=pad_top, right=pad_right, bottom=pad_bottom, left=pad_left, mode=mode, cval=cval ) if return_pad_amounts: return arr_padded, (pad_top, pad_right, pad_bottom, pad_left) return arr_padded def pad_to_multiples_of(arr, height_multiple, width_multiple, mode="constant", cval=0, return_pad_amounts=False): """Pad an image array until its side lengths are multiples of given values. See :func:`~imgaug.imgaug.compute_paddings_for_aspect_ratio` for an explanation of how the required padding amounts are distributed per image axis. Supported dtypes ---------------- See :func:`~imgaug.augmenters.size.pad`. Parameters ---------- arr : (H,W) ndarray or (H,W,C) ndarray Image-like array to pad. height_multiple : None or int The desired multiple of the height. The computed padding amount will reflect a padding that increases the y axis size until it is a multiple of this value. width_multiple : None or int The desired multiple of the width. The computed padding amount will reflect a padding that increases the x axis size until it is a multiple of this value. mode : str, optional Padding mode to use. See :func:`~imgaug.imgaug.pad` for details. cval : number, optional Value to use for padding if `mode` is ``constant``. See :func:`numpy.pad` for details. return_pad_amounts : bool, optional If ``False``, then only the padded image will be returned. If ``True``, a ``tuple`` with two entries will be returned, where the first entry is the padded image and the second entry are the amounts by which each image side was padded. These amounts are again a ``tuple`` of the form ``(top, right, bottom, left)``, with each value being an integer. Returns ------- (H',W') ndarray or (H',W',C) ndarray Padded image as ``(H',W')`` or ``(H',W',C)`` ndarray. tuple of int Amounts by which the image was padded on each side, given as a ``tuple`` ``(top, right, bottom, left)``. 
This ``tuple`` is only returned if `return_pad_amounts` was set to ``True``. """ pad_top, pad_right, pad_bottom, pad_left = \ compute_paddings_to_reach_multiples_of( arr, height_multiple, width_multiple) arr_padded = pad( arr, top=pad_top, right=pad_right, bottom=pad_bottom, left=pad_left, mode=mode, cval=cval ) if return_pad_amounts: return arr_padded, (pad_top, pad_right, pad_bottom, pad_left) return arr_padded def compute_paddings_to_reach_aspect_ratio(arr, aspect_ratio): """Compute pad amounts required to fulfill an aspect ratio. "Pad amounts" here denotes the number of pixels that have to be added to each side to fulfill the desired constraint. The aspect ratio is given as ``ratio = width / height``. Depending on which dimension is smaller (height or width), only the corresponding sides (top/bottom or left/right) will be padded. The axis-wise padding amounts are always distributed equally over the sides of the respective axis (i.e. left and right, top and bottom). For odd pixel amounts, one pixel will be left over after the equal distribution and could be added to either side of the axis. This function will always add such a left over pixel to the bottom (y-axis) or right (x-axis) side. Parameters ---------- arr : (H,W) ndarray or (H,W,C) ndarray or tuple of int Image-like array or shape tuple for which to compute pad amounts. aspect_ratio : float Target aspect ratio, given as width/height. E.g. ``2.0`` denotes the image having twice as much width as height. Returns ------- tuple of int Required padding amounts to reach the target aspect ratio, given as a ``tuple`` of the form ``(top, right, bottom, left)``. """ _assert_two_or_three_dims(arr) assert aspect_ratio > 0, ( "Expected to get an aspect ratio >0, got %.4f." % (aspect_ratio,)) pad_top = 0 pad_right = 0 pad_bottom = 0 pad_left = 0 shape = arr.shape if hasattr(arr, "shape") else arr height, width = shape[0:2] if height == 0: height = 1 pad_bottom += 1 if width == 0: width = 1 pad_right += 1 aspect_ratio_current = width / height if aspect_ratio_current < aspect_ratio: # image is more vertical than desired, width needs to be increased diff = (aspect_ratio * height) - width pad_right += int(np.ceil(diff / 2)) pad_left += int(np.floor(diff / 2)) elif aspect_ratio_current > aspect_ratio: # image is more horizontal than desired, height needs to be increased diff = ((1/aspect_ratio) * width) - height pad_top += int(np.floor(diff / 2)) pad_bottom += int(np.ceil(diff / 2)) return pad_top, pad_right, pad_bottom, pad_left def compute_croppings_to_reach_aspect_ratio(arr, aspect_ratio): """Compute crop amounts required to fulfill an aspect ratio. "Crop amounts" here denotes the number of pixels that have to be removed from each side to fulfill the desired constraint. The aspect ratio is given as ``ratio = width / height``. Depending on which dimension is smaller (height or width), only the corresponding sides (top/bottom or left/right) will be cropped. The axis-wise padding amounts are always distributed equally over the sides of the respective axis (i.e. left and right, top and bottom). For odd pixel amounts, one pixel will be left over after the equal distribution and could be added to either side of the axis. This function will always add such a left over pixel to the bottom (y-axis) or right (x-axis) side. If an aspect ratio cannot be reached exactly, this function will return rather one pixel too few than one pixel too many. 
Parameters ---------- arr : (H,W) ndarray or (H,W,C) ndarray or tuple of int Image-like array or shape tuple for which to compute crop amounts. aspect_ratio : float Target aspect ratio, given as width/height. E.g. ``2.0`` denotes the image having twice as much width as height. Returns ------- tuple of int Required cropping amounts to reach the target aspect ratio, given as a ``tuple`` of the form ``(top, right, bottom, left)``. """ _assert_two_or_three_dims(arr) assert aspect_ratio > 0, ( "Expected to get an aspect ratio >0, got %.4f." % (aspect_ratio,)) shape = arr.shape if hasattr(arr, "shape") else arr assert shape[0] > 0, ( "Expected to get an array with height >0, got shape %s." % (shape,)) height, width = shape[0:2] aspect_ratio_current = width / height top = 0 right = 0 bottom = 0 left = 0 if aspect_ratio_current < aspect_ratio: # image is more vertical than desired, height needs to be reduced # c = H - W/r crop_amount = height - (width / aspect_ratio) crop_amount = min(crop_amount, height - 1) top = int(np.floor(crop_amount / 2)) bottom = int(np.ceil(crop_amount / 2)) elif aspect_ratio_current > aspect_ratio: # image is more horizontal than desired, width needs to be reduced # c = W - Hr crop_amount = width - height * aspect_ratio crop_amount = min(crop_amount, width - 1) left = int(np.floor(crop_amount / 2)) right = int(np.ceil(crop_amount / 2)) return top, right, bottom, left def compute_paddings_to_reach_multiples_of(arr, height_multiple, width_multiple): """Compute pad amounts until img height/width are multiples of given values. See :func:`~imgaug.imgaug.compute_paddings_for_aspect_ratio` for an explanation of how the required padding amounts are distributed per image axis. Parameters ---------- arr : (H,W) ndarray or (H,W,C) ndarray or tuple of int Image-like array or shape tuple for which to compute pad amounts. height_multiple : None or int The desired multiple of the height. The computed padding amount will reflect a padding that increases the y axis size until it is a multiple of this value. width_multiple : None or int The desired multiple of the width. The computed padding amount will reflect a padding that increases the x axis size until it is a multiple of this value. Returns ------- tuple of int Required padding amounts to reach multiples of the provided values, given as a ``tuple`` of the form ``(top, right, bottom, left)``. """ def _compute_axis_value(axis_size, multiple): if multiple is None: return 0, 0 if axis_size == 0: to_pad = multiple elif axis_size % multiple == 0: to_pad = 0 else: to_pad = multiple - (axis_size % multiple) return int(np.floor(to_pad/2)), int(np.ceil(to_pad/2)) _assert_two_or_three_dims(arr) if height_multiple is not None: assert height_multiple > 0, ( "Can only pad to multiples of 1 or larger, got %d." % ( height_multiple,)) if width_multiple is not None: assert width_multiple > 0, ( "Can only pad to multiples of 1 or larger, got %d." % ( width_multiple,)) shape = arr.shape if hasattr(arr, "shape") else arr height, width = shape[0:2] top, bottom = _compute_axis_value(height, height_multiple) left, right = _compute_axis_value(width, width_multiple) return top, right, bottom, left def compute_croppings_to_reach_multiples_of(arr, height_multiple, width_multiple): """Compute croppings to reach multiples of given heights/widths. See :func:`~imgaug.imgaug.compute_paddings_for_aspect_ratio` for an explanation of how the required cropping amounts are distributed per image axis. 
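    As a small illustrative example (hypothetical values; the shape-tuple
    form of `arr` is used): a ``37x50`` image cropped to multiples of ``10``
    needs ``37 % 10 = 7`` rows removed, split as ``3`` (top) and ``4``
    (bottom), while the width is already a multiple of ``10``:

    >>> compute_croppings_to_reach_multiples_of((37, 50), 10, 10)
    (3, 0, 4, 0)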
Parameters ---------- arr : (H,W) ndarray or (H,W,C) ndarray or tuple of int Image-like array or shape tuple for which to compute crop amounts. height_multiple : None or int The desired multiple of the height. The computed croppings will reflect a crop operation that decreases the y axis size until it is a multiple of this value. width_multiple : None or int The desired multiple of the width. The computed croppings amount will reflect a crop operation that decreases the x axis size until it is a multiple of this value. Returns ------- tuple of int Required cropping amounts to reach multiples of the provided values, given as a ``tuple`` of the form ``(top, right, bottom, left)``. """ def _compute_axis_value(axis_size, multiple): if multiple is None: return 0, 0 if axis_size == 0: to_crop = 0 elif axis_size % multiple == 0: to_crop = 0 else: to_crop = axis_size % multiple return int(np.floor(to_crop/2)), int(np.ceil(to_crop/2)) _assert_two_or_three_dims(arr) if height_multiple is not None: assert height_multiple > 0, ( "Can only crop to multiples of 1 or larger, got %d." % ( height_multiple,)) if width_multiple is not None: assert width_multiple > 0, ( "Can only crop to multiples of 1 or larger, got %d." % ( width_multiple,)) shape = arr.shape if hasattr(arr, "shape") else arr height, width = shape[0:2] top, bottom = _compute_axis_value(height, height_multiple) left, right = _compute_axis_value(width, width_multiple) return top, right, bottom, left def compute_paddings_to_reach_powers_of(arr, height_base, width_base, allow_zero_exponent=False): """Compute paddings to reach powers of given base values. For given axis size ``S``, padded size ``S'`` (``S' >= S``) and base ``B`` this function computes paddings that fulfill ``S' = B^E``, where ``E`` is any exponent from the discrete interval ``[0 .. inf)``. See :func:`~imgaug.imgaug.compute_paddings_for_aspect_ratio` for an explanation of how the required padding amounts are distributed per image axis. Parameters ---------- arr : (H,W) ndarray or (H,W,C) ndarray or tuple of int Image-like array or shape tuple for which to compute pad amounts. height_base : None or int The desired base of the height. width_base : None or int The desired base of the width. allow_zero_exponent : bool, optional Whether ``E=0`` in ``S'=B^E`` is a valid value. If ``True``, axes with size ``0`` or ``1`` will be padded up to size ``B^0=1`` and axes with size ``1 < S <= B`` will be padded up to ``B^1=B``. If ``False``, the minimum output axis size is always at least ``B``. Returns ------- tuple of int Required padding amounts to fulfill ``S' = B^E`` given as a ``tuple`` of the form ``(top, right, bottom, left)``. """ def _compute_axis_value(axis_size, base): if base is None: return 0, 0 if axis_size == 0: to_pad = 1 if allow_zero_exponent else base elif axis_size <= base: to_pad = base - axis_size else: # log_{base}(axis_size) in numpy exponent = np.log(axis_size) / np.log(base) to_pad = (base ** int(np.ceil(exponent))) - axis_size return int(np.floor(to_pad/2)), int(np.ceil(to_pad/2)) _assert_two_or_three_dims(arr) if height_base is not None: assert height_base > 1, ( "Can only pad to base larger than 1, got %d." % (height_base,)) if width_base is not None: assert width_base > 1, ( "Can only pad to base larger than 1, got %d." 
% (width_base,)) shape = arr.shape if hasattr(arr, "shape") else arr height, width = shape[0:2] top, bottom = _compute_axis_value(height, height_base) left, right = _compute_axis_value(width, width_base) return top, right, bottom, left def compute_croppings_to_reach_powers_of(arr, height_base, width_base, allow_zero_exponent=False): """Compute croppings to reach powers of given base values. For given axis size ``S``, cropped size ``S'`` (``S' <= S``) and base ``B`` this function computes croppings that fulfill ``S' = B^E``, where ``E`` is any exponent from the discrete interval ``[0 .. inf)``. See :func:`~imgaug.imgaug.compute_paddings_for_aspect_ratio` for an explanation of how the required cropping amounts are distributed per image axis. .. note:: For axes where ``S == 0``, this function alwayws returns zeros as croppings. For axes where ``1 <= S < B`` see parameter `allow_zero_exponent`. Parameters ---------- arr : (H,W) ndarray or (H,W,C) ndarray or tuple of int Image-like array or shape tuple for which to compute crop amounts. height_base : None or int The desired base of the height. width_base : None or int The desired base of the width. allow_zero_exponent : bool Whether ``E=0`` in ``S'=B^E`` is a valid value. If ``True``, axes with size ``1 <= S < B`` will be cropped to size ``B^0=1``. If ``False``, axes with sizes ``S < B`` will not be changed. Returns ------- tuple of int Required cropping amounts to fulfill ``S' = B^E`` given as a ``tuple`` of the form ``(top, right, bottom, left)``. """ def _compute_axis_value(axis_size, base): if base is None: return 0, 0 if axis_size == 0: to_crop = 0 elif axis_size < base: # crop down to B^0 = 1 to_crop = axis_size - 1 if allow_zero_exponent else 0 else: # log_{base}(axis_size) in numpy exponent = np.log(axis_size) / np.log(base) to_crop = axis_size - (base ** int(exponent)) return int(np.floor(to_crop/2)), int(np.ceil(to_crop/2)) _assert_two_or_three_dims(arr) if height_base is not None: assert height_base > 1, ( "Can only crop to base larger than 1, got %d." % (height_base,)) if width_base is not None: assert width_base > 1, ( "Can only crop to base larger than 1, got %d." % (width_base,)) shape = arr.shape if hasattr(arr, "shape") else arr height, width = shape[0:2] top, bottom = _compute_axis_value(height, height_base) left, right = _compute_axis_value(width, width_base) return top, right, bottom, left @ia.deprecated(alt_func="Resize", comment="Resize has the exactly same interface as Scale.") def Scale(*args, **kwargs): """Augmenter that resizes images to specified heights and widths.""" # pylint: disable=invalid-name return Resize(*args, **kwargs) class Resize(meta.Augmenter): """Augmenter that resizes images to specified heights and widths. Supported dtypes ---------------- See :func:`~imgaug.imgaug.imresize_many_images`. Parameters ---------- size : 'keep' or int or float or tuple of int or tuple of float or list of int or list of float or imgaug.parameters.StochasticParameter or dict The new size of the images. * If this has the string value ``keep``, the original height and width values will be kept (image is not resized). * If this is an ``int``, this value will always be used as the new height and width of the images. * If this is a ``float`` ``v``, then per image the image's height ``H`` and width ``W`` will be changed to ``H*v`` and ``W*v``. * If this is a ``tuple``, it is expected to have two entries ``(a, b)``. 
If at least one of these are ``float`` s, a value will be sampled from range ``[a, b]`` and used as the ``float`` value to resize the image (see above). If both are ``int`` s, a value will be sampled from the discrete range ``[a..b]`` and used as the integer value to resize the image (see above). * If this is a ``list``, a random value from the ``list`` will be picked to resize the image. All values in the ``list`` must be ``int`` s or ``float`` s (no mixture is possible). * If this is a ``StochasticParameter``, then this parameter will first be queried once per image. The resulting value will be used for both height and width. * If this is a ``dict``, it may contain the keys ``height`` and ``width`` or the keys ``shorter-side`` and ``longer-side``. Each key may have the same datatypes as above and describes the scaling on x and y-axis or the shorter and longer axis, respectively. Both axis are sampled independently. Additionally, one of the keys may have the value ``keep-aspect-ratio``, which means that the respective side of the image will be resized so that the original aspect ratio is kept. This is useful when only resizing one image size by a pixel value (e.g. resize images to a height of ``64`` pixels and resize the width so that the overall aspect ratio is maintained). interpolation : imgaug.ALL or int or str or list of int or list of str or imgaug.parameters.StochasticParameter, optional Interpolation to use. * If ``imgaug.ALL``, then a random interpolation from ``nearest``, ``linear``, ``area`` or ``cubic`` will be picked (per image). * If ``int``, then this interpolation will always be used. Expected to be any of the following: ``cv2.INTER_NEAREST``, ``cv2.INTER_LINEAR``, ``cv2.INTER_AREA``, ``cv2.INTER_CUBIC`` * If string, then this interpolation will always be used. Expected to be any of the following: ``nearest``, ``linear``, ``area``, ``cubic`` * If ``list`` of ``int`` / ``str``, then a random one of the values will be picked per image as the interpolation. * If a ``StochasticParameter``, then this parameter will be queried per image and is expected to return an ``int`` or ``str``. seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional See :func:`~imgaug.augmenters.meta.Augmenter.__init__`. name : None or str, optional See :func:`~imgaug.augmenters.meta.Augmenter.__init__`. **old_kwargs Outdated parameters. Avoid using these. Examples -------- >>> import imgaug.augmenters as iaa >>> aug = iaa.Resize(32) Resize all images to ``32x32`` pixels. >>> aug = iaa.Resize(0.5) Resize all images to ``50`` percent of their original size. >>> aug = iaa.Resize((16, 22)) Resize all images to a random height and width within the discrete interval ``[16..22]`` (uniformly sampled per image). >>> aug = iaa.Resize((0.5, 0.75)) Resize all any input image so that its height (``H``) and width (``W``) become ``H*v`` and ``W*v``, where ``v`` is uniformly sampled from the interval ``[0.5, 0.75]``. >>> aug = iaa.Resize([16, 32, 64]) Resize all images either to ``16x16``, ``32x32`` or ``64x64`` pixels. >>> aug = iaa.Resize({"height": 32}) Resize all images to a height of ``32`` pixels and keeps the original width. >>> aug = iaa.Resize({"height": 32, "width": 48}) Resize all images to a height of ``32`` pixels and a width of ``48``. 
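    A further sketch, not part of the original example set; it relies only on
    the ``dict``/``float`` semantics of `size` described above:

    >>> aug = iaa.Resize({"height": 0.5, "width": 1.0})

    Resize all images to half of their original height while keeping the
    original width.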
>>> aug = iaa.Resize({"height": 32, "width": "keep-aspect-ratio"}) Resize all images to a height of ``32`` pixels and resizes the x-axis (width) so that the aspect ratio is maintained. >>> aug = iaa.Resize( >>> {"shorter-side": 224, "longer-side": "keep-aspect-ratio"}) Resize all images to a height/width of ``224`` pixels, depending on which axis is shorter and resize the other axis so that the aspect ratio is maintained. >>> aug = iaa.Resize({"height": (0.5, 0.75), "width": [16, 32, 64]}) Resize all images to a height of ``H*v``, where ``H`` is the original height and ``v`` is a random value sampled from the interval ``[0.5, 0.75]``. The width/x-axis of each image is resized to either ``16`` or ``32`` or ``64`` pixels. >>> aug = iaa.Resize(32, interpolation=["linear", "cubic"]) Resize all images to ``32x32`` pixels. Randomly use either ``linear`` or ``cubic`` interpolation. """ def __init__(self, size, interpolation="cubic", seed=None, name=None, **old_kwargs): super(Resize, self).__init__( seed=seed, name=name, **old_kwargs) self.size, self.size_order = self._handle_size_arg(size, False) self.interpolation = self._handle_interpolation_arg(interpolation) @classmethod def _handle_size_arg(cls, size, subcall): def _dict_to_size_tuple(val1, val2): kaa = "keep-aspect-ratio" not_both_kaa = (val1 != kaa or val2 != kaa) assert not_both_kaa, ( "Expected at least one value to not be \"keep-aspect-ratio\", " "but got it two times.") size_tuple = [] for k in [val1, val2]: if k in ["keep-aspect-ratio", "keep"]: entry = iap.Deterministic(k) else: entry = cls._handle_size_arg(k, True) size_tuple.append(entry) return tuple(size_tuple) def _contains_any_key(dict_, keys): return any([key in dict_ for key in keys]) # HW = height, width # SL = shorter, longer size_order = "HW" if size == "keep": result = iap.Deterministic("keep") elif ia.is_single_number(size): assert size > 0, "Expected only values > 0, got %s" % (size,) result = iap.Deterministic(size) elif not subcall and isinstance(size, dict): if len(size.keys()) == 0: result = iap.Deterministic("keep") elif _contains_any_key(size, ["height", "width"]): height = size.get("height", "keep") width = size.get("width", "keep") result = _dict_to_size_tuple(height, width) elif _contains_any_key(size, ["shorter-side", "longer-side"]): shorter = size.get("shorter-side", "keep") longer = size.get("longer-side", "keep") result = _dict_to_size_tuple(shorter, longer) size_order = "SL" else: raise ValueError( "Expected dictionary containing no keys, " "the keys \"height\" and/or \"width\", " "or the keys \"shorter-side\" and/or \"longer-side\". " "Got keys: %s." % (str(size.keys()),)) elif isinstance(size, tuple): assert len(size) == 2, ( "Expected size tuple to contain exactly 2 values, " "got %d." % (len(size),)) assert size[0] > 0 and size[1] > 0, ( "Expected size tuple to only contain values >0, " "got %d and %d." 
% (size[0], size[1])) if ia.is_single_float(size[0]) or ia.is_single_float(size[1]): result = iap.Uniform(size[0], size[1]) else: result = iap.DiscreteUniform(size[0], size[1]) elif isinstance(size, list): if len(size) == 0: result = iap.Deterministic("keep") else: all_int = all([ia.is_single_integer(v) for v in size]) all_float = all([ia.is_single_float(v) for v in size]) assert all_int or all_float, ( "Expected to get only integers or floats.") assert all([v > 0 for v in size]), ( "Expected all values to be >0.") result = iap.Choice(size) elif isinstance(size, iap.StochasticParameter): result = size else: raise ValueError( "Expected number, tuple of two numbers, list of numbers, " "dictionary of form " "{'height': number/tuple/list/'keep-aspect-ratio'/'keep', " "'width': <analogous>}, dictionary of form " "{'shorter-side': number/tuple/list/'keep-aspect-ratio'/" "'keep', 'longer-side': <analogous>} " "or StochasticParameter, got %s." % (type(size),) ) if subcall: return result return result, size_order @classmethod def _handle_interpolation_arg(cls, interpolation): if interpolation == ia.ALL: interpolation = iap.Choice( ["nearest", "linear", "area", "cubic"]) elif ia.is_single_integer(interpolation): interpolation = iap.Deterministic(interpolation) elif ia.is_string(interpolation): interpolation = iap.Deterministic(interpolation) elif ia.is_iterable(interpolation): interpolation = iap.Choice(interpolation) elif isinstance(interpolation, iap.StochasticParameter): pass else: raise Exception( "Expected int or string or iterable or StochasticParameter, " "got %s." % (type(interpolation),)) return interpolation def _augment_batch_(self, batch, random_state, parents, hooks): nb_rows = batch.nb_rows samples = self._draw_samples(nb_rows, random_state) if batch.images is not None: batch.images = self._augment_images_by_samples(batch.images, samples) if batch.heatmaps is not None: # TODO this uses the same interpolation as for images for heatmaps # while other augmenters resort to cubic batch.heatmaps = self._augment_maps_by_samples( batch.heatmaps, "arr_0to1", samples) if batch.segmentation_maps is not None: batch.segmentation_maps = self._augment_maps_by_samples( batch.segmentation_maps, "arr", (samples[0], samples[1], [None] * nb_rows)) for augm_name in ["keypoints", "bounding_boxes", "polygons", "line_strings"]: augm_value = getattr(batch, augm_name) if augm_value is not None: func = functools.partial( self._augment_keypoints_by_samples, samples=samples) cbaois = self._apply_to_cbaois_as_keypoints(augm_value, func) setattr(batch, augm_name, cbaois) return batch def _augment_images_by_samples(self, images, samples): input_was_array = False input_dtype = None if ia.is_np_array(images): input_was_array = True input_dtype = images.dtype samples_a, samples_b, samples_ip = samples result = [] for i, image in enumerate(images): h, w = self._compute_height_width(image.shape, samples_a[i], samples_b[i], self.size_order) image_rs = ia.imresize_single_image(image, (h, w), interpolation=samples_ip[i]) result.append(image_rs) if input_was_array: all_same_size = (len({image.shape for image in result}) == 1) if all_same_size: result = np.array(result, dtype=input_dtype) return result def _augment_maps_by_samples(self, augmentables, arr_attr_name, samples): result = [] samples_h, samples_w, samples_ip = samples for i, augmentable in enumerate(augmentables): arr = getattr(augmentable, arr_attr_name) arr_shape = arr.shape img_shape = augmentable.shape h_img, w_img = self._compute_height_width( img_shape, 
samples_h[i], samples_w[i], self.size_order) h = int(np.round(h_img * (arr_shape[0] / img_shape[0]))) w = int(np.round(w_img * (arr_shape[1] / img_shape[1]))) h = max(h, 1) w = max(w, 1) if samples_ip[0] is not None: # TODO change this for heatmaps to always have cubic or # automatic interpolation? augmentable_resize = augmentable.resize( (h, w), interpolation=samples_ip[i]) else: augmentable_resize = augmentable.resize((h, w)) augmentable_resize.shape = (h_img, w_img) + img_shape[2:] result.append(augmentable_resize) return result def _augment_keypoints_by_samples(self, kpsois, samples): result = [] samples_a, samples_b, _samples_ip = samples for i, kpsoi in enumerate(kpsois): h, w = self._compute_height_width( kpsoi.shape, samples_a[i], samples_b[i], self.size_order) new_shape = (h, w) + kpsoi.shape[2:] keypoints_on_image_rs = kpsoi.on_(new_shape) result.append(keypoints_on_image_rs) return result def _draw_samples(self, nb_images, random_state): rngs = random_state.duplicate(3) if isinstance(self.size, tuple): samples_h = self.size[0].draw_samples(nb_images, random_state=rngs[0]) samples_w = self.size[1].draw_samples(nb_images, random_state=rngs[1]) else: samples_h = self.size.draw_samples(nb_images, random_state=rngs[0]) samples_w = samples_h samples_ip = self.interpolation.draw_samples(nb_images, random_state=rngs[2]) return samples_h, samples_w, samples_ip @classmethod def _compute_height_width(cls, image_shape, sample_a, sample_b, size_order): imh, imw = image_shape[0:2] if size_order == 'SL': # size order: short, long if imh < imw: h, w = sample_a, sample_b else: w, h = sample_a, sample_b else: # size order: height, width h, w = sample_a, sample_b if ia.is_single_float(h): assert h > 0, "Expected 'h' to be >0, got %.4f" % (h,) h = int(np.round(imh * h)) h = h if h > 0 else 1 elif h == "keep": h = imh if ia.is_single_float(w): assert w > 0, "Expected 'w' to be >0, got %.4f" % (w,) w = int(np.round(imw * w)) w = w if w > 0 else 1 elif w == "keep": w = imw # at least the checks for keep-aspect-ratio must come after # the float checks, as they are dependent on the results # this is also why these are not written as elifs if h == "keep-aspect-ratio": h_per_w_orig = imh / imw h = int(np.round(w * h_per_w_orig)) if w == "keep-aspect-ratio": w_per_h_orig = imw / imh w = int(np.round(h * w_per_h_orig)) return h, w def get_parameters(self): """See :func:`~imgaug.augmenters.meta.Augmenter.get_parameters`.""" return [self.size, self.interpolation, self.size_order] class _CropAndPadSamplingResult(object): def __init__(self, crop_top, crop_right, crop_bottom, crop_left, pad_top, pad_right, pad_bottom, pad_left, pad_mode, pad_cval): self.crop_top = crop_top self.crop_right = crop_right self.crop_bottom = crop_bottom self.crop_left = crop_left self.pad_top = pad_top self.pad_right = pad_right self.pad_bottom = pad_bottom self.pad_left = pad_left self.pad_mode = pad_mode self.pad_cval = pad_cval @property def croppings(self): """Get absolute pixel amounts of croppings as a TRBL tuple.""" return self.crop_top, self.crop_right, self.crop_bottom, self.crop_left @property def paddings(self): """Get absolute pixel amounts of paddings as a TRBL tuple.""" return self.pad_top, self.pad_right, self.pad_bottom, self.pad_left class CropAndPad(meta.Augmenter): """Crop/pad images by pixel amounts or fractions of image sizes. Cropping removes pixels at the sides (i.e. extracts a subimage from a given full image). Padding adds pixels to the sides (e.g. black pixels). 
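    A minimal usage sketch (illustrative only; the detailed parameter
    semantics and further examples follow below):

    >>> import imgaug.augmenters as iaa
    >>> aug = iaa.CropAndPad(percent=(-0.2, 0.2))

    Per image and side, either crop (negative samples) or pad (positive
    samples) by up to ``20%`` of the corresponding image side.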
This augmenter will never crop images below a height or width of ``1``. .. note:: This augmenter automatically resizes images back to their original size after it has augmented them. To deactivate this, add the parameter ``keep_size=False``. Supported dtypes ---------------- if (keep_size=False): * ``uint8``: yes; fully tested * ``uint16``: yes; tested * ``uint32``: yes; tested * ``uint64``: yes; tested * ``int8``: yes; tested * ``int16``: yes; tested * ``int32``: yes; tested * ``int64``: yes; tested * ``float16``: yes; tested * ``float32``: yes; tested * ``float64``: yes; tested * ``float128``: yes; tested * ``bool``: yes; tested if (keep_size=True): minimum of ( ``imgaug.augmenters.size.CropAndPad(keep_size=False)``, :func:`~imgaug.imgaug.imresize_many_images` ) Parameters ---------- px : None or int or imgaug.parameters.StochasticParameter or tuple, optional The number of pixels to crop (negative values) or pad (positive values) on each side of the image. Either this or the parameter `percent` may be set, not both at the same time. * If ``None``, then pixel-based cropping/padding will not be used. * If ``int``, then that exact number of pixels will always be cropped/padded. * If ``StochasticParameter``, then that parameter will be used for each image. Four samples will be drawn per image (top, right, bottom, left), unless `sample_independently` is set to ``False``, as then only one value will be sampled per image and used for all sides. * If a ``tuple`` of two ``int`` s with values ``a`` and ``b``, then each side will be cropped/padded by a random amount sampled uniformly per image and side from the inteval ``[a, b]``. If however `sample_independently` is set to ``False``, only one value will be sampled per image and used for all sides. * If a ``tuple`` of four entries, then the entries represent top, right, bottom, left. Each entry may be a single ``int`` (always crop/pad by exactly that value), a ``tuple`` of two ``int`` s ``a`` and ``b`` (crop/pad by an amount within ``[a, b]``), a ``list`` of ``int`` s (crop/pad by a random value that is contained in the ``list``) or a ``StochasticParameter`` (sample the amount to crop/pad from that parameter). percent : None or number or imgaug.parameters.StochasticParameter or tuple, optional The number of pixels to crop (negative values) or pad (positive values) on each side of the image given as a *fraction* of the image height/width. E.g. if this is set to ``-0.1``, the augmenter will always crop away ``10%`` of the image's height at both the top and the bottom (both ``10%`` each), as well as ``10%`` of the width at the right and left. Expected value range is ``(-1.0, inf)``. Either this or the parameter `px` may be set, not both at the same time. * If ``None``, then fraction-based cropping/padding will not be used. * If ``number``, then that fraction will always be cropped/padded. * If ``StochasticParameter``, then that parameter will be used for each image. Four samples will be drawn per image (top, right, bottom, left). If however `sample_independently` is set to ``False``, only one value will be sampled per image and used for all sides. * If a ``tuple`` of two ``float`` s with values ``a`` and ``b``, then each side will be cropped/padded by a random fraction sampled uniformly per image and side from the interval ``[a, b]``. If however `sample_independently` is set to ``False``, only one value will be sampled per image and used for all sides. * If a ``tuple`` of four entries, then the entries represent top, right, bottom, left. 
Each entry may be a single ``float`` (always crop/pad by exactly that percent value), a ``tuple`` of two ``float`` s ``a`` and ``b`` (crop/pad by a fraction from ``[a, b]``), a ``list`` of ``float`` s (crop/pad by a random value that is contained in the list) or a ``StochasticParameter`` (sample the percentage to crop/pad from that parameter). pad_mode : imgaug.ALL or str or list of str or imgaug.parameters.StochasticParameter, optional Padding mode to use. The available modes match the numpy padding modes, i.e. ``constant``, ``edge``, ``linear_ramp``, ``maximum``, ``median``, ``minimum``, ``reflect``, ``symmetric``, ``wrap``. The modes ``constant`` and ``linear_ramp`` use extra values, which are provided by ``pad_cval`` when necessary. See :func:`~imgaug.imgaug.pad` for more details. * If ``imgaug.ALL``, then a random mode from all available modes will be sampled per image. * If a ``str``, it will be used as the pad mode for all images. * If a ``list`` of ``str``, a random one of these will be sampled per image and used as the mode. * If ``StochasticParameter``, a random mode will be sampled from this parameter per image. pad_cval : number or tuple of number list of number or imgaug.parameters.StochasticParameter, optional The constant value to use if the pad mode is ``constant`` or the end value to use if the mode is ``linear_ramp``. See :func:`~imgaug.imgaug.pad` for more details. * If ``number``, then that value will be used. * If a ``tuple`` of two ``number`` s and at least one of them is a ``float``, then a random number will be uniformly sampled per image from the continuous interval ``[a, b]`` and used as the value. If both ``number`` s are ``int`` s, the interval is discrete. * If a ``list`` of ``number``, then a random value will be chosen from the elements of the ``list`` and used as the value. * If ``StochasticParameter``, a random value will be sampled from that parameter per image. keep_size : bool, optional After cropping and padding, the result image will usually have a different height/width compared to the original input image. If this parameter is set to ``True``, then the cropped/padded image will be resized to the input image's size, i.e. the augmenter's output shape is always identical to the input shape. sample_independently : bool, optional If ``False`` *and* the values for `px`/`percent` result in exactly *one* probability distribution for all image sides, only one single value will be sampled from that probability distribution and used for all sides. I.e. the crop/pad amount then is the same for all sides. If ``True``, four values will be sampled independently, one per side. seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional See :func:`~imgaug.augmenters.meta.Augmenter.__init__`. name : None or str, optional See :func:`~imgaug.augmenters.meta.Augmenter.__init__`. **old_kwargs Outdated parameters. Avoid using these. Examples -------- >>> import imgaug.augmenters as iaa >>> aug = iaa.CropAndPad(px=(-10, 0)) Crop each side by a random pixel value sampled uniformly per image and side from the discrete interval ``[-10..0]``. >>> aug = iaa.CropAndPad(px=(0, 10)) Pad each side by a random pixel value sampled uniformly per image and side from the discrete interval ``[0..10]``. The padding happens by zero-padding, i.e. it adds black pixels (default setting). 
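    A further sketch, not part of the original example set; it only combines
    the `percent` and `pad_mode` semantics documented above:

    >>> aug = iaa.CropAndPad(percent=(-0.1, 0.1), pad_mode="reflect")

    Per image and side, crop (negative samples) or pad (positive samples) by
    up to ``10%`` of the corresponding image side; whenever padding happens,
    reflection padding is used.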
>>> aug = iaa.CropAndPad(px=(0, 10), pad_mode="edge") Pad each side by a random pixel value sampled uniformly per image and side from the discrete interval ``[0..10]``. The padding uses the ``edge`` mode from numpy's pad function, i.e. the pixel colors around the image sides are repeated. >>> aug = iaa.CropAndPad(px=(0, 10), pad_mode=["constant", "edge"]) Similar to the previous example, but uses zero-padding (``constant``) for half of the images and ``edge`` padding for the other half. >>> aug = iaa.CropAndPad(px=(0, 10), pad_mode=ia.ALL, pad_cval=(0, 255)) Similar to the previous example, but uses any available padding mode. In case the padding mode ends up being ``constant`` or ``linear_ramp``, and random intensity is uniformly sampled (once per image) from the discrete interval ``[0..255]`` and used as the intensity of the new pixels. >>> aug = iaa.CropAndPad(px=(0, 10), sample_independently=False) Pad each side by a random pixel value sampled uniformly once per image from the discrete interval ``[0..10]``. Each sampled value is used for *all* sides of the corresponding image. >>> aug = iaa.CropAndPad(px=(0, 10), keep_size=False) Pad each side by a random pixel value sampled uniformly per image and side from the discrete interval ``[0..10]``. Afterwards, do **not** resize the padded image back to the input image's size. This will increase the image's height and width by a maximum of ``20`` pixels. >>> aug = iaa.CropAndPad(px=((0, 10), (0, 5), (0, 10), (0, 5))) Pad the top and bottom by a random pixel value sampled uniformly from the discrete interval ``[0..10]``. Pad the left and right analogously by a random value sampled from ``[0..5]``. Each value is always sampled independently. >>> aug = iaa.CropAndPad(percent=(0, 0.1)) Pad each side by a random fraction sampled uniformly from the continuous interval ``[0.0, 0.10]``. The fraction is sampled once per image and side. E.g. a sampled fraction of ``0.1`` for the top side would pad by ``0.1*H``, where ``H`` is the height of the input image. >>> aug = iaa.CropAndPad( >>> percent=([0.05, 0.1], [0.05, 0.1], [0.05, 0.1], [0.05, 0.1])) Pads each side by either ``5%`` or ``10%``. The values are sampled once per side and image. >>> aug = iaa.CropAndPad(px=(-10, 10)) Sample uniformly per image and side a value ``v`` from the discrete range ``[-10..10]``. Then either crop (negative sample) or pad (positive sample) the side by ``v`` pixels. """ def __init__(self, px=None, percent=None, pad_mode="constant", pad_cval=0, keep_size=True, sample_independently=True, seed=None, name=None, **old_kwargs): # pylint: disable=invalid-name super(CropAndPad, self).__init__( seed=seed, name=name, **old_kwargs) self.mode, self.all_sides, self.top, self.right, self.bottom, \ self.left = self._handle_px_and_percent_args(px, percent) self.pad_mode = _handle_pad_mode_param(pad_mode) # TODO enable ALL here, like in e.g. 
Affine self.pad_cval = iap.handle_discrete_param( pad_cval, "pad_cval", value_range=None, tuple_to_uniform=True, list_to_choice=True, allow_floats=True) self.keep_size = keep_size self.sample_independently = sample_independently # set these to None to use the same values as sampled for the # images (not tested) self._pad_mode_heatmaps = "constant" self._pad_mode_segmentation_maps = "constant" self._pad_cval_heatmaps = 0.0 self._pad_cval_segmentation_maps = 0 @classmethod def _handle_px_and_percent_args(cls, px, percent): # pylint: disable=invalid-name all_sides = None top, right, bottom, left = None, None, None, None if px is None and percent is None: mode = "noop" elif px is not None and percent is not None: raise Exception("Can only pad by pixels or percent, not both.") elif px is not None: mode = "px" all_sides, top, right, bottom, left = cls._handle_px_arg(px) else: # = elif percent is not None: mode = "percent" all_sides, top, right, bottom, left = cls._handle_percent_arg( percent) return mode, all_sides, top, right, bottom, left @classmethod def _handle_px_arg(cls, px): # pylint: disable=invalid-name all_sides = None top, right, bottom, left = None, None, None, None if ia.is_single_integer(px): all_sides = iap.Deterministic(px) elif isinstance(px, tuple): assert len(px) in [2, 4], ( "Expected 'px' given as a tuple to contain 2 or 4 " "entries, got %d." % (len(px),)) def handle_param(p): if ia.is_single_integer(p): return iap.Deterministic(p) if isinstance(p, tuple): assert len(p) == 2, ( "Expected tuple of 2 values, got %d." % (len(p))) only_ints = ( ia.is_single_integer(p[0]) and ia.is_single_integer(p[1])) assert only_ints, ( "Expected tuple of integers, got %s and %s." % ( type(p[0]), type(p[1]))) return iap.DiscreteUniform(p[0], p[1]) if isinstance(p, list): assert len(p) > 0, ( "Expected non-empty list, but got empty one.") assert all([ia.is_single_integer(val) for val in p]), ( "Expected list of ints, got types %s." % ( ", ".join([str(type(v)) for v in p]))) return iap.Choice(p) if isinstance(p, iap.StochasticParameter): return p raise Exception( "Expected int, tuple of two ints, list of ints or " "StochasticParameter, got type %s." % (type(p),)) if len(px) == 2: all_sides = handle_param(px) else: # len == 4 top = handle_param(px[0]) right = handle_param(px[1]) bottom = handle_param(px[2]) left = handle_param(px[3]) elif isinstance(px, iap.StochasticParameter): top = right = bottom = left = px else: raise Exception( "Expected int, tuple of 4 " "ints/tuples/lists/StochasticParameters or " "StochasticParameter, got type %s." % (type(px),)) return all_sides, top, right, bottom, left @classmethod def _handle_percent_arg(cls, percent): all_sides = None top, right, bottom, left = None, None, None, None if ia.is_single_number(percent): assert percent > -1.0, ( "Expected 'percent' to be >-1.0, got %.4f." % (percent,)) all_sides = iap.Deterministic(percent) elif isinstance(percent, tuple): assert len(percent) in [2, 4], ( "Expected 'percent' given as a tuple to contain 2 or 4 " "entries, got %d." % (len(percent),)) def handle_param(p): if ia.is_single_number(p): return iap.Deterministic(p) if isinstance(p, tuple): assert len(p) == 2, ( "Expected tuple of 2 values, got %d." % (len(p),)) only_numbers = ( ia.is_single_number(p[0]) and ia.is_single_number(p[1])) assert only_numbers, ( "Expected tuple of numbers, got %s and %s." % ( type(p[0]), type(p[1]))) assert p[0] > -1.0 and p[1] > -1.0, ( "Expected tuple of values >-1.0, got %.4f and " "%.4f." 
% (p[0], p[1])) return iap.Uniform(p[0], p[1]) if isinstance(p, list): assert len(p) > 0, ( "Expected non-empty list, but got empty one.") assert all([ia.is_single_number(val) for val in p]), ( "Expected list of numbers, got types %s." % ( ", ".join([str(type(v)) for v in p]))) assert all([val > -1.0 for val in p]), ( "Expected list of values >-1.0, got values %s." % ( ", ".join(["%.4f" % (v,) for v in p]))) return iap.Choice(p) if isinstance(p, iap.StochasticParameter): return p raise Exception( "Expected int, tuple of two ints, list of ints or " "StochasticParameter, got type %s." % (type(p),)) if len(percent) == 2: all_sides = handle_param(percent) else: # len == 4 top = handle_param(percent[0]) right = handle_param(percent[1]) bottom = handle_param(percent[2]) left = handle_param(percent[3]) elif isinstance(percent, iap.StochasticParameter): top = right = bottom = left = percent else: raise Exception( "Expected number, tuple of 4 " "numbers/tuples/lists/StochasticParameters or " "StochasticParameter, got type %s." % (type(percent),)) return all_sides, top, right, bottom, left def _augment_batch_(self, batch, random_state, parents, hooks): shapes = batch.get_rowwise_shapes() samples = self._draw_samples(random_state, shapes) if batch.images is not None: batch.images = self._augment_images_by_samples(batch.images, samples) if batch.heatmaps is not None: batch.heatmaps = self._augment_maps_by_samples( batch.heatmaps, self._pad_mode_heatmaps, self._pad_cval_heatmaps, samples) if batch.segmentation_maps is not None: batch.segmentation_maps = self._augment_maps_by_samples( batch.segmentation_maps, self._pad_mode_segmentation_maps, self._pad_cval_segmentation_maps, samples) for augm_name in ["keypoints", "bounding_boxes", "polygons", "line_strings"]: augm_value = getattr(batch, augm_name) if augm_value is not None: func = functools.partial( self._augment_keypoints_by_samples, samples=samples) cbaois = self._apply_to_cbaois_as_keypoints(augm_value, func) setattr(batch, augm_name, cbaois) return batch def _augment_images_by_samples(self, images, samples): result = [] for i, image in enumerate(images): samples_i = samples[i] image_cr_pa = _crop_and_pad_arr( image, samples_i.croppings, samples_i.paddings, samples_i.pad_mode, samples_i.pad_cval, self.keep_size) result.append(image_cr_pa) if ia.is_np_array(images): if self.keep_size: result = np.array(result, dtype=images.dtype) else: nb_shapes = len({image.shape for image in result}) if nb_shapes == 1: result = np.array(result, dtype=images.dtype) return result def _augment_maps_by_samples(self, augmentables, pad_mode, pad_cval, samples): result = [] for i, augmentable in enumerate(augmentables): samples_img = samples[i] augmentable = _crop_and_pad_hms_or_segmaps_( augmentable, croppings_img=samples_img.croppings, paddings_img=samples_img.paddings, pad_mode=(pad_mode if pad_mode is not None else samples_img.pad_mode), pad_cval=(pad_cval if pad_cval is not None else samples_img.pad_cval), keep_size=self.keep_size ) result.append(augmentable) return result def _augment_keypoints_by_samples(self, keypoints_on_images, samples): result = [] for i, keypoints_on_image in enumerate(keypoints_on_images): samples_i = samples[i] kpsoi_aug = _crop_and_pad_kpsoi_( keypoints_on_image, croppings_img=samples_i.croppings, paddings_img=samples_i.paddings, keep_size=self.keep_size) result.append(kpsoi_aug) return result def _draw_samples(self, random_state, shapes): nb_rows = len(shapes) if self.mode == "noop": top = right = bottom = left = np.full((nb_rows,), 0, 
dtype=np.int32) else: if self.all_sides is not None: if self.sample_independently: samples = self.all_sides.draw_samples( (nb_rows, 4), random_state=random_state) top = samples[:, 0] right = samples[:, 1] bottom = samples[:, 2] left = samples[:, 3] else: sample = self.all_sides.draw_samples( (nb_rows,), random_state=random_state) top = right = bottom = left = sample else: top = self.top.draw_samples( (nb_rows,), random_state=random_state) right = self.right.draw_samples( (nb_rows,), random_state=random_state) bottom = self.bottom.draw_samples( (nb_rows,), random_state=random_state) left = self.left.draw_samples( (nb_rows,), random_state=random_state) if self.mode == "px": # no change necessary for pixel values pass elif self.mode == "percent": # percentage values have to be transformed to pixel values shapes_arr = np.array([shape[0:2] for shape in shapes], dtype=np.float32) heights = shapes_arr[:, 0] widths = shapes_arr[:, 1] top = np.round(heights * top).astype(np.int32) right = np.round(widths * right).astype(np.int32) bottom = np.round(heights * bottom).astype(np.int32) left = np.round(widths * left).astype(np.int32) else: raise Exception("Invalid mode") def _only_above_zero(arr): arr = np.copy(arr) mask = (arr < 0) arr[mask] = 0 return arr crop_top = _only_above_zero((-1) * top) crop_right = _only_above_zero((-1) * right) crop_bottom = _only_above_zero((-1) * bottom) crop_left = _only_above_zero((-1) * left) pad_top = _only_above_zero(top) pad_right = _only_above_zero(right) pad_bottom = _only_above_zero(bottom) pad_left = _only_above_zero(left) pad_mode = self.pad_mode.draw_samples((nb_rows,), random_state=random_state) pad_cval = self.pad_cval.draw_samples((nb_rows,), random_state=random_state) # TODO vectorize this part -- especially return only one instance result = [] for i, shape in enumerate(shapes): height, width = shape[0:2] crop_top_i, crop_right_i, crop_bottom_i, crop_left_i = \ _crop_prevent_zero_size( height, width, crop_top[i], crop_right[i], crop_bottom[i], crop_left[i]) # add here any_crop_y to not warn in case of zero height/width # images any_crop_y = (crop_top_i > 0 or crop_bottom_i > 0) if any_crop_y and crop_top_i + crop_bottom_i >= height: ia.warn( "Expected generated crop amounts in CropAndPad for top and " "bottom image side to be less than the image's height, but " "got %d (top) and %d (bottom) vs. image height %d. This " "will result in an image with output height=1 (if input " "height was >=1) or output height=0 (if input height " "was 0)." % (crop_top_i, crop_bottom_i, height)) # add here any_crop_x to not warn in case of zero height/width # images any_crop_x = (crop_left_i > 0 or crop_right_i > 0) if any_crop_x and crop_left_i + crop_right_i >= width: ia.warn( "Expected generated crop amounts in CropAndPad for left " "and right image side to be less than the image's width, " "but got %d (left) and %d (right) vs. image width %d. " "This will result in an image with output width=1 (if " "input width was >=1) or output width=0 (if input width " "was 0)." 
% (crop_left_i, crop_right_i, width)) result.append( _CropAndPadSamplingResult( crop_top=crop_top_i, crop_right=crop_right_i, crop_bottom=crop_bottom_i, crop_left=crop_left_i, pad_top=pad_top[i], pad_right=pad_right[i], pad_bottom=pad_bottom[i], pad_left=pad_left[i], pad_mode=pad_mode[i], pad_cval=pad_cval[i])) return result def get_parameters(self): """See :func:`~imgaug.augmenters.meta.Augmenter.get_parameters`.""" return [self.all_sides, self.top, self.right, self.bottom, self.left, self.pad_mode, self.pad_cval] class Pad(CropAndPad): """Pad images, i.e. adds columns/rows of pixels to them. Supported dtypes ---------------- See :class:`~imgaug.augmenters.size.CropAndPad`. Parameters ---------- px : None or int or imgaug.parameters.StochasticParameter or tuple, optional The number of pixels to pad on each side of the image. Expected value range is ``[0, inf)``. Either this or the parameter `percent` may be set, not both at the same time. * If ``None``, then pixel-based padding will not be used. * If ``int``, then that exact number of pixels will always be padded. * If ``StochasticParameter``, then that parameter will be used for each image. Four samples will be drawn per image (top, right, bottom, left), unless `sample_independently` is set to ``False``, as then only one value will be sampled per image and used for all sides. * If a ``tuple`` of two ``int`` s with values ``a`` and ``b``, then each side will be padded by a random amount sampled uniformly per image and side from the inteval ``[a, b]``. If however `sample_independently` is set to ``False``, only one value will be sampled per image and used for all sides. * If a ``tuple`` of four entries, then the entries represent top, right, bottom, left. Each entry may be a single ``int`` (always pad by exactly that value), a ``tuple`` of two ``int`` s ``a`` and ``b`` (pad by an amount within ``[a, b]``), a ``list`` of ``int`` s (pad by a random value that is contained in the ``list``) or a ``StochasticParameter`` (sample the amount to pad from that parameter). percent : None or int or float or imgaug.parameters.StochasticParameter or tuple, optional The number of pixels to pad on each side of the image given as a *fraction* of the image height/width. E.g. if this is set to ``0.1``, the augmenter will always pad ``10%`` of the image's height at both the top and the bottom (both ``10%`` each), as well as ``10%`` of the width at the right and left. Expected value range is ``[0.0, inf)``. Either this or the parameter `px` may be set, not both at the same time. * If ``None``, then fraction-based padding will not be used. * If ``number``, then that fraction will always be padded. * If ``StochasticParameter``, then that parameter will be used for each image. Four samples will be drawn per image (top, right, bottom, left). If however `sample_independently` is set to ``False``, only one value will be sampled per image and used for all sides. * If a ``tuple`` of two ``float`` s with values ``a`` and ``b``, then each side will be padded by a random fraction sampled uniformly per image and side from the interval ``[a, b]``. If however `sample_independently` is set to ``False``, only one value will be sampled per image and used for all sides. * If a ``tuple`` of four entries, then the entries represent top, right, bottom, left. 
Each entry may be a single ``float`` (always pad by exactly that fraction), a ``tuple`` of two ``float`` s ``a`` and ``b`` (pad by a fraction from ``[a, b]``), a ``list`` of ``float`` s (pad by a random value that is contained in the list) or a ``StochasticParameter`` (sample the percentage to pad from that parameter). pad_mode : imgaug.ALL or str or list of str or imgaug.parameters.StochasticParameter, optional Padding mode to use. The available modes match the numpy padding modes, i.e. ``constant``, ``edge``, ``linear_ramp``, ``maximum``, ``median``, ``minimum``, ``reflect``, ``symmetric``, ``wrap``. The modes ``constant`` and ``linear_ramp`` use extra values, which are provided by ``pad_cval`` when necessary. See :func:`~imgaug.imgaug.pad` for more details. * If ``imgaug.ALL``, then a random mode from all available modes will be sampled per image. * If a ``str``, it will be used as the pad mode for all images. * If a ``list`` of ``str``, a random one of these will be sampled per image and used as the mode. * If ``StochasticParameter``, a random mode will be sampled from this parameter per image. pad_cval : number or tuple of number list of number or imgaug.parameters.StochasticParameter, optional The constant value to use if the pad mode is ``constant`` or the end value to use if the mode is ``linear_ramp``. See :func:`~imgaug.imgaug.pad` for more details. * If ``number``, then that value will be used. * If a ``tuple`` of two ``number`` s and at least one of them is a ``float``, then a random number will be uniformly sampled per image from the continuous interval ``[a, b]`` and used as the value. If both ``number`` s are ``int`` s, the interval is discrete. * If a ``list`` of ``number``, then a random value will be chosen from the elements of the ``list`` and used as the value. * If ``StochasticParameter``, a random value will be sampled from that parameter per image. keep_size : bool, optional After padding, the result image will usually have a different height/width compared to the original input image. If this parameter is set to ``True``, then the padded image will be resized to the input image's size, i.e. the augmenter's output shape is always identical to the input shape. sample_independently : bool, optional If ``False`` *and* the values for `px`/`percent` result in exactly *one* probability distribution for all image sides, only one single value will be sampled from that probability distribution and used for all sides. I.e. the pad amount then is the same for all sides. If ``True``, four values will be sampled independently, one per side. seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional See :func:`~imgaug.augmenters.meta.Augmenter.__init__`. name : None or str, optional See :func:`~imgaug.augmenters.meta.Augmenter.__init__`. **old_kwargs Outdated parameters. Avoid using these. Examples -------- >>> import imgaug.augmenters as iaa >>> aug = iaa.Pad(px=(0, 10)) Pad each side by a random pixel value sampled uniformly per image and side from the discrete interval ``[0..10]``. The padding happens by zero-padding, i.e. it adds black pixels (default setting). >>> aug = iaa.Pad(px=(0, 10), pad_mode="edge") Pad each side by a random pixel value sampled uniformly per image and side from the discrete interval ``[0..10]``. The padding uses the ``edge`` mode from numpy's pad function, i.e. the pixel colors around the image sides are repeated. 
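    A further sketch, not part of the original example set; it only combines
    the `percent` and `pad_mode` options documented above:

    >>> aug = iaa.Pad(percent=0.1, pad_mode="symmetric")

    Pad every side by ``10%`` of the corresponding image side (rounded to
    whole pixels), using symmetric padding.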
>>> aug = iaa.Pad(px=(0, 10), pad_mode=["constant", "edge"]) Similar to the previous example, but uses zero-padding (``constant``) for half of the images and ``edge`` padding for the other half. >>> aug = iaa.Pad(px=(0, 10), pad_mode=ia.ALL, pad_cval=(0, 255)) Similar to the previous example, but uses any available padding mode. In case the padding mode ends up being ``constant`` or ``linear_ramp``, and random intensity is uniformly sampled (once per image) from the discrete interval ``[0..255]`` and used as the intensity of the new pixels. >>> aug = iaa.Pad(px=(0, 10), sample_independently=False) Pad each side by a random pixel value sampled uniformly once per image from the discrete interval ``[0..10]``. Each sampled value is used for *all* sides of the corresponding image. >>> aug = iaa.Pad(px=(0, 10), keep_size=False) Pad each side by a random pixel value sampled uniformly per image and side from the discrete interval ``[0..10]``. Afterwards, do **not** resize the padded image back to the input image's size. This will increase the image's height and width by a maximum of ``20`` pixels. >>> aug = iaa.Pad(px=((0, 10), (0, 5), (0, 10), (0, 5))) Pad the top and bottom by a random pixel value sampled uniformly from the discrete interval ``[0..10]``. Pad the left and right analogously by a random value sampled from ``[0..5]``. Each value is always sampled independently. >>> aug = iaa.Pad(percent=(0, 0.1)) Pad each side by a random fraction sampled uniformly from the continuous interval ``[0.0, 0.10]``. The fraction is sampled once per image and side. E.g. a sampled fraction of ``0.1`` for the top side would pad by ``0.1*H``, where ``H`` is the height of the input image. >>> aug = iaa.Pad( >>> percent=([0.05, 0.1], [0.05, 0.1], [0.05, 0.1], [0.05, 0.1])) Pads each side by either ``5%`` or ``10%``. The values are sampled once per side and image. """ def __init__(self, px=None, percent=None, pad_mode="constant", pad_cval=0, keep_size=True, sample_independently=True, seed=None, name=None, **old_kwargs): def recursive_validate(value): if value is None: return value if ia.is_single_number(value): assert value >= 0, "Expected value >0, got %.4f" % (value,) return value if isinstance(value, iap.StochasticParameter): return value if isinstance(value, tuple): return tuple([recursive_validate(v_) for v_ in value]) if isinstance(value, list): return [recursive_validate(v_) for v_ in value] raise Exception( "Expected None or int or float or StochasticParameter or " "list or tuple, got %s." % (type(value),)) px = recursive_validate(px) percent = recursive_validate(percent) super(Pad, self).__init__( px=px, percent=percent, pad_mode=pad_mode, pad_cval=pad_cval, keep_size=keep_size, sample_independently=sample_independently, seed=seed, name=name, **old_kwargs) class Crop(CropAndPad): """Crop images, i.e. remove columns/rows of pixels at the sides of images. This augmenter allows to extract smaller-sized subimages from given full-sized input images. The number of pixels to cut off may be defined in absolute values or as fractions of the image sizes. This augmenter will never crop images below a height or width of ``1``. Supported dtypes ---------------- See :class:`~imgaug.augmenters.size.CropAndPad`. Parameters ---------- px : None or int or imgaug.parameters.StochasticParameter or tuple, optional The number of pixels to crop on each side of the image. Expected value range is ``[0, inf)``. Either this or the parameter `percent` may be set, not both at the same time. 
        * If ``None``, then pixel-based cropping will not be used.
        * If ``int``, then that exact number of pixels will always be
          cropped.
        * If ``StochasticParameter``, then that parameter will be used for
          each image. Four samples will be drawn per image (top, right,
          bottom, left), unless `sample_independently` is set to ``False``,
          as then only one value will be sampled per image and used for all
          sides.
        * If a ``tuple`` of two ``int`` s with values ``a`` and ``b``, then
          each side will be cropped by a random amount sampled uniformly per
          image and side from the interval ``[a, b]``. If however
          `sample_independently` is set to ``False``, only one value will be
          sampled per image and used for all sides.
        * If a ``tuple`` of four entries, then the entries represent top,
          right, bottom, left. Each entry may be a single ``int`` (always
          crop by exactly that value), a ``tuple`` of two ``int`` s ``a``
          and ``b`` (crop by an amount within ``[a, b]``), a ``list`` of
          ``int`` s (crop by a random value that is contained in the
          ``list``) or a ``StochasticParameter`` (sample the amount to crop
          from that parameter).

    percent : None or int or float or imgaug.parameters.StochasticParameter or tuple, optional
        The number of pixels to crop on each side of the image given as a
        *fraction* of the image height/width. E.g. if this is set to
        ``0.1``, the augmenter will always crop ``10%`` of the image's
        height at both the top and the bottom (both ``10%`` each), as well
        as ``10%`` of the width at the right and left.
        Expected value range is ``[0.0, 1.0)``.
        Either this or the parameter `px` may be set, not both at the same
        time.

        * If ``None``, then fraction-based cropping will not be used.
        * If ``number``, then that fraction will always be cropped.
        * If ``StochasticParameter``, then that parameter will be used for
          each image. Four samples will be drawn per image (top, right,
          bottom, left). If however `sample_independently` is set to
          ``False``, only one value will be sampled per image and used for
          all sides.
        * If a ``tuple`` of two ``float`` s with values ``a`` and ``b``,
          then each side will be cropped by a random fraction sampled
          uniformly per image and side from the interval ``[a, b]``. If
          however `sample_independently` is set to ``False``, only one value
          will be sampled per image and used for all sides.
        * If a ``tuple`` of four entries, then the entries represent top,
          right, bottom, left. Each entry may be a single ``float`` (always
          crop by exactly that fraction), a ``tuple`` of two ``float`` s
          ``a`` and ``b`` (crop by a fraction from ``[a, b]``), a ``list``
          of ``float`` s (crop by a random value that is contained in the
          list) or a ``StochasticParameter`` (sample the percentage to crop
          from that parameter).

    keep_size : bool, optional
        After cropping, the result image will usually have a different
        height/width compared to the original input image. If this parameter
        is set to ``True``, then the cropped image will be resized to the
        input image's size, i.e. the augmenter's output shape is always
        identical to the input shape.

    sample_independently : bool, optional
        If ``False`` *and* the values for `px`/`percent` result in exactly
        *one* probability distribution for all image sides, only one single
        value will be sampled from that probability distribution and used
        for all sides. I.e. the crop amount then is the same for all sides.
        If ``True``, four values will be sampled independently, one per
        side.
seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional See :func:`~imgaug.augmenters.meta.Augmenter.__init__`. name : None or str, optional See :func:`~imgaug.augmenters.meta.Augmenter.__init__`. **old_kwargs Outdated parameters. Avoid using these. Examples -------- >>> import imgaug.augmenters as iaa >>> aug = iaa.Crop(px=(0, 10)) Crop each side by a random pixel value sampled uniformly per image and side from the discrete interval ``[0..10]``. >>> aug = iaa.Crop(px=(0, 10), sample_independently=False) Crop each side by a random pixel value sampled uniformly once per image from the discrete interval ``[0..10]``. Each sampled value is used for *all* sides of the corresponding image. >>> aug = iaa.Crop(px=(0, 10), keep_size=False) Crop each side by a random pixel value sampled uniformly per image and side from the discrete interval ``[0..10]``. Afterwards, do **not** resize the cropped image back to the input image's size. This will decrease the image's height and width by a maximum of ``20`` pixels. >>> aug = iaa.Crop(px=((0, 10), (0, 5), (0, 10), (0, 5))) Crop the top and bottom by a random pixel value sampled uniformly from the discrete interval ``[0..10]``. Crop the left and right analogously by a random value sampled from ``[0..5]``. Each value is always sampled independently. >>> aug = iaa.Crop(percent=(0, 0.1)) Crop each side by a random fraction sampled uniformly from the continuous interval ``[0.0, 0.10]``. The fraction is sampled once per image and side. E.g. a sampled fraction of ``0.1`` for the top side would crop by ``0.1*H``, where ``H`` is the height of the input image. >>> aug = iaa.Crop( >>> percent=([0.05, 0.1], [0.05, 0.1], [0.05, 0.1], [0.05, 0.1])) Crops each side by either ``5%`` or ``10%``. The values are sampled once per side and image. """ def __init__(self, px=None, percent=None, keep_size=True, sample_independently=True, seed=None, name=None, **old_kwargs): def recursive_negate(value): if value is None: return value if ia.is_single_number(value): assert value >= 0, "Expected value >0, got %.4f." % (value,) return -value if isinstance(value, iap.StochasticParameter): return iap.Multiply(value, -1) if isinstance(value, tuple): return tuple([recursive_negate(v_) for v_ in value]) if isinstance(value, list): return [recursive_negate(v_) for v_ in value] raise Exception( "Expected None or int or float or StochasticParameter or " "list or tuple, got %s." % (type(value),)) px = recursive_negate(px) percent = recursive_negate(percent) super(Crop, self).__init__( px=px, percent=percent, keep_size=keep_size, sample_independently=sample_independently, seed=seed, name=name, **old_kwargs) # TODO maybe rename this to PadToMinimumSize? # TODO this is very similar to CropAndPad, maybe add a way to generate crop # values imagewise via a callback in in CropAndPad? # TODO why is padding mode and cval here called pad_mode, pad_cval but in other # cases mode/cval? class PadToFixedSize(meta.Augmenter): """Pad images to a predefined minimum width and/or height. If images are already at the minimum width/height or are larger, they will not be padded. Note that this also means that images will not be cropped if they exceed the required width/height. The augmenter randomly decides per image how to distribute the required padding amounts over the image axis. E.g. 
if 2px have to be padded on the left or right to reach the required width, the augmenter will sometimes add 2px to the left and 0px to the right, sometimes add 2px to the right and 0px to the left and sometimes add 1px to both sides. Set `position` to ``center`` to prevent that. Supported dtypes ---------------- See :func:`~imgaug.augmenters.size.pad`. Parameters ---------- width : int or None Pad images up to this minimum width. If ``None``, image widths will not be altered. height : int or None Pad images up to this minimum height. If ``None``, image heights will not be altered. pad_mode : imgaug.ALL or str or list of str or imgaug.parameters.StochasticParameter, optional See :func:`~imgaug.augmenters.size.CropAndPad.__init__`. pad_cval : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional See :func:`~imgaug.augmenters.size.CropAndPad.__init__`. position : {'uniform', 'normal', 'center', 'left-top', 'left-center', 'left-bottom', 'center-top', 'center-center', 'center-bottom', 'right-top', 'right-center', 'right-bottom'} or tuple of float or StochasticParameter or tuple of StochasticParameter, optional Sets the center point of the padding, which determines how the required padding amounts are distributed to each side. For a ``tuple`` ``(a, b)``, both ``a`` and ``b`` are expected to be in range ``[0.0, 1.0]`` and describe the fraction of padding applied to the left/right (low/high values for ``a``) and the fraction of padding applied to the top/bottom (low/high values for ``b``). A padding position at ``(0.5, 0.5)`` would be the center of the image and distribute the padding equally to all sides. A padding position at ``(0.0, 1.0)`` would be the left-bottom and would apply 100% of the required padding to the bottom and left sides of the image so that the bottom left corner becomes more and more the new image center (depending on how much is padded). * If string ``uniform`` then the share of padding is randomly and uniformly distributed over each side. Equivalent to ``(Uniform(0.0, 1.0), Uniform(0.0, 1.0))``. * If string ``normal`` then the share of padding is distributed based on a normal distribution, leading to a focus on the center of the images. Equivalent to ``(Clip(Normal(0.5, 0.45/2), 0, 1), Clip(Normal(0.5, 0.45/2), 0, 1))``. * If string ``center`` then center point of the padding is identical to the image center. Equivalent to ``(0.5, 0.5)``. * If a string matching regex ``^(left|center|right)-(top|center|bottom)$``, e.g. ``left-top`` or ``center-bottom`` then sets the center point of the padding to the X-Y position matching that description. * If a tuple of float, then expected to have exactly two entries between ``0.0`` and ``1.0``, which will always be used as the combination the position matching (x, y) form. * If a ``StochasticParameter``, then that parameter will be queried once per call to ``augment_*()`` to get ``Nx2`` center positions in ``(x, y)`` form (with ``N`` the number of images). * If a ``tuple`` of ``StochasticParameter``, then expected to have exactly two entries that will both be queried per call to ``augment_*()``, each for ``(N,)`` values, to get the center positions. First parameter is used for ``x`` coordinates, second for ``y`` coordinates. seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional See :func:`~imgaug.augmenters.meta.Augmenter.__init__`. 
name : None or str, optional See :func:`~imgaug.augmenters.meta.Augmenter.__init__`. **old_kwargs Outdated parameters. Avoid using these. Examples -------- >>> import imgaug.augmenters as iaa >>> aug = iaa.PadToFixedSize(width=100, height=100) For image sides smaller than ``100`` pixels, pad to ``100`` pixels. Do nothing for the other edges. The padding is randomly (uniformly) distributed over the sides, so that e.g. sometimes most of the required padding is applied to the left, sometimes to the right (analogous top/bottom). >>> aug = iaa.PadToFixedSize(width=100, height=100, position="center") For image sides smaller than ``100`` pixels, pad to ``100`` pixels. Do nothing for the other image sides. The padding is always equally distributed over the left/right and top/bottom sides. >>> aug = iaa.PadToFixedSize(width=100, height=100, pad_mode=ia.ALL) For image sides smaller than ``100`` pixels, pad to ``100`` pixels and use any possible padding mode for that. Do nothing for the other image sides. The padding is always equally distributed over the left/right and top/bottom sides. >>> aug = iaa.Sequential([ >>> iaa.PadToFixedSize(width=100, height=100), >>> iaa.CropToFixedSize(width=100, height=100) >>> ]) Pad images smaller than ``100x100`` until they reach ``100x100``. Analogously, crop images larger than ``100x100`` until they reach ``100x100``. The output images therefore have a fixed size of ``100x100``. """ def __init__(self, width, height, pad_mode="constant", pad_cval=0, position="uniform", seed=None, name=None, **old_kwargs): super(PadToFixedSize, self).__init__( seed=seed, name=name, **old_kwargs) self.size = (width, height) # Position of where to pad. The further to the top left this is, the # larger the share of pixels that will be added to the top and left # sides. I.e. set to (Deterministic(0.0), Deterministic(0.0)) to only # add at the top and left, (Deterministic(1.0), Deterministic(1.0)) # to only add at the bottom right. Analogously (0.5, 0.5) pads equally # on both axis, (0.0, 1.0) pads left and bottom, (1.0, 0.0) pads right # and top. self.position = _handle_position_parameter(position) self.pad_mode = _handle_pad_mode_param(pad_mode) # TODO enable ALL here like in eg Affine self.pad_cval = iap.handle_discrete_param( pad_cval, "pad_cval", value_range=None, tuple_to_uniform=True, list_to_choice=True, allow_floats=True) # set these to None to use the same values as sampled for the # images (not tested) self._pad_mode_heatmaps = "constant" self._pad_mode_segmentation_maps = "constant" self._pad_cval_heatmaps = 0.0 self._pad_cval_segmentation_maps = 0 def _augment_batch_(self, batch, random_state, parents, hooks): # Providing the whole batch to _draw_samples() would not be necessary # for this augmenter. The number of rows would be sufficient. This # formulation however enables derived augmenters to use rowwise shapes # without having to compute them here for this augmenter. 
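        # Note: `samples` below is a tuple of
        # (sizes, pad_xs, pad_ys, pad_modes, pad_cvals) with one entry per
        # row of the batch; see _draw_samples() further down.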
        samples = self._draw_samples(batch, random_state)

        if batch.images is not None:
            batch.images = self._augment_images_by_samples(batch.images,
                                                           samples)

        if batch.heatmaps is not None:
            batch.heatmaps = self._augment_maps_by_samples(
                batch.heatmaps, samples, self._pad_mode_heatmaps,
                self._pad_cval_heatmaps)

        if batch.segmentation_maps is not None:
            batch.segmentation_maps = self._augment_maps_by_samples(
                batch.segmentation_maps, samples,
                self._pad_mode_segmentation_maps,
                self._pad_cval_segmentation_maps)

        for augm_name in ["keypoints", "bounding_boxes", "polygons",
                          "line_strings"]:
            augm_value = getattr(batch, augm_name)
            if augm_value is not None:
                func = functools.partial(
                    self._augment_keypoints_by_samples,
                    samples=samples)
                cbaois = self._apply_to_cbaois_as_keypoints(augm_value, func)
                setattr(batch, augm_name, cbaois)

        return batch

    def _augment_images_by_samples(self, images, samples):
        result = []
        sizes, pad_xs, pad_ys, pad_modes, pad_cvals = samples
        for i, (image, size) in enumerate(zip(images, sizes)):
            width_min, height_min = size
            height_image, width_image = image.shape[:2]
            paddings = self._calculate_paddings(height_image, width_image,
                                                height_min, width_min,
                                                pad_xs[i], pad_ys[i])
            image = _crop_and_pad_arr(
                image, (0, 0, 0, 0), paddings, pad_modes[i], pad_cvals[i],
                keep_size=False)

            result.append(image)

        # TODO result is always a list. Should this be converted to an array
        # if possible (not guaranteed that all images have same size,
        # some might have been larger than desired height/width)
        return result

    def _augment_keypoints_by_samples(self, keypoints_on_images, samples):
        result = []
        sizes, pad_xs, pad_ys, _, _ = samples
        for i, (kpsoi, size) in enumerate(zip(keypoints_on_images, sizes)):
            width_min, height_min = size
            height_image, width_image = kpsoi.shape[:2]
            paddings_img = self._calculate_paddings(height_image, width_image,
                                                    height_min, width_min,
                                                    pad_xs[i], pad_ys[i])
            keypoints_padded = _crop_and_pad_kpsoi_(
                kpsoi, (0, 0, 0, 0), paddings_img, keep_size=False)

            result.append(keypoints_padded)

        return result

    def _augment_maps_by_samples(self, augmentables, samples, pad_mode,
                                 pad_cval):
        sizes, pad_xs, pad_ys, pad_modes, pad_cvals = samples
        for i, (augmentable, size) in enumerate(zip(augmentables, sizes)):
            width_min, height_min = size
            height_img, width_img = augmentable.shape[:2]
            paddings_img = self._calculate_paddings(
                height_img, width_img, height_min, width_min,
                pad_xs[i], pad_ys[i])

            # TODO for the previous method (and likely the new/current one
            # too):
            # for 30x30 padded to 32x32 with 15x15 heatmaps this results
            # in paddings of 1 on each side (assuming
            # position=(0.5, 0.5)) giving 17x17 heatmaps when they should
            # be 16x16. Error is due to each side getting projected 0.5
            # padding which is rounded to 1. This doesn't seem right.
augmentables[i] = _crop_and_pad_hms_or_segmaps_( augmentables[i], (0, 0, 0, 0), paddings_img, pad_mode=pad_mode if pad_mode is not None else pad_modes[i], pad_cval=pad_cval if pad_cval is not None else pad_cvals[i], keep_size=False) return augmentables def _draw_samples(self, batch, random_state): nb_images = batch.nb_rows rngs = random_state.duplicate(4) if isinstance(self.position, tuple): pad_xs = self.position[0].draw_samples(nb_images, random_state=rngs[0]) pad_ys = self.position[1].draw_samples(nb_images, random_state=rngs[1]) else: pads = self.position.draw_samples((nb_images, 2), random_state=rngs[0]) pad_xs = pads[:, 0] pad_ys = pads[:, 1] pad_modes = self.pad_mode.draw_samples(nb_images, random_state=rngs[2]) pad_cvals = self.pad_cval.draw_samples(nb_images, random_state=rngs[3]) # We return here the sizes even though they are static as it allows # derived augmenters to define image-specific heights/widths. return [self.size] * nb_images, pad_xs, pad_ys, pad_modes, pad_cvals @classmethod def _calculate_paddings(cls, height_image, width_image, height_min, width_min, pad_xs_i, pad_ys_i): pad_top = 0 pad_right = 0 pad_bottom = 0 pad_left = 0 if width_min is not None and width_image < width_min: pad_total_x = width_min - width_image pad_left = int((1-pad_xs_i) * pad_total_x) pad_right = pad_total_x - pad_left if height_min is not None and height_image < height_min: pad_total_y = height_min - height_image pad_top = int((1-pad_ys_i) * pad_total_y) pad_bottom = pad_total_y - pad_top return pad_top, pad_right, pad_bottom, pad_left def get_parameters(self): """See :func:`~imgaug.augmenters.meta.Augmenter.get_parameters`.""" return [self.size[0], self.size[1], self.pad_mode, self.pad_cval, self.position] class CenterPadToFixedSize(PadToFixedSize): """Pad images equally on all sides up to given minimum heights/widths. This is an alias for :class:`~imgaug.augmenters.size.PadToFixedSize` with ``position="center"``. It spreads the pad amounts equally over all image sides, while :class:`~imgaug.augmenters.size.PadToFixedSize` by defaults spreads them randomly. Supported dtypes ---------------- See :class:`~imgaug.augmenters.size.PadToFixedSize`. Parameters ---------- width : int or None See :func:`PadToFixedSize.__init__`. height : int or None See :func:`PadToFixedSize.__init__`. pad_mode : imgaug.ALL or str or list of str or imgaug.parameters.StochasticParameter, optional See :func:`PadToFixedSize.__init__`. pad_cval : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional See :func:`PadToFixedSize.__init__`. seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional See :func:`~imgaug.augmenters.meta.Augmenter.__init__`. name : None or str, optional See :func:`~imgaug.augmenters.meta.Augmenter.__init__`. **old_kwargs Outdated parameters. Avoid using these. Examples -------- >>> import imgaug.augmenters as iaa >>> aug = iaa.CenterPadToFixedSize(height=20, width=30) Create an augmenter that pads images up to ``20x30``, with the padded rows added *equally* on the top and bottom (analogous for the padded columns). """ def __init__(self, width, height, pad_mode="constant", pad_cval=0, seed=None, name=None, **old_kwargs): super(CenterPadToFixedSize, self).__init__( width=width, height=height, pad_mode=pad_mode, pad_cval=pad_cval, position="center", seed=seed, name=name, **old_kwargs) # TODO maybe rename this to CropToMaximumSize ? 
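# Illustrative note on PadToFixedSize._calculate_paddings() above (values
# chosen purely for demonstration): for an image of width 90 padded to a
# minimum width of 100 with pad_xs_i=0.25, pad_total_x is 10 and
# pad_left = int((1 - 0.25) * 10) = 7, pad_right = 10 - 7 = 3, i.e. low
# x-positions shift most of the padding to the left side (analogous for the
# y-axis).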
# TODO this is very similar to CropAndPad, maybe add a way to generate crop # values imagewise via a callback in in CropAndPad? # TODO add crop() function in imgaug, similar to pad class CropToFixedSize(meta.Augmenter): """Crop images down to a predefined maximum width and/or height. If images are already at the maximum width/height or are smaller, they will not be cropped. Note that this also means that images will not be padded if they are below the required width/height. The augmenter randomly decides per image how to distribute the required cropping amounts over the image axis. E.g. if 2px have to be cropped on the left or right to reach the required width, the augmenter will sometimes remove 2px from the left and 0px from the right, sometimes remove 2px from the right and 0px from the left and sometimes remove 1px from both sides. Set `position` to ``center`` to prevent that. Supported dtypes ---------------- * ``uint8``: yes; fully tested * ``uint16``: yes; tested * ``uint32``: yes; tested * ``uint64``: yes; tested * ``int8``: yes; tested * ``int16``: yes; tested * ``int32``: yes; tested * ``int64``: yes; tested * ``float16``: yes; tested * ``float32``: yes; tested * ``float64``: yes; tested * ``float128``: yes; tested * ``bool``: yes; tested Parameters ---------- width : int or None Crop images down to this maximum width. If ``None``, image widths will not be altered. height : int or None Crop images down to this maximum height. If ``None``, image heights will not be altered. position : {'uniform', 'normal', 'center', 'left-top', 'left-center', 'left-bottom', 'center-top', 'center-center', 'center-bottom', 'right-top', 'right-center', 'right-bottom'} or tuple of float or StochasticParameter or tuple of StochasticParameter, optional Sets the center point of the cropping, which determines how the required cropping amounts are distributed to each side. For a ``tuple`` ``(a, b)``, both ``a`` and ``b`` are expected to be in range ``[0.0, 1.0]`` and describe the fraction of cropping applied to the left/right (low/high values for ``a``) and the fraction of cropping applied to the top/bottom (low/high values for ``b``). A cropping position at ``(0.5, 0.5)`` would be the center of the image and distribute the cropping equally over all sides. A cropping position at ``(1.0, 0.0)`` would be the right-top and would apply 100% of the required cropping to the right and top sides of the image. * If string ``uniform`` then the share of cropping is randomly and uniformly distributed over each side. Equivalent to ``(Uniform(0.0, 1.0), Uniform(0.0, 1.0))``. * If string ``normal`` then the share of cropping is distributed based on a normal distribution, leading to a focus on the center of the images. Equivalent to ``(Clip(Normal(0.5, 0.45/2), 0, 1), Clip(Normal(0.5, 0.45/2), 0, 1))``. * If string ``center`` then center point of the cropping is identical to the image center. Equivalent to ``(0.5, 0.5)``. * If a string matching regex ``^(left|center|right)-(top|center|bottom)$``, e.g. ``left-top`` or ``center-bottom`` then sets the center point of the cropping to the X-Y position matching that description. * If a tuple of float, then expected to have exactly two entries between ``0.0`` and ``1.0``, which will always be used as the combination the position matching (x, y) form. * If a ``StochasticParameter``, then that parameter will be queried once per call to ``augment_*()`` to get ``Nx2`` center positions in ``(x, y)`` form (with ``N`` the number of images). 
* If a ``tuple`` of ``StochasticParameter``, then expected to have exactly two entries that will both be queried per call to ``augment_*()``, each for ``(N,)`` values, to get the center positions. First parameter is used for ``x`` coordinates, second for ``y`` coordinates. seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional See :func:`~imgaug.augmenters.meta.Augmenter.__init__`. name : None or str, optional See :func:`~imgaug.augmenters.meta.Augmenter.__init__`. **old_kwargs Outdated parameters. Avoid using these. Examples -------- >>> import imgaug.augmenters as iaa >>> aug = iaa.CropToFixedSize(width=100, height=100) For image sides larger than ``100`` pixels, crop to ``100`` pixels. Do nothing for the other sides. The cropping amounts are randomly (and uniformly) distributed over the sides of the image. >>> aug = iaa.CropToFixedSize(width=100, height=100, position="center") For sides larger than ``100`` pixels, crop to ``100`` pixels. Do nothing for the other sides. The cropping amounts are always equally distributed over the left/right sides of the image (and analogously for top/bottom). >>> aug = iaa.Sequential([ >>> iaa.PadToFixedSize(width=100, height=100), >>> iaa.CropToFixedSize(width=100, height=100) >>> ]) Pad images smaller than ``100x100`` until they reach ``100x100``. Analogously, crop images larger than ``100x100`` until they reach ``100x100``. The output images therefore have a fixed size of ``100x100``. """ def __init__(self, width, height, position="uniform", seed=None, name=None, **old_kwargs): super(CropToFixedSize, self).__init__( seed=seed, name=name, **old_kwargs) self.size = (width, height) # Position of where to crop. The further to the top left this is, # the larger the share of pixels that will be cropped from the top # and left sides. I.e. set to (Deterministic(0.0), Deterministic(0.0)) # to only crop at the top and left, # (Deterministic(1.0), Deterministic(1.0)) to only crop at the bottom # right. Analogously (0.5, 0.5) crops equally on both axis, # (0.0, 1.0) crops left and bottom, (1.0, 0.0) crops right and top. self.position = _handle_position_parameter(position) def _augment_batch_(self, batch, random_state, parents, hooks): # Providing the whole batch to _draw_samples() would not be necessary # for this augmenter. The number of rows would be sufficient. This # formulation however enables derived augmenters to use rowwise shapes # without having to compute them here for this augmenter. 
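        # Note: `samples` below is a tuple of (sizes, offset_xs, offset_ys)
        # with one entry per row of the batch; the offsets are fractions in
        # [0.0, 1.0] that determine how the crop amounts are split between
        # the opposing image sides. See _draw_samples() further down.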
samples = self._draw_samples(batch, random_state) if batch.images is not None: batch.images = self._augment_images_by_samples(batch.images, samples) if batch.heatmaps is not None: batch.heatmaps = self._augment_maps_by_samples( batch.heatmaps, samples) if batch.segmentation_maps is not None: batch.segmentation_maps = self._augment_maps_by_samples( batch.segmentation_maps, samples) for augm_name in ["keypoints", "bounding_boxes", "polygons", "line_strings"]: augm_value = getattr(batch, augm_name) if augm_value is not None: func = functools.partial( self._augment_keypoints_by_samples, samples=samples) cbaois = self._apply_to_cbaois_as_keypoints(augm_value, func) setattr(batch, augm_name, cbaois) return batch def _augment_images_by_samples(self, images, samples): result = [] sizes, offset_xs, offset_ys = samples for i, (image, size) in enumerate(zip(images, sizes)): w, h = size height_image, width_image = image.shape[0:2] croppings = self._calculate_crop_amounts( height_image, width_image, h, w, offset_ys[i], offset_xs[i]) image_cropped = _crop_and_pad_arr(image, croppings, (0, 0, 0, 0), keep_size=False) result.append(image_cropped) return result def _augment_keypoints_by_samples(self, kpsois, samples): result = [] sizes, offset_xs, offset_ys = samples for i, (kpsoi, size) in enumerate(zip(kpsois, sizes)): w, h = size height_image, width_image = kpsoi.shape[0:2] croppings_img = self._calculate_crop_amounts( height_image, width_image, h, w, offset_ys[i], offset_xs[i]) kpsoi_cropped = _crop_and_pad_kpsoi_( kpsoi, croppings_img, (0, 0, 0, 0), keep_size=False) result.append(kpsoi_cropped) return result def _augment_maps_by_samples(self, augmentables, samples): sizes, offset_xs, offset_ys = samples for i, (augmentable, size) in enumerate(zip(augmentables, sizes)): w, h = size height_image, width_image = augmentable.shape[0:2] croppings_img = self._calculate_crop_amounts( height_image, width_image, h, w, offset_ys[i], offset_xs[i]) augmentables[i] = _crop_and_pad_hms_or_segmaps_( augmentable, croppings_img, (0, 0, 0, 0), keep_size=False) return augmentables @classmethod def _calculate_crop_amounts(cls, height_image, width_image, height_max, width_max, offset_y, offset_x): crop_top = 0 crop_right = 0 crop_bottom = 0 crop_left = 0 if height_max is not None and height_image > height_max: crop_top = int(offset_y * (height_image - height_max)) crop_bottom = height_image - height_max - crop_top if width_max is not None and width_image > width_max: crop_left = int(offset_x * (width_image - width_max)) crop_right = width_image - width_max - crop_left return crop_top, crop_right, crop_bottom, crop_left def _draw_samples(self, batch, random_state): nb_images = batch.nb_rows rngs = random_state.duplicate(2) if isinstance(self.position, tuple): offset_xs = self.position[0].draw_samples(nb_images, random_state=rngs[0]) offset_ys = self.position[1].draw_samples(nb_images, random_state=rngs[1]) else: offsets = self.position.draw_samples((nb_images, 2), random_state=rngs[0]) offset_xs = offsets[:, 0] offset_ys = offsets[:, 1] offset_xs = 1.0 - offset_xs offset_ys = 1.0 - offset_ys # We return here the sizes even though they are static as it allows # derived augmenters to define image-specific heights/widths. return [self.size] * nb_images, offset_xs, offset_ys def get_parameters(self): """See :func:`~imgaug.augmenters.meta.Augmenter.get_parameters`.""" return [self.size[0], self.size[1], self.position] class CenterCropToFixedSize(CropToFixedSize): """Take a crop from the center of each image. 
This is an alias for :class:`~imgaug.augmenters.size.CropToFixedSize` with ``position="center"``. .. note:: If images already have a width and/or height below the provided width and/or height then this augmenter will do nothing for the respective axis. Hence, resulting images can be smaller than the provided axis sizes. Supported dtypes ---------------- See :class:`~imgaug.augmenters.size.CropToFixedSize`. Parameters ---------- width : int or None See :func:`CropToFixedSize.__init__`. height : int or None See :func:`CropToFixedSize.__init__`. seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional See :func:`~imgaug.augmenters.meta.Augmenter.__init__`. name : None or str, optional See :func:`~imgaug.augmenters.meta.Augmenter.__init__`. **old_kwargs Outdated parameters. Avoid using these. Examples -------- >>> import imgaug.augmenters as iaa >>> crop = iaa.CenterCropToFixedSize(height=20, width=10) Create an augmenter that takes ``20x10`` sized crops from the center of images. """ def __init__(self, width, height, seed=None, name=None, **old_kwargs): super(CenterCropToFixedSize, self).__init__( width=width, height=height, position="center", seed=seed, name=name, **old_kwargs) class CropToMultiplesOf(CropToFixedSize): """Crop images down until their height/width is a multiple of a value. .. note:: For a given axis size ``A`` and multiple ``M``, if ``A`` is in the interval ``[0 .. M]``, the axis will not be changed. As a result, this augmenter can still produce axis sizes that are not multiples of the given values. Supported dtypes ---------------- See :class:`~imgaug.augmenters.size.CropToFixedSize`. Parameters ---------- width_multiple : int or None Multiple for the width. Images will be cropped down until their width is a multiple of this value. If ``None``, image widths will not be altered. height_multiple : int or None Multiple for the height. Images will be cropped down until their height is a multiple of this value. If ``None``, image heights will not be altered. position : {'uniform', 'normal', 'center', 'left-top', 'left-center', 'left-bottom', 'center-top', 'center-center', 'center-bottom', 'right-top', 'right-center', 'right-bottom'} or tuple of float or StochasticParameter or tuple of StochasticParameter, optional See :func:`CropToFixedSize.__init__`. seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional See :func:`~imgaug.augmenters.meta.Augmenter.__init__`. name : None or str, optional See :func:`~imgaug.augmenters.meta.Augmenter.__init__`. **old_kwargs Outdated parameters. Avoid using these. Examples -------- >>> import imgaug.augmenters as iaa >>> aug = iaa.CropToMultiplesOf(height_multiple=10, width_multiple=6) Create an augmenter that crops images to multiples of ``10`` along the y-axis (i.e. 10, 20, 30, ...) and to multiples of ``6`` along the x-axis (i.e. 6, 12, 18, ...). The rows to be cropped will be spread *randomly* over the top and bottom sides (analogous for the left/right sides). 
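    >>> aug = iaa.CropToMultiplesOf(
    >>>     height_multiple=10, width_multiple=6, position="center")

    Similar to the previous example, but the cropped rows and columns are
    spread *equally* over the opposing image sides (values chosen purely for
    illustration). E.g. a ``25x13`` input image would be reduced to a
    ``20x12`` output.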
""" def __init__(self, width_multiple, height_multiple, position="uniform", seed=None, name=None, **old_kwargs): super(CropToMultiplesOf, self).__init__( width=None, height=None, position=position, seed=seed, name=name, **old_kwargs) self.width_multiple = width_multiple self.height_multiple = height_multiple def _draw_samples(self, batch, random_state): _sizes, offset_xs, offset_ys = super( CropToMultiplesOf, self )._draw_samples(batch, random_state) shapes = batch.get_rowwise_shapes() sizes = [] for shape in shapes: height, width = shape[0:2] croppings = compute_croppings_to_reach_multiples_of( shape, height_multiple=self.height_multiple, width_multiple=self.width_multiple) # TODO change that # note that these are not in the same order as shape tuples # in CropToFixedSize new_size = ( width - croppings[1] - croppings[3], height - croppings[0] - croppings[2] ) sizes.append(new_size) return sizes, offset_xs, offset_ys def get_parameters(self): """See :func:`~imgaug.augmenters.meta.Augmenter.get_parameters`.""" return [self.width_multiple, self.height_multiple, self.position] class CenterCropToMultiplesOf(CropToMultiplesOf): """Crop images equally on all sides until H/W are multiples of given values. This is the same as :class:`~imgaug.augmenters.size.CropToMultiplesOf`, but uses ``position="center"`` by default, which spreads the crop amounts equally over all image sides, while :class:`~imgaug.augmenters.size.CropToMultiplesOf` by default spreads them randomly. Supported dtypes ---------------- See :class:`~imgaug.augmenters.size.CropToFixedSize`. Parameters ---------- width_multiple : int or None See :func:`CropToMultiplesOf.__init__`. height_multiple : int or None See :func:`CropToMultiplesOf.__init__`. seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional See :func:`~imgaug.augmenters.meta.Augmenter.__init__`. name : None or str, optional See :func:`~imgaug.augmenters.meta.Augmenter.__init__`. **old_kwargs Outdated parameters. Avoid using these. Examples -------- >>> import imgaug.augmenters as iaa >>> aug = iaa.CenterCropToMultiplesOf(height_multiple=10, width_multiple=6) Create an augmenter that crops images to multiples of ``10`` along the y-axis (i.e. 10, 20, 30, ...) and to multiples of ``6`` along the x-axis (i.e. 6, 12, 18, ...). The rows to be cropped will be spread *equally* over the top and bottom sides (analogous for the left/right sides). """ def __init__(self, width_multiple, height_multiple, seed=None, name=None, **old_kwargs): super(CenterCropToMultiplesOf, self).__init__( width_multiple=width_multiple, height_multiple=height_multiple, position="center", seed=seed, name=name, **old_kwargs) class PadToMultiplesOf(PadToFixedSize): """Pad images until their height/width is a multiple of a value. Supported dtypes ---------------- See :class:`~imgaug.augmenters.size.PadToFixedSize`. Parameters ---------- width_multiple : int or None Multiple for the width. Images will be padded until their width is a multiple of this value. If ``None``, image widths will not be altered. height_multiple : int or None Multiple for the height. Images will be padded until their height is a multiple of this value. If ``None``, image heights will not be altered. pad_mode : imgaug.ALL or str or list of str or imgaug.parameters.StochasticParameter, optional See :func:`~imgaug.augmenters.size.PadToFixedSize.__init__`. 
pad_cval : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional See :func:`~imgaug.augmenters.size.PadToFixedSize.__init__`. position : {'uniform', 'normal', 'center', 'left-top', 'left-center', 'left-bottom', 'center-top', 'center-center', 'center-bottom', 'right-top', 'right-center', 'right-bottom'} or tuple of float or StochasticParameter or tuple of StochasticParameter, optional See :func:`PadToFixedSize.__init__`. seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional See :func:`~imgaug.augmenters.meta.Augmenter.__init__`. name : None or str, optional See :func:`~imgaug.augmenters.meta.Augmenter.__init__`. **old_kwargs Outdated parameters. Avoid using these. Examples -------- >>> import imgaug.augmenters as iaa >>> aug = iaa.PadToMultiplesOf(height_multiple=10, width_multiple=6) Create an augmenter that pads images to multiples of ``10`` along the y-axis (i.e. 10, 20, 30, ...) and to multiples of ``6`` along the x-axis (i.e. 6, 12, 18, ...). The rows to be padded will be spread *randomly* over the top and bottom sides (analogous for the left/right sides). """ def __init__(self, width_multiple, height_multiple, pad_mode="constant", pad_cval=0, position="uniform", seed=None, name=None, **old_kwargs): super(PadToMultiplesOf, self).__init__( width=None, height=None, pad_mode=pad_mode, pad_cval=pad_cval, position=position, seed=seed, name=name, **old_kwargs) self.width_multiple = width_multiple self.height_multiple = height_multiple def _draw_samples(self, batch, random_state): _sizes, pad_xs, pad_ys, pad_modes, pad_cvals = super( PadToMultiplesOf, self )._draw_samples(batch, random_state) shapes = batch.get_rowwise_shapes() sizes = [] for shape in shapes: height, width = shape[0:2] paddings = compute_paddings_to_reach_multiples_of( shape, height_multiple=self.height_multiple, width_multiple=self.width_multiple) # TODO change that # note that these are not in the same order as shape tuples # in PadToFixedSize new_size = ( width + paddings[1] + paddings[3], height + paddings[0] + paddings[2] ) sizes.append(new_size) return sizes, pad_xs, pad_ys, pad_modes, pad_cvals def get_parameters(self): """See :func:`~imgaug.augmenters.meta.Augmenter.get_parameters`.""" return [self.width_multiple, self.height_multiple, self.pad_mode, self.pad_cval, self.position] class CenterPadToMultiplesOf(PadToMultiplesOf): """Pad images equally on all sides until H/W are multiples of given values. This is the same as :class:`~imgaug.augmenters.size.PadToMultiplesOf`, but uses ``position="center"`` by default, which spreads the pad amounts equally over all image sides, while :class:`~imgaug.augmenters.size.PadToMultiplesOf` by default spreads them randomly. Supported dtypes ---------------- See :class:`~imgaug.augmenters.size.PadToFixedSize`. Parameters ---------- width_multiple : int or None See :func:`PadToMultiplesOf.__init__`. height_multiple : int or None See :func:`PadToMultiplesOf.__init__`. pad_mode : imgaug.ALL or str or list of str or imgaug.parameters.StochasticParameter, optional See :func:`~imgaug.augmenters.size.PadToMultiplesOf.__init__`. pad_cval : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional See :func:`~imgaug.augmenters.size.PadToMultiplesOf.__init__`. 
seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional See :func:`~imgaug.augmenters.meta.Augmenter.__init__`. name : None or str, optional See :func:`~imgaug.augmenters.meta.Augmenter.__init__`. **old_kwargs Outdated parameters. Avoid using these. Examples -------- >>> import imgaug.augmenters as iaa >>> aug = iaa.CenterPadToMultiplesOf(height_multiple=10, width_multiple=6) Create an augmenter that pads images to multiples of ``10`` along the y-axis (i.e. 10, 20, 30, ...) and to multiples of ``6`` along the x-axis (i.e. 6, 12, 18, ...). The rows to be padded will be spread *equally* over the top and bottom sides (analogous for the left/right sides). """ def __init__(self, width_multiple, height_multiple, pad_mode="constant", pad_cval=0, seed=None, name=None, **old_kwargs): super(CenterPadToMultiplesOf, self).__init__( width_multiple=width_multiple, height_multiple=height_multiple, pad_mode=pad_mode, pad_cval=pad_cval, position="center", seed=seed, name=name, **old_kwargs) class CropToPowersOf(CropToFixedSize): """Crop images until their height/width is a power of a base. This augmenter removes pixels from an axis with size ``S`` leading to the new size ``S'`` until ``S' = B^E`` is fulfilled, where ``B`` is a provided base (e.g. ``2``) and ``E`` is an exponent from the discrete interval ``[1 .. inf)``. .. note:: This augmenter does nothing for axes with size less than ``B^1 = B``. If you have images with ``S < B^1``, it is recommended to combine this augmenter with a padding augmenter that pads each axis up to ``B``. Supported dtypes ---------------- See :class:`~imgaug.augmenters.size.CropToFixedSize`. Parameters ---------- width_base : int or None Base for the width. Images will be cropped down until their width fulfills ``width' = width_base ^ E`` with ``E`` being any natural number. If ``None``, image widths will not be altered. height_base : int or None Base for the height. Images will be cropped down until their height fulfills ``height' = height_base ^ E`` with ``E`` being any natural number. If ``None``, image heights will not be altered. position : {'uniform', 'normal', 'center', 'left-top', 'left-center', 'left-bottom', 'center-top', 'center-center', 'center-bottom', 'right-top', 'right-center', 'right-bottom'} or tuple of float or StochasticParameter or tuple of StochasticParameter, optional See :func:`CropToFixedSize.__init__`. seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional See :func:`~imgaug.augmenters.meta.Augmenter.__init__`. name : None or str, optional See :func:`~imgaug.augmenters.meta.Augmenter.__init__`. **old_kwargs Outdated parameters. Avoid using these. Examples -------- >>> import imgaug.augmenters as iaa >>> aug = iaa.CropToPowersOf(height_base=3, width_base=2) Create an augmenter that crops each image down to powers of ``3`` along the y-axis (i.e. 3, 9, 27, ...) and powers of ``2`` along the x-axis (i.e. 2, 4, 8, 16, ...). The rows to be cropped will be spread *randomly* over the top and bottom sides (analogous for the left/right sides). 
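    >>> aug = iaa.CropToPowersOf(
    >>>     height_base=3, width_base=2, position="center")

    Similar to the previous example, but the cropped rows and columns are
    spread *equally* over the opposing image sides (values chosen purely for
    illustration). E.g. a ``30x17`` input image would be reduced to a
    ``27x16`` output (``3^3 = 27``, ``2^4 = 16``).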
""" def __init__(self, width_base, height_base, position="uniform", seed=None, name=None, **old_kwargs): super(CropToPowersOf, self).__init__( width=None, height=None, position=position, seed=seed, name=name, **old_kwargs) self.width_base = width_base self.height_base = height_base def _draw_samples(self, batch, random_state): _sizes, offset_xs, offset_ys = super( CropToPowersOf, self )._draw_samples(batch, random_state) shapes = batch.get_rowwise_shapes() sizes = [] for shape in shapes: height, width = shape[0:2] croppings = compute_croppings_to_reach_powers_of( shape, height_base=self.height_base, width_base=self.width_base) # TODO change that # note that these are not in the same order as shape tuples # in CropToFixedSize new_size = ( width - croppings[1] - croppings[3], height - croppings[0] - croppings[2] ) sizes.append(new_size) return sizes, offset_xs, offset_ys def get_parameters(self): """See :func:`~imgaug.augmenters.meta.Augmenter.get_parameters`.""" return [self.width_base, self.height_base, self.position] class CenterCropToPowersOf(CropToPowersOf): """Crop images equally on all sides until H/W is a power of a base. This is the same as :class:`~imgaug.augmenters.size.CropToPowersOf`, but uses ``position="center"`` by default, which spreads the crop amounts equally over all image sides, while :class:`~imgaug.augmenters.size.CropToPowersOf` by default spreads them randomly. Supported dtypes ---------------- See :class:`~imgaug.augmenters.size.CropToFixedSize`. Parameters ---------- width_base : int or None See :func:`CropToPowersOf.__init__`. height_base : int or None See :func:`CropToPowersOf.__init__`. seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional See :func:`~imgaug.augmenters.meta.Augmenter.__init__`. name : None or str, optional See :func:`~imgaug.augmenters.meta.Augmenter.__init__`. **old_kwargs Outdated parameters. Avoid using these. Examples -------- >>> import imgaug.augmenters as iaa >>> aug = iaa.CropToPowersOf(height_base=3, width_base=2) Create an augmenter that crops each image down to powers of ``3`` along the y-axis (i.e. 3, 9, 27, ...) and powers of ``2`` along the x-axis (i.e. 2, 4, 8, 16, ...). The rows to be cropped will be spread *equally* over the top and bottom sides (analogous for the left/right sides). """ def __init__(self, width_base, height_base, seed=None, name=None, **old_kwargs): super(CenterCropToPowersOf, self).__init__( width_base=width_base, height_base=height_base, position="center", seed=seed, name=name, **old_kwargs) class PadToPowersOf(PadToFixedSize): """Pad images until their height/width is a power of a base. This augmenter adds pixels to an axis with size ``S`` leading to the new size ``S'`` until ``S' = B^E`` is fulfilled, where ``B`` is a provided base (e.g. ``2``) and ``E`` is an exponent from the discrete interval ``[1 .. inf)``. Supported dtypes ---------------- See :class:`~imgaug.augmenters.size.PadToFixedSize`. Parameters ---------- width_base : int or None Base for the width. Images will be padded down until their width fulfills ``width' = width_base ^ E`` with ``E`` being any natural number. If ``None``, image widths will not be altered. height_base : int or None Base for the height. Images will be padded until their height fulfills ``height' = height_base ^ E`` with ``E`` being any natural number. If ``None``, image heights will not be altered. 
pad_mode : imgaug.ALL or str or list of str or imgaug.parameters.StochasticParameter, optional See :func:`~imgaug.augmenters.size.PadToFixedSize.__init__`. pad_cval : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional See :func:`~imgaug.augmenters.size.PadToFixedSize.__init__`. position : {'uniform', 'normal', 'center', 'left-top', 'left-center', 'left-bottom', 'center-top', 'center-center', 'center-bottom', 'right-top', 'right-center', 'right-bottom'} or tuple of float or StochasticParameter or tuple of StochasticParameter, optional See :func:`PadToFixedSize.__init__`. seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional See :func:`~imgaug.augmenters.meta.Augmenter.__init__`. name : None or str, optional See :func:`~imgaug.augmenters.meta.Augmenter.__init__`. **old_kwargs Outdated parameters. Avoid using these. Examples -------- >>> import imgaug.augmenters as iaa >>> aug = iaa.PadToPowersOf(height_base=3, width_base=2) Create an augmenter that pads each image to powers of ``3`` along the y-axis (i.e. 3, 9, 27, ...) and powers of ``2`` along the x-axis (i.e. 2, 4, 8, 16, ...). The rows to be padded will be spread *randomly* over the top and bottom sides (analogous for the left/right sides). """ def __init__(self, width_base, height_base, pad_mode="constant", pad_cval=0, position="uniform", seed=None, name=None, **old_kwargs): super(PadToPowersOf, self).__init__( width=None, height=None, pad_mode=pad_mode, pad_cval=pad_cval, position=position, seed=seed, name=name, **old_kwargs) self.width_base = width_base self.height_base = height_base def _draw_samples(self, batch, random_state): _sizes, pad_xs, pad_ys, pad_modes, pad_cvals = super( PadToPowersOf, self )._draw_samples(batch, random_state) shapes = batch.get_rowwise_shapes() sizes = [] for shape in shapes: height, width = shape[0:2] paddings = compute_paddings_to_reach_powers_of( shape, height_base=self.height_base, width_base=self.width_base) # TODO change that # note that these are not in the same order as shape tuples # in PadToFixedSize new_size = ( width + paddings[1] + paddings[3], height + paddings[0] + paddings[2] ) sizes.append(new_size) return sizes, pad_xs, pad_ys, pad_modes, pad_cvals def get_parameters(self): """See :func:`~imgaug.augmenters.meta.Augmenter.get_parameters`.""" return [self.width_base, self.height_base, self.pad_mode, self.pad_cval, self.position] class CenterPadToPowersOf(PadToPowersOf): """Pad images equally on all sides until H/W is a power of a base. This is the same as :class:`~imgaug.augmenters.size.PadToPowersOf`, but uses ``position="center"`` by default, which spreads the pad amounts equally over all image sides, while :class:`~imgaug.augmenters.size.PadToPowersOf` by default spreads them randomly. Supported dtypes ---------------- See :class:`~imgaug.augmenters.size.PadToFixedSize`. Parameters ---------- width_base : int or None See :func:`PadToPowersOf.__init__`. height_base : int or None See :func:`PadToPowersOf.__init__`. pad_mode : imgaug.ALL or str or list of str or imgaug.parameters.StochasticParameter, optional See :func:`~imgaug.augmenters.size.PadToPowersOf.__init__`. pad_cval : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional See :func:`~imgaug.augmenters.size.PadToPowersOf.__init__`. 
    seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional
        See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.

    name : None or str, optional
        See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.

    **old_kwargs
        Outdated parameters. Avoid using these.

    Examples
    --------
    >>> import imgaug.augmenters as iaa
    >>> aug = iaa.CenterPadToPowersOf(height_base=3, width_base=2)

    Create an augmenter that pads each image to powers of ``3`` along the
    y-axis (i.e. 3, 9, 27, ...) and powers of ``2`` along the x-axis (i.e.
    2, 4, 8, 16, ...).
    The rows to be padded will be spread *equally* over the top and bottom
    sides (analogous for the left/right sides).

    """

    def __init__(self, width_base, height_base,
                 pad_mode="constant", pad_cval=0,
                 seed=None, name=None, **old_kwargs):
        super(CenterPadToPowersOf, self).__init__(
            width_base=width_base, height_base=height_base,
            pad_mode=pad_mode, pad_cval=pad_cval, position="center",
            seed=seed, name=name, **old_kwargs)


class CropToAspectRatio(CropToFixedSize):
    """Crop images until their width/height matches an aspect ratio.

    This augmenter removes either rows or columns until the image reaches
    the desired aspect ratio given in ``width / height``. The cropping
    operation is stopped once the desired aspect ratio is reached or the
    image side to crop reaches a size of ``1``. If any side of the image
    starts with a size of ``0``, the image will not be changed.

    Supported dtypes
    ----------------

    See :class:`~imgaug.augmenters.size.CropToFixedSize`.

    Parameters
    ----------
    aspect_ratio : number
        The desired aspect ratio, given as ``width/height``. E.g. a ratio
        of ``2.0`` denotes an image that is twice as wide as it is high.

    position : {'uniform', 'normal', 'center', 'left-top', 'left-center', 'left-bottom', 'center-top', 'center-center', 'center-bottom', 'right-top', 'right-center', 'right-bottom'} or tuple of float or StochasticParameter or tuple of StochasticParameter, optional
        See :func:`CropToFixedSize.__init__`.

    seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional
        See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.

    name : None or str, optional
        See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.

    **old_kwargs
        Outdated parameters. Avoid using these.

    Examples
    --------
    >>> import imgaug.augmenters as iaa
    >>> aug = iaa.CropToAspectRatio(2.0)

    Create an augmenter that crops each image until its aspect ratio is as
    close as possible to ``2.0`` (i.e. two times as many pixels along the
    x-axis than the y-axis).
    The rows to be cropped will be spread *randomly* over the top and bottom
    sides (analogous for the left/right sides).
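    >>> aug = iaa.CropToAspectRatio(2.0, position="center")

    Similar to the previous example, but the cropped rows or columns are
    spread *equally* over the opposing image sides (values chosen purely for
    illustration). E.g. an input image with height ``100`` and width ``80``
    would be cropped to height ``40`` and width ``80``, as ``80/40 = 2.0``.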
""" def __init__(self, aspect_ratio, position="uniform", seed=None, name=None, **old_kwargs): super(CropToAspectRatio, self).__init__( width=None, height=None, position=position, seed=seed, name=name, **old_kwargs) self.aspect_ratio = aspect_ratio def _draw_samples(self, batch, random_state): _sizes, offset_xs, offset_ys = super( CropToAspectRatio, self )._draw_samples(batch, random_state) shapes = batch.get_rowwise_shapes() sizes = [] for shape in shapes: height, width = shape[0:2] if height == 0 or width == 0: croppings = (0, 0, 0, 0) else: croppings = compute_croppings_to_reach_aspect_ratio( shape, aspect_ratio=self.aspect_ratio) # TODO change that # note that these are not in the same order as shape tuples # in CropToFixedSize new_size = ( width - croppings[1] - croppings[3], height - croppings[0] - croppings[2] ) sizes.append(new_size) return sizes, offset_xs, offset_ys def get_parameters(self): """See :func:`~imgaug.augmenters.meta.Augmenter.get_parameters`.""" return [self.aspect_ratio, self.position] class CenterCropToAspectRatio(CropToAspectRatio): """Crop images equally on all sides until they reach an aspect ratio. This is the same as :class:`~imgaug.augmenters.size.CropToAspectRatio`, but uses ``position="center"`` by default, which spreads the crop amounts equally over all image sides, while :class:`~imgaug.augmenters.size.CropToAspectRatio` by default spreads them randomly. Supported dtypes ---------------- See :class:`~imgaug.augmenters.size.CropToFixedSize`. Parameters ---------- aspect_ratio : number See :func:`CropToAspectRatio.__init__`. seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional See :func:`~imgaug.augmenters.meta.Augmenter.__init__`. name : None or str, optional See :func:`~imgaug.augmenters.meta.Augmenter.__init__`. **old_kwargs Outdated parameters. Avoid using these. Examples -------- >>> import imgaug.augmenters as iaa >>> aug = iaa.CenterCropToAspectRatio(2.0) Create an augmenter that crops each image until its aspect ratio is as close as possible to ``2.0`` (i.e. two times as many pixels along the x-axis than the y-axis). The rows to be cropped will be spread *equally* over the top and bottom sides (analogous for the left/right sides). """ def __init__(self, aspect_ratio, seed=None, name=None, **old_kwargs): super(CenterCropToAspectRatio, self).__init__( aspect_ratio=aspect_ratio, position="center", seed=seed, name=name, **old_kwargs) class PadToAspectRatio(PadToFixedSize): """Pad images until their width/height matches an aspect ratio. This augmenter adds either rows or columns until the image reaches the desired aspect ratio given in ``width / height``. Supported dtypes ---------------- See :class:`~imgaug.augmenters.size.PadToFixedSize`. Parameters ---------- aspect_ratio : number The desired aspect ratio, given as ``width/height``. E.g. a ratio of ``2.0`` denotes an image that is twice as wide as it is high. position : {'uniform', 'normal', 'center', 'left-top', 'left-center', 'left-bottom', 'center-top', 'center-center', 'center-bottom', 'right-top', 'right-center', 'right-bottom'} or tuple of float or StochasticParameter or tuple of StochasticParameter, optional See :func:`PadToFixedSize.__init__`. pad_mode : imgaug.ALL or str or list of str or imgaug.parameters.StochasticParameter, optional See :func:`~imgaug.augmenters.size.PadToFixedSize.__init__`. 
    pad_cval : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
        See :func:`~imgaug.augmenters.size.PadToFixedSize.__init__`.

    seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional
        See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.

    name : None or str, optional
        See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.

    **old_kwargs
        Outdated parameters. Avoid using these.

    Examples
    --------
    >>> import imgaug.augmenters as iaa
    >>> aug = iaa.PadToAspectRatio(2.0)

    Create an augmenter that pads each image until its aspect ratio is as
    close as possible to ``2.0`` (i.e. two times as many pixels along the
    x-axis than the y-axis).
    The rows to be padded will be spread *randomly* over the top and bottom
    sides (analogous for the left/right sides).

    """

    def __init__(self, aspect_ratio, pad_mode="constant", pad_cval=0,
                 position="uniform",
                 seed=None, name=None, **old_kwargs):
        super(PadToAspectRatio, self).__init__(
            width=None, height=None, pad_mode=pad_mode, pad_cval=pad_cval,
            position=position,
            seed=seed, name=name, **old_kwargs)
        self.aspect_ratio = aspect_ratio

    def _draw_samples(self, batch, random_state):
        _sizes, pad_xs, pad_ys, pad_modes, pad_cvals = super(
            PadToAspectRatio, self
        )._draw_samples(batch, random_state)

        shapes = batch.get_rowwise_shapes()
        sizes = []
        for shape in shapes:
            height, width = shape[0:2]
            paddings = compute_paddings_to_reach_aspect_ratio(
                shape,
                aspect_ratio=self.aspect_ratio)

            # TODO change that
            # note that these are not in the same order as shape tuples
            # in PadToFixedSize
            new_size = (
                width + paddings[1] + paddings[3],
                height + paddings[0] + paddings[2]
            )
            sizes.append(new_size)

        return sizes, pad_xs, pad_ys, pad_modes, pad_cvals

    def get_parameters(self):
        """See :func:`~imgaug.augmenters.meta.Augmenter.get_parameters`."""
        return [self.aspect_ratio, self.pad_mode, self.pad_cval,
                self.position]


class CenterPadToAspectRatio(PadToAspectRatio):
    """Pad images equally on all sides until H/W matches an aspect ratio.

    This is the same as :class:`~imgaug.augmenters.size.PadToAspectRatio`, but
    uses ``position="center"`` by default, which spreads the pad amounts
    equally over all image sides, while
    :class:`~imgaug.augmenters.size.PadToAspectRatio` by default spreads them
    randomly.

    Supported dtypes
    ----------------

    See :class:`~imgaug.augmenters.size.PadToFixedSize`.

    Parameters
    ----------
    aspect_ratio : number
        See :func:`PadToAspectRatio.__init__`.

    pad_mode : imgaug.ALL or str or list of str or imgaug.parameters.StochasticParameter, optional
        See :func:`~imgaug.augmenters.size.PadToAspectRatio.__init__`.

    pad_cval : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
        See :func:`~imgaug.augmenters.size.PadToAspectRatio.__init__`.

    seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional
        See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.

    name : None or str, optional
        See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.

    **old_kwargs
        Outdated parameters. Avoid using these.

    Examples
    --------
    >>> import imgaug.augmenters as iaa
    >>> aug = iaa.CenterPadToAspectRatio(2.0)

    Create an augmenter that pads each image until its aspect ratio is as
    close as possible to ``2.0`` (i.e. two times as many pixels along the
    x-axis than the y-axis).
The rows to be padded will be spread *equally* over the top and bottom sides (analogous for the left/right sides). """ def __init__(self, aspect_ratio, pad_mode="constant", pad_cval=0, seed=None, name=None, **old_kwargs): super(CenterPadToAspectRatio, self).__init__( aspect_ratio=aspect_ratio, position="center", pad_mode=pad_mode, pad_cval=pad_cval, seed=seed, name=name, **old_kwargs) class CropToSquare(CropToAspectRatio): """Crop images until their width and height are identical. This is identical to :class:`~imgaug.augmenters.size.CropToAspectRatio` with ``aspect_ratio=1.0``. Images with axis sizes of ``0`` will not be altered. Supported dtypes ---------------- See :class:`~imgaug.augmenters.size.CropToFixedSize`. Parameters ---------- position : {'uniform', 'normal', 'center', 'left-top', 'left-center', 'left-bottom', 'center-top', 'center-center', 'center-bottom', 'right-top', 'right-center', 'right-bottom'} or tuple of float or StochasticParameter or tuple of StochasticParameter, optional See :func:`CropToFixedSize.__init__`. seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional See :func:`~imgaug.augmenters.meta.Augmenter.__init__`. name : None or str, optional See :func:`~imgaug.augmenters.meta.Augmenter.__init__`. **old_kwargs Outdated parameters. Avoid using these. Examples -------- >>> import imgaug.augmenters as iaa >>> aug = iaa.CropToSquare() Create an augmenter that crops each image until its square, i.e. height and width match. The rows to be cropped will be spread *randomly* over the top and bottom sides (analogous for the left/right sides). """ def __init__(self, position="uniform", seed=None, name=None, **old_kwargs): super(CropToSquare, self).__init__( aspect_ratio=1.0, position=position, seed=seed, name=name, **old_kwargs) class CenterCropToSquare(CropToSquare): """Crop images equally on all sides until their height/width are identical. In contrast to :class:`~imgaug.augmenters.size.CropToSquare`, this augmenter always tries to spread the columns/rows to remove equally over both sides of the respective axis to be cropped. :class:`~imgaug.augmenters.size.CropToAspectRatio` by default spreads the croppings randomly. This augmenter is identical to :class:`~imgaug.augmenters.size.CropToSquare` with ``position="center"``, and thereby the same as :class:`~imgaug.augmenters.size.CropToAspectRatio` with ``aspect_ratio=1.0, position="center"``. Images with axis sizes of ``0`` will not be altered. Supported dtypes ---------------- See :class:`~imgaug.augmenters.size.CropToFixedSize`. Parameters ---------- seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional See :func:`~imgaug.augmenters.meta.Augmenter.__init__`. name : None or str, optional See :func:`~imgaug.augmenters.meta.Augmenter.__init__`. **old_kwargs Outdated parameters. Avoid using these. Examples -------- >>> import imgaug.augmenters as iaa >>> aug = iaa.CenterCropToSquare() Create an augmenter that crops each image until its square, i.e. height and width match. The rows to be cropped will be spread *equally* over the top and bottom sides (analogous for the left/right sides). 
""" def __init__(self, seed=None, name=None, **old_kwargs): super(CenterCropToSquare, self).__init__( position="center", seed=seed, name=name, **old_kwargs) class PadToSquare(PadToAspectRatio): """Pad images until their height and width are identical. This augmenter is identical to :class:`~imgaug.augmenters.size.PadToAspectRatio` with ``aspect_ratio=1.0``. Supported dtypes ---------------- See :class:`~imgaug.augmenters.size.PadToFixedSize`. Parameters ---------- position : {'uniform', 'normal', 'center', 'left-top', 'left-center', 'left-bottom', 'center-top', 'center-center', 'center-bottom', 'right-top', 'right-center', 'right-bottom'} or tuple of float or StochasticParameter or tuple of StochasticParameter, optional See :func:`PadToFixedSize.__init__`. pad_mode : imgaug.ALL or str or list of str or imgaug.parameters.StochasticParameter, optional See :func:`~imgaug.augmenters.size.PadToFixedSize.__init__`. pad_cval : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional See :func:`~imgaug.augmenters.size.PadToFixedSize.__init__`. seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional See :func:`~imgaug.augmenters.meta.Augmenter.__init__`. name : None or str, optional See :func:`~imgaug.augmenters.meta.Augmenter.__init__`. **old_kwargs Outdated parameters. Avoid using these. Examples -------- >>> import imgaug.augmenters as iaa >>> aug = iaa.PadToSquare() Create an augmenter that pads each image until its square, i.e. height and width match. The rows to be padded will be spread *randomly* over the top and bottom sides (analogous for the left/right sides). """ def __init__(self, pad_mode="constant", pad_cval=0, position="uniform", seed=None, name=None, **old_kwargs): super(PadToSquare, self).__init__( aspect_ratio=1.0, pad_mode=pad_mode, pad_cval=pad_cval, position=position, seed=seed, name=name, **old_kwargs) class CenterPadToSquare(PadToSquare): """Pad images equally on all sides until their height & width are identical. This is the same as :class:`~imgaug.augmenters.size.PadToSquare`, but uses ``position="center"`` by default, which spreads the pad amounts equally over all image sides, while :class:`~imgaug.augmenters.size.PadToSquare` by default spreads them randomly. This augmenter is thus also identical to :class:`~imgaug.augmenters.size.PadToAspectRatio` with ``aspect_ratio=1.0, position="center"``. Supported dtypes ---------------- See :class:`~imgaug.augmenters.size.PadToFixedSize`. Parameters ---------- name : None or str, optional See :func:`~imgaug.augmenters.meta.Augmenter.__init__`. pad_mode : imgaug.ALL or str or list of str or imgaug.parameters.StochasticParameter, optional See :func:`~imgaug.augmenters.size.PadToAspectRatio.__init__`. pad_cval : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional See :func:`~imgaug.augmenters.size.PadToAspectRatio.__init__`. deterministic : bool, optional See :func:`~imgaug.augmenters.meta.Augmenter.__init__`. random_state : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional See :func:`~imgaug.augmenters.meta.Augmenter.__init__`. Examples -------- >>> import imgaug.augmenters as iaa >>> aug = iaa.CenterPadToSquare() Create an augmenter that pads each image until its square, i.e. height and width match. 
The rows to be padded will be spread *equally* over the top and bottom sides (analogous for the left/right sides). """ def __init__(self, pad_mode="constant", pad_cval=0, seed=None, name=None, **old_kwargs): super(CenterPadToSquare, self).__init__( pad_mode=pad_mode, pad_cval=pad_cval, position="center", seed=seed, name=name, **old_kwargs) class KeepSizeByResize(meta.Augmenter): """Resize images back to their input sizes after applying child augmenters. Combining this with e.g. a cropping augmenter as the child will lead to images being resized back to the input size after the crop operation was applied. Some augmenters have a ``keep_size`` argument that achieves the same goal (if set to ``True``), though this augmenter offers control over the interpolation mode and which augmentables to resize (images, heatmaps, segmentation maps). Supported dtypes ---------------- See :func:`~imgaug.imgaug.imresize_many_images`. Parameters ---------- children : Augmenter or list of imgaug.augmenters.meta.Augmenter or None, optional One or more augmenters to apply to images. These augmenters may change the image size. interpolation : KeepSizeByResize.NO_RESIZE or {'nearest', 'linear', 'area', 'cubic'} or {cv2.INTER_NEAREST, cv2.INTER_LINEAR, cv2.INTER_AREA, cv2.INTER_CUBIC} or list of str or list of int or StochasticParameter, optional The interpolation mode to use when resizing images. Can take any value that :func:`~imgaug.imgaug.imresize_single_image` accepts, e.g. ``cubic``. * If this is ``KeepSizeByResize.NO_RESIZE`` then images will not be resized. * If this is a single ``str``, it is expected to have one of the following values: ``nearest``, ``linear``, ``area``, ``cubic``. * If this is a single integer, it is expected to have a value identical to one of: ``cv2.INTER_NEAREST``, ``cv2.INTER_LINEAR``, ``cv2.INTER_AREA``, ``cv2.INTER_CUBIC``. * If this is a ``list`` of ``str`` or ``int``, it is expected that each ``str``/``int`` is one of the above mentioned valid ones. A random one of these values will be sampled per image. * If this is a ``StochasticParameter``, it will be queried once per call to ``_augment_images()`` and must return ``N`` ``str`` s or ``int`` s (matching the above mentioned ones) for ``N`` images. interpolation_heatmaps : KeepSizeByResize.SAME_AS_IMAGES or KeepSizeByResize.NO_RESIZE or {'nearest', 'linear', 'area', 'cubic'} or {cv2.INTER_NEAREST, cv2.INTER_LINEAR, cv2.INTER_AREA, cv2.INTER_CUBIC} or list of str or list of int or StochasticParameter, optional The interpolation mode to use when resizing heatmaps. Meaning and valid values are similar to `interpolation`. This parameter may also take the value ``KeepSizeByResize.SAME_AS_IMAGES``, which will lead to copying the interpolation modes used for the corresponding images. The value may also be returned on a per-image basis if `interpolation_heatmaps` is provided as a ``StochasticParameter`` or may be one possible value if it is provided as a ``list`` of ``str``. interpolation_segmaps : KeepSizeByResize.SAME_AS_IMAGES or KeepSizeByResize.NO_RESIZE or {'nearest', 'linear', 'area', 'cubic'} or {cv2.INTER_NEAREST, cv2.INTER_LINEAR, cv2.INTER_AREA, cv2.INTER_CUBIC} or list of str or list of int or StochasticParameter, optional The interpolation mode to use when resizing segmentation maps. Similar to `interpolation_heatmaps`. **Note**: For segmentation maps, only ``NO_RESIZE`` or nearest neighbour interpolation (i.e. ``nearest``) make sense in the vast majority of all cases. 
seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional See :func:`~imgaug.augmenters.meta.Augmenter.__init__`. name : None or str, optional See :func:`~imgaug.augmenters.meta.Augmenter.__init__`. **old_kwargs Outdated parameters. Avoid using these. Examples -------- >>> import imgaug.augmenters as iaa >>> aug = iaa.KeepSizeByResize( >>> iaa.Crop((20, 40), keep_size=False) >>> ) Apply random cropping to input images, then resize them back to their original input sizes. The resizing is done using this augmenter instead of the corresponding internal resizing operation in ``Crop``. >>> aug = iaa.KeepSizeByResize( >>> iaa.Crop((20, 40), keep_size=False), >>> interpolation="nearest" >>> ) Same as in the previous example, but images are now always resized using nearest neighbour interpolation. >>> aug = iaa.KeepSizeByResize( >>> iaa.Crop((20, 40), keep_size=False), >>> interpolation=["nearest", "cubic"], >>> interpolation_heatmaps=iaa.KeepSizeByResize.SAME_AS_IMAGES, >>> interpolation_segmaps=iaa.KeepSizeByResize.NO_RESIZE >>> ) Similar to the previous example, but images are now sometimes resized using linear interpolation and sometimes using nearest neighbour interpolation. Heatmaps are resized using the same interpolation as was used for the corresponding image. Segmentation maps are not resized and will therefore remain at their size after cropping. """ NO_RESIZE = "NO_RESIZE" SAME_AS_IMAGES = "SAME_AS_IMAGES" def __init__(self, children, interpolation="cubic", interpolation_heatmaps=SAME_AS_IMAGES, interpolation_segmaps="nearest", seed=None, name=None, **old_kwargs): super(KeepSizeByResize, self).__init__( seed=seed, name=name, **old_kwargs) self.children = children def _validate_param(val, allow_same_as_images): valid_ips_and_resize = ia.IMRESIZE_VALID_INTERPOLATIONS \ + [KeepSizeByResize.NO_RESIZE] if allow_same_as_images and val == self.SAME_AS_IMAGES: return self.SAME_AS_IMAGES if val in valid_ips_and_resize: return iap.Deterministic(val) if isinstance(val, list): assert len(val) > 0, ( "Expected a list of at least one interpolation method. " "Got an empty list.") valid_ips_here = valid_ips_and_resize if allow_same_as_images: valid_ips_here = valid_ips_here \ + [KeepSizeByResize.SAME_AS_IMAGES] only_valid_ips = all([ip in valid_ips_here for ip in val]) assert only_valid_ips, ( "Expected each interpolations to be one of '%s', got " "'%s'." % (str(valid_ips_here), str(val))) return iap.Choice(val) if isinstance(val, iap.StochasticParameter): return val raise Exception( "Expected interpolation to be one of '%s' or a list of " "these values or a StochasticParameter. Got type %s." 
% ( str(ia.IMRESIZE_VALID_INTERPOLATIONS), type(val))) self.children = meta.handle_children_list(children, self.name, "then") self.interpolation = _validate_param(interpolation, False) self.interpolation_heatmaps = _validate_param(interpolation_heatmaps, True) self.interpolation_segmaps = _validate_param(interpolation_segmaps, True) def _augment_batch_(self, batch, random_state, parents, hooks): with batch.propagation_hooks_ctx(self, hooks, parents): images_were_array = None if batch.images is not None: images_were_array = ia.is_np_array(batch.images) shapes_orig = self._get_shapes(batch) samples = self._draw_samples(batch.nb_rows, random_state) batch = self.children.augment_batch_( batch, parents=parents + [self], hooks=hooks) if batch.images is not None: batch.images = self._keep_size_images( batch.images, shapes_orig["images"], images_were_array, samples) if batch.heatmaps is not None: # dont use shapes_orig["images"] because they might be None batch.heatmaps = self._keep_size_maps( batch.heatmaps, shapes_orig["heatmaps"], shapes_orig["heatmaps_arr"], samples[1]) if batch.segmentation_maps is not None: # dont use shapes_orig["images"] because they might be None batch.segmentation_maps = self._keep_size_maps( batch.segmentation_maps, shapes_orig["segmentation_maps"], shapes_orig["segmentation_maps_arr"], samples[2]) for augm_name in ["keypoints", "bounding_boxes", "polygons", "line_strings"]: augm_value = getattr(batch, augm_name) if augm_value is not None: func = functools.partial( self._keep_size_keypoints, shapes_orig=shapes_orig[augm_name], interpolations=samples[0]) cbaois = self._apply_to_cbaois_as_keypoints(augm_value, func) setattr(batch, augm_name, cbaois) return batch @classmethod def _keep_size_images(cls, images, shapes_orig, images_were_array, samples): interpolations, _, _ = samples gen = zip(images, interpolations, shapes_orig) result = [] for image, interpolation, input_shape in gen: if interpolation == KeepSizeByResize.NO_RESIZE: result.append(image) else: result.append( ia.imresize_single_image(image, input_shape[0:2], interpolation)) if images_were_array: # note here that NO_RESIZE can have led to different shapes nb_shapes = len({image.shape for image in result}) if nb_shapes == 1: result = np.array(result, dtype=images.dtype) return result @classmethod def _keep_size_maps(cls, augmentables, shapes_orig_images, shapes_orig_arrs, interpolations): result = [] gen = zip(augmentables, interpolations, shapes_orig_arrs, shapes_orig_images) for augmentable, interpolation, arr_shape_orig, img_shape_orig in gen: if interpolation == "NO_RESIZE": result.append(augmentable) else: augmentable = augmentable.resize( arr_shape_orig[0:2], interpolation=interpolation) augmentable.shape = img_shape_orig result.append(augmentable) return result @classmethod def _keep_size_keypoints(cls, kpsois_aug, shapes_orig, interpolations): result = [] gen = zip(kpsois_aug, interpolations, shapes_orig) for kpsoi_aug, interpolation, input_shape in gen: if interpolation == KeepSizeByResize.NO_RESIZE: result.append(kpsoi_aug) else: result.append(kpsoi_aug.on_(input_shape)) return result @classmethod def _get_shapes(cls, batch): result = dict() for column in batch.columns: result[column.name] = [cell.shape for cell in column.value] if batch.heatmaps is not None: result["heatmaps_arr"] = [ cell.arr_0to1.shape for cell in batch.heatmaps] if batch.segmentation_maps is not None: result["segmentation_maps_arr"] = [ cell.arr.shape for cell in batch.segmentation_maps] return result def _draw_samples(self, 
nb_images, random_state): rngs = random_state.duplicate(3) interpolations = self.interpolation.draw_samples((nb_images,), random_state=rngs[0]) if self.interpolation_heatmaps == KeepSizeByResize.SAME_AS_IMAGES: interpolations_heatmaps = np.copy(interpolations) else: interpolations_heatmaps = self.interpolation_heatmaps.draw_samples( (nb_images,), random_state=rngs[1] ) # Note that `interpolations_heatmaps == self.SAME_AS_IMAGES` # works here only if the datatype of the array is such that it # may contain strings. It does not work properly for e.g. # integer arrays and will produce a single bool output, even # for arrays with more than one entry. same_as_imgs_idx = [ip == self.SAME_AS_IMAGES for ip in interpolations_heatmaps] interpolations_heatmaps[same_as_imgs_idx] = \ interpolations[same_as_imgs_idx] if self.interpolation_segmaps == KeepSizeByResize.SAME_AS_IMAGES: interpolations_segmaps = np.copy(interpolations) else: # TODO This used previously the same seed as the heatmaps part # leading to the same sampled values. Was that intentional? # Doesn't look like it should be that way. interpolations_segmaps = self.interpolation_segmaps.draw_samples( (nb_images,), random_state=rngs[2] ) # Note that `interpolations_heatmaps == self.SAME_AS_IMAGES` # works here only if the datatype of the array is such that it # may contain strings. It does not work properly for e.g. # integer arrays and will produce a single bool output, even # for arrays with more than one entry. same_as_imgs_idx = [ip == self.SAME_AS_IMAGES for ip in interpolations_segmaps] interpolations_segmaps[same_as_imgs_idx] = \ interpolations[same_as_imgs_idx] return interpolations, interpolations_heatmaps, interpolations_segmaps def _to_deterministic(self): aug = self.copy() aug.children = aug.children.to_deterministic() aug.deterministic = True aug.random_state = self.random_state.derive_rng_() return aug def get_parameters(self): """See :func:`~imgaug.augmenters.meta.Augmenter.get_parameters`.""" return [self.interpolation, self.interpolation_heatmaps] def get_children_lists(self): """See :func:`~imgaug.augmenters.meta.Augmenter.get_children_lists`.""" return [self.children] def __str__(self): pattern = ( "%s(" "interpolation=%s, " "interpolation_heatmaps=%s, " "name=%s, " "children=%s, " "deterministic=%s" ")") return pattern % ( self.__class__.__name__, self.interpolation, self.interpolation_heatmaps, self.name, self.children, self.deterministic)
[ [ [ 898, 912 ] ], [ [ 914, 922 ] ], [ [ 924, 939 ] ], [ [ 948, 950 ], [ 10470, 10472 ] ], [ [ 958, 967 ], [ 52435, 52444 ], [ 77863, 77872 ], [ 113587, 113596 ], [ 128796, 128805 ], [ 184973, 184982 ] ], [ [ 976, 987 ], [ 8818, 8820 ], [ 16137, 16139 ], [ 16230, 16232 ], [ 18880, 18882 ], [ 19553, 19555 ], [ 19692, 19694 ], [ 20229, 20231 ], [ 20091, 20093 ], [ 20284, 20286 ], [ 20369, 20371 ], [ 20750, 20752 ], [ 20617, 20619 ], [ 20805, 20807 ], [ 20890, 20892 ], [ 21024, 21026 ], [ 21095, 21097 ], [ 27814, 27816 ], [ 27857, 27859 ], [ 28075, 28077 ], [ 28121, 28123 ], [ 30386, 30388 ], [ 30434, 30436 ], [ 30723, 30725 ], [ 30770, 30772 ], [ 53524, 53526 ], [ 54066, 54068 ], [ 54135, 54137 ], [ 56477, 56479 ], [ 56697, 56699 ], [ 57082, 57084 ], [ 57204, 57206 ], [ 78597, 78599 ], [ 78783, 78785 ], [ 80269, 80271 ], [ 80348, 80350 ], [ 81653, 81655 ], [ 81740, 81742 ], [ 81859, 81861 ], [ 81890, 81892 ], [ 81924, 81926 ], [ 81956, 81958 ], [ 81991, 81993 ], [ 82025, 82027 ], [ 82058, 82060 ], [ 82089, 82091 ], [ 186168, 186170 ], [ 188220, 188222 ], [ 189091, 189093 ], [ 32305, 32307 ], [ 32330, 32332 ], [ 34446, 34448 ], [ 34472, 34474 ], [ 36864, 36866 ], [ 36884, 36886 ], [ 36932, 36934 ], [ 36984, 36986 ], [ 37009, 37011 ], [ 39450, 39452 ], [ 39470, 39472 ], [ 39562, 39564 ], [ 39588, 39590 ], [ 82219, 82221 ] ], [ [ 995, 998 ], [ 17059, 17062 ], [ 17113, 17116 ], [ 17169, 17172 ], [ 17229, 17232 ], [ 16721, 16724 ], [ 16762, 16765 ], [ 16952, 16955 ], [ 17001, 17004 ], [ 17080, 17083 ], [ 17135, 17138 ], [ 17193, 17196 ], [ 17249, 17252 ], [ 18568, 18571 ], [ 19292, 19295 ] ], [ [ 1007, 1019 ], [ 40173, 40175 ], [ 2491, 2493 ], [ 3473, 3475 ], [ 3639, 3641 ], [ 9086, 9088 ], [ 9154, 9156 ], [ 10431, 10433 ], [ 11210, 11212 ], [ 11552, 11554 ], [ 15762, 15764 ], [ 47670, 47672 ], [ 49152, 49154 ], [ 49183, 49185 ], [ 49516, 49518 ], [ 49589, 49591 ], [ 50653, 50655 ], [ 50770, 50772 ], [ 50881, 50883 ], [ 50984, 50986 ], [ 52827, 52829 ], [ 53213, 53215 ], [ 56367, 56369 ], [ 56587, 56589 ], [ 71620, 71622 ], [ 74000, 74002 ], [ 78517, 78519 ], [ 83595, 83597 ], [ 84309, 84311 ], [ 183623, 183625 ], [ 185835, 185837 ], [ 71938, 71940 ], [ 72226, 72228 ], [ 72281, 72283 ], [ 72718, 72720 ], [ 74458, 74460 ], [ 74749, 74751 ], [ 74803, 74805 ], [ 75411, 75413 ], [ 95375, 95377 ], [ 103622, 103624 ], [ 181613, 181615 ], [ 182909, 182911 ] ], [ [ 1046, 1071 ], [ 18608, 18633 ], [ 19336, 19361 ] ], [ [ 1086, 1090 ], [ 40466, 40470 ], [ 58357, 58361 ], [ 104844, 104848 ], [ 121197, 121201 ], [ 175910, 175914 ], [ 182981, 182985 ] ], [ [ 1106, 1123 ], [ 9109, 9112 ], [ 9381, 9384 ], [ 9681, 9684 ], [ 9730, 9733 ], [ 10024, 10027 ], [ 10047, 10050 ], [ 10127, 10130 ], [ 10136, 10139 ], [ 10231, 10234 ], [ 10240, 10243 ], [ 10376, 10379 ], [ 10400, 10403 ], [ 10698, 10701 ], [ 10762, 10765 ], [ 10851, 10854 ], [ 11505, 11508 ], [ 11676, 11679 ], [ 47631, 47634 ], [ 47792, 47795 ], [ 47932, 47935 ], [ 49237, 49240 ], [ 49310, 49313 ], [ 49441, 49444 ], [ 49869, 49872 ], [ 49916, 49919 ], [ 50689, 50692 ], [ 50835, 50838 ], [ 50938, 50941 ], [ 51043, 51046 ], [ 51108, 51111 ], [ 70154, 70157 ], [ 71670, 71673 ], [ 73507, 73510 ], [ 74165, 74168 ], [ 76442, 76445 ], [ 111879, 111882 ], [ 47216, 47219 ], [ 71990, 71993 ], [ 72502, 72505 ], [ 72923, 72926 ], [ 72970, 72973 ], [ 74509, 74512 ], [ 75203, 75206 ], [ 75823, 75826 ], [ 75870, 75873 ], [ 95541, 95544 ], [ 103790, 103793 ], [ 103839, 103842 ], [ 181891, 181894 ], [ 182609, 182612 ], [ 182656, 182659 ] ], [ [ 1130, 1148 ], [ 1911, 1929 
], [ 5218, 5236 ], [ 5713, 5731 ] ], [ [ 1823, 1833 ], [ 2252, 2262 ], [ 4298, 4308 ] ], [ [ 2084, 2101 ], [ 78310, 78327 ], [ 114388, 114405 ], [ 129489, 129506 ] ], [ [ 2576, 2598 ] ], [ [ 2915, 2936 ] ], [ [ 3248, 3277 ], [ 2728, 2757 ], [ 3062, 3091 ], [ 79099, 79128 ], [ 116549, 116578 ], [ 130700, 130729 ] ], [ [ 4926, 4946 ], [ 79884, 79904 ], [ 115369, 115389 ], [ 130105, 130125 ] ], [ [ 5625, 5658 ], [ 4802, 4835 ], [ 5456, 5489 ] ], [ [ 5931, 5954 ], [ 1268, 1291 ], [ 4170, 4193 ], [ 83213, 83236 ] ], [ [ 7498, 7519 ], [ 4009, 4030 ], [ 4089, 4110 ] ], [ [ 8788, 8794 ], [ 8527, 8533 ], [ 8584, 8590 ], [ 8642, 8648 ], [ 8701, 8707 ] ], [ [ 8882, 8904 ], [ 70045, 70067 ], [ 111773, 111795 ] ], [ [ 9941, 9967 ], [ 111711, 111737 ], [ 127620, 127646 ] ], [ [ 12645, 12670 ], [ 15506, 15531 ], [ 27159, 27184 ], [ 29695, 29720 ], [ 32354, 32379 ], [ 34497, 34522 ], [ 37033, 37058 ], [ 39613, 39638 ] ], [ [ 12905, 12908 ], [ 2299, 2302 ], [ 4454, 4457 ], [ 23037, 23040 ], [ 25521, 25524 ] ], [ [ 21114, 21133 ] ], [ [ 23320, 23339 ] ], [ [ 25804, 25842 ], [ 22962, 23000 ], [ 165459, 165497 ] ], [ [ 28199, 28238 ], [ 160354, 160393 ] ], [ [ 30839, 30877 ], [ 25414, 25452 ], [ 142717, 142755 ] ], [ [ 32993, 33032 ], [ 137110, 137149 ] ], [ [ 35138, 35173 ], [ 154704, 154739 ] ], [ [ 37594, 37630 ], [ 148940, 148976 ] ], [ [ 40284, 40289 ] ], [ [ 40459, 40465 ], [ 40427, 40433 ], [ 46490, 46496 ] ], [ [ 57432, 57457 ], [ 84817, 84842 ] ], [ [ 58346, 58356 ], [ 85542, 85552 ], [ 96353, 96363 ], [ 69801, 69811 ] ], [ [ 85538, 85541 ], [ 96073, 96076 ] ], [ [ 96348, 96352 ], [ 104332, 104336 ] ], [ [ 104829, 104843 ], [ 118986, 119000 ], [ 139753, 139767 ], [ 151448, 151462 ], [ 162712, 162726 ], [ 111086, 111100 ] ], [ [ 118965, 118985 ], [ 120744, 120764 ] ], [ [ 121181, 121196 ], [ 132719, 132734 ], [ 134332, 134347 ], [ 145882, 145897 ], [ 157731, 157746 ], [ 126984, 126999 ] ], [ [ 132697, 132718 ], [ 134160, 134181 ] ], [ [ 134314, 134331 ], [ 137854, 137871 ], [ 136545, 136562 ], [ 136885, 136902 ] ], [ [ 137830, 137853 ], [ 139520, 139543 ] ], [ [ 139736, 139752 ], [ 143532, 143548 ], [ 142089, 142105 ], [ 142494, 142510 ] ], [ [ 143509, 143531 ], [ 145590, 145612 ] ], [ [ 145867, 145881 ], [ 149654, 149668 ], [ 148397, 148411 ], [ 148718, 148732 ] ], [ [ 149633, 149653 ], [ 151261, 151281 ] ], [ [ 151434, 151447 ], [ 155489, 155502 ], [ 154098, 154111 ], [ 154484, 154497 ] ], [ [ 155469, 155488 ], [ 157479, 157498 ] ], [ [ 157713, 157730 ], [ 161024, 161041 ], [ 168322, 168339 ], [ 159734, 159751 ], [ 160023, 160040 ] ], [ [ 161000, 161023 ], [ 162540, 162563 ] ], [ [ 162695, 162711 ], [ 166176, 166192 ], [ 171782, 171798 ], [ 164881, 164897 ], [ 165235, 165251 ] ], [ [ 166153, 166175 ], [ 168105, 168127 ] ], [ [ 168309, 168321 ], [ 170008, 170020 ], [ 169854, 169866 ] ], [ [ 169989, 170007 ], [ 171647, 171665 ] ], [ [ 171770, 171781 ], [ 173842, 173853 ], [ 173640, 173651 ] ], [ [ 173824, 173841 ], [ 175733, 175750 ] ], [ [ 175893, 175909 ], [ 181406, 181422 ], [ 185701, 185717 ], [ 187177, 187193 ], [ 188149, 188165 ], [ 189021, 189037 ], [ 181685, 181701 ], [ 182299, 182315 ] ] ]
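The size augmenters above are easiest to follow by watching what they do to image shapes. Below is a minimal usage sketch, assuming imgaug and numpy are installed; augment_image is the standard single-image entry point, and the shapes in the comments follow from the class docstrings.

import numpy as np
import imgaug.augmenters as iaa

image = np.zeros((50, 100, 3), dtype=np.uint8)  # height=50, width=100

# Pad equally on top/bottom until height == width.
padded = iaa.CenterPadToSquare().augment_image(image)
print(padded.shape)    # (100, 100, 3)

# Crop towards width/height == 2.0; this image already has that ratio, so nothing changes.
cropped = iaa.CropToAspectRatio(2.0, position="center").augment_image(image)
print(cropped.shape)   # (50, 100, 3)

# Run a size-changing child augmenter, then resize its output back to the input size.
keep = iaa.KeepSizeByResize(iaa.CropToSquare(), interpolation="nearest")
restored = keep.augment_image(image)
print(restored.shape)  # (50, 100, 3)
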
from .common import *

__all__ = ["TestReadWriteMemory"]


class TestReadWriteMemory(MCPTestCase):

    def test_read_flash_ok(self):
        self.mcp.dev.read.return_value = self.xb0_00
        self.assertEqual(self.mcp._read_flash(FlashDataSubcode.ChipSettings), self.xb0_00[4:14])

    def test_read_sram_ok(self):
        self.mcp.dev.read.return_value = self.x61
        self.assertEqual(self.mcp._read_sram(SramDataSubcode.ChipSettings), self.x61[4:22])
        self.assertEqual(self.mcp._read_sram(SramDataSubcode.GPSettings), self.x61[22:26])

    def test_read_flash_byte_ok(self):
        self.mcp.dev.read.return_value = self.xb0_00
        for n in range(0, 9):
            result = self.mcp._read_flash_byte(FlashDataSubcode.ChipSettings, n, range(8))
            value = int("".join(["1" if x else "0" for x in reversed(result)]), 2)
            self.assertEqual(value, self.xb0_00[4+n])

    def test_read_sram_byte_ok(self):
        self.mcp.dev.read.return_value = self.x61
        for n in range(0, 9):
            result = self.mcp._read_sram_byte(SramDataSubcode.ChipSettings, n, range(8))
            value = int("".join(["1" if x else "0" for x in reversed(result)]), 2)
            self.assertEqual(value, self.x61[4+n])

    def test_write_flash_byte_ok(self):
        # tests that 'write_flash_byte' sends the right data to hid write command
        xb1_00 = bytearray(64)
        xb1_00[0] = 0xb1
        with patch.object(self.mcp, "_read_response", return_value=self.xb0_00):
            for byte in range(9):
                for bit in range(8):
                    xb1_00[2:12] = self.xb0_00[4:14]
                    xb1_00[2+byte] = self.mcp._MCP2221__and(xb1_00[2+byte], 0xff - (1 << bit))
                    self.mcp._write_flash_byte(FlashDataSubcode.ChipSettings, byte, [bit], [False])
                    self.assertEqual(self.mcp.dev.write.call_args[0][0], xb1_00)

    def test_write_sram_ok(self):
        # tests that 'write_sram' sends the right data to hid write command
        with patch.object(self.mcp, "_read_response", return_value=self.x61):
            v = 0xff
            for byte in range(9):
                self.mcp._write_sram(SramDataSubcode.ChipSettings, byte, v)
                self.assertEqual(self.mcp.dev.write.call_args[0][0][2+byte], v)
[ [ [ 20, 21 ], [ 84, 95 ], [ 231, 247 ], [ 415, 430 ], [ 507, 522 ], [ 726, 742 ], [ 1070, 1085 ], [ 1438, 1443 ], [ 1772, 1788 ], [ 2038, 2043 ], [ 2197, 2212 ] ], [ [ 23, 30 ] ], [ [ 64, 83 ] ] ]
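The *_byte tests above rebuild integers from the boolean bit lists returned by the driver. The following self-contained sketch shows that round trip in isolation; byte_to_bits and bits_to_byte are illustrative helper names, not part of the MCP2221 driver under test.

def byte_to_bits(value):
    # Bits 0..7 of `value` as booleans, least significant bit first.
    return [bool(value & (1 << bit)) for bit in range(8)]


def bits_to_byte(bits):
    # Mirrors the expression used in the tests: reverse to MSB-first, parse as binary.
    return int("".join("1" if b else "0" for b in reversed(bits)), 2)


for value in (0x00, 0x5a, 0xff):
    assert bits_to_byte(byte_to_bits(value)) == value
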
#!/usr/bin/env python

from decimal import Decimal, getcontext
from fractions import Fraction

digits = 500
getcontext().prec = digits


def leibnitz(n):
    """
    Parameters
    ----------
    n : int

    Returns
    -------
    Fraction
        Approximation of pi.
    """
    pi = Fraction(0)
    sign = 1
    for k in range(1, n, 2):
        pi = pi + sign*Fraction(4, k)
        sign *= -1
    return pi


def calc_pi(n):
    """
    Calculate PI.

    Parameters
    ----------
    n : int
        Number of fractions.

    Returns
    -------
    Fraction
        Approximation of pi.
    """
    pi = Fraction(0)
    for k in range(n):
        # print(Fraction(-1,4)**k)
        pi += (Fraction(-1, 4)**k *
               (Fraction(1, 1+2*k) +
                Fraction(2, 1+4*k) +
                Fraction(1, 3+4*k)))
    return pi


def get_correct_digits(approx):
    """
    Get how many digits were correct.

    Parameters
    ----------
    approx : str
        String representation of an approximation of pi.

    Returns
    -------
    int
        The number of correct digits. If the number has too many correct
        digits, -1 is returned.
    """
    pi = ("3.14159265358979323846264338327950288419716939937510582097494459230"
          "78164062862089986280348253421170679")
    for i, el in enumerate(pi):
        if len(approx) <= i:
            return i-1
        if el != approx[i]:
            return i
    return -1  # Very good!


if __name__ == "__main__":
    # for n in range(1,180):
    #     approx = calc_pi(n)
    #     dec =Decimal(approx.numerator) / Decimal(approx.denominator)
    #     #print(dec)
    #     print("correct digits: %s (n=%i)" % (get_correct_digits(str(dec)),n))
    n = digits
    approx = calc_pi(n)
    dec = Decimal(approx.numerator) / Decimal(approx.denominator)
    print(dec)
[ [ [ 43, 50 ], [ 1758, 1765 ], [ 1786, 1793 ] ], [ [ 52, 62 ], [ 108, 118 ] ], [ [ 85, 93 ], [ 288, 296 ], [ 365, 373 ], [ 613, 621 ], [ 698, 706 ], [ 720, 728 ], [ 756, 764 ], [ 792, 800 ] ], [ [ 95, 101 ], [ 128, 134 ], [ 1717, 1723 ] ], [ [ 141, 149 ] ], [ [ 419, 426 ], [ 1737, 1744 ] ], [ [ 833, 851 ] ], [ [ 1713, 1714 ], [ 1745, 1746 ] ], [ [ 1728, 1734 ], [ 1766, 1772 ], [ 1794, 1800 ] ], [ [ 1752, 1755 ], [ 1824, 1827 ] ] ]
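A short, hedged driver for the two series above, assuming it runs in the same module so that leibnitz, calc_pi and get_correct_digits are in scope. It shows how much faster the (-1/4)**k series gains correct digits than the plain Leibniz series.

from decimal import Decimal, getcontext

getcontext().prec = 50
for n in (5, 10, 20, 40):
    for series in (leibnitz, calc_pi):
        approx = series(n)
        dec = Decimal(approx.numerator) / Decimal(approx.denominator)
        print("%-8s n=%-3d correct digits: %d"
              % (series.__name__, n, get_correct_digits(str(dec))))
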
import os.path
from app.data.database import init_db, db_path, get_expected_pathname, set_path


def db_exists():
    return os.path.isfile(db_path)


def check_db():
    global db_path
    if (db_path != get_expected_pathname()):
        print('DB Check: Running backup')
        backup_database_to(get_expected_pathname())
        init_db()

    if (not db_exists()):
        print('DB Check: No database found. Making a new one...')
        init_db()
        from app.data.camper_editing import reset_locs
        reset_locs()


def backup_database_to(filename):
    global db_path
    from shutil import copy2
    s = open('data/BACKUPDATA', 'a+')
    s.seek(0)
    prev_path = s.read()
    set_path(filename)
    db_path = filename  # this line is a crude fix for some messy scoping
    s.truncate(0)
    s.seek(0)
    s.write(filename)
    if (prev_path == ""):
        print("No previous database found, a new one will be generated. This may happen if the BACKUPDATA file is missing or corrupt.")
        return False
    elif (prev_path == filename):
        print("Tried to back up to the same file!")
    else:
        print("backing up & copying")
        from app.data.camper_editing import reset_locs
        copy2(prev_path, filename)
        reset_locs()
    return filename
[ [ [ 7, 14 ], [ 124, 126 ] ], [ [ 45, 52 ], [ 332, 339 ], [ 444, 451 ] ], [ [ 54, 61 ], [ 139, 146 ], [ 193, 200 ] ], [ [ 63, 84 ], [ 204, 225 ], [ 299, 320 ] ], [ [ 86, 94 ], [ 694, 702 ] ], [ [ 100, 109 ], [ 356, 365 ] ], [ [ 153, 161 ] ], [ [ 535, 553 ], [ 280, 298 ] ], [ [ 718, 725 ] ] ]
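backup_database_to() above interleaves the copy logic with a scoping workaround, so here is a hedged, self-contained sketch of the same marker-file pattern: the previously used database path is kept in data/BACKUPDATA, and every backup copies the old file and records the new path. backup_to and MARKER_FILE are illustrative names, and the data/ directory is assumed to exist.

import os
from shutil import copy2

MARKER_FILE = "data/BACKUPDATA"  # same marker file as the module above


def backup_to(filename):
    with open(MARKER_FILE, "a+") as marker:
        marker.seek(0)
        prev_path = marker.read()
        if prev_path == "":
            print("No previous database recorded; nothing to copy.")
        elif prev_path == filename:
            print("Tried to back up to the same file!")
        elif os.path.isfile(prev_path):
            copy2(prev_path, filename)  # copy the old database to its new location
        # remember the new location for the next run
        marker.truncate(0)
        marker.seek(0)
        marker.write(filename)
    return filename
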
# coding=utf-8 # *** WARNING: this file was generated by the Pulumi SDK Generator. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** import warnings import pulumi import pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union, overload from .. import _utilities from . import outputs from ._enums import * __all__ = [ 'GetGroupResult', 'AwaitableGetGroupResult', 'get_group', 'get_group_output', ] @pulumi.output_type class GetGroupResult: def __init__(__self__, arn=None, configuration=None, description=None, resource_query=None, resources=None, tags=None): if arn and not isinstance(arn, str): raise TypeError("Expected argument 'arn' to be a str") pulumi.set(__self__, "arn", arn) if configuration and not isinstance(configuration, list): raise TypeError("Expected argument 'configuration' to be a list") pulumi.set(__self__, "configuration", configuration) if description and not isinstance(description, str): raise TypeError("Expected argument 'description' to be a str") pulumi.set(__self__, "description", description) if resource_query and not isinstance(resource_query, dict): raise TypeError("Expected argument 'resource_query' to be a dict") pulumi.set(__self__, "resource_query", resource_query) if resources and not isinstance(resources, list): raise TypeError("Expected argument 'resources' to be a list") pulumi.set(__self__, "resources", resources) if tags and not isinstance(tags, list): raise TypeError("Expected argument 'tags' to be a list") pulumi.set(__self__, "tags", tags) @property @pulumi.getter def arn(self) -> Optional[str]: """ The Resource Group ARN. """ return pulumi.get(self, "arn") @property @pulumi.getter def configuration(self) -> Optional[Sequence['outputs.GroupConfigurationItem']]: return pulumi.get(self, "configuration") @property @pulumi.getter def description(self) -> Optional[str]: """ The description of the resource group """ return pulumi.get(self, "description") @property @pulumi.getter(name="resourceQuery") def resource_query(self) -> Optional['outputs.GroupResourceQuery']: return pulumi.get(self, "resource_query") @property @pulumi.getter def resources(self) -> Optional[Sequence[str]]: return pulumi.get(self, "resources") @property @pulumi.getter def tags(self) -> Optional[Sequence['outputs.GroupTag']]: return pulumi.get(self, "tags") class AwaitableGetGroupResult(GetGroupResult): # pylint: disable=using-constant-test def __await__(self): if False: yield self return GetGroupResult( arn=self.arn, configuration=self.configuration, description=self.description, resource_query=self.resource_query, resources=self.resources, tags=self.tags) def get_group(name: Optional[str] = None, opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetGroupResult: """ Schema for ResourceGroups::Group :param str name: The name of the resource group """ __args__ = dict() __args__['name'] = name if opts is None: opts = pulumi.InvokeOptions() if opts.version is None: opts.version = _utilities.get_version() __ret__ = pulumi.runtime.invoke('aws-native:resourcegroups:getGroup', __args__, opts=opts, typ=GetGroupResult).value return AwaitableGetGroupResult( arn=__ret__.arn, configuration=__ret__.configuration, description=__ret__.description, resource_query=__ret__.resource_query, resources=__ret__.resources, tags=__ret__.tags) @_utilities.lift_output_func(get_group) def get_group_output(name: Optional[pulumi.Input[str]] = None, opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetGroupResult]: """ Schema for 
ResourceGroups::Group :param str name: The name of the resource group """ ...
[ [ [ 176, 184 ] ], [ [ 192, 198 ] ], [ [ 206, 220 ], [ 471, 477 ], [ 1762, 1768 ], [ 1927, 1933 ], [ 2095, 2101 ], [ 2290, 2296 ], [ 2468, 2474 ], [ 2599, 2605 ], [ 756, 762 ], [ 941, 947 ], [ 1138, 1144 ], [ 1342, 1348 ], [ 1537, 1543 ], [ 1707, 1713 ], [ 1883, 1889 ], [ 2041, 2047 ], [ 2238, 2244 ], [ 2413, 2419 ], [ 2549, 2555 ], [ 2690, 2696 ], [ 3204, 3210 ], [ 3455, 3461 ], [ 3569, 3575 ], [ 4109, 4115 ], [ 4013, 4019 ], [ 4076, 4082 ] ], [ [ 240, 243 ] ], [ [ 245, 252 ] ], [ [ 254, 262 ], [ 1797, 1805 ], [ 1972, 1980 ], [ 2138, 2146 ], [ 2358, 2366 ], [ 2509, 2517 ], [ 2635, 2643 ], [ 3153, 3161 ], [ 3195, 3203 ], [ 4004, 4012 ], [ 4067, 4075 ] ], [ [ 264, 272 ], [ 1981, 1989 ], [ 2518, 2526 ], [ 2644, 2652 ] ], [ [ 274, 279 ] ], [ [ 281, 289 ] ], [ [ 305, 315 ], [ 3938, 3948 ], [ 3530, 3540 ] ], [ [ 330, 337 ] ], [ [ 358, 359 ] ], [ [ 361, 368 ] ], [ [ 496, 510 ], [ 2747, 2761 ], [ 2887, 2901 ], [ 3654, 3668 ], [ 4123, 4137 ] ], [ [ 2723, 2746 ], [ 3237, 3260 ], [ 3688, 3711 ] ], [ [ 3137, 3146 ], [ 3966, 3975 ] ], [ [ 3981, 3997 ] ] ]
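A hedged usage sketch for the generated module above. It assumes the module ships inside the pulumi_aws_native provider SDK and that the code runs inside a Pulumi program; the resource group name is illustrative.

import pulumi
from pulumi_aws_native import resourcegroups  # assumed packaging of the module above

# Look up an existing resource group by name and export its ARN.
group = resourcegroups.get_group(name="my-resource-group")
pulumi.export("groupArn", group.arn)

# get_group_output() is the Output-friendly variant: it accepts Pulumi inputs
# and returns pulumi.Output[GetGroupResult] instead of an awaitable result.
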
import numpy as np


def partition(arr, low, high):
    i = (low-1)           # index of smaller element
    pivot = arr[high]     # pivot

    for j in range(low, high):

        # If current element is smaller than the pivot
        if arr[j] < pivot:

            # increment index of smaller element
            i = i+1
            arr[i], arr[j] = arr[j], arr[i]

    arr[i+1], arr[high] = arr[high], arr[i+1]
    return (i + 1)


def quickSort(arr, low, high):
    if low < high:

        # pi is partitioning index, arr[p] is now
        # at right place
        pi = partition(arr, low, high)

        # Separately sort elements before
        # partition and after partition
        quickSort(arr, low, pi-1)
        quickSort(arr, pi + 1, high)


# Driver code to test above
# arr = [10, 7, 8, 9, 1, 5]
arr = np.random.randint(0, 1000000, 200000)
n = len(arr)
quickSort(arr, 0, n-1)
# print(f"Sorted array is: {arr}")
[ [ [ 7, 18 ], [ 817, 819 ] ], [ [ 25, 34 ], [ 573, 582 ] ], [ [ 438, 447 ], [ 868, 877 ], [ 690, 699 ], [ 724, 733 ] ], [ [ 811, 814 ], [ 863, 866 ], [ 878, 881 ] ], [ [ 855, 856 ], [ 886, 887 ] ] ]
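A quick cross-check of the in-place quickSort above against Python's built-in sorting, assuming it runs in the same module so quickSort is in scope.

import random

data = [random.randrange(1_000_000) for _ in range(500)]
expected = sorted(data)

quickSort(data, 0, len(data) - 1)  # sorts `data` in place
assert data == expected
print("quickSort agrees with sorted() on", len(data), "elements")
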
#!/bin/env python3 # Steps requried to use # install requried libraries # (root)# dnf install python3-ldap3 # # Create python virtual environment directory # (user)$ python3 -m venv ./venv3 # # Enable virtual environment # (user)$ source ./venv3/bin/activate # # Update pip and then install needed libary # (user-venv3)$ pip install --upgrade pip # (user-venv3)$ pip install python-freeipa # (user-venv3)$ pip install ldap3 # # Execute Script: # (user-venv3)$ ./load_test.py -h # -- not required, saved as a note # dnf install python3-requests-kerberos python3-requests-gssapi import sys import time from datetime import datetime import re import argparse import logging #from linetimer import CodeTimer import itertools import pprint import subprocess import socket import dns.resolver import urllib3 urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning) # from ldap3 import Server, Connection, ALL, MODIFY_ADD import ldap3 from python_freeipa import ClientMeta # import requests #from requests_kerberos import HTTPKerberosAuth # generate a 4 digit randomizer from the current time # randomizer = int(time.time()) % 10000 randomizer = datetime.now().strftime("%d%H%M") start_timestr = datetime.now().strftime("%Y%m%d %H:%M") start_time = time.time() uid_template = "tuser{}_{{seq}}".format(randomizer) pp=pprint.PrettyPrinter(indent=2) class LogFilter(object): def __init__(self,level,type='ge'): self.__level = level self.__type = type def filter(self, logRecord): if self.__type == 'ge': return logRecord.levelno >= self.__level elif self.__type == 'eq': return logRecord.levelno == self.__level else: return logRecord.levelno <= self.__level class MyLogger(logging.getLoggerClass()): _PERF = 21 def __init__(self, name, **kwargs ): super().__init__(name, **kwargs) logging.addLevelName(self._PERF, 'PERF') def perf(self, message, *args, **kwargs): if self.isEnabledFor(self._PERF): self._log(self._PERF, message, args, **kwargs) logging.setLoggerClass(MyLogger) logger = logging.getLogger('IDM_user_load_tester') logger.setLevel(logging.INFO) _stout_handler = logging.StreamHandler() _stout_handler.setLevel(logging.INFO) logger.addHandler(_stout_handler) def iter_timer(iterable, step=10, label=""): start = time.time() last_t = start loop_tag = "loop {}{}{{}}".format(label, " "*bool(label)) logger.perf(loop_tag.format("start")) pos = 0 # step_count=len(iterable)//step for item in iterable: pos = pos + 1 if pos != 0 and pos % step == 0: logger.perf("{}: {:4.3f} {:4.3f}".format(item,time.time() - start, time.time() - last_t)) last_t = time.time() yield item logger.perf("{}: {:4.3f} {:4.3f}".format(pos,time.time() - start, time.time() - last_t)) logger.perf(loop_tag.format("end")) def loop_timer(count,step=10,label=""): start = time.time() last_t = start loop_tag = "loop {}{}{{}}".format(label, " "*bool(label)) logger.perf(loop_tag.format("start")) for item in range(count): if item != 0 and item % step == 0: logger.perf("{}: {:4.3f} {:4.3f}".format(item,time.time() - start, time.time() - last_t)) last_t = time.time() yield item logger.perf("{}: {:4.3f} {:4.3f}".format(count,time.time() - start, time.time() - last_t)) logger.perf(loop_tag.format("end")) # creates a generator to iterate through a list in chunks # returns an iterator chunk of the iterable of up to the given size. 
def chunker(iterable, size): it = iter(iterable) while True: chunk = tuple(itertools.islice(it,size)) if not chunk: return yield chunk def dump_ldap_stats(reset=True): logger.debug(ldap_conn.usage) if reset: ldap_conn.usage.reset() def generate_user(seq_num, ldif_out=False, dc_dn=None): #create a list/dict of user entries to use for passing to a function user = {} user["a_uid"] = uid_template.format(seq=seq_num) user["o_givenname"] = str(seq_num) user["o_sn"] = "tuser_{}".format(randomizer) user["o_cn"] = "{} {}".format(user["o_givenname"], user["o_sn"]) user["o_preferredlanguage"]='EN' user["o_employeetype"]="Created via load_test.py. Run started at: {}".format(start_timestr) # if the user is to be used for LDIF, strip the first two prepended chars if ldif_out: clean_rex = r"^._" keylist = list(user.keys()) user['attributes']={} for key in keylist: new_key = re.sub(clean_rex,'',key) user['attributes'][new_key]=user[key] del user[key] if dc_dn is not None: user['dn']="uid={},cn=staged users,cn=accounts,cn=provisioning,{}".format(user['attributes']['uid'],dc_dn) user['object_class']=['top','inetorgperson'] return user def add_users_api(total): users=[] for i in loop_timer(args.count,args.count//10,label="user_add_api"): user = generate_user(i) users.append(user["a_uid"]) logger.debug(user) if args.stage: user_out = client.stageuser_add(**user) else: user_out = client.user_add(**user) logger.debug(user_out) return users def add_users_stage(total): users=[] if args.ldap_stage: for i in loop_timer(args.count,args.count//10,label="user_add_stage_ldap"): user = generate_user(i, ldif_out=True, dc_dn=dom_dn) users.append(user['attributes']['uid']) user_dn=user['dn'] del user['dn'] ldap_conn.add(user_dn,**user) else: for i in loop_timer(args.count,args.count//10,label="user_add_stage"): user = generate_user(i) users.append(user["a_uid"]) logger.debug(user) user_out = client.stageuser_add(**user) logger.debug(user_out) for i in iter_timer(users,args.count//10,label="user_activate"): activate_out = client.stageuser_activate(i) logger.debug(activate_out) return users def get_users(template): logger.perf("Checking for user template '{}'".format(template)) if client.user_find(template,o_sizelimit=1)['count'] > 0: users = [ user['uid'][0] for user in client.user_find(template,o_sizelimit=0,o_timelimit=0)['result']] logger.perf("Found {} users".format(len(users))) else: logger.perf("Unable to find user template") exit(1) return users def get_users_ldap(template): logger.perf("Checking for user template '{}'".format(template)) results = client.user_find(template,o_sizelimit=1) if results['count'] > 0: result=results['result'][0] uid = result['uid'][0] user_dn=result['dn'] base_dn = re.sub("uid={},".format(uid),'',user_dn) entry_gen = ldap_conn.extend.standard.paged_search(search_base = base_dn, search_filter = "(uid={}*)".format(template), search_scope = ldap3.SUBTREE, attributes = '*', paged_size=1000, generator=True) total = 0 users=[] for entry in entry_gen: # print(entry) total += 1 if total % 10000 == 0: logger.perf("Loaded {} users".format(total)) dump_ldap_stats() # extract user uid. 
For some reason uid is a list, we only need the first users.append(entry['attributes']['uid'][0]) if args.user_limit>-1 and total >= args.user_limit: break logger.perf("Loaded {} users".format(len(users))) dump_ldap_stats() else: logger.perf("Unable to find user template") exit(1) return users def create_group_add_users_api(i,users): group_name = "group{}_{}".format(randomizer,i) group_desc = "Test group vor load_test.py. Run started at: {}".format(start_timestr) logger.info("Creating group: {}".format(group_name)) result = client.group_add(group_name, o_description=group_desc) if result["value"]==group_name: logger.info("Success") logger.debug(result) logger.perf("Group: {}".format(group_name)) logger.info("Adding {} users".format(len(users))) result = client.group_add_member(group_name, o_user=users) logger.info("Done") logger.debug(result) def create_group_add_users_ldap(i,users,ldap_conn,base_user_dn,chunk=-1): group_name = "group{}_{}".format(randomizer,i) group_desc = "Test group vor load_test.py. Run started at: {}".format(start_timestr) logger.info("Creating group: {}".format(group_name)) result = client.group_add(group_name, o_description=group_desc,o_raw=True) group_dn=result['result']['dn'] logger.debug(result) mod_group_users_ldap(users, ldap_conn, base_user_dn, group_dn, ldap3.MODIFY_ADD, chunk) def remove_group_users_ldap(users, ldap_conn, base_user_dn, group_name, group_dn, chunk=-1): logger.info("Group to delete: {}".format(group_dn)) start = time.time() mod_group_users_ldap(users, ldap_conn, base_user_dn, group_dn, ldap3.MODIFY_DELETE, chunk) logger.perf("Removing users from group took: {:4.3f}".format(time.time() - start)) result = client.group_show(group_name) logger.info("Group show: {}".format(result)) logger.info("Delete group from IDM: {}".format(group_dn)) start = time.time() result = client.group_del(group_name) logger.perf("Delete group using API took: {:4.3f}".format(time.time() - start)) logger.info("Group del resul: {}".format(result)) def ldap_modify_retry(*fargs, **kwargs): for retry_num in range(args.max_retries+1): try: return(ldap_conn.modify(*fargs,**kwargs)) except Exception as e: logger.perf("Exception Occured") logger.perf("'{}'".format(e)) logger.perf("{} retries left".format(args.max_retries-retry_num)) ldap_conn.unbind() ldap_conn.bind() logger.info("LDAP Connection rebound") def mod_group_users_ldap(users, ldap_conn, base_user_dn, group_dn, ldap_mod_op, chunk=-1): if chunk==-1: chunk=len(users) user_dn_list = [base_user_dn.format(user) for user in users] for user_dn_chunk in chunker(user_dn_list,chunk): # print(user_dn_chunk) logger.perf("Chunk ({})".format(len(user_dn_chunk))) logger.debug("Showing fist 20 of user_dn_chunk: {}".format(user_dn_chunk[:20])) # result = ldap_conn.modify(group_dn,{"member":[(ldap_mod_op, user_dn_chunk)]}) result = ldap_modify_retry(group_dn,{"member":[(ldap_mod_op, user_dn_chunk)]}) dump_ldap_stats() logger.debug("LDAP Modify result: {}".format(result)) if args.rebind: logger.perf("rebinding LDAP connection") ldap_conn.unbind() ldap_conn.bind() if args.delay>0: logger.perf("Sleeping {} seconds".format(args.delay)) time.sleep(args.delay) def check_dns_record(server, domain, record): resolver = dns.resolver.Resolver() resolver.nameservers=[socket.gethostbyname(server)] try: rdata = resolver.query(record + "." + domain) logger.perf("Server [{}] answered with [{}]".format(server, rdata[0].address)) return 1 except dns.resolver.NXDOMAIN: logger.perf("Record [{}] doesn't exist on server [{}]".format(record + "." 
+ domain, server)) return 0 parser = argparse.ArgumentParser(description="Generate load test data for IdM", formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument('-v', dest='verbosity', action='count', default=0, help="Increase Verbosity, default is errors only. Only effective up to 3 levels.") parser.add_argument('-c', type=int, dest='count', help="Total count of users to add") parser.add_argument('-g', dest='group_count', default=1, type=int, help="Number of groups to create") parser.add_argument('-S', dest='server', type=str, help="Server to connect to") parser.add_argument('-U', dest='user', type=str, help="User account to use for connect") parser.add_argument('-P', dest='password', type=str, help="Password for connection") parser.add_argument('--stage', dest='stage', action='store_true', default=False, help="Create user in stage not active") parser.add_argument('--stage-ldap', dest='ldap_stage', default=False, action='store_true', help='Create stage users via ldap not API') parser.add_argument('--ldap-group', dest='ldap_group', default=False, action='store_true', help="Add users to group using LDAP directly") parser.add_argument('--ldap-group-remove', dest='ldap_group_del', type=str, help="Remove users from group using LDAP directly") parser.add_argument('-C', dest='chunk', type=int, default=-1, help="Chunk size for batching user adds to groups, -1 means all users given in count") parser.add_argument('-r', dest='reuse_template', type=str, help="Reuse existing users for group add using given user naming template") parser.add_argument('-D', dest='delay',type=int, default=0, help="Delay N seconds between chunks") parser.add_argument('--rebind', dest='rebind',default=False,action='store_true', help="Perform a unmind/bind operation between ldap operations.") parser.add_argument('-l', dest='user_limit', type=int, default=-1, help="Limit the number of users returned by reuse") parser.add_argument('--max-retries',dest='max_retries', type=int, default=0, help="Maximum number of retries for a failed chunk operation") parser.add_argument('--check-repl', dest='check_repl',default=False,action='store_true', help="Check when replication is finished by adding a DNS record") args=parser.parse_args() # setting up logger here to prevent log files being generated when showing help perf_logfile = "perf_{}".format(randomizer) _perf_handler = logging.FileHandler(perf_logfile) _perf_formatter = logging.Formatter("%(asctime)s; %(message)s") _perf_handler.setFormatter(_perf_formatter) _perf_handler.addFilter(LogFilter(MyLogger._PERF,type='eq')) logger.addHandler(_perf_handler) if args.verbosity: # Error is a level of 40. 
level=30-(args.verbosity*10) if level<0: level=0 logger.setLevel(level) levels={ 5: "CRITICAL", 4: "ERROR", 3: "WARNING", 2: "INFO", 1: "DEBUG", 0: "ALL" } if level!=30: log_file = "log_{}".format(randomizer) _file_handler = logging.FileHandler(log_file) _file_formatter = logging.Formatter('%(asctime)s %(levelname)s :: %(message)s') _file_handler.setFormatter(_file_formatter) _file_handler.addFilter(LogFilter(level)) logger.addHandler(_file_handler) logger.info("Logging to file '{}'".format(log_file)) logger.info("Debug level: {0} ({1})".format(levels[level // 10],level)) # client = ClientMeta('ipaserver0.example.com',False) # client.login('admin', 'admin123') # kerberos seems broken using OS rpms on RHEL 8 #client.login_kerberos() # user = client.user_add('test4', 'John', 'Doe', 'John Doe', o_preferredlanguage='EN') # Output some data to the user about the script options passed in # Not working as expected when git not found try: commit_info = str(subprocess.check_output(['git', 'log', '-n', '1', '--pretty=tformat:"%ci %H"']),"utf-8").strip() logger.perf("Commit Info: {}".format(commit_info)) except: logger.perf("No git info found") pass logger.perf("Start Time: {}".format(start_timestr)) logger.perf("User count: {} Group count: {}".format(args.count,args.group_count)) logger.perf("Server: {}".format(args.server)) logger.perf("Perf Log file: {}".format(perf_logfile)) if args.stage: if args.ldap_stage: logger.perf("Creating Stage users via ldap") else: logger.perf("Creating Stage users via API") else: logger.perf("Creating active users via API") if args.ldap_group: logger.perf("Adding users to groups via LDAP") if args.chunk>-1: logger.perf(" Using a chunk size of {}".format(args.chunk)) else: logger.perf("Adding users to groups via API") if args.reuse_template: logger.perf("Reusing users starting with: '{}'".format(args.reuse_template)) if args.user_limit>-1: logger.perf(" Limiting reuse to first {} users found".format(args.user_limit)) logger.debug(args) logger.perf('----') # end start header client = ClientMeta(args.server,False) client.login(args.user, args.password) dnszone = client.dnszone_find(o_forward_only=True)['result'][0] servers = dnszone['nsrecord'] domain = dnszone['idnsname'][0]['__dns_name__'] logger.info("Found servers: {} for domain: [{}]".format(servers, domain)) if args.ldap_group or args.ldap_stage: user_dn=client.user_show(args.user,o_all=True)['result']['dn'] base_user_dn = re.sub("^uid={}".format(args.user),'uid={}',user_dn) dom_dn = re.search("(dc=.*)",user_dn, re.IGNORECASE).group(1) ldap_server = ldap3.Server(args.server, get_info=ldap3.ALL) ldap_conn = ldap3.Connection(ldap_server,user=user_dn, password=args.password, auto_bind=True, collect_usage=True) if args.reuse_template: user_dn=client.user_show(args.user,o_all=True)['result']['dn'] base_user_dn = re.sub("^uid={},".format(args.user),'',user_dn) logger.debug("base_user_dn: {}".format(base_user_dn)) ldap_server = ldap3.Server(args.server, get_info=ldap3.ALL) ldap_conn = ldap3.Connection(ldap_server,user=user_dn, password=args.password, auto_bind=True, collect_usage=True) users=get_users_ldap(args.reuse_template) else: logger.info("Creating {} users".format(args.count)) logger.info("template: {}".format(uid_template)) logger.info("Checking for existing templated users") user_check=client.user_find(uid_template.format(seq=0)) if user_check["count"]>0: sec_to_wait = 61 - datetime.now().second logger.error("Existing users found please wait {} seconds".format(sec_to_wait)) exit(1) else: logger.info("Proceeding") 
if args.stage: users = add_users_stage(args.count) else: users = add_users_api(args.count) if args.ldap_group: # print(ldap_server.info) # for i in iter_timer(range(args.group_count),step=1,label="group_add_user_ldap"): # create_group_add_users_ldap(i,users,ldap_conn,base_user_dn,chunk=args.chunk) for i in loop_timer(args.group_count,1,label="group_add_user_ldap"): create_group_add_users_ldap(i,users,ldap_conn,base_user_dn,chunk=args.chunk) elif args.ldap_group_del is not None: user_dn=client.user_show(args.user,o_all=True)['result']['dn'] group_dn=client.group_show(args.ldap_group_del,o_all=True)['result']['dn'] base_user_dn = re.sub("^uid={}".format(args.user),'uid={}',user_dn) ldap_server = ldap3.Server(args.server, get_info=ldap3.ALL) ldap_conn = ldap3.Connection(ldap_server,user=user_dn, password=args.password, auto_bind=True) remove_group_users_ldap(users, ldap_conn, base_user_dn, args.ldap_group_del, group_dn, chunk=args.chunk) else: for i in loop_timer(args.group_count,1,label="group_add_user_api"): create_group_add_users_api(i,users) logger.perf('----') logger.perf("End Time: {}".format(datetime.now().strftime("%Y%m%d %H:%M"))) run_time=time.time() - start_time logger.perf("Total Run Time: {:.3f}sec".format(run_time)) logger.perf("Total Run time: {:d}min {:.3f}sec".format(int(run_time//60),run_time%60)) if args.check_repl: record = "trecord{}".format(randomizer) client.dnsrecord_add(a_dnszoneidnsname=domain, a_idnsname=record, o_a_part_ip_address='1.1.1.1') check_result = 0 itr_ctr = 0 while check_result < len(servers) and itr_ctr < 600: time.sleep(1) check_result = 0 logger.perf("---- Iteration [{}] ----".format(itr_ctr)) for server in servers: check_result += check_dns_record(server, domain, record) itr_ctr += 1 logger.perf('----') logger.perf("End Time with replication: {}".format(datetime.now().strftime("%Y%m%d %H:%M"))) run_time=time.time() - start_time logger.perf("Total Run Time with replication: {:.3f}sec".format(run_time)) logger.perf("Total Run time with replication: {:d}min {:.3f}sec".format(int(run_time//60),run_time%60))
[ [ [ 588, 591 ] ], [ [ 599, 603 ], [ 1261, 1265 ], [ 19224, 19228 ], [ 19662, 19666 ], [ 20024, 20028 ], [ 2340, 2344 ], [ 2648, 2652 ], [ 2669, 2673 ], [ 2707, 2711 ], [ 2782, 2786 ], [ 2803, 2807 ], [ 2915, 2919 ], [ 3164, 3168 ], [ 3185, 3189 ], [ 3223, 3227 ], [ 3300, 3304 ], [ 3321, 3325 ], [ 8879, 8883 ], [ 9047, 9051 ], [ 9227, 9231 ], [ 9339, 9343 ], [ 10693, 10697 ] ], [ [ 625, 633 ], [ 1158, 1166 ], [ 1208, 1216 ], [ 17853, 17861 ], [ 19173, 19181 ], [ 19969, 19977 ] ], [ [ 641, 643 ], [ 16845, 16847 ], [ 16909, 16911 ], [ 16938, 16940 ], [ 17248, 17250 ], [ 18682, 18684 ], [ 4460, 4462 ], [ 6530, 6532 ] ], [ [ 651, 659 ], [ 11190, 11198 ], [ 11310, 11318 ] ], [ [ 667, 674 ], [ 1753, 1760 ], [ 2053, 2060 ], [ 2096, 2103 ], [ 2154, 2161 ], [ 2186, 2193 ], [ 2234, 2241 ], [ 13960, 13967 ], [ 14012, 14019 ], [ 14548, 14555 ], [ 14600, 14607 ], [ 1875, 1882 ] ], [ [ 715, 724 ], [ 3593, 3602 ] ], [ [ 732, 738 ], [ 1329, 1335 ] ], [ [ 746, 756 ], [ 15314, 15324 ] ], [ [ 764, 770 ], [ 10829, 10835 ] ], [ [ 778, 790 ], [ 10779, 10782 ], [ 11037, 11040 ] ], [ [ 799, 806 ], [ 807, 814 ], [ 832, 839 ] ], [ [ 939, 944 ], [ 16978, 16983 ], [ 17013, 17018 ], [ 17038, 17043 ], [ 17368, 17373 ], [ 17403, 17408 ], [ 17428, 17433 ], [ 18751, 18756 ], [ 18786, 18791 ], [ 18811, 18816 ], [ 6826, 6831 ], [ 8695, 8700 ], [ 8956, 8961 ] ], [ [ 973, 983 ], [ 16438, 16448 ] ], [ [ 1145, 1155 ], [ 1313, 1323 ], [ 13932, 13942 ], [ 14516, 14526 ], [ 19447, 19457 ], [ 4036, 4046 ], [ 7709, 7719 ], [ 8336, 8346 ] ], [ [ 1192, 1205 ], [ 15552, 15565 ], [ 4230, 4243 ], [ 7796, 7809 ], [ 8423, 8436 ] ], [ [ 1248, 1258 ], [ 19238, 19248 ], [ 20038, 20048 ] ], [ [ 1273, 1285 ], [ 17673, 17685 ], [ 17774, 17786 ], [ 3931, 3943 ] ], [ [ 1326, 1328 ] ], [ [ 1367, 1376 ], [ 14126, 14135 ], [ 14738, 14747 ] ], [ [ 1744, 1752 ], [ 2076, 2084 ], [ 14136, 14144 ] ], [ [ 2087, 2093 ], [ 2138, 2144 ], [ 2248, 2254 ], [ 14163, 14169 ], [ 14305, 14311 ], [ 14760, 14766 ], [ 14797, 14803 ], [ 14854, 14860 ], [ 15414, 15420 ], [ 15475, 15481 ], [ 15516, 15522 ], [ 15568, 15574 ], [ 15652, 15658 ], [ 15698, 15704 ], [ 15793, 15799 ], [ 15850, 15856 ], [ 15902, 15908 ], [ 15970, 15976 ], [ 16041, 16047 ], [ 16110, 16116 ], [ 16183, 16189 ], [ 16289, 16295 ], [ 16370, 16376 ], [ 16389, 16395 ], [ 16649, 16655 ], [ 17298, 17304 ], [ 17585, 17591 ], [ 17639, 17645 ], [ 17691, 17697 ], [ 17879, 17885 ], [ 17983, 17989 ], [ 19119, 19125 ], [ 19139, 19145 ], [ 19249, 19255 ], [ 19307, 19313 ], [ 19709, 19715 ], [ 19894, 19900 ], [ 19918, 19924 ], [ 20053, 20059 ], [ 20132, 20138 ], [ 2433, 2439 ], [ 2601, 2607 ], [ 2736, 2742 ], [ 2828, 2834 ], [ 3006, 3012 ], [ 3117, 3123 ], [ 3252, 3258 ], [ 3346, 3352 ], [ 3703, 3709 ], [ 4926, 4932 ], [ 5066, 5072 ], [ 5588, 5594 ], [ 5660, 5666 ], [ 5803, 5809 ], [ 5883, 5889 ], [ 6119, 6125 ], [ 6180, 6186 ], [ 6287, 6293 ], [ 7197, 7203 ], [ 7476, 7482 ], [ 7560, 7566 ], [ 7814, 7820 ], [ 7971, 7977 ], [ 7996, 8002 ], [ 8020, 8026 ], [ 8069, 8075 ], [ 8182, 8188 ], [ 8204, 8210 ], [ 8441, 8447 ], [ 8608, 8614 ], [ 8817, 8823 ], [ 8986, 8992 ], [ 9112, 9118 ], [ 9159, 9165 ], [ 9281, 9287 ], [ 9363, 9369 ], [ 9591, 9597 ], [ 9630, 9636 ], [ 9666, 9672 ], [ 9786, 9792 ], [ 10104, 10110 ], [ 10161, 10167 ], [ 10435, 10441 ], [ 10516, 10522 ], [ 10633, 10639 ], [ 10930, 10936 ], [ 11068, 11074 ] ], [ [ 2169, 2183 ], [ 2210, 2224 ], [ 2266, 2280 ] ], [ [ 2289, 2299 ], [ 5695, 5705 ] ], [ [ 2869, 2879 ], [ 18343, 18353 ], [ 19019, 19029 ], [ 4802, 4812 ], [ 5180, 5190 ], [ 5456, 5466 ] ], [ [ 3514, 
3521 ], [ 10043, 10050 ] ], [ [ 3672, 3687 ], [ 7250, 7265 ], [ 7530, 7545 ], [ 10413, 10428 ] ], [ [ 3778, 3791 ], [ 4873, 4886 ], [ 5260, 5273 ], [ 5531, 5544 ] ], [ [ 4758, 4771 ], [ 18087, 18100 ] ], [ [ 5110, 5125 ], [ 18039, 18054 ] ], [ [ 5860, 5869 ] ], [ [ 6259, 6273 ], [ 17539, 17553 ] ], [ [ 7637, 7663 ], [ 19082, 19108 ] ], [ [ 8231, 8258 ], [ 18407, 18434 ] ], [ [ 8726, 8749 ], [ 18896, 18919 ] ], [ [ 9418, 9435 ], [ 10339, 10356 ] ], [ [ 9831, 9851 ], [ 8632, 8652 ], [ 8893, 8913 ] ], [ [ 10722, 10738 ], [ 19825, 19841 ] ], [ [ 11181, 11187 ], [ 11350, 11356 ], [ 11525, 11531 ], [ 11631, 11637 ], [ 11753, 11759 ], [ 11853, 11859 ], [ 11962, 11968 ], [ 12067, 12073 ], [ 12208, 12214 ], [ 12363, 12369 ], [ 12521, 12527 ], [ 12669, 12675 ], [ 12838, 12844 ], [ 13013, 13019 ], [ 13152, 13158 ], [ 13318, 13324 ], [ 13457, 13463 ], [ 13617, 13623 ], [ 13798, 13804 ] ], [ [ 13793, 13797 ], [ 14200, 14204 ], [ 14258, 14262 ], [ 15622, 15626 ], [ 15633, 15637 ], [ 15684, 15688 ], [ 15755, 15759 ], [ 15772, 15776 ], [ 15951, 15955 ], [ 16022, 16026 ], [ 16089, 16093 ], [ 16160, 16164 ], [ 16238, 16242 ], [ 16265, 16269 ], [ 16351, 16355 ], [ 16383, 16387 ], [ 16449, 16453 ], [ 16481, 16485 ], [ 16492, 16496 ], [ 16727, 16731 ], [ 16746, 16750 ], [ 16790, 16794 ], [ 16869, 16873 ], [ 16991, 16995 ], [ 17090, 17094 ], [ 17145, 17149 ], [ 17193, 17197 ], [ 17273, 17277 ], [ 17381, 17385 ], [ 17480, 17484 ], [ 17554, 17558 ], [ 17624, 17628 ], [ 18015, 18019 ], [ 18055, 18059 ], [ 18101, 18105 ], [ 18117, 18121 ], [ 18354, 18358 ], [ 18472, 18476 ], [ 18490, 18494 ], [ 18550, 18554 ], [ 18617, 18621 ], [ 18706, 18710 ], [ 18764, 18768 ], [ 18863, 18867 ], [ 18952, 18956 ], [ 18989, 18993 ], [ 19030, 19034 ], [ 19398, 19402 ], [ 4813, 4817 ], [ 4824, 4828 ], [ 4953, 4957 ], [ 5150, 5154 ], [ 5191, 5195 ], [ 5202, 5206 ], [ 5467, 5471 ], [ 5478, 5482 ], [ 5712, 5716 ], [ 7408, 7412 ], [ 7440, 7444 ], [ 9480, 9484 ], [ 9703, 9707 ], [ 10497, 10501 ], [ 10613, 10617 ], [ 10674, 10678 ], [ 10704, 10708 ] ], [ [ 13900, 13912 ], [ 13980, 13992 ], [ 15737, 15749 ] ], [ [ 13944, 13957 ], [ 14058, 14071 ], [ 14102, 14115 ], [ 14181, 14194 ] ], [ [ 13994, 14009 ], [ 14085, 14100 ] ], [ [ 14248, 14253 ], [ 14282, 14287 ], [ 14321, 14326 ], [ 14474, 14479 ], [ 14748, 14753 ], [ 14905, 14910 ], [ 14918, 14923 ] ], [ [ 14295, 14300 ], [ 14321, 14326 ], [ 14474, 14479 ], [ 14748, 14753 ], [ 14905, 14910 ], [ 14918, 14923 ] ], [ [ 14330, 14336 ], [ 14898, 14904 ] ], [ [ 14489, 14497 ], [ 14568, 14576 ], [ 14839, 14847 ] ], [ [ 14532, 14545 ], [ 14666, 14679 ], [ 14714, 14727 ], [ 14778, 14791 ] ], [ [ 14582, 14597 ], [ 14693, 14708 ] ], [ [ 15296, 15307 ], [ 15451, 15462 ] ], [ [ 16429, 16435 ], [ 16468, 16474 ], [ 16517, 16523 ], [ 16773, 16779 ], [ 17176, 17182 ], [ 17757, 17763 ], [ 18533, 18539 ], [ 18599, 18605 ], [ 19463, 19469 ], [ 4982, 4988 ], [ 5038, 5044 ], [ 5625, 5631 ], [ 5770, 5776 ], [ 5953, 5959 ], [ 6049, 6055 ], [ 6364, 6370 ], [ 7878, 7884 ], [ 8130, 8136 ], [ 8505, 8511 ], [ 9080, 9086 ], [ 9250, 9256 ] ], [ [ 16507, 16514 ], [ 16581, 16588 ], [ 16610, 16617 ] ], [ [ 16571, 16578 ], [ 16705, 16712 ], [ 19626, 19633 ], [ 19788, 19795 ] ], [ [ 16601, 16607 ], [ 16714, 16720 ], [ 19502, 19508 ], [ 19850, 19856 ] ], [ [ 16765, 16772 ], [ 16889, 16896 ], [ 16929, 16936 ], [ 17072, 17079 ] ], [ [ 16830, 16842 ], [ 18453, 18465 ] ], [ [ 16900, 16906 ], [ 5298, 5304 ] ], [ [ 16964, 16975 ], [ 17055, 17066 ] ], [ [ 17026, 17035 ], [ 18443, 18452 ], [ 3716, 3725 ], [ 3749, 3758 ], [ 5404, 5413 
], [ 6587, 6596 ], [ 9523, 9532 ], [ 9738, 9747 ], [ 9763, 9772 ] ], [ [ 17168, 17175 ], [ 17287, 17294 ], [ 17462, 17469 ] ], [ [ 17233, 17245 ], [ 17337, 17349 ], [ 18453, 18465 ] ], [ [ 17354, 17365 ], [ 17445, 17456 ] ], [ [ 17416, 17425 ], [ 18443, 18452 ], [ 3716, 3725 ], [ 3749, 3758 ], [ 5404, 5413 ], [ 6587, 6596 ], [ 9523, 9532 ], [ 9738, 9747 ], [ 9763, 9772 ] ], [ [ 17533, 17538 ], [ 18437, 18442 ], [ 18920, 18925 ], [ 19111, 19116 ] ], [ [ 17746, 17756 ], [ 17807, 17817 ] ], [ [ 17834, 17845 ], [ 17945, 17956 ] ], [ [ 18031, 18036 ], [ 18437, 18442 ], [ 18920, 18925 ], [ 19111, 19116 ] ], [ [ 18079, 18084 ], [ 18437, 18442 ], [ 18920, 18925 ], [ 19111, 19116 ] ], [ [ 18338, 18339 ], [ 18435, 18436 ] ], [ [ 18525, 18532 ], [ 18726, 18733 ], [ 18845, 18852 ] ], [ [ 18590, 18598 ], [ 18973, 18981 ] ], [ [ 18667, 18679 ], [ 18938, 18950 ] ], [ [ 18737, 18748 ], [ 18828, 18839 ] ], [ [ 18799, 18808 ], [ 18927, 18936 ], [ 3716, 3725 ], [ 3749, 3758 ], [ 5404, 5413 ], [ 6587, 6596 ], [ 9523, 9532 ], [ 9738, 9747 ], [ 9763, 9772 ] ], [ [ 19014, 19015 ], [ 19109, 19110 ] ], [ [ 19215, 19223 ], [ 19296, 19304 ], [ 19366, 19374 ], [ 19380, 19388 ] ], [ [ 19419, 19425 ], [ 19521, 19527 ], [ 19858, 19864 ] ], [ [ 19564, 19576 ], [ 19607, 19619 ] ], [ [ 19585, 19592 ], [ 19639, 19646 ], [ 19755, 19762 ], [ 19875, 19882 ] ], [ [ 19684, 19696 ], [ 19809, 19821 ], [ 19607, 19619 ] ], [ [ 19778, 19784 ], [ 19842, 19848 ] ], [ [ 20015, 20023 ], [ 20117, 20125 ], [ 20208, 20216 ], [ 20222, 20230 ] ] ]
"""Test UPnP/IGD config flow.""" from datetime import timedelta from unittest.mock import AsyncMock, patch from homeassistant import config_entries, data_entry_flow from homeassistant.components import ssdp from homeassistant.components.upnp.const import ( CONFIG_ENTRY_SCAN_INTERVAL, CONFIG_ENTRY_ST, CONFIG_ENTRY_UDN, DEFAULT_SCAN_INTERVAL, DISCOVERY_LOCATION, DISCOVERY_NAME, DISCOVERY_ST, DISCOVERY_UDN, DISCOVERY_UNIQUE_ID, DISCOVERY_USN, DOMAIN, DOMAIN_COORDINATORS, ) from homeassistant.components.upnp.device import Device from homeassistant.helpers.typing import HomeAssistantType from homeassistant.setup import async_setup_component from .mock_device import MockDevice from tests.common import MockConfigEntry async def test_flow_ssdp_discovery(hass: HomeAssistantType): """Test config flow: discovered + configured through ssdp.""" udn = "uuid:device_1" location = "dummy" mock_device = MockDevice(udn) discoveries = [ { DISCOVERY_LOCATION: location, DISCOVERY_NAME: mock_device.name, DISCOVERY_ST: mock_device.device_type, DISCOVERY_UDN: mock_device.udn, DISCOVERY_UNIQUE_ID: mock_device.unique_id, DISCOVERY_USN: mock_device.usn, } ] with patch.object( Device, "async_create_device", AsyncMock(return_value=mock_device) ), patch.object( Device, "async_discover", AsyncMock(return_value=discoveries) ), patch.object( Device, "async_supplement_discovery", AsyncMock(return_value=discoveries[0]) ): # Discovered via step ssdp. result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": config_entries.SOURCE_SSDP}, data={ ssdp.ATTR_SSDP_LOCATION: location, ssdp.ATTR_SSDP_ST: mock_device.device_type, ssdp.ATTR_SSDP_USN: mock_device.usn, ssdp.ATTR_UPNP_UDN: mock_device.udn, }, ) assert result["type"] == data_entry_flow.RESULT_TYPE_FORM assert result["step_id"] == "ssdp_confirm" # Confirm via step ssdp_confirm. result = await hass.config_entries.flow.async_configure( result["flow_id"], user_input={}, ) assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY assert result["title"] == mock_device.name assert result["data"] == { CONFIG_ENTRY_ST: mock_device.device_type, CONFIG_ENTRY_UDN: mock_device.udn, } async def test_flow_ssdp_discovery_incomplete(hass: HomeAssistantType): """Test config flow: incomplete discovery through ssdp.""" udn = "uuid:device_1" location = "dummy" mock_device = MockDevice(udn) # Discovered via step ssdp. result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": config_entries.SOURCE_SSDP}, data={ ssdp.ATTR_SSDP_ST: mock_device.device_type, # ssdp.ATTR_UPNP_UDN: mock_device.udn, # Not provided. ssdp.ATTR_SSDP_LOCATION: location, }, ) assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT assert result["reason"] == "incomplete_discovery" async def test_flow_user(hass: HomeAssistantType): """Test config flow: discovered + configured through user.""" udn = "uuid:device_1" location = "dummy" mock_device = MockDevice(udn) discoveries = [ { DISCOVERY_LOCATION: location, DISCOVERY_NAME: mock_device.name, DISCOVERY_ST: mock_device.device_type, DISCOVERY_UDN: mock_device.udn, DISCOVERY_UNIQUE_ID: mock_device.unique_id, DISCOVERY_USN: mock_device.usn, } ] with patch.object( Device, "async_create_device", AsyncMock(return_value=mock_device) ), patch.object( Device, "async_discover", AsyncMock(return_value=discoveries) ), patch.object( Device, "async_supplement_discovery", AsyncMock(return_value=discoveries[0]) ): # Discovered via step user. 
result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": config_entries.SOURCE_USER} ) assert result["type"] == data_entry_flow.RESULT_TYPE_FORM assert result["step_id"] == "user" # Confirmed via step user. result = await hass.config_entries.flow.async_configure( result["flow_id"], user_input={"unique_id": mock_device.unique_id}, ) assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY assert result["title"] == mock_device.name assert result["data"] == { CONFIG_ENTRY_ST: mock_device.device_type, CONFIG_ENTRY_UDN: mock_device.udn, } async def test_flow_import(hass: HomeAssistantType): """Test config flow: discovered + configured through configuration.yaml.""" udn = "uuid:device_1" mock_device = MockDevice(udn) location = "dummy" discoveries = [ { DISCOVERY_LOCATION: location, DISCOVERY_NAME: mock_device.name, DISCOVERY_ST: mock_device.device_type, DISCOVERY_UDN: mock_device.udn, DISCOVERY_UNIQUE_ID: mock_device.unique_id, DISCOVERY_USN: mock_device.usn, } ] with patch.object( Device, "async_create_device", AsyncMock(return_value=mock_device) ), patch.object( Device, "async_discover", AsyncMock(return_value=discoveries) ), patch.object( Device, "async_supplement_discovery", AsyncMock(return_value=discoveries[0]) ): # Discovered via step import. result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": config_entries.SOURCE_IMPORT} ) assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY assert result["title"] == mock_device.name assert result["data"] == { CONFIG_ENTRY_ST: mock_device.device_type, CONFIG_ENTRY_UDN: mock_device.udn, } async def test_flow_import_already_configured(hass: HomeAssistantType): """Test config flow: discovered, but already configured.""" udn = "uuid:device_1" mock_device = MockDevice(udn) # Existing entry. config_entry = MockConfigEntry( domain=DOMAIN, data={ CONFIG_ENTRY_UDN: mock_device.udn, CONFIG_ENTRY_ST: mock_device.device_type, }, options={CONFIG_ENTRY_SCAN_INTERVAL: DEFAULT_SCAN_INTERVAL}, ) config_entry.add_to_hass(hass) # Discovered via step import. result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": config_entries.SOURCE_IMPORT} ) assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT assert result["reason"] == "already_configured" async def test_flow_import_incomplete(hass: HomeAssistantType): """Test config flow: incomplete discovery, configured through configuration.yaml.""" udn = "uuid:device_1" mock_device = MockDevice(udn) location = "dummy" discoveries = [ { DISCOVERY_LOCATION: location, DISCOVERY_NAME: mock_device.name, # DISCOVERY_ST: mock_device.device_type, DISCOVERY_UDN: mock_device.udn, DISCOVERY_UNIQUE_ID: mock_device.unique_id, DISCOVERY_USN: mock_device.usn, } ] with patch.object(Device, "async_discover", AsyncMock(return_value=discoveries)): # Discovered via step import. result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": config_entries.SOURCE_IMPORT} ) assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT assert result["reason"] == "incomplete_discovery" async def test_options_flow(hass: HomeAssistantType): """Test options flow.""" # Set up config entry. 
udn = "uuid:device_1" location = "http://192.168.1.1/desc.xml" mock_device = MockDevice(udn) discoveries = [ { DISCOVERY_LOCATION: location, DISCOVERY_NAME: mock_device.name, DISCOVERY_ST: mock_device.device_type, DISCOVERY_UDN: mock_device.udn, DISCOVERY_UNIQUE_ID: mock_device.unique_id, DISCOVERY_USN: mock_device.usn, } ] config_entry = MockConfigEntry( domain=DOMAIN, data={ CONFIG_ENTRY_UDN: mock_device.udn, CONFIG_ENTRY_ST: mock_device.device_type, }, options={CONFIG_ENTRY_SCAN_INTERVAL: DEFAULT_SCAN_INTERVAL}, ) config_entry.add_to_hass(hass) config = { # no upnp, ensures no import-flow is started. } with patch.object( Device, "async_create_device", AsyncMock(return_value=mock_device) ), patch.object(Device, "async_discover", AsyncMock(return_value=discoveries)): # Initialisation of component. await async_setup_component(hass, "upnp", config) await hass.async_block_till_done() # DataUpdateCoordinator gets a default of 30 seconds for updates. coordinator = hass.data[DOMAIN][DOMAIN_COORDINATORS][mock_device.udn] assert coordinator.update_interval == timedelta(seconds=DEFAULT_SCAN_INTERVAL) # Options flow with no input results in form. result = await hass.config_entries.options.async_init( config_entry.entry_id, ) assert result["type"] == data_entry_flow.RESULT_TYPE_FORM # Options flow with input results in update to entry. result2 = await hass.config_entries.options.async_configure( result["flow_id"], user_input={CONFIG_ENTRY_SCAN_INTERVAL: 60}, ) assert result2["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY assert config_entry.options == { CONFIG_ENTRY_SCAN_INTERVAL: 60, } # Also updates DataUpdateCoordinator. assert coordinator.update_interval == timedelta(seconds=60)
[ [ [ 55, 64 ], [ 9401, 9410 ], [ 10164, 10173 ] ], [ [ 91, 100 ], [ 1376, 1385 ], [ 1467, 1476 ], [ 1570, 1579 ], [ 3912, 3921 ], [ 4003, 4012 ], [ 4106, 4115 ], [ 5519, 5528 ], [ 5610, 5619 ], [ 5713, 5722 ], [ 7621, 7630 ], [ 8942, 8951 ], [ 9024, 9033 ] ], [ [ 102, 107 ], [ 1323, 1328 ], [ 1419, 1424 ], [ 1510, 1515 ], [ 3859, 3864 ], [ 3955, 3960 ], [ 4046, 4051 ], [ 5466, 5471 ], [ 5562, 5567 ], [ 5653, 5658 ], [ 7582, 7587 ], [ 8889, 8894 ], [ 8985, 8990 ] ], [ [ 135, 149 ], [ 1763, 1777 ], [ 2969, 2983 ], [ 4287, 4301 ], [ 5896, 5910 ], [ 6851, 6865 ], [ 7796, 7810 ] ], [ [ 151, 166 ], [ 2086, 2101 ], [ 2379, 2394 ], [ 3230, 3245 ], [ 4358, 4373 ], [ 4671, 4686 ], [ 5970, 5985 ], [ 6917, 6932 ], [ 7870, 7885 ], [ 9638, 9653 ], [ 9935, 9950 ] ], [ [ 204, 208 ], [ 1827, 1831 ], [ 1878, 1882 ], [ 1938, 1942 ], [ 1991, 1995 ], [ 3025, 3029 ], [ 3149, 3153 ] ], [ [ 263, 289 ], [ 6632, 6658 ], [ 8711, 8737 ], [ 9858, 9884 ], [ 10029, 10055 ] ], [ [ 295, 310 ], [ 2518, 2533 ], [ 4810, 4825 ], [ 6109, 6124 ], [ 6562, 6577 ], [ 8641, 8656 ] ], [ [ 316, 332 ], [ 2572, 2588 ], [ 4864, 4880 ], [ 6163, 6179 ], [ 6515, 6531 ], [ 8594, 8610 ] ], [ [ 338, 359 ], [ 6660, 6681 ], [ 8739, 8760 ], [ 9419, 9440 ] ], [ [ 365, 383 ], [ 1027, 1045 ], [ 3562, 3580 ], [ 5169, 5187 ], [ 7283, 7301 ], [ 8221, 8239 ] ], [ [ 389, 403 ], [ 1069, 1083 ], [ 3604, 3618 ], [ 5211, 5225 ], [ 7325, 7339 ], [ 8263, 8277 ] ], [ [ 409, 421 ], [ 1115, 1127 ], [ 3650, 3662 ], [ 5257, 5269 ], [ 8309, 8321 ] ], [ [ 427, 440 ], [ 1166, 1179 ], [ 3701, 3714 ], [ 5308, 5321 ], [ 7424, 7437 ], [ 8360, 8373 ] ], [ [ 446, 465 ], [ 1210, 1229 ], [ 3745, 3764 ], [ 5352, 5371 ], [ 7468, 7487 ], [ 8404, 8423 ] ], [ [ 471, 484 ], [ 1266, 1279 ], [ 3801, 3814 ], [ 5408, 5421 ], [ 7524, 7537 ], [ 8460, 8473 ] ], [ [ 490, 496 ], [ 1724, 1730 ], [ 2934, 2940 ], [ 4260, 4266 ], [ 5869, 5875 ], [ 6480, 6486 ], [ 6824, 6830 ], [ 7769, 7775 ], [ 8559, 8565 ], [ 9309, 9315 ] ], [ [ 502, 521 ], [ 9317, 9336 ] ], [ [ 574, 580 ], [ 1345, 1351 ], [ 1441, 1447 ], [ 1532, 1538 ], [ 3881, 3887 ], [ 3977, 3983 ], [ 4068, 4074 ], [ 5488, 5494 ], [ 5584, 5590 ], [ 5675, 5681 ], [ 7595, 7601 ], [ 8911, 8917 ], [ 8998, 9004 ] ], [ [ 622, 639 ], [ 816, 833 ], [ 2671, 2688 ], [ 3351, 3368 ], [ 4944, 4961 ], [ 6262, 6279 ], [ 7049, 7066 ], [ 7998, 8015 ] ], [ [ 672, 693 ], [ 9115, 9136 ] ], [ [ 720, 730 ], [ 969, 979 ], [ 2821, 2831 ], [ 3504, 3514 ], [ 5088, 5098 ], [ 6390, 6400 ], [ 7202, 7212 ], [ 8163, 8173 ] ], [ [ 757, 772 ], [ 6448, 6463 ], [ 8527, 8542 ] ], [ [ 775, 2616 ] ], [ [ 2619, 3317 ] ], [ [ 3320, 4908 ] ], [ [ 4911, 6207 ] ], [ [ 6210, 7002 ] ], [ [ 7005, 7961 ] ], [ [ 7964, 10185 ] ] ]
from .BSD500 import BSD500

__all__ = ('BSD500',)
[ [ [ 20, 26 ] ], [ [ 27, 34 ] ] ]
week = ["SUN", "MON", "TUE", "WED", "THU", "FRI", "SAT"] print(7-week.index(input()))
[ [ [ 0, 4 ], [ 65, 69 ] ] ]
""" priorityqueue.py Priority Queue Implementation with a O(log n) Remove Method This file implements min- amd max-oriented priority queues based on binary heaps. I found the need for a priority queue with a O(log n) remove method. This can't be achieved with any of Python's built in collections including the heapq module, so I built my own. The heap is arranged according to a given key function. Usage: >>> from priorityqueue import MinHeapPriorityQueue >>> items = [4, 0, 1, 3, 2] >>> pq = MinHeapPriorityQueue(items) >>> pq.pop() 0 A priority queue accepts an optional key function. >>> items = ['yy', 'ttttttt', 'z', 'wwww', 'uuuuuu', 'vvvvv', 'xxx'] >>> pq = MinHeapPriorityQueue(items, key=len) >>> pq.pop() 'z' >>> pq.pop() 'yy' Internally, the queue is a list of tokens of type 'Locator', which contain the priority value, the item itself, and its current index in the heap. The index field is updated whenever the heap is modified. This is what allows us to remove in O(log n). Appending an item returns it's Locator. >>> token = pq.append('a') >>> token Locator(value=1, item='a', index=0) >>> pq.remove(token) 'a' If we want to be able to remove any item in the list we can maintain an auxiliary dictionary mapping items to their Locators. Here's a simple example with unique items: >>> items = [12, 46, 89, 101, 72, 81] >>> pq = MinHeapPriorityQueue() >>> locs = {} >>> for item in items: ... locs[item] = pq.append(item) >>> locs[46] Locator(value=46, item=46, index=1) >>> pq.remove(locs[46]) 46 Iterating with 'for item in pq' or iter() will produce the items, not the Locator instances used in the internal representation. The items will be generated in sorted order. >>> items = [3, 1, 0, 2, 4] >>> pq = MinHeapPriorityQueue(items) >>> for item in pq: ... print(item) 0 1 2 3 4 """ # Inspired by: # - AdaptableHeapPriorityQueue in 'Data Structures and Algorithms in Python' # - the Go Standard library's heap package # - Python's heapq module # - Raymond Hettinger's SortedCollection on ActiveState # - Peter Norvig's PriorityQueue in the Python AIMA repo class MinHeapPriorityQueue(): """A locator-based min-oriented priority queue implemented with a binary heap, arranged according to a key function. 
Operation Running Time len(P), P.peek() O(1) P.update(loc, value, item) O(log n) P.append(item) O(log n)* P.pop() O(log n)* P.remove(loc) O(log n)* *amortized due to occasional resizing of the underlying python list """ def __init__(self, iterable=(), key=lambda x: x): self._key = key decorated = [(key(item), item) for item in iterable] self._pq = [self.Locator(value, item, i) for i, (value, item) in enumerate(decorated)] if len(self._pq) > 1: self._heapify() class Locator: """Token for locating an entry of the priority queue.""" __slots__ = '_value', '_item', '_index' def __init__(self, value, item, i): self._value = value self._item = item self._index = i def __eq__(self, other): return self._value == other._value def __lt__(self, other): return self._value < other._value def __le__(self, other): return self._value <= other._value def __repr__(self): return '{}(value={!r}, item={!r}, index={})'.format( self.__class__.__name__, self._value, self._item, self._index ) #------------------------------------------------------------------------------ # non-public def _parent(self, j): return (j-1) // 2 def _left(self, j): return 2*j + 1 def _right(self, j): return 2*j + 2 def _swap(self, i, j): """Swap the elements at indices i and j of array.""" self._pq[i], self._pq[j] = self._pq[j], self._pq[i] # Update the indices in the Locator instances. self._pq[i]._index = i self._pq[j]._index = j def _upheap(self, i): parent = self._parent(i) if i > 0 and self._pq[i] < self._pq[parent]: self._swap(i, parent) self._upheap(parent) def _downheap(self, i): n = len(self._pq) left, right = self._left(i), self._right(i) if left < n: child = left if right < n and self._pq[right] < self._pq[left]: child = right if self._pq[child] < self._pq[i]: self._swap(i, child) self._downheap(child) def _fix(self, i): self._upheap(i) self._downheap(i) def _heapify(self): start = self._parent(len(self) - 1) # Start at parent of last leaf for j in range(start, -1, -1): # going to and includng the root. self._downheap(j) #------------------------------------------------------------------------------ # public def append(self, item): """Add an item to the heap""" token = self.Locator(self._key(item), item, len(self._pq)) self._pq.append(token) self._upheap(len(self._pq) - 1) # Upheap newly added position. 
return token def update(self, loc, newval, newitem): """Update the priority value and item for the entry identified by Locator loc.""" j = loc._index if not (0 <= j < len(self) and self._pq[j] is loc): raise ValueError('Invalid locator') loc._value = newval loc._item = newitem self._fix(j) def remove(self, loc): """Remove and return the item identified by Locator loc.""" j = loc._index if not (0 <= j < len(self) and self._pq[j] is loc): raise ValueError('Invalid locator') if j == len(self) - 1: self._pq.pop() else: self._swap(j, len(self) - 1) self._pq.pop() self._fix(j) return loc._item def peek(self): """Return but do not remove item with minimum priority value.""" loc = self._pq[0] return loc._item def pop(self): """Remove and return item with minimum priority value.""" self._swap(0, len(self._pq) - 1) loc = self._pq.pop() self._downheap(0) return loc._item @property def items(self): return [token._item for token in self._pq] def __len__(self): return len(self._pq) def __contains__(self, item): return item in self.items def __iter__(self): return iter(sorted(self.items)) def __repr__(self): return '{}({})'.format(self.__class__.__name__, self._pq) class MaxHeapPriorityQueue(MinHeapPriorityQueue): """A locator-based max-oriented priority queue implemented with a binary heap, arranged according to a key function. Operation Running Time len(P), P.peek() O(1) P.update(loc, value, item) O(log n) P.append(item) O(log n)* P.pop() O(log n)* P.remove(loc) O(log n)* *amortized due to occasional resizing of the underlying python list """ # Override all relevant private methods of MinHeapPriorityQueue # with max-oriented versions. def _upheap(self, i): parent = self._parent(i) if i > 0 and self._pq[parent] < self._pq[i]: self._swap(i, parent) self._upheap(parent) def _downheap(self, i): n = len(self._pq) left, right = self._left(i), self._right(i) if left < n: child = left if right < n and self._pq[left] < self._pq[right]: child = right if self._pq[i] < self._pq[child]: self._swap(i, child) self._downheap(child) def __iter__(self): return iter(sorted(self.items, reverse=True)) __doc__ += """ >>> import random; random.seed(42) >>> from priorityqueue import MinHeapPriorityQueue, MaxHeapPriorityQueue Function to verify the min-heap invariant is true for all elements of pq. >>> def verify(pq): ... n = len(pq._pq) ... for i in range(n): ... left, right = 2*i + 1, 2*i + 2 ... if left < n: ... assert pq._pq[i] <= pq._pq[left] ... if right < n: ... assert pq._pq[i] <= pq._pq[right] Function to verify the max-heap invariant is true for all elements of pq. >>> def verify_max(pq): ... n = len(pq._pq) ... for i in range(n): ... left, right = 2*i + 1, 2*i + 2 ... if left < n: ... assert pq._pq[i] >= pq._pq[left] ... if right < n: ... assert pq._pq[i] >= pq._pq[right] >>> items = [random.randint(1, 100) for _ in range(10000)] >>> pq = MinHeapPriorityQueue(items) >>> verify(pq) >>> pq = MaxHeapPriorityQueue(items) >>> verify_max(pq) Check multiple signs for priority values. >>> items = list(range(100, -100, -1)) >>> random.shuffle(items) >>> pq = MinHeapPriorityQueue(items) >>> verify(pq) >>> pq = MaxHeapPriorityQueue(items) >>> verify_max(pq) Test pop, peek, append, remove, update, __len__, and __contains__ operations. >>> items = ['jjjjjjjjjj', 'iiiiiiiii', 'hhhhhhhh', ... 'ggggggg', 'ffffff', 'eeeee', ... 
'dddd', 'ccc', 'bb', 'a'] >>> pq = MinHeapPriorityQueue(items, key=len) >>> verify(pq) >>> pq.pop() 'a' >>> pq.pop() 'bb' >>> pq.peek() 'ccc' >>> pq.pop() 'ccc' >>> pq.pop() 'dddd' >>> pq.peek() 'eeeee' >>> pq.pop() 'eeeee' >>> _ = pq.append('a') >>> _ = pq.append('bb') >>> verify(pq) >>> pq = MaxHeapPriorityQueue(key=len) >>> pq.append([1, 2, 3]) Locator(value=3, item=[1, 2, 3], index=0) >>> pq.append([1, 2, 3, 4, 5, 6]) Locator(value=6, item=[1, 2, 3, 4, 5, 6], index=0) >>> pq.append([1]) Locator(value=1, item=[1], index=2) >>> pq.append([1, 2, 3, 4, 5, 6, 7, 8, 9]) Locator(value=9, item=[1, 2, 3, 4, 5, 6, 7, 8, 9], index=0) >>> len(pq) 4 >>> [1] in pq True >>> [1, 2, 3, 4, 5] in pq False >>> items = list(range(1, 10001)) >>> random.shuffle(items) >>> pq = MinHeapPriorityQueue(items) >>> verify(pq) >>> len(pq) == 10000 True >>> for i in range(1, 10001): ... x = pq.pop() ... assert x == i >>> pq = MinHeapPriorityQueue() >>> locs = {} >>> for x in items: ... locs[x] = pq.append(x) >>> pq.remove(locs[1]) 1 >>> pq.remove(locs[2]) 2 >>> pq.pop() 3 >>> for i in range(4, 100): ... _ = pq.remove(locs[i]) >>> pq.pop() 100 >>> verify(pq) >>> pq.update(locs[999], 1, 'test') >>> 999 in pq False >>> pq.pop() 'test' >>> 998 in pq True Test the items and __repr__ methods. >>> items = ['a', 'b', 'c'] >>> pq = MinHeapPriorityQueue(items) >>> pq MinHeapPriorityQueue([Locator(value='a', item='a', index=0), Locator(value='b', item='b', index=1), Locator(value='c', item='c', index=2)]) >>> pq.items == ['a', 'b', 'c'] True Check that __iter__ generates items in sorted order. >>> items = list(range(1000)) >>> pq = MinHeapPriorityQueue(items) >>> for i, x in enumerate(pq): ... assert i == x >>> pq = MaxHeapPriorityQueue(items) >>> for i, x in enumerate(pq): ... assert 999 - i == x """ if __name__ == "__main__": import doctest doctest.testmod()
[ [ [ 2287, 2307 ], [ 7009, 7029 ] ], [ [ 6988, 7008 ] ], [ [ 11454, 11461 ], [ 11466, 11473 ] ] ]
from selenium import webdriver from fixture.session import SessionHelper from fixture.group import GroupHelper from fixture.contact import ContactHelper class Application: def __init__(self, browser, base_url): if browser == "firefox": self.wd = webdriver.Firefox() elif browser == "chrome": self.wd = webdriver.Chrome() elif browser == "ie": self.wd = webdriver.Ie() else: raise ValueError("Unrecognized browser %s" % browser) self.wd.implicitly_wait(5) self.session = SessionHelper(self) self.group = GroupHelper(self) self.contact = ContactHelper(self) self.base_url=base_url def is_valid(self): try: self.wd.current_url return True except: return False def open_home_page(self): wd = self.wd wd.get(self.base_url) def destroy(self): self.wd.quit()
[ [ [ 21, 30 ], [ 273, 282 ], [ 349, 358 ], [ 420, 429 ] ], [ [ 59, 72 ], [ 573, 586 ] ], [ [ 99, 110 ], [ 614, 625 ] ], [ [ 139, 152 ], [ 655, 668 ] ], [ [ 161, 172 ] ] ]
import matplotlib.pyplot as plt from shapely.geometry import MultiLineString from .route_iterator import RouteIterator from .graphconverter import GraphConverter class TramLine(object): """Class represents single tram line for example '33: from Pilczyce to Sępolno' """ def __init__(self, number, direction_to, dl): """ Basic requirements to unambiguously define line :param number: number of line as str :param direction_to: :param dl: DataLoader object """ self.number = number # Stored as str self.direction_to = direction_to self.default_route = dl.load_single_line(number, direction_to) # As you can default_route is type LineString self.stops = dl.load_tram_stops(self.default_route) # List of shapely.Point objects self.current_route = self.default_route self.route_in_order = GraphConverter.find_route_in_order(dl, self) """ def show(self, with_stops=True): # Development tool. Plot line if isinstance(self.current_route, MultiLineString): for line in self.current_route: plt.plot(line.xy[0], line.xy[1]) else: plt.plot(self.current_route.xy[0], self.current_route.xy[1]) if with_stops: plt.scatter([p.x for p in self.stops], [p.y for p in self.stops]) plt.show() """
[ [ [ 7, 31 ] ], [ [ 61, 76 ] ], [ [ 105, 118 ] ], [ [ 147, 161 ], [ 894, 908 ] ], [ [ 170, 178 ] ] ]
"""Hermes MQTT service for Rhasspy wakeword with snowboy""" import argparse import asyncio import dataclasses import itertools import json import logging import os import sys import typing from pathlib import Path import paho.mqtt.client as mqtt import rhasspyhermes.cli as hermes_cli from . import SnowboyModel, WakeHermesMqtt _DIR = Path(__file__).parent _LOGGER = logging.getLogger("rhasspywake_snowboy_hermes") # ----------------------------------------------------------------------------- def main(): """Main method.""" parser = argparse.ArgumentParser(prog="rhasspy-wake-snowboy-hermes") parser.add_argument( "--model", required=True, action="append", nargs="+", help="Snowboy model settings (model, sensitivity, audio_gain, apply_frontend)", ) parser.add_argument( "--model-dir", action="append", default=[], help="Directories with snowboy models", ) parser.add_argument( "--wakeword-id", action="append", help="Wakeword IDs of each keyword (default: use file name)", ) parser.add_argument( "--stdin-audio", action="store_true", help="Read WAV audio from stdin" ) parser.add_argument( "--udp-audio", nargs=3, action="append", help="Host/port/siteId for UDP audio input", ) parser.add_argument("--lang", help="Set lang in hotword detected message") hermes_cli.add_hermes_args(parser) args = parser.parse_args() hermes_cli.setup_logging(args) _LOGGER.debug(args) if args.model_dir: args.model_dir = [Path(d) for d in args.model_dir] # Use embedded models too args.model_dir.append(_DIR / "models") # Load model settings models: typing.List[SnowboyModel] = [] for model_settings in args.model: model_path = Path(model_settings[0]) if not model_path.is_file(): # Resolve relative to model directories for model_dir in args.model_dir: maybe_path = model_dir / model_path.name if maybe_path.is_file(): model_path = maybe_path break _LOGGER.debug("Loading model from %s", str(model_path)) model = SnowboyModel(model_path=model_path) if len(model_settings) > 1: model.sensitivity = model_settings[1] if len(model_settings) > 2: model.audio_gain = float(model_settings[2]) if len(model_settings) > 3: model.apply_frontend = model_settings[3].strip().lower() == "true" models.append(model) wakeword_ids = [ kn[1] for kn in itertools.zip_longest( args.model, args.wakeword_id or [], fillvalue="" ) ] if args.stdin_audio: # Read WAV from stdin, detect, and exit client = None hermes = WakeHermesMqtt(client, models, wakeword_ids) for site_id in args.site_id: hermes.load_detectors(site_id) if os.isatty(sys.stdin.fileno()): print("Reading WAV data from stdin...", file=sys.stderr) wav_bytes = sys.stdin.buffer.read() # Print results as JSON for result in hermes.handle_audio_frame(wav_bytes): result_dict = dataclasses.asdict(result) json.dump(result_dict, sys.stdout, ensure_ascii=False) return udp_audio = [] if args.udp_audio: udp_audio = [ (host, int(port), site_id) for host, port, site_id in args.udp_audio ] # Listen for messages client = mqtt.Client() hermes = WakeHermesMqtt( client, models, wakeword_ids, model_dirs=args.model_dir, udp_audio=udp_audio, site_ids=args.site_id, lang=args.lang, ) for site_id in args.site_id: hermes.load_detectors(site_id) _LOGGER.debug("Connecting to %s:%s", args.host, args.port) hermes_cli.connect(client, args) client.loop_start() try: # Run event loop asyncio.run(hermes.handle_messages_async()) except KeyboardInterrupt: pass finally: _LOGGER.debug("Shutting down") client.loop_stop() # ----------------------------------------------------------------------------- if __name__ == "__main__": main()
[ [ [ 67, 75 ], [ 550, 558 ] ], [ [ 83, 90 ], [ 4078, 4085 ] ], [ [ 98, 109 ], [ 3308, 3319 ] ], [ [ 117, 126 ], [ 2693, 2702 ] ], [ [ 134, 138 ], [ 3347, 3351 ] ], [ [ 146, 153 ], [ 371, 378 ] ], [ [ 161, 163 ], [ 3044, 3046 ] ], [ [ 171, 174 ], [ 3054, 3057 ], [ 3132, 3135 ], [ 3165, 3168 ], [ 3370, 3373 ] ], [ [ 182, 188 ], [ 1778, 1784 ] ], [ [ 209, 213 ], [ 339, 343 ], [ 1632, 1636 ], [ 1869, 1873 ] ], [ [ 222, 246 ], [ 3614, 3618 ] ], [ [ 255, 286 ], [ 1456, 1466 ], [ 1527, 1537 ], [ 3977, 3987 ] ], [ [ 302, 314 ], [ 1790, 1802 ], [ 2277, 2289 ] ], [ [ 316, 330 ], [ 2906, 2920 ], [ 3641, 3655 ] ], [ [ 332, 336 ], [ 1722, 1726 ] ], [ [ 361, 368 ], [ 1562, 1569 ], [ 2205, 2212 ], [ 3914, 3921 ], [ 4186, 4193 ] ], [ [ 506, 510 ], [ 4358, 4362 ] ] ]
#!/usr/bin/env python3

import sys
import yaml


def main():
    args = sys.argv[1:]
    # Read YAML from the file named on the command line, or from stdin if no argument is given.
    file = open(args[0]) if args else sys.stdin
    data = yaml.safe_load(file)
    # Expect a mapping with an 'Fn::Join' key holding [delimiter, [list of strings]];
    # join the strings with the delimiter and print the result without a trailing newline.
    join_args = data['Fn::Join']
    contents = join_args[0].join(join_args[1])
    print(contents, end='')


if __name__ == '__main__':
    sys.exit(main())
[ [ [ 31, 34 ], [ 301, 304 ], [ 72, 75 ], [ 117, 120 ] ], [ [ 42, 46 ], [ 139, 143 ] ], [ [ 53, 57 ], [ 310, 314 ] ] ]
#!/usr/bin/env python # -*- coding: utf-8 -*- # class for windows getch class _GetchWindows: def __init__(self): import msvcrt def __call__(self): import msvcrt return msvcrt.getch() getch = _GetchWindows() # print instruction print ("Please enter something: ") # read user input and save in into x x = getch() # print user input saved in x print(x)
[ [ [ 81, 94 ], [ 228, 241 ] ], [ [ 220, 225 ], [ 341, 346 ] ], [ [ 337, 338 ], [ 385, 386 ] ] ]
import os from django.conf import settings from main.tests.test_base import MainTestCase from odk_viewer.models import ParsedInstance from odk_viewer.management.commands.remongo import Command from django.core.management import call_command from common_tags import USERFORM_ID class TestRemongo(MainTestCase): def test_remongo_in_batches(self): self._publish_transportation_form() # submit 4 instances self._make_submissions() self.assertEqual(ParsedInstance.objects.count(), 4) # clear mongo settings.MONGO_DB.instances.drop() c = Command() c.handle(batchsize=3) # mongo db should now have 5 records count = settings.MONGO_DB.instances.count() self.assertEqual(count, 4) def test_remongo_with_username_id_string(self): self._publish_transportation_form() # submit 1 instances s = self.surveys[0] self._make_submission(os.path.join(self.this_directory, 'fixtures', 'transportation', 'instances', s, s + '.xml')) # publish and submit for a different user self._logout() self._create_user_and_login("harry", "harry") self._publish_transportation_form() s = self.surveys[1] self._make_submission(os.path.join(self.this_directory, 'fixtures', 'transportation', 'instances', s, s + '.xml')) self.assertEqual(ParsedInstance.objects.count(), 2) # clear mongo settings.MONGO_DB.instances.drop() c = Command() c.handle(batchsize=3, username=self.user.username, id_string=self.xform.id_string) # mongo db should now have 2 records count = settings.MONGO_DB.instances.count() self.assertEqual(count, 1) def test_indexes_exist(self): """ Make sure the required indexes are set, _userform_id as of now """ call_command('remongo') # if index exists, ensure index returns None # list of indexes to check for index_list = [USERFORM_ID] # get index info index_info = settings.MONGO_DB.instances.index_information() # index_info looks like this - {u'_id_': {u'key': [(u'_id', 1)], u'v': 1}, u'_userform_id_1': {u'key': [(u'_userform_id', 1)], u'v': 1}} # lets make a list of the indexes existing_indexes = [v['key'][0][0] for v in index_info.itervalues() if v['key'][0][1] == 1] all_indexes_found = True for index_item in index_list: if index_item not in existing_indexes: all_indexes_found = False break self.assertTrue(all_indexes_found) def test_sync_mongo_with_all_option_deletes_existing_records(self): self._publish_transportation_form() userform_id = "%s_%s" % (self.user.username, self.xform.id_string) initial_mongo_count = settings.MONGO_DB.instances.find( {USERFORM_ID: userform_id}).count() for i in range(len(self.surveys)): self._submit_transport_instance(i) mongo_count = settings.MONGO_DB.instances.find( {USERFORM_ID: userform_id}).count() # check our mongo count self.assertEqual(mongo_count, initial_mongo_count + len(self.surveys)) # add dummy instance settings.MONGO_DB.instances.save( {"_id": 12345, "_userform_id": userform_id}) # make sure the dummy is returned as part of the forms mongo instances mongo_count = settings.MONGO_DB.instances.find( {USERFORM_ID: userform_id}).count() self.assertEqual(mongo_count, initial_mongo_count + len(self.surveys) + 1) # call sync_mongo WITHOUT the all option call_command("sync_mongo", remongo=True) mongo_count = settings.MONGO_DB.instances.find( {USERFORM_ID: userform_id}).count() self.assertEqual(mongo_count, initial_mongo_count + len(self.surveys) + 1) # call sync_mongo WITH the all option call_command("sync_mongo", remongo=True, update_all=True) # check that we are back to just the submitted set mongo_count = settings.MONGO_DB.instances.find( {USERFORM_ID: userform_id}).count() 
self.assertEqual(mongo_count, initial_mongo_count + len(self.surveys))
[ [ [ 7, 9 ], [ 929, 931 ], [ 1281, 1283 ] ], [ [ 35, 43 ], [ 536, 544 ], [ 676, 684 ], [ 1494, 1502 ], [ 1715, 1723 ], [ 2121, 2129 ], [ 2907, 2915 ], [ 3101, 3109 ], [ 3331, 3339 ], [ 3523, 3531 ], [ 3833, 3841 ], [ 4203, 4211 ] ], [ [ 77, 89 ], [ 298, 310 ] ], [ [ 120, 134 ], [ 475, 489 ], [ 1429, 1443 ] ], [ [ 186, 193 ], [ 581, 588 ], [ 1541, 1548 ] ], [ [ 229, 241 ], [ 1924, 1936 ], [ 3770, 3782 ], [ 4064, 4076 ] ], [ [ 266, 277 ], [ 2062, 2073 ], [ 2954, 2965 ], [ 3148, 3159 ], [ 3570, 3581 ], [ 3880, 3891 ], [ 4250, 4261 ] ], [ [ 286, 297 ] ] ]
from selenium import webdriver link = "http://selenium1py.pythonanywhere.com/" class TestMainPage1(): @classmethod def setup_class(self): print("\nstart browser for test suite..") self.browser = webdriver.Chrome() @classmethod def teardown_class(self): print("quit browser for test suite..") self.browser.quit() def test_guest_should_see_login_link(self): self.browser.get(link) self.browser.find_element_by_css_selector("#login_link") def test_guest_should_see_basket_link_on_the_main_page(self): self.browser.get(link) self.browser.find_element_by_css_selector( ".basket-mini .btn-group > a") class TestMainPage2(): def setup_method(self): print("start browser for test..") self.browser = webdriver.Chrome() def teardown_method(self): print("quit browser for test..") self.browser.quit() def test_guest_should_see_login_link(self): self.browser.get(link) self.browser.find_element_by_css_selector("#login_link") def test_guest_should_see_basket_link_on_the_main_page(self): self.browser.get(link) self.browser.find_element_by_css_selector( ".basket-mini .btn-group > a")
[ [ [ 21, 30 ], [ 223, 232 ], [ 821, 830 ] ], [ [ 32, 36 ], [ 439, 443 ], [ 602, 606 ], [ 1015, 1019 ], [ 1178, 1182 ] ], [ [ 88, 101 ] ], [ [ 710, 723 ] ] ]
import pytest import numpy as np import sklearn.linear_model import sklearn.model_selection import scipy.linalg from himalaya.backend import set_backend from himalaya.backend import ALL_BACKENDS from himalaya.utils import assert_array_almost_equal from himalaya.scoring import r2_score from himalaya.kernel_ridge import solve_multiple_kernel_ridge_random_search def _create_dataset(backend, n_targets=4): n_featuress = (100, 200) n_samples = 80 n_gammas = 3 Xs = [ backend.asarray(backend.randn(n_samples, n_features), backend.float64) for n_features in n_featuress ] Ks = backend.stack([X @ X.T for X in Xs]) ws = [ backend.asarray(backend.randn(n_features, n_targets), backend.float64) for n_features in n_featuress ] Ys = backend.stack([X @ w for X, w in zip(Xs, ws)]) Y = Ys.sum(0) gammas = backend.asarray(backend.rand(n_gammas, Ks.shape[0]), backend.float64) gammas /= gammas.sum(1)[:, None] return Ks, Y, gammas, Xs @pytest.mark.parametrize('local_alpha', [True, False]) @pytest.mark.parametrize('backend', ALL_BACKENDS) def test_solve_multiple_kernel_ridge_random_search_local_alphah( backend, local_alpha): _test_solve_multiple_kernel_ridge_random_search(backend=backend, local_alpha=local_alpha) @pytest.mark.parametrize('n_targets_batch', [None, 3]) @pytest.mark.parametrize('backend', ALL_BACKENDS) def test_solve_multiple_kernel_ridge_random_search_n_targets_batch( backend, n_targets_batch): _test_solve_multiple_kernel_ridge_random_search( backend=backend, n_targets_batch=n_targets_batch) @pytest.mark.parametrize('n_alphas_batch', [None, 2]) @pytest.mark.parametrize('backend', ALL_BACKENDS) def test_solve_multiple_kernel_ridge_random_search_n_alphas_batch( backend, n_alphas_batch): _test_solve_multiple_kernel_ridge_random_search( backend=backend, n_alphas_batch=n_alphas_batch) @pytest.mark.parametrize('return_weights', ['primal', 'dual']) @pytest.mark.parametrize('backend', ALL_BACKENDS) def test_solve_multiple_kernel_ridge_random_search_return_weights( backend, return_weights): _test_solve_multiple_kernel_ridge_random_search( backend=backend, return_weights=return_weights) @pytest.mark.parametrize('diagonalize_method', ['eigh', 'svd']) @pytest.mark.parametrize('backend', ALL_BACKENDS) def test_solve_multiple_kernel_ridge_random_search_diagonalize_method( backend, diagonalize_method): _test_solve_multiple_kernel_ridge_random_search( backend=backend, diagonalize_method=diagonalize_method) def _test_solve_multiple_kernel_ridge_random_search( backend, n_targets_batch=None, n_alphas_batch=None, return_weights="dual", diagonalize_method="eigh", local_alpha=True): backend = set_backend(backend) Ks, Y, gammas, Xs = _create_dataset(backend) alphas = backend.asarray_like(backend.logspace(-3, 5, 9), Ks) n_targets = Y.shape[1] cv = sklearn.model_selection.check_cv(10) ############ # run solver results = solve_multiple_kernel_ridge_random_search( Ks, Y, n_iter=gammas, alphas=alphas, score_func=r2_score, cv=cv, n_targets_batch=n_targets_batch, Xs=Xs, progress_bar=False, return_weights=return_weights, n_alphas_batch=n_alphas_batch, diagonalize_method=diagonalize_method, local_alpha=local_alpha) best_deltas, refit_weights, cv_scores = results ######################################### # compare with sklearn.linear_model.Ridge if local_alpha: # only compare when each target optimizes alpha test_scores = [] for gamma in backend.sqrt(gammas): X = backend.concatenate([x * g for x, g in zip(Xs, gamma)], 1) for train, test in cv.split(X): for alpha in alphas: model = sklearn.linear_model.Ridge( 
alpha=backend.to_numpy(alpha), fit_intercept=False) model = model.fit(backend.to_numpy(X[train]), backend.to_numpy(Y[train])) predictions = backend.asarray_like( model.predict(backend.to_numpy(X[test])), Y) test_scores.append(r2_score(Y[test], predictions)) test_scores = backend.stack(test_scores) test_scores = test_scores.reshape(len(gammas), cv.get_n_splits(), len(alphas), n_targets) test_scores_mean = backend.max(test_scores.mean(1), 1) assert_array_almost_equal(cv_scores, test_scores_mean, decimal=5) ###################### # test refited_weights for tt in range(n_targets): gamma = backend.exp(best_deltas[:, tt]) alpha = 1.0 if return_weights == 'primal': # compare primal weights with sklearn.linear_model.Ridge X = backend.concatenate( [X * backend.sqrt(g) for X, g in zip(Xs, gamma)], 1) model = sklearn.linear_model.Ridge(fit_intercept=False, alpha=backend.to_numpy(alpha)) w1 = model.fit(backend.to_numpy(X), backend.to_numpy(Y[:, tt])).coef_ w1 = np.split(w1, np.cumsum([X.shape[1] for X in Xs][:-1]), axis=0) w1 = [backend.asarray(w) for w in w1] w1_scaled = backend.concatenate( [w * backend.sqrt(g) for w, g, in zip(w1, gamma)]) assert_array_almost_equal(w1_scaled, refit_weights[:, tt], decimal=5) elif return_weights == 'dual': # compare dual weights with scipy.linalg.solve Ks_64 = backend.asarray(Ks, dtype=backend.float64) gamma_64 = backend.asarray(gamma, dtype=backend.float64) K = backend.matmul(Ks_64.T, gamma_64).T reg = backend.asarray_like(np.eye(K.shape[0]), K) * alpha Y_64 = backend.asarray(Y, dtype=backend.float64) c1 = scipy.linalg.solve(backend.to_numpy(K + reg), backend.to_numpy(Y_64[:, tt])) c1 = backend.asarray_like(c1, K) assert_array_almost_equal(c1, refit_weights[:, tt], decimal=5) @pytest.mark.parametrize('backend', ALL_BACKENDS) def test_solve_multiple_kernel_ridge_random_search_single_alpha_numpy(backend): backend = set_backend(backend) # just a smoke test, so make it minimal Ks, Y, gammas, Xs = _create_dataset(backend) alphas = 1.0 # make Y a numpy array Y = backend.to_numpy(Y) results = solve_multiple_kernel_ridge_random_search( Ks, Y, n_iter=gammas, alphas=alphas ) @pytest.mark.parametrize('backend', ALL_BACKENDS) @pytest.mark.parametrize('n_kernels', [1, 2]) def test_solve_multiple_kernel_ridge_random_search_global_alpha(backend, n_kernels): backend = set_backend(backend) # add more targets to make sure we get some variability Ks, Y, gammas, Xs = _create_dataset(backend, n_targets=20) alphas = backend.asarray_like(backend.logspace(-3, 5, 9), Ks) cv = sklearn.model_selection.check_cv(5) deltas, *_, best_alphas = solve_multiple_kernel_ridge_random_search( Ks[:n_kernels], Y, n_iter=50, progress_bar=False, alphas=alphas, cv=cv, local_alpha=False, return_alphas=True ) # test that we return a single combination of deltas deltas = backend.to_numpy(deltas) if deltas.ndim == 1: assert np.allclose(deltas[0], deltas) else: for dd in deltas: assert np.allclose(dd[0], dd) # test that we return a single alpha best_alphas = backend.to_numpy(best_alphas) assert np.allclose(best_alphas[0], best_alphas)
[ [ [ 7, 13 ], [ 1048, 1054 ], [ 1103, 1109 ], [ 1397, 1403 ], [ 1452, 1458 ], [ 1718, 1724 ], [ 1772, 1778 ], [ 2034, 2040 ], [ 2097, 2103 ], [ 2359, 2365 ], [ 2423, 2429 ], [ 6368, 6374 ], [ 6807, 6813 ], [ 6857, 6863 ] ], [ [ 22, 33 ], [ 5356, 5358 ], [ 5369, 5371 ], [ 6023, 6025 ], [ 7645, 7647 ], [ 7731, 7733 ], [ 7855, 7857 ] ], [ [ 41, 61 ] ], [ [ 69, 92 ], [ 3077, 3084 ], [ 3955, 3962 ], [ 5104, 5111 ], [ 7220, 7227 ] ], [ [ 100, 112 ], [ 6132, 6137 ] ], [ [ 143, 154 ], [ 2904, 2915 ], [ 6511, 6522 ], [ 7001, 7012 ] ], [ [ 184, 196 ], [ 1138, 1150 ], [ 1487, 1499 ], [ 1807, 1819 ], [ 2132, 2144 ], [ 2458, 2470 ], [ 6403, 6415 ], [ 6842, 6854 ] ], [ [ 224, 249 ], [ 4648, 4673 ], [ 5593, 5618 ], [ 6302, 6327 ] ], [ [ 279, 287 ], [ 3262, 3270 ], [ 4355, 4363 ] ], [ [ 323, 364 ], [ 3163, 3204 ], [ 6711, 6752 ], [ 7287, 7328 ] ], [ [ 371, 386 ], [ 2950, 2965 ], [ 6600, 6615 ], [ 7106, 7121 ] ], [ [ 1156, 1215 ] ], [ [ 1505, 1567 ] ], [ [ 1825, 1886 ] ], [ [ 2150, 2211 ] ], [ [ 2476, 2541 ] ], [ [ 2704, 2751 ], [ 1252, 1299 ], [ 1608, 1655 ], [ 1926, 1973 ], [ 2251, 2298 ], [ 2585, 2632 ] ], [ [ 6421, 6486 ] ], [ [ 6906, 6965 ] ] ]
# -*- coding: utf-8 -*- """Helper functions for getting resources.""" import logging import os from dataclasses import dataclass from typing import List, Optional from urllib.request import urlretrieve logger = logging.getLogger(__name__) HERE = os.path.abspath(os.path.dirname(__file__)) DEFAULT_DIRECTORY = os.path.abspath(os.path.join(HERE, os.pardir, os.pardir, 'data')) DATA_DIRECTORY = os.environ.get('REPOSITIONING_COMPARISON_DIRECTORY', DEFAULT_DIRECTORY) # URLs from dhimmel/integrate NODE_DATA_URL = 'https://raw.githubusercontent.com/dhimmel/integrate/master/data/nodes.tsv' EDGE_DATA_URL = 'https://raw.githubusercontent.com/dhimmel/integrate/master/data/edges.sif.gz' PERMUTATION1_DATA_URL = 'https://raw.githubusercontent.com/dhimmel/integrate/master/data/permuted/hetnet_perm-1.json.bz2' PERMUTATION2_DATA_URL = 'https://raw.githubusercontent.com/dhimmel/integrate/master/data/permuted/hetnet_perm-2.json.bz2' PERMUTATION3_DATA_URL = 'https://raw.githubusercontent.com/dhimmel/integrate/master/data/permuted/hetnet_perm-3.json.bz2' PERMUTATION4_DATA_URL = 'https://raw.githubusercontent.com/dhimmel/integrate/master/data/permuted/hetnet_perm-4.json.bz2' PERMUTATION5_DATA_URL = 'https://raw.githubusercontent.com/dhimmel/integrate/master/data/permuted/hetnet_perm-5.json.bz2' PERMUTATION_DATA_FILE_FMT = 'hetnet_perm-{}.json.bz2' PERMUTATION_DATA_URL_FMT = 'https://raw.githubusercontent.com/dhimmel/integrate/master/data/permuted/hetnet_perm-{}.json.bz2' # URLs from dhimmel/learn TRANSFORMED_FEATURES_URL = 'https://github.com/dhimmel/learn/blob/master/prediction/features/features.tsv.bz2?raw=true' VALIDATE_DATA_URL = 'https://raw.githubusercontent.com/dhimmel/learn/master/validate/validation-statuses.tsv' SYMPTOMATIC_DATA_URL = 'https://raw.githubusercontent.com/dhimmel/learn/master/prediction/predictions/probabilities.tsv' REPURPOSE_DATA_URL = 'https://raw.githubusercontent.com/drugrelink/drugrelink/master/notebooks/repurpose_overlap.json' REPO_DATA_URL = 'https://raw.githubusercontent.com/drugrelink/drugrelink/master/notebooks/repo_data.csv' @dataclass class DataPaths: """Container for the paths for training.""" node_data_path: str edge_data_path: str transformed_features_path: str validate_data_path: str symptomatic_data_path: str permutation_paths: List[str] data_edge2vec_path: str repurpose_data_path: str repo_data_path: str def get_data_paths(directory: Optional[str] = None) -> DataPaths: """Ensure Himmelstein's data files are downloaded.""" if directory is None: directory = DATA_DIRECTORY os.makedirs(directory, exist_ok=True) node_data_path = os.path.join(directory, 'nodes.tsv') if not os.path.exists(node_data_path): logger.info(f'downloading {NODE_DATA_URL}') urlretrieve(NODE_DATA_URL, node_data_path) edge_data_path = os.path.join(directory, 'edges.sif.gz') if not os.path.exists(edge_data_path): logger.info(f'downloading {EDGE_DATA_URL}') urlretrieve(EDGE_DATA_URL, edge_data_path) transformed_features_path = os.path.join(directory, 'transformed-features.tsv.bz2') if not os.path.exists(transformed_features_path): logger.info(f'downloading {TRANSFORMED_FEATURES_URL}') urlretrieve(TRANSFORMED_FEATURES_URL, transformed_features_path) validate_data_path = os.path.join(directory, 'validation-statuses.tsv') if not os.path.exists(validate_data_path): logger.info(f'downloading {VALIDATE_DATA_URL}') urlretrieve(VALIDATE_DATA_URL, validate_data_path) symptomatic_data_path = os.path.join(directory, 'probabilities.tsv') if not os.path.exists(symptomatic_data_path): logger.info(f'downloading 
{SYMPTOMATIC_DATA_URL}') urlretrieve(SYMPTOMATIC_DATA_URL, symptomatic_data_path) repurpose_data_path = os.path.join(directory,'repurpose_overlap.json') if not os.path.exists(repurpose_data_path): logger.info(f'downloading {REPURPOSE_DATA_URL}') urlretrieve(REPURPOSE_DATA_URL, repurpose_data_path) repo_data_path = os.path.join(directory, 'repo_data.csv') if not os.path.exists(repo_data_path): logger.info(f'downloading {REPO_DATA_URL}') urlretrieve(REPO_DATA_URL, repo_data_path) permutation_directory = os.path.join(directory, "permutations") os.makedirs(permutation_directory, exist_ok=True) permutation_paths = [] for i in range(5): permutation_data_path = os.path.join(permutation_directory, PERMUTATION_DATA_FILE_FMT.format(i + 1)) if not os.path.exists(permutation_data_path): url = PERMUTATION_DATA_URL_FMT.format(i + 1) logger.info(f'downloading {url}') urlretrieve(url, permutation_data_path) permutation_paths.append(permutation_data_path) data_edge2vec_path = os.path.join(directory, 'data_edge2vec') return DataPaths( node_data_path=node_data_path, edge_data_path=edge_data_path, transformed_features_path=transformed_features_path, validate_data_path=validate_data_path, symptomatic_data_path=symptomatic_data_path, permutation_paths=permutation_paths, data_edge2vec_path=data_edge2vec_path, repurpose_data_path = repurpose_data_path, repo_data_path = repo_data_path )
[ [ [ 79, 86 ], [ 214, 221 ] ], [ [ 94, 96 ], [ 250, 252 ], [ 266, 268 ], [ 314, 316 ], [ 330, 332 ], [ 349, 351 ], [ 360, 362 ], [ 397, 399 ], [ 2610, 2612 ], [ 2670, 2672 ], [ 2718, 2720 ], [ 2875, 2877 ], [ 2926, 2928 ], [ 3094, 3096 ], [ 3161, 3163 ], [ 3366, 3368 ], [ 3428, 3430 ], [ 3608, 3610 ], [ 3664, 3666 ], [ 3854, 3856 ], [ 3914, 3916 ], [ 4091, 4093 ], [ 4143, 4145 ], [ 4307, 4309 ], [ 4351, 4353 ], [ 4484, 4486 ], [ 4576, 4578 ], [ 4851, 4853 ] ], [ [ 121, 130 ], [ 2086, 2095 ] ], [ [ 150, 154 ], [ 2327, 2331 ] ], [ [ 156, 164 ], [ 2450, 2458 ] ], [ [ 192, 203 ], [ 2810, 2821 ], [ 3018, 3029 ], [ 3275, 3286 ], [ 3528, 3539 ], [ 3770, 3781 ], [ 4016, 4027 ], [ 4235, 4246 ], [ 4730, 4741 ] ], [ [ 205, 211 ], [ 2758, 2764 ], [ 2966, 2972 ], [ 3212, 3218 ], [ 3472, 3478 ], [ 3711, 3717 ], [ 3959, 3965 ], [ 4183, 4189 ], [ 4684, 4690 ] ], [ [ 243, 247 ], [ 343, 347 ] ], [ [ 294, 311 ], [ 450, 467 ] ], [ [ 380, 394 ], [ 2590, 2604 ] ], [ [ 501, 514 ], [ 2785, 2798 ], [ 2822, 2835 ] ], [ [ 593, 606 ], [ 2993, 3006 ], [ 3030, 3043 ] ], [ [ 689, 710 ] ], [ [ 811, 832 ] ], [ [ 933, 954 ] ], [ [ 1055, 1076 ] ], [ [ 1177, 1198 ] ], [ [ 1300, 1325 ], [ 4520, 4545 ] ], [ [ 1354, 1378 ], [ 4633, 4657 ] ], [ [ 1508, 1532 ], [ 3239, 3263 ], [ 3287, 3311 ] ], [ [ 1628, 1645 ], [ 3499, 3516 ], [ 3540, 3557 ] ], [ [ 1738, 1758 ], [ 3738, 3758 ], [ 3782, 3802 ] ], [ [ 1860, 1878 ], [ 3986, 4004 ], [ 4028, 4046 ] ], [ [ 1979, 1992 ], [ 4210, 4223 ], [ 4247, 4260 ] ], [ [ 2102, 2111 ], [ 2475, 2484 ], [ 4904, 4913 ] ], [ [ 2424, 2438 ] ] ]
# (c) Copyright 2013 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Generic linux scsi subsystem and Multipath utilities. Note, this is not iSCSI. """ import os import re from oslo_concurrency import processutils as putils from oslo_log import log as logging from cinder.brick import exception from cinder.brick import executor from cinder.i18n import _, _LW, _LE from cinder.openstack.common import loopingcall LOG = logging.getLogger(__name__) MULTIPATH_ERROR_REGEX = re.compile("\w{3} \d+ \d\d:\d\d:\d\d \|.*$") MULTIPATH_WWID_REGEX = re.compile("\((?P<wwid>.+)\)") class LinuxSCSI(executor.Executor): def __init__(self, root_helper, execute=putils.execute, *args, **kwargs): super(LinuxSCSI, self).__init__(root_helper, execute, *args, **kwargs) def echo_scsi_command(self, path, content): """Used to echo strings to scsi subsystem.""" args = ["-a", path] kwargs = dict(process_input=content, run_as_root=True, root_helper=self._root_helper) self._execute('tee', *args, **kwargs) def get_name_from_path(self, path): """Translates /dev/disk/by-path/ entry to /dev/sdX.""" name = os.path.realpath(path) if name.startswith("/dev/"): return name else: return None def remove_scsi_device(self, device): """Removes a scsi device based upon /dev/sdX name.""" path = "/sys/block/%s/device/delete" % device.replace("/dev/", "") if os.path.exists(path): # flush any outstanding IO first self.flush_device_io(device) LOG.debug("Remove SCSI device(%s) with %s" % (device, path)) self.echo_scsi_command(path, "1") def wait_for_volume_removal(self, volume_path): """This is used to ensure that volumes are gone.""" def _wait_for_volume_removal(volume_path): LOG.debug("Waiting for SCSI mount point %s to be removed.", volume_path) if os.path.exists(volume_path): if self.tries >= self.scan_attempts: msg = _LE("Exceeded the number of attempts to detect " "volume removal.") LOG.error(msg) raise exception.VolumePathNotRemoved( volume_path=volume_path) LOG.debug("%(path)s still exists, rescanning. 
Try number: " "%(tries)s", {'path': volume_path, 'tries': self.tries}) self.tries = self.tries + 1 else: LOG.debug("SCSI mount point %s has been removed.", volume_path) raise loopingcall.LoopingCallDone() # Setup a loop here to give the kernel time # to remove the volume from /dev/disk/by-path/ self.tries = 0 self.scan_attempts = 3 timer = loopingcall.FixedIntervalLoopingCall( _wait_for_volume_removal, volume_path) timer.start(interval=2).wait() def get_device_info(self, device): (out, _err) = self._execute('sg_scan', device, run_as_root=True, root_helper=self._root_helper) dev_info = {'device': device, 'host': None, 'channel': None, 'id': None, 'lun': None} if out: line = out.strip() line = line.replace(device + ": ", "") info = line.split(" ") for item in info: if '=' in item: pair = item.split('=') dev_info[pair[0]] = pair[1] elif 'scsi' in item: dev_info['host'] = item.replace('scsi', '') return dev_info def remove_multipath_device(self, multipath_name): """This removes LUNs associated with a multipath device and the multipath device itself. """ LOG.debug("remove multipath device %s" % multipath_name) mpath_dev = self.find_multipath_device(multipath_name) if mpath_dev: devices = mpath_dev['devices'] LOG.debug("multipath LUNs to remove %s" % devices) for device in devices: self.remove_scsi_device(device['device']) self.flush_multipath_device(mpath_dev['id']) def flush_device_io(self, device): """This is used to flush any remaining IO in the buffers.""" try: LOG.debug("Flushing IO for device %s" % device) self._execute('blockdev', '--flushbufs', device, run_as_root=True, root_helper=self._root_helper) except putils.ProcessExecutionError as exc: msg = _("Failed to flush IO buffers prior to removing" " device: (%(code)s)") % {'code': exc.exit_code} LOG.warn(msg) def flush_multipath_device(self, device): try: LOG.debug("Flush multipath device %s" % device) self._execute('multipath', '-f', device, run_as_root=True, root_helper=self._root_helper) except putils.ProcessExecutionError as exc: LOG.warn(_LW("multipath call failed exit (%(code)s)") % {'code': exc.exit_code}) def flush_multipath_devices(self): try: self._execute('multipath', '-F', run_as_root=True, root_helper=self._root_helper) except putils.ProcessExecutionError as exc: LOG.warn(_LW("multipath call failed exit (%(code)s)") % {'code': exc.exit_code}) def find_multipath_device(self, device): """Find a multipath device associated with a LUN device name. device can be either a /dev/sdX entry or a multipath id. """ mdev = None devices = [] out = None try: (out, _err) = self._execute('multipath', '-l', device, run_as_root=True, root_helper=self._root_helper) except putils.ProcessExecutionError as exc: LOG.warn(_LW("multipath call failed exit (%(code)s)") % {'code': exc.exit_code}) return None if out: lines = out.strip() lines = lines.split("\n") lines = [line for line in lines if not re.match(MULTIPATH_ERROR_REGEX, line)] if lines: # Use the device name, be it the WWID, mpathN or custom alias # of a device to build the device path. This should be the # first item on the first line of output from `multipath -l # ${path}` or `multipath -l ${wwid}`.. mdev_name = lines[0].split(" ")[0] mdev = '/dev/mapper/%s' % mdev_name # Find the WWID for the LUN if we are using mpathN or aliases. wwid_search = MULTIPATH_WWID_REGEX.search(lines[0]) if wwid_search is not None: mdev_id = wwid_search.group('wwid') else: mdev_id = mdev_name # Confirm that the device is present. 
try: os.stat(mdev) except OSError: LOG.warn(_LW("Couldn't find multipath device %s"), mdev) return None LOG.debug("Found multipath device = %(mdev)s" % {'mdev': mdev}) device_lines = lines[3:] for dev_line in device_lines: if dev_line.find("policy") != -1: continue dev_line = dev_line.lstrip(' |-`') dev_info = dev_line.split() address = dev_info[0].split(":") dev = {'device': '/dev/%s' % dev_info[1], 'host': address[0], 'channel': address[1], 'id': address[2], 'lun': address[3] } devices.append(dev) if mdev is not None: info = {"device": mdev, "id": mdev_id, "name": mdev_name, "devices": devices} return info return None
[ [ [ 735, 737 ], [ 1838, 1840 ], [ 2152, 2154 ], [ 7941, 7943 ], [ 2667, 2669 ] ], [ [ 745, 747 ], [ 1051, 1053 ], [ 1119, 1121 ], [ 7086, 7088 ] ], [ [ 778, 800 ], [ 1232, 1238 ], [ 5321, 5327 ], [ 5783, 5789 ], [ 6122, 6128 ], [ 6752, 6758 ] ], [ [ 822, 836 ], [ 998, 1005 ] ], [ [ 863, 872 ], [ 2934, 2943 ] ], [ [ 898, 906 ], [ 1168, 1176 ] ], [ [ 931, 932 ], [ 5376, 5377 ] ], [ [ 934, 937 ], [ 5841, 5844 ], [ 6180, 6183 ], [ 6810, 6813 ], [ 8016, 8019 ] ], [ [ 939, 942 ], [ 2775, 2778 ] ], [ [ 979, 990 ], [ 3573, 3584 ], [ 3365, 3376 ] ], [ [ 992, 995 ], [ 2273, 2276 ], [ 4590, 4593 ], [ 4787, 4790 ], [ 5122, 5125 ], [ 5506, 5509 ], [ 5592, 5595 ], [ 5832, 5835 ], [ 6171, 6174 ], [ 6801, 6804 ], [ 8007, 8010 ], [ 8113, 8116 ], [ 2557, 2560 ], [ 2893, 2896 ], [ 3032, 3035 ], [ 3279, 3282 ] ], [ [ 1027, 1048 ], [ 7095, 7116 ] ], [ [ 1096, 1116 ], [ 7645, 7665 ] ], [ [ 1158, 1167 ], [ 1297, 1306 ] ] ]
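For quick reference, a standalone sketch of what the two module-level regexes above match when picking apart `multipath -l` output. The sample line is made up (not taken from the module's tests), and raw strings are used here to avoid escape-sequence warnings on newer Python versions.

import re

MULTIPATH_ERROR_REGEX = re.compile(r"\w{3} \d+ \d\d:\d\d:\d\d \|.*$")
MULTIPATH_WWID_REGEX = re.compile(r"\((?P<wwid>.+)\)")

# Hypothetical first line printed by `multipath -l <device>`.
sample = "mpatha (36001405f1e2acce2e0c4d3e826a55ca9) dm-2 LIO-ORG,block1"

assert MULTIPATH_ERROR_REGEX.match(sample) is None         # not a timestamped error line
print(MULTIPATH_WWID_REGEX.search(sample).group("wwid"))  # 36001405f1e2acce2e0c4d3e826a55ca9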
import pytest from django.conf import settings from django.contrib import messages from proposals.models import TalkProposal, TutorialProposal pytestmark = pytest.mark.skipif( not settings.PROPOSALS_WITHDRAWABLE, reason='proposal withdrawal disabled', ) def test_talk_proposal_cancel_login(client): response = client.get('/en-us/proposals/talk/42/cancel/', follow=True) assert response.redirect_chain == [ ('/en-us/accounts/login/?next=/en-us/proposals/talk/42/cancel/', 302), ] def test_tutorial_proposal_cancel_login(client): response = client.get('/en-us/proposals/tutorial/42/cancel/', follow=True) assert response.redirect_chain == [ ('/en-us/accounts/login/?next=/en-us/proposals/tutorial/42/cancel/', 302), ] @pytest.mark.parametrize('method', ['get', 'post']) def test_talk_proposal_cancel_denied(bare_user_client, method): response = getattr(bare_user_client, method)( '/en-us/proposals/talk/42/cancel/', ) assert response.status_code == 403 @pytest.mark.parametrize('method', ['get', 'post']) def test_tutorial_proposal_cancel_denied(bare_user_client, method): response = getattr(bare_user_client, method)( '/en-us/proposals/tutorial/42/cancel/', ) assert response.status_code == 403 def test_talk_proposal_cancel_get(agreed_user_client, talk_proposal): """The cancel view should not allow GET, only POST. """ response = agreed_user_client.get('/en-us/proposals/talk/42/cancel/') assert response.status_code == 405 def test_tutorial_proposal_cancel_get(agreed_user_client, tutorial_proposal): """The cancel view should not allow GET, only POST. """ response = agreed_user_client.get('/en-us/proposals/tutorial/42/cancel/') assert response.status_code == 405 def test_talk_proposal_cancel_not_owned(another_agreed_user_client, talk_proposal): response = another_agreed_user_client.post('/en-us/proposals/talk/42/cancel/') assert response.status_code == 404 def test_tutorial_proposal_cancel_not_owned( another_agreed_user_client, tutorial_proposal): response = another_agreed_user_client.post('/en-us/proposals/tutorial/42/cancel/') assert response.status_code == 404 def test_talk_proposal_cancel(agreed_user_client, talk_proposal): assert not talk_proposal.cancelled response = agreed_user_client.post('/en-us/proposals/talk/42/cancel/', { 'cancelled': True, }, follow=True) assert response.redirect_chain == [('/en-us/dashboard/', 302)], ( response.context['form'].errors ) assert TalkProposal.objects.get(pk=42).cancelled msgs = [(m.level, m.message) for m in response.context['messages']] assert msgs == [ (messages.INFO, 'Talk proposal ' '<strong>Beyond the Style Guides&lt;br&gt;</strong> withdrawn.'), ] def test_talk_proposal_reactivate(agreed_user_client, cancelled_talk_proposal): assert cancelled_talk_proposal.cancelled response = agreed_user_client.post('/en-us/proposals/talk/42/cancel/', { 'cancelled': '', }, follow=True) assert response.redirect_chain == [('/en-us/dashboard/', 302)], ( response.context['form'].errors ) assert not TalkProposal.objects.get(pk=42).cancelled msgs = [(m.level, m.message) for m in response.context['messages']] assert msgs == [ (messages.SUCCESS, 'Talk proposal ' '<strong>Beyond the Style Guides&lt;br&gt;</strong> reactivated.'), ] def test_tutorial_proposal_cancel(agreed_user_client, tutorial_proposal): assert not tutorial_proposal.cancelled response = agreed_user_client.post('/en-us/proposals/tutorial/42/cancel/', { 'cancelled': True, }, follow=True) assert response.redirect_chain == [('/en-us/dashboard/', 302)], ( response.context['form'].errors ) assert 
TutorialProposal.objects.get(pk=42).cancelled msgs = [(m.level, m.message) for m in response.context['messages']] assert msgs == [ (messages.INFO, 'Tutorial proposal ' '<strong>Beyond the Style Guides&lt;br&gt;</strong> withdrawn.'), ] def test_tutorial_proposal_reactivate( agreed_user_client, cancelled_tutorial_proposal): assert cancelled_tutorial_proposal.cancelled response = agreed_user_client.post('/en-us/proposals/tutorial/42/cancel/', { 'cancelled': '', }, follow=True) assert response.redirect_chain == [('/en-us/dashboard/', 302)], ( response.context['form'].errors ) assert not TutorialProposal.objects.get(pk=42).cancelled msgs = [(m.level, m.message) for m in response.context['messages']] assert msgs == [ (messages.SUCCESS, 'Tutorial proposal ' '<strong>Beyond the Style Guides&lt;br&gt;</strong> reactivated.'), ]
[ [ [ 7, 13 ], [ 160, 166 ], [ 784, 790 ], [ 1041, 1047 ] ], [ [ 39, 47 ], [ 188, 196 ] ], [ [ 75, 83 ], [ 2755, 2763 ], [ 3404, 3412 ], [ 4056, 4064 ], [ 4738, 4746 ] ], [ [ 114, 126 ], [ 2610, 2622 ], [ 3259, 3271 ] ], [ [ 128, 144 ], [ 3907, 3923 ], [ 4589, 4605 ] ], [ [ 147, 157 ] ], [ [ 272, 303 ] ], [ [ 519, 554 ] ], [ [ 839, 871 ] ], [ [ 1096, 1132 ] ], [ [ 1309, 1338 ] ], [ [ 1558, 1591 ] ], [ [ 1819, 1854 ] ], [ [ 2027, 2066 ] ], [ [ 2256, 2281 ] ], [ [ 2883, 2912 ] ], [ [ 3537, 3566 ] ], [ [ 4188, 4221 ] ] ]
"""Deals with making images (np arrays). It provides drawing methods that are difficult to do with the existing Python libraries. """ import numpy as np def blit(im1, im2, pos=None, mask=None): """Blit an image over another. Blits ``im1`` on ``im2`` as position ``pos=(x,y)``, using the ``mask`` if provided. """ if pos is None: pos = (0, 0) # pragma: no cover else: # Cast to tuple in case pos is not subscriptable. pos = tuple(pos) im2.paste(im1, pos, mask) return im2 def color_gradient( size, p1, p2=None, vector=None, radius=None, color_1=0.0, color_2=1.0, shape="linear", offset=0, ): """Draw a linear, bilinear, or radial gradient. The result is a picture of size ``size``, whose color varies gradually from color `color_1` in position ``p1`` to color ``color_2`` in position ``p2``. If it is a RGB picture the result must be transformed into a 'uint8' array to be displayed normally: Parameters ---------- size : tuple or list Size (width, height) in pixels of the final image array. p1 : tuple or list Position for the first coordinate of the gradient in pixels (x, y). The color 'before' ``p1`` is ``color_1`` and it gradually changes in the direction of ``p2`` until it is ``color_2`` when it reaches ``p2``. p2 : tuple or list, optional Position for the second coordinate of the gradient in pixels (x, y). Coordinates (x, y) of the limit point for ``color_1`` and ``color_2``. vector : tuple or list, optional A vector (x, y) in pixels that can be provided instead of ``p2``. ``p2`` is then defined as (p1 + vector). color_1 : tuple or list, optional Starting color for the gradient. As default, black. Either floats between 0 and 1 (for gradients used in masks) or [R, G, B] arrays (for colored gradients). color_2 : tuple or list, optional Color for the second point in the gradient. As default, white. Either floats between 0 and 1 (for gradients used in masks) or [R, G, B] arrays (for colored gradients). shape : str, optional Shape of the gradient. Can be either ``"linear"``, ``"bilinear"`` or ``"circular"``. In a linear gradient the color varies in one direction, from point ``p1`` to point ``p2``. In a bilinear gradient it also varies symmetrically from ``p1`` in the other direction. In a circular gradient it goes from ``color_1`` to ``color_2`` in all directions. radius : float, optional If ``shape="radial"``, the radius of the gradient is defined with the parameter ``radius``, in pixels. offset : float, optional Real number between 0 and 1 indicating the fraction of the vector at which the gradient actually starts. For instance if ``offset`` is 0.9 in a gradient going from p1 to p2, then the gradient will only occur near p2 (before that everything is of color ``color_1``) If the offset is 0.9 in a radial gradient, the gradient will occur in the region located between 90% and 100% of the radius, this creates a blurry disc of radius ``d(p1, p2)``. Returns ------- image An Numpy array of dimensions (width, height, n_colors) of type float representing the image of the gradient. Examples -------- >>> color_gradient((10, 1), (0, 0), p2=(10, 0)) # from white to black [[1. 0.9 0.8 0.7 0.6 0.5 0.4 0.3 0.2 0.1]] >>> >>> color_gradient( # from red to green ... (10, 1), # size ... (0, 0), # p1 ... p2=(10, 0), ... color_1=(255, 0, 0), # red ... color_2=(0, 255, 0), # green ... ) [[[ 0. 255. 0. ] [ 25.5 229.5 0. ] [ 51. 204. 0. ] [ 76.5 178.5 0. ] [102. 153. 0. ] [127.5 127.5 0. ] [153. 102. 0. ] [178.5 76.5 0. ] [204. 51. 0. ] [229.5 25.5 0. 
]]] """ # np-arrayize and change x,y coordinates to y,x w, h = size color_1 = np.array(color_1).astype(float) color_2 = np.array(color_2).astype(float) if shape == "bilinear": if vector is None: if p2 is None: raise ValueError("You must provide either 'p2' or 'vector'") vector = np.array(p2) - np.array(p1) m1, m2 = [ color_gradient( size, p1, vector=v, color_1=1.0, color_2=0.0, shape="linear", offset=offset, ) for v in [vector, [-v for v in vector]] ] arr = np.maximum(m1, m2) if color_1.size > 1: arr = np.dstack(3 * [arr]) return arr * color_1 + (1 - arr) * color_2 p1 = np.array(p1[::-1]).astype(float) M = np.dstack(np.meshgrid(range(w), range(h))[::-1]).astype(float) if shape == "linear": if vector is None: if p2 is not None: vector = np.array(p2[::-1]) - p1 else: raise ValueError("You must provide either 'p2' or 'vector'") else: vector = np.array(vector[::-1]) norm = np.linalg.norm(vector) n_vec = vector / norm ** 2 # norm 1/norm(vector) p1 = p1 + offset * vector arr = (M - p1).dot(n_vec) / (1 - offset) arr = np.minimum(1, np.maximum(0, arr)) if color_1.size > 1: arr = np.dstack(3 * [arr]) return arr * color_1 + (1 - arr) * color_2 elif shape == "radial": if (radius or 0) == 0: arr = np.ones((h, w)) else: arr = (np.sqrt(((M - p1) ** 2).sum(axis=2))) - offset * radius arr = arr / ((1 - offset) * radius) arr = np.minimum(1.0, np.maximum(0, arr)) if color_1.size > 1: arr = np.dstack(3 * [arr]) return (1 - arr) * color_1 + arr * color_2 raise ValueError("Invalid shape, should be either 'radial', 'linear' or 'bilinear'") def color_split( size, x=None, y=None, p1=None, p2=None, vector=None, color_1=0, color_2=1.0, gradient_width=0, ): """Make an image split in 2 colored regions. Returns an array of size ``size`` divided in two regions called 1 and 2 in what follows, and which will have colors color_1 and color_2 respectively. Parameters ---------- x : int, optional If provided, the image is split horizontally in x, the left region being region 1. y : int, optional If provided, the image is split vertically in y, the top region being region 1. p1, p2: tuple or list, optional Positions (x1, y1), (x2, y2) in pixels, where the numbers can be floats. Region 1 is defined as the whole region on the left when going from ``p1`` to ``p2``. p1, vector: tuple or list, optional ``p1`` is (x1,y1) and vector (v1,v2), where the numbers can be floats. Region 1 is then the region on the left when starting in position ``p1`` and going in the direction given by ``vector``. gradient_width : float, optional If not zero, the split is not sharp, but gradual over a region of width ``gradient_width`` (in pixels). This is preferable in many situations (for instance for antialiasing). 
Examples -------- >>> size = [200, 200] >>> >>> # an image with all pixels with x<50 =0, the others =1 >>> color_split(size, x=50, color_1=0, color_2=1) >>> >>> # an image with all pixels with y<50 red, the others green >>> color_split(size, x=50, color_1=[255, 0, 0], color_2=[0, 255, 0]) >>> >>> # An image split along an arbitrary line (see below) >>> color_split(size, p1=[20, 50], p2=[25, 70] color_1=0, color_2=1) """ if gradient_width or ((x is None) and (y is None)): if p2 is not None: vector = np.array(p2) - np.array(p1) elif x is not None: vector = np.array([0, -1.0]) p1 = np.array([x, 0]) elif y is not None: vector = np.array([1.0, 0.0]) p1 = np.array([0, y]) x, y = vector vector = np.array([y, -x]).astype("float") norm = np.linalg.norm(vector) vector = max(0.1, gradient_width) * vector / norm return color_gradient( size, p1, vector=vector, color_1=color_1, color_2=color_2, shape="linear" ) else: w, h = size shape = (h, w) if np.isscalar(color_1) else (h, w, len(color_1)) arr = np.zeros(shape) if x: arr[:, :x] = color_1 arr[:, x:] = color_2 elif y: arr[:y] = color_1 arr[y:] = color_2 return arr def circle(screensize, center, radius, color=1.0, bg_color=0, blur=1): """Draw an image with a circle. Draws a circle of color ``color``, on a background of color ``bg_color``, on a screen of size ``screensize`` at the position ``center=(x, y)``, with a radius ``radius`` but slightly blurred on the border by ``blur`` pixels. Parameters ---------- screensize : tuple or list Size of the canvas. center : tuple or list Center of the circle. radius : float Radius of the circle, in pixels. bg_color : tuple or float, optional Color for the background of the canvas. As default, black. blur : float, optional Blur for the border of the circle. Examples -------- >>> from moviepy.video.tools.drawing import circle >>> >>> circle( ... (5, 5), # size ... (2, 2), # center ... 2, # radius ... ) array([[0. , 0. , 0. , 0. , 0. ], [0. , 0.58578644, 1. , 0.58578644, 0. ], [0. , 1. , 1. , 1. , 0. ], [0. , 0.58578644, 1. , 0.58578644, 0. ], [0. , 0. , 0. , 0. , 0. ]]) """ offset = 1.0 * (radius - blur) / radius if radius else 0 return color_gradient( screensize, p1=center, radius=radius, color_1=color, color_2=bg_color, shape="radial", offset=offset, )
[ [ [ 142, 153 ], [ 4183, 4185 ], [ 4229, 4231 ], [ 4442, 4444 ], [ 4457, 4459 ], [ 4798, 4800 ], [ 4864, 4866 ], [ 4946, 4948 ], [ 4988, 4990 ], [ 4998, 5000 ], [ 5161, 5163 ], [ 5315, 5317 ], [ 5354, 5356 ], [ 5533, 5535 ], [ 5547, 5549 ], [ 5614, 5616 ], [ 5764, 5766 ], [ 5813, 5815 ], [ 5935, 5937 ], [ 5951, 5953 ], [ 6019, 6021 ], [ 8113, 8115 ], [ 8128, 8130 ], [ 8190, 8192 ], [ 8227, 8229 ], [ 8293, 8295 ], [ 8331, 8333 ], [ 8388, 8390 ], [ 8437, 8439 ], [ 8701, 8703 ], [ 8762, 8764 ] ], [ [ 160, 164 ] ], [ [ 538, 552 ], [ 4502, 4516 ], [ 8533, 8547 ], [ 10343, 10357 ] ], [ [ 6186, 6197 ] ], [ [ 8959, 8965 ] ] ]
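A few calls exercising the three helpers above. The import path is the one already used in the circle() docstring; the printed shapes and values follow from the code as written, assuming numpy and moviepy are installed.

from moviepy.video.tools.drawing import circle, color_gradient, color_split

mask = color_gradient((6, 1), p1=(0, 0), p2=(6, 0))       # scalar linear gradient
print(mask.shape)                                     # (1, 6)

split = color_split((4, 4), x=2, color_1=0, color_2=1.0)   # left half vs right half
print(split[:, :2].max(), split[:, 2:].min())        # 0.0 1.0

disc = circle((5, 5), center=(2, 2), radius=2)            # blurred disc mask
print(disc.shape)                                    # (5, 5)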
# -*- coding: utf-8 -*- import h5py import pyre from ..Base import Base from .Identification import Identification class SLC(Base, family='nisar.productreader.slc'): ''' Class for parsing NISAR SLC products into isce structures. ''' productValidationType = pyre.properties.str(default='SLC') productValidationType.doc = 'Validation tag to ensure correct product type' def __init__(self, **kwds): ''' Constructor to initialize product with HDF5 file. ''' ###Read base product information like Identification super().__init__(**kwds) def populateIdentification(self): ''' Read in the Identification information and assert identity. ''' with h5py.File(self.filename, 'r', libver='latest', swmr=True) as f: h5grp = f[self.IdentificationPath] self.identification = Identification(h5grp)
[ [ [ 32, 36 ], [ 750, 754 ] ], [ [ 44, 48 ], [ 279, 283 ] ], [ [ 68, 72 ], [ 127, 131 ] ], [ [ 101, 115 ], [ 895, 909 ] ], [ [ 123, 126 ] ] ]
# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import os import sys sys.path.insert(0, os.path.abspath('../..')) # -- General configuration ---------------------------------------------------- # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = [ 'sphinx.ext.autodoc', #'sphinx.ext.intersphinx', 'stevedore.sphinxext', 'oslosphinx' ] # autodoc generation is a bit aggressive and a nuisance when doing heavy # text edit cycles. # execute "export SPHINX_DEBUG=1" in your terminal to disable # The suffix of source filenames. source_suffix = '.rst' # The master toctree document. master_doc = 'index' # General information about the project. project = u'tacker' copyright = u'2013, OpenStack Foundation' # If true, '()' will be appended to :func: etc. cross-reference text. add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). add_module_names = True # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. modindex_common_prefix = ['tacker.'] # -- Options for HTML output -------------------------------------------------- # The theme to use for HTML and HTML Help pages. Major themes that come with # Sphinx are currently 'default' and 'sphinxdoc'. # html_theme_path = ["."] # html_theme = '_theme' # html_static_path = ['static'] # Output file base name for HTML help builder. htmlhelp_basename = '%sdoc' % project # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass # [howto/manual]). latex_documents = [ ('index', '%s.tex' % project, u'%s Documentation' % project, u'OpenStack Foundation', 'manual'), ] # Example configuration for intersphinx: refer to the Python standard library. #intersphinx_mapping = {'http://docs.python.org/': None}
[ [ [ 553, 555 ], [ 587, 589 ] ], [ [ 563, 566 ], [ 568, 571 ] ], [ [ 840, 850 ] ], [ [ 1149, 1162 ] ], [ [ 1204, 1214 ] ], [ [ 1267, 1274 ], [ 2121, 2128 ], [ 2325, 2332 ], [ 2361, 2368 ] ], [ [ 1287, 1296 ] ], [ [ 1400, 1424 ] ], [ [ 1544, 1560 ] ], [ [ 1632, 1646 ] ], [ [ 1714, 1736 ] ], [ [ 2091, 2108 ] ], [ [ 2275, 2290 ] ] ]
from __future__ import print_function from __future__ import division import os import sys import time import datetime import os.path as osp import numpy as np import warnings import torch import torch.nn as nn import torch.backends.cudnn as cudnn from args import argument_parser, image_dataset_kwargs, optimizer_kwargs, lr_scheduler_kwargs from torchreid.data_manager import ImageDataManager from torchreid import models from torchreid.losses import CrossEntropyLoss, DeepSupervision from torchreid.utils.iotools import check_isfile from torchreid.utils.avgmeter import AverageMeter from torchreid.utils.loggers import Logger, RankLogger from torchreid.utils.torchtools import count_num_param, open_all_layers, open_specified_layers, accuracy, \ load_pretrained_weights, save_checkpoint, resume_from_checkpoint from torchreid.utils.reidtools import visualize_ranked_results from torchreid.utils.generaltools import set_random_seed from torchreid.eval_metrics import evaluate from torchreid.optimizers import init_optimizer from torchreid.lr_schedulers import init_lr_scheduler os.environ['TORCH_HOME'] = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '.torch')) testloader_dict = trainloader = criterion = None use_gpu = False # global variables parser = argument_parser() args = parser.parse_args() def corr_metric(W: 'K x N'): G = W.permute(1, 0) @ W return torch.trace(G) / abs(G).sum() def replace_weight(layer): with torch.no_grad(): # NECESSARY! The weight of Linear layer has been transposed! A = layer.weight.t() M, N = A.size() M: 2048 N: 1024 U, S, V = torch.svd(A, some=False) W = A @ V W: '2048 x 1024 = M x N' NW = torch.zeros_like(A) for i in range(N): curr_N = W.size(1) W_norm = torch.norm(W, p=2, dim=0) W_norm: 'curr_N' index = i vec_i = A[:, i] vec_i_norm = torch.norm(vec_i) co = (A[:, i].view(M, 1).t() @ W).view(curr_N) co: 'curr_N' co = co / vec_i_norm absco = abs(co / W_norm) maxco_index = torch.max(absco, 0)[1].item() NW[:, index] = W[:, maxco_index] * torch.sign(co[maxco_index]) # Remove selected column vector from W W = W[:, sorted({x for x in range(curr_N) if x != maxco_index})] layer.weight.copy_(NW.t()) print(layer.weight) return layer def main(): global args, criterion, testloader_dict, trainloader, use_gpu set_random_seed(args.seed) if not args.use_avai_gpus: os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_devices use_gpu = torch.cuda.is_available() if args.use_cpu: use_gpu = False log_name = 'test.log' if args.evaluate else 'train.log' sys.stdout = Logger(osp.join(args.save_dir, log_name)) print('==========\nArgs:{}\n=========='.format(args)) if use_gpu: print('Currently using GPU {}'.format(args.gpu_devices)) cudnn.benchmark = True else: warnings.warn('Currently using CPU, however, GPU is highly recommended') print('Initializing image data manager') dm = ImageDataManager(use_gpu, **image_dataset_kwargs(args)) trainloader, testloader_dict = dm.return_dataloaders() print('Initializing model: {}'.format(args.arch)) model = models.init_model(name=args.arch, num_classes=dm.num_train_pids, loss={'xent'}, pretrained=not args.no_pretrained, use_gpu=use_gpu) print('Model size: {:.3f} M'.format(count_num_param(model))) if args.load_weights and check_isfile(args.load_weights): load_pretrained_weights(model, args.load_weights) model = nn.DataParallel(model).cuda() if use_gpu else model criterion = CrossEntropyLoss(num_classes=dm.num_train_pids, use_gpu=use_gpu, label_smooth=args.label_smooth) if args.resume and check_isfile(args.resume): args.start_epoch = 
resume_from_checkpoint(args.resume, model, optimizer=None) resumed = True else: resumed = False if args.evaluate: print('Evaluate only') for name in args.target_names: print('Evaluating {} ...'.format(name)) queryloader = testloader_dict[name]['query'] galleryloader = testloader_dict[name]['gallery'] distmat = test(model, queryloader, galleryloader, use_gpu, return_distmat=True) if args.visualize_ranks: visualize_ranked_results( distmat, dm.return_testdataset_by_name(name), save_dir=osp.join(args.save_dir, 'ranked_results', name), topk=20 ) return time_start = time.time() # ranklogger = RankLogger(args.source_names, args.target_names) print('=> Start training') if not resumed: train_base(model) train_RRI(model, 7) elapsed = round(time.time() - time_start) elapsed = str(datetime.timedelta(seconds=elapsed)) print('Elapsed {}'.format(elapsed)) # ranklogger.show_summary() def train(epoch, model, criterion, optimizer, trainloader, use_gpu, fixbase=False): losses = AverageMeter() accs = AverageMeter() batch_time = AverageMeter() data_time = AverageMeter() model.train() # if fixbase or args.always_fixbase: # open_specified_layers(model, args.open_layers) # else: # open_all_layers(model) end = time.time() for batch_idx, (imgs, pids, _, _) in enumerate(trainloader): data_time.update(time.time() - end) if use_gpu: imgs, pids = imgs.cuda(), pids.cuda() outputs = model(imgs) loss = sum(criterion(x, pids) for x in outputs) / len(outputs) # if isinstance(outputs, (tuple, list)): # loss = DeepSupervision(criterion, outputs, pids) # else: # loss = criterion(outputs, pids) optimizer.zero_grad() loss.backward() optimizer.step() batch_time.update(time.time() - end) losses.update(loss.item(), pids.size(0)) accs.update(accuracy(outputs, pids)[0]) if (batch_idx + 1) % args.print_freq == 0: print('Epoch: [{0}][{1}/{2}]\t' 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t' 'Data {data_time.val:.3f} ({data_time.avg:.3f})\t' 'Loss {loss.val:.4f} ({loss.avg:.4f})\t' 'Acc {acc.val:.2f} ({acc.avg:.2f})\t'.format( epoch + 1, batch_idx + 1, len(trainloader), batch_time=batch_time, data_time=data_time, loss=losses, acc=accs )) end = time.time() def test(model, queryloader, galleryloader, use_gpu, ranks=[1, 5, 10, 20], return_distmat=False): batch_time = AverageMeter() model.eval() with torch.no_grad(): qf, q_pids, q_camids = [], [], [] for batch_idx, (imgs, pids, camids, _) in enumerate(queryloader): if use_gpu: imgs = imgs.cuda() end = time.time() features = model(imgs) batch_time.update(time.time() - end) features = features.data.cpu() qf.append(features) q_pids.extend(pids) q_camids.extend(camids) qf = torch.cat(qf, 0) q_pids = np.asarray(q_pids) q_camids = np.asarray(q_camids) print('Extracted features for query set, obtained {}-by-{} matrix'.format(qf.size(0), qf.size(1))) gf, g_pids, g_camids = [], [], [] end = time.time() for batch_idx, (imgs, pids, camids, _) in enumerate(galleryloader): if use_gpu: imgs = imgs.cuda() end = time.time() features = model(imgs) batch_time.update(time.time() - end) features = features.data.cpu() gf.append(features) g_pids.extend(pids) g_camids.extend(camids) gf = torch.cat(gf, 0) g_pids = np.asarray(g_pids) g_camids = np.asarray(g_camids) print('Extracted features for gallery set, obtained {}-by-{} matrix'.format(gf.size(0), gf.size(1))) print('=> BatchTime(s)/BatchSize(img): {:.3f}/{}'.format(batch_time.avg, args.test_batch_size)) m, n = qf.size(0), gf.size(0) distmat = torch.pow(qf, 2).sum(dim=1, keepdim=True).expand(m, n) + \ torch.pow(gf, 2).sum(dim=1, keepdim=True).expand(n, 
m).t() distmat.addmm_(1, -2, qf, gf.t()) distmat = distmat.numpy() print('Computing CMC and mAP') cmc, mAP = evaluate(distmat, q_pids, g_pids, q_camids, g_camids, use_metric_cuhk03=args.use_metric_cuhk03) print('Results ----------') print('mAP: {:.1%}'.format(mAP)) print('CMC curve') for r in ranks: print('Rank-{:<3}: {:.1%}'.format(r, cmc[r - 1])) print('------------------') if return_distmat: return distmat return cmc[0] def get_base_optimizer(model): kwargs = { 'weight_decay': 5e-4, 'lr': 0.0003, 'betas': (0.9, 0.999), } param_groups = model.parameters() optimizer = torch.optim.Adam(param_groups, **kwargs) scheduler = init_lr_scheduler(optimizer, stepsize=[20, 40], gamma=0.1) return optimizer, scheduler def get_base_sgd_optimizer(model): kwargs = { 'weight_decay': 5e-4, 'lr': 0.001, 'momentum': 0.9, } param_groups = model.parameters() optimizer = torch.optim.SGD(param_groups, **kwargs) scheduler = init_lr_scheduler(optimizer, stepsize=[25, 50], gamma=0.1) return optimizer, scheduler def get_RRI_optimizer( model, lr ): kwargs = { 'weight_decay': 5e-4, 'lr': lr, 'momentum': 0.9, } param_groups = model.parameters() optimizer = torch.optim.SGD(param_groups, **kwargs) scheduler = init_lr_scheduler(optimizer, stepsize=[12], gamma=0.1) return optimizer, scheduler def train_R(model, lr, T, fix_eigen_layer: bool=False): eigen_layers = model.module.get_fcs() if fix_eigen_layer: for eigen_layer in eigen_layers: eigen_layer.eval() for p in eigen_layer.parameters(): p.requires_grad = False stage_name = 'restraint' else: model.train() for p in model.parameters(): p.requires_grad = True stage_name = 'relaxation' prefix = '{}_{}_'.format(T, stage_name) optimizer, scheduler = get_RRI_optimizer(model, lr) for epoch in range(20): train(epoch, model, criterion, optimizer, trainloader, use_gpu=use_gpu) scheduler.step() print('=> Test') if (epoch + 1) % args.eval_freq == 0: for name in args.target_names: print('Evaluating {} ...'.format(name)) queryloader = testloader_dict[name]['query'] galleryloader = testloader_dict[name]['gallery'] rank1 = test(model, queryloader, galleryloader, use_gpu) save_checkpoint({ 'state_dict': model.state_dict(), 'rank1': rank1, 'epoch': 0, 'arch': args.arch, 'optimizer': (), }, args.save_dir, prefix=prefix) def train_base(model): use_sgd = os.environ.get('sgd') is not None optimizer_getter = get_base_sgd_optimizer if use_sgd else get_base_optimizer optimizer, scheduler = get_base_optimizer(model) model.train() print('=== train base ===') if True: open_layers = ['fc', 'classifier1', 'classifier2_1', 'classifier2_2', 'fc2_1', 'fc2_2', 'reduction', 'classifier'] print('Train {} for {} epochs while keeping other layers frozen'.format(open_layers, 10)) for epoch in range(10): open_specified_layers(model, open_layers) train(epoch, model, criterion, optimizer, trainloader, use_gpu, fixbase=True) print('Done. 
All layers are open to train for {} epochs'.format(60)) open_all_layers(model) optimizer, scheduler = optimizer_getter(model) for epoch in range(60): train(epoch, model, criterion, optimizer, trainloader, use_gpu=use_gpu) scheduler.step() print('=> Test') if (epoch + 1) % args.eval_freq == 0: for name in args.target_names: print('Evaluating {} ...'.format(name)) queryloader = testloader_dict[name]['query'] galleryloader = testloader_dict[name]['gallery'] rank1 = test(model, queryloader, galleryloader, use_gpu) save_checkpoint({ 'state_dict': model.state_dict(), 'rank1': rank1, 'epoch': 0, 'arch': args.arch, 'optimizer': optimizer.state_dict(), }, args.save_dir, prefix='base_') def train_RRI(model, Ts: int=7): base_lrs = [0.001] * 3 + [0.0001] * 10 for T in range(Ts): print('=== T = {} ==='.format(T)) print('Replacing eigen layer weight...') for eigen_layer in model.module.get_fcs(): replace_weight(eigen_layer) print('Replaced.') print('--- Restraint ({}) ---'.format(T)) train_R(model, base_lrs[T], T, fix_eigen_layer=True) print('--- Relaxation ({}) ---'.format(T)) train_R(model, base_lrs[T], T, fix_eigen_layer=False) for name in args.target_names: print('Evaluating {} ...'.format(name)) queryloader = testloader_dict[name]['query'] galleryloader = testloader_dict[name]['gallery'] rank1 = test(model, queryloader, galleryloader, use_gpu) save_checkpoint({ 'state_dict': model.state_dict(), 'rank1': rank1, 'epoch': 0, 'arch': args.arch, 'optimizer': (), }, args.save_dir, prefix='final_') if __name__ == '__main__': main()
[ [ [ 23, 37 ] ], [ [ 61, 69 ] ], [ [ 78, 80 ], [ 1115, 1117 ], [ 1131, 1133 ], [ 1144, 1146 ], [ 1088, 1090 ], [ 2647, 2649 ], [ 11393, 11395 ] ], [ [ 88, 91 ], [ 2850, 2853 ] ], [ [ 99, 103 ], [ 4754, 4758 ], [ 4957, 4961 ], [ 5487, 5491 ], [ 5589, 5593 ], [ 6061, 6065 ], [ 6793, 6797 ], [ 7176, 7180 ], [ 7253, 7257 ], [ 7687, 7691 ], [ 7853, 7857 ], [ 7930, 7934 ] ], [ [ 111, 119 ], [ 5001, 5009 ] ], [ [ 127, 141 ], [ 2870, 2873 ], [ 4626, 4629 ] ], [ [ 149, 160 ], [ 7463, 7465 ], [ 7501, 7503 ], [ 8140, 8142 ], [ 8178, 8180 ] ], [ [ 168, 176 ], [ 3094, 3102 ] ], [ [ 185, 190 ], [ 1399, 1404 ], [ 1468, 1473 ], [ 1657, 1662 ], [ 1747, 1752 ], [ 1849, 1854 ], [ 1980, 1985 ], [ 2179, 2184 ], [ 2257, 2262 ], [ 2715, 2720 ], [ 6965, 6970 ], [ 7429, 7434 ], [ 8106, 8111 ], [ 8459, 8464 ], [ 8526, 8531 ], [ 9261, 9266 ], [ 9601, 9606 ], [ 9945, 9950 ] ], [ [ 198, 212 ], [ 3735, 3737 ] ], [ [ 220, 249 ], [ 3053, 3058 ] ], [ [ 268, 283 ], [ 1283, 1298 ] ], [ [ 285, 305 ], [ 3250, 3270 ] ], [ [ 307, 323 ] ], [ [ 325, 344 ] ], [ [ 380, 396 ], [ 3222, 3238 ] ], [ [ 419, 425 ], [ 3404, 3410 ] ], [ [ 455, 471 ], [ 3804, 3820 ] ], [ [ 473, 488 ] ], [ [ 525, 537 ], [ 3631, 3643 ], [ 3925, 3937 ] ], [ [ 575, 587 ], [ 5209, 5221 ], [ 5235, 5247 ], [ 5267, 5279 ], [ 5298, 5310 ], [ 6922, 6934 ] ], [ [ 624, 630 ], [ 2863, 2869 ] ], [ [ 632, 642 ] ], [ [ 682, 697 ], [ 3576, 3591 ] ], [ [ 699, 714 ], [ 12106, 12121 ] ], [ [ 716, 737 ], [ 11896, 11917 ] ], [ [ 739, 747 ], [ 6150, 6158 ] ], [ [ 755, 778 ], [ 3672, 3695 ] ], [ [ 780, 795 ], [ 11160, 11175 ], [ 12692, 12707 ], [ 13707, 13722 ] ], [ [ 797, 819 ], [ 3979, 4001 ] ], [ [ 858, 882 ], [ 4505, 4529 ] ], [ [ 924, 939 ], [ 2581, 2596 ] ], [ [ 975, 983 ], [ 8704, 8712 ] ], [ [ 1017, 1031 ] ], [ [ 1068, 1085 ], [ 9318, 9335 ], [ 9657, 9674 ], [ 10001, 10018 ] ], [ [ 1189, 1204 ], [ 10986, 11001 ], [ 11049, 11064 ], [ 12518, 12533 ], [ 12581, 12596 ], [ 13549, 13564 ], [ 13604, 13619 ] ], [ [ 1207, 1218 ], [ 10728, 10739 ], [ 11992, 12003 ], [ 12260, 12271 ] ], [ [ 1221, 1230 ], [ 10706, 10715 ], [ 11970, 11979 ], [ 12238, 12247 ] ], [ [ 1238, 1245 ], [ 10749, 10756 ], [ 11146, 11153 ], [ 12005, 12012 ], [ 12281, 12288 ], [ 12678, 12685 ], [ 13693, 13700 ] ], [ [ 1274, 1280 ], [ 1308, 1314 ] ], [ [ 1301, 1305 ], [ 2597, 2601 ], [ 2619, 2623 ], [ 2684, 2688 ], [ 2748, 2752 ], [ 2815, 2819 ], [ 2879, 2883 ], [ 2956, 2960 ], [ 3026, 3030 ], [ 3271, 3275 ], [ 3380, 3384 ], [ 3427, 3431 ], [ 3499, 3503 ], [ 3609, 3613 ], [ 3644, 3648 ], [ 3703, 3707 ], [ 3882, 3886 ], [ 3909, 3913 ], [ 3938, 3942 ], [ 4002, 4006 ], [ 3960, 3964 ], [ 4103, 4107 ], [ 4170, 4174 ], [ 4467, 4471 ], [ 4635, 4639 ], [ 6208, 6212 ], [ 8387, 8391 ], [ 8776, 8780 ], [ 10836, 10840 ], [ 10881, 10885 ], [ 11280, 11284 ], [ 11323, 11327 ], [ 12367, 12371 ], [ 12413, 12417 ], [ 12812, 12816 ], [ 12875, 12879 ], [ 13460, 13464 ], [ 13827, 13831 ], [ 13870, 13874 ] ], [ [ 1334, 1345 ] ], [ [ 1435, 1449 ], [ 13164, 13178 ] ], [ [ 2502, 2506 ], [ 13935, 13939 ] ], [ [ 5116, 5121 ], [ 10686, 10691 ], [ 11950, 11955 ], [ 12218, 12223 ] ], [ [ 6811, 6815 ], [ 4381, 4385 ], [ 11106, 11110 ], [ 12638, 12642 ], [ 13653, 13657 ] ], [ [ 9074, 9092 ], [ 11490, 11508 ], [ 11537, 11555 ] ], [ [ 9416, 9438 ], [ 11451, 11473 ] ], [ [ 9755, 9772 ], [ 10620, 10637 ] ], [ [ 10095, 10102 ], [ 13277, 13284 ], [ 13389, 13396 ] ], [ [ 11359, 11369 ], [ 4894, 4904 ] ], [ [ 12912, 12921 ], [ 4916, 4925 ] ], [ [ 2705, 2712 ], [ 2971, 2978 ], [ 3239, 3246 ], [ 3527, 3534 ], [ 3768, 3775 ], [ 3860, 3867 ], [ 4421, 4428 
] ], [ [ 2770, 2777 ], [ 2971, 2978 ], [ 3239, 3246 ], [ 3527, 3534 ], [ 3768, 3775 ], [ 3860, 3867 ], [ 4421, 4428 ] ], [ [ 3282, 3293 ] ], [ [ 3295, 3310 ], [ 4267, 4282 ], [ 4326, 4341 ] ], [ [ 3792, 3801 ] ] ]
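The two helpers at the top of the training script (corr_metric and replace_weight) can be exercised in isolation on a toy layer, which makes the SVD-based column replacement easier to see. A minimal sketch, assuming both functions are in scope and torch is installed; the layer size is arbitrary.

import torch
import torch.nn as nn

torch.manual_seed(0)
layer = nn.Linear(8, 16, bias=False)          # weight shape: 16 x 8
before = layer.weight.clone()

replace_weight(layer)                         # rebuilds the weight from SVD-aligned columns
print(corr_metric(layer.weight))             # correlation metric defined in the same script
print(torch.equal(before, layer.weight))     # False once the weight has been replaced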
# Copyright (c) 2018, DjaoDjin inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED # TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; # OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR # OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF # ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """ The renewals command is intended to be run as part of an automated script run at least once a day. It will - recognize revenue for past periods (see :doc:`ledger <ledger>`). - extends active subscriptions - create charges for new periods - trigger expiration notices Every functions part of the renewals script are explicitly written to be idempotent. Calling the scripts multiple times for the same timestamp (i.e. with the ``--at-time`` command line argument) will generate the appropriate ``Transaction`` and ``Charge`` only once. **Example cron setup**: .. code-block:: bash $ cat /etc/cron.daily/renewals #!/bin/sh cd /var/*mysite* && python manage.py renewals """ import logging, time from django.core.management.base import BaseCommand from ...models import get_broker from ...renewals import (create_charges_for_balance, complete_charges, extend_subscriptions, recognize_income, trigger_expiration_notices) from ...utils import datetime_or_now from ... 
import settings LOGGER = logging.getLogger(__name__) class Command(BaseCommand): help = """Recognized backlog, extends subscription and charge due balance on credit cards""" def add_arguments(self, parser): parser.add_argument('--dry-run', action='store_true', dest='dry_run', default=False, help='Do not commit transactions nor submit charges to processor') parser.add_argument('--no-charges', action='store_true', dest='no_charges', default=False, help='Do not submit charges to processor') parser.add_argument('--at-time', action='store', dest='at_time', default=None, help='Specifies the time at which the command runs') def handle(self, *args, **options): #pylint:disable=broad-except dry_run = options['dry_run'] no_charges = options['no_charges'] end_period = datetime_or_now(options['at_time']) if dry_run: LOGGER.warning("dry_run: no changes will be committed.") if no_charges: LOGGER.warning("no_charges: no charges will be submitted.") try: recognize_income(end_period, dry_run=dry_run) except Exception as err: LOGGER.exception("recognize_income: %s", err) try: extend_subscriptions(end_period, dry_run=dry_run) except Exception as err: LOGGER.exception("extend_subscriptions: %s", err) try: create_charges_for_balance( end_period, dry_run=dry_run or no_charges) except Exception as err: LOGGER.exception( "Unable to create charges for balance on broker '%s'", get_broker()) if not (dry_run or no_charges): # Let's complete the in flight charges after we have given # them time to settle. time.sleep(30) complete_charges() # Trigger 'expires soon' notifications expiration_periods = settings.EXPIRE_NOTICE_DAYS for period in expiration_periods: trigger_expiration_notices( end_period, nb_days=period, dry_run=dry_run)
[ [ [ 2044, 2051 ], [ 2361, 2368 ] ], [ [ 2053, 2057 ], [ 4233, 4237 ] ], [ [ 2099, 2110 ], [ 2405, 2416 ] ], [ [ 2134, 2144 ], [ 4061, 4071 ] ], [ [ 2170, 2196 ], [ 3824, 3850 ] ], [ [ 2198, 2214 ], [ 4260, 4276 ] ], [ [ 2220, 2240 ], [ 3654, 3674 ] ], [ [ 2242, 2258 ], [ 3492, 3508 ] ], [ [ 2260, 2286 ], [ 4438, 4464 ] ], [ [ 2309, 2324 ], [ 3247, 3262 ] ], [ [ 2341, 2349 ], [ 4356, 4364 ] ], [ [ 2352, 2358 ], [ 3315, 3321 ], [ 3407, 3413 ], [ 3583, 3589 ], [ 3749, 3755 ], [ 3956, 3962 ] ], [ [ 2397, 2404 ] ] ]
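The management command above can also be driven programmatically, which is convenient in tests; this assumes a Django project with the app installed and mirrors `python manage.py renewals --dry-run --no-charges`.

from django.core.management import call_command

# Walks income recognition, subscription extension and charge creation
# without committing transactions or contacting the payment processor.
call_command('renewals', dry_run=True, no_charges=True)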
from __future__ import print_function import datetime import pickle import os.path from googleapiclient.discovery import build from google_auth_oauthlib.flow import InstalledAppFlow from google.auth.transport.requests import Request # If modifying these scopes, delete the file token.pickle. SCOPES = ['https://www.googleapis.com/auth/calendar'] def main(): """Shows basic usage of the Google Calendar API. Prints the start and name of the next 10 events on the user's calendar. """ creds = None # The file token.pickle stores the user's access and refresh tokens, and is # created automatically when the authorization flow completes for the first # time. if os.path.exists('token.pickle'): with open('token.pickle', 'rb') as token: creds = pickle.load(token) # If there are no (valid) credentials available, let the user log in. if not creds or not creds.valid: if creds and creds.expired and creds.refresh_token: creds.refresh(Request()) else: flow = InstalledAppFlow.from_client_secrets_file( 'credentials.json', SCOPES) print(flow) creds = flow.run_local_server(port=0) # Save the credentials for the next run with open('token.pickle', 'wb') as token: pickle.dump(creds, token) service = build('calendar', 'v3', credentials=creds) # Call the Calendar API #now = datetime.datetime.utcnow().isoformat() + 'Z' # 'Z' indicates UTC time #print('Getting the upcoming 10 events') #events_result = service.events().list(calendarId='primary', timeMin=now, # maxResults=10, singleEvents=True, # orderBy='startTime').execute() #events = events_result.get('items', []) #if not events: # print('No upcoming events found.') #for event in events: # start = event['start'].get('dateTime', event['start'].get('date')) # print(start, event['summary']) print("Creating events") # Refer to the Python quickstart on how to setup the environment: # https://developers.google.com/calendar/quickstart/python # Change the scope to 'https://www.googleapis.com/auth/calendar' and delete any # stored credentials. event = { 'summary': 'Google I/O 2019', 'location': '800 Howard St., San Francisco, CA 94103', 'description': 'A chance to hear more about Google\'s developer products.', 'start': { 'dateTime': '2019-08-28T09:00:00-07:00', 'timeZone': 'America/Los_Angeles', }, 'end': { 'dateTime': '2019-09-01T17:00:00-07:00', 'timeZone': 'America/Los_Angeles', }, 'recurrence': [ 'RRULE:FREQ=DAILY;COUNT=2' ], 'attendees': [ {'email': 'lpage@example.com'}, {'email': 'sbrin@example.com'}, ], 'reminders': { 'useDefault': False, 'overrides': [ {'method': 'email', 'minutes': 24 * 60}, {'method': 'popup', 'minutes': 10}, ], }, } event = service.events().insert(calendarId='primary', body=event).execute() print ('Event created: %s' % (event.get('htmlLink'))) if __name__ == '__main__': main()
[ [ [ 23, 37 ] ], [ [ 45, 53 ] ], [ [ 61, 67 ], [ 795, 801 ], [ 1326, 1332 ] ], [ [ 75, 82 ], [ 693, 695 ] ], [ [ 121, 126 ], [ 1367, 1372 ] ], [ [ 165, 181 ], [ 1055, 1071 ] ], [ [ 225, 232 ], [ 1011, 1018 ] ], [ [ 293, 299 ], [ 1134, 1140 ] ], [ [ 352, 356 ], [ 3277, 3281 ] ] ]
import database


def load_shard_from_db(conf):
    #TODO: load shard from cache if exists
    shards = database.load_shard(conf)
    return shards


def get_shard(shards, url):
    """ Hash function for sharding scheme
        returns a dict with hostname and table name
        Eg: s = { 'hostname': 'node1', 'table_name': 'url_s1'}
    """
    if not shards:
        return {}
    else:
        return shards[hash(str(url['hostname'])+str(url['port'])+str(url['path'])) % len(shards)]
[ [ [ 7, 15 ], [ 105, 113 ] ], [ [ 22, 40 ] ], [ [ 156, 165 ] ] ]
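A toy run of get_shard() with an in-memory shard list (no database involved), assuming the function above is in scope. Because str hashes are salted per process, the URL-to-shard mapping is stable within a run but not across runs unless PYTHONHASHSEED is pinned.

shards = [
    {'hostname': 'node1', 'table_name': 'url_s1'},
    {'hostname': 'node2', 'table_name': 'url_s2'},
    {'hostname': 'node3', 'table_name': 'url_s3'},
]
url = {'hostname': 'example.com', 'port': 443, 'path': '/index.html'}

print(get_shard(shards, url))                             # one of the three shard dicts
print(get_shard(shards, url) == get_shard(shards, url))   # True within a single process
print(get_shard([], url))                                 # {} when no shards are loaded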
from nonebot import on_command, CommandSession @on_command('help', aliases=('h', '帮助'), only_to_me=False) async def manual(session: CommandSession): await session.send(f'[CQ:image,file=/admin/manual.png]') @manual.args_parser async def _(session: CommandSession): # do nothing return
[ [ [ 20, 30 ], [ 49, 59 ] ], [ [ 32, 46 ], [ 133, 147 ], [ 254, 268 ] ], [ [ 107, 211 ], [ 214, 220 ] ], [ [ 233, 298 ] ] ]
# Generated by Django 2.2.13 on 2020-06-30 06:51 from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('pipeline', '0004_hospital'), ] operations = [ migrations.RemoveField( model_name='hospital', name='sv_name', ), ]
[ [ [ 72, 82 ], [ 101, 111 ], [ 219, 229 ] ], [ [ 91, 100 ] ] ]
""" Definition of urls for polls viewing and voting. """ from django.conf.urls import url from app.models import Poll import app.views urlpatterns = [ url(r'^$', app.views.PollListView.as_view( queryset=Poll.objects.order_by('-pub_date')[:5], context_object_name='latest_poll_list', template_name='app/index.html',), name='home'), url(r'^(?P<pk>\d+)/$', app.views.PollDetailView.as_view( template_name='app/details.html'), name='detail'), url(r'^(?P<pk>\d+)/results/$', app.views.PollResultsView.as_view( template_name='app/results.html'), name='results'), url(r'^(?P<poll_id>\d+)/vote/$', app.views.vote, name='vote'), ]
[ [ [ 91, 94 ], [ 168, 171 ], [ 410, 413 ], [ 554, 557 ], [ 708, 711 ] ], [ [ 119, 123 ], [ 242, 246 ] ], [ [ 134, 143 ], [ 188, 191 ], [ 442, 445 ], [ 594, 597 ], [ 741, 744 ] ], [ [ 147, 158 ] ] ]
# Copyright (C) 2019 NTT DATA
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
Request Body validating middleware.
"""

import functools

import webob

from tacker._i18n import _
from tacker.api.validation import validators
from tacker.common import exceptions


def schema(request_body_schema):
    """Register a schema to validate request body.

    Registered schema will be used for validating request body just
    before API method executing.

    :param dict request_body_schema: a schema to validate request body
    """
    def add_validator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            schema_validator = validators._SchemaValidator(
                request_body_schema)
            try:
                schema_validator.validate(kwargs['body'])
            except KeyError:
                raise webob.exc.HTTPBadRequest(
                    explanation=_("Malformed request body"))

            return func(*args, **kwargs)
        return wrapper
    return add_validator


def query_schema(query_params_schema):
    """Register a schema to validate request query parameters.

    Registered schema will be used for validating request query params just
    before API method executing.

    :param query_params_schema: A dict, the JSON-Schema for validating the
                                query parameters.
    """
    def add_validator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            # NOTE(tpatil): The second argument of the method
            # calling this method should always be 'request'.
            if 'request' in kwargs:
                req = kwargs['request']
            else:
                req = args[1]

            try:
                req.GET.dict_of_lists()
            except UnicodeDecodeError:
                msg = _('Query string is not UTF-8 encoded')
                raise exceptions.ValidationError(msg)

            query_opts = {}
            query_opts.update(req.GET)
            schema_validator = validators._SchemaValidator(
                query_params_schema)
            schema_validator.validate(query_opts)

            return func(*args, **kwargs)
        return wrapper
    return add_validator
[ [ [ 681, 690 ], [ 1095, 1104 ], [ 1943, 1952 ] ], [ [ 698, 703 ], [ 1378, 1383 ] ], [ [ 739, 749 ], [ 1186, 1196 ], [ 2562, 2572 ] ], [ [ 776, 786 ], [ 2431, 2441 ] ], [ [ 793, 799 ] ], [ [ 1562, 1574 ] ] ]
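To see the decorator pattern end to end without pulling in Tacker, here is a self-contained sketch in which validators._SchemaValidator is stood in for by the jsonschema package; the schema dict and the create() method are hypothetical, not Tacker APIs.

import functools

import jsonschema


def body_schema(request_body_schema):
    """Same shape as schema() above, but backed directly by jsonschema."""
    def add_validator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            jsonschema.validate(kwargs['body'], request_body_schema)
            return func(*args, **kwargs)
        return wrapper
    return add_validator


create_vnf = {                                   # hypothetical request-body schema
    'type': 'object',
    'properties': {'name': {'type': 'string'}},
    'required': ['name'],
    'additionalProperties': False,
}


@body_schema(create_vnf)
def create(request=None, body=None):
    return body


print(create(body={'name': 'demo-vnf'}))          # validates, then echoes the body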
#!/usr/bin/env python3 # # Copyright (c) 2016, The OpenThread Authors. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # 3. Neither the name of the copyright holder nor the # names of its contributors may be used to endorse or promote products # derived from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. # import unittest import config import thread_cert from pktverify.consts import MLE_ADVERTISEMENT, MLE_PARENT_REQUEST, MLE_PARENT_RESPONSE, MLE_CHILD_UPDATE_RESPONSE, MLE_CHILD_ID_REQUEST, MLE_CHILD_ID_RESPONSE, MLE_LINK_REQUEST, MLE_LINK_ACCEPT_AND_REQUEST, ADDR_SOL_URI, SOURCE_ADDRESS_TLV, MODE_TLV, TIMEOUT_TLV, CHALLENGE_TLV, RESPONSE_TLV, LINK_LAYER_FRAME_COUNTER_TLV, MLE_FRAME_COUNTER_TLV, ROUTE64_TLV, ADDRESS16_TLV, LEADER_DATA_TLV, NETWORK_DATA_TLV, TLV_REQUEST_TLV, SCAN_MASK_TLV, CONNECTIVITY_TLV, LINK_MARGIN_TLV, VERSION_TLV, ADDRESS_REGISTRATION_TLV, ACTIVE_TIMESTAMP_TLV from pktverify.packet_verifier import PacketVerifier from pktverify.null_field import nullField LEADER = 1 ROUTER = 2 ED = 3 class Cert_5_5_2_LeaderReboot(thread_cert.TestCase): TOPOLOGY = { LEADER: { 'name': 'LEADER', 'mode': 'rsdn', 'panid': 0xface, 'router_selection_jitter': 1, 'whitelist': [ROUTER] }, ROUTER: { 'name': 'ROUTER', 'mode': 'rsdn', 'panid': 0xface, 'router_selection_jitter': 1, 'whitelist': [LEADER, ED] }, ED: { 'name': 'MED', 'is_mtd': True, 'mode': 'rsn', 'panid': 0xface, 'whitelist': [ROUTER] }, } def _setUpLeader(self): self.nodes[LEADER].add_whitelist(self.nodes[ROUTER].get_addr64()) self.nodes[LEADER].enable_whitelist() self.nodes[LEADER].set_router_selection_jitter(1) def test(self): self.nodes[LEADER].start() self.simulator.go(5) self.assertEqual(self.nodes[LEADER].get_state(), 'leader') self.nodes[ROUTER].start() self.simulator.go(5) self.assertEqual(self.nodes[ROUTER].get_state(), 'router') self.nodes[ED].start() self.simulator.go(5) self.assertEqual(self.nodes[ED].get_state(), 'child') self.nodes[LEADER].reset() self._setUpLeader() self.simulator.go(140) self.assertEqual(self.nodes[ROUTER].get_state(), 'leader') self.nodes[LEADER].start() self.simulator.go(5) self.assertEqual(self.nodes[LEADER].get_state(), 'router') addrs = self.nodes[ED].get_addrs() for addr in addrs: self.assertTrue(self.nodes[ROUTER].ping(addr)) def verify(self, pv): pkts = pv.pkts 
pv.summary.show() LEADER = pv.vars['LEADER'] ROUTER = pv.vars['ROUTER'] MED = pv.vars['MED'] leader_pkts = pkts.filter_wpan_src64(LEADER) _rpkts = pkts.filter_wpan_src64(ROUTER) # Step 2: The DUT MUST send properly formatted MLE Advertisements _rpkts.filter_mle_cmd(MLE_CHILD_ID_RESPONSE).must_next() _lpkts = leader_pkts.range(_rpkts.index) _lpkts.filter_mle_cmd(MLE_ADVERTISEMENT).must_next().must_verify( lambda p: {SOURCE_ADDRESS_TLV, LEADER_DATA_TLV, ROUTE64_TLV} == set(p.mle.tlv.type)) _rpkts.filter_mle_cmd(MLE_ADVERTISEMENT).must_next().must_verify( lambda p: {SOURCE_ADDRESS_TLV, LEADER_DATA_TLV, ROUTE64_TLV} == set(p.mle.tlv.type)) # Step 4: Router_1 MUST attempt to reattach to its original partition by # sending MLE Parent Requests to the All-Routers multicast # address (FFxx::xx) with a hop limit of 255. _rpkts.filter_mle_cmd(MLE_PARENT_REQUEST).must_next().must_verify( lambda p: {MODE_TLV, CHALLENGE_TLV, SCAN_MASK_TLV, VERSION_TLV} == set(p.mle.tlv.type)) lreset_start = _rpkts.index # Step 5: Leader MUST NOT respond to the MLE Parent Requests _lpkts.filter_mle_cmd(MLE_PARENT_RESPONSE).must_not_next() # Step 6:Router_1 MUST attempt to attach to any other Partition # within range by sending a MLE Parent Request. _rpkts.filter_mle_cmd(MLE_PARENT_REQUEST).must_next().must_verify( lambda p: {MODE_TLV, CHALLENGE_TLV, SCAN_MASK_TLV, VERSION_TLV} == set(p.mle.tlv.type)) lreset_stop = _rpkts.index # Step 3: The Leader MUST stop sending MLE advertisements. leader_pkts.range(lreset_start, lreset_stop).filter_mle_cmd(MLE_ADVERTISEMENT).must_not_next() # Step 7: Take over leader role of a new Partition and # begin transmitting MLE Advertisements with _rpkts.save_index(): _rpkts.filter_mle_cmd(MLE_ADVERTISEMENT).must_next().must_verify( lambda p: {SOURCE_ADDRESS_TLV, LEADER_DATA_TLV, ROUTE64_TLV} == set(p.mle.tlv.type)) # Step 8: Router_1 MUST respond with an MLE Child Update Response, # with the updated TLVs of the new partition _rpkts.filter_mle_cmd(MLE_CHILD_UPDATE_RESPONSE).must_next().must_verify( lambda p: {SOURCE_ADDRESS_TLV, MODE_TLV, LEADER_DATA_TLV, ADDRESS_REGISTRATION_TLV} < set(p.mle.tlv.type)) # Step 9: The Leader MUST send properly formatted MLE Parent # Requests to the All-Routers multicast address _lpkts.filter_mle_cmd(MLE_PARENT_REQUEST).must_next().must_verify( lambda p: {MODE_TLV, CHALLENGE_TLV, SCAN_MASK_TLV, VERSION_TLV} == set(p.mle.tlv.type)) # Step 10: Router_1 MUST send an MLE Parent Response _rpkts.filter_mle_cmd(MLE_PARENT_RESPONSE).must_next().must_verify( lambda p: { SOURCE_ADDRESS_TLV, LEADER_DATA_TLV, LINK_LAYER_FRAME_COUNTER_TLV, RESPONSE_TLV, CHALLENGE_TLV, LINK_MARGIN_TLV, CONNECTIVITY_TLV, VERSION_TLV } < set(p.mle.tlv.type)) # Step 11: Leader send MLE Child ID Request _lpkts.filter_mle_cmd(MLE_CHILD_ID_REQUEST).must_next().must_verify( lambda p: { RESPONSE_TLV, LINK_LAYER_FRAME_COUNTER_TLV, MODE_TLV, TIMEOUT_TLV, VERSION_TLV, TLV_REQUEST_TLV, ADDRESS16_TLV, NETWORK_DATA_TLV, ROUTE64_TLV, ACTIVE_TIMESTAMP_TLV } < set(p.mle.tlv.type)) #Step 12: Router_1 send MLE Child ID Response _rpkts.filter_mle_cmd(MLE_CHILD_ID_RESPONSE).must_next().must_verify( lambda p: {SOURCE_ADDRESS_TLV, LEADER_DATA_TLV, ADDRESS16_TLV, NETWORK_DATA_TLV, ROUTE64_TLV} < set( p.mle.tlv.type)) #Step 13: Leader send an Address Solicit Request _lpkts.filter_coap_request(ADDR_SOL_URI).must_next().must_verify( lambda p: p.coap.tlv.ext_mac_addr and p.coap.tlv.rloc16 is not nullField and p.coap.tlv.status != 0) #Step 14: Router_1 send an Address Solicit Response _rpkts.filter_coap_ack( 
ADDR_SOL_URI).must_next().must_verify(lambda p: p.coap.tlv.router_mask_assigned and p.coap.tlv.rloc16 is not nullField and p.coap.tlv.status == 0) #Step 15: Leader Send a Multicast Link Request _lpkts.filter_mle_cmd(MLE_LINK_REQUEST).must_next().must_verify( lambda p: {VERSION_TLV, TLV_REQUEST_TLV, SOURCE_ADDRESS_TLV, LEADER_DATA_TLV, CHALLENGE_TLV} < set( p.mle.tlv.type)) #Step 16: Router_1 send a Unicast Link Accept _rpkts.filter_mle_cmd(MLE_LINK_ACCEPT_AND_REQUEST).must_next().must_verify(lambda p: { VERSION_TLV, SOURCE_ADDRESS_TLV, RESPONSE_TLV, MLE_FRAME_COUNTER_TLV, LINK_MARGIN_TLV, LEADER_DATA_TLV } < set(p.mle.tlv.type)) #Step 17: Router_1 MUST respond with an ICMPv6 Echo Reply _rpkts.filter_ping_request().filter_wpan_dst64(MED).must_next() if __name__ == '__main__': unittest.main()
[ [ [ 1612, 1620 ], [ 9149, 9157 ] ], [ [ 1629, 1635 ] ], [ [ 1643, 1654 ], [ 2350, 2361 ] ], [ [ 1684, 1701 ], [ 4488, 4505 ], [ 4660, 4677 ], [ 5827, 5844 ], [ 6042, 6059 ] ], [ [ 1703, 1721 ], [ 5034, 5052 ], [ 5511, 5529 ], [ 6673, 6691 ] ], [ [ 1723, 1742 ], [ 5315, 5334 ], [ 6910, 6929 ] ], [ [ 1744, 1769 ], [ 6346, 6371 ] ], [ [ 1771, 1791 ], [ 7275, 7295 ] ], [ [ 1793, 1814 ], [ 4374, 4395 ], [ 7664, 7685 ] ], [ [ 1816, 1832 ], [ 8491, 8507 ] ], [ [ 1834, 1861 ], [ 8764, 8791 ] ], [ [ 1863, 1875 ], [ 7951, 7963 ], [ 8208, 8220 ] ], [ [ 1877, 1895 ], [ 4555, 4573 ], [ 4727, 4745 ], [ 6113, 6131 ], [ 6421, 6439 ], [ 6996, 7014 ], [ 7735, 7753 ], [ 8587, 8605 ], [ 8854, 8872 ] ], [ [ 1897, 1905 ], [ 5102, 5110 ], [ 5579, 5587 ], [ 6441, 6449 ], [ 6741, 6749 ], [ 7406, 7414 ] ], [ [ 1907, 1918 ], [ 7416, 7427 ] ], [ [ 1920, 1933 ], [ 5112, 5125 ], [ 5589, 5602 ], [ 6751, 6764 ], [ 7077, 7090 ], [ 8624, 8637 ] ], [ [ 1935, 1947 ], [ 7063, 7075 ], [ 7362, 7374 ], [ 8874, 8886 ] ], [ [ 1949, 1977 ], [ 7033, 7061 ], [ 7376, 7404 ] ], [ [ 1979, 2000 ], [ 8888, 8909 ] ], [ [ 2002, 2013 ], [ 4592, 4603 ], [ 4764, 4775 ], [ 6150, 6161 ], [ 7508, 7519 ], [ 7805, 7816 ] ], [ [ 2015, 2028 ], [ 7475, 7488 ], [ 7772, 7785 ] ], [ [ 2030, 2045 ], [ 4575, 4590 ], [ 4747, 4762 ], [ 6133, 6148 ], [ 6451, 6466 ], [ 7016, 7031 ], [ 7755, 7770 ], [ 8607, 8622 ], [ 8928, 8943 ] ], [ [ 2047, 2063 ], [ 7490, 7506 ], [ 7787, 7803 ] ], [ [ 2065, 2080 ], [ 7442, 7457 ], [ 8570, 8585 ] ], [ [ 2082, 2095 ], [ 5127, 5140 ], [ 5604, 5617 ], [ 6766, 6779 ] ], [ [ 2097, 2113 ], [ 7125, 7141 ] ], [ [ 2115, 2130 ], [ 7108, 7123 ], [ 8911, 8926 ] ], [ [ 2132, 2143 ], [ 5142, 5153 ], [ 5619, 5630 ], [ 6781, 6792 ], [ 7143, 7154 ], [ 7429, 7440 ], [ 8557, 8568 ], [ 8841, 8852 ] ], [ [ 2145, 2169 ], [ 6468, 6492 ] ], [ [ 2171, 2191 ], [ 7521, 7541 ] ], [ [ 2230, 2244 ] ], [ [ 2278, 2287 ], [ 8065, 8074 ], [ 8367, 8376 ] ], [ [ 2289, 2295 ], [ 2398, 2404 ], [ 2755, 2761 ], [ 3002, 3008 ], [ 3076, 3082 ], [ 3122, 3128 ], [ 3201, 3207 ], [ 3282, 3288 ], [ 3588, 3594 ], [ 3750, 3756 ], [ 3831, 3837 ] ], [ [ 2300, 2306 ], [ 2590, 2596 ], [ 2563, 2569 ], [ 2929, 2935 ], [ 3035, 3041 ], [ 3333, 3339 ], [ 3414, 3420 ], [ 3699, 3705 ], [ 3972, 3978 ] ], [ [ 2311, 2313 ], [ 2786, 2788 ], [ 2763, 2765 ], [ 3465, 3467 ], [ 3542, 3544 ], [ 3890, 3892 ] ], [ [ 2326, 2349 ] ] ]
#!/usr/bin/env python3 # # Araboly 2000 Advanced Server SP4 -- everyone's favourite board game... with IRC support and fancy colours! # Copyright (c) 2018 Lucio Andrés Illanes Albornoz <lucio@lucioillanes.de> # This project is licensed under the terms of the MIT licence. # from ArabolyGenerals import ArabolyGenerals from ArabolyMonad import ArabolyDecorator from ArabolyTypeClass import ArabolyTypeClass from ArabolyState import ArabolyGameState, ArabolyOutputLevel, ArabolyStringType from ArabolyTrade import ArabolyTrade import copy, os, sys, yaml @ArabolyDecorator() class ArabolyFree(ArabolyTypeClass): """XXX""" # {{{ dispatch_board(args, channel, context, output, src, status): XXX @staticmethod def dispatch_board(args, channel, context, output, src, status): if context.state != ArabolyGameState.AUCTION \ and context.state != ArabolyGameState.GAME \ and context.state != ArabolyGameState.PROPERTY: status = False elif len(args) \ or src not in context.players["byName"]: status = False else: output = ArabolyGenerals._board(channel, context, output, src) return args, channel, context, output, src, status # }}} # {{{ dispatch_bugcheck(channel, context, srcFull, status): XXX @staticmethod def dispatch_bugcheck(channel, context, srcFull, status): if not ArabolyGenerals._authorised(channel, context, srcFull): status = False else: snapshotPath = os.path.join("assets", "savefiles", "snapshot.dmp.{}".format(context.clientParams["hostname"])) print("Saving game snapshot to {}!".format(os.path.join("assets", "savefiles", snapshotPath))) with open(snapshotPath, "w+") as fileObject: yaml.dump(context, fileObject) sys.exit(1) return channel, context, srcFull, status # }}} # {{{ dispatch_help(channel, context): XXX @staticmethod def dispatch_help(channel, context, output): for helpLine in context.graphics["help"]: output = ArabolyGenerals._push_output(channel, context, output, helpLine, outputLevel=ArabolyOutputLevel.LEVEL_GRAPHICS) return channel, context, output # }}} # {{{ dispatch_join(args, channel, context, output, src, status): XXX @staticmethod def dispatch_join(args, channel, context, output, src, status): if context.state != ArabolyGameState.GAME \ and context.state != ArabolyGameState.SETUP: status = False elif src in context.players["byName"] \ or len(args): status = False else: newNum = None for otherNum in range(len(context.players["numMap"])): if context.players["numMap"][otherNum] == None: newNum = otherNum; break; if newNum == None: status = False else: context.players["byName"][src] = {"field":0, "name":src, "num":newNum, "properties":[], "wallet":1500} context.players["numMap"][newNum] = src output = ArabolyGenerals._push_output(channel, context, output, "Player {src} joins Araboly game!".format(**locals())) return args, channel, context, output, src, status # }}} # {{{ dispatch_kick(args, channel, context, output, srcFull, status): XXX @staticmethod def dispatch_kick(args, channel, context, output, srcFull, status): if context.state == ArabolyGameState.GAME \ or context.state == ArabolyGameState.SETUP: if len(args) != 1 or len(args[0]) < 1 \ or args[0] not in context.players["byName"]: status = False elif ArabolyGenerals._authorised(channel, context, srcFull): otherPlayers = [args[0]] output = ArabolyGenerals._push_output(channel, context, output, "Kicking {args[0]} from current Araboly game!".format(**locals())) context, output = ArabolyGenerals._remove_players(channel, context, output, otherPlayers) else: status = False return args, channel, context, output, srcFull, status # }}} # {{{ 
dispatch_melp(channel, context, output): XXX @staticmethod def dispatch_melp(channel, context, output): for explosionLine in context.graphics["explosion"]: output = ArabolyGenerals._push_output(channel, context, output, explosionLine, outputLevel=ArabolyOutputLevel.LEVEL_GRAPHICS) output = ArabolyGenerals._push_output(channel, context, output, "\u0001ACTION explodes.\u0001", outputLevel=ArabolyOutputLevel.LEVEL_GRAPHICS) return channel, context, output # }}} # {{{ dispatch_part(args, channel, context, output, src, status): XXX @staticmethod def dispatch_part(args, channel, context, output, src, status): if context.state == ArabolyGameState.GAME \ or context.state == ArabolyGameState.SETUP: if len(args) > 0 \ or src not in context.players["byName"]: status = False else: otherPlayers = [src] output = ArabolyGenerals._push_output(channel, context, output, "Player {src} parts Araboly game!".format(**locals())) context, output = ArabolyGenerals._remove_players(channel, context, output, otherPlayers) else: status = False return args, channel, context, output, src, status # }}} # {{{ dispatch_save(args, channel, context, output, srcFull, status): XXX def dispatch_save(args, channel, context, output, srcFull, status): if context.state != ArabolyGameState.AUCTION \ and context.state != ArabolyGameState.BANKRUPTCY \ and context.state != ArabolyGameState.GAME \ and context.state != ArabolyGameState.PROPERTY: status = False elif len(args) != 1 \ or not ArabolyGenerals._authorised(channel, context, srcFull): status = False else: snapshotPath = os.path.join("assets", "savefiles", os.path.basename(args[0])) output = ArabolyGenerals._push_output(channel, context, output, "Saving snapshot to {snapshotPath}!".format(**locals())) with open(snapshotPath, "w") as fileObject: gameSnapshot = copy.deepcopy(context) delattr(gameSnapshot, "clientParams") delattr(gameSnapshot, "graphics") delattr(gameSnapshot, "kades") yaml.dump(gameSnapshot, fileObject) output = ArabolyGenerals._push_output(channel, context, output, "Saved snapshot to {snapshotPath}!".format(**locals())) return args, channel, context, output, srcFull, status # }}} # {{{ dispatch_status(args, channel, context, output, src, status): XXX def dispatch_status(args, channel, context, output, src, status): if context.state != ArabolyGameState.AUCTION \ and context.state != ArabolyGameState.BANKRUPTCY \ and context.state != ArabolyGameState.GAME \ and context.state != ArabolyGameState.PROPERTY: status = False elif len(args) == 0: statusPlayer = src elif len(args) == 1: statusPlayer = args[0] else: status = False if status: if not statusPlayer in context.players["byName"].keys(): status = False else: playerField = context.board[context.players["byName"][statusPlayer]["field"]] playerProps = context.players["byName"][statusPlayer]["properties"] playerWallet = context.players["byName"][statusPlayer]["wallet"] output = ArabolyGenerals._push_output(channel, context, output, "Araboly status for player {statusPlayer}:".format(**locals()), outputLevel=ArabolyOutputLevel.LEVEL_NODELAY) output = ArabolyGenerals._push_output(channel, context, output, "Field....: {playerField[title]}".format(**locals()), outputLevel=ArabolyOutputLevel.LEVEL_NODELAY) output = ArabolyGenerals._push_output(channel, context, output, "Wallet...: ${playerWallet}".format(**locals()), outputLevel=ArabolyOutputLevel.LEVEL_NODELAY) if len(playerProps): output = ArabolyGenerals._push_output(channel, context, output, "Properties owned:", outputLevel=ArabolyOutputLevel.LEVEL_NODELAY) for 
playerPropNum in playerProps: playerProp = context.board[playerPropNum] mortgagedString = " (\u001fMORTGAGED\u001f)" if playerProp["mortgaged"] else "" developmentsList = [] for levelNum in range(playerProp["level"] + 1): developmentsList += playerProp["strings"][ArabolyStringType.NAME][levelNum] developmentsString = ", level {}, developments: {}".format(playerProp["level"], ", ".join(developmentsList)) output = ArabolyGenerals._push_output(channel, context, output, "\u0003{:02d}${}{} (#{}) -- {}{}".format(playerProp["colourMiRC"], playerProp["price"], mortgagedString, playerProp["field"], playerProp["title"], developmentsString), outputLevel=ArabolyOutputLevel.LEVEL_NODELAY) output = ArabolyTrade._status(channel, context, output, statusPlayer) output = ArabolyGenerals._push_output(channel, context, output, "Current turn: {}".format(context.players["numMap"][context.players["curNum"]]), outputLevel=ArabolyOutputLevel.LEVEL_NODELAY) return args, channel, context, output, src, status # }}} # {{{ dispatch_stop(args, channel, context, output, src, srcFull, status): XXX @staticmethod def dispatch_stop(args, channel, context, output, src, srcFull, status): if context.state == ArabolyGameState.AUCTION \ or context.state == ArabolyGameState.BANKRUPTCY \ or context.state == ArabolyGameState.GAME \ or context.state == ArabolyGameState.PROPERTY \ or context.state == ArabolyGameState.SETUP: if len(args) > 0: status = False elif ArabolyGenerals._authorised(channel, context, srcFull): otherPlayers = list(context.players["byName"].keys()) context, output = ArabolyGenerals._remove_players(channel, context, output, otherPlayers) else: status = False return args, channel, context, output, src, srcFull, status # }}} # vim:expandtab foldmethod=marker sw=4 ts=4 tw=0
[ [ [ 303, 318 ], [ 1158, 1173 ], [ 1444, 1459 ], [ 2143, 2158 ], [ 3217, 3232 ], [ 3829, 3844 ], [ 3951, 3966 ], [ 4107, 4122 ], [ 4496, 4511 ], [ 4630, 4645 ], [ 5278, 5293 ], [ 5422, 5437 ], [ 6100, 6115 ], [ 6308, 6323 ], [ 6754, 6769 ], [ 7939, 7954 ], [ 8129, 8144 ], [ 8309, 8324 ], [ 8525, 8540 ], [ 9259, 9274 ], [ 9639, 9654 ], [ 10430, 10445 ], [ 10590, 10605 ] ], [ [ 344, 360 ], [ 555, 571 ] ], [ [ 390, 406 ], [ 592, 608 ] ], [ [ 432, 448 ], [ 817, 833 ], [ 875, 891 ], [ 933, 949 ], [ 2494, 2510 ], [ 2556, 2572 ], [ 3592, 3608 ], [ 3646, 3662 ], [ 5002, 5018 ], [ 5056, 5072 ], [ 5783, 5799 ], [ 5845, 5861 ], [ 5907, 5923 ], [ 5969, 5985 ], [ 7113, 7129 ], [ 7175, 7191 ], [ 7237, 7253 ], [ 7299, 7315 ], [ 10096, 10112 ], [ 10154, 10170 ], [ 10212, 10228 ], [ 10270, 10286 ], [ 10328, 10344 ] ], [ [ 450, 468 ], [ 2220, 2238 ], [ 4578, 4596 ], [ 4729, 4747 ], [ 8070, 8088 ], [ 8250, 8268 ], [ 8425, 8443 ], [ 8613, 8631 ], [ 9494, 9512 ], [ 9787, 9805 ] ], [ [ 470, 487 ], [ 9059, 9076 ] ], [ [ 513, 525 ], [ 9553, 9565 ] ], [ [ 533, 537 ], [ 6507, 6511 ] ], [ [ 539, 541 ], [ 1568, 1570 ], [ 1719, 1721 ], [ 6224, 6226 ], [ 6260, 6262 ] ], [ [ 543, 546 ], [ 1887, 1890 ] ], [ [ 548, 552 ], [ 1844, 1848 ], [ 6697, 6701 ] ], [ [ 580, 591 ] ] ]
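The dispatch_save handler in the Araboly sample above snapshots the live game by deep-copying the context, deleting attributes that should not be persisted (clientParams, graphics, kades), and writing the rest with yaml.dump. A minimal, self-contained sketch of that pattern follows; the GameContext class and its fields are hypothetical stand-ins, not Araboly names.

import copy
import yaml

class GameContext:
    """Hypothetical stand-in for the Araboly game context."""
    def __init__(self):
        self.state = "GAME"
        self.players = {"byName": {}, "numMap": [None] * 4}
        self.clientParams = {"hostname": "irc.example.net"}  # runtime-only data

snapshot = copy.deepcopy(GameContext())      # never mutate the live object
delattr(snapshot, "clientParams")          # strip runtime-only attributes before dumping
with open("snapshot.dmp", "w") as fileObject:
    yaml.dump(snapshot, fileObject)        # same call the handlers above make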
import setuptools setuptools.setup( name="video_to_ascii", version="1.0.6", author="Joel Ibaceta", author_email="mail@joelibaceta.com", description="A simple tool to play a video using ascii characters", url="https://github.com/joelibaceta/video-to-ascii", packages=setuptools.find_packages(), classifiers=[ "Programming Language :: Python :: 3", "License :: OSI Approved :: MIT License", "Operating System :: OS Independent", ], scripts=['bin/video-to-ascii'], )
[ [ [ 7, 17 ], [ 19, 29 ], [ 295, 305 ] ] ]
import unittest from SDWLE.agents.trade.possible_play import PossiblePlays from SDWLE.cards import Wisp, WarGolem, BloodfenRaptor, RiverCrocolisk, AbusiveSergeant, ArgentSquire from testsSDW.agents.trade.test_helpers import TestHelpers from testsSDW.agents.trade.test_case_mixin import TestCaseMixin class TestTradeAgent(TestCaseMixin, unittest.TestCase): def test_setup_smoke(self): game = TestHelpers().make_game() self.add_minions(game, 0, Wisp(), WarGolem()) self.add_minions(game, 1, BloodfenRaptor()) self.assertEqual(2, len(game.players[0].minions)) self.assertEqual(1, len(game.players[1].minions)) def test_basic_trade(self): game = TestHelpers().make_game() self.add_minions(game, 1, Wisp(), WarGolem()) self.add_minions(game, 0, BloodfenRaptor()) self.make_all_active(game) game.play_single_turn() self.assert_minions(game.players[1], "War Golem") self.assert_minions(game.players[0], "Bloodfen Raptor") def test_buff_target(self): game = TestHelpers().make_game() self.add_minions(game, 0, BloodfenRaptor(), RiverCrocolisk()) self.make_all_active(game) game.players[0].agent.player = game.players[0] self.add_minions(game, 0, AbusiveSergeant()) game.play_single_turn() def test_hero_power(self): game = self.make_game() cards = self.make_cards(game.current_player, ArgentSquire()) possible_plays = PossiblePlays(cards, 10, allow_hero_power=True) self.assertEqual(1, len(possible_plays.plays()))
[ [ [ 7, 15 ], [ 338, 346 ] ], [ [ 61, 74 ], [ 1506, 1519 ] ], [ [ 99, 103 ], [ 466, 470 ], [ 764, 768 ] ], [ [ 105, 113 ], [ 474, 482 ], [ 772, 780 ] ], [ [ 115, 129 ], [ 520, 534 ], [ 818, 832 ], [ 1136, 1150 ] ], [ [ 131, 145 ], [ 1154, 1168 ] ], [ [ 147, 162 ], [ 1296, 1311 ] ], [ [ 164, 176 ], [ 1465, 1477 ] ], [ [ 224, 235 ], [ 405, 416 ], [ 703, 714 ], [ 1075, 1086 ] ], [ [ 286, 299 ], [ 323, 336 ] ], [ [ 308, 322 ] ] ]
import numpy as np from numpy.linalg import norm from ._jit import jit @jit def J2_perturbation(t0, state, k, J2, R): r"""Calculates J2_perturbation acceleration (km/s2) .. math:: \vec{p} = \frac{3}{2}\frac{J_{2}\mu R^{2}}{r^{4}}\left [\frac{x}{r}\left ( 5\frac{z^{2}}{r^{2}}-1 \right )\vec{i} + \frac{y}{r}\left ( 5\frac{z^{2}}{r^{2}}-1 \right )\vec{j} + \frac{z}{r}\left ( 5\frac{z^{2}}{r^{2}}-3 \right )\vec{k}\right] .. versionadded:: 0.9.0 Parameters ---------- t0 : float Current time (s) state : numpy.ndarray Six component state vector [x, y, z, vx, vy, vz] (km, km/s). k : float gravitational constant, (km^3/s^2) J2: float oblateness factor R: float attractor radius Note ---- The J2 accounts for the oblateness of the attractor. The formula is given in Howard Curtis, (12.30) """ r_vec = state[:3] r = norm(r_vec) factor = (3.0 / 2.0) * k * J2 * (R ** 2) / (r ** 5) a_x = 5.0 * r_vec[2] ** 2 / r ** 2 - 1 a_y = 5.0 * r_vec[2] ** 2 / r ** 2 - 1 a_z = 5.0 * r_vec[2] ** 2 / r ** 2 - 3 return np.array([a_x, a_y, a_z]) * r_vec * factor @jit def J3_perturbation(t0, state, k, J3, R): r"""Calculates J3_perturbation acceleration (km/s2) Parameters ---------- t0 : float Current time (s) state : numpy.ndarray Six component state vector [x, y, z, vx, vy, vz] (km, km/s). k : float gravitational constant, (km^3/s^2) J3: float oblateness factor R: float attractor radius Note ---- The J3 accounts for the oblateness of the attractor. The formula is given in Howard Curtis, problem 12.8 This perturbation has not been fully validated, see https://github.com/poliastro/poliastro/pull/398 """ r_vec = state[:3] r = norm(r_vec) factor = (1.0 / 2.0) * k * J3 * (R ** 3) / (r ** 5) cos_phi = r_vec[2] / r a_x = 5.0 * r_vec[0] / r * (7.0 * cos_phi ** 3 - 3.0 * cos_phi) a_y = 5.0 * r_vec[1] / r * (7.0 * cos_phi ** 3 - 3.0 * cos_phi) a_z = 3.0 * (35.0 / 3.0 * cos_phi ** 4 - 10.0 * cos_phi ** 2 + 1) return np.array([a_x, a_y, a_z]) * factor @jit def atmospheric_drag(t0, state, k, R, C_D, A, m, H0, rho0): r"""Calculates atmospheric drag acceleration (km/s2) .. math:: \vec{p} = -\frac{1}{2}\rho v_{rel}\left ( \frac{C_{d}A}{m} \right )\vec{v_{rel}} .. versionadded:: 0.9.0 Parameters ---------- t0 : float Current time (s) state : numpy.ndarray Six component state vector [x, y, z, vx, vy, vz] (km, km/s). k : float gravitational constant, (km^3/s^2) R : float radius of the attractor (km) C_D: float dimensionless drag coefficient () A: float frontal area of the spacecraft (km^2) m: float mass of the spacecraft (kg) H0 : float atmospheric scale height, (km) rho0: float the exponent density pre-factor, (kg / m^3) Note ---- This function provides the acceleration due to atmospheric drag. 
We follow Howard Curtis, section 12.4 the atmospheric density model is rho(H) = rho0 x exp(-H / H0) """ H = norm(state[:3]) v_vec = state[3:] v = norm(v_vec) B = C_D * A / m rho = rho0 * np.exp(-(H - R) / H0) return -(1.0 / 2.0) * rho * B * v * v_vec @jit def shadow_function(r_sat, r_sun, R): r"""Determines whether the satellite is in attractor's shadow, uses algorithm 12.3 from Howard Curtis Parameters ---------- r_sat : numpy.ndarray position of the satellite in the frame of attractor (km) r_sun : numpy.ndarray position of star in the frame of attractor (km) R : float radius of body (attractor) that creates shadow (km) """ r_sat_norm = np.sqrt(np.sum(r_sat ** 2)) r_sun_norm = np.sqrt(np.sum(r_sun ** 2)) theta = np.arccos(np.dot(r_sat, r_sun) / r_sat_norm / r_sun_norm) theta_1 = np.arccos(R / r_sat_norm) theta_2 = np.arccos(R / r_sun_norm) return theta < theta_1 + theta_2 def third_body(t0, state, k, k_third, third_body): r"""Calculates 3rd body acceleration (km/s2) .. math:: \vec{p} = \mu_{m}\left ( \frac{\vec{r_{m/s}}}{r_{m/s}^3} - \frac{\vec{r_{m}}}{r_{m}^3} \right ) Parameters ---------- t0 : float Current time (s) state : numpy.ndarray Six component state vector [x, y, z, vx, vy, vz] (km, km/s). k : float gravitational constant, (km^3/s^2) third_body: a callable object returning the position of 3rd body third body that causes the perturbation Note ---- This formula is taken from Howard Curtis, section 12.10. As an example, a third body could be the gravity from the Moon acting on a small satellite. """ body_r = third_body(t0) delta_r = body_r - state[:3] return k_third * delta_r / norm(delta_r) ** 3 - k_third * body_r / norm(body_r) ** 3 def radiation_pressure(t0, state, k, R, C_R, A, m, Wdivc_s, star): r"""Calculates radiation pressure acceleration (km/s2) .. math:: \vec{p} = -\nu \frac{S}{c} \left ( \frac{C_{r}A}{m} \right )\frac{\vec{r}}{r} Parameters ---------- t0 : float Current time (s) state : numpy.ndarray Six component state vector [x, y, z, vx, vy, vz] (km, km/s). k : float gravitational constant, (km^3/s^2) R : float radius of the attractor C_R: float dimensionless radiation pressure coefficient, 1 < C_R < 2 () A: float effective spacecraft area (km^2) m: float mass of the spacecraft (kg) Wdivc_s : float total star emitted power divided by the speed of light (W * s / km) star: a callable object returning the position of star in attractor frame star position Note ---- This function provides the acceleration due to star light pressure. We follow Howard Curtis, section 12.9 """ r_star = star(t0) r_sat = state[:3] P_s = Wdivc_s / (norm(r_star) ** 2) nu = float(shadow_function(r_sat, r_star, R)) return -nu * P_s * (C_R * A / m) * r_star / norm(r_star)
[ [ [ 7, 18 ], [ 1148, 1150 ], [ 2186, 2188 ], [ 3350, 3352 ], [ 3875, 3877 ], [ 3883, 3885 ], [ 3920, 3922 ], [ 3928, 3930 ], [ 3961, 3963 ], [ 3971, 3973 ], [ 4033, 4035 ], [ 4073, 4075 ] ], [ [ 44, 48 ], [ 938, 942 ], [ 1872, 1876 ], [ 3254, 3258 ], [ 3301, 3305 ], [ 4977, 4981 ], [ 5017, 5021 ], [ 6125, 6129 ], [ 6243, 6247 ] ], [ [ 68, 71 ], [ 75, 78 ], [ 1194, 1197 ], [ 2224, 2227 ], [ 3422, 3425 ] ], [ [ 83, 98 ] ], [ [ 1202, 1217 ] ], [ [ 2232, 2248 ] ], [ [ 3430, 3445 ], [ 6160, 6175 ] ], [ [ 4143, 4153 ] ], [ [ 5041, 5059 ] ] ]
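The J2_perturbation docstring in the sample above gives the perturbing acceleration in closed form. As a quick numerical check, here is a standalone re-evaluation of that formula rather than the jitted poliastro routine itself; the constants k = 398600 km^3/s^2, J2 = 1.08263e-3, R = 6378 km and the 7000 km state vector are illustrative Earth-like values, not taken from the file.

import numpy as np
from numpy.linalg import norm

def j2_acceleration(state, k, J2, R):
    # Direct transcription of the docstring formula.
    r_vec = state[:3]
    r = norm(r_vec)
    factor = (3.0 / 2.0) * k * J2 * (R ** 2) / (r ** 5)
    a = 5.0 * r_vec[2] ** 2 / r ** 2 - np.array([1.0, 1.0, 3.0])   # x/y terms use -1, z uses -3
    return a * r_vec * factor

state = np.array([7000.0, 0.0, 0.0, 0.0, 7.5, 0.0])   # [x, y, z, vx, vy, vz] in km, km/s
print(j2_acceleration(state, 398600.0, 1.08263e-3, 6378.0))   # km/s^2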
# coding: utf-8 # # Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file # except in compliance with the License. A copy of the License is located at # # http://aws.amazon.com/apache2.0/ # # or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for # the specific language governing permissions and limitations under the License. # import pprint import re # noqa: F401 import six import typing from enum import Enum if typing.TYPE_CHECKING: from typing import Dict, List, Optional, Union, Any from datetime import datetime class PurchasableState(Enum): """ Whether or not the in-skill product is purchasable by customers. A product that is not purchasable will prevent new customers from being prompted to purchase the product. Customers who already own the product will see no effect and continue to have access to the product features. Allowed enum values: [PURCHASABLE, NOT_PURCHASABLE] """ PURCHASABLE = "PURCHASABLE" NOT_PURCHASABLE = "NOT_PURCHASABLE" def to_dict(self): # type: () -> Dict[str, Any] """Returns the model properties as a dict""" result = {self.name: self.value} return result def to_str(self): # type: () -> str """Returns the string representation of the model""" return pprint.pformat(self.value) def __repr__(self): # type: () -> str """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): # type: (Any) -> bool """Returns true if both objects are equal""" if not isinstance(other, PurchasableState): return False return self.__dict__ == other.__dict__ def __ne__(self, other): # type: (Any) -> bool """Returns true if both objects are not equal""" return not self == other
[ [ [ 586, 592 ], [ 1547, 1553 ] ], [ [ 600, 602 ] ], [ [ 624, 627 ] ], [ [ 635, 641 ], [ 669, 675 ] ], [ [ 659, 663 ], [ 806, 810 ] ], [ [ 714, 718 ] ], [ [ 720, 724 ] ], [ [ 726, 734 ] ], [ [ 736, 741 ] ], [ [ 743, 746 ] ], [ [ 772, 780 ] ], [ [ 789, 805 ], [ 1839, 1855 ] ] ]
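A small usage sketch for the generated enum above; the class is re-declared here only so the snippet runs without the Alexa SDK package installed.

from enum import Enum

class PurchasableState(Enum):
    PURCHASABLE = "PURCHASABLE"
    NOT_PURCHASABLE = "NOT_PURCHASABLE"

state = PurchasableState.PURCHASABLE
print(state.value)                    # "PURCHASABLE"
print({state.name: state.value})      # the mapping to_dict() builds: {'PURCHASABLE': 'PURCHASABLE'}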
# -*- coding: utf-8 -*- # Copyright 2021 Cohesity Inc. import cohesity_management_sdk.models.exchange_database_copy_info import cohesity_management_sdk.models.exchange_database_info class ApplicationServerInfo(object): """Implementation of the 'ApplicationServerInfo' model. Specifies the Information about the Exchange Server Node. Attributes: database_copy_info_list (list of ExchangeDatabaseCopyInfo): Specifies the list of all the copies of the Exchange databases(that are part of DAG) that are present on this Exchange Node. database_info_list (list of ExchangeDatabaseInfo): Specifies the list of all the databases available on the standalone Exchange server node. This is populated for the Standlone Exchange Servers. fqdn (string): Specifies the fully qualified domain name of the Exchange Server. guid (string): Specifies the Guid of the Exchange Application Server. name (string): Specifies the display name of the Exchange Application Server. total_size_bytes (int): Specifies the total size of all Exchange database copies in all the Exchange Application Servers that are part of the DAG. """ # Create a mapping from Model property names to API property names _names = { "database_copy_info_list": 'databaseCopyInfoList', "database_info_list":'databaseInfoList', "fqdn": 'fqdn', "guid": 'guid', "name": 'name', "total_size_bytes":'totalSizeBytes' } def __init__(self, database_copy_info_list=None, database_info_list=None, fqdn=None, guid=None, name=None, total_size_bytes=None): """Constructor for the ApplicationServerInfo class""" # Initialize members of the class self.database_copy_info_list = database_copy_info_list self.database_info_list = database_info_list self.fqdn = fqdn self.guid = guid self.name = name self.total_size_bytes = total_size_bytes @classmethod def from_dictionary(cls, dictionary): """Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class. """ if dictionary is None: return None # Extract variables from the dictionary database_copy_info_list = None if dictionary.get('databaseCopyInfoList') != None: database_copy_info_list = list() for structure in dictionary.get('databaseCopyInfoList'): database_copy_info_list.append(cohesity_management_sdk.models.exchange_database_copy_info.ExchangeDatabaseCopyInfo.from_dictionary(structure)) database_info_list = None if dictionary.get('databaseInfoList') != None: database_info_list = list() for structure in dictionary.get('databaseInfoList'): database_info_list.append(cohesity_management_sdk.models.exchange_database_info.ExchangeDatabaseInfo.from_dictionary(structure)) fqdn = dictionary.get('fqdn') guid = dictionary.get('guid') name = dictionary.get('name') total_size_bytes = dictionary.get('totalSizeBytes') # Return an object of this model return cls(database_copy_info_list, database_info_list, fqdn, guid, name, total_size_bytes)
[ [ [ 63, 121 ] ], [ [ 129, 182 ], [ 3010, 3033 ], [ 3358, 3381 ] ], [ [ 190, 211 ] ] ]
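from_dictionary in the model above expects the API property names listed in _names, not the Python attribute names. An example payload showing the shape it consumes; the values are invented, and the call itself is commented out so the snippet does not require the cohesity_management_sdk package.

payload = {
    "fqdn": "exchange01.example.local",              # maps to .fqdn
    "guid": "1b4e28ba-2fa1-11d2-883f-0016d3cca427",  # maps to .guid
    "name": "Exchange01",                            # maps to .name
    "totalSizeBytes": 10737418240,                   # maps to .total_size_bytes
}
# info = ApplicationServerInfo.from_dictionary(payload)
# databaseCopyInfoList / databaseInfoList are absent here, so both list
# attributes would come back as None.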
# Copyright 2016 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. { 'targets': [ { 'target_name': 'main_page_behavior', 'dependencies': [ '../animation/compiled_resources2.gyp:animation', '../compiled_resources2.gyp:route', 'settings_section', '<(DEPTH)/ui/webui/resources/js/compiled_resources2.gyp:assert', '<(DEPTH)/ui/webui/resources/js/compiled_resources2.gyp:util', ], 'includes': ['../../../../../third_party/closure_compiler/compile_js2.gypi'], }, { 'target_name': 'settings_animated_pages', 'dependencies': [ '../compiled_resources2.gyp:route', '<(DEPTH)/ui/webui/resources/js/compiled_resources2.gyp:assert', '<(DEPTH)/ui/webui/resources/js/compiled_resources2.gyp:load_time_data', ], 'includes': ['../../../../../third_party/closure_compiler/compile_js2.gypi'], }, { 'target_name': 'settings_page_visibility', 'includes': ['../../../../../third_party/closure_compiler/compile_js2.gypi'], }, { 'target_name': 'settings_section', 'dependencies': [ '../animation/compiled_resources2.gyp:animation', '<(EXTERNS_GYP):web_animations', ], 'includes': ['../../../../../third_party/closure_compiler/compile_js2.gypi'], }, { 'target_name': 'settings_subpage', 'dependencies': [ '../compiled_resources2.gyp:route', 'settings_subpage_search', '<(DEPTH)/third_party/polymer/v1_0/components-chromium/iron-resizable-behavior/compiled_resources2.gyp:iron-resizable-behavior-extracted', '<(DEPTH)/third_party/polymer/v1_0/components-chromium/neon-animation/compiled_resources2.gyp:neon-animatable-behavior-extracted', '<(DEPTH)/ui/webui/resources/js/compiled_resources2.gyp:assert', ], 'includes': ['../../../../../third_party/closure_compiler/compile_js2.gypi'], }, { 'target_name': 'settings_subpage_search', 'dependencies': [ '<(DEPTH)/third_party/polymer/v1_0/components-chromium/paper-icon-button/compiled_resources2.gyp:paper-icon-button-extracted', '<(DEPTH)/third_party/polymer/v1_0/components-chromium/paper-input/compiled_resources2.gyp:paper-input-container-extracted', '<(DEPTH)/ui/webui/resources/cr_elements/cr_search_field/compiled_resources2.gyp:cr_search_field_behavior', '<(DEPTH)/ui/webui/resources/js/compiled_resources2.gyp:assert', ], 'includes': ['../../../../../third_party/closure_compiler/compile_js2.gypi'], }, ], }
[]
# # coding=utf-8 import unittest import sys import os from io import open import openpyxl as xl from pptx_template.xlsx_model import _build_tsv, _format_cell_value, generate_whole_model class Cell: def __init__(self, value, number_format): self.value = value self.number_format = number_format def _to_cells(list_of_list): return [[Cell(value, '') for value in list] for list in list_of_list] class MyTest(unittest.TestCase): def test_build_tsv(self): tsv = _build_tsv([_to_cells([["Year","A","B"],["2016",100,200]])]) self.assertEqual([["Year","A","B"],["2016",100,200]], tsv) def test_build_tsv_tranapose(self): tsv = _build_tsv([_to_cells([["Year","A","B"],["2016",100,200]])], transpose=True) self.assertEqual([["Year","2016"],["A",100],["B",200]], tsv) def test_build_tsv_side_by_side(self): tsv = _build_tsv([_to_cells([["Year","A"],["2016",100]]), _to_cells([["B"],[200]])], side_by_side=True) self.assertEqual([["Year","A","B"],["2016",100,200]], tsv) def test_format_cell_value(self): self.assertEqual(123.45678, _format_cell_value(Cell(123.45678, ''))) self.assertEqual("123", _format_cell_value(Cell(123.45678, '0'))) self.assertEqual("123.46", _format_cell_value(Cell(123.45678, '0.00'))) self.assertEqual("123.5", _format_cell_value(Cell(123.45678, '0.0_'))) self.assertEqual("12345.7%", _format_cell_value(Cell(123.45678, '0.0%_'))) self.assertEqual("12345%", _format_cell_value(Cell(123.45678, '0%_'))) def test_generate_whole_model(self): def read_expect(name): file_name = os.path.join(os.path.dirname(__file__), 'data2', name) f = open(file_name, mode = 'r', encoding = 'utf-8') result = f.read() f.close() return result xls_file = os.path.join(os.path.dirname(__file__), 'data2', 'in.xlsx') slides = generate_whole_model(xls_file, {}) self.assertEqual(u'Hello!', slides['p01']['greeting']['en']) self.assertEqual(u'こんにちは!', slides['p01']['greeting']['ja']) self.assertEqual([ ['Season', u'売り上げ', u'利益', u'利益率'], [u'春', 100, 50, 0.5], [u'夏', 110, 60, 0.5], [u'秋', 120, 70, 0.5], [u'冬', 130, 0, 0.6], ], slides['p02']['array']) self.assertEqual(read_expect('p02-normal.tsv'), slides['p02']['normal']['tsv_body']) self.assertEqual(read_expect('p02-transpose.tsv'), slides['p02']['transpose']['tsv_body']) self.assertEqual(read_expect('p02-sidebyside.tsv'), slides['p02']['sidebyside']['tsv_body']) if __name__ == '__main__': unittest.main()
[ [ [ 24, 32 ], [ 435, 443 ], [ 2748, 2756 ] ], [ [ 40, 43 ] ], [ [ 51, 53 ], [ 1883, 1885 ], [ 1896, 1898 ], [ 1666, 1668 ], [ 1679, 1681 ] ], [ [ 69, 73 ], [ 1737, 1741 ] ], [ [ 82, 96 ] ], [ [ 135, 145 ], [ 501, 511 ], [ 686, 696 ], [ 892, 902 ] ], [ [ 147, 165 ], [ 1133, 1151 ], [ 1206, 1224 ], [ 1283, 1301 ], [ 1362, 1380 ], [ 1444, 1462 ], [ 1525, 1543 ] ], [ [ 167, 187 ], [ 1960, 1980 ] ], [ [ 195, 199 ], [ 360, 364 ], [ 1152, 1156 ], [ 1225, 1229 ], [ 1302, 1306 ], [ 1381, 1385 ], [ 1463, 1467 ], [ 1544, 1548 ] ], [ [ 322, 331 ], [ 513, 522 ], [ 698, 707 ], [ 904, 913 ], [ 944, 953 ] ], [ [ 428, 434 ] ] ]
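test_build_tsv_tranapose above expects the two source rows to come back column-wise. That transposition is a plain zip over the rows; the standalone illustration below reproduces the asserted expectation, not _build_tsv's actual implementation.

rows = [["Year", "A", "B"], ["2016", 100, 200]]
transposed = [list(column) for column in zip(*rows)]
print(transposed)   # [['Year', '2016'], ['A', 100], ['B', 200]] -- the asserted value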
import base64 import copy import hashlib import json from botocore.exceptions import ClientError import pytest from ..test_utils import import_lambda sdk_analysis = import_lambda( "sdk_analysis", mock_imports=[ "pulse3D.plate_recording", "pulse3D.constants", "pulse3D.excel_writer", "pymysql", "pandas", ], ) TEST_BUCKET_NAME = "test_name" TEST_OBJECT_KEY = "customer_id/username/test_key" TEST_RECORD = {"s3": {"bucket": {"name": TEST_BUCKET_NAME}, "object": {"key": TEST_OBJECT_KEY}}} TEST_FILENAME = TEST_OBJECT_KEY.rsplit("/", 1)[1] @pytest.fixture(scope="function", name="mocked_boto3_client") def fixture_mocked_boto3_client(mocker): mocked_sqs_client = mocker.Mock() mocked_ssm_client = mocker.Mock() mocked_s3_client = mocker.Mock() mocked_ec2_client = mocker.Mock() mocked_s3_client.head_object.return_value = {"Metadata": {"upload-id": "test-id"}} mocked_dynamodb_client = mocker.Mock() def se(client_type): if client_type == "sqs": return mocked_sqs_client if client_type == "s3": return mocked_s3_client if client_type == "dynamodb": return mocked_dynamodb_client if client_type == "secretsmanager": return mocked_ssm_client if client_type == "ec2": return mocked_ec2_client mocker.patch.object(sdk_analysis.boto3, "client", autospec=True, side_effect=se) yield { "sqs": mocked_sqs_client, "s3": mocked_s3_client, "dynamodb": mocked_dynamodb_client, "secretsmanager": mocked_ssm_client, "ec2": mocked_ec2_client, } def test_sdk_analysis__logs_exception_when_receiving_message_from_sqs_fails(mocker, mocked_boto3_client): mocked_sqs_client = mocked_boto3_client["sqs"] expected_error = ClientError({}, "") mocked_sqs_client.receive_message.side_effect = expected_error spied_logger_exception = mocker.spy(sdk_analysis.logger, "exception") sdk_analysis.handler(max_num_loops=1) spied_logger_exception.assert_called_once_with(f"receive_message failed. 
Error: {expected_error}") def test_sdk_analysis__sleeps_after_each_loop_but_not_in_final_loop(mocker, mocked_boto3_client): mocked_sqs_client = mocked_boto3_client["sqs"] mocked_sleep = mocker.patch.object(sdk_analysis, "sleep", autospec=True) # Tanner (9/23/21): mocking receive_message to have error raised here in order to avoid mocking multiple other objects mocked_sqs_client.receive_message.side_effect = ClientError({}, "") sdk_analysis.handler(max_num_loops=2) mocked_sleep.assert_called_once_with(5) def test_sdk_analysis__gets_messages_from_sqs_queue_correctly(mocker, mocked_boto3_client): mocked_sqs_client = mocked_boto3_client["sqs"] mocked_sqs_client.receive_message.return_value = {} expected_sqs_url = "test_url" mocker.patch.object(sdk_analysis, "SQS_URL", expected_sqs_url) sdk_analysis.handler(max_num_loops=1) mocked_sqs_client.receive_message.assert_called_once_with( QueueUrl=expected_sqs_url, MaxNumberOfMessages=1, WaitTimeSeconds=10 ) def test_sdk_analysis__deletes_messages_from_sqs_queue_after_processing_them(mocker, mocked_boto3_client): mocked_sqs_client = mocked_boto3_client["sqs"] expected_sqs_url = "test_url" mocker.patch.object(sdk_analysis, "SQS_URL", expected_sqs_url) test_message = {"ReceiptHandle": "rh"} test_message_list = [test_message] * 3 mocked_sqs_client.receive_message.return_value = {"Messages": test_message_list} sdk_analysis.handler(max_num_loops=1) assert mocked_sqs_client.delete_message.call_count == len(test_message_list) mocked_sqs_client.delete_message.called_with( QueueUrl=expected_sqs_url, ReceiptHandle=test_message["ReceiptHandle"] ) @pytest.mark.parametrize( "test_message", [ {}, {"Body": json.dumps({})}, {"Body": json.dumps({"other_key": "val"})}, {"Body": json.dumps({"Records": []})}, {"Body": json.dumps({"Records": [{}]})}, {"Body": json.dumps({"Records": [{"eventSource": "aws:s3"}]})}, {"Body": json.dumps({"Records": [{"eventName": "ObjectCreated:Post"}]})}, ], ) def test_sdk_analysis__does_not_process_message_or_record_from_sqs_queue_that_is_not_formatted_correctly( test_message, mocker, mocked_boto3_client ): mocked_sqs_client = mocked_boto3_client["sqs"] test_message.update({"ReceiptHandle": "rh"}) mocked_sqs_client.receive_message.return_value = {"Messages": [test_message]} spied_process_record = mocker.spy(sdk_analysis, "process_record") sdk_analysis.handler(max_num_loops=1) spied_process_record.assert_not_called() def test_sdk_analysis__processes_each_record_of_each_record_of_each_message_from_sqs_queue( mocker, mocked_boto3_client ): mocked_sqs_client = mocked_boto3_client["sqs"] mocked_s3_client = mocked_boto3_client["s3"] mocked_dynamodb_client = mocked_boto3_client["dynamodb"] test_num_records = 5 test_records = [ {"eventSource": "aws:s3", "eventName": "ObjectCreated:Post", "num": i} for i in range(test_num_records) ] test_messages = [ {"Body": json.dumps({"Records": records}), "ReceiptHandle": "rh"} for records in (test_records[:2], test_records[2:]) ] mocked_sqs_client.receive_message.return_value = {"Messages": test_messages} mocked_process_record = mocker.patch.object(sdk_analysis, "process_record") sdk_analysis.handler(max_num_loops=1) assert mocked_process_record.call_count == test_num_records for record in test_records: mocked_process_record.assert_any_call(record, mocked_s3_client, mocked_dynamodb_client) def test_sdk_analysis__handles_info_logging_pertaining_to_sqs_queue(mocker, mocked_boto3_client): mocked_sqs_client = mocked_boto3_client["sqs"] test_message_list = [] mocked_sqs_client.receive_message.return_value = 
{"Messages": test_message_list} expected_sqs_url = "test_url" mocker.patch.object(sdk_analysis, "SQS_URL", expected_sqs_url) spied_logger_info = mocker.spy(sdk_analysis.logger, "info") sdk_analysis.handler(max_num_loops=1) spied_logger_info.assert_any_call(f"Receiving messages on {expected_sqs_url}") spied_logger_info.assert_any_call(f"Received: {len(test_message_list)}") spied_logger_info.assert_any_call("Received: 0") def test_process_record__retrieves_metadata_of_file_correctly(mocked_boto3_client): mocked_s3_client = mocked_boto3_client["s3"] sdk_analysis.process_record(copy.deepcopy(TEST_RECORD), mocked_s3_client, mocked_boto3_client["dynamodb"]) mocked_s3_client.head_object.assert_called_once_with(Bucket=TEST_BUCKET_NAME, Key=TEST_OBJECT_KEY) def test_process_record__logs_error_when_one_is_raised_while_retrieving_metadata_from_s3_and_does_not_attempt_to_download_the_file( mocker, mocked_boto3_client ): mocked_s3_client = mocked_boto3_client["s3"] expected_error = ClientError({}, "") mocked_s3_client.head_object.side_effect = expected_error spied_logger_error = mocker.spy(sdk_analysis.logger, "error") sdk_analysis.process_record(copy.deepcopy(TEST_RECORD), mocked_s3_client, mocked_boto3_client["dynamodb"]) spied_logger_error.assert_called_once_with( f"Error occurred while retrieving head object of {TEST_BUCKET_NAME}/{TEST_OBJECT_KEY}: {expected_error}" ) mocked_s3_client.download_file.assert_not_called() def test_process_record__correctly_downloads_file_to_temporary_directory(mocker, mocked_boto3_client): mocked_s3_client = mocked_boto3_client["s3"] spied_temporary_dir = mocker.spy(sdk_analysis.tempfile, "TemporaryDirectory") sdk_analysis.process_record(copy.deepcopy(TEST_RECORD), mocked_s3_client, mocked_boto3_client["dynamodb"]) spied_temporary_dir.assert_called_once_with(dir="/tmp") mocked_s3_client.download_file.assert_called_once_with( TEST_BUCKET_NAME, TEST_OBJECT_KEY, f"{spied_temporary_dir.spy_return.name}/{TEST_FILENAME}" ) def test_process_record__handles_error_raised_while_downloading_file_from_s3(mocker, mocked_boto3_client): mocked_s3_client = mocked_boto3_client["s3"] expected_upload_id = mocked_s3_client.head_object.return_value["Metadata"]["upload-id"] expected_error = ClientError({}, "") mocked_s3_client.download_file.side_effect = expected_error spied_logger_error = mocker.spy(sdk_analysis.logger, "error") spied_update_status = mocker.spy(sdk_analysis, "update_sdk_status") spied_pr_from_dir = mocker.spy(sdk_analysis.PlateRecording, "from_directory") sdk_analysis.process_record(copy.deepcopy(TEST_RECORD), mocked_s3_client, mocked_boto3_client["dynamodb"]) spied_logger_error.assert_called_once_with( f"Failed to download {TEST_BUCKET_NAME}/{TEST_OBJECT_KEY}: {expected_error}" ) spied_update_status.assert_called_once_with( mocked_boto3_client["dynamodb"], expected_upload_id, "error accessing file" ) spied_pr_from_dir.assert_not_called() def test_process_record__sets_file_status_to_analysis_running_then_runs_sdk_analysis_on_file( mocker, mocked_boto3_client ): mocked_s3_client = mocked_boto3_client["s3"] expected_upload_id = mocked_s3_client.head_object.return_value["Metadata"]["upload-id"] spied_temporary_dir = mocker.spy(sdk_analysis.tempfile, "TemporaryDirectory") mocked_pr_from_dir = mocker.patch.object(sdk_analysis.PlateRecording, "from_directory", autospec=True) pr = mocked_pr_from_dir.return_value.__next__() error_tracker = {"funcs_called_out_of_order": False} def se(*args): if args[-1] == "analysis running": error_tracker["funcs_called_out_of_order"] = 
mocked_pr_from_dir.call_count != 0 mocked_update_status = mocker.patch.object( sdk_analysis, "update_sdk_status", autospec=True, side_effect=se ) sdk_analysis.process_record(copy.deepcopy(TEST_RECORD), mocked_s3_client, mocked_boto3_client["dynamodb"]) assert error_tracker["funcs_called_out_of_order"] is False assert mocked_update_status.call_args_list[0] == mocker.call( mocked_boto3_client["dynamodb"], expected_upload_id, "analysis running" ) mocked_pr_from_dir.assert_called_once_with(spied_temporary_dir.spy_return) sdk_analysis.write_xlsx.assert_called_with(pr, name=f"{TEST_FILENAME}.xlsx") def test_process_record__handles_error_raised_while_running_sdk_analysis(mocker, mocked_boto3_client): expected_upload_id = mocked_boto3_client["s3"].head_object.return_value["Metadata"]["upload-id"] expected_error = Exception("test_exception") mocker.patch.object( sdk_analysis.PlateRecording, "from_directory", autospec=True, side_effect=expected_error ) spied_logger_error = mocker.spy(sdk_analysis.logger, "error") mocked_update_status = mocker.patch.object(sdk_analysis, "update_sdk_status", autospec=True) sdk_analysis.process_record( copy.deepcopy(TEST_RECORD), mocked_boto3_client["s3"], mocked_boto3_client["dynamodb"] ) spied_logger_error.assert_called_once_with(f"SDK analysis failed: {expected_error}") mocked_update_status.assert_called_with( mocked_boto3_client["dynamodb"], expected_upload_id, "error during analysis" ) def test_process_record__uploads_file_created_by_sdk_analysis_to_s3_bucket_correctly_and_sets_file_status_to_analysis_complete( mocker, mocked_boto3_client ): mocked_s3_client = mocked_boto3_client["s3"] mocked_dynamo_client = mocked_boto3_client["dynamodb"] expected_upload_id = mocked_s3_client.head_object.return_value["Metadata"]["upload-id"] expected_upload_bucket = "test_url" mocker.patch.object(hashlib, "md5") mocked_base64 = mocker.patch.object(base64, "b64encode") expected_md5 = mocked_base64().decode() mocker.patch.object(sdk_analysis, "S3_UPLOAD_BUCKET", expected_upload_bucket) mocked_open = mocker.patch("builtins.open", autospec=True) mocked_update_status = mocker.patch.object(sdk_analysis, "update_sdk_status", autospec=True) mocker.patch.object(sdk_analysis.PlateRecording, "from_directory", autospec=True) mocker.patch.object(sdk_analysis.main, "handle_db_metadata_insertions", autospec=True) sdk_analysis.process_record(copy.deepcopy(TEST_RECORD), mocked_s3_client, mocked_boto3_client["dynamodb"]) mocked_open.assert_called_with(f"{TEST_FILENAME}.xlsx", "rb") mocked_s3_client.put_object.assert_called_once_with( Body=mocked_open.return_value.__enter__(), Bucket=expected_upload_bucket, Key=f"{TEST_OBJECT_KEY}.xlsx", ContentMD5=expected_md5, ) assert mocked_update_status.call_args_list[1] == mocker.call( mocked_dynamo_client, expected_upload_id, "analysis complete" ) def test_process_record__handles_error_raised_while_uploading_file_to_s3(mocker, mocked_boto3_client): mocked_s3_client = mocked_boto3_client["s3"] expected_upload_id = mocked_s3_client.head_object.return_value["Metadata"]["upload-id"] mocker.patch.object(hashlib, "md5") mocker.patch.object(base64, "b64encode") expected_error = Exception("test_exception") mocked_s3_client.put_object.side_effect = expected_error expected_upload_bucket = "test_url" mocker.patch.object(sdk_analysis, "S3_UPLOAD_BUCKET", expected_upload_bucket) mocker.patch("builtins.open", autospec=True) mocked_update_status = mocker.patch.object(sdk_analysis, "update_sdk_status", autospec=True) mocker.patch.object(sdk_analysis.PlateRecording, 
"from_directory", autospec=True) spied_logger_error = mocker.spy(sdk_analysis.logger, "error") mocked_db_handling = mocker.patch.object( sdk_analysis.main, "handle_db_metadata_insertions", autospec=True ) sdk_analysis.process_record(copy.deepcopy(TEST_RECORD), mocked_s3_client, mocked_boto3_client["dynamodb"]) expected_file_name = f"{TEST_FILENAME}.xlsx" spied_logger_error.assert_called_with( f"S3 Upload failed for {expected_file_name} to {expected_upload_bucket}/{TEST_OBJECT_KEY}.xlsx: {expected_error}" ) mocked_update_status.assert_called_with( mocked_boto3_client["dynamodb"], expected_upload_id, "error during upload of analyzed file" ) mocked_db_handling.assert_not_called() def test_process_record__after_successful_upload_logger_handles_failed_aurora_db_insertion( mocker, mocked_boto3_client ): spied_logger_error = mocker.spy(sdk_analysis.logger, "error") mocked_s3_client = mocked_boto3_client["s3"] expected_upload_id = mocked_s3_client.head_object.return_value["Metadata"]["upload-id"] mocker.patch.object(hashlib, "md5") mocker.patch.object(base64, "b64encode") expected_upload_bucket = "test_url" mocker.patch.object(sdk_analysis, "S3_UPLOAD_BUCKET", expected_upload_bucket) mocker.spy(sdk_analysis.tempfile, "TemporaryDirectory") mocker.patch("builtins.open", autospec=True) mocked_update_status = mocker.patch.object(sdk_analysis, "update_sdk_status", autospec=True) mocker.patch.object(sdk_analysis.PlateRecording, "from_directory", autospec=True) # mocker.patch.object(sdk_analysis, "write_xslx", autospec=True) mocker.patch.object(sdk_analysis.main, "handle_db_metadata_insertions", side_effect=Exception("ERROR")) sdk_analysis.process_record(copy.deepcopy(TEST_RECORD), mocked_s3_client, mocked_boto3_client["dynamodb"]) mocked_update_status.assert_called_with( mocked_boto3_client["dynamodb"], expected_upload_id, "error inserting analysis to database" ) spied_logger_error.assert_called_with("Recording metadata failed to store in aurora database: ERROR") def test_process_record__after_successful_upload_logger_handles_successful_aurora_db_insertion( mocker, mocked_boto3_client ): spied_logger_info = mocker.spy(sdk_analysis.logger, "info") mocked_s3_client = mocked_boto3_client["s3"] expected_upload_id = mocked_s3_client.head_object.return_value["Metadata"]["upload-id"] expected_upload_bucket = "test_bucket" expected_db_cluster_endpoint = "test_host" expected_file_name = f"{TEST_OBJECT_KEY}.xlsx" mocker.patch.object(sdk_analysis, "S3_UPLOAD_BUCKET", expected_upload_bucket) mocker.patch.object(sdk_analysis, "DB_CLUSTER_ENDPOINT", expected_db_cluster_endpoint) mocker.patch.object(hashlib, "md5") mocked_base64 = mocker.patch.object(base64, "b64encode") expected_md5 = mocked_base64().decode() mocked_open = mocker.patch("builtins.open", autospec=True) mocked_update_status = mocker.patch.object(sdk_analysis, "update_sdk_status", autospec=True) mocked_PR_instance = mocker.patch.object(sdk_analysis.PlateRecording, "from_directory", autospec=True) mocked_db_handling = mocker.patch.object( sdk_analysis.main, "handle_db_metadata_insertions", autospec=True ) mocker.patch.object(mocked_s3_client, "put_object") sdk_analysis.process_record(copy.deepcopy(TEST_RECORD), mocked_s3_client, mocked_boto3_client["dynamodb"]) mocked_update_status.assert_any_call( mocked_boto3_client["dynamodb"], expected_upload_id, "analysis successfully inserted into database" ) spied_logger_info.assert_any_call(f"Inserting {TEST_FILENAME}.xlsx metadata into aurora database") test_args = [ mocked_open.return_value.__enter__(), 
mocked_PR_instance.return_value.__next__(), expected_md5, ] mocked_db_handling.assert_called_with( expected_upload_bucket, expected_file_name, expected_db_cluster_endpoint, test_args ) def test_set_info_dict__correctly_retrieves_aws_credentials(mocker, mocked_boto3_client): mocked_s3_client = mocked_boto3_client["s3"] expected_upload_bucket = "test_url" mocker.patch.object(sdk_analysis, "S3_UPLOAD_BUCKET", expected_upload_bucket) mocker.patch.object(hashlib, "md5") mocker.patch.object(base64, "b64encode") mocker.patch.object(sdk_analysis.main, "get_ssm_secrets", return_value=("test_username", "test_password")) mocker.patch.object(sdk_analysis, "update_sdk_status", autospec=True) mocker.patch("builtins.open", autospec=True) mocker.patch.object(sdk_analysis.PlateRecording, "from_directory", autospec=True) sdk_analysis.process_record(copy.deepcopy(TEST_RECORD), mocked_s3_client, mocked_boto3_client["dynamodb"]) expected_info_dict = { "db_name": "mantarray_recordings", "db_password": "test_password", "db_username": "test_username", } assert sdk_analysis.main.INFO_DICT == expected_info_dict def test_load_data_into_dataframe__successfully_gets_called_after_successful_db_connection( mocker, mocked_boto3_client ): mocked_s3_client = mocked_boto3_client["s3"] mocker.patch.object(hashlib, "md5") mocker.patch.object(base64, "b64encode") mocker.patch.object(sdk_analysis.main, "get_ssm_secrets", return_value=("test_username", "test_password")) expected_db_cluster_endpoint = "test_host" expected_upload_bucket = "test_url" mocker.patch.object(sdk_analysis, "S3_UPLOAD_BUCKET", expected_upload_bucket) mocker.patch.object(sdk_analysis, "DB_CLUSTER_ENDPOINT", expected_db_cluster_endpoint) mocker.patch.object(sdk_analysis.main.pymysql, "connect") format_spy = mocker.patch.object(sdk_analysis.main, "load_data_to_dataframe") mocked_open = mocker.patch("builtins.open", autospec=True) mocker.patch.object(sdk_analysis, "update_sdk_status", autospec=True) mocker.patch.object(mocked_s3_client, "put_object", autospec=True) mocked_PR_instance = mocker.patch.object(sdk_analysis.PlateRecording, "from_directory", autospec=True) sdk_analysis.process_record(copy.deepcopy(TEST_RECORD), mocked_s3_client, mocked_boto3_client["dynamodb"]) format_spy.assert_any_call( mocked_open.return_value.__enter__(), mocked_PR_instance.return_value.__next__() ) def test_process_record__handles_info_logging(mocker, mocked_boto3_client): spied_logger_info = mocker.spy(sdk_analysis.logger, "info") spied_temporary_dir = mocker.spy(sdk_analysis.tempfile, "TemporaryDirectory") sdk_analysis.process_record( copy.deepcopy(TEST_RECORD), mocked_boto3_client["s3"], mocked_boto3_client["dynamodb"] ) spied_logger_info.assert_any_call(f"Retrieving Head Object of {TEST_BUCKET_NAME}/{TEST_OBJECT_KEY}") spied_logger_info.assert_any_call( f"Download {TEST_BUCKET_NAME}/{TEST_OBJECT_KEY} to {spied_temporary_dir.spy_return.name}/{TEST_FILENAME}" ) def test_update_sdk_status__updates_item_correctly(mocker, mocked_boto3_client): mocked_dynamodb_client = mocked_boto3_client["dynamodb"] expected_table_name = "test_table" mocker.patch.object(sdk_analysis, "SDK_STATUS_TABLE", expected_table_name) test_upload_id = "test_id" test_status = "test_status" sdk_analysis.update_sdk_status(mocked_dynamodb_client, test_upload_id, test_status) mocked_dynamodb_client.update_item.assert_called_once_with( TableName=expected_table_name, Key={"upload_id": {"S": test_upload_id}}, UpdateExpression="SET sdk_status = :val", ExpressionAttributeValues={":val": {"S": test_status}}, 
ConditionExpression="attribute_exists(upload_id)", ) def test_update_sdk_status__handles_conditional_check_failed_exceptions_raised_from_updating_item( mocker, mocked_boto3_client ): mocked_dynamodb_client = mocked_boto3_client["dynamodb"] expected_error = ClientError({"Error": {"Code": "ConditionalCheckFailedException"}}, "") mocked_dynamodb_client.update_item.side_effect = expected_error expected_table_name = "test_table" mocker.patch.object(sdk_analysis, "SDK_STATUS_TABLE", expected_table_name) spied_logger_error = mocker.spy(sdk_analysis.logger, "error") test_upload_id = "test_id" test_status = "test_status" sdk_analysis.update_sdk_status(mocked_dynamodb_client, test_upload_id, test_status) spied_logger_error.assert_any_call(f"Error: {expected_error}") spied_logger_error.assert_any_call( f"Upload ID: {test_upload_id} was not found in table {expected_table_name}" ) mocked_dynamodb_client.put_item.assert_called_once_with( TableName=expected_table_name, Item={"upload_id": {"S": test_upload_id}, "sdk_status": {"S": test_status}}, ) def test_update_sdk_status__logs_other_aws_errors_raised_from_updating_item(mocker, mocked_boto3_client): mocked_dynamodb_client = mocked_boto3_client["dynamodb"] expected_error = ClientError({"Error": {"Code": "SomeOtherException"}}, "") mocked_dynamodb_client.update_item.side_effect = expected_error expected_table_name = "test_table" mocker.patch.object(sdk_analysis, "SDK_STATUS_TABLE", expected_table_name) spied_logger_error = mocker.spy(sdk_analysis.logger, "error") test_upload_id = "test_id" test_status = "test_status" sdk_analysis.update_sdk_status(mocked_dynamodb_client, test_upload_id, test_status) spied_logger_error.assert_called_once_with(f"Error: {expected_error}") mocked_dynamodb_client.put_item.assert_not_called()
[ [ [ 7, 13 ], [ 11862, 11868 ], [ 13203, 13209 ], [ 14809, 14815 ], [ 16528, 16534 ], [ 18029, 18035 ], [ 18943, 18949 ] ], [ [ 21, 25 ], [ 6637, 6641 ], [ 7240, 7244 ], [ 7811, 7815 ], [ 8725, 8729 ], [ 10013, 10017 ], [ 11057, 11061 ], [ 12381, 12385 ], [ 13915, 13919 ], [ 15456, 15460 ], [ 17076, 17080 ], [ 18404, 18408 ], [ 19829, 19833 ], [ 20302, 20306 ] ], [ [ 33, 40 ], [ 11806, 11813 ], [ 13163, 13170 ], [ 14769, 14776 ], [ 16472, 16479 ], [ 17989, 17996 ], [ 18903, 18910 ] ], [ [ 48, 52 ], [ 3937, 3941 ], [ 3971, 3975 ], [ 4023, 4027 ], [ 4070, 4074 ], [ 4119, 4123 ], [ 4191, 4195 ], [ 5263, 5267 ] ], [ [ 86, 97 ], [ 1851, 1862 ], [ 2562, 2573 ], [ 7059, 7070 ], [ 8388, 8399 ], [ 21625, 21636 ], [ 22681, 22692 ] ], [ [ 105, 111 ], [ 596, 602 ], [ 3857, 3863 ] ], [ [ 138, 151 ], [ 168, 181 ] ], [ [ 153, 165 ], [ 1401, 1413 ], [ 1979, 1991 ], [ 2017, 2029 ], [ 2349, 2361 ], [ 2587, 2599 ], [ 2929, 2941 ], [ 2977, 2989 ], [ 3380, 3392 ], [ 3600, 3612 ], [ 4641, 4653 ], [ 4678, 4690 ], [ 5516, 5528 ], [ 5553, 5565 ], [ 6106, 6118 ], [ 6184, 6196 ], [ 6218, 6230 ], [ 6609, 6621 ], [ 7177, 7189 ], [ 7212, 7224 ], [ 7733, 7745 ], [ 7783, 7795 ], [ 8508, 8520 ], [ 8575, 8587 ], [ 8645, 8657 ], [ 8697, 8709 ], [ 9434, 9446 ], [ 9524, 9536 ], [ 9909, 9921 ], [ 9985, 9997 ], [ 10390, 10402 ], [ 10756, 10768 ], [ 10888, 10900 ], [ 10965, 10977 ], [ 11020, 11032 ], [ 11951, 11963 ], [ 12121, 12133 ], [ 12195, 12207 ], [ 12281, 12293 ], [ 12353, 12365 ], [ 13399, 13411 ], [ 13554, 13566 ], [ 13628, 13640 ], [ 13726, 13738 ], [ 13810, 13822 ], [ 13887, 13899 ], [ 14574, 14586 ], [ 14895, 14907 ], [ 14969, 14981 ], [ 15110, 15122 ], [ 15184, 15196 ], [ 15339, 15351 ], [ 15428, 15440 ], [ 15962, 15974 ], [ 16299, 16311 ], [ 16381, 16393 ], [ 16704, 16716 ], [ 16799, 16811 ], [ 16915, 16927 ], [ 17048, 17060 ], [ 17907, 17919 ], [ 18075, 18087 ], [ 18187, 18199 ], [ 18310, 18322 ], [ 18376, 18388 ], [ 18651, 18663 ], [ 18988, 19000 ], [ 19187, 19199 ], [ 19269, 19281 ], [ 19361, 19373 ], [ 19436, 19448 ], [ 19569, 19581 ], [ 19735, 19747 ], [ 19801, 19813 ], [ 20149, 20161 ], [ 20215, 20227 ], [ 20265, 20277 ], [ 20867, 20879 ], [ 20990, 21002 ], [ 21829, 21841 ], [ 21920, 21932 ], [ 22018, 22030 ], [ 22872, 22884 ], [ 22963, 22975 ], [ 23061, 23073 ] ], [ [ 365, 381 ], [ 487, 503 ], [ 6780, 6796 ], [ 7425, 7441 ], [ 8018, 8034 ], [ 8882, 8898 ], [ 20462, 20478 ], [ 20559, 20575 ] ], [ [ 396, 411 ], [ 524, 539 ], [ 559, 574 ], [ 6802, 6817 ], [ 7444, 7459 ], [ 8036, 8051 ], [ 8901, 8916 ], [ 12688, 12703 ], [ 14167, 14182 ], [ 16251, 16266 ], [ 20481, 20496 ], [ 20578, 20593 ] ], [ [ 446, 457 ], [ 6651, 6662 ], [ 7254, 7265 ], [ 7825, 7836 ], [ 8739, 8750 ], [ 10027, 10038 ], [ 11071, 11082 ], [ 12395, 12406 ], [ 13929, 13940 ], [ 15470, 15481 ], [ 17090, 17101 ], [ 18418, 18429 ], [ 19843, 19854 ], [ 20316, 20327 ] ], [ [ 543, 556 ], [ 8094, 8107 ], [ 10445, 10458 ], [ 12498, 12511 ], [ 14022, 14035 ], [ 17363, 17376 ], [ 20637, 20650 ] ], [ [ 661, 688 ] ], [ [ 1676, 1747 ] ], [ [ 2164, 2227 ] ], [ [ 2675, 2732 ] ], [ [ 3167, 3239 ] ], [ [ 4269, 4369 ] ], [ [ 4767, 4853 ] ], [ [ 5789, 5852 ] ], [ [ 6475, 6532 ] ], [ [ 6825, 6951 ] ], [ [ 7547, 7615 ] ], [ [ 8122, 8194 ] ], [ [ 9130, 9218 ] ], [ [ 10473, 10541 ] ], [ [ 11382, 11504 ] ], [ [ 12899, 12967 ] ], [ [ 14415, 14501 ] ], [ [ 15800, 15890 ] ], [ [ 17707, 17762 ] ], [ [ 18707, 18793 ] ], [ [ 20042, 20083 ] ], [ [ 20665, 20711 ] ], [ [ 21412, 21505 ] ], [ [ 22496, 22567 ] ] ]
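Several tests above drive error paths by raising botocore ClientError with a chosen error code, and the last two check that update_sdk_status treats ConditionalCheckFailedException differently from other codes. A small sketch of that dispatch; the classify helper and its messages are invented for illustration, only the ClientError construction mirrors the tests.

from botocore.exceptions import ClientError

def classify(error: ClientError) -> str:
    # botocore puts the service error code under response["Error"]["Code"]
    code = error.response.get("Error", {}).get("Code", "")
    if code == "ConditionalCheckFailedException":
        return "upload_id not in table: fall back to put_item"
    return "unexpected AWS error: log it"

err = ClientError({"Error": {"Code": "ConditionalCheckFailedException"}}, "UpdateItem")
print(classify(err))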
from typing import TypeVar, AsyncIterator, Sequence from chris.common.types import PluginUrl from chris.common.client import AuthenticatedClient from chris.common.search import get_paginated, to_sequence import chris.common.decorator as http from chris.cube.types import ComputeResourceName, PfconUrl from chris.cube.deserialization import CubeCollectionLinks, CubePlugin, ComputeResource _T = TypeVar("_T") class CubeClient(AuthenticatedClient[CubeCollectionLinks, CubePlugin, "CubeClient"]): @http.post("/chris-admin/api/v1/") async def register_plugin( self, plugin_store_url: PluginUrl, compute_name: ComputeResourceName ) -> CubePlugin: ... @http.post("/chris-admin/api/v1/computeresources/") async def create_compute_resource( self, name: ComputeResourceName, compute_url: PfconUrl, compute_user: str, compute_password: str, description: str = "", ) -> ComputeResource: ... def get_compute_resources_of( self, plugin: CubePlugin ) -> AsyncIterator[ComputeResource]: return get_paginated( session=self.s, url=plugin.compute_resources, element_type=ComputeResource ) def search_compute_resources( self, max_requests=100, **query ) -> AsyncIterator[ComputeResource]: return self.search( url=self.collection_links.compute_resources, query=query, element_type=ComputeResource, max_requests=max_requests, ) async def get_all_compute_resources(self) -> Sequence[ComputeResource]: return await to_sequence(self.search_compute_resources())
[ [ [ 19, 26 ], [ 395, 402 ] ], [ [ 28, 41 ], [ 1057, 1070 ], [ 1300, 1313 ] ], [ [ 43, 51 ], [ 1583, 1591 ] ], [ [ 83, 92 ], [ 599, 608 ] ], [ [ 125, 144 ], [ 428, 447 ] ], [ [ 177, 190 ], [ 1104, 1117 ] ], [ [ 192, 203 ], [ 1631, 1642 ] ], [ [ 211, 241 ], [ 502, 506 ], [ 683, 687 ] ], [ [ 271, 290 ], [ 624, 643 ], [ 801, 820 ] ], [ [ 292, 300 ], [ 843, 851 ] ], [ [ 340, 359 ], [ 448, 467 ] ], [ [ 361, 371 ], [ 469, 479 ], [ 653, 663 ], [ 1037, 1047 ] ], [ [ 373, 388 ], [ 951, 966 ], [ 1071, 1086 ], [ 1190, 1205 ], [ 1314, 1329 ], [ 1467, 1482 ], [ 1592, 1607 ] ], [ [ 390, 392 ] ], [ [ 417, 427 ] ] ]
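get_compute_resources_of and search_compute_resources above return AsyncIterators, and get_all_compute_resources collapses one into a Sequence via to_sequence. A generic, self-contained sketch of that collapse step; the collect helper and the fake generator are illustrative, not part of the chris client.

import asyncio
from typing import AsyncIterator, List, TypeVar

T = TypeVar("T")

async def collect(it: AsyncIterator[T]) -> List[T]:
    # what a to_sequence-style helper boils down to
    return [item async for item in it]

async def demo() -> None:
    async def fake_compute_resources() -> AsyncIterator[str]:
        for name in ("host", "moc", "hpc"):
            yield name

    print(await collect(fake_compute_resources()))   # ['host', 'moc', 'hpc']

asyncio.run(demo())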
# -*- coding: utf-8 -*- # Generated by Django 1.11 on 2020-04-07 17:42 from __future__ import unicode_literals from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('crawl', '0015_remove_article_news_source'), ] operations = [ migrations.RenameField( model_name='article', old_name='source', new_name='news_source', ), ]
[ [ [ 94, 110 ] ], [ [ 134, 144 ], [ 163, 173 ], [ 296, 306 ] ], [ [ 153, 162 ] ] ]
# Copyright 2017 The Bazel Authors. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Skylib module containing functions that operate on dictionaries.""" def _add(*dictionaries): """Returns a new `dict` that has all the entries of the given dictionaries. If the same key is present in more than one of the input dictionaries, the last of them in the argument list overrides any earlier ones. This function is designed to take zero or one arguments as well as multiple dictionaries, so that it follows arithmetic identities and callers can avoid special cases for their inputs: the sum of zero dictionaries is the empty dictionary, and the sum of a single dictionary is a copy of itself. Args: *dictionaries: Zero or more dictionaries to be added. Returns: A new `dict` that has all the entries of the given dictionaries. """ result = {} for d in dictionaries: result.update(d) return result dicts = struct( add = _add, )
[ [ [ 679, 683 ], [ 1515, 1519 ] ], [ [ 1489, 1494 ] ] ]
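dicts.add above is Starlark, but its semantics are plain Python: later dictionaries override earlier ones and zero arguments yield an empty dict. A direct Python transcription to make the override order concrete:

def add(*dictionaries):
    result = {}
    for d in dictionaries:
        result.update(d)   # later dicts win on duplicate keys
    return result

print(add())                            # {}
print(add({"a": 1}, {"a": 2, "b": 3}))    # {'a': 2, 'b': 3}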
""" artificial measure ------------------ Creation of artificial measure """ import numpy as np ############################### Create measure ################################ ############################################################################### def create_artificial_measure_array(n_k, n_vals_i, n_feats): """Create artificial random measure in the array form. Parameters ---------- n_k: int the number of perturbations n_vals_i: int the number of indices of the output measure. n_feats: int the number of features. Returns ------- measure: np.ndarray the transformed measure computed by the whole spatial descriptor model. """ measure = np.random.random((n_vals_i, n_feats, n_k)) return measure def create_artificial_measure_append(n_k, n_vals_i, n_feats): """Create artificial random measure in the list form. Parameters ---------- n_k: int the number of perturbations n_vals_i: int the number of indices of the output measure. n_feats: int the number of features. Returns ------- measure: list the transformed measure computed by the whole spatial descriptor model. """ rounds = np.random.randint(1, 40) measure = create_empty_append(n_k, n_vals_i, n_feats) for i in range(rounds): n_iss = np.random.randint(1, 10) vals_i = create_vals_i(n_iss, n_vals_i, n_k) x_i = create_features_i_dict(n_feats, n_iss, n_k) for k in range(len(vals_i)): for i in range(len(vals_i[k])): measure[k][vals_i[k][i]].append(x_i[k][i]) return measure def create_artificial_measure_replacelist(n_k, n_vals_i, n_feats, unique_=False): """Create artificial random measure in the replacelist form. Parameters ---------- n_k: int the number of perturbations n_vals_i: int the number of indices of the output measure. n_feats: int the number of features. unique_: boolean (default=False) if there are no collapse. Returns ------- measure: list the transformed measure computed by the whole spatial descriptor model. """ last = 0 rounds = np.random.randint(1, 40) measure = create_empty_replacelist(n_k, n_vals_i, n_feats) for i in range(rounds): n_iss = np.random.randint(1, 10) if unique_: vals_i = np.array([last+np.arange(n_iss)]*n_k) last += n_iss else: vals_i = create_vals_i(n_iss, n_vals_i, n_k) x_i = create_features_i_dict(n_feats, n_iss, n_k) for k in range(len(vals_i)): measure[k][0].append(x_i[k]) measure[k][1].append(vals_i[k]) return measure ############################### Empty measure ################################# ############################################################################### def create_empty_array(n_k, n_vals_i, n_feats): """Create null measure in the array form. Parameters ---------- n_k: int the number of perturbations n_vals_i: int the number of indices of the output measure. n_feats: int the number of features. Returns ------- measure: np.ndarray the null measure to be fill by the computation of the spatial descriptor model. """ return np.zeros((n_vals_i, n_feats, n_k)) def create_empty_append(n_k, n_iss, n_feats): """Create null measure in the list form. Parameters ---------- n_k: int the number of perturbations n_vals_i: int the number of indices of the output measure. n_feats: int the number of features. Returns ------- measure: list the null measure to be fill by the computation of the spatial descriptor model. """ return [[[]]*n_iss]*n_k def create_empty_replacelist(n_k, n_iss, n_feats): """Create null measure in the replacelist form. Parameters ---------- n_k: int the number of perturbations n_vals_i: int the number of indices of the output measure. n_feats: int the number of features. 
Returns ------- measure: list the null measure to be fill by the computation of the spatial descriptor model. """ return [[[], []]]*n_k ############################### Vals_i creation ############################### ############################################################################### def create_vals_i(n_iss, nvals, n_k): """ Parameters ---------- n_k: int the number of perturbations n_vals_i: int the number of indices of the output measure. n_feats: int the number of features. Returns ------- vals_i: np.ndarray the associated stored indices for the element indices. """ return np.random.randint(1, nvals, n_iss*n_k).reshape((n_k, n_iss)) ############################### Empty features ################################ ############################################################################### def create_empty_features_array(n_feats, n_iss, n_k): """Create null features for different iss in an array-form. Parameters ---------- n_feats: int the number of features. n_iss: int the number of the elements to create their features. n_k: int the number of perturbations. Returns ------- features: np.ndarray the null features we want to compute. """ return np.zeros((n_k, n_iss, n_feats)) def create_empty_features_dict(n_feats, n_iss, n_k): """Create null features for different iss in an listdict-form. Parameters ---------- n_feats: int the number of features. n_iss: int the number of the elements to create their features. n_k: int the number of perturbations. Returns ------- features: list the null features we want to compute. """ return [[{}]*n_iss]*n_k ################################ X_i features ################################# ############################################################################### def create_features_i_array(n_feats, n_iss, n_k): """Create null features for different iss in an array-form. Parameters ---------- n_feats: int the number of features. n_iss: int the number of the elements to create their features. n_k: int the number of perturbations. Returns ------- features: np.ndarray the null features we want to compute. """ x_i = np.random.random((n_k, n_iss, n_feats)) return x_i def create_features_i_dict(n_feats, n_iss, n_k): """Create null features for different iss in an listdict-form. Parameters ---------- n_feats: int the number of features. n_iss: int the number of the elements to create their features. n_k: int the number of perturbations. Returns ------- features: list the null features we want to compute. """ x_i = [] for k in range(n_k): x_i_k = [] for i in range(n_iss): keys = np.unique(np.random.randint(1, n_feats, n_feats)) keys = [str(e) for e in keys] values = np.random.random(len(keys)) x_i_k.append(dict(zip(keys, values))) x_i.append(x_i_k) return x_i
[ [ [ 86, 97 ], [ 732, 734 ], [ 1261, 1263 ], [ 1388, 1390 ], [ 2303, 2305 ], [ 2435, 2437 ], [ 2501, 2503 ], [ 2516, 2518 ], [ 3456, 3458 ], [ 4978, 4980 ], [ 5641, 5643 ], [ 6725, 6727 ], [ 7310, 7312 ], [ 7320, 7322 ], [ 7423, 7425 ] ], [ [ 264, 295 ] ], [ [ 800, 832 ] ], [ [ 1689, 1726 ] ], [ [ 3001, 3019 ] ], [ [ 3497, 3516 ], [ 1300, 1319 ] ], [ [ 3966, 3990 ], [ 2342, 2366 ] ], [ [ 4605, 4618 ], [ 1430, 1443 ], [ 2600, 2613 ] ], [ [ 5205, 5232 ] ], [ [ 5679, 5705 ] ], [ [ 6294, 6317 ] ], [ [ 6786, 6808 ], [ 1480, 1502 ], [ 2650, 2672 ] ] ]
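The three create_artificial_measure_* helpers above return the same random measure in different layouts: a dense array indexed as (index, feature, perturbation), per-index append lists, and replace-lists of feature blocks plus their indices. A minimal self-contained sketch of the array and append layouts, assuming only numpy (it mirrors the shapes used above but does not import the module):

import numpy as np

n_k, n_vals_i, n_feats = 2, 5, 3          # perturbations, output indices, features

# Array form: one dense block, indexed as measure[index, feature, perturbation]
measure = np.random.random((n_vals_i, n_feats, n_k))
print(measure.shape)                      # (5, 3, 2)
print(measure[0, :, 1])                   # features of index 0 under perturbation 1

# Append form: one list of feature rows per perturbation and per output index
measure_append = [[[] for _ in range(n_vals_i)] for _ in range(n_k)]
measure_append[1][0].append(np.random.random(n_feats))

Building the nested lists with comprehensions keeps each slot independent; replication of the form [[[]]*n_iss]*n_k, as used by create_empty_append above, makes every slot reference the same inner list object, which matters when appending.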
from .prettifier import prettify
from .prettifier.common import assert_prettifier_works
import pytoml


def test_prettifying_against_humanly_verified_sample():
    toml_source = open('sample.toml').read()
    expected = open('sample-prettified.toml').read()
    assert_prettifier_works(toml_source, expected, prettify)
    assert pytoml.loads(toml_source) == pytoml.loads(expected)
[ [ [ 25, 33 ], [ 311, 319 ] ], [ [ 65, 88 ], [ 264, 287 ] ], [ [ 96, 102 ], [ 332, 338 ], [ 361, 367 ] ], [ [ 109, 157 ] ] ]
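The test above checks two properties of the prettifier: the output matches a hand-verified sample exactly, and prettifying never changes the parsed data. A minimal sketch of the second, semantic check, assuming only pytoml and using a no-op placeholder in place of the project's real prettify:

import pytoml

def prettify(toml_text):
    # Placeholder only; the real prettifier reflows the document.
    return toml_text

source = 'title = "example"\n[owner]\nname = "Tom"\n'
pretty = prettify(source)

# Formatting may change, but the parsed data must not.
assert pytoml.loads(pretty) == pytoml.loads(source)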
# coding: utf-8 """ Mux API Mux is how developers build online video. This API encompasses both Mux Video and Mux Data functionality to help you build your video-related projects better and faster than ever before. # noqa: E501 The version of the OpenAPI document: v1 Generated by: https://openapi-generator.tech """ import inspect import pprint import re # noqa: F401 import six from mux_python.configuration import Configuration class SignalLiveStreamCompleteResponse(object): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. """ """ Attributes: openapi_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ openapi_types = { 'data': 'object' } attribute_map = { 'data': 'data' } def __init__(self, data=None, local_vars_configuration=None): # noqa: E501 """SignalLiveStreamCompleteResponse - a model defined in OpenAPI""" # noqa: E501 if local_vars_configuration is None: local_vars_configuration = Configuration.get_default_copy() self.local_vars_configuration = local_vars_configuration self._data = None self.discriminator = None if data is not None: self.data = data @property def data(self): """Gets the data of this SignalLiveStreamCompleteResponse. # noqa: E501 :return: The data of this SignalLiveStreamCompleteResponse. # noqa: E501 :rtype: object """ return self._data @data.setter def data(self, data): """Sets the data of this SignalLiveStreamCompleteResponse. :param data: The data of this SignalLiveStreamCompleteResponse. # noqa: E501 :type data: object """ self._data = data def to_dict(self, serialize=False): """Returns the model properties as a dict""" result = {} def convert(x): if hasattr(x, "to_dict"): args = inspect.getargspec(x.to_dict).args if len(args) == 1: return x.to_dict() else: return x.to_dict(serialize) else: return x for attr, _ in six.iteritems(self.openapi_types): value = getattr(self, attr) attr = self.attribute_map.get(attr, attr) if serialize else attr if isinstance(value, list): result[attr] = list(map( lambda x: convert(x), value )) elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], convert(item[1])), value.items() )) else: result[attr] = convert(value) return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, SignalLiveStreamCompleteResponse): return False return self.to_dict() == other.to_dict() def __ne__(self, other): """Returns true if both objects are not equal""" if not isinstance(other, SignalLiveStreamCompleteResponse): return True return self.to_dict() != other.to_dict()
[ [ [ 347, 354 ], [ 2230, 2237 ] ], [ [ 362, 368 ], [ 3180, 3186 ] ], [ [ 376, 378 ] ], [ [ 400, 403 ], [ 2476, 2479 ] ], [ [ 442, 455 ], [ 1286, 1299 ] ], [ [ 464, 496 ], [ 3420, 3452 ], [ 3650, 3682 ] ] ]
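A brief usage sketch for the generated model above, assuming the mux_python package is installed and re-exports the model at the package top level (the usual openapi-generator layout):

from mux_python import SignalLiveStreamCompleteResponse

resp = SignalLiveStreamCompleteResponse(data={"status": "signalled"})

# to_dict() recursively converts attribute values; attribute_map only matters
# when serialize=True and the JSON key differs from the attribute name.
print(resp.to_dict())                  # {'data': {'status': 'signalled'}}

# __eq__/__ne__ compare the to_dict() representations.
assert resp == SignalLiveStreamCompleteResponse(data={"status": "signalled"})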
# This file helps to compute a version number in source trees obtained from # git-archive tarball (such as those provided by githubs download-from-tag # feature). Distribution tarballs (built by setup.py sdist) and build # directories (produced by setup.py build) will contain a much shorter file # that just contains the computed version number. # This file is released into the public domain. Generated by # versioneer-0.18 (https://github.com/warner/python-versioneer) """Git implementation of _version.py.""" import errno import os import re import subprocess import sys def get_keywords(): """Get the keywords needed to look up the version information.""" # these strings will be replaced by git during git-archive. # setup.py/versioneer.py will grep for the variable names, so they must # each be defined on a line of their own. _version.py will just call # get_keywords(). git_refnames = "$Format:%d$" git_full = "$Format:%H$" git_date = "$Format:%ci$" keywords = {"refnames": git_refnames, "full": git_full, "date": git_date} return keywords class VersioneerConfig: """Container for Versioneer configuration parameters.""" def get_config(): """Create, populate and return the VersioneerConfig() object.""" # these strings are filled in when 'setup.py versioneer' creates # _version.py cfg = VersioneerConfig() cfg.VCS = "git" cfg.style = "pep440-pre" cfg.tag_prefix = "v" cfg.parentdir_prefix = "None" cfg.versionfile_source = "keras_ocr/_version.py" cfg.verbose = False return cfg class NotThisMethod(Exception): """Exception raised if a method is not valid for the current scenario.""" LONG_VERSION_PY = {} HANDLERS = {} def register_vcs_handler(vcs, method): # decorator """Decorator to mark a method as the handler for a particular VCS.""" def decorate(f): """Store f in HANDLERS[vcs][method].""" if vcs not in HANDLERS: HANDLERS[vcs] = {} HANDLERS[vcs][method] = f return f return decorate def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, env=None): """Call the given command(s).""" assert isinstance(commands, list) p = None for c in commands: try: dispcmd = str([c] + args) # remember shell=False, so use git.cmd on windows, not just git p = subprocess.Popen([c] + args, cwd=cwd, env=env, stdout=subprocess.PIPE, stderr=(subprocess.PIPE if hide_stderr else None)) break except EnvironmentError: e = sys.exc_info()[1] if e.errno == errno.ENOENT: continue if verbose: print("unable to run %s" % dispcmd) print(e) return None, None else: if verbose: print("unable to find command, tried %s" % (commands, )) return None, None stdout = p.communicate()[0].strip() if sys.version_info[0] >= 3: stdout = stdout.decode() if p.returncode != 0: if verbose: print("unable to run %s (error)" % dispcmd) print("stdout was %s" % stdout) return None, p.returncode return stdout, p.returncode def versions_from_parentdir(parentdir_prefix, root, verbose): """Try to determine the version from the parent directory name. Source tarballs conventionally unpack into a directory that includes both the project name and a version string. 
We will also support searching up two directory levels for an appropriately named parent directory """ rootdirs = [] for i in range(3): dirname = os.path.basename(root) if dirname.startswith(parentdir_prefix): return { "version": dirname[len(parentdir_prefix):], "full-revisionid": None, "dirty": False, "error": None, "date": None } else: rootdirs.append(root) root = os.path.dirname(root) # up a level if verbose: print("Tried directories %s but none started with prefix %s" % (str(rootdirs), parentdir_prefix)) raise NotThisMethod("rootdir doesn't start with parentdir_prefix") @register_vcs_handler("git", "get_keywords") def git_get_keywords(versionfile_abs): """Extract version information from the given file.""" # the code embedded in _version.py can just fetch the value of these # keywords. When used from setup.py, we don't want to import _version.py, # so we do it with a regexp instead. This function is not used from # _version.py. keywords = {} try: f = open(versionfile_abs, "r") for line in f.readlines(): if line.strip().startswith("git_refnames ="): mo = re.search(r'=\s*"(.*)"', line) if mo: keywords["refnames"] = mo.group(1) if line.strip().startswith("git_full ="): mo = re.search(r'=\s*"(.*)"', line) if mo: keywords["full"] = mo.group(1) if line.strip().startswith("git_date ="): mo = re.search(r'=\s*"(.*)"', line) if mo: keywords["date"] = mo.group(1) f.close() except EnvironmentError: pass return keywords @register_vcs_handler("git", "keywords") def git_versions_from_keywords(keywords, tag_prefix, verbose): """Get version information from git keywords.""" if not keywords: raise NotThisMethod("no keywords at all, weird") date = keywords.get("date") if date is not None: # git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant # datestamp. However we prefer "%ci" (which expands to an "ISO-8601 # -like" string, which we must then edit to make compliant), because # it's been around since git-1.5.3, and it's too difficult to # discover which version we're using, or to work around using an # older one. date = date.strip().replace(" ", "T", 1).replace(" ", "", 1) refnames = keywords["refnames"].strip() if refnames.startswith("$Format"): if verbose: print("keywords are unexpanded, not using") raise NotThisMethod("unexpanded keywords, not a git-archive tarball") refs = set([r.strip() for r in refnames.strip("()").split(",")]) # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of # just "foo-1.0". If we see a "tag: " prefix, prefer those. TAG = "tag: " tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)]) if not tags: # Either we're using git < 1.8.3, or there really are no tags. We use # a heuristic: assume all version tags have a digit. The old git %d # expansion behaves like git log --decorate=short and strips out the # refs/heads/ and refs/tags/ prefixes that would let us distinguish # between branches and tags. By ignoring refnames without digits, we # filter out many common branch names like "release" and # "stabilization", as well as "HEAD" and "master". tags = set([r for r in refs if re.search(r'\d', r)]) if verbose: print("discarding '%s', no digits" % ",".join(refs - tags)) if verbose: print("likely tags: %s" % ",".join(sorted(tags))) for ref in sorted(tags): # sorting will prefer e.g. 
"2.0" over "2.0rc1" if ref.startswith(tag_prefix): r = ref[len(tag_prefix):] if verbose: print("picking %s" % r) return { "version": r, "full-revisionid": keywords["full"].strip(), "dirty": False, "error": None, "date": date } # no suitable tags, so version is "0+unknown", but full hex is still there if verbose: print("no suitable tags, using unknown + full revision id") return { "version": "0+unknown", "full-revisionid": keywords["full"].strip(), "dirty": False, "error": "no suitable tags", "date": None } @register_vcs_handler("git", "pieces_from_vcs") def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): """Get version from 'git describe' in the root of the source tree. This only gets called if the git-archive 'subst' keywords were *not* expanded, and _version.py hasn't already been rewritten with a short version string, meaning we're inside a checked out source tree. """ GITS = ["git"] if sys.platform == "win32": GITS = ["git.cmd", "git.exe"] out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root, hide_stderr=True) if rc != 0: if verbose: print("Directory %s not under git control" % root) raise NotThisMethod("'git rev-parse --git-dir' returned error") # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty] # if there isn't one, this yields HEX[-dirty] (no NUM) describe_out, rc = run_command( GITS, ["describe", "--tags", "--dirty", "--always", "--long", "--match", "%s*" % tag_prefix], cwd=root) # --long was added in git-1.5.5 if describe_out is None: raise NotThisMethod("'git describe' failed") describe_out = describe_out.strip() full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root) if full_out is None: raise NotThisMethod("'git rev-parse' failed") full_out = full_out.strip() pieces = {} pieces["long"] = full_out pieces["short"] = full_out[:7] # maybe improved later pieces["error"] = None # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty] # TAG might have hyphens. git_describe = describe_out # look for -dirty suffix dirty = git_describe.endswith("-dirty") pieces["dirty"] = dirty if dirty: git_describe = git_describe[:git_describe.rindex("-dirty")] # now we have TAG-NUM-gHEX or HEX if "-" in git_describe: # TAG-NUM-gHEX mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe) if not mo: # unparseable. Maybe git-describe is misbehaving? pieces["error"] = ("unable to parse git-describe output: '%s'" % describe_out) return pieces # tag full_tag = mo.group(1) if not full_tag.startswith(tag_prefix): if verbose: fmt = "tag '%s' doesn't start with prefix '%s'" print(fmt % (full_tag, tag_prefix)) pieces["error"] = ("tag '%s' doesn't start with prefix '%s'" % (full_tag, tag_prefix)) return pieces pieces["closest-tag"] = full_tag[len(tag_prefix):] # distance: number of commits since tag pieces["distance"] = int(mo.group(2)) # commit: short hex revision ID pieces["short"] = mo.group(3) else: # HEX: no tags pieces["closest-tag"] = None count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"], cwd=root) pieces["distance"] = int(count_out) # total number of commits # commit date: see ISO-8601 comment in git_versions_from_keywords() date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"], cwd=root)[0].strip() pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1) return pieces def plus_or_dot(pieces): """Return a + if we don't already have one, else return a .""" if "+" in pieces.get("closest-tag", ""): return "." 
return "+" def render_pep440(pieces): """Build up version string, with post-release "local version identifier". Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty Exceptions: 1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty] """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"] or pieces["dirty"]: rendered += plus_or_dot(pieces) rendered += "%d.g%s" % (pieces["distance"], pieces["short"]) if pieces["dirty"]: rendered += ".dirty" else: # exception #1 rendered = "0+untagged.%d.g%s" % (pieces["distance"], pieces["short"]) if pieces["dirty"]: rendered += ".dirty" return rendered def render_pep440_pre(pieces): """TAG[.post.devDISTANCE] -- No -dirty. Exceptions: 1: no tags. 0.post.devDISTANCE """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"]: rendered += ".post.dev%d" % pieces["distance"] else: # exception #1 rendered = "0.post.dev%d" % pieces["distance"] return rendered def render_pep440_post(pieces): """TAG[.postDISTANCE[.dev0]+gHEX] . The ".dev0" means dirty. Note that .dev0 sorts backwards (a dirty tree will appear "older" than the corresponding clean one), but you shouldn't be releasing software with -dirty anyways. Exceptions: 1: no tags. 0.postDISTANCE[.dev0] """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"] or pieces["dirty"]: rendered += ".post%d" % pieces["distance"] if pieces["dirty"]: rendered += ".dev0" rendered += plus_or_dot(pieces) rendered += "g%s" % pieces["short"] else: # exception #1 rendered = "0.post%d" % pieces["distance"] if pieces["dirty"]: rendered += ".dev0" rendered += "+g%s" % pieces["short"] return rendered def render_pep440_old(pieces): """TAG[.postDISTANCE[.dev0]] . The ".dev0" means dirty. Eexceptions: 1: no tags. 0.postDISTANCE[.dev0] """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"] or pieces["dirty"]: rendered += ".post%d" % pieces["distance"] if pieces["dirty"]: rendered += ".dev0" else: # exception #1 rendered = "0.post%d" % pieces["distance"] if pieces["dirty"]: rendered += ".dev0" return rendered def render_git_describe(pieces): """TAG[-DISTANCE-gHEX][-dirty]. Like 'git describe --tags --dirty --always'. Exceptions: 1: no tags. HEX[-dirty] (note: no 'g' prefix) """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"]: rendered += "-%d-g%s" % (pieces["distance"], pieces["short"]) else: # exception #1 rendered = pieces["short"] if pieces["dirty"]: rendered += "-dirty" return rendered def render_git_describe_long(pieces): """TAG-DISTANCE-gHEX[-dirty]. Like 'git describe --tags --dirty --always -long'. The distance/hash is unconditional. Exceptions: 1: no tags. 
HEX[-dirty] (note: no 'g' prefix) """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] rendered += "-%d-g%s" % (pieces["distance"], pieces["short"]) else: # exception #1 rendered = pieces["short"] if pieces["dirty"]: rendered += "-dirty" return rendered def render(pieces, style): """Render the given version pieces into the requested style.""" if pieces["error"]: return { "version": "unknown", "full-revisionid": pieces.get("long"), "dirty": None, "error": pieces["error"], "date": None } if not style or style == "default": style = "pep440" # the default if style == "pep440": rendered = render_pep440(pieces) elif style == "pep440-pre": rendered = render_pep440_pre(pieces) elif style == "pep440-post": rendered = render_pep440_post(pieces) elif style == "pep440-old": rendered = render_pep440_old(pieces) elif style == "git-describe": rendered = render_git_describe(pieces) elif style == "git-describe-long": rendered = render_git_describe_long(pieces) else: raise ValueError("unknown style '%s'" % style) return { "version": rendered, "full-revisionid": pieces["long"], "dirty": pieces["dirty"], "error": None, "date": pieces.get("date") } def get_versions(): """Get version information or return default if unable to do so.""" # I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have # __file__, we can work backwards from there to the root. Some # py2exe/bbfreeze/non-CPython implementations don't do __file__, in which # case we can only use expanded keywords. cfg = get_config() verbose = cfg.verbose try: return git_versions_from_keywords(get_keywords(), cfg.tag_prefix, verbose) except NotThisMethod: pass try: root = os.path.realpath(__file__) # versionfile_source is the relative path from the top of the source # tree (where the .git directory might live) to this file. Invert # this to find the root from __file__. for i in cfg.versionfile_source.split('/'): root = os.path.dirname(root) except NameError: return { "version": "0+unknown", "full-revisionid": None, "dirty": None, "error": "unable to find root of source tree", "date": None } try: pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose) return render(pieces, cfg.style) except NotThisMethod: pass try: if cfg.parentdir_prefix: return versions_from_parentdir(cfg.parentdir_prefix, root, verbose) except NotThisMethod: pass return { "version": "0+unknown", "full-revisionid": None, "dirty": None, "error": "unable to compute version", "date": None }
[ [ [ 522, 527 ], [ 2775, 2780 ] ], [ [ 535, 537 ], [ 3813, 3815 ], [ 4180, 4182 ], [ 17284, 17286 ], [ 17580, 17582 ] ], [ [ 545, 547 ], [ 4991, 4993 ], [ 5175, 5177 ], [ 5355, 5357 ], [ 7385, 7387 ], [ 10347, 10349 ] ], [ [ 555, 565 ], [ 2410, 2420 ], [ 2563, 2573 ], [ 2621, 2631 ] ], [ [ 573, 576 ], [ 2731, 2734 ], [ 3117, 3120 ], [ 8812, 8815 ] ], [ [ 583, 595 ], [ 17179, 17191 ] ], [ [ 1102, 1118 ], [ 1367, 1383 ] ], [ [ 1187, 1197 ], [ 17088, 17098 ] ], [ [ 1594, 1607 ], [ 4363, 4376 ], [ 5734, 5747 ], [ 6466, 6479 ], [ 9077, 9090 ], [ 9525, 9538 ], [ 9713, 9726 ], [ 17231, 17244 ], [ 17965, 17978 ], [ 18127, 18140 ] ], [ [ 1700, 1715 ] ], [ [ 1721, 1729 ], [ 1954, 1962 ], [ 1976, 1984 ], [ 2003, 2011 ] ], [ [ 1741, 1761 ], [ 4427, 4447 ], [ 5543, 5563 ], [ 8368, 8388 ] ], [ [ 2073, 2084 ], [ 8478, 8489 ] ], [ [ 3394, 3417 ], [ 18055, 18078 ] ], [ [ 4475, 4491 ] ], [ [ 5587, 5613 ], [ 17152, 17178 ] ], [ [ 8419, 8438 ], [ 17862, 17881 ] ], [ [ 11681, 11692 ], [ 12333, 12344 ], [ 13699, 13710 ] ], [ [ 11854, 11867 ], [ 16038, 16051 ] ], [ [ 12694, 12711 ], [ 16111, 16128 ] ], [ [ 13100, 13118 ], [ 16189, 16207 ] ], [ [ 13982, 13999 ], [ 16267, 16284 ] ], [ [ 14552, 14571 ], [ 16346, 16365 ] ], [ [ 15066, 15090 ], [ 16432, 16456 ] ], [ [ 15594, 15600 ], [ 17928, 17934 ] ], [ [ 16720, 16732 ] ] ]
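The _version.py above is configured with style = "pep440-pre", so render() dispatches to render_pep440_pre(). A small self-contained sketch of that rendering, restating the same logic inline instead of importing the module (which normally lives inside a package):

def render_pep440_pre(pieces):
    # TAG[.post.devDISTANCE] -- mirrors the function of the same name above.
    if pieces["closest-tag"]:
        rendered = pieces["closest-tag"]
        if pieces["distance"]:
            rendered += ".post.dev%d" % pieces["distance"]
    else:
        rendered = "0.post.dev%d" % pieces["distance"]
    return rendered

print(render_pep440_pre({"closest-tag": "1.4.0", "distance": 0}))  # 1.4.0
print(render_pep440_pre({"closest-tag": "1.4.0", "distance": 3}))  # 1.4.0.post.dev3
print(render_pep440_pre({"closest-tag": None, "distance": 7}))     # 0.post.dev7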
############################################################################### ## ## Copyright (C) 2011-2014, NYU-Poly. ## Copyright (C) 2006-2011, University of Utah. ## All rights reserved. ## Contact: contact@vistrails.org ## ## This file is part of VisTrails. ## ## "Redistribution and use in source and binary forms, with or without ## modification, are permitted provided that the following conditions are met: ## ## - Redistributions of source code must retain the above copyright notice, ## this list of conditions and the following disclaimer. ## - Redistributions in binary form must reproduce the above copyright ## notice, this list of conditions and the following disclaimer in the ## documentation and/or other materials provided with the distribution. ## - Neither the name of the University of Utah nor the names of its ## contributors may be used to endorse or promote products derived from ## this software without specific prior written permission. ## ## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" ## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, ## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR ## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR ## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, ## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, ## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; ## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, ## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR ## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE." ## ############################################################################### from datetime import date, datetime from vistrails.core.system import strftime, time_strptime class XMLDAO: def __init__(self): pass def getAttribute(self, node, attr): try: attribute = node.attributes.get(attr) if attribute is not None: return attribute.value except KeyError: pass return None def convertFromStr(self, value, type): if value is not None: if type == 'str': return str(value) elif value.strip() != '': if type == 'long': return long(value) elif type == 'float': return float(value) elif type == 'int': return int(value) elif type == 'date': return date(*time_strptime(value, '%Y-%m-%d')[0:3]) elif type == 'datetime': return datetime(*time_strptime(value, '%Y-%m-%d %H:%M:%S')[0:6]) return None def convertToStr(self, value, type): if value is not None: if type == 'date': return value.isoformat() elif type == 'datetime': return strftime(value, '%Y-%m-%d %H:%M:%S') else: return str(value) return ''
[ [ [ 1902, 1906 ], [ 2737, 2741 ] ], [ [ 1908, 1916 ], [ 2850, 2858 ] ], [ [ 1952, 1960 ], [ 3132, 3140 ] ], [ [ 1962, 1975 ], [ 2743, 2756 ], [ 2860, 2873 ] ], [ [ 1983, 1989 ] ] ]
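A self-contained sketch of the date and datetime round-trips that convertFromStr/convertToStr above perform, using the standard library directly in place of the vistrails.core.system strftime/time_strptime wrappers that the real class relies on:

import time
from datetime import date, datetime

value = "2014-03-17"
d = date(*time.strptime(value, '%Y-%m-%d')[0:3])        # as in convertFromStr('date')
assert d.isoformat() == value                        # as in convertToStr('date')

stamp = "2014-03-17 09:30:00"
dt = datetime(*time.strptime(stamp, '%Y-%m-%d %H:%M:%S')[0:6])
assert dt.strftime('%Y-%m-%d %H:%M:%S') == stamp     # as in convertToStr('datetime')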
# Copyright (c) 2012 Google Inc. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """ This module helps emulate Visual Studio 2008 behavior on top of other build systems, primarily ninja. """ import collections import os import pickle import re import subprocess import sys import time import hashlib from gyp.common import OrderedSet import gyp.MSVSUtil import gyp.MSVSVersion from gyp import DebugOutput, DEBUG_GENERAL try: import sys reload(sys) sys.setdefaultencoding('utf8') except: pass try: basestring = basestring except NameError: basestring = str windows_quoter_regex = re.compile(r'(\\*)"') def QuoteForRspFile(arg): """Quote a command line argument so that it appears as one argument when processed via cmd.exe and parsed by CommandLineToArgvW (as is typical for Windows programs).""" # See http://goo.gl/cuFbX and http://goo.gl/dhPnp including the comment # threads. This is actually the quoting rules for CommandLineToArgvW, not # for the shell, because the shell doesn't do anything in Windows. This # works more or less because most programs (including the compiler, etc.) # use that function to handle command line arguments. # Use a heuristic to try to find args that are paths, and normalize them if arg.find('/') > 0 or arg.count('/') > 1: arg = os.path.normpath(arg) # For a literal quote, CommandLineToArgvW requires 2n+1 backslashes # preceding it, and results in n backslashes + the quote. So we substitute # in 2* what we match, +1 more, plus the quote. arg = windows_quoter_regex.sub(lambda mo: 2 * mo.group(1) + '\\"', arg) # %'s also need to be doubled otherwise they're interpreted as batch # positional arguments. Also make sure to escape the % so that they're # passed literally through escaping so they can be singled to just the # original %. Otherwise, trying to pass the literal representation that # looks like an environment variable to the shell (e.g. %PATH%) would fail. arg = arg.replace('%', '%%') # These commands are used in rsp files, so no escaping for the shell (via ^) # is necessary. # Finally, wrap the whole thing in quotes so that the above quote rule # applies and whitespace isn't a word break. return '"' + arg + '"' def EncodeRspFileList(args): """Process a list of arguments using QuoteCmdExeArgument.""" # Note that the first argument is assumed to be the command. Don't add # quotes around it because then built-ins like 'echo', etc. won't work. # Take care to normpath only the path in the case of 'call ../x.bat' because # otherwise the whole thing is incorrectly interpreted as a path and not # normalized correctly. 
if not args: return '' if args[0].startswith('call '): call, program = args[0].split(' ', 1) program = call + ' ' + os.path.normpath(program) else: program = os.path.normpath(args[0]) return program + ' ' + ' '.join(QuoteForRspFile(arg) for arg in args[1:]) def _GenericRetrieve(root, default, path): """Given a list of dictionary keys |path| and a tree of dicts |root|, find value at path, or return |default| if any of the path doesn't exist.""" if not root: return default if not path: return root return _GenericRetrieve(root.get(path[0]), default, path[1:]) def _AddPrefix(element, prefix): """Add |prefix| to |element| or each subelement if element is iterable.""" if element is None: return element if (isinstance(element, collections.Iterable) and not isinstance(element, basestring)): return [prefix + e for e in element] else: return prefix + element def _DoRemapping(element, map): """If |element| then remap it through |map|. If |element| is iterable then each item will be remapped. Any elements not found will be removed.""" if map is not None and element is not None: if not callable(map): map = map.get # Assume it's a dict, otherwise a callable to do the remap. if (isinstance(element, collections.Iterable) and not isinstance(element, basestring)): element = filter(None, [map(elem) for elem in element]) else: element = map(element) return element def _AppendOrReturn(append, element): """If |append| is None, simply return |element|. If |append| is not None, then add |element| to it, adding each item in |element| if it's a list or tuple.""" if append is not None and element is not None: if (isinstance(element, collections.Iterable) and not isinstance(element, basestring)): append.extend(element) else: append.append(element) else: return element def _FindDirectXInstallation(): """Try to find an installation location for the DirectX SDK. Check for the standard environment variable, and if that doesn't exist, try to find via the registry. May return None if not found in either location.""" # Return previously calculated value, if there is one if hasattr(_FindDirectXInstallation, 'dxsdk_dir'): return _FindDirectXInstallation.dxsdk_dir dxsdk_dir = os.environ.get('DXSDK_DIR') if not dxsdk_dir: # Setup params to pass to and attempt to launch reg.exe. cmd = ['reg.exe', 'query', r'HKLM\Software\Microsoft\DirectX', '/s'] p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) for line in p.communicate()[0].splitlines(): if isinstance(line, bytes): line = line.decode() if 'InstallPath' in line: dxsdk_dir = line.split(' ')[3] + "\\" # Cache return value _FindDirectXInstallation.dxsdk_dir = dxsdk_dir return dxsdk_dir def GetGlobalVSMacroEnv(vs_version): """Get a dict of variables mapping internal VS macro names to their gyp equivalents. Returns all variables that are independent of the target.""" env = {} # '$(VSInstallDir)' and '$(VCInstallDir)' are available when and only when # Visual Studio is actually installed. if vs_version.Path(): env['$(VSInstallDir)'] = vs_version.Path() env['$(VCInstallDir)'] = os.path.join(vs_version.Path().decode(), 'VC') + '\\' # Chromium uses DXSDK_DIR in include/lib paths, but it may or may not be # set. This happens when the SDK is sync'd via src-internal, rather than # by typical end-user installation of the SDK. If it's not set, we don't # want to leave the unexpanded variable in the path, so simply strip it. 
dxsdk_dir = _FindDirectXInstallation() env['$(DXSDK_DIR)'] = dxsdk_dir if dxsdk_dir else '' # Try to find an installation location for the Windows DDK by checking # the WDK_DIR environment variable, may be None. env['$(WDK_DIR)'] = os.environ.get('WDK_DIR', '') return env def ExtractSharedMSVSSystemIncludes(configs, generator_flags): """Finds msvs_system_include_dirs that are common to all targets, removes them from all targets, and returns an OrderedSet containing them.""" all_system_includes = OrderedSet( configs[0].get('msvs_system_include_dirs', [])) for config in configs[1:]: system_includes = config.get('msvs_system_include_dirs', []) all_system_includes = all_system_includes & OrderedSet(system_includes) if not all_system_includes: return None # Expand macros in all_system_includes. env = GetGlobalVSMacroEnv(GetVSVersion(generator_flags)) expanded_system_includes = OrderedSet([ExpandMacros(include, env) for include in all_system_includes]) if any(['$' in include for include in expanded_system_includes]): # Some path relies on target-specific variables, bail. return None # Remove system includes shared by all targets from the targets. for config in configs: includes = config.get('msvs_system_include_dirs', []) if includes: # Don't insert a msvs_system_include_dirs key if not needed. # This must check the unexpanded includes list: new_includes = [i for i in includes if i not in all_system_includes] config['msvs_system_include_dirs'] = new_includes return expanded_system_includes class MsvsSettings(object): """A class that understands the gyp 'msvs_...' values (especially the msvs_settings field). They largely correpond to the VS2008 IDE DOM. This class helps map those settings to command line options.""" def __init__(self, spec, generator_flags): self.spec = spec self.vs_version = GetVSVersion(generator_flags) supported_fields = [ ('msvs_configuration_attributes', dict), ('msvs_settings', dict), ('msvs_system_include_dirs', list), ('msvs_disabled_warnings', list), ('msvs_precompiled_header', str), ('msvs_precompiled_source', str), ('msvs_configuration_platform', str), ('msvs_target_platform', str), ] configs = spec['configurations'] for field, default in supported_fields: setattr(self, field, {}) for configname, config in configs.items(): getattr(self, field)[configname] = config.get(field, default()) self.msvs_cygwin_dirs = spec.get('msvs_cygwin_dirs', ['.']) unsupported_fields = [ 'msvs_prebuild', 'msvs_postbuild', ] unsupported = [] for field in unsupported_fields: for config in configs.values(): if field in config: unsupported += ["%s not supported (target %s)." % (field, spec['target_name'])] if unsupported: raise Exception('\n'.join(unsupported)) def GetExtension(self): """Returns the extension for the target, with no leading dot. Uses 'product_extension' if specified, otherwise uses MSVS defaults based on the target type. """ ext = self.spec.get('product_extension', None) if ext: return ext return gyp.MSVSUtil.TARGET_TYPE_EXT.get(self.spec['type'], '') def GetVSMacroEnv(self, base_to_build=None, config=None): """Get a dict of variables mapping internal VS macro names to their gyp equivalents.""" target_platform = 'Win32' if self.GetArch(config) == 'x86' else 'x64' target_name = self.spec.get('product_prefix', '') + \ self.spec.get('product_name', self.spec['target_name']) target_dir = base_to_build + '\\' if base_to_build else '' target_ext = '.' 
+ self.GetExtension() target_file_name = target_name + target_ext replacements = { '$(InputName)': '${root}', '$(InputPath)': '${source}', '$(IntDir)': '$!INTERMEDIATE_DIR', '$(OutDir)\\': target_dir, '$(PlatformName)': target_platform, '$(ProjectDir)\\': '', '$(ProjectName)': self.spec['target_name'], '$(TargetDir)\\': target_dir, '$(TargetExt)': target_ext, '$(TargetFileName)': target_file_name, '$(TargetName)': target_name, '$(TargetPath)': os.path.join(target_dir, target_file_name), } replacements.update(GetGlobalVSMacroEnv(self.vs_version)) return replacements def ConvertVSMacros(self, s, base_to_build=None, config=None): """Convert from VS macro names to something equivalent.""" env = self.GetVSMacroEnv(base_to_build, config=config) return ExpandMacros(s, env) def AdjustLibraries(self, libraries): """Strip -l from library if it's specified with that.""" libs = [lib[2:] if lib.startswith('-l') else lib for lib in libraries] return [lib + '.lib' if not lib.lower().endswith('.lib') else lib for lib in libs] def _GetAndMunge(self, field, path, default, prefix, append, map): """Retrieve a value from |field| at |path| or return |default|. If |append| is specified, and the item is found, it will be appended to that object instead of returned. If |map| is specified, results will be remapped through |map| before being returned or appended.""" result = _GenericRetrieve(field, default, path) result = _DoRemapping(result, map) result = _AddPrefix(result, prefix) return _AppendOrReturn(append, result) class _GetWrapper(object): def __init__(self, parent, field, base_path, append=None): self.parent = parent self.field = field self.base_path = [base_path] self.append = append def __call__(self, name, map=None, prefix='', default=None): return self.parent._GetAndMunge(self.field, self.base_path + [name], default=default, prefix=prefix, append=self.append, map=map) def GetArch(self, config): """Get architecture based on msvs_configuration_platform and msvs_target_platform. Returns either 'x86' or 'x64'.""" configuration_platform = self.msvs_configuration_platform.get(config, '') platform = self.msvs_target_platform.get(config, '') if not platform: # If no specific override, use the configuration's. platform = configuration_platform # Map from platform to architecture. return {'Win32': 'x86', 'x64': 'x64'}.get(platform, 'x86') def _TargetConfig(self, config): """Returns the target-specific configuration.""" # There's two levels of architecture/platform specification in VS. The # first level is globally for the configuration (this is what we consider # "the" config at the gyp level, which will be something like 'Debug' or # 'Release'), VS2015 and later only use this level if int(self.vs_version.short_name) >= 2015: return config # and a second target-specific configuration, which is an # override for the global one. |config| is remapped here to take into # account the local target-specific overrides to the global configuration. 
#arch = self.GetArch(config) #if arch == 'x64' and not config.endswith('_x64'): # config += '_x64' #if arch == 'x86' and config.endswith('_x64'): # config = config.rsplit('_', 1)[0] return config def _Setting(self, path, config, default=None, prefix='', append=None, map=None): """_GetAndMunge for msvs_settings.""" return self._GetAndMunge( self.msvs_settings[config], path, default, prefix, append, map) def _ConfigAttrib(self, path, config, default=None, prefix='', append=None, map=None): """_GetAndMunge for msvs_configuration_attributes.""" return self._GetAndMunge( self.msvs_configuration_attributes[config], path, default, prefix, append, map) def AdjustIncludeDirs(self, include_dirs, config): """Updates include_dirs to expand VS specific paths, and adds the system include dirs used for platform SDK and similar.""" config = self._TargetConfig(config) includes = include_dirs + self.msvs_system_include_dirs[config] includes.extend(self._Setting( ('VCCLCompilerTool', 'AdditionalIncludeDirectories'), config, default=[])) return [self.ConvertVSMacros(p, config=config) for p in includes] def AdjustMidlIncludeDirs(self, midl_include_dirs, config): """Updates midl_include_dirs to expand VS specific paths, and adds the system include dirs used for platform SDK and similar.""" config = self._TargetConfig(config) includes = midl_include_dirs + self.msvs_system_include_dirs[config] includes.extend(self._Setting( ('VCMIDLTool', 'AdditionalIncludeDirectories'), config, default=[])) return [self.ConvertVSMacros(p, config=config) for p in includes] def GetComputedDefines(self, config): """Returns the set of defines that are injected to the defines list based on other VS settings.""" config = self._TargetConfig(config) defines = [] if self._ConfigAttrib(['CharacterSet'], config) == '1': defines.extend(('_UNICODE', 'UNICODE')) if self._ConfigAttrib(['CharacterSet'], config) == '2': defines.append('_MBCS') defines.extend(self._Setting( ('VCCLCompilerTool', 'PreprocessorDefinitions'), config, default=[])) return defines def GetCompilerPdbName(self, config, expand_special): """Get the pdb file name that should be used for compiler invocations, or None if there's no explicit name specified.""" config = self._TargetConfig(config) pdbname = self._Setting( ('VCCLCompilerTool', 'ProgramDataBaseFileName'), config) if pdbname: pdbname = expand_special(self.ConvertVSMacros(pdbname)) return pdbname def GetMapFileName(self, config, expand_special): """Gets the explicitly overriden map file name for a target or returns None if it's not set.""" config = self._TargetConfig(config) map_file = self._Setting(('VCLinkerTool', 'MapFileName'), config) if map_file: map_file = expand_special(self.ConvertVSMacros(map_file, config=config)) return map_file def GetOutputName(self, config, expand_special): """Gets the explicitly overridden output name for a target or returns None if it's not overridden.""" config = self._TargetConfig(config) type = self.spec['type'] root = 'VCLibrarianTool' if type == 'static_library' else 'VCLinkerTool' # TODO(scottmg): Handle OutputDirectory without OutputFile. 
output_file = self._Setting((root, 'OutputFile'), config) if output_file: output_file = expand_special(self.ConvertVSMacros( output_file, config=config)) return output_file def GetPDBName(self, config, expand_special, default): """Gets the explicitly overridden pdb name for a target or returns default if it's not overridden, or if no pdb will be generated.""" config = self._TargetConfig(config) output_file = self._Setting(('VCLinkerTool', 'ProgramDatabaseFile'), config) generate_debug_info = self._Setting( ('VCLinkerTool', 'GenerateDebugInformation'), config) if generate_debug_info == 'true': if output_file: return expand_special(self.ConvertVSMacros(output_file, config=config)) else: return default else: return None def GetNoImportLibrary(self, config): """If NoImportLibrary: true, ninja will not expect the output to include an import library.""" config = self._TargetConfig(config) noimplib = self._Setting(('NoImportLibrary',), config) return noimplib == 'true' def GetAsmflags(self, config): """Returns the flags that need to be added to ml invocations.""" config = self._TargetConfig(config) asmflags = [] safeseh = self._Setting(('MASM', 'UseSafeExceptionHandlers'), config) if safeseh == 'true': asmflags.append('/safeseh') return asmflags def GetCflags(self, config): """Returns the flags that need to be added to .c and .cc compilations.""" config = self._TargetConfig(config) cflags = [] cflags.extend(['/wd' + w for w in self.msvs_disabled_warnings[config]]) cl = self._GetWrapper(self, self.msvs_settings[config], 'VCCLCompilerTool', append=cflags) cl('Optimization', map={'0': 'd', '1': '1', '2': '2', '3': 'x'}, prefix='/O', default='2') cl('InlineFunctionExpansion', prefix='/Ob') cl('DisableSpecificWarnings', prefix='/wd') cl('StringPooling', map={'true': '/GF'}) cl('EnableFiberSafeOptimizations', map={'true': '/GT'}) cl('OmitFramePointers', map={'false': '-', 'true': ''}, prefix='/Oy') cl('EnableIntrinsicFunctions', map={'false': '-', 'true': ''}, prefix='/Oi') cl('FavorSizeOrSpeed', map={'1': 't', '2': 's'}, prefix='/O') cl('FloatingPointModel', map={'0': 'precise', '1': 'strict', '2': 'fast'}, prefix='/fp:', default='0') cl('CompileAsManaged', map={'false': '', 'true': '/clr'}) cl('WholeProgramOptimization', map={'true': '/GL'}) cl('WarningLevel', prefix='/W') cl('WarnAsError', map={'true': '/WX'}) cl('CallingConvention', map={'0': 'd', '1': 'r', '2': 'z', '3': 'v'}, prefix='/G') cl('DebugInformationFormat', map={'1': '7', '3': 'i', '4': 'I'}, prefix='/Z') cl('RuntimeTypeInfo', map={'true': '/GR', 'false': '/GR-'}) cl('EnableFunctionLevelLinking', map={'true': '/Gy', 'false': '/Gy-'}) cl('MinimalRebuild', map={'true': '/Gm'}) cl('BufferSecurityCheck', map={'true': '/GS', 'false': '/GS-'}) cl('BasicRuntimeChecks', map={'1': 's', '2': 'u', '3': '1'}, prefix='/RTC') cl('RuntimeLibrary', map={'0': 'T', '1': 'Td', '2': 'D', '3': 'Dd'}, prefix='/M') cl('ExceptionHandling', map={'1': 'sc','2': 'a'}, prefix='/EH') cl('DefaultCharIsUnsigned', map={'true': '/J'}) cl('TreatWChar_tAsBuiltInType', map={'false': '-', 'true': ''}, prefix='/Zc:wchar_t') cl('EnablePREfast', map={'true': '/analyze'}) cl('AdditionalOptions', prefix='') cl('EnableEnhancedInstructionSet', map={'1': 'SSE', '2': 'SSE2', '3': 'AVX', '4': 'IA32', '5': 'AVX2'}, prefix='/arch:') cflags.extend(['/FI' + f for f in self._Setting( ('VCCLCompilerTool', 'ForcedIncludeFiles'), config, default=[])]) if float(self.vs_version.project_version) >= 12.0: # New flag introduced in VS2013 (project version 12.0) Forces writes to # the program 
database (PDB) to be serialized through MSPDBSRV.EXE. # https://msdn.microsoft.com/en-us/library/dn502518.aspx cflags.append('/FS') # ninja handles parallelism by itself, don't have the compiler do it too. cflags = [x for x in cflags if not x.startswith('/MP')] return cflags def _GetPchFlags(self, config, extension): """Get the flags to be added to the cflags for precompiled header support. """ config = self._TargetConfig(config) # The PCH is only built once by a particular source file. Usage of PCH must # only be for the same language (i.e. C vs. C++), so only include the pch # flags when the language matches. if self.msvs_precompiled_header[config]: source_ext = os.path.splitext(self.msvs_precompiled_source[config])[1] if _LanguageMatchesForPch(source_ext, extension): pch = self.msvs_precompiled_header[config] pchbase = os.path.split(pch)[1] return ['/Yu' + pch, '/FI' + pch, '/Fp${pchprefix}.' + pchbase + '.pch'] return [] def GetCflagsC(self, config): """Returns the flags that need to be added to .c compilations.""" config = self._TargetConfig(config) return self._GetPchFlags(config, '.c') def GetCflagsCC(self, config): """Returns the flags that need to be added to .cc compilations.""" config = self._TargetConfig(config) return ['/TP'] + self._GetPchFlags(config, '.cc') def _GetAdditionalLibraryDirectories(self, root, config, gyp_to_build_path): """Get and normalize the list of paths in AdditionalLibraryDirectories setting.""" config = self._TargetConfig(config) libpaths = self._Setting((root, 'AdditionalLibraryDirectories'), config, default=[]) libpaths = [os.path.normpath( gyp_to_build_path(self.ConvertVSMacros(p, config=config))) for p in libpaths] return ['/LIBPATH:"' + p + '"' for p in libpaths] def GetLibFlags(self, config, gyp_to_build_path): """Returns the flags that need to be added to lib commands.""" config = self._TargetConfig(config) libflags = [] lib = self._GetWrapper(self, self.msvs_settings[config], 'VCLibrarianTool', append=libflags) libflags.extend(self._GetAdditionalLibraryDirectories( 'VCLibrarianTool', config, gyp_to_build_path)) lib('LinkTimeCodeGeneration', map={'true': '/LTCG'}) lib('TargetMachine', map={'1': 'X86', '17': 'X64', '3': 'ARM'}, prefix='/MACHINE:') lib('AdditionalOptions') return libflags def GetDefFile(self, gyp_to_build_path): """Returns the .def file from sources, if any. Otherwise returns None.""" spec = self.spec if spec['type'] in ('shared_library', 'loadable_module', 'executable'): def_files = [s for s in spec.get('sources', []) if s.lower().endswith('.def')] if len(def_files) == 1: return gyp_to_build_path(def_files[0]) elif len(def_files) > 1: raise Exception("Multiple .def files") return None def _GetDefFileAsLdflags(self, ldflags, gyp_to_build_path): """.def files get implicitly converted to a ModuleDefinitionFile for the linker in the VS generator. 
Emulate that behaviour here.""" def_file = self.GetDefFile(gyp_to_build_path) if def_file: ldflags.append('/DEF:"%s"' % def_file) def GetPGDName(self, config, expand_special): """Gets the explicitly overridden pgd name for a target or returns None if it's not overridden.""" config = self._TargetConfig(config) output_file = self._Setting( ('VCLinkerTool', 'ProfileGuidedDatabase'), config) if output_file: output_file = expand_special(self.ConvertVSMacros( output_file, config=config)) return output_file def GetLdflags(self, config, gyp_to_build_path, expand_special, manifest_base_name, output_name, is_executable, build_dir): """Returns the flags that need to be added to link commands, and the manifest files.""" config = self._TargetConfig(config) ldflags = [] ld = self._GetWrapper(self, self.msvs_settings[config], 'VCLinkerTool', append=ldflags) self._GetDefFileAsLdflags(ldflags, gyp_to_build_path) ld('GenerateDebugInformation', map={'true': '/DEBUG'}) ld('TargetMachine', map={'1': 'X86', '17': 'X64', '3': 'ARM'}, prefix='/MACHINE:') ldflags.extend(self._GetAdditionalLibraryDirectories( 'VCLinkerTool', config, gyp_to_build_path)) ld('DelayLoadDLLs', prefix='/DELAYLOAD:') ld('TreatLinkerWarningAsErrors', prefix='/WX', map={'true': '', 'false': ':NO'}) out = self.GetOutputName(config, expand_special) if out: ldflags.append('/OUT:' + out) pdb = self.GetPDBName(config, expand_special, output_name + '.pdb') if pdb: ldflags.append('/PDB:' + pdb) pgd = self.GetPGDName(config, expand_special) if pgd: ldflags.append('/PGD:' + pgd) map_file = self.GetMapFileName(config, expand_special) ld('GenerateMapFile', map={'true': '/MAP:' + map_file if map_file else '/MAP'}) ld('MapExports', map={'true': '/MAPINFO:EXPORTS'}) ld('AdditionalOptions', prefix='') minimum_required_version = self._Setting( ('VCLinkerTool', 'MinimumRequiredVersion'), config, default='') if minimum_required_version: minimum_required_version = ',' + minimum_required_version ld('SubSystem', map={'1': 'CONSOLE%s' % minimum_required_version, '2': 'WINDOWS%s' % minimum_required_version}, prefix='/SUBSYSTEM:') stack_reserve_size = self._Setting( ('VCLinkerTool', 'StackReserveSize'), config, default='') if stack_reserve_size: stack_commit_size = self._Setting( ('VCLinkerTool', 'StackCommitSize'), config, default='') if stack_commit_size: stack_commit_size = ',' + stack_commit_size ldflags.append('/STACK:%s%s' % (stack_reserve_size, stack_commit_size)) ld('TerminalServerAware', map={'1': ':NO', '2': ''}, prefix='/TSAWARE') ld('LinkIncremental', map={'1': ':NO', '2': ''}, prefix='/INCREMENTAL') ld('BaseAddress', prefix='/BASE:') ld('FixedBaseAddress', map={'1': ':NO', '2': ''}, prefix='/FIXED') ld('RandomizedBaseAddress', map={'1': ':NO', '2': ''}, prefix='/DYNAMICBASE') ld('DataExecutionPrevention', map={'1': ':NO', '2': ''}, prefix='/NXCOMPAT') ld('OptimizeReferences', map={'1': 'NOREF', '2': 'REF'}, prefix='/OPT:') ld('ForceSymbolReferences', prefix='/INCLUDE:') ld('EnableCOMDATFolding', map={'1': 'NOICF', '2': 'ICF'}, prefix='/OPT:') ld('LinkTimeCodeGeneration', map={'1': '', '2': ':PGINSTRUMENT', '3': ':PGOPTIMIZE', '4': ':PGUPDATE'}, prefix='/LTCG') ld('IgnoreDefaultLibraryNames', prefix='/NODEFAULTLIB:') ld('ResourceOnlyDLL', map={'true': '/NOENTRY'}) ld('EntryPointSymbol', prefix='/ENTRY:') ld('Profile', map={'true': '/PROFILE'}) ld('LargeAddressAware', map={'1': ':NO', '2': ''}, prefix='/LARGEADDRESSAWARE') # TODO(scottmg): This should sort of be somewhere else (not really a flag). 
ld('AdditionalDependencies', prefix='') if self.GetArch(config) == 'x86': safeseh_default = 'true' else: safeseh_default = None ld('ImageHasSafeExceptionHandlers', map={'false': ':NO', 'true': ''}, prefix='/SAFESEH', default=safeseh_default) # If the base address is not specifically controlled, DYNAMICBASE should # be on by default. if not any('DYNAMICBASE' in flag or flag == '/FIXED' for flag in ldflags): ldflags.append('/DYNAMICBASE') # If the NXCOMPAT flag has not been specified, default to on. Despite the # documentation that says this only defaults to on when the subsystem is # Vista or greater (which applies to the linker), the IDE defaults it on # unless it's explicitly off. if not any('NXCOMPAT' in flag for flag in ldflags): ldflags.append('/NXCOMPAT') have_def_file = any(flag.startswith('/DEF:') for flag in ldflags) manifest_flags, intermediate_manifest, manifest_files = \ self._GetLdManifestFlags(config, manifest_base_name, gyp_to_build_path, is_executable and not have_def_file, build_dir) ldflags.extend(manifest_flags) return ldflags, intermediate_manifest, manifest_files def _GetLdManifestFlags(self, config, name, gyp_to_build_path, allow_isolation, build_dir): """Returns a 3-tuple: - the set of flags that need to be added to the link to generate a default manifest - the intermediate manifest that the linker will generate that should be used to assert it doesn't add anything to the merged one. - the list of all the manifest files to be merged by the manifest tool and included into the link.""" generate_manifest = self._Setting(('VCLinkerTool', 'GenerateManifest'), config, default='true') if generate_manifest != 'true': # This means not only that the linker should not generate the intermediate # manifest but also that the manifest tool should do nothing even when # additional manifests are specified. return ['/MANIFEST:NO'], [], [] output_name = name + '.intermediate.manifest' flags = [ '/MANIFEST', '/ManifestFile:' + output_name, ] # Instead of using the MANIFESTUAC flags, we generate a .manifest to # include into the list of manifests. This allows us to avoid the need to # do two passes during linking. The /MANIFEST flag and /ManifestFile are # still used, and the intermediate manifest is used to assert that the # final manifest we get from merging all the additional manifest files # (plus the one we generate here) isn't modified by merging the # intermediate into it. # Always NO, because we generate a manifest file that has what we want. 
flags.append('/MANIFESTUAC:NO') config = self._TargetConfig(config) enable_uac = self._Setting(('VCLinkerTool', 'EnableUAC'), config, default='true') manifest_files = [] generated_manifest_outer = \ "<?xml version='1.0' encoding='UTF-8' standalone='yes'?>" \ "<assembly xmlns='urn:schemas-microsoft-com:asm.v1' manifestVersion='1.0'>%s" \ "</assembly>" if enable_uac == 'true': execution_level = self._Setting(('VCLinkerTool', 'UACExecutionLevel'), config, default='0') execution_level_map = { '0': 'asInvoker', '1': 'highestAvailable', '2': 'requireAdministrator' } ui_access = self._Setting(('VCLinkerTool', 'UACUIAccess'), config, default='false') inner = ''' <trustInfo xmlns="urn:schemas-microsoft-com:asm.v3"> <security> <requestedPrivileges> <requestedExecutionLevel level='%s' uiAccess='%s' /> </requestedPrivileges> </security> </trustInfo>''' % (execution_level_map[execution_level], ui_access) else: inner = '' generated_manifest_contents = generated_manifest_outer % inner generated_name = name + '.generated.manifest' # Need to join with the build_dir here as we're writing it during # generation time, but we return the un-joined version because the build # will occur in that directory. We only write the file if the contents # have changed so that simply regenerating the project files doesn't # cause a relink. build_dir_generated_name = os.path.join(build_dir, generated_name) gyp.common.EnsureDirExists(build_dir_generated_name) f = gyp.common.WriteOnDiff(build_dir_generated_name) f.write(generated_manifest_contents) f.close() manifest_files = [generated_name] if allow_isolation: flags.append('/ALLOWISOLATION') manifest_files += self._GetAdditionalManifestFiles(config, gyp_to_build_path) return flags, output_name, manifest_files def _GetAdditionalManifestFiles(self, config, gyp_to_build_path): """Gets additional manifest files that are added to the default one generated by the linker.""" files = self._Setting(('VCManifestTool', 'AdditionalManifestFiles'), config, default=[]) if isinstance(files, str): files = files.split(';') return [os.path.normpath( gyp_to_build_path(self.ConvertVSMacros(f, config=config))) for f in files] def IsUseLibraryDependencyInputs(self, config): """Returns whether the target should be linked via Use Library Dependency Inputs (using component .objs of a given .lib).""" config = self._TargetConfig(config) uldi = self._Setting(('VCLinkerTool', 'UseLibraryDependencyInputs'), config) return uldi == 'true' def IsEmbedManifest(self, config): """Returns whether manifest should be linked into binary.""" config = self._TargetConfig(config) embed = self._Setting(('VCManifestTool', 'EmbedManifest'), config, default='true') return embed == 'true' def IsLinkIncremental(self, config): """Returns whether the target should be linked incrementally.""" config = self._TargetConfig(config) link_inc = self._Setting(('VCLinkerTool', 'LinkIncremental'), config) return link_inc != '1' def GetRcflags(self, config, gyp_to_ninja_path): """Returns the flags that need to be added to invocations of the resource compiler.""" config = self._TargetConfig(config) rcflags = [] rc = self._GetWrapper(self, self.msvs_settings[config], 'VCResourceCompilerTool', append=rcflags) rc('AdditionalIncludeDirectories', map=gyp_to_ninja_path, prefix='/I') rcflags.append('/I' + gyp_to_ninja_path('.')) rc('PreprocessorDefinitions', prefix='/d') # /l arg must be in hex without leading '0x' rc('Culture', prefix='/l', map=lambda x: hex(int(x))[2:]) return rcflags def BuildCygwinBashCommandLine(self, args, path_to_base): 
"""Build a command line that runs args via cygwin bash. We assume that all incoming paths are in Windows normpath'd form, so they need to be converted to posix style for the part of the command line that's passed to bash. We also have to do some Visual Studio macro emulation here because various rules use magic VS names for things. Also note that rules that contain ninja variables cannot be fixed here (for example ${source}), so the outer generator needs to make sure that the paths that are written out are in posix style, if the command line will be used here.""" cygwin_dir = os.path.normpath( os.path.join(path_to_base, self.msvs_cygwin_dirs[0])) cd = ('cd %s' % path_to_base).replace('\\', '/') args = [a.replace('\\', '/').replace('"', '\\"') for a in args] args = ["'%s'" % a.replace("'", "'\\''") for a in args] bash_cmd = ' '.join(args) cmd = ( 'call "%s\\setup_env.bat" && set CYGWIN=nontsec && ' % cygwin_dir + 'bash -c "%s ; %s"' % (cd, bash_cmd)) return cmd def IsRuleRunUnderCygwin(self, rule): """Determine if an action should be run under cygwin. If the variable is unset, or set to 1 we use cygwin.""" return int(rule.get('msvs_cygwin_shell', self.spec.get('msvs_cygwin_shell', 1))) != 0 def _HasExplicitRuleForExtension(self, spec, extension): """Determine if there's an explicit rule for a particular extension.""" for rule in spec.get('rules', []): if rule['extension'] == extension: return True return False def _HasExplicitIdlActions(self, spec): """Determine if an action should not run midl for .idl files.""" return any([action.get('explicit_idl_action', 0) for action in spec.get('actions', [])]) def HasExplicitIdlRulesOrActions(self, spec): """Determine if there's an explicit rule or action for idl files. When there isn't we need to generate implicit rules to build MIDL .idl files.""" return (self._HasExplicitRuleForExtension(spec, 'idl') or self._HasExplicitIdlActions(spec)) def HasExplicitAsmRules(self, spec): """Determine if there's an explicit rule for asm files. When there isn't we need to generate implicit rules to assemble .asm files.""" return self._HasExplicitRuleForExtension(spec, 'asm') def GetIdlBuildData(self, source, config): """Determine the implicit outputs for an idl file. Returns output directory, outputs, and variables and flags that are required.""" config = self._TargetConfig(config) midl_get = self._GetWrapper(self, self.msvs_settings[config], 'VCMIDLTool') def midl(name, default=None): return self.ConvertVSMacros(midl_get(name, default=default), config=config) tlb = midl('TypeLibraryName', default='${root}.tlb') header = midl('HeaderFileName', default='${root}.h') dlldata = midl('DLLDataFileName', default='dlldata.c') iid = midl('InterfaceIdentifierFileName', default='${root}_i.c') proxy = midl('ProxyFileName', default='${root}_p.c') # Note that .tlb is not included in the outputs as it is not always # generated depending on the content of the input idl file. outdir = midl('OutputDirectory', default='') output = [header, dlldata, iid, proxy] variables = [('tlb', tlb), ('h', header), ('dlldata', dlldata), ('iid', iid), ('proxy', proxy)] # TODO(scottmg): Are there configuration settings to set these flags? 
target_platform = 'win32' if self.GetArch(config) == 'x86' else 'x64' flags = ['/char', 'signed', '/env', target_platform, '/Oicf'] return outdir, output, variables, flags def _LanguageMatchesForPch(source_ext, pch_source_ext): c_exts = ('.c',) cc_exts = ('.cc', '.cxx', '.cpp') return ((source_ext in c_exts and pch_source_ext in c_exts) or (source_ext in cc_exts and pch_source_ext in cc_exts)) class PrecompiledHeader(object): """Helper to generate dependencies and build rules to handle generation of precompiled headers. Interface matches the GCH handler in xcode_emulation.py. """ def __init__( self, settings, config, gyp_to_build_path, gyp_to_unique_output, obj_ext): self.settings = settings self.config = config pch_source = self.settings.msvs_precompiled_source[self.config] self.pch_source = gyp_to_build_path(pch_source) filename, _ = os.path.splitext(pch_source) self.output_obj = gyp_to_unique_output(filename + obj_ext).lower() def _PchHeader(self): """Get the header that will appear in an #include line for all source files.""" return self.settings.msvs_precompiled_header[self.config] def GetObjDependencies(self, sources, objs, arch): """Given a list of sources files and the corresponding object files, returns a list of the pch files that should be depended upon. The additional wrapping in the return value is for interface compatibility with make.py on Mac, and xcode_emulation.py.""" assert arch is None if not self._PchHeader(): return [] pch_ext = os.path.splitext(self.pch_source)[1] for source in sources: if _LanguageMatchesForPch(os.path.splitext(source)[1], pch_ext): return [(None, None, self.output_obj)] return [] def GetPchBuildCommands(self, arch): """Not used on Windows as there are no additional build steps required (instead, existing steps are modified in GetFlagsModifications below).""" return [] def GetFlagsModifications(self, input, output, implicit, command, cflags_c, cflags_cc, expand_special): """Get the modified cflags and implicit dependencies that should be used for the pch compilation step.""" if input == self.pch_source: pch_output = ['/Yc' + self._PchHeader()] if command == 'cxx': return ([('cflags_cc', map(expand_special, cflags_cc + pch_output))], self.output_obj, []) elif command == 'cc': return ([('cflags_c', map(expand_special, cflags_c + pch_output))], self.output_obj, []) return [], output, implicit vs_version = None def GetVSVersion(generator_flags): global vs_version if not vs_version: vs_version = gyp.MSVSVersion.SelectVisualStudioVersion( generator_flags.get('msvs_version', 'auto'), allow_fallback=False) return vs_version def _GetVsvarsSetupArgs(generator_flags, arch): vs = GetVSVersion(generator_flags) return vs.SetupScript() def ExpandMacros(string, expansions): """Expand $(Variable) per expansions dict. See MsvsSettings.GetVSMacroEnv for the canonical way to retrieve a suitable dict.""" if '$' in string: for old, new in expansions.items(): if isinstance(new, bytes): new = new.decode() assert '$(' not in new, new string = string.replace(old, new) return string def _ExtractImportantEnvironment(output_of_set, arch): """Extracts environment variables required for the toolchain to run from a textual dump output by the cmd.exe 'set' command.""" envvars_to_save = ( 'goma_.*', # TODO(scottmg): This is ugly, but needed for goma. 'include', 'lib', 'libpath', 'path', 'pathext', 'systemroot', 'temp', 'tmp', ) env = {} # This occasionally happens and leads to misleading SYSTEMROOT error messages # if not caught here. 
cl_find = 'cl.exe' if 'Visual Studio 2017'.encode('utf-8') in output_of_set: cl_find = arch + '.' + cl_find if output_of_set.count('='.encode('utf-8')) == 0: raise Exception('Invalid output_of_set. Value is:\n%s' % output_of_set) for line in output_of_set.splitlines(): if re.search(cl_find.encode(), line, re.I): env['GYP_CL_PATH'] = line continue for envvar in envvars_to_save: if re.match((envvar + '=').encode(), line, re.I): var, setting = line.split('='.encode(), 1) if envvar == 'path': # Our own rules (for running gyp-win-tool) and other actions in # Chromium rely on python being in the path. Add the path to this # python here so that if it's not in the path when ninja is run # later, python will still be found. setting = os.path.dirname(sys.executable) + os.pathsep + setting.decode() env[var.upper()] = setting break for required in (b'SYSTEMROOT', b'TEMP', b'TMP'): if required not in env: raise Exception('Environment variable "%s" ' 'required to be set to valid path' % required) return env def _FormatAsEnvironmentBlock(envvar_dict): """Format as an 'environment block' directly suitable for CreateProcess. Briefly this is a list of key=value\0, terminated by an additional \0. See CreateProcess documentation for more details.""" block = '' nul = '\0' for key, value in envvar_dict.items(): try: block += key except: block += key.decode() block += '=' try: block += value except: block += value.decode() block += nul block += nul return block def GenerateEnvironmentFiles(toplevel_build_dir, generator_flags, system_includes, open_out): """It's not sufficient to have the absolute path to the compiler, linker, etc. on Windows, as those tools rely on .dlls being in the PATH. We also need to support both x86 and x64 compilers within the same build (to support msvs_target_platform hackery). Different architectures require a different compiler binary, and different supporting environment variables (INCLUDE, LIB, LIBPATH). So, we extract the environment here, wrap all invocations of compiler tools (cl, link, lib, rc, midl, etc.) via win_tool.py which sets up the environment, and then we do not prefix the compiler with an absolute path, instead preferring something like "cl.exe" in the rule which will then run whichever the environment setup has put in the path. When the following procedure to generate environment files does not meet your requirement (e.g. for custom toolchains), you can pass "-G ninja_use_custom_environment_files" to the gyp to suppress file generation and use custom environment files prepared by yourself.""" archs = ('x86', 'x64') if generator_flags.get('ninja_use_custom_environment_files', 0): cl_paths = {} for arch in archs: cl_paths[arch] = 'cl.exe' return cl_paths vs = GetVSVersion(generator_flags) cl_paths = {} for arch in archs: env = _GetEnvironment(arch, vs, open_out) # Inject system includes from gyp files into INCLUDE. if system_includes: system_includes = system_includes | OrderedSet( env.get('INCLUDE', '').split(';')) env['INCLUDE'] = ';'.join(system_includes) env_block = _FormatAsEnvironmentBlock(env) f = open_out(os.path.join(toplevel_build_dir, 'environment.' + arch), 'w') f.write(env_block) f.close() cl_paths[arch] = env['GYP_CL_PATH'] return cl_paths def _GetEnvironment(arch, vs, open_out): """ This function will run the VC environment setup script, retrieve variables, and also the path on cl.exe. It will then try to cache the values to disk, and on next run will try to lookup the cache. 
The cache key is the path to the setup script (which is embedded within each Visual Studio installed instance) + it's args. Even after a cache hit we do some validation of the cached values, since parts of the tool-set can be upgraded with in the installed lifecycle so paths and version numbers may change. Args: arch: {string} target architecture vs: VisualStudioVersion open_out: file open wrapper Returns: {dict} the important environment variables VC need to run """ env = {} args = vs.SetupScript(arch) args.extend(('&&', 'set', '&&', 'where', 'cl.exe')) cache_key = hashlib.md5(''.join(args).encode('utf-8')).hexdigest() # The default value for %TEMP% will make all cache look ups to safely miss appdata_dir = os.environ.get('TEMP', '') cache_path = os.path.join(appdata_dir, '.gyp-cache') cache_keyed_file = os.path.join(cache_path, cache_key) if os.path.exists(cache_keyed_file): try: with file(cache_keyed_file) as f: env = pickle.load(f) except Exception: pass cl_path = env.get('GYP_CL_PATH', '') if os.path.exists(cl_path): return env else: # cache has become invalid (probably form a tool set update) os.remove(cache_keyed_file) start_time = time.clock() # Extract environment variables for subprocesses. popen = subprocess.Popen( args, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) std_out, _ = popen.communicate() if popen.returncode != 0: raise Exception('"%s" failed with error %d' % (args, popen.returncode)) end_time = time.clock() if DEBUG_GENERAL in gyp.debug.keys(): DebugOutput(DEBUG_GENERAL, "vcvars %s time: %f" % (' '.join(args), end_time - start_time)) env = _ExtractImportantEnvironment(std_out, arch) if os.path.exists(appdata_dir): try: with open_out(cache_keyed_file) as f: pickle.dump(env, f) except Exception as e: print (e) return env def VerifyMissingSources(sources, build_dir, generator_flags, gyp_to_ninja): """Emulate behavior of msvs_error_on_missing_sources present in the msvs generator: Check that all regular source files, i.e. not created at run time, exist on disk. Missing files cause needless recompilation when building via VS, and we want this check to match for people/bots that build using ninja, so they're not surprised when the VS build fails.""" if int(generator_flags.get('msvs_error_on_missing_sources', 0)): no_specials = filter(lambda x: '$' not in x, sources) relative = [os.path.join(build_dir, gyp_to_ninja(s)) for s in no_specials] missing = [x for x in relative if not os.path.exists(x)] if missing: # They'll look like out\Release\..\..\stuff\things.cc, so normalize the # path for a slightly less crazy looking output. cleaned_up = [os.path.normpath(x) for x in missing] raise Exception('Missing input files:\n%s' % '\n'.join(cleaned_up)) # Sets some values in default_variables, which are required for many # generators, run on Windows. def CalculateCommonVariables(default_variables, params): generator_flags = params.get('generator_flags', {}) # Set a variable so conditions can be based on msvs_version. msvs_version = gyp.msvs_emulation.GetVSVersion(generator_flags) default_variables['MSVS_VERSION'] = msvs_version.ShortName() # To determine processor word size on Windows, in addition to checking # PROCESSOR_ARCHITECTURE (which reflects the word size of the current # process), it is also necessary to check PROCESSOR_ARCHITEW6432 (which # contains the actual word size of the system when running thru WOW64). 
if ('64' in os.environ.get('PROCESSOR_ARCHITECTURE', '') or '64' in os.environ.get('PROCESSOR_ARCHITEW6432', '')): default_variables['MSVS_OS_BITS'] = 64 else: default_variables['MSVS_OS_BITS'] = 32
[ [ [ 275, 286 ], [ 3526, 3537 ], [ 4037, 4048 ], [ 4508, 4519 ] ], [ [ 294, 296 ], [ 1384, 1386 ], [ 2872, 2874 ], [ 2920, 2922 ], [ 5100, 5102 ], [ 6063, 6065 ], [ 6659, 6661 ], [ 10818, 10820 ], [ 21692, 21694 ], [ 21875, 21877 ], [ 22723, 22725 ], [ 32631, 32633 ], [ 33491, 33493 ], [ 35772, 35774 ], [ 35798, 35800 ], [ 39652, 39654 ], [ 40335, 40337 ], [ 40431, 40433 ], [ 43504, 43506 ], [ 43538, 43540 ], [ 46104, 46106 ], [ 47274, 47276 ], [ 47316, 47318 ], [ 47377, 47379 ], [ 47418, 47420 ], [ 47611, 47613 ], [ 47736, 47738 ], [ 48317, 48319 ], [ 49069, 49071 ], [ 49174, 49176 ], [ 49362, 49364 ], [ 50186, 50188 ], [ 50248, 50250 ] ], [ [ 304, 310 ], [ 47515, 47521 ], [ 48407, 48413 ] ], [ [ 318, 320 ], [ 672, 674 ], [ 42954, 42956 ], [ 42988, 42990 ], [ 43086, 43088 ], [ 43126, 43128 ] ], [ [ 328, 338 ], [ 5290, 5300 ], [ 5319, 5329 ], [ 5343, 5353 ], [ 47854, 47864 ], [ 47901, 47911 ], [ 47925, 47935 ] ], [ [ 346, 349 ], [ 43520, 43523 ] ], [ [ 357, 361 ], [ 47779, 47783 ], [ 48096, 48100 ] ], [ [ 369, 376 ], [ 47126, 47133 ] ], [ [ 401, 411 ], [ 6937, 6947 ], [ 7145, 7155 ], [ 7349, 7359 ], [ 45935, 45945 ] ], [ [ 419, 431 ] ], [ [ 439, 454 ], [ 9772, 9775 ], [ 32675, 32678 ], [ 32736, 32739 ], [ 41495, 41498 ], [ 48131, 48134 ], [ 49766, 49769 ] ], [ [ 471, 482 ], [ 48153, 48164 ] ], [ [ 484, 497 ], [ 48114, 48127 ], [ 48165, 48178 ] ], [ [ 513, 516 ], [ 526, 529 ], [ 533, 536 ], [ 43520, 43523 ] ], [ [ 587, 597 ], [ 3582, 3592 ], [ 4095, 4105 ], [ 4566, 4576 ] ], [ [ 631, 641 ], [ 3582, 3592 ], [ 4095, 4105 ], [ 4566, 4576 ] ], [ [ 649, 669 ], [ 1612, 1632 ] ], [ [ 699, 714 ], [ 2980, 2995 ] ], [ [ 2329, 2346 ] ], [ [ 3028, 3044 ], [ 3292, 3308 ], [ 11818, 11834 ] ], [ [ 3353, 3363 ], [ 11909, 11919 ] ], [ [ 3679, 3691 ], [ 11870, 11882 ] ], [ [ 4233, 4248 ], [ 11947, 11962 ] ], [ [ 4681, 4705 ], [ 4999, 5023 ], [ 5050, 5074 ], [ 5579, 5603 ], [ 6431, 6455 ] ], [ [ 5651, 5670 ], [ 7269, 7288 ], [ 10892, 10911 ] ], [ [ 6707, 6738 ] ], [ [ 8066, 8078 ] ], [ [ 38928, 38950 ], [ 21759, 21781 ], [ 40408, 40430 ] ], [ [ 39173, 39190 ] ], [ [ 41384, 41394 ], [ 41466, 41476 ] ], [ [ 41406, 41418 ], [ 7289, 7301 ], [ 8385, 8397 ], [ 41697, 41709 ], [ 45697, 45709 ] ], [ [ 41646, 41665 ] ], [ [ 41758, 41770 ], [ 7361, 7373 ], [ 11153, 11165 ] ], [ [ 42139, 42167 ], [ 48268, 48296 ] ], [ [ 43836, 43861 ], [ 46056, 46081 ] ], [ [ 44355, 44379 ] ], [ [ 46268, 46283 ], [ 45774, 45789 ] ], [ [ 48489, 48509 ] ], [ [ 49578, 49602 ] ], [ [ 41482, 41492 ], [ 41630, 41640 ] ] ]
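As a point of reference for the ExpandMacros helper defined in the msvs_emulation module above, the following standalone sketch reproduces the same $(Variable) substitution idea; the macro names and paths are invented for illustration.

# Standalone sketch of the $(Variable) expansion performed by ExpandMacros above.
# The expansion dict and paths are hypothetical examples.
def expand_macros(text, expansions):
    if '$' in text:
        for old, new in expansions.items():
            text = text.replace(old, new)
    return text

print(expand_macros('$(OutDir)\\app.exe', {'$(OutDir)': 'build\\Release'}))
# -> build\Release\app.exe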
# Copyright 2015 Hewlett-Packard # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import hashlib from oslo_log import log from keystone.auth import core from keystone.common import provider_api import keystone.conf from keystone import exception from keystone.federation import constants as federation_constants from keystone.federation import utils from keystone.i18n import _ CONF = keystone.conf.CONF LOG = log.getLogger(__name__) class TokenlessAuthHelper(provider_api.ProviderAPIMixin, object): def __init__(self, env): """A init class for TokenlessAuthHelper. :param env: The HTTP request environment that should contain client certificate attributes. These attributes should match with what the mapping defines. Or a user cannot be mapped and results un-authenticated. The following examples are for the attributes that reference to the client certificate's Subject's Common Name and Organization: SSL_CLIENT_S_DN_CN, SSL_CLIENT_S_DN_O :type env: dict """ self.env = env def _build_scope_info(self): """Build the token request scope based on the headers. :returns: scope data :rtype: dict """ project_id = self.env.get('HTTP_X_PROJECT_ID') project_name = self.env.get('HTTP_X_PROJECT_NAME') project_domain_id = self.env.get('HTTP_X_PROJECT_DOMAIN_ID') project_domain_name = self.env.get('HTTP_X_PROJECT_DOMAIN_NAME') domain_id = self.env.get('HTTP_X_DOMAIN_ID') domain_name = self.env.get('HTTP_X_DOMAIN_NAME') scope = {} if project_id: scope['project'] = {'id': project_id} elif project_name: scope['project'] = {'name': project_name} if project_domain_id: scope['project']['domain'] = {'id': project_domain_id} elif project_domain_name: scope['project']['domain'] = {'name': project_domain_name} else: msg = _('Neither Project Domain ID nor Project Domain Name ' 'was provided.') raise exception.ValidationError(msg) elif domain_id: scope['domain'] = {'id': domain_id} elif domain_name: scope['domain'] = {'name': domain_name} else: raise exception.ValidationError( attribute='project or domain', target='scope') return scope def get_scope(self): auth = {} # NOTE(chioleong): Auth methods here are insignificant because # we only care about using auth.controllers.AuthInfo # to validate the scope information. Therefore, # we don't provide any identity. auth['scope'] = self._build_scope_info() # NOTE(chioleong): We'll let AuthInfo validate the scope for us auth_info = core.AuthInfo.create(auth, scope_only=True) return auth_info.get_scope() def get_mapped_user(self, project_id=None, domain_id=None): """Map client certificate to an existing user. If user is ephemeral, there is no validation on the user himself; however it will be mapped to a corresponding group(s) and the scope of this ephemeral user is the same as what is assigned to the group. :param project_id: Project scope of the mapped user. :param domain_id: Domain scope of the mapped user. 
:returns: A dictionary that contains the keys, such as user_id, user_name, domain_id, domain_name :rtype: dict """ idp_id = self._build_idp_id() LOG.debug('The IdP Id %s and protocol Id %s are used to look up ' 'the mapping.', idp_id, CONF.tokenless_auth.protocol) mapped_properties, mapping_id = self.federation_api.evaluate( idp_id, CONF.tokenless_auth.protocol, self.env) user = mapped_properties.get('user', {}) user_id = user.get('id') user_name = user.get('name') user_type = user.get('type') if user.get('domain') is not None: user_domain_id = user.get('domain').get('id') user_domain_name = user.get('domain').get('name') else: user_domain_id = None user_domain_name = None # if user is ephemeral type, we don't care if the user exists # or not, but just care if the mapped group(s) is valid. if user_type == utils.UserType.EPHEMERAL: user_ref = {'type': utils.UserType.EPHEMERAL} group_ids = mapped_properties['group_ids'] utils.validate_mapped_group_ids(group_ids, mapping_id, self.identity_api) group_ids.extend( utils.transform_to_group_ids( mapped_properties['group_names'], mapping_id, self.identity_api, self.assignment_api)) roles = self.assignment_api.get_roles_for_groups(group_ids, project_id, domain_id) if roles is not None: role_names = [role['name'] for role in roles] user_ref['roles'] = role_names user_ref['group_ids'] = list(group_ids) user_ref[federation_constants.IDENTITY_PROVIDER] = idp_id user_ref[federation_constants.PROTOCOL] = ( CONF.tokenless_auth.protocol) return user_ref if user_id: user_ref = self.identity_api.get_user(user_id) elif user_name and (user_domain_name or user_domain_id): if user_domain_name: user_domain = self.resource_api.get_domain_by_name( user_domain_name) self.resource_api.assert_domain_enabled(user_domain['id'], user_domain) user_domain_id = user_domain['id'] user_ref = self.identity_api.get_user_by_name(user_name, user_domain_id) else: msg = _('User auth cannot be built due to missing either ' 'user id, or user name with domain id, or user name ' 'with domain name.') raise exception.ValidationError(msg) self.identity_api.assert_user_enabled( user_id=user_ref['id'], user=user_ref) user_ref['type'] = utils.UserType.LOCAL return user_ref def _build_idp_id(self): """Build the IdP name from the given config option issuer_attribute. The default issuer attribute SSL_CLIENT_I_DN in the environment is built with the following formula - base64_idp = sha1(env['SSL_CLIENT_I_DN']) :returns: base64_idp like the above example :rtype: str """ idp = self.env.get(CONF.tokenless_auth.issuer_attribute) if idp is None: raise exception.TokenlessAuthConfigError( issuer_attribute=CONF.tokenless_auth.issuer_attribute) hashed_idp = hashlib.sha256(idp.encode('utf-8')) return hashed_idp.hexdigest()
[ [ [ 638, 645 ], [ 7841, 7848 ] ], [ [ 668, 671 ], [ 963, 966 ] ], [ [ 699, 703 ], [ 3481, 3485 ] ], [ [ 732, 744 ], [ 1015, 1027 ] ], [ [ 752, 765 ], [ 938, 946 ] ], [ [ 787, 796 ], [ 2726, 2735 ], [ 2939, 2948 ], [ 7030, 7039 ], [ 7712, 7721 ] ], [ [ 829, 862 ], [ 6011, 6031 ], [ 6081, 6101 ] ], [ [ 895, 900 ], [ 5062, 5067 ], [ 5120, 5125 ], [ 5213, 5218 ], [ 5421, 5426 ], [ 7198, 7203 ] ], [ [ 927, 928 ], [ 2608, 2609 ], [ 6844, 6845 ] ], [ [ 931, 935 ], [ 4337, 4341 ], [ 4458, 4462 ], [ 6132, 6136 ], [ 7632, 7636 ], [ 7781, 7785 ] ], [ [ 957, 960 ], [ 4229, 4232 ] ], [ [ 995, 1014 ] ] ]
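The _build_idp_id method above reduces the client certificate's issuer DN to a stable identifier by hashing it; a minimal self-contained sketch of that step, with a made-up issuer value, looks like this.

import hashlib

# Hypothetical issuer DN as it would appear in the request environment
# under CONF.tokenless_auth.issuer_attribute (default SSL_CLIENT_I_DN).
env = {'SSL_CLIENT_I_DN': 'CN=Example CA,O=Example Org'}

idp = env['SSL_CLIENT_I_DN']
idp_id = hashlib.sha256(idp.encode('utf-8')).hexdigest()
print(idp_id)  # hex digest used to look up the identity provider mapping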
# coding=utf-8 from common.BNFParser import * from common.Grammar import Grammar # 求文法G的可空变量集 # 该算法只跟G的P有关系 def algo_6_3(P): """ 测试数据来源于第6章习题12(2) >>> from common.production import Production >>> p1 = Production(['S'], [['A', 'B', 'D', 'C']]) >>> p2 = Production(['A'], [['B', 'D'], ['\\"a\\"', '\\"a\\"'], ['\\"ε\\"']]) >>> p3 = Production(['B'], [['\\"a\\"', 'B'], ['\\"a\\"']]) >>> p4 = Production(['C'], [['D','C'], ['\\"c\\"'], ['\\"ε\\"']]) >>> p5 = Production(['D'], [['\\"ε\\"']]) >>> p = [p1, p2, p3, p4, p5] >>> u = algo_6_3(p) >>> set(u) == set(['A', 'C', 'D']) True """ simple_plist = [] for p in P: simple_plist.extend(Production.toSimpleProduction(p)) old_u = set() new_u = set() for p in simple_plist: if Production.isDirectEmpty(p): new_u.add(p.left[0]) while new_u != old_u: old_u = new_u for p in simple_plist: if set(p.right[0]) <= old_u: new_u.add(p.left[0]) return new_u
[ [ [ 44, 45 ], [ 703, 713 ], [ 812, 822 ] ], [ [ 73, 80 ] ], [ [ 113, 121 ] ] ]
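algo_6_3 above computes the nullable (erasable) variables of a grammar by iterating to a fixed point. For readers without the common.production.Production class, this self-contained sketch shows the same fixed point on productions written as plain (left, right) tuples, using the grammar from the doctest.

# Self-contained sketch of the nullable-variable fixed point used by algo_6_3.
# A production is (left, tuple_of_right_symbols); an empty tuple stands for an epsilon body.
productions = [
    ('S', ('A', 'B', 'D', 'C')),
    ('A', ('B', 'D')), ('A', ('a', 'a')), ('A', ()),
    ('B', ('a', 'B')), ('B', ('a',)),
    ('C', ('D', 'C')), ('C', ('c',)), ('C', ()),
    ('D', ()),
]

nullable = {left for left, right in productions if right == ()}
changed = True
while changed:
    changed = False
    for left, right in productions:
        if left not in nullable and all(sym in nullable for sym in right):
            nullable.add(left)
            changed = True

print(nullable == {'A', 'C', 'D'})  # True, matching the doctest above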
import logging from pyvisdk.exceptions import InvalidArgumentError ######################################## # Automatically generated, do not edit. ######################################## log = logging.getLogger(__name__) def HostFileSystemVolume(vim, *args, **kwargs): '''Detailed information about a file system. This is a base type for derived types that have more specific details about specific filesystem types.Typically a FileSystem is exposed as a datatoreSee DatastoreInfoSee HostVmfsVolumeSee HostNasVolumeSee HostLocalFileSystemVolumeSee HostVfatVolume''' obj = vim.client.factory.create('{urn:vim25}HostFileSystemVolume') # do some validation checking... if (len(args) + len(kwargs)) < 3: raise IndexError('Expected at least 4 arguments got: %d' % len(args)) required = [ 'capacity', 'name', 'type' ] optional = [ 'dynamicProperty', 'dynamicType' ] for name, arg in zip(required+optional, args): setattr(obj, name, arg) for name, value in kwargs.items(): if name in required + optional: setattr(obj, name, value) else: raise InvalidArgumentError("Invalid argument: %s. Expected one of %s" % (name, ", ".join(required + optional))) return obj
[ [ [ 8, 15 ], [ 198, 205 ] ], [ [ 47, 67 ], [ 1147, 1167 ] ], [ [ 192, 195 ] ], [ [ 231, 251 ] ] ]
#!../bin/python3 # -*- coding:utf-8 -*- """ Copyright 2021 Jerome DE LUCCHI Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import os import sys import json from env import _SERVER_DIR sys.path.insert(0, _SERVER_DIR) from api import db __DATAMODEL_DIR = os.path.join(os.path.abspath('..'), 'datamodel') __DATAMODEL_NODE_MODE_FILE = os.path.join(__DATAMODEL_DIR, 'node_mode.template.mapping') __ES_ADDR = db.ES_PROTOCOL + """://""" + str(db.ES_HOSTNAME) + """:""" + str(db.ES_PORT) __CREATE_INDEX_TEMPLATE = """curl -s -XPUT -H \"Content-Type: Application/Json\" """ + __ES_ADDR + """/_template/blast_node_mode -d@""" + __DATAMODEL_NODE_MODE_FILE __NODE_MODES = [ {"name": "maintenance"}, {"name": "pause"}, {"name": "running"} ] def defineIndexTemplate(): try: if json.load(os.popen(__CREATE_INDEX_TEMPLATE))["acknowledged"]: return True except KeyError: return False def provisionDefault(): try: for mode in __NODE_MODES: __ES_PROVISION_DEFAULT = """curl -s -XPOST -H \"Content-Type: Application/Json\" """ + __ES_ADDR + """/blast_node_mode/_doc -d \'""" + json.dumps(mode) + """\'""" if not json.load(os.popen(__ES_PROVISION_DEFAULT))["result"] == "created": return False return True except KeyError: return False def main(): if defineIndexTemplate(): if provisionDefault(): sys.exit(0) if __name__ == "__main__": main()
[ [ [ 643, 645 ], [ 767, 769 ], [ 780, 782 ], [ 845, 847 ], [ 1313, 1315 ], [ 1705, 1707 ] ], [ [ 653, 656 ], [ 697, 700 ], [ 1942, 1945 ] ], [ [ 664, 668 ], [ 1303, 1307 ], [ 1648, 1652 ], [ 1695, 1699 ] ], [ [ 685, 696 ], [ 716, 727 ] ], [ [ 745, 747 ], [ 917, 919 ], [ 950, 952 ], [ 982, 984 ] ], [ [ 749, 764 ], [ 858, 873 ] ], [ [ 816, 842 ], [ 1132, 1158 ] ], [ [ 905, 914 ], [ 1081, 1090 ], [ 1600, 1609 ] ], [ [ 994, 1017 ], [ 1322, 1345 ] ], [ [ 1159, 1171 ], [ 1487, 1499 ] ], [ [ 1259, 1278 ], [ 1876, 1895 ] ], [ [ 1437, 1453 ], [ 1910, 1926 ] ], [ [ 1860, 1864 ], [ 1987, 1991 ] ] ]
# ext/declarative/__init__.py # Copyright (C) 2005-2015 the SQLAlchemy authors and contributors # <see AUTHORS file> # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php from .api import declarative_base, synonym_for, comparable_using, \ instrument_declarative, ConcreteBase, AbstractConcreteBase, \ DeclarativeMeta, DeferredReflection, has_inherited_table,\ declared_attr, as_declarative __all__ = ['declarative_base', 'synonym_for', 'has_inherited_table', 'comparable_using', 'instrument_declarative', 'declared_attr', 'as_declarative', 'ConcreteBase', 'AbstractConcreteBase', 'DeclarativeMeta', 'DeferredReflection']
[ [ [ 265, 281 ] ], [ [ 283, 294 ] ], [ [ 296, 312 ] ], [ [ 320, 342 ] ], [ [ 344, 356 ] ], [ [ 358, 378 ] ], [ [ 386, 401 ] ], [ [ 403, 421 ] ], [ [ 423, 442 ] ], [ [ 449, 462 ] ], [ [ 464, 478 ] ], [ [ 481, 488 ] ] ]
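The package above only re-exports the declarative API; a minimal usage sketch of declarative_base (model, table, and column names are invented, and SQLAlchemy itself is assumed to be installed) is:

from sqlalchemy import Column, Integer, String, create_engine
from sqlalchemy.ext.declarative import declarative_base

Base = declarative_base()

class User(Base):  # hypothetical example model
    __tablename__ = 'users'
    id = Column(Integer, primary_key=True)
    name = Column(String(50))

engine = create_engine('sqlite:///:memory:')
Base.metadata.create_all(engine)  # emits CREATE TABLE for every declarative model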
# Author: Christian Brodbeck <christianbrodbeck@nyu.edu> import pickle from nose.tools import eq_ import numpy as np from numpy.testing import assert_array_equal from eelbrain import datasets from eelbrain._stats.spm import LM, LMGroup def test_lm(): ds = datasets.get_uts() model = ds.eval("A*B*Y") coeffs = ds['uts'].ols(model) lm = LM('uts', 'A*B*Y', ds, 'effect') eq_(repr(lm), "<LM: uts ~ A + B + A x B + Y + A x Y + B x Y + A x B x Y>") for i, effect in enumerate(model.effects): assert_array_equal(lm.coefficient(effect.name).x, coeffs.x[i]) def test_random_lm(): # dummy coding ds = datasets.get_uts() lms = [] for i in range(5): ds['uts'].x += np.random.normal(0, 2, ds['uts'].shape) lms.append(LM('uts', 'A*B*Y', ds)) rlm = LMGroup(lms) eq_(repr(rlm), '<LMGroup: uts ~ A + B + A x B + Y + A x Y + B x Y + A x B x Y, n=5>') # coefficients ds = rlm.coefficients_dataset(('A', 'A x B')) eq_(ds['term'].cells, ('A', 'A x B')) # tests res = rlm.column_ttest('A x B', samples=100, pmin=0.05, mintime=0.025) eq_(res.clusters.n_cases, 1) # effect coding ds = datasets.get_uts() lms = [] for i in range(5): ds['uts'].x += np.random.normal(0, 2, ds['uts'].shape) lms.append(LM('uts', 'A*B*Y', ds, 'effect')) rlm = LMGroup(lms) res = rlm.column_ttest('A x B', samples=100, pmin=0.05, mintime=0.025) eq_(res.clusters.n_cases, 6) # persistence rlm_p = pickle.loads(pickle.dumps(rlm, pickle.HIGHEST_PROTOCOL)) eq_(rlm_p.dims, rlm.dims)
[ [ [ 64, 70 ], [ 1507, 1513 ], [ 1520, 1526 ], [ 1538, 1544 ] ], [ [ 94, 97 ], [ 392, 395 ], [ 825, 828 ], [ 985, 988 ], [ 1115, 1118 ], [ 1447, 1450 ], [ 1568, 1571 ] ], [ [ 105, 116 ], [ 715, 717 ], [ 1252, 1254 ] ], [ [ 143, 161 ], [ 522, 540 ] ], [ [ 184, 192 ], [ 263, 271 ], [ 637, 645 ], [ 1174, 1182 ] ], [ [ 225, 227 ], [ 355, 357 ], [ 774, 776 ], [ 1311, 1313 ] ], [ [ 229, 236 ], [ 808, 815 ], [ 1355, 1362 ] ], [ [ 243, 250 ] ], [ [ 591, 605 ] ] ]
# Copyright 2017 The Armada Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from armada.exceptions import base_exception class LintException(base_exception.ArmadaBaseException): '''Base class for linting exceptions and errors.''' message = 'An unknown linting error occurred.' class InvalidManifestException(LintException): ''' Exception for invalid manifests. **Troubleshoot:** *Coming Soon* ''' message = 'Armada manifest invalid.' class InvalidChartNameException(LintException): '''Exception that occurs when an invalid filename is encountered.''' message = 'Chart name must be a string.' class InvalidChartDefinitionException(LintException): '''Exception when invalid chart definition is encountered.''' message = 'Invalid chart definition. Chart definition must be array.' class InvalidReleaseException(LintException): '''Exception that occurs when a release is invalid.''' message = 'Release needs to be a string.' class InvalidArmadaObjectException(LintException): ''' Exception that occurs when an Armada object is not declared. **Troubleshoot:** *Coming Soon* ''' message = 'An Armada object was not declared.'
[ [ [ 614, 628 ], [ 651, 665 ] ], [ [ 637, 650 ], [ 829, 842 ], [ 1015, 1028 ], [ 1190, 1203 ], [ 1379, 1392 ], [ 1538, 1551 ] ], [ [ 804, 828 ] ], [ [ 989, 1014 ] ], [ [ 1158, 1189 ] ], [ [ 1355, 1378 ] ], [ [ 1509, 1537 ] ] ]
import numpy as np import tensorflow as tf from common.shared_functions import dot_or_lookup, glorot_variance, make_tf_variable, make_tf_bias from encoders.message_gcns.message_gcn import MessageGcn class BasisGcn(MessageGcn): def parse_settings(self): self.dropout_keep_probability = float(self.settings['DropoutKeepProbability']) self.n_coefficients = int(self.settings['NumberOfBasisFunctions']) def local_initialize_train(self): vertex_feature_dimension = self.entity_count if self.onehot_input else self.shape[0] type_matrix_shape = (self.relation_count, self.n_coefficients) vertex_matrix_shape = (vertex_feature_dimension, self.n_coefficients, self.shape[1]) self_matrix_shape = (vertex_feature_dimension, self.shape[1]) glorot_var_combined = glorot_variance([vertex_matrix_shape[0], vertex_matrix_shape[2]]) self.W_forward = make_tf_variable(0, glorot_var_combined, vertex_matrix_shape) self.W_backward = make_tf_variable(0, glorot_var_combined, vertex_matrix_shape) self.W_self = make_tf_variable(0, glorot_var_combined, self_matrix_shape) type_init_var = 1 self.C_forward = make_tf_variable(0, type_init_var, type_matrix_shape) self.C_backward = make_tf_variable(0, type_init_var, type_matrix_shape) self.b = make_tf_bias(self.shape[1]) def local_get_weights(self): return [self.W_forward, self.W_backward, self.C_forward, self.C_backward, self.W_self, self.b] def compute_messages(self, sender_features, receiver_features): backward_type_scaling, forward_type_scaling = self.compute_coefficients() receiver_terms, sender_terms = self.compute_basis_functions(receiver_features, sender_features) forward_messages = tf.reduce_sum(input_tensor=sender_terms * tf.expand_dims(forward_type_scaling,-1), axis=1) backward_messages = tf.reduce_sum(input_tensor=receiver_terms * tf.expand_dims(backward_type_scaling, -1), axis=1) return forward_messages, backward_messages def compute_coefficients(self): message_types = self.get_graph().get_type_indices() forward_type_scaling = tf.nn.embedding_lookup(params=self.C_forward, ids=message_types) backward_type_scaling = tf.nn.embedding_lookup(params=self.C_backward, ids=message_types) return backward_type_scaling, forward_type_scaling def compute_basis_functions(self, receiver_features, sender_features): sender_terms = self.dot_or_tensor_mul(sender_features, self.W_forward) receiver_terms = self.dot_or_tensor_mul(receiver_features, self.W_backward) return receiver_terms, sender_terms def dot_or_tensor_mul(self, features, tensor): tensor_shape = tf.shape(input=tensor) flat_shape = [tensor_shape[0], tensor_shape[1] * tensor_shape[2]] flattened_tensor = tf.reshape(tensor, flat_shape) result_tensor = dot_or_lookup(features, flattened_tensor, onehot_input=self.onehot_input) result_tensor = tf.reshape(result_tensor, [-1, tensor_shape[1], tensor_shape[2]]) return result_tensor def compute_self_loop_messages(self, vertex_features): return dot_or_lookup(vertex_features, self.W_self, onehot_input=self.onehot_input) def combine_messages(self, forward_messages, backward_messages, self_loop_messages, previous_code, mode='train'): mtr_f = self.get_graph().forward_incidence_matrix(normalization=('global', 'recalculated')) mtr_b = self.get_graph().backward_incidence_matrix(normalization=('global', 'recalculated')) collected_messages_f = tf.sparse.sparse_dense_matmul(mtr_f, forward_messages) collected_messages_b = tf.sparse.sparse_dense_matmul(mtr_b, backward_messages) updated_vertex_embeddings = collected_messages_f + collected_messages_b if self.use_nonlinearity: activated = 
tf.nn.relu(updated_vertex_embeddings + self_loop_messages) else: activated = updated_vertex_embeddings + self_loop_messages return activated def local_get_regularization(self): regularization = tf.reduce_mean(input_tensor=tf.square(self.W_forward)) regularization += tf.reduce_mean(input_tensor=tf.square(self.W_backward)) regularization += tf.reduce_mean(input_tensor=tf.square(self.W_self)) return 0.0 * regularization
[ [ [ 7, 18 ] ], [ [ 26, 42 ], [ 1845, 1847 ], [ 1887, 1889 ], [ 1964, 1966 ], [ 2008, 2010 ], [ 2239, 2241 ], [ 2336, 2338 ], [ 2820, 2822 ], [ 2945, 2947 ], [ 3098, 3100 ], [ 3698, 3700 ], [ 3784, 3786 ], [ 3980, 3982 ], [ 4216, 4218 ], [ 4244, 4246 ], [ 4297, 4299 ], [ 4325, 4327 ], [ 4379, 4381 ], [ 4407, 4409 ] ], [ [ 79, 92 ], [ 3000, 3013 ], [ 3269, 3282 ] ], [ [ 94, 109 ], [ 821, 836 ] ], [ [ 111, 127 ], [ 912, 928 ], [ 1000, 1016 ], [ 1084, 1100 ], [ 1196, 1212 ], [ 1276, 1292 ] ], [ [ 129, 141 ], [ 1348, 1360 ] ], [ [ 189, 199 ], [ 217, 227 ] ], [ [ 208, 216 ] ] ]
import hashlib import datetime import json import uuid from hashlib import sha256 from sys import version_info as pyVersion from binascii import hexlify, unhexlify from wallet import * from func.send_message import send_message from func.send_coin import send_coin from func.node_connection import * from lib.mixlib import * import pickle from blockchain.blockchain_main import get_blockchain , create_blockchain, sendme_full_chain from lib.settings import the_settings def show_menu(): print(banner_maker(sc_name="Mix Blockchain Network",description="This is an open source blockchain network project. It exists for people to build and use their own blockchain networks. Or to join the network created by others.",author="Onur Atakan ULUSOY",email="atadogan06@gmail.com") + \ menu_space() + \ menu_maker(menu_number="cbc",menu_text="Create Blockchain")+ \ menu_maker(menu_number="cw",menu_text="Create Wallet")+ \ menu_space() + \ menu_maker(menu_number="sm",menu_text="Send Message")+ \ menu_maker(menu_number="sc",menu_text="Send Coin")+ \ menu_space() + \ menu_maker(menu_number="gb",menu_text="Get Balance")+ \ menu_space() + \ menu_maker(menu_number="ndstart",menu_text="Node Start")+ \ menu_maker(menu_number="ndstop",menu_text="Node Stop")+ \ menu_maker(menu_number="ndconnect",menu_text="Node Connect")+ \ menu_maker(menu_number="ndconnectmix_blockchain_network",menu_text="Node Connect from mix_blockchain_network-DB")+ \ menu_space() + \ menu_maker(menu_number="testmodeon",menu_text="Test mode ON")+ \ menu_maker(menu_number="testmodeoff",menu_text="Test mode OF")+ \ menu_maker(menu_number="debugmodeon",menu_text="Debug mode ON")+ \ menu_maker(menu_number="debugmodeoff",menu_text="Debug mode OF")+ \ menu_space() + \ menu_maker(menu_number="getfullnodelist",menu_text="Get Full Node List")+ \ menu_maker(menu_number="getfullchain",menu_text="Get Full Chain")+ \ quit_menu_maker(mode="main") ) def menu(): while True: show_menu() choices_input = question_maker(mode="main") if choices_input == "cbc": create_blockchain() if choices_input == "cw": Wallet_Create() if choices_input == "sm": send_message(input("Message: "),input("Please write receiver adress: ")) if choices_input == "sc": send_coin(input("Coin Amount: "),input("Please write receiver adress: ")) if choices_input == "gb": print(get_blockchain().getBalance(Wallet_Import(0,0))) if choices_input == "help": show_menu() if choices_input == "ndstart": ndstart(int(input("port: "))) if choices_input == "ndstop": ndstop() if choices_input == "ndconnect": ndconnect(str(input("node ip: ")),int(input("node port: "))) if choices_input == "ndconnectmix_blockchain_network": ndconnectmix_blockchain_network() if choices_input == "testmodeon": the_settings().test_mode(True) if choices_input == "testmodeoff": the_settings().test_mode(False) if choices_input == "debugmodeon": the_settings().debug_mode(True) if choices_input == "debugmodeoff": the_settings().debug_mode(False) if choices_input == "getfullnodelist": sendme_full_node_list() if choices_input == "getfullchain": sendme_full_chain() if choices_input == "0": exit() def start(): menu() if __name__ == '__main__': start()
[ [ [ 7, 14 ] ], [ [ 22, 30 ] ], [ [ 38, 42 ] ], [ [ 51, 55 ] ], [ [ 77, 83 ] ], [ [ 102, 127 ] ], [ [ 149, 156 ] ], [ [ 158, 167 ] ], [ [ 188, 189 ] ], [ [ 224, 236 ], [ 2351, 2363 ] ], [ [ 264, 273 ], [ 2470, 2479 ] ], [ [ 307, 308 ] ], [ [ 335, 336 ], [ 524, 536 ], [ 815, 825 ], [ 839, 849 ], [ 906, 916 ], [ 968, 978 ], [ 989, 999 ], [ 1050, 1060 ], [ 1108, 1118 ], [ 1132, 1142 ], [ 1195, 1205 ], [ 1216, 1226 ], [ 1283, 1293 ], [ 1348, 1358 ], [ 1419, 1429 ], [ 1543, 1553 ], [ 1567, 1577 ], [ 1639, 1649 ], [ 1712, 1722 ], [ 1786, 1796 ], [ 1861, 1871 ], [ 1885, 1895 ], [ 1968, 1978 ], [ 2040, 2055 ], [ 2146, 2160 ], [ 2289, 2302 ], [ 2624, 2637 ], [ 2756, 2763 ], [ 2836, 2842 ], [ 2898, 2907 ], [ 3034, 3065 ], [ 3477, 3498 ] ], [ [ 345, 351 ] ], [ [ 394, 408 ], [ 2596, 2610 ] ], [ [ 411, 428 ], [ 2223, 2240 ] ], [ [ 430, 447 ], [ 3557, 3574 ] ], [ [ 474, 486 ], [ 3122, 3134 ], [ 3208, 3220 ], [ 3295, 3307 ], [ 3383, 3395 ] ], [ [ 504, 513 ], [ 2110, 2119 ], [ 2693, 2702 ] ], [ [ 2078, 2082 ], [ 3653, 3657 ] ], [ [ 3640, 3645 ], [ 3692, 3697 ] ] ]
import numpy as np import cv2 def make_colorwheel(): ''' Generates a color wheel for optical flow visualization as presented in: Baker et al. "A Database and Evaluation Methodology for Optical Flow" (ICCV, 2007) URL: http://vision.middlebury.edu/flow/flowEval-iccv07.pdf According to the C++ source code of Daniel Scharstein According to the Matlab source code of Deqing Sun ''' RY = 15 YG = 6 GC = 4 CB = 11 BM = 13 MR = 6 ncols = RY + YG + GC + CB + BM + MR colorwheel = np.zeros((ncols, 3)) col = 0 # RY colorwheel[0:RY, 0] = 255 colorwheel[0:RY, 1] = np.floor(255 * np.arange(0, RY) / RY) col = col + RY # YG colorwheel[col:col + YG, 0] = 255 - np.floor(255 * np.arange(0, YG) / YG) colorwheel[col:col + YG, 1] = 255 col = col + YG # GC colorwheel[col:col + GC, 1] = 255 colorwheel[col:col + GC, 2] = np.floor(255 * np.arange(0, GC) / GC) col = col + GC # CB colorwheel[col:col + CB, 1] = 255 - np.floor(255 * np.arange(CB) / CB) colorwheel[col:col + CB, 2] = 255 col = col + CB # BM colorwheel[col:col + BM, 2] = 255 colorwheel[col:col + BM, 0] = np.floor(255 * np.arange(0, BM) / BM) col = col + BM # MR colorwheel[col:col + MR, 2] = 255 - np.floor(255 * np.arange(MR) / MR) colorwheel[col:col + MR, 0] = 255 return colorwheel def flow_compute_color(u, v, convert_to_bgr=False): ''' Applies the flow color wheel to (possibly clipped) flow components u and v. According to the C++ source code of Daniel Scharstein According to the Matlab source code of Deqing Sun :param u: np.ndarray, input horizontal flow :param v: np.ndarray, input vertical flow :param convert_to_bgr: bool, whether to change ordering and output BGR instead of RGB :return: ''' flow_image = np.zeros((u.shape[0], u.shape[1], 3), np.uint8) colorwheel = make_colorwheel() # shape [55x3] ncols = colorwheel.shape[0] rad = np.sqrt(np.square(u) + np.square(v)) a = np.arctan2(-v, -u) / np.pi fk = (a + 1) / 2 * (ncols - 1) + 1 k0 = np.floor(fk).astype(np.int32) k0[k0 > 53] = 53 k1 = k0 + 1 k1[k1 == ncols] = 1 f = fk - k0 for i in range(colorwheel.shape[1]): tmp = colorwheel[:, i] col0 = tmp[k0] / 255.0 col1 = tmp[k1] / 255.0 col = (1 - f) * col0 + f * col1 idx = (rad <= 1) col[idx] = 1 - rad[idx] * (1 - col[idx]) col[~idx] = col[~idx] * 0.75 # out of range? 
# Note the 2-i => BGR instead of RGB ch_idx = 2 - i if convert_to_bgr else i flow_image[:, :, ch_idx] = np.floor(255 * col) return flow_image def flow_to_color(flow_uv, clip_flow=None, convert_to_bgr=False): ''' Expects a two dimensional flow image of shape [H,W,2] According to the C++ source code of Daniel Scharstein According to the Matlab source code of Deqing Sun :param flow_uv: np.ndarray of shape [H,W,2] :param clip_flow: float, maximum clipping value for flow :return: ''' assert flow_uv.ndim == 3, 'input flow must have three dimensions' assert flow_uv.shape[2] == 2, 'input flow must have shape [H,W,2]' if clip_flow is not None: flow_uv = np.clip(flow_uv, 0, clip_flow) u = flow_uv[:, :, 0] v = flow_uv[:, :, 1] rad = np.sqrt(np.square(u) + np.square(v)) rad_max = np.max(rad) epsilon = 1e-5 u = u / (rad_max + epsilon) v = v / (rad_max + epsilon) return flow_compute_color(u, v, convert_to_bgr) def readFlow(name): f = open(name, 'rb') header = f.read(4) if header.decode("utf-8") != 'PIEH': raise Exception('Flow file header does not contain PIEH') width = np.fromfile(f, np.int32, 1).squeeze() height = np.fromfile(f, np.int32, 1).squeeze() flow = np.fromfile(f, np.float32, width * height * 2).reshape((height, width, 2)) f.close() return flow.astype(np.float32) def get_warp_label(flow1, flow2, label1, th=50, value=0): label2 = np.ones_like(label1, dtype=label1.dtype) * value height = flow1.shape[0] width = flow1.shape[1] flow_t = np.zeros_like(flow1, dtype=flow1.dtype) grid = np.indices((height, width)).swapaxes(0, 1).swapaxes(1, 2) dx = grid[:, :, 0] + flow2[:, :, 1] dy = grid[:, :, 1] + flow2[:, :, 0] sx = np.floor(dx).astype(int) sy = np.floor(dy).astype(int) valid = (sx >= 0) & (sx < height - 1) & (sy >= 0) & (sy < width - 1) sx_mat = np.dstack((sx, sx + 1, sx, sx + 1)).clip(0, height - 1) sy_mat = np.dstack((sy, sy, sy + 1, sy + 1)).clip(0, width - 1) sxsy_mat = np.abs((1 - np.abs(sx_mat - dx[:, :, np.newaxis])) * (1 - np.abs(sy_mat - dy[:, :, np.newaxis]))) for i in range(4): flow_t = flow_t + sxsy_mat[:, :, i][:, :, np. newaxis] * flow1[sx_mat[:, :, i], sy_mat[:, :, i], :] valid = valid & (np.linalg.norm( flow_t[:, :, [1, 0]] + np.dstack((dx, dy)) - grid, axis=2) < th) flow_t = (flow2 - flow_t) / 2.0 dx = grid[:, :, 0] + flow_t[:, :, 1] dy = grid[:, :, 1] + flow_t[:, :, 0] valid = valid & (dx >= 0) & (dx < height - 1) & (dy >= 0) & (dy < width - 1) label2[valid, :] = label1[dx[valid].round().astype(int), dy[valid].round() .astype(int), :] return label2 def flow_tf(flow, size): flow_shape = flow.shape flow_resized = cv2.resize(flow, (size[1], size[0])) flow_resized[:, :, 0] *= (float(size[1]) / float(flow_shape[1])) flow_resized[:, :, 1] *= (float(size[0]) / float(flow_shape[0])) return flow_resized
[ [ [ 7, 18 ], [ 546, 548 ], [ 645, 647 ], [ 660, 662 ], [ 751, 753 ], [ 766, 768 ], [ 927, 929 ], [ 942, 944 ], [ 1033, 1035 ], [ 1048, 1050 ], [ 1206, 1208 ], [ 1221, 1223 ], [ 1312, 1314 ], [ 1327, 1329 ], [ 1886, 1888 ], [ 1924, 1926 ], [ 2029, 2031 ], [ 2037, 2039 ], [ 2052, 2054 ], [ 2074, 2076 ], [ 2095, 2097 ], [ 2150, 2152 ], [ 2170, 2172 ], [ 2691, 2693 ], [ 3303, 3305 ], [ 3396, 3398 ], [ 3404, 3406 ], [ 3419, 3421 ], [ 3447, 3449 ], [ 3787, 3789 ], [ 3802, 3804 ], [ 3838, 3840 ], [ 3853, 3855 ], [ 3888, 3890 ], [ 3903, 3905 ], [ 4067, 4069 ], [ 4152, 4154 ], [ 4269, 4271 ], [ 4321, 4323 ], [ 4468, 4470 ], [ 4502, 4504 ], [ 4614, 4616 ], [ 4683, 4685 ], [ 4753, 4755 ], [ 4765, 4767 ], [ 4790, 4792 ], [ 4833, 4835 ], [ 4858, 4860 ], [ 4947, 4949 ], [ 5132, 5134 ], [ 5179, 5181 ] ], [ [ 26, 29 ], [ 5640, 5643 ] ], [ [ 36, 51 ], [ 1952, 1967 ] ], [ [ 1413, 1431 ], [ 3555, 3573 ] ], [ [ 2740, 2753 ] ], [ [ 3602, 3610 ] ], [ [ 4085, 4099 ] ], [ [ 5572, 5579 ] ] ]
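A short usage sketch for the flow visualization helpers above, assuming they are importable from a local module (the module name flow_vis is an assumption) and using a synthetic flow field:

import numpy as np
import cv2

from flow_vis import flow_to_color  # assumed local module containing the functions above

h, w = 64, 64
ys, xs = np.mgrid[0:h, 0:w].astype(np.float32)
# Synthetic flow: horizontal component grows left to right, vertical top to bottom.
flow = np.dstack([xs - w / 2, ys - h / 2])

color = flow_to_color(flow, convert_to_bgr=True)  # H x W x 3, uint8
cv2.imwrite('flow_preview.png', color)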
r""" `torch.distributed.launch` is a module that spawns up multiple distributed training processes on each of the training nodes. The utility can be used for single-node distributed training, in which one or more processes per node will be spawned. The utility can be used for either CPU training or GPU training. If the utility is used for GPU training, each distributed process will be operating on a single GPU. This can achieve well-improved single-node training performance. It can also be used in multi-node distributed training, by spawning up multiple processes on each node for well-improved multi-node distributed training performance as well. This will especially be benefitial for systems with multiple Infiniband interfaces that have direct-GPU support, since all of them can be utilized for aggregated communication bandwidth. In both cases of single-node distributed training or multi-node distributed training, this utility will launch the given number of processes per node (``--nproc_per_node``). If used for GPU training, this number needs to be less or euqal to the number of GPUs on the current system (``nproc_per_node``), and each process will be operating on a single GPU from *GPU 0 to GPU (nproc_per_node - 1)*. **How to use this module:** 1. Single-Node multi-process distributed training :: >>> python -m torch.distributed.launch --nproc_per_node=NUM_GPUS_YOU_HAVE YOUR_TRAINING_SCRIPT.py (--arg1 --arg2 --arg3 and all other arguments of your training script) 2. Multi-Node multi-process distributed training: (e.g. two nodes) Node 1: *(IP: 192.168.1.1, and has a free port: 1234)* :: >>> python -m torch.distributed.launch --nproc_per_node=NUM_GPUS_YOU_HAVE --nnodes=2 --node_rank=0 --master_addr="192.168.1.1" --master_port=1234 YOUR_TRAINING_SCRIPT.py (--arg1 --arg2 --arg3 and all other arguments of your training script) Node 2: :: >>> python -m torch.distributed.launch --nproc_per_node=NUM_GPUS_YOU_HAVE --nnodes=2 --node_rank=1 --master_addr="192.168.1.1" --master_port=1234 YOUR_TRAINING_SCRIPT.py (--arg1 --arg2 --arg3 and all other arguments of your training script) 3. To look up what optional arguments this module offers: :: >>> python -m torch.distributed.launch --help **Important Notices:** 1. This utilty and multi-process distributed (single-node or multi-node) GPU training currently only achieves the best performance using the NCCL distributed backend. Thus NCCL backend is the recommended backend to use for GPU training. 2. In your training program, you must parse the command-line argument: ``--local_rank=LOCAL_PROCESS_RANK``, which will be provided by this module. If your training program uses GPUs, you should ensure that your code only runs on the GPU device of LOCAL_PROCESS_RANK. This can be done by: Parsing the local_rank argument :: >>> import argparse >>> parser = argparse.ArgumentParser() >>> parser.add_argument("--local_rank", type=int) >>> args = parser.parse_args() Set your device to local rank using either :: >>> torch.cuda.set_device(arg.local_rank) # before your code runs or :: >>> with torch.cuda.device(arg.local_rank): >>> # your code to run 3. In your training program, you are supposed to call the following function at the beginning to start the distributed backend. You need to make sure that the init_method uses ``env://``, which is the only supported ``init_method`` by this module. :: torch.distributed.init_process_group(backend='YOUR BACKEND', init_method='env://') 4. 
In your training program, you can either use regular distributed functions or use :func:`torch.nn.parallel.DistributedDataParallel` module. If your training program uses GPUs for training and you would like to use :func:`torch.nn.parallel.DistributedDataParallel` module, here is how to configure it. :: model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[arg.local_rank], output_device=arg.local_rank) Please ensure that ``device_ids`` argument is set to be the only GPU device id that your code will be operating on. This is generally the local rank of the process. In other words, the ``device_ids`` needs to be ``[args.local_rank]``, and ``output_device`` needs to be ``args.local_rank`` in order to use this utility 5. Another way to pass ``local_rank`` to the subprocesses via environment variable ``LOCAL_RANK``. This behavior is enabled when you launch the script with ``--use_env=True``. You must adjust the subprocess example above to replace ``args.local_rank`` with ``os.environ['LOCAL_RANK']``; the launcher will not pass ``--local_rank`` when you specify this flag. .. warning:: ``local_rank`` is NOT globally unique: it is only unique per process on a machine. Thus, don't use it to decide if you should, e.g., write to a networked filesystem. See https://github.com/pytorch/pytorch/issues/12042 for an example of how things can go wrong if you don't do this correctly. """ import sys import subprocess import os from argparse import ArgumentParser, REMAINDER def parse_args(): """ Helper function parsing the command line options @retval ArgumentParser """ parser = ArgumentParser(description="PyTorch distributed training launch " "helper utilty that will spawn up " "multiple distributed processes") # Optional arguments for the launch helper parser.add_argument("--nnodes", type=int, default=1, help="The number of nodes to use for distributed " "training") parser.add_argument("--node_rank", type=int, default=0, help="The rank of the node for multi-node distributed " "training") parser.add_argument("--nproc_per_node", type=int, default=1, help="The number of processes to launch on each node, " "for GPU training, this is recommended to be set " "to the number of GPUs in your system so that " "each process can be bound to a single GPU.") parser.add_argument("--master_addr", default="127.0.0.1", type=str, help="Master node (rank 0)'s address, should be either " "the IP address or the hostname of node 0, for " "single node multi-proc training, the " "--master_addr can simply be 127.0.0.1") parser.add_argument("--master_port", default=29500, type=int, help="Master node (rank 0)'s free port that needs to " "be used for communciation during distributed " "training") parser.add_argument("--use_env", default=False, action="store_true", help="Use environment variable to pass " "'local rank'. For legacy reasons, the default value is False. 
" "If set to True, the script will not pass " "--local_rank as argument, and will instead set LOCAL_RANK.") # positional parser.add_argument("training_script", type=str, help="The full path to the single GPU training " "program/script to be launched in parallel, " "followed by all the arguments for the " "training script") # rest from the training program parser.add_argument('training_script_args', nargs=REMAINDER) return parser.parse_args() def main(): args = parse_args() # world size in terms of number of processes dist_world_size = args.nproc_per_node * args.nnodes # set PyTorch distributed related environmental variables current_env = os.environ.copy() current_env["MASTER_ADDR"] = args.master_addr current_env["MASTER_PORT"] = str(args.master_port) current_env["WORLD_SIZE"] = str(dist_world_size) processes = [] for local_rank in range(0, args.nproc_per_node): # each process's rank dist_rank = args.nproc_per_node * args.node_rank + local_rank current_env["RANK"] = str(dist_rank) current_env["LOCAL_RANK"] = str(local_rank) # spawn the processes if args.use_env: cmd = [sys.executable, "-u", args.training_script] + args.training_script_args else: cmd = [sys.executable, "-u", args.training_script, "--local_rank={}".format(local_rank)] + args.training_script_args process = subprocess.Popen(cmd, env=current_env) processes.append(process) for process in processes: process.wait() if process.returncode != 0: raise subprocess.CalledProcessError(returncode=process.returncode, cmd=cmd) if __name__ == "__main__": main()
[ [ [ 5261, 5264 ], [ 8711, 8714 ], [ 8835, 8838 ] ], [ [ 5272, 5282 ], [ 9021, 9031 ], [ 9202, 9212 ] ], [ [ 5290, 5292 ], [ 8189, 8191 ] ], [ [ 5314, 5328 ], [ 5469, 5483 ] ], [ [ 5330, 5339 ], [ 7922, 7931 ] ], [ [ 5346, 5356 ], [ 7989, 7999 ] ], [ [ 7970, 7974 ], [ 9353, 9357 ] ] ]
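Pulling together the snippets given in the module docstring above, a worker script launched by torch.distributed.launch typically follows this sketch (the model and the NCCL/GPU choices are placeholders, not part of the source):

# worker.py -- minimal skeleton for a script started via torch.distributed.launch
import argparse

import torch
import torch.distributed as dist

parser = argparse.ArgumentParser()
parser.add_argument("--local_rank", type=int, default=0)  # supplied by the launcher
args = parser.parse_args()

torch.cuda.set_device(args.local_rank)
# env:// picks up MASTER_ADDR, MASTER_PORT, RANK and WORLD_SIZE set by the launcher.
dist.init_process_group(backend="nccl", init_method="env://")

model = torch.nn.Linear(10, 10).cuda()  # placeholder model
model = torch.nn.parallel.DistributedDataParallel(
    model, device_ids=[args.local_rank], output_device=args.local_rank)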
# -*- coding: utf-8 -*- from bamboo_engine.builder import * # noqa from bamboo_engine.engine import Engine from pipeline.eri.runtime import BambooDjangoRuntime from ..utils import * # noqa def test_retry_subprocess(): subproc_start = EmptyStartEvent() subproc_act = ServiceActivity(component_code="debug_node") subproc_end = EmptyEndEvent() subproc_start.extend(subproc_act).extend(subproc_end) params = Params({"${raise_var}": Var(type=Var.LAZY, custom_type="raise_variable", value="")}) start = EmptyStartEvent() subproc = SubProcess(start=subproc_start, params=params) end = EmptyEndEvent() start.extend(subproc).extend(end) pipeline = build_tree(start) engine = Engine(BambooDjangoRuntime()) engine.run_pipeline(pipeline=pipeline, root_pipeline_data={}) sleep(1) old_state = runtime.get_state(subproc.id) assert old_state.name == states.FAILED engine.retry_subprocess(subproc.id) sleep(1) state = runtime.get_state(subproc.id) assert state.name == states.FAILED assert state.version != old_state.version histories = runtime.get_histories(subproc.id) assert len(histories) == 1 assert histories[0].node_id == subproc.id assert histories[0].loop == 1 assert histories[0].retry == 0 assert histories[0].skip is False assert histories[0].started_time is not None assert histories[0].archived_time is not None assert histories[0].inputs == {} assert len(histories[0].outputs) == 1 assert "ex_data" in histories[0].outputs assert histories[0].version == old_state.version
[ [ [ 59, 60 ] ], [ [ 102, 108 ], [ 721, 727 ] ], [ [ 142, 161 ], [ 728, 747 ] ], [ [ 183, 184 ], [ 244, 259 ], [ 280, 295 ], [ 343, 356 ], [ 432, 438 ], [ 456, 459 ], [ 465, 468 ], [ 530, 545 ], [ 562, 572 ], [ 619, 632 ], [ 690, 700 ], [ 822, 827 ], [ 848, 855 ], [ 907, 913 ], [ 967, 972 ], [ 989, 996 ], [ 1044, 1050 ], [ 1121, 1128 ] ], [ [ 199, 220 ] ] ]
""" Cross-validation with blocks. """ __author__ = "Steven Kearnes" __copyright__ = "Copyright 2014, Stanford University" __license__ = "3-clause BSD" __maintainer__ = "Steven Kearnes" from theano.compat.six.moves import xrange from pylearn2.blocks import StackedBlocks class StackedBlocksCV(object): """ Multi-layer transforms using cross-validation models. Parameters ---------- layers : iterable (list of lists) Cross-validation models for each layer. Should be a list of lists, where the first index is for the layer and the second index is for the cross-validation fold. """ def __init__(self, layers): stacked_blocks = [] n_folds = len(layers[0]) assert all([len(layer) == n_folds for layer in layers]) # stack the k-th block from each layer for k in xrange(n_folds): this_blocks = [] for i, layer in enumerate(layers): this_blocks.append(layer[k]) this_stacked_blocks = StackedBlocks(this_blocks) stacked_blocks.append(this_stacked_blocks) # _folds contains a StackedBlocks instance for each CV fold self._folds = stacked_blocks def select_fold(self, k): """ Choose a single cross-validation fold to represent. Parameters ---------- k : int Index of selected fold. """ return self._folds[k] def get_input_space(self): """Get input space.""" return self._folds[0][0].get_input_space() def get_output_space(self): """Get output space.""" return self._folds[0][-1].get_output_space() def set_input_space(self, space): """ Set input space. Parameters ---------- space : WRITEME Input space. """ for fold in self._folds: this_space = space for layer in fold._layers: layer.set_input_space(this_space) this_space = layer.get_output_space()
[ [ [ 39, 49 ] ], [ [ 69, 82 ] ], [ [ 123, 134 ] ], [ [ 152, 166 ] ], [ [ 223, 229 ], [ 855, 861 ] ], [ [ 258, 271 ], [ 1027, 1040 ] ], [ [ 280, 295 ] ] ]
# Generated by Django 2.2.1 on 2019-05-12 08:53

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('leads', '0004_lead_documentstagecode'),
    ]

    operations = [
        migrations.AlterField(
            model_name='lead',
            name='email',
            field=models.CharField(blank=True, max_length=100),
        ),
        migrations.AlterField(
            model_name='lead',
            name='name',
            field=models.CharField(blank=True, max_length=100),
        ),
    ]
[ [ [ 71, 81 ], [ 108, 118 ], [ 237, 247 ], [ 400, 410 ] ], [ [ 83, 89 ], [ 335, 341 ], [ 497, 503 ] ], [ [ 98, 107 ] ] ]
import asyncio import json import logging import socket import time import traceback from pathlib import Path from typing import Callable, Dict, List, Optional, Set, Tuple, Union from blspy import PrivateKey from chia.consensus.block_record import BlockRecord from chia.consensus.blockchain_interface import BlockchainInterface from chia.consensus.constants import ConsensusConstants from chia.consensus.multiprocess_validation import PreValidationResult from chia.daemon.keychain_proxy import ( KeychainProxy, KeychainProxyConnectionFailure, KeyringIsEmpty, KeyringIsLocked, connect_to_keychain_and_validate, wrap_local_keychain, ) from chia.pools.pool_puzzles import SINGLETON_LAUNCHER_HASH from chia.protocols import wallet_protocol from chia.protocols.full_node_protocol import RequestProofOfWeight, RespondProofOfWeight from chia.protocols.protocol_message_types import ProtocolMessageTypes from chia.protocols.wallet_protocol import ( RejectAdditionsRequest, RejectRemovalsRequest, RequestAdditions, RequestHeaderBlocks, RespondAdditions, RespondBlockHeader, RespondHeaderBlocks, RespondRemovals, ) from chia.server.node_discovery import WalletPeers from chia.server.outbound_message import Message, NodeType, make_msg from chia.server.server import ChiaServer from chia.server.ws_connection import WSChiaConnection from chia.types.blockchain_format.coin import Coin, hash_coin_list from chia.types.blockchain_format.sized_bytes import bytes32 from chia.types.coin_spend import CoinSpend from chia.types.header_block import HeaderBlock from chia.types.mempool_inclusion_status import MempoolInclusionStatus from chia.types.peer_info import PeerInfo from chia.util.byte_types import hexstr_to_bytes from chia.util.check_fork_next_block import check_fork_next_block from chia.util.errors import Err, ValidationError from chia.util.ints import uint32, uint128 from chia.util.keychain import Keychain from chia.util.lru_cache import LRUCache from chia.util.merkle_set import MerkleSet, confirm_included_already_hashed, confirm_not_included_already_hashed from chia.util.path import mkdir, path_from_root from chia.wallet.block_record import HeaderBlockRecord from chia.wallet.derivation_record import DerivationRecord from chia.wallet.settings.settings_objects import BackupInitialized from chia.wallet.transaction_record import TransactionRecord from chia.wallet.util.backup_utils import open_backup_file from chia.wallet.util.wallet_types import WalletType from chia.wallet.wallet_action import WalletAction from chia.wallet.wallet_blockchain import ReceiveBlockResult from chia.wallet.wallet_state_manager import WalletStateManager from chia.util.profiler import profile_task class WalletNode: key_config: Dict config: Dict constants: ConsensusConstants keychain_proxy: Optional[KeychainProxy] local_keychain: Optional[Keychain] # For testing only. KeychainProxy is used in normal cases server: Optional[ChiaServer] log: logging.Logger wallet_peers: WalletPeers # Maintains the state of the wallet (blockchain and transactions), handles DB connections wallet_state_manager: Optional[WalletStateManager] # How far away from LCA we must be to perform a full sync. 
Before then, do a short sync, # which is consecutive requests for the previous block short_sync_threshold: int _shut_down: bool root_path: Path state_changed_callback: Optional[Callable] syncing: bool full_node_peer: Optional[PeerInfo] peer_task: Optional[asyncio.Task] logged_in: bool wallet_peers_initialized: bool def __init__( self, config: Dict, root_path: Path, consensus_constants: ConsensusConstants, name: str = None, local_keychain: Optional[Keychain] = None, ): self.config = config self.constants = consensus_constants self.keychain_proxy = None self.local_keychain = local_keychain self.root_path = root_path self.log = logging.getLogger(name if name else __name__) # Normal operation data self.cached_blocks: Dict = {} self.future_block_hashes: Dict = {} # Sync data self._shut_down = False self.proof_hashes: List = [] self.header_hashes: List = [] self.header_hashes_error = False self.short_sync_threshold = 15 # Change the test when changing this self.potential_blocks_received: Dict = {} self.potential_header_hashes: Dict = {} self.state_changed_callback = None self.wallet_state_manager = None self.backup_initialized = False # Delay first launch sync after user imports backup info or decides to skip self.server = None self.wsm_close_task = None self.sync_task: Optional[asyncio.Task] = None self.logged_in_fingerprint: Optional[int] = None self.peer_task = None self.logged_in = False self.wallet_peers_initialized = False self.last_new_peak_messages = LRUCache(5) async def ensure_keychain_proxy(self) -> KeychainProxy: if not self.keychain_proxy: if self.local_keychain: self.keychain_proxy = wrap_local_keychain(self.local_keychain, log=self.log) else: self.keychain_proxy = await connect_to_keychain_and_validate(self.root_path, self.log) if not self.keychain_proxy: raise KeychainProxyConnectionFailure("Failed to connect to keychain service") return self.keychain_proxy async def get_key_for_fingerprint(self, fingerprint: Optional[int]) -> Optional[PrivateKey]: key: PrivateKey = None try: keychain_proxy = await self.ensure_keychain_proxy() key = await keychain_proxy.get_key_for_fingerprint(fingerprint) except KeyringIsEmpty: self.log.warning("No keys present. 
Create keys with the UI, or with the 'sit keys' program.") return None except KeyringIsLocked: self.log.warning("Keyring is locked") return None except KeychainProxyConnectionFailure as e: tb = traceback.format_exc() self.log.error(f"Missing keychain_proxy: {e} {tb}") raise e # Re-raise so that the caller can decide whether to continue or abort return key async def _start( self, fingerprint: Optional[int] = None, new_wallet: bool = False, backup_file: Optional[Path] = None, skip_backup_import: bool = False, ) -> bool: try: private_key = await self.get_key_for_fingerprint(fingerprint) except KeychainProxyConnectionFailure: self.log.error("Failed to connect to keychain service") return False if private_key is None: self.logged_in = False return False if self.config.get("enable_profiler", False): asyncio.create_task(profile_task(self.root_path, "wallet", self.log)) db_path_key_suffix = str(private_key.get_g1().get_fingerprint()) db_path_replaced: str = ( self.config["database_path"] .replace("CHALLENGE", self.config["selected_network"]) .replace("KEY", db_path_key_suffix) ) path = path_from_root(self.root_path, db_path_replaced) mkdir(path.parent) self.new_peak_lock = asyncio.Lock() assert self.server is not None self.wallet_state_manager = await WalletStateManager.create( private_key, self.config, path, self.constants, self.server, self.root_path ) self.wsm_close_task = None assert self.wallet_state_manager is not None backup_settings: BackupInitialized = self.wallet_state_manager.user_settings.get_backup_settings() if backup_settings.user_initialized is False: if new_wallet is True: await self.wallet_state_manager.user_settings.user_created_new_wallet() self.wallet_state_manager.new_wallet = True elif skip_backup_import is True: await self.wallet_state_manager.user_settings.user_skipped_backup_import() elif backup_file is not None: await self.wallet_state_manager.import_backup_info(backup_file) else: self.backup_initialized = False await self.wallet_state_manager.close_all_stores() self.wallet_state_manager = None self.logged_in = False return False self.backup_initialized = True # Start peers here after the backup initialization has finished # We only want to do this once per instantiation # However, doing it earlier before backup initialization causes # the wallet to spam the introducer if self.wallet_peers_initialized is False: asyncio.create_task(self.wallet_peers.start()) self.wallet_peers_initialized = True if backup_file is not None: json_dict = open_backup_file(backup_file, self.wallet_state_manager.private_key) if "start_height" in json_dict["data"]: start_height = json_dict["data"]["start_height"] self.config["starting_height"] = max(0, start_height - self.config["start_height_buffer"]) else: self.config["starting_height"] = 0 else: self.config["starting_height"] = 0 if self.state_changed_callback is not None: self.wallet_state_manager.set_callback(self.state_changed_callback) self.wallet_state_manager.set_pending_callback(self._pending_tx_handler) self._shut_down = False self.peer_task = asyncio.create_task(self._periodically_check_full_node()) self.sync_event = asyncio.Event() self.sync_task = asyncio.create_task(self.sync_job()) self.logged_in_fingerprint = fingerprint self.logged_in = True return True def _close(self): self.log.info("self._close") self.logged_in_fingerprint = None self._shut_down = True async def _await_closed(self): self.log.info("self._await_closed") await self.server.close_all_connections() asyncio.create_task(self.wallet_peers.ensure_is_closed()) if 
self.wallet_state_manager is not None: await self.wallet_state_manager.close_all_stores() self.wallet_state_manager = None if self.sync_task is not None: self.sync_task.cancel() self.sync_task = None if self.peer_task is not None: self.peer_task.cancel() self.peer_task = None self.logged_in = False def _set_state_changed_callback(self, callback: Callable): self.state_changed_callback = callback if self.wallet_state_manager is not None: self.wallet_state_manager.set_callback(self.state_changed_callback) self.wallet_state_manager.set_pending_callback(self._pending_tx_handler) def _pending_tx_handler(self): if self.wallet_state_manager is None or self.backup_initialized is False: return None asyncio.create_task(self._resend_queue()) async def _action_messages(self) -> List[Message]: if self.wallet_state_manager is None or self.backup_initialized is False: return [] actions: List[WalletAction] = await self.wallet_state_manager.action_store.get_all_pending_actions() result: List[Message] = [] for action in actions: data = json.loads(action.data) action_data = data["data"]["action_data"] if action.name == "request_puzzle_solution": coin_name = bytes32(hexstr_to_bytes(action_data["coin_name"])) height = uint32(action_data["height"]) msg = make_msg( ProtocolMessageTypes.request_puzzle_solution, wallet_protocol.RequestPuzzleSolution(coin_name, height), ) result.append(msg) return result async def _resend_queue(self): if ( self._shut_down or self.server is None or self.wallet_state_manager is None or self.backup_initialized is None ): return None for msg, sent_peers in await self._messages_to_resend(): if ( self._shut_down or self.server is None or self.wallet_state_manager is None or self.backup_initialized is None ): return None full_nodes = self.server.get_full_node_connections() for peer in full_nodes: if peer.peer_node_id in sent_peers: continue await peer.send_message(msg) for msg in await self._action_messages(): if ( self._shut_down or self.server is None or self.wallet_state_manager is None or self.backup_initialized is None ): return None await self.server.send_to_all([msg], NodeType.FULL_NODE) async def _messages_to_resend(self) -> List[Tuple[Message, Set[bytes32]]]: if self.wallet_state_manager is None or self.backup_initialized is False or self._shut_down: return [] messages: List[Tuple[Message, Set[bytes32]]] = [] records: List[TransactionRecord] = await self.wallet_state_manager.tx_store.get_not_sent() for record in records: if record.spend_bundle is None: continue msg = make_msg( ProtocolMessageTypes.send_transaction, wallet_protocol.SendTransaction(record.spend_bundle), ) already_sent = set() for peer, status, _ in record.sent_to: if status == MempoolInclusionStatus.SUCCESS.value: already_sent.add(hexstr_to_bytes(peer)) messages.append((msg, already_sent)) return messages def set_server(self, server: ChiaServer): self.server = server DNS_SERVERS_EMPTY: list = [] # TODO: Perhaps use a different set of DNS seeders for wallets, to split the traffic. 
self.wallet_peers = WalletPeers( self.server, self.root_path, self.config["target_peer_count"], self.config["wallet_peers_path"], self.config["introducer_peer"], DNS_SERVERS_EMPTY, self.config["peer_connect_interval"], self.config["selected_network"], None, self.log, ) async def on_connect(self, peer: WSChiaConnection): if self.wallet_state_manager is None or self.backup_initialized is False: return None messages_peer_ids = await self._messages_to_resend() self.wallet_state_manager.state_changed("add_connection") for msg, peer_ids in messages_peer_ids: if peer.peer_node_id in peer_ids: continue await peer.send_message(msg) if not self.has_full_node() and self.wallet_peers is not None: asyncio.create_task(self.wallet_peers.on_connect(peer)) async def _periodically_check_full_node(self) -> None: tries = 0 while not self._shut_down and tries < 5: if self.has_full_node(): await self.wallet_peers.ensure_is_closed() if self.wallet_state_manager is not None: self.wallet_state_manager.state_changed("add_connection") break tries += 1 await asyncio.sleep(self.config["peer_connect_interval"]) def has_full_node(self) -> bool: if self.server is None: return False if "full_node_peer" in self.config: full_node_peer = PeerInfo( self.config["full_node_peer"]["host"], self.config["full_node_peer"]["port"], ) peers = [c.get_peer_info() for c in self.server.get_full_node_connections()] full_node_resolved = PeerInfo(socket.gethostbyname(full_node_peer.host), full_node_peer.port) if full_node_peer in peers or full_node_resolved in peers: self.log.info(f"Will not attempt to connect to other nodes, already connected to {full_node_peer}") for connection in self.server.get_full_node_connections(): if ( connection.get_peer_info() != full_node_peer and connection.get_peer_info() != full_node_resolved ): self.log.info(f"Closing unnecessary connection to {connection.get_peer_logging()}.") asyncio.create_task(connection.close()) return True return False async def complete_blocks(self, header_blocks: List[HeaderBlock], peer: WSChiaConnection): if self.wallet_state_manager is None: return None header_block_records: List[HeaderBlockRecord] = [] assert self.server trusted = self.server.is_trusted_peer(peer, self.config["trusted_peers"]) async with self.wallet_state_manager.blockchain.lock: for block in header_blocks: if block.is_transaction_block: # Find additions and removals (additions, removals,) = await self.wallet_state_manager.get_filter_additions_removals( block, block.transactions_filter, None ) # Get Additions added_coins = await self.get_additions(peer, block, additions) if added_coins is None: raise ValueError("Failed to fetch additions") # Get removals removed_coins = await self.get_removals(peer, block, added_coins, removals) if removed_coins is None: raise ValueError("Failed to fetch removals") # If there is a launcher created, or we have a singleton spent, fetches the required solutions additional_coin_spends: List[CoinSpend] = await self.get_additional_coin_spends( peer, block, added_coins, removed_coins ) hbr = HeaderBlockRecord(block, added_coins, removed_coins) else: hbr = HeaderBlockRecord(block, [], []) header_block_records.append(hbr) additional_coin_spends = [] (result, error, fork_h,) = await self.wallet_state_manager.blockchain.receive_block( hbr, trusted=trusted, additional_coin_spends=additional_coin_spends ) if result == ReceiveBlockResult.NEW_PEAK: if not self.wallet_state_manager.sync_mode: self.wallet_state_manager.blockchain.clean_block_records() 
self.wallet_state_manager.state_changed("new_block") self.wallet_state_manager.state_changed("sync_changed") await self.wallet_state_manager.new_peak() elif result == ReceiveBlockResult.INVALID_BLOCK: self.log.info(f"Invalid block from peer: {peer.get_peer_logging()} {error}") await peer.close() return else: self.log.debug(f"Result: {result}") async def new_peak_wallet(self, peak: wallet_protocol.NewPeakWallet, peer: WSChiaConnection): if self.wallet_state_manager is None: return if self.wallet_state_manager.blockchain.contains_block(peak.header_hash): self.log.debug(f"known peak {peak.header_hash}") return if self.wallet_state_manager.sync_mode: self.last_new_peak_messages.put(peer, peak) return async with self.new_peak_lock: curr_peak = self.wallet_state_manager.blockchain.get_peak() if curr_peak is not None and curr_peak.weight >= peak.weight: return request = wallet_protocol.RequestBlockHeader(peak.height) response: Optional[RespondBlockHeader] = await peer.request_block_header(request) if response is None or not isinstance(response, RespondBlockHeader) or response.header_block is None: self.log.warning(f"bad peak response from peer {response}") return header_block = response.header_block curr_peak_height = 0 if curr_peak is None else curr_peak.height if (curr_peak_height == 0 and peak.height < self.constants.WEIGHT_PROOF_RECENT_BLOCKS) or ( curr_peak_height > peak.height - 200 ): if peak.height <= curr_peak_height + self.config["short_sync_blocks_behind_threshold"]: await self.wallet_short_sync_backtrack(header_block, peer) else: await self.batch_sync_to_peak(curr_peak_height, peak) elif peak.height >= self.constants.WEIGHT_PROOF_RECENT_BLOCKS: # Request weight proof # Sync if PoW validates weight_request = RequestProofOfWeight(peak.height, peak.header_hash) weight_proof_response: RespondProofOfWeight = await peer.request_proof_of_weight( weight_request, timeout=360 ) if weight_proof_response is None: return weight_proof = weight_proof_response.wp if self.wallet_state_manager is None: return if self.server is not None and self.server.is_trusted_peer(peer, self.config["trusted_peers"]): valid, fork_point = self.wallet_state_manager.weight_proof_handler.get_fork_point_no_validations( weight_proof ) else: valid, fork_point, _ = await self.wallet_state_manager.weight_proof_handler.validate_weight_proof( weight_proof ) if not valid: self.log.error( f"invalid weight proof, num of epochs {len(weight_proof.sub_epochs)}" f" recent blocks num ,{len(weight_proof.recent_chain_data)}" ) self.log.debug(f"{weight_proof}") return self.log.info(f"Validated, fork point is {fork_point}") self.wallet_state_manager.sync_store.add_potential_fork_point( header_block.header_hash, uint32(fork_point) ) self.wallet_state_manager.sync_store.add_potential_peak(header_block) self.start_sync() async def wallet_short_sync_backtrack(self, header_block, peer): top = header_block blocks = [top] # Fetch blocks backwards until we hit the one that we have, # then complete them with additions / removals going forward while not self.wallet_state_manager.blockchain.contains_block(top.prev_header_hash) and top.height > 0: request_prev = wallet_protocol.RequestBlockHeader(top.height - 1) response_prev: Optional[RespondBlockHeader] = await peer.request_block_header(request_prev) if response_prev is None or not isinstance(response_prev, RespondBlockHeader): raise RuntimeError("bad block header response from peer while syncing") prev_head = response_prev.header_block blocks.append(prev_head) top = prev_head blocks.reverse() await 
self.complete_blocks(blocks, peer) await self.wallet_state_manager.create_more_puzzle_hashes() async def batch_sync_to_peak(self, fork_height, peak): advanced_peak = False batch_size = self.constants.MAX_BLOCK_COUNT_PER_REQUESTS for i in range(max(0, fork_height - 1), peak.height, batch_size): start_height = i end_height = min(peak.height, start_height + batch_size) peers = self.server.get_full_node_connections() added = False for peer in peers: try: added, advanced_peak = await self.fetch_blocks_and_validate( peer, uint32(start_height), uint32(end_height), None if advanced_peak else fork_height ) if added: break except Exception as e: await peer.close() exc = traceback.format_exc() self.log.error(f"Error while trying to fetch from peer:{e} {exc}") if not added: raise RuntimeError(f"Was not able to add blocks {start_height}-{end_height}") curr_peak = self.wallet_state_manager.blockchain.get_peak() assert peak is not None self.wallet_state_manager.blockchain.clean_block_record( min(end_height, curr_peak.height) - self.constants.BLOCKS_CACHE_SIZE ) def start_sync(self) -> None: self.log.info("self.sync_event.set()") self.sync_event.set() async def check_new_peak(self) -> None: if self.wallet_state_manager is None: return None current_peak: Optional[BlockRecord] = self.wallet_state_manager.blockchain.get_peak() if current_peak is None: return None potential_peaks: List[ Tuple[bytes32, HeaderBlock] ] = self.wallet_state_manager.sync_store.get_potential_peaks_tuples() for _, block in potential_peaks: if current_peak.weight < block.weight: await asyncio.sleep(5) self.start_sync() return None async def sync_job(self) -> None: while True: self.log.info("Loop start in sync job") if self._shut_down is True: break asyncio.create_task(self.check_new_peak()) await self.sync_event.wait() self.last_new_peak_messages = LRUCache(5) self.sync_event.clear() if self._shut_down is True: break try: assert self.wallet_state_manager is not None self.wallet_state_manager.set_sync_mode(True) await self._sync() except Exception as e: tb = traceback.format_exc() self.log.error(f"Loop exception in sync {e}. {tb}") finally: if self.wallet_state_manager is not None: self.wallet_state_manager.set_sync_mode(False) for peer, peak in self.last_new_peak_messages.cache.items(): asyncio.create_task(self.new_peak_wallet(peak, peer)) self.log.info("Loop end in sync job") async def _sync(self) -> None: """ Wallet has fallen far behind (or is starting up for the first time), and must be synced up to the LCA of the blockchain. 
""" if self.wallet_state_manager is None or self.backup_initialized is False or self.server is None: return None highest_weight: uint128 = uint128(0) peak_height: uint32 = uint32(0) peak: Optional[HeaderBlock] = None potential_peaks: List[ Tuple[bytes32, HeaderBlock] ] = self.wallet_state_manager.sync_store.get_potential_peaks_tuples() self.log.info(f"Have collected {len(potential_peaks)} potential peaks") for header_hash, potential_peak_block in potential_peaks: if potential_peak_block.weight > highest_weight: highest_weight = potential_peak_block.weight peak_height = potential_peak_block.height peak = potential_peak_block if peak_height is None or peak_height == 0: return None if self.wallet_state_manager.peak is not None and highest_weight <= self.wallet_state_manager.peak.weight: self.log.info("Not performing sync, already caught up.") return None peers: List[WSChiaConnection] = self.server.get_full_node_connections() if len(peers) == 0: self.log.info("No peers to sync to") return None async with self.wallet_state_manager.blockchain.lock: fork_height = None if peak is not None: fork_height = self.wallet_state_manager.sync_store.get_potential_fork_point(peak.header_hash) assert fork_height is not None # This is the fork point in SES in the case where no fork was detected peers = self.server.get_full_node_connections() fork_height = await check_fork_next_block( self.wallet_state_manager.blockchain, fork_height, peers, wallet_next_block_check ) if fork_height is None: fork_height = uint32(0) await self.wallet_state_manager.blockchain.warmup(fork_height) await self.batch_sync_to_peak(fork_height, peak) async def fetch_blocks_and_validate( self, peer: WSChiaConnection, height_start: uint32, height_end: uint32, fork_point_with_peak: Optional[uint32], ) -> Tuple[bool, bool]: """ Returns whether the blocks validated, and whether the peak was advanced """ if self.wallet_state_manager is None: return False, False self.log.info(f"Requesting blocks {height_start}-{height_end}") request = RequestHeaderBlocks(uint32(height_start), uint32(height_end)) res: Optional[RespondHeaderBlocks] = await peer.request_header_blocks(request) if res is None or not isinstance(res, RespondHeaderBlocks): raise ValueError("Peer returned no response") header_blocks: List[HeaderBlock] = res.header_blocks advanced_peak = False if header_blocks is None: raise ValueError(f"No response from peer {peer}") assert self.server trusted = self.server.is_trusted_peer(peer, self.config["trusted_peers"]) pre_validation_results: Optional[List[PreValidationResult]] = None if not trusted: pre_validation_results = await self.wallet_state_manager.blockchain.pre_validate_blocks_multiprocessing( header_blocks ) if pre_validation_results is None: return False, advanced_peak assert len(header_blocks) == len(pre_validation_results) for i in range(len(header_blocks)): header_block = header_blocks[i] if not trusted and pre_validation_results is not None and pre_validation_results[i].error is not None: raise ValidationError(Err(pre_validation_results[i].error)) fork_point_with_old_peak = None if advanced_peak else fork_point_with_peak if header_block.is_transaction_block: # Find additions and removals (additions, removals,) = await self.wallet_state_manager.get_filter_additions_removals( header_block, header_block.transactions_filter, fork_point_with_old_peak ) # Get Additions added_coins = await self.get_additions(peer, header_block, additions) if added_coins is None: raise ValueError("Failed to fetch additions") # Get removals removed_coins = 
await self.get_removals(peer, header_block, added_coins, removals) if removed_coins is None: raise ValueError("Failed to fetch removals") # If there is a launcher created, or we have a singleton spent, fetches the required solutions additional_coin_spends: List[CoinSpend] = await self.get_additional_coin_spends( peer, header_block, added_coins, removed_coins ) header_block_record = HeaderBlockRecord(header_block, added_coins, removed_coins) else: header_block_record = HeaderBlockRecord(header_block, [], []) additional_coin_spends = [] start_t = time.time() if trusted: (result, error, fork_h,) = await self.wallet_state_manager.blockchain.receive_block( header_block_record, None, trusted, fork_point_with_old_peak, additional_coin_spends=additional_coin_spends, ) else: assert pre_validation_results is not None (result, error, fork_h,) = await self.wallet_state_manager.blockchain.receive_block( header_block_record, pre_validation_results[i], trusted, fork_point_with_old_peak, additional_coin_spends=additional_coin_spends, ) self.log.debug( f"Time taken to validate {header_block.height} with fork " f"{fork_point_with_old_peak}: {time.time() - start_t}" ) if result == ReceiveBlockResult.NEW_PEAK: advanced_peak = True self.wallet_state_manager.state_changed("new_block") elif result == ReceiveBlockResult.INVALID_BLOCK: raise ValueError("Value error peer sent us invalid block") if advanced_peak: await self.wallet_state_manager.create_more_puzzle_hashes() return True, advanced_peak def validate_additions( self, coins: List[Tuple[bytes32, List[Coin]]], proofs: Optional[List[Tuple[bytes32, bytes, Optional[bytes]]]], root, ): if proofs is None: # Verify root additions_merkle_set = MerkleSet() # Addition Merkle set contains puzzlehash and hash of all coins with that puzzlehash for puzzle_hash, coins_l in coins: additions_merkle_set.add_already_hashed(puzzle_hash) additions_merkle_set.add_already_hashed(hash_coin_list(coins_l)) additions_root = additions_merkle_set.get_root() if root != additions_root: return False else: for i in range(len(coins)): assert coins[i][0] == proofs[i][0] coin_list_1: List[Coin] = coins[i][1] puzzle_hash_proof: bytes32 = proofs[i][1] coin_list_proof: Optional[bytes32] = proofs[i][2] if len(coin_list_1) == 0: # Verify exclusion proof for puzzle hash not_included = confirm_not_included_already_hashed( root, coins[i][0], puzzle_hash_proof, ) if not_included is False: return False else: try: # Verify inclusion proof for coin list included = confirm_included_already_hashed( root, hash_coin_list(coin_list_1), coin_list_proof, ) if included is False: return False except AssertionError: return False try: # Verify inclusion proof for puzzle hash included = confirm_included_already_hashed( root, coins[i][0], puzzle_hash_proof, ) if included is False: return False except AssertionError: return False return True def validate_removals(self, coins, proofs, root): if proofs is None: # If there are no proofs, it means all removals were returned in the response. # we must find the ones relevant to our wallets. # Verify removals root removals_merkle_set = MerkleSet() for name_coin in coins: # TODO review all verification name, coin = name_coin if coin is not None: removals_merkle_set.add_already_hashed(coin.name()) removals_root = removals_merkle_set.get_root() if root != removals_root: return False else: # This means the full node has responded only with the relevant removals # for our wallet. Each merkle proof must be verified. 
if len(coins) != len(proofs): return False for i in range(len(coins)): # Coins are in the same order as proofs if coins[i][0] != proofs[i][0]: return False coin = coins[i][1] if coin is None: # Verifies merkle proof of exclusion not_included = confirm_not_included_already_hashed( root, coins[i][0], proofs[i][1], ) if not_included is False: return False else: # Verifies merkle proof of inclusion of coin name if coins[i][0] != coin.name(): return False included = confirm_included_already_hashed( root, coin.name(), proofs[i][1], ) if included is False: return False return True async def fetch_puzzle_solution(self, peer, height: uint32, coin: Coin) -> CoinSpend: solution_response = await peer.request_puzzle_solution( wallet_protocol.RequestPuzzleSolution(coin.name(), height) ) if solution_response is None or not isinstance(solution_response, wallet_protocol.RespondPuzzleSolution): raise ValueError(f"Was not able to obtain solution {solution_response}") return CoinSpend(coin, solution_response.response.puzzle, solution_response.response.solution) async def get_additional_coin_spends( self, peer, block, added_coins: List[Coin], removed_coins: List[Coin] ) -> List[CoinSpend]: assert self.wallet_state_manager is not None additional_coin_spends: List[CoinSpend] = [] if len(removed_coins) > 0: removed_coin_ids = set([coin.name() for coin in removed_coins]) all_added_coins = await self.get_additions(peer, block, [], get_all_additions=True) assert all_added_coins is not None if all_added_coins is not None: for coin in all_added_coins: # This searches specifically for a launcher being created, and adds the solution of the launcher if coin.puzzle_hash == SINGLETON_LAUNCHER_HASH and coin.parent_coin_info in removed_coin_ids: cs: CoinSpend = await self.fetch_puzzle_solution(peer, block.height, coin) additional_coin_spends.append(cs) # Apply this coin solution, which might add things to interested list await self.wallet_state_manager.get_next_interesting_coin_ids(cs, False) all_removed_coins: Optional[List[Coin]] = await self.get_removals( peer, block, added_coins, removed_coins, request_all_removals=True ) assert all_removed_coins is not None all_removed_coins_dict: Dict[bytes32, Coin] = {coin.name(): coin for coin in all_removed_coins} keep_searching = True while keep_searching: # This keeps fetching solutions for coins we are interested list, in this block, until # there are no more interested things to fetch keep_searching = False interested_ids: List[ bytes32 ] = await self.wallet_state_manager.interested_store.get_interested_coin_ids() for coin_id in interested_ids: if coin_id in all_removed_coins_dict: coin = all_removed_coins_dict[coin_id] cs = await self.fetch_puzzle_solution(peer, block.height, coin) # Apply this coin solution, which might add things to interested list await self.wallet_state_manager.get_next_interesting_coin_ids(cs, False) additional_coin_spends.append(cs) keep_searching = True all_removed_coins_dict.pop(coin_id) break return additional_coin_spends async def get_additions( self, peer: WSChiaConnection, block_i, additions: Optional[List[bytes32]], get_all_additions: bool = False ) -> Optional[List[Coin]]: if (additions is not None and len(additions) > 0) or get_all_additions: if get_all_additions: additions = None additions_request = RequestAdditions(block_i.height, block_i.header_hash, additions) additions_res: Optional[Union[RespondAdditions, RejectAdditionsRequest]] = await peer.request_additions( additions_request ) if additions_res is None: await peer.close() return None 
elif isinstance(additions_res, RespondAdditions): validated = self.validate_additions( additions_res.coins, additions_res.proofs, block_i.foliage_transaction_block.additions_root, ) if not validated: await peer.close() return None added_coins = [] for ph_coins in additions_res.coins: ph, coins = ph_coins added_coins.extend(coins) return added_coins elif isinstance(additions_res, RejectRemovalsRequest): await peer.close() return None return None else: return [] # No added coins async def get_removals( self, peer: WSChiaConnection, block_i, additions, removals, request_all_removals=False ) -> Optional[List[Coin]]: assert self.wallet_state_manager is not None # Check if we need all removals for coin in additions: puzzle_store = self.wallet_state_manager.puzzle_store record_info: Optional[DerivationRecord] = await puzzle_store.get_derivation_record_for_puzzle_hash( coin.puzzle_hash.hex() ) if record_info is not None and record_info.wallet_type == WalletType.COLOURED_COIN: # TODO why ? request_all_removals = True break if record_info is not None and record_info.wallet_type == WalletType.DISTRIBUTED_ID: request_all_removals = True break if len(removals) > 0 or request_all_removals: if request_all_removals: removals_request = wallet_protocol.RequestRemovals(block_i.height, block_i.header_hash, None) else: removals_request = wallet_protocol.RequestRemovals(block_i.height, block_i.header_hash, removals) removals_res: Optional[Union[RespondRemovals, RejectRemovalsRequest]] = await peer.request_removals( removals_request ) if removals_res is None: return None elif isinstance(removals_res, RespondRemovals): validated = self.validate_removals( removals_res.coins, removals_res.proofs, block_i.foliage_transaction_block.removals_root, ) if validated is False: await peer.close() return None removed_coins = [] for _, coins_l in removals_res.coins: if coins_l is not None: removed_coins.append(coins_l) return removed_coins elif isinstance(removals_res, RejectRemovalsRequest): return None else: return None else: return [] async def wallet_next_block_check( peer: WSChiaConnection, potential_peek: uint32, blockchain: BlockchainInterface ) -> bool: block_response = await peer.request_header_blocks( wallet_protocol.RequestHeaderBlocks(potential_peek, potential_peek) ) if block_response is not None and isinstance(block_response, wallet_protocol.RespondHeaderBlocks): our_peak = blockchain.get_peak() if our_peak is not None and block_response.header_blocks[0].prev_header_hash == our_peak.header_hash: return True return False
[ [ [ 7, 14 ], [ 3560, 3567 ], [ 4850, 4857 ], [ 7026, 7033 ], [ 7490, 7497 ], [ 9003, 9010 ], [ 9856, 9863 ], [ 9940, 9947 ], [ 9981, 9988 ], [ 10388, 10395 ], [ 11330, 11337 ], [ 15433, 15440 ], [ 15911, 15918 ], [ 17049, 17056 ], [ 26244, 26251 ], [ 26508, 26515 ], [ 27310, 27317 ] ], [ [ 22, 26 ], [ 11726, 11730 ] ], [ [ 34, 41 ], [ 3014, 3021 ], [ 4050, 4057 ] ], [ [ 49, 55 ], [ 16396, 16402 ] ], [ [ 63, 67 ], [ 32868, 32872 ], [ 33807, 33811 ] ], [ [ 75, 84 ], [ 6227, 6236 ], [ 25095, 25104 ], [ 26976, 26985 ] ], [ [ 105, 109 ], [ 3427, 3431 ], [ 3703, 3707 ], [ 6568, 6572 ] ], [ [ 129, 137 ], [ 3469, 3477 ], [ 10906, 10914 ] ], [ [ 139, 143 ], [ 2774, 2778 ], [ 2791, 2795 ], [ 3678, 3682 ], [ 4156, 4160 ], [ 4200, 4204 ], [ 4496, 4500 ], [ 4544, 4548 ], [ 40617, 40621 ] ], [ [ 145, 149 ], [ 4290, 4294 ], [ 4328, 4332 ], [ 11413, 11417 ], [ 11549, 11553 ], [ 11657, 11661 ], [ 13417, 13421 ], [ 13594, 13598 ], [ 13652, 13656 ], [ 17190, 17194 ], [ 17334, 17338 ], [ 18505, 18509 ], [ 26006, 26010 ], [ 27894, 27898 ], [ 28692, 28696 ], [ 30478, 30482 ], [ 30792, 30796 ], [ 32465, 32469 ], [ 34332, 34336 ], [ 34352, 34356 ], [ 34391, 34395 ], [ 35118, 35122 ], [ 39289, 39293 ], [ 39242, 39246 ], [ 39269, 39273 ], [ 39391, 39395 ], [ 40380, 40384 ], [ 41018, 41022 ], [ 42038, 42042 ], [ 41972, 41976 ], [ 43510, 43514 ] ], [ [ 151, 159 ], [ 2850, 2858 ], [ 2894, 2902 ], [ 2984, 2992 ], [ 3179, 3187 ], [ 3460, 3468 ], [ 3517, 3525 ], [ 3551, 3559 ], [ 3808, 3816 ], [ 4841, 4849 ], [ 4907, 4915 ], [ 5685, 5693 ], [ 5667, 5675 ], [ 6482, 6490 ], [ 6559, 6567 ], [ 20593, 20601 ], [ 23738, 23746 ], [ 25852, 25860 ], [ 27840, 27848 ], [ 29861, 29869 ], [ 30255, 30263 ], [ 30783, 30791 ], [ 34382, 34390 ], [ 34418, 34426 ], [ 35234, 35242 ], [ 40371, 40379 ], [ 42029, 42037 ], [ 41963, 41971 ], [ 42322, 42330 ], [ 43501, 43509 ], [ 43738, 43746 ], [ 44591, 44599 ] ], [ [ 161, 164 ], [ 13437, 13440 ], [ 13614, 13617 ] ], [ [ 166, 171 ], [ 13422, 13427 ], [ 13599, 13604 ], [ 26024, 26029 ], [ 27912, 27917 ], [ 29888, 29893 ], [ 34337, 34342 ], [ 34396, 34401 ] ], [ [ 173, 178 ], [ 42331, 42336 ], [ 44600, 44605 ] ], [ [ 198, 208 ], [ 5694, 5704 ], [ 5720, 5730 ] ], [ [ 249, 260 ], [ 25861, 25872 ] ], [ [ 309, 328 ], [ 45683, 45702 ] ], [ [ 366, 384 ], [ 2811, 2829 ], [ 3738, 3756 ] ], [ [ 436, 455 ], [ 30797, 30816 ] ], [ [ 501, 514 ], [ 2859, 2872 ], [ 5131, 5144 ] ], [ [ 520, 550 ], [ 5502, 5532 ], [ 6173, 6203 ], [ 6741, 6771 ] ], [ [ 556, 570 ], [ 5906, 5920 ] ], [ [ 576, 591 ], [ 6067, 6082 ] ], [ [ 597, 629 ], [ 5373, 5405 ] ], [ [ 635, 654 ], [ 5256, 5275 ] ], [ [ 694, 717 ], [ 39916, 39939 ] ], [ [ 745, 760 ], [ 12113, 12128 ], [ 13934, 13949 ], [ 19883, 19898 ], [ 20523, 20538 ], [ 23660, 23675 ], [ 38788, 38803 ], [ 38931, 38946 ], [ 44358, 44373 ], [ 44486, 44501 ], [ 45777, 45792 ], [ 45916, 45931 ] ], [ [ 807, 827 ], [ 21642, 21662 ] ], [ [ 829, 849 ], [ 21733, 21753 ] ], [ [ 900, 920 ], [ 12047, 12067 ], [ 13879, 13899 ] ], [ [ 970, 992 ], [ 42355, 42377 ] ], [ [ 998, 1019 ], [ 43203, 43224 ], [ 44623, 44644 ], [ 45447, 45468 ] ], [ [ 1025, 1041 ], [ 42230, 42246 ] ], [ [ 1047, 1066 ], [ 30180, 30199 ] ], [ [ 1072, 1088 ], [ 42337, 42353 ], [ 42604, 42620 ] ], [ [ 1094, 1112 ], [ 20602, 20620 ], [ 20725, 20743 ], [ 23747, 23765 ], [ 23885, 23903 ] ], [ [ 1118, 1137 ], [ 30264, 30283 ], [ 30375, 30394 ] ], [ [ 1143, 1158 ], [ 44606, 44621 ], [ 44832, 44847 ] ], [ [ 1201, 1212 ], [ 3047, 3058 ], [ 14522, 14533 ] ], [ [ 1254, 1261 ], [ 11418, 11425 ], [ 11662, 11669 ], [ 13428, 13435 ], 
[ 13605, 13612 ] ], [ [ 1263, 1271 ], [ 13353, 13361 ] ], [ [ 1273, 1281 ], [ 12017, 12025 ], [ 13853, 13861 ] ], [ [ 1313, 1323 ], [ 2993, 3003 ], [ 14321, 14331 ] ], [ [ 1362, 1378 ], [ 14938, 14954 ], [ 17215, 17231 ], [ 19920, 19936 ], [ 28697, 28713 ], [ 29755, 29771 ], [ 41925, 41941 ], [ 43417, 43433 ], [ 45629, 45645 ] ], [ [ 1425, 1429 ], [ 34357, 34361 ], [ 35123, 35127 ], [ 38692, 38696 ], [ 39247, 39251 ], [ 39274, 39278 ], [ 40385, 40389 ], [ 40631, 40635 ], [ 42043, 42047 ], [ 43515, 43519 ] ], [ [ 1431, 1445 ], [ 34829, 34843 ], [ 35897, 35911 ] ], [ [ 1499, 1506 ], [ 11889, 11896 ], [ 13441, 13448 ], [ 13618, 13625 ], [ 26030, 26037 ], [ 27918, 27925 ], [ 34343, 34350 ], [ 34402, 34409 ], [ 35178, 35185 ], [ 35243, 35250 ], [ 40622, 40629 ], [ 41048, 41055 ], [ 41977, 41984 ] ], [ [ 1541, 1550 ], [ 18510, 18519 ], [ 32470, 32479 ], [ 38701, 38710 ], [ 39071, 39080 ], [ 39294, 39303 ], [ 39396, 39405 ], [ 40015, 40024 ] ], [ [ 1587, 1598 ], [ 17195, 17206 ], [ 26039, 26050 ], [ 27849, 27860 ], [ 27927, 27938 ], [ 30483, 30494 ] ], [ [ 1647, 1669 ], [ 14115, 14137 ] ], [ [ 1703, 1711 ], [ 3526, 3534 ], [ 16131, 16139 ], [ 16387, 16395 ] ], [ [ 1745, 1760 ], [ 11897, 11912 ], [ 14190, 14205 ] ], [ [ 1805, 1826 ], [ 29329, 29350 ] ], [ [ 1856, 1859 ], [ 31413, 31416 ] ], [ [ 1861, 1876 ], [ 31397, 31412 ] ], [ [ 1904, 1910 ], [ 11965, 11971 ], [ 23107, 23113 ], [ 24828, 24834 ], [ 24850, 24856 ], [ 27816, 27822 ], [ 27807, 27813 ], [ 29539, 29545 ], [ 29795, 29801 ], [ 29823, 29829 ], [ 29870, 29876 ], [ 30200, 30206 ], [ 30222, 30228 ], [ 38678, 38684 ], [ 45663, 45669 ] ], [ [ 1912, 1919 ], [ 27775, 27782 ], [ 27765, 27772 ] ], [ [ 1951, 1959 ], [ 2903, 2911 ], [ 3817, 3825 ] ], [ [ 1992, 2000 ], [ 5073, 5081 ], [ 26634, 26642 ] ], [ [ 2034, 2043 ], [ 34547, 34556 ], [ 36962, 36971 ] ], [ [ 2045, 2076 ], [ 35802, 35833 ], [ 36289, 36320 ], [ 38362, 38393 ] ], [ [ 2078, 2113 ], [ 35405, 35440 ], [ 37904, 37939 ] ], [ [ 2141, 2146 ], [ 7442, 7447 ] ], [ [ 2148, 2162 ], [ 7385, 7399 ] ], [ [ 2200, 2217 ], [ 17339, 17356 ], [ 18675, 18692 ], [ 18776, 18793 ], [ 32646, 32663 ], [ 32762, 32779 ] ], [ [ 2260, 2276 ], [ 43747, 43763 ] ], [ [ 2327, 2344 ], [ 7827, 7844 ] ], [ [ 2388, 2405 ], [ 13657, 13674 ] ], [ [ 2448, 2464 ], [ 9160, 9176 ] ], [ [ 2507, 2517 ], [ 43948, 43958 ], [ 44139, 44149 ] ], [ [ 2556, 2568 ], [ 11554, 11566 ] ], [ [ 2611, 2629 ], [ 19146, 19164 ], [ 19565, 19583 ], [ 33870, 33888 ], [ 34032, 34050 ] ], [ [ 2675, 2693 ], [ 3188, 3206 ], [ 7586, 7604 ] ], [ [ 2725, 2737 ], [ 7046, 7058 ] ], [ [ 2746, 2756 ] ], [ [ 45584, 46145 ], [ 29430, 29453 ] ] ]
from phiqnet.train.train import train_main

if __name__ == '__main__':
    args = {}
    args['multi_gpu'] = 0
    args['gpu'] = 0

    args['result_folder'] = r'..\databases\experiments\koniq_small'
    args['n_quality_levels'] = 1

    args['train_folders'] = [#r'..\databases\train\koniq_normal',
                             r'..\databases\train\koniq_small',]
                             # r'..\databases\train\live']
    args['val_folders'] = [#r'..\databases\val\koniq_normal',
                           r'..\databases\val\koniq_small',]
                           # r'..\databases\val\live']

    args['koniq_mos_file'] = r'..\databases\koniq10k_images_scores.csv'
    args['live_mos_file'] = r'..\databases\live_mos.csv'

    args['naive_backbone'] = False
    args['backbone'] = 'resnet50'
    args['model_weights'] = r'..\databases\experiments\koniq_small\resnet50_mos_attention_fpn\44_0.0094_0.0473.h5'
    args['initial_epoch'] = 0

    args['lr_base'] = 1e-6
    args['lr_schedule'] = True
    args['batch_size'] = 8
    args['epochs'] = 120

    args['fpn_type'] = 'fpn'
    args['attention_module'] = True
    args['image_aug'] = True

    train_main(args)
[ [ [ 32, 42 ], [ 1158, 1168 ] ], [ [ 75, 79 ], [ 89, 93 ], [ 115, 119 ], [ 136, 140 ], [ 204, 208 ], [ 238, 242 ], [ 428, 432 ], [ 606, 610 ], [ 678, 682 ], [ 736, 740 ], [ 771, 775 ], [ 805, 809 ], [ 920, 924 ], [ 951, 955 ], [ 978, 982 ], [ 1009, 1013 ], [ 1036, 1040 ], [ 1062, 1066 ], [ 1091, 1095 ], [ 1128, 1132 ], [ 1169, 1173 ] ] ]
import anchor

name = 'anchor'
[ [ [ 7, 13 ] ], [ [ 15, 19 ] ] ]
# web_app/__init__.py

from flask import Flask

from web_app.models import db, migrate
from web_app.routes.home_routes import home_routes
from web_app.routes.book_routes import book_routes

DATABASE_URI = "sqlite:///twitoff_class.db" # using relative filepath
#DATABASE_URI = "sqlite:////Users/Username/Desktop/your-repo-name/web_app_99.db" # using absolute filepath on Mac (recommended)
#DATABASE_URI = "sqlite:///C:\\Users\\Username\\Desktop\\your-repo-name\\web_app_99.db" # using absolute filepath on Windows (recommended) h/t: https://stackoverflow.com/a/19262231/670433

def create_app():
    app = Flask(__name__)
    app.config["SQLALCHEMY_DATABASE_URI"] = DATABASE_URI
    app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = False
    db.init_app(app)
    migrate.init_app(app, db)

    app.register_blueprint(home_routes)
    app.register_blueprint(book_routes)
    return app

if __name__ == "__main__":
    my_app = create_app()
    my_app.run(debug=True)
[ [ [ 41, 46 ], [ 605, 610 ] ], [ [ 75, 77 ], [ 740, 742 ], [ 783, 785 ] ], [ [ 79, 86 ], [ 761, 768 ] ], [ [ 126, 137 ], [ 815, 826 ] ], [ [ 177, 188 ], [ 855, 866 ] ], [ [ 190, 202 ], [ 666, 678 ] ], [ [ 581, 591 ], [ 925, 935 ] ], [ [ 916, 922 ], [ 942, 948 ] ] ]
from flask import jsonify, request, url_for, abort
from app import db
from app.api import bp
from app.api.auth import token_auth
from app.api.errors import bad_request
from app.models import User


@bp.route('/users/<int:id>', methods=['GET'])
@token_auth.login_required
def get_user(id):
    return jsonify(User.query.get_or_404(id).to_dict())


@bp.route('/users', methods=['GET'])
@token_auth.login_required
def get_users():
    page = request.args.get('page', 1, type=int)
    per_page = min(request.args.get('per_page', 10, type=int), 100)
    data = User.to_collection_dict(User.query, page, per_page, 'api.get_users')
    return jsonify(data)


@bp.route('/users/<int:id>/followers', methods=['GET'])
@token_auth.login_required
def get_followers(id):
    user = User.query.get_or_404(id)
    page = request.args.get('page', 1, type=int)
    per_page = min(request.args.get('per_page', 10, type=int), 100)
    data = User.to_collection_dict(user.followers, page, per_page, 'api.get_followers', id=id)
    return jsonify(data)


@bp.route('/users/<int:id>/followed', methods=['GET'])
@token_auth.login_required
def get_followed(id):
    user = User.query.get_or_404(id)
    page = request.args.get('page', 1, type=int)
    per_page = min(request.args.get('per_page', 10, type=int), 100)
    data = User.to_collection_dict(user.followed, page, per_page, 'api.get_followed', id=id)
    return jsonify(data)


@bp.route('/users', methods=['POST'])
def create_user():
    data = request.get_json() or {}
    if 'username' not in data or 'email' not in data or 'password' not in data:
        return bad_request('Request must include username, email and password')
    if User.query.filter_by(username=data['username']).first():
        return bad_request('Please use a different username')
    if User.query.filter_by(email=data['email']).first():
        return bad_request('Please use a different email')
    user = User()
    user.from_dict(data, new_user=True)
    db.session.add(user)
    db.session.commit()
    response = jsonify(user.to_dict())
    response.status_code = 201
    response.headers['Location'] = url_for('api.get_user', id=user.id)
    return response


@bp.route('/users/<int:id>', methods=['PUT'])
@token_auth.login_required
def update_user(id):
    if token_auth.current_user().id != id:
        abort(403)
    user = User.query.get_or_404(id)
    data = request.get_json() or {}
    if 'username' in data and data['username'] != user.username and \
            User.query.filter_by(username=data['username']).first():
        return bad_request('Please use a different username')
    if 'email' in data and data['email'] != user.email and \
            User.query.filter_by(email=data['email']).first():
        return bad_request('Please use a different email')
    user.from_dict(data, new_user=False)
    db.session.commit()
    return jsonify(user.to_dict())
[ [ [ 18, 25 ], [ 299, 306 ], [ 634, 641 ], [ 1051, 1058 ], [ 1464, 1471 ], [ 2097, 2104 ], [ 2933, 2940 ] ], [ [ 27, 34 ], [ 437, 444 ], [ 494, 501 ], [ 803, 810 ], [ 860, 867 ], [ 1218, 1225 ], [ 1275, 1282 ], [ 1547, 1554 ], [ 2448, 2455 ] ], [ [ 36, 43 ], [ 2187, 2194 ] ], [ [ 45, 50 ], [ 2389, 2394 ] ], [ [ 67, 69 ], [ 2037, 2039 ], [ 2062, 2064 ], [ 2902, 2904 ] ], [ [ 90, 92 ], [ 198, 200 ], [ 346, 348 ], [ 650, 652 ], [ 1067, 1069 ], [ 1480, 1482 ], [ 2245, 2247 ] ], [ [ 118, 128 ], [ 244, 254 ], [ 383, 393 ], [ 706, 716 ], [ 1122, 1132 ], [ 2291, 2301 ], [ 2345, 2355 ] ], [ [ 156, 167 ], [ 1667, 1678 ], [ 1811, 1822 ], [ 1931, 1942 ], [ 2627, 2638 ], [ 2813, 2824 ] ], [ [ 191, 195 ], [ 307, 311 ], [ 554, 558 ], [ 578, 582 ], [ 766, 770 ], [ 920, 924 ], [ 1181, 1185 ], [ 1335, 1339 ], [ 1739, 1743 ], [ 1865, 1869 ], [ 1986, 1990 ], [ 2411, 2415 ], [ 2555, 2559 ], [ 2747, 2751 ] ], [ [ 274, 282 ] ], [ [ 413, 422 ] ], [ [ 736, 749 ] ], [ [ 1152, 1164 ] ], [ [ 1521, 1532 ] ], [ [ 2321, 2332 ] ] ]
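For illustration, a minimal sketch of how one row of this dump can be decoded, assuming each def_use_chains element is a list of 0-based [start, end) character offsets into the code string, with the first span marking the definition and any remaining spans its uses. The sample row reuses the small anchor entry shown above; the decoding loop itself is an assumption about the format, not part of the dataset.

# Sketch only: decode one row, assuming 0-based [start, end) offsets.
row = {
    "code": "import anchor\n\nname = 'anchor'\n",
    "def_use_chains": [[[7, 13]], [[15, 19]]],
}

for chain in row["def_use_chains"]:
    definition, *uses = chain
    def_text = row["code"][definition[0]:definition[1]]
    use_texts = [row["code"][start:end] for start, end in uses]
    print(f"definition {def_text!r} at {definition}, uses: {use_texts}")

# Expected output:
# definition 'anchor' at [7, 13], uses: []
# definition 'name' at [15, 19], uses: []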