Dataset columns: content (string, lengths 0 to 1.05M), origin (string, 2 classes), type (string, 2 classes)
import functools, fractions

n = int(input())
a = list(map(int, input().split()))
print(functools.reduce(fractions.gcd, a))
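Note that fractions.gcd was deprecated in Python 3.5 and removed in 3.9. A minimal equivalent on current Python versions, assuming the same two-line stdin format, would be:

import functools
import math

n = int(input())  # element count; unused by the reduction itself
a = list(map(int, input().split()))
print(functools.reduce(math.gcd, a))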
nilq/baby-python
python
from pymining import itemmining
from pymining import seqmining
import sys

if len(sys.argv) != 3:
    print("Please provide the data file and the minimum support as input, e.g., python freq_seq.py ./output.txt 40")
    sys.exit(-1)

f = open(sys.argv[1], 'r')
lines = f.read().splitlines()

seqs = []
for s in lines:
    seq = s.split("---")[1]
    seq = seq[1:-1]
    seqs.append(seq.split(", "))

freq_seqs = seqmining.freq_seq_enum(seqs, int(sys.argv[2]))
for p in freq_seqs:
    print(p)
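The parsing above assumes each input line looks like "<id>---[item1, item2, ...]". A small, self-contained illustration of that step with made-up data (no pymining needed):

lines = ["session1---[login, search, checkout]",
         "session2---[login, logout]"]

seqs = []
for s in lines:
    seq = s.split("---")[1]       # "[login, search, checkout]"
    seq = seq[1:-1]               # strip the surrounding brackets
    seqs.append(seq.split(", "))  # ["login", "search", "checkout"]

print(seqs)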
nilq/baby-python
python
""" 属性的使用 - 访问器/修改器/删除器 - 使用__slots__对属性加以限制 Version: 0.1 Author: BDFD Date: 2018-03-12 """ class Car(object): __slots__ = ('_brand', '_max_speed') def __init__(self, brand, max_speed): self._brand = brand self._max_speed = max_speed @property def brand(self): return self._brand @brand.setter def brand(self, brand): self._brand = brand @brand.deleter def brand(self): del self._brand @property def max_speed(self): return self._max_speed @max_speed.setter def max_speed(self, max_speed): if max_speed < 0: raise ValueError('Invalid max speed for car') self._max_speed = max_speed def __str__(self): return 'Car: [品牌=%s, 最高时速=%d]' % (self._brand, self._max_speed) car = Car('QQ', 120) print(car) # ValueError # car.max_speed = -100 car.max_speed = 320 car.brand = "Benz" # 使用__slots__属性限制后下面的代码将产生异常 # car.current_speed = 80 print(car) # 如果提供了删除器可以执行下面的代码 # del car.brand # 属性的实现 print(Car.brand) print(Car.brand.fget) print(Car.brand.fset) print(Car.brand.fdel) # 通过上面的代码帮助学生理解之前提到的包装器的概念 # Python中有很多类似的语法糖后面还会出现这样的东西
nilq/baby-python
python
# Lint as: python3 # Copyright 2019 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for confusion matrix at thresholds.""" from __future__ import absolute_import from __future__ import division # Standard __future__ imports from __future__ import print_function import math from absl.testing import parameterized import apache_beam as beam from apache_beam.testing import util import numpy as np import tensorflow as tf from tensorflow_model_analysis.eval_saved_model import testutil from tensorflow_model_analysis.metrics import confusion_matrix_metrics from tensorflow_model_analysis.metrics import metric_types from tensorflow_model_analysis.metrics import metric_util class ConfusionMatrixMetricsTest(testutil.TensorflowModelAnalysisTest, parameterized.TestCase): @parameterized.named_parameters( ('specificity', confusion_matrix_metrics.Specificity(), 2.0 / (2.0 + 3.0)), ('fall_out', confusion_matrix_metrics.FallOut(), 3.0 / (3.0 + 2.0)), ('miss_rate', confusion_matrix_metrics.MissRate(), 4.0 / (4.0 + 1.0)), ('negative_predictive_value', confusion_matrix_metrics.NegativePredictiveValue(), 2.0 / (2.0 + 4.0)), ('false_discovery_rate', confusion_matrix_metrics.FalseDiscoveryRate(), 3.0 / (3.0 + 1.0)), ('false_omission_rate', confusion_matrix_metrics.FalseOmissionRate(), 4.0 / (4.0 + 2.0)), ('prevalence', confusion_matrix_metrics.Prevalence(), (1.0 + 4.0) / (1.0 + 2.0 + 3.0 + 4.0)), ('prevalence_threshold', confusion_matrix_metrics.PrevalenceThreshold(), (math.sqrt((1.0 / (1.0 + 4.0)) * (1.0 - 1.0 * (2.0 / (2.0 + 3.0)))) + (2.0 / (2.0 + 3.0) - 1.0)) / ((1.0 / (1.0 + 4.0) + (2.0 / (2.0 + 3.0)) - 1.0))), ('threat_score', confusion_matrix_metrics.ThreatScore(), 1.0 / (1.0 + 4.0 + 3.0)), ('balanced_accuracy', confusion_matrix_metrics.BalancedAccuracy(), ((1.0 / (1.0 + 4.0)) + (2.0 / (2.0 + 3.0))) / 2), ('f1_score', confusion_matrix_metrics.F1Score(), 2 * 1.0 / (2 * 1.0 + 3.0 + 4.0)), ('matthews_correlation_coefficient', confusion_matrix_metrics.MatthewsCorrelationCoefficent(), (1.0 * 2.0 - 3.0 * 4.0) / math.sqrt( (1.0 + 3.0) * (1.0 + 4.0) * (2.0 + 3.0) * (2.0 + 4.0))), ('fowlkes_mallows_index', confusion_matrix_metrics.FowlkesMallowsIndex(), math.sqrt(1.0 / (1.0 + 3.0) * 1.0 / (1.0 + 4.0))), ('informedness', confusion_matrix_metrics.Informedness(), (1.0 / (1.0 + 4.0)) + (2.0 / (2.0 + 3.0)) - 1.0), ('markedness', confusion_matrix_metrics.Markedness(), (1.0 / (1.0 + 3.0)) + (2.0 / (2.0 + 4.0)) - 1.0), ('positive_likelihood_ratio', confusion_matrix_metrics.PositiveLikelihoodRatio(), (1.0 / (1.0 + 4.0)) / (3.0 / (3.0 + 2.0))), ('negative_likelihood_ratio', confusion_matrix_metrics.NegativeLikelihoodRatio(), (4.0 / (4.0 + 1.0)) / (2.0 / (2.0 + 3.0))), ('diagnostic_odds_ratio', confusion_matrix_metrics.DiagnosticOddsRatio(), ((1.0 / 3.0)) / (4.0 / 2.0)), ) def testConfusionMatrixMetrics(self, metric, expected_value): computations = metric.computations() histogram = computations[0] matrices = computations[1] metrics = computations[2] # tp = 1 # tn = 2 # fp = 3 # fn = 4 example1 = { 'labels': 
np.array([1.0]), 'predictions': np.array([0.6]), 'example_weights': np.array([1.0]), } example2 = { 'labels': np.array([0.0]), 'predictions': np.array([0.3]), 'example_weights': np.array([1.0]), } example3 = { 'labels': np.array([0.0]), 'predictions': np.array([0.2]), 'example_weights': np.array([1.0]), } example4 = { 'labels': np.array([0.0]), 'predictions': np.array([0.6]), 'example_weights': np.array([1.0]), } example5 = { 'labels': np.array([0.0]), 'predictions': np.array([0.7]), 'example_weights': np.array([1.0]), } example6 = { 'labels': np.array([0.0]), 'predictions': np.array([0.8]), 'example_weights': np.array([1.0]), } example7 = { 'labels': np.array([1.0]), 'predictions': np.array([0.1]), 'example_weights': np.array([1.0]), } example8 = { 'labels': np.array([1.0]), 'predictions': np.array([0.2]), 'example_weights': np.array([1.0]), } example9 = { 'labels': np.array([1.0]), 'predictions': np.array([0.3]), 'example_weights': np.array([1.0]), } example10 = { 'labels': np.array([1.0]), 'predictions': np.array([0.4]), 'example_weights': np.array([1.0]), } with beam.Pipeline() as pipeline: # pylint: disable=no-value-for-parameter result = ( pipeline | 'Create' >> beam.Create([ example1, example2, example3, example4, example5, example6, example7, example8, example9, example10 ]) | 'Process' >> beam.Map(metric_util.to_standard_metric_inputs) | 'AddSlice' >> beam.Map(lambda x: ((), x)) | 'ComputeHistogram' >> beam.CombinePerKey(histogram.combiner) | 'ComputeMatrices' >> beam.Map( lambda x: (x[0], matrices.result(x[1]))) # pyformat: ignore | 'ComputeMetrics' >> beam.Map(lambda x: (x[0], metrics.result(x[1]))) ) # pyformat: ignore # pylint: enable=no-value-for-parameter def check_result(got): try: self.assertLen(got, 1) got_slice_key, got_metrics = got[0] self.assertEqual(got_slice_key, ()) self.assertLen(got_metrics, 1) key = metrics.keys[0] self.assertDictElementsAlmostEqual( got_metrics, {key: expected_value}, places=5) except AssertionError as err: raise util.BeamAssertException(err) util.assert_that(result, check_result, label='result') def testConfusionMatrixMetricsWithNan(self): computations = confusion_matrix_metrics.Specificity().computations() histogram = computations[0] matrices = computations[1] metrics = computations[2] example1 = { 'labels': np.array([1.0]), 'predictions': np.array([1.0]), 'example_weights': np.array([1.0]), } with beam.Pipeline() as pipeline: # pylint: disable=no-value-for-parameter result = ( pipeline | 'Create' >> beam.Create([example1]) | 'Process' >> beam.Map(metric_util.to_standard_metric_inputs) | 'AddSlice' >> beam.Map(lambda x: ((), x)) | 'ComputeHistogram' >> beam.CombinePerKey(histogram.combiner) | 'ComputeMatrices' >> beam.Map( lambda x: (x[0], matrices.result(x[1]))) # pyformat: ignore | 'ComputeMetrics' >> beam.Map(lambda x: (x[0], metrics.result(x[1]))) ) # pyformat: ignore # pylint: enable=no-value-for-parameter def check_result(got): try: self.assertLen(got, 1) got_slice_key, got_metrics = got[0] self.assertEqual(got_slice_key, ()) self.assertLen(got_metrics, 1) key = metrics.keys[0] self.assertIn(key, got_metrics) self.assertTrue(math.isnan(got_metrics[key])) except AssertionError as err: raise util.BeamAssertException(err) util.assert_that(result, check_result, label='result') def testConfusionMatrixAtThresholds(self): computations = confusion_matrix_metrics.ConfusionMatrixAtThresholds( thresholds=[0.3, 0.5, 0.8]).computations() histogram = computations[0] matrices = computations[1] metrics = computations[2] example1 = { 'labels': 
np.array([0.0]), 'predictions': np.array([0.0]), 'example_weights': np.array([1.0]), } example2 = { 'labels': np.array([0.0]), 'predictions': np.array([0.5]), 'example_weights': np.array([1.0]), } example3 = { 'labels': np.array([1.0]), 'predictions': np.array([0.3]), 'example_weights': np.array([1.0]), } example4 = { 'labels': np.array([1.0]), 'predictions': np.array([0.9]), 'example_weights': np.array([1.0]), } with beam.Pipeline() as pipeline: # pylint: disable=no-value-for-parameter result = ( pipeline | 'Create' >> beam.Create([example1, example2, example3, example4]) | 'Process' >> beam.Map(metric_util.to_standard_metric_inputs) | 'AddSlice' >> beam.Map(lambda x: ((), x)) | 'ComputeHistogram' >> beam.CombinePerKey(histogram.combiner) | 'ComputeMatrices' >> beam.Map( lambda x: (x[0], matrices.result(x[1]))) # pyformat: ignore | 'ComputeMetrics' >> beam.Map(lambda x: (x[0], metrics.result(x[1]))) ) # pyformat: ignore # pylint: enable=no-value-for-parameter def check_result(got): try: self.assertLen(got, 1) got_slice_key, got_metrics = got[0] self.assertEqual(got_slice_key, ()) self.assertLen(got_metrics, 1) key = metric_types.MetricKey(name='confusion_matrix_at_thresholds') self.assertIn(key, got_metrics) got_metric = got_metrics[key] self.assertProtoEquals( """ matrices { threshold: 0.3 false_negatives: 1.0 true_negatives: 1.0 false_positives: 1.0 true_positives: 1.0 precision: 0.5 recall: 0.5 } matrices { threshold: 0.5 false_negatives: 1.0 true_negatives: 2.0 true_positives: 1.0 precision: 1.0 recall: 0.5 } matrices { threshold: 0.8 false_negatives: 1.0 true_negatives: 2.0 true_positives: 1.0 precision: 1.0 recall: 0.5 } """, got_metric) except AssertionError as err: raise util.BeamAssertException(err) util.assert_that(result, check_result, label='result') if __name__ == '__main__': tf.test.main()
nilq/baby-python
python
# Generated by Django 4.0.2 on 2022-02-19 14:09

from django.conf import settings
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Review',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('rating', models.IntegerField(validators=[django.core.validators.MaxValueValidator(5), django.core.validators.MinValueValidator(1)])),
                ('text', models.TextField(blank=True, default='')),
                ('created', models.DateTimeField(auto_now_add=True)),
                ('is_published', models.BooleanField(default=False)),
                ('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'ordering': ['-created'],
            },
        ),
    ]
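For context, a model definition that would produce a migration like this looks roughly as follows. It is reconstructed from the migration itself, so the module location and any details not encoded in the migration are assumptions:

from django.conf import settings
from django.core.validators import MaxValueValidator, MinValueValidator
from django.db import models


class Review(models.Model):
    rating = models.IntegerField(validators=[MaxValueValidator(5), MinValueValidator(1)])
    text = models.TextField(blank=True, default='')
    created = models.DateTimeField(auto_now_add=True)
    is_published = models.BooleanField(default=False)
    author = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)

    class Meta:
        ordering = ['-created']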
nilq/baby-python
python
import cv2
import numpy as np
import torch

from ..builder import MOTION


@MOTION.register_module()
class CameraMotionCompensation(object):
    """Camera motion compensation.

    Args:
        warp_mode (str): Warp mode in opencv.
        num_iters (int): Number of the iterations.
        stop_eps (float): Terminate threshold.
    """

    def __init__(self,
                 warp_mode='cv2.MOTION_EUCLIDEAN',
                 num_iters=50,
                 stop_eps=0.001):
        self.warp_mode = eval(warp_mode)
        self.num_iters = num_iters
        self.stop_eps = stop_eps

    def get_warp_matrix(self, img, ref_img):
        """Calculate warping matrix between two images."""
        img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
        ref_img = cv2.cvtColor(ref_img, cv2.COLOR_RGB2GRAY)

        warp_matrix = np.eye(2, 3, dtype=np.float32)
        criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT,
                    self.num_iters, self.stop_eps)
        cc, warp_matrix = cv2.findTransformECC(img, ref_img, warp_matrix,
                                               self.warp_mode, criteria, None, 1)
        warp_matrix = torch.from_numpy(warp_matrix)
        return warp_matrix

    def warp_bboxes(self, bboxes, warp_matrix):
        """Warp bounding boxes according to the warping matrix."""
        tl, br = bboxes[:, :2], bboxes[:, 2:]
        tl = torch.cat((tl, torch.ones(tl.shape[0], 1).to(bboxes.device)), dim=1)
        br = torch.cat((br, torch.ones(tl.shape[0], 1).to(bboxes.device)), dim=1)
        trans_tl = torch.mm(warp_matrix, tl.t()).t()
        trans_br = torch.mm(warp_matrix, br.t()).t()
        trans_bboxes = torch.cat((trans_tl, trans_br), dim=1)
        return trans_bboxes.to(bboxes.device)

    def track(self, img, ref_img, tracks, num_samples, frame_id):
        """Tracking forward."""
        img = img.squeeze(0).cpu().numpy().transpose((1, 2, 0))
        ref_img = ref_img.squeeze(0).cpu().numpy().transpose((1, 2, 0))
        warp_matrix = self.get_warp_matrix(img, ref_img)

        bboxes = []
        num_bboxes = []
        for k, v in tracks.items():
            if int(v['frame_ids'][-1]) < frame_id - 1:
                _num = 1
            else:
                _num = min(num_samples, len(v.bboxes))
            num_bboxes.append(_num)
            bboxes.extend(v.bboxes[-_num:])
        bboxes = torch.cat(bboxes, dim=0)
        warped_bboxes = self.warp_bboxes(bboxes, warp_matrix.to(bboxes.device))

        warped_bboxes = torch.split(warped_bboxes, num_bboxes)
        for b, (k, v) in zip(warped_bboxes, tracks.items()):
            _num = b.shape[0]
            b = torch.split(b, [1] * _num)
            tracks[k].bboxes[-_num:] = b
        return tracks
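The box warping itself is just a homogeneous-coordinate multiply with the 2x3 affine matrix. A small stand-alone illustration with an identity warp and made-up boxes (no mmtracking imports required), which should return the input boxes unchanged:

import torch

# Two boxes in (x1, y1, x2, y2) format and a 2x3 identity affine warp.
bboxes = torch.tensor([[10., 20., 50., 80.],
                       [ 0.,  0., 30., 40.]])
warp_matrix = torch.eye(2, 3)

tl, br = bboxes[:, :2], bboxes[:, 2:]
ones = torch.ones(tl.shape[0], 1)
tl = torch.cat((tl, ones), dim=1)   # homogeneous coordinates
br = torch.cat((br, ones), dim=1)

warped = torch.cat((torch.mm(warp_matrix, tl.t()).t(),
                    torch.mm(warp_matrix, br.t()).t()), dim=1)
print(warped)                       # identical to bboxes for an identity warp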
nilq/baby-python
python
""" Access to data resources installed with this package """ from servicelib.resources import ResourcesFacade resources = ResourcesFacade( package_name=__name__, distribution_name="simcore-service-storage", config_folder="", )
nilq/baby-python
python
#!/usr/bin/env python
import time
from slackclient import SlackClient
import os, re

base_dir = os.path.dirname(os.path.realpath(__file__))
player = 'afplay'
text2voice = 'espeak'
sounds_dir = 'sounds'
filetype = 'mp3'
debug = True
bots_channel = 'build'

play_fixed = re.compile("FIXED")
play_cancelled = re.compile("CANCELLED")
play_failed = re.compile("FAILED")
play_broken = re.compile("BROKEN")
play_building = re.compile("BUILDING")
add_sound_regex = re.compile(r"^add-sound\s([a-z0-9]+)\s<?(https?:\/\/[a-z./]*\?v=[a-zA-Z0-9_-]*)>?(\s([0-9.]*)\s([0-9.]*)$)?")


def action(command, message):
    global debug
    global sc
    global bots_channel
    sc.rtm_send_message(bots_channel, message)
    if debug:
        print('Running command: ' + command)
    os.system(command)


whitelist = {}
with open(os.path.join(base_dir, 'whitelist.txt')) as f:
    for line in f:
        (name, identifier) = line.split()
        whitelist[identifier] = name

f = open(os.path.join(base_dir, 'token.txt'))
token = f.readline().rstrip()
f.close()

print("Connecting using token " + token)
sc = SlackClient(token)

if sc.rtm_connect():
    while True:
        for event in sc.rtm_read():
            if 'type' in event and event['type'] == 'message' and 'text' in event:
                if 'user' in event and event['user'] in whitelist.keys():
                    user = whitelist[event['user']]
                elif ('subtype' in event and event['subtype'] == 'bot_message'
                        and 'bot_id' in event and event['bot_id'] in whitelist.keys()):
                    user = whitelist[event['bot_id']]
                else:
                    user = False
                if user:
                    if debug:
                        print("Parsing message from " + user + ": '" + event['attachments'][0]['fallback'] + "'")
                    add_sound_match = add_sound_regex.match(event['attachments'][0]['fallback'])
                    fixed = play_fixed.search(event['attachments'][0]['fallback'])
                    cancelled = play_cancelled.search(event['attachments'][0]['fallback'])
                    failed = play_failed.search(event['attachments'][0]['fallback'])
                    broken = play_broken.search(event['attachments'][0]['fallback'])
                    building = play_building.search(event['attachments'][0]['fallback'])
                    if fixed:
                        message = user + ' FIXED '
                        sound_file = os.path.join(base_dir, sounds_dir, 'dai' + '.' + filetype)
                        command = player + ' ' + sound_file
                        action(command, message)
                    elif cancelled:
                        message = user + ' CANCELLED '
                        sound_file = os.path.join(base_dir, sounds_dir, 'noooo' + '.' + filetype)
                        command = player + ' ' + sound_file
                        action(command, message)
                    elif failed:
                        message = user + ' FAILED '
                        sound_file = os.path.join(base_dir, sounds_dir, 'heygirl' + '.' + filetype)
                        command = player + ' ' + sound_file
                        action(command, message)
                    elif broken:
                        message = user + ' BROKEN '
                        sound_file = os.path.join(base_dir, sounds_dir, 'horror' + '.' + filetype)
                        command = player + ' ' + sound_file
                        action(command, message)
                    elif building:
                        message = user + ' BUILDING '
                        sound_file = os.path.join(base_dir, sounds_dir, 'dangerzone' + '.' + filetype)
                        command = player + ' ' + sound_file
                        action(command, message)
                    elif add_sound_match:
                        message = user + ' adds sound ' + add_sound_match.group(1) + ' from youtube video ' + add_sound_match.group(2)
                        command = os.path.join(base_dir, 'yt-add-sound.sh') + ' ' + add_sound_match.group(1) + ' ' + add_sound_match.group(2)
                        if add_sound_match.group(3):
                            command += add_sound_match.group(3)
                        action(command, message)
        time.sleep(1)
else:
    print('Connection failed, invalid token?')
nilq/baby-python
python
""" Produces Fig. A1 of Johnson & Weinberg (2020), a single axis plot showing the abundance data of several dwarf galaxies taken from Kirby et al. (2010) in comparison to a smooth and single-burst model simulated in VICE. """ import visuals # visuals.py -> matplotlib subroutines in this directory import matplotlib.pyplot as plt import vice import sys import warnings warnings.filterwarnings("ignore") _NAMES_ = { "Scl": "Sculptor", "LeoI": "Leo I", "Sex": "Sextans", "LeoII": "Leo II", "CVnI": "Canes Venatici I", "UMi": "Ursa Minor", "Dra": "Draco" } _COLORS_ = { "Scl": "crimson", "LeoI": "grey", "Sex": "lime", "LeoII": "deepskyblue", "CVnI": "darkviolet", "UMi": "black", "Dra": "gold" } _MARKERS_ = { "Scl": "circle", "LeoI": "square", "Sex": "star", "LeoII": "thin_diamond", "CVnI": "pentagon", "UMi": "hexagon2", "Dra": "triangle_up" } _SIZES_ = { "Scl": 30, "LeoI": 10, "Sex": 80, "LeoII": 30, "CVnI": 60, "UMi": 50, "Dra": 40 } def setup_axis(): """ Sets up the axis with the proper labels and ranges Returns ======= axis :: matplotlib subplot The axis to plot the data on """ fig = plt.figure(figsize = (10, 7)) ax = fig.add_subplot(111, facecolor = "white") ax.set_xlabel("[Fe/H]") ax.set_ylabel("[Mg/Fe]") ax.set_xlim([-3.2, -0.4]) ax.set_ylim([-0.9, 1.4]) return ax def read_data(filename = "../../data/kirby2010processed.dat"): """ Import the data from the associated file. Args ==== filename :: str [default :: ../data/kirby2010processed.dat] The path to the data file Returns ======= An 2D-ascii list containing the data as it appears in the file """ data = 849 * [None] with open(filename, 'r') as f: f.readline() # header for i in range(len(data)): data[i] = f.readline().split() for j in range(2, len(data[i])): data[i][j] = float(data[i][j]) f.close() return data def plot_data(ax, data, dwarf): """ Plots an individual dwarf galaxy's abundance data on the subplot. Parameters ========== ax :: matplotlib subplot The axis to plot the abundance data on data :: 2D-list The raw data itself dwarf :: str A key denoting which dwarf is being plotted. These appear in the first column of the argument data. """ FeH_column = 12 MgFe_column = 14 fltrd = list(filter(lambda x: x[0] == dwarf, data)) kwargs = { "c": visuals.colors()[_COLORS_[dwarf]], "marker": visuals.markers()[_MARKERS_[dwarf]], "linestyle": "None", "label": _NAMES_[dwarf], "s": _SIZES_[dwarf] } if dwarf == "LeoI": kwargs["zorder"] = 0 ax.scatter( [row[FeH_column] for row in fltrd], [row[MgFe_column] for row in fltrd], **kwargs ) def plot_representative_errorbar(ax, data, dwarf): """ Plots a representative error bar in the lower-left corner of the figure Parameters ========== ax :: matplotlib subplot The axis object to put the errorbar on data :: 2D-list The raw data itself dwarf :: str The name of the dwarf to take the median errors from """ err_FeH_column = 13 err_MgFe_column = 15 fltrd = list(filter(lambda x: x[0] == dwarf, data)) ax.errorbar(-2.8, -0.4, xerr = sorted([row[err_FeH_column] for row in fltrd])[len(fltrd) // 2], yerr = sorted([row[err_MgFe_column] for row in fltrd])[len(fltrd) // 2], ms = 0, color = visuals.colors()[_COLORS_[dwarf]]) def plot_vice_comparison(ax, name): """ Plots the [Mg/Fe]-[Fe/H] track of a given VICE model on the subplot. 
Parameters ========== ax :: matplotlib subplot The axis to plot on name :: str The relative path to the VICE output """ out = vice.output(name) ax.plot(out.history["[fe/h]"], out.history["[mg/fe]"], c = visuals.colors()["black"], linestyle = '--') def main(): """ Produces the figure and saves it as a PDF. """ plt.clf() ax = setup_axis() data = read_data() for i in _NAMES_.keys(): plot_data(ax, data, i) plot_vice_comparison(ax, "../../simulations/kirby2010_smooth_enh1") plot_vice_comparison(ax, "../../simulations/kirby2010_smooth") plot_vice_comparison(ax, "../../simulations/kirby2010_burst") plot_representative_errorbar(ax, data, "UMi") ax.legend(loc = visuals.mpl_loc()["upper left"], ncol = 1, frameon = False, bbox_to_anchor = (1.02, 0.98), fontsize = 18) plt.tight_layout() plt.savefig(sys.argv[1]) plt.clf() if __name__ == "__main__": main()
nilq/baby-python
python
# Copyright © 2021 Lynx-Userbot (LLC Company (WARNING))
# GPL-3.0 License From Github (General Public License)
# Ported From Cat Userbot For Lynx-Userbot By Alvin/LiuAlvinas.
# Based On Plugins
# Credits @Cat-Userbot by Alvin from Lord-Userbot

from userbot.events import register
from userbot import CMD_HELP, bot
from telethon.errors.rpcerrorlist import YouBlockedUserError

# Ported by KENZO @TeamSecret_Kz


@register(outgoing=True, pattern=r"^\.detect(?: |$)(.*)")
async def detect(event):
    if event.fwd_from:
        return
    input_str = "".join(event.text.split(maxsplit=1)[1:])
    reply_message = await event.get_reply_message()
    if not event.reply_to_msg_id:
        await event.edit("```Please reply to the user or type .detect (ID/Username) that you want to detect.```")
        return
    if input_str:
        try:
            lynxuser = int(input_str)
        except ValueError:
            try:
                u = await event.client.get_entity(input_str)
            except ValueError:
                await event.edit("`Please Give ID/Username to Find History.`")
            lynxuser = u.id
    else:
        lynxuser = reply_message.sender_id
    chat = "@tgscanrobot"
    event = await event.edit("`Currently Doing Account Detection...`")
    event = await event.edit("__Connecting to server telegram.__")
    event = await event.edit("__Connecting to server telegram..__")
    event = await event.edit("__Connecting to server telegram...__")
    event = await event.edit("__Connecting to server telegram.__")
    event = await event.edit("__Connecting to server telegram..__")
    event = await event.edit("__Connecting to server telegram...__")
    event = await event.edit("__Connecting to server telegram.__")
    event = await event.edit("__Connecting to server telegram..__")
    event = await event.edit("__Connecting to server telegram...__")
    event = await event.edit("__Connecting to server telegram.__")
    event = await event.edit("__Connecting to server telegram..__")
    event = await event.edit("__Connecting to server telegram...__")
    async with bot.conversation(chat) as conv:
        try:
            await conv.send_message(f"{lynxuser}")
        except YouBlockedUserError:
            await event.reply("```Please Unblock @tgscanrobot And Try Again.```")
        response = await conv.get_response()
        await event.client.send_read_acknowledge(conv.chat_id)
    await event.edit(response.text)


def inline_mention(user):
    full_name = user_full_name(user) or "No Name"
    return f"[{full_name}](tg://user?id={user.id})"


def user_full_name(user):
    names = [user.first_name, user.last_name]
    names = [i for i in list(names) if i]
    return " ".join(names)


CMD_HELP.update({
    "detection": "✘ Pʟᴜɢɪɴ : Detection\
    \n\n⚡𝘾𝙈𝘿⚡: `.detect` <Reply/Username/ID>\
    \n↳ : Shows the history of groups the account has joined or is currently in."
})
nilq/baby-python
python
"""https://de.dariah.eu/tatom/topic_model_python.html""" import os import numpy as np # a conventional alias import sklearn.feature_extraction.text as text from sklearn import decomposition class TM_NMF: def __init__(self, all_documents, num_topics, num_top_words, min_df, max_df, isblock): self.all_documents = all_documents self.num_topics = num_topics self.num_top_words = num_top_words self.min_df = min_df self.max_df = max_df path = os.getcwd() + '/' #'/IEami/' #self.file = open(path + 'Topic_huge.txt', 'w') if isblock: self.file = open(path + 'result_ami/' + 'Topic_modeling_nmf_block_' + str(num_topics) + '_topics.txt', 'w') else: self.file = open(path + 'result_ami/' + 'Topic_modeling_nmf_' + str(num_topics) + '_topic_scenario.txt', 'w') def find_NMF_topics(self): """ :param num_topics: :param num_top_words: a list of top words for each topic :return: """ vectorizer = text.CountVectorizer(input='filename', stop_words='english', min_df= self.min_df, max_df= self.max_df) dtm = vectorizer.fit_transform(self.all_documents).toarray() vocab = np.array(vectorizer.get_feature_names()) clf = decomposition.NMF(n_components = self.num_topics, random_state=1) # it shows for how many proability each corpus is related to a word in topic results self.doctopic = clf.fit_transform(dtm) self.topic_words = [] for topic in clf.components_: word_idx = np.argsort(topic)[::-1][0:self.num_top_words] self.topic_words.append([vocab[i] for i in word_idx]) return def show_corpus_vs_topics(self): # *************************************** self.file.write('******************************************************\n') # they normaloze doctopic w.r.t its rows doctopic = (self.doctopic) / (np.sum(self.doctopic, axis=1, keepdims=True)) corpus_names = [] for fn in self.all_documents: name = os.path.basename(fn) # name = name.rstrip('0123456789') corpus_names.append(name) # turn this into an array so we can use NumPy functions novel_names = np.asarray(corpus_names) doctopic_orig = doctopic.copy() # use method described in preprocessing section doctopic_grouped = np.zeros((len(corpus_names), self.num_topics)) # self.file.write('\t\t\t\t\t') # for i in range(self.num_topics): # self.file.write( 'topic'+ str(i+1) + '\t') self.file.write('\n') for i, name in enumerate(sorted(set(novel_names))): tempo = np.mean(doctopic[novel_names == name, :], axis=0) doctopic_grouped[i, :] = tempo #self.file.write(name + " " + str(doctopic_grouped[i, :]) + '\n') self.file.write('\n') self.file.write("meetings\t\t\t\t\t") self.file.write("top topics\t\t\t\t\t\t") self.file.write("probabilities for top topics\n") corpus = corpus_names for i in range(len(doctopic)): top_topics = np.argsort(doctopic[i, :])[::-1][0:5] top_topics_str = ' '.join(str(t) for t in top_topics) top_probabilities = ' '.join(str(doctopic[i][t]) for t in top_topics) self.file.write("{}: {} {}".format(corpus[i], top_topics_str, top_probabilities) + '\n') self.file.flush() return def show_topic_words(self): self.file.write('\n') for t in range(len(self.topic_words)): self.file.write("Topic {}: {}".format(t, ' '.join(self.topic_words[t][:self.num_top_words]) + '\n')) self.file.flush() return
nilq/baby-python
python
import RoothPath
import os
import re
import yaml
import json

if __name__ == '__main__':
    yaml_dic = {}
    with open(os.path.join(os.path.join(RoothPath.get_root(), 'Benchmarks'), 'den312d.map')) as ascii_map:
        ascii_map.readline()
        h = int(re.findall(r'\d+', ascii_map.readline())[0])
        w = int(re.findall(r'\d+', ascii_map.readline())[0])
        yaml_dic['agents'] = [{'start': [48, 10], 'name': 'agent0'}]
        yaml_dic['map'] = {'dimensions': [w, h], 'obstacles': [], 'non_task_endpoints': [[48, 10]],
                           'start_locations': [[50, 10]], 'goal_locations': [[54, 10]]}
        yaml_dic['n_tasks'] = 1
        yaml_dic['task_freq'] = 1
        yaml_dic['n_delays_per_agent'] = 10
        ascii_map.readline()
        for i in range(h - 1, -1, -1):
            line = ascii_map.readline()
            print(line)
            for j in range(w):
                if line[j] == '@' or line[j] == 'T':
                    yaml_dic['map']['obstacles'].append((j, i))
    with open(os.path.join(RoothPath.get_root(), 'config.json'), 'r') as json_file:
        config = json.load(json_file)
    with open(os.path.join(os.path.join(RoothPath.get_root(), config['input_path']), 'dragon_age_map.yaml'), 'w') as param_file:
        yaml.dump(yaml_dic, param_file)
nilq/baby-python
python
# Copyright 2015, Ansible, Inc. # Luke Sneeringer <lsneeringer@ansible.com> # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json from six.moves import StringIO from tower_cli import models, resources, exceptions as exc from tower_cli.api import client from tower_cli.utils import debug from tower_cli.constants import CUR_API_VERSION from tests.compat import unittest, mock class ResourceMetaTests(unittest.TestCase): """A set of tests to establish that the ResourceMeta metaclass works in the way we expect. """ def test_commands(self): """Establish that commands are appropriately classified within the resource, and that the stock commands are not present on a BaseResource subclass. """ # Create the resource. class MyResource(models.BaseResource): endpoint = '/bogus/' @resources.command def foo(self): pass @resources.command def bar(self): pass def boring_method(self): pass # Establish that the commands are present on the resource where # we expect, and that the defined methods are still plain methods. # # Note: We can use something like types.FunctionType or # types.UnboundMethodType to test against directly, but using a # regular method is preferable because of differences between # the type internals in Python 2 vs. Python 3. # # By just getting the desirable control type from another method # on the resource, we are ensuring that it "just matches" regardless # of which version of Python is in use. self.assertIsInstance(MyResource.foo, type(MyResource.boring_method)) self.assertIsInstance(MyResource.bar, type(MyResource.boring_method)) self.assertEqual(set(MyResource.commands), set(['foo', 'bar', 'list', 'delete', 'get'])) def test_inherited_commands(self): """Establish that the stock commands are automatically present on classes inherited from Resource. """ # Create the resource. class MyResource(models.Resource): endpoint = '/bogus/' # Establish it has the commands we expect. self.assertEqual(set(MyResource.commands), set(['create', 'copy', 'modify', 'list', 'get', 'delete'])) def test_subclassed_commands(self): """Establish that commands overridden in subclasses retain their superclass implementation options. """ # Create the subclass resource, overriding a superclass command. class MyResource(models.Resource): endpoint = '/bogus/' @resources.command def list(self, **kwargs): return super(MyResource, self).list(**kwargs) # Establish that it has one of the options added to the # superclass list command. self.assertEqual(MyResource.list.__click_params__, models.Resource.list.__click_params__) def test_multiple_inheritance(self): """ Establish that click decoration from all parent class chains are preserved in a subclass. 
""" class MyMixin(models.Resource): endpoint = '/bogus/' def list(self, **kwargs): return super(MyMixin, self).list(**kwargs) class MyResource(MyMixin, models.Resource): endpoint = '/bogus/' def list(self, **kwargs): return super(MyResource, self).list(**kwargs) self.assertTrue(hasattr(MyResource.list, '__click_params__')) self.assertEqual(MyResource.list.__click_params__, models.Resource.list.__click_params__) def test_no_duplicate_options_from_inheritance(self): """ Test that metaclass does not duplicate options from multiple parents """ class MyMixin1(models.Resource): endpoint = '/bogus/' class MyMixin2(models.Resource): endpoint = '/boguser/' class MyResource(MyMixin1, MyMixin2): endpoint = '/boguser/' def list(self, **kwargs): return super(MyResource, self).list(**kwargs) self.assertTrue(hasattr(MyResource.list, '__click_params__')) self.assertEqual(MyResource.list.__click_params__, models.Resource.list.__click_params__) def test_fields(self): """Establish that fields are appropriately classified within the resource. """ # Create the resource. class MyResource(models.Resource): endpoint = '/bogus/' foo = models.Field(unique=True) bar = models.Field() # Establish that our fields lists are the length we expect. self.assertEqual(len(MyResource.fields), 2) self.assertEqual(len(MyResource.unique_fields), 1) # Establish that the fields are present in fields. self.assertEqual(MyResource.fields[0].name, 'foo') self.assertEqual(MyResource.fields[1].name, 'bar') self.assertEqual(MyResource.unique_fields, set(['foo'])) def test_error_no_endpoint(self): """Establish that Resource subclasses are required to have an endpoint, and attempting to create one that lacks an endpoint raises TypeError. """ with self.assertRaises(TypeError): class MyResource(models.Resource): pass def test_endpoint_normalization(self): """Establish that the endpoints have leading and trailing slashes added if they are not present on a resource. """ class MyResource(models.Resource): endpoint = 'foo' self.assertEqual(MyResource.endpoint, '/foo/') def test_disabled_property(self): """Establish that disabled_methods of derived classes disable specified attributes derived from base classes. """ class MyRes(models.Resource): endpoint = 'foo' foobar = 'baz' class MyDerivedRes(MyRes): endpoint = 'bar' disabled_methods = set(['foobar']) res = MyDerivedRes() with self.assertRaises(AttributeError): getattr(res, 'foobar') res.foobar = 'hey' self.assertEqual(res.foobar, 'hey') del res.foobar with self.assertRaises(AttributeError): getattr(res, 'foobar') class ResourceTests(unittest.TestCase): """A set of tests to establish that the Resource class works in the way that we expect. """ def setUp(self): # Create a resource class that can be used across this particular # suite. class FooResource(models.Resource): endpoint = '/foo/' name = models.Field(unique=True) description = models.Field(required=False) self.res = FooResource() def test_get(self): """Establish that the Resource class' `get` method works in the way that we expect. """ with client.test_mode as t: t.register_json('/foo/42/', {'id': 42, 'description': 'bar', 'name': 'foo'}) result = self.res.get(42) self.assertEqual(result['id'], 42) self.assertEqual(result['name'], 'foo') def test_list_no_kwargs(self): """Establish that the Resource class' `list` method correctly requests the resource and parses out a list of results. 
""" with client.test_mode as t: t.register_json('/foo/', {'count': 2, 'results': [ {'id': 1, 'name': 'foo', 'description': 'bar'}, {'id': 2, 'name': 'spam', 'description': 'eggs'}, ], 'next': None, 'previous': None}) result = self.res.list() self.assertEqual(t.requests[0].url, 'https://20.12.4.21/api/%s/foo/' % CUR_API_VERSION) self.assertEqual(result['count'], 2) self.assertEqual(result['results'][0]['id'], 1) def test_list_all_pages(self): """Establish that the Resource class' `list` method correctly accepts the --all-pages flag and checks follow-up pages. """ with client.test_mode as t: # Register the first, second, and third page. t.register_json('/foo/', {'count': 3, 'results': [ {'id': 1, 'name': 'foo', 'description': 'bar'}, ], 'next': '/foo/?page=2', 'previous': None}) t.register_json('/foo/?page=2', {'count': 3, 'results': [ {'id': 2, 'name': 'spam', 'description': 'eggs'}, ], 'next': '/foo/?page=3', 'previous': None}) t.register_json('/foo/?page=3', {'count': 3, 'results': [ {'id': 3, 'name': 'bacon', 'description': 'cheese'}, ], 'next': None, 'previous': None}) # Get the list result = self.res.list(all_pages=True) # Assert that there are three results, and three requests. self.assertEqual(len(t.requests), 3) self.assertEqual(len(result['results']), 3) def test_list_with_page_1_special_case(self): """Establish that the list function works even if the server gives /foo/ as the relative link for page 1. """ with client.test_mode as t: # Register the 2nd page in order to test this. t.register_json('/foo/?page=2', {'count': 2, 'results': [ {'id': 2, 'name': 'spam', 'description': 'eggs'}, ], 'next': None, 'previous': '/foo/'}) # Get the list result = self.res.list(page=2) # Check that the function knows that /foo/ is page 1 self.assertEqual(result['previous'], 1) def test_list_custom_kwargs(self): """Establish that if we pass custom keyword arguments to list, that they are included in the final request. """ with client.test_mode as t: t.register_json('/foo/?bar=baz', {'count': 0, 'results': [], 'next': None, 'previous': None}) self.res.list(query=[('bar', 'baz')]) self.assertTrue(t.requests[0].url.endswith('bar=baz')) def test_get_unexpected_zero_results(self): """Establish that if a read method gets 0 results when it should have gotten one or more, that it raises NotFound. """ with client.test_mode as t: t.register_json('/foo/?name=spam', {'count': 0, 'results': []}) with self.assertRaises(exc.NotFound): self.res.get(name='spam') def test_get_no_debug_header(self): """Establish that if get is called with include_debug_header=False, no debug header is issued. """ with mock.patch.object(type(self.res), 'read') as read: with mock.patch.object(debug, 'log') as dlog: read.return_value = {'results': [True]} result = self.res.get(42, include_debug_header=False) self.assertEqual(dlog.call_count, 0) self.assertTrue(result) def test_get_unexpected_multiple_results(self): """Establish that if a read method gets more than one result when it should have gotten one and exactly one, that it raises MultipleResults. """ # Register the response to the request URL. # Note that this response should represent bad data, since name is # generally unique within Tower. This doesn't matter for the purpose # of this test; what's important is that if we expected one and exactly # one result and we get two or more, that we complain in an expected # (and later, handled) way. 
with client.test_mode as t: t.register_json('/foo/?name=spam', {'count': 2, 'results': [ {'id': 1, 'name': 'spam'}, {'id': 2, 'name': 'spam'}, ], 'next': None, 'previous': None}) with self.assertRaises(exc.MultipleResults): self.res.get(name='spam') def test_list_with_none_kwargs(self): """Establish that if `list` is called with keyword arguments with None values, that these are ignored. This is to ensure that click's eagerness to send None values doesn't cause problems. """ # Register the request and make the call. with client.test_mode as t: t.register_json('/foo/?name=foo', {'count': 1, 'results': [ {'id': 1, 'name': 'foo', 'description': 'bar'}, ], 'next': None, 'previous': None}) self.res.list(name='foo', description=None) self.assertEqual(len(t.requests), 1) # Ensure that there are no other query param arguments other # than `?name=foo` in the request URL. self.assertNotIn('&', t.requests[0].url) self.assertTrue(t.requests[0].url.endswith('?name=foo')) def test_list_with_pagination(self): """Establish that the `list` method returns pages as integers if it is given pages at all. """ with client.test_mode as t: t.register_json('/foo/', {'count': 10, 'results': [ {'id': 1, 'name': 'bar'}, ], 'next': '/api/%s/foo/?page=2' % CUR_API_VERSION, 'previous': None}) result = self.res.list() self.assertEqual(result['next'], 2) def test_reading_with_file(self): """Establish that if we get a file-like object, that it is appropriately read. """ # Note: This is primarily for a case of longer input that belongs # in files (such as SSH RSA/DSA private keys), but in this case we're # using something trivial; we need only provide a proof of concept # to test against. sio = StringIO('bar') with client.test_mode as t: t.register_json('/foo/?name=bar', {'count': 0, 'results': [], 'next': None, 'previous': None}) self.res.list(name=sio) self.assertTrue(t.requests[0].url.endswith('?name=bar')) def test_create(self): """Establish that a standard create call works in the way that we expect. """ with client.test_mode as t: # `create` will attempt to see if the record already exists; # mock this to state that it does not. t.register_json('/foo/?name=bar', {'count': 0, 'results': [], 'next': None, 'previous': None}) t.register_json('/foo/', {'changed': True, 'id': 42}, method='POST') self.res.create(name='bar') self.assertEqual(t.requests[0].method, 'GET') self.assertEqual(t.requests[1].method, 'POST') def test_create_already_existing(self): """Establish that if we attempt to create a record that already exists, that no action ends up being taken. """ with client.test_mode as t: t.register_json('/foo/?name=bar', {'count': 1, 'results': [ {'id': 42, 'name': 'bar'}, ], 'next': None, 'previous': None}) result = self.res.create(name='bar') self.assertEqual(len(t.requests), 1) self.assertFalse(result['changed']) def test_create_missing_required_fields(self): """Establish that if we attempt to create a record and don't specify all required fields, that we raise BadRequest. """ # Create a resource with a required field that isn't the name # field. class BarResource(models.Resource): endpoint = '/bar/' name = models.Field(unique=True) required = models.Field() res = BarResource() # Attempt to write the resource and prove that it fails. 
with client.test_mode as t: t.register_json('/bar/?name=foo', {'count': 0, 'results': [], 'next': None, 'previous': None}) with self.assertRaises(exc.BadRequest): res.create(name='foo') def test_modify(self): """Establish that the modify method works in the way we expect, given a normal circumstance. """ with client.test_mode as t: t.register_json('/foo/42/', {'id': 42, 'name': 'bar', 'description': 'baz'}) t.register_json('/foo/42/', {'changed': True, 'id': 42}, method='PATCH') result = self.res.modify(42, description='spam') self.assertTrue(result['changed']) self.assertEqual(t.requests[1].body, '{"description": "spam"}') def test_modify_no_changes(self): """Establish that the modify method does not actually attempt a modification if there are no changes. """ with client.test_mode as t: t.register_json('/foo/42/', {'id': 42, 'name': 'bar', 'description': 'baz'}) result = self.res.modify(42, description='baz') self.assertFalse(result['changed']) self.assertEqual(len(t.requests), 1) def test_modify_ignore_kwargs_none(self): """Establish that we ignore keyword arguments set to None when performing writes. """ with client.test_mode as t: t.register_json('/foo/42/', {'id': 42, 'name': 'bar', 'description': 'baz'}) result = self.res.modify(42, name=None, description='baz') self.assertFalse(result['changed']) self.assertEqual(len(t.requests), 1) self.assertNotIn('name', t.requests[0].url) def test_write_file_like_object(self): """Establish that our write method, if it gets a file-like object, correctly reads it and uses the file's value as what it sends. """ sio = StringIO('bar') with client.test_mode as t: t.register_json('/foo/?name=bar', {'count': 1, 'results': [ {'id': 42, 'name': 'bar', 'description': 'baz'}, ], 'next': None, 'previous': None}) result = self.res.modify(name=sio, description='baz') self.assertFalse(result['changed']) self.assertIn('name=bar', t.requests[0].url) def test_write_with_null_field(self): """Establish that a resource with 'null' field is written.""" with client.test_mode as t: t.register_json('/foo/42/', {'id': 42, 'name': 'bar', 'description': 'baz'}, method='GET') t.register_json('/foo/42/', {'name': 'bar', 'id': 42, 'inventory': 'null'}, method='PATCH') self.res.write(42, inventory='null') self.assertEqual(json.loads(t.requests[1].body)['inventory'], None) def test_delete_with_pk(self): """Establish that calling `delete` and providing a primary key works in the way that we expect. """ with client.test_mode as t: t.register('/foo/42/', '', method='DELETE') result = self.res.delete(42) self.assertTrue(result['changed']) def test_delete_without_pk(self): """Establish that calling `delete` with keyword arguments works in the way that we expect. """ with client.test_mode as t: t.register_json('/foo/?name=bar', {'count': 1, 'results': [ {'id': 42, 'name': 'bar', 'description': 'baz'}, ], 'next': None, 'previous': None}) t.register('/foo/42/', '', method='DELETE') result = self.res.delete(name='bar') self.assertEqual(len(t.requests), 2) self.assertTrue(t.requests[1].url.endswith('/foo/42/')) self.assertTrue(result['changed']) def test_delete_with_pk_already_missing(self): """Establish that calling `delete` on a record that does not exist returns back an unchanged response. 
""" with client.test_mode as t: t.register_json('/foo/42/', '', method='DELETE', status_code=404) result = self.res.delete(42) self.assertFalse(result['changed']) def test_delete_with_pk_already_missing_exc(self): """Establish that calling `delete` on a record that does not exist raises an exception if requested. """ with client.test_mode as t: t.register_json('/foo/42/', '', method='DELETE', status_code=404) with self.assertRaises(exc.NotFound): self.res.delete(42, fail_on_missing=True) def test_delete_without_pk_already_missing(self): """Establish that calling `delete` on a record without a primary key correctly sends back an unchanged response. """ with client.test_mode as t: t.register_json('/foo/?name=bar', {'count': 0, 'results': []}) result = self.res.delete(name='bar') self.assertFalse(result['changed']) def test_delete_without_pk_already_missing_exc(self): """Establish that calling `delete` on a record without a primary key correctly sends back an unchanged response. """ with client.test_mode as t: t.register_json('/foo/?name=bar', {'count': 0, 'results': []}) with self.assertRaises(exc.NotFound): self.res.delete(name='bar', fail_on_missing=True) def test_assoc_already_present(self): """Establish that the _assoc method returns an unchanged status message if it attempts to associate two records that are already associated. """ with client.test_mode as t: t.register_json('/foo/42/bar/?id=84', {'count': 1, 'results': [ {'id': 84}, ], 'next': None, 'previous': None}) result = self.res._assoc('bar', 42, 84) self.assertFalse(result['changed']) def test_assoc_not_already_present(self): """Establish that the _assoc method returns an changed status message and associates objects if appropriate. """ with client.test_mode as t: t.register_json('/foo/42/bar/?id=84', {'count': 0, 'results': []}) t.register_json('/foo/42/bar/', {}, method='POST') result = self.res._assoc('bar', 42, 84) self.assertEqual(json.loads(t.requests[1].body), {'associate': True, 'id': 84}) self.assertTrue(result['changed']) def test_disassoc_not_already_present(self): """Establish that the _disassoc method returns an unchanged status message if it attempts to associate two records that are not associated. """ with client.test_mode as t: t.register_json('/foo/42/bar/?id=84', {'count': 0, 'results': []}) result = self.res._disassoc('bar', 42, 84) self.assertFalse(result['changed']) def test_disassoc_already_present(self): """Establish that the _assoc method returns an changed status message and associates objects if appropriate. """ with client.test_mode as t: t.register_json('/foo/42/bar/?id=84', {'count': 1, 'results': [ {'id': 84}, ], 'next': None, 'previous': None}) t.register_json('/foo/42/bar/', {}, method='POST') result = self.res._disassoc('bar', 42, 84) self.assertEqual(json.loads(t.requests[1].body), {'disassociate': True, 'id': 84}) self.assertTrue(result['changed']) def test_lookup_with_unique_field_not_present(self): """Establish that a if _lookup is invoked without any unique field specified, that BadRequest is raised. """ with client.test_mode: with self.assertRaises(exc.BadRequest): self.res._lookup(description='abcd') def test_lookup_errant_found(self): """Establish that if _lookup is invoked and finds a record when it should not, that an appropriate exception is raised. 
""" with client.test_mode as t: t.register_json('/foo/?name=bar', {'count': 1, 'results': [ {'id': 42, 'name': 'bar'}, ], 'next': None, 'previous': None}) with self.assertRaises(exc.Found): self.res._lookup(name='bar', fail_on_found=True) def test_copy_with_multiples(self): """ A resource with fields marked `multiple` has those fields copied fully """ class BarResource(models.Resource): endpoint = '/bar/' name = models.Field(unique=True) variables = models.Field(multiple=True) res = BarResource() with mock.patch.object(res, 'read') as read_mock: read_mock.return_value = { "count": 1, "results": [ { "id": 42, "name": "foobarin", "variables": "foobar: barfood" } ] } with mock.patch.object(res, 'write') as write_mock: res.copy() name, args, kwargs = write_mock.mock_calls[0] self.assertEqual(kwargs['name'][:len("foobarin")], "foobarin") self.assertEqual(kwargs['variables'], ('foobar: barfood',)) self.assertNotIn('id', kwargs) class MonitorableResourcesTests(unittest.TestCase): """Estblaish that the MonitorableResource abstract class works in the way that we expect. """ def test_status_not_implemented(self): """Establish that the abstract MonitorableResource's status method raises NotImplementedError. """ with self.assertRaises(NotImplementedError): models.MonitorableResource().status(None) class SurveyResourceTests(unittest.TestCase): """Test methods specific to survey models.""" def setUp(self): self.res = models.SurveyResource() self.res.endpoint = '/job_templates/' def test_survey_no_op(self): with mock.patch.object(models.base.BaseResource, 'write') as w: self.res.modify(name='foobar') w.assert_called_once_with( create_on_missing=False, force_on_exists=True, name='foobar', pk=None) def test_survey_create(self): with mock.patch.object(models.base.BaseResource, 'write') as w: w.return_value = {'id': 42, 'survey_enabled': True} survey_data = {'foobar': 'foo'} with client.test_mode as t: t.register_json( '/job_templates/42/survey_spec/', {}, method='POST' ) self.res.modify(survey_spec=survey_data, verbose=True) self.assertEqual(t.requests[0].body, json.dumps(survey_data)) def test_survey_delete(self): with mock.patch.object(models.base.BaseResource, 'write') as w: w.return_value = {'id': 42, 'survey_enabled': True} with client.test_mode as t: t.register_json( '/job_templates/42/survey_spec/', {}, method='DELETE' ) self.res.modify(survey_spec={}, verbose=True) self.assertEqual(t.requests[0].method, 'DELETE')
nilq/baby-python
python
# -*- coding: utf-8 -*-
import json
from typing import Iterable

from pyrus_nn.rust.pyrus_nn import PyrusSequential
from pyrus_nn import layers


class Sequential:

    # This is the actual Rust implementation with Python interface
    _model: PyrusSequential

    def __init__(self, lr: float, n_epochs: int, batch_size: int = 32, cost_func: str = "mse"):
        """
        Initialize the model.

        Parameters
        ----------
        lr: float
            The learning rate of the model
        n_epochs: int
            How many epochs shall it do for training
        """
        self._model = PyrusSequential(lr, n_epochs, batch_size, cost_func)
        self.lr = lr
        self.n_epochs = n_epochs
        self.batch_size = batch_size
        self.cost_func = cost_func

    def fit(self, X: Iterable[Iterable[float]], y: Iterable[Iterable[float]]):
        """
        Fit the model using X and y. Each of which would be a 2d iterable.

        For example::

            X = [[1, 2, 3], [4, 5, 6]]
            y = [[1], [2]]

        Parameters
        ----------
        X: Iterable
            2d iterable
        y: Iterable
            2d iterable

        Returns
        -------
        self
        """
        self._model.fit(X, y)
        return self

    def predict(self, X: Iterable[Iterable[float]]) -> Iterable[Iterable[float]]:
        """
        Apply the model to input data

        Parameters
        ----------
        X: Iterable
            2d iterable

        Returns
        -------
        Iterable[Iterable[float]]
        """
        return self._model.predict(X)

    def add(self, layer: layers.Layer):
        """
        Add a layer to this network

        Parameters
        ----------
        layer: pyrus_nn.layers.Layer
            A layer compatible with the previous layer

        Returns
        -------
        None
        """
        if isinstance(layer, layers.Dense):
            self._model.add_dense(layer.n_input, layer.n_output, layer.activation)

    def to_dict(self):
        """
        Serialize this network as a dictionary of primitives suitable
        for further serialization into json, yaml, etc.

        Returns
        -------
        dict
        """
        return dict(
            params=self.get_params(),
            model=json.loads(self._model.to_json())
        )

    @classmethod
    def from_dict(cls, conf: dict):
        """
        Re-construct the model from a serialized version of itself

        Parameters
        ----------
        conf: dict
            Configuration resulting from a previous call to ``.to_dict()``

        Returns
        -------
        Sequential
        """
        model = cls(**conf['params'])
        model._model = PyrusSequential.from_json(json.dumps(conf['model']))
        return model

    def get_params(self, deep=False):
        return dict(
            lr=self.lr,
            n_epochs=self.n_epochs
        )

    def __eq__(self, other: "Sequential"):
        return other.to_dict() == self.to_dict()
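A hypothetical end-to-end use of this wrapper, assuming the compiled Rust extension is importable and that layers.Dense accepts the n_input, n_output and activation values read by add() as constructor arguments (that constructor signature is an assumption, not taken from the source):

# Toy regression data: 2 features in, 1 target out.
X = [[0.0, 1.0], [1.0, 0.0], [1.0, 1.0], [0.0, 0.0]]
y = [[1.0], [1.0], [0.0], [0.0]]

model = Sequential(lr=0.01, n_epochs=100)
model.add(layers.Dense(2, 8, activation="sigmoid"))  # signature assumed
model.add(layers.Dense(8, 1, activation="sigmoid"))

model.fit(X, y)
print(model.predict(X))

# Round trip through the dict serialisation defined above.
restored = Sequential.from_dict(model.to_dict())
assert restored == model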
nilq/baby-python
python
from django import template

from cart.utils import get_or_set_order_session

register = template.Library()


@register.filter
def cart_item_count(request):
    order = get_or_set_order_session(request)
    count = order.items.count()
    return count
nilq/baby-python
python
from visions.utils.monkeypatches import imghdr_patch, pathlib_patch

__all__ = [
    "imghdr_patch",
    "pathlib_patch",
]
nilq/baby-python
python
from pprint import pprint

from ayesaac.services.common import QueueManager
from ayesaac.utils.logger import get_logger

logger = get_logger(__file__)


class Interpreter(object):
    """
    The Interpreter class purpose is a simple comparison with what the vision part
    find and what the user asked for. (Which object was found and not found)
    """

    def __init__(self):
        self.queue_manager = QueueManager(
            [self.__class__.__name__, "NaturalLanguageGenerator"]
        )
        self.memory = {}
        logger.info(f"{self.__class__.__name__} ready")

    def filter_objects(self, body):
        return body["objects"]

    def filter_texts(self, body):
        return body["texts"]

    def callback(self, body, **_):
        data = None
        key = ""
        if "objects" in body:
            key = "objects"
            data = self.filter_objects(body)
            body["objects"] = data
        elif "texts" in body:
            key = "texts"
            data = self.filter_texts(body)
            body["texts"] = data
        if body["wait_package"] == 1:
            body["path_done"].append(self.__class__.__name__)
            del body["vision_path"]
            # pprint(body)
            # TODO: uncomment if you wanna test the NLG, it could be text, objects,
            # objects + colour, objects + lateral position
            self.queue_manager.publish("NaturalLanguageGenerator", body)
        else:
            if body["intern_token"] not in self.memory:
                self.memory[body["intern_token"]] = {key: data}
            elif (
                body["intern_token"] in self.memory
                and body["wait_package"] < len(self.memory[body["intern_token"]]) - 1
            ):
                self.memory[body["intern_token"]][key] = data
            else:
                for key in self.memory[body["intern_token"]]:
                    body[key] = self.memory[body["intern_token"]][key]
                del self.memory[body["intern_token"]][key]
                # pprint(body)
                # TODO: uncomment if you wanna test the NLG
                self.queue_manager.publish("NaturalLanguageGenerator", body)

    def run(self):
        self.queue_manager.start_consuming(self.__class__.__name__, self.callback)


def main():
    interpreter = Interpreter()
    interpreter.run()


if __name__ == "__main__":
    main()
nilq/baby-python
python
from core.models import MedicalCare, Pets, Tutor, Vet
from django.contrib import admin

admin.site.register(Vet)


class MedicalCareAdmin(admin.ModelAdmin):
    list_display = ('id', 'date', 'time', 'pet_name', 'procedure', 'report')


admin.site.register(MedicalCare, MedicalCareAdmin)


class PetsAdmin(admin.ModelAdmin):
    list_display = ('id', 'pet_name', 'species', 'breed', 'gender',
                    'date_of_birth', 'castrated', 'weight')


admin.site.register(Pets, PetsAdmin)


class TutorAdmin(admin.ModelAdmin):
    list_display = ('tutor_name', 'cpf', 'phone', 'email', 'street',
                    'number', 'district', 'state', 'cep')


admin.site.register(Tutor, TutorAdmin)
nilq/baby-python
python
# Test Array Implementation
import os
import sys

sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))

from pyds import array

# test array
print("01 : ======= Creating Array of size 5 =======")
arr = array(5)

print("02: ======= Traversing Array =======")
arr.print()

print("03: ======= Insert 5 Items =======")
arr.insert(0, 1)
arr.insert(1, 2)
arr.insert(2, 3)
arr.insert(3, 4)
arr.insert(4, 5)

print("======= Traversing Array =======")
arr.print()

print("04: ======= Exceeding Items =======")
try:
    arr.insert(5, 6)
except Exception as err:
    print(err)

print("05: ======= Delete Item at index 0 =======")
print(arr.delete(0))

print("06: ======= Re-Traversing Array =======")
arr.print()
nilq/baby-python
python
import json
import time
import logging
import requests
import functools


class WechatAppPush:
    """
    WechatAppPush decorator

    Push the msg of the decorated function

    Example 1:
        @WechatAppPush(corpid, corpsecret, agentid)
        def func():
            return 'xxx'
    Example 2:
        def func():
            return 'xxx'
        WechatAppPush(corpid, corpsecret, agentid)(func())()
    Example 3:
        WechatAppPush(corpid, corpsecret, agentid)('xxx')()
    Then wechat app will push xxx

    :param corpid: wechat app corpid
    :param corpsecret: wechat app corpsecret
    :param agentid: wechat app agentid
    :param touser: wechat app @ touser (optional, default: @all)
    :param message: wechat push message (optional, default: Wechat push message test)
    :return func:

    docs: https://developer.work.weixin.qq.com/document/path/90236
    """

    def __init__(self, corpid: str, corpsecret: str, agentid: str,
                 touser: str = '@all', message: str = 'Wechat push message test') -> None:
        self._corpid = corpid
        self._corpsecret = corpsecret
        self._agentid = agentid
        self._touser = touser
        self._message = message

    def __call__(self, func=None):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            # before func
            try:
                self._message = func(*args, **kwargs)
            except Exception:
                # func is not callable: it is the message itself (Example 2 and 3)
                if func is not None:
                    self._message = func
            # after func
            response = self.send_text()
            if response != "ok":
                print(f'Wechat push error: {response}')
            return self._message
        return wrapper

    def get_access_token(self) -> str:
        send_url = "https://qyapi.weixin.qq.com/cgi-bin/gettoken"
        send_values = {
            "corpid": self._corpid,
            "corpsecret": self._corpsecret,
        }
        response = requests.post(send_url, params=send_values).json()
        return response["access_token"]

    def send_text(self) -> str:
        send_url = (
            "https://qyapi.weixin.qq.com/cgi-bin/message/send?access_token="
            + self.get_access_token()
        )
        send_values = {
            "touser": self._touser,
            "msgtype": "text",
            "agentid": self._agentid,
            "text": {"content": self._message},
            "safe": "0",
        }
        send_msges = bytes(json.dumps(send_values), "utf-8")
        response = requests.post(send_url, send_msges)
        response = response.json()
        return response["errmsg"]


class Debug:
    """
    Debug decorator

    :param level: logging level used for all messages
    :param func_time: if True, log the runtime of the decorated function
    :param func_info: if True, log the signature and return value of the decorated function
    """

    def __init__(self, level=logging.DEBUG, func_time=True, func_info=True) -> None:
        self._func_timer = func_time
        self._level = level
        self._func_info = func_info
        LOG_FORMAT = "%(asctime)s - %(levelname)s - %(message)s"
        DATE_FORMAT = "%m/%d/%Y %H:%M:%S %p"
        logging.basicConfig(level=self._level, format=LOG_FORMAT, datefmt=DATE_FORMAT)

    def __call__(self, func):
        @self.func_time
        @self.func_info
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            value = func(*args, **kwargs)
            return value
        return wrapper

    def func_time(self, func):
        """Log the runtime of the decorated function"""
        if not self._func_timer:
            return func

        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            start_time = time.perf_counter()
            # before func
            value = func(*args, **kwargs)
            # after func
            end_time = time.perf_counter()
            run_time = end_time - start_time
            logging.log(msg=f"Finished {func.__name__!r} in {run_time:.4f} secs", level=self._level)
            return value
        return wrapper

    def func_info(self, func):
        """Log the function signature and return value"""
        if not self._func_info:
            return func

        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            args_repr = [repr(a) for a in args]
            kwargs_repr = [f"{k}={v!r}" for k, v in kwargs.items()]
            signature = ", ".join(args_repr + kwargs_repr)
            logging.log(msg=f"Calling {func.__name__}({signature})", level=self._level)
            # before func
            value = func(*args, **kwargs)
            # after func
            logging.log(msg=f"{func.__name__!r} returned {value!r}", level=self._level)
            return value
        return wrapper
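# Hedged usage sketch (not part of the original module). The corpid/corpsecret/
# agentid values below are placeholders, so the WeChat push would fail against
# the real API; the Debug example runs locally.
if __name__ == '__main__':

    @Debug(level=logging.INFO)
    def add(x, y):
        return x + y

    add(2, 3)  # logs "Calling add(2, 3)", the runtime, and "'add' returned 5"

    @WechatAppPush('your_corpid', 'your_corpsecret', 'your_agentid')
    def job():
        return 'job finished'

    # job()  # would push 'job finished' to the configured WeChat app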
nilq/baby-python
python
N = int(input())
print(f'{((N + 1) // 2 / N):.10f}')
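# A minimal sketch (not part of the original snippet): the same formula evaluated
# for a fixed N instead of stdin. (N + 1) // 2 counts the odd integers in 1..N, so
# the printed value is the fraction of odd numbers in that range.
N_example = 5                          # assumed value, for illustration only
odd_count = (N_example + 1) // 2       # odd values in 1..5 are 1, 3, 5 -> 3
print(f'{odd_count / N_example:.10f}')  # 0.6000000000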
nilq/baby-python
python
try: from datetime import datetime import pandas as pd import numpy as np from pathlib import Path from sklearn.experimental import enable_iterative_imputer from sklearn.impute import IterativeImputer from sklearn.linear_model import BayesianRidge from sklearn import preprocessing except: pass from environmental_data_modules import PostProcessor, AurnModule, DateRangeProcessor class AurnPostProcessor(PostProcessor, AurnModule, DateRangeProcessor): """ Class used for post-processing data that has been extracted from AURN server. """ # Define 'absolute' constants BASE_FILE_OUT = '{}/aurn_processed_daily_{}.csv' # Define default constants DEFAULT_OUT_DIR = 'Aurn_processed_data' DEFAULT_EMEP_FILENAME = None # Calculation defaults DEFAULT_MIN_YEARS_REFERENCE = 1 DEFAULT_MIN_YEARS = 1 DEFAULT_IMPUTER_RANDOM_STATE = 0 DEFAULT_IMPUTER_ADD_INDICATOR = False DEFAULT_IMPUTER_INITIAL_STRATEGY = 'mean' DEFAULT_IMPUTER_MAX_ITER = 100 try: DEFAULT_IMPUTER_ESTIMATOR = BayesianRidge() except: DEFAULT_IMPUTER_ESTIMATOR = None DEFAULT_TRANSFORMER_OUTPUT_DISTRIBUTION = 'normal' DEFAULT_TRANSFORMER_METHOD = 'box-cox' DEFAULT_TRANSFORMER_STANDARDIZE = False def __init__(self, metadata_filename=AurnModule.DEFAULT_METADATA_FILE, metadata_url=AurnModule.DEFAULT_METADATA_URL, out_dir=DEFAULT_OUT_DIR, verbose=PostProcessor.DEFAULT_VERBOSE): """ Initialise instance of the AurnPostProcessor class. Initialises the private class variables Args: metadata_filename: filename of the metadata used in Aurn data extraction metadata_url: alternative source of AURN metadata, if metadata_filename is None out_dir: (string) directory to be used for all outputs verbose: (integer) level of verbosity in output. Returns: Initialised instance of AurnPostProcessor """ super(AurnPostProcessor, self).__init__(out_dir, verbose) AurnModule.__init__(self, metadata_filename=metadata_filename, metadata_url=metadata_url) DateRangeProcessor.__init__(self) self._emep_data = None self.min_years_reference = AurnPostProcessor.DEFAULT_MIN_YEARS_REFERENCE self.min_years = AurnPostProcessor.DEFAULT_MIN_YEARS self.impute_data = False self._imputer = None self._transformer = None @PostProcessor.transformer.setter def transformer(self, transformer): if transformer is None or type(transformer).__name__ in ['QuantileTransformer','PowerTransformer']: self._transformer = transformer else: raise ValueError('Error setting transformer, incorrect object type: {}'.format(type(transformer).__name__)) @PostProcessor.station_data.setter def station_data(self, raw_data): if self.verbose > 0: print('Loading stations data metadata') try: station_data = raw_data.drop_duplicates() station_data = station_data.set_index('site_id') except Exception as err: raise ValueError('Unable to get correct site data from Metadata input file. Check metadata file content.') self._station_data = station_data def impute_method_setup(self, random_state=DEFAULT_IMPUTER_RANDOM_STATE, add_indicator=DEFAULT_IMPUTER_ADD_INDICATOR, initial_strategy=DEFAULT_IMPUTER_INITIAL_STRATEGY, max_iter=DEFAULT_IMPUTER_MAX_ITER, estimator=DEFAULT_IMPUTER_ESTIMATOR, output_distribution=DEFAULT_TRANSFORMER_OUTPUT_DISTRIBUTION, transformer_method=DEFAULT_TRANSFORMER_METHOD, transformer_standardize=DEFAULT_TRANSFORMER_STANDARDIZE): """ Initialises the IterativeImputer, QuantileTransformer and PowerTransformer methods required if missing data is to be imputed. Parameters are passed to the sklearn routines. Where this is being done it is noted below. 
For further documentation on how these functions work, and what the parameters denote, please refer to the sklearn documentation. IterativeImputer: https://scikit-learn.org/stable/modules/generated/sklearn.impute.IterativeImputer.html QuantileTransformer: https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.QuantileTransformer.html PowerTransformer: https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.PowerTransformer.html Args: random_state: (int) (IterativeImputer & QuantileTransformer) seed for pseudo random number generator add_indicator: (boolean) (IterativeImputer) if True adds a `MissingIndicator` transform to the stack initial_strategy: (str) (IterativeImputer) define strategy to use for initialising missing values max_iter: (int) (IterativeImputer) maximum number of imputation rounds to perform estimator: (str) (IterativeImputer) estimator method to be used output_distribution: (str) (QuantileTransformer) Marginal distribution for the transformed data transformer_method (str) (PowerTransformer) method to use, 'box-cox' is default transformer_standardize (boolean) (PowerTransformer) select if zero-mean, unit-variance normalisation is applied, default is True Returns: None """ # set the imputer options (if we are using them) self.imputer = IterativeImputer(random_state=random_state, add_indicator=add_indicator, initial_strategy=initial_strategy, max_iter=max_iter, verbose=self.verbose, estimator=estimator) # set the power transform options self.transformer_quantile = preprocessing.QuantileTransformer(output_distribution=output_distribution, random_state=random_state) # set the power transform options self.transformer_power = preprocessing.PowerTransformer(method=transformer_method, standardize=transformer_standardize) def process(self, in_file, date_range=None, site_list=AurnModule.DEFAULT_SITE_LIST, emep_filename=DEFAULT_EMEP_FILENAME, min_years_reference=DEFAULT_MIN_YEARS_REFERENCE, min_years=DEFAULT_MIN_YEARS, impute_data=PostProcessor.DEFAULT_IMPUTE_DATA, save_to_csv=PostProcessor.DEFAULT_SAVE_TO_CSV, outfile_suffix='', species_list=AurnModule.SPECIES_LIST_EXTRACTED): """ Post process the data extracted from the AURN dataset, based on the parameters given. Args: in_file: (str) The file spec of the input file (required) date_range: (list of 2 datetime) The date range of interest site_list: (list of string/number) Site IDs of interest emep_filename: (str) The file spec of the EMEP file to be used to help calculate #Todo Doug min_years_reference: (float) The minimum number of years of data for any site that we are going to use as a reference site later. (this cannot be less than min_years) min_years: (float) The minimum number of years of data that a site must have impute_data: (boolean) Whether to attempt to impute missing data save_to_csv: (boolean) Whether to save the output dateframes to CSV file(s) outfile_suffix: (str) The suffix to appended to the end of output file names. Returns: daily_dataframe: daily dataset, for all measurements, as pandas.Dataframe Required MultiIndex: 'time_stamp' (datetime object): date (only) (e.g. 2017-06-01) 'sensor_name' (string): ID string for site (e.g. 
'LIN3 [AQ]') Required columns: 'O3.max' (float): daily maximum value 'O3.mean' (float): daily mean value 'O3.flag' (float): flag to indicate fraction of imputed data (1 = fully imputed, 0 = no imputed values were used) 'PM10.max' (float): daily maximum value 'PM10.mean' (float): daily mean value 'PM10.flag' (float): flag to indicate fraction of imputed data (1 = fully imputed, 0 = no imputed values were used) 'PM2.5.max' (float): daily maximum value 'PM2.5.mean' (float): daily mean value 'PM2.5.flag' (float): flag to indicate fraction of imputed data (1 = fully imputed, 0 = no imputed values were used) 'NO2.max' (float): daily maximum value 'NO2.mean' (float): daily mean value 'NO2.flag' (float): flag to indicate fraction of imputed data (1 = fully imputed, 0 = no imputed values were used) 'NOXasNO2.max' (float): daily maximum value 'NOXasNO2.mean' (float): daily mean value 'NOXasNO2.flag' (float): flag to indicate fraction of imputed data (1 = fully imputed, 0 = no imputed values were used) 'SO2.max' (float): daily maximum value 'SO2.mean' (float): daily mean value 'SO2.flag' (float): flag to indicate fraction of imputed data (1 = fully imputed, 0 = no imputed values were used) """ if not isinstance(in_file, str): raise ValueError('in_file must be a string') # Process inputs if date_range is not None: self.date_range = [datetime.strptime(date_range[0], DateRangeProcessor.INPUT_DATE_FORMAT), datetime.strptime(date_range[1], DateRangeProcessor.INPUT_DATE_FORMAT)] else: self.date_range = [self.get_available_start(), self.get_available_end()] self.file_out = AurnPostProcessor.BASE_FILE_OUT.format(self.out_dir, outfile_suffix) self._emep_data = self.load_emep_data(emep_filename) self.min_years = min_years self.min_years_reference = min_years_reference self.species_list = species_list self.site_list = site_list self.station_data = self.metadata['AURN_metadata'][['site_id', 'latitude', 'longitude', 'site_name']] if self.verbose > 1: print('Station data: \n {}'.format(self.station_data)) self.impute_data = impute_data # load and prepare the hourly dataset hourly_dataframe = self.load_aurn_data(in_file) print('filter for minimum data lengths, and reduce dataset to only stations of interest') hourly_dataframe_filtered, reference_sites, required_sites, site_list_internal = \ self.list_required_and_reference_sites(hourly_dataframe) # get the list of required sites from what is available, and what was requested site_list_internal = set(site_list_internal).intersection(self.site_list) if len(hourly_dataframe_filtered.index) == 0: print('Exiting post-processing: Metadata is empty after initial filtering processes') return if self.impute_data: print('imputation of data, returning hourly data') hourly_dataframe = self.organise_data_imputation( hourly_dataframe_filtered, reference_sites, required_sites, site_list_internal) else: print('sorting data (no imputation), returning hourly data') hourly_dataframe = self.organise_data(hourly_dataframe_filtered, site_list_internal) # calculate the daily max and mean for each station daily_dataframe = self.combine_and_organise_mean_max(hourly_dataframe) if save_to_csv: # write this dataset to file daily_dataframe.to_csv(self.file_out, index=True, header=True, float_format='%.2f') return daily_dataframe def combine_and_organise_mean_max(self, hourly_dataframe): """ Combine and organise the daily mean, maximum, and count information. 
Args: hourly_dataframe: hourly dataset, for all measurements, as pandas.Dataframe Required Index: timestamp (datetime object): site_id (string): Optional Columns: O3 (float): PM10 (float): PM2.5 (float): NO2 (float): NOXasNO2 (float): SO2 (float): imputed O3 (int): flag indicating imputed data (0=original,1=imputed) imputed PM10 (int): imputed PM2.5 (int): imputed NO2 (int): imputed NOXasNO2 (int): imputed SO2 (int): Returns: final_dataframe: daily dataset, for all measurements, as pandas.Dataframe Required MultiIndex: 'time_stamp' (datetime object): date (only) (e.g. 2017-06-01) 'sensor_name' (string): ID string for site (e.g. 'LIN3 [AQ]') Required columns: 'O3.max' (float): daily maximum value 'O3.mean' (float): daily mean value 'O3.flag' (float): flag to indicate fraction of imputed data (1 = fully imputed, 0 = no imputed values were used) 'PM10.max' (float): daily maximum value 'PM10.mean' (float): daily mean value 'PM10.flag' (float): flag to indicate fraction of imputed data (1 = fully imputed, 0 = no imputed values were used) 'PM2.5.max' (float): daily maximum value 'PM2.5.mean' (float): daily mean value 'PM2.5.flag' (float): flag to indicate fraction of imputed data (1 = fully imputed, 0 = no imputed values were used) 'NO2.max' (float): daily maximum value 'NO2.mean' (float): daily mean value 'NO2.flag' (float): flag to indicate fraction of imputed data (1 = fully imputed, 0 = no imputed values were used) 'NOXasNO2.max' (float): daily maximum value 'NOXasNO2.mean' (float): daily mean value 'NOXasNO2.flag' (float): flag to indicate fraction of imputed data (1 = fully imputed, 0 = no imputed values were used) 'SO2.max' (float): daily maximum value 'SO2.mean' (float): daily mean value 'SO2.flag' (float): flag to indicate fraction of imputed data (1 = fully imputed, 0 = no imputed values were used) """ #### group by date and site daily_grouped_data = hourly_dataframe.groupby([pd.Grouper(level=self._timestamp_string, freq='1D'), self._site_string]) spc_list = self.species_list #### loop by spc through grouped data, and calculate the mean, max, and flag values for spc in spc_list: temp_dataframe = pd.DataFrame() temp_dataframe['{}_mean'.format(spc)] = daily_grouped_data.mean()[spc] temp_dataframe['{}_max'.format(spc)] = daily_grouped_data.max()[spc] temp_dataframe['{}_flag'.format(spc)] = daily_grouped_data.mean()['{}_flag'.format(spc)] try: final_dataframe = final_dataframe.merge(temp_dataframe, how='outer', left_index=True, right_index=True) except: final_dataframe = temp_dataframe.copy() #### rename the sites, to include AQ flag final_dataframe.index = final_dataframe.index.set_levels( ['{} [AQ]'.format(x) for x in final_dataframe.index.levels[1]], level=1) #### return output dataframe return(final_dataframe) def load_aurn_data(self, file_in): """ Loading the AURN dataset. Args: file_in (Path object or string): path for the file to be read in Returns: hourly_dataframe: hourly dataset, for all measurements, as pandas.Dataframe Index: none Required Columns: timestamp (datetime object): site_id (string): Optional Columns: O3 (float): PM10 (float): PM2.5 (float): NO2 (float): NOXasNO2 (float): SO2 (float): """ # Read in hourly dataframe file try: hourly_dataframe = pd.read_csv(file_in, sep=',', usecols=[AurnModule.INDEX_EXTRACTED].append(AurnModule.NEW_FILE_COLS), index_col=AurnModule.INDEX_EXTRACTED, parse_dates=[self._timestamp_string]) except Exception as err: raise ValueError('Unable to read Met extracted data file {}. 
{}'.format(file_in, err)) if self.verbose > 1: print('Hourly dataframe: \n {}'.format(hourly_dataframe)) print('Hourly dataframe data types: \n {}'.format(hourly_dataframe.dtypes)) return(hourly_dataframe) def load_emep_data(self, filename): """ Loads the EMEP model data, or create an empty dataframe (required for logic checks in the workflow) Args: filename (str): location of the EMEP file. This should be empty if there is no EMEP data Returns: emep_dataframe: pandas Dataframe, containing the EMEP model data. If no EMEP data is to be used then this will be an empty Dataframe. Index: none Required Columns: timestamp (datetime object): site_id (string): O3 (float): PM10 (float): PM2.5 (float): NO2 (float): NOXasNO2 (float): SO2 (float): """ if filename is not None: filename = Path(filename) print('reading emep file') try: emep_dataframe = pd.read_csv(filename) except Exception as err: raise ValueError('Error loading the emap data from filename: {} . {}'.format(filename, err)) try: return emep_dataframe.rename(columns={'NOx': 'NOXasNO2'}) except Exception as err: raise ValueError('EMEP file does not contain an \'NOx\' column') else: return pd.DataFrame() def list_required_and_reference_sites(self, data_in): """ This function creates the lists of required sites, and reference sites, for the final dataset. Args: data_in: hourly dataset, for all measurements, as pandas.Dataframe Index: none Required Columns: timestamp (datetime object): site_id (string): Optional Columns: O3 (float): PM10 (float): PM2.5 (float): NO2 (float): NOXasNO2 (float): SO2 (float): Returns: met_data_filtered: pandas dataframe, as above, containing hourly dataset for only the required station datasets reference_sites: (dict, keys are species): items: (list of strings) the site_id's for our reference sites for each `spc` required_sites: (dict, keys are species): items: (list of strings) required sites for `spc` combined_req_site_list: (list, strings) a single list of required sites """ print(' get the lists of required and reference stations for each measurement variable') tempgroups = data_in.groupby([self._site_string, pd.Grouper(key=self._timestamp_string, freq='1D')]) daily_hour_counts = tempgroups.count() spc_list = daily_hour_counts.columns.values required_sites = {} reference_sites = {} combined_req_site_list = [] for spc in spc_list: print('site day counts for {}'.format(spc)) req_days_counts = daily_hour_counts[spc] req_days_counts = req_days_counts[req_days_counts > 0] required_sites[spc], reference_sites[spc] = self.station_listing(req_days_counts) combined_req_site_list = combined_req_site_list + required_sites[spc] print('VERBOSE: ', self.verbose) if self.verbose > 0: print('\t\treq sites {}:'.format(spc), required_sites[spc]) if self.verbose > 0: print('\t\tuse sites {}:'.format(spc), reference_sites[spc]) # get a list of all sites which are required for at least one measurement set combined_req_site_list = list(dict.fromkeys(combined_req_site_list)) data_filtered = data_in[data_in[self._site_string].isin(combined_req_site_list)] return data_filtered, reference_sites, required_sites, combined_req_site_list def organise_data_imputation(self, hourly_dataframe_filtered, reference_sites, required_sites, site_list_internal): """ Function for organising the imputation of the datasets. This runs the 'transform_and_impute_data' function for each of the variables of interest. 
Args: hourly_dataframe_filtered: hourly dataset, for all measurements, as pandas.Dataframe Index: none Required Columns: timestamp (datetime object): site_id (string): Optional Columns: O3 (float): PM10 (float): PM2.5 (float): NO2 (float): NOXasNO2 (float): SO2 (float): reference_sites (list, string or int): sites to use for reference when imputing datasets required_sites: (dict, keys are species): items: (list of strings) required sites for `spc` site_list_internal (list, string or int): combined list of sites to retain Returns: output_dataframe: hourly dataset, for all measurements, as pandas.Dataframe Required Index: timestamp (datetime object): site_id (string): Optional Columns: O3 (float): PM10 (float): PM2.5 (float): NO2 (float): NOXasNO2 (float): SO2 (float): O3_flag (int): flag indicating imputed data (0=original,1=imputed) PM10_flag (int): PM2.5_flag (int): NO2_flag (int): NOXasNO2_flag (int): SO2_flag (int): """ transformer = self.transformer_quantile output_dataframe = pd.DataFrame() date_index = pd.date_range(start=self.start, end=self.end, freq='1H', name=self._timestamp_string) # Set the number of reference stations to request ref_station_numbers = [len(reference_sites[x]) for x in reference_sites.keys()] print(ref_station_numbers) station_number = min([5] + [x - 1 for x in ref_station_numbers]) hourly_dataframe_internal = hourly_dataframe_filtered.set_index(self._timestamp_string) spc_list = self.species_list if not self._emep_data.empty: if self.verbose > 0: print('Loading EMEP data') emep_dataframe_internal = self._emep_data.set_index(self._timestamp_string) if self.verbose > 1: print('1. Site list internal: ', site_list_internal) for site in site_list_internal: if self.verbose > 1: print('2. Site: ', site) # get list of chemical species that we need to impute for this site (including Date info) req_spc = [] for spc in spc_list: if site in required_sites[spc]: req_spc.append(spc) # copy these to a new dataframe working_hourly_dataframe = pd.DataFrame([], index=date_index) working_hourly_dataframe[req_spc] = \ hourly_dataframe_internal[hourly_dataframe_internal[self._site_string] == site][req_spc] copy_hourly_dataframe = working_hourly_dataframe.copy() copy_hourly_dataframe[self._site_string] = site # get list of neighbouring sites for each of the chemical species of interest for spc in spc_list: if self.verbose > 1: print('3. Species: ', spc) station_distances = self.get_station_distances(site, reference_sites[spc]) if self.verbose > 1: print('4. Station number:', station_number) if self.verbose > 1: print('5. distances:', station_distances) if self.verbose > 1: print('6.', len(station_distances)) for ii in range(0, min(station_number, len(station_distances))): if self.verbose > 1: print('7. 
ii', ii) station_code = station_distances.index[ii] working_hourly_dataframe['{}_{}'.format(spc, station_code)] = \ hourly_dataframe_internal[hourly_dataframe_internal[self._site_string] == station_code][spc] # get EMEP predictions of chemical species of interest (if needed) if self.verbose > 1: print('EMEP data: {}'.format(self._emep_data)) if not self._emep_data.empty: if self.verbose > 0: print('Using EMEP data') for spc in spc_list: working_hourly_dataframe['{}_{}'.format(spc, 'EMEP')] = \ emep_dataframe_internal[emep_dataframe_internal[self._site_string] == site][spc] # run the imputation process imputed_hourly_dataframe = self.transform_and_impute_data(working_hourly_dataframe,transformer=transformer) # copy imputed data of interest into copy of original dataframe (without EMEP and neighbouring sites) for spc in spc_list: copy_hourly_dataframe['{}_flag'.format(spc)] = 0 if spc in req_spc: copy_hourly_dataframe['{}_flag'.format(spc)] = copy_hourly_dataframe[spc].isna() * 1 copy_hourly_dataframe[spc] = imputed_hourly_dataframe[spc] else: copy_hourly_dataframe[spc] = np.nan output_dataframe = output_dataframe.append(copy_hourly_dataframe) output_dataframe = output_dataframe.reset_index().set_index([self._timestamp_string,self._site_string]) return(output_dataframe) def organise_data(self, hourly_dataframe_filtered, site_list_internal): """ Function for organising the required datasets. This mirrors the imputation function. Args: hourly_dataframe_filtered: hourly dataset, for all measurements, as pandas.Dataframe Index: none Required Columns: timestamp (datetime object): site_id (string): Optional Columns: O3 (float): PM10 (float): PM2.5 (float): NO2 (float): NOXasNO2 (float): SO2 (float): site_list_internal (list, string or int): combined list of sites to retain Returns: hourly_dataframe: hourly dataset, for all measurements, as pandas.Dataframe Required Index: timestamp (datetime object): site_id (string): Optional Columns: O3 (float): PM10 (float): PM2.5 (float): NO2 (float): NOXasNO2 (float): SO2 (float): O3_flag (int): flag indicating imputed data (0=original,1=imputed) PM10_flag (int): PM2.5_flag (int): NO2_flag (int): NOXasNO2_flag (int): SO2_flag (int): """ date_index = pd.date_range(start=self.start, end=self.end, freq='1H', name=self._timestamp_string) output_dataframe = pd.DataFrame() hourly_dataframe_internal = hourly_dataframe_filtered.set_index(self._timestamp_string) spc_list = self.species_list if self.verbose > 1: print('1. Site list internal: ', site_list_internal) for site in site_list_internal: if self.verbose > 1: print('2. Site: ', site) # create new dataframe, with the dates that we are interested in working_hourly_dataframe = pd.DataFrame([], index=date_index) working_hourly_dataframe[self._site_string] = site # copy these to a new dataframe working_hourly_dataframe[spc_list] = \ hourly_dataframe_internal[hourly_dataframe_internal[self._site_string] == site][spc_list] # copy imputed data of interest into copy of original dataframe (without EMEP and neighbouring sites) for spc in spc_list: working_hourly_dataframe['{}_flag'.format(spc)] = 0 # append data to the output dataframe output_dataframe = output_dataframe.append(working_hourly_dataframe) output_dataframe = output_dataframe.reset_index().set_index([self._timestamp_string,self._site_string]) return(output_dataframe) def transform_and_impute_data(self, df_in, transformer): """ Function for organising the transformation of the dataset, then imputing missing data, before detransforming the data and returning it. 
Args: df_in: pandas dataframe containing the datasets to impute Required Index: date (datetime64 objects): date / time for each reading Optional Columns: Measurement data at the site for which we are imputing the data. Only those pollutants which have been measured at this site will be included. O3 (float): PM10 (float): PM2.5 (float): NO2 (float): NOXasNO2 (float): SO2 (float): Reference Columns: Reference data at the X nearest sites to the measurement being processed. All datasets will be included, even for those pollutants which were not included in the optional columns above. So, if 5 reference stations are used, this will give 30 (5*6) columns of reference data. If EMEP data is being used then these are added for EMEP data too, but only at the station of interest (so only another 6 columns are added). O3_[site_id] (float): PM10_[site_id] (float): PM2.5_[site_id] (float): NO2_[site_id] (float): NOXasNO2_[site_id] (float): SO2_[site_id] (float): transformer: the transform function to use, passed so that we can chose based on the variable being operated on Uses: self.imputer Returns: df_out: pandas dataframe, containing the same datasets as above, but including the imputed data too. All imputed data is included (including that for the reference sites) - it is the task of the calling function to only retain the imputed data for the station of interest, and to discard the rest of the imputed data. """ # copy the input array, and note the columns df_work = df_in.copy(deep=True) cols = df_in.columns # find missing datasets to remove # also we note the columns that will be saved, and their order, for transferring data back! col_remove = [] col_save = [] for col in cols: if all(df_work[col].isna()): col_remove.append(col) else: col_save.append(col) df_work = df_work.drop(columns=col_remove) if self.verbose > 2: print('df_work input to power transformer: \n {}'.format(df_work)) # power transformer fitting and transforming transformer.fit(df_work.dropna()) if self.verbose > 2: print('Power transformer: Completed data fitting. Beginning power transformation') np_out = transformer.transform(df_work) if self.verbose > 2: print('Power transformer: Completed transformation. Beginning imputation') # impute the missing values in this new dataframe self.imputer.fit(np_out) if self.verbose > 2: print('Imputer: Completed imputation fitting. Beginning imputer tranformation') imp_out = self.imputer.transform(np_out) if self.verbose > 2: print('Imputer Completed transformation. Beginning inverse transformation') # apply the inverse transformation for our datasets (leaving out the indicator flags) np_inv = transformer.inverse_transform(imp_out[:, :np_out.shape[1]]) if self.verbose > 2: print('Imputer Completed inverse transformation. Beginning copying and tranforming values') # copy the transformed values to a new dataframe df_out = df_in.copy(deep=True) for pos, col in enumerate(col_save): pos_out = list(cols).index(col) df_out.iloc[:, pos_out] = np_inv[:, pos] if self.verbose > 1: print('Imputation: copied transformed values into new dataframe') return df_out
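# Hedged usage sketch (not part of the class above), pieced together from the
# docstrings of __init__, impute_method_setup and process. The input CSV name,
# date strings and output suffix are illustrative assumptions, and the date
# format is whatever DateRangeProcessor.INPUT_DATE_FORMAT expects:
#
#   processor = AurnPostProcessor(out_dir='Aurn_processed_data', verbose=1)
#   processor.impute_method_setup()   # IterativeImputer + BayesianRidge defaults
#   daily = processor.process(
#       in_file='aurn_extracted_hourly.csv',          # assumed file name
#       date_range=['2017-1-1_0', '2017-12-31_23'],   # assumed date strings
#       min_years=1, min_years_reference=3.5,
#       impute_data=True, save_to_csv=True, outfile_suffix='2017')
#   # 'daily' is a (time_stamp, sensor_name) MultiIndex dataframe holding the
#   # .max / .mean / .flag columns described in the process() docstring.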
nilq/baby-python
python
from .util import *
from .db import Database
from .optimizer import *
from .ops import Print, Yield
from .parseops import *
from .udfs import *
from .parse_sql import parse
from .tuples import *
from .tables import *
from .schema import Schema
from .exprs import Attr
from .compile import *
from .context import *
nilq/baby-python
python
from __future__ import print_function
import sys
import getopt


def usage():
    print("""Usage: check_asdis -i <pcap_file> [-o <wrong_packets.pcap>]
    -v      increase verbosity
    -d      hexdiff packets that differ
    -z      compress output pcap
    -a      open pcap file in append mode""", file=sys.stderr)


def main(argv):
    PCAP_IN = None
    PCAP_OUT = None
    COMPRESS = False
    APPEND = False
    DIFF = False
    VERBOSE = 0
    try:
        opts = getopt.getopt(argv, "hi:o:azdv")
        for opt, parm in opts[0]:
            if opt == "-h":
                usage()
                raise SystemExit
            elif opt == "-i":
                PCAP_IN = parm
            elif opt == "-o":
                PCAP_OUT = parm
            elif opt == "-v":
                VERBOSE += 1
            elif opt == "-d":
                DIFF = True
            elif opt == "-a":
                APPEND = True
            elif opt == "-z":
                COMPRESS = True

        if PCAP_IN is None:
            raise getopt.GetoptError("Missing pcap file (-i)")

    except getopt.GetoptError as e:
        print("ERROR: %s" % e, file=sys.stderr)
        raise SystemExit

    from scapy.config import conf
    from scapy.utils import RawPcapReader, RawPcapWriter, hexdiff
    from scapy.layers import all  # noqa: F401

    pcap = RawPcapReader(PCAP_IN)

    pcap_out = None
    if PCAP_OUT:
        pcap_out = RawPcapWriter(PCAP_OUT, append=APPEND, gz=COMPRESS, linktype=pcap.linktype)  # noqa: E501
        pcap_out._write_header(None)

    LLcls = conf.l2types.get(pcap.linktype)
    if LLcls is None:
        print(" Unknown link type [%i]. Can't test anything!" % pcap.linktype, file=sys.stderr)  # noqa: E501
        raise SystemExit

    i = -1
    differ = 0
    failed = 0
    for p1, meta in pcap:
        i += 1
        try:
            p2d = LLcls(p1)
            p2 = str(p2d)
        except KeyboardInterrupt:
            raise
        except Exception as e:
            print("Dissection error on packet %i: %s" % (i, e))
            failed += 1
        else:
            if p1 == p2:
                if VERBOSE >= 2:
                    print("Packet %i ok" % i)
                continue
            else:
                print("Packet %i differs" % i)
                differ += 1
                if VERBOSE >= 1:
                    print(repr(p2d))
                if DIFF:
                    hexdiff(p1, p2)
        if pcap_out is not None:
            pcap_out.write(p1)
    i += 1
    correct = i - differ - failed
    print("%i total packets. %i ok, %i differed, %i failed. %.2f%% correct." % (i, correct, differ,  # noqa: E501
                                                                                failed, i and 100.0 * (correct) / i))  # noqa: E501


if __name__ == "__main__":
    try:
        main(sys.argv[1:])
    except KeyboardInterrupt:
        print("Interrupted by user.", file=sys.stderr)
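# Example invocation (illustrative; the file names are assumptions):
#
#   python check_asdis.py -i capture.pcap -o wrong_packets.pcap -v -d
#
# This re-dissects every packet of capture.pcap with the detected link-layer
# class, reports packets whose re-built bytes differ from the original, and
# writes the differing or failing packets to wrong_packets.pcap.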
nilq/baby-python
python
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Processor class for ViLT. """ from typing import List, Optional, Union from transformers import BertTokenizerFast from ...file_utils import TensorType from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from .feature_extraction_vilt import ViltFeatureExtractor class ViltProcessor: r""" Constructs a ViLT processor which wraps a BERT tokenizer and ViLT feature extractor into a single processor. [`ViltProcessor`] offers all the functionalities of [`ViltFeatureExtractor`] and [`BertTokenizerFast`]. See the docstring of [`~ViltProcessor.__call__`] and [`~ViltProcessor.decode`] for more information. Args: feature_extractor (`ViltFeatureExtractor`): An instance of [`ViltFeatureExtractor`]. The feature extractor is a required input. tokenizer (`BertTokenizerFast`): An instance of ['BertTokenizerFast`]. The tokenizer is a required input. """ def __init__(self, feature_extractor, tokenizer): if not isinstance(feature_extractor, ViltFeatureExtractor): raise ValueError( f"`feature_extractor` has to be of type {ViltFeatureExtractor.__class__}, but is {type(feature_extractor)}" ) if not isinstance(tokenizer, BertTokenizerFast): raise ValueError(f"`tokenizer` has to be of type {BertTokenizerFast.__class__}, but is {type(tokenizer)}") self.feature_extractor = feature_extractor self.tokenizer = tokenizer self.current_processor = self.feature_extractor def save_pretrained(self, save_directory): """ Save a ViLT feature_extractor object and BERT tokenizer object to the directory `save_directory`, so that it can be re-loaded using the [`~ViltProcessor.from_pretrained`] class method. <Tip> This class method is simply calling [`~feature_extraction_utils.FeatureExtractionMixin.save_pretrained`] and [`~tokenization_utils_base.PreTrainedTokenizer.save_pretrained`]. Please refer to the docstrings of the methods above for more information. </Tip> Args: save_directory (`str` or `os.PathLike`): Directory where the feature extractor JSON file and the tokenizer files will be saved (directory will be created if it does not exist). """ self.feature_extractor.save_pretrained(save_directory) self.tokenizer.save_pretrained(save_directory) @classmethod def from_pretrained(cls, pretrained_model_name_or_path, **kwargs): r""" Instantiate a [`ViltProcessor`] from a pretrained ViLT processor. <Tip> This class method is simply calling ViltFeatureExtractor's [`~feature_extraction_utils.FeatureExtractionMixin.from_pretrained`] and BertTokenizerFast's [`~tokenization_utils_base.PreTrainedTokenizer.from_pretrained`]. Please refer to the docstrings of the methods above for more information. </Tip> Args: pretrained_model_name_or_path (`str` or `os.PathLike`): This can be either: - a string, the *model id* of a pretrained feature_extractor hosted inside a model repo on huggingface.co. 
Valid model ids can be located at the root-level, like `bert-base-uncased`, or namespaced under a user or organization name, like `dbmdz/bert-base-german-cased`. - a path to a *directory* containing a feature extractor file saved using the [`~SequenceFeatureExtractor.save_pretrained`] method, e.g., `./my_model_directory/`. - a path or url to a saved feature extractor JSON *file*, e.g., `./my_model_directory/preprocessor_config.json`. **kwargs Additional keyword arguments passed along to both [`SequenceFeatureExtractor`] and [`PreTrainedTokenizer`] """ feature_extractor = ViltFeatureExtractor.from_pretrained(pretrained_model_name_or_path, **kwargs) tokenizer = BertTokenizerFast.from_pretrained(pretrained_model_name_or_path, **kwargs) return cls(feature_extractor=feature_extractor, tokenizer=tokenizer) def __call__( self, images, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = False, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs ) -> BatchEncoding: """ This method uses [`ViltFeatureExtractor.__call__`] method to prepare image(s) for the model, and [`BertTokenizerFast.__call__`] to prepare text for the model. Please refer to the docstring of the above two methods for more information. """ encoding = self.tokenizer( text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs, ) # add pixel_values + pixel_mask encoding_feature_extractor = self.feature_extractor(images, return_tensors=return_tensors) encoding.update(encoding_feature_extractor) return encoding def batch_decode(self, *args, **kwargs): """ This method forwards all its arguments to BertTokenizerFast's [`~PreTrainedTokenizer.batch_decode`]. Please refer to the docstring of this method for more information. """ return self.tokenizer.batch_decode(*args, **kwargs) def decode(self, *args, **kwargs): """ This method forwards all its arguments to BertTokenizerFast's [`~PreTrainedTokenizer.decode`]. Please refer to the docstring of this method for more information. """ return self.tokenizer.decode(*args, **kwargs)
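# Hedged usage sketch (not part of the file above), based only on the docstrings:
# the checkpoint name and image path below are illustrative assumptions.
#
#   from PIL import Image
#   from transformers import ViltProcessor
#
#   processor = ViltProcessor.from_pretrained("dandelin/vilt-b32-finetuned-vqa")
#   image = Image.open("example.jpg")
#   encoding = processor(image, "How many cats are there?", return_tensors="pt")
#   # 'encoding' holds input_ids / attention_mask from the tokenizer plus the
#   # pixel_values / pixel_mask added by the feature extractor.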
nilq/baby-python
python
from mr_scraper.api import dispatch, ScraperMessage


def levels_fyi():
    """Scraper using Puppeteer"""
    message = ScraperMessage(
        scraper="example.scrapers.levels_fyi",
        type='companies',
        payload={'url': '/company/'}
    )
    return dispatch(message)
nilq/baby-python
python
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages

version = '0.12.0'

setup(
    name='SerpScrap',
    version=version,
    description='''
SEO python scraper to extract data from major searchengine result pages.
Extract data like url, title, snippet, richsnippet and the type from
searchresults for given keywords. Detect Ads or make automated screenshots.
You can also fetch text content of urls provided in searchresults or by
your own. It's useful for SEO and business related research tasks.
''',
    long_description=open('README.rst').read(),
    author='Ronald Schmidt',
    author_email='ronald.schmidt@zu-web.de',
    doc_url='http://serpscrap.readthedocs.io/en/latest/',
    url='https://github.com/ecoron/SerpScrap',
    license='MIT',
    packages=find_packages(),
    install_requires=[
        'PySocks==1.6.8',
        'chardet==3.0.4',
        'beautifulsoup4==4.6.3',
        'html2text==2018.1.9',
        'lxml==4.2.3',
        'sqlalchemy==1.2.10',
        'selenium==3.14.1',
        'cssselect==1.0.3',
    ],
    scripts=['install_chrome.sh'],
    classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'Topic :: Internet',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
    ],
    keywords='seo scraper ad-detection scraping keywords',
)
nilq/baby-python
python
import json
import subprocess

from oslo_log import log as logging

from magnum.common import exception

LOG = logging.getLogger(__name__)


class KubeCtl(object):
    def __init__(self, bin='kubectl', global_flags=''):
        super(KubeCtl, self).__init__()
        self.kubectl = '{} {}'.format(bin, global_flags)

    def execute(self, command, definition=None, namespace=None, print_error=True):
        if definition:
            # Pipe the in-memory manifest to kubectl via a heredoc.
            cmd = "cat <<'EOF' | {} {} -f -\n{}\nEOF".format(
                self.kubectl, command, definition
            )
        else:
            if namespace:
                cmd = "{} -n {} {}".format(self.kubectl, namespace, command)
            else:
                cmd = "{} {}".format(self.kubectl, command)

        try:
            r = subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
            return r
        except subprocess.CalledProcessError as ex:
            if "delete" in command:
                LOG.warning("K8s: Delete failed.")
            else:
                exc_msg = "Failed to execute kubectl command, cmd={},\n STDOUT/STDERR={}".format(cmd, ex.stdout.decode())
                LOG.error(exc_msg)
                raise exception.MagnumException(message="Failed to execute kubectl command")

    def apply(self, *args, **kwargs):
        return self.execute('apply', *args, **kwargs)

    def delete(self, *args, **kwargs):
        return self.execute('delete', *args, **kwargs)

    def get(self, resource, namespace=None, **kwargs):
        result = self.execute(
            'get %s -o json' % resource, namespace=namespace, **kwargs
        ).decode()
        ret = json.loads(result)
        if 'items' in ret:
            return ret['items']
        return ret

    def describe(self, *args, **kwargs):
        return self.execute('describe', *args, **kwargs)

    def batch_delete(self, resource_mapping=None):
        """Deletes Kubernetes resources.

        Example for the resource_mapping param:
            [{"service": ["srv1", "srv2"]}, {"deployment": ["deploy1"]}]

        Be careful with the deletion order.
        """
        resource_mapping = resource_mapping or []
        for res in resource_mapping:
            for res_type, items in res.items():
                resources = " ".join(items)
                self.execute("delete %s %s" % (res_type, resources))
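# Hedged usage sketch (not part of the original module); it assumes a reachable
# cluster with a kubeconfig at the path shown, and 'nginx_deployment_yaml' is an
# illustrative manifest string, not something defined above:
#
#   kubectl = KubeCtl(global_flags='--kubeconfig /etc/kubernetes/admin.conf')
#   kubectl.apply(definition=nginx_deployment_yaml)    # apply an in-memory manifest
#   pods = kubectl.get('pods', namespace='default')    # list of dicts from 'items'
#   kubectl.batch_delete([{'deployment': ['nginx']}, {'service': ['nginx']}])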
nilq/baby-python
python
import json
import cfnresponse


def lambda_handler(event, context):
    print(json.dumps(event))

    response_data = {}
    response_data['Data'] = None

    if event['RequestType'] != 'Create':
        cfnresponse.send(event, context, cfnresponse.SUCCESS, response_data, "CustomResourcePhysicalID")
        return

    password = event['ResourceProperties']['Password']
    confirm_password = event['ResourceProperties']['ConfirmPassword']

    if password == confirm_password:
        cfnresponse.send(event, context, cfnresponse.SUCCESS, response_data, "CustomResourcePhysicalID")
    else:
        print('Passwords do not match!')
        cfnresponse.send(event, context, cfnresponse.FAILED, response_data, "CustomResourcePhysicalID")
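# Illustrative event shape (an assumption based on the CloudFormation custom
# resource request contract, not taken from the original template). The handler
# validates the two password properties on Create and acknowledges Update/Delete
# requests with SUCCESS immediately:
#
#   {
#       "RequestType": "Create",
#       "ResponseURL": "https://cloudformation-custom-resource-response-...",
#       "ResourceProperties": {
#           "Password": "example-secret",
#           "ConfirmPassword": "example-secret"
#       }
#   }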
nilq/baby-python
python
from typing import NamedTuple

from thundersnow.precondition import check_argument
from thundersnow.predicate import is_not_blank


class Version(NamedTuple):
    """Semantic Version object"""
    major: str
    minor: str
    patch: str

    def __str__(self):
        return '.'.join(self)


def from_string(s):
    """ '1.2.3' -> Version('1','2','3')"""
    s = str(s)
    check_argument((s is not None) and is_not_blank(s), 'cannot create version from blank string')
    parts = s.split('.')
    if len(parts) == 1:
        major, minor, patch = (parts[0], 0, 0)
    elif len(parts) == 2:
        major, minor, patch = (parts[0], parts[1], 0)
    elif len(parts) == 3:
        major, minor, patch = parts
    else:
        major, minor, patch = parts[:3]
    major, minor, patch = [str(i) for i in (major, minor, patch)]
    return Version(major, minor, patch)


Version.from_string = from_string
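# Minimal usage sketch (added for illustration; uses only the API defined above):
#
#   Version.from_string('1.2')       # -> Version(major='1', minor='2', patch='0')
#   str(Version.from_string('1.2'))  # -> '1.2.0'
#   Version.from_string('1.2.3.4')   # extra parts dropped -> Version(major='1', minor='2', patch='3')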
nilq/baby-python
python
import sys

from loguru import logger

logger.remove()
logger.add(sys.stderr, format="", colorize=True, backtrace=False, diagnose=True)


def div(x, y):
    x / y


def cause(x, y):
    try:
        div(x, y)
    except Exception:
        raise ValueError("Division error")


def context(x, y):
    try:
        cause(x, y)
    except Exception as e:
        raise ValueError("Cause error") from e


try:
    context(1, 0)
except ValueError:
    logger.exception("")
nilq/baby-python
python
import numpy as np

print("Did you know 2 + 2 = {}".format(2 + 2))
print("Of course I knew that, I have 4 fingers")
print("Well, I knew you had 4 fingers. I didn't know that you knew how to count!")
nilq/baby-python
python