def correct_lis(self, uris, blocking=True, verbose=False):
    """ Returns logical interconnects to a consistent state.

    The current logical interconnect state is compared to the associated
    logical interconnect group. Any differences identified are corrected,
    bringing the logical interconnect back to a consistent state. Changes
    are asynchronously applied to all managed interconnects. Note that if
    the changes detected involve differences in the interconnect map
    between the logical interconnect group and the logical interconnect,
    the process of bringing the logical interconnect back to a consistent
    state may involve automatically removing existing interconnects from
    management and/or adding new interconnects for management.
    """
    request = {"uris": uris}
    task, body = self._con.put(uri['li'] + '/compliance', request)
    if blocking is True:
        task = self._activity.wait4task(task, verbose=verbose)
    return task

def get_server_profiles(self):
    body = self._con.get(uri['profiles'])
    return get_members(body)

def add_post(self, activity_post):
    if (self.gplus_posts.count(activity_post.post_id) == 0):
        self.gplus_posts.append(activity_post.post_id)
    self.calculate_impact()
    self.put()

def IdSet(self, value):
    """Setter to be used for default id EndpointsAliasProperty.

    Sets the key on the current entity using the value passed in as the
    ID. Using this key, attempts to retrieve the entity from the datastore
    and update the unset properties of the current entity with those from
    the retrieved entity.

    Args:
        value: An integer ID value for a simple key.

    Raises:
        TypeError: if the value to be set is not an integer. (Though if
            outside of a given range, the get call will also throw an
            exception.)
    """
    if not isinstance(value, (int, long)):
        raise TypeError('ID must be an integer.')
    self.UpdateFromKey(ndb.Key(self.__class__, value))

def __call__(self, shape, name=None):
    r = np_rng.normal(loc=0, scale=0.01, size=shape)
    r = r / np.sqrt(np.sum(r ** 2)) * np.sqrt(shape[1])
    return sharedX(r, name=name)

def shutdown(sts, mode):
    print(mode + 'ing Appliance')
    ret = sts.shutdown(mode)
    pprint(ret)

@property
def last_voted_blockproposal(self):
    "the last block proposal node voted on"
    for r in self.rounds:
        if isinstance(self.rounds[r].proposal, BlockProposal):
            assert isinstance(self.rounds[r].lock, Vote)
            if self.rounds[r].proposal.blockhash == self.rounds[r].lock.blockhash:
                return self.rounds[r].proposal

def __init__(self, motor_config):
    """Initialize a set of DMCCs and their associated motors

    :param motor_config: Config entry mapping motor names to DMCC ids
        and motor indices

    Dictionary entries are in the format:
        <motor_name>: { board_num: [0-3], motor_num: [1-2] }

    """
    self.config = lib.get_config("bot/config.yaml")
    self.logger = lib.get_logger()
    self.is_testing = self.config["test_mode"]["DMCC"]
    # print "Testing: ", self.config["testing"]
    # print pyDMCC.lib._config

    # This instantiates all DMCCs in every DMCCManager, which is probably
    # not optimal, but works fine for our purposes. Potentially better
    # approaches:
    #   - global state: shared dmccs dictionary, instantiated once
    #   - selective instantiation: only initialize the DMCCs we control
    if not self.is_testing:
        dmccs = pyDMCC.autodetect()
        self.logger.debug("Found %d physical DMCC boards" % len(dmccs))
    else:
        self.logger.debug("Skipping autodetect due to test mode")
        dmccs = defaultdict(
            lambda: pyDMCC.DMCC(0, verify=False, bus=None,
                                logger=self.logger))

    self.logger.debug("DMCC Motor conf: {}".format(dmccs))

    self.motors = {}
    for name, conf in motor_config.items():
        if 'invert' in conf.keys():
            invert = conf['invert']
        else:
            invert = False
        try:
            self.motors[name] = DMCCMotor(
                dmccs[conf['board_num']], conf['motor_num'], invert)
        except KeyError:
            self.logger.error(
                "Bad motor definition for motor: '{}'".format(name))
            raise

    self.logger.debug("Setup {}".format(self))

def Equals(self, x):
    if x is self:
        return 1
    return 1

def aes(key, data, counter=False):
    """ encrypt data with aes, using either pycryptopp or PyCrypto.

    Args
        key: The encryption key
        data: plain text data
        counter: a callable, usually not needed
    """
    # using either pycryptopp...
    if hasattr(AES, "process"):
        a = AES(key)
        return a.process(data)
    # ... or PyCrypto
    counter = counter or Counter()
    a = AES.new(key, AES.MODE_CTR, counter=counter)
    rest = len(data) % 16
    if not rest:
        return a.encrypt(data)
    # Data length must be a multiple of 16
    # Pad with bytes all of the same value as the number of padding bytes
    pad = (16 - rest)
    data += chr(pad) * pad
    return a.encrypt(data)[:-pad]

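# A minimal sketch of the padding arithmetic aes() relies on in the
# PyCrypto branch (pure arithmetic, no cipher required; the 10-byte
# sample string is invented for illustration):
data = 'hello spam'             # 10 bytes, not block-aligned
rest = len(data) % 16           # 10
pad = 16 - rest                 # 6
padded = data + chr(pad) * pad  # pad byte value == number of pad bytes
assert len(padded) == 16        # CTR input is now a multiple of 16
# aes() then returns a.encrypt(padded)[:-pad], restoring the original length.
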
def __call__(self, cmd, shell):
    result = self.handler(cmd, shell).strip()
    if result:
        return result.split("|")
    else:
        return []

def visible(self):
    "Return whether the window is displayed and visible."
    d = _xwininfo(self.identifier, "stats")
    return d["Map State"] == "IsViewable"

def test_args_port(self):
    r = PsDashRunner(args=['-p', '5555'])
    self.assertEqual(r.app.config['PSDASH_PORT'], 5555)

def get_label_from_id_name(self, label_info_dict, key_name=None,
                           label_prefix='', non_formatted=False):
    """
    Expects a dict in one of 2 formats (where key_name is "status"):

    Format 1: "status": { "id": 1, "name": "New" },

    Format 2: { "id": 3, "name": "UX/UI Component" }
    """
    if not type(label_info_dict) is dict:
        return None

    # For Format 1 above
    if key_name is not None:
        label_info_dict = label_info_dict.get(key_name, None)
        if label_info_dict is None:
            return None

    if label_info_dict.has_key('id') and label_info_dict.has_key('name'):
        if non_formatted:
            return label_info_dict['name']
        if label_prefix:
            return '%s %s' % (label_prefix, label_info_dict['name'])
        return label_info_dict['name']

    return None

def _handle_double_quote(s, t):
    k, v = t.split('=')
    return k, v.strip('"')

def getspp(sts):
    spps = sts.get_spps()
    for spp in spps:
        print('')
        for k, v in spp.items():
            if k != 'fwComponents':
                print('{0:25} : {1}'.format(k, v))
        print('')

def decode(self, bvlpdu):
    BVLCI.update(self, bvlpdu)
    self.bvlciBDT = []
    while bvlpdu.pduData:
        bdte = Address(unpack_ip_addr(bvlpdu.get_data(6)))
        bdte.addrMask = bvlpdu.get_long()
        self.bvlciBDT.append(bdte)

def __init__(self, instances, default_queue="pool"):
    pool = Queue()
    if hasattr(instances, '__iter__'):
        for i in instances:
            pool.put(i)
    else:
        pool.put(instances)
    self._pool = pool
    self._default_queue = default_queue

def __init__(self, hashFn=0, challenge='', *args, **kwargs):
    super(AccessChallenge, self).__init__(*args, **kwargs)

    self.bslciFunction = BSLCI.accessChallenge
    self.bslciLength = 5
    self.bslciHashFn = hashFn
    self.bslciChallenge = challenge
    if challenge:
        self.bslciLength += len(challenge)

def setUp(self):
    """Setup test hardware files and build motor object."""
    config = path.dirname(path.realpath(__file__)) + "/test_config.yaml"
    self.config = lib.get_config(config)

    # Build motor in testing mode
    self.pwm_num = self.config["two_motors"][0]["PWM"]
    self.gpio_num = self.config["two_motors"][0]["GPIO"]
    self.setup_pwm(self.pwm_num, "1\n", "0\n", "1000\n", "0\n")
    self.setup_gpio(self.gpio_num)
    self.motor = m_mod.Motor(self.pwm_num, self.gpio_num)

def __init__(self, peer):
    if _debug: TCPClient._debug("__init__ %r", peer)
    asyncore.dispatcher.__init__(self)

    # ask the dispatcher for a socket
    self.create_socket(socket.AF_INET, socket.SOCK_STREAM)

    # save the peer
    self.peer = peer

    # create a request buffer
    self.request = ''

    # hold the socket error if there was one
    self.socketError = None

    # try to connect the socket
    if _debug: TCPClient._debug("    - try to connect")
    self.connect(peer)
    if _debug: TCPClient._debug("    - connected (maybe)")

def merge(a, b, path=None):
    "merges b into a"
    if path is None:
        path = []
    for key in b:
        if key in a:
            if isinstance(a[key], dict) and isinstance(b[key], dict):
                merge(a[key], b[key], path + [str(key)])
            elif a[key] == b[key]:
                pass  # same leaf value
            else:
                a[key] = b[key]  # prefer b to a
        else:
            a[key] = b[key]
    return a

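# A short usage sketch of merge() above; the two dicts are invented for
# illustration. Nested dicts are merged recursively and values from b
# win on conflicting leaves.
defaults = {"db": {"host": "localhost", "port": 5432}, "debug": False}
overrides = {"db": {"port": 5433}, "debug": True}
merged = merge(defaults, overrides)
assert merged == {"db": {"host": "localhost", "port": 5433}, "debug": True}
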
def container_literal(
        before,
        left_delimiter,
        items,
        right_delimiter,
        trailing_comma=True,
):
    before = _clean_before(before)
    items = list(items)

    c = _single_line_container(before, left_delimiter, items, right_delimiter)
    if len(c) <= 79:
        return c

    return _multi_line_container(
        before, left_delimiter, items, right_delimiter, trailing_comma)

def update(self, pci):
    """Copy the PCI fields."""
    _PCI.update(self, pci)

    # now do the BACnet PCI fields
    self.pduExpectingReply = pci.pduExpectingReply
    self.pduNetworkPriority = pci.pduNetworkPriority

def _call(rel, jwt=None, data=None):
    rel = 'http://localhost:5000/api/' + rel
    if jwt:
        if data:
            result = requests.post(rel, headers=_auth_header(jwt),
                                   data=json.dumps(data))
        else:
            result = requests.get(rel, headers=_auth_header(jwt))
    else:
        if data:
            result = requests.post(rel, data=json.dumps(data))
        else:
            result = requests.get(rel)
    return result

def restore_selections(view, lines_and_pts):
    view.sel().clear()

    for stored in lines_and_pts:
        view.sel().add(region_from_stored_selection(view, stored))

def append_segment(self, apdu):
    """This function appends the apdu content to the end of the current
    APDU being built.  The segmentAPDU is the context."""
    if _debug: SSM._debug("append_segment %r", apdu)

    # check for no context
    if not self.segmentAPDU:
        raise RuntimeError("no segmentation context established")

    # append the data
    self.segmentAPDU.put_data(apdu.pduData)

def _find_images(self, local_dir=None):
    if local_dir is None:
        local_dir = self.settings.local_images
    return _list_files_recursively(local_dir, relative_to_path=True)

def encode(self, bslpdu):
    addrLen = self.bslciAddress.addrLen

    # make sure the length is correct
    self.bslciLength = 5 + addrLen + len(self.pduData)

    BSLCI.update(bslpdu, self)

    # encode the address
    bslpdu.put(addrLen)
    bslpdu.put_data(self.bslciAddress.addrAddr)

    # encode the rest of the data
    bslpdu.put_data(self.pduData)

def load_or_create_user(username):
    resources.init('Kickflip', 'Kickflip')
    config_json = resources.user.read('config.json')
    if not config_json:
        resources.user.write('config.json', json.dumps({}, sort_keys=True))
        config_json = resources.user.read('config.json')

    settings = json.loads(config_json)
    if not settings.has_key('username'):
        settings['username'] = username
        resources.user.write('config.json', json.dumps(settings, sort_keys=True))
    if not settings.has_key('user_access_key'):
        user = kickflip.create_user(username)
        settings['username'] = user['name']
        settings['user_access_key'] = user['aws_access_key']
        settings['user_secret_access_key'] = user['aws_secret_key']
        settings['user_uuid'] = user['uuid']
        resources.user.write('config.json', json.dumps(settings, sort_keys=True))

    kickflip.set_aws_keys(settings['username'], settings['user_access_key'],
                          settings['user_secret_access_key'])
    kickflip.set_uuid(settings['user_uuid'])

    return (settings['username'], settings['user_uuid'],
            settings['user_access_key'], settings['user_secret_access_key'])

def test_is_builtin_variable(id_value, is_function, expected_result):
    id_node = create_id(id_value, is_function=is_function)
    result = is_builtin_variable(id_node)

    assert expected_result == result

@cherrypy.expose
def template(self, *paths, **_ignored):
    template_name = '/'.join(paths)
    template_path = os.path.join(self.settings.templates, template_name)
    if os.path.isdir(template_path):
        return self.edit_renderer.directory(
            'Contents of ' + (template_name or 'template directory'),
            self.settings.templates, template_name,
            '/template/{}'.format,
            (lambda path: os.path.isdir(path) or '.htm' in path.lower()))
    else:  # A file
        document = _extract_document({}, template_name=template_name)
        if not document.template_name:
            raise cherrypy.HTTPRedirect('/timeout')
        return self.edit_renderer.render_editor(
            document.template_name,
            document.styles,
            actions=self._actions(
                document, **{TEMPLATE_PARAM_NAME: template_name}),
        )

def set_live_channel_list(self, ch_list):
    # swapcase to simulate that a display name does not necessarily have
    # the same case as the input
    for ch in ch_list:
        self.live_ch_list[ch.swapcase()] = ch_list[ch]

@pyqtSlot(bool)
def setEnabled(self, enabled):
    super(Window, self).setEnabled(enabled)

def testWindowFlags(self, windowFlags):
    return self.windowFlags() & windowFlags

@pytest.mark.django_db
def test_orm():
    Conf.ORM = 'default'
    # check broker
    broker = get_broker(list_key='orm_test')
    assert broker.ping() is True
    assert broker.info() is not None
    # clear before we start
    broker.delete_queue()
    # enqueue
    broker.enqueue('test')
    assert broker.queue_size() == 1
    # dequeue
    task = broker.dequeue()[0]
    assert task[1] == 'test'
    broker.acknowledge(task[0])
    assert broker.queue_size() == 0
    # Retry test
    Conf.RETRY = 1
    broker.enqueue('test')
    assert broker.queue_size() == 1
    broker.dequeue()
    assert broker.queue_size() == 0
    sleep(1.5)
    assert broker.queue_size() == 1
    task = broker.dequeue()[0]
    assert broker.queue_size() == 0
    broker.acknowledge(task[0])
    sleep(1.5)
    assert broker.queue_size() == 0
    # delete job
    task_id = broker.enqueue('test')
    broker.delete(task_id)
    assert broker.dequeue() is None
    # fail
    task_id = broker.enqueue('test')
    broker.fail(task_id)
    # bulk test
    for i in range(5):
        broker.enqueue('test')
    Conf.BULK = 5
    tasks = broker.dequeue()
    assert broker.lock_size() == Conf.BULK
    for task in tasks:
        assert task is not None
        broker.acknowledge(task[0])
    # test lock size
    assert broker.lock_size() == 0
    # test duplicate acknowledge
    broker.acknowledge(task[0])
    # delete queue
    broker.enqueue('test')
    broker.enqueue('test')
    broker.delete_queue()
    assert broker.queue_size() == 0
    # back to django-redis
    Conf.ORM = None

def __init__(self, bounds, body):
    self.bounds = bounds
    self.args = [body]

def __init__(self, *args):
    assert all(isinstance(a, UpdatePattern) for a in args)
    self.args = args

def segmented_response(self, apdu):
    if _debug: ServerSSM._debug("segmented_response %r", apdu)

    # client is ready for the next segment
    if (apdu.apduType == SegmentAckPDU.pduType):
        if _debug: ServerSSM._debug("    - segment ack")

        # duplicate ack received?
        if not self.in_window(apdu.apduSeq, self.initialSequenceNumber):
            if _debug: ServerSSM._debug("    - not in window")
            self.restart_timer(self.ssmSAP.segmentTimeout)

        # final ack received?
        elif self.sentAllSegments:
            if _debug: ServerSSM._debug("    - all done sending response")
            self.set_state(COMPLETED)

        else:
            if _debug: ServerSSM._debug("    - more segments to send")

            self.initialSequenceNumber = (apdu.apduSeq + 1) % 256
            self.actualWindowSize = apdu.apduWin
            self.segmentRetryCount = 0
            self.FillWindow(self.initialSequenceNumber)
            self.restart_timer(self.ssmSAP.segmentTimeout)

    # some kind of problem
    elif (apdu.apduType == AbortPDU.pduType):
        self.set_state(COMPLETED)
        self.response(apdu)

    else:
        raise RuntimeError("invalid APDU (7)")

def make_oauth_protected_endpoint(*args, **kwargs):
    """ Returns a dummy API endpoint that returns True.

    This endpoint will be protected with the @oauth_scope decorator --
    see that function's signature for a description of the parameters
    that may be passed.
    """
    @oauth_scope(*args, **kwargs)
    def api_endpoint(access_token, request):
        """ A dummy API endpoint that accepts no URL parameters.

        Always returns True.
        """
        return True

    return api_endpoint

def __enter__(self):
    self.interrupted = False
    self.original_handler = signal.getsignal(signal.SIGINT)

    def handler(signum, frame):
        self.interrupted = True

    signal.signal(signal.SIGINT, handler)

    return self

def make_ntlm_negotiate(self):
    msg = 'NTLMSSP\x00'    # Signature
    msg += pack('<I', 1)   # Message Type 1

    # Flags
    self.flags = (
        self.NTLMSSP_NEGOTIATE_UNICODE |
        self.NTLM_NEGOTIATE_OEM |
        self.NTLMSSP_REQUEST_TARGET |
        self.NTLMSSP_NEGOTIATE_LM_KEY |
        self.NTLMSSP_NEGOTIATE_NTLM |
        self.NTLMSSP_NEGOTIATE_ALWAYS_SIGN |
        # self.NTLMSSP_NEGOTIATE_EXTENDED_SESSIONSECURITY |
        self.NTLMSSP_NEGOTIATE_VERSION
    )
    msg += pack('<I', self.flags)

    # DomainNameFields
    msg += pack('<HHI', 0, 0, 0)
    # WorkstationNameFields
    msg += pack('<HHI', 0, 0, 0)

    # Version (to be removed)
    if self.flags & self.NTLMSSP_NEGOTIATE_VERSION:
        msg += '\x05'            # Product Major: Win XP SP2
        msg += '\x01'            # Product Minor: Win XP SP2
        msg += pack('<H', 2600)  # ProductBuild
        msg += '\x00\x00\x00'    # Reserved
        msg += '\x0F'            # NTLMRevisionCurrent

    return msg

def parse_with(s, parser, lexer):
    global error_list
    error_list = []
    res = parser.parse(s, lexer=lexer)
    if error_list:
        raise ParseErrorList(error_list)
    return res

def error(self, proto, *args):
    if proto in ['http', 'https']:
        # XXX http[s] protocols are special-cased
        dict = self.handle_error['http']  # https is not different than http
        proto = args[2]  # YUCK!
        meth_name = 'http_error_%s' % proto
        http_err = 1
        orig_args = args
    else:
        dict = self.handle_error
        meth_name = proto + '_error'
        http_err = 0
    args = (dict, proto, meth_name) + args
    result = apply(self._call_chain, args)
    if result:
        return result

    if http_err:
        args = (dict, 'default', 'http_error_default') + orig_args
        return apply(self._call_chain, args)

def p_top_macro_atom_eq_lcb_action_rcb(p):
    'top : top MACRO atom EQ LCB action RCB'
    p[0] = p[1]
    d = Definition(app_to_atom(p[3]), p[6])
    p[0].declare(MacroDecl(d))

def show_graph(self, sg):
    return tk_graph_ui.show_graph(sg, self.tk, parent=self,
                                  frame=self.state_frame,
                                  ui_parent=self.ui_parent)

def reducer(self, word, counts):
    yield (word, sum(counts))

def getFlag(self, flag):
    try:
        return self.flags[flag]
    except KeyError:
        return False

def init_bsum(self, bsum, flags):
    flags |= self.flags
    if bsum:
        bsum_gpudata = bsum.gpudata
        self.bsum_zero = [bsum_gpudata, 0, bsum.size, self.lib.stream]
        flags |= 4
    else:
        bsum_gpudata = 0
        self.bsum_zero = 0
        flags &= ~4
    return bsum_gpudata, flags

@placebo_session
def test_create_lambda_function(self, session):
    bucket_name = 'lmbda'
    zip_path = 'Spheres-dev-1454694878.zip'

    z = Zappa(session)
    z.aws_region = 'us-east-1'
    z.load_credentials(session)
    z.credentials_arn = 'arn:aws:iam::12345:role/ZappaLambdaExecution'

    arn = z.create_lambda_function(
        bucket=bucket_name,
        s3_key=zip_path,
        function_name='test_lmbda_function55',
        handler='runme.lambda_handler'
    )

    arn = z.update_lambda_function(
        bucket=bucket_name,
        s3_key=zip_path,
        function_name='test_lmbda_function55',
    )

def handle_start_expr(self, token_text):
    if self.start_of_statement():
        # The conditional starts the statement if appropriate.
        pass

    next_mode = MODE.Expression

    if token_text == '[':
        if self.last_type == 'TK_WORD' or self.flags.last_text == ')':
            if self.flags.last_text in self.line_starters:
                self.output_space_before_token = True
            self.set_mode(next_mode)
            self.append_token(token_text)
            self.indent()
            if self.opts.space_in_paren:
                self.output_space_before_token = True
            return

        next_mode = MODE.ArrayLiteral

        if self.is_array(self.flags.mode):
            if self.flags.last_text == '[' or (
                    self.flags.last_text == ',' and
                    (self.last_last_text == ']' or self.last_last_text == '}')):
                # ], [ goes to a new line
                # }, [ goes to a new line
                if not self.opts.keep_array_indentation:
                    self.append_newline()
    else:
        if self.flags.last_text == 'for':
            next_mode = MODE.ForInitializer
        elif self.flags.last_text in ['if', 'while']:
            next_mode = MODE.Conditional
        else:
            next_mode = MODE.Expression

    if self.flags.last_text == ';' or self.last_type == 'TK_START_BLOCK':
        self.append_newline()
    elif self.last_type in ['TK_END_EXPR', 'TK_START_EXPR', 'TK_END_BLOCK'] or self.flags.last_text == '.':
        # do nothing on (( and )( and ][ and ]( and .(
        # TODO: Consider whether forcing this is required.  Review failing tests when removed.
        self.allow_wrap_or_preserved_newline(token_text, self.input_wanted_newline)
        self.output_wrapped = False
    elif self.last_type not in ['TK_WORD', 'TK_OPERATOR']:
        self.output_space_before_token = True
    elif self.flags.last_word == 'function' or self.flags.last_word == 'typeof':
        # function() vs function (), typeof() vs typeof ()
        if self.opts.jslint_happy:
            self.output_space_before_token = True
    elif self.flags.last_text in self.line_starters or self.flags.last_text == 'catch':
        # TODO: option space_before_conditional
        self.output_space_before_token = True

    # Support of this kind of newline preservation:
    # a = (b &&
    #     (c || d));
    if self.last_type in ['TK_EQUALS', 'TK_OPERATOR']:
        if not self.start_of_object_property():
            self.allow_wrap_or_preserved_newline(token_text)

    self.set_mode(next_mode)
    self.append_token(token_text)

    if self.opts.space_in_paren:
        self.output_space_before_token = True

    # In all cases, if we newline while inside an expression it should be indented.
    self.indent()

def _test(self, response):
    '''test a value'''
    # make an ordered list of cases
    ordered_cases = []
    for case in self.cases:
        if len(ordered_cases) == 0:
            ordered_cases.append(self.cases[case])
        else:
            broke = False
            for index in xrange(len(ordered_cases)):
                if self.cases[case]['mean'] <= ordered_cases[index]['mean']:
                    ordered_cases.insert(index, self.cases[case])
                    broke = True
                    break
            if not broke:
                ordered_cases.append(self.cases[case])

    value = getattr(response, self.comparison_attr)

    # figure out which case best fits our value
    for index in xrange(len(ordered_cases)):
        lower_avg = None
        upper_avg = None
        math = EasyMath()
        if index != 0:
            lower_avg = math.mean([ordered_cases[index - 1]['mean'],
                                   ordered_cases[index]['mean']])
        if index != len(ordered_cases) - 1:
            upper_avg = math.mean([ordered_cases[index]['mean'],
                                   ordered_cases[index + 1]['mean']])

        if not lower_avg and value <= upper_avg:
            return ordered_cases[index]['case']
        elif not upper_avg and value >= lower_avg:
            return ordered_cases[index]['case']
        elif value >= lower_avg and value <= upper_avg:
            return ordered_cases[index]['case']

    # should never get here
    raise Exception('this is shit hitting the fan')

def test_signup_today(self):
    today = date.today()
    self.assertTrue(self.client.login(username=self.user.username,
                                      password=self.user.username))
    response = self.client.get(reverse('home'))
    self.assertEqual(response.status_code, 200)
    self.assertEqual(DailyActivity.objects.count(), 1)

    activity = DailyActivity.objects.all()[0]
    self.assertEqual(activity.user, self.user)
    self.assertEqual(activity.medium, 'Default')
    self.assertEqual(activity.date, today)
    self.assertEqual(activity.days, 0)

def testDailyEngagementReport(self):
    users_test = []
    users_control = []

    num_control1 = 0
    num_test1 = 0
    num_control2 = 0
    num_test2 = 0

    # create users
    for i in range(5):
        users_control.append(create_user_in_group(
            self.experiment, i, Participant.CONTROL_GROUP,
            date.today() - timedelta(days=i)))
        users_test.append(create_user_in_group(
            self.experiment, i, Participant.TEST_GROUP,
            date.today() - timedelta(days=i)))

    # users_<test|control>[0] were enrolled today, [1] 1 day ago, etc.

    report_date = date.today() - timedelta(days=1)
    expected_engagement_score_calls = {
        (users_test[1], date.today() - timedelta(days=1), report_date): 3.2,
        (users_test[2], date.today() - timedelta(days=2), report_date): 2.5,
        (users_test[3], date.today() - timedelta(days=3), report_date): 4.1,
        (users_test[4], date.today() - timedelta(days=4), report_date): 0,
        (users_control[1], date.today() - timedelta(days=1), report_date): 0,
        (users_control[2], date.today() - timedelta(days=2), report_date): 0,
        (users_control[3], date.today() - timedelta(days=3), report_date): 0,
        (users_control[4], date.today() - timedelta(days=4), report_date): 0}

    test_case = self

    class EngagementScoreCalculatorStub(object):
        def calculate_user_engagement_score(self, user,
                                            start_date, end_date):
            test_case.assertNotEquals(user, None)
            test_case.assertTrue(expected_engagement_score_calls.
                                 has_key((user, start_date, end_date)))
            return expected_engagement_score_calls[
                (user, start_date, end_date)]

    (EngagementReportGenerator(EngagementScoreCalculatorStub()).
        generate_daily_report_for_experiment(self.experiment, report_date))

    experiment_report = DailyEngagementReport.objects.get(
        experiment=self.experiment, date=report_date)
    self.assertAlmostEqual((3.2 + 2.5 + 4.1 + 0) / 4.0,
                           experiment_report.test_score)
    self.assertAlmostEqual(0.0, experiment_report.control_score)
    self.assertEquals(4, experiment_report.test_group_size)
    self.assertEquals(4, experiment_report.control_group_size)
    self.assertAlmostEqual(96.819293337188498,
                           experiment_report.confidence)

@staticmethod
def tokenize(sentence):
    """
    Split a sentence into tokens including punctuation.

    Args:
        sentence (string) : String of sentence to tokenize.

    Returns:
        list : List of tokens.
    """
    return [x.strip() for x in re.split('(\W+)?', sentence) if x.strip()]

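# Expected behavior of tokenize() above, shown as a comment because the
# '(\W+)?' pattern depends on pre-3.5 re.split semantics (newer Pythons
# reject group patterns that can match the empty string):
# tokenize("Hello, world!")  ->  ['Hello', ',', 'world', '!']
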
def _OnChar(self, event):
    "Handle the OnChar event by rejecting non-numerics"
    if event.GetModifiers() != 0 and event.GetModifiers() != wx.MOD_SHIFT:
        event.Skip()
        return

    if event.GetKeyCode() in self.acceptableCodes:
        event.Skip()
        return

    wx.Bell()

def __init__(self, title, artist, album, sizeInBytes, lastPlayed, rating):
    self.title = title
    self.artist = artist
    self.album = album

    a = datetime.datetime.strptime(lastPlayed, "%d/%m/%Y %H:%M")
    datenp = np.datetime64(a)
    self.lastPlayed = datenp
    # self.date = datenp - datenp.astype('datetime64[M]') + 1
    self.years = datenp.astype('datetime64[Y]').astype(int) + 1970
    # months = dates.astype('datetime64[M]').astype(int) % 12 + 1

    # self.lastPlayed = datetime.datetime.strptime(lastPlayed, "%d/%m/%Y %H:%M")
    # self.date = datetime.datetime.strptime(lastPlayed, "%d/%m/%Y %H:%M").date()
    # self.time = unicode(datetime.datetime.strptime(lastPlayed, "%d/%m/%Y %H:%M").time())
    # print "type of self.time: ", type(self.time), str(self.time)

    self.sizeInBytes = sizeInBytes
    self.rating = rating

def __call__(self, params, cost):
    updates = []
    grads = T.grad(cost, params)
    grads = clip_norms(grads, self.clipnorm)
    t = theano.shared(floatX(1.))
    b1_t = self.b1 * self.l ** (t - 1)

    for p, g in zip(params, grads):
        g = self.regularizer.gradient_regularize(p, g)
        m = theano.shared(p.get_value() * 0.)
        v = theano.shared(p.get_value() * 0.)

        m_t = b1_t * m + (1 - b1_t) * g
        v_t = self.b2 * v + (1 - self.b2) * g ** 2
        m_c = m_t / (1 - self.b1 ** t)
        v_c = v_t / (1 - self.b2 ** t)
        p_t = p - (self.lr * m_c) / (T.sqrt(v_c) + self.e)
        p_t = self.regularizer.weight_regularize(p_t)
        updates.append((m, m_t))
        updates.append((v, v_t))
        updates.append((p, p_t))
    updates.append((t, t + 1.))
    return updates

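# A float-only check of the Adam bias correction used in __call__ above,
# ignoring the l decay (l = 1 assumed; b1 = 0.9 is an assumed
# hyperparameter). On the first step (t = 1) the bias-corrected first
# moment recovers the raw gradient exactly:
b1, g, t = 0.9, 0.5, 1.0
m_t = b1 * 0.0 + (1 - b1) * g   # first-moment update from m = 0
m_c = m_t / (1 - b1 ** t)       # bias correction: 0.05 / 0.1 == 0.5
assert abs(m_c - g) < 1e-12
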
def configure(self, in_obj):
    super(ColorNoise, self).configure(in_obj)
    self.out_shape = self.in_shape
    try:
        self.nfm, self.H, self.W = self.in_shape
        self.HW = self.H * self.W
    except (TypeError, ValueError):
        raise AttributeError('ColorNoise can only be used with layer providing CHW')
    return self

def SetCellPadding(self, padding):
    """
    Set the padding around cells in this format.

    Padding is either a single numeric (indicating the values on all sides)
    or a collection of paddings [left, top, right, bottom].
    """
    self.cellPadding = self._MakePadding(padding)

def addEdit(self, event):
    with wx.BusyInfo("Please wait for a moment while ODMTools fetches the data "
                     "and stores it in our database", parent=self):
        logger.debug("Beginning editing")
        isSelected, seriesID = self.pnlSelector.onReadyToEdit()

        # logger.debug("Initializing DataTable")
        # # tasks = [("dataTable", (memDB.conn, self.dataTable.myOlv))]
        # tasks = [("dataTable", (self.dataTable.myOlv))]
        # self.taskserver.setTasks(tasks)
        # self.taskserver.processTasks()

        if isSelected:
            self.record_service = self.service_manager.get_record_service(
                self.txtPythonScript, seriesID, connection=self.memDB)
            self._ribbon.toggleEditButtons(True)

            logger.debug("Initializing Plot")
            self.pnlPlot.addEditPlot(self.memDB, seriesID, self.record_service)

            logger.debug("Initializing DataTable")
            self.dataTable.init(self.memDB)

            # set record service for console
            Publisher.sendMessage("setEdit", isEdit=True)

            logger.debug("Enabling Edit")
            self.record_service.toggle_record(True)

            # set the cursor for matplotlib
            selectedObject = self.record_service.get_series()
            Publisher.sendMessage("updateCursor", selectedObject=selectedObject)
        else:
            logger.debug("disabling Edit")
            Publisher.sendMessage("setEdit", isEdit=False)
            self.record_service.toggle_record(False)

            # disable cursor for matplotlib
            selectedObject = self.record_service.get_series()
            Publisher.sendMessage("updateCursor", deselectedObject=selectedObject)

        # self._mgr.Update()

        logger.debug("Recording? %s" % self.record_service._record)
        # self.record_service = None

        self.txtPythonConsole.shell.run(
            "edit_service = app.TopWindow.record_service",
            prompt=False, verbose=False)
        self.txtPythonConsole.shell.run(
            "series_service = edit_service.get_series_service()",
            prompt=False, verbose=False)

        # from meliae import scanner
        # scanner.dump_all_objects("edit_plotting.dat")
        logger.info("Finished Setting up Editing Series: %s " % seriesID)

def test_qcl_relationship(self):
    qcl = self.series.quality_control_level
    assert qcl is not None

def analyze(self, filename):
    """Reimplement analyze method"""
    if self.dockwidget and not self.ismaximized:
        self.dockwidget.setVisible(True)
        self.dockwidget.setFocus()
        self.dockwidget.raise_()
    pythonpath = self.main.get_spyder_pythonpath()
    runconf = runconfig.get_run_configuration(filename)
    wdir, args = None, None
    if runconf is not None:
        if runconf.wdir_enabled:
            wdir = runconf.wdir
        if runconf.args_enabled:
            args = runconf.args

    LineProfilerWidget.analyze(
        self, filename, wdir=wdir, args=args, pythonpath=pythonpath,
        use_colors=self.get_option('use_colors', True))

def _split_stages(node, duplicates=None, aliases=None, stages=None, parents=None):
    """
    Split out all reductions and post-reduction scalar operations into
    separate stacks (stages).  This leaves remaining in the tree anything
    not in these categories.
    """
    # init data structures
    if duplicates is None:
        duplicates = dict()
        aliases = set()
        stages = list()
        parents = list()

    if type(node) is list:
        # don't count assignment node as a parent,
        # it will always exist in the final stage which is processed
        # outside of this function
        if node[0][0] != "assign":
            parents.append(node)

        # post order traversal (pulls the stages deepest in the tree first)
        if len(node) > 3:
            _split_stages(node[3], duplicates, aliases, stages, parents)
        if len(node) > 4:
            _split_stages(node[4], duplicates, aliases, stages, parents)

        if len(parents) > 0:
            parents.pop()

        if node[0][0] in _reduction_ops:
            red_stack = _process_node(node, aliases, duplicates)
            if red_stack:
                # add this reduction stack to the stages
                stages.append(("reduction", red_stack))

                # decrement reduction count for all parents
                for parent in parents:
                    parent[2] -= 1

                # walk up the parent list
                # TODO: potentially do this iteratively to find longest
                # common set of operations
                scalar_parent = None
                for parent in parents[::-1]:
                    # find the highest parent that is both scalar and has
                    # no other child reductions
                    if parent[1] and parent[2] == 0:
                        scalar_parent = parent
                    else:
                        break

                # if there are any scalar operations over this reduction,
                # remove them from the tree as well
                if scalar_parent is not None:
                    scalar_stack = _process_node(
                        scalar_parent, aliases, duplicates)
                    if scalar_stack:
                        # add this scalar stack to the stages
                        stages.append(("scalar", scalar_stack))

    return stages

def on_next(self, element):
    self._post_message(self.formatter.create_element_message(element))

def getXMLElement(self):
    item = ElementTree.Element('item', self.attrb)
    for (k, v) in self.content.iteritems():
        attrb = {}
        if k == 'icon' and self.icon_type:
            attrb['type'] = self.icon_type
        sub = ElementTree.SubElement(item, k, attrb)
        sub.text = v
    return item

def capture_seconds(num_seconds, chunksize, rate, width):
    num_buffers = int(float(num_seconds * rate) / chunksize)
    return capture_buffers(num_buffers, chunksize, rate, width)

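# Buffer-count arithmetic from capture_seconds() above, with invented
# audio parameters: 2 seconds at 44100 Hz in 1024-frame chunks rounds
# down to 86 whole buffers.
assert int(float(2 * 44100) / 1024) == 86
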
def configure(self, in_obj):
    super(BiRNN, self).configure(in_obj)
    (self.nin, self.nsteps) = self.in_shape

    self.out_shape = (2 * self.nout, self.nsteps)
    self.gate_shape = (2 * self.nout * self.ngates, self.nsteps)
    if self.split_inputs is True and self.nin % 2 == 1:
        raise ValueError("# inputs units is odd and split_inputs is True")

    self.o_shape = (self.nout, self.nsteps)
    self.g_shape = (self.nout * self.ngates, self.nsteps)
    self.i_shape = (self.nin / 2, self.nsteps) if self.split_inputs \
        else (self.nin, self.nsteps)

    if self.weight_shape is None:
        self.weight_shape = (self.nout, self.nin)
    return self

def init_params(self, shape):
    """
    Initialize params for GRU, including weights and biases.

    The weight matrix and bias matrix are concatenated from the weights
    for inputs, the weights for recurrent inputs, and the bias.  The
    shape of the weights is (number of inputs + number of outputs + 1)
    by (number of outputs * 3).

    Arguments:
        shape (Tuple): contains number of outputs and number of inputs
    """
    super(GRU, self).init_params(shape)
    (nout, nin) = shape

    # indices for slicing gate buffers
    (rz1, rz2) = (0, nout * 2)
    (c1, c2) = (nout * 2, nout * 3)

    self.Wrz_recur = self.W_recur[rz1:rz2]
    self.Whcan_recur = self.W_recur[c1:c2]

    self.b_rz = self.b[rz1:rz2]
    self.b_hcan = self.b[c1:c2]

    self.dWrz_recur = self.dW_recur[rz1:rz2]
    self.dWhcan_recur = self.dW_recur[c1:c2]

def createOpener():
    '''Create a generic opener for http.

    This is particularly helpful when there is a proxy server in line.'''
    # Thanks to: http://www.decalage.info/en/python/urllib2noproxy
    proxy_handler = urllib2.ProxyHandler(HTTP_PROXY)
    opener = urllib2.build_opener(proxy_handler)
    urllib2.install_opener(opener)
    return opener

def build(self):
    cmd = {'sender': self.sender,
           'receiver': self.receiver,
           'output': self.output,
           'cmd': self.cmd,
           'jobid': self.jobid}
    return base64.b64encode(json.dumps(cmd))

def __init__(self, ttps=None):
    super(TTPs, self).__init__(ttps)
    self.kill_chains = KillChains()

def csv_sym(sym, d_data, ls_keys, s_directory):
    bool_first_iter = True

    for key in ls_keys:
        if bool_first_iter == True:
            df_sym = d_data[key].reindex(columns=[sym])
            df_sym = df_sym.rename(columns={sym: key})
            bool_first_iter = False
        else:
            df_temp = d_data[key].reindex(columns=[sym])
            df_temp = df_temp.rename(columns={sym: key})
            df_sym = df_sym.join(df_temp, how='outer')

    symfilename = sym.split('-')[0]
    sym_file = open(s_directory + symfilename + '.csv', 'w')
    sym_file.write("Date,Open,High,Low,Close,Volume,Adj Close \n")

    ldt_timestamps = list(df_sym.index)
    ldt_timestamps.reverse()

    for date in ldt_timestamps:
        date_to_csv = '{:%Y-%m-%d}'.format(date)
        string_to_csv = date_to_csv
        for key in ls_keys:
            string_to_csv = string_to_csv + ',' + str(df_sym[key][date])
        string_to_csv = string_to_csv + '\n'
        sym_file.write(string_to_csv)

@assert_warnings
def test_deprecated_related_packages(self):
    e = et.ExploitTarget()
    e.related_packages.append(STIXPackage())
    self.assertEqual(len(e.related_packages), 1)

@pytest.mark.parametrize('cls', [AsciiTable, UnixTable])
def test_attributes(cls):
    """Test different table attributes."""
    table_data = [
        ['Name', 'Color', 'Type'],
        ['Avocado', 'green', 'nut'],
        ['Tomato', 'red', 'fruit'],
        ['Lettuce', 'green', 'vegetable'],
    ]
    table = cls(table_data)  # '| Lettuce | green | vegetable |'

    table.outer_border = False
    assert table.column_max_width(0) == 58
    assert table.column_max_width(1) == 56
    assert table.column_max_width(2) == 60
    table.outer_border = True

    table.inner_column_border = False
    assert table.column_max_width(0) == 58
    assert table.column_max_width(1) == 56
    assert table.column_max_width(2) == 60
    table.outer_border = False
    assert table.column_max_width(0) == 60
    assert table.column_max_width(1) == 58
    assert table.column_max_width(2) == 62
    table.outer_border = True
    table.inner_column_border = True

    table.padding_left = 0
    assert table.column_max_width(0) == 59
    assert table.column_max_width(1) == 57
    assert table.column_max_width(2) == 61

    table.padding_right = 5
    assert table.column_max_width(0) == 47
    assert table.column_max_width(1) == 45
    assert table.column_max_width(2) == 49

def featHiLow(dData, lLookback=20, b_human=False):
    '''
    @summary: 1 represents a high for the lookback, -1 represents a low
    @param dData: Dictionary of data to use
    @param lLookback: Number of days to look in the past
    @param b_human: if true return dataframe to plot
    @return: DataFrame array containing values
    '''
    if b_human:
        for sym in dData['close']:
            x = 1000 / dData['close'][sym][0]
            dData['close'][sym] = dData['close'][sym] * x
        return dData['close']

    dfPrice = dData['close']

    # Find max for each price over the lookback
    maxes = pand.rolling_max(dfPrice, lLookback, 1)

    # Find min
    mins = pand.rolling_min(dfPrice, lLookback, 1)

    # Find range
    ranges = maxes - mins

    # Calculate (price - min) * 2 / range - 1
    dfRet = (((dfPrice - mins) * 2) / ranges) - 1

    return dfRet

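# Sanity check of the scaling used in featHiLow above: the expression
# (price - min) * 2 / range - 1 maps the lookback low to -1 and the
# lookback high to +1 (sample prices are invented):
lo, hi = 10.0, 14.0
assert ((lo - lo) * 2) / (hi - lo) - 1 == -1.0
assert ((hi - lo) * 2) / (hi - lo) - 1 == 1.0
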
def exportChildren(self, lwrite, level, nsmap, namespace_=XML_NS,
                   name_='ExploitTargetsType', fromsubclass_=False,
                   pretty_print=True):
    super(ExploitTargetsType, self).exportChildren(
        lwrite, level, nsmap, namespace_, name_, fromsubclass_=True,
        pretty_print=pretty_print)
    if pretty_print:
        eol_ = '\n'
    else:
        eol_ = ''
    for Exploit_Target_ in self.Exploit_Target:
        Exploit_Target_.export(lwrite, level, nsmap, namespace_,
                               name_='Exploit_Target',
                               pretty_print=pretty_print)

@classmethod
@contextmanager
def temp(cls, domain, token):
    import shopify
    original_site = shopify.ShopifyResource.get_site()
    original_token = shopify.ShopifyResource.get_headers().get('X-Shopify-Access-Token')
    original_session = shopify.Session(original_site, original_token)

    session = Session(domain, token)
    shopify.ShopifyResource.activate_session(session)
    yield
    shopify.ShopifyResource.activate_session(original_session)

def sequenceProb(self, newData):
    """
    Returns the probability that this HMM generated the given sequence.

    Uses the forward-backward algorithm.  If given an array of
    sequences, returns a 1D array of probabilities.
    """
    if len(newData.shape) == 1:
        return forwardbackward(self.prior,
                               self.transition_matrix,
                               self.emission_matrix,
                               newData,
                               self.num_states,
                               self.precision)
    elif len(newData.shape) == 2:
        return numpy.array([forwardbackward(self.prior,
                                            self.transition_matrix,
                                            self.emission_matrix,
                                            newSeq,
                                            self.num_states,
                                            self.precision)
                            for newSeq in newData])

def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
    if nodeName_ == 'Tools':
        obj_ = ToolsType.factory()
        obj_.build(child_)
        self.set_Tools(obj_)
    elif nodeName_ == 'Infrastructure':
        obj_ = InfrastructureType.factory()
        obj_.build(child_)
        self.set_Infrastructure(obj_)
    elif nodeName_ == 'Personas':
        obj_ = PersonasType.factory()
        obj_.build(child_)
        self.set_Personas(obj_)

@retry(9, Exception, 0.01, 'pypet.retry')
def _put_on_queue(self, to_put):
    """Puts data on queue"""
    old = self.pickle_queue
    self.pickle_queue = False
    try:
        self.queue.put(to_put, block=True)
    finally:
        self.pickle_queue = old

def thumbnail_id(self, name):
    return '%s_thumb_id' % name

def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
    if nodeName_ == 'Suggested_COA':
        obj_ = stix_common_binding.RelatedCourseOfActionType.factory()
        obj_.build(child_)
        self.Suggested_COA.append(obj_)
    super(SuggestedCOAsType, self).buildChildren(child_, node, nodeName_, True)

def exportAttributes(self, lwrite, level, already_processed,
                     namespace_='ttp:', name_='MalwareInstanceType'):
    if self.idref is not None and 'idref' not in already_processed:
        already_processed.add('idref')
        lwrite(' idref=%s' % (quote_attrib(self.idref),))
    if self.id is not None and 'id' not in already_processed:
        already_processed.add('id')
        lwrite(' id=%s' % (quote_attrib(self.id),))

def matchesExclusions(strippedRule):
    strippedDomain = strippedRule.split()[1]
    for exclusionRegex in settings["exclusionregexs"]:
        if exclusionRegex.search(strippedDomain):
            return True
    return False

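# A hedged sketch of matchesExclusions() in use; the real `settings`
# dict is module state in the hosts project, so these values are
# invented and the sketch assumes it runs in the same module:
import re
settings = {"exclusionregexs": [re.compile(r"(^|\.)example\.com$")]}
assert matchesExclusions("0.0.0.0 ads.example.com")
assert not matchesExclusions("0.0.0.0 example.org")
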
def perform(self, token_stream, text):
    return self.function(token_stream, text)

def test_list_add(self):
    fields = brewery.FieldList(["foo", "bar"])
    fields.append("baz")
    self.assertEqual(3, len(fields))

def store_references(self, references):
    """Stores references to disk and may collect garbage."""
    for trajectory_name in references:
        self._storage_service.store(pypetconstants.LIST,
                                    references[trajectory_name],
                                    trajectory_name=trajectory_name)
    self._check_and_collect_garbage()

def pop(self):
    """Pop an arbitrary element from the stack."""
    try:
        key = heapq.heappop(self.heap)
        return self.elements[key]
    except IndexError:
        raise StopIteration

def writeOpeningHeader(finalFile):
    finalFile.seek(0)  # reset file pointer
    fileContents = finalFile.read()  # save content

    finalFile.seek(0)  # write at the top
    writeData(finalFile, "# This hosts file is a merged collection of hosts from reputable sources,\n")
    writeData(finalFile, "# with a dash of crowd sourcing via Github\n#\n")
    writeData(finalFile, "# Date: " + time.strftime("%B %d %Y", time.gmtime()) + "\n")
    if settings["extensions"]:
        writeData(finalFile, "# Extensions added to this file: " + ", ".join(settings["extensions"]) + "\n")
    writeData(finalFile, "# Number of unique domains: " + "{:,}\n#\n".format(settings["numberofrules"]))
    writeData(finalFile, "# Fetch the latest version of this file: https://raw.githubusercontent.com/StevenBlack/hosts/master/" + os.path.join(settings["outputsubfolder"], "") + "hosts\n")
    writeData(finalFile, "# Project home page: https://github.com/StevenBlack/hosts\n#\n")
    writeData(finalFile, "# ===============================================================\n")
    writeData(finalFile, "\n")

    if not settings["skipstatichosts"]:
        writeData(finalFile, "127.0.0.1 localhost\n")
        writeData(finalFile, "127.0.0.1 localhost.localdomain\n")
        writeData(finalFile, "127.0.0.1 local\n")
        writeData(finalFile, "255.255.255.255 broadcasthost\n")
        writeData(finalFile, "::1 localhost\n")
        writeData(finalFile, "fe80::1%lo0 localhost\n")
        if platform.system() == "Linux":
            writeData(finalFile, "127.0.1.1 " + socket.gethostname() + "\n")
        writeData(finalFile, "\n")

    preamble = os.path.join(BASEDIR_PATH, "myhosts")
    if os.path.isfile(preamble):
        with open(preamble, "r") as f:
            writeData(finalFile, f.read())

    finalFile.write(fileContents)

def test_distance(self):
    coordinate_pairs = {
        1: [[10, 10], [10, 10]],
        2: [[33.7550, 84.3900], [40.7127, 74.0059]],
        3: [[0, 0], [0, 0]],
        4: [[-33.7550, -84.3900], [40.7127, 74.0059]],
    }

    results = {
        1: 0.0,
        2: 1200.0,
        3: 0.0,
        4: 17959,
    }

    for coordinate_pair in coordinate_pairs:
        source, destination = coordinate_pairs[coordinate_pair]
        expected = results[coordinate_pair]
        got = round(bt.distance(source, destination))
        self.assertTrue(expected == got)

def send_notification(self, token_hex, payload, identifier=0, expiry=0):
    """
    in enhanced mode, send_notification may return error response from
    APNs if any
    """
    if self.enhanced:
        self._last_activity_time = time.time()
        message = self._get_enhanced_notification(token_hex, payload,
                                                  identifier, expiry)
        _logger.debug("message: %s" % b2a_hex(message))

        timeout_sec = 2
        timeout_tot = 0
        i = 0
        _succ = False
        while timeout_tot < 30:
            try:
                with self._send_lock:
                    timeout_tot += timeout_sec
                    i += 1
                    # self._make_sure_error_response_handler_worker_alive()
                    self.write(message)
                    self._sent_notifications.append(
                        dict({'id': identifier, 'message': message}))
                    _logger.debug("send notification to APNS.")

                    rlist, _, _ = select.select(
                        [self._connection()], [], [], WAIT_READ_TIMEOUT_SEC)
                    _logger.debug("got response from APNS: %d" % len(rlist))
                    if len(rlist) > 0:  # there's some data from APNs
                        self._socket.settimeout(0.5)
                        buff = self.read(ERROR_RESPONSE_LENGTH)
                        if len(buff) == ERROR_RESPONSE_LENGTH:
                            command, status, identifier = unpack(
                                ERROR_RESPONSE_FORMAT, buff)
                            if 8 == command:
                                # there is error response from APNS
                                # if self._response_listener:
                                #     self._response_listener(Util.convert_error_response_to_dict(error_response))
                                _logger.info("got error-response from APNS: %d" % status)
                                self._disconnect()
                                # self._resend_notifications_by_id(identifier)
                        if len(buff) == 0:
                            _logger.warning("read socket got 0 bytes data")  # DEBUG
                            self._disconnect()

                    _succ = True
                    break
            except socket_error as e:
                timeout_sec *= 2
                _logger.exception(
                    "sending notification with id:" + str(identifier) +
                    " to APNS failed: " + str(type(e)) + ": " + str(e) +
                    " in " + str(i + 1) + "th attempt, will wait " +
                    str(timeout_sec) + " secs for next action")
                time.sleep(timeout_sec)  # wait potential error-response to be read

        return _succ
    else:
        message = self._get_notification(token_hex, payload)
        _logger.debug("message: %s" % b2a_hex(message))
        self.write(message)
        return True

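# Retry budget of the loop above, traced for clarity: timeout_tot adds
# the current timeout_sec before each attempt and timeout_sec doubles
# only after a socket error, so the running totals are 2, 6, 14, 30.
# At most four failed attempts fit under the 30-second cap before the
# method gives up and returns _succ == False.
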
def compare_explore_more_trials_with_removing_duplicates(self, traj):
    self.explored = {
        'Normal.trial': [0, 1, 0, 1, 0, 1],
        'Numpy.double': [np.array([1.0, 2.0, 3.0, 4.0]),
                         np.array([1.0, 2.0, 3.0, 4.0]),
                         np.array([-1.0, 3.0, 5.0, 7.0]),
                         np.array([-1.0, 3.0, 5.0, 7.0]),
                         np.array([-1.0, 2.0, 3.0, 5.0]),
                         np.array([-1.0, 2.0, 3.0, 5.0])]}

    traj.f_explore(self.explored)

def __init__(self, url):
    # TODO: currently only local paths are supported
    if is_local(url) and not url.endswith("/"):
        url = url + "/"

    self.url = url

    infopath = urljoin(url, "datapackage.json")
    metadata = read_json(infopath)
    with open(infopath) as f:
        try:
            metadata = json.load(f)
        except Exception as e:
            raise Exception("Unable to read %s: %s" % (infopath, str(e)))

    self.name = metadata.get("name")
    self._resources = OrderedDict()
    for i, res in enumerate(metadata["resources"]):
        resource = DataPackageResource(self, res)
        if not resource.name:
            resource.name = "resource%d" % i

        if resource.name in self._resources:
            raise Exception("Duplicate resource '%s' in data package '%s'"
                            % (resource.name, self.name))
        self._resources[resource.name] = resource

def redraw(self, view):
    """Redraw all marks in the given view."""
    self.clear(view)
    self.draw(view)

def __init__(self, fields=None, chars=None):
    """Creates a node for string stripping.

    :Attributes:
        * `fields`: fields to be stripped
        * `chars`: characters to be stripped
    """
    super(StringStripNode, self).__init__()

    self.fields = fields
    self.chars = chars

def _remove_all_contracts(self):
    to_remove = list()
    with self.contracts_lock:
        for c in self.contracts.values():
            to_remove.append(c)

    for c in to_remove:
        self._remove_contract(c)

def on_query_completions(self, view, prefix, locations):
    if not view.match_selector(locations[0],
                               "source.cfscript.cfc - text - meta - string - comment"):
        return []

    if not SETTINGS.get("component_method_completions"):
        return

    # set local _completions variable
    _completions = []

    # try and find the cfc file and add its methods
    try:
        cfc_region = view.find_by_selector(
            "meta.component-operator.extends.value.cfscript")[0]
    except IndexError:
        cfc_region = ""

    if len(cfc_region):
        extendspath = view.substr(cfc_region).replace(".", "/")

        # first check the current directory for nested cfc path
        # get the dir this file is in first
        this_file = view.file_name()
        dir_len = this_file.rfind('/')  # (for OSX)
        if not dir_len > 0:
            dir_len = this_file.rfind('\\')  # (for Windows)
        this_dir = this_file[:(dir_len + 1)]  # adds ending '/'

        cfc_file = this_dir + extendspath + ".cfc"
        if not os.path.isfile(cfc_file):
            # check for the cfc in root folders
            for folder in sublime.active_window().folders():
                if os.path.isfile(folder + "/" + extendspath + ".cfc"):
                    cfc_file = folder + "/" + extendspath + ".cfc"
                    break
        try:
            add_methods(cfc_file, view.substr(cfc_region).split(".")[-1])
        except UnboundLocalError:
            pass
        except IOError:
            pass

    # add this file's methods to autocomplete
    add_methods(view.file_name(), "this")

    # add the completions to the local _completions variable
    _completions.extend(completions)

    # prevents dups
    del completions[:]

    return _completions

def initialize_test(self):
    """Test if calculate throws an error as expected."""
    data = [[0.0, 0.0], [1, 0.1], [2, 0.2], [3, 0.3], [4, 0.4]]
    tsOrg = TimeSeries.from_twodim_list(data)
    tsCalc = TimeSeries.from_twodim_list(data)

    bem = BaseErrorMeasure()

    try:
        bem.initialize(tsOrg, tsCalc)
    except NotImplementedError:
        pass
    else:
        assert False    # pragma: no cover

    assert not bem.initialize(tsOrg, TimeSeries())

def testDependentServiceExtension(self):
    """Assigning more references to an attribute"""
    importConfigurationManifest(
        'wdrtest/manifests/references/dependent_service.wdrc', topology)
    importConfigurationManifest(
        'wdrtest/manifests/references/dependent_service2.wdrc', topology)
    server = getid1(
        '/Cell:%(cellName)s/Node:%(nodeName)s/Server:%(serverName)s/'
        % topology)
    referenceList = server.lookup1(
        'CustomService',
        {'displayName': 'with dependencies'},
        'customServices').prerequisiteServices
    self.assertEquals(len(referenceList), 5)
    self.assertEquals(referenceList[0].displayName, 'first')
    self.assertEquals(referenceList[1].displayName, 'second')
    self.assertEquals(referenceList[2].displayName, 'fifth')
    self.assertEquals(referenceList[3].displayName, 'third')
    self.assertEquals(referenceList[4].displayName, 'fourth')