Columns: language (string, 2 classes: java, python) and func_code_string (string, length 63 to 466k characters).
java
public Object asConstantPoolValue() { StringBuilder stringBuilder = new StringBuilder().append('('); for (TypeDescription parameterType : getParameterTypes()) { stringBuilder.append(parameterType.getDescriptor()); } String descriptor = stringBuilder.append(')').append(getReturnType().getDescriptor()).toString(); return new Handle(getHandleType().getIdentifier(), getOwnerType().getInternalName(), getName(), descriptor, getOwnerType().isInterface()); }
java
public static LocalizationContext getLocalizationContext(PageContext pc, String basename) { LocalizationContext locCtxt = null; ResourceBundle bundle = null; if ((basename == null) || basename.equals("")) { return new LocalizationContext(); } // Try preferred locales Locale pref = SetLocaleSupport.getLocale(pc, Config.FMT_LOCALE); if (pref != null) { // Preferred locale is application-based bundle = findMatch(basename, pref); if (bundle != null) { locCtxt = new LocalizationContext(bundle, pref); } } else { // Preferred locales are browser-based locCtxt = findMatch(pc, basename); } if (locCtxt == null) { // No match found with preferred locales, try using fallback locale pref = SetLocaleSupport.getLocale(pc, Config.FMT_FALLBACK_LOCALE); if (pref != null) { bundle = findMatch(basename, pref); if (bundle != null) { locCtxt = new LocalizationContext(bundle, pref); } } } if (locCtxt == null) { // try using the root resource bundle with the given basename try { ClassLoader cl = getClassLoaderCheckingPrivilege(); bundle = ResourceBundle.getBundle(basename, EMPTY_LOCALE, cl); if (bundle != null) { locCtxt = new LocalizationContext(bundle, null); } } catch (MissingResourceException mre) { // do nothing } } if (locCtxt != null) { // set response locale if (locCtxt.getLocale() != null) { SetLocaleSupport.setResponseLocale(pc, locCtxt.getLocale()); } } else { // create empty localization context locCtxt = new LocalizationContext(); } return locCtxt; }
java
public static Map<String, Method> getClassWriteMethods(Class<?> clazz) { Map<String, Method> writeMethods = classWriteMethods.get(clazz); if (writeMethods == null) { cacheReadWriteMethodsAndBoxField(clazz); return classWriteMethods.get(clazz); } else return writeMethods; }
python
def getProfileInfo(exp): """ Collects compute-timer profiling information for every region in the experiment's network. Parameters: ---------------------------- @param exp The experiment whose network regions are profiled. @return (float) Total elapsed compute time of the L2 column regions. """ totalTime = 0.000001 for region in exp.network.regions.values(): timer = region.getComputeTimer() totalTime += timer.getElapsed() # Sort the region names regionNames = list(exp.network.regions.keys()) regionNames.sort() count = 1 profileInfo = [] L2Time = 0.0 L4Time = 0.0 for regionName in regionNames: region = exp.network.regions[regionName] timer = region.getComputeTimer() count = max(timer.getStartCount(), count) profileInfo.append([region.name, timer.getStartCount(), timer.getElapsed(), 100.0 * timer.getElapsed() / totalTime, timer.getElapsed() / max(timer.getStartCount(), 1)]) if "L2Column" in regionName: L2Time += timer.getElapsed() elif "L4Column" in regionName: L4Time += timer.getElapsed() return L2Time
python
def optimize(objective_function, domain, stopping_condition, parameters=None, position_update=functions.std_position, velocity_update=functions.std_velocity, parameter_update=functions.std_parameter_update, measurements=(), measurer=dictionary_based_metrics): """ Perform particle swarm optimization of the given fitness function. Args: objective_function: the cost function to optimize. stopping_condition: function specifying the stopping condition. parameters: dictionary: parameter dictionary for the PSO. Returns: cipy.algorithms.pso.Particle: The global best particle. """ params = __init_parameters__(parameters) rng = np.random.RandomState(params['seed']) initial_swarm = [functions.initialize_particle(rng, domain, objective_function) for i in range(params['swarm_size'])] state = types.PSOState(rng, params, iterations=0, swarm=initial_swarm) topology_function = state.params['topology'] update_fitness = functions.update_fitness update_particle = functions.update_particle results, measure = measurer(measurements) while not stopping_condition(state): n_bests = topology_function(state) state = state._replace(swarm=[update_particle(position_update, velocity_update, state, n_bests, ip) for ip in enumerate(state.swarm)]) state = state._replace(swarm=[update_fitness(objective_function, particle) for particle in state.swarm], iterations=state.iterations + 1) state = parameter_update(state, objective_function) results = measure(results, state) return functions.solution(state.swarm), results
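A minimal call sketch for the optimize() above, using a sphere objective and an iteration-count stopping condition; the domain and parameters shapes are assumptions inferred from the docstring, not a verified cipy API.

import numpy as np

def sphere(position):
    # Cost function: sum of squares, minimized at the origin.
    return float(np.sum(np.asarray(position) ** 2))

def max_iterations(n):
    # Stopping condition: halt once the swarm state reports n iterations.
    return lambda state: state.iterations >= n

# Hypothetical invocation (domain and parameter shapes are assumptions):
# best, results = optimize(sphere, domain=(-5.0, 5.0),
#                          stopping_condition=max_iterations(100),
#                          parameters={'swarm_size': 25, 'seed': 42})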
java
public final <K> Ix<T> distinct(IxFunction<? super T, K> keySelector) { return new IxDistinct<T, K>(this, nullCheck(keySelector, "keySelector is null")); }
python
def ReceiveMessages(self, client_id, messages): """Receives and processes the messages from the source. For each message we update the request object, and place the response in that request's queue. If the request is complete, we send a message to the worker. Args: client_id: The client which sent the messages. messages: A list of GrrMessage RDFValues. """ if data_store.RelationalDBEnabled(): return self.ReceiveMessagesRelationalFlows(client_id, messages) now = time.time() with queue_manager.QueueManager(token=self.token) as manager: for session_id, msgs in iteritems( collection.Group(messages, operator.attrgetter("session_id"))): # Remove and handle messages to WellKnownFlows leftover_msgs = self.HandleWellKnownFlows(msgs) unprocessed_msgs = [] for msg in leftover_msgs: if (msg.auth_state == msg.AuthorizationState.AUTHENTICATED or msg.session_id == self.unauth_allowed_session_id): unprocessed_msgs.append(msg) if len(unprocessed_msgs) < len(leftover_msgs): logging.info("Dropped %d unauthenticated messages for %s", len(leftover_msgs) - len(unprocessed_msgs), client_id) if not unprocessed_msgs: continue for msg in unprocessed_msgs: manager.QueueResponse(msg) for msg in unprocessed_msgs: # Messages for well known flows should notify even though they don't # have a status. if msg.request_id == 0: manager.QueueNotification(session_id=msg.session_id) # Those messages are all the same, one notification is enough. break elif msg.type == rdf_flows.GrrMessage.Type.STATUS: # If we receive a status message from the client it means the client # has finished processing this request. We therefore can de-queue it # from the client queue. msg.task_id will raise if the task id is # not set (message originated at the client, there was no request on # the server), so we have to check .HasTaskID() first. if msg.HasTaskID(): manager.DeQueueClientRequest(msg) manager.QueueNotification( session_id=msg.session_id, last_status=msg.request_id) stat = rdf_flows.GrrStatus(msg.payload) if stat.status == rdf_flows.GrrStatus.ReturnedStatus.CLIENT_KILLED: # A client crashed while performing an action, fire an event. crash_details = rdf_client.ClientCrash( client_id=client_id, session_id=session_id, backtrace=stat.backtrace, crash_message=stat.error_message, nanny_status=stat.nanny_status, timestamp=rdfvalue.RDFDatetime.Now()) events.Events.PublishEvent( "ClientCrash", crash_details, token=self.token) logging.debug("Received %s messages from %s in %s sec", len(messages), client_id, time.time() - now)
java
@Override public final Choice2<A, B> converge(Function<? super C, ? extends CoProduct2<A, B, ?>> convergenceFn) { return match(Choice2::a, Choice2::b, convergenceFn.andThen(cp2 -> cp2.match(Choice2::a, Choice2::b))); }
java
public static boolean sendRequestedBlobs(HttpServerExchange exchange) { ByteBuffer buffer = null; String type = null; String etag = null; String quotedEtag = null; if ("css".equals(exchange.getQueryString())) { buffer = Blobs.FILE_CSS_BUFFER.duplicate(); type = "text/css"; etag = Blobs.FILE_CSS_ETAG; quotedEtag = Blobs.FILE_CSS_ETAG_QUOTED; } else if ("js".equals(exchange.getQueryString())) { buffer = Blobs.FILE_JS_BUFFER.duplicate(); type = "application/javascript"; etag = Blobs.FILE_JS_ETAG; quotedEtag = Blobs.FILE_JS_ETAG_QUOTED; } if (buffer != null) { if(!ETagUtils.handleIfNoneMatch(exchange, new ETag(false, etag), false)) { exchange.setStatusCode(StatusCodes.NOT_MODIFIED); return true; } exchange.getResponseHeaders().put(Headers.CONTENT_LENGTH, String.valueOf(buffer.limit())); exchange.getResponseHeaders().put(Headers.CONTENT_TYPE, type); exchange.getResponseHeaders().put(Headers.ETAG, quotedEtag); if (Methods.HEAD.equals(exchange.getRequestMethod())) { exchange.endExchange(); return true; } exchange.getResponseSender().send(buffer); return true; } return false; }
java
public Iterable<DContact> queryByCreatedBy(Object parent, java.lang.String createdBy) { return queryByField(parent, DContactMapper.Field.CREATEDBY.getFieldName(), createdBy); }
java
public InstanceResizePolicy withInstancesToTerminate(String... instancesToTerminate) { if (this.instancesToTerminate == null) { setInstancesToTerminate(new com.amazonaws.internal.SdkInternalList<String>(instancesToTerminate.length)); } for (String ele : instancesToTerminate) { this.instancesToTerminate.add(ele); } return this; }
python
def _importSNPs_dbSNPSNP(setName, species, genomeSource, snpsFile) : "This function will also create an index on start->chromosomeNumber->setName. Warning : pyGeno positions are 0 based" snpData = VCFFile(snpsFile, gziped = True, stream = True) dbSNPSNP.dropIndex(('start', 'chromosomeNumber', 'setName')) conf.db.beginTransaction() pBar = ProgressBar() pLabel = '' for snpEntry in snpData : pBar.update(label = 'Chr %s, %s...' % (snpEntry['#CHROM'], snpEntry['ID'])) snp = dbSNPSNP() for f in snp.getFields() : try : setattr(snp, f, snpEntry[f]) except KeyError : pass snp.chromosomeNumber = snpEntry['#CHROM'] snp.species = species snp.setName = setName snp.start = snpEntry['POS']-1 snp.alt = snpEntry['ALT'] snp.ref = snpEntry['REF'] snp.end = snp.start+len(snp.alt) snp.save() pBar.close() snpMaster = SNPMaster() snpMaster.set(setName = setName, SNPType = 'dbSNPSNP', species = species) snpMaster.save() printf('saving...') conf.db.endTransaction() printf('creating indexes...') dbSNPSNP.ensureGlobalIndex(('start', 'chromosomeNumber', 'setName')) printf('importation of SNP set %s for species %s done.' %(setName, species)) return True
java
public ResultList<TVInfo> getTVSimilar(int tvID, Integer page, String language) throws MovieDbException { return tmdbTv.getTVSimilar(tvID, page, language); }
java
public float getBoundsHeight(){ if (mSceneObject != null) { GVRSceneObject.BoundingVolume v = mSceneObject.getBoundingVolume(); return v.maxCorner.y - v.minCorner.y; } return 0f; }
java
public String getPathName() { if( pathName_ == null) { StringBuilder pathName = new StringBuilder(); IVarDef parent = getParent(); if( parent != null) { pathName .append( parent.getPathName()) .append( '.'); } String name = getName(); if( name != null) { pathName.append( name); } pathName_ = pathName.toString(); } return pathName_; }
java
public void removeExtension() { deleteRelationships(); geoPackage.deleteTable(StyleTable.TABLE_NAME); geoPackage.deleteTable(IconTable.TABLE_NAME); try { if (extensionsDao.isTableExists()) { extensionsDao.deleteByExtension(EXTENSION_NAME); } } catch (SQLException e) { throw new GeoPackageException( "Failed to delete Feature Style extension. GeoPackage: " + geoPackage.getName(), e); } }
java
public boolean getForecast(JsonObject forecast) { this.forecast = forecast; try { this.currently = forecast.get("currently").asObject(); } catch (NullPointerException e) { this.currently = null; } try { this.minutely = forecast.get("minutely").asObject(); } catch (NullPointerException e) { this.minutely = null; } try { this.hourly = forecast.get("hourly").asObject(); } catch (NullPointerException e) { this.hourly = null; } try { this.daily = forecast.get("daily").asObject(); } catch (NullPointerException e) { this.daily = null; } try { this.flags = forecast.get("flags").asObject(); } catch (NullPointerException e) { this.flags = null; } try { this.alerts = forecast.get("alerts").asArray(); } catch (NullPointerException e) { this.alerts = null; } return true; }
python
def FromJsonString(self, value): """Parse a RFC 3339 date string format to Timestamp. Args: value: A date string. Any fractional digits (or none) and any offset are accepted as long as they fit into nano-seconds precision. Example of accepted format: '1972-01-01T10:00:20.021-05:00' Raises: ParseError: On parsing problems. """ timezone_offset = value.find('Z') if timezone_offset == -1: timezone_offset = value.find('+') if timezone_offset == -1: timezone_offset = value.rfind('-') if timezone_offset == -1: raise ParseError( 'Failed to parse timestamp: missing valid timezone offset.') time_value = value[0:timezone_offset] # Parse datetime and nanos. point_position = time_value.find('.') if point_position == -1: second_value = time_value nano_value = '' else: second_value = time_value[:point_position] nano_value = time_value[point_position + 1:] date_object = datetime.strptime(second_value, _TIMESTAMPFOMAT) td = date_object - datetime(1970, 1, 1) seconds = td.seconds + td.days * _SECONDS_PER_DAY if len(nano_value) > 9: raise ParseError( 'Failed to parse Timestamp: nanos {0} more than ' '9 fractional digits.'.format(nano_value)) if nano_value: nanos = round(float('0.' + nano_value) * 1e9) else: nanos = 0 # Parse timezone offsets. if value[timezone_offset] == 'Z': if len(value) != timezone_offset + 1: raise ParseError('Failed to parse timestamp: invalid trailing' ' data {0}.'.format(value)) else: timezone = value[timezone_offset:] pos = timezone.find(':') if pos == -1: raise ParseError( 'Invalid timezone offset value: {0}.'.format(timezone)) if timezone[0] == '+': seconds -= (int(timezone[1:pos])*60+int(timezone[pos+1:]))*60 else: seconds += (int(timezone[1:pos])*60+int(timezone[pos+1:]))*60 # Set seconds and nanos self.seconds = int(seconds) self.nanos = int(nanos)
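A quick usage sketch via the protobuf well-known Timestamp type, which exposes this FromJsonString(); the -05:00 offset is folded into seconds and the fractional part into nanos.

from google.protobuf.timestamp_pb2 import Timestamp

ts = Timestamp()
ts.FromJsonString('1972-01-01T10:00:20.021-05:00')
# 1972-01-01T15:00:20.021 UTC since the epoch:
print(ts.seconds, ts.nanos)  # 63126020 21000000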
python
def is_binary_address(value: Any) -> bool: """ Checks if the given string is an address in raw bytes form. """ if not is_bytes(value): return False elif len(value) != 20: return False else: return True
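A few spot checks of is_binary_address(), assuming the is_bytes helper behaves as in eth-utils (True only for raw byte strings):

assert is_binary_address(b'\x00' * 20)            # canonical 20-byte form
assert not is_binary_address(b'\x00' * 19)        # wrong length
assert not is_binary_address('0x' + '00' * 20)    # hex string, not raw bytes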
python
async def stor(self, sops, splices=None): ''' Execute a series of storage operations. Overrides implementation in layer.py to avoid unnecessary async calls. ''' for oper in sops: func = self._stor_funcs.get(oper[0]) if func is None: # pragma: no cover raise s_exc.NoSuchStor(name=oper[0]) func(oper) if splices: self._storSplicesSync(splices) self.spliced.set() self.spliced.clear()
python
def list(self, options=None, **kwds): """ Endpoint: /photos[/<options>]/list.json Returns a list of Photo objects. The options parameter can be used to narrow down the list. Eg: options={"album": <album_id>} """ option_string = self._build_option_string(options) photos = self._client.get("/photos%s/list.json" % option_string, **kwds)["result"] photos = self._result_to_list(photos) return [Photo(self._client, photo) for photo in photos]
java
public void loadDefaultImports() { /** Note: the resolver looks through these in reverse order, per * precedence rules... so for max efficiency put the most common ones * later. */ this.importClass("bsh.EvalError"); this.importClass("bsh.Interpreter"); this.importClass("bsh.Capabilities"); this.importPackage("java.net"); this.importClass("java.util.Map.Entry"); this.importPackage("java.util.function"); this.importPackage("java.util.stream"); this.importPackage("java.util.regex"); this.importPackage("java.util"); this.importPackage("java.io"); this.importPackage("java.lang"); this.importClass("bsh.FileReader"); this.importClass("java.math.BigInteger"); this.importClass("java.math.BigDecimal"); this.importCommands("/bsh/commands"); }
java
public String summarize() { return String.format(SUMMARY_FORMAT, ticket, action, (client != null ? client.getServiceNumber() : 0), (client != null ? client.getNumber() : 0), (client != null ? client.getClientId() : 0), (client != null ? client.getClientSecret() : null), (client != null ? client.getClientType() : null), (client != null ? client.getDeveloper() : null), display, maxAge, Utils.stringifyScopeNames(scopes), Utils.join(uiLocales, " "), Utils.join(claimsLocales, " "), Utils.join(claims, " "), acrEssential, clientIdAliasUsed, Utils.join(acrs, " "), subject, loginHint, lowestPrompt, Utils.stringifyPrompts(prompts) ); }
python
def midi_event(self, event_type, channel, param1, param2=None): """Convert and return the parameters as a MIDI event in bytes.""" assert event_type < 0x80 and event_type >= 0 assert channel < 16 and channel >= 0 tc = a2b_hex('%x%x' % (event_type, channel)) if param2 is None: params = a2b_hex('%02x' % param1) else: params = a2b_hex('%02x%02x' % (param1, param2)) return self.delta_time + tc + params
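A standalone sketch of the byte packing done by midi_event() above, for a note-on (event type 0x9) on channel 0, middle C (note 60) at velocity 100; the delta_time prefix is omitted here.

from binascii import a2b_hex

event_type, channel, note, velocity = 0x9, 0, 60, 100
tc = a2b_hex('%x%x' % (event_type, channel))     # status byte -> b'\x90'
params = a2b_hex('%02x%02x' % (note, velocity))  # data bytes  -> b'\x3c\x64'
print((tc + params).hex())                       # 903c64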
python
def delete_tmpl(args): """Delete template. Argument: args: arguments object """ if args.__dict__.get('template'): template = args.template password = get_password(args) token = connect.get_token(args.username, password, args.server) processing.delete_template(args.server, token, template)
python
def _ParseDateTimeValue(self, parser_mediator, date_time_value): """Parses a date time value. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. date_time_value (str): date time value (CSSM_DB_ATTRIBUTE_FORMAT_TIME_DATE) in the format: "YYYYMMDDhhmmssZ". Returns: dfdatetime.TimeElements: date and time extracted from the value or None if the value does not represent a valid string. """ if date_time_value[14] != 'Z': parser_mediator.ProduceExtractionWarning( 'invalid date and time value: {0!s}'.format(date_time_value)) return None try: year = int(date_time_value[0:4], 10) month = int(date_time_value[4:6], 10) day_of_month = int(date_time_value[6:8], 10) hours = int(date_time_value[8:10], 10) minutes = int(date_time_value[10:12], 10) seconds = int(date_time_value[12:14], 10) except (TypeError, ValueError): parser_mediator.ProduceExtractionWarning( 'invalid date and time value: {0!s}'.format(date_time_value)) return None time_elements_tuple = (year, month, day_of_month, hours, minutes, seconds) try: return dfdatetime_time_elements.TimeElements( time_elements_tuple=time_elements_tuple) except ValueError: parser_mediator.ProduceExtractionWarning( 'invalid date and time value: {0!s}'.format(date_time_value)) return None
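A standalone sketch of the fixed-width "YYYYMMDDhhmmssZ" layout parsed above, without the dfdatetime and parser_mediator dependencies; the sample value is hypothetical.

from datetime import datetime

value = '20190402153012Z'  # hypothetical CSSM_DB_ATTRIBUTE_FORMAT_TIME_DATE value
parsed = datetime.strptime(value, '%Y%m%d%H%M%SZ')
print(parsed.isoformat())  # 2019-04-02T15:30:12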
python
def _track_stack_pointers(self): """ For each instruction, track its stack pointer offset and stack base pointer offset. :return: None """ regs = {self.project.arch.sp_offset} if hasattr(self.project.arch, 'bp_offset') and self.project.arch.bp_offset is not None: regs.add(self.project.arch.bp_offset) spt = self.project.analyses.StackPointerTracker(self.function, regs, track_memory=self._sp_tracker_track_memory) if spt.inconsistent_for(self.project.arch.sp_offset): l.warning("Inconsistency found during stack pointer tracking. Decompilation results might be incorrect.") return spt
python
def names(self): """ Returns the player's name and real name. Returns two empty strings if the player is unknown. AI real name is always an empty string. """ if self.name == self.UNKNOWN_HUMAN_PLAYER: return "", "" if not self.is_ai and " " in self.name: return "", self.name return self.name, ""
java
public AbstractDockerCommand withParam(String name, String value) { parameters.put(name, value); return this; }
python
def callable_check(func, arg_count=1, arg_value=None, allow_none=False): """Check whether func is callable, with the given number of positional arguments. Raises ValueError if the check fails; returns None on success.""" if func is None: if not allow_none: raise ValueError('callable cannot be None') elif not arg_checker(func, *[arg_value for _ in range(arg_count)]): raise ValueError('callable %s invalid (for %d arguments)' % (func, arg_count))
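A usage sketch for callable_check(), assuming arg_checker is the module helper that probes the signature; note that validation failures surface as ValueError rather than a False return.

def on_message(msg):
    print(msg)

callable_check(on_message, arg_count=1)   # passes silently
callable_check(None, allow_none=True)     # None permitted explicitly
# callable_check(object(), arg_count=1)   # would raise ValueError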
java
static void assertEquals(String message, Object comparison1, Object comparison2) { if(comparison1 == comparison2) { return; } if(comparison1 != null && comparison1.equals(comparison2)) { return; } System.err.println(message); }
java
public String getBody() { JsonObject wrapper = new JsonObject(); wrapper.add(Ref.getTypeFromRef(ref), obj); return gsonBuilder.create().toJson(wrapper); }
java
public static int cuGraphAddDependencies(CUgraph hGraph, CUgraphNode from[], CUgraphNode to[], long numDependencies) { return checkResult(cuGraphAddDependenciesNative(hGraph, from, to, numDependencies)); }
java
@Override public void onDialogRequestEricsson(MAPDialog mapDialog, AddressString destReference, AddressString origReference, AddressString arg3, AddressString arg4) { if (logger.isDebugEnabled()) { logger.debug(String.format("onDialogRequest for DialogId=%d DestinationReference=%s OriginReference=%s ", mapDialog.getLocalDialogId(), destReference, origReference)); } }
python
def _is_valid(self, value): """Return True if the input value is valid for insertion into the inner list. Args: value: An object about to be inserted. """ # Entities have an istypeof method that can perform more sophisticated # type checking. if hasattr(self._type, "istypeof"): return self._type.istypeof(value) else: return isinstance(value, self._type)
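A sketch of the istypeof hook that _is_valid() prefers over isinstance; Angle here is a hypothetical entity type implementing that protocol.

class Angle:
    @staticmethod
    def istypeof(value):
        # Accept any numeric value in the degree range, not just Angle instances.
        return isinstance(value, (int, float)) and 0 <= value < 360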
java
@Override public RoundedMoney add(MonetaryAmount amount) { MoneyUtils.checkAmountParameter(amount, currency); if (amount.isZero()) { return this; } return new RoundedMoney(number.add(amount.getNumber().numberValue(BigDecimal.class)), currency, rounding).with(rounding); }
java
private void flagSFSBRemoveMethods(BeanMetaData bmd, final Method[] allMethodsOfEJB, Map<Method, ArrayList<EJBMethodInfoImpl>> methodInfoMap) throws EJBConfigurationException { final boolean isTraceOn = TraceComponent.isAnyTracingEnabled(); String ejbName = bmd.enterpriseBeanName; if (isTraceOn && tc.isEntryEnabled()) { Tr.entry(tc, "flagSFSBRemoveMethods: " + ejbName); } // First process any method that is annotated as a remove method // if metadata complete is false. This is done first since the xml // can be used to override the retain if exception setting. if (!bmd.metadataComplete) { for (Method m : allMethodsOfEJB) { // Does this method have the Remove annotation? Remove ra = m.getAnnotation(Remove.class); if (ra != null) { // find the list of EJBMethodInfoImpl objects that need to be // flagged as remove methods. ArrayList<EJBMethodInfoImpl> infoList = methodInfoMap.get(m); if (infoList == null) { // CNTR0233E: The {1} enterprise bean specifies an @Remove annotation on the {0} method. // This annotation is not valid because this method is not a business method of this bean. String method = m.toGenericString(); Object[] data = new Object[] { method, ejbName }; Tr.error(tc, "INVALID_REMOVE_ANNOTATION_CNTR0233E", data); throw new EJBConfigurationException("CNTR0233E: The " + ejbName + " enterprise bean specifies an @Remove annotation on the " + m.toGenericString() + " method. This annotation is not valid because this method is not a business method of this bean."); } else { // Get retainIfException from remove annotation boolean retainIfException = ra.retainIfException(); // Flag each of the EJBMethodInfoImpl objects for this business method that // it is a SFSB remove method. for (EJBMethodInfoImpl methodInfo : infoList) { methodInfo.setSFSBRemove(retainIfException); if (isTraceOn && tc.isDebugEnabled()) { Tr.debug(tc, "SFSB @Remove method: " + methodInfo.getInterfaceType() + " - " + methodInfo.getJDIMethodSignature() + ", retain-if-exception = " + retainIfException); } } } } } } // Now augment or override the results of above processing with remove-method // defined in the ejb-jar.xml file (note, eb is set to null if there is no // ejb-jar.xml file). EnterpriseBean eb = bmd.wccm.enterpriseBean; if (eb != null) { // Get remove-method list for this Session bean from WCCM and process. Session sb = (Session) eb; List<RemoveMethod> rmList = sb.getRemoveMethod(); for (RemoveMethod rm : rmList) { NamedMethod namedMethod = rm.getBeanMethod(); String namedMethodName = namedMethod.getMethodName(); ArrayList<EJBMethodInfoImpl> infoList = null; // Determine if the remove method was specified using style // 1, 2, or 3 and build the list of EJBMethodInfos that need // to be flagged as remove methods. d615325 if ("*".equals(namedMethodName)) { // style 1 - all methods are considered remove methods infoList = new ArrayList<EJBMethodInfoImpl>(methodInfoMap.size()); for (ArrayList<EJBMethodInfoImpl> methodInfoList : methodInfoMap.values()) { infoList.addAll(methodInfoList); } } else if (namedMethod.getMethodParamList() == null) { // style 2 - all methods with the same name are remove methods infoList = new ArrayList<EJBMethodInfoImpl>(); for (Method method : methodInfoMap.keySet()) { if (method.getName().equals(namedMethodName)) { infoList.addAll(methodInfoMap.get(method)); } } } else { // style 3 - remove method must match both name and parameters Method m = DDUtil.findMethod(namedMethod, allMethodsOfEJB); if (m != null) { infoList = methodInfoMap.get(m); } } if (infoList == null || infoList.size() == 0) { // CNTR0233E: Remove annotation can not be applied to method "{0}" // since the method is not a business method of EJB name "{1}". String method = namedMethod.getMethodName(); List<String> parms = namedMethod.getMethodParamList(); List<String> parmList = parms == null ? Collections.<String> emptyList() : parms; method = method + parmList; Object[] data = new Object[] { method, ejbName }; Tr.error(tc, "INVALID_REMOVE_ANNOTATION_CNTR0233E", data); throw new EJBConfigurationException("remove-method annotation can not be applied to method \"" + method + "\" since the method is not a business method of EJB name \"" + ejbName + "\""); } else { // Determine if retain-if-exception is set in ejb-jar.xml for this EJB. boolean retainIfExceptionIsSet = rm.isSetRetainIfException(); //d454711 // Flag each of the EJBMethodInfoImpl objects for this business method that // it is a SFSB remove method. for (EJBMethodInfoImpl methodInfo : infoList) { // Was method annotated as a remove method? if (methodInfo.isSFSBRemoveMethod()) { // Yep, it was annotated with Remove method. Is the ejb-jar.xml // overriding the retain if exception setting? if (retainIfExceptionIsSet) { // Yep, ejb-jar.xml is overriding the retain if exception, // so get setting from xml and set it in EJBMethodInfo object. boolean retainIfException = rm.isRetainIfException(); methodInfo.setSFSBRemove(retainIfException); if (isTraceOn && tc.isDebugEnabled()) { Tr.debug(tc, "SFSB remove-method: " + methodInfo.getInterfaceType() + " - " + methodInfo.getJDIMethodSignature() + ", ejb-jar.xml overriding retain-if-exception to be " + retainIfException); } } } else { // d454711 // Method was not annotated as a remove method, but it is a remove // method in the xml file. So flag method using information from xml file. // Need to default retain if exception if not specified in xml file. boolean retainIfException = (retainIfExceptionIsSet) ? rm.isRetainIfException() : false; //d454711 methodInfo.setSFSBRemove(retainIfException); if (isTraceOn && tc.isDebugEnabled()) { Tr.debug(tc, "SFSB remove-method: " + methodInfo.getInterfaceType() + " - " + methodInfo.getJDIMethodSignature() + ", retain-if-exception = " + retainIfException); } } } } } } if (isTraceOn && tc.isEntryEnabled()) { Tr.exit(tc, "flagSFSBRemoveMethods: " + ejbName); } }
java
@NotNull public Stream<T> filterIndexed(int from, int step, @NotNull IndexedPredicate<? super T> predicate) { return new Stream<T>(params, new ObjFilterIndexed<T>( new IndexedIterator<T>(from, step, iterator), predicate)); }
python
def add_options(parser): """ Add optional arguments to the parser """ partial_action = common.partial_append_action file_mods = parser.add_argument_group("Sequence File Modification") file_mods.add_argument('--line-wrap', dest='line_wrap', metavar='N', type=int, help='Adjust line wrap for sequence strings. ' 'When N is 0, all line breaks are removed. Only fasta files ' 'are supported for the output format.') file_mods.add_argument('--sort', dest='sort', choices=['length-asc', 'length-desc', 'name-asc', 'name-desc'], help='Perform sorting by length or name, ascending or descending. ' 'ASCII sorting is performed for names') parser.epilog = """Filters using regular expressions are case-sensitive by default. Append "(?i)" to a pattern to make it case-insensitive.""" seq_mods = parser.add_argument_group("Sequence Modification") seq_mods.add_argument('--apply-function', type=module_function, metavar='/path/to/module.py:function_name[:parameter]', help="""Specify a custom function to apply to the input sequences, specified as /path/to/file.py:function_name. Function should accept an iterable of Bio.SeqRecord objects, and yield SeqRecords. If the parameter is specified, it will be passed as a string as the second argument to the function. Specify more than one to chain.""", default=[], action='append') seq_mods.add_argument('--cut', dest='transforms', metavar="start:end[,start2:end2]", type=common.sequence_slices, action=partial_action(transform.multi_cut_sequences, 'slices'), help="""Keep only the residues within the 1-indexed start and end positions specified, : separated. Includes last item. Start or end can be left unspecified to indicate start/end of sequence. A negative start may be provided to indicate an offset from the end of the sequence. Note that to prevent negative numbers being interpreted as flags, this should be written with an equals sign between `--cut` and the argument, e.g.: `--cut=-10:`""") seq_mods.add_argument('--relative-to', dest='cut_relative', metavar='ID', help="""Apply --cut relative to the indexes of non-gap residues in sequence identified by ID""") seq_mods.add_argument('--drop', dest='transforms', metavar='start:end[,start2:end2]', type=common.sequence_slices, action=partial_action(transform.drop_columns, 'slices'), help="""Remove the residues at the specified indices. Same format as `--cut`.""") seq_mods.add_argument('--dash-gap', action=partial_action(transform.dashes_cleanup), dest='transforms', help="""Replace any of the characters "?.:~" with a "-" for all sequences""") seq_mods.add_argument('--lower', action=partial_action(transform.lower_sequences), dest='transforms', help='Translate the sequences to lower case') seq_mods.add_argument('--mask', metavar="start1:end1[,start2:end2]", action=partial_action(transform.multi_mask_sequences, 'slices'), type=common.sequence_slices, dest='transforms', help="""Replace residues in 1-indexed slice with gap-characters. If --relative-to is also specified, coordinates are relative to the sequence ID provided.""") seq_mods.add_argument('--reverse', action=partial_action(transform.reverse_sequences), dest='transforms', help='Reverse the order of sites in sequences') seq_mods.add_argument('--reverse-complement', dest='transforms', action=partial_action(transform.reverse_complement_sequences), help='Convert sequences into reverse complements') seq_mods.add_argument('--squeeze', action=partial_action(transform.squeeze), dest='transforms', help='''Remove any gaps that are present in the same position across all sequences in an alignment (equivalent to --squeeze-threshold=1.0)''') seq_mods.add_argument('--squeeze-threshold', dest='transforms', action=partial_action(transform.squeeze, 'gap_threshold'), type=common.typed_range(float, 0.0, 1.0), metavar='PROP', help="""Trim columns from an alignment which have gaps in at least the specified proportion of sequences.""") seq_mods.add_argument('--transcribe', dest='transforms', action=partial_action(transform.transcribe, 'transcribe'), choices=('dna2rna', 'rna2dna'), help="""Transcription and back transcription for generic DNA and RNA. Source sequences must be the correct alphabet or this action will likely produce incorrect results.""") seq_mods.add_argument('--translate', dest='transforms', action=partial_action(transform.translate, 'translate'), choices=['dna2protein', 'rna2protein', 'dna2proteinstop', 'rna2proteinstop'], help="""Translate from generic DNA/RNA to proteins. Options with "stop" suffix will NOT translate through stop codons. Source sequences must be the correct alphabet or this action will likely produce incorrect results.""") seq_mods.add_argument('--ungap', action=partial_action(transform.ungap_sequences), dest='transforms', help='Remove gaps in the sequence alignment') seq_mods.add_argument('--upper', action=partial_action(transform.upper_sequences), dest='transforms', help='Translate the sequences to upper case') seq_select = parser.add_argument_group("Record Selection") seq_select.add_argument('--deduplicate-sequences', action='store_const', const=None, default=False, dest='deduplicate_sequences', help='Remove any duplicate sequences ' 'by sequence content, keep the first instance seen') seq_select.add_argument('--deduplicated-sequences-file', action='store', metavar='FILE', dest='deduplicate_sequences', default=False, type=common.FileType('wt'), help='Write all of the deduplicated sequences to a file') seq_select.add_argument('--deduplicate-taxa', action=partial_action(transform.deduplicate_taxa), dest='transforms', help="""Remove any duplicate sequences by ID, keep the first instance seen""") seq_select.add_argument('--exclude-from-file', metavar='FILE', type=common.FileType('rt'), help="""Filter sequences, removing those sequence IDs in the specified file""", dest='transforms', action=partial_action(transform.exclude_from_file, 'handle')) seq_select.add_argument('--include-from-file', metavar='FILE', type=common.FileType('rt'), help="""Filter sequences, keeping only those sequence IDs in the specified file""", dest='transforms', action=partial_action(transform.include_from_file, 'handle')) seq_select.add_argument('--head', metavar='N', dest='transforms', action=partial_action(transform.head, 'head'), help="""Trim down to top N sequences. With the leading `-', print all but the last N sequences.""") seq_select.add_argument('--max-length', dest='transforms', metavar='N', action=partial_action(transform.max_length_discard, 'max_length'), type=int, help="""Discard any sequences beyond the specified maximum length. This operation occurs *before* all length-changing options such as cut and squeeze.""") seq_select.add_argument('--min-length', dest='transforms', metavar='N', action=partial_action(transform.min_length_discard, 'min_length'), type=int, help="""Discard any sequences less than the specified minimum length. This operation occurs *before* cut and squeeze.""") seq_select.add_argument('--min-ungapped-length', metavar='N', action=partial_action(transform.min_ungap_length_discard, 'min_length'), type=int, help="""Discard any sequences less than the specified minimum length, excluding gaps. This operation occurs *before* cut and squeeze.""", dest='transforms') seq_select.add_argument('--pattern-include', metavar='REGEX', action=partial_action(transform.name_include, 'filter_regex'), dest='transforms', help="""Filter the sequences by regular expression in ID or description""") seq_select.add_argument('--pattern-exclude', metavar='REGEX', action=partial_action(transform.name_exclude, 'filter_regex'), dest='transforms', help="""Filter the sequences by regular expression in ID or description""") seq_select.add_argument('--prune-empty', action=partial_action(transform.prune_empty), dest='transforms', help="Prune sequences containing only gaps ('-')") seq_select.add_argument('--sample', metavar='N', dest='transforms', type=int, action=partial_action(transform.sample, 'k'), help = """ Select a random sampling of sequences """) seq_select.add_argument('--sample-seed', metavar='N', type=int, help = """Set random seed for sampling of sequences""") seq_select.add_argument('--seq-pattern-include', metavar='REGEX', action=partial_action(transform.seq_include, 'filter_regex'), dest='transforms', help="""Filter the sequences by regular expression in sequence""") seq_select.add_argument('--seq-pattern-exclude', metavar='REGEX', action=partial_action(transform.seq_exclude, 'filter_regex'), dest='transforms', help="""Filter the sequences by regular expression in sequence""") seq_select.add_argument('--tail', metavar='N', dest='transforms', action=partial_action(transform.tail, 'tail'), help="""Trim down to bottom N sequences. Use +N to output sequences starting with the Nth.""") id_mods = parser.add_argument_group("Sequence ID Modification") id_mods.add_argument('--first-name', action=partial_action(transform.first_name_capture), dest='transforms', help='''Take only the first whitespace-delimited word as the name of the sequence''') id_mods.add_argument('--name-suffix', metavar='SUFFIX', action=partial_action(transform.name_append_suffix, 'suffix'), dest='transforms', help='Append a suffix to all IDs.') id_mods.add_argument('--name-prefix', metavar='PREFIX', action=partial_action(transform.name_insert_prefix, 'prefix'), dest='transforms', help="""Insert a prefix for all IDs.""") id_mods.add_argument('--pattern-replace', nargs=2, metavar=('search_pattern', 'replace_pattern'), action=partial_action(transform.name_replace, ('search_regex', 'replace_pattern')), dest='transforms', help="""Replace regex pattern "search_pattern" with "replace_pattern" in sequence ID and description""") id_mods.add_argument('--strip-range', dest='transforms', action=partial_action(transform.strip_range), help="""Strip ranges from sequence IDs, matching </x-y>""") format_group = parser.add_argument_group('Format Options') format_group.add_argument('--input-format', metavar='FORMAT', help="Input file format (default: determine from extension)") format_group.add_argument('--output-format', metavar='FORMAT', help="Output file format (default: determine from extension)") parser.add_argument('--alphabet', choices=ALPHABETS, help="""Input alphabet. Required for writing NEXUS.""") return parser
python
def ihs(h, pos, map_pos=None, min_ehh=0.05, min_maf=0.05, include_edges=False, gap_scale=20000, max_gap=200000, is_accessible=None, use_threads=True): """Compute the unstandardized integrated haplotype score (IHS) for each variant, comparing integrated haplotype homozygosity between the reference (0) and alternate (1) alleles. Parameters ---------- h : array_like, int, shape (n_variants, n_haplotypes) Haplotype array. pos : array_like, int, shape (n_variants,) Variant positions (physical distance). map_pos : array_like, float, shape (n_variants,) Variant positions (genetic map distance). min_ehh : float, optional Minimum EHH beyond which to truncate integrated haplotype homozygosity calculation. min_maf : float, optional Do not compute integrated haplotype homozygosity for variants with minor allele frequency below this value. include_edges : bool, optional If True, report scores even if EHH does not decay below `min_ehh` before reaching the edge of the data. gap_scale : int, optional Rescale distance between variants if gap is larger than this value. max_gap : int, optional Do not report scores if EHH spans a gap larger than this number of base pairs. is_accessible : array_like, bool, optional Genome accessibility array. If provided, distance between variants will be computed as the number of accessible bases between them. use_threads : bool, optional If True use multiple threads to compute. Returns ------- score : ndarray, float, shape (n_variants,) Unstandardized IHS scores. Notes ----- This function will calculate IHS for all variants. To exclude variants below a given minor allele frequency, filter the input haplotype array before passing to this function. This function computes IHS comparing the reference and alternate alleles. These can be polarised by switching the sign for any variant where the reference allele is derived. This function returns NaN for any IHS calculations where haplotype homozygosity does not decay below `min_ehh` before reaching the first or last variant. To disable this behaviour, set `include_edges` to True. Note that the unstandardized score is returned. Usually these scores are then standardized in different allele frequency bins. See Also -------- standardize_by_allele_count """ # check inputs h = asarray_ndim(h, 2) check_integer_dtype(h) pos = asarray_ndim(pos, 1) check_dim0_aligned(h, pos) h = memoryview_safe(h) pos = memoryview_safe(pos) # compute gaps between variants for integration gaps = compute_ihh_gaps(pos, map_pos, gap_scale, max_gap, is_accessible) # setup kwargs kwargs = dict(min_ehh=min_ehh, min_maf=min_maf, include_edges=include_edges) if use_threads and multiprocessing.cpu_count() > 1: # run with threads # create pool pool = ThreadPool(2) # scan forward result_fwd = pool.apply_async(ihh01_scan, (h, gaps), kwargs) # scan backward result_rev = pool.apply_async(ihh01_scan, (h[::-1], gaps[::-1]), kwargs) # wait for both to finish pool.close() pool.join() # obtain results ihh0_fwd, ihh1_fwd = result_fwd.get() ihh0_rev, ihh1_rev = result_rev.get() # cleanup pool.terminate() else: # run without threads # scan forward ihh0_fwd, ihh1_fwd = ihh01_scan(h, gaps, **kwargs) # scan backward ihh0_rev, ihh1_rev = ihh01_scan(h[::-1], gaps[::-1], **kwargs) # handle reverse scan ihh0_rev = ihh0_rev[::-1] ihh1_rev = ihh1_rev[::-1] # compute unstandardized score ihh0 = ihh0_fwd + ihh0_rev ihh1 = ihh1_fwd + ihh1_rev score = np.log(ihh1 / ihh0) return score
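Hypothetical inputs for ihs() above: a tiny 0/1 haplotype array with physical positions. Real scans use far larger arrays, and the call is shown commented out because ihh01_scan and the input-check helpers are module internals.

import numpy as np

h = np.array([[0, 0, 1, 1, 0, 1, 0, 1],
              [0, 1, 1, 1, 0, 1, 0, 0],
              [1, 0, 0, 1, 1, 0, 1, 0],
              [0, 0, 1, 0, 1, 1, 0, 1]], dtype='i1')  # variants x haplotypes
pos = np.array([100, 450, 900, 1200])                 # physical positions

# score = ihs(h, pos, min_maf=0.0, include_edges=True)
# Unstandardized scores; standardize per allele-frequency bin afterwards.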
python
def _make_nodes(self, cwd=None): """ Cast generated nodes to be Arcana nodes """ for i, node in NipypeMapNode._make_nodes(self, cwd=cwd): # "Cast" NiPype node to a Arcana Node and set Arcana Node # parameters node.__class__ = self.node_cls node._environment = self._environment node._versions = self._versions node._wall_time = self._wall_time node._annotations = self._annotations yield i, node
java
protected String normalizeName(String originalName) { if (originalName == null || originalName.endsWith(".js")) { return originalName; } return originalName + ".js"; }
java
public void marshall(CopySnapshotRequest copySnapshotRequest, ProtocolMarshaller protocolMarshaller) { if (copySnapshotRequest == null) { throw new SdkClientException("Invalid argument passed to marshall(...)"); } try { protocolMarshaller.marshall(copySnapshotRequest.getSourceSnapshotName(), SOURCESNAPSHOTNAME_BINDING); protocolMarshaller.marshall(copySnapshotRequest.getTargetSnapshotName(), TARGETSNAPSHOTNAME_BINDING); protocolMarshaller.marshall(copySnapshotRequest.getSourceRegion(), SOURCEREGION_BINDING); } catch (Exception e) { throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e); } }
java
public static <T> Observable.Transformer<T, T> applyCustomSchedulers(final Handler subscribeHandler) { return new Observable.Transformer<T, T>() { @Override public Observable<T> call(Observable<T> observable) { return observable.subscribeOn(HandlerScheduler.from(subscribeHandler)) .observeOn(AndroidSchedulers.mainThread()); } }; }
java
public static Session getSession(final ServerSetup setup, Properties mailProps) { Properties props = setup.configureJavaMailSessionProperties(mailProps, false); log.debug("Mail session properties are {}", props); return Session.getInstance(props, null); }
java
@Override public Request<DescribeDhcpOptionsRequest> getDryRunRequest() { Request<DescribeDhcpOptionsRequest> request = new DescribeDhcpOptionsRequestMarshaller().marshall(this); request.addParameter("DryRun", Boolean.toString(true)); return request; }
python
def post_create(self, tags, file_=None, rating=None, source=None, rating_locked=None, note_locked=None, parent_id=None, md5=None): """Function to create a new post (Requires login). There are only two mandatory fields: you need to supply the 'tags', and you need to supply the 'file_', either through a multipart form or through a source URL (Requires login) (UNTESTED). Parameters: tags (str): A space delimited list of tags. file_ (str): The file data encoded as a multipart form. Path of content. rating (str): The rating for the post. Can be: safe, questionable, or explicit. source (str): If this is a URL, Moebooru will download the file. rating_locked (bool): Set to True to prevent others from changing the rating. note_locked (bool): Set to True to prevent others from adding notes. parent_id (int): The ID of the parent post. md5 (str): Supply an MD5 if you want Moebooru to verify the file after uploading. If the MD5 doesn't match, the post is destroyed. Raises: PybooruAPIError: When file_ and source are both empty. """ if file_ is not None or source is not None: params = { 'post[tags]': tags, 'post[source]': source, 'post[rating]': rating, 'post[is_rating_locked]': rating_locked, 'post[is_note_locked]': note_locked, 'post[parent_id]': parent_id, 'md5': md5} file_ = {'post[file]': open(file_, 'rb')} if file_ is not None else None return self._get('post/create', params, 'POST', file_) else: raise PybooruAPIError("'file_' or 'source' is required.")
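A hypothetical usage sketch; the mandatory fields are tags plus either file_ or source per the docstring, and the client construction follows the usual Pybooru pattern but its exact arguments are assumptions here.

from pybooru import Moebooru

client = Moebooru('konachan', username='user', password='password')
client.post_create(tags='landscape sky',
                   source='https://example.com/photo.jpg',  # hypothetical URL
                   rating='safe')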
python
def is_pigalle(obj: Any) -> bool: """ Return true if the object passed as argument is a class or an instance of a class belonging to the Pigalle framework. # Arguments obj: The class or object to test. # Returns: bool: * True if class or object is Pigalle. * False else. """ return PygalleBaseClass.is_pigalle_class(obj) or PygalleBaseClass.is_pigalle_instance(obj)
python
def writejar(self, jar): """Schedules all entries from the given ``jar`` to be added to this jar, save for the manifest. :param string jar: the path to the pre-existing jar to graft into this jar """ if not jar or not isinstance(jar, string_types): raise ValueError('The jar path must be a non-empty string') self._jars.append(jar)
java
public static String messageAliasTableLocked(String lockOwner) { return Messages.get().key(Messages.GUI_ALIASES_TABLE_LOCKED_1, lockOwner); }
python
def get_attribute(self, app=None, key=None): """Returns an application attribute :param app: application id :param key: attribute key or None to retrieve all values for the given application :returns: attribute value if key was specified, a list of (key, value) tuples for each attribute of the given application, or a list of (app, key, value) tuples when no application is given :raises: HTTPResponseError in case an HTTP error status was returned """ path = 'getattribute' if app is not None: path += '/' + parse.quote(app, '') if key is not None: path += '/' + parse.quote(self._encode_string(key), '') res = self._make_ocs_request( 'GET', self.OCS_SERVICE_PRIVATEDATA, path ) if res.status_code == 200: tree = ET.fromstring(res.content) self._check_ocs_status(tree) values = [] for element in tree.find('data').iter('element'): app_text = element.find('app').text key_text = element.find('key').text value_text = element.find('value').text or '' if key is None: if app is None: values.append((app_text, key_text, value_text)) else: values.append((key_text, value_text)) else: return value_text if len(values) == 0 and key is not None: return None return values raise HTTPResponseError(res)
python
def _add_value(self, skey, vtyp, key, val, _deser, null_allowed): """ Main method for adding a value to the instance. Does all the checking on type of value and if among allowed values. :param skey: string version of the key :param vtyp: Type of value :param key: original representation of the key :param val: The value to add :param _deser: A deserializer for this value type :param null_allowed: Whether null is an allowed value for this key """ if isinstance(val, list): if (len(val) == 0 or val[0] is None) and null_allowed is False: return if isinstance(vtyp, tuple): vtyp = vtyp[0] if isinstance(vtyp, list): vtype = vtyp[0] if isinstance(val, vtype): if issubclass(vtype, Message): self._dict[skey] = [val] elif _deser: try: self._dict[skey] = _deser(val, sformat="urlencoded") except Exception as exc: raise DecodeError(ERRTXT % (key, exc)) else: setattr(self, skey, [val]) elif isinstance(val, list): if _deser: try: val = _deser(val, sformat="dict") except Exception as exc: raise DecodeError(ERRTXT % (key, exc)) if issubclass(vtype, Message): try: _val = [] for v in val: _val.append(vtype(**dict([(str(x), y) for x, y in v.items()]))) val = _val except Exception as exc: raise DecodeError(ERRTXT % (key, exc)) else: for v in val: if not isinstance(v, vtype): raise DecodeError( ERRTXT % (key, "type != %s (%s)" % ( vtype, type(v)))) self._dict[skey] = val elif isinstance(val, dict): try: val = _deser(val, sformat="dict") except Exception as exc: raise DecodeError(ERRTXT % (key, exc)) else: self._dict[skey] = val else: raise DecodeError(ERRTXT % (key, "type != %s" % vtype)) else: if val is None: self._dict[skey] = None elif isinstance(val, bool): if vtyp is bool: self._dict[skey] = val else: raise ValueError( '"{}", wrong type of value for "{}"'.format(val, skey)) elif isinstance(val, vtyp): # Not necessary to do anything self._dict[skey] = val else: if _deser: try: val = _deser(val, sformat="dict") except Exception as exc: raise DecodeError(ERRTXT % (key, exc)) else: # if isinstance(val, str): # self._dict[skey] = val # elif isinstance(val, list): # if len(val) == 1: # self._dict[skey] = val[0] # elif not len(val): # pass # else: # raise TooManyValues(key) # else: self._dict[skey] = val elif vtyp is int: try: self._dict[skey] = int(val) except (ValueError, TypeError): raise ValueError( '"{}", wrong type of value for "{}"'.format(val, skey)) elif vtyp is bool: raise ValueError( '"{}", wrong type of value for "{}"'.format(val, skey)) elif vtyp != type(val): if vtyp == Message: if type(val) == dict or isinstance(val, str): self._dict[skey] = val else: raise ValueError( '"{}", wrong type of value for "{}"'.format( val, skey)) else: raise ValueError( '"{}", wrong type of value for "{}"'.format(val, skey))
python
def request_io(self, iocb): """Called by a client to start processing a request.""" if _debug: IOController._debug("request_io %r", iocb) # check that the parameter is an IOCB if not isinstance(iocb, IOCB): raise TypeError("IOCB expected") # bind the iocb to this controller iocb.ioController = self try: # hopefully there won't be an error err = None # change the state iocb.ioState = PENDING # let derived class figure out how to process this self.process_io(iocb) except: # extract the error err = sys.exc_info()[1] # if there was an error, abort the request if err: self.abort_io(iocb, err)
python
def get_user_group(self, user=None, group=None): """ Get the user and group information. Parameters ---------- user : str User name or user id (default is the `os.getuid()`). group : str Group name or group id (default is the group of `user`). Returns ------- user : pwd.struct_passwd User object. group : grp.struct_group Group object. """ user = user or os.getuid() # Convert the information we have obtained to a user object try: try: user = pwd.getpwuid(int(user)) except ValueError: user = pwd.getpwnam(user) except KeyError as ex: # pragma: no cover self.logger.fatal("could not resolve user: %s", ex) raise # Get the group group = group or user.pw_gid try: try: group = grp.getgrgid(int(group)) except ValueError: group = grp.getgrnam(group) except KeyError as ex: # pragma: no cover self.logger.fatal("could not resolve group: %s", ex) raise return user, group
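A standalone, POSIX-only sketch of the same resolution order used above: numeric ids first, then names, with the group defaulting to the user's primary gid.

import grp
import os
import pwd

user = pwd.getpwuid(os.getuid())      # current user record
group = grp.getgrgid(user.pw_gid)     # default to the user's primary group
print(user.pw_name, group.gr_name)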
java
private int getPacketSize(Format codecFormat, int milliseconds) throws IllegalArgumentException { String encoding = codecFormat.getEncoding(); if (encoding.equalsIgnoreCase(AudioFormat.GSM) || encoding.equalsIgnoreCase(AudioFormat.GSM_RTP)) { return milliseconds * 4; // 4 bytes per millisec } else if (encoding.equalsIgnoreCase(AudioFormat.ULAW) || encoding.equalsIgnoreCase(AudioFormat.ULAW_RTP)) { return milliseconds * 8; // 8 bytes per millisec } else { throw new IllegalArgumentException("Unknown codec type"); } }
python
def max(self): """Return the maximum value in this histogram. If there are no values in the histogram at all, return 600. Returns: int: The maximum value in the histogram. """ if len(self._data) == 0: return 600 return max(self._data.keys())
python
def ls(system, user, local, include_missing): """List configuration files detected (and/or examined paths).""" # default action is to list *all* auto-detected files if not (system or user or local): system = user = local = True for path in get_configfile_paths(system=system, user=user, local=local, only_existing=not include_missing): click.echo(path)
python
def subvolume_create(name, dest=None, qgroupids=None): ''' Create subvolume `name` in `dest`. Return True if the subvolume is created, False if the subvolume is already there. name Name of the new subvolume dest If not given, the subvolume will be created in the current directory, if given will be in /dest/name qgroupids Add the newly created subvolume to a qgroup. This parameter is a list CLI Example: .. code-block:: bash salt '*' btrfs.subvolume_create var salt '*' btrfs.subvolume_create var dest=/mnt salt '*' btrfs.subvolume_create var qgroupids='[200]' ''' if qgroupids and not isinstance(qgroupids, list): raise CommandExecutionError('Qgroupids parameter must be a list') if dest: name = os.path.join(dest, name) # If the subvolume is there, we are done if subvolume_exists(name): return False cmd = ['btrfs', 'subvolume', 'create'] if isinstance(qgroupids, list): cmd.append('-i') cmd.extend(qgroupids) cmd.append(name) res = __salt__['cmd.run_all'](cmd) salt.utils.fsutils._verify_run(res) return True
python
def readDate(self): """ Read date from the stream. The timezone is ignored as the date is always in UTC. """ ref = self.readInteger(False) if ref & REFERENCE_BIT == 0: return self.context.getObject(ref >> 1) ms = self.stream.read_double() result = util.get_datetime(ms / 1000.0) if self.timezone_offset is not None: result += self.timezone_offset self.context.addObject(result) return result
java
public static List<CommerceNotificationQueueEntry> findBySent( boolean sent, int start, int end, OrderByComparator<CommerceNotificationQueueEntry> orderByComparator) { return getPersistence().findBySent(sent, start, end, orderByComparator); }
python
def save_gradebook_column(self, gradebook_column_form, *args, **kwargs): """Pass through to provider GradebookColumnAdminSession.update_gradebook_column""" # Implemented from kitosid template for - # osid.resource.ResourceAdminSession.update_resource if gradebook_column_form.is_for_update(): return self.update_gradebook_column(gradebook_column_form, *args, **kwargs) else: return self.create_gradebook_column(gradebook_column_form, *args, **kwargs)
java
void notifyAsynchDeletionEnd(AsynchDeletionThread thread) { if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled()) SibTr.entry(tc, "notifyAsynchDeletionEnd", thread); // Under the ADT lock, we'll notify any waiters and set the running // flag to false synchronized (deletionThreadLock) { thread.setRunning(false); deletionThreadLock.notifyAll(); } if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled()) SibTr.exit(tc, "notifyAsynchDeletionEnd"); }
java
public static String guessMimeTypeFromExtension(String extension) { if (extension == null || extension.isEmpty()) { return null; } return extensionToMimeTypeMap.get(extension); }
python
def lift(data, addr, arch, max_bytes=None, max_inst=None, bytes_offset=0, opt_level=1, traceflags=0, strict_block_end=True, inner=False, skip_stmts=False, collect_data_refs=False): """ Recursively lifts blocks using the registered lifters and postprocessors. Tries each lifter in the order in which they are registered on the data to lift. If a lifter raises a LiftingException on the data, it is skipped. If it succeeds and returns a block with a jumpkind of Ijk_NoDecode, all of the lifters are tried on the rest of the data and if they work, their output is appended to the first block. :param arch: The arch to lift the data as. :type arch: :class:`archinfo.Arch` :param addr: The starting address of the block. Affects the IMarks. :param data: The bytes to lift as either a python string of bytes or a cffi buffer object. :param max_bytes: The maximum number of bytes to lift. If set to None, no byte limit is used. :param max_inst: The maximum number of instructions to lift. If set to None, no instruction limit is used. :param bytes_offset: The offset into `data` to start lifting at. :param opt_level: The level of optimization to apply to the IR, -1 through 2. -1 is the strictest unoptimized level, 0 is unoptimized but will perform some lookahead/lookbehind optimizations, 1 performs constant propagation, and 2 performs loop unrolling, which honestly doesn't make much sense in the context of pyvex. The default is 1. :param traceflags: The libVEX traceflags, controlling VEX debug prints. .. note:: Explicitly specifying the number of instructions to lift (`max_inst`) may not always work exactly as expected. For example, on MIPS, it is meaningless to lift a branch or jump instruction without its delay slot. VEX attempts to Do The Right Thing by possibly decoding fewer instructions than requested. Specifically, this means that lifting a branch or jump on MIPS as a single instruction (`max_inst=1`) will result in an empty IRSB, and subsequent attempts to run this block will raise `SimIRSBError('Empty IRSB passed to SimIRSB.')`. .. note:: If no instruction and byte limit is used, pyvex will continue lifting the block until the block ends properly or until it runs out of data to lift. """ if max_bytes is not None and max_bytes <= 0: raise PyVEXError("Cannot lift block with no data (max_bytes <= 0)") if not data: raise PyVEXError("Cannot lift block with no data (data is empty)") if isinstance(data, str): raise TypeError("Cannot pass unicode string as data to lifter") if isinstance(data, bytes): py_data = data c_data = None allow_arch_optimizations = False else: if max_bytes is None: raise PyVEXError("Cannot lift block with ffi pointer and no size (max_bytes is None)") c_data = data py_data = None allow_arch_optimizations = True # In order to attempt to preserve the property that # VEX lifts the same bytes to the same IR at all times when optimizations are disabled # we hack off all of VEX's non-IROpt optimizations when opt_level == -1. # This is intended to enable comparisons of the lifted IR between code that happens to be # found in different contexts. if opt_level < 0: allow_arch_optimizations = False opt_level = 0 for lifter in lifters[arch.name]: try: u_data = data if lifter.REQUIRE_DATA_C: if c_data is None: u_data = ffi.new('unsigned char [%d]' % (len(py_data) + 8), py_data + b'\0' * 8) max_bytes = min(len(py_data), max_bytes) if max_bytes is not None else len(py_data) else: u_data = c_data elif lifter.REQUIRE_DATA_PY: if py_data is None: if max_bytes is None: l.debug('Cannot create py_data from c_data when no max length is given') continue u_data = ffi.buffer(c_data, max_bytes)[:] else: u_data = py_data try: final_irsb = lifter(arch, addr)._lift(u_data, bytes_offset, max_bytes, max_inst, opt_level, traceflags, allow_arch_optimizations, strict_block_end, skip_stmts, collect_data_refs, ) except SkipStatementsError: assert skip_stmts is True final_irsb = lifter(arch, addr)._lift(u_data, bytes_offset, max_bytes, max_inst, opt_level, traceflags, allow_arch_optimizations, strict_block_end, skip_stmts=False, collect_data_refs=collect_data_refs, ) #l.debug('block lifted by %s' % str(lifter)) #l.debug(str(final_irsb)) break except LiftingException as ex: l.debug('Lifting Exception: %s', str(ex)) continue else: final_irsb = IRSB.empty_block(arch, addr, size=0, nxt=Const(const.vex_int_class(arch.bits)(addr)), jumpkind='Ijk_NoDecode', ) final_irsb.invalidate_direct_next() return final_irsb if final_irsb.size > 0 and final_irsb.jumpkind == 'Ijk_NoDecode': # We have decoded a few bytes before we hit an undecodeable instruction. # Determine if this is an intentional NoDecode, like the ud2 instruction on AMD64 nodecode_addr_expr = final_irsb.next if type(nodecode_addr_expr) is Const: nodecode_addr = nodecode_addr_expr.con.value next_irsb_start_addr = addr + final_irsb.size if nodecode_addr != next_irsb_start_addr: # The last instruction of the IRSB has a non-zero length. This is an intentional NoDecode. # The very last instruction has been decoded final_irsb.jumpkind = 'Ijk_NoDecode' final_irsb.next = final_irsb.next final_irsb.invalidate_direct_next() return final_irsb # Decode more bytes if skip_stmts: # When gymrat will be invoked, we will merge future basic blocks to the current basic block. In this case, # statements are usually required. # TODO: In the future, we may further optimize it to handle cases where getting statements in gymrat is not # TODO: required. return lift(data, addr, arch, max_bytes=max_bytes, max_inst=max_inst, bytes_offset=bytes_offset, opt_level=opt_level, traceflags=traceflags, strict_block_end=strict_block_end, skip_stmts=False, collect_data_refs=collect_data_refs, ) next_addr = addr + final_irsb.size if max_bytes is not None: max_bytes -= final_irsb.size if isinstance(data, (str, bytes)): data_left = data[final_irsb.size:] else: data_left = data + final_irsb.size if max_inst is not None: max_inst -= final_irsb.instructions if (max_bytes is None or max_bytes > 0) and (max_inst is None or max_inst > 0) and data_left: more_irsb = lift(data_left, next_addr, arch, max_bytes=max_bytes, max_inst=max_inst, bytes_offset=bytes_offset, opt_level=opt_level, traceflags=traceflags, strict_block_end=strict_block_end, inner=True, skip_stmts=False, collect_data_refs=collect_data_refs, ) if more_irsb.size: # Successfully decoded more bytes final_irsb.extend(more_irsb) elif max_bytes == 0: # We have no more bytes left. Mark the jumpkind of the IRSB as Ijk_Boring if final_irsb.size > 0 and final_irsb.jumpkind == 'Ijk_NoDecode': final_irsb.jumpkind = 'Ijk_Boring' final_irsb.next = Const(vex_int_class(arch.bits)(final_irsb.addr + final_irsb.size)) if not inner: for postprocessor in postprocessors[arch.name]: try: postprocessor(final_irsb).postprocess() except NeedStatementsNotification: # The post-processor cannot work without statements. Re-lift the current block with skip_stmts=False if not skip_stmts: # sanity check # Why does the post-processor raise NeedStatementsNotification when skip_stmts is False? raise TypeError("Bad post-processor %s: " "NeedStatementsNotification is raised when statements are available." % postprocessor.__class__) # Re-lift the current IRSB return lift(data, addr, arch, max_bytes=max_bytes, max_inst=max_inst, bytes_offset=bytes_offset, opt_level=opt_level, traceflags=traceflags, strict_block_end=strict_block_end, inner=inner, skip_stmts=False, collect_data_refs=collect_data_refs, ) except LiftingException: continue return final_irsb
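A minimal usage sketch for the lifter above, through pyvex's public entry point; this assumes the standard pyvex and archinfo packages, and the byte string and load address are illustrative placeholders.

import pyvex
import archinfo

# x86-64 bytes for: push rbp; mov rbp, rsp; ret
data = b"\x55\x48\x89\xe5\xc3"
irsb = pyvex.lift(data, 0x400000, archinfo.ArchAMD64())
irsb.pp()                 # pretty-print the lifted VEX IR
print(irsb.jumpkind)      # 'Ijk_Ret' once the ret instruction is decoded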
java
private void initializeStaticPolicy(List<ProGradePolicyEntry> grantEntriesList) throws Exception { // grant codeBase "file:${{java.ext.dirs}}/*" { // permission java.security.AllPermission; // }; ProGradePolicyEntry p1 = new ProGradePolicyEntry(true, debug); Certificate[] certificates = null; URL url = new URL(expandStringWithProperty("file:${{java.ext.dirs}}/*")); CodeSource cs = new CodeSource(adaptURL(url), certificates); p1.setCodeSource(cs); p1.addPermission(new AllPermission()); grantEntriesList.add(p1); // grant { // permission java.lang.RuntimePermission "stopThread"; // permission java.net.SocketPermission "localhost:1024-", "listen"; // permission java.util.PropertyPermission "java.version", "read"; // permission java.util.PropertyPermission "java.vendor", "read"; // permission java.util.PropertyPermission "java.vendor.url", "read"; // permission java.util.PropertyPermission "java.class.version", "read"; // permission java.util.PropertyPermission "os.name", "read"; // permission java.util.PropertyPermission "os.version", "read"; // permission java.util.PropertyPermission "os.arch", "read"; // permission java.util.PropertyPermission "file.separator", "read"; // permission java.util.PropertyPermission "path.separator", "read"; // permission java.util.PropertyPermission "line.separator", "read"; // permission java.util.PropertyPermission "java.specification.version", "read"; // permission java.util.PropertyPermission "java.specification.vendor", "read"; // permission java.util.PropertyPermission "java.specification.name", "read"; // permission java.util.PropertyPermission "java.vm.specification.version", "read"; // permission java.util.PropertyPermission "java.vm.specification.vendor", "read"; // permission java.util.PropertyPermission "java.vm.specification.name", "read"; // permission java.util.PropertyPermission "java.vm.version", "read"; // permission java.util.PropertyPermission "java.vm.vendor", "read"; // permission java.util.PropertyPermission "java.vm.name", "read"; // }; ProGradePolicyEntry p2 = new ProGradePolicyEntry(true, debug); p2.addPermission(new RuntimePermission("stopThread")); p2.addPermission(new SocketPermission("localhost:1024-", "listen")); p2.addPermission(new PropertyPermission("java.version", "read")); p2.addPermission(new PropertyPermission("java.vendor", "read")); p2.addPermission(new PropertyPermission("java.vendor.url", "read")); p2.addPermission(new PropertyPermission("java.class.version", "read")); p2.addPermission(new PropertyPermission("os.name", "read")); p2.addPermission(new PropertyPermission("os.version", "read")); p2.addPermission(new PropertyPermission("os.arch", "read")); p2.addPermission(new PropertyPermission("file.separator", "read")); p2.addPermission(new PropertyPermission("path.separator", "read")); p2.addPermission(new PropertyPermission("line.separator", "read")); p2.addPermission(new PropertyPermission("java.specification.version", "read")); p2.addPermission(new PropertyPermission("java.specification.vendor", "read")); p2.addPermission(new PropertyPermission("java.specification.name", "read")); p2.addPermission(new PropertyPermission("java.vm.specification.version", "read")); p2.addPermission(new PropertyPermission("java.vm.specification.vendor", "read")); p2.addPermission(new PropertyPermission("java.vm.specification.name", "read")); p2.addPermission(new PropertyPermission("java.vm.version", "read")); p2.addPermission(new PropertyPermission("java.vm.vendor", "read")); p2.addPermission(new PropertyPermission("java.vm.name", "read")); grantEntriesList.add(p2); }
java
public ModifiableDBIDs determineIDs(DBIDs superSetIDs, HyperBoundingBox interval, double d_min, double d_max) { StringBuilder msg = LOG.isDebugging() ? new StringBuilder() : null; if(msg != null) { msg.append("interval ").append(interval); } ModifiableDBIDs childIDs = DBIDUtil.newHashSet(superSetIDs.size()); Map<DBID, Double> minima = f_minima.get(interval); Map<DBID, Double> maxima = f_maxima.get(interval); if(minima == null || maxima == null) { minima = new HashMap<>(); f_minima.put(interval, minima); maxima = new HashMap<>(); f_maxima.put(interval, maxima); } for(DBIDIter iter = superSetIDs.iter(); iter.valid(); iter.advance()) { DBID id = DBIDUtil.deref(iter); Double f_min = minima.get(id); Double f_max = maxima.get(id); if(f_min == null) { ParameterizationFunction f = database.get(id); HyperBoundingBox minMax = f.determineAlphaMinMax(interval); f_min = f.function(SpatialUtil.getMin(minMax)); f_max = f.function(SpatialUtil.getMax(minMax)); minima.put(id, f_min); maxima.put(id, f_max); } if(msg != null) { msg.append("\n\nf_min ").append(f_min); msg.append("\nf_max ").append(f_max); msg.append("\nd_min ").append(d_min); msg.append("\nd_max ").append(d_max); } if(f_min - f_max > ParameterizationFunction.DELTA) { throw new IllegalArgumentException("Houston, we have a problem: f_min > f_max! " + "\nf_min[" + FormatUtil.format(SpatialUtil.centroid(interval)) + "] = " + f_min + "\nf_max[" + FormatUtil.format(SpatialUtil.centroid(interval)) + "] = " + f_max + "\nf " + database.get(id)); } if(f_min <= d_max && f_max >= d_min) { childIDs.add(id); if(msg != null) { msg.append("\nid ").append(id).append(" appended"); } } else { if(msg != null) { msg.append("\nid ").append(id).append(" NOT appended"); } } } if(msg != null) { msg.append("\nchildIds ").append(childIDs.size()); LOG.debugFine(msg.toString()); } if(childIDs.size() < minPts) { return null; } else { return childIDs; } }
java
static IOException ioeToSocketException(IOException ioe) { if (ioe.getClass().equals(IOException.class)) { // "se" could be a new class instead of SocketException. IOException se = new SocketException("Original Exception : " + ioe); se.initCause(ioe); /* Change the stacktrace so that original trace is not truncated * when printed.*/ se.setStackTrace(ioe.getStackTrace()); return se; } // otherwise just return the same exception. return ioe; }
java
public void setInputChannels(java.util.Collection<Integer> inputChannels) { if (inputChannels == null) { this.inputChannels = null; return; } this.inputChannels = new java.util.ArrayList<Integer>(inputChannels); }
python
def rewrap(s, width=COLS): """ Join all lines from input string and wrap it at specified width """ s = ' '.join([l.strip() for l in s.strip().split('\n')]) return '\n'.join(textwrap.wrap(s, width))
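A quick self-contained demo of the logic in rewrap; since COLS is defined elsewhere in that module, the width is pinned to 40 here as an assumption for the example.

import textwrap

text = """This is a paragraph
that was hard-wrapped
at an awkward width."""
# join the hard-wrapped lines, then re-wrap at the chosen width
joined = ' '.join(line.strip() for line in text.strip().split('\n'))
print('\n'.join(textwrap.wrap(joined, 40)))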
java
private void startConnecting() { // Open the connecting socket. try { boolean rc = open(); // Connect may succeed in synchronous manner. if (rc) { handle = ioObject.addFd(fd); connectEvent(); } // Connection establishment may be delayed. Poll for its completion. else { handle = ioObject.addFd(fd); ioObject.setPollConnect(handle); socket.eventConnectDelayed(addr.toString(), -1); } } catch (RuntimeException | IOException e) { // Handle any other error condition by eventual reconnect. if (fd != null) { close(); } addReconnectTimer(); } }
java
public boolean contains(BoundingBox boundingBox) { return getMinLongitude() <= boundingBox.getMinLongitude() && getMaxLongitude() >= boundingBox.getMaxLongitude() && getMinLatitude() <= boundingBox.getMinLatitude() && getMaxLatitude() >= boundingBox.getMaxLatitude(); }
java
public static CmsSubscriptionReadMode modeForName(String modeName) { if (MODE_NAME_ALL.equals(modeName)) { return ALL; } else if (MODE_NAME_VISITED.equals(modeName)) { return VISITED; } return UNVISITED; }
java
@Nullable public static <DATATYPE> Class <DATATYPE> getClassFromNameSafe (@Nonnull final String sName) { try { return getClassFromName (sName); } catch (final ClassNotFoundException e) { return null; } }
python
def _get_from_cache(self, sector, scale, eft, basis): """Try to load a set of Wilson coefficients from the cache, else return None.""" try: return self._cache[eft][scale][basis][sector] except KeyError: return None
python
def forecast(place, series=True): """NOAA weather forecast for a location""" lat, lon = place url = "http://graphical.weather.gov/xml/SOAP_server/ndfdXMLclient.php?" + \ "whichClient=NDFDgen&" + "lat=%s&lon=%s&" % (lat, lon) + \ "Unit=e&temp=temp&wspd=wspd&sky=sky&wx=wx&rh=rh&" + \ "product=time-series&Submit=Submit" logger.debug(url) res = urllib2.urlopen(url).read() root = ET.fromstring(res) time_series = [(i.text) for i in \ root.findall('./data/time-layout')[0].iterfind('start-valid-time')] logger.debug(res) # knots to mph wind_speed = [float(i.text)*1.15 for i in \ root.findall('./data/parameters/wind-speed')[0].iterfind('value')] cloud_cover = [float(i.text)/100.0 for i in \ root.findall('./data/parameters/cloud-amount')[0].iterfind('value')] temperature = [float(i.text) for i in \ root.findall('./data/parameters/temperature')[0].iterfind('value')] if not series: return {'cloudCover':cloud_cover[0], \ 'temperature':temperature[0], \ 'windSpeed':wind_speed[0], \ 'start-valid-time':time_series[0]} else: return {'cloudCover':cloud_cover, \ 'temperature':temperature, \ 'windSpeed':wind_speed, \ 'start-valid-time':time_series}
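A hypothetical call to the forecast helper above; the coordinates are placeholders, the NDFD endpoint must be reachable at call time, and the function as written relies on Python 2's urllib2.

place = (38.9072, -77.0369)        # e.g. Washington, DC
now = forecast(place, series=False)
print(now['temperature'], 'F at', now['start-valid-time'])
week = forecast(place)             # series=True returns parallel lists
print(len(week['cloudCover']), 'forecast points')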
python
def filterVariantAnnotation(self, vann): """ Returns true when an annotation should be included. """ # TODO reintroduce feature ID search ret = False if len(self._effects) != 0 and not vann.transcript_effects: return False elif len(self._effects) == 0: return True for teff in vann.transcript_effects: if self.filterEffect(teff): ret = True return ret
python
def create_namespaced_daemon_set(self, namespace, body, **kwargs): """ create a DaemonSet This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.create_namespaced_daemon_set(namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str namespace: object name and auth scope, such as for teams and projects (required) :param V1DaemonSet body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. :return: V1DaemonSet If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.create_namespaced_daemon_set_with_http_info(namespace, body, **kwargs) else: (data) = self.create_namespaced_daemon_set_with_http_info(namespace, body, **kwargs) return data
java
public DetectFacesRequest withAttributes(Attribute... attributes) { java.util.ArrayList<String> attributesCopy = new java.util.ArrayList<String>(attributes.length); for (Attribute value : attributes) { attributesCopy.add(value.toString()); } if (getAttributes() == null) { setAttributes(attributesCopy); } else { getAttributes().addAll(attributesCopy); } return this; }
python
def set_unit(self, unit): """Set the GPS step scale """ # accept all core time units if unit is None or (isinstance(unit, units.NamedUnit) and unit.physical_type == 'time'): self._unit = unit return # convert float to custom unit in seconds if isinstance(unit, Number): unit = units.Unit(unit * units.second) # otherwise, should be able to convert to a time unit try: unit = units.Unit(unit) except ValueError as exc: # catch annoying plurals try: unit = units.Unit(str(unit).rstrip('s')) except ValueError: raise exc # decompose and check that it's actually a time unit dec = unit.decompose() if dec.bases != [units.second]: raise ValueError("Cannot set GPS unit to %s" % unit) # check equivalent units for other in TIME_UNITS: if other.decompose().scale == dec.scale: self._unit = other return raise ValueError("Unrecognised unit: %s" % unit)
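A standalone sketch of the time-unit check this setter performs, assuming astropy; only units whose decomposition reduces to pure seconds qualify as GPS step scales.

from astropy import units

def is_time_unit(candidate):
    """Return True when `candidate` decomposes to pure seconds."""
    try:
        unit = units.Unit(candidate)
    except ValueError:
        return False
    return unit.decompose().bases == [units.second]

print(is_time_unit('min'))   # True: one minute is 60 s
print(is_time_unit('m'))     # False: metres are not a time unit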
python
def system_info(self): """Return system information.""" res = self.client.service.SystemInfo() res = {ustr(x[0]): x[1] for x in res[0]} to_str = lambda arr: '.'.join([ustr(x) for x in arr[0]]) res['OSVersion'] = to_str(res['OSVersion']) res['RuntimeVersion'] = to_str(res['RuntimeVersion']) res['Version'] = to_str(res['Version']) res['Name'] = ustr(res['Name']) res['Server'] = ustr(res['Server']) res['LocalNameCheck'] = ustr(res['LocalNameCheck']) res['UserHostAddress'] = ustr(res['UserHostAddress']) return res
python
def start_consuming(self, to_tuple=False, auto_decode=True): """Start consuming messages. :param bool to_tuple: Should incoming messages be converted to a tuple before delivery. :param bool auto_decode: Auto-decode strings when possible. :raises AMQPChannelError: Raises if the channel encountered an error. :raises AMQPConnectionError: Raises if the connection encountered an error. :return: """ while not self.is_closed: self.process_data_events( to_tuple=to_tuple, auto_decode=auto_decode ) if self.consumer_tags: sleep(IDLE_WAIT) continue break
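This consumer loop looks like it belongs to an AMQP channel in the style of amqpstorm; a hedged usage sketch follows, with the connection parameters and queue name as placeholders.

import amqpstorm

def on_message(message):
    print(message.body)
    message.ack()

connection = amqpstorm.Connection('localhost', 'guest', 'guest')
channel = connection.channel()
channel.basic.consume(on_message, queue='demo-queue', no_ack=False)
channel.start_consuming()   # blocks, polling process_data_events in a loop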
java
@Override public EEnum getIfcSequenceEnum() { if (ifcSequenceEnumEEnum == null) { ifcSequenceEnumEEnum = (EEnum) EPackage.Registry.INSTANCE.getEPackage(Ifc4Package.eNS_URI).getEClassifiers() .get(1061); } return ifcSequenceEnumEEnum; }
java
private Map<String,Object> getMap() { if (m_data == null) m_data = new HashMap<String,Object>(); return (Map<String, Object>)m_data; }
python
def prompt(self, for_=None, error_type=None, card_type=None, attempt=None, **kwargs): """ Create a <Prompt> element :param for_: Name of the credit card data element :param error_type: Type of error :param card_type: Type of the credit card :param attempt: Current attempt count :param kwargs: additional attributes :returns: <Prompt> element """ return self.nest(Prompt( for_=for_, error_type=error_type, card_type=card_type, attempt=attempt, **kwargs ))
java
public Matrix3x2f translate(Vector2fc offset, Matrix3x2f dest) { return translate(offset.x(), offset.y(), dest); }
python
def _check_config(): """Create config files as necessary.""" config.CONFIG_DIR.mkdir(parents=True, exist_ok=True) verfile = config.CONFIG_DIR / '.version' uptodate = verfile.is_file() and verfile.read_text() == __version__ if not uptodate: verfile.write_text(__version__) if not (uptodate and config.CONFIG_FILE.is_file()): conf.create_config_(update=True) for stfile in ('stagpy-paper.mplstyle', 'stagpy-slides.mplstyle'): stfile_conf = config.CONFIG_DIR / stfile if not (uptodate and stfile_conf.is_file()): stfile_local = pathlib.Path(__file__).parent / stfile shutil.copy(str(stfile_local), str(stfile_conf))
java
public int nnz() { int sum = 0; for (int i = 0; i < N; i++) sum += rows[i].nnz(); return sum; }
python
def get_rlzs_by_gsim_grp(self, sm_lt_path=None, trts=None): """ :returns: a dictionary src_group_id -> gsim -> rlzs """ self.rlzs_assoc = self.get_rlzs_assoc(sm_lt_path, trts) dic = {grp.id: self.rlzs_assoc.get_rlzs_by_gsim(grp.id) for sm in self.source_models for grp in sm.src_groups} return dic
java
private ZapMenuItem getMenuToolsFilter() { if (menuToolsFilter == null) { menuToolsFilter = new ZapMenuItem("menu.tools.filter"); menuToolsFilter.addActionListener(new java.awt.event.ActionListener() { @Override public void actionPerformed(java.awt.event.ActionEvent e) { FilterDialog dialog = new FilterDialog(getView().getMainFrame()); dialog.setAllFilters(filterFactory.getAllFilter()); dialog.showDialog(false); boolean startThread = false; for (Filter filter : filterFactory.getAllFilter()) { if (filter.isEnabled()) { startThread = true; break; } } if (startThread) { if (timerFilterThread == null) { timerFilterThread = new TimerFilterThread(filterFactory.getAllFilter()); timerFilterThread.start(); } } else if (timerFilterThread != null) { timerFilterThread.setStopped(); timerFilterThread = null; } } }); } return menuToolsFilter; }
python
def search_by(lookup, tgt_type='compound', minion_id=None): ''' Search a dictionary of target strings for matching targets This is the inverse of :py:func:`match.filter_by <salt.modules.match.filter_by>` and allows matching values instead of matching keys. A minion can be matched by multiple entries. .. versionadded:: 2017.7.0 CLI Example: .. code-block:: bash salt '*' match.search_by '{web: [node1, node2], db: [node2, node]}' Pillar Example: .. code-block:: jinja {% set roles = salt.match.search_by({ 'web': ['G@os_family:Debian not nodeX'], 'db': ['L@node2,node3 and G@datacenter:west'], 'caching': ['node3', 'node4'], }) %} # Make the filtered data available to Pillar: roles: {{ roles | yaml() }} ''' expr_funcs = dict(inspect.getmembers(sys.modules[__name__], predicate=inspect.isfunction)) matches = [] for key, target_list in lookup.items(): for target in target_list: params = (target, minion_id) if minion_id else (target, ) if expr_funcs[tgt_type](*params): matches.append(key) return matches or None
python
def get_by_page(query, page, page_size): """ Paginates a queryset. :param query: the queryset to paginate :param page: page number :param page_size: number of objects per page :return: the requested page of results """ pager = Paginator(query, page_size) try: models = pager.page(page) except PageNotAnInteger: # If page is not an integer, deliver first page. models = pager.page(1) except EmptyPage: # If page is out of range (e.g. 9999), deliver last page of results. models = pager.page(pager.num_pages) return models
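A minimal Django usage sketch for the pagination helper above; the Article model and the requested page are illustrative placeholders, and the helper itself needs Paginator, PageNotAnInteger, and EmptyPage in scope.

from django.core.paginator import Paginator, PageNotAnInteger, EmptyPage

articles = Article.objects.order_by('-created')   # hypothetical model
page = get_by_page(articles, page=2, page_size=25)
for article in page:
    print(article.pk)
print(page.number, 'of', page.paginator.num_pages)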
python
def _init_health_checker(self): """ start the health checker stub and start a thread to ping it every 30 seconds :return: None """ stub = Health_pb2_grpc.HealthStub(channel=self._channel) self._health_check = stub.Check health_check_thread = threading.Thread(target=self._health_check_thread) health_check_thread.daemon = True health_check_thread.start()
java
public void setExtension(boolean newExtension) { boolean oldExtension = extension; extension = newExtension; if (eNotificationRequired()) eNotify(new ENotificationImpl(this, Notification.SET, XtypePackage.XIMPORT_DECLARATION__EXTENSION, oldExtension, extension)); }
python
def pack_small_tensors(tower_grads, max_bytes=0): """Concatenate gradients together more intelligently. Does binpacking Args: tower_grads: List of lists of (gradient, variable) tuples. max_bytes: Int giving max number of bytes in a tensor that may be considered small. """ assert max_bytes >= 0 orig_grads = [g for g, _ in tower_grads[0]] # Check to make sure sizes are accurate; not entirely important assert all(g.dtype == tf.float32 for g in orig_grads) sizes = [4 * g.shape.num_elements() for g in orig_grads] print_stats(sizes) small_ranges = [] large_indices = [] new_sizes = [] def end_interval(indices, small_ranges, large_indices): if len(indices) > 1: small_ranges.insert(0, [indices[0], indices[-1]]) else: large_indices.insert(0, indices[0]) cur_range = [] cur_size = 0 for i, s in reversed(list(enumerate(sizes))): if cur_size > max_bytes: end_interval(cur_range, small_ranges, large_indices) new_sizes.insert(0, cur_size) cur_range = [] cur_size = 0 cur_range.insert(0, i) cur_size += s end_interval(cur_range, small_ranges, large_indices) new_sizes.insert(0, cur_size) print_stats(new_sizes) num_gv = len(orig_grads) packing = {} if len(small_ranges): new_tower_grads = [] for dev_idx, gv_list in enumerate(tower_grads): assert len(gv_list) == num_gv, ( "Possible cause: " "Networks constructed on different workers " "don't have the same number of variables. " "If you use tf.GraphKeys or tf.global_variables() " "with multiple graphs per worker during network " "construction, you need to use " "appropriate scopes, see " "https://github.com/ray-project/ray/issues/3136") new_gv_list = [] for r in small_ranges: key = "%d:%d" % (dev_idx, len(new_gv_list)) new_gv_list.append((pack_range(key, packing, gv_list, r), "packing_var_placeholder")) for i in large_indices: new_gv_list.append(gv_list[i]) new_tower_grads.append(new_gv_list) return new_tower_grads, packing else: return tower_grads, None
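A self-contained sketch of the reverse-scan grouping used above, with the TensorFlow parts stripped out: indices accumulate from the end of the list until the running byte count exceeds max_bytes, multi-element runs become packable ranges, and singletons stay unpacked. The sizes are illustrative.

def group_small(sizes, max_bytes):
    small_ranges, large_indices = [], []

    def end_interval(indices):
        if len(indices) > 1:
            small_ranges.insert(0, [indices[0], indices[-1]])
        elif indices:
            large_indices.insert(0, indices[0])

    cur_range, cur_size = [], 0
    for i, s in reversed(list(enumerate(sizes))):
        if cur_size > max_bytes:
            # close the current run once it exceeds the byte budget
            end_interval(cur_range)
            cur_range, cur_size = [], 0
        cur_range.insert(0, i)
        cur_size += s
    end_interval(cur_range)
    return small_ranges, large_indices

print(group_small([4, 4, 4096, 4, 4], max_bytes=16))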
python
def arp_ip(opcode, src_mac, src_ip, dst_mac, dst_ip): """A convenient wrapper for IPv4 ARP for Ethernet. This is an equivalent of the following code. arp(ARP_HW_TYPE_ETHERNET, ether.ETH_TYPE_IP, \ 6, 4, opcode, src_mac, src_ip, dst_mac, dst_ip) """ return arp(ARP_HW_TYPE_ETHERNET, ether.ETH_TYPE_IP, 6, # ether mac address length 4, # ipv4 address length opcode, src_mac, src_ip, dst_mac, dst_ip)
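A hedged usage sketch assuming Ryu's packet library, which this helper appears to come from; the MAC and IP values are placeholders.

from ryu.lib.packet import arp, ethernet, packet
from ryu.ofproto import ether

pkt = packet.Packet()
pkt.add_protocol(ethernet.ethernet(dst='ff:ff:ff:ff:ff:ff',
                                   src='02:00:00:00:00:01',
                                   ethertype=ether.ETH_TYPE_ARP))
pkt.add_protocol(arp.arp_ip(arp.ARP_REQUEST,
                            '02:00:00:00:00:01', '10.0.0.1',
                            '00:00:00:00:00:00', '10.0.0.2'))
pkt.serialize()   # builds the wire-format ARP request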
python
def overlap(self, x, j=None): """Checks how many ellipsoid(s) `x` falls within, skipping the `j`-th ellipsoid.""" q = len(self.within(x, j=j)) return q
java
@RequestMapping(value = "api/servergroup/{id}", method = RequestMethod.DELETE) public @ResponseBody List<ServerGroup> deleteServerGroup(Model model, @PathVariable int id, @RequestParam(value = "profileId", required = false) Integer profileId, @RequestParam(value = "clientUUID", required = false) String clientUUID, @RequestParam(value = "profileIdentifier", required = false) String profileIdentifier) throws Exception { if (profileId == null && profileIdentifier == null) { throw new Exception("profileId required"); } if (profileId == null) { profileId = ProfileService.getInstance().getIdFromName(profileIdentifier); } int clientId = ClientService.getInstance().findClient(clientUUID, profileId).getId(); ServerGroup group = ServerRedirectService.getInstance().getServerGroup(id, profileId); ServerRedirectService.getInstance().deleteServerGroup(id); return ServerRedirectService.getInstance().tableServerGroups(clientId); }
java
@Override public void addRow(final KeyValue row) { if (this.key == null) { throw new IllegalStateException("setRow was never called on " + this); } final byte[] key = row.key(); if (Bytes.memcmp(this.key, key, Const.SALT_WIDTH(), key.length - Const.SALT_WIDTH()) != 0) { throw new IllegalDataException("Attempt to add a different row=" + row + ", this=" + this); } final byte[] remote_qual = row.qualifier(); final byte[] remote_val = row.value(); final byte[] merged_qualifiers = new byte[qualifiers.length + remote_qual.length]; final byte[] merged_values = new byte[values.length + remote_val.length]; int remote_q_index = 0; int local_q_index = 0; int merged_q_index = 0; int remote_v_index = 0; int local_v_index = 0; int merged_v_index = 0; short v_length; short q_length; while (remote_q_index < remote_qual.length || local_q_index < qualifiers.length) { // if the remote q has finished, we just need to handle left over locals if (remote_q_index >= remote_qual.length) { v_length = Internal.getValueLengthFromQualifier(qualifiers, local_q_index); System.arraycopy(values, local_v_index, merged_values, merged_v_index, v_length); local_v_index += v_length; merged_v_index += v_length; q_length = Internal.getQualifierLength(qualifiers, local_q_index); System.arraycopy(qualifiers, local_q_index, merged_qualifiers, merged_q_index, q_length); local_q_index += q_length; merged_q_index += q_length; continue; } // if the local q has finished, we need to handle the left over remotes if (local_q_index >= qualifiers.length) { v_length = Internal.getValueLengthFromQualifier(remote_qual, remote_q_index); System.arraycopy(remote_val, remote_v_index, merged_values, merged_v_index, v_length); remote_v_index += v_length; merged_v_index += v_length; q_length = Internal.getQualifierLength(remote_qual, remote_q_index); System.arraycopy(remote_qual, remote_q_index, merged_qualifiers, merged_q_index, q_length); remote_q_index += q_length; merged_q_index += q_length; continue; } // for dupes, we just need to skip and continue final int sort = Internal.compareQualifiers(remote_qual, remote_q_index, qualifiers, local_q_index); if (sort == 0) { //LOG.debug("Discarding duplicate timestamp: " + // Internal.getOffsetFromQualifier(remote_qual, remote_q_index)); v_length = Internal.getValueLengthFromQualifier(remote_qual, remote_q_index); remote_v_index += v_length; q_length = Internal.getQualifierLength(remote_qual, remote_q_index); remote_q_index += q_length; continue; } if (sort < 0) { v_length = Internal.getValueLengthFromQualifier(remote_qual, remote_q_index); System.arraycopy(remote_val, remote_v_index, merged_values, merged_v_index, v_length); remote_v_index += v_length; merged_v_index += v_length; q_length = Internal.getQualifierLength(remote_qual, remote_q_index); System.arraycopy(remote_qual, remote_q_index, merged_qualifiers, merged_q_index, q_length); remote_q_index += q_length; merged_q_index += q_length; } else { v_length = Internal.getValueLengthFromQualifier(qualifiers, local_q_index); System.arraycopy(values, local_v_index, merged_values, merged_v_index, v_length); local_v_index += v_length; merged_v_index += v_length; q_length = Internal.getQualifierLength(qualifiers, local_q_index); System.arraycopy(qualifiers, local_q_index, merged_qualifiers, merged_q_index, q_length); local_q_index += q_length; merged_q_index += q_length; } } // we may have skipped some columns if we were given duplicates. Since we // had allocated enough bytes to hold the incoming row, we need to shrink // the final results if (merged_q_index == merged_qualifiers.length) { qualifiers = merged_qualifiers; } else { qualifiers = Arrays.copyOfRange(merged_qualifiers, 0, merged_q_index); } // set the meta bit based on the local and remote metas byte meta = 0; if ((values[values.length - 1] & Const.MS_MIXED_COMPACT) == Const.MS_MIXED_COMPACT || (remote_val[remote_val.length - 1] & Const.MS_MIXED_COMPACT) == Const.MS_MIXED_COMPACT) { meta = Const.MS_MIXED_COMPACT; } values = Arrays.copyOfRange(merged_values, 0, merged_v_index + 1); values[values.length - 1] = meta; }
java
private void createChameleonMarkerFile(Path parent) throws IOException { final Path chameleon = parent.resolve("chameleonrunner"); Files.write(chameleon, "Chameleon Runner was there".getBytes()); }
python
def addNoiseToVector(inputVector, noiseLevel, vectorType): """ Add noise to SDRs @param inputVector (array) binary vector to be corrupted @param noiseLevel (float) amount of noise to be applied on the vector. @param vectorType (string) "sparse" or "dense" """ if vectorType == 'sparse': corruptSparseVector(inputVector, noiseLevel) elif vectorType == 'dense': corruptDenseVector(inputVector, noiseLevel) else: raise ValueError("vectorType must be 'sparse' or 'dense'")
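The corruptDenseVector helper this function delegates to is assumed to flip each bit with probability noiseLevel; a self-contained numpy sketch of that behavior for the dense case.

import numpy as np

def corrupt_dense(vector, noise_level, rng=np.random):
    """Flip each bit of a binary vector with probability noise_level."""
    flip = rng.rand(len(vector)) < noise_level
    vector[flip] = 1 - vector[flip]

v = np.zeros(100, dtype=int)
v[:10] = 1
corrupt_dense(v, 0.05)
print(v.sum(), 'active bits after noise')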