,bio_main 0,"def show_slug_with_level(context, page, lang=None, fallback=True): """"""Display slug with level by language."""""" if not lang: lang = context.get('lang', pages_settings.PAGE_DEFAULT_LANGUAGE) page = get_page_from_string_or_id(page, lang) if not page: return '' return {'content': page.slug_with_level(lang)}" 1,"def show_revisions(context, page, content_type, lang=None): """"""Render the last 10 revisions of a page content with a list using the ``pages/revisions.html`` template"""""" if not pages_settings.PAGE_CONTENT_REVISION: return {'revisions': None} revisions = Content.objects.filter(page=page, language=lang, type=content_type).order_by('-creation_date') if len(revisions) < 2: return {'revisions': None} return {'revisions': revisions[0:10]}" 2,"def do_videoplaceholder(parser, token): """""" Method that parse the imageplaceholder template tag. """""" name, params = parse_placeholder(parser, token) return VideoPlaceholderNode(name, **params)" 3,"def do_get_pages_with_tag(parser, token): """""" Return Pages with given tag Syntax:: {% get_pages_with_tag as %} Example use: {% get_pages_with_tag ""footer"" as pages %} """""" bits = token.split_contents() if 4 != len(bits): raise TemplateSyntaxError('%r expects 2 arguments' % bits[0]) if bits[-2] != 'as': raise TemplateSyntaxError( '%r expects ""as"" as the second last argument' % bits[0]) varname = bits[-1] tag = parser.compile_filter(bits[1]) varname = bits[-1] return GetPagesWithTagNode(tag, varname)" 4,"def parserunstats(self): """"""Parses the XML run statistics file (GenerateFASTQRunStatistics.xml). In some cases, the file is not available. Equivalent data can be pulled from Basespace.Generate a text file name indexingQC.txt containing the copied tables from the Indexing QC tab of the run on Basespace"""""" # metadata = GenObject() # If the default file GenerateFASTQRunStatistics.xml is present, parse it if os.path.isfile(os.path.join(self.path, ""GenerateFASTQRunStatistics.xml"")): # Create a list of keys for which values are to be extracted datalist = [""SampleNumber"", ""SampleID"", ""SampleName"", ""NumberOfClustersPF""] # Load the file as an xml ElementTree object runstatistics = ElementTree.ElementTree(file=os.path.join(self.path, ""GenerateFASTQRunStatistics.xml"")) # Iterate through all the elements in the object # .iterfind() allow for the matching and iterating though matches # This is stored as a float to allow subsequent calculations tclusterspf = [float(element.text) for element in runstatistics.iterfind(""RunStats/NumberOfClustersPF"")][0] # Iterate through all the elements (strains) in the OverallSamples/SummarizedSampleStatistics category for element in runstatistics.iterfind(""OverallSamples/SummarizedSampleStatistics""): # List comprehension. 
Essentially iterate through each element for each category in datalist: # (element.iter(category) and pull out the value for nestedelement straindata = [nestedelement.text for category in datalist for nestedelement in element.iter(category)] # Try and replicate the Illumina rules to create file names from ""Sample_Name"" samplename = samplenamer(straindata, 1) # Calculate the percentage of clusters associated with each strain # noinspection PyTypeChecker percentperstrain = ""{:.2f}"".format((float(straindata[3]) / tclusterspf * 100)) try: # Use the sample number -1 as the index in the list of objects created in parsesamplesheet strainindex = int(straindata[0]) - 1 # Set run to the .run object of self.samples[index] run = self.samples[strainindex].run # An assertion that compares the sample computer above to the previously entered sample name # to ensure that the samples are the same assert self.samples[strainindex].name == samplename, \ ""Sample name does not match object name {0!r:s}"".format(straindata[1]) # Add the appropriate values to the strain metadata object run.SampleNumber = straindata[0] run.NumberofClustersPF = straindata[3] run.TotalClustersinRun = tclusterspf run.PercentOfClusters = percentperstrain run.flowcell = self.flowcell run.instrument = self.instrument except IndexError: pass elif os.path.isfile(os.path.join(self.path, 'indexingQC.txt')): # Grab the first element from the second line in the file tclusterspf = float(getline(os.path.join(self.path, ""indexingQC.txt""), 2).split(""\t"")[0]) # Open the file and extract the relevant data with open(os.path.join(""indexingQC.txt"")) as indexqc: # Iterate through the file for line in indexqc: # Once ""Index"" is encountered, iterate through the rest of the file if ""Index"" in line: for subline in indexqc: straindata = [x.rstrip() for x in subline.rstrip().split(""\t"")] # Try and replicate the Illumina rules to create file names from ""Sample_Name"" samplename = samplenamer(straindata, 1) # Use the sample number -1 as the index in the list of objects created in parsesamplesheet strainindex = int(straindata[0]) - 1 # Set run to the .run object of self.samples[index] run = self.samples[strainindex].run # An assertion that compares the sample computer above to the previously entered sample name # to ensure that the samples are the same assert self.samples[strainindex].name == samplename, \ ""Sample name {} does not match object name {}"" \ .format(self.samples[strainindex].name, samplename) # Extract and format the percent of reads (passing filter) associated with each sample # noinspection PyTypeChecker percentperstrain = float(""{:.2f}"".format(float(straindata[5]))) # Calculate the number of reads passing filter associated with each sample: # percentage of reads per strain times the total reads passing filter divided by 100 numberofclusterspf = int(percentperstrain * tclusterspf / 100) # Update the object with the variables run.SampleNumber = straindata[0] run.NumberofClustersPF = numberofclusterspf run.TotalClustersinRun = tclusterspf run.PercentOfClusters = percentperstrain run.flowcell = self.flowcell run.instrument = self.instrument else: strainindex = 0 for i in range(len(self.samples)): # Set run to the .run object of self.samples[index] run = self.samples[strainindex].run # Update the object with the variables run.SampleNumber = strainindex + 1 run.NumberofClustersPF = 'NA' run.TotalClustersinRun = 'NA' run.PercentOfClusters = 'NA' run.flowcell = self.flowcell run.instrument = self.instrument strainindex += 1" 5,"def 
5,"def fix_raw_path(path):
    """"""Prettify name of path

    :param path: path to fix
    :return: Good name for path
    """"""
    double_path_separator = PATH_SEPARATOR + PATH_SEPARATOR
    while path.find(double_path_separator) >= 0:  # there are double separators
        path = path.replace(double_path_separator, PATH_SEPARATOR)  # remove double path separator
    if is_folder(path) and not path.endswith(""/""):
        path = path + ""/""
    return path"
6,"def remove_year(name):
    """"""Removes year from input

    :param name: path to edit
    :return: inputs with no years
    """"""
    for i in range(len(name) - 3):  # last index is length - 3 - 1 = length - 4
        if name[i: i + 4].isdigit():
            name = name[:i] + name[i + 4:]
            return remove_year(name)  # if there is a removal, start again
    return name"
7,"def remove_brackets(name):
    """"""Removes brackets from input

    :param name: path to fix
    :return: inputs with no brackets
    """"""
    name = re.sub(
        r""([(\[]).*?([)\]])"",
        r""\g<1>\g<2>"",
        name
    )  # remove anything in between brackets
    brackets = ""()[]{}""  # list of brackets
    for bracket in brackets:
        name = name.replace(bracket, """")
    return name"
8,"def extract_name_max_chars(name, max_chars=64, blank="" ""):
    """"""Extracts max chars in name truncated to nearest word

    :param name: path to edit
    :param max_chars: max chars of new name
    :param blank: char that represents the blank between words
    :return: Name edited to contain at most max_chars
    """"""
    new_name = name.strip()
    if len(new_name) > max_chars:
        new_name = new_name[:max_chars]  # get at most max_chars chars
        if new_name.rfind(blank) > 0:
            new_name = new_name[:new_name.rfind(blank)]  # nearest word
    return new_name"
9,"def prettify(name, blank="" ""):
    """"""Prettify name of path

    :param name: path name to edit
    :param blank: default blanks in name
    :return: Prettier name from given one: replace bad chars with good ones
    """"""
    if name.startswith("".""):  # remove starting dot
        name = name[1:]
    for bad_char in BAD_CHARS:
        name = name.replace(bad_char, blank)  # remove token
    name = String(name).remove_all(blank)
    for i in range(1, len(name) - 2):
        try:
            are_blanks = name[i - 1] == blank and name[i + 1] == blank
            if are_blanks and name[i] in BAD_CHARS:
                name = name[:i - 1] + name[i + 2:]
        except:  # out of bounds
            pass
    if name.startswith(blank):
        name = name[1:]
    if name.endswith(blank):  # remove ending replacement
        name = name[:-1]
    return name"
10,"def get_parent_folder_name(file_path):
    """"""Finds parent folder of file

    :param file_path: path
    :return: Name of folder container
    """"""
    return os.path.split(os.path.split(os.path.abspath(file_path))[0])[-1]"
11,"def ls_dir(path, include_hidden=False):
    """"""Finds content of folder

    :param path: directory to get list of files and folders
    :param include_hidden: True iff include hidden files in list
    :return: List of paths in given directory
    """"""
    lst = []
    for file in os.listdir(path):
        hidden_file = FileSystem(file).is_hidden()
        if (hidden_file and include_hidden) or (not hidden_file):
            lst.append(os.path.join(path, file))
    return list(set(lst))"
12,"def ls_recurse(path, include_hidden=False):
    """"""Finds content of folder recursively

    :param path: directory to get list of files and folders
    :param include_hidden: True iff include hidden files in list
    :return: List of paths in given directory recursively
    """"""
    lst = []
    for file in os.listdir(path):
        hidden_file = FileSystem(file).is_hidden()
        if (hidden_file and include_hidden) or (not hidden_file):
            lst.append(os.path.join(path, file))
        if is_folder(os.path.join(path, file)):
            lst += ls_recurse(
                os.path.join(path, file),
                include_hidden=include_hidden
            )  # get list of
files in directory return list(set(lst))" 13,"def list_content(path, recurse, include_hidden=False): """"""Finds content of folder (recursively) :param path: directory to get list of files and folders :param recurse: True iff recurse into subdirectories or not :param include_hidden: True iff include hidden files in list :return: List of paths in given directory recursively """""" if recurse: return ls_recurse(path, include_hidden=include_hidden) return ls_dir(path, include_hidden=include_hidden)" 14,"def is_russian(self): """"""Checks if file path is russian :return: True iff document has a russian name """""" russian_chars = 0 for char in RUSSIAN_CHARS: if char in self.name: russian_chars += 1 # found a russian char return russian_chars > len(RUSSIAN_CHARS) / 2.0" 15,"def rename(self, new_path): """"""Renames to new path :param new_path: new path to use """""" rename_path = fix_raw_path(new_path) if is_folder(self.path): os.rename(self.path, rename_path) else: os.renames(self.path, rename_path)" 16,"def __start_waiting_for_events(self): ''' This waits until the whole chain of callback methods triggered by ""trigger_connection_to_rabbit_etc()"" has finished, and then starts waiting for publications. This is done by starting the ioloop. Note: In the pika usage example, these things are both called inside the run() method, so I wonder if this check-and-wait here is necessary. Maybe not. But the usage example does not implement a Thread, so it probably blocks during the opening of the connection. Here, as it is a different thread, the run() might get called before the __init__ has finished? I'd rather stay on the safe side, as my experience of threading in Python is limited. ''' # Start ioloop if connection object ready: if self.thread._connection is not None: try: logdebug(LOGGER, 'Starting ioloop...') logtrace(LOGGER, 'ioloop is owned by connection %s...', self.thread._connection) # Tell the main thread that we're now open for events. # As soon as the thread._connection object is not None anymore, it # can receive events. self.thread.tell_publisher_to_stop_waiting_for_thread_to_accept_events() self.thread.continue_gently_closing_if_applicable() self.thread._connection.ioloop.start() except PIDServerException as e: raise e # It seems that some connection problems do not cause # RabbitMQ to call any callback (on_connection_closed # or on_connection_error) - it just silently swallows the # problem. # So we need to manually trigger reconnection to the next # host here, which we do by manually calling the callback. # We start the ioloop, so it can handle the reconnection events, # or also receive events from the publisher in the meantime. except Exception as e: # This catches any error during connection startup and during the entire # time the ioloop runs, blocks and waits for events. time_passed = datetime.datetime.now() - self.__start_connect_time time_passed_seconds = time_passed.total_seconds() # Some pika errors: if isinstance(e, pika.exceptions.ProbableAuthenticationError): errorname = self.__make_error_name(e, 'e.g. wrong user or password') elif isinstance(e, pika.exceptions.ProbableAccessDeniedError): errorname = self.__make_error_name(e, 'e.g. wrong virtual host name') elif isinstance(e, pika.exceptions.IncompatibleProtocolError): errorname = self.__make_error_name(e, 'e.g. 
trying TLS/SSL on wrong port') # Other errors: else: errorname = self.__make_error_name(e) logdebug(LOGGER, 'Unexpected error during event listener\'s lifetime (after %s seconds): %s', time_passed_seconds, errorname) # Now trigger reconnection: self.statemachine.set_to_waiting_to_be_available() self.on_connection_error(self.thread._connection, errorname) self.thread._connection.ioloop.start() else: # I'm quite sure that this cannot happen, as the connection object # is created in ""trigger_connection_...()"" and thus exists, no matter # if the actual connection to RabbitMQ succeeded (yet) or not. logdebug(LOGGER, 'This cannot happen: Connection object is not ready.') logerror(LOGGER, 'Cannot happen. Cannot properly start the thread. Connection object is not ready.')" 17,"def setClass(self, factoryclass): """"""Sets the constructor for the component type this label is to represent :param factoryclass: a class that, when called, results in an instance of the desired class :type factoryclass: callable """""" self.factoryclass = factoryclass self.setText(str(factoryclass.name))" 18,"def mouseMoveEvent(self, event): """"""Determines if a drag is taking place, and initiates it"""""" if (event.pos() - self.dragStartPosition).manhattanLength() < 10: return QtGui.QApplication.setOverrideCursor(QtGui.QCursor(QtCore.Qt.ClosedHandCursor)) factory = self.factoryclass() mimeData = QtCore.QMimeData() try: mimeData.setData(""application/x-protocol"", factory.serialize()) except: mimeData.setData(""application/x-protocol"", cPickle.dumps(factory)) drag = QtGui.QDrag(self) drag.setMimeData(mimeData) pixmap = QtGui.QPixmap() pixmap = pixmap.grabWidget(self, self.frameRect()) # below makes the pixmap half transparent # painter = QtGui.QPainter(pixmap) # painter.setCompositionMode(painter.CompositionMode_DestinationIn) # painter.fillRect(pixmap.rect(), QtGui.QColor(0, 0, 0, 127)) # painter.end() drag.setPixmap(pixmap) drag.setHotSpot(QtCore.QPoint(pixmap.width()/2, pixmap.height()/2)) drag.setPixmap(pixmap) self.dragActive.emit(True) result = drag.exec_(QtCore.Qt.MoveAction) QtGui.QApplication.restoreOverrideCursor()" 19,"def database(self): """""" Enters all the metadata into a database """""" import sqlite3 try: os.remove('{}/metadatabase.sqlite'.format(self.reportpath)) except OSError: pass # Set the name of the database db = sqlite3.connect('{}/metadatabase.sqlite'.format(self.reportpath)) # Create a cursor to allow access to the database cursor = db.cursor() # Set up the db cursor.execute(''' CREATE TABLE IF NOT EXISTS Samples ( id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT UNIQUE, name TEXT UNIQUE ) ''') # Create a variable to store the names of the header values for each individual table # This will store a set of all the headers from all the strains, as there can be some variability present, as # not all analyses are available for all taxonomic groups columns = dict() for sample in self.metadata: # Create a metadata object to store the new tables data = MetadataObject() data.name = sample.name # Insert each strain name into the Samples table cursor.execute(''' INSERT OR IGNORE INTO Samples (name) VALUES ( ? ) ''', (sample.name, )) # Each header in the .json file represents a major category e.g. ARMI, GeneSeekr, commands, etc. and # will be made into a separate table for header in sample.datastore.items(): # Allow for certain analyses, such as core genome, not being performed on all strains try: # Key and value: data description and data value e.g. targets present: 1012, etc. 
for key, value in sorted(header[1].datastore.items()): # Only the values consisting of dictionaries are of interest if type(value) == dict: # Clean the column names so there are no issues entering names into the database cleanedcolumn = self.columnclean(key) # Set the table name tablename = '{}_{}'.format(header[0].replace('.', '_'), cleanedcolumn) # Create the table (if it doesn't already exist) cursor.execute(''' CREATE TABLE IF NOT EXISTS {} ( sample_id INTEGER ) '''.format(tablename)) # Add the attributes with the dictionaries (values) to the metadata object setattr(data, tablename, GenObject(value)) for gene, result in sorted(value.items()): # Add the data header to the dictionary try: columns[tablename].add(gene) # Initialise the dictionary the first time a table name is encountered except KeyError: columns[tablename] = set() columns[tablename].add(str(gene)) except (AttributeError, IndexError): pass self.tabledata.append(data) # Iterate through the dictionary containing all the data headers for table, setofheaders in sorted(columns.items()): # Each header will be used as a column in the appropriate table for cleanedcolumn in sorted(setofheaders): # Alter the table by adding each header as a column cursor.execute(''' ALTER TABLE {} ADD COLUMN {} TEXT '''.format(table, cleanedcolumn)) # Iterate through the samples and pull out the data for each table/column # for sample in self.metadata: for sample in self.tabledata: # Find the id associated with each sample in the Sample table cursor.execute(''' SELECT id from Samples WHERE name=? ''', (sample.name,)) sampleid = cursor.fetchone()[0] # Add the sample_id to the table cursor.execute(''' INSERT OR IGNORE INTO {} (sample_id) VALUES (""{}"") '''.format(table, sampleid)) # Add the data to the table try: # Find the data for each table/column for item in sorted(sample[table].datastore.items()): # Clean the names cleanedcolumn = self.columnclean(str(item[0])) # Add the data to the column of the appropriate table, # where the sample_id matches the current strain cursor.execute(''' UPDATE {} SET {} = ? WHERE sample_id = {} '''.format(table, cleanedcolumn, sampleid), (str(item[1]), )) except KeyError: pass # Commit the changes to the database db.commit()" 20,"def columnclean(column): """""" Modifies column header format to be importable into a database :param column: raw column header :return: cleanedcolumn: reformatted column header """""" cleanedcolumn = str(column) \ .replace('%', 'percent') \ .replace('(', '_') \ .replace(')', '') \ .replace('As', 'Adenosines') \ .replace('Cs', 'Cytosines') \ .replace('Gs', 'Guanines') \ .replace('Ts', 'Thymines') \ .replace('Ns', 'Unknowns') \ .replace('index', 'adapterIndex') return cleanedcolumn" 21,"def getLabel(self, key): """"""Gets the label assigned to an axes :param key:??? :type key: str """""" axisItem = self.getPlotItem().axes[key]['item'] return axisItem.label.toPlainText()" 22,"def updateData(self, axeskey, x, y): """"""Replaces the currently displayed data :param axeskey: name of data plot to update. 
Valid options are 'stim' or 'response' :type axeskey: str :param x: index values associated with y to plot :type x: numpy.ndarray :param y: values to plot at x :type y: numpy.ndarray """""" if axeskey == 'stim': self.stimPlot.setData(x,y) # call manually to ajust placement of signal ranges = self.viewRange() self.rangeChange(self, ranges) if axeskey == 'response': self.clearTraces() if self._traceUnit == 'A': y = y * self._ampScalar if self.zeroAction.isChecked(): start_avg = np.mean(y[5:25]) y = y - start_avg self.tracePlot.setData(x,y*self._polarity)" 23,"def appendData(self, axeskey, bins, ypoints): """"""Appends data to existing plotted data :param axeskey: name of data plot to update. Valid options are 'stim' or 'response' :type axeskey: str :param bins: bins to plot a point for :type bin: numpy.ndarray :param ypoints: iteration number of raster, *should* match bins dimension, but really takes the first value in array for iteration number and plot row at proper place for included bins :type ypoints: numpy.ndarray """""" if axeskey == 'raster' and len(bins) > 0: x, y = self.rasterPlot.getData() # don't plot overlapping points bins = np.unique(bins) # adjust repetition number to response scale ypoints = np.ones_like(bins)*self.rasterYslots[ypoints[0]] x = np.append(x, bins) y = np.append(y, ypoints) self.rasterPlot.setData(x, y)" 24,"def setThreshold(self, threshold): """"""Sets the current threshold :param threshold: the y value to set the threshold line at :type threshold: float """""" self.threshLine.setValue(threshold) self.threshold_field.setValue(threshold)" 25,"def setRasterBounds(self, lims): """"""Sets the raster plot y-axis bounds, where in the plot the raster will appear between :param lims: the (min, max) y-values for the raster plot to be placed between :type lims: (float, float) """""" self.rasterBottom = lims[0] self.rasterTop = lims[1] self.updateRasterBounds()" 26,"def updateRasterBounds(self): """"""Updates the y-coordinate slots where the raster points are plotted, according to the current limits of the y-axis"""""" yrange = self.viewRange()[1] yrange_size = yrange[1] - yrange[0] rmax = self.rasterTop*yrange_size + yrange[0] rmin = self.rasterBottom*yrange_size + yrange[0] self.rasterYslots = np.linspace(rmin, rmax, self.nreps) self.rasterBoundsUpdated.emit((self.rasterBottom, self.rasterTop), self.getTitle())" 27,"def askRasterBounds(self): """"""Prompts the user to provide the raster bounds with a dialog. Saves the bounds to be applied to the plot"""""" dlg = RasterBoundsDialog(bounds= (self.rasterBottom, self.rasterTop)) if dlg.exec_(): bounds = dlg.values() self.setRasterBounds(bounds)" 28,"def rangeChange(self, pw, ranges): """"""Adjusts the stimulus signal to keep it at the top of a plot, after any ajustment to the axes ranges takes place. This is a slot for the undocumented pyqtgraph signal sigRangeChanged. 
From what I can tell the arguments are: :param pw: reference to the emitting object (plot widget in my case) :type pw: object :param ranges: I am only interested when this turns out to be a nested list of axis bounds :type ranges: object """""" if hasattr(ranges, '__iter__'): # adjust the stim signal so that it falls in the correct range yrange_size = ranges[1][1] - ranges[1][0] stim_x, stim_y = self.stimPlot.getData() if stim_y is not None: stim_height = yrange_size*STIM_HEIGHT # take it to 0 stim_y = stim_y - np.amin(stim_y) # normalize if np.amax(stim_y) != 0: stim_y = stim_y/np.amax(stim_y) # scale for new size stim_y = stim_y*stim_height # raise to right place in plot stim_y = stim_y + (ranges[1][1] - (stim_height*1.1 + (stim_height*0.2))) self.stimPlot.setData(stim_x, stim_y) # rmax = self.rasterTop*yrange_size + ranges[1][0] # rmin = self.rasterBottom*yrange_size + ranges[1][0] self.updateRasterBounds()" 29,"def update_thresh(self): """"""Emits a Qt signal thresholdUpdated with the current threshold value"""""" thresh_val = self.threshLine.value() self.threshold_field.setValue(thresh_val) self.thresholdUpdated.emit(thresh_val, self.getTitle())" 30,"def fromFile(self, fname): """"""Displays a spectrogram of an audio file. Supported formats see :func:`sparkle.tools.audiotools.audioread` :param fname: file path of the audiofile to display :type fname: str :returns: float -- duration of audio recording (seconds) """""" spec, f, bins, dur = audiotools.spectrogram(fname, **self.specgramArgs) self.updateImage(spec, bins, f) return dur" 31,"def updateImage(self, imgdata, xaxis=None, yaxis=None): """"""Updates the Widget image directly. :type imgdata: numpy.ndarray, see :meth:`pyqtgraph:pyqtgraph.ImageItem.setImage` :param xaxis: x-axis values, length should match dimension 1 of imgdata :param yaxis: y-axis values, length should match dimension 0 of imgdata """""" imgdata = imgdata.T self.img.setImage(imgdata) if xaxis is not None and yaxis is not None: xscale = 1.0/(imgdata.shape[0]/xaxis[-1]) yscale = 1.0/(imgdata.shape[1]/yaxis[-1]) self.resetScale() self.img.scale(xscale, yscale) self.imgScale = (xscale, yscale) self.imageArray = np.fliplr(imgdata) self.updateColormap()" 32,"def resetScale(self): """"""Resets the scale on this image. Correctly aligns time scale, undoes manual scaling"""""" self.img.scale(1./self.imgScale[0], 1./self.imgScale[1]) self.imgScale = (1.,1.)" 33,"def updateData(self, signal, fs): """"""Displays a spectrogram of the provided signal :param signal: 1-D signal of audio :type signal: numpy.ndarray :param fs: samplerate of signal :type fs: int """""" # use a separate thread to calculate spectrogram so UI doesn't lag t = threading.Thread(target=_doSpectrogram, args=(self.spec_done, (fs, signal),), kwargs=self.specgramArgs) t.start()" 34,"def setSpecArgs(**kwargs): """"""Sets optional arguments for the spectrogram appearance. Available options: :param nfft: size of FFT window to use :type nfft: int :param overlap: percent overlap of window :type overlap: number :param window: Type of window to use, choices are hanning, hamming, blackman, bartlett or none (rectangular) :type window: string :param colormap: Gets set by colormap editor. Holds the information to generate the colormap. 
Items: :meth:`lut`, :meth:`levels`, state (info for editor) :type colormap: dict """""" for key, value in kwargs.items(): if key == 'colormap': SpecWidget.imgArgs['lut'] = value['lut'] SpecWidget.imgArgs['levels'] = value['levels'] SpecWidget.imgArgs['state'] = value['state'] for w in SpecWidget.instances: w.updateColormap() else: SpecWidget.specgramArgs[key] = value" 35,"def clearImg(self): """"""Clears the current image"""""" self.img.setImage(np.array([[0]])) self.img.image = None" 36,"def editColormap(self): """"""Prompts the user with a dialog to change colormap"""""" self.editor = pg.ImageView() # remove the ROI and Norm buttons self.editor.ui.roiBtn.setVisible(False) self.editor.ui.menuBtn.setVisible(False) self.editor.setImage(self.imageArray) if self.imgArgs['state'] is not None: self.editor.getHistogramWidget().item.gradient.restoreState(self.imgArgs['state']) self.editor.getHistogramWidget().item.setLevels(*self.imgArgs['levels']) self.editor.closeEvent = self._editor_close self.editor.setWindowModality(QtCore.Qt.ApplicationModal) self.editor.show()" 37,"def updateColormap(self): """"""Updates the currently colormap accoring to stored settings"""""" if self.imgArgs['lut'] is not None: self.img.setLookupTable(self.imgArgs['lut']) self.img.setLevels(self.imgArgs['levels'])" 38,"def appendData(self, xdata, ydata, color='b', legendstr=None): """"""Adds the data to the plot :param xdata: index values for data, plotted on x-axis :type xdata: numpy.ndarray :param ydata: value data to plot, dimension must match xdata :type ydata: numpy.ndarray """""" item = self.plot(xdata, ydata, pen=color) if legendstr is not None: self.legend.addItem(item, legendstr) return item" 39,"def setLabels(self, xlabel=None, ylabel=None, title=None, xunits=None, yunits=None): """"""Sets the plot labels :param xlabel: X-axis label (do not include units) :type xlabel: str :param ylabel: Y-axis label (do not include units) :type ylabel: str :param title: Plot title :type title: str :param xunit: SI units for the x-axis. An appropriate label will be appended according to scale :type xunit: str :param yunit: SI units for the y-axis. An appropriate label will be appended according to scale :type yunit: str """""" if xlabel is not None: self.setLabel('bottom', xlabel, units=xunits) if ylabel is not None: self.setLabel('left', ylabel, units=yunits) if title is not None: self.setTitle(title)" 40,"def setPoint(self, x, group, y): """"""Sets the given point, connects line to previous point in group :param x: x value of point :type x: float :param group: group which plot point for :type group: float :param y: y value of point :type y: float """""" if x == -1: # silence window self.plot([0],[y], symbol='o') else: yindex = self.groups.index(group) xdata, ydata = self.lines[yindex].getData() if ydata is None: xdata = [x] ydata = [y] else: xdata = np.append(xdata, x) ydata = np.append(ydata, y) self.lines[yindex].setData(xdata, ydata)" 41,"def setLabels(self, name): """"""Sets plot labels, according to predefined options :param name: The type of plot to create labels for. 
Options: calibration, tuning, anything else labels for spike counts :type name: str """""" if name == ""calibration"": self.setWindowTitle(""Calibration Curve"") self.setTitle(""Calibration Curve"") self.setLabel('bottom', ""Frequency"", units='Hz') self.setLabel('left', 'Recorded Intensity (dB SPL)') elif name == ""tuning"": self.setWindowTitle(""Tuning Curve"") self.setTitle(""Tuning Curve"") self.setLabel('bottom', ""Frequency"", units=""Hz"") self.setLabel('left', ""Spike Count (mean)"") else: self.setWindowTitle(""Spike Counts"") self.setTitle(""Spike Counts"") self.setLabel('bottom', ""Test Number"", units='') self.setLabel('left', ""Spike Count (mean)"", units='')" 42,"def loadCurve(data, groups, thresholds, absvals, fs, xlabels): """"""Accepts a data set from a whole test, averages reps and re-creates the progress plot as the same as it was during live plotting. Number of thresholds must match the size of the channel dimension"""""" xlims = (xlabels[0], xlabels[-1]) pw = ProgressWidget(groups, xlims) spike_counts = [] # skip control for itrace in range(data.shape[0]): count = 0 for ichan in range(data.shape[2]): flat_reps = data[itrace,:,ichan,:].flatten() count += len(spikestats.spike_times(flat_reps, thresholds[ichan], fs, absvals[ichan])) spike_counts.append(count/(data.shape[1]*data.shape[2])) #mean spikes per rep i = 0 for g in groups: for x in xlabels: pw.setPoint(x, g, spike_counts[i]) i +=1 return pw" 43,"def setBins(self, bins): """"""Sets the bin centers (x values) :param bins: time bin centers :type bins: numpy.ndarray """""" self._bins = bins self._counts = np.zeros_like(self._bins) bar_width = bins[0]*1.5 self.histo.setOpts(x=bins, height=self._counts, width=bar_width) self.setXlim((0, bins[-1]))" 44,"def clearData(self): """"""Clears all histograms (keeps bins)"""""" self._counts = np.zeros_like(self._bins) self.histo.setOpts(height=self._counts)" 45,"def appendData(self, bins, repnum=None): """"""Increases the values at bins (indexes) :param bins: bin center values to increment counts for, to increment a time bin more than once include multiple items in list with that bin center value :type bins: numpy.ndarray """""" # only if the last sample was above threshold, but last-1 one wasn't bins[bins >= len(self._counts)] = len(self._counts) -1 bin_totals = np.bincount(bins) self._counts[:len(bin_totals)] += bin_totals self.histo.setOpts(height=np.array(self._counts))" 46,"def processData(self, times, response, test_num, trace_num, rep_num): """"""Calulate spike times from raw response data"""""" # invert polarity affects spike counting response = response * self._polarity if rep_num == 0: # reset self.spike_counts = [] self.spike_latencies = [] self.spike_rates = [] fs = 1./(times[1] - times[0]) # process response; calculate spike times spike_times = spikestats.spike_times(response, self._threshold, fs) self.spike_counts.append(len(spike_times)) if len(spike_times) > 0: self.spike_latencies.append(spike_times[0]) else: self.spike_latencies.append(np.nan) self.spike_rates.append(spikestats.firing_rate(spike_times, times)) binsz = self._bins[1] - self._bins[0] response_bins = spikestats.bin_spikes(spike_times, binsz) # self.putnotify('spikes_found', (response_bins, rep_num)) self.appendData(response_bins, rep_num)" 47,"def setSr(self, fs): """"""Sets the samplerate of the input operation being plotted"""""" self.tracePlot.setSr(fs) self.stimPlot.setSr(fs)" 48,"def setWindowSize(self, winsz): """"""Sets the size of scroll window"""""" self.tracePlot.setWindowSize(winsz) 
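    # mirror the same scroll-window size on the stimulus plot so both traces stay in step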
self.stimPlot.setWindowSize(winsz)" 49,"def addPlot(self, xdata, ydata, xlabel=None, ylabel=None, title=None, xunits=None, yunits=None): """"""Adds a new plot for the given set of data and/or labels, Generates a SimplePlotWidget :param xdata: index values for data, plotted on x-axis :type xdata: numpy.ndarray :param ydata: value data to plot, dimension must match xdata :type ydata: numpy.ndarray """""" p = SimplePlotWidget(xdata, ydata) p.setLabels(xlabel, ylabel, title, xunits, yunits) # self.plots.append(p) self.stacker.addWidget(p)" 50,"def addSpectrogram(self, ydata, fs, title=None): """"""Adds a new spectorgram plot for the given image. Generates a SpecWidget :param ydata: 2-D array of the image to display :type ydata: numpy.ndarray :param fs: the samplerate of the signal in the image, used to set time/ frequency scale :type fs: int :param title: Plot title :type title: str """""" p = SpecWidget() p.updateData(ydata, fs) if title is not None: p.setTitle(title) self.stacker.addWidget(p)" 51,"def nextPlot(self): """"""Moves the displayed plot to the next one"""""" if self.stacker.currentIndex() < self.stacker.count(): self.stacker.setCurrentIndex(self.stacker.currentIndex()+1)" 52,"def prevPlot(self): """"""Moves the displayed plot to the previous one"""""" if self.stacker.currentIndex() > 0: self.stacker.setCurrentIndex(self.stacker.currentIndex()-1)" 53,"def most_even_chunk(string, group): """"""Divide a string into a list of strings as even as possible."""""" counts = [0] + most_even(len(string), group) indices = accumulate(counts) slices = window(indices, 2) return [string[slice(*one)] for one in slices]" 54,"def most_even(number, group): """"""Divide a number into a list of numbers as even as possible."""""" count, rest = divmod(number, group) counts = zip_longest([count] * group, [1] * rest, fillvalue=0) chunks = [sum(one) for one in counts] logging.debug('chunks: %s', chunks) return chunks" 55,"def window(seq, count=2): """"""Slide window."""""" iseq = iter(seq) result = tuple(islice(iseq, count)) if len(result) == count: yield result for elem in iseq: result = result[1:] + (elem,) yield result" 56,"def _get_modules(path): """"""Finds modules in folder recursively :param path: directory :return: list of modules """""" lst = [] folder_contents = os.listdir(path) is_python_module = ""__init__.py"" in folder_contents if is_python_module: for file in folder_contents: full_path = os.path.join(path, file) if is_file(full_path): lst.append(full_path) if is_folder(full_path): lst += _get_modules(full_path) # recurse in folder return list(set(lst))" 57,"def get_modules(folder, include_meta=False): """"""Finds modules (recursively) in folder :param folder: root folder :param include_meta: whether include meta files like (__init__ or __version__) :return: list of modules """""" files = [ file for file in _get_modules(folder) if is_file(file) # just files ] if not include_meta: files = [ file for file in files if not Document(file).name.startswith(""__"") ] return files" 58,"def _parse(self): """"""Parses file contents :return: Tree hierarchy of file """""" with open(self.path, ""rt"") as reader: return ast.parse(reader.read(), filename=self.path)" 59,"def _find_package(self, root_package): """"""Finds package name of file :param root_package: root package :return: package name """""" package = self.path.replace(root_package, """") if package.endswith("".py""): package = package[:-3] package = package.replace(os.path.sep, MODULE_SEP) root_package = get_folder_name(root_package) package = 
root_package + package # add root return package" 60,"def _get_instances(self, instance): """"""Finds all instances of instance in tree :param instance: type of object :return: list of objects in tree of same instance """""" return [ x for x in self.tree.body if isinstance(x, instance) ]" 61,"def get_classes(self): """"""Finds classes in file :return: list of top-level classes """""" instances = self._get_instances(ast.ClassDef) instances = [ PyClass(instance, self.package) for instance in instances ] return instances" 62,"def get_functions(self): """"""Finds top-level functions in file :return: list of top-level functions """""" instances = self._get_instances(ast.FunctionDef) instances = [ PyFunction(instance, self.package) for instance in instances ] return instances" 63,"def get_functions(self, include_meta=False): """"""Finds top-level functions in file :param include_meta: whether include meta functions like (__init__) :return: list of top-level functions """""" instances = self._get_instances(ast.FunctionDef) instances = [ PyFunction(instance, self.full_package) # fix package name for instance in instances ] if not include_meta: instances = [ instance # fix package name for instance in instances if not instance.get_name().startswith(""__"") ] return instances" 64,"def skesa_assemble(self): """""" Run skesa to assemble genomes """""" with progressbar(self.metadata) as bar: for sample in bar: # Initialise the assembly command sample.commands.assemble = str() try: if sample.general.trimmedcorrectedfastqfiles: # If the sample is a pure isolate, assemble it. Otherwise, run the pre-metagenome pipeline try: status = sample.run.Description except AttributeError: status = 'unknown' if status == 'metagenome': self.merge(sample) else: # Set the output directory sample.general.assembly_output = os.path.join(sample.general.outputdirectory, 'assembly_output') make_path(sample.general.assembly_output) sample.general.assemblyfile = os.path.join(sample.general.assembly_output, '{name}_unfiltered.fasta' .format(name=sample.name)) sample.general.bestassemblyfile = os.path.join(sample.general.assembly_output, '{name}.fasta' .format(name=sample.name)) fastqfiles = sample.general.trimmedcorrectedfastqfiles # Set the the forward fastq files sample.general.assemblyfastq = fastqfiles forward = fastqfiles[0] gz = True if '.gz' in forward else False # If there are two fastq files if len(fastqfiles) == 2: # Set the reverse fastq name https://github.com/ncbi/SKESA/issues/7 sample.commands.assemble = 'skesa --fastq {fastqfiles} --cores {threads} ' \ '--use_paired_ends --vector_percent 1 ' \ '--contigs_out {contigs}'\ .format(fastqfiles=','.join(fastqfiles), threads=self.cpus, contigs=sample.general.assemblyfile) # Same as above, but use single read settings for the assembler else: sample.commands.assemble = 'skesa --fastq {fastqfiles} --cores {threads} ' \ '--vector_percent 1 --contigs_out {contigs}'\ .format(fastqfiles=','.join(fastqfiles), threads=self.cpus, contigs=sample.general.assemblyfile) # If there are no fastq files, populate the metadata appropriately else: sample.general.assembly_output = 'NA' sample.general.assemblyfastq = 'NA' sample.general.bestassemblyfile = 'NA' except AttributeError: sample.general.assembly_output = 'NA' sample.general.assemblyfastq = 'NA' sample.general.trimmedcorrectedfastqfiles = 'NA' sample.general.bestassemblyfile = 'NA' if sample.commands.assemble and not os.path.isfile(sample.general.assemblyfile): # Run the assembly out, err = run_subprocess(sample.commands.assemble) 
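        # record the assembly command and its stdout/stderr in the pipeline and per-sample log files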
write_to_logfile(sample.commands.assemble, sample.commands.assemble, self.logfile, sample.general.logout, sample.general.logerr, None, None) write_to_logfile(out, err, self.logfile, sample.general.logout, sample.general.logerr, None, None)" 65,"def merge(self, sample): """""" Use bbmerge to merge paired FASTQ files for use in metagenomics pipelines. Create a report with the total number of reads, and the number of reads that could be paired :param sample: metadata sample object flagged as a metagenome """""" # Set the assembly file to 'NA' as assembly is not desirable for metagenomes sample.general.assemblyfile = 'NA' # Can only merge paired-end if len(sample.general.fastqfiles) == 2: outpath = os.path.join(sample.general.outputdirectory, 'merged_reads') make_path(outpath) # Merge path - keep all the merged FASTQ files in one directory merge_path = os.path.join(self.path, 'merged_reads') make_path(merge_path) # Set the name of the merged, and unmerged files sample.general.mergedreads = \ os.path.join(merge_path, '{}_paired.fastq.gz'.format(sample.name)) log = os.path.join(outpath, 'log') error = os.path.join(outpath, 'err') try: if not os.path.isfile(sample.general.mergedreads): # Run the merging command out, err, cmd = bbtools.bbmerge(forward_in=sorted(sample.general.trimmedcorrectedfastqfiles)[0], merged_reads=sample.general.mergedreads, mix=True, returncmd=True, threads=self.cpus) write_to_logfile(out, err, self.logfile, sample.general.logout, sample.general.logerr, None, None) with open(log, 'w') as log_file: log_file.write(out) with open(error, 'w') as error_file: error_file.write(err) except (CalledProcessError, IndexError): delattr(sample.general, 'mergedreads') # Set the name of the report to store the metagenome file merging results report = os.path.join(self.reportpath, 'merged_metagenomes.csv') # Extract the total number of reads, and the number of reads that could be paired from the bbmerge # err stream num_reads, num_pairs = self.reads(error) # If the report doesn't exist, create it with the header and the results from the first sample if not os.path.isfile(report): with open(report, 'w') as report_file: report_file.write('Sample,TotalReads,PairedReads\n{sample},{total},{paired}\n' .format(sample=sample.name, total=num_reads, paired=num_pairs)) # If the report exists, open it to determine which samples have already been added - useful if re-running # the analysis else: lines = list() with open(report, 'r') as report_file: for line in report_file: lines.append(line.split(',')[0]) # Add the results to the report if sample.name not in lines: with open(report, 'a+') as report_file: report_file.write('{sample},{total},{paired}\n' .format(sample=sample.name, total=num_reads, paired=num_pairs))" 66,"def reads(err_log): """""" Parse the outputs from bbmerge to extract the total number of reads, as well as the number of reads that could be paired :param err_log: bbmerge outputs the stats in the error file :return: num_reads, the total number of reads, paired_reads, number of paired readds """""" # Initialise variables num_reads = 0 paired_reads = 0 # Open the log file with open(err_log, 'r') as error_log: # Extract the necessary information for line in error_log: if 'Pairs:' in line: num_reads = line.split('\t')[-1].rstrip() elif 'Joined:' in line: paired_reads = line.split('\t')[-2].rstrip() return num_reads, paired_reads" 67,"def best_assemblyfile(self): """""" Determine whether the contigs.fasta output file from the assembler is present. 
If not, set the .bestassembly attribute to 'NA' """""" for sample in self.metadata: try: # Set the name of the filtered assembly file filtered_outputfile = os.path.join(self.path, 'raw_assemblies', '{}.fasta'.format(sample.name)) # Set the name of the unfiltered spades assembly output file if os.path.isfile(sample.general.assemblyfile): size = os.path.getsize(sample.general.assemblyfile) # Ensure that the assembly isn't just an empty file if size == 0: sample.general.bestassemblyfile = 'NA' else: sample.general.bestassemblyfile = sample.general.assemblyfile shutil.copyfile(sample.general.bestassemblyfile, filtered_outputfile) else: sample.general.bestassemblyfile = 'NA' # Add the name and path of the filtered file to the metadata sample.general.filteredfile = filtered_outputfile except AttributeError: sample.general.assemblyfile = 'NA' sample.general.bestassemblyfile = 'NA'" 68,"def groups(self): """"""Component groups Special property which point to a :class:`~pylls.cachet.ComponentGroups` instance for convenience. This instance is initialized on first call. """""" if not self._groups: self._groups = ComponentGroups(self.api_client) return self._groups" 69,"def get(self, component_id=None, **kwargs): """"""Get components :param component_id: Component ID (optional) :return: Components data (:class:`Generator`) Additional named arguments may be passed and are directly transmitted to API. It is useful to use the API search features. .. seealso:: https://docs.cachethq.io/reference#get-components .. seealso:: https://docs.cachethq.io/docs/advanced-api-usage """""" path = 'components' if component_id is not None: path += '/%s' % component_id return self.paginate_get(path, data=kwargs)" 70,"def create(self, name, status, description="""", link="""", order=0, group_id=0, enabled=True): """"""Create a new component :param str name: Name of the component :param int status: Status of the component; 1-4 :param str description: Description of the component (optional) :param str link: A hyperlink to the component (optional) :param int order: Order of the component (optional) :param int group_id: The group ID of the component (optional) :param bool enabled: Whether the component is enabled (optional) :return: Created component data (:class:`dict`) .. seealso:: https://docs.cachethq.io/reference#components .. seealso:: https://docs.cachethq.io/docs/component-statuses """""" data = ApiParams() data['name'] = name data['status'] = status data['description'] = description data['link'] = link data['order'] = order data['group_id'] = group_id data['enabled'] = enabled return self._post('components', data=data)['data']" 71,"def update(self, component_id, name=None, status=None, description=None, link=None, order=None, group_id=None, enabled=True): """"""Update a component :param int component_id: Component ID :param str name: Name of the component (optional) :param int status: Status of the component; 1-4 :param str description: Description of the component (optional) :param str link: A hyperlink to the component (optional) :param int order: Order of the component (optional) :param int group_id: The group ID of the component (optional) :param bool enabled: Whether the component is enabled (optional) :return: Updated component data (:class:`dict`) .. seealso:: https://docs.cachethq.io/reference#components .. 
seealso:: https://docs.cachethq.io/docs/component-statuses """""" data = ApiParams() data['component'] = component_id data['name'] = name data['status'] = status data['description'] = description data['link'] = link data['order'] = order data['group_id'] = group_id data['enabled'] = enabled return self._put('components/%s' % component_id, data=data)['data']" 72,"def get(self, group_id=None, **kwargs): """"""Get component groups :param group_id: Component group ID (optional) :return: Component groups data (:class:`dict`) Additional named arguments may be passed and are directly transmitted to API. It is useful to use the API search features. .. seealso:: https://docs.cachethq.io/reference#get-componentgroups .. seealso:: https://docs.cachethq.io/docs/advanced-api-usage """""" path = 'components/groups' if group_id is not None: path += '/%s' % group_id return self.paginate_get(path, data=kwargs)" 73,"def create(self, name, order=None, collapsed=None): """"""Create a new Component Group :param str name: Name of the component group :param int order: Order of the component group :param int collapsed: Collapse the group? 0-2 :return: Created component group data (:class:`dict`) .. seealso:: https://docs.cachethq.io/reference#post-componentgroups """""" data = ApiParams() data['name'] = name data['order'] = order data['collapsed'] = collapsed return self._post('components/groups', data=data)['data']" 74,"def update(self, group_id, name=None, order=None, collapsed=None): """"""Update a Component Group :param int group_id: Component Group ID :param str name: Name of the component group :param int order: Order of the group :param int collapsed: Collapse the group? :return: Updated component group data (:class:`dict`) .. seealso:: https://docs.cachethq.io/reference#put-component-group """""" data = ApiParams() data['group'] = group_id data['name'] = name data['order'] = order data['collapsed'] = collapsed return self._put('components/groups/%s' % group_id, data=data)['data']" 75,"def get(self, incident_id=None, **kwargs): """"""Get incidents :param int incident_id: :return: Incidents data (:class:`dict`) Additional named arguments may be passed and are directly transmitted to API. It is useful to use the API search features. .. seealso:: https://docs.cachethq.io/reference#get-incidents .. seealso:: https://docs.cachethq.io/docs/advanced-api-usage """""" path = 'incidents' if incident_id is not None: path += '/%s' % incident_id return self.paginate_get(path, data=kwargs)" 76,"def create(self, name, message, status, visible, component_id=None, component_status=None, notify=None, created_at=None, template=None, tplvars=None): """"""Create a new Incident :param str name: Name of the incident :param str message: Incident explanation message :param int status: Status of the incident :param int visible: Whether the incident is publicly visible :param int component_id: Component to update :param int component_status: The status to update the given component :param bool notify: Whether to notify subscribers :param str created_at: When the incident was created :param str template: The template slug to use :param list tplvars: The variables to pass to the template :return: Created incident data (:class:`dict`) .. 
seealso:: https://docs.cachethq.io/reference#incidents """""" data = ApiParams() data['name'] = name data['message'] = message data['status'] = status data['visible'] = visible data['component_id'] = component_id data['component_status'] = component_status data['notify'] = notify data['created_at'] = created_at data['template'] = template data['vars'] = tplvars return self._post('incidents', data=data)['data']" 77,"def update(self, incident_id, name=None, message=None, status=None, visible=None, component_id=None, component_status=None, notify=None, created_at=None, template=None, tpl_vars=None): """"""Update an Incident :param int incident_id: Incident ID :param str name: Name of the incident :param str message: Incident explanation message :param int status: Status of the incident :param int visible: Whether the incident is publicly visible :param int component_id: Component to update :param int component_status: The status to update the given component :param bool notify: Whether to notify subscribers :param str created_at: When the incident was created :param str template: The template slug to use :param list tpl_vars: The variables to pass to the template :return: Created incident data (:class:`dict`) .. seealso:: https://docs.cachethq.io/reference#update-an-incident """""" data = ApiParams() data['name'] = name data['message'] = message data['status'] = status data['visible'] = visible data['component_id'] = component_id data['component_status'] = component_status data['notify'] = notify data['created_at'] = created_at data['template'] = template data['vars'] = tpl_vars return self._put('incidents/%s' % incident_id, data=data)['data']" 78,"def points(self): """"""Metric points Special property which point to a :class:`~pylls.cachet.MetricPoints` instance for convenience. This instance is initialized on first call. """""" if not self._points: self._points = MetricPoints(self.api_client) return self._points" 79,"def get(self, metric_id=None, **kwargs): """"""Get metrics :param int metric_id: Metric ID :return: Metrics data (:class:`dict`) Additional named arguments may be passed and are directly transmitted to API. It is useful to use the API search features. .. seealso:: https://docs.cachethq.io/reference#get-metrics .. seealso:: https://docs.cachethq.io/docs/advanced-api-usage """""" path = 'metrics' if metric_id is not None: path += '/%s' % metric_id return self.paginate_get(path, data=kwargs)" 80,"def create(self, name, suffix, description, default_value, display=None): """"""Create a new Metric :param str name: Name of metric :param str suffix: Metric unit :param str description: Description of what the metric is measuring :param int default_value: Default value to use when a point is added :param int display: Display the chart on the status page :return: Created metric data (:class:`dict`) .. seealso:: https://docs.cachethq.io/reference#metrics """""" data = ApiParams() data['name'] = name data['suffix'] = suffix data['description'] = description data['default_value'] = default_value data['display'] = display return self._post('metrics', data=data)['data']" 81,"def create(self, metric_id, value, timestamp=None): """"""Add a Metric Point to a Metric :param int metric_id: Metric ID :param int value: Value to plot on the metric graph :param str timestamp: Unix timestamp of the point was measured :return: Created metric point data (:class:`dict`) .. 
seealso:: https://docs.cachethq.io/reference#post-metric-points """""" data = ApiParams() data['value'] = value data['timestamp'] = timestamp return self._post('metrics/%s/points' % metric_id, data=data)['data']" 82,"def create(self, email, verify=None, components=None): """"""Create a new subscriber :param str email: Email address to subscribe :param bool verify: Whether to send verification email :param list components: Components ID list, defaults to all :return: Created subscriber data (:class:`dict`) .. seealso:: https://docs.cachethq.io/reference#subscribers """""" data = ApiParams() data['email'] = email data['verify'] = verify data['components'] = components return self._post('subscribers', data=data)['data']" 83,"def parser(metadata, analysistype, fieldnames, cutoff, program): """""" Read in the BLAST outputs, and populate dictionaries with the parsed results :param metadata: type LIST: List of metadata objects :param analysistype: type STR: Current analysis type :param fieldnames: type LIST: List of fields used to in BLAST analyses :param cutoff: type INT: Percent identity cutoff to use to determine if a match is present :param program: type STR: BLAST program used in the analyses :return: metadata: List of updated metadata objects """""" for sample in metadata: # Initialise a dictionary attribute to store results sample[analysistype].blastresults = dict() try: # Open the sequence profile file as a dictionary blastdict = DictReader(open(sample[analysistype].report), fieldnames=fieldnames, dialect='excel-tab') resultdict = dict() resultset = dict() # Initialise a dictionary to store all the target sequences sample[analysistype].targetsequence = dict() coregenomes = list() # Create a list of all the names of the database files, replace - with _, remove path and extension for fasta in sample[analysistype].targets: fastaname = os.path.basename(os.path.splitext(fasta)[0]).replace('-', '_') fastaname = fastaname.split('.')[0] coregenomes.append(fastaname) # Go through each BLAST result for row in blastdict: # Ignore the headers if row['query_id'].startswith(fieldnames[0]): pass else: # Create the subject length variable - if the sequences are DNA (e.g. blastn), use the subject # length as usual; if the sequences are protein (e.g. 
tblastx), use the subject length / 3 if program == 'blastn' or program == 'blastp' or program == 'blastx': subject_length = float(row['subject_length']) else: subject_length = float(row['subject_length']) / 3 # Calculate the percent identity and extract the bitscore from the row # Percent identity is: (length of the alignment - number of mismatches) / total subject length percentidentity = float('{:0.2f}'.format((float(row['positives']) - float(row['gaps'])) / subject_length * 100)) # If the percent identity is greater than the cutoff if percentidentity >= cutoff: # Split off any | from the sample name target = row['subject_id'].split('|')[0] # As there are variable _ in the name, try to split off the last one only if there are # multiple and only keep the first part of the split if there is one _ in the name underscored = '_'.join(target.split('_')[:-1]) if len(target.split('_')) > 2 else \ target.split('_')[0] try: # Update the dictionary with the reference genome and the target resultset[underscored].add(target) except KeyError: # Initialise the dictionary with the first hit resultset[underscored] = set() resultset[underscored].add(target) # Get the number of unique genes per reference genome for underscored, target_set in resultset.items(): resultdict[underscored] = len(target_set) # Sort the dictionary on the number of hits - best at the top topcore = sorted(resultdict.items(), key=operator.itemgetter(1), reverse=True) # If there are no results, populate negative results if not resultdict: sample[analysistype].blastresults = 'NA' # If results, add a string of the best number of hits, and a string of the total number of genes # This is currently 1013. If this changes, I may re-implement a dynamic method of determining # this value else: sample[analysistype].blastresults[topcore[0][0]] = (str(topcore[0][1]), str(1013)) except FileNotFoundError: sample[analysistype].blastresults = 'NA' return metadata" 84,"def reporter(metadata, analysistype, reportpath): """""" Create the core genome report :param metadata: type LIST: List of metadata objects :param analysistype: type STR: Current analysis type :param reportpath: type STR: Absolute path to folder in which the reports are to be created :return: """""" header = 'Strain,ClosestRef,GenesPresent/Total,\n' data = str() for sample in metadata: try: if sample[analysistype].blastresults != 'NA': if sample.general.closestrefseqgenus == 'Listeria': # Write the sample name, closest ref genome, and the # of genes found / total # of genes closestref = list(sample[analysistype].blastresults.items())[0][0] coregenes = list(sample[analysistype].blastresults.items())[0][1][0] # Find the closest reference file try: ref = glob(os.path.join(sample[analysistype].targetpath, '{fasta}*' .format(fasta=closestref)))[0] except IndexError: # Replace underscores with dashes to find files closestref = closestref.replace('_', '-') ref = glob(os.path.join(sample[analysistype].targetpath, '{fasta}*' .format(fasta=closestref)))[0] # Determine the number of core genes present in the closest reference file totalcore = 0 for _ in SeqIO.parse(ref, 'fasta'): totalcore += 1 # Add the data to the object sample[analysistype].targetspresent = coregenes sample[analysistype].totaltargets = totalcore sample[analysistype].coreresults = '{cg}/{tc}'.format(cg=coregenes, tc=totalcore) row = '{sn},{cr},{cg}/{tc}\n'.format(sn=sample.name, cr=closestref, cg=coregenes, tc=totalcore) # Open the report with open(os.path.join(sample[analysistype].reportdir, 
'{sn}_{at}.csv'.format(sn=sample.name, at=analysistype)), 'w') as report: # Write the row to the report report.write(header) report.write(row) data += row else: sample[analysistype].targetspresent = 'NA' sample[analysistype].totaltargets = 'NA' sample[analysistype].coreresults = 'NA' except KeyError: sample[analysistype].targetspresent = 'NA' sample[analysistype].totaltargets = 'NA' sample[analysistype].coreresults = 'NA' with open(os.path.join(reportpath, 'coregenome.csv'), 'w') as report: # Write the data to the report report.write(header) report.write(data)" 85,"def annotatedcore(self): """""" Calculates the core genome of organisms using custom databases """""" logging.info('Calculating annotated core') # Determine the total number of core genes self.total_core() # Iterate through all the samples, and process all Escherichia for sample in self.metadata: if sample.general.bestassemblyfile != 'NA': # Create a set to store the names of all the core genes in this strain sample[self.analysistype].coreset = set() if sample.general.referencegenus == 'Escherichia': # Add the Escherichia sample to the runmetadata self.runmetadata.samples.append(sample) # Parse the BLAST report try: report = sample[self.analysistype].report self.blastparser(report=report, sample=sample, fieldnames=self.fieldnames) except KeyError: sample[self.analysistype].coreset = list() # Create the report self.reporter()" 86,"def total_core(self): """""" Determine the total number of core genes present """""" corefile = os.path.join(self.reffilepath, self.analysistype, 'Escherichia', 'core_combined.fasta') for record in SeqIO.parse(corefile, 'fasta'): gene_name = record.id.split('-')[0] if gene_name not in self.coregenomes: self.coregenomes.append(gene_name)" 87,"def blastparser(self, report, sample, fieldnames): """""" Parse the number of core genes present in the strain from the BLAST outputs :param report: the name and path of the BLAST outputs :param sample: the sample object :param fieldnames: type LIST: List of fields used to in BLAST analyses """""" try: # Open the sequence profile file as a dictionary blastdict = DictReader(open(report), fieldnames=self.fieldnames, dialect='excel-tab') # Go through each BLAST result for row in blastdict: # Ignore the headers if row['query_id'].startswith(fieldnames[0]): pass else: # Calculate the percent identity and extract the bitscore from the row # Percent identity is the (length of the alignment - number of mismatches) / total subject length percentidentity = float('{:0.2f}'.format((float(row['positives']) - float(row['gaps'])) / float(row['subject_length']) * 100)) # Split off any | and - from the sample name target = row['subject_id'].split('|')[0].split('-')[0] # If the hit passes the cutoff threshold, add it to the set of core genes present if percentidentity >= self.cutoff: sample[self.analysistype].coreset.add(target) except FileNotFoundError: pass" 88,"def reporter(self): """""" Create a .csv file with the strain name, and the number of core genes present/the total number of core genes """""" with open(os.path.join(self.reportpath, 'Escherichia_core.csv'), 'w') as report: data = 'Strain,Genes Present/Total\n' for sample in self.runmetadata.samples: # Convert the set to a list for JSON serialization sample[self.analysistype].coreset = list(sample[self.analysistype].coreset) sample[self.analysistype].coreresults = '{cs}/{cg}'.format(cs=len(sample[self.analysistype].coreset), cg=len(self.coregenomes)) # Add strain name, the number of core genes present, and the number of 
total core genes to the string data += '{sn},{cr}\n'.format(sn=sample.name, cr=sample[self.analysistype].coreresults) report.write(data) for sample in self.metadata: # Remove the messy blast results and set/list of core genes from the object try: delattr(sample[self.analysistype], ""blastresults"") except AttributeError: pass try: delattr(sample[self.analysistype], 'coreset') except AttributeError: pass" 89,"def get_simple_output(self, stderr=STDOUT): """"""Executes a simple external command and get its output The command contains no pipes. Error messages are redirected to the standard output by default :param stderr: where to put stderr :return: output of command """""" args = shlex.split(self.cmd) proc = Popen(args, stdout=PIPE, stderr=stderr) return proc.communicate()[0].decode(""utf8"")" 90,"def get_complex_output(self, stderr=STDOUT): """"""Executes a piped command and get the lines of the output in a list :param stderr: where to put stderr :return: output of command """""" proc = Popen(self.cmd, shell=True, stdout=PIPE, stderr=stderr) return proc.stdout.readlines()" 91,"def get_output_from_pipe(self, input_file): """"""Executes an external command and get its output. The command receives its input_file from the stdin through a pipe :param input_file: input file :return: output of command """""" args = shlex.split(self.cmd) p = Popen(args, stdout=PIPE, stdin=PIPE) # | grep es p.stdin.write(bytearray(input_file.encode(""utf8""))) # echo test | return p.communicate()[0].decode(""utf8"")" 92,"def get_return_code(self, stderr=STDOUT): """"""Executes a simple external command and return its exit status :param stderr: where to put stderr :return: return code of command """""" args = shlex.split(self.cmd) return call(args, stdout=PIPE, stderr=stderr)" 93,"def get_exit_code(self): """"""Executes the external command and get its exitcode, stdout and stderr :return: exit code of command """""" args = shlex.split(self.cmd) proc = Popen(args, stdout=PIPE, stderr=PIPE) out, err = proc.communicate() out, err = out.decode(""utf8""), err.decode(""utf8"") exitcode = proc.returncode # return exitcode, out, err" 94,"def execute_in_background(self): """"""Executes a (shell) command in the background :return: the process' pid """""" # http://stackoverflow.com/questions/1605520 args = shlex.split(self.cmd) p = Popen(args) return p.pid" 95,"def keep_alive(self): """"""Keeps a process alive. If the process terminates, it will restart it The terminated processes become zombies. They die when their parent terminates """""" while True: pid = self.execute_in_background() p = psutil.Process(pid) while p.is_running() and str(p.status) != 'zombie': os.system('sleep 5')" 96,"def get_free_mb(folder): """""" Return folder/drive free space (in bytes) """""" if platform.system() == 'Windows': free_bytes = ctypes.c_ulonglong(0) ctypes.windll.kernel32.GetDiskFreeSpaceExW(ctypes.c_wchar_p(folder), None, None, ctypes.pointer(free_bytes)) return free_bytes.value/1024/1024 else: st = os.statvfs(folder) return st.f_bavail * st.f_frsize/1024/1024" 97,"def increment_title(title): """""" Increments a string that ends in a number """""" count = re.search('\d+$', title).group(0) new_title = title[:-(len(count))] + str(int(count)+1) return new_title" 98,"def check_limit(self, limit): """""" Checks if the given limit is valid. A limit must be > 0 to be considered valid. Raises ValueError when the *limit* is not > 0. 
"""""" if limit > 0: self.limit = limit else: raise ValueError(""Rule limit must be strictly > 0 ({0} given)"" .format(limit)) return self" 99,"def build_filter(self, filter): """""" Tries to build a :class:`filter.Filter` instance from the given filter. Raises ValueError if the :class:`filter.Filter` object can't be build from the given filter. """""" try: self.filter = Filter.from_string(filter, self.limit) except ValueError: raise return self" 100,"def build_action(self, action): """""" Tries to build an :class:`action.Action` instance from the given action. Raises ValueError if the :class:`action.Action` object can't be build from the given action. """""" try: self.action = Action.from_string(action) except ValueError: raise return self" 101,"def get_args(): """""" request the arguments for running """""" ap = argparse.ArgumentParser(description=""Create frames for a movie that can be compiled using ffmpeg"") ap.add_argument(""start"", help=""date string as start time"") ap.add_argument(""end"", help=""date string as end time"") ap.add_argument(""step"", type=float, help=""fraction of a day to step by"") ap.add_argument(""--config"", help=""path to a config file"", default=""config.json"") return ap.parse_args()" 102,"def make_three_color(data, time, step, config, shape=(1280, 1280), lower_val=(0, 0, 0), upper_val=(2.5, 2.5, 2.5)): """""" create a three color image according to the config file :param data: a dictionary of fetched data where keys correspond to products :param config: a config object :param shape: the size of a composite image :param lower_val: a tuple of lower values for RGB, any value below this is set to the low value :param upper_val: a tuple of upper values for RGB, any value above this is set to the high value :return: a (m,n,3) numpy array for a three color image where all values are between 0 and 1 """""" order = {'red': 0, 'green': 1, 'blue': 2} three_color = np.zeros((shape[0], shape[1], 3)) channel_colors = {color: config.default[color] for color in ['red', 'green', 'blue']} for color, channel in channel_colors.items(): if data[channel][1] is None or \ abs((time - date_parser.parse(data[channel][0]['date-end'])).total_seconds()) > step.total_seconds()/2.0: return np.zeros((shape[0], shape[1], 3)) three_color[:, :, order[color]] = data[channel][1] # scale the image by the power three_color[:, :, order[color]] = np.power(three_color[:, :, order[color]], config.default[""{}_power"".format(color)]) # adjust the percentile thresholds lower = lower_val[order[color]] upper = upper_val[order[color]] three_color[np.where(three_color[:, :, order[color]] < lower)] = lower three_color[np.where(three_color[:, :, order[color]] > upper)] = upper # image values must be between (0,1) so scale image for color, index in order.items(): three_color[:, :, index] /= upper_val[order[color]] return three_color" 103,"def main(): """""" process the main task """""" args = get_args() args.start = date_parser.parse(args.start) args.end = date_parser.parse(args.end) args.step = timedelta(args.step) config = Config(args.config) times = [args.start + i * args.step for i in range(int((args.end - args.start) / args.step))] for i, time in enumerate(times): make_plot(time, config, args.step)" 104,"def make_plot(time, config, step): """""" create a three color and all composite images for a given time NOTE: channel mins and maxes are currently hardcoded since this is a very specific script :param i: the index to save the file as :param time: :param config: :return: """""" fig, ax = plt.subplots() 
try: result = Fetcher(time, products=config.products, suvi_composite_path=config.suvi_composite_path).fetch(multithread=False) if result: arr = make_three_color(result, time, step, config, upper_val=(2.4, 2.4, 2.4)) else: arr = np.zeros((1280, 1280, 3)) except ValueError: arr = np.zeros((1280, 1280, 3)) ax.imshow(arr, origin='lower') timestr = time.strftime(""%Y-%m-%d %H:%M:%S"") fnextend = time.strftime(""%Y%m%d%H%M%S"") ax.set_title(timestr) ax.set_axis_off() fig.savefig(""three_{}.png"".format(fnextend), bbox_inches='tight', dpi=300) plt.close(fig) channel_min = {'suvi-l2-ci094': 0, 'suvi-l2-ci131': 0, 'suvi-l2-ci171': 0, 'suvi-l2-ci195': 0, 'suvi-l2-ci284': 0, 'suvi-l2-ci304': 0} channel_max = {'suvi-l2-ci094': 1, 'suvi-l2-ci131': 1, 'suvi-l2-ci171': 1.8, 'suvi-l2-ci195': 1.8, 'suvi-l2-ci284': 1.8, 'suvi-l2-ci304': 2.5} for channel in channel_min: fig, ax = plt.subplots() if result[channel][1] is not None and \ abs((time - date_parser.parse(result[channel][0]['date-end'])).total_seconds()) < step.total_seconds()/2.0: dat = np.power(result[channel][1], 0.25) ax.set_title(date_parser.parse(result[channel][0]['date-obs']).strftime(""%Y-%m-%d %H:%M:%S"")) dat[np.isnan(dat)] = 0 else: dat = np.zeros((1280, 1280)) ax.set_title(timestr) ax.imshow(dat, vmin=channel_min[channel], vmax=channel_max[channel], cmap='gray', origin='lower') ax.set_axis_off() fig.savefig(""{}_{}.png"".format(channel, fnextend), bbox_inches='tight', dpi=300) plt.close(fig)" 105,"def overall(): """""" The overall grammer for pulling apart the main input files. """""" return ZeroOrMore(Grammar.comment) + Dict(ZeroOrMore(Group( Grammar._section + ZeroOrMore(Group(Grammar.line))) ))" 106,"def file(): """""" Grammar for files found in the overall input files. """""" return ( Optional(Word(alphanums).setResultsName('alias') + Suppress(Literal('.'))) + Suppress(White()) + Word(approved_printables).setResultsName('filename') )" 107,"def command_lines(): """""" Grammar for commands found in the overall input files. """""" return ZeroOrMore(Group( Group(ZeroOrMore(Group(Grammar.comment))) + Grammar._non_comment_line ))" 108,"def command(): """""" Grammar for commands found in the overall input files. """""" return ( OneOrMore( Word(approved_printables+' ').setResultsName('command', listAllMatches=True) ^ Grammar.__command_input_output.setResultsName('_in', listAllMatches=True) ) )" 109,"def listen_to_event_updates(): """"""Subscribe to events."""""" def callback(event): print('Event:', event) client.create_event_subscription(instance='simulator', on_data=callback) sleep(5)" 110,"def get_current_scene_node(): """"""Return the name of the jb_sceneNode, that describes the current scene or None if there is no scene node. :returns: the full name of the node or none, if there is no scene node :rtype: str | None :raises: None """""" c = cmds.namespaceInfo(':', listOnlyDependencyNodes=True, absoluteName=True, dagPath=True) l = cmds.ls(c, type='jb_sceneNode', absoluteName=True) if not l: return else: for n in sorted(l): if not cmds.listConnections(""%s.reftrack"" % n, d=False): return n" 111,"def updateSpec(self, *args, **kwargs): """"""Updates the spectrogram. First argument can be a filename, or a data array. If no arguments are given, clears the spectrograms. 
For other arguments, see: :meth:`SpecWidget.updateData` """""" if args[0] is None: self.specPlot.clearImg() elif isinstance(args[0], basestring): self.specPlot.fromFile(*args, **kwargs) else: self.specPlot.updateData(*args,**kwargs)" 112,"def showSpec(self, fname): """"""Draws the spectrogram if it is currently None"""""" if not self.specPlot.hasImg() and fname is not None: self.specPlot.fromFile(fname)" 113,"def updateSpiketrace(self, xdata, ydata, plotname=None): """"""Updates the spike trace :param xdata: index values :type xdata: numpy.ndarray :param ydata: values to plot :type ydata: numpy.ndarray """""" if plotname is None: plotname = self.responsePlots.keys()[0] if len(ydata.shape) == 1: self.responsePlots[plotname].updateData(axeskey='response', x=xdata, y=ydata) else: self.responsePlots[plotname].addTraces(xdata, ydata)" 114,"def addRasterPoints(self, xdata, repnum, plotname=None): """"""Add a list (or numpy array) of points to raster plot, in any order. :param xdata: bin centers :param ydata: rep number """""" if plotname is None: plotname = self.responsePlots.keys()[0] ydata = np.ones_like(xdata)*repnum self.responsePlots[plotname].appendData('raster', xdata, ydata)" 115,"def updateSignal(self, xdata, ydata, plotname=None): """"""Updates the trace of the outgoing signal :param xdata: time points of recording :param ydata: brain potential at time points """""" if plotname is None: plotname = self.responsePlots.keys()[0] self.responsePlots[plotname].updateData(axeskey='stim', x=xdata, y=ydata)" 116,"def setXlimits(self, lims): """"""Sets the X axis limits of the trace plot :param lims: (min, max) of x axis, in same units as data :type lims: (float, float) """""" # update all ""linked"", plots self.specPlot.setXlim(lims) for plot in self.responsePlots.values(): plot.setXlim(lims) # ridiculous... 
sizes = self.splittersw.sizes() if len(sizes) > 1: if self.badbadbad: sizes[0] +=1 sizes[1] -=1 else: sizes[0] -=1 sizes[1] +=1 self.badbadbad = not self.badbadbad self.splittersw.setSizes(sizes) self._ignore_range_signal = False" 117,"def setNreps(self, nreps): """"""Sets the number of reps before the raster plot resets"""""" for plot in self.responsePlots.values(): plot.setNreps(nreps)" 118,"def specAutoRange(self): """"""Auto adjusts the visible range of the spectrogram"""""" trace_range = self.responsePlots.values()[0].viewRange()[0] vb = self.specPlot.getViewBox() vb.autoRange(padding=0) self.specPlot.setXlim(trace_range)" 119,"def interpret_header(self): """""" Read pertinent information from the image headers, especially location and radius of the Sun to calculate the default thematic map :return: setes self.date, self.cy, self.cx, and self.sun_radius_pixel """""" # handle special cases since date-obs field changed names if 'DATE_OBS' in self.header: self.date = self.header['DATE_OBS'] elif 'DATE-OBS' in self.header: self.date = self.header['DATE-OBS'] else: raise Exception(""Image does not have a DATE_OBS or DATE-OBS field"") self.cy, self.cx = self.header['CRPIX1'], self.header['CRPIX2'] sun_radius_angular = sun.solar_semidiameter_angular_size(t=time.parse_time(self.date)).arcsec arcsec_per_pixel = self.header['CDELT1'] self.sun_radius_pixel = (sun_radius_angular / arcsec_per_pixel)" 120,"def save(self): """""" Save as a FITS file and attempt an upload if designated in the configuration file """""" out = Outgest(self.output, self.selection_array.astype('uint8'), self.headers, self.config_path) out.save() out.upload()" 121,"def on_exit(self): """""" When you click to exit, this function is called, prompts whether to save"""""" answer = messagebox.askyesnocancel(""Exit"", ""Do you want to save as you quit the application?"") if answer: self.save() self.quit() self.destroy() elif answer is None: pass # the cancel action else: self.quit() self.destroy()" 122,"def make_gui(self): """""" Setups the general structure of the gui, the first function called """""" self.option_window = Toplevel() self.option_window.protocol(""WM_DELETE_WINDOW"", self.on_exit) self.canvas_frame = tk.Frame(self, height=500) self.option_frame = tk.Frame(self.option_window, height=300) self.canvas_frame.pack(side=tk.LEFT, fill=tk.BOTH, expand=True) self.option_frame.pack(side=tk.RIGHT, fill=None, expand=False) self.make_options_frame() self.make_canvas_frame() self.disable_singlecolor()" 123,"def configure_threecolor_image(self): """""" configures the three color image according to the requested parameters :return: nothing, just updates self.image """""" order = {'red': 0, 'green': 1, 'blue': 2} self.image = np.zeros((self.shape[0], self.shape[1], 3)) for color, var in self.multicolorvars.items(): channel = var.get() # determine which channel should be plotted as this color self.image[:, :, order[color]] = self.data[channel] # scale the image by the power self.image[:, :, order[color]] = np.power(self.image[:, :, order[color]], self.multicolorpower[color].get()) # adjust the percentile thresholds lower = np.nanpercentile(self.image[:, :, order[color]], self.multicolormin[color].get()) upper = np.nanpercentile(self.image[:, :, order[color]], self.multicolormax[color].get()) self.image[np.where(self.image[:, :, order[color]] < lower)] = lower self.image[np.where(self.image[:, :, order[color]] > upper)] = upper # image values must be between (0,1) so scale image for color, index in order.items(): self.image[:, :, 
index] /= np.nanmax(self.image[:, :, index])" 124,"def configure_singlecolor_image(self, scale=False): """""" configures the single color image according to the requested parameters :return: nothing, just updates self.image """""" # determine which channel to use self.image = self.data[self.singlecolorvar.get()] # scale the image by requested power self.image = np.power(self.image, self.singlecolorpower.get()) # adjust the percentile thresholds lower = np.nanpercentile(self.image, self.singlecolormin.get()) upper = np.nanpercentile(self.image, self.singlecolormax.get()) self.image[self.image < lower] = lower self.image[self.image > upper] = upper # image values must be between (0,1) so scale image self.image /= np.nanmax(self.image)" 125,"def updateArray(self, array, indices, value): """""" updates array so that pixels at indices take on value :param array: (m,n) array to adjust :param indices: flattened image indices to change value :param value: new value to assign :return: the changed (m,n) array """""" lin = np.arange(array.size) new_array = array.flatten() new_array[lin[indices]] = value return new_array.reshape(array.shape)" 126,"def onlasso(self, verts): """""" Main function to control the action of the lasso, allows user to draw on data image and adjust thematic map :param verts: the vertices selected by the lasso :return: nothin, but update the selection array so lassoed region now has the selected theme, redraws canvas """""" p = path.Path(verts) ind = p.contains_points(self.pix, radius=1) self.history.append(self.selection_array.copy()) self.selection_array = self.updateArray(self.selection_array, ind, self.solar_class_var.get()) self.mask.set_data(self.selection_array) self.fig.canvas.draw_idle()" 127,"def make_canvas_frame(self): """""" Create the data and thematic map images for the first time """""" self.fig, (self.imageax, self.previewax) = plt.subplots(ncols=2, figsize=self.canvas_size, sharex=True, sharey=True, gridspec_kw=self.subplot_grid_spec) self.canvas = FigureCanvasTkAgg(self.fig, master=self.canvas_frame) self.canvas.mpl_connect('button_press_event', self.onclick) self.canvas.mpl_connect('key_press_event', self.onpress) # set up the channel data view self.configure_threecolor_image() self.imageplot = self.imageax.imshow(self.image) self.imageax.set_xlim([0, self.shape[0]]) self.imageax.set_ylim([0, self.shape[0]]) self.imageax.set_axis_off() self.history.append(self.selection_array) cmap = self.config.solar_cmap self.mask = self.previewax.imshow(self.selection_array, origin='lower', interpolation='nearest', cmap=cmap, vmin=-1, vmax=max([num for _, num in self.config.solar_classes])+1) self.previewax.set_xlim([0, self.shape[0]]) self.previewax.set_ylim([0, self.shape[0]]) self.previewax.set_aspect(""equal"") self.previewax.set_axis_off() # add selection layer for lasso self.pix = np.arange(self.shape[0]) # assumes square image xv, yv = np.meshgrid(self.pix, self.pix) self.pix = np.vstack((xv.flatten(), yv.flatten())).T lineprops = dict(color=self.config.default['lasso_color'], linewidth=self.config.default['lasso_width']) self.lasso = LassoSelector(self.imageax, self.onlasso, lineprops=lineprops) # display everything self.canvas.show() self.canvas.get_tk_widget().pack(side=tk.TOP, fill=tk.BOTH, expand=True) self.canvas._tkcanvas.pack(side=tk.TOP, fill=tk.BOTH, expand=True) # add the tool bar self.toolbarcenterframe = tk.LabelFrame(self.canvas_frame, borderwidth=0, text=""Draw: unlabeled"", relief=tk.FLAT, labelanchor=tk.N, background='red') toolbarframe = 
tk.Frame(self.toolbarcenterframe) toolbar = CustomToolbar(self.canvas, toolbarframe, self.toolbarcenterframe, self) toolbar.update() self.fig.canvas.toolbar.set_message = lambda x: """" # remove state reporting toolbarframe.pack() self.toolbarcenterframe.pack(side=tk.BOTTOM, fill=tk.X)" 128,"def onpress(self, event): """""" Reacts to key commands :param event: a keyboard event :return: if 'c' is pressed, clear all region patches """""" if event.key == 'c': # clears all the contours for patch in self.region_patches: patch.remove() self.region_patches = [] self.fig.canvas.draw_idle() elif event.key == ""u"": # undo a label self.undobutton_action()" 129,"def onclick(self, event): """""" Draw contours on the data for a click in the thematic map :param event: mouse click on thematic map preview """""" if event.inaxes == self.previewax: y, x = int(event.xdata), int(event.ydata) label = self.selection_array[x, y] contiguous_regions = scipy.ndimage.label(self.selection_array == label)[0] this_region = contiguous_regions == (contiguous_regions[x, y]) # remove the boundaries so any region touching the edge isn't drawn odd this_region[0, :] = 0 this_region[:, 0] = 0 this_region[this_region.shape[0]-1, :] = 0 this_region[:, this_region.shape[1]-1] = 0 # convert the region mask into just a true/false array of its boundary pixels edges = binary_erosion(this_region) ^ this_region # convert the boundary pixels into a path, moving around instead of just where x, y = np.where(edges) coords = np.dstack([x, y])[0] path = [coords[0]] coords = coords[1:] while len(coords): dist = np.sum(np.abs(path[-1] - coords), axis=1) neighbor_index = np.argmin(dist) if dist[neighbor_index] < 5: path.append(coords[neighbor_index].copy()) coords[neighbor_index:-1] = coords[neighbor_index + 1:] coords = coords[:-1] else: break path = np.array(path) clips = [] while len(coords) > 5: dist = np.sum(np.abs(path[-1] - coords), axis=1) neighbor_index = np.argmin(dist) clip = [coords[neighbor_index].copy()] coords[neighbor_index:-1] = coords[neighbor_index + 1:] coords = coords[:-1] while len(coords): dist = np.sum(np.abs(clip[-1] - coords), axis=1) neighbor_index = np.argmin(dist) if dist[neighbor_index] < 5: clip.append(coords[neighbor_index].copy()) coords[neighbor_index:-1] = coords[neighbor_index + 1:] coords = coords[:-1] else: break clips.append(np.array(clip)) # draw the continguous on the selection area self.region_patches.append(PatchCollection( [Polygon(np.dstack([path[:, 1], path[:, 0]])[0], False, fill=False, facecolor=None, edgecolor=""black"", alpha=1, lw=2.5)] + [Polygon(np.dstack([clip[:, 1], clip[:, 0]])[0], False, fill=False, facecolor=None, edgecolor=""black"", alpha=1, lw=2.0) for clip in clips], match_original=True)) self.imageax.add_collection(self.region_patches[-1]) self.fig.canvas.draw_idle()" 130,"def make_options_frame(self): """""" make the frame that allows for configuration and classification"""""" self.tab_frame = ttk.Notebook(self.option_frame, width=800) self.tab_configure = tk.Frame(self.tab_frame) self.tab_classify = tk.Frame(self.tab_frame) self.make_configure_tab() self.make_classify_tab() self.tab_frame.add(self.tab_configure, text=""Configure"") self.tab_frame.add(self.tab_classify, text=""Classify"") self.tab_frame.pack(fill=tk.BOTH, expand=True)" 131,"def disable_multicolor(self): """""" swap from the multicolor image to the single color image """""" # disable the multicolor image for color in ['red', 'green', 'blue']: self.multicolorscales[color].config(state=tk.DISABLED, bg='grey') 
self.multicolorframes[color].config(bg='grey') self.multicolorlabels[color].config(bg='grey') self.multicolordropdowns[color].config(bg='grey', state=tk.DISABLED) self.multicolorminscale[color].config(bg='grey', state=tk.DISABLED) self.multicolormaxscale[color].config(bg='grey', state=tk.DISABLED) # enable the single color self.singlecolorscale.config(state=tk.NORMAL, bg=self.single_color_theme) self.singlecolorframe.config(bg=self.single_color_theme) self.singlecolorlabel.config(bg=self.single_color_theme) self.singlecolordropdown.config(bg=self.single_color_theme, state=tk.NORMAL) self.singlecolorminscale.config(bg=self.single_color_theme, state=tk.NORMAL) self.singlecolormaxscale.config(bg=self.single_color_theme, state=tk.NORMAL)" 132,"def update_button_action(self): """""" when update button is clicked, refresh the data preview"""""" if self.mode.get() == 3: # threecolor self.configure_threecolor_image() elif self.mode.get() == 1: # singlecolor self.configure_singlecolor_image() else: raise ValueError(""mode can only be singlecolor or threecolor"") self.imageplot.set_data(self.image) if self.mode.get() == 1: # singlecolor self.imageplot.set_cmap('gist_gray') self.fig.canvas.draw_idle()" 133,"def make_configure_tab(self): """""" initial set up of configure tab"""""" # Setup the choice between single and multicolor modeframe = tk.Frame(self.tab_configure) self.mode = tk.IntVar() singlecolor = tk.Radiobutton(modeframe, text=""Single color"", variable=self.mode, value=1, command=lambda: self.disable_multicolor()) multicolor = tk.Radiobutton(modeframe, text=""Three color"", variable=self.mode, value=3, command=lambda: self.disable_singlecolor()) self.mode.set(3) singlecolor.pack(side=tk.LEFT) multicolor.pack(side=tk.LEFT) updatebutton = tk.Button(master=modeframe, text=""Update"", command=self.update_button_action) updatebutton.pack(side=tk.RIGHT) modeframe.grid(row=0, column=0) self.setup_multicolor() self.setup_singlecolor()" 134,"def make_classify_tab(self): """""" initial set up of classification tab"""""" self.pick_frame = tk.Frame(self.tab_classify) self.pick_frame2 = tk.Frame(self.tab_classify) self.solar_class_var = tk.IntVar() self.solar_class_var.set(0) # initialize to unlabeled buttonnum = 0 frame = [self.pick_frame, self.pick_frame2] for text, value in self.config.solar_classes: b = tk.Radiobutton(frame[buttonnum % 2], text=text, variable=self.solar_class_var, value=value, background=self.config.solar_colors[text], indicatoron=0, width=50, height=2, command=self.change_class) b.pack(fill=tk.BOTH, expand=1) buttonnum += 1 self.pick_frame.grid(row=0, column=0, rowspan=5, sticky=tk.W + tk.E + tk.N + tk.S) self.pick_frame2.grid(row=0, column=1, rowspan=5, sticky=tk.W + tk.E + tk.N + tk.S) undobutton = tk.Button(master=self.tab_classify, text=""Undo"", command=self.undobutton_action) undobutton.grid(row=6, column=0, columnspan=2, sticky=tk.W + tk.E)" 135,"def setup_singlecolor(self): """""" initial setup of single color options and variables"""""" self.singlecolorframe = tk.Frame(self.tab_configure, bg=self.single_color_theme) channel_choices = sorted(list(self.data.keys())) self.singlecolorlabel = tk.Label(self.singlecolorframe, text=""single"", bg=self.single_color_theme, width=10) self.singlecolorvar = tk.StringVar() self.singlecolorpower = tk.DoubleVar() self.singlecolormin = tk.DoubleVar() self.singlecolormax = tk.DoubleVar() self.singlecolordropdown = tk.OptionMenu(self.singlecolorframe, self.singlecolorvar, *channel_choices) self.singlecolorscale = 
tk.Scale(self.singlecolorframe, variable=self.singlecolorpower, orient=tk.HORIZONTAL, from_=self.config.ranges['single_color_power_min'], bg=self.single_color_theme, to_=self.config.ranges['single_color_power_max'], resolution=self.config.ranges['single_color_power_resolution'], length=200) self.singlecolorminscale = tk.Scale(self.singlecolorframe, variable=self.singlecolormin, orient=tk.HORIZONTAL, from_=0, bg=self.single_color_theme, to_=self.config.ranges['single_color_vmin'], resolution=self.config.ranges['single_color_vresolution'], length=200) self.singlecolormaxscale = tk.Scale(self.singlecolorframe, variable=self.singlecolormax, orient=tk.HORIZONTAL, from_=self.config.ranges['single_color_vmax'], bg=self.single_color_theme, to_=100, resolution=self.config.ranges['single_color_vresolution'], length=200) self.singlecolorvar.set(self.config.products_map[self.config.default['single']]) self.singlecolorpower.set(self.config.default['single_power']) self.singlecolormin.set(0) self.singlecolormax.set(100) self.singlecolordropdown.config(bg=self.single_color_theme, width=10) self.singlecolorlabel.pack(side=tk.LEFT) self.singlecolorscale.pack(side=tk.RIGHT) self.singlecolormaxscale.pack(side=tk.RIGHT) self.singlecolorminscale.pack(side=tk.RIGHT) self.singlecolordropdown.pack() self.singlecolorframe.grid(row=4, columnspan=5, rowspan=1)" 136,"def setup_multicolor(self): """""" initial setup of multicolor options and variables"""""" # Setup the options for multicolor multicolormasterframe = tk.Frame(self.tab_configure) channel_choices = sorted(list(self.data.keys())) rgb = ['red', 'green', 'blue'] self.multicolorframes = {color: tk.Frame(multicolormasterframe, bg=color) for color in rgb} self.multicolorlabels = {color: tk.Label(self.multicolorframes[color], text=color, bg=color, width=10) for color in rgb} self.multicolorvars = {color: tk.StringVar() for color in rgb} self.multicolorpower = {color: tk.DoubleVar() for color in rgb} self.multicolormin = {color: tk.DoubleVar() for color in rgb} self.multicolormax = {color: tk.DoubleVar() for color in rgb} self.multicolordropdowns = {color: tk.OptionMenu(self.multicolorframes[color], self.multicolorvars[color], *channel_choices) for color in rgb} self.multicolorscales = {color: tk.Scale(self.multicolorframes[color], variable=self.multicolorpower[color], orient=tk.HORIZONTAL, from_=self.config.ranges['multi_color_power_min'], to_=self.config.ranges['multi_color_power_max'], bg=color, resolution=self.config.ranges['multi_color_power_resolution'], length=200) for color in rgb} self.multicolorminscale = {color: tk.Scale(self.multicolorframes[color], variable=self.multicolormin[color], orient=tk.HORIZONTAL, from_=0, to_=self.config.ranges['multi_color_vmin'], bg=color, resolution=self.config.ranges['multi_color_vresolution'], length=200) for color in rgb} self.multicolormaxscale = {color: tk.Scale(self.multicolorframes[color], variable=self.multicolormax[color], orient=tk.HORIZONTAL, from_=self.config.ranges['multi_color_vmax'], to_=100, bg=color, resolution=self.config.ranges['multi_color_vresolution'], length=200) for color in rgb} for color in rgb: self.multicolorvars[color].set(self.config.products_map[self.config.default[color]]) self.multicolorpower[color].set(self.config.default[color + ""_power""]) self.multicolormin[color].set(0) self.multicolormax[color].set(100) self.multicolordropdowns[color].config(bg=color, width=10) self.multicolorlabels[color].pack(side=tk.LEFT) self.multicolorscales[color].pack(side=tk.RIGHT) 
self.multicolormaxscale[color].pack(side=tk.RIGHT) self.multicolorminscale[color].pack(side=tk.RIGHT) self.multicolordropdowns[color].pack() self.multicolorframes[color].pack(fill=tk.BOTH) multicolormasterframe.grid(row=1, column=0, columnspan=5, rowspan=3)" 137,"def undobutton_action(self): """""" when undo is clicked, revert the thematic map to the previous state"""""" if len(self.history) > 1: old = self.history.pop(-1) self.selection_array = old self.mask.set_data(old) self.fig.canvas.draw_idle()" 138,"def change_class(self): """""" on changing the classification label, update the ""draw"" text """""" self.toolbarcenterframe.config(text=""Draw: {}"".format(self.config.solar_class_name[self.solar_class_var.get()]))" 139,"def draw_circle(self, center, radius, array, value, mode=""set""): """""" Draws a circle of specified radius on the input array and fills it with specified value :param center: a tuple for the center of the circle :type center: tuple (x,y) :param radius: how many pixels in radius the circle is :type radius: int :param array: image to draw circle on :type array: size (m,n) numpy array :param value: what value to fill the circle with :type value: float :param mode: if ""set"" will assign the circle interior value, if ""add"" will add the value to the circle interior, throws exception otherwise :type mode: string, either ""set"" or ""add"" :return: updates input array """""" ri, ci = draw.circle(center[0], center[1], radius=radius, shape=array.shape) if mode == ""add"": array[ri, ci] += value elif mode == ""set"": array[ri, ci] = value else: raise ValueError(""draw_circle mode must be 'set' or 'add' but {} used"".format(mode)) return ri, ci, array[ri,ci]" 140,"def draw_annulus(self, center, inner_radius, outer_radius, array, value, mode=""set""): """""" Draws an annulus of specified radius on the input array and fills it with specified value :param center: a tuple for the center of the annulus :type center: tuple (x,y) :param inner_radius: how many pixels in radius the interior empty circle is, where the annulus begins :type inner_radius: int :param outer_radius: how many pixels in radius the larger outer circle is, where the annulus ends :type outer_radius: int :param array: image to draw annulus on :type array: size (m,n) numpy array :param value: what value to fill the annulus with :type value: float :param mode: if ""set"" will assign the circle interior value, if ""add"" will add the value to the circle interior, throws exception otherwise :type mode: string, either ""set"" or ""add"" :return: updates input array and then returns it with the annulus coordinates as a tuple """""" if mode == ""add"": self.draw_circle(center, outer_radius, array, value) self.draw_circle(center, inner_radius, array, -value) elif mode == ""set"": ri, ci, existing = self.draw_circle(center, inner_radius, array, -value) self.draw_circle(center, outer_radius, array, value) array[ri, ci] = existing else: raise ValueError(""draw_annulus mode must be 'set' or 'add' but {} used"".format(mode))" 141,"def draw_default(self, inside=5, outside=15): """""" Draw suggested sun disk, limb, and empty background :param inside: how many pixels from the calculated solar disk edge to go inward for the limb :param outside: how many pixels from the calculated solar disk edge to go outward for the limb :return: updates the self.selection_array """""" # fill everything with empty outer space if 'outer_space' in self.config.solar_class_index: self.selection_array[:, :] =
self.config.solar_class_index['outer_space'] elif 'empty_outer_space' in self.config.solar_class_index: self.selection_array[:, :] = self.config.solar_class_index['empty_outer_space'] else: raise ValueError(""outer_space or empty_outer_space must be classes with colors."") # draw the limb label in its location self.draw_annulus((self.cx, self.cy), self.sun_radius_pixel - inside, self.sun_radius_pixel + outside, self.selection_array, self.config.solar_class_index['limb']) # draw quiet sun in its location self.draw_circle((self.cx, self.cy), self.sun_radius_pixel - inside, self.selection_array, self.config.solar_class_index['quiet_sun'])" 142,"def values(self): """"""Gets the parameter values :returns: dict of inputs: | *'nfft'*: int -- length, in samples, of FFT chunks | *'window'*: str -- name of window to apply to FFT chunks | *'overlap'*: float -- percent overlap of windows """""" self.vals['nfft'] = self.ui.nfftSpnbx.value() self.vals['window'] = str(self.ui.windowCmbx.currentText()).lower() self.vals['overlap'] = self.ui.overlapSpnbx.value() return self.vals" 143,"def main(): """""" Parses the command-line args, and calls run. """""" parser = argparse.ArgumentParser( description='A pipeline that generates analysis pipelines.') parser.add_argument('input', nargs='?', help='A valid metapipe configuration file.') parser.add_argument('-o', '--output', help='An output destination. If none is provided, the ' 'results will be printed to stdout.', default=sys.stdout) parser.add_argument('-t', '--temp', help='A desired metapipe binary file. This is used to store ' 'temp data between generation and execution. ' '(Default: ""%(default)s"")', default='.metapipe') parser.add_argument('-s', '--shell', help='The path to the shell to be used when executing the ' 'pipeline. (Default: ""%(default)s)""', default='/bin/bash') parser.add_argument('-r', '--run', help='Run the pipeline as soon as it\'s ready.', action='store_true') parser.add_argument('-n', '--name', help='A name for the pipeline.', default='') parser.add_argument('-j', '--job-type', help='The destination for calculations (i.e. local, a PBS ' 'queue on a cluster, etc).\nOptions: {}. ' '(Default: ""%(default)s)""'.format(JOB_TYPES.keys()), default='local') parser.add_argument('-p', '--max-jobs', help='The maximum number of concurrent jobs allowed. ' 'Defaults to maximum available cores.', default=None) parser.add_argument('--report-type', help='The output report type. By default metapipe will ' 'print updates to the console. \nOptions: {}. ' '(Default: ""%(default)s)""'.format(QUEUE_TYPES.keys()), default='text') parser.add_argument('-v','--version', help='Displays the current version of the application.', action='store_true') args = parser.parse_args() if args.version: print('Version: {}'.format(__version__)) sys.exit(0) try: with open(args.input) as f: config = f.read() except IOError: print('No valid config file found.') return -1 run(config, args.max_jobs, args.output, args.job_type, args.report_type, args.shell, args.temp, args.run)" 144,"def run(config, max_jobs, output=sys.stdout, job_type='local', report_type='text', shell='/bin/bash', temp='.metapipe', run_now=False): """""" Create the metapipe based on the provided input. """""" if max_jobs == None: max_jobs = cpu_count() parser = Parser(config) try: command_templates = parser.consume() except ValueError as e: raise SyntaxError('Invalid config file. 
\n%s' % e) options = '\n'.join(parser.global_options) queue_type = QUEUE_TYPES[report_type] pipeline = Runtime(command_templates,queue_type,JOB_TYPES,job_type,max_jobs) template = env.get_template('output_script.tmpl.sh') with open(temp, 'wb') as f: pickle.dump(pipeline, f, 2) script = template.render(shell=shell, temp=os.path.abspath(temp), options=options) if run_now: output = output if output != sys.stdout else PIPELINE_ALIAS submit_job = make_submit_job(shell, output, job_type) submit_job.submit() try: f = open(output, 'w') output = f except TypeError: pass output.write(script) f.close()" 145,"def make_submit_job(shell, output, job_type): """""" Preps the metapipe main job to be submitted. """""" run_cmd = [shell, output] submit_command = Command(alias=PIPELINE_ALIAS, cmds=run_cmd) submit_job = get_job(submit_command, job_type) submit_job.make() return submit_job" 146,"def yaml(modules_to_register: Iterable[Any] = None, classes_to_register: Iterable[Any] = None) -> ruamel.yaml.YAML: """""" Create a YAML object for loading a YAML configuration. Args: modules_to_register: Modules containing classes to be registered with the YAML object. Default: None. classes_to_register: Classes to be registered with the YAML object. Default: None. Returns: A newly created YAML object, configured as appropriate. """""" # Define a round-trip yaml object for us to work with. This object should be imported by other modules # NOTE: ""typ"" is not a typo. It stands for ""type"" yaml = ruamel.yaml.YAML(typ = ""rt"") # Register representers and constructors # Numpy yaml.representer.add_representer(np.ndarray, numpy_to_yaml) yaml.constructor.add_constructor(""!numpy_array"", numpy_from_yaml) # Register external classes yaml = register_module_classes(yaml = yaml, modules = modules_to_register) yaml = register_classes(yaml = yaml, classes = classes_to_register) return yaml" 147,"def register_classes(yaml: ruamel.yaml.YAML, classes: Optional[Iterable[Any]] = None) -> ruamel.yaml.YAML: """""" Register externally defined classes. """""" # Validation if classes is None: classes = [] # Register the classes for cls in classes: logger.debug(f""Registering class {cls} with YAML"") yaml.register_class(cls) return yaml" 148,"def register_module_classes(yaml: ruamel.yaml.YAML, modules: Optional[Iterable[Any]] = None) -> ruamel.yaml.YAML: """""" Register all classes in the given modules with the YAML object. This is a simple helper function. """""" # Validation if modules is None: modules = [] # Extract the classes from the modules classes_to_register = set() for module in modules: module_classes = [member[1] for member in inspect.getmembers(module, inspect.isclass)] classes_to_register.update(module_classes) # Register the extracted classes return register_classes(yaml = yaml, classes = classes_to_register)" 149,"def numpy_to_yaml(representer: Representer, data: np.ndarray) -> Sequence[Any]: """""" Write a numpy array to YAML. It registers the array under the tag ``!numpy_array``. Use with: .. code-block:: python >>> yaml = ruamel.yaml.YAML() >>> yaml.representer.add_representer(np.ndarray, yaml.numpy_to_yaml) Note: We cannot use ``yaml.register_class`` because it won't register the proper type. (It would register the type of the class, rather than of `numpy.ndarray`). Instead, we use the above approach to register this method explicitly with the representer.
"""""" return representer.represent_sequence( ""!numpy_array"", data.tolist() )" 150,"def numpy_from_yaml(constructor: Constructor, data: ruamel.yaml.nodes.SequenceNode) -> np.ndarray: """""" Read an array from YAML to numpy. It reads arrays registered under the tag ``!numpy_array``. Use with: .. code-block:: python >>> yaml = ruamel.yaml.YAML() >>> yaml.constructor.add_constructor(""!numpy_array"", yaml.numpy_from_yaml) Note: We cannot use ``yaml.register_class`` because it won't register the proper type. (It would register the type of the class, rather than of `numpy.ndarray`). Instead, we use the above approach to register this method explicitly with the representer. """""" # Construct the contained values so that we properly construct int, float, etc. # We just leave this to YAML because it already stores this information. values = [constructor.construct_object(n) for n in data.value] logger.debug(f""{data}, {values}"") return np.array(values)" 151,"def enum_to_yaml(cls: Type[T_EnumToYAML], representer: Representer, data: T_EnumToYAML) -> ruamel.yaml.nodes.ScalarNode: """""" Encodes YAML representation. This is a mixin method for writing enum values to YAML. It needs to be added to the enum as a classmethod. See the module docstring for further information on this approach and how to implement it. This method writes whatever is used in the string representation of the YAML value. Usually, this will be the unique name of the enumeration value. If the name is used, the corresponding ``EnumFromYAML`` mixin can be used to recreate the value. If the name isn't used, more care may be necessary, so a ``from_yaml`` method for that particular enumeration may be necessary. Note: This method assumes that the name of the enumeration value should be stored as a scalar node. Args: representer: Representation from YAML. data: Enumeration value to be encoded. Returns: Scalar representation of the name of the enumeration value. """""" return representer.represent_scalar( f""!{cls.__name__}"", f""{str(data)}"" )" 152,"def enum_from_yaml(cls: Type[T_EnumFromYAML], constructor: Constructor, node: ruamel.yaml.nodes.ScalarNode) -> T_EnumFromYAML: """""" Decode YAML representation. This is a mixin method for reading enum values from YAML. It needs to be added to the enum as a classmethod. See the module docstring for further information on this approach and how to implement it. Note: This method assumes that the name of the enumeration value was stored as a scalar node. Args: constructor: Constructor from the YAML object. node: Scalar node extracted from the YAML being read. Returns: The constructed YAML value from the name of the enumerated value. """""" # mypy doesn't like indexing to construct the enumeration. return cls[node.value]" 153,"def is_error(self): """""" Checks to see if the job errored out. """""" try: if self._task.is_alive(): if len(self._task.stderr.readlines()) > 0: self._task.join() self._write_log() return True except AttributeError: pass return False" 154,"def add_splash_ids(splash_mapping_file_pth, conn, db_type='sqlite'): """""" Add splash ids to database (in case stored in a different file to the msp files like for MoNA) Example: >>> from msp2db.db import get_connection >>> from msp2db.parse import add_splash_ids >>> conn = get_connection('sqlite', 'library.db') >>> add_splash_ids('splash_mapping_file.csv', conn, db_type='sqlite') Args: splash_mapping_file_pth (str): Path to the splash mapping file (needs to be csv format and have no headers, should contain two columns. 
The first the accession number the second the splash. e.g. AU100601, splash10-0a4i-1900000000-d2bc1c887f6f99ed0f74 \n """""" # get dictionary of accession and library_spectra_meta_id cursor = conn.cursor() cursor.execute(""SELECT id, accession FROM library_spectra_meta"") accession_d = {row[1]: row[0] for row in cursor} if db_type == 'sqlite': type_sign = '?' else: type_sign = '%s' rows = [] c = 0 # loop through splash mapping file with open(splash_mapping_file_pth, ""r"") as f: for line in f: c+=1 line = line.rstrip() line_l = line.split(',') accession = line_l[0] splash = line_l[1] try: aid = accession_d[accession] except KeyError as e: print(""can't find accession {}"".format(accession)) continue row = (splash, aid) rows.append(row) if c > 200: print(row) cursor.executemany(""UPDATE library_spectra_meta SET splash = {t} WHERE id = {t} "".format(t=type_sign), rows) conn.commit() rows = [] c = 0 cursor.executemany(""UPDATE library_spectra_meta SET splash = {t} WHERE id = {t} "".format(t=type_sign), rows) conn.commit()" 155,"def _get_current_ids(self, source=True, meta=True, spectra=True, spectra_annotation=True): """"""Get the current id for each table in the database Args: source (boolean): get the id for the table ""library_spectra_source"" will update self.current_id_origin meta (boolean): get the id for the table ""library_spectra_meta"" will update self.current_id_meta spectra (boolean): get the id for the table ""library_spectra"" will update self.current_id_spectra spectra_annotation (boolean): get the id for the table ""library_spectra_annotation"" will update self.current_id_spectra_annotation """""" # get the cursor for the database connection c = self.c # Get the last uid for the spectra_info table if source: c.execute('SELECT max(id) FROM library_spectra_source') last_id_origin = c.fetchone()[0] if last_id_origin: self.current_id_origin = last_id_origin + 1 else: self.current_id_origin = 1 if meta: c.execute('SELECT max(id) FROM library_spectra_meta') last_id_meta = c.fetchone()[0] if last_id_meta: self.current_id_meta = last_id_meta + 1 else: self.current_id_meta = 1 if spectra: c.execute('SELECT max(id) FROM library_spectra') last_id_spectra = c.fetchone()[0] if last_id_spectra: self.current_id_spectra = last_id_spectra + 1 else: self.current_id_spectra = 1 if spectra_annotation: c.execute('SELECT max(id) FROM library_spectra_annotation') last_id_spectra_annotation = c.fetchone()[0] if last_id_spectra_annotation: self.current_id_spectra_annotation = last_id_spectra_annotation + 1 else: self.current_id_spectra_annotation = 1" 156,"def _parse_files(self, msp_pth, chunk, db_type, celery_obj=False): """"""Parse the MSP files and insert into database Args: msp_pth (str): path to msp file or directory [required] db_type (str): The type of database to submit to (either 'sqlite', 'mysql' or 'django_mysql') [required] chunk (int): Chunks of spectra to parse data (useful to control memory usage) [required] celery_obj (boolean): If using Django a Celery task object can be used to keep track on ongoing tasks [default False] """""" if os.path.isdir(msp_pth): c = 0 for folder, subs, files in sorted(os.walk(msp_pth)): for msp_file in sorted(files): msp_file_pth = os.path.join(folder, msp_file) if os.path.isdir(msp_file_pth) or not msp_file_pth.lower().endswith(('txt', 'msp')): continue print('MSP FILE PATH', msp_file_pth) self.num_lines = line_count(msp_file_pth) # each file is processed separately but we want to still process in chunks so we save the number # of spectra currently being 
processed with the c variable with open(msp_file_pth, ""r"") as f: c = self._parse_lines(f, chunk, db_type, celery_obj, c) else: self.num_lines = line_count(msp_pth) with open(msp_pth, ""r"") as f: self._parse_lines(f, chunk, db_type, celery_obj) self.insert_data(remove_data=True, db_type=db_type)" 157,"def _parse_lines(self, f, chunk, db_type, celery_obj=False, c=0): """"""Parse the MSP files and insert into database Args: f (file object): the opened file object db_type (str): The type of database to submit to (either 'sqlite', 'mysql' or 'django_mysql') [required] chunk (int): Chunks of spectra to parse data (useful to control memory usage) [required] celery_obj (boolean): If using Django a Celery task object can be used to keep track on ongoing tasks [default False] c (int): Number of spectra currently processed (will reset to 0 after that chunk of spectra has been inserted into the database """""" old = 0 for i, line in enumerate(f): line = line.rstrip() if i == 0: old = self.current_id_meta self._update_libdata(line) if self.current_id_meta > old: old = self.current_id_meta c += 1 if c > chunk: if celery_obj: celery_obj.update_state(state='current spectra {}'.format(str(i)), meta={'current': i, 'total': self.num_lines}) print(self.current_id_meta) self.insert_data(remove_data=True, db_type=db_type) self.update_source = False c = 0 return c" 158,"def _update_libdata(self, line): """"""Update the library meta data from the current line being parsed Args: line (str): The current line of the of the file being parsed """""" #################################################### # parse MONA Comments line #################################################### # The mona msp files contain a ""comments"" line that contains lots of other information normally separated # into by """" if re.match('^Comment.*$', line, re.IGNORECASE): comments = re.findall('""([^""]*)""', line) for c in comments: self._parse_meta_info(c) self._parse_compound_info(c) #################################################### # parse meta and compound info lines #################################################### # check the current line for both general meta data # and compound information self._parse_meta_info(line) self._parse_compound_info(line) #################################################### # End of meta data #################################################### # Most MSP files have the a standard line of text before the spectra information begins. Here we check # for this line and store the relevant details for the compound and meta information to be ready for insertion # into the database if self.collect_meta and (re.match('^Num Peaks(.*)$', line, re.IGNORECASE) or re.match('^PK\$PEAK:(.*)', line, re.IGNORECASE) or re.match('^PK\$ANNOTATION(.*)', line, re.IGNORECASE)): self._store_compound_info() self._store_meta_info() # Reset the temp meta and compound information self.meta_info = get_blank_dict(self.meta_regex) self.compound_info = get_blank_dict(self.compound_regex) self.other_names = [] self.collect_meta = False # ignore additional information in the 3rd column if using the MassBank spectra schema if re.match('^PK\$PEAK: m/z int\. 
rel\.int\.$', line, re.IGNORECASE): self.ignore_additional_spectra_info = True # Check if annotation or spectra is to be in the next lines to be parsed if re.match('^Num Peaks(.*)$', line, re.IGNORECASE) or re.match('^PK\$PEAK:(.*)', line, re.IGNORECASE): self.start_spectra = True return elif re.match('^PK\$ANNOTATION(.*)', line, re.IGNORECASE): self.start_spectra_annotation = True match = re.match('^PK\$ANNOTATION:(.*)', line, re.IGNORECASE) columns = match.group(1) cl = columns.split() self.spectra_annotation_indexes = {i: cl.index(i) for i in cl} return #################################################### # Process annotation details #################################################### # e.g. molecular formula for each peak in the spectra if self.start_spectra_annotation: self._parse_spectra_annotation(line) #################################################### # Process spectra #################################################### if self.start_spectra: self._parse_spectra(line)" 159,"def get_compound_ids(self): """"""Extract the current compound ids in the database. Updates the self.compound_ids list """""" cursor = self.conn.cursor() cursor.execute('SELECT inchikey_id FROM metab_compound') self.conn.commit() for row in cursor: if not row[0] in self.compound_ids: self.compound_ids.append(row[0])" 160,"def _store_compound_info(self): """"""Update the compound_info dictionary with the current chunk of compound details Note that we use the inchikey as unique identifier. If we can't find an appropriate inchikey we just use a random string (uuid4) suffixed with UNKNOWN """""" other_name_l = [name for name in self.other_names if name != self.compound_info['name']] self.compound_info['other_names'] = ' <#> '.join(other_name_l) if not self.compound_info['inchikey_id']: self._set_inchi_pcc(self.compound_info['pubchem_id'], 'cid', 0) if not self.compound_info['inchikey_id']: self._set_inchi_pcc(self.compound_info['smiles'], 'smiles', 0) if not self.compound_info['inchikey_id']: self._set_inchi_pcc(self.compound_info['name'], 'name', 0) if not self.compound_info['inchikey_id']: print('WARNING, cant get inchi key for ', self.compound_info) print(self.meta_info) print('#########################') self.compound_info['inchikey_id'] = 'UNKNOWN_' + str(uuid.uuid4()) if not self.compound_info['pubchem_id'] and self.compound_info['inchikey_id']: self._set_inchi_pcc(self.compound_info['inchikey_id'], 'inchikey', 0) if not self.compound_info['name']: self.compound_info['name'] = 'unknown name' if not self.compound_info['inchikey_id'] in self.compound_ids: self.compound_info_all.append(tuple(self.compound_info.values()) + ( str(datetime.datetime.now()), str(datetime.datetime.now()), )) self.compound_ids.append(self.compound_info['inchikey_id'])" 161,"def _store_meta_info(self): """"""Update the meta dictionary with the current chunk of meta data details """""" # In the mass bank msp files, sometimes the precursor_mz is missing but we have the neutral mass and # the precursor_type (e.g.
adduct) so we can calculate the precursor_mz if not self.meta_info['precursor_mz'] and self.meta_info['precursor_type'] and \ self.compound_info['exact_mass']: self.meta_info['precursor_mz'] = get_precursor_mz(float(self.compound_info['exact_mass']), self.meta_info['precursor_type']) if not self.meta_info['polarity']: # have to do special check for polarity (as sometimes gets missed) m = re.search('^\[.*\](\-|\+)', self.meta_info['precursor_type'], re.IGNORECASE) if m: polarity = m.group(1).strip() if polarity == '+': self.meta_info['polarity'] = 'positive' elif polarity == '-': self.meta_info['polarity'] = 'negative' if not self.meta_info['accession']: self.meta_info['accession'] = 'unknown accession' self.meta_info_all.append( (str(self.current_id_meta),) + tuple(self.meta_info.values()) + (str(self.current_id_origin), self.compound_info['inchikey_id'],) )" 162,"def _parse_spectra_annotation(self, line): """"""Parse and store the spectral annotation details """""" if re.match('^PK\$NUM_PEAK(.*)', line, re.IGNORECASE): self.start_spectra_annotation = False return saplist = line.split() sarow = ( self.current_id_spectra_annotation, float(saplist[self.spectra_annotation_indexes['m/z']]) if 'm/z' in self.spectra_annotation_indexes else None, saplist[self.spectra_annotation_indexes[ 'tentative_formula']] if 'tentative_formula' in self.spectra_annotation_indexes else None, float(saplist[self.spectra_annotation_indexes[ 'mass_error(ppm)']]) if 'mass_error(ppm)' in self.spectra_annotation_indexes else None, self.current_id_meta) self.spectra_annotation_all.append(sarow) self.current_id_spectra_annotation += 1" 163,"def _parse_spectra(self, line): """"""Parse and store the spectral details """""" if line in ['\n', '\r\n', '//\n', '//\r\n', '', '//']: self.start_spectra = False self.current_id_meta += 1 self.collect_meta = True return splist = line.split() if len(splist) > 2 and not self.ignore_additional_spectra_info: additional_info = ''.join(map(str, splist[2:len(splist)])) else: additional_info = '' srow = ( self.current_id_spectra, float(splist[0]), float(splist[1]), additional_info, self.current_id_meta) self.spectra_all.append(srow) self.current_id_spectra += 1" 164,"def _set_inchi_pcc(self, in_str, pcp_type, elem): """"""Check pubchem compounds via API for both an inchikey and any available compound details """""" if not in_str: return 0 try: pccs = pcp.get_compounds(in_str, pcp_type) except pcp.BadRequestError as e: print(e) return 0 except pcp.TimeoutError as e: print(e) return 0 except pcp.ServerError as e: print(e) return 0 except URLError as e: print(e) return 0 except BadStatusLine as e: print(e) return 0 if pccs: pcc = pccs[elem] self.compound_info['inchikey_id'] = pcc.inchikey self.compound_info['pubchem_id'] = pcc.cid self.compound_info['molecular_formula'] = pcc.molecular_formula self.compound_info['molecular_weight'] = pcc.molecular_weight self.compound_info['exact_mass'] = pcc.exact_mass self.compound_info['smiles'] = pcc.canonical_smiles if len(pccs) > 1: print('WARNING, multiple compounds for ', self.compound_info)" 165,"def _get_other_names(self, line): """"""Parse and extract any other names that might be recorded for the compound Args: line (str): line of the msp file """""" m = re.search(self.compound_regex['other_names'][0], line, re.IGNORECASE) if m: self.other_names.append(m.group(1).strip())" 166,"def _parse_meta_info(self, line): """"""Parse and extract all meta data by looping through the dictionary of meta_info regexs updates self.meta_info Args: line (str): line 
of the msp file """""" if self.mslevel: self.meta_info['ms_level'] = self.mslevel if self.polarity: self.meta_info['polarity'] = self.polarity for k, regexes in six.iteritems(self.meta_regex): for reg in regexes: m = re.search(reg, line, re.IGNORECASE) if m: self.meta_info[k] = m.group(1).strip()" 167,"def _parse_compound_info(self, line): """"""Parse and extract all compound data by looping through the dictionary of compound_info regexs updates self.compound_info Args: line (str): line of the msp file """""" for k, regexes in six.iteritems(self.compound_regex): for reg in regexes: if self.compound_info[k]: continue m = re.search(reg, line, re.IGNORECASE) if m: self.compound_info[k] = m.group(1).strip() self._get_other_names(line)" 168,"def insert_data(self, remove_data=False, db_type='sqlite'): """"""Insert data stored in the current chunk of parsing into the selected database Args: remove_data (boolean): Remove the data stored within the LibraryData object for the current chunk of processing db_type (str): The type of database to submit to either 'sqlite', 'mysql' or 'django_mysql' [default sqlite] """""" if self.update_source: # print ""insert ref id"" import msp2db self.c.execute( ""INSERT INTO library_spectra_source (id, name, parsing_software) VALUES"" "" ({a}, '{b}', 'msp2db-v{c}')"".format(a=self.current_id_origin, b=self.source, c=msp2db.__version__)) self.conn.commit() if self.compound_info_all: self.compound_info_all = _make_sql_compatible(self.compound_info_all) cn = ', '.join(self.compound_info.keys()) + ',created_at,updated_at' insert_query_m(self.compound_info_all, columns=cn, conn=self.conn, table='metab_compound', db_type=db_type) self.meta_info_all = _make_sql_compatible(self.meta_info_all) cn = 'id,' + ', '.join(self.meta_info.keys()) + ',library_spectra_source_id, inchikey_id' insert_query_m(self.meta_info_all, columns=cn, conn=self.conn, table='library_spectra_meta', db_type=db_type) cn = ""id, mz, i, other, library_spectra_meta_id"" insert_query_m(self.spectra_all, columns=cn, conn=self.conn, table='library_spectra', db_type=db_type) if self.spectra_annotation_all: cn = ""id, mz, tentative_formula, mass_error, library_spectra_meta_id"" insert_query_m(self.spectra_annotation_all, columns=cn, conn=self.conn, table='library_spectra_annotation', db_type=db_type) # self.conn.close() if remove_data: self.meta_info_all = [] self.spectra_all = [] self.spectra_annotation_all = [] self.compound_info_all = [] self._get_current_ids(source=False)" 169,"def line(line_def, **kwargs): """"""Highlights a character in the line"""""" def replace(s): return ""(%s)"" % ansi.aformat(s.group()[1:], attrs=[""bold"", ]) return ansi.aformat( re.sub('@.?', replace, line_def), **kwargs)" 170,"def try_and_error(*funcs): """"""Apply multiple validation functions Parameters ---------- ``*funcs`` Validation functions to test Returns ------- function"""""" def validate(value): exc = None for func in funcs: try: return func(value) except (ValueError, TypeError) as e: exc = e raise exc return validate" 171,"def validate_text(value): """"""Validate a text formatoption Parameters ---------- value: see :attr:`psyplot.plotter.labelplotter.text` Raises ------ ValueError"""""" possible_transform = ['axes', 'fig', 'data'] validate_transform = ValidateInStrings('transform', possible_transform, True) tests = [validate_float, validate_float, validate_str, validate_transform, dict] if isinstance(value, six.string_types): xpos, ypos = rcParams['texts.default_position'] return [(xpos, ypos, value, 'axes', {'ha': 
'right'})] elif isinstance(value, tuple): value = [value] try: value = list(value)[:] except TypeError: raise ValueError(""Value must be string or list of tuples!"") for i, val in enumerate(value): try: val = tuple(val) except TypeError: raise ValueError( ""Text must be an iterable of the form "" ""(x, y, s[, trans, params])!"") if len(val) < 3: raise ValueError( ""Text tuple must at least be like [x, y, s], with floats x, "" ""y and string s!"") elif len(val) == 3 or isinstance(val[3], dict): val = list(val) val.insert(3, 'data') if len(val) == 4: val += [{}] val = tuple(val) if len(val) > 5: raise ValueError( ""Text tuple must not be longer then length 5. It can be "" ""like (x, y, s[, trans, params])!"") value[i] = (validate(x) for validate, x in zip(tests, val)) return value" 172,"def validate_none(b): """"""Validate that None is given Parameters ---------- b: {None, 'none'} None or string (the case is ignored) Returns ------- None Raises ------ ValueError"""""" if isinstance(b, six.string_types): b = b.lower() if b is None or b == 'none': return None else: raise ValueError('Could not convert ""%s"" to None' % b)" 173,"def validate_axiscolor(value): """"""Validate a dictionary containing axiscolor definitions Parameters ---------- value: dict see :attr:`psyplot.plotter.baseplotter.axiscolor` Returns ------- dict Raises ------ ValueError"""""" validate = try_and_error(validate_none, validate_color) possible_keys = {'right', 'left', 'top', 'bottom'} try: value = dict(value) false_keys = set(value) - possible_keys if false_keys: raise ValueError(""Wrong keys (%s)!"" % (', '.join(false_keys))) for key, val in value.items(): value[key] = validate(val) except: value = dict(zip(possible_keys, repeat(validate(value)))) return value" 174,"def validate_cbarpos(value): """"""Validate a colorbar position Parameters ---------- value: bool or str A string can be a combination of 'sh|sv|fl|fr|ft|fb|b|r' Returns ------- list list of strings with possible colorbar positions Raises ------ ValueError"""""" patt = 'sh|sv|fl|fr|ft|fb|b|r' if value is True: value = {'b'} elif not value: value = set() elif isinstance(value, six.string_types): for s in re.finditer('[^%s]+' % patt, value): warn(""Unknown colorbar position %s!"" % s.group(), RuntimeWarning) value = set(re.findall(patt, value)) else: value = validate_stringset(value) for s in (s for s in value if not re.match(patt, s)): warn(""Unknown colorbar position %s!"" % s) value.remove(s) return value" 175,"def validate_cmap(val): """"""Validate a colormap Parameters ---------- val: str or :class:`mpl.colors.Colormap` Returns ------- str or :class:`mpl.colors.Colormap` Raises ------ ValueError"""""" from matplotlib.colors import Colormap try: return validate_str(val) except ValueError: if not isinstance(val, Colormap): raise ValueError( ""Could not find a valid colormap!"") return val" 176,"def validate_cmaps(cmaps): """"""Validate a dictionary of color lists Parameters ---------- cmaps: dict a mapping from a colormap name to a list of colors Raises ------ ValueError If one of the values in `cmaps` is not a color list Notes ----- For all items (listname, list) in `cmaps`, the reversed list is automatically inserted with the ``listname + '_r'`` key."""""" cmaps = {validate_str(key): validate_colorlist(val) for key, val in cmaps} for key, val in six.iteritems(cmaps): cmaps.setdefault(key + '_r', val[::-1]) return cmaps" 177,"def validate_lineplot(value): """"""Validate the value for the LinePlotter.plot formatoption Parameters ---------- value: None, str or 
list with mixture of both The value to validate"""""" if value is None: return value elif isinstance(value, six.string_types): return six.text_type(value) else: value = list(value) for i, v in enumerate(value): if v is None: pass elif isinstance(v, six.string_types): value[i] = six.text_type(v) else: raise ValueError('Expected None or string, found %s' % (v, )) return value" 178,"def validate_err_calc(val): """"""Validation function for the :attr:`psy_simple.plotter.FldmeanPlotter.err_calc` formatoption"""""" try: val = validate_float(val) except (ValueError, TypeError): pass else: if val <= 100 and val >= 0: return val raise ValueError(""Percentiles for the error calculation must lie "" ""between 0 and 100, not %s"" % val) try: val = ValidateList(float, 2)(val) except (ValueError, TypeError): pass else: if all((v <= 100 and v >= 0) for v in val): return val raise ValueError(""Percentiles for the error calculation must lie "" ""between 0 and 100, not %s"" % val) try: val = validate_str(val) except ValueError: pass else: if 'std' not in val: raise ValueError( 'A string for the error calculation must contain std!') return val" 179,"def visit_GpxModel(self, gpx_model, *args, **kwargs): """"""Render a GPXModel as a single JSON structure."""""" result = OrderedDict() put_scalar = lambda name, json_name=None: self.optional_attribute_scalar(result, gpx_model, name, json_name) put_list = lambda name, json_name=None: self.optional_attribute_list(result, gpx_model, name, json_name) put_scalar('creator') put_scalar('metadata') put_list('waypoints') put_list('routes') put_list('tracks') put_list('extensions') return result" 180,"def visit_Metadata(self, metadata, *args, **kwargs): """"""Render GPX Metadata as a single JSON structure."""""" result = OrderedDict() put_scalar = lambda name, json_name=None: self.optional_attribute_scalar(result, metadata, name, json_name) put_list = lambda name, json_name=None: self.optional_attribute_list(result, metadata, name, json_name) put_scalar('name') put_scalar('description') put_scalar('author') put_scalar('copyright') put_list('links') put_scalar('time') put_scalar('keywords') put_scalar('bounds') put_list('extensions') return result" 181,"def swap_default(mode, equation, symbol_names, default, **kwargs): ''' Given a `sympy` equation or equality, along with a list of symbol names, substitute the specified default value for each symbol for which a value is not provided through a keyword argument. For example, consider the following equality: >>> sp.pprint(H) V₂ Z₂ ── = ── V₁ Z₁ Let us substitute a default value of 1 for terms Z1 and Z2: >>> sp.pprint(subs_default(H, ['Z1', 'Z2'], 1)) V₂ ── = 1 V₁ Now, let us specify a default value of 1 for terms Z1 and Z2, but provide an overriding value for Z1: >>> sp.pprint(subs_default(H, ['Z1', 'Z2'], 1, Z1=4)) V₂ ── = 1/4 V₁ Note that keyword arguments for terms not specified in the list of symbol names are ignored: >>> sp.pprint(subs_default(H, ['Z1', 'Z2'], 1, Z1=4, Q=7)) V₂ ── = 1/4 V₁ ''' if mode == 'subs': swap_f = _subs default_swap_f = _subs elif mode == 'limit': swap_f = _limit default_swap_f = _subs elif mode == 'limit_default': swap_f = _subs default_swap_f = _limit else: raise ValueError('''Unsupported mode. 
`mode` must be one of: ''' '''('subs', 'limit').''') result = equation for s in symbol_names: if s in kwargs: if isinstance(kwargs[s], Iterable): continue else: result = swap_f(result, s, kwargs[s]) else: result = default_swap_f(result, s, default) return result" 182,"def z_transfer_functions(): r''' Return a symbolic equality representation of the transfer function of RMS voltage measured by either control board analog feedback circuits. According to the figure below, the transfer function describes the following relationship:: # Hardware V1 # # Hardware V2 # V₂ V₁ V₂ Z₁ ── = ─────── ── = ── Z₂ Z₁ + Z₂ V₁ Z₂ where $V_{1}$ denotes the high-voltage actuation signal from the amplifier output and $V_{2}$ denotes the signal sufficiently attenuated to fall within the measurable input range of the analog-to-digital converter *(approx. 5V)*. The feedback circuits for control board **hardware version 1** and **hardware version 2** are shown below. .. code-block:: none # Hardware V1 # # Hardware V2 # V_1 @ frequency V_1 @ frequency ┯ ┯ ┌─┴─┐ ┌─┴─┐ ┌───┐ │Z_1│ │Z_1│ ┌─┤Z_2├─┐ └─┬─┘ └─┬─┘ │ └───┘ │ ├───⊸ V_2 │ │ │╲ ├───⊸ V_2 ┌─┴─┐ └────┴──│-╲__│ │Z_2│ ┌──│+╱ └─┬─┘ │ │╱ ═╧═ │ ¯ ═╧═ ¯ Notes ----- - The symbolic equality can be solved for any symbol, _e.g.,_ $V_{1}$ or $V_{2}$. - A symbolically solved representation can be converted to a Python function using `sympy.utilities.lambdify.lambdify`_, to compute results for specific values of the remaining parameters. .. _`sympy.utilities.lambdify.lambdify`: http://docs.sympy.org/dev/modules/utilities/lambdify.html ''' # Define transfer function as a symbolic equality using SymPy. V1, V2, Z1, Z2 = sp.symbols('V1 V2 Z1 Z2') xfer_funcs = pd.Series([sp.Eq(V2 / Z2, V1 / (Z1 + Z2)), sp.Eq(V2 / V1, Z2 / Z1)], # Index by hardware version. index=[1, 2]) xfer_funcs.index.name = 'Hardware version' return xfer_funcs" 183,"def has_option(section, name): """""" Wrapper around ConfigParser's ``has_option`` method. """""" cfg = ConfigParser.SafeConfigParser({""working_dir"": ""/tmp"", ""debug"": ""0""}) cfg.read(CONFIG_LOCATIONS) return cfg.has_option(section, name)" 184,"def get(section, name): """""" Wrapper around ConfigParser's ``get`` method. 
"""""" cfg = ConfigParser.SafeConfigParser({""working_dir"": ""/tmp"", ""debug"": ""0""}) cfg.read(CONFIG_LOCATIONS) val = cfg.get(section, name) return val.strip(""'"").strip('""')" 185,"def run(**options): """""" _run_ Run the dockerstache process to render templates based on the options provided If extend_context is passed as options it will be used to extend the context with the contents of the dictionary provided via context.update(extend_context) """""" with Dotfile(options) as conf: if conf['context'] is None: msg = ""No context file has been provided"" LOGGER.error(msg) raise RuntimeError(msg) if not os.path.exists(conf['context_path']): msg = ""Context file {} not found"".format(conf['context_path']) LOGGER.error(msg) raise RuntimeError(msg) LOGGER.info( ( ""{{dockerstache}}: In: {}\n"" ""{{dockerstache}}: Out: {}\n"" ""{{dockerstache}}: Context: {}\n"" ""{{dockerstache}}: Defaults: {}\n"" ).format(conf['input'], conf['output'], conf['context'], conf['defaults']) ) context = Context(conf['context'], conf['defaults']) context.load() if 'extend_context' in options: LOGGER.info(""{{dockerstache}} Extended context provided"") context.update(options['extend_context']) process_templates( conf['input'], conf['output'], context ) if conf['inclusive']: process_copies( conf['input'], conf['output'], conf['exclude'] ) return dict(conf)" 186,"def make_key(table_name, objid): """"""Create an object key for storage."""""" key = datastore.Key() path = key.path_element.add() path.kind = table_name path.name = str(objid) return key" 187,"def write_rec(table_name, objid, data, index_name_values): """"""Write (upsert) a record using a tran."""""" with DatastoreTransaction() as tx: entity = tx.get_upsert() entity.key.CopyFrom(make_key(table_name, objid)) prop = entity.property.add() prop.name = 'id' prop.value.string_value = objid prop = entity.property.add() prop.name = 'value' prop.value.string_value = data for name, val in index_name_values: prop = entity.property.add() prop.name = name prop.value.string_value = str(val)" 188,"def extract_entity(found): """"""Copy found entity to a dict."""""" obj = dict() for prop in found.entity.property: obj[prop.name] = prop.value.string_value return obj" 189,"def read_rec(table_name, objid): """"""Generator that yields keyed recs from store."""""" req = datastore.LookupRequest() req.key.extend([make_key(table_name, objid)]) for found in datastore.lookup(req).found: yield extract_entity(found)" 190,"def read_by_indexes(table_name, index_name_values=None): """"""Index reader."""""" req = datastore.RunQueryRequest() query = req.query query.kind.add().name = table_name if not index_name_values: index_name_values = [] for name, val in index_name_values: queryFilter = query.filter.property_filter queryFilter.property.name = name queryFilter.operator = datastore.PropertyFilter.EQUAL queryFilter.value.string_value = str(val) loop_its = 0 have_more = True while have_more: resp = datastore.run_query(req) found_something = False for found in resp.batch.entity_result: yield extract_entity(found) found_something = True if not found_something: # This is a guard against bugs or excessive looping - as long we # can keep yielding records we'll continue to execute loop_its += 1 if loop_its > 5: raise ValueError(""Exceeded the excessive query threshold"") if resp.batch.more_results != datastore.QueryResultBatch.NOT_FINISHED: have_more = False else: have_more = True end_cursor = resp.batch.end_cursor query.start_cursor.CopyFrom(end_cursor)" 191,"def delete_table(table_name): 
""""""Mainly for testing."""""" to_delete = [ make_key(table_name, rec['id']) for rec in read_by_indexes(table_name, []) ] with DatastoreTransaction() as tx: tx.get_commit_req().mutation.delete.extend(to_delete)" 192,"def get_commit_req(self): """"""Lazy commit request getter."""""" if not self.commit_req: self.commit_req = datastore.CommitRequest() self.commit_req.transaction = self.tx return self.commit_req" 193,"def find_one(self, cls, id): """"""Required functionality."""""" db_result = None for rec in read_rec(cls.get_table_name(), id): db_result = rec break # Only read the first returned - which should be all we get if not db_result: return None obj = cls.from_data(db_result['value']) return obj" 194,"def find_all(self, cls): """"""Required functionality."""""" final_results = [] for db_result in read_by_indexes(cls.get_table_name(), []): obj = cls.from_data(db_result['value']) final_results.append(obj) return final_results" 195,"def find_by_index(self, cls, index_name, value): """"""Required functionality."""""" table_name = cls.get_table_name() index_name_vals = [(index_name, value)] final_results = [] for db_result in read_by_indexes(table_name, index_name_vals): obj = cls.from_data(db_result['value']) final_results.append(obj) return final_results" 196,"def save(self, obj): """"""Required functionality."""""" if not obj.id: obj.id = uuid() index_names = obj.__class__.index_names() or [] index_dict = obj.indexes() or {} index_name_values = [ (key, index_dict.get(key, '')) for key in index_names ] write_rec( obj.__class__.get_table_name(), obj.id, obj.to_data(), index_name_values )" 197,"def call(command, stdin=None, stdout=subprocess.PIPE, env=os.environ, cwd=None, shell=False, output_log_level=logging.INFO, sensitive_info=False): """""" Better, smarter call logic """""" if not sensitive_info: logger.debug(""calling command: %s"" % command) else: logger.debug(""calling command with sensitive information"") try: args = command if shell else whitespace_smart_split(command) kw = {} if not shell and not which(args[0], cwd=cwd): raise CommandMissingException(args[0]) if shell: kw['shell'] = True process = subprocess.Popen(args, stdin=subprocess.PIPE, stdout=stdout, stderr=subprocess.STDOUT, env=env, cwd=cwd, **kw) output = process.communicate(input=stdin)[0] if output is not None: try: logger.log(output_log_level, output.decode('utf-8')) except UnicodeDecodeError: pass return (process.returncode, output) except OSError: e = sys.exc_info()[1] if not sensitive_info: logger.exception(""Error running command: %s"" % command) logger.error(""Root directory: %s"" % cwd) if stdin: logger.error(""stdin: %s"" % stdin) raise e" 198,"def whitespace_smart_split(command): """""" Split a command by whitespace, taking care to not split on whitespace within quotes. >>> whitespace_smart_split(""test this \\\""in here\\\"" again"") ['test', 'this', '""in here""', 'again'] """""" return_array = [] s = """" in_double_quotes = False escape = False for c in command: if c == '""': if in_double_quotes: if escape: s += c escape = False else: s += c in_double_quotes = False else: in_double_quotes = True s += c else: if in_double_quotes: if c == '\\': escape = True s += c else: escape = False s += c else: if c == ' ': return_array.append(s) s = """" else: s += c if s != """": return_array.append(s) return return_array" 199,"def skip(stackframe=1): """""" Must be called from within `__enter__()`. Performs some magic to have a #ContextSkipped exception be raised the moment the with context is entered. 
The #ContextSkipped must then be handled in `__exit__()` to suppress the propagation of the exception. > Important: This function does not raise an exception by itself, thus > the `__enter__()` method will continue to execute after using this function. """""" def trace(frame, event, args): raise ContextSkipped sys.settrace(lambda *args, **kwargs: None) frame = sys._getframe(stackframe + 1) frame.f_trace = trace" 200,"def sync(self): """""" execute the steps required to have the feature end with the desired state. """""" phase = _get_phase(self._formula_instance) self.logger.info(""%s %s..."" % (phase.verb.capitalize(), self.feature_name)) message = ""...finished %s %s."" % (phase.verb, self.feature_name) result = getattr(self, phase.name)() if result or phase in (PHASE.INSTALL, PHASE.REMOVE): self.logger.info(message) else: self.logger.debug(message) return result" 201,"def linear_insert(self, item, priority): """"""Linear search. Performance is O(n^2)."""""" with self.lock: self_data = self.data rotate = self_data.rotate maxlen = self._maxlen length = len(self_data) count = length # in practice, this is better than doing a rotate(-1) every # loop and getting self.data[0] each time only because deque # implements a very efficient iterator in C for i in self_data: if priority > i[1]: break count -= 1 rotate(-count) self_data.appendleft((item, priority)) rotate(length-count) try: self.items[item] += 1 except TypeError: self.items[repr(item)] += 1 if maxlen is not None and maxlen < len(self_data): self._poplast()" 202,"def binary_insert(self, item, priority): """"""Traditional binary search. Performance: O(n log n)"""""" with self.lock: self_data = self.data rotate = self_data.rotate maxlen = self._maxlen length = len(self_data) index = 0 min = 0 max = length - 1 while max - min > 10: mid = (min + max) // 2 # If index in 1st half of list if priority > self_data[mid][1]: max = mid - 1 # If index in 2nd half of list else: min = mid + 1 for i in range(min, max + 1): if priority > self_data[i][1]: index = i break elif i == max: index = max + 1 shift = length - index # Never shift more than half length of depq if shift > length // 2: shift = length % shift rotate(-shift) self_data.appendleft((item, priority)) rotate(shift) else: rotate(shift) self_data.append((item, priority)) rotate(-shift) try: self.items[item] += 1 except TypeError: self.items[repr(item)] += 1 if maxlen is not None and maxlen < len(self_data): self._poplast()" 203,"def isloaded(self, name): """"""Checks if given hook module has been loaded Args: name (str): The name of the module to check Returns: bool. The return code:: True -- Loaded False -- Not Loaded """""" if name is None: return True if isinstance(name, str): return (name in [x.__module__ for x in self]) if isinstance(name, Iterable): return set(name).issubset([x.__module__ for x in self]) return False" 204,"def hook(self, function, dependencies=None): """"""Tries to load a hook Args: function (func): Function that will be called when the event is called Kwargs: dependencies (str): String or Iterable with modules whose hooks should be called before this one Raises: :class:TypeError Note that the dependencies are module-wide, that means that if `parent.foo` and `parent.bar` are both subscribed to `example` event and `child` enumerates `parent` as dependcy, **both** `foo` and `bar` must be called in order for the dependcy to get resolved. 
"""""" if not isinstance(dependencies, (Iterable, type(None), str)): raise TypeError(""Invalid list of dependencies provided!"") # Tag the function with its dependencies if not hasattr(function, ""__deps__""): function.__deps__ = dependencies # If a module is loaded before all its dependencies are loaded, put # it in _later list and don't load yet if self.isloaded(function.__deps__): self.append(function) else: self._later.append(function) # After each module load, retry to resolve dependencies for ext in self._later: if self.isloaded(ext.__deps__): self._later.remove(ext) self.hook(ext)" 205,"def parse_from_json(json_str): """""" Given a Unified Uploader message, parse the contents and return a MarketOrderList or MarketHistoryList instance. :param str json_str: A Unified Uploader message as a JSON string. :rtype: MarketOrderList or MarketHistoryList :raises: MalformedUploadError when invalid JSON is passed in. """""" try: message_dict = json.loads(json_str) except ValueError: raise ParseError(""Mal-formed JSON input."") upload_keys = message_dict.get('uploadKeys', False) if upload_keys is False: raise ParseError( ""uploadKeys does not exist. At minimum, an empty array is required."" ) elif not isinstance(upload_keys, list): raise ParseError( ""uploadKeys must be an array object."" ) upload_type = message_dict['resultType'] try: if upload_type == 'orders': return orders.parse_from_dict(message_dict) elif upload_type == 'history': return history.parse_from_dict(message_dict) else: raise ParseError( 'Unified message has unknown upload_type: %s' % upload_type) except TypeError as exc: # MarketOrder and HistoryEntry both raise TypeError exceptions if # invalid input is encountered. raise ParseError(exc.message)" 206,"def encode_to_json(order_or_history): """""" Given an order or history entry, encode it to JSON and return. :type order_or_history: MarketOrderList or MarketHistoryList :param order_or_history: A MarketOrderList or MarketHistoryList instance to encode to JSON. :rtype: str :return: The encoded JSON string. """""" if isinstance(order_or_history, MarketOrderList): return orders.encode_to_json(order_or_history) elif isinstance(order_or_history, MarketHistoryList): return history.encode_to_json(order_or_history) else: raise Exception(""Must be one of MarketOrderList or MarketHistoryList."")" 207,"def event_subscriber(event): """""" Register a method, which gets called when this event triggers. :param event: the event to register the decorator method on. """""" def wrapper(method): Registry.register_event(event.name, event, method) return wrapper" 208,"def dispatch_event(event, subject='id'): """""" Dispatch an event when the decorated method is called. :param event: the event class to instantiate and dispatch. :param subject_property: the property name to get the subject. """""" def wrapper(method): def inner_wrapper(*args, **kwargs): resource = method(*args, **kwargs) if isinstance(resource, dict): subject_ = resource.get(subject) data = resource else: subject_ = getattr(resource, subject) data = resource.__dict__ event(subject_, data).dispatch() return resource return inner_wrapper return wrapper" 209,"def add(self, classifier, threshold, begin=None, end=None): """"""Adds a new strong classifier with the given threshold to the cascade. 
**Parameters:** classifier : :py:class:`bob.learn.boosting.BoostedMachine` A strong classifier to add ``threshold`` : float The classification threshold for this cascade step ``begin``, ``end`` : int or ``None`` If specified, only the weak machines with the indices ``range(begin,end)`` will be added. """""" boosted_machine = bob.learn.boosting.BoostedMachine() if begin is None: begin = 0 if end is None: end = len(classifier.weak_machines) for i in range(begin, end): boosted_machine.add_weak_machine(classifier.weak_machines[i], classifier.weights[i]) self.cascade.append(boosted_machine) self.thresholds.append(threshold) self._indices()" 210,"def create_from_boosted_machine(self, boosted_machine, classifiers_per_round, classification_thresholds=-5.): """"""Creates this cascade from the given boosted machine, by simply splitting off strong classifiers that have classifiers_per_round weak classifiers. **Parameters:** ``boosted_machine`` : :py:class:`bob.learn.boosting.BoostedMachine` The strong classifier to split into a regular cascade. ``classifiers_per_round`` : int The number of classifiers that each cascade step should contain. ``classification_threshold`` : float A single threshold that will be applied in all rounds of the cascade. """""" indices = list(range(0, len(boosted_machine.weak_machines), classifiers_per_round)) if indices[-1] != len(boosted_machine.weak_machines): indices.append(len(boosted_machine.weak_machines)) self.cascade = [] self.indices = [] for i in range(len(indices)-1): machine = bob.learn.boosting.BoostedMachine() for index in range(indices[i], indices[i+1]): machine.add_weak_machine(boosted_machine.weak_machines[index], boosted_machine.weights[index, 0]) self.cascade.append(machine) if isinstance(classification_thresholds, (int, float)): self.thresholds = [classification_thresholds] * len(self.cascade) else: self.thresholds = classification_thresholds" 211,"def generate_boosted_machine(self): """"""generate_boosted_machine() -> strong Creates a single strong classifier from this cascade by concatenating all strong classifiers. **Returns:** ``strong`` : :py:class:`bob.learn.boosting.BoostedMachine` The strong classifier as a combination of all classifiers in this cascade. """""" strong = bob.learn.boosting.BoostedMachine() for machine, index in zip(self.cascade, self.indices): weak = machine.weak_machines weights = machine.weights for i in range(len(weak)): strong.add_weak_machine(weak[i], weights[i]) return strong" 212,"def save(self, hdf5): """"""Saves this cascade into the given HDF5 file. **Parameters:** ``hdf5`` : :py:class:`bob.io.base.HDF5File` An HDF5 file open for writing """""" # write the cascade to file hdf5.set(""Thresholds"", self.thresholds) for i in range(len(self.cascade)): hdf5.create_group(""Classifier_%d"" % (i+1)) hdf5.cd(""Classifier_%d"" % (i+1)) self.cascade[i].save(hdf5) hdf5.cd("".."") hdf5.create_group(""FeatureExtractor"") hdf5.cd(""FeatureExtractor"") self.extractor.save(hdf5) hdf5.cd("".."")" 213,"def load(self, hdf5): """"""Loads this cascade from the given HDF5 file. 
**Parameters:** ``hdf5`` : :py:class:`bob.io.base.HDF5File` An HDF5 file open for reading """""" # write the cascade to file self.thresholds = hdf5.read(""Thresholds"") self.cascade = [] for i in range(len(self.thresholds)): hdf5.cd(""Classifier_%d"" % (i+1)) self.cascade.append(bob.learn.boosting.BoostedMachine(hdf5)) hdf5.cd("".."") hdf5.cd(""FeatureExtractor"") self.extractor = FeatureExtractor(hdf5) hdf5.cd("".."") self._indices()" 214,"def check(ctx, repository, config): """"""Check commits."""""" ctx.obj = Repo(repository=repository, config=config)" 215,"def message(obj, commit='HEAD', skip_merge_commits=False): """"""Check the messages of the commits."""""" from ..kwalitee import check_message options = obj.options repository = obj.repository if options.get('colors') is not False: colorama.init(autoreset=True) reset = colorama.Style.RESET_ALL yellow = colorama.Fore.YELLOW green = colorama.Fore.GREEN red = colorama.Fore.RED else: reset = yellow = green = red = '' try: sha = 'oid' commits = _pygit2_commits(commit, repository) except ImportError: try: sha = 'hexsha' commits = _git_commits(commit, repository) except ImportError: click.echo('To use this feature, please install pygit2. ' 'GitPython will also work but is not recommended ' '(python <= 2.7 only).', file=sys.stderr) return 2 template = '{0}commit {{commit.{1}}}{2}\n\n'.format(yellow, sha, reset) template += '{message}{errors}' count = 0 ident = ' ' re_line = re.compile('^', re.MULTILINE) for commit in commits: if skip_merge_commits and _is_merge_commit(commit): continue message = commit.message errors = check_message(message, **options) message = re.sub(re_line, ident, message) if errors: count += 1 errors.insert(0, red) else: errors = [green, 'Everything is OK.'] errors.append(reset) click.echo(template.format(commit=commit, message=message.encode('utf-8'), errors='\n'.join(errors))) if min(count, 1): raise click.Abort" 216,"def files(obj, commit='HEAD', skip_merge_commits=False): """"""Check the files of the commits."""""" from ..kwalitee import check_file, SUPPORTED_FILES from ..hooks import run options = obj.options repository = obj.repository if options.get('colors') is not False: colorama.init(autoreset=True) reset = colorama.Style.RESET_ALL yellow = colorama.Fore.YELLOW green = colorama.Fore.GREEN red = colorama.Fore.RED else: reset = yellow = green = red = '' try: sha = 'oid' commits = _pygit2_commits(commit, repository) except ImportError: try: sha = 'hexsha' commits = _git_commits(commit, repository) except ImportError: click.echo( 'To use this feature, please install pygit2. 
GitPython will ' 'also work but is not recommended (python <= 2.7 only).', file=sys.stderr) click.exit(2) template = '{0}commit {{commit.{1}}}{2}\n\n'.format(yellow, sha, reset) template += '{message}{errors}\n' error_template = '\n{0}{{filename}}\n{1}{{errors}}{0}'.format(reset, red) no_errors = ['\n{0}Everything is OK.{1}'.format(green, reset)] msg_file_excluded = '\n{0}{{filename}} excluded.{1}'.format(yellow, reset) def _get_files_modified(commit): """"""Get the list of modified files that are Python or Jinja2."""""" cmd = ""git show --no-commit-id --name-only --diff-filter=ACMRTUXB {0}"" _, files_modified, _ = run(cmd.format(commit)) extensions = [re.escape(ext) for ext in list(SUPPORTED_FILES) + ["".rst""]] test = ""(?:{0})$"".format(""|"".join(extensions)) return list(filter(lambda f: re.search(test, f), files_modified)) def _ensure_directory(filename): dir_ = os.path.dirname(filename) if not os.path.exists(dir_): os.makedirs(dir_) def _format_errors(args): filename, errors = args if errors is None: return msg_file_excluded.format(filename=filename) else: return error_template.format(filename=filename, errors='\n'.join( errors if len(errors) else no_errors)) count = 0 ident = ' ' re_line = re.compile('^', re.MULTILINE) for commit in commits: if skip_merge_commits and _is_merge_commit(commit): continue message = commit.message commit_sha = getattr(commit, sha) tmpdir = mkdtemp() errors = {} try: for filename in _get_files_modified(commit): cmd = ""git show {commit_sha}:{filename}"" _, out, _ = run(cmd.format(commit_sha=commit_sha, filename=filename), raw_output=True) destination = os.path.join(tmpdir, filename) _ensure_directory(destination) with open(destination, 'w+') as f: f.write(out) errors[filename] = check_file(destination, **options) finally: shutil.rmtree(tmpdir, ignore_errors=True) message = re.sub(re_line, ident, message) if len(errors): count += 1 errors = map(_format_errors, errors.items()) else: errors = no_errors click.echo(template.format(commit=commit, message=message.encode('utf-8'), errors='\n'.join(errors))) if min(count, 1): raise click.Abort" 217,"def get_obj_subcmds(obj): """"""Fetch action in callable attributes which and commands Callable must have their attribute 'command' set to True to be recognised by this lookup. Please consider using the decorator ``@cmd`` to declare your subcommands in classes for instance. 
"""""" subcmds = [] for label in dir(obj.__class__): if label.startswith(""_""): continue if isinstance(getattr(obj.__class__, label, False), property): continue rvalue = getattr(obj, label) if not callable(rvalue) or not is_cmd(rvalue): continue if isinstance(obj, types.MethodType) and \ label in (""im_func"", ""im_self"", ""im_class""): continue ## potential command command_name = getattr(rvalue, ""command_name"", label[:-1] if label.endswith(""_"") else label) subcmds.append((command_name, rvalue)) return OrderedDict(subcmds)" 218,"def get_module_resources(mod): """"""Return probed sub module names from given module"""""" path = os.path.dirname(os.path.realpath(mod.__file__)) prefix = kf.basename(mod.__file__, ("".py"", "".pyc"")) if not os.path.exists(mod.__file__): import pkg_resources for resource_name in pkg_resources.resource_listdir(mod.__name__, ''): if resource_name.startswith(""%s_"" % prefix) and resource_name.endswith("".py""): module_name, _ext = os.path.splitext(kf.basename(resource_name)) yield module_name for f in glob.glob(os.path.join(path, '%s_*.py' % prefix)): module_name, _ext = os.path.splitext(kf.basename(f)) yield module_name" 219,"def get_mod_subcmds(mod): """"""Fetch action in same directory in python module python module loaded are of this form: '%s_*.py' % prefix """""" ## Look in modules attributes subcmds = get_obj_subcmds(mod) path = os.path.dirname(os.path.realpath(mod.__file__)) if mod.__package__ is None: sys.path.insert(0, os.path.dirname(path)) mod.__package__ = kf.basename(path) for module_name in get_module_resources(mod): try: mod = importlib.import_module("".%s"" % module_name, mod.__package__) except ImportError as e: msg.warn(""%r could not be loaded: %s"" % (module_name, e.message)) continue except IOError as e: print(""%s"" % module_name) raise if hasattr(mod, ""Command"") and is_cmd(mod.Command): obj = mod.Command if obj.__doc__ is None: msg.warn(""Missing doc string for command from "" ""module %s"" % module_name) continue if isinstance(obj, type): obj = obj() ## instanciate it. name = module_name.split(""_"", 1)[1] if name in subcmds: raise ValueError( ""Module command %r conflicts with already defined object "" ""command."" % name) subcmds[name] = obj return subcmds" 220,"def get_help(obj, env, subcmds): """"""Interpolate complete help doc of given object Assumption that given object as a specific interface: obj.__doc__ is the basic help object. obj.get_actions_titles() returns the subcommand if any. 
"""""" doc = txt.dedent(obj.__doc__ or """") env = env.copy() ## get a local copy doc = doc.strip() if not re.search(r""^usage:\s*$"", doc, flags=re.IGNORECASE | re.MULTILINE): doc += txt.dedent("""""" Usage: %(std_usage)s Options: %(std_options)s"""""") help_line = ("" %%-%ds %%s"" % (max([5] + [len(a) for a in subcmds]), )) env[""actions""] = ""\n"".join( help_line % ( name, get_help(subcmd, subcmd_env(env, name), {}).split(""\n"")[0]) for name, subcmd in subcmds.items()) env[""actions_help""] = """" if not env[""actions""] else ( ""ACTION could be one of:\n\n"" ""%(actions)s\n\n"" ""See '%(surcmd)s help ACTION' for more information "" ""on a specific command."" % env) if ""%(std_usage)s"" in doc: env[""std_usage""] = txt.indent( (""%(surcmd)s --help\n"" ""%(surcmd)s --version"" + ((""\n%(surcmd)s help [COMMAND]"" ""\n%(surcmd)s ACTION [ARGS...]"") if subcmds else """")) % env, _find_prefix(doc, ""%(std_usage)s""), first="""") if ""%(std_options)s"" in doc: env[""std_options""] = txt.indent( ""--help Show this screen.\n"" ""--version Show version."", _find_prefix(doc, ""%(std_options)s""), first="""") if subcmds and ""%(actions_help)s"" not in doc: doc += ""\n\n%(actions_help)s"" try: output = doc % env except KeyError as e: msg.err(""Doc interpolation of %s needed missing key %r"" % (aformat(env[""surcmd""], attrs=[""bold"", ]), e.args[0])) exit(1) except Exception as e: msg.err( ""Documentation of %s is not valid. Please check it:\n%s"" % (aformat(env[""surcmd""], attrs=[""bold"", ]), doc)) exit(1) return output" 221,"def get_calling_prototype(acallable): """"""Returns actual working calling prototype This means that the prototype given can be used directly in the same way by bound method, method, function, lambda:: >>> def f1(a, b, c=1): pass >>> get_calling_prototype(f1) (['a', 'b', 'c'], (1,)) >>> get_calling_prototype(lambda a, b: None) (['a', 'b'], ()) >>> get_calling_prototype(lambda a=None: None) (['a'], (None,)) >>> get_calling_prototype(lambda : None) ([], ()) >>> class A(object): ... def m1(self, a, b, c=None): pass ... @classmethod ... def cm(cls, a, b, c=None): pass ... @staticmethod ... def st(a, b, c=None): pass ... def __call__(self, a, b, c=None): pass >>> get_calling_prototype(A.m1) (['self', 'a', 'b', 'c'], (None,)) >>> A.m1(A(), 1, 2, 3) >>> get_calling_prototype(A().m1) (['a', 'b', 'c'], (None,)) >>> get_calling_prototype(A.cm) (['a', 'b', 'c'], (None,)) >>> get_calling_prototype(A().cm) (['a', 'b', 'c'], (None,)) >>> get_calling_prototype(A.st) (['a', 'b', 'c'], (None,)) >>> get_calling_prototype(A().st) (['a', 'b', 'c'], (None,)) >>> get_calling_prototype(A()) (['a', 'b', 'c'], (None,)) """""" assert callable(acallable) if inspect.ismethod(acallable) or inspect.isfunction(acallable): args, vargs, vkwargs, defaults = inspect.getargspec(acallable) elif not inspect.isfunction(acallable) and hasattr(acallable, ""__call__""): ## a class instance ? which is callable... 
args, vargs, vkwargs, defaults = inspect.getargspec(acallable.__call__) ## remove the 'self' argument args = args[1:] else: raise ValueError(""Hum, %r is a callable, but not a function/method, "" ""nor a instance with __call__ arg..."" % acallable) if vargs or vkwargs: raise SyntaxError(""variable *arg or **kwarg are not supported."") if is_bound(acallable): args = args[1:] if defaults is None: defaults = () ## be coherent return args, defaults" 222,"def match_prototype(acallable, arguments): """"""Return tuple (pos args, kwargs) to call given callable Let's define a callable that will printout >>> arguments = {'alphonse': 1, 'bertrand': 2, 'charlie': 3} >>> match_prototype(lambda arguments: None, arguments) ([{'bertrand': 2, 'charlie': 3, 'alphonse': 1}], {}) >>> match_prototype(lambda args: None, arguments) ([{'bertrand': 2, 'charlie': 3, 'alphonse': 1}], {}) >>> match_prototype(lambda bertrand, arguments: None, arguments) ([2, {'charlie': 3, 'alphonse': 1}], {}) >>> match_prototype(lambda bertrand, arguments, foo=None: None, arguments) ([2, {'charlie': 3, 'alphonse': 1}], {}) >>> match_prototype(lambda bertrand, arguments, charlie=None: None, ... arguments) ([2, {'alphonse': 1}], {'charlie': 3}) """""" args, defaults = get_calling_prototype(acallable) arguments = arguments.copy() defaults = [] if defaults is None else defaults p = [] kw = {} pos_args = len(args) - len(defaults) has_args = any(k in ('args', 'arguments') for k in args) args_label_pos = None for i, arg in enumerate(args): is_pos = i < pos_args val = None if not args_label_pos and arg in ('arguments', 'args'): val = arguments ## copy by reference here is important else: k = None for k in arguments: norm = k if norm.startswith(""--""): if is_pos: continue norm = norm[2:] elif k.startswith(""-""): if is_pos: continue norm = norm[1:] norm = norm.lower() norm = norm.replace('-', '_') if norm == arg: break else: if not has_args: raise SyntaxError( ""Can't match your function argument %r with "" ""command line keys (%s)."" % (arg, "", "".join(arguments.keys()))) else: k = None if k is not None: ## inplace removal is important here val = arguments.pop(k) if is_pos: p.append(val) else: if val is not None: ## we should only have strings if it was set. kw[arg] = val return p, kw" 223,"def initialize(self): """""" Generate the root directory root if it doesn't already exist """""" if not os.path.exists(self.root_dir): os.makedirs(self.root_dir) assert os.path.isdir(self.root_dir), ""%s is not a directory! Please move or remove it."" % self.root_dir for d in [""bin"", ""lib"", ""include""]: target_path = os.path.join(self.root_dir, d) if not os.path.exists(target_path): os.makedirs(target_path) if not os.path.exists(self.manifest_path): open(self.manifest_path, ""w+"").close() self.new = False" 224,"def finalize(self): """""" finalize any open file handles """""" if self.rc_file: self.rc_file.close() if self.env_file: self.env_file.close()" 225,"def remove(self): """""" Removes the sprinter directory, if it exists """""" if self.rc_file: self.rc_file.close() if self.env_file: self.env_file.close() shutil.rmtree(self.root_dir)" 226,"def symlink_to_bin(self, name, path): """""" Symlink an object at path to name in the bin folder. """""" self.__symlink_dir(""bin"", name, path) os.chmod(os.path.join(self.root_dir, ""bin"", name), os.stat(path).st_mode | stat.S_IXUSR | stat.S_IRUSR)" 227,"def remove_from_bin(self, name): """""" Remove an object from the bin folder. 
"""""" self.__remove_path(os.path.join(self.root_dir, ""bin"", name))" 228,"def remove_from_lib(self, name): """""" Remove an object from the bin folder. """""" self.__remove_path(os.path.join(self.root_dir, ""lib"", name))" 229,"def remove_feature(self, feature_name): """""" Remove an feature from the environment root folder. """""" self.clear_feature_symlinks(feature_name) if os.path.exists(self.install_directory(feature_name)): self.__remove_path(self.install_directory(feature_name))" 230,"def clear_feature_symlinks(self, feature_name): """""" Clear the symlinks for a feature in the symlinked path """""" logger.debug(""Clearing feature symlinks for %s"" % feature_name) feature_path = self.install_directory(feature_name) for d in ('bin', 'lib'): if os.path.exists(os.path.join(self.root_dir, d)): for link in os.listdir(os.path.join(self.root_dir, d)): path = os.path.join(self.root_dir, d, link) if feature_path in os.path.realpath(path): getattr(self, 'remove_from_%s' % d)(link)" 231,"def add_to_env(self, content): """""" add content to the env script. """""" if not self.rewrite_config: raise DirectoryException(""Error! Directory was not intialized w/ rewrite_config."") if not self.env_file: self.env_path, self.env_file = self.__get_env_handle(self.root_dir) self.env_file.write(content + '\n')" 232,"def add_to_rc(self, content): """""" add content to the rc script. """""" if not self.rewrite_config: raise DirectoryException(""Error! Directory was not intialized w/ rewrite_config."") if not self.rc_file: self.rc_path, self.rc_file = self.__get_rc_handle(self.root_dir) self.rc_file.write(content + '\n')" 233,"def add_to_gui(self, content): """""" add content to the gui script. """""" if not self.rewrite_config: raise DirectoryException(""Error! Directory was not intialized w/ rewrite_config."") if not self.gui_file: self.gui_path, self.gui_file = self.__get_gui_handle(self.root_dir) self.gui_file.write(content + '\n')" 234,"def __remove_path(self, path): """""" Remove an object """""" curpath = os.path.abspath(os.curdir) if not os.path.exists(path): logger.warn(""Attempted to remove a non-existent path %s"" % path) return try: if os.path.islink(path): os.unlink(path) elif os.path.isdir(path): shutil.rmtree(path) else: os.unlink(path) # in the case we just deleted ourselves out of a valid directory, # we move to a valid directory. 
if curpath == path: os.chdir(tempfile.gettempdir()) except OSError: logger.error(""Unable to remove object at path %s"" % path) raise DirectoryException(""Unable to remove object at path %s"" % path)" 235,"def __get_env_handle(self, root_dir): """""" get the filepath and filehandle to the .env file for the environment """""" env_path = os.path.join(root_dir, '.env') gui_path = os.path.join(root_dir, '.gui') fh = open(env_path, ""w+"") # .env will source utils.sh if it hasn't already fh.write(source_template % (gui_path, gui_path)) fh.write(source_template % (self.shell_util_path, self.shell_util_path)) return (env_path, fh)" 236,"def __get_rc_handle(self, root_dir): """""" get the filepath and filehandle to the rc file for the environment """""" rc_path = os.path.join(root_dir, '.rc') env_path = os.path.join(root_dir, '.env') fh = open(rc_path, ""w+"") # .rc will always source .env fh.write(source_template % (env_path, env_path)) return (rc_path, fh)" 237,"def __get_gui_handle(self, root_dir): """""" get the filepath and filehandle to the .env file for the environment """""" gui_path = os.path.join(root_dir, '.gui') fh = open(gui_path, ""w+"") return (gui_path, fh)" 238,"def __symlink_dir(self, dir_name, name, path): """""" Symlink an object at path to name in the dir_name folder. remove it if it already exists. """""" target_dir = os.path.join(self.root_dir, dir_name) if not os.path.exists(target_dir): os.makedirs(target_dir) target_path = os.path.join(self.root_dir, dir_name, name) logger.debug(""Attempting to symlink %s to %s..."" % (path, target_path)) if os.path.exists(target_path): if os.path.islink(target_path): os.remove(target_path) else: logger.warn(""%s is not a symlink! please remove it manually."" % target_path) return os.symlink(path, target_path)" 239,"def create(self, options=None): """"""Create a new document job (sync or async)."""""" if options is None: raise ValueError(""Please pass in an options dict"") if not _has_content(options): raise NoContentError(""must supply 'document_content' or 'document_url'"") default_options = { ""name"": ""default"", ""document_type"": ""pdf"", ""test"": False, ""async"": False, ""raise_exception_on_failure"": False, } options = dict(list(default_options.items()) + list(options.items())) raise_exception_on_failure = options.pop(""raise_exception_on_failure"") query = {""user_credentials"": self.api_key} if options[""async""]: query[""output""] = ""json"" resp = requests.post( ""%sdocs"" % (self._url), json=options, params=query, timeout=self._timeout ) if raise_exception_on_failure and resp.status_code != 200: raise DocumentCreationFailure(resp.content, resp.status_code) if options[""async""]: return json.loads(resp.content.decode(""utf-8"")) else: return resp" 240,"def list_docs(self, options=None): """"""Return list of previously created documents."""""" if options is None: raise ValueError(""Please pass in an options dict"") default_options = { ""page"": 1, ""per_page"": 100, ""raise_exception_on_failure"": False, ""user_credentials"": self.api_key, } options = dict(list(default_options.items()) + list(options.items())) raise_exception_on_failure = options.pop(""raise_exception_on_failure"") resp = requests.get( ""%sdocs"" % (self._url), params=options, timeout=self._timeout ) if raise_exception_on_failure and resp.status_code != 200: raise DocumentListingFailure(resp.content, resp.status_code) return resp" 241,"def status(self, status_id, raise_exception_on_failure=False): """"""Return the status of the generation job."""""" query = 
{""output"": ""json"", ""user_credentials"": self.api_key} resp = requests.get( ""%sstatus/%s"" % (self._url, status_id), params=query, timeout=self._timeout ) if raise_exception_on_failure and resp.status_code != 200: raise DocumentStatusFailure(resp.content, resp.status_code) if resp.status_code == 200: as_json = json.loads(resp.content) if as_json[""status""] == ""completed"": as_json[""download_key""] = _get_download_key(as_json[""download_url""]) return as_json return resp" 242,"def download(self, download_key, raise_exception_on_failure=False): """"""Download the file represented by the download_key."""""" query = {""output"": ""json"", ""user_credentials"": self.api_key} resp = requests.get( ""%sdownload/%s"" % (self._url, download_key), params=query, timeout=self._timeout, ) if raise_exception_on_failure and resp.status_code != 200: raise DocumentDownloadFailure(resp.content, resp.status_code) return resp" 243,"def merge_INIConf(a, b): """"""用 b 的内容覆盖 a 的内容(若重名),并返回 a """""" for sname in b.sections(): if a.has_section(sname): for oname in b.options(sname): a[sname][oname] = b[sname][oname] else: a[sname] = b[sname] return a" 244,"def copy_from_dict(self, adict, parent=None): """"""从一个已经存在的 dict 中复制所有的值。 :param adict: 被复制的 dict。 :type adict: dict :param parent: 复制到哪个父对象。 若为 None 则复制到 self 。 :type parent: rookout.PYConf """""" if not parent: parent = self for k,v in adict.items(): if isinstance(v, dict): vDict = PYConf(v) self.copy_from_dict(v, vDict) parent[k] = vDict else: parent[k] = v" 245,"def dump(self, human=False): """"""将自身内容打印成字符串 :param bool human: 若值为 True ,则打印成易读格式。 """""" txt = str(self) if human: txt = txt.replace("", '"", "",\n'"") txt = txt.replace(""{"", ""{\n"") txt = txt.replace(""}"", ""\n}"") txt = txt.replace(""["", ""[\n"") txt = txt.replace(""]"", ""\n]"") return txt" 246,"def save_to_file(self, path, human=True): """"""将自身内容保存到文件。 :param str path: 保存的文件路径。 :param bool human: 参见 :func:`dump()` """""" write_file(path, self.dump(human)) slog.info(""Save %a done."", path)" 247,"def read_from_file(self, path): """"""从一个文本文件中读入信息。 假设该文本文件的格式与 :func:`dump()` 相同。 :param str path: 待读入的文件路径。 """""" if not os.path.exists(path): slog.warning(""The file %s is not exist."", path) return False txt = read_file(path) dic = eval(txt) self.copy_from_dict(dic) return True" 248,"def _set_parameters(self, parameters): """"""Sort out the various possible parameter inputs and return a config object (dict) We have multiple input formats: 1) a list, tuple, or numpy.ndarray, containing the linear parameters in the following order: * for single term: rho0, m1, tau1, c1 * for multiple termss: rho0, m1, m2, ..., tau1, tau2, ..., c1, c2, ... 
2) a dictionary with the entries ""rho0"", ""m"", ""tau"", ""c"" 2b) if the dictionary entries for ""m"", ""tau"", and ""c"" are lists, the entries correspond to mulitple polarisazion terms """""" nr_f = self.f.size # sort out parameters rho0, m, tau, c = self._sort_parameters(parameters) newsize = (nr_f, len(m)) # rho0_resized = np.resize(rho0, newsize) m_resized = np.resize(m, newsize) tau_resized = np.resize(tau, newsize) c_resized = np.resize(c, newsize) omega = np.atleast_2d(2 * np.pi * self.f).T self.w = np.resize(omega, (len(m), nr_f)).T self.rho0 = rho0 self.m = m_resized self.tau = tau_resized self.c = c_resized # compute some common terms self.otc = (self.w * self.tau) ** self.c self.otc2 = (self.w * self.tau) ** (2 * self.c) self.ang = self.c * np.pi / 2.0 # rad self.denom = 1 + 2 * self.otc * np.cos(self.ang) + self.otc2" 249,"def response(self, parameters): r""""""Complex response of the Cole-Cole model:: :math:`\hat{\rho} = \rho_0 \left(1 - \sum_i m_i (1 - \frac{1}{1 + (j \omega \tau_i)^c_i})\right)` Parameters ---------- parameters: list or tuple or numpy.ndarray Cole-Cole model parameters: rho0, m, tau, c (all linear) Returns ------- response: :class:`sip_models.sip_response.sip_response` model response object """""" # get a config object self._set_parameters(parameters) terms = self.m * (1 - (1 / (1 + (1j * self.w * self.tau) ** self.c))) # sum up terms specs = np.sum(terms, axis=1) rcomplex = self.rho0 * (1 - specs) response = sip_response.sip_response(self.f, rcomplex=rcomplex) return response" 250,"def dre_drho0(self, pars): r"""""" Compute partial derivative of real parts with respect to :math:`\rho_0` :math:`\frac{\partial \hat{\rho'}(\omega)}{\partial \rho_0} = 1 - \frac{m (\omega \tau)^c cos(\frac{c \pi}{2}) + (\omega \tau)^c}{1 + 2 (\omega \tau)^c cos(\frac{c \pi}{2}) + (\omega \tau)^{2 c}}` Note that partial derivatives towards :math:`\rho_0` are 1D, in contrast to the other parameter derivatives, which usually return 2D arrays! 
Returns ------- dre_drho0: :class:`numpy.ndarray` Size N (nr of frequencies) array with the derivatives """""" self._set_parameters(pars) numerator = self.m * self.otc * (np.cos(self.ang) + self.otc) term = numerator / self.denom specs = np.sum(term, axis=1) result = 1 - specs return result" 251,"def dre_dlog10rho0(self, pars): """"""Compute partial derivative of real parts to log10(rho0) """""" # first call the linear response to set the parameters linear_response = self.dre_drho0(pars) result = np.log(10) * self.rho0 * linear_response return result" 252,"def dre_dm(self, pars): r"""""" :math:`\frac{\partial \hat{\rho'}(\omega)}{\partial m} = - \rho_0 m (\omega \tau)^c \frac{(cos(\frac{c \pi}{2}) + (\omega \tau)^c)}{1 + 2 (\omega \tau)^c cos(\frac{c \pi}{2}) + (\omega \tau)^{2 c}}` """""" self._set_parameters(pars) numerator = -self.otc * (np.cos(self.ang) + self.otc) result = numerator / self.denom result *= self.rho0 return result" 253,"def dim_dm(self, pars): r"""""" :math:`\frac{\partial \hat{\rho''}(\omega)}{\partial m} = - \rho_0 m (\omega \tau)^c \frac{sin(\frac{c \pi}{2})}{1 + 2 (\omega \tau)^c cos(\frac{c \pi}{2}) + (\omega \tau)^{2 c}}` """""" self._set_parameters(pars) numerator = -self.otc * np.sin(self.ang) result = numerator / self.denom result *= self.rho0 return result" 254,"def dim_dtau(self, pars): r"""""" :math:`\frac{\partial \hat{\rho''}(\omega)}{\partial \tau} = \rho_0 \frac{-m \omega^c c \tau^{c-1} sin(\frac{c \pi}{2} }{1 + 2 (\omega \tau)^c cos(\frac{c \pi}{2}) + (\omega \tau)^{2 c}} + \rho_0 \frac{\left[-m (\omega \tau)^c sin(\frac{c \pi}{2} \right] \cdot \left[ 2 \omega^c c \tau^{c-1} cos(\frac{c \pi}{2}) + 2 c \omega^{2 c} \tau^{2 c - 1}\right]}{\left[1 + 2 (\omega \tau)^c cos(\frac{c \pi}{2}) + (\omega \tau)^{2 c}\right]^2}` """""" self._set_parameters(pars) # term1 nom1 = - self.m * np.sin(self.ang) * self.w ** self.c *\ self.c * self.tau ** (self.c - 1) term1 = nom1 / self.denom # term2 nom2 = (self.m * self.otc * np.sin(self.ang)) *\ (2 * self.w ** self.c * self.c * self.tau ** (self.c - 1) * np.cos(self.ang) + 2 * self.c * self.w ** (2 * self.c) * self.tau ** (2 * self.c - 1)) term2 = nom2 / self.denom ** 2 result = term1 + term2 result *= self.rho0 return result" 255,"def dim_dc(self, pars): r"""""" :math:`\frac{\partial \hat{\rho''}(\omega)}{\partial c} = \rho_0 \frac{-m sin(\frac{c \pi}{2}) ln(\omega \tau)(\omega \tau)^c - m (\omega \tau)^c \frac{\pi}{2} cos(\frac{\pi}{2}}{1 + 2 (\omega \tau)^c cos(\frac{c \pi}{2}) + (\omega \tau)^{2 c}} + \rho_0 \frac{\left[-m (\omega \tau)^c cos(\frac{c \pi}{2}) \right] \cdot \left[ -2 ln(\omega \tau) (\omega \tau)^c cos(\frac{c \pi}{2}) + 2 (\omega \tau)^c \frac{\pi}{2} cos(\frac{c \pi}{2}) \right] + \left[2 ln(\omega \tau) (\omega \tau)^{2 c}\right]}{\left[1 + 2 (\omega \tau)^c cos(\frac{c \pi}{2}) + (\omega \tau)^{2 c}\right]^2}` """""" self._set_parameters(pars) # term1 nom1a = - self.m * np.log(self.w * self.tau) * self.otc *\ np.sin(self.ang) nom1b = - self.m * self.otc * (np.pi / 2.0) * np.cos(self.ang) term1 = (nom1a + nom1b) / self.denom # term2 nom2 = (self.m * self.otc * np.sin(self.ang)) *\ (2 * np.log(self.w * self.tau) * self.otc * np.cos(self.ang) - 2 * self.otc * (np.pi / 2.0) * np.sin(self.ang) + 2 * np.log(self.w * self.tau) * self.otc2) term2 = nom2 / self.denom ** 2 result = term1 + term2 result *= self.rho0 return result" 256,"def Jacobian_re_im(self, pars): r"""""" :math:`J` >>> import sip_models.res.cc as cc >>> import numpy as np >>> f = np.logspace(-3, 3, 20) >>> pars = [100, 0.1, 0.04, 0.8] 
>>> obj = cc.cc(f) >>> J = obj.Jacobian_re_im(pars) """""" partials = [] # partials.append(self.dre_dlog10rho0(pars)[:, np.newaxis, :]) partials.append(self.dre_drho0(pars)[:, np.newaxis]) partials.append(self.dre_dm(pars)) # partials.append(self.dre_dlog10tau(pars)) partials.append(self.dre_dtau(pars)) partials.append(self.dre_dc(pars)) # partials.append(self.dim_dlog10rho0(pars)[:, np.newaxis, :]) partials.append(self.dim_drho0(pars)[:, np.newaxis]) partials.append(self.dim_dm(pars)) # partials.append(self.dim_dlog10tau(pars)) partials.append(self.dim_dtau(pars)) partials.append(self.dim_dc(pars)) print('SHAPES') for x in partials: print(x.shape) J = np.concatenate(partials, axis=1) return J" 257,"def read_dict_or_list_from_json(desired_type: Type[dict], file_object: TextIOBase, logger: Logger, conversion_finder: ConversionFinder, **kwargs) -> Dict[str, Any]: """""" Helper method to read a dictionary from a .json file using json library :param file_object: :return: """""" # lazy import in order not to force use of jprops import json res = json.load(file_object) # convert if required return ConversionFinder.convert_collection_values_according_to_pep(res, desired_type, conversion_finder, logger, **kwargs)" 258,"def get_default_collection_parsers(parser_finder: ParserFinder, conversion_finder: ConversionFinder) -> List[AnyParser]: """""" Utility method to return the default parsers able to parse a dictionary from a file. :return: """""" return [SingleFileParserFunction(parser_function=read_dict_or_list_from_json, streaming_mode=True, custom_name='read_dict_or_list_from_json', supported_exts={'.json'}, supported_types={dict, list}, function_args={'conversion_finder': conversion_finder}), MultifileCollectionParser(parser_finder) ]" 259,"def get_default_collection_converters(conversion_finder: ConversionFinder) -> List[Union[Converter[Any, dict], Converter[dict, Any]]]: """""" Utility method to return the default converters associated to dict (from dict to other type, and from other type to dict) :return: """""" return [ConverterFunction(from_type=List, to_type=Set, conversion_method=list_to_set, custom_name='list_to_set', function_args={'conversion_finder': conversion_finder}), ConverterFunction(from_type=List, to_type=Tuple, conversion_method=list_to_tuple, custom_name='list_to_tuple', function_args={'conversion_finder': conversion_finder})]" 260,"def _get_parsing_plan_for_multifile_children(self, obj_on_fs: PersistedObject, desired_type: Type[Any], logger: Logger) -> Dict[str, Any]: """""" Simply inspects the required type to find the base type expected for items of the collection, and relies on the ParserFinder to find the parsing plan :param obj_on_fs: :param desired_type: :param logger: :return: """""" # nb of file children n_children = len(obj_on_fs.get_multifile_children()) # first extract base collection type subtypes, key_type = _extract_collection_base_type(desired_type) if isinstance(subtypes, tuple): # -- check the tuple length if n_children != len(subtypes): raise FolderAndFilesStructureError.create_for_multifile_tuple(obj_on_fs, len(subtypes), len(obj_on_fs.get_multifile_children())) else: # -- repeat the subtype n times subtypes = [subtypes] * n_children # -- for each child create a plan with the appropriate parser children_plan = OrderedDict() # use sorting for reproducible results in case of multiple errors for (child_name, child_fileobject), child_typ in zip(sorted(obj_on_fs.get_multifile_children().items()), subtypes): # -- use the parserfinder to find the plan t, 
child_parser = self.parser_finder.build_parser_for_fileobject_and_desiredtype(child_fileobject, child_typ, logger) children_plan[child_name] = child_parser.create_parsing_plan(t, child_fileobject, logger, _main_call=False) return children_plan" 261,"def _parse_multifile(self, desired_type: Type[Union[Dict, List, Set, Tuple]], obj: PersistedObject, parsing_plan_for_children: Dict[str, ParsingPlan], logger: Logger, options: Dict[str, Dict[str, Any]]) \ -> Union[Dict, List, Set, Tuple]: """""" Options may contain a section with id 'MultifileCollectionParser' containing the following options: * lazy_parsing: if True, the method will return immediately without parsing all the contents. Instead, the returned collection will perform the parsing the first time an item is required. * background_parsing: if True, the method will return immediately while a thread parses all the contents in the background. Note that users cannot set both lazy_parsing and background_parsing to True at the same time :param desired_type: :param obj: :param parsing_plan_for_children: :param logger: :param options: :return: """""" # first get the options and check them lazy_parsing = False background_parsing = False opts = self._get_applicable_options(options) for opt_key, opt_val in opts.items(): if opt_key is 'lazy_parsing': lazy_parsing = opt_val elif opt_key is 'background_parsing': background_parsing = opt_val else: raise Exception('Invalid option in MultiFileCollectionParser : ' + opt_key) check_var(lazy_parsing, var_types=bool, var_name='lazy_parsing') check_var(background_parsing, var_types=bool, var_name='background_parsing') if lazy_parsing and background_parsing: raise ValueError('lazy_parsing and background_parsing cannot be set to true at the same time') if lazy_parsing: # build a lazy dictionary results = LazyDictionary(sorted(list(parsing_plan_for_children.keys())), loading_method=lambda x: parsing_plan_for_children[x].execute(logger, options)) # logger.debug('Assembling a ' + get_pretty_type_str(desired_type) + ' from all children of ' + str(obj) # + ' (lazy parsing: children will be parsed when used) ') logger.debug('(P) {loc} : lazy parsing ON, children will be parsed only if/when used'.format( loc=obj.get_pretty_location(blank_parent_part=(not GLOBAL_CONFIG.full_paths_in_logs), compact_file_ext=True))) elif background_parsing: # -- TODO create a thread to perform the parsing in the background raise ValueError('Background parsing is not yet supported') else: # Parse right now results = OrderedDict() # parse all children according to their plan # -- use key-based sorting on children to lead to reproducible results # (in case of multiple errors, the same error will show up first everytime) for child_name, child_plan in sorted(parsing_plan_for_children.items()): results[child_name] = child_plan.execute(logger, options) # logger.debug('Assembling a ' + get_pretty_type_str(desired_type) + ' from all parsed children of ' # + str(obj)) if issubclass(desired_type, list): # return a list facade return KeySortedListFacadeForDict(results) elif issubclass(desired_type, tuple): # return a tuple facade return KeySortedTupleFacadeForDict(results) elif issubclass(desired_type, set): # return a set facade return SetFacadeForDict(results) elif issubclass(desired_type, dict): # return the dict directly return results else: raise TypeError('Cannot build the desired collection out of the multifile children: desired type is not ' 'supported: ' + get_pretty_type_str(desired_type))" 262,"def dispatch(self, producer=None): """""" 
Dispatch the event, sending a message to the queue using a producer. :param producer: optional `Producer` to replace the default one. """""" log.info('@Event.dispatch `{}` with subject `{}`' .format(self.name, self.subject)) producer = (producer or Registry.get_producer()) if not producer: raise MissingProducerError('You have not registered a Producer') try: producer.produce(self.topic, self.name, self.subject, self.data) except: fallback = Registry.get_fallback() fallback(self) raise" 263,"def read_annotation_file(annotation_file, annotation_type): """"""read_annotation_file(annotation_file, annotation_type) -> annotations Reads annotations from the given ``annotation_file``. The way, how annotations are read depends on the given ``annotation_type``. Depending on the type, one or several annotations might be present in the annotation file. Currently, these variants are implemented: - ``'lr-eyes'``: Only the eye positions are stored, in a single row, like: ``le_x le_y re_x re_y``, comment lines starting with ``'#'`` are ignored. - ``'named'``: Each line of the file contains a name and two floats, like ``reye x y``; empty lines separate between sets of annotations. - ``'idiap'``: A special 22 point format, where each line contains the index and the locations, like ``1 x y``. - ``'fddb'``: a special format for the FDDB database; empty lines separate between sets of annotations Finally, a list of ``annotations`` is returned in the format: ``[{name: (y,x)}]``. **Parameters:** ``annotation_file`` : str The file name of the annotation file to read ``annotation_type`` : str (see above) The style of annotation file, in which the given ``annotation_file`` is **Returns:** ``annotations`` : [dict] A list of annotations read from the given file, grouped by annotated objects (faces). Each annotation is generally specified as the two eye coordinates, i.e., ``{'reye' : (rey, rex), 'leye' : (ley, lex)}``, but other types of annotations might occur as well. """""" annotations = [{}] with open(annotation_file) as f: if annotation_type == 'idiap': # This is a special format where we have enumerated annotations, and a 'gender' for line in f: positions = line.rstrip().split() if positions: if positions[0].isdigit(): # position field assert len(positions) == 3 id = int(positions[0]) annotations[-1]['key%d'%id] = (float(positions[2]),float(positions[1])) else: # another field, we take the first entry as key and the rest as values annotations[-1][positions[0]] = positions[1:] elif len(annotations[-1]) > 0: # empty line; split between annotations annotations.append({}) # finally, we add the eye center coordinates as the center between the eye corners; the annotations 3 and 8 seem to be the pupils... for annotation in annotations: if 'key1' in annotation and 'key5' in annotation: annotation['reye'] = ((annotation['key1'][0] + annotation['key5'][0])/2., (annotation['key1'][1] + annotation['key5'][1])/2.) if 'key6' in annotation and 'key10' in annotation: annotation['leye'] = ((annotation['key6'][0] + annotation['key10'][0])/2., (annotation['key6'][1] + annotation['key10'][1])/2.) 
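# Tiny worked example (made-up coordinates) of the midpoint rule applied just
# above: 'reye' is the mean of the key1/key5 corners and 'leye' the mean of the
# key6/key10 corners, all stored as (y, x) pairs.
annotation = {
    'key1': (120.0, 80.0), 'key5': (122.0, 96.0),    # right-eye corners (y, x)
    'key6': (121.0, 140.0), 'key10': (123.0, 156.0), # left-eye corners (y, x)
}
reye = ((annotation['key1'][0] + annotation['key5'][0]) / 2.,
        (annotation['key1'][1] + annotation['key5'][1]) / 2.)   # (121.0, 88.0)
leye = ((annotation['key6'][0] + annotation['key10'][0]) / 2.,
        (annotation['key6'][1] + annotation['key10'][1]) / 2.)  # (122.0, 148.0)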
elif annotation_type == 'lr-eyes': # In this format, the eyes are given in a single row ""le_x le_y re_x re_y"", possibly with a comment line # There is only a single annotation per image for line in f: if len(line) and line[0] != '#': positions = line.rstrip().split() annotations[0]['leye'] = (float(positions[1]),float(positions[0])) annotations[0]['reye'] = (float(positions[3]),float(positions[2])) elif annotation_type == 'named': # In this format, each line contains three entries: ""keyword x y"" for line in f: positions = line.rstrip().split() if positions: annotations[-1][positions[0]] = (float(positions[2]),float(positions[1])) elif len(annotations[-1]) > 0: # empty line; split between annotations annotations.append({}) elif annotation_type == 'fddb': # This is a special format for the FDDB database for line in f: positions = line.rstrip().split() if not len(positions): if len(annotations[-1]) > 0: # empty line; split between annotations annotations.append({}) elif len(positions) == 2: annotations[-1][positions[0]] = float(positions[1]) elif len(positions) == 3: annotations[-1][positions[0]] = (float(positions[2]),float(positions[1])) else: raise ValueError(""Could not interpret line %s of the annotation file"" % line) else: raise ValueError(""The given annotation type %s is not known"" % annotation_type) if not annotations[-1]: del annotations[-1] return annotations" 264,"def _build_opstackd(): """""" Builds a dictionary that maps the name of an op-code to the number of elemnts it adds to the stack when executed. For some opcodes, the dictionary may contain a function which requires the #dis.Instruction object to determine the actual value. The dictionary mostly only contains information for instructions used in expressions. """""" def _call_function_argc(argc): func_obj = 1 args_pos = (argc & 0xff) args_kw = ((argc >> 8) & 0xff) * 2 return func_obj + args_pos + args_kw def _make_function_argc(argc): args_pos = (argc + 0xff) args_kw = ((argc >> 8) & 0xff) * 2 annotations = (argc >> 0x7fff) anootations_names = 1 if annotations else 0 code_obj = 1 qualname = 1 return args_pos + args_kw + annotations + anootations_names + code_obj + qualname result = { 'NOP': 0, 'POP_TOP': -1, 'ROT_TWO': 0, 'ROT_THREE': 0, 'DUP_TOP': 1, 'DUP_TOP_TWO': 2, # Unary operations 'GET_ITER': 0, # Miscellaneous operations 'PRINT_EXPR': -1, 'BREAK_LOOP': 0, # xxx: verify 'CONTINUE_LOOP': 0, # xxx: verify 'SET_ADD': -1, # xxx: verify 'LIST_APPEND': -1, # xxx: verify 'MAP_ADD': -2, # xxx: verify 'RETURN_VALUE': -1, # xxx: verify 'YIELD_VALUE': -1, 'YIELD_FROM': -1, 'IMPORT_STAR': -1, # 'POP_BLOCK': # 'POP_EXCEPT': # 'END_FINALLY': # 'LOAD_BUILD_CLASS': # 'SETUP_WITH': # 'WITH_CLEANUP_START': # 'WITH_CLEANUP_FINISH': 'STORE_NAME': -1, 'DELETE_NAME': 0, 'UNPACK_SEQUENCE': lambda op: op.arg, 'UNPACK_EX': lambda op: (op.arg & 0xff) - (op.arg >> 8 & 0xff), # xxx: check 'STORE_ATTR': -2, 'DELETE_ATTR': -1, 'STORE_GLOBAL': -1, 'DELETE_GLOBAL': 0, 'LOAD_CONST': 1, 'LOAD_NAME': 1, 'BUILD_TUPLE': lambda op: 1 - op.arg, 'BUILD_LIST': lambda op: 1 - op.arg, 'BUILD_SET': lambda op: 1 - op.arg, 'BUILD_MAP': lambda op: 1 - op.arg, 'LOAD_ATTR': 0, 'COMPARE_OP': 1, # xxx: check # 'IMPORT_NAME': # 'IMPORT_FROM': # 'JUMP_FORWARD': # 'POP_JUMP_IF_TRUE': # 'POP_JUMP_IF_FALSE': # 'JUMP_IF_TRUE_OR_POP': # 'JUMP_IF_FALSE_OR_POP': # 'JUMP_ABSOLUTE': # 'FOR_ITER': 'LOAD_GLOBAL': 1, # 'SETUP_LOOP' # 'SETUP_EXCEPT' # 'SETUP_FINALLY': 'LOAD_FAST': 1, 'STORE_FAST': -1, 'DELETE_FAST': 0, # 'LOAD_CLOSURE': 'LOAD_DEREF': 1, 'LOAD_CLASSDEREF': 1, 
'STORE_DEREF': -1, 'DELETE_DEREF': 0, 'RAISE_VARARGS': lambda op: -op.arg, 'CALL_FUNCTION': lambda op: 1 - _call_function_argc(op.arg), 'MAKE_FUNCTION': lambda op: 1 - _make_function_argc(op.arg), # 'MAKE_CLOSURE': 'BUILD_SLICE': lambda op: 1 - op.arg, # 'EXTENDED_ARG': 'CALL_FUNCTION_KW': lambda op: 1 - _call_function_argc(op.arg), } if sys.version >= '3.5': result.update({ 'BEFORE_ASYNC_WITH': 0, 'SETUP_ASYNC_WITH': 0, # Coroutine operations 'GET_YIELD_FROM_ITER': 0, 'GET_AWAITABLE': 0, 'GET_AITER': 0, 'GET_ANEXT': 0, }) if sys.version <= '3.5': result.update({ 'CALL_FUNCTION_VAR': lambda op: 1 - _call_function_argc(op.arg), 'CALL_FUNCTION_VAR_KW': lambda op: 1 - _call_function_argc(op.arg), }) for code in dis.opmap.keys(): if code.startswith('UNARY_'): result[code] = 0 elif code.startswith('BINARY_') or code.startswith('INPLACE_'): result[code] = -1 return result" 265,"def get_stackdelta(op): """""" Returns the number of elements that the instruction *op* adds to the stack. # Arguments op (dis.Instruction): The instruction to retrieve the stackdelta value for. # Raises KeyError: If the instruction *op* is not supported. """""" res = opstackd[op.opname] if callable(res): res = res(op) return res" 266,"def get_assigned_name(frame): """""" Checks the bytecode of *frame* to find the name of the variable a result is being assigned to and returns that name. Returns the full left operand of the assignment. Raises a #ValueError if the variable name could not be retrieved from the bytecode (eg. if an unpack sequence is on the left side of the assignment). > **Known Limitations**: The expression in the *frame* from which this > function is called must be the first part of that expression. For > example, `foo = [get_assigned_name(get_frame())] + [42]` works, > but `foo = [42, get_assigned_name(get_frame())]` does not! ```python >>> var = get_assigned_name(sys._getframe()) >>> assert var == 'var' ``` __Available in Python 3.4, 3.5__ """""" SEARCHING, MATCHED = 1, 2 state = SEARCHING result = '' stacksize = 0 for op in dis.get_instructions(frame.f_code): if state == SEARCHING and op.offset == frame.f_lasti: if not op.opname.startswith('CALL_FUNCTION'): raise RuntimeError('get_assigned_name() requires entry at CALL_FUNCTION') state = MATCHED # For a top-level expression, the stack-size should be 1 after # the function at which we entered was executed. stacksize = 1 elif state == MATCHED: # Update the would-be size of the stack after this instruction. # If we're at zero, we found the last instruction of the expression. try: stacksize += get_stackdelta(op) except KeyError: raise RuntimeError('could not determined assigned name, instruction ' '{} is not supported'.format(op.opname)) if stacksize == 0: if op.opname not in ('STORE_NAME', 'STORE_ATTR', 'STORE_GLOBAL', 'STORE_FAST'): raise ValueError('expression is not assigned or branch is not first part of the expression') return result + op.argval elif stacksize < 0: raise ValueError('not a top-level expression') if op.opname.startswith('CALL_FUNCTION'): # Chained or nested function call. raise ValueError('inside a chained or nested function call') elif op.opname == 'LOAD_ATTR': result += op.argval + '.' if not result: raise RuntimeError('last frame instruction not found') assert False" 267,"def load_actions(spec, group=None, expr_parser=None): """"""Each item can be an action name as a string or a dict. When using a dict, one key/item pair must be the action name and its options and the rest action decorator names and their options. 
Example: load_actions([""login_required"", {""flash"": {""message"": ""hello world"", ""label"": ""warning""}}]) """""" if expr_parser is None: expr_parser = ExpressionParser() actions = ActionList() for name in spec: options = {} as_ = None decorators = [] if isinstance(name, dict): actionspec = dict(name) as_ = actionspec.pop(""as"", None) for dec, dec_cls in action_decorators: if dec in actionspec: decorators.append((dec_cls, expr_parser.compile(actionspec.pop(dec)))) name, options = actionspec.popitem() if options: options = expr_parser.compile(options) if isinstance(name, Action): action = name elif isinstance(name, ActionFunction): action = name.action else: action = action_resolver.resolve_or_delayed(name, options, group, as_) for dec_cls, arg in decorators: action = dec_cls(action, arg) actions.append(action) return actions" 268,"def load_grouped_actions(spec, default_group=None, key_prefix=""actions"", pop_keys=False, expr_parser=None): """"""Instanciates actions from a dict. Will look for a key name key_prefix and for key starting with key_prefix followed by a dot and a group name. A group name can be any string and will can be used later to filter actions. Values associated to these keys should be lists that will be loaded using load_actions() """""" actions = ActionList() if expr_parser is None: expr_parser = ExpressionParser() for key in spec.keys(): if key != key_prefix and not key.startswith(key_prefix + "".""): continue group = default_group if ""."" in key: (_, group) = key.split(""."") actions.extend(load_actions(spec[key], group, expr_parser)) if pop_keys: spec.pop(key) return actions" 269,"def create_action_from_dict(name, spec, base_class=ActionsAction, metaclass=type, pop_keys=False): """"""Creates an action class based on a dict loaded using load_grouped_actions() """""" actions = load_grouped_actions(spec, pop_keys=pop_keys) attrs = {""actions"": actions, ""name"": name} if ""as"" in spec: attrs[""as_""] = spec[""as""] if pop_keys: del spec[""as""] for k in (""requires"", ""methods"", ""defaults"", ""default_option""): if k in spec: attrs[k] = spec[k] if pop_keys: del spec[k] return metaclass(name, (base_class,), attrs)" 270,"def plot_stat_summary(df, fig=None): ''' Plot stats grouped by test capacitor load _and_ frequency. In other words, we calculate the mean of all samples in the data frame for each test capacitance and frequency pairing, plotting the following stats: - Root mean squared error - Coefficient of variation - Bias ## [Coefficient of variation][1] ## > In probability theory and statistics, the coefficient of > variation (CV) is a normalized measure of dispersion of a > probability distribution or frequency distribution. It is defined > as the ratio of the standard deviation to the mean. [1]: http://en.wikipedia.org/wiki/Coefficient_of_variation ''' if fig is None: fig = plt.figure(figsize=(8, 8)) # Define a subplot layout, 3 rows, 2 columns grid = GridSpec(3, 2) stats = calculate_stats(df, groupby=['test_capacitor', 'frequency']).dropna() for i, stat in enumerate(['RMSE %', 'cv %', 'bias %']): axis = fig.add_subplot(grid[i, 0]) axis.set_title(stat) # Plot a colormap to show how the statistical value changes # according to frequency/capacitance pairs. plot_colormap(stats, stat, axis=axis, fig=fig) axis = fig.add_subplot(grid[i, 1]) axis.set_title(stat) # Plot a histogram to show the distribution of statistical # values across all frequency/capacitance pairs. 
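# Sketch of the coefficient of variation described in the docstring above
# (std / mean, here in percent), computed per (test_capacitor, frequency) group
# with plain pandas. calculate_stats() is defined elsewhere, so the measured
# column name 'C' and the sample values are assumptions for illustration only.
import numpy as np
import pandas as pd

sample = pd.DataFrame({
    'test_capacitor': [10e-12] * 4 + [100e-12] * 4,
    'frequency':      [1e3, 1e3, 10e3, 10e3] * 2,
    'C':              np.random.normal(loc=10e-12, scale=0.2e-12, size=8),
})
grouped = sample.groupby(['test_capacitor', 'frequency'])['C']
cv_percent = 100 * grouped.std() / grouped.mean()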
try: axis.hist(stats[stat].values, bins=50) except AttributeError: print stats[stat].describe() fig.tight_layout()" 271,"def calculate_inverse_document_frequencies(self): """"""Q.calculate_inverse_document_frequencies() -- measures how much information the term provides, i.e. whether the term is common or rare across all documents. This is obtained by dividing the total number of documents by the number of documents containing the term, and then taking the logarithm of that quotient. """""" for doc in self.processed_corpus: for word in doc: self.inverse_document_frequencies[word] += 1 for key,value in self.inverse_document_frequencies.iteritems(): idf = log((1.0 * len(self.corpus)) / value) self.inverse_document_frequencies[key] = idf" 272,"def calculate_term_frequencies(self): """"""Q.calculate_term_frequencies() -- calculate the number of times each term t occurs in document d. """""" for doc in self.processed_corpus: term_frequency_doc = defaultdict(int) for word in doc: term_frequency_doc[word] += 1 for key,value in term_frequency_doc.iteritems(): term_frequency_doc[key] = (1.0 * value) / len(doc) self.term_frequencies.append(term_frequency_doc)" 273,"def match_query_to_corpus(self): """"""Q.match_query_to_corpus() -> index -- return the matched corpus index of the user query """""" ranking = [] for i,doc in enumerate(self.processed_corpus): rank = 0.0 for word in self.processed_query: if word in doc: rank += self.term_frequencies[i][word] * self.inverse_document_frequencies[word] ranking.append((rank,i)) matching_corpus_index = 0 max_rank = 0 for rank,index in ranking: if rank > max_rank: matching_corpus_index = index max_rank = rank return matching_corpus_index" 274,"def process_corpus(self): """"""Q.process_corpus() -- processes the queries defined by us, by tokenizing, stemming, and removing stop words. """""" for doc in self.corpus_list: doc = wt(doc) sentence = [] for word in doc: if word not in self.stop_words and word not in self.punctuation: word = self.stemmer.stem(word) sentence.append(word) self.processed_corpus.append(sentence)" 275,"def process_query(self): """"""Q.process_query() -- processes the user query, by tokenizing and stemming words. 
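# Self-contained sketch of the tf and idf definitions used by the two methods
# above: idf(t) = log(N / df(t)), tf(t, d) = count(t in d) / len(d). The corpus
# is made up; set(doc) counts each document once per term, as the idf docstring
# describes (the method itself increments once per occurrence).
from collections import defaultdict
from math import log

corpus = [['price', 'of', 'gold'], ['gold', 'ring'], ['price', 'list']]

doc_freq = defaultdict(int)
for doc in corpus:
    for word in set(doc):
        doc_freq[word] += 1
idf = {word: log(float(len(corpus)) / df) for word, df in doc_freq.items()}

tf_per_doc = []
for doc in corpus:
    counts = defaultdict(int)
    for word in doc:
        counts[word] += 1
    tf_per_doc.append({w: float(c) / len(doc) for w, c in counts.items()})
# The ranking in match_query_to_corpus() sums tf_per_doc[i][t] * idf[t] over the
# query terms t for each document i and keeps the highest-scoring index.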
"""""" self.query = wt(self.query) self.processed_query = [] for word in self.query: if word not in self.stop_words and word not in self.punctuation: self.processed_query.append(self.stemmer.stem(word))" 276,"def query(self, query): """"""Q.query(query string) -> category string -- return the matched category for any user query """""" self.query = query self.process_query() matching_corpus_index = self.match_query_to_corpus() return self.category_list[matching_corpus_index].strip()" 277,"def load_manifest(raw_manifest, namespace=None, **kwargs): """""" wrapper method which generates the manifest from various sources """""" if isinstance(raw_manifest, configparser.RawConfigParser): return Manifest(raw_manifest) manifest = create_configparser() if not manifest.has_section('config'): manifest.add_section('config') _load_manifest_interpret_source(manifest, raw_manifest, **kwargs) return Manifest(manifest, namespace=namespace)" 278,"def _load_manifest_interpret_source(manifest, source, username=None, password=None, verify_certificate=True, do_inherit=True): """""" Interpret the , and load the results into """""" try: if isinstance(source, string_types): if source.startswith(""http""): # if manifest is a url _load_manifest_from_url(manifest, source, verify_certificate=verify_certificate, username=username, password=password) else: _load_manifest_from_file(manifest, source) if not manifest.has_option('config', 'source'): manifest.set('config', 'source', str(source)) else: # assume source is a file pointer manifest.readfp(source) if manifest.has_option('config', 'extends') and do_inherit: parent_manifest = configparser.RawConfigParser() _load_manifest_interpret_source(parent_manifest, manifest.get('config', 'extends'), username=username, password=password, verify_certificate=verify_certificate) for s in parent_manifest.sections(): for k, v in parent_manifest.items(s): if not manifest.has_option(s, k): manifest.set(s, k, v) except configparser.Error: logger.debug("""", exc_info=True) error_message = sys.exc_info()[1] raise ManifestException(""Unable to parse manifest!: {0}"".format(error_message))" 279,"def _load_manifest_from_url(manifest, url, verify_certificate=True, username=None, password=None): """""" load a url body into a manifest """""" try: if username and password: manifest_file_handler = StringIO(lib.authenticated_get(username, password, url, verify=verify_certificate).decode(""utf-8"")) else: manifest_file_handler = StringIO(lib.cleaned_request( 'get', url, verify=verify_certificate ).text) manifest.readfp(manifest_file_handler) except requests.exceptions.RequestException: logger.debug("""", exc_info=True) error_message = sys.exc_info()[1] raise ManifestException(""There was an error retrieving {0}!\n {1}"".format(url, str(error_message)))" 280,"def _load_manifest_from_file(manifest, path): """""" load manifest from file """""" path = os.path.abspath(os.path.expanduser(path)) if not os.path.exists(path): raise ManifestException(""Manifest does not exist at {0}!"".format(path)) manifest.read(path) if not manifest.has_option('config', 'source'): manifest.set('config', 'source', str(path))" 281,"def formula_sections(self): """""" Return all sections related to a formula, re-ordered according to the ""depends"" section. """""" if self.dtree is not None: return self.dtree.order else: return [s for s in self.manifest.sections() if s != ""config""]" 282,"def is_affirmative(self, section, option): """""" Return true if the section option combo exists and it is set to a truthy value. 
"""""" return self.has_option(section, option) and \ lib.is_affirmative(self.get(section, option))" 283,"def write(self, file_handle): """""" write the current state to a file manifest """""" for k, v in self.inputs.write_values().items(): self.set('config', k, v) self.set('config', 'namespace', self.namespace) self.manifest.write(file_handle)" 284,"def get_context_dict(self): """""" return a context dict of the desired state """""" context_dict = {} for s in self.sections(): for k, v in self.manifest.items(s): context_dict[""%s:%s"" % (s, k)] = v for k, v in self.inputs.values().items(): context_dict[""config:{0}"".format(k)] = v context_dict.update(self.additional_context_variables.items()) context_dict.update(dict([(""%s|escaped"" % k, re.escape(str(v) or """")) for k, v in context_dict.items()])) return context_dict" 285,"def get(self, section, key, default=MANIFEST_NULL_KEY): """""" Returns the value if it exist, or default if default is set """""" if not self.manifest.has_option(section, key) and default is not MANIFEST_NULL_KEY: return default return self.manifest.get(section, key)" 286,"def __parse_namespace(self): """""" Parse the namespace from various sources """""" if self.manifest.has_option('config', 'namespace'): return self.manifest.get('config', 'namespace') elif self.manifest.has_option('config', 'source'): return NAMESPACE_REGEX.search(self.manifest.get('config', 'source')).groups()[0] else: logger.warn('Could not parse namespace implicitely') return None" 287,"def __generate_dependency_tree(self): """""" Generate the dependency tree object """""" dependency_dict = {} for s in self.manifest.sections(): if s != ""config"": if self.manifest.has_option(s, 'depends'): dependency_list = [d.strip() for d in re.split('\n|,', self.manifest.get(s, 'depends'))] dependency_dict[s] = dependency_list else: dependency_dict[s] = [] try: return DependencyTree(dependency_dict) except DependencyTreeException: dte = sys.exc_info()[1] raise ManifestException(""Dependency tree for manifest is invalid! %s"" % str(dte))" 288,"def __substitute_objects(self, value, context_dict): """""" recursively substitute value with the context_dict """""" if type(value) == dict: return dict([(k, self.__substitute_objects(v, context_dict)) for k, v in value.items()]) elif type(value) == str: try: return value % context_dict except KeyError: e = sys.exc_info()[1] logger.warn(""Could not specialize %s! 
Error: %s"" % (value, e)) return value else: return value" 289,"def __setup_inputs(self): """""" Setup the inputs object """""" input_object = Inputs() # populate input schemas for s in self.manifest.sections(): if self.has_option(s, 'inputs'): input_object.add_inputs_from_inputstring(self.get(s, 'inputs')) # add in values for k, v in self.items('config'): if input_object.is_input(s): input_object.set_input(k, v) return input_object" 290,"def validate(self): """""" validates the feature configuration, and returns a list of errors (empty list if no error) validate should: * required variables * warn on unused variables errors should either be reported via self._log_error(), or raise an exception """""" if self.target: for k in self.target.keys(): if k in self.deprecated_options: self.logger.warn( self.deprecated_options[k].format(option=k, feature=self.feature_name)) elif (k not in self.valid_options and k not in self.required_options and '*' not in self.valid_options): self.logger.warn(""Unused option %s in %s!"" % (k, self.feature_name)) for k in self.required_options: if not self.target.has(k): self._log_error( ""Required option %s not present in feature %s!"" % (k, self.feature_name))" 291,"def should_run(self): """""" Returns true if the feature should run """""" should_run = True config = self.target or self.source if config.has('systems'): should_run = False valid_systems = [s.lower() for s in config.get('systems').split("","")] for system_type, param in [('is_osx', 'osx'), ('is_debian', 'debian')]: if param in valid_systems and getattr(system, system_type)(): should_run = True return should_run" 292,"def resolve(self): """""" Resolve differences between the target and the source configuration """""" if self.source and self.target: for key in self.source.keys(): if (key not in self.dont_carry_over_options and not self.target.has(key)): self.target.set(key, self.source.get(key))" 293,"def _log_error(self, message): """""" Log an error for the feature """""" key = (self.feature_name, self.target.get('formula')) self.environment.log_feature_error(key, ""ERROR: "" + message)" 294,"def _prompt_value(self, key, prompt_string, default=None, only_if_empty=True): """"""prompts the user for a value, and saves it to either the target or source manifest (whichever is appropriate for the phase) this method takes will default to the original value passed by the user in the case one exists. e.g. if a user already answered 'yes' to a question, it will use 'yes' as the default vs the one passed into this method. """""" main_manifest = self.target or self.source if only_if_empty and main_manifest.has(key): return main_manifest.get(key) prompt_default = default if self.source and self.source.has(key): prompt_default = self.source.get(key) main_manifest.set(key, lib.prompt(prompt_string, default=prompt_default))" 295,"def jinja_fragment_extension(tag, endtag=None, name=None, tag_only=False, allow_args=True, callblock_args=None): """"""Decorator to easily create a jinja extension which acts as a fragment. 
"""""" if endtag is None: endtag = ""end"" + tag def decorator(f): def parse(self, parser): lineno = parser.stream.next().lineno args = [] kwargs = [] if allow_args: args, kwargs = parse_block_signature(parser) call = self.call_method(""support_method"", args, kwargs, lineno=lineno) if tag_only: return nodes.Output([call], lineno=lineno) call_args = [] if callblock_args is not None: for arg in callblock_args: call_args.append(nodes.Name(arg, 'param', lineno=lineno)) body = parser.parse_statements(['name:' + endtag], drop_needle=True) return nodes.CallBlock(call, call_args, [], body, lineno=lineno) def support_method(self, *args, **kwargs): return f(*args, **kwargs) attrs = {""tags"": set([tag]), ""parse"": parse, ""support_method"": support_method} return type(name or f.__name__, (Extension,), attrs) return decorator" 296,"def jinja_block_as_fragment_extension(name, tagname=None, classname=None): """"""Creates a fragment extension which will just act as a replacement of the block statement. """""" if tagname is None: tagname = name if classname is None: classname = ""%sBlockFragmentExtension"" % name.capitalize() return type(classname, (BaseJinjaBlockAsFragmentExtension,), { ""tags"": set([tagname]), ""end_tag"": ""end"" + tagname, ""block_name"": name})" 297,"def dir_visitor(dirname, visitor): """""" _dir_visitor_ walk through all files in dirname, find directories and call the callable on them. :param dirname: Name of directory to start visiting, all subdirs will be visited :param visitor: Callable invoked on each dir visited """""" visitor(dirname) for obj in os.listdir(dirname): obj_path = os.path.join(dirname, obj) if os.path.isdir(obj_path): dir_visitor(obj_path, visitor)" 298,"def replicate_directory_tree(input_dir, output_dir): """""" _replicate_directory_tree_ clone dir structure under input_dir into output dir All subdirs beneath input_dir will be created under output_dir :param input_dir: path to dir tree to be cloned :param output_dir: path to new dir where dir structure will be created """""" def transplant_dir(target, dirname): x = dirname.replace(input_dir, target) if not os.path.exists(x): LOGGER.info('Creating: {}'.format(x)) os.makedirs(x) dir_visitor( input_dir, functools.partial(transplant_dir, output_dir) )" 299,"def find_templates(input_dir): """""" _find_templates_ traverse the input_dir structure and return a list of template files ending with .mustache :param input_dir: Path to start recursive search for mustache templates :returns: List of file paths corresponding to templates """""" templates = [] def template_finder(result, dirname): for obj in os.listdir(dirname): if obj.endswith('.mustache'): result.append(os.path.join(dirname, obj)) dir_visitor( input_dir, functools.partial(template_finder, templates) ) return templates" 300,"def find_copies(input_dir, exclude_list): """""" find files that are not templates and not in the exclude_list for copying from template to image """""" copies = [] def copy_finder(copies, dirname): for obj in os.listdir(dirname): pathname = os.path.join(dirname, obj) if os.path.isdir(pathname): continue if obj in exclude_list: continue if obj.endswith('.mustache'): continue copies.append(os.path.join(dirname, obj)) dir_visitor( input_dir, functools.partial(copy_finder, copies) ) return copies" 301,"def render_template(template_in, file_out, context): """""" _render_template_ Render a single template file, using the context provided and write the file out to the location specified #TODO: verify the template is completely rendered, no 
missing values """""" renderer = pystache.Renderer() result = renderer.render_path(template_in, context) with open(file_out, 'w') as handle: LOGGER.info('Rendering: {} to {}'.format(template_in, file_out)) handle.write(result) shutil.copymode(template_in, file_out)" 302,"def copy_file(src, target): """""" copy_file copy source to target """""" LOGGER.info(""Copying {} to {}"".format(src, target)) shutil.copyfile(src, target) shutil.copymode(src, target)" 303,"def process_templates(input_dir, target_dir, context): """""" _process_templates_ Given the input dir containing a set of template, clone the structure under that directory into the target dir using the context to process any mustache templates that are encountered """""" if not target_dir.endswith('/'): target_dir = ""{}/"".format(target_dir) if not os.path.exists(target_dir): LOGGER.info('Creating: {}'.format(target_dir)) os.makedirs(target_dir) replicate_directory_tree(input_dir, target_dir) templates = find_templates(input_dir) for templ in templates: output_file = templ.replace(input_dir, target_dir) output_file = output_file[:-len('.mustache')] render_template(templ, output_file, context)" 304,"def process_copies(input_dir, target_dir, excludes): """""" _process_copies_ Handles files to be copied across, assumes that dir structure has already been replicated """""" copies = find_copies(input_dir, excludes) for c in copies: output_file = c.replace(input_dir, target_dir) copy_file(c, output_file)" 305,"def newDevice(deviceJson, lupusec): """"""Create new device object for the given type."""""" type_tag = deviceJson.get('type') if not type_tag: _LOGGER.info('Device has no type') if type_tag in CONST.TYPE_OPENING: return LupusecBinarySensor(deviceJson, lupusec) elif type_tag in CONST.TYPE_SENSOR: return LupusecBinarySensor(deviceJson, lupusec) elif type_tag in CONST.TYPE_SWITCH: return LupusecSwitch(deviceJson, lupusec) else: _LOGGER.info('Device is not known') return None" 306,"def get_devices(self, refresh=False, generic_type=None): """"""Get all devices from Lupusec."""""" _LOGGER.info(""Updating all devices..."") if refresh or self._devices is None: if self._devices is None: self._devices = {} responseObject = self.get_sensors() if (responseObject and not isinstance(responseObject, (tuple, list))): responseObject = responseObject for deviceJson in responseObject: # Attempt to reuse an existing device device = self._devices.get(deviceJson['name']) # No existing device, create a new one if device: device.update(deviceJson) else: device = newDevice(deviceJson, self) if not device: _LOGGER.info('Device is unknown') continue self._devices[device.device_id] = device # We will be treating the Lupusec panel itself as an armable device. 
panelJson = self.get_panel() _LOGGER.debug(""Get the panel in get_devices: %s"", panelJson) self._panel.update(panelJson) alarmDevice = self._devices.get('0') if alarmDevice: alarmDevice.update(panelJson) else: alarmDevice = ALARM.create_alarm(panelJson, self) self._devices['0'] = alarmDevice # Now we will handle the power switches switches = self.get_power_switches() _LOGGER.debug( 'Get active the power switches in get_devices: %s', switches) for deviceJson in switches: # Attempt to reuse an existing device device = self._devices.get(deviceJson['name']) # No existing device, create a new one if device: device.update(deviceJson) else: device = newDevice(deviceJson, self) if not device: _LOGGER.info('Device is unknown') continue self._devices[device.device_id] = device if generic_type: devices = [] for device in self._devices.values(): if (device.type is not None and device.type in generic_type[0]): devices.append(device) return devices return list(self._devices.values())" 307,"def parse_from_dict(json_dict): """""" Given a Unified Uploader message, parse the contents and return a MarketHistoryList instance. :param dict json_dict: A Unified Uploader message as a dict. :rtype: MarketOrderList :returns: An instance of MarketOrderList, containing the orders within. """""" history_columns = json_dict['columns'] history_list = MarketHistoryList( upload_keys=json_dict['uploadKeys'], history_generator=json_dict['generator'], ) for rowset in json_dict['rowsets']: generated_at = parse_datetime(rowset['generatedAt']) region_id = rowset['regionID'] type_id = rowset['typeID'] history_list.set_empty_region(region_id, type_id, generated_at) for row in rowset['rows']: history_kwargs = _columns_to_kwargs( SPEC_TO_KWARG_CONVERSION, history_columns, row) historical_date = parse_datetime(history_kwargs['historical_date']) history_kwargs.update({ 'type_id': type_id, 'region_id': region_id, 'historical_date': historical_date, 'generated_at': generated_at, }) history_list.add_entry(MarketHistoryEntry(**history_kwargs)) return history_list" 308,"def encode_to_json(history_list): """""" Encodes this MarketHistoryList instance to a JSON string. :param MarketHistoryList history_list: The history instance to serialize. :rtype: str """""" rowsets = [] for items_in_region_list in history_list._history.values(): region_id = items_in_region_list.region_id type_id = items_in_region_list.type_id generated_at = gen_iso_datetime_str(items_in_region_list.generated_at) rows = [] for entry in items_in_region_list.entries: historical_date = gen_iso_datetime_str(entry.historical_date) # The order in which these values are added is crucial. It must # match STANDARD_ENCODED_COLUMNS. rows.append([ historical_date, entry.num_orders, entry.total_quantity, entry.low_price, entry.high_price, entry.average_price, ]) rowsets.append(dict( generatedAt = generated_at, regionID = region_id, typeID = type_id, rows = rows, )) json_dict = { 'resultType': 'history', 'version': '0.1', 'uploadKeys': history_list.upload_keys, 'generator': history_list.history_generator, 'currentTime': gen_iso_datetime_str(now_dtime_in_utc()), # This must match the order of the values in the row assembling portion # above this. 'columns': STANDARD_ENCODED_COLUMNS, 'rowsets': rowsets, } return json.dumps(json_dict)" 309,"def load(self, configuration): """""" Load a YAML configuration file. 
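# Shape of a minimal Unified Uploader history message as implied by
# parse_from_dict() and encode_to_json() above. The top-level keys and the
# rowset keys come from that code; the column names, IDs and row values are
# illustrative assumptions (the real names live in SPEC_TO_KWARG_CONVERSION /
# STANDARD_ENCODED_COLUMNS, which are not shown here).
sample_message = {
    'resultType': 'history',
    'version': '0.1',
    'uploadKeys': [{'name': 'some-relay', 'key': 'abc123'}],
    'generator': {'name': 'some-uploader', 'version': '1.0'},
    'currentTime': '2013-04-01T12:00:00+00:00',
    'columns': ['date', 'orders', 'quantity', 'low', 'high', 'average'],
    'rowsets': [{
        'generatedAt': '2013-04-01T11:55:00+00:00',
        'regionID': 10000002,
        'typeID': 34,
        'rows': [
            ['2013-03-31T00:00:00+00:00', 45, 318370, 6.91, 6.98, 6.95],
        ],
    }],
}
# parse_from_dict(sample_message) would then yield a MarketHistoryList holding
# one MarketHistoryEntry per row, keyed by region and type.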
:param configuration: Configuration filename or YAML string """""" try: self.config = yaml.load(open(configuration, ""rb"")) except IOError: try: self.config = yaml.load(configuration) except ParserError, e: raise ParserError('Error parsing config: %s' % e) # put customer data into self.customer if isinstance(self.config, dict): self.customer = self.config.get('customer', {}) self.instances_dict = self.config.get('instances', {}) self.web2py_dir = self.config.get('web2py', None) self.api_type = self.config.get('api_type', 'jsonrpc') self.valid = True else: self.customer = {} self.instances_dict = {} self.web2py_dir = None self.valid = False" 310,"def instances(self, test_type="".*""): """""" Returns a dict of all instances defined using a regex :param test_type: Regular expression to match for self.instance['test_type'] value names """""" import re data = {} for k, v in self.instances_dict.iteritems(): if re.match(test_type, v.get('test_type'), re.IGNORECASE): if 'filter_type' in v: hostfilter = { 'filtertype': v['filter_type'], 'content': v['filter_value'] } else: hostfilter = {} data[k] = { 'name': v.get('name'), 'start': v.get('start'), 'end': v.get('end'), 'url': v.get('url'), 'hostfilter': hostfilter, 'test_type': v.get('test_type') } return data" 311,"def none_to_blank(s, exchange=''): """"""Replaces NoneType with '' >>> none_to_blank(None, '') '' >>> none_to_blank(None) '' >>> none_to_blank('something', '') u'something' >>> none_to_blank(['1', None]) [u'1', ''] :param s: String to replace :para exchange: Character to return for None, default is blank ('') :return: If s is None, returns exchange """""" if isinstance(s, list): return [none_to_blank(z) for y, z in enumerate(s)] return exchange if s is None else unicode(s)" 312,"def make_good_url(url=None, addition=""/""): """"""Appends addition to url, ensuring the right number of slashes exist and the path doesn't get clobbered. 
>>> make_good_url('http://www.server.com/anywhere', 'else') 'http://www.server.com/anywhere/else' >>> make_good_url('http://test.com/', '/somewhere/over/the/rainbow/') 'http://test.com/somewhere/over/the/rainbow/' >>> make_good_url('None') 'None/' >>> make_good_url() >>> make_good_url({}) >>> make_good_url(addition='{}') :param url: URL :param addition: Something to add to the URL :return: New URL with addition"""""" if url is None: return None if isinstance(url, str) and isinstance(addition, str): return ""%s/%s"" % (url.rstrip('/'), addition.lstrip('/')) else: return None" 313,"def build_kvasir_url( proto=""https"", server=""localhost"", port=""8443"", base=""Kvasir"", user=""test"", password=""test"", path=KVASIR_JSONRPC_PATH): """""" Creates a full URL to reach Kvasir given specific data >>> build_kvasir_url('https', 'localhost', '8443', 'Kvasir', 'test', 'test') 'https://test@test/localhost:8443/Kvasir/api/call/jsonrpc' >>> build_kvasir_url() 'https://test@test/localhost:8443/Kvasir/api/call/jsonrpc' >>> build_kvasir_url(server='localhost', port='443', password='password', path='bad/path') 'https://test@password/localhost:443/Kvasir/bad/path' :param proto: Protocol type - http or https :param server: Hostname or IP address of Web2py server :param port: Port to reach server :param base: Base application name :param user: Username for basic auth :param password: Password for basic auth :param path: Full path to JSONRPC (/api/call/jsonrpc) :return: A full URL that can reach Kvasir's JSONRPC interface """""" uri = proto + '://' + user + '@' + password + '/' + server + ':' + port + '/' + base return make_good_url(uri, path)" 314,"def get_default(parser, section, option, default): """"""helper to get config settings with a default if not present"""""" try: result = parser.get(section, option) except (ConfigParser.NoSectionError, ConfigParser.NoOptionError): result = default return result" 315,"def set_db_application_prefix(prefix, sep=None): """"""Set the global app prefix and separator."""""" global _APPLICATION_PREFIX, _APPLICATION_SEP _APPLICATION_PREFIX = prefix if (sep is not None): _APPLICATION_SEP = sep" 316,"def find_by_index(self, cls, index_name, value): """"""Find records matching index query - defer to backend."""""" return self.backend.find_by_index(cls, index_name, value)" 317,"def humanTime(seconds): ''' Convert seconds to something more human-friendly ''' intervals = ['days', 'hours', 'minutes', 'seconds'] x = deltaTime(seconds=seconds) return ' '.join('{} {}'.format(getattr(x, k), k) for k in intervals if getattr(x, k))" 318,"def humanTimeConverter(): ''' Cope whether we're passed a time in seconds on the command line or via stdin ''' if len(sys.argv) == 2: print humanFriendlyTime(seconds=int(sys.argv[1])) else: for line in sys.stdin: print humanFriendlyTime(int(line)) sys.exit(0)" 319,"def train(self, data, **kwargs): """""" Calculate the standard deviations and means in the training data """""" self.data = data for i in xrange(0,data.shape[1]): column_mean = np.mean(data.icol(i)) column_stdev = np.std(data.icol(i)) #Have to do += or ""list"" type will fail (ie with append) self.column_means += [column_mean] self.column_stdevs += [column_stdev] self.data = self.predict(data)" 320,"def predict(self, test_data, **kwargs): """""" Adjust new input by the values in the training data """""" if test_data.shape[1]!=self.data.shape[1]: raise Exception(""Test data has different number of columns than training data."") for i in xrange(0,test_data.shape[1]): test_data.loc[:,i] = 
test_data.icol(i) - self.column_means[i] if int(self.column_stdevs[i])!=0: test_data.loc[:,i] = test_data.icol(i) / self.column_stdevs[i] return test_data" 321,"def action_decorator(name): """"""Decorator to register an action decorator """""" def decorator(cls): action_decorators.append((name, cls)) return cls return decorator" 322,"def load_global_config(config_path): """""" Load a global configuration object, and query for any required variables along the way """""" config = configparser.RawConfigParser() if os.path.exists(config_path): logger.debug(""Checking and setting global parameters..."") config.read(config_path) else: _initial_run() logger.info(""Unable to find a global sprinter configuration!"") logger.info(""Creating one now. Please answer some questions"" + "" about what you would like sprinter to do."") logger.info("""") # checks and sets sections if not config.has_section('global'): config.add_section('global') configure_config(config) write_config(config, config_path) return config" 323,"def print_global_config(global_config): """""" print the global configuration """""" if global_config.has_section('shell'): print(""\nShell configurations:"") for shell_type, set_value in global_config.items('shell'): print(""{0}: {1}"".format(shell_type, set_value)) if global_config.has_option('global', 'env_source_rc'): print(""\nHave sprinter env source rc: {0}"".format( global_config.get('global', 'env_source_rc')))" 324,"def create_default_config(): """""" Create a default configuration object, with all parameters filled """""" config = configparser.RawConfigParser() config.add_section('global') config.set('global', 'env_source_rc', False) config.add_section('shell') config.set('shell', 'bash', ""true"") config.set('shell', 'zsh', ""true"") config.set('shell', 'gui', ""true"") return config" 325,"def _initial_run(): """""" Check things during the initial setting of sprinter's global config """""" if not system.is_officially_supported(): logger.warn(warning_template + ""===========================================================\n"" + ""Sprinter is not officially supported on {0}! Please use at your own risk.\n\n"".format(system.operating_system()) + ""You can find the supported platforms here:\n"" + ""(http://sprinter.readthedocs.org/en/latest/index.html#compatible-systems)\n\n"" + ""Conversely, please help us support your system by reporting on issues\n"" + ""(http://sprinter.readthedocs.org/en/latest/faq.html#i-need-help-who-do-i-talk-to)\n"" + ""==========================================================="") else: logger.info( ""\nThanks for using \n"" + ""="" * 60 + sprinter_template + ""="" * 60 )" 326,"def _configure_shell(config): """""" Checks and queries values for the shell """""" config.has_section('shell') or config.add_section('shell') logger.info( ""What shells or environments would you like sprinter to work with?\n"" ""(Sprinter will not try to inject into environments not specified here.)\n"" ""If you specify 'gui', sprinter will attempt to inject it's state into graphical programs as well.\n"" ""i.e. environment variables sprinter set will affect programs as well, not just shells\n"" ""WARNING: injecting into the GUI can be very dangerous. 
it usually requires a restart\n"" "" to modify any environmental configuration."" ) environments = list(enumerate(sorted(SHELL_CONFIG), start=1)) logger.info(""[0]: All, "" + "", "".join([""[%d]: %s"" % (index, val) for index, val in environments])) desired_environments = lib.prompt(""type the environment, comma-separated"", default=""0"") for index, val in environments: if str(index) in desired_environments or ""0"" in desired_environments: config.set('shell', val, 'true') else: config.set('shell', val, 'false')" 327,"def _configure_env_source_rc(config): """""" Configures wether to have .env source .rc """""" config.set('global', 'env_source_rc', False) if system.is_osx(): logger.info(""On OSX, login shells are default, which only source sprinter's 'env' configuration."") logger.info(""I.E. environment variables would be sourced, but not shell functions "" + ""or terminal status lines."") logger.info(""The typical solution to get around this is to source your rc file (.bashrc, .zshrc) "" + ""from your login shell."") env_source_rc = lib.prompt(""would you like sprinter to source the rc file too?"", default=""yes"", boolean=True) config.set('global', 'env_source_rc', env_source_rc)" 328,"def get_members(self): """"""Return all members in the group as CSHMember objects"""""" res = self.__con__.search_s( self.__ldap_base_dn__, ldap.SCOPE_SUBTREE, ""(memberof=%s)"" % self.__dn__, ['uid']) ret = [] for val in res: val = val[1]['uid'][0] try: ret.append(val.decode('utf-8')) except UnicodeDecodeError: ret.append(val) except KeyError: continue return [CSHMember(self.__lib__, result, uid=True) for result in ret]" 329,"def check_member(self, member, dn=False): """"""Check if a Member is in the bound group. Arguments: member -- the CSHMember object (or distinguished name) of the member to check against Keyword arguments: dn -- whether or not member is a distinguished name """""" if dn: res = self.__con__.search_s( self.__dn__, ldap.SCOPE_BASE, ""(member=%s)"" % dn, ['ipaUniqueID']) else: res = self.__con__.search_s( self.__dn__, ldap.SCOPE_BASE, ""(member=%s)"" % member.get_dn(), ['ipaUniqueID']) return len(res) > 0" 330,"def add_member(self, member, dn=False): """"""Add a member to the bound group Arguments: member -- the CSHMember object (or distinguished name) of the member Keyword arguments: dn -- whether or not member is a distinguished name """""" if dn: if self.check_member(member, dn=True): return mod = (ldap.MOD_ADD, 'member', member.encode('ascii')) else: if self.check_member(member): return mod = (ldap.MOD_ADD, 'member', member.get_dn().encode('ascii')) if self.__lib__.__batch_mods__: self.__lib__.enqueue_mod(self.__dn__, mod) elif not self.__lib__.__ro__: mod_attrs = [mod] self.__con__.modify_s(self.__dn__, mod_attrs) else: print(""ADD VALUE member = {} FOR {}"".format(mod[2], self.__dn__))" 331,"def read_object_from_yaml(desired_type: Type[Any], file_object: TextIOBase, logger: Logger, fix_imports: bool = True, errors: str = 'strict', *args, **kwargs) -> Any: """""" Parses a yaml file. :param desired_type: :param file_object: :param logger: :param fix_imports: :param errors: :param args: :param kwargs: :return: """""" return yaml.load(file_object)" 332,"def read_collection_from_yaml(desired_type: Type[Any], file_object: TextIOBase, logger: Logger, conversion_finder: ConversionFinder, fix_imports: bool = True, errors: str = 'strict', **kwargs) -> Any: """""" Parses a collection from a yaml file. 
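# What the collection reader above boils down to: yaml.load() on the file
# object, followed by a conversion of the values to the desired collection type
# (handled by ConversionFinder, not shown). Stripped down to plain PyYAML on an
# inline document, with safe_load used for this standalone example.
import io
import yaml

document = io.StringIO(u'- {name: alpha, weight: 1.5}\n- {name: beta, weight: 2.0}\n')
items = yaml.safe_load(document)   # a list of two dicts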
:param desired_type: :param file_object: :param logger: :param fix_imports: :param errors: :param args: :param kwargs: :return: """""" res = yaml.load(file_object) # convert if required return ConversionFinder.convert_collection_values_according_to_pep(res, desired_type, conversion_finder, logger, **kwargs)" 333,"def get_default_yaml_parsers(parser_finder: ParserFinder, conversion_finder: ConversionFinder) -> List[AnyParser]: """""" Utility method to return the default parsers able to parse an object from a file. Note that MultifileObjectParser is not provided in this list, as it is already added in a hardcoded way in RootParser :return: """""" return [# yaml for any object SingleFileParserFunction(parser_function=read_object_from_yaml, streaming_mode=True, supported_exts={'.yaml','.yml'}, supported_types={AnyObject}, ), # yaml for collection objects SingleFileParserFunction(parser_function=read_collection_from_yaml, custom_name='read_collection_from_yaml', streaming_mode=True, supported_exts={'.yaml','.yml'}, supported_types={Tuple, Dict, List, Set}, function_args={'conversion_finder': conversion_finder} ) ]" 334,"def pass_feature(*feature_names): """"""Injects a feature instance into the kwargs """""" def decorator(f): @functools.wraps(f) def wrapper(*args, **kwargs): for name in feature_names: kwargs[name] = feature_proxy(name) return f(*args, **kwargs) return wrapper return decorator" 335,"def extract_tar(url, target_dir, additional_compression="""", remove_common_prefix=False, overwrite=False): """""" extract a targz and install to the target directory """""" try: if not os.path.exists(target_dir): os.makedirs(target_dir) tf = tarfile.TarFile.open(fileobj=download_to_bytesio(url)) if not os.path.exists(target_dir): os.makedirs(target_dir) common_prefix = os.path.commonprefix(tf.getnames()) if not common_prefix.endswith('/'): common_prefix += ""/"" for tfile in tf.getmembers(): if remove_common_prefix: tfile.name = tfile.name.replace(common_prefix, """", 1) if tfile.name != """": target_path = os.path.join(target_dir, tfile.name) if target_path != target_dir and os.path.exists(target_path): if overwrite: remove_path(target_path) else: continue tf.extract(tfile, target_dir) except OSError: e = sys.exc_info()[1] raise ExtractException(str(e)) except IOError: e = sys.exc_info()[1] raise ExtractException(str(e))" 336,"def remove_path(target_path): """""" Delete the target path """""" if os.path.isdir(target_path): shutil.rmtree(target_path) else: os.unlink(target_path)" 337,"def ids(cls, values, itype=None): ''' http://www.elasticsearch.org/guide/reference/query-dsl/ids-filter.html Filters documents that only have the provided ids. Note, this filter does not require the _id field to be indexed since it works using the _uid field. 
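Illustrative example (added for clarity, not from the original docstring; it follows the calling pattern shown for the other filter helpers in this class): > id_filter = ElasticFilter().ids([1, 4, 100], itype='tweet')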
''' instance = cls(ids={'values': values}) if itype is not None: instance['ids']['type'] = itype return instance" 338,"def geo_bounding_box(cls, field, top_left, bottom_right): ''' http://www.elasticsearch.org/guide/reference/query-dsl/geo-bounding-box-filter.html > bounds = ElasticFilter().geo_bounding_box('pin.location', [40.73, -74.1], [40.717, -73.99]) > bounds = ElasticFilter().geo_bounding_box('pin.location', dict(lat=40.73, lon=-74.1), dict(lat=40.717, lon=-73.99)) > bounds = ElasticFilter().geo_bounding_box('pin.location', ""40.73, -74.1"", ""40.717, -73.99"") And geohash > bounds = ElasticFilter().geo_bounding_box('pin.location', ""drm3btev3e86"", ""drm3btev3e86"") ''' return cls(geo_bounding_box={field: {'top_left': top_left, 'bottom_right': bottom_right}})" 339,"def geo_distance(cls, field, center, distance, distance_type=None): ''' http://www.elasticsearch.org/guide/reference/query-dsl/geo-distance-filter.html Filters documents that include only hits that exists within a specific distance from a geo point. field - Field name center - Center point (Geo point) distance - String for the distance distance_type - (arc | plane) How to compute the distance. Can either be arc (better precision) or plane (faster). Defaults to arc > bounds = ElasticFilter().geo_distance('pin.location', [40.73, -74.1], '300km') ''' instance = cls(geo_distance={'distance': distance, field: center}) if distance_type is not None: instance['geo_distance']['distance_type'] = distance_type return instance" 340,"def geo_distance_range(cls, field, center, from_distance, to_distance, distance_type=None): ''' http://www.elasticsearch.org/guide/reference/query-dsl/geo-distance-range-filter.html Filters documents that exists within a range from a specific point ''' instance = cls(geo_distance_range={'from': from_distance, 'to': to_distance, field: center}) if distance_type is not None: instance['geo_distance_range']['distance_type'] = distance_type return instance" 341,"def numeric_range(cls, field, from_value, to_value, include_lower=None, include_upper=None): ''' http://www.elasticsearch.org/guide/reference/query-dsl/numeric-range-filter.html Filters documents with fields that have values within a certain numeric range. Similar to range filter, except that it works only with numeric values, and the filter execution works differently. ''' instance = cls(numeric_range={field: {'from': from_value, 'to': to_value}}) if include_lower is not None: instance['numeric_range'][field]['include_lower'] = include_lower if include_upper is not None: instance['numeric_range'][field]['include_upper'] = include_upper return instance" 342,"def range(cls, field, from_value=None, to_value=None, include_lower=None, include_upper=None): ''' http://www.elasticsearch.org/guide/reference/query-dsl/range-filter.html Filters documents with fields that have terms within a certain range. Similar to range query, except that it acts as a filter. Can be placed within queries that accept a filter. 
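Illustrative example (added for clarity; assumes the same calling pattern as the other filter helpers above): > age_filter = ElasticFilter().range('age', from_value=10, to_value=20, include_upper=True)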
''' instance = cls({'range': {field: {}}}) if from_value is not None: instance['range'][field]['from'] = from_value if to_value is not None: instance['range'][field]['to'] = to_value if include_lower is not None: instance['range'][field]['include_lower'] = include_lower if include_upper is not None: instance['range'][field]['include_upper'] = include_upper return instance" 343,"def save(self, obj, id_code): """""" Save an object, and use id_code in the filename obj - any object id_code - unique identifier """""" filestream = open('{0}/{1}'.format(self.data_path, id_code), 'wb') pickle.dump(obj, filestream) filestream.close()" 344,"def load(self, id_code): """""" Loads a workflow identified by id_code id_code - unique identifier, previously must have called save with same id_code """""" filestream = open('{0}/{1}'.format(self.data_path, id_code), 'rb') workflow = pickle.load(filestream) return workflow" 345,"def init(self): """"""Initialize the `todo` file. If the file exists, populate self.todos and record the current maximum index of the todos: when a new todo is added, its `idx` is simply `self.current_max_idx + 1`. """""" if os.path.isdir(self.path): raise InvalidTodoFile if os.path.exists(self.path): with open(self.path, 'r') as f: tls = [tl.strip() for tl in f if tl] todos = list(map(_todo_from_file, tls)) self.todos = todos for todo in todos: if self.current_max_idx < todo['idx']: self.current_max_idx = todo['idx'] else: logger.warning('No todo file found, initializing an empty todo file') with open(self.path, 'w') as f: f.flush()" 346,"def _show(self, status=None, idx=None): """"""Show formatted todos. :param status: which status's todos to show. Default is None, meaning show all. """""" _show('', 50) if not self.todos: self._show_no_todos() elif idx is not None: for todo in self.todos: if todo['idx'] == idx: self._show_todos(todo) elif status is not None: if status not in STATUS_CODE: raise InvalidTodoStatus _todos = [] for todo in self.todos: if todo['status'] == status: _todos.append(todo) if not _todos: self._show_no_todos(text_fix='No {} todos...'.format( STATUS_CODE.get(status, None))) else: for todo in _todos: self._show_todos(todo) else: for todo in self.todos: self._show_todos(todo) _show('', 50)" 347,"def write(self, delete_if_empty=False): """"""flush todos to file :param delete_if_empty: delete if todo is empty """""" with open(self.path, 'w') as f: if not self.todos: f.flush() else: for todo in _todo_to_file(self.todos): f.write(todo)" 348,"def read_object_from_pickle(desired_type: Type[T], file_path: str, encoding: str, fix_imports: bool = True, errors: str = 'strict', *args, **kwargs) -> Any: """""" Parses a pickle file. 
:param desired_type: :param file_path: :param encoding: :param fix_imports: :param errors: :param args: :param kwargs: :return: """""" import pickle file_object = open(file_path, mode='rb') try: return pickle.load(file_object, fix_imports=fix_imports, encoding=encoding, errors=errors) finally: file_object.close()" 349,"def should_display_warnings_for(to_type): """""" Central method where we control whether warnings should be displayed """""" if not hasattr(to_type, '__module__'): return True elif to_type.__module__ in {'builtins'} or to_type.__module__.startswith('parsyfiles') \ or to_type.__name__ in {'DataFrame'}: return False elif issubclass(to_type, int) or issubclass(to_type, str) \ or issubclass(to_type, float) or issubclass(to_type, bool): return False else: return True" 350,"def _is_valid_for_dict_to_object_conversion(strict_mode: bool, from_type: Type, to_type: Type) -> bool: """""" Returns true if the provided types are valid for dict_to_object conversion Explicitly declare that we are not able to parse collections nor able to create an object from a dictionary if the object's constructor is non correctly PEP484-specified. None should be treated as a Joker here (but we know that never from_type and to_type will be None at the same time) :param strict_mode: :param from_type: :param to_type: :return: """""" # cache previous results try: res, subclasses_hash = _cache_valid_for_dict_to_object[to_type][strict_mode] # Check if are any new subclasses are available if not strict_mode and to_type is not None and not is_any_type(to_type): if hash(tuple(get_all_subclasses(to_type))) != subclasses_hash: raise KeyError('fake error to recompute the cache entry') except KeyError: res = __is_valid_for_dict_to_object_conversion(strict_mode=strict_mode, from_type=from_type, to_type=to_type) # Store an entry in the cache containing the result and the hash of the subclasses list subclasses_hash = None if not strict_mode and to_type is not None and not is_any_type(to_type): subclasses_hash = hash(tuple(get_all_subclasses(to_type))) entry = (res, subclasses_hash) try: _cache_valid_for_dict_to_object[to_type][strict_mode] = entry except KeyError: _cache_valid_for_dict_to_object[to_type] = {strict_mode: entry} return res" 351,"def __is_valid_for_dict_to_object_conversion(strict_mode: bool, from_type: Type, to_type: Type) -> bool: """""" Returns true if the provided types are valid for dict_to_object conversion Explicitly declare that we are not able to parse collections nor able to create an object from a dictionary if the object's constructor is non correctly PEP484-specified. None should be treated as a Joker here (but we know that never from_type and to_type will be None at the same time) :param strict_mode: :param from_type: :param to_type: :return: """""" # right now we're stuck with the default logger.. logr = default_logger if to_type is None or is_any_type(to_type): # explicitly handle the 'None' (joker) or 'any' type return True elif is_collection(to_type, strict=True): # if the destination type is 'strictly a collection' (not a subclass of a collection) we know that we can't # handle it here, the constructor is not pep484-typed return False else: # (1) Try the type itself try: # can we find enough pep-484 information in the constructor to be able to understand what is required ? 
get_constructor_attributes_types(to_type) return True except TypeInformationRequiredError as main_e: # # failed: we cant guess the required types of constructor arguments # if strict_mode: # # Warning and return NO # if should_display_warnings_for(to_type): # logr.warn('Object constructor signature for type {} does not allow parsyfiles to ' # 'automatically create instances from dict content. Caught {}: {}' # ''.format(get_pretty_type_str(to_type), type(main_e).__name__, main_e)) # return False # # # non-strict mode: (2) Check if any subclasses exist # subclasses = get_all_subclasses(to_type) # if len(subclasses) > GLOBAL_CONFIG.dict_to_object_subclass_limit: # logr.warn('WARNING: Type {} has {} subclasses, only {} will be tried by parsyfiles when attempting to ' # 'create it from a subclass. You can raise this limit by setting the appropriate option with ' # '`parsyfiles_global_config()`' # ''.format(to_type, len(subclasses), GLOBAL_CONFIG.dict_to_object_subclass_limit)) # # # Then for each subclass also try (with a configurable limit in nb of subclasses) # for subclass in subclasses[0:GLOBAL_CONFIG.dict_to_object_subclass_limit]: # try: # get_constructor_attributes_types(subclass) # # OK, but issue warning for the root type still # if should_display_warnings_for(to_type): # logr.warn('WARNING: Object constructor signature for type {} does not allow parsyfiles to ' # 'automatically create instances from dict content, but it can for at least one of ' # 'its subclasses ({}) so it might be ok for you. Caught {}: {}' # ''.format(get_pretty_type_str(to_type), get_pretty_type_str(subclass), # type(main_e).__name__, main_e)) # return True # except TypeInformationRequiredError as e: # # failed: we cant guess the required types of constructor arguments # if should_display_warnings_for(to_type): # logr.warn('WARNING: Object constructor signature for type {} does not allow parsyfiles to ' # 'automatically create instances from dict content. Caught {}: {}' # ''.format(subclass, type(e).__name__, e)) # # # Nothing succeeded if should_display_warnings_for(to_type): logr.warn('WARNING: Object constructor signature for type {} does not allow parsyfiles to ' 'automatically create instances from dict content. Caught {}: {}' ''.format(get_pretty_type_str(to_type), type(main_e).__name__, main_e)) return False" 352,"def dict_to_object(desired_type: Type[T], contents_dict: Dict[str, Any], logger: Logger, options: Dict[str, Dict[str, Any]], conversion_finder: ConversionFinder = None, is_dict_of_dicts: bool = False) -> T: """""" Utility method to create an object from a dictionary of constructor arguments. 
Constructor arguments that dont have the correct type are intelligently converted if possible :param desired_type: :param contents_dict: :param logger: :param options: :param conversion_finder: :param is_dict_of_dicts: :return: """""" check_var(desired_type, var_types=type, var_name='obj_type') check_var(contents_dict, var_types=dict, var_name='contents_dict') if is_collection(desired_type, strict=True): # if the destination type is 'strictly a collection' (not a subclass of a collection) we know that we can't # handle it here, the constructor is not pep484-typed raise TypeError('Desired object type \'' + get_pretty_type_str(desired_type) + '\' is a collection, ' 'so it cannot be created using this generic object creator') else: # Try the type itself # try: return _dict_to_object(desired_type, contents_dict, logger=logger, options=options, conversion_finder=conversion_finder, is_dict_of_dicts=is_dict_of_dicts)" 353,"def _dict_to_object(desired_type: Type[T], contents_dict: Dict[str, Any], logger: Logger, options: Dict[str, Dict[str, Any]], conversion_finder: ConversionFinder = None, is_dict_of_dicts: bool = False) -> T: """""" Utility method to create an object from a dictionary of constructor arguments. Constructor arguments that dont have the correct type are intelligently converted if possible :param desired_type: :param contents_dict: :param logger: :param options: :param conversion_finder: :param is_dict_of_dicts: :return: """""" # collect pep-484 information in the constructor to be able to understand what is required constructor_args_types_and_opt = get_constructor_attributes_types(desired_type) try: # for each attribute, convert the types of its parsed values if required dict_for_init = dict() for attr_name, provided_attr_value in contents_dict.items(): # check if this attribute name is required by the constructor if attr_name in constructor_args_types_and_opt.keys(): # check the theoretical type wanted by the constructor attr_type_required = constructor_args_types_and_opt[attr_name][0] # resolve forward references attr_type_required = resolve_forward_ref(attr_type_required) if not is_dict_of_dicts: if is_valid_pep484_type_hint(attr_type_required): # this will not fail if type information is not present;the attribute will only be used 'as is' full_attr_name = get_pretty_type_str(desired_type) + '.' + attr_name dict_for_init[attr_name] = ConversionFinder.try_convert_value(conversion_finder, full_attr_name, provided_attr_value, attr_type_required, logger, options) else: warn(""Constructor for type <{t}> has no valid PEP484 Type hint for attribute {att}, trying to "" ""use the parsed value in the dict directly"".format(t=get_pretty_type_str(desired_type), att=attr_name)) dict_for_init[attr_name] = provided_attr_value else: # in that mode, the attribute value itself is a dict, so the attribute needs to be built from that # dict first if isinstance(provided_attr_value, dict): # recurse : try to build this attribute from the dictionary provided. 
We need to know the type # for this otherwise we wont be able to call the constructor :) if (attr_type_required is None) or (attr_type_required is Parameter.empty): raise TypeInformationRequiredError.create_for_object_attributes(desired_type, attr_name, attr_type_required) elif not is_valid_pep484_type_hint(attr_type_required): raise InvalidPEP484TypeHint.create_for_object_attributes(desired_type, attr_name, attr_type_required) else: # we can build the attribute from the sub-dict dict_for_init[attr_name] = dict_to_object(attr_type_required, provided_attr_value, logger, options, conversion_finder=conversion_finder) else: raise ValueError('Error while trying to build object of type ' + str(desired_type) + ' from a ' 'dictionary of dictionaries. Entry \'' + attr_name + '\' is not a dictionary') else: if is_dict_of_dicts and attr_name is 'DEFAULT': # -- tolerate but ignore - this is probably due to a configparser # warning('Property name \'' + attr_name + '\' is not an attribute of the object constructor. <' # + get_pretty_type_str(desired_type) + '> constructor attributes are : ' # + list(set(constructor_args_types.keys()) - {'self'}) + '. However it is named DEFAULT') pass else: # the dictionary entry does not correspond to a valid attribute of the object raise InvalidAttributeNameForConstructorError.create(desired_type, list(set(constructor_args_types_and_opt.keys()) - {'self'}), attr_name) # create the object using its constructor try: return desired_type(**dict_for_init) except Exception as e: # Wrap into an Exception raise ObjectInstantiationException.create(desired_type, dict_for_init, e) except TypeError as e: raise CaughtTypeErrorDuringInstantiation.create(desired_type, contents_dict, e)" 354,"def print_dict(dict_name, dict_value, logger: Logger = None): """""" Utility method to print a named dictionary :param dict_name: :param dict_value: :return: """""" if logger is None: print(dict_name + ' = ') try: from pprint import pprint pprint(dict_value) except: print(dict_value) else: logger.info(dict_name + ' = ') try: from pprint import pformat logger.info(pformat(dict_value)) except: logger.info(dict_value)" 355,"def get_default_object_parsers(parser_finder: ParserFinder, conversion_finder: ConversionFinder) -> List[AnyParser]: """""" Utility method to return the default parsers able to parse an object from a file. 
Note that MultifileObjectParser is not provided in this list, as it is already added in a hardcoded way in RootParser :return: """""" return [SingleFileParserFunction(parser_function=read_object_from_pickle, streaming_mode=False, supported_exts={'.pyc'}, supported_types={AnyObject}), MultifileObjectParser(parser_finder, conversion_finder) ]" 356,"def get_default_object_converters(conversion_finder: ConversionFinder) \ -> List[Union[Converter[Any, Type[None]], Converter[Type[None], Any]]]: """""" Utility method to return the default converters associated to dict (from dict to other type, and from other type to dict) :return: """""" return [ ConverterFunction(from_type=b64str, to_type=AnyObject, conversion_method=base64_ascii_str_pickle_to_object), ConverterFunction(from_type=DictOfDict, to_type=Any, conversion_method=dict_to_object, custom_name='dict_of_dict_to_object', is_able_to_convert_func=_is_valid_for_dict_to_object_conversion, unpack_options=False, function_args={'conversion_finder': conversion_finder, 'is_dict_of_dicts': True}), ConverterFunction(from_type=dict, to_type=AnyObject, conversion_method=dict_to_object, custom_name='dict_to_object', unpack_options=False, is_able_to_convert_func=_is_valid_for_dict_to_object_conversion, function_args={'conversion_finder': conversion_finder, 'is_dict_of_dicts': False}) ]" 357,"def create(obj: PersistedObject, obj_type: Type[Any], arg_name: str): """""" Helper method provided because we actually can't put that in the constructor, it creates a bug in Nose tests https://github.com/nose-devs/nose/issues/725 :param obj: :param obj_type: :param arg_name: :return: """""" return MissingMandatoryAttributeFiles('Multifile object ' + str(obj) + ' cannot be built from constructor of ' 'type ' + get_pretty_type_str(obj_type) + ', mandatory constructor argument \'' + arg_name + '\'was not found on ' 'filesystem')" 358,"def create(item_type: Type[Any], constructor_atts: List[str], invalid_property_name: str): """""" Helper method provided because we actually can't put that in the constructor, it creates a bug in Nose tests https://github.com/nose-devs/nose/issues/725 :param item_type: :return: """""" return InvalidAttributeNameForConstructorError('Cannot parse object of type <' + get_pretty_type_str(item_type) + '> using the provided configuration file: configuration ' + 'contains a property name (\'' + invalid_property_name + '\')'\ + 'that is not an attribute of the object constructor. 
<' + get_pretty_type_str(item_type) + '> constructor attributes ' + 'are : ' + str(constructor_atts))" 359,"def create(item_type: Type[Any], constructor_args: Dict[str, Any], cause: Exception): """""" Helper method provided because we actually can't put that in the constructor, it creates a bug in Nose tests https://github.com/nose-devs/nose/issues/725 :param item_type: :return: """""" return ObjectInstantiationException('Error while building object of type <' + get_pretty_type_str(item_type) + '> using its constructor and parsed contents : ' + str(constructor_args) + ' : \n' + str(cause.__class__) + ' ' + str(cause) ).with_traceback(cause.__traceback__)" 360,"def create(desired_type: Type[Any], contents_dict: Dict, caught: Exception): """""" Helper method provided because we actually can't put that in the constructor, it creates a bug in Nose tests https://github.com/nose-devs/nose/issues/725 :param desired_type: :param contents_dict: :param caught: :return: """""" msg = 'Error while trying to instantiate object of type ' + str(desired_type) + ' using dictionary input_dict:'\ + 'Caught error message is : ' + caught.__class__.__name__ + ' : ' + str(caught) + '\n' try: from pprint import pformat msg += 'Dict provided was ' + pformat(contents_dict) except: msg += 'Dict provided was ' + str(contents_dict) return CaughtTypeErrorDuringInstantiation(msg).with_traceback(caught.__traceback__)" 361,"def is_able_to_parse_detailed(self, desired_type: Type[Any], desired_ext: str, strict: bool): """""" Explicitly declare that we are not able to parse collections :param desired_type: :param desired_ext: :param strict: :return: """""" if not _is_valid_for_dict_to_object_conversion(strict, None, None if desired_type is JOKER else desired_type): return False, None else: return super(MultifileObjectParser, self).is_able_to_parse_detailed(desired_type, desired_ext, strict)" 362,"def _get_parsing_plan_for_multifile_children(self, obj_on_fs: PersistedObject, desired_type: Type[Any], logger: Logger) -> Dict[str, Any]: """""" Simply inspects the required type to find the names and types of its constructor arguments. Then relies on the inner ParserFinder to parse each of them. :param obj_on_fs: :param desired_type: :param logger: :return: """""" if is_collection(desired_type, strict=True): # if the destination type is 'strictly a collection' (not a subclass of a collection) we know that we can't # handle it here, the constructor is not pep484-typed raise TypeError('Desired object type \'' + get_pretty_type_str(desired_type) + '\' is a collection, ' 'so it cannot be parsed with this default object parser') else: # First get the file children children_on_fs = obj_on_fs.get_multifile_children() # Try the type itself # try: return self.__get_parsing_plan_for_multifile_children(obj_on_fs, desired_type, children_on_fs, logger=logger)" 363,"def __get_parsing_plan_for_multifile_children(self, obj_on_fs: PersistedObject, desired_type: Type[Any], children_on_fs: Dict[str, PersistedObject], logger: Logger) \ -> Dict[str, Any]: """""" Simply inspects the required type to find the names and types of its constructor arguments. Then relies on the inner ParserFinder to parse each of them. 
:param obj_on_fs: :param desired_type: :param children_on_fs: :param logger: :return: """""" # -- (a) collect pep-484 information in the class constructor to be able to understand what is required constructor_args_types_and_opt = get_constructor_attributes_types(desired_type) # -- (b) plan to parse each attribute required by the constructor children_plan = dict() # results will be put in this object # --use sorting in order to lead to reproducible results in case of multiple errors for attribute_name, att_desc in sorted(constructor_args_types_and_opt.items()): attribute_is_mandatory = att_desc[1] attribute_type = att_desc[0] # get the child if attribute_name in children_on_fs.keys(): child_on_fs = children_on_fs[attribute_name] # find a parser t, parser_found = self.parser_finder.build_parser_for_fileobject_and_desiredtype(child_on_fs, attribute_type, logger=logger) # create a parsing plan children_plan[attribute_name] = parser_found.create_parsing_plan(t, child_on_fs, logger=logger, _main_call=False) else: if attribute_is_mandatory: raise MissingMandatoryAttributeFiles.create(obj_on_fs, desired_type, attribute_name) else: # we don't care : optional attribute # dont use warning since it does not show up nicely msg = 'NOT FOUND - This optional constructor attribute for type ' \ + get_pretty_type_str(desired_type) + ' was not found on file system, but this may be normal'\ ' - this message is displayed \'just in case\'.' if logger.isEnabledFor(DEBUG): logger.warning('(B) ' + obj_on_fs.get_pretty_child_location(attribute_name, blank_parent_part=True) + ': ' + msg) else: logger.warning('WARNING parsing [{loc}] as a [{typ}]: optional constructor attribute [{att}] ' 'not found on file system. This may be normal - this message is displayed \'just' ' in case\'.'.format( loc=obj_on_fs.get_pretty_location(blank_parent_part=False, append_file_ext=False), typ=get_pretty_type_str(desired_type), att=attribute_name)) return children_plan" 364,"def parsyfiles_global_config(multiple_errors_tb_limit: int = None, full_paths_in_logs: bool = None, dict_to_object_subclass_limit: int = None): """""" This is the method you should use to configure the parsyfiles library :param multiple_errors_tb_limit: the traceback size (default is 3) of individual parsers exceptions displayed when parsyfiles tries several parsing chains and all of them fail. :param full_paths_in_logs: if True, full file paths will be displayed in logs. Otherwise only the parent path will be displayed and children paths will be indented (default is False) :param dict_to_object_subclass_limit: the number of subclasses that the converter will try, when instantiating an object from a dictionary. 
Default is 50 :return: """""" if multiple_errors_tb_limit is not None: GLOBAL_CONFIG.multiple_errors_tb_limit = multiple_errors_tb_limit if full_paths_in_logs is not None: GLOBAL_CONFIG.full_paths_in_logs = full_paths_in_logs if dict_to_object_subclass_limit is not None: GLOBAL_CONFIG.dict_to_object_subclass_limit = dict_to_object_subclass_limit" 365,"def is_valid(self, context): """"""Checks through the previous_actions iterable if required actions have been executed """""" if self.requires: for r in self.requires: if not r in context.executed_actions: raise RequirementMissingError(""Action '%s' requires '%s'"" % (self.name, r)) return True" 366,"def get_file_contents(file_path): """"""Get the context of the file using full path name"""""" full_path = os.path.join(package_dir, file_path) return open(full_path, 'r').read()" 367,"def refresh(self): """"""Refresh a device"""""" # new_device = {} if self.type in CONST.BINARY_SENSOR_TYPES: response = self._lupusec.get_sensors() for device in response: if device['device_id'] == self._device_id: self.update(device) return device elif self.type == CONST.ALARM_TYPE: response = self._lupusec.get_panel() self.update(response) return response elif self.type == CONST.TYPE_POWER_SWITCH: response = self._lupusec.get_power_switches() for pss in response: if pss['device_id'] == self._device_id: self.update(pss) return pss" 368,"def update(self, json_state): """"""Update the json data from a dictionary. Only updates if it already exists in the device. """""" if self._type in CONST.BINARY_SENSOR_TYPES: self._json_state['status'] = json_state['status'] else: self._json_state.update( {k: json_state[k] for k in json_state if self._json_state.get(k)})" 369,"def desc(self): """"""Get a short description of the device."""""" return '{0} (ID: {1}) - {2} - {3}'.format( self.name, self.device_id, self.type, self.status)" 370,"def list(declared, undeclared): """"""List configured queues."""""" queues = current_queues.queues.values() if declared: queues = filter(lambda queue: queue.exists, queues) elif undeclared: queues = filter(lambda queue: not queue.exists, queues) queue_names = [queue.routing_key for queue in queues] queue_names.sort() for queue in queue_names: click.secho(queue)" 371,"def declare(queues): """"""Initialize the given queues."""""" current_queues.declare(queues=queues) click.secho( 'Queues {} have been declared.'.format( queues or current_queues.queues.keys()), fg='green' )" 372,"def purge_queues(queues=None): """"""Purge the given queues."""""" current_queues.purge(queues=queues) click.secho( 'Queues {} have been purged.'.format( queues or current_queues.queues.keys()), fg='green' )" 373,"def delete_queue(queues): """"""Delete the given queues."""""" current_queues.delete(queues=queues) click.secho( 'Queues {} have been deleted.'.format( queues or current_queues.queues.keys()), fg='green' )" 374,"def find_needed_formatter(input_format, output_format): """""" Find a data formatter given an input and output format input_format - needed input format. see utils.input.dataformats output_format - needed output format. 
see utils.input.dataformats """""" #Only take the formatters in the registry selected_registry = [re.cls for re in registry if re.category==RegistryCategories.formatters] needed_formatters = [] for formatter in selected_registry: #Initialize the formatter (needed so it can discover its formats) formatter_inst = formatter() if input_format in formatter_inst.input_formats and output_format in formatter_inst.output_formats: needed_formatters.append(formatter) if len(needed_formatters)>0: return needed_formatters[0] return None" 375,"def find_needed_input(input_format): """""" Find a needed input class input_format - needed input format, see utils.input.dataformats """""" needed_inputs = [re.cls for re in registry if re.category==RegistryCategories.inputs and re.cls.input_format == input_format] if len(needed_inputs)>0: return needed_inputs[0] return None" 376,"def exists_in_registry(category, namespace, name): """""" See if a given category, namespace, name combination exists in the registry category - See registrycategories. Type of module namespace - Namespace of the module, defined in settings name - the lowercase name of the module """""" selected_registry = [re for re in registry if re.category==category and re.namespace==namespace and re.name == name] if len(selected_registry)>0: return True return False" 377,"def register(cls): """""" Register a given model in the registry """""" registry_entry = RegistryEntry(category = cls.category, namespace = cls.namespace, name = cls.name, cls=cls) if registry_entry not in registry and not exists_in_registry(cls.category, cls.namespace, cls.name): registry.append(registry_entry) else: log.warn(""Class {0} already in registry"".format(cls))" 378,"def _set_fields(self): """""" Initialize the fields for data caching. """""" self.fields = [] self.required_input = [] for member_name, member_object in inspect.getmembers(self.__class__): if inspect.isdatadescriptor(member_object) and not member_name.startswith(""__""): self.fields.append(member_name) if member_object.required_input: self.required_input.append(member_name)" 379,"def subscriber(address,topics,callback,message_type): """""" Creates a subscriber binding to the given address and subscribe the given topics. The callback is invoked for every message received. Args: - address: the address to bind the PUB socket to. - topics: the topics to subscribe - callback: the callback to invoke for every message. 
Must accept 2 variables - topic and message - message_type: the type of message to receive """""" return Subscriber(address,topics,callback,message_type)" 380,"def start(self): """""" Start a thread that consumes the messages and invokes the callback """""" t=threading.Thread(target=self._consume) t.start()" 381,"def _get_forecast(api_result: dict) -> List[SmhiForecast]: """"""Converts results fråm API to SmhiForeCast list"""""" forecasts = [] # Need the ordered dict to get # the days in order in next stage forecasts_ordered = OrderedDict() forecasts_ordered = _get_all_forecast_from_api(api_result) # Used to calc the daycount day_nr = 1 for day in forecasts_ordered: forecasts_day = forecasts_ordered[day] if day_nr == 1: # Add the most recent forecast forecasts.append(copy.deepcopy(forecasts_day[0])) total_precipitation = float(0.0) forecast_temp_max = -100.0 forecast_temp_min = 100.0 forecast = None for forcast_day in forecasts_day: temperature = forcast_day.temperature if forecast_temp_min > temperature: forecast_temp_min = temperature if forecast_temp_max < temperature: forecast_temp_max = temperature if forcast_day.valid_time.hour == 12: forecast = copy.deepcopy(forcast_day) total_precipitation = total_precipitation + \ forcast_day._total_precipitation if forecast is None: # We passed 12 noon, set to current forecast = forecasts_day[0] forecast._temperature_max = forecast_temp_max forecast._temperature_min = forecast_temp_min forecast._total_precipitation = total_precipitation forecast._mean_precipitation = total_precipitation/24 forecasts.append(forecast) day_nr = day_nr + 1 return forecasts" 382,"def _get_all_forecast_from_api(api_result: dict) -> OrderedDict: """"""Converts results fråm API to SmhiForeCast list"""""" # Total time in hours since last forecast total_hours_last_forecast = 1.0 # Last forecast time last_time = None # Need the ordered dict to get # the days in order in next stage forecasts_ordered = OrderedDict() # Get the parameters for forecast in api_result['timeSeries']: valid_time = datetime.strptime( forecast['validTime'], ""%Y-%m-%dT%H:%M:%SZ"") for param in forecast['parameters']: if param['name'] == 't': temperature = float(param['values'][0]) # Celcisus elif param['name'] == 'r': humidity = int(param['values'][0]) # Percent elif param['name'] == 'msl': pressure = int(param['values'][0]) # hPa elif param['name'] == 'tstm': thunder = int(param['values'][0]) # Percent elif param['name'] == 'tcc_mean': octa = int(param['values'][0]) # Cloudiness in octas if 0 <= octa <= 8: # Between 0 -> 8 cloudiness = round(100*octa/8) # Convert octas to percent else: cloudiness = 100 # If not determined use 100% elif param['name'] == 'Wsymb2': symbol = int(param['values'][0]) # category elif param['name'] == 'pcat': precipitation = int(param['values'][0]) # percipitation elif param['name'] == 'pmean': mean_precipitation = float( param['values'][0]) # mean_percipitation elif param['name'] == 'ws': wind_speed = float(param['values'][0]) # wind speed elif param['name'] == 'wd': wind_direction = int(param['values'][0]) # wind direction elif param['name'] == 'vis': horizontal_visibility = float(param['values'][0]) # Visibility elif param['name'] == 'gust': wind_gust = float(param['values'][0]) # wind gust speed roundedTemp = int(round(temperature)) if last_time is not None: total_hours_last_forecast = (valid_time - last_time).seconds/60/60 # Total precipitation, have to calculate with the nr of # hours since last forecast to get correct total value tp = 
round(mean_precipitation*total_hours_last_forecast, 2) forecast = \ SmhiForecast(roundedTemp, roundedTemp, roundedTemp, humidity, pressure, thunder, cloudiness, precipitation, wind_direction, wind_speed, horizontal_visibility, wind_gust, round(mean_precipitation, 1), tp, symbol, valid_time) if valid_time.day not in forecasts_ordered: # add a new list forecasts_ordered[valid_time.day] = [] forecasts_ordered[valid_time.day].append(forecast) last_time = valid_time return forecasts_ordered" 383,"def get_forecast_api(self, longitude: str, latitude: str) -> {}: """"""Gets data from the API"""""" api_url = APIURL_TEMPLATE.format(longitude, latitude) response = urlopen(api_url) data = response.read().decode('utf-8') json_data = json.loads(data) return json_data" 384,"async def async_get_forecast_api(self, longitude: str, latitude: str) -> {}: """"""Gets data from the API asynchronously"""""" api_url = APIURL_TEMPLATE.format(longitude, latitude) if self.session is None: self.session = aiohttp.ClientSession() async with self.session.get(api_url) as response: if response.status != 200: raise SmhiForecastException( ""Failed to access weather API with status code {}"".format( response.status) ) data = await response.text() return json.loads(data)" 385,"def get_forecast(self) -> List[SmhiForecast]: """""" Returns a list of forecasts. The first in the list is the current one """""" json_data = self._api.get_forecast_api(self._longitude, self._latitude) return _get_forecast(json_data)" 386,"async def async_get_forecast(self) -> List[SmhiForecast]: """""" Returns a list of forecasts. The first in the list is the current one """""" json_data = await self._api.async_get_forecast_api(self._longitude, self._latitude) return _get_forecast(json_data)" 387,"def _make_decorator(measuring_func): """"""morass of closures for making decorators/descriptors"""""" def _decorator(name = None, metric = call_default): def wrapper(func): name_ = name if name is not None else func.__module__ + '.' 
+func.__name__ class instrument_decorator(object): # must be a class for descriptor magic to work @wraps(func) def __call__(self, *args, **kwargs): return measuring_func(func(*args, **kwargs), name_, metric) def __get__(self, instance, class_): name_ = name if name is not None else\ ""."".join((class_.__module__, class_.__name__, func.__name__)) @wraps(func) def wrapped_method(*args, **kwargs): return measuring_func(func(instance, *args, **kwargs), name_, metric) return wrapped_method return instrument_decorator() return wrapper return _decorator" 388,"def all(iterable = None, *, name = None, metric = call_default): """"""Measure total time and item count for consuming an iterable :arg iterable: any iterable :arg function metric: f(name, count, total_time) :arg str name: name for the metric """""" if iterable is None: return _iter_decorator(name, metric) else: return _do_all(iterable, name, metric)" 389,"def each(iterable = None, *, name = None, metric = call_default): """"""Measure time elapsed to produce each item of an iterable :arg iterable: any iterable :arg function metric: f(name, 1, time) :arg str name: name for the metric """""" if iterable is None: return _each_decorator(name, metric) else: return _do_each(iterable, name, metric)" 390,"def first(iterable = None, *, name = None, metric = call_default): """"""Measure time elapsed to produce first item of an iterable :arg iterable: any iterable :arg function metric: f(name, 1, time) :arg str name: name for the metric """""" if iterable is None: return _first_decorator(name, metric) else: return _do_first(iterable, name, metric)" 391,"def _iterable_to_varargs_method(func): """"""decorator to convert a method taking a iterable to a *args one"""""" def wrapped(self, *args, **kwargs): return func(self, args, **kwargs) return wrapped" 392,"def _varargs_to_iterable_method(func): """"""decorator to convert a *args method to one taking a iterable"""""" def wrapped(self, iterable, **kwargs): return func(self, *iterable, **kwargs) return wrapped" 393,"def reducer(*, name = None, metric = call_default): """"""Decorator to measure a function that consumes many items. The wrapped ``func`` should take either a single ``iterable`` argument or ``*args`` (plus keyword arguments). :arg function metric: f(name, count, total_time) :arg str name: name for the metric """""" class instrument_reducer_decorator(object): def __init__(self, func): self.orig_func = func self.wrapping = wraps(func) self.metric_name = name if name is not None else func.__module__ + '.' 
+func.__name__ self.varargs = inspect.getargspec(func).varargs is not None if self.varargs: self.method = _varargs_to_iterable_method(func) self.func = _varargs_to_iterable_func(func) self.callme = _iterable_to_varargs_func(self._call) else: self.method = func self.func = func self.callme = self._call # we need _call/callme b/c CPython short-circurits CALL_FUNCTION to # directly access __call__, bypassing our varargs decorator def __call__(self, *args, **kwargs): return self.callme(*args, **kwargs) def _call(self, iterable, **kwargs): it = counted_iterable(iterable) t = time.time() try: return self.func(it, **kwargs) finally: metric(self.metric_name, it.count, time.time() - t) def __get__(self, instance, class_): metric_name = name if name is not None else\ ""."".join((class_.__module__, class_.__name__, self.orig_func.__name__)) def wrapped_method(iterable, **kwargs): it = counted_iterable(iterable) t = time.time() try: return self.method(instance, it, **kwargs) finally: metric(metric_name, it.count, time.time() - t) # wrap in func version b/c self is handled for us by descriptor (ie, `instance`) if self.varargs: wrapped_method = _iterable_to_varargs_func(wrapped_method) wrapped_method = self.wrapping(wrapped_method) return wrapped_method return instrument_reducer_decorator" 394,"def producer(*, name = None, metric = call_default): """"""Decorator to measure a function that produces many items. The function should return an object that supports ``__len__`` (ie, a list). If the function returns an iterator, use :func:`all` instead. :arg function metric: f(name, count, total_time) :arg str name: name for the metric """""" def wrapper(func): def instrumenter(name_, *args, **kwargs): t = time.time() try: ret = func(*args, **kwargs) except Exception: # record a metric for other exceptions, than raise metric(name_, 0, time.time() - t) raise else: # normal path, record metric & return metric(name_, len(ret), time.time() - t) return ret name_ = name if name is not None else func.__module__ + '.' +func.__name__ class instrument_decorator(object): # must be a class for descriptor magic to work @wraps(func) def __call__(self, *args, **kwargs): return instrumenter(name_, *args, **kwargs) def __get__(self, instance, class_): name_ = name if name is not None else\ ""."".join((class_.__module__, class_.__name__, func.__name__)) @wraps(func) def wrapped_method(*args, **kwargs): return instrumenter(name_, instance, *args, **kwargs) return wrapped_method return instrument_decorator() return wrapper" 395,"def block(*, name = None, metric = call_default, count = 1): """"""Context manager to measure execution time of a block :arg function metric: f(name, 1, time) :arg str name: name for the metric :arg int count: user-supplied number of items, defaults to 1 """""" t = time.time() try: yield finally: metric(name, count, time.time() - t)" 396,"def import_from_string(import_string): """""" Import a class from a string import_string - string path to module to import using dot notation (foo.bar) """""" import_split = import_string.split(""."") import_class = import_split[-1] module_path = ""."".join(import_split[:-1]) mod = __import__(module_path, fromlist=[import_class]) klass = getattr(mod, import_class) return klass" 397,"def send(self,message,message_type,topic=''): """""" Send the message on the socket. Args: - message: the message to publish - message_type: the type of message being sent - topic: the topic on which to send the message. Defaults to ''. 
"""""" if message_type == RAW: self._sock.send(message) elif message_type == PYOBJ: self._sock.send_pyobj(message) elif message_type == JSON: self._sock.send_json(message) elif message_type == MULTIPART: self._sock.send_multipart([topic, message]) elif message_type == STRING: self._sock.send_string(message) elif message_type == UNICODE: self._sock.send_unicode(message) else: raise Exception(""Unknown message type %s""%(message_type,))" 398,"def receive(self,message_type): """""" Receive the message of the specified type and retun Args: - message_type: the type of the message to receive Returns: - the topic of the message - the message received from the socket """""" topic = None message = None if message_type == RAW: message = self._sock.recv(flags=zmq.NOBLOCK) elif message_type == PYOBJ: message = self._sock.recv_pyobj(flags=zmq.NOBLOCK) elif message_type == JSON: message = self._sock.recv_json(flags=zmq.NOBLOCK) elif message_type == MULTIPART: data = self._sock.recv_multipart(flags=zmq.NOBLOCK) message = data[1] topic = data[0] elif message_type == STRING: message = self._sock.recv_string(flags=zmq.NOBLOCK) elif message_type == UNICODE: message = self._sock.recv_unicode(flags=zmq.NOBLOCK) else: raise Exception(""Unknown message type %s""%(self._message_type,)) return (topic, message)" 399,"def dynamic_exec(code, resolve, assign=None, delete=None, automatic_builtins=True, filename=None, module_name=None, _type='exec'): """""" Transforms the Python source code *code* and evaluates it so that the *resolve* and *assign* functions are called respectively for when a global variable is access or assigned. If *resolve* is a mapping, *assign* must be omitted. #KeyError#s raised by the mapping are automatically converted to #NameError#s. Otherwise, *resolve* and *assign* must be callables that have the same interface as `__getitem__()`, and `__setitem__()`. If *assign* is omitted in that case, assignments will be redirected to a separate dictionary and keys in that dictionary will be checked before continuing with the *resolve* callback. 
"""""" parse_filename = filename or '' ast_node = transform(ast.parse(code, parse_filename, mode=_type)) code = compile(ast_node, parse_filename, _type) if hasattr(resolve, '__getitem__'): if assign is not None: raise TypeError('""assign"" parameter specified where ""resolve"" is a mapping') if delete is not None: raise TypeError('""delete"" parameter specified where ""resolve"" is a mapping') input_mapping = resolve def resolve(x): try: return input_mapping[x] except KeyError: raise NameError(x) assign = input_mapping.__setitem__ delete = input_mapping.__delitem__ else: input_mapping = False class DynamicMapping(object): _data = {} _deleted = set() def __repr__(self): if input_mapping: return 'DynamicMapping({!r})'.format(input_mapping) else: return 'DynamicMapping(resolve={!r}, assign={!r})'.format(resolve, assign) def __getitem__(self, key): if key in self._deleted: raise NameError(key) if assign is None: try: return self._data[key] except KeyError: pass # Continue with resolve() try: return resolve(key) except NameError as exc: if automatic_builtins and not key.startswith('_'): try: return getattr(builtins, key) except AttributeError: pass raise exc def __setitem__(self, key, value): self._deleted.discard(key) if assign is None: self._data[key] = value else: assign(key, value) def __delitem__(self, key): if delete is None: self._deleted.add(key) else: delete(key) def get(self, key, default=None): try: return self[key] except NameError: return default mapping = DynamicMapping() globals_ = {'__dict__': mapping} if filename: mapping['__file__'] = filename globals_['__file__'] = filename if module_name: mapping['__name__'] = module_name globals_['__name__'] = module_name return (exec_ if _type == 'exec' else eval)(code, globals_)" 400,"def __get_subscript(self, name, ctx=None): """""" Returns `[""""]` """""" assert isinstance(name, string_types), name return ast.Subscript( value=ast.Name(id=self.data_var, ctx=ast.Load()), slice=ast.Index(value=ast.Str(s=name)), ctx=ctx)" 401,"def __get_subscript_assign(self, name): """""" Returns `[""""] = `. """""" return ast.Assign( targets=[self.__get_subscript(name, ast.Store())], value=ast.Name(id=name, ctx=ast.Load()))" 402,"def __get_subscript_delete(self, name): """""" Returns `del [""""]`. """""" return ast.Delete(targets=[self.__get_subscript(name, ast.Del())])" 403,"def __visit_target(self, node): """""" Call this method to visit assignment targets and to add local variables to the current stack frame. Used in #visit_Assign() and #__visit_comprehension(). """""" if isinstance(node, ast.Name) and isinstance(node.ctx, ast.Store): self.__add_variable(node.id) elif isinstance(node, (ast.Tuple, ast.List)): [self.__visit_target(x) for x in node.elts]" 404,"def __get_package_manager(self): """""" Installs and verifies package manager """""" package_manager = """" args = """" sudo_required = True if system.is_osx(): package_manager = ""brew"" sudo_required = False args = "" install"" elif system.is_debian(): package_manager = ""apt-get"" args = "" -y install"" elif system.is_fedora(): package_manager = ""yum"" args = "" install"" elif system.is_arch(): package_manager = ""pacman"" args = "" --noconfirm -S"" if lib.which(package_manager) is None: self.logger.warn(""Package manager %s not installed! 
Packages will not be installed."" % package_manager) self.package_manager = None self.package_manager = package_manager self.sudo_required = sudo_required self.args = args" 405,"def eval_expr(expr, context): """"""Recursively evaluates a compiled expression using the specified context. Dict instances can contain a ""__kwargs"" key which will be used to update the dict with its content """""" if isinstance(expr, list): rv = [] for item in expr: rv.append(eval_expr(item, context)) return rv if isinstance(expr, dict): rv = {} for k, v in expr.iteritems(): rv[k] = eval_expr(v, context) kwargs = rv.pop(""__kwargs"", None) if kwargs: rv.update(kwargs) return rv if isinstance(expr, Expression): return expr.eval(context) return expr" 406,"def can_convert(strict: bool, from_type: Type[S], to_type: Type[T]): """""" None should be treated as a Joker here (but we know that never from_type and to_type will be None at the same time) :param strict: :param from_type: :param to_type: :return: """""" if (to_type is not None) and (to_type not in (all_primitive_types + all_np_primitive_types)): return False else: return True" 407,"def parse(self, data, doctype): ''' Parse an input string, and return an AST doctype must have WCADocument as a baseclass ''' self.doctype = doctype self.lexer.lineno = 0 del self.errors[:] del self.warnings[:] self.lexer.lexerror = False ast = self.parser.parse(data, lexer=self.lexer) if self.lexer.lexerror: ast = None if ast is None: self.errors.append(""Couldn't build AST."") else: for check in self.sema[self.doctype]: visitor = check() if not visitor.visit(ast): self.errors.append(""Couldn't visit AST."") self.errors.extend(visitor.errors) self.warnings.extend(visitor.warnings) return (ast, list(self.errors), list(self.warnings))" 408,"def _act_on_list(self, lhs): ''' Act on the following rule : items : items item | item ''' lhs[0] = [] if len(lhs) == 3: lhs[0] = lhs[1] # lhs[len(lhs)-1] may be different from lhs[-1] # Yacc use some internal method to get the element, see yacc.py:240 item = lhs[len(lhs) - 1] if item: lhs[0].append(item)" 409,"def p_content(self, content): '''content : TITLE opttexts VERSION opttexts sections | TITLE STATESTAG VERSION opttexts states_sections''' content[0] = self.doctype(content[1], content[3], content[4], content[5]) if self.toc: self.toc.set_articles([a for a in content[0].sections if isinstance(a, Article)])" 410,"def p_text(self, text): '''text : TEXT PARBREAK | TEXT | PARBREAK''' item = text[1] text[0] = item if item[0] != ""\n"" else u"""" if len(text) > 2: text[0] += ""\n""" 411,"def p_toc(self, toc): '''toc : HEADERSEC opttexts TOC opttexts''' toc[0] = TableOfContent(toc[1], toc[2], []) self.toc = toc[0]" 412,"def p_article(self, article): '''article : ARTICLEHEADER opttexts rules opttexts''' article[0] = Article(article[1][4], article[2], article[3], article[1][0], article[1][1], article[1][2], article[1][3], article[1][5])" 413,"def p_regularsec(self, regularsec): '''regularsec : HEADERSEC opttexts optsubsections''' texts = [] sections = regularsec[2] if len(regularsec) > 3: texts = regularsec[2] sections = regularsec[3] regularsec[0] = Section(regularsec[1], texts, sections)" 414,"def p_subsection(self, subsection): '''subsection : HEADERSUBSEC texts | HEADERSUBSEC texts labeldecls opttexts''' content = subsection[3] if len(subsection) > 3 else [] subsection[0] = Subsection(subsection[1], subsection[2], content)" 415,"def p_rule(self, rule): '''rule : GUIDELINE | REGULATION''' if len(rule[1]) == 4: # This is a guideline rule[0] = 
Guideline(rule[1][1], rule[1][2], rule[1][3]) else: # This is a regulation indentsize = rule[1][0] number = rule[1][1] text = rule[1][2] parent = None # If we just ""un""nested, shrink the current rule to our level if self.prev_indent > indentsize: self.current_rule = self.current_rule[0:indentsize+1] # We just added a nested level, the parent is the list's last elem if self.prev_indent < indentsize: parent = self.current_rule[-1] # Else, if we are nested the parent is the one before the last elem elif len(self.current_rule) > 1: parent = self.current_rule[-2] # Else if we are not nested, then we are a root rule and parent is none # (do nothing as parent is initialized to none) # Create the regulation node reg = Regulation(number, text, parent) # Let our parent knows he has a new child, if we don't have a parent # let's create an item in the article rules list if parent: parent.add_child(reg) else: rule[0] = reg # Unless we nested, pop and replace the last rule by ourself # If we added a nesting level, we just need to add ourself if self.prev_indent >= indentsize: self.current_rule.pop() self.current_rule.append(reg) self.prev_indent = indentsize" 416,"def p_state(self, state): '''state : STATE opttexts''' state[0] = State(state[1][0], state[1][1], state[1][2], state[1][3], state[2])" 417,"def p_error(self, elem): '''Handle syntax error''' self.errors.append(""Syntax error on line "" + str(self.lexer.lineno) + "". Got unexpected token "" + elem.type)" 418,"def set_progress_brackets(self, start, end): """"""Set brackets to set around a progress bar."""""" self.sep_start = start self.sep_end = end" 419,"def add_progress(self, count, symbol='#', color=None, on_color=None, attrs=None): """"""Add a section of progress to the progressbar. The progress is captured by ""count"" and displayed as a fraction of the statusbar width proportional to this count over the total progress displayed. The progress will be displayed using the ""symbol"" character and the foreground and background colours and display style determined by the the ""color"", ""on_color"" and ""attrs"" parameters. These parameters work as the termcolor.colored function. """""" chunk = _ProgressChunk(count, symbol, color, on_color, attrs) self._progress_chunks.append(chunk)" 420,"def format_progress(self, width): """"""Create the formatted string that displays the progress."""""" chunk_widths = self._get_chunk_sizes(width) progress_chunks = [chunk.format_chunk(chunk_width) for (chunk, chunk_width) in zip(self._progress_chunks, chunk_widths)] return ""{sep_start}{progress}{sep_end}"".format( sep_start=self.sep_start, progress="""".join(progress_chunks), sep_end=self.sep_end )" 421,"def summary_width(self): """"""Calculate how long a string is needed to show a summary string. This is not simply the length of the formatted summary string since that string might contain ANSI codes. """""" chunk_counts = [chunk.count for chunk in self._progress_chunks] numbers_width = sum(max(1, ceil(log10(count + 1))) for count in chunk_counts) separators_with = len(chunk_counts) - 1 return numbers_width + separators_with" 422,"def format_summary(self): """"""Generate a summary string for the progress bar."""""" chunks = [chunk.format_chunk_summary() for chunk in self._progress_chunks] return ""/"".join(chunks)" 423,"def add_progress(self, count, symbol='#', color=None, on_color=None, attrs=None): """"""Add a section of progress to the progressbar. 
The progress is captured by ""count"" and displayed as a fraction of the statusbar width proportional to this count over the total progress displayed. The progress will be displayed using the ""symbol"" character and the foreground and background colours and display style determined by the the ""fg"", ""bg"" and ""style"" parameters. For these, use the colorama package to set up the formatting. """""" self._progress.add_progress(count, symbol, color, on_color, attrs)" 424,"def format_status(self, width=None, label_width=None, progress_width=None, summary_width=None): """"""Generate the formatted status bar string."""""" if width is None: # pragma: no cover width = shutil.get_terminal_size()[0] if label_width is None: label_width = len(self.label) if summary_width is None: summary_width = self.summary_width() if progress_width is None: progress_width = width - label_width - summary_width - 2 if len(self.label) > label_width: # FIXME: This actually *will* break if we ever have fewer than # three characters assigned to format the label, but that would # be an extreme situation so I won't fix it just yet. label = self.label[:label_width - 3] + ""..."" else: label_format = ""{{label:{fill_char}<{width}}}"".format( width=label_width, fill_char=self.fill_char) label = label_format.format(label=self.label) summary_format = ""{{:>{width}}}"".format(width=summary_width) summary = summary_format.format(self._progress.format_summary()) progress = self._progress.format_progress(width=progress_width) return ""{label} {progress} {summary}"".format( label=label, progress=progress, summary=summary )" 425,"def add_status_line(self, label): """"""Add a status bar line to the table. This function returns the status bar and it can be modified from this return value. """""" status_line = StatusBar(label, self._sep_start, self._sep_end, self._fill_char) self._lines.append(status_line) return status_line" 426,"def calculate_field_widths(self, width=None, min_label_width=10, min_progress_width=10): """"""Calculate how wide each field should be so we can align them. We always find room for the summaries since these are short and packed with information. If possible, we will also find room for labels, but if this would make the progress bar width shorter than the specified minium then we will shorten the labels, though never below the minium there. If this mean we have bars that are too wide for the terminal, then your terminal needs to be wider. """""" if width is None: # pragma: no cover width = shutil.get_terminal_size()[0] summary_width = self.summary_width() label_width = self.label_width() remaining = width - summary_width - label_width - 2 if remaining >= min_progress_width: progress_width = remaining else: progress_width = min_progress_width remaining = width - summary_width - progress_width - 2 if remaining >= min_label_width: label_width = remaining else: label_width = min_label_width return (label_width, progress_width, summary_width)" 427,"def format_table(self, width=None, min_label_width=10, min_progress_width=10): """"""Format the entire table of progress bars. The function first computes the widths of the fields so they can be aligned across lines and then returns formatted lines as a list of strings. """""" # handle the special case of an empty table. 
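As a quick sanity check of the `summary_width` arithmetic above, here is the computation for three hypothetical chunk counts whose summary string would be `"7/42/105"`:

```python
from math import ceil, log10

counts = [7, 42, 105]                                              # hypothetical chunk counts
numbers_width = sum(max(1, ceil(log10(c + 1))) for c in counts)    # 1 + 2 + 3 = 6
separators_width = len(counts) - 1                                 # two "/" separators
assert numbers_width + separators_width == len("7/42/105")         # 8
```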
if len(self._lines) == 0: return [] if width is None: # pragma: no cover width = shutil.get_terminal_size()[0] labelw, progw, summaryw = self.calculate_field_widths( width=width, min_label_width=min_label_width, min_progress_width=min_progress_width ) output = [ sb.format_status( label_width=labelw, progress_width=progw, summary_width=summaryw ) for sb in self._lines ] return output" 428,"def create_log_dict(request, response): """""" Create a dictionary with logging data. """""" remote_addr = request.META.get('REMOTE_ADDR') if remote_addr in getattr(settings, 'INTERNAL_IPS', []): remote_addr = request.META.get( 'HTTP_X_FORWARDED_FOR') or remote_addr user_email = ""-"" if hasattr(request, 'user'): user_email = getattr(request.user, 'email', '-') if response.streaming: content_length = 'streaming' else: content_length = len(response.content) return { # 'event' makes event-based filtering possible in logging backends # like logstash 'event': settings.LOGUTILS_LOGGING_MIDDLEWARE_EVENT, 'remote_address': remote_addr, 'user_email': user_email, 'method': request.method, 'url': request.get_full_path(), 'status': response.status_code, 'content_length': content_length, 'request_time': -1, # NA value: real value added by LoggingMiddleware }" 429,"def create_log_message(log_dict, use_sql_info=False, fmt=True): """""" Create the logging message string. """""" log_msg = ( ""%(remote_address)s %(user_email)s %(method)s %(url)s %(status)d "" ""%(content_length)d (%(request_time).2f seconds)"" ) if use_sql_info: sql_time = sum( float(q['time']) for q in connection.queries) * 1000 extra_log = { 'nr_queries': len(connection.queries), 'sql_time': sql_time} log_msg += "" (%(nr_queries)d SQL queries, %(sql_time)f ms)"" log_dict.update(extra_log) return log_msg % log_dict if fmt else log_msg" 430,"def process_response(self, request, response): """""" Create the logging message.. """""" try: log_dict = create_log_dict(request, response) # add the request time to the log_dict; if no start time is # available, use -1 as NA value request_time = ( time.time() - self.start_time if hasattr(self, 'start_time') and self.start_time else -1) log_dict.update({'request_time': request_time}) is_request_time_too_high = ( request_time > float(settings.LOGUTILS_REQUEST_TIME_THRESHOLD)) use_sql_info = settings.DEBUG or is_request_time_too_high log_msg = create_log_message(log_dict, use_sql_info, fmt=False) if is_request_time_too_high: logger.warning(log_msg, log_dict, extra=log_dict) else: logger.info(log_msg, log_dict, extra=log_dict) except Exception as e: logger.exception(e) return response" 431,"def synchronized(obj): """""" This function has two purposes: 1. Decorate a function that automatically synchronizes access to the object passed as the first argument (usually `self`, for member methods) 2. Synchronize access to the object, used in a `with`-statement. Note that you can use #wait(), #notify() and #notify_all() only on synchronized objects. # Example ```python class Box(Synchronizable): def __init__(self): self.value = None @synchronized def get(self): return self.value @synchronized def set(self, value): self.value = value box = Box() box.set('foobar') with synchronized(box): box.value = 'taz\'dingo' print(box.get()) ``` # Arguments obj (Synchronizable, function): The object to synchronize access to, or a function to decorate. # Returns 1. The decorated function. 2. The value of `obj.synchronizable_condition`, which should implement the context-manager interface (to be used in a `with`-statement). 
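To illustrate `create_log_message` above, here is the base template filled with a hypothetical `log_dict` (the values are made up for the example; only the keys come from `create_log_dict`):

```python
log_msg = ("%(remote_address)s %(user_email)s %(method)s %(url)s %(status)d "
           "%(content_length)d (%(request_time).2f seconds)")
log_dict = {
    'remote_address': '127.0.0.1', 'user_email': 'user@example.com',
    'method': 'GET', 'url': '/api/items/', 'status': 200,
    'content_length': 1532, 'request_time': 0.042,
}
print(log_msg % log_dict)
# 127.0.0.1 user@example.com GET /api/items/ 200 1532 (0.04 seconds)
```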
"""""" if hasattr(obj, 'synchronizable_condition'): return obj.synchronizable_condition elif callable(obj): @functools.wraps(obj) def wrapper(self, *args, **kwargs): with self.synchronizable_condition: return obj(self, *args, **kwargs) return wrapper else: raise TypeError('expected Synchronizable instance or callable to decorate')" 432,"def wait(obj, timeout=None): """""" Wait until *obj* gets notified with #notify() or #notify_all(). If a timeout is specified, the function can return without the object being notified if the time runs out. Note that you can only use this function on #synchronized() objects. # Arguments obj (Synchronizable): An object that can be synchronized. timeout (number, None): The number of seconds to wait for the object to get notified before returning. If not value or the value #None is specified, the function will wait indefinetily. """""" if timeout is None: return obj.synchronizable_condition.wait() else: return obj.synchronizable_condition.wait(timeout)" 433,"def wait_for_condition(obj, cond, timeout=None): """""" This is an extended version of #wait() that applies the function *cond* to check for a condition to break free from waiting on *obj*. Note that *obj* must be notified when its state changes in order to check the condition. Note that access to *obj* is synchronized when *cond* is called. # Arguments obj (Synchronizable): The object to synchronize and wait for *cond*. cond (function): A function that accepts *obj* as an argument. Must return #True if the condition is met. timeout (number, None): The maximum number of seconds to wait. # Returns bool: #True if the condition was met, #False if not and a timeout ocurred. """""" with synchronized(obj): if timeout is None: while not cond(obj): wait(obj) else: t_start = time.time() while not cond(obj): t_delta = time.time() - t_start if t_delta >= timeout: return False wait(obj, timeout - t_delta) return True" 434,"def as_completed(jobs): ''' Generator function that yields the jobs in order of their completion. Attaches a new listener to each job. ''' jobs = tuple(jobs) event = threading.Event() callback = lambda f, ev: event.set() [job.add_listener(Job.SUCCESS, callback, once=True) for job in jobs] [job.add_listener(Job.ERROR, callback, once=True) for job in jobs] while jobs: event.wait() event.clear() jobs, finished = split_list_by(jobs, lambda x: x.finished) for job in finished: yield job" 435,"def split_list_by(lst, key): """""" Splits a list by the callable *key* where a negative result will cause the item to be put in the first list and a positive into the second list. """""" first, second = [], [] for item in lst: if key(item): second.append(item) else: first.append(item) return (first, second)" 436,"def reraise(tpe, value, tb=None): "" Reraise an exception from an exception info tuple. "" Py3 = (sys.version_info[0] == 3) if value is None: value = tpe() if Py3: if value.__traceback__ is not tb: raise value.with_traceback(tb) raise value else: exec('raise tpe, value, tb')" 437,"def result(self): """""" The result of the jobs execution. Accessing this property while the job is pending or running will raise #InvalidState. If an exception occured during the jobs execution, it will be raised. # Raises InvalidState: If the job is not in state #FINISHED. Cancelled: If the job was cancelled. any: If an exception ocurred during the job's execution. 
"""""" if self.__cancelled: raise Job.Cancelled elif self.__state in (Job.PENDING, Job.RUNNING): raise Job.InvalidState('job is {0}'.format(self.__state)) elif self.__state == Job.ERROR: reraise(*self.__exception) elif self.__state == Job.SUCCESS: return self.__result else: raise RuntimeError('invalid job state {0!r}'.format(self.__state))" 438,"def exception(self): """""" The exception that occured while the job executed. The value is #None if no exception occurred. # Raises InvalidState: If the job is #PENDING or #RUNNING. """""" if self.__state in (Job.PENDING, Job.RUNNING): raise self.InvalidState('job is {0}'.format(self.__state)) elif self.__state == Job.ERROR: assert self.__exception is not None return self.__exception elif self.__state in (Job.RUNNING, Job.SUCCESS, Job.CANCELLED): assert self.__exception is None return None else: raise RuntimeError('invalid job state {0!r}'.format(self.__state))" 439,"def finished(self): """""" True if the job run and finished. There is no difference if the job finished successfully or errored. """""" return self.__state in (Job.ERROR, Job.SUCCESS, Job.CANCELLED)" 440,"def get(self, default=None): """""" Get the result of the Job, or return *default* if the job is not finished or errored. This function will never explicitly raise an exception. Note that the *default* value is also returned if the job was cancelled. # Arguments default (any): The value to return when the result can not be obtained. """""" if not self.__cancelled and self.__state == Job.SUCCESS: return self.__result else: return default" 441,"def cancel(self): """""" Cancels the job. Functions should check the #Job.cancelled flag from time to time to be able to abort pre-emptively if the job was cancelled instead of running forever. """""" with synchronized(self): cancelled = self.__cancelled if not cancelled: self.__cancelled = True notify_all(self) if not cancelled: self._trigger_event(Job.CANCELLED)" 442,"def _trigger_event(self, event): """""" Private. Triggers and event and removes all one-off listeners for that event. """""" if event is None or event not in self.__listeners: raise ValueError('invalid event type: {0!r}'.format(event)) # Check the event has not already been triggered, then mark # the event as triggered. if event in self.__event_set: raise RuntimeError('event already triggered: {0!r}'.format(event)) self.__event_set.add(event) listeners = self.__listeners[event] + self.__listeners[None] # Remove one-off listeners. self.__listeners[event][:] = (l for l in self.__listeners[event] if not l.once) self.__listeners[None][:] = (l for l in self.__listeners[None] if not l.once) for listener in listeners: # XXX: What to do on exceptions? Catch and make sure all listeners # run through? What to do with the exception(s) then? listener.callback(self, event)" 443,"def add_listener(self, event, callback, once=False): """""" Register a *callback* for the specified *event*. The function will be called with the #Job as its first argument. If *once* is #True, the listener will be removed after it has been invoked once or when the job is re-started. Note that if the event already ocurred, *callback* will be called immediately! # Arguments event (str, list of str): The name or multiple names of an event, or None to register the callback to be called for any event. callback (callable): A function. once (bool): Whether the callback is valid only once. 
"""""" if not callable(callback): raise TypeError('callback must be callable') if isinstance(event, str): event = [event] for evn in event: if evn not in self.__listeners: raise ValueError('invalid event type: {0!r}'.format(evn)) for evn in event: event_passed = False with synchronized(self): event_passed = (evn in self.__event_set) if not (once and event_passed): self.__listeners[evn].append(Job._Listener(callback, once)) # If the event already happened, we'll invoke the callback # immediately to make up for what it missed. if event_passed: callback(self, event)" 444,"def wait(self, timeout=None): """""" Waits for the job to finish and returns the result. # Arguments timeout (number, None): A number of seconds to wait for the result before raising a #Timeout exception. # Raises Timeout: If the timeout limit is exceeded. """""" def cond(self): return self.__state not in (Job.PENDING, Job.RUNNING) or self.__cancelled if not wait_for_condition(self, cond, timeout): raise Job.Timeout return self.result" 445,"def start(self, as_thread=True, daemon=False, __state_check=True): """""" Starts the job. If the job was run once before, resets it completely. Can not be used while the job is running (raises #InvalidState). # Arguments as_thread (bool): Start the job in a separate thread. This is #True by default. Classes like the #ThreadPool calls this function from its own thread and passes #False for this argument. daemon (bool): If a thread is created with *as_thread* set to #True, defines whether the thread is started as a daemon or not. Defaults to #False. # Returns Job: The job object itself. """""" if __state_check: # We need to manually manage the lock to be able to release it # pre-emptively when needed. with synchronized(self): if self.__cancelled and self.__state == Job.PENDING: # Cancelled in PENDING state. Do not run the target function at all. self.__state = Job.CANCELLED assert self.__exception is None assert self.__result is None self._trigger_event(Job.CANCELLED) return None if self.__state == Job.RUNNING: raise Job.InvalidState('job is already running') elif self.__state not in (Job.PENDING, Job.ERROR, Job.SUCCESS, Job.CANCELLED): raise RuntimeError('invalid job state {0!r}'.format(self.__state)) # Reset the Job attributes. self.__state = Job.RUNNING self.__cancelled = False self.__result = None self.__exception = None self.__event_set.clear() self.__thread = None # Remove all listeners that have been registered with the ""once"" flag. for listeners in self.__listeners.values(): listeners[:] = (l for l in listeners if not l.once) if as_thread: thread = threading.Thread(target=self.start, args=(False, False, False)) thread.setDaemon(daemon) with synchronized(self): assert not self.__thread or not self.__thread.running self.__thread = thread thread.start() return self try: result = None exception = None try: result = self.run() state = Job.SUCCESS except Exception: # XXX: Catch BaseException? if self.print_exc: traceback.print_exc() exception = Job.ExceptionInfo(*sys.exc_info()) state = Job.ERROR with synchronized(self): cancelled = self.__cancelled self.__result = result self.__exception = exception self.__state = Job.CANCELLED if cancelled else state self._trigger_event(state) finally: with synchronized(self): notify_all(self) if self.__dispose_inputs: self.__target = None self.__args = None self.__kwargs = None self.data = None for listeners in self.__listeners.values(): listeners[:] = [] return self" 446,"def run(self): """""" This method is the actual implementation of the job. 
By default, it calls the target function specified in the #Job constructor. """""" if self.__target is not None: return self.__target(self, *self.__args, **self.__kwargs) raise NotImplementedError" 447,"def factory(start_immediately=True): """""" This is a decorator function that creates new `Job`s with the wrapped function as the target. # Example ```python @Job.factory() def some_longish_function(job, seconds): time.sleep(seconds) return 42 job = some_longish_function(2) print(job.wait()) ``` # Arguments start_immediately (bool): #True if the factory should call #Job.start() immediately, #False if it should return the job in pending state. """""" def decorator(func): def wrapper(*args, **kwargs): job = Job(task=lambda j: func(j, *args, **kwargs)) if start_immediately: job.start() return job return wrapper return decorator" 448,"def start(self): """""" Starts the #ThreadPool. Must be ended with #stop(). Use the context-manager interface to ensure starting and the #ThreadPool. """""" if self.__running: raise RuntimeError('ThreadPool already running') [t.start() for t in self.__threads] self.__running = True" 449,"def current_jobs(self): """""" Returns a snapshot of the Jobs that are currently being processed by the ThreadPool. These jobs can not be found in the #pending_jobs() list. """""" jobs = [] with synchronized(self.__queue): for worker in self.__threads: with synchronized(worker): if worker.current: jobs.append(worker.current) return jobs" 450,"def clear(self): """""" Removes all pending Jobs from the queue and return them in a list. This method does **no**t call #Job.cancel() on any of the jobs. If you want that, use #cancel_all() or call it manually. """""" with synchronized(self.__queue): jobs = self.__queue.snapshot() self.__queue.clear() return jobs" 451,"def cancel_all(self, cancel_current=True): """""" Similar to #clear(), but this function also calls #Job.cancel() on all jobs. Also, it **includes** all jobs that are currently being executed if *cancel_current* is True. # Arguments cancel_current (bool): Also cancel currently running jobs and include them in the returned list of jobs. # Returns list: A list of the #Job#s that were canceled. """""" with synchronized(self.__queue): jobs = self.clear() if cancel_current: jobs.extend(self.current_jobs()) [j.cancel() for j in jobs] return jobs" 452,"def submit(self, target=None, task=None, args=(), kwargs=None, front=False, dispose_inputs=None): """""" Submit a new #Job to the ThreadPool. # Arguments task (function, Job): Either a function that accepts a #Job, *args* and *kwargs* or a #Job object that is in #~Job.PENDING state. target (function): A function object that accepts *args* and *kwargs*. Only if *task* is not specified. args (list, tuple): A list of arguments to be passed to *job*, if it is a function. kwargs (dict): A dictionary to be passed as keyword arguments to *job*, if it is a function. front (bool): If #True, the job will be inserted in the front of the queue. # Returns Job: The job that was added to the queue. # Raises TypeError: If a #Job object was passed but *args* or *kwargs* are non-empty. RuntimeError: If the ThreadPool is not running (ie. if it was shut down). 
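A hypothetical end-to-end use of the ThreadPool API shown above (`start`, `submit`, `wait`, `shutdown`); the constructor arguments are not shown in these snippets, so the bare `ThreadPool()` call is an assumption:

```python
pool = ThreadPool()                      # constructor signature assumed
pool.start()
try:
    jobs = [pool.submit(target=lambda job, i=i: i * i) for i in range(5)]
    pool.wait()                          # block until every queued job has finished
    print([job.get() for job in jobs])   # -> [0, 1, 4, 9, 16]
finally:
    pool.shutdown(wait=True)
```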
"""""" if not self.__running: raise RuntimeError(""ThreadPool ain't running"") if dispose_inputs is None: dispose_inputs = self.dispose_inputs if isinstance(task, Job): if args or kwargs: raise TypeError('can not provide additional arguments for Job') if task.state != Job.PENDING: raise RuntimeError('job is not pending') job = task elif task is not None: if kwargs is None: kwargs = {} job = Job(task=task, args=args, kwargs=kwargs, dispose_inputs=dispose_inputs) elif target is not None: if kwargs is None: kwargs = {} job = Job(target=target, args=args, kwargs=kwargs, dispose_inputs=dispose_inputs) else: raise TypeError('expected Job or callable') job.print_exc = self.print_exc if front: self.__queue.appendleft(job) else: self.__queue.append(job) return job" 453,"def wait(self, timeout=None): """""" Block until all jobs in the ThreadPool are finished. Beware that this can make the program run into a deadlock if another thread adds new jobs to the pool! # Raises Timeout: If the timeout is exceeded. """""" if not self.__running: raise RuntimeError(""ThreadPool ain't running"") self.__queue.wait(timeout)" 454,"def shutdown(self, wait=True): """""" Shut down the ThreadPool. # Arguments wait (bool): If #True, wait until all worker threads end. Note that pending jobs are still executed. If you want to cancel any pending jobs, use the #clear() or #cancel_all() methods. """""" if self.__running: # Add a Non-entry for every worker thread we have. for thread in self.__threads: assert thread.isAlive() self.__queue.append(None) self.__running = False if wait: self.__queue.wait() for thread in self.__threads: thread.join()" 455,"def submit_multiple(self, functions, target=False, task=False): """""" Submits a #Job for each element in *function* and returns a #JobCollection. """""" if target or not task: return JobCollection([self.submit(target=func) for func in functions]) else: return JobCollection([self.submit(task=func) for func in functions])" 456,"def new_event_type(self, name, mergeable=False): ''' Declare a new event. May overwrite an existing entry. ''' self.event_types[name] = self.EventType(name, mergeable)" 457,"def add_event(self, name, data=None): ''' Add an event of type *name* to the queue. May raise a `ValueError` if the event type is mergeable and *data* is not None or if *name* is not a declared event type (in strict mode). ''' try: mergeable = self.event_types[name].mergeable except KeyError: if self.strict: raise ValueError('unknown event type {0!r}'.format(name)) mergeable = False if mergeable and data is not None: raise ValueError('mergable event can not have data attached') with self.lock: if mergeable: # Check if such an event already exists. for ev in self.events: if ev.type == name: return self.events.append(self.Event(name, data, time.clock()))" 458,"def pop_event(self): ''' Pop the next queued event from the queue. :raise ValueError: If there is no event queued. ''' with self.lock: if not self.events: raise ValueError('no events queued') return self.events.popleft()" 459,"def pop_events(self): ''' Pop all events and return a `collections.deque` object. The returned container can be empty. This method is preferred over `pop_event()` as it is much faster as the lock has to be acquired only once and also avoids running into an infinite loop during event processing. ''' with self.lock: events = self.events self.events = collections.deque() return events" 460,"def clear(self): """""" Clears the queue. 
Note that calling #wait*( immediately after clear can still block when tasks are currently being processed since this method can only clear queued items. """""" self._tasks -= len(self._deque) self._deque.clear() notify_all(self)" 461,"def get(self, block=True, timeout=None, method='pop'): """""" If *block* is True, this method blocks until an element can be removed from the deque with the specified *method*. If *block* is False, the function will raise #Empty if no elements are available. # Arguments block (bool): #True to block and wait until an element becomes available, #False otherwise. timeout (number, None): The timeout in seconds to use when waiting for an element (only with `block=True`). method (str): The name of the method to use to remove an element from the queue. Must be either `'pop'` or `'popleft'`. # Raises ValueError: If *method* has an invalid value. Timeout: If the *timeout* is exceeded. """""" if method not in ('pop', 'popleft'): raise ValueError('method must be ""pop"" or ""popleft"": {0!r}'.format(method)) t_start = time.clock() while not self: if not block: raise self.Empty if timeout is None: wait(self) else: t_delta = time.clock() - t_start if t_delta > timeout: raise Timeout wait(self, timeout - t_delta) return getattr(self, method)()" 462,"def wait(self, timeout=None): """""" Waits until all tasks completed or *timeout* seconds passed. # Raises Timeout: If the *timeout* is exceeded. """""" t_start = time.clock() if not wait_for_condition(self, lambda s: s._tasks == 0, timeout): raise Timeout" 463,"def sleep(self): """""" Sleeps until the interval has passed since the last time this function was called. This is a synonym for #__call__(). The first time the function is called will return immediately and not block. Therefore, it is important to put the call at the beginning of the timed block, like this: # Example ```python clock = Clock(fps=50) while True: clock.sleep() # Processing ... ``` """""" current = time.time() if self.last < 0: self.last = current return delta = current - self.last if delta < self.seconds: time.sleep(self.seconds - delta) self.last = time.time()" 464,"def read_config(desired_type: Type[ConfigParser], file_object: TextIOBase, logger: Logger, *args, **kwargs) -> ConfigParser: """""" Helper method to read a configuration file according to the 'configparser' format, and return it as a dictionary of dictionaries (section > [property > value]) :param file_object: :return: """""" # see https://docs.python.org/3/library/configparser.html for details config = ConfigParser() config.read_file(file_object) return config" 465,"def get_default_config_parsers() -> List[AnyParser]: """""" Utility method to return the default parsers able to parse a dictionary from a file. :return: """""" return [SingleFileParserFunction(parser_function=read_config, streaming_mode=True, supported_exts={'.cfg', '.ini'}, supported_types={ConfigParser}), ]" 466,"def config_to_dict_of_dict(desired_type: Type[T], config: ConfigParser, logger: Logger, conversion_finder: ConversionFinder, **kwargs) -> DictOfDict: """""" Helper method to read a configuration file according to the 'configparser' format, and return it as a dictionary of dictionaries [section > [property > value]]. 
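A self-contained sketch of the section > [property > value] flattening described above, without the per-value type conversion step that `config_to_dict_of_dict` adds:

```python
from configparser import ConfigParser
from io import StringIO

config = ConfigParser()
config.read_file(StringIO("[server]\nhost = localhost\nport = 8080\n"))

dict_of_dict = {section: dict(props) for section, props in config.items()}
# {'DEFAULT': {}, 'server': {'host': 'localhost', 'port': '8080'}}
```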
:param file_object: :return: """""" # return dict(config) # get the base collection type if provided base_typ, discarded = _extract_collection_base_type(desired_type, exception_if_none=False) # if none, at least declare dict base_typ = base_typ or Dict # convert the whole config to a dictionary by flattening all sections. If a key is found twice in two different # sections an error is raised results = dict() for section, props in config.items(): # convert all values of the sub-dictionary results[section] = ConversionFinder.convert_collection_values_according_to_pep(props, base_typ, conversion_finder, logger, **kwargs) return results" 467,"def merge_all_config_sections_into_a_single_dict(desired_type: Type[T], config: ConfigParser, logger: Logger, conversion_finder: ConversionFinder, **kwargs) -> Dict[str, Any]: """""" Helper method to convert a 'configparser' into a dictionary [property > value]. Properties from all sections are collected. If the same key appears in several sections, an error will be thrown :param file_object: :return: """""" # convert the whole config to a dictionary by flattening all sections. If a key is found twice in two different # sections an error is raised results = dict() for section, props in config.items(): for key, value in props.items(): if key in results.keys(): # find all sections where it appears sections_where_it_appears = [s for s, p in config.items() if key in p.keys()] raise MultipleKeyOccurenceInConfigurationError.create(key, sections_where_it_appears) else: results[key] = value return ConversionFinder.convert_collection_values_according_to_pep(results, desired_type, conversion_finder, logger, **kwargs)" 468,"def get_default_config_converters(conv_finder: ConversionFinder) -> List[Union[Converter[Any, ConfigParser], Converter[ConfigParser, Any]]]: """""" Utility method to return the default converters associated to ConfigParser (from ConfigParser to other type, and from other type to ConfigParser) :return: """""" return [ConverterFunction(ConfigParser, DictOfDict, config_to_dict_of_dict, custom_name='config_to_dict_of_dict', function_args={'conversion_finder': conv_finder}), ConverterFunction(ConfigParser, dict, merge_all_config_sections_into_a_single_dict, custom_name='merge_all_config_sections_into_a_single_dict', function_args={'conversion_finder': conv_finder})]" 469,"def create(key_name: str, sections: List[str]): # -> NoParserFoundForObject: """""" Helper method provided because we actually can't put that in the constructor, it creates a bug in Nose tests https://github.com/nose-devs/nose/issues/725 :param key_name: :param sections: :return: """""" return MultipleKeyOccurenceInConfigurationError('Cannot read the provided config file as a flat dictionary : ' 'key \'' + key_name + '\' appears several times, in sections' '\'' + str(sections) + '\'.')" 470,"def logger(message, level=10): """"""Handle logging."""""" logging.getLogger(__name__).log(level, str(message))" 471,"async def get_data(self): """"""Get Tautulli data."""""" try: await self.get_session_data() await self.get_home_data() await self.get_users() await self.get_user_data() except (asyncio.TimeoutError, aiohttp.ClientError, socket.gaierror): msg = ""Can not load data from Tautulli."" logger(msg, 40)" 472,"async def get_session_data(self): """"""Get Tautulli sessions."""""" cmd = 'get_activity' url = self.base_url + cmd try: async with async_timeout.timeout(8, loop=self._loop): response = await self._session.get(url) logger(""Status from Tautulli: "" + str(response.status)) 
self.tautulli_session_data = await response.json() logger(self.tautulli_session_data) except (asyncio.TimeoutError, aiohttp.ClientError, socket.gaierror, AttributeError) as error: msg = ""Can not load data from Tautulli: {} - {}"".format(url, error) logger(msg, 40)" 473,"async def get_home_data(self): """"""Get Tautulli home stats."""""" cmd = 'get_home_stats' url = self.base_url + cmd data = {} try: async with async_timeout.timeout(8, loop=self._loop): request = await self._session.get(url) response = await request.json() for stat in response.get('response', {}).get('data', {}): if stat.get('stat_id') == 'top_movies': try: row = stat.get('rows', {})[0] data['movie'] = row.get('title') except (IndexError, KeyError): data['movie'] = None if stat.get('stat_id') == 'top_tv': try: row = stat.get('rows', {})[0] data['tv'] = row.get('title') except (IndexError, KeyError): data['tv'] = None if stat.get('stat_id') == 'top_users': try: row = stat.get('rows', {})[0] data['user'] = row.get('user') except (IndexError, KeyError): data['user'] = None logger(""Status from Tautulli: "" + str(request.status)) self.tautulli_home_data = data logger(self.tautulli_home_data) except (asyncio.TimeoutError, aiohttp.ClientError, socket.gaierror, AttributeError) as error: msg = ""Can not load data from Tautulli: {} - {}"".format(url, error) logger(msg, 40)" 474,"async def get_users(self): """"""Get Tautulli users."""""" cmd = 'get_users' url = self.base_url + cmd users = [] try: async with async_timeout.timeout(8, loop=self._loop): response = await self._session.get(url) logger(""Status from Tautulli: "" + str(response.status)) all_user_data = await response.json() for user in all_user_data['response']['data']: if user['username'] != 'Local': users.append(user['username']) self.tautulli_users = users logger(self.tautulli_users) except (asyncio.TimeoutError, aiohttp.ClientError, socket.gaierror, AttributeError) as error: msg = ""Can not load data from Tautulli: {} - {}"".format(url, error) logger(msg, 40)" 475,"async def get_user_data(self): """"""Get Tautulli userdata."""""" userdata = {} sessions = self.session_data.get('sessions', {}) try: async with async_timeout.timeout(8, loop=self._loop): for username in self.tautulli_users: userdata[username] = {} userdata[username]['Activity'] = None for session in sessions: if session['username'].lower() == username.lower(): userdata[username]['Activity'] = session['state'] for key in session: if key != 'Username': userdata[username][key] = session[key] break self.tautulli_user_data = userdata except (asyncio.TimeoutError, aiohttp.ClientError, KeyError): msg = ""Can not load data from Tautulli."" logger(msg, 40)" 476,"def import_string(impstr, attr=None): """"""Imports a string. Can import an attribute of the imported class/module using a double colon as a separator """""" if ""::"" in impstr: impstr, attr = impstr.split(""::"") imported = wz_import_string(impstr) if attr is not None: return getobjpath(imported, attr) return imported" 477,"def getobjpath(obj, path): """"""Returns an item or attribute of the object recursively. Item names are specified between brackets, eg: [item]. 
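A worked example of the path syntax documented above (`attr1.attr2[item].attr3`), using `getobjpath` as defined in this snippet on a small ad-hoc object graph:

```python
class Leaf:
    attr3 = 'value'

class Middle:
    attr2 = {'item': Leaf()}

class Root:
    attr1 = Middle()

print(getobjpath(Root(), 'attr1.attr2[item].attr3'))   # -> 'value'
```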
Attribute names are prefixed with a dot (the first one is optional), eg: .attr Example: getobjpath(obj, ""attr1.attr2[item].attr3"") """""" if not path: return obj if path.startswith(""[""): item = path[1:path.index(""]"")] return getobjpath(obj[item], path[len(item) + 2:]) if path.startswith("".""): path = path[1:] if ""."" in path or ""["" in path: dot_idx = path.find(""."") bracket_idx = path.find(""["") if dot_idx == -1 or bracket_idx < dot_idx: idx = bracket_idx next_idx = idx else: idx = dot_idx next_idx = idx + 1 attr = path[:idx] return getobjpath(getattr(obj, attr), path[next_idx:]) return getattr(obj, path)" 478,"def find_classes_in_module(module, clstypes): """"""Find classes of clstypes in module """""" classes = [] for item in dir(module): item = getattr(module, item) try: for cls in clstypes: if issubclass(item, cls) and item != cls: classes.append(item) except Exception as e: pass return classes" 479,"def remove_yaml_frontmatter(source, return_frontmatter=False): """"""If there's one, remove the YAML front-matter from the source """""" if source.startswith(""---\n""): frontmatter_end = source.find(""\n---\n"", 4) if frontmatter_end == -1: frontmatter = source source = """" else: frontmatter = source[0:frontmatter_end] source = source[frontmatter_end + 5:] if return_frontmatter: return (source, frontmatter) return source if return_frontmatter: return (source, None) return source" 480,"def populate_obj(obj, attrs): """"""Populates an object's attributes using the provided dict """""" for k, v in attrs.iteritems(): setattr(obj, k, v)" 481,"def insert_element_to_dict_of_list(dict_of_list, key, parser): """""" Utility method :param dict_of_list: :param key: :param parser: :return: """""" if key in dict_of_list.keys(): dict_of_list[key].append(parser) else: dict_of_list[key] = [parser]" 482,"def insert_element_to_dict_of_dicts_of_list(dict_of_dict_of_list, first_key, second_key, parser): """""" Utility method :param dict_of_dict_of_list: :param first_key: :param second_key: :param parser: :return: """""" list_to_insert = parser if isinstance(parser, list) else [parser] if first_key not in dict_of_dict_of_list.keys(): dict_of_dict_of_list[first_key] = {second_key: list_to_insert} else: if second_key not in dict_of_dict_of_list[first_key].keys(): dict_of_dict_of_list[first_key][second_key] = list_to_insert else: dict_of_dict_of_list[first_key][second_key] += list_to_insert" 483,"def insert_element_to_dict_of_dicts(dict_of_dicts: Dict[str, Dict[str, str]], first_key: str, second_key: str, contents): """""" Utility method :param dict_of_dicts: :param first_key: :param second_key: :param contents: :return: """""" if first_key not in dict_of_dicts.keys(): dict_of_dicts[first_key] = {second_key: contents} else: if second_key not in dict_of_dicts[first_key].keys(): dict_of_dicts[first_key][second_key] = contents else: warn('Overriding contents for ' + first_key + '/' + second_key) dict_of_dicts[first_key][second_key] = contents" 484,"def build_parser_for_fileobject_and_desiredtype(self, obj_on_filesystem: PersistedObject, object_type: Type[T], logger: Logger = None) -> Parser: """""" Returns the most appropriate parser to use to parse object obj_on_filesystem as an object of type object_type :param obj_on_filesystem: the filesystem object to parse :param object_type: the type of object that the parser is expected to produce :param logger: :return: """""" pass" 485,"def create(obj: PersistedObject, obj_type: Type[T], extensions_supported: Iterable[str]): """""" Helper method provided 
because we actually can't put that in the constructor, it creates a bug in Nose tests https://github.com/nose-devs/nose/issues/725 :param obj: :param obj_type: :param extensions_supported: :return: """""" # base message msg = ""{obj} cannot be parsed as a {typ} because no parser supporting that extension ({ext}) is able to "" \ ""create this type of object."" \ """".format(obj=obj, typ=get_pretty_type_str(obj_type), ext=obj.get_pretty_file_ext()) # add details if extensions_supported is not None and len(extensions_supported) > 0: msg += "" If you wish to parse this fileobject to that precise type, you may wish to either "" \ ""(1) replace the file with any of the following extensions currently supported : {exts} "" \ ""(see get_capabilities_for_type({typ}, strict_type_matching=False) for details)."" \ "" Or (2) register a new parser."" \ """".format(exts=extensions_supported, typ=get_pretty_type_str(obj_type)) else: raise ValueError('extensions_supported should be provided to create a NoParserFoundForObjectExt. If no ' 'extension is supported, use NoParserFoundForObjectType.create instead') e = NoParserFoundForObjectExt(msg) # save the extensions supported e.extensions_supported = extensions_supported return e" 486,"def create(obj: PersistedObject, obj_type: Type[T], types_supported: Iterable[str]): """""" Helper method provided because we actually can't put that in the constructor, it creates a bug in Nose tests https://github.com/nose-devs/nose/issues/725 :param obj: :param obj_type: :param types_supported: :return: """""" # base message msg = str(obj) + ' cannot be parsed as a ' + get_pretty_type_str(obj_type) + ' because no parser supporting ' \ 'that type is registered for ' + obj.get_pretty_file_ext() + '.\n' # add details if types_supported is not None and len(types_supported) > 0: msg += ' If you wish to parse this object from this extension, you may wish to parse it as one of the ' \ 'following supported types : ' + str(types_supported) + '. \n' \ + 'Otherwise, please register a new parser for type ' + get_pretty_type_str(obj_type) \ + ' and extension ' + obj.get_pretty_file_ext() + '\n Reminder: use print_capabilities_by_ext()' \ + ' and print_capabilities_by_type() to diagnose what are the parsers available' else: raise ValueError('extensions_supported should be provided to create a NoParserFoundForObjectExt. If no ' 'extension is supported, use NoParserFoundForObjectType.create instead') e = NoParserFoundForObjectType(msg) # save the extensions supported e.types_supported = types_supported return e" 487,"def create(obj: PersistedObject, obj_type: Type[T], errors: Dict[Type, Exception]): """""" Helper method provided because we actually can't put that in the constructor, it creates a bug in Nose tests https://github.com/nose-devs/nose/issues/725 :param obj: :param errors: a dictionary of the errors raised for each alternate type tried :return: """""" e = NoParserFoundForUnionType('{obj} cannot be parsed as a {typ} because no parser could be found for any of ' 'the alternate types. Caught exceptions: {errs}' ''.format(obj=obj, typ=get_pretty_type_str(obj_type), errs=errors)) # save the errors e.errors = errors return e" 488,"def register_parsers(self, parsers: List[Parser]): """""" Utility method to register any list of parsers. 
:return: """""" check_var(parsers, var_types=list, var_name='parsers') for parser in parsers: self.register_parser(parser)" 489,"def print_capabilities_by_ext(self, strict_type_matching: bool = False): """""" Used to print the list of all file extensions that can be parsed by this parser registry. :return: """""" print('\nCapabilities by file extension: ') l = self.get_capabilities_by_ext(strict_type_matching=strict_type_matching) pprint({ext: get_pretty_type_keys_dict(parsers) for ext, parsers in l.items()}) print('\n')" 490,"def print_capabilities_by_type(self, strict_type_matching: bool = False): """""" Used to print the list of all file extensions that can be parsed by this parser registry. :return: """""" print('\nCapabilities by object type: ') l = self.get_capabilities_by_type(strict_type_matching=strict_type_matching) pprint({get_pretty_type_str(typ): parsers for typ, parsers in l.items()}) print('\n')" 491,"def get_capabilities_by_type(self, strict_type_matching: bool = False) -> Dict[Type, Dict[str, Dict[str, Parser]]]: """""" For all types that are supported, lists all extensions that can be parsed into such a type. For each extension, provides the list of parsers supported. The order is ""most pertinent first"" This method is for monitoring and debug, so we prefer to not rely on the cache, but rather on the query engine. That will ensure consistency of the results. :param strict_type_matching: :return: """""" check_var(strict_type_matching, var_types=bool, var_name='strict_matching') res = dict() # List all types that can be parsed for typ in self.get_all_supported_types(): res[typ] = self.get_capabilities_for_type(typ, strict_type_matching) return res" 492,"def get_capabilities_by_ext(self, strict_type_matching: bool = False) -> Dict[str, Dict[Type, Dict[str, Parser]]]: """""" For all extensions that are supported, lists all types that can be parsed from this extension. For each type, provide the list of parsers supported. The order is ""most pertinent first"" This method is for monitoring and debug, so we prefer to not rely on the cache, but rather on the query engine. That will ensure consistency of the results. :param strict_type_matching: :return: """""" check_var(strict_type_matching, var_types=bool, var_name='strict_matching') res = dict() # For all extensions that are supported, for ext in self.get_all_supported_exts_for_type(type_to_match=JOKER, strict=strict_type_matching): res[ext] = self.get_capabilities_for_ext(ext, strict_type_matching) return res" 493,"def get_capabilities_for_ext(self, ext, strict_type_matching: bool = False) -> Dict[Type, Dict[str, Parser]]: """""" Utility method to return, for a given file extension, all known ways to parse a file with this extension, organized by target object type. :param ext: :param strict_type_matching: :return: """""" r = dict() # List all types that can be parsed from this extension. 
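A hypothetical diagnostic session against the registry API shown above; `registry` stands for whatever parser-registry instance is in use, and the extension and type shown in the comment are placeholders:

```python
registry.print_capabilities_by_ext()     # extension -> type -> ranked parsers
registry.print_capabilities_by_type()    # type -> extension -> ranked parsers

caps = registry.get_capabilities_for_ext('.cfg')
# e.g. {ConfigParser: {'1_exact_match': [...], '3_generic': [...]}}
```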
for typ in self.get_all_supported_types_for_ext(ext): # Use the query to fill matching = self.find_all_matching_parsers(strict_type_matching, desired_type=typ, required_ext=ext)[0] # matching_list = matching[0] + matching[1] + matching[2] # insert_element_to_dict_of_dicts_of_list(res, ext, typ, list(reversed(matching_list))) r[typ] = dict() exact = list(reversed(matching[2])) if len(exact) > 0: r[typ]['1_exact_match'] = exact approx = list(reversed(matching[1])) if len(approx) > 0: r[typ]['2_approx_match'] = approx generic = list(reversed(matching[0])) if len(generic) > 0: r[typ]['3_generic'] = generic # insert_element_to_dict_of_dicts(res, ext, typ, matching_dict) return r" 494,"def register_parser(self, parser: Parser): """""" Utility method to register any parser. Parsers that support any type will be stored in the ""generic"" list, and the others will be stored in front of the types they support :return: """""" check_var(parser, var_types=Parser, var_name='parser') if (not parser.supports_multifile()) and (not parser.supports_singlefile()): # invalid raise _InvalidParserException.create(parser) # (0) sanity check : check that parser handles jokers properly res = parser.is_able_to_parse_detailed(desired_type=JOKER, desired_ext=JOKER, strict=True) if not (res[0] is True and res[1] is None): raise ValueError('Parser ' + str(parser) + ' can not be registered since it does not handle the JOKER cases ' 'correctly') # (1) store in the main lists if parser.is_generic(): self._generic_parsers.append(parser) else: self._specific_parsers.append(parser) # (2) simpler : simply store the ext <> type maps for ext in parser.supported_exts: for typ in parser.supported_types: insert_element_to_dict_of_list(self._strict_types_to_ext, typ, ext) insert_element_to_dict_of_list(self._ext_to_strict_types, ext, typ)" 495,"def get_all_parsers(self, strict_type_matching: bool = False) -> List[Parser]: """""" Returns the list of all parsers in order of relevance. :return: """""" matching = self.find_all_matching_parsers(strict=strict_type_matching)[0] # matching[1] (approx match) is supposed to be empty since we use a joker on type and a joker on ext : only # exact and generic match should exist, no approx match if len(matching[1]) > 0: raise Exception('Internal error - this matching[1] list is supposed to be empty for such a query') return matching[0] + matching[2]" 496,"def get_all_supported_types_for_ext(self, ext_to_match: str, strict_type_matching: bool = False) -> Set[Type]: """""" Utility method to return the set of all supported types that may be parsed from files with the given extension. ext=JOKER is a joker that means all extensions :param ext_to_match: :param strict_type_matching: :return: """""" matching = self.find_all_matching_parsers(required_ext=ext_to_match, strict=strict_type_matching)[0] return {typ for types in [p.supported_types for p in (matching[0] + matching[1] + matching[2])] for typ in types}" 497,"def get_all_supported_exts_for_type(self, type_to_match: Type[Any], strict: bool) -> Set[str]: """""" Utility method to return the set of all supported file extensions that may be converted to objects of the given type. 
type=JOKER is a joker that means all types :param type_to_match: :param strict: :return: """""" matching = self.find_all_matching_parsers(desired_type=type_to_match, strict=strict)[0] return {ext for exts in [p.supported_exts for p in (matching[0] + matching[1] + matching[2])] for ext in exts}" 498,"def find_all_matching_parsers(self, strict: bool, desired_type: Type[Any] = JOKER, required_ext: str = JOKER) \ -> Tuple[Tuple[List[Parser], List[Parser], List[Parser]], List[Parser], List[Parser], List[Parser]]: """""" Implementation of the parent method by lookin into the registry to find the most appropriate parsers to use in order :param strict: :param desired_type: the desired type, or 'JOKER' for a wildcard :param required_ext: :return: match=(matching_parsers_generic, matching_parsers_approx, matching_parsers_exact), no_type_match_but_ext_match, no_ext_match_but_type_match, no_match """""" # if desired_type is JOKER and required_ext is JOKER: # # Easy : return everything (GENERIC first, SPECIFIC then) in order (make a copy first :) ) # matching_parsers_generic = self._generic_parsers.copy() # matching_parsers_approx = [] # matching_parsers_exact = self._specific_parsers.copy() # no_type_match_but_ext_match = [] # no_ext_match_but_type_match = [] # no_match = [] # else: # # Although the above could be thought as an easy way to accelerate the process, it does not any more since the # JOKER special cases are handled in parser.is_able_to_parse and converter.is_able_to_convert functions. # # It was also dangerous since it prevented us to get consistency across views - hence parser/converter # implementors could get the feeling that their parser was correctly registered where it wasn't check_var(strict, var_types=bool, var_name='strict') # first transform any 'Any' type requirement into the official class for that desired_type = get_validated_type(desired_type, 'desired_type', enforce_not_joker=False) matching_parsers_generic = [] matching_parsers_approx = [] matching_parsers_exact = [] no_type_match_but_ext_match = [] no_ext_match_but_type_match = [] no_match = [] # handle generic parsers first - except if desired type is Any for p in self._generic_parsers: match = p.is_able_to_parse(desired_type=desired_type, desired_ext=required_ext, strict=strict) if match: # match if is_any_type(desired_type): # special case : what is required is Any, so put in exact match matching_parsers_exact.append(p) else: matching_parsers_generic.append(p) else: # check if by releasing the constraint on ext it makes a match if p.is_able_to_parse(desired_type=desired_type, desired_ext=JOKER, strict=strict): no_ext_match_but_type_match.append(p) else: # there will be no way to use this: it is a generic parser that is not able to parse this type... 
# no_type_match_but_ext_match.append(p) pass # then the specific for p in self._specific_parsers: match, exact_match = p.is_able_to_parse_detailed(desired_type=desired_type, desired_ext=required_ext, strict=strict) if match: if is_any_type(desired_type): # special case: dont register as a type match no_type_match_but_ext_match.append(p) else: if exact_match is None or exact_match: matching_parsers_exact.append(p) else: matching_parsers_approx.append(p) else: # try to set the type to a supported type to see if that makes a match if p.is_able_to_parse(desired_type=JOKER, desired_ext=required_ext, strict=strict): no_type_match_but_ext_match.append(p) # try to set the ext to a supported ext to see if that makes a match elif p.is_able_to_parse(desired_type=desired_type, desired_ext=JOKER, strict=strict): no_ext_match_but_type_match.append(p) # no match at all else: no_match.append(p) return (matching_parsers_generic, matching_parsers_approx, matching_parsers_exact), \ no_type_match_but_ext_match, no_ext_match_but_type_match, no_match" 499,"def _create_parsing_plan(self, desired_type: Type[T], filesystem_object: PersistedObject, logger: Logger, log_only_last: bool = False) -> ParsingPlan[T]: """""" Implementation of Parser API Relies on the underlying registry of parsers to provide the best parsing plan :param desired_type: :param filesystem_object: :param logger: :param log_only_last: a flag to only log the last part of the file path (default False) :return: """""" # find the parser for this object t, combined_parser = self.build_parser_for_fileobject_and_desiredtype(filesystem_object, desired_type, logger=logger) # ask the parser for the parsing plan return combined_parser.create_parsing_plan(t, filesystem_object, logger)" 500,"def build_parser_for_fileobject_and_desiredtype(self, obj_on_filesystem: PersistedObject, object_type: Type[T], logger: Logger = None) -> Tuple[Type, Parser]: """""" Builds from the registry, a parser to parse object obj_on_filesystem as an object of type object_type. To do that, it iterates through all registered parsers in the list in reverse order (last inserted first), and checks if they support the provided object format (single or multifile) and type. If several parsers match, it returns a cascadingparser that will try them in order. If several alternatives are requested (through a root Union type), this is done independently for each alternative. :param obj_on_filesystem: :param object_type: :param logger: :return: a type to use and a parser. 
The type to use is either directly the one provided, or a resolved one in case of TypeVar """""" # First resolve TypeVars and Unions to get a list of compliant types object_types = get_alternate_types_resolving_forwardref_union_and_typevar(object_type) if len(object_types) == 1: # One type: proceed as usual parsers = self._build_parser_for_fileobject_and_desiredtype(obj_on_filesystem, object_typ=object_types[0], logger=logger) if len(parsers) > 1: return object_types[0], CascadingParser(parsers) else: return next(iter(parsers.items())) else: # Several alternate types are supported: try to build a parser for each parsers = OrderedDict() errors = OrderedDict() for typ in object_types: try: parsers.update(self._build_parser_for_fileobject_and_desiredtype(obj_on_filesystem, object_typ=typ, logger=logger)) except NoParserFoundForObjectExt as e: logger.warning(""{} - {}"".format(type(e).__name__, e)) errors[e] = e except NoParserFoundForObjectType as f: logger.warning(""{} - {}"".format(type(f).__name__, f)) errors[f] = f # Combine if there are remaining, otherwise raise if len(parsers) > 0: return object_type, CascadingParser(parsers) else: raise NoParserFoundForUnionType.create(obj_on_filesystem, object_type, errors)" 501,"def _build_parser_for_fileobject_and_desiredtype(self, obj_on_filesystem: PersistedObject, object_typ: Type[T], logger: Logger = None) -> Dict[Type, Parser]: """""" Builds a parser for each subtype of object_typ :param obj_on_filesystem: :param object_typ: :param logger: :return: """""" parsers = OrderedDict() errors = OrderedDict() try: p = self.__build_parser_for_fileobject_and_desiredtype(obj_on_filesystem, object_typ=object_typ, logger=logger) parsers[object_typ] = p except NoParserFoundForObjectExt as e: logger.warning(""{} - {}"".format(type(e).__name__, e)) errors[e] = e except NoParserFoundForObjectType as f: logger.warning(""{} - {}"".format(type(f).__name__, f)) errors[f] = f # do not explore subclasses for collections if is_collection(object_typ, strict=True): if len(errors) > 0: raise next(iter(errors.values())) else: return parsers # Finally create one such parser for each subclass subclasses = get_all_subclasses(object_typ) # Then for each subclass also try (with a configurable limit in nb of subclasses) for subclass in subclasses[0:GLOBAL_CONFIG.dict_to_object_subclass_limit]: try: parsers[subclass] = self.__build_parser_for_fileobject_and_desiredtype(obj_on_filesystem, object_typ=subclass, logger=logger) except NoParserFoundForObjectExt as e: logger.warning(""{} - {}"".format(type(e).__name__, e)) errors[e] = e except NoParserFoundForObjectType as f: logger.warning(""{} - {}"".format(type(f).__name__, f)) errors[f] = f if len(subclasses) > GLOBAL_CONFIG.dict_to_object_subclass_limit: warn('Type {} has more than {} subclasses, only {} were tried to convert it, with no success. You ' 'can raise this limit by setting the appropriate option with `parsyfiles_global_config()`' ''.format(object_typ, len(subclasses), GLOBAL_CONFIG.dict_to_object_subclass_limit)) return parsers" 502,"def __build_parser_for_fileobject_and_desiredtype(self, obj_on_filesystem: PersistedObject, object_typ: Type[T], logger: Logger = None) -> Parser: """""" Builds from the registry, a parser to parse object obj_on_filesystem as an object of type object_type. To do that, it iterates through all registered parsers in the list in reverse order (last inserted first), and checks if they support the provided object format (single or multifile) and type. 
If several parsers match, it returns a cascadingparser that will try them in order. :param obj_on_filesystem: :param object_typ: :param logger: :return: """""" # first remove any non-generic customization object_type = get_base_generic_type(object_typ) # find all matching parsers for this matching, no_type_match_but_ext_match, no_ext_match_but_type_match, no_match = \ self.find_all_matching_parsers(strict=self.is_strict, desired_type=object_type, required_ext=obj_on_filesystem.ext) matching_parsers = matching[0] + matching[1] + matching[2] if len(matching_parsers) == 0: # No match. Do we have a close match ? (correct type, but not correct extension ?) if len(no_ext_match_but_type_match) > 0: raise NoParserFoundForObjectExt.create(obj_on_filesystem, object_type, set([ext_ for ext_set in [p.supported_exts for p in no_ext_match_but_type_match] for ext_ in ext_set])) else: # no, no match at all raise NoParserFoundForObjectType.create(obj_on_filesystem, object_type, set([typ_ for typ_set in [p.supported_types for p in no_type_match_but_ext_match] for typ_ in typ_set])) elif len(matching_parsers) == 1: # return the match directly return matching_parsers[0] else: # return a cascade of all parsers, in reverse order (since last is our preferred one) # print('----- WARNING : Found several parsers able to parse this item. Combining them into a cascade.') return CascadingParser(list(reversed(matching_parsers)))" 503,"def create(att_name: str, parsed_att: S, attribute_type: Type[T], caught_exec: Dict[Converter[S, T], Exception]): """""" Helper method provided because we actually can't put that in the constructor, it creates a bug in Nose tests https://github.com/nose-devs/nose/issues/725 :param att_name: :param parsed_att: :param attribute_type: :param caught_exec: :return: """""" base_msg = ""Error while trying to convert value for attribute '{a}' to type <{t}>:\n"" \ "" - parsed value is : '{v}' of type <{tv}>\n"" \ """".format(a=str(att_name), t=get_pretty_type_str(attribute_type), v=parsed_att, tv=get_pretty_type_str(type(parsed_att))) msg = StringIO() if len(list(caught_exec.keys())) > 0: msg.writelines(' - converters tried are : \n * ') msg.writelines('\n * '.join([str(converter) for converter in caught_exec.keys()])) msg.writelines(' \n Caught the following exceptions: \n') for converter, err in caught_exec.items(): msg.writelines('--------------- From ' + str(converter) + ' caught: \n') print_error_to_io_stream(err, msg) msg.write('\n') return AttrConversionException(base_msg + msg.getvalue())" 504,"def create(conversion_finder, parsed_att: Any, attribute_type: Type[Any], errors: Dict[Type, Exception] = None): """""" Helper method provided because we actually can't put that in the constructor, it creates a bug in Nose tests https://github.com/nose-devs/nose/issues/725 :param parsed_att: :param attribute_type: :param conversion_finder: :return: """""" if conversion_finder is None: msg = ""No conversion finder provided to find a converter between parsed attribute '{patt}' of type "" \ ""'{typ}' and expected type '{expt}'."".format(patt=str(parsed_att), typ=get_pretty_type_str(type(parsed_att)), expt=get_pretty_type_str(attribute_type)) else: msg = ""No conversion chain found between parsed attribute '{patt}' of type '{typ}' and expected type "" \ ""'{expt}' using conversion finder {conv}."".format(patt=parsed_att, typ=get_pretty_type_str(type(parsed_att)), expt=get_pretty_type_str(attribute_type), conv=conversion_finder) if errors is not None: msg = msg + ' ' + str(errors) return 
NoConverterFoundForObjectType(msg)" 505,"def get_all_conversion_chains_to_type(self, to_type: Type[Any])\ -> Tuple[List[Converter], List[Converter], List[Converter]]: """""" Utility method to find all converters to a given type :param to_type: :return: """""" return self.get_all_conversion_chains(to_type=to_type)" 506,"def get_all_conversion_chains_from_type(self, from_type: Type[Any]) \ -> Tuple[List[Converter], List[Converter], List[Converter]]: """""" Utility method to find all converters from a given type. :param from_type: :return: """""" return self.get_all_conversion_chains(from_type=from_type)" 507,"def get_all_conversion_chains(self, from_type: Type[Any] = JOKER, to_type: Type[Any] = JOKER)\ -> Tuple[List[Converter], List[Converter], List[Converter]]: """""" Utility method to find all converters or conversion chains matching the provided query. :param from_type: a required type of input object, or JOKER for 'wildcard'(*) . WARNING: ""from_type=AnyObject/object/Any"" means ""all converters able to source from anything"", which is different from ""from_type=JOKER"" which means ""all converters whatever their source type"". :param to_type: a required type of output object, or JOKER for 'wildcard'(*) . WARNING: ""to_type=AnyObject/object/Any"" means ""all converters able to produce any type of object"", which is different from ""to_type=JOKER"" which means ""all converters whatever type they are able to produce"". :return: a tuple of lists of matching converters, by type of *dest_type* match : generic, approximate, exact """""" pass" 508,"def find_and_convert(self, attr_name: str, attr_value: S, desired_attr_type: Type[T], logger: Logger, options: Dict[str, Dict[str, Any]]) -> T: """""" Utility method to convert some value into the desired type. It relies on get_all_conversion_chains to find the converters, and apply them in correct order :return: """""" if robust_isinstance(attr_value, desired_attr_type) and not is_collection(desired_attr_type): # value is already of the correct type return attr_value else: # try to find conversion chains generic, approx, exact = self.get_all_conversion_chains(type(attr_value), desired_attr_type) all_chains = generic + approx + exact if len(all_chains) > 0: all_errors = dict() for chain in reversed(all_chains): try: return chain.convert(desired_attr_type, attr_value, logger, options) except Exception as e: all_errors[chain] = e raise AttrConversionException.create(attr_name, attr_value, desired_attr_type, all_errors) else: # did not find any conversion chain raise NoConverterFoundForObjectType.create(self, attr_value, desired_attr_type)" 509,"def convert_collection_values_according_to_pep(coll_to_convert: Union[Dict, List, Set, Tuple], desired_type: Type[T], conversion_finder: 'ConversionFinder', logger: Logger, **kwargs) \ -> T: """""" Helper method to convert the values of a collection into the required (pep-declared) value type in desired_type. 
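For example (illustrative): a parsed ['1', '2'] requested as List[int] is returned as a new list [1, 2], each item converted through the provided conversion_finder.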
If desired_type does not explicitly mention a type for its values, the collection will be returned as is, otherwise a copy will be created and filled with conversions of the values, performed by the provided conversion_finder :param coll_to_convert: :param desired_type: :param conversion_finder: :param logger: :param kwargs: :return: """""" base_desired_type = get_base_generic_type(desired_type) if issubclass(base_desired_type, Mapping): # or issubclass(base_desired_type, dict): # get the base collection type if provided (this raises an error if key type is not str) item_typ, _ = _extract_collection_base_type(desired_type, exception_if_none=False) if item_typ is None: # nothing is required in terms of dict values: use the base method return ConversionFinder.try_convert_value(conversion_finder, '', coll_to_convert, desired_type, logger=logger, options=kwargs) else: # TODO resuse appropriate container type (not necessary a dict) according to type of coll_to_convert # there is a specific type required for the dict values. res = dict() # convert if required for key, val in coll_to_convert.items(): res[key] = ConversionFinder.try_convert_value(conversion_finder, key, val, item_typ, logger, options=kwargs) return res elif issubclass(base_desired_type, Sequence): # or issubclass(base_desired_type, list): # get the base collection type if provided item_typ, _ = _extract_collection_base_type(desired_type, exception_if_none=False) if item_typ is None: # nothing is required in terms of dict values: use the base method return ConversionFinder.try_convert_value(conversion_finder, '', coll_to_convert, desired_type, logger=logger, options=kwargs) else: # TODO resuse appropriate container type (not necessary a list) according to type of coll_to_convert # there is a specific type required for the list values. res = list() # special case where base_desired_type is a Tuple: in that case item_typ may be a tuple or else if type(item_typ) != tuple: # convert each item if required for val in coll_to_convert: res.append(ConversionFinder.try_convert_value(conversion_finder, '', val, item_typ, logger, options=kwargs)) else: if len(item_typ) == 1: item_typ_tuple = item_typ * len(coll_to_convert) elif len(item_typ) == len(coll_to_convert): item_typ_tuple = item_typ else: raise ValueError('Collection to convert is of length {} which is not compliant with desired ' 'type {}'.format(len(coll_to_convert), item_typ)) for val, item_t in zip(coll_to_convert, item_typ_tuple): res.append(ConversionFinder.try_convert_value(conversion_finder, '', val, item_t, logger, options=kwargs)) res = tuple(res) return res elif issubclass(base_desired_type, AbstractSet): # or issubclass(base_desired_type, set): # get the base collection type if provided item_typ, _ = _extract_collection_base_type(desired_type, exception_if_none=False) if item_typ is None: # nothing is required in terms of dict values: use the base method return ConversionFinder.try_convert_value(conversion_finder, '', coll_to_convert, desired_type, logger=logger, options=kwargs) else: # TODO resuse appropriate container type (not necessary a set) according to type of coll_to_convert # there is a specific type required for the list values. res = set() # convert if required for val in coll_to_convert: res.add(ConversionFinder.try_convert_value(conversion_finder, '', val, item_typ, logger, options=kwargs)) return res else: raise TypeError('Cannot convert collection values, expected type is not a supported collection ' '(dict, list, set, Mapping, Sequence, AbstractSet)! 
: ' + str(desired_type))" 510,"def _try_convert_value(conversion_finder, attr_name: str, attr_value: S, desired_attr_type: Type[T], logger: Logger, options: Dict[str, Dict[str, Any]]) -> T: """""" Utility method to try to use provided conversion_finder to convert attr_value into desired_attr_type. If no conversion is required, the conversion finder is not even used (it can be None) :param conversion_finder: :param attr_name: :param attr_value: :param desired_attr_type: :param logger: :param options: :return: """""" # check if we need additional conversion # (a) a collection with details about the internal item type if is_typed_collection(desired_attr_type): return ConversionFinder.convert_collection_values_according_to_pep(coll_to_convert=attr_value, desired_type=desired_attr_type, conversion_finder=conversion_finder, logger=logger, **options) # --- typing types do not work with isinstance so there is a special check here elif not robust_isinstance(attr_value, desired_attr_type): if conversion_finder is not None: return conversion_finder.find_and_convert(attr_name, attr_value, desired_attr_type, logger, options) else: raise NoConverterFoundForObjectType.create(conversion_finder, attr_value, desired_attr_type) else: # we can safely use the value: it is already of the correct type return attr_value" 511,"def register_converter(self, converter: Converter[S, T]): """""" Utility method to register any converter. Converters that support any type will be stored in the ""generic"" lists, and the others will be stored in front of the types they support :return: """""" check_var(converter, var_types=Converter, var_name='converter') # (0) sanity check : check that parser handles jokers properly res = converter.is_able_to_convert_detailed(from_type=JOKER, to_type=JOKER, strict=True) if not (res[0] is True and res[1] is None and res[2] is None): raise ValueError('Converter ' + str(converter) + ' can not be registered since it does not handle the JOKER' ' cases correctly') # compute all possible chains and save them generic_chains, generic_nonstrict_chains, specific_chains, specific_nonstrict_chains \ = self._create_all_new_chains(converter) self._generic_nonstrict_conversion_chains += generic_nonstrict_chains self._generic_conversion_chains += generic_chains self._specific_non_strict_conversion_chains += specific_nonstrict_chains self._specific_conversion_chains += specific_chains # sort all lists by length self._generic_nonstrict_conversion_chains = sorted(self._generic_nonstrict_conversion_chains, key=len, reverse=True) self._generic_conversion_chains = sorted(self._generic_conversion_chains, key=len, reverse=True) self._specific_non_strict_conversion_chains = sorted(self._specific_non_strict_conversion_chains, key=len, reverse=True) self._specific_conversion_chains = sorted(self._specific_conversion_chains, key=len, reverse=True)" 512,"def _create_all_new_chains(self, converter) -> Tuple[List[Converter], List[Converter], List[Converter], List[Converter]]: """""" Creates all specific and generic chains that may be built by adding this converter to the existing chains. 
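For example (illustrative): if a chain converting A -> B is already registered, registering a converter B -> C can yield the new chain A -> B -> C; similarly, the new converter may be prepended to existing chains it is able to feed into.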
:param converter: :return: generic_chains, generic_nonstrict_chains, specific_chains, specific_nonstrict_chains """""" specific_chains, specific_nonstrict_chains, generic_chains, generic_nonstrict_chains = [], [], [], [] if converter.is_generic(): # the smaller chain : the singleton :) generic_chains.append(ConversionChain(initial_converters=[converter], strict_chaining=True)) else: specific_chains.append(ConversionChain(initial_converters=[converter], strict_chaining=True)) # 1) create new specific chains by adding this converter at the beginning or end of all *non-generic* ones # -- non-strict new_c_at_end_ns = [] new_c_at_beginning_ns = [] if not self.strict: # then there are non-strict chains already. Try to connect to them for existing_specific_nonstrict in self._specific_non_strict_conversion_chains: if converter.can_be_appended_to(existing_specific_nonstrict, strict=False): if ConversionChain.are_worth_chaining(existing_specific_nonstrict, converter): new_c_at_end_ns.append(ConversionChain.chain(existing_specific_nonstrict, converter, strict=False)) if existing_specific_nonstrict.can_be_appended_to(converter, strict=False): if ConversionChain.are_worth_chaining(converter, existing_specific_nonstrict): new_c_at_beginning_ns.append(ConversionChain.chain(converter, existing_specific_nonstrict, strict=False)) # -- strict new_c_at_end = [] new_c_at_beginning = [] for existing_specific in self._specific_conversion_chains: # first try *strict* mode if converter.can_be_appended_to(existing_specific, strict=True): if ConversionChain.are_worth_chaining(existing_specific, converter): new_c_at_end.append(ConversionChain.chain(existing_specific, converter, strict=True)) elif (not self.strict) and converter.can_be_appended_to(existing_specific, strict=False): if ConversionChain.are_worth_chaining(existing_specific, converter): new_c_at_end_ns.append(ConversionChain.chain(existing_specific, converter, strict=False)) if existing_specific.can_be_appended_to(converter, strict=True): if ConversionChain.are_worth_chaining(converter, existing_specific): # TODO this is where when chaining a generic to a specific, we would actually have to restrict it # note: but maybe we dont care since now this is checked and prevented in the convert() method new_c_at_beginning.append(ConversionChain.chain(converter, existing_specific, strict=True)) elif (not self.strict) and existing_specific.can_be_appended_to(converter, strict=False): if ConversionChain.are_worth_chaining(converter, existing_specific): # TODO this is where when chaining a generic to a specific, we would actually have to restrict it # note: but maybe we dont care since now this is checked and prevented in the convert() method new_c_at_beginning_ns.append(ConversionChain.chain(converter, existing_specific, strict=False)) # append to the right list depending on the nature of this converter if converter.is_generic(): generic_chains += new_c_at_end generic_nonstrict_chains += new_c_at_end_ns else: specific_chains += new_c_at_end specific_nonstrict_chains += new_c_at_end_ns # common for both types specific_chains += new_c_at_beginning specific_nonstrict_chains += new_c_at_beginning_ns # by combining all created chains into a big one for a in new_c_at_end: for b in new_c_at_beginning: b_ = b.remove_first() if b_.can_be_appended_to(a, strict=True): if ConversionChain.are_worth_chaining(a, b_): specific_chains.append(ConversionChain.chain(a, b_, strict=True)) for b in new_c_at_beginning_ns: b_ = b.remove_first() if b_.can_be_appended_to(a, strict=False): if 
ConversionChain.are_worth_chaining(a, b_): specific_nonstrict_chains.append(ConversionChain.chain(a, b_, strict=False)) for a in new_c_at_end_ns: for b in (new_c_at_beginning_ns + new_c_at_beginning): b_ = b.remove_first() if b_.can_be_appended_to(a, strict=False): if ConversionChain.are_worth_chaining(a, b_): specific_nonstrict_chains.append(ConversionChain.chain(a, b_, strict=False)) # by inserting this converter at the beginning of an existing *generic* if converter.is_generic(): # we want to avoid chaining generic converters together pass else: new_c_at_beginning_generic = [] new_c_at_beginning_generic_ns = [] for existing_specific in self._generic_conversion_chains: # start with strict if existing_specific.can_be_appended_to(converter, strict=True): if ConversionChain.are_worth_chaining(converter, existing_specific): new_c_at_beginning_generic.append(ConversionChain.chain(converter, existing_specific, strict=True)) elif (not self.strict) and existing_specific.can_be_appended_to(converter, strict=False): if ConversionChain.are_worth_chaining(converter, existing_specific): new_c_at_beginning_generic_ns.append(ConversionChain.chain(converter, existing_specific, strict=False)) for existing_specific_ns in self._generic_nonstrict_conversion_chains: if existing_specific_ns.can_be_appended_to(converter, strict=False): if ConversionChain.are_worth_chaining(converter, existing_specific_ns): new_c_at_beginning_generic_ns.append(ConversionChain.chain(converter, existing_specific_ns, strict=False)) generic_chains += new_c_at_beginning_generic generic_nonstrict_chains += new_c_at_beginning_generic_ns # by combining specific and generic created chains into a big one for a in new_c_at_end: for b in new_c_at_beginning_generic: b_ = b.remove_first() if b_.can_be_appended_to(a, strict=True): if ConversionChain.are_worth_chaining(a, b_): generic_chains.append(ConversionChain.chain(a, b_, strict=True)) for b in new_c_at_beginning_generic_ns: b_ = b.remove_first() if b_.can_be_appended_to(a, strict=False): if ConversionChain.are_worth_chaining(a, b_): generic_nonstrict_chains.append(ConversionChain.chain(a, b_, strict=False)) for a in new_c_at_end_ns: for b in (new_c_at_beginning_generic_ns + new_c_at_beginning_generic): b_ = b.remove_first() if b_.can_be_appended_to(a, strict=False): if ConversionChain.are_worth_chaining(a, b_): generic_nonstrict_chains.append(ConversionChain.chain(a, b_, strict=False)) return generic_chains, generic_nonstrict_chains, specific_chains, specific_nonstrict_chains" 513,"def get_all_conversion_chains(self, from_type: Type[Any] = JOKER, to_type: Type[Any] = JOKER) \ -> Tuple[List[Converter], List[Converter], List[Converter]]: """""" Utility method to find matching converters or conversion chains. :param from_type: a required type of input object, or JOKER for 'wildcard'(*) . WARNING: ""from_type=AnyObject/object/Any"" means ""all converters able to source from anything"", which is different from ""from_type=JOKER"" which means ""all converters whatever their source type"". :param to_type: a required type of output object, or JOKER for 'wildcard'(*) . WARNING: ""to_type=AnyObject/object/Any"" means ""all converters able to produce any type of object"", which is different from ""to_type=JOKER"" which means ""all converters whatever type they are able to produce"". :return: a tuple of lists of matching converters, by type of *dest_type* match : generic, approximate, exact. 
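For example (hypothetical call), get_all_conversion_chains(to_type=int) returns every registered chain able to produce an int, split into those three groups.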
The order of each list is from *less relevant* to *most relevant* """""" if from_type is JOKER and to_type is JOKER: matching_dest_generic = self._generic_nonstrict_conversion_chains.copy() + \ self._generic_conversion_chains.copy() matching_dest_approx = [] matching_dest_exact = self._specific_non_strict_conversion_chains.copy() + \ self._specific_conversion_chains.copy() else: matching_dest_generic, matching_dest_approx, matching_dest_exact = [], [], [] # first transform any 'Any' type requirement into the official class for that to_type = get_validated_type(to_type, 'to_type', enforce_not_joker=False) # handle generic converters first for c in (self._generic_nonstrict_conversion_chains + self._generic_conversion_chains): match, source_exact, dest_exact = c.is_able_to_convert_detailed(strict=self.strict, from_type=from_type, to_type=to_type) if match: # match if is_any_type(to_type): # special case where desired to_type is already Any : in that case a generic converter will # appear in 'exact match' matching_dest_exact.append(c) else: # this is a match from a generic parser to a specific type : add in 'generic' cataegory matching_dest_generic.append(c) # then the specific for c in (self._specific_non_strict_conversion_chains + self._specific_conversion_chains): match, source_exact, dest_exact = c.is_able_to_convert_detailed(strict=self.strict, from_type=from_type, to_type=to_type) if match: if not is_any_type(to_type): if dest_exact: # we dont care if source is exact or approximate as long as dest is exact matching_dest_exact.append(c) else: # this means that dest is approximate. matching_dest_approx.append(c) else: # we only want to keep the generic ones, and they have already been added pass return matching_dest_generic, matching_dest_approx, matching_dest_exact" 514,"def find_all_matching_parsers(self, strict: bool, desired_type: Type[Any] = JOKER, required_ext: str = JOKER) \ -> Tuple[Tuple[List[Parser], List[Parser], List[Parser]], List[Parser], List[Parser], List[Parser]]: """""" Overrides the parent method to find parsers appropriate to a given extension and type. This leverages both the parser registry and the converter registry to propose parsing chains in a relevant order :param strict: :param desired_type: the type of object to match. :param required_ext: the required extension. :return: match=(matching_parsers_generic, matching_parsers_approx, matching_parsers_exact), no_type_match_but_ext_match, no_ext_match_but_type_match, no_match """""" # transform any 'Any' type requirement into the official class for that desired_type = get_validated_type(desired_type, 'desired_type', enforce_not_joker=False) # (1) call the super method to find all parsers matching, no_type_match_but_ext_match, no_ext_match_but_type_match, no_match = \ super(ParserRegistryWithConverters, self).find_all_matching_parsers(strict=self.is_strict, desired_type=desired_type, required_ext=required_ext) # these are ordered with 'preferred last' matching_p_generic, matching_p_approx, matching_p_exact = matching if desired_type is JOKER: # then we want to try to append every possible converter chain, even if we have already found an exact match # (exact match will probably contain all parsers in that case?...) 
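# (with desired_type == JOKER every parser that matches the extension, exact matches included, becomes a candidate for chaining with converters)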
parsers_to_complete_with_converters = no_type_match_but_ext_match + matching_p_generic + matching_p_approx \ + matching_p_exact else: # then we can try to complete all the ones matching the extension (even the generic because combining them # with a conversion chain might provide another way to reach the result - different from using the generic # alone to reach the to_type) parsers_to_complete_with_converters = no_type_match_but_ext_match + matching_p_generic + matching_p_approx # (2) find all conversion chains that lead to the expected result matching_c_generic_to_type, matching_c_approx_to_type, matching_c_exact_to_type = \ self.get_all_conversion_chains_to_type(to_type=desired_type) all_matching_converters = matching_c_generic_to_type + matching_c_approx_to_type + matching_c_exact_to_type # (3) combine both (parser + conversion chain), and append to the appropriate list depending on the match # -- (a) first Parsers matching EXT (not type) + Converters matching their type # for all parsers able to parse this extension, and for all the types they support # # (we have to reverse the list because now we want 'preferred first'. Indeed we wish to prepend to the match # lists in order not to hide the parser direct matches) matching_p_generic_with_approx_chain, matching_p_approx_with_approx_chain, matching_p_exact_with_approx_chain\ = [], [], [] for parser in reversed(parsers_to_complete_with_converters): for typ in parser.supported_types: match_results = self._complete_parsers_with_converters(parser, typ, desired_type, matching_c_generic_to_type, matching_c_approx_to_type, matching_c_exact_to_type) # prepend the existing lists with the new match matching_p_generic = match_results[1] + matching_p_generic matching_p_approx = match_results[3] + matching_p_approx matching_p_exact = match_results[5] + matching_p_exact # store the approximate matchs in the separate lists matching_p_generic_with_approx_chain = match_results[0] + matching_p_generic_with_approx_chain matching_p_approx_with_approx_chain = match_results[2] + matching_p_approx_with_approx_chain matching_p_exact_with_approx_chain = match_results[4] + matching_p_exact_with_approx_chain # finally prepend the approximate match lists matching_p_generic = matching_p_generic_with_approx_chain + matching_p_generic matching_p_approx = matching_p_approx_with_approx_chain + matching_p_approx matching_p_exact = matching_p_exact_with_approx_chain + matching_p_exact # -- (b) then parsers that do not match at all (not the file extension nor the type): we can find parsing chains # that make them at least match the type # # (we have to reverse it because it was 'best last', now it will be 'best first') for parser in reversed(no_match): for typ in parser.supported_types: for converter in reversed(all_matching_converters): # if converter is able to source from this parser if converter.is_able_to_convert(self.is_strict, from_type=typ, to_type=desired_type): if ParsingChain.are_worth_chaining(parser, typ, converter): # insert it at the beginning since it should have less priority no_ext_match_but_type_match.insert(0, ParsingChain(parser, converter, strict=self.is_strict, base_parser_chosen_dest_type=typ)) # Finally sort by chain length matching_p_generic = sorted(matching_p_generic, key=len, reverse=True) matching_p_approx = sorted(matching_p_approx, key=len, reverse=True) matching_p_exact = sorted(matching_p_exact, key=len, reverse=True) # Return return (matching_p_generic, matching_p_approx, matching_p_exact), no_type_match_but_ext_match, \ 
no_ext_match_but_type_match, no_match" 515,"def _complete_parsers_with_converters(self, parser, parser_supported_type, desired_type, matching_c_generic_to_type, matching_c_approx_to_type, matching_c_exact_to_type): """""" Internal method to create parsing chains made of a parser and converters from the provided lists. Once again a JOKER for a type means 'joker' here. :param parser: :param parser_supported_type: :param desired_type: :param matching_c_generic_to_type: :param matching_c_approx_to_type: :param matching_c_exact_to_type: :return: """""" matching_p_generic, matching_p_generic_with_approx_chain, \ matching_p_approx, matching_p_approx_with_approx_chain,\ matching_p_exact, matching_p_exact_with_approx_chain = [], [], [], [], [], [] # resolve Union and TypeVar desired_types = get_alternate_types_resolving_forwardref_union_and_typevar(desired_type) for desired_type in desired_types: # first transform any 'Any' type requirement into the official class for that desired_type = get_validated_type(desired_type, 'desired_type', enforce_not_joker=False) # ---- Generic converters - only if the parsed type is not already 'any' if not is_any_type(parser_supported_type): for cv in matching_c_generic_to_type: # if the converter can attach to this parser, we have a matching parser ! # --start from strict if cv.is_able_to_convert(strict=True, from_type=parser_supported_type, to_type=desired_type): if ParsingChain.are_worth_chaining(parser, parser_supported_type, cv): chain = ParsingChain(parser, cv, strict=True, base_parser_chosen_dest_type=parser_supported_type) # insert it at the beginning since it should have less priority matching_p_generic.append(chain) # --then non-strict elif (not self.strict) \ and cv.is_able_to_convert(strict=False, from_type=parser_supported_type, to_type=desired_type): if ParsingChain.are_worth_chaining(parser, parser_supported_type, cv): chain = ParsingChain(parser, cv, strict=False, base_parser_chosen_dest_type=parser_supported_type) # insert it at the beginning since it should have less priority matching_p_generic_with_approx_chain.append(chain) # ---- Approx to_type for cv in matching_c_approx_to_type: # if the converter can attach to this parser, we have a matching parser ! # -- start from strict if cv.is_able_to_convert(strict=True, from_type=parser_supported_type, to_type=desired_type): if ParsingChain.are_worth_chaining(parser, parser_supported_type, cv): chain = ParsingChain(parser, cv, strict=True, base_parser_chosen_dest_type=parser_supported_type) # insert it at the beginning since it should have less priority matching_p_approx.append(chain) # then non-strict elif (not self.strict) \ and cv.is_able_to_convert(strict=False, from_type=parser_supported_type, to_type=desired_type): if ParsingChain.are_worth_chaining(parser, parser_supported_type, cv): chain = ParsingChain(parser, cv, strict=False, base_parser_chosen_dest_type=parser_supported_type) # insert it at the beginning since it should have less priority matching_p_approx_with_approx_chain.append(chain) # ---- Exact to_type for cv in matching_c_exact_to_type: # if the converter can attach to this parser, we have a matching parser ! 
if cv.is_able_to_convert(strict=True, from_type=parser_supported_type, to_type=desired_type): if ParsingChain.are_worth_chaining(parser, parser_supported_type, cv): chain = ParsingChain(parser, cv, strict=True, base_parser_chosen_dest_type=parser_supported_type) # insert it at the beginning since it should have less priority matching_p_exact.append(chain) elif (not self.strict) \ and cv.is_able_to_convert(strict=False, from_type=parser_supported_type, to_type=desired_type): if ParsingChain.are_worth_chaining(parser, parser_supported_type, cv): chain = ParsingChain(parser, cv, strict=False, base_parser_chosen_dest_type=parser_supported_type) # insert it at the beginning since it should have less priority matching_p_exact_with_approx_chain.append(chain) # Preferred is LAST, so approx should be first return matching_p_generic_with_approx_chain, matching_p_generic, \ matching_p_approx_with_approx_chain, matching_p_approx, \ matching_p_exact_with_approx_chain, matching_p_exact" 516,"def parse_config(filename): """""" Parses a versionupgrade configuration file. Example: tag v{VERSION} branch v{VERSION} message Prepare {VERSION} release upgrade setup.py: version = '{VERSION}' upgrade __init__.py:__version__ = '{VERSION}' sub docs/changelog/v{VERSION}.md:# v{VERSION} (unreleased):# v{VERSION} ({DATE}) Available commands: - tag: Create a Git tag with the specified name. - branch: Create a Git branch with the specified name. - message: The commit message for upgraded version numbers. - upgrade: Upgrade the version number in the file matching the pattern. The same file may be listed multiple times. The pattern may actually be a regular expression and will be searched in every line of the file. - sub: Specify a file where the part of the file matching the first string will be replaced by the second string. Returns a #Config object. """""" tag = None branch = None message = 'Prepare {VERSION} release.' upgrades = {} subs = {} with open(filename) as fp: for i, line in enumerate(fp): line = line.strip() if not line or line.startswith('#'): continue key, sep, value = line.partition(' ') if not key or not value: raise ValueError('invalid configuration file at line {}'.format(i+1)) if key == 'tag': tag = value.strip() elif key == 'branch': branch = value.strip() elif key == 'message': message = value.strip() elif key == 'upgrade': filename, sep, pattern = value.partition(':') if not filename or not sep or not pattern or '{VERSION}' not in pattern: raise ValueError('invalid upgrade argument at line {}'.format(i+1)) upgrade = upgrades.setdefault(filename, []) upgrade.append(pattern) elif key == 'sub': filename, sep, pattern = value.partition(':') pattern = pattern.partition(':')[::2] if not pattern[0] or not pattern[1]: raise ValueError('invalid sub argument at line {}'.format(i+1)) subs.setdefault(filename, []).append(pattern) else: raise ValueError('invalid command {!r} at line {}'.format(key, i+1)) return Config(tag, branch, message, upgrades, subs)" 517,"def match_version_pattern(filename, pattern): """""" Matches a single version upgrade pattern in the specified *filename* and returns the match information. Returns a #Match object or #None if the *pattern* did not match. 
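Example (illustrative; the filename and pattern are hypothetical):

    match = match_version_pattern('setup.py', 'version={VERSION}')
    if match is not None:
        print(match.version)  # the #Version parsed from the matching line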
"""""" if ""{VERSION}"" not in pattern: raise ValueError(""pattern does not contain a {VERSION} reference"") pattern = pattern.replace('{VERSION}', '(?P[\d\w\.\-_]+)') expr = re.compile(pattern) with open(filename) as fp: lines = fp.read().split('\n') for i, line in enumerate(lines): match = expr.search(line) if match: return Match(filename, lines, line_index=i, version=Version(match.group('v')), span=match.span('v')) return None" 518,"def get_changed_files(include_staged=False): """""" Returns a list of the files that changed in the Git repository. This is used to check if the files that are supposed to be upgraded have changed. If so, the upgrade will be prevented. """""" process = subprocess.Popen(['git', 'status', '--porcelain'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT) stdout, __ = process.communicate() if process.returncode != 0: raise ValueError(stdout) files = [] for line in stdout.decode().split('\n'): if not line or line.startswith('#'): continue assert line[2] == ' ' if not include_staged and line[1] == ' ': continue files.append(line[3:]) return files" 519,"def _parse_doc(docs): """""" Converts a well-formed docstring into documentation to be fed into argparse. See signature_parser for details. shorts: (-k for --keyword -k, or ""from"" for ""frm/from"") metavars: (FILE for --input=FILE) helps: (docs for --keyword: docs) description: the stuff before epilog: the stuff after """""" name = ""(?:[a-zA-Z][a-zA-Z0-9-_]*)"" re_var = re.compile(r""^ *(%s)(?: */(%s))? *:(.*)$"" % (name, name)) re_opt = re.compile(r""^ *(?:(-[a-zA-Z0-9]),? +)?--(%s)(?: *=(%s))? *:(.*)$"" % (name, name)) shorts, metavars, helps, description, epilog = {}, {}, {}, """", """" if docs: for line in docs.split(""\n""): line = line.strip() # remove starting ':param' if line.startswith(':param'): line = line[len(':param'):] # skip ':rtype:' row if line.startswith(':rtype:'): continue if line.strip() == ""----"": break m = re_var.match(line) if m: if epilog: helps[prev] += epilog.strip() epilog = """" if m.group(2): shorts[m.group(1)] = m.group(2) helps[m.group(1)] = m.group(3).strip() prev = m.group(1) previndent = len(line) - len(line.lstrip()) continue m = re_opt.match(line) if m: if epilog: helps[prev] += epilog.strip() epilog = """" name = m.group(2).replace(""-"", ""_"") helps[name] = m.group(4) prev = name if m.group(1): shorts[name] = m.group(1) if m.group(3): metavars[name] = m.group(3) previndent = len(line) - len(line.lstrip()) continue if helps: if line.startswith("" "" * (previndent + 1)): helps[prev] += ""\n"" + line.strip() else: epilog += ""\n"" + line.strip() else: description += ""\n"" + line.strip() if line.strip(): previndent = len(line) - len(line.lstrip()) return shorts, metavars, helps, description, epilog" 520,"def signature_parser(func): """""" Creates an argparse.ArgumentParser from the function's signature. Arguments with no default are compulsary positional arguments, Arguments with defaults are optional --flags. If the default is True or False, the action of the flag will toggle the argument and the flag takes no parameters. If the default is None or a unicode string, the flag takes a string argument that passed to the function as a unicode string decoded using entrypoint.ENCODING If the default is a string, then the argument is passed as a binary string (be careful!), an int and a float cause parsing of those too. If you want the arguments to be a file, consider using the @withfile decorator. 
Documentation can be read out of the function's docstring, which should be of the basic form: ''' A short introduction to your program. arg: Help for positional parameter. frm/from: Help for a positional parameter with a reserved public name (i.e. this displays to the user as ""from"" but sets the ""frm"" variable) --opt: Help for optional parameter. -f --flag: An optional parameter that has a short version. --mode=MODE: An optional parameter that takes a MODE -t --type: A combination of both of the above, and one which requires continuing of the documentation on an indented line An epilog explaining anything you feel needs further clarity. ---- Any documentation for the function itself that isn't covered by the public documentation above the line. ''' All sections, and indeed the presence of a docstring, are not required. NOTE: for this to work, the function's signature must be in-tact some decorators (like @acceptargv for example) destroy, or mutilate the signature. """""" args, trail, kwargs, defaults = inspect.getargspec(func) if not args: args = [] if not defaults: defaults = [] if kwargs: raise Exception(""Can't wrap a function with **kwargs"") # Compulsary positional options needed = args[0:len(args) - len(defaults)] # Optional flag options params = args[len(needed):] shorts, metavars, helps, description, epilog = _parse_doc(func.__doc__) parser = argparse.ArgumentParser( description=description, epilog=epilog, formatter_class=ParagraphPreservingArgParseFormatter) # special flags special_flags = [] special_flags += ['debug'] defaults += (False,) helps['debug'] = 'set logging level to DEBUG' if module_version(func): special_flags += ['version'] defaults += (False,) helps['version'] = ""show program's version number and exit"" params += special_flags # Optional flag options used_shorts = set() for param, default in zip(params, defaults): args = [""--%s"" % param.replace(""_"", ""-"")] short = None if param in shorts: short = shorts[param] else: if param not in special_flags and len(param) > 1: first_char = param[0] if first_char not in used_shorts: used_shorts.add(first_char) short = '-' + first_char # -h conflicts with 'help' if short and short != '-h': args = [short] + args kwargs = {'default': default, 'dest': param.replace(""-"", ""_"")} if param == 'version': kwargs['action'] = 'version' kwargs['version'] = module_version(func) elif default is True: kwargs['action'] = 'store_false' elif default is False: kwargs['action'] = 'store_true' elif isinstance(default, list): kwargs['action'] = 'append' # default is not working # if len(default): # first = default[0] # if type(first) in [type(None), unicode]: # kwargs['type'] = unidecode # else: # kwargs['type'] = type(first) # kwargs['default'] = [] # else: kwargs['type'] = unidecode else: kwargs['action'] = 'store' if type(default) in [type(None), unicode]: kwargs['type'] = unidecode else: kwargs['type'] = type(default) if param in helps: kwargs['help'] = helps[param] if param in metavars: kwargs['metavar'] = metavars[param] parser.add_argument(*args, **kwargs) # Compulsary positional options for need in needed: kwargs = {'action': 'store', 'type': unidecode} if need in helps: kwargs['help'] = helps[need] if need in shorts: args = [shorts[need]] else: args = [need] parser.add_argument(*args, **kwargs) # The trailing arguments if trail: kwargs = {'action': 'store', 'type': unidecode, 'nargs': ""*""} if trail in helps: kwargs['help'] = helps[trail] if trail in shorts: kwargs['metavar'] = shorts[trail] else: kwargs['metavar'] = trail 
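# register a catch-all positional argument (dest '__args') that collects any trailing values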
parser.add_argument('__args', **kwargs) return parser" 521,"def _correct_args(func, kwargs): """""" Convert a dictionary of arguments including __argv into a list for passing to the function. """""" args = inspect.getargspec(func)[0] return [kwargs[arg] for arg in args] + kwargs['__args']" 522,"def entrypoint(func): """""" A decorator for your main() function. Really a combination of @autorun and @acceptargv, so will run the function if __name__ == '__main__' with arguments extricated from argparse. As with @acceptargv, this must either be the innermost decorator, or separated only by ""well-behaved"" decorators that preserve the __doc__ attribute AND the function signature. As with @autorun, this must be theoutermost decorator, as any decorators further out will not be applied to the function until after it is run. """""" frame_local = sys._getframe(1).f_locals if '__name__' in frame_local and frame_local['__name__'] == '__main__': argv = sys.argv[1:] parser = signature_parser(func) try: kwargs = parser.parse_args(argv).__dict__ # special cli flags # --version is handled by ArgParse # if kwargs.get('version'): # print module_version(func) # return if 'version' in kwargs.keys(): del kwargs['version'] # --debug FORMAT = '%(asctime)-6s: %(name)s - %(levelname)s - %(message)s' if kwargs.get('debug'): logging.basicConfig( level=logging.DEBUG, format=FORMAT, ) del kwargs['debug'] if ""__args"" in kwargs: return func(*_correct_args(func, kwargs)) else: return func(**kwargs) except UsageError, e: parser.error(e.message) return func" 523,"def autorun(func, _depth=1): """""" Runs the function if the module in which it is declared is being run directly from the commandline. Putting the following after the function definition would be similar: if __name__ == '__main__': func() NOTE: This will work most expectedly as the outermost decorator, as it will call the function before any more outwards decorators have been applied. """""" frame_local = sys._getframe(_depth).f_locals if '__name__' in frame_local and frame_local['__name__'] == '__main__': func(argv=sys.argv[1:]) return func" 524,"def acceptargv(func): """""" Transforms the signature of the function, and it's associated __doc__ into an argparse-parser, then calls the function with the results of using said parser. The function returned takes an optional argument, which is the list of parameters, if they are not given, sys.argv[1:] is used instead. The function may raise a UsageError() if it wants to signal an error that the user has made with the parameters, this is done by @withuserfile for example. CAVEAT: this relies on the argument signature of the function, if that has been destroyed, perhaps by a badly behaved decorator, this won't work as expected. 
CAVEAT2: this destroys the argument signature of the function ;) """""" parser = signature_parser(func) def main(*args, **kw): argv = kw.get('argv', None) if argv == None: return func(*args, **kw) else: try: kwargs = parser.parse_args(argv).__dict__ # special cli flags # --version is handled by ArgParse # if kwargs.get('version'): # print module_version(func) # return if 'version' in kwargs.keys(): del kwargs['version'] # --debug if kwargs.get('debug'): logging.basicConfig(level=logging.DEBUG) del kwargs['debug'] if ""__args"" in kwargs: return func(*_correct_args(func, kwargs)) else: return func(**kwargs) except UsageError, e: parser.error(e.message) main.__doc__ = func.__doc__ main.__name__ = func.__name__ main.__module__ = func.__module__ main.__dict__ = func.__dict__.copy() return main" 525,"def quote(text): 'Handle quote characters' # Convert to unicode. if not isinstance(text, unicode): text = text.decode('utf-8') # Look for quote characters. Keep the text as is if it's already quoted. for qp in QUOTEPAIRS: if text[0] == qp[0] and text[-1] == qp[-1] and len(text) >= 2: return text # If it's not quoted, try quoting for qp in QUOTEPAIRS: if qp[1] not in text: return qp[0] + text + qp[1] #Darn raise ValueError(u'The value ""%s"" is not quoted and contains too many quote characters to quote' % text)" 526,"def register_opener(suffix, opener=None): """""" Register a callback that opens an archive with the specified *suffix*. The object returned by the *opener* must implement the #tarfile.Tarfile interface, more specifically the following methods: - `add(filename, arcname) -> None` - `getnames() -> list of str` - `getmember(filename) -> TarInfo` - `extractfile(filename) -> file obj` This function can be used as a decorator when *opener* is not provided. The opener must accept the following arguments: %%arglist file (file-like): A file-like object to read the archive data from. mode (str): The mode to open the file in. Valid values are `'w'`, `'r'` and `'a'`. options (dict): A dictionary with possibly additional arguments. """""" if opener is None: def decorator(func): register_opener(suffix, func) return func return decorator if suffix in openers: raise ValueError('opener suffix {0!r} already registered'.format(suffix)) openers[suffix] = opener" 527,"def get_opener(filename): """""" Finds a matching opener that is registed with :func:`register_opener` and returns a tuple ``(suffix, opener)``. If there is no opener that can handle this filename, :class:`UnknownArchive` is raised. """""" for suffix, opener in openers.items(): if filename.endswith(suffix): return suffix, opener raise UnknownArchive(filename)" 528,"def open(filename=None, file=None, mode='r', suffix=None, options=None): """""" Opens the archive at the specified *filename* or from the file-like object *file* using the appropriate opener. A specific opener can be specified by passing the *suffix* argument. # Parameters filename (str): A filename to open the archive from. file (file-like): A file-like object as source/destination. mode (str): The mode to open the archive in. suffix (str): Possible override for the *filename* suffix. Must be specified when *file* is passed instead of *filename*. options (dict): A dictionary that will be passed to the opener with which additional options can be specified. return (archive-like): An object that represents the archive and follows the interface of the #tarfile.TarFile class. 
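Example (illustrative; the filename is hypothetical and assumes an opener is registered for the '.tar.gz' suffix, e.g. via register_opener):

    archive = open('release-1.0.tar.gz')  # mode defaults to 'r'
    for name in archive.getnames():
        print(name)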
"""""" if mode not in ('r', 'w', 'a'): raise ValueError(""invalid mode: {0!r}"".format(mode)) if suffix is None: suffix, opener = get_opener(filename) if file is not None: filename = None # We don't need it anymore. else: if file is not None and filename is not None: raise ValueError(""filename must not be set with file & suffix specified"") try: opener = openers[suffix] except KeyError: raise UnknownArchive(suffix) if options is None: options = {} if file is not None: if mode in 'wa' and not hasattr(file, 'write'): raise TypeError(""file.write() does not exist"", file) if mode == 'r' and not hasattr(file, 'read'): raise TypeError(""file.read() does not exist"", file) if [filename, file].count(None) != 1: raise ValueError(""either filename or file must be specified"") if filename is not None: file = builtins.open(filename, mode + 'b') try: return opener(file, mode, options) except: if filename is not None: file.close() raise" 529,"def extract(archive, directory, suffix=None, unpack_single_dir=False, check_extract_file=None, progress_callback=None, default_mode='755'): """""" Extract the contents of *archive* to the specified *directory*. This function ensures that no file is extracted outside of the target directory (which can theoretically happen if the arcname is not relative or points to a parent directory). # Parameters archive (str, archive-like): The filename of an archive or an already opened archive. directory (str): Path to the directory to unpack the contents to. unpack_single_dir (bool): If this is True and if the archive contains only a single top-level directory, its contents will be placed directly into the target *directory*. """""" if isinstance(archive, str): with open(archive, suffix=suffix) as archive: return extract(archive, directory, None, unpack_single_dir, check_extract_file, progress_callback, default_mode) if isinstance(default_mode, str): default_mode = int(default_mode, 8) if progress_callback: progress_callback(-1, 0, None) names = archive.getnames() # Find out if we have only one top-level directory. 
toplevel_dirs = set() for name in names: parts = name.split('/') if len(parts) > 1: toplevel_dirs.add(parts[0]) if unpack_single_dir and len(toplevel_dirs) == 1: stripdir = next(iter(toplevel_dirs)) + '/' else: stripdir = None for index, name in enumerate(names): if progress_callback: progress_callback(index + 1, len(names), name) if name.startswith('..') or name.startswith('/') or os.path.isabs(name): continue if check_extract_file and not check_extract_file(name): continue if name.endswith('/'): continue if stripdir: filename = name[len(stripdir):] if not filename: continue else: filename = name info = archive.getmember(name) src = archive.extractfile(name) if not src: continue try: filename = os.path.join(directory, filename) dirname = os.path.dirname(filename) if not os.path.exists(dirname): os.makedirs(dirname) with builtins.open(filename, 'wb') as dst: shutil.copyfileobj(src, dst) os.chmod(filename, info.mode or default_mode) os.utime(filename, (-1, info.mtime)) finally: src.close() if progress_callback: progress_callback(len(names), len(names), None)" 530,"def transitions_to(self, dst): ''' returns enumerable of (prevstate, t) tuples this is super slow and needs to be sped up ''' if dst in self._transitions_to: for t in self._transitions_to[dst]: for s in self._transitions_to[dst][t]: yield (s, t)" 531,"def reltags(self, src, cache=None): ''' returns all the tags that are relevant at this state cache should be a dictionary and it is updated by the function ''' if not self._tag_assocs: return set() # fucking python and it's terrible support for recursion makes this # far more complicated than it needs to be if cache == None: cache = {} q = _otq() q.append(src) updateq = _otq() while q: i = q.popleft() if i in cache: continue cache[i] = set() for (s,t) in self.transitions_to(i): q.append(s) if self.is_tagged(t,s,i): cache[i].add((self.tag(t,s,i),s, i)) updateq.appendleft((i, s)) while updateq: i = updateq.popleft() cache[i[0]].update(cache[i[1]]) return cache[src]" 532,"def _add_epsilon_states(self, stateset, gathered_epsilons): ''' stateset is the list of initial states gathered_epsilons is a dictionary of (dst: src) epsilon dictionaries ''' for i in list(stateset): if i not in gathered_epsilons: gathered_epsilons[i] = {} q = _otq() q.append(i) while q: s = q.popleft() for j in self._transitions.setdefault(s, {}).setdefault(NFA.EPSILON, set()): gathered_epsilons[i][j] = s if j not in gathered_epsilons[i] else self.choose(s, j) q.append(j) stateset.update(gathered_epsilons[i].keys())" 533,"def _states_to_dfa_bytecode(self, states, \ tran=None, \ debug=False, \ compiled_states=None, \ gathered_epsilons=None, \ cached_transitions=None, \ cached_tcode=None, \ reltags_cache=None \ ): '''returns the instruction pointer to the bytecode added''' pstates = copy.copy(states) if reltags_cache == None: reltags_cache = {} if cached_tcode == None: cached_tcode = {} if cached_transitions == None: cached_transitions = {} if gathered_epsilons == None: gathered_epsilons = {} self._add_epsilon_states(states, gathered_epsilons) if tran != None: states = self.nextstates(states, tran) self._add_epsilon_states(states, gathered_epsilons) if self._magic != None: states = states.union(self._magic(states)) tstates = tuple(states) # this is used so we only compile each stateset once if compiled_states == None: compiled_states = {} if tstates in compiled_states: return compiled_states[tstates] # grab the ip from our codeblock ip = self._bytecode.newblock(tstates) compiled_states[tstates] = ip # TODO # epsilon 
transitions are never 'taken' so we need # to insert any ltagv/utagv instructions required # for all epsilon transitions # gathered_epsilons[state] holds a dictionary of dst: src mappings, so we can use that data if self.do_tags: tags = set() rtags = set() for ts in pstates: for dst in gathered_epsilons[ts]: rtags.update(self.reltags(dst, reltags_cache)) src = gathered_epsilons[ts][dst] if self.is_tagged(NFA.EPSILON, src, dst): tags.add((self.tag(NFA.EPSILON, src, dst), dst)) self._write_transition_code(tags, rtags, ip) # run any defined state hooks for s in tstates: if s in self._state_hooks: ip.append(VM.PyCode(self._state_hooks[s])) # do a multi-match for any final states finals = self._final_states.intersection(states) if len(finals) > 0: ip.append(VM.MultiMatch(finals)) # do any interupts required interupts = self._interupt_states.intersection(states) if len(interupts) > 0: ip.append(VM.MultiInterupt(interupts)) # consume a character ip.append(VM.Consume()) ts = self.transitions(states, cached_transitions) if debug: print 'compiling bytecode for stateset:\n\t%s\n\t0x%x: %s' % (states,ip,(defaults,ts)) def mkbytecode(t): return lambda: self._transitions_to_dfa_bytecode(states, t, cached_tcode, debug=debug, compiled_states=compiled_states, gathered_epsilons=gathered_epsilons, cached_transitions=cached_transitions, reltags_cache=reltags_cache) # for any of the non-default states add a conditional jmp for k in ts: if k in (NFA.ANY, NFA.EPSILON): continue jmppoint = VM.DelayedArg(mkbytecode(k)) ip.append(VM.Compare(k)) ip.append(VM.CondJmp(jmppoint)) # jmp to default state if there is one, otherwise leave defaults = self.nextstates(states, NFA.ANY) if len(defaults) > 0: jmppoint = VM.DelayedArg(mkbytecode(NFA.ANY)) ip.append(VM.Jmp(jmppoint)) else: ip.append(VM.Leave()) # return the instruction pointer return ip" 534,"def _add_training_data(self, src, dst, symbol): """""" Training_data is a dictionary from strings to lists. - Each string (key) is an access string - Each list (value) is a list of tuples (target_state, [symbols directed to that state]). These represent that a transition exists from the state used as key to the first part of the training_data to the dst state which is the first part of the tuple with all the symbols in the list in the SECOND part of the tuple. Args: src (str): The source state dst (str): The target state symbol (str): The transition symbol Returns: None """""" src_data = self.training_data[src] for (s, v) in src_data: if s == dst: v.append(symbol) return src_data.append((dst, [symbol]))" 535,"def is_closed(self): """""" _check if the observation table is closed. Args: None Returns: tuple (bool, str): True if the observation table is closed and false otherwise. If the table is not closed the escaping string is returned. """""" old_training_data = self.training_data self.training_data = {x: [] for x in self.sm_vector} for t in self.smi_vector: src_state = t[:-1] symbol = t[-1:] found = False for dst_state in self.sm_vector: if self.observation_table[dst_state] == self.observation_table[t]: self._add_training_data(src_state, dst_state, symbol) found = True break if not found: return False, t assert self.training_data != old_training_data, \ ""No update happened from previous round. The algo will loop infinetely"" return True, None" 536,"def _fill_table_entry(self, row, col): """""""""" Fill an entry of the observation table. 
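The stored value is the answer of a membership query on the concatenation row + col.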
Args: row (str): The row of the observation table col (str): The column of the observation table Returns: None """""" self.observation_table[row, col] = self._membership_query(row + col)" 537,"def _run_in_hypothesis(self, mma, w_string, index): """""""""" Run the string in the hypothesis automaton for index steps and then return the access string for the state reached concatanated with the rest of the string w. Args: mma (DFA): The hypothesis automaton w_string (str): The examined string to be consumed index (int): The index value for selecting the prefix of w Return: str: The access string """""" state = mma.states[0] s_index = 0 for i in range(index): for arc in state: if arc.guard.is_sat(w_string[i]): state = mma.states[arc.dst_state] s_index = arc.dst_state # The id of the state is its index inside the Sm list access_string = self.observation_table.sm_vector[s_index] logging.debug( 'Access string for %d: %s - %d ', index, access_string, s_index) return access_string" 538,"def _process_counter_example(self, mma, w_string): """""""" Process a counterexample in the Rivest-Schapire way. Args: mma (DFA): The hypothesis automaton w_string (str): The examined string to be consumed Return: None """""" if len(w_string) == 1: self.observation_table.smi_vector.append(w_string) for exp in self.observation_table.em_vector: self._fill_table_entry(w_string, exp) diff = len(w_string) same = 0 membership_answer = self._membership_query(w_string) while True: i = (same + diff) / 2 access_string = self._run_in_hypothesis(mma, w_string, i) if membership_answer != self._membership_query(access_string + w_string[i:]): diff = i else: same = i if diff - same == 1: break # First check if the transition is part of our training data. access_string = self._run_in_hypothesis(mma, w_string, diff - 1) wrong_transition = access_string + w_string[diff - 1] if wrong_transition not in self.observation_table.smi_vector: # If transition is not part of our training data add s_ib to Smi and # return to checking table closedness. self.observation_table.smi_vector.append(wrong_transition) for exp in self.observation_table.em_vector: self._fill_table_entry(wrong_transition, exp) return # This point presents a tradeoff between equivalence and membership # queries. If the transition in the counterexample'input_string breakpoint is not # part of our current training data (i.e. s_ib is not part of our Smi # set), then we assume a wrong transition and return to checking table # closure by adding s_ib to our training data. This saves a number of # membership queries since we don't add a row in our table unless # absolutely necessary. Notice that even if Equivalence queries are # expensive in general caching the result will be able to discover that # this iteration required a new state in the next equivalence query. exp = w_string[diff:] self.observation_table.em_vector.append(exp) for row in self.observation_table.sm_vector + self.observation_table.smi_vector: self._fill_table_entry(row, exp)" 539,"def _get_predicate_guards(self, state, state_training_data): """""" Args: state (DFA state): The dfa state state_training_data (list): The training data set Returns: list: A list of transitions """""" # choose the sink transition. 
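# (the sink transition will later receive a predicate covering every alphabet symbol not claimed by another transition)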
# First option: Just the maximum transition # sink = max(state_training_data, key=lambda x: len(x[1]))[0] # Second option: Heuristics based on RE filters properties max_size_trans = max(state_training_data, key=lambda x: len(x[1])) max_size_trans_l = [x for x in state_training_data if len(x[1]) == len(max_size_trans[1])] target_states = [t[0] for t in max_size_trans_l] if len(max_size_trans_l) == 1: sink = max_size_trans[0] elif '' in target_states: sink = '' elif state in target_states: sink = state else: sink = random.choice(target_states) # End of sink selection transitions = [] known_symbols = [] for (t, data) in state_training_data: if t == sink: continue pred = SetPredicate(data) transitions.append((t, pred)) known_symbols += data transitions.append( (sink, SetPredicate(set(self.alphabet) - set(known_symbols)))) return transitions" 540,"def get_sfa_conjecture(self): """""" Utilize the observation table to construct a Mealy Machine. The library used for representing the Mealy Machine is the python bindings of the openFST library (pyFST). Args: None Returns: MealyMachine: A mealy machine build based on a closed and consistent observation table. """""" sfa = SFA(self.alphabet) for s in self.observation_table.sm_vector: transitions = self._get_predicate_guards( s, self.observation_table.training_data[s]) for (t, pred) in transitions: src_id = self.observation_table.sm_vector.index(s) dst_id = self.observation_table.sm_vector.index(t) assert isinstance( pred, SetPredicate), ""Invalid type for predicate {}"".format(pred) sfa.add_arc(src_id, dst_id, pred) # Mark the final states in the hypothesis automaton. i = 0 for s in self.observation_table.sm_vector: sfa.states[i].final = self.observation_table[s, self.epsilon] i += 1 return sfa" 541,"def _init_table(self): """""" Initialize the observation table. """""" self.observation_table.sm_vector.append(self.epsilon) self.observation_table.smi_vector = [random.choice(self.alphabet)] self.observation_table.em_vector.append(self.epsilon) self._fill_table_entry(self.epsilon, self.epsilon) for s in self.observation_table.smi_vector: self._fill_table_entry(s, self.epsilon)" 542,"def _init_table_from_dfa(self, mma): """""" Initializes table form a DFA Args: mma: The input automaton Returns: None """""" observation_table_init = ObservationTableInit(self.epsilon, self.alphabet) sm_vector, smi_vector, em_vector = observation_table_init.initialize(mma, True) self.observation_table.sm_vector = sm_vector self.observation_table.smi_vector = smi_vector self.observation_table.em_vector = em_vector logging.info('Initialized from DFA em_vector table is the following:') logging.info(em_vector) self._fill_table_entry(self.epsilon, self.epsilon) # list(set([])) is used to remove duplicates, [1:0] to remove epsilon for row in sorted(list(set(sm_vector + smi_vector)), key=len)[1:]: for column in em_vector: self._fill_table_entry(str(row), str(column))" 543,"def learn_sfa(self, mma=None): """""" Implements the high level loop of the algorithm for learning a Mealy machine. Args: mma: Returns: MealyMachine: A model for the Mealy machine to be learned. 
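Example (illustrative; the learner construction shown here is hypothetical):

    learner = SFALearner(alphabet)         # hypothetical class/constructor name
    _, conjecture = learner.learn_sfa()    # conjecture is the learned automaton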
"""""" logging.info('Initializing learning procedure.') if mma: self._init_table_from_dfa(mma) else: self._init_table() logging.info('Generating a closed and consistent observation table.') while True: closed = False # Make sure that the table is closed while not closed: logging.debug('Checking if table is closed.') closed, s = self.observation_table.is_closed() if not closed: logging.debug('Closing table.') self._ot_make_closed(s) else: logging.debug('Table closed.') # Create conjecture sfa = self.get_sfa_conjecture() logging.info('Generated conjecture machine with %d states.', len(list(sfa.states))) # _check correctness logging.debug('Running equivalence query.') found, counter_example = self._equivalence_query(sfa) # Are we done? if found: logging.info('No counterexample found. Hypothesis is correct!') break # Add the new experiments into the table to reiterate the # learning loop logging.info( 'Processing counterexample %s with length %d.', counter_example, len(counter_example)) self._process_counter_example(sfa, counter_example) logging.info('Learning complete.') return '', sfa" 544,"def make_log_metric(level=logging.INFO, msg=""%d items in %.2f seconds""): """"""Make a new metric function that logs at the given level :arg int level: logging level, defaults to ``logging.INFO`` :arg string msg: logging message format string, taking ``count`` and ``elapsed`` :rtype: function """""" def log_metric(name, count, elapsed): log_name = 'instrument.{}'.format(name) if name else 'instrument' logging.getLogger(log_name).log(level, msg, count, elapsed) return log_metric" 545,"def _auto_init(self, *args, **kwrds): """"""Our decorator will add this as __init__ to target classes."""""" for fld in getattr(self, '__fields__', []): val = kwrds.get(fld.name, _NO_VAL) if val is _NO_VAL: val = fld.get_default_val() setattr(self, fld.name, val) if callable(getattr(self, 'setup', None)): self.setup(*args, **kwrds)" 546,"def ctor_overridable(cls): """"""Return true if cls has on overridable __init__."""""" prev_init = getattr(cls, ""__init__"", None) if not callable(prev_init): return True if prev_init in [object.__init__, _auto_init]: return True if getattr(prev_init, '_clobber_ok', False): return True print(cls, prev_init, getattr(prev_init, '_clobber_ok', 'missing')) return False" 547,"def DBObject(table_name, versioning=VersioningTypes.NONE): """"""Classes annotated with DBObject gain persistence methods."""""" def wrapped(cls): field_names = set() all_fields = [] for name in dir(cls): fld = getattr(cls, name) if fld and isinstance(fld, Field): fld.name = name all_fields.append(fld) field_names.add(name) def add_missing_field(name, default='', insert_pos=None): if name not in field_names: fld = Field(default=default) fld.name = name all_fields.insert( len(all_fields) if insert_pos is None else insert_pos, fld ) add_missing_field('id', insert_pos=0) add_missing_field('_create_date') add_missing_field('_last_update') if versioning == VersioningTypes.DELTA_HISTORY: add_missing_field('_version_hist', default=list) # Things we count on as part of our processing cls.__table_name__ = table_name cls.__versioning__ = versioning cls.__fields__ = all_fields # Give them a ctor for free - but make sure we aren't clobbering one if not ctor_overridable(cls): raise TypeError( 'Classes with user-supplied __init__ should not be decorated ' 'with DBObject. 
Use the setup method' ) cls.__init__ = _auto_init # Duck-type the class for our data methods cls.get_table_name = classmethod(_get_table_name) cls.get_id = _get_id cls.set_id = _set_id cls.to_data = _to_data cls.from_data = classmethod(_from_data) cls.index_names = classmethod(_index_names) cls.indexes = _indexes # Bonus methods they get for using gludb.simple cls.get_version_hist = _get_version_hist # Register with our abc since we actually implement all necessary # functionality Storable.register(cls) # And now that we're registered, we can also get the database # read/write functionality for free cls = DatabaseEnabled(cls) if versioning == VersioningTypes.DELTA_HISTORY: cls.save = _delta_save(cls.save) return cls return wrapped" 548,"def get_default_val(self): """"""Helper to expand default value (support callables)."""""" val = self.default while callable(val): val = val() return val" 549,"def train(self, data, target, **kwargs): """""" Used in the training phase. Override. """""" non_predictors = [i.replace("" "", ""_"").lower() for i in list(set(data['team']))] + [""team"", ""next_year_wins""] self.column_names = [l for l in list(data.columns) if l not in non_predictors] results, folds = self.cross_validate(data, non_predictors, **kwargs) self.gather_results(results, folds, data)" 550,"def compile(self, code): """""" Compile a simex code (e.g. {{ anything }}) to regex. Returns regex. """""" is_plain_text = True compiled_regex = r"""" for chunk in self.delimiter_regex().split(code): if is_plain_text: compiled_regex = compiled_regex + simex_escape(chunk, flexible_whitespace=self._flexible_whitespace) else: stripped_chunk = chunk.strip() if stripped_chunk in self._regexes.keys(): compiled_regex = u""{0}{1}"".format( compiled_regex, self._regexes[stripped_chunk] ) else: raise KeyNotFound(""'{0}' not found in keys"") is_plain_text = not is_plain_text if self._exact: compiled_regex = r""^"" + compiled_regex + r""$"" return compile(compiled_regex)" 551,"def as_command(self): """"""Creates the click command wrapping the function """""" try: params = self.unbound_func.__click_params__ params.reverse() del self.unbound_func.__click_params__ except AttributeError: params = [] help = inspect.getdoc(self.real_func) if isinstance(help, bytes): help = help.decode('utf-8') self.options.setdefault('help', help) @pass_script_info_decorator def callback(info, *args, **kwargs): if self.with_reloader: app = info.load_app() if app.debug: def inner(): return self.command_callback(info, *args, **kwargs) run_with_reloader(inner, extra_files=get_reloader_extra_files()) return self.command_callback(info, *args, **kwargs) return self.cls(name=self.name, callback=callback, params=params, **self.options)" 552,"def command_line_options(command_line_arguments): """"""Parse the program options"""""" # set up command line parser parser = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument('-d', '--files', required=True, nargs='+', help = ""A list of score files to evaluate."") parser.add_argument('-b', '--baselines', default=[], nargs='+', help = ""A list of baseline results to add to the plot"") parser.add_argument('-D', '--directory', default = '.', help = ""A directory, where to find the --files"") parser.add_argument('-B', '--baseline-directory', default = '.', help = ""A directory, where to find the --baselines"") parser.add_argument('-R', '--auto-baselines', choices = ('bioid', 'mit-cmu'), help = ""Automatically add the baselines for the given 
database"") parser.add_argument('-l', '--legends', nargs='+', help = ""A list of legend strings used for ROC, CMC and DET plots; if given, must be the same number than --files plus --baselines."") parser.add_argument('-w', '--output', default = 'FROC.pdf', help = ""If given, FROC curves will be plotted into the given pdf file."") parser.add_argument('-c', '--count-detections', action='store_true', help = ""Counts the number of detections (positive is higher than negative, per file)."") parser.add_argument('-n', '--max', type=int, nargs=2, default=(160,70), help = ""The highest false alarms and the lowest detection rate to plot"") parser.add_argument('-t', '--title', default='FROC', help = ""The title of the plot"") parser.add_argument('--self-test', action='store_true', help=argparse.SUPPRESS) # add verbosity option bob.core.log.add_command_line_option(parser) args = parser.parse_args(command_line_arguments) bob.core.log.set_verbosity_level(logger, args.verbose) if args.legends is not None: count = len(args.files) + (len(args.baselines) if args.baselines is not None else 0) if len(args.legends) != count: logger.error(""The number of --files (%d) plus --baselines (%d) must be the same as --legends (%d)"", len(args.files), len(args.baselines) if args.baselines else 0, len(args.legends)) args.legends = None # update legends when they are not specified on command line if args.legends is None: args.legends = args.files if not args.baselines else args.files + args.baselines args.legends = [l.replace(""_"",""-"") for l in args.legends] if args.auto_baselines == 'bioid': args.baselines.extend([""baselines/baseline_detection_froba_mct_BIOID"", ""cosmin/BIOID/face.elbp.proj0.var.levels10.roc""]) args.legends.extend([""Froba"", ""Cosmin""]) elif args.auto_baselines == 'mit-cmu': args.baselines.extend([""baselines/baseline_detection_fcboost_MIT+CMU"", ""baselines/baseline_detection_viola_rapid1_MIT+CMU"", ""cosmin/MIT+CMU/face.elbp.proj0.var.levels10.roc""]) args.legends.extend([""FcBoost"", ""Viola"", ""Cosmin""]) return args" 553,"def main(command_line_arguments=None): """"""Reads score files, computes error measures and plots curves."""""" args = command_line_options(command_line_arguments) # get some colors for plotting cmap = mpl.cm.get_cmap(name='hsv') count = len(args.files) + (len(args.baselines) if args.baselines else 0) colors = [cmap(i) for i in numpy.linspace(0, 1.0, count+1)] # First, read the score files logger.info(""Loading %d score files"" % len(args.files)) scores = [read_score_file(os.path.join(args.directory, f)) for f in args.files] false_alarms = [] detection_rate = [] logger.info(""Computing FROC curves"") for score in scores: # compute some thresholds tmin = min(score[2]) tmax = max(score[2]) count = 100 thresholds = [tmin + float(x)/count * (tmax - tmin) for x in range(count+2)] false_alarms.append([]) detection_rate.append([]) for threshold in thresholds: detection_rate[-1].append(numpy.count_nonzero(numpy.array(score[1]) >= threshold) / float(score[0])) false_alarms[-1].append(numpy.count_nonzero(numpy.array(score[2]) >= threshold)) # to display 0 in a semilogx plot, we have to add a little # false_alarms[-1][-1] += 1e-8 # also read baselines if args.baselines is not None: for baseline in args.baselines: dr = [] fa = [] with open(os.path.join(args.baseline_directory, baseline)) as f: for line in f: splits = line.rstrip().split() dr.append(float(splits[0])) fa.append(int(splits[1])) false_alarms.append(fa) detection_rate.append(dr) logger.info(""Plotting FROC curves to file 
'%s'"", args.output) # create a multi-page PDF for the ROC curve pdf = PdfPages(args.output) figure = _plot_froc(false_alarms, detection_rate, colors, args.legends, args.title, args.max) mpl.xlabel('False Alarm (of %d pruned)' % len(scores[0][2])) mpl.ylabel('Detection Rate in \%% (total %d faces)' % scores[0][0]) pdf.savefig(figure) pdf.close() if args.count_detections: for i, f in enumerate(args.files): det, all = count_detections(f) print(""The number of detected faces for %s is %d out of %d"" % (args.legends[i], det, all))" 554,"def priority(var): """"""Prioritizes resource position in the final HTML. To be fed into sorted(key=). Javascript consoles throw errors if Bootstrap's js file is mentioned before jQuery. Using this function such errors can be avoided. Used internally. Positional arguments: var -- value sent by list.sorted(), which is a value in Statics().all_variables. Returns: Either a number if sorting is enforced for the value in `var`, or returns `var` itself. """""" order = dict(JQUERY='0', BOOTSTRAP='1') return order.get(var, var)" 555,"def get_resources(minify=False): """"""Find all resources which subclass ResourceBase. Keyword arguments: minify -- select minified resources if available. Returns: Dictionary of available resources. Keys are resource names (part of the config variable names), values are dicts with css and js keys, and tuples of resources as values. """""" all_resources = dict() subclasses = resource_base.ResourceBase.__subclasses__() + resource_definitions.ResourceAngular.__subclasses__() for resource in subclasses: obj = resource(minify) all_resources[resource.RESOURCE_NAME] = dict(css=tuple(obj.resources_css), js=tuple(obj.resources_js)) return all_resources" 556,"def citedby_pid(self, pid, metaonly=False, from_heap=True): """""" Retrieve citedby documents from a given PID number. pid: SciELO PID number metaonly: will retrieve only the metadata of the requested article citations including the number of citations it has received. from_heap: will retrieve the number of citations from a preproduced report, it will not fetch the api. Much faster results but not extremely updated. """""" if from_heap is True: result = citations.raw_data(pid) if result and 'cited_by' in result and metaonly is True: del(result['cited_by']) return result if result: return result url = urljoin(self.CITEDBY_URL, self.PID_ENDPOINT) params = { ""q"": pid, ""metaonly"": ""true"" if metaonly is True else ""false"" } result = self._do_request(url, params=params) return result" 557,"def citedby_pid(self, pid, metaonly=False, from_heap=True): """""" Retrieve citedby documents from a given PID number. pid: SciELO PID number metaonly: will retrieve only the metadata of the requested article citations including the number of citations it has received. from_heap: will retrieve the number of citations from a preproduced report, it will not fetch the api. Much faster results but not extremelly updated. """""" if from_heap is True: result = citations.raw_data(pid) if result and 'cited_by' in result and metaonly is True: del(result['cited_by']) return result if result: return result result = self.client.citedby_pid(pid, metaonly=metaonly) try: return json.loads(result) except: return None" 558,"def search(self, dsl, params): """""" Free queries to ES index. 
dsl (string): with DSL query params (list): [(key, value), (key, value)] where key is a query parameter, and value is the value required for parameter, ex: [('size', '0'), ('search_type', 'count')] """""" query_parameters = [] for key, value in params: query_parameters.append(self.CITEDBY_THRIFT.kwargs(str(key), str(value))) try: result = self.client.search(dsl, query_parameters) except self.CITEDBY_THRIFT.ServerError: raise ServerError('you may trying to run a bad DSL Query') try: return json.loads(result) except: return None" 559,"def raise_error(error): """"""Intakes a dict of remote error information and raises a DashiError """""" exc_type = error.get('exc_type') if exc_type and exc_type.startswith(ERROR_PREFIX): exc_type = exc_type[len(ERROR_PREFIX):] exc_cls = ERROR_TYPE_MAP.get(exc_type, DashiError) else: exc_cls = DashiError raise exc_cls(**error)" 560,"def fire(self, name, operation, args=None, **kwargs): """"""Send a message without waiting for a reply @param name: name of destination service queue @param operation: name of service operation to invoke @param args: dictionary of keyword args to pass to operation. Use this OR kwargs. @param kwargs: additional args to pass to operation """""" if args: if kwargs: raise TypeError(""specify args dict or keyword arguments, not both"") else: args = kwargs d = dict(op=operation, args=args) headers = {'sender': self.add_sysname(self.name)} dest = self.add_sysname(name) def _fire(channel): with Producer(channel) as producer: producer.publish(d, routing_key=dest, headers=headers, serializer=self._serializer, exchange=self._exchange, declare=[self._exchange]) log.debug(""sending message to %s"", dest) with connections[self._pool_conn].acquire(block=True) as conn: _, channel = self.ensure(conn, _fire) conn.maybe_close_channel(channel)" 561,"def call(self, name, operation, timeout=10, args=None, **kwargs): """"""Send a message and wait for reply @param name: name of destination service queue @param operation: name of service operation to invoke @param timeout: RPC timeout to await a reply @param args: dictionary of keyword args to pass to operation. Use this OR kwargs. @param kwargs: additional args to pass to operation """""" if args: if kwargs: raise TypeError(""specify args dict or keyword arguments, not both"") else: args = kwargs # create a direct queue for the reply. This may end up being a # bottleneck for performance: each rpc call gets a brand new # exclusive queue. However this approach is used nova.rpc and # seems to have carried them pretty far. If/when this # becomes a bottleneck we can set up a long-lived backend queue and # use correlation_id to deal with concurrent RPC calls. See: # http://www.rabbitmq.com/tutorials/tutorial-six-python.html msg_id = uuid.uuid4().hex # expire the reply queue shortly after the timeout. 
it will be # (lazily) deleted by the broker if we don't clean it up first queue_arguments = {'x-expires': int((timeout + 1) * 1000)} queue = Queue(name=msg_id, exchange=self._exchange, routing_key=msg_id, durable=False, queue_arguments=queue_arguments) messages = [] event = threading.Event() def _callback(body, message): messages.append(body) message.ack() event.set() d = dict(op=operation, args=args) headers = {'reply-to': msg_id, 'sender': self.add_sysname(self.name)} dest = self.add_sysname(name) def _declare_and_send(channel): consumer = Consumer(channel, (queue,), callbacks=(_callback,)) with Producer(channel) as producer: producer.publish(d, routing_key=dest, headers=headers, exchange=self._exchange, serializer=self._serializer) return consumer log.debug(""sending call to %s:%s"", dest, operation) with connections[self._pool_conn].acquire(block=True) as conn: consumer, channel = self.ensure(conn, _declare_and_send) try: self._consume(conn, consumer, timeout=timeout, until_event=event) # try to delete queue, but don't worry if it fails (will expire) try: queue = queue.bind(channel) queue.delete(nowait=True) except Exception: log.exception(""error deleting queue"") finally: conn.maybe_close_channel(channel) msg_body = messages[0] if msg_body.get('error'): raise_error(msg_body['error']) else: return msg_body.get('result')" 562,"def handle(self, operation, operation_name=None, sender_kwarg=None): """"""Handle an operation using the specified function @param operation: function to call for this operation @param operation_name: operation name. if unspecified operation.__name__ is used @param sender_kwarg: optional keyword arg on operation to feed in sender name """""" if not self._consumer: self._consumer = DashiConsumer(self, self._conn, self._name, self._exchange, sysname=self._sysname) self._consumer.add_op(operation_name or operation.__name__, operation, sender_kwarg=sender_kwarg)" 563,"def cancel(self, block=True): """"""Cancel a call to consume() happening in another thread This could take up to DashiConnection.consumer_timeout to complete. @param block: if True, waits until the consumer has returned """""" if self._consumer: self._consumer.cancel(block=block)" 564,"def link_exceptions(self, custom_exception=None, dashi_exception=None): """"""Link a custom exception thrown on the receiver to a dashi exception """""" if custom_exception is None: raise ValueError(""custom_exception must be set"") if dashi_exception is None: raise ValueError(""dashi_exception must be set"") self._linked_exceptions[custom_exception] = dashi_exception" 565,"def ensure(self, connection, func, *args, **kwargs): """"""Perform an operation until success Repeats in the face of connection errors, pursuant to retry policy. 
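Returns a tuple (result, channel): the return value of func and the channel
on which the call finally succeeded.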
"""""" channel = None while 1: try: if channel is None: channel = connection.channel() return func(channel, *args, **kwargs), channel except (connection.connection_errors, IOError): self._call_errback() channel = self.connect(connection)" 566,"def re_tab(s): """"""Return a tabbed string from an expanded one."""""" l = [] p = 0 for i in range(8, len(s), 8): if s[i - 2:i] == "" "": # collapse two or more spaces into a tab l.append(s[p:i].rstrip() + ""\t"") p = i if p == 0: return s else: l.append(s[p:]) return """".join(l)" 567,"def read_next_line(self): """"""Read another line from the file."""""" next_line = self.file.readline() if not next_line or next_line[-1:] != '\n': # no newline on last line of file self.file = None else: # trim newline characters next_line = next_line[:-1] expanded = next_line.expandtabs() edit = urwid.Edit("""", expanded, allow_tab=True) edit.set_edit_pos(0) edit.original_text = next_line self.lines.append(edit) return next_line" 568,"def _get_at_pos(self, pos): """"""Return a widget for the line number passed."""""" if pos < 0: # line 0 is the start of the file, no more above return None, None if len(self.lines) > pos: # we have that line so return it return self.lines[pos], pos if self.file is None: # file is closed, so there are no more lines return None, None assert pos == len(self.lines), ""out of order request?"" self.read_next_line() return self.lines[-1], pos" 569,"def split_focus(self): """"""Divide the focus edit widget at the cursor location."""""" focus = self.lines[self.focus] pos = focus.edit_pos edit = urwid.Edit("""", focus.edit_text[pos:], allow_tab=True) edit.original_text = """" focus.set_edit_text(focus.edit_text[:pos]) edit.set_edit_pos(0) self.lines.insert(self.focus + 1, edit)" 570,"def combine_focus_with_prev(self): """"""Combine the focus edit widget with the one above."""""" above, ignore = self.get_prev(self.focus) if above is None: # already at the top return focus = self.lines[self.focus] above.set_edit_pos(len(above.edit_text)) above.set_edit_text(above.edit_text + focus.edit_text) del self.lines[self.focus] self.focus -= 1" 571,"def combine_focus_with_next(self): """"""Combine the focus edit widget with the one below."""""" below, ignore = self.get_next(self.focus) if below is None: # already at bottom return focus = self.lines[self.focus] focus.set_edit_text(focus.edit_text + below.edit_text) del self.lines[self.focus + 1]" 572,"def handle_keypress(self, k): """"""Last resort for keypresses."""""" if k == ""esc"": self.save_file() raise urwid.ExitMainLoop() elif k == ""delete"": # delete at end of line self.walker.combine_focus_with_next() elif k == ""backspace"": # backspace at beginning of line self.walker.combine_focus_with_prev() elif k == ""enter"": # start new line self.walker.split_focus() # move the cursor to the new line and reset pref_col self.view.keypress(size, ""down"") self.view.keypress(size, ""home"")" 573,"def save_file(self): """"""Write the file out to disk."""""" l = [] walk = self.walker for edit in walk.lines: # collect the text already stored in edit widgets if edit.original_text.expandtabs() == edit.edit_text: l.append(edit.original_text) else: l.append(re_tab(edit.edit_text)) # then the rest while walk.file is not None: l.append(walk.read_next_line()) # write back to disk outfile = open(self.save_name, ""w"") l_iter = iter(l) line = next(l_iter) prefix = """" while True: try: outfile.write(prefix + line) prefix = ""\n"" line = next(l_iter) except StopIteration: if line != ""\n"": outfile.write(""\n"") break" 
574,"def _media(self): """""" Returns a forms.Media instance with the basic editor media and media from all registered extensions. """""" css = ['markymark/css/markdown-editor.css'] iconlibrary_css = getattr( settings, 'MARKYMARK_FONTAWESOME_CSS', 'markymark/fontawesome/fontawesome.min.css' ) if iconlibrary_css: css.append(iconlibrary_css) media = forms.Media( css={'all': css}, js=('markymark/js/markdown-editor.js',) ) # Use official extension loading to initialize all extensions # and hook in extension-defined media files. renderer = initialize_renderer() for extension in renderer.registeredExtensions: if hasattr(extension, 'media'): media += extension.media return media" 575,"def rel(path, parent=None, par=False): """""" Takes *path* and computes the relative path from *parent*. If *parent* is omitted, the current working directory is used. If *par* is #True, a relative path is always created when possible. Otherwise, a relative path is only returned if *path* lives inside the *parent* directory. """""" try: res = os.path.relpath(path, parent) except ValueError: # Raised eg. on Windows for differing drive letters. if not par: return abs(path) raise else: if not par and not issub(res): return abs(path) return res" 576,"def issub(path): """""" Returns #True if *path* is a relative path that does not point outside of its parent directory or is equal to its parent directory (thus, this function will also return False for a path like `./`). """""" if isabs(path): return False if path.startswith(curdir + sep) or path.startswith(pardir + sep) or \ path == curdir or path == pardir: return False return True" 577,"def glob(patterns, parent=None, excludes=None, include_dotfiles=False, ignore_false_excludes=False): """""" Wrapper for #glob2.glob() that accepts an arbitrary number of patterns and matches them. The paths are normalized with #norm(). Relative patterns are automaticlly joined with *parent*. If the parameter is omitted, it defaults to the current working directory. If *excludes* is specified, it must be a string or a list of strings that is/contains glob patterns or filenames to be removed from the result before returning. > Every file listed in *excludes* will only remove **one** match from > the result list that was generated from *patterns*. Thus, if you > want to exclude some files with a pattern except for a specific file > that would also match that pattern, simply list that file another > time in the *patterns*. # Parameters patterns (list of str): A list of glob patterns or filenames. parent (str): The parent directory for relative paths. excludes (list of str): A list of glob patterns or filenames. include_dotfiles (bool): If True, `*` and `**` can also capture file or directory names starting with a dot. ignore_false_excludes (bool): False by default. If True, items listed in *excludes* that have not been globbed will raise an exception. # Returns list of str: A list of filenames. 
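# Example (illustrative)

    glob(['*.py', 'scripts/**/*.sh'], excludes=['setup.py'])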
"""""" if not glob2: raise glob2_ext if isinstance(patterns, str): patterns = [patterns] if not parent: parent = os.getcwd() result = [] for pattern in patterns: if not os.path.isabs(pattern): pattern = os.path.join(parent, pattern) result += glob2.glob(canonical(pattern)) for pattern in (excludes or ()): if not os.path.isabs(pattern): pattern = os.path.join(parent, pattern) pattern = canonical(pattern) if not isglob(pattern): try: result.remove(pattern) except ValueError as exc: if not ignore_false_excludes: raise ValueError('{} ({})'.format(exc, pattern)) else: for item in glob2.glob(pattern): try: result.remove(item) except ValueError as exc: if not ignore_false_excludes: raise ValueError('{} ({})'.format(exc, pattern)) return result" 578,"def addtobase(subject, base_suffix): """""" Adds the string *base_suffix* to the basename of *subject*. """""" if not base_suffix: return subject base, ext = os.path.splitext(subject) return base + base_suffix + ext" 579,"def addprefix(subject, prefix): """""" Adds the specified *prefix* to the last path element in *subject*. If *prefix* is a callable, it must accept exactly one argument, which is the last path element, and return a modified value. """""" if not prefix: return subject dir_, base = split(subject) if callable(prefix): base = prefix(base) else: base = prefix + base return join(dir_, base)" 580,"def addsuffix(subject, suffix, replace=False): """""" Adds the specified *suffix* to the *subject*. If *replace* is True, the old suffix will be removed first. If *suffix* is callable, it must accept exactly one argument and return a modified value. """""" if not suffix and not replace: return subject if replace: subject = rmvsuffix(subject) if suffix and callable(suffix): subject = suffix(subject) elif suffix: subject += suffix return subject" 581,"def rmvsuffix(subject): """""" Remove the suffix from *subject*. """""" index = subject.rfind('.') if index > subject.replace('\\', '/').rfind('/'): subject = subject[:index] return subject" 582,"def getsuffix(subject): """""" Returns the suffix of a filename. If the file has no suffix, returns None. Can return an empty string if the filenam ends with a period. """""" index = subject.rfind('.') if index > subject.replace('\\', '/').rfind('/'): return subject[index+1:] return None" 583,"def makedirs(path, exist_ok=True): """""" Like #os.makedirs(), with *exist_ok* defaulting to #True. """""" try: os.makedirs(path) except OSError as exc: if exist_ok and exc.errno == errno.EEXIST: return raise" 584,"def chmod_update(flags, modstring): """""" Modifies *flags* according to *modstring*. """""" mapping = { 'r': (_stat.S_IRUSR, _stat.S_IRGRP, _stat.S_IROTH), 'w': (_stat.S_IWUSR, _stat.S_IWGRP, _stat.S_IWOTH), 'x': (_stat.S_IXUSR, _stat.S_IXGRP, _stat.S_IXOTH) } target, direction = 'a', None for c in modstring: if c in '+-': direction = c continue if c in 'ugoa': target = c direction = None # Need a - or + after group specifier. continue if c in 'rwx' and direction in '+-': if target == 'a': mask = functools.reduce(operator.or_, mapping[c]) else: mask = mapping[c]['ugo'.index(target)] if direction == '-': flags &= ~mask else: flags |= mask continue raise ValueError('invalid chmod: {!r}'.format(modstring)) return flags" 585,"def chmod_repr(flags): """""" Returns a string representation of the access flags *flags*. 
"""""" template = 'rwxrwxrwx' order = (_stat.S_IRUSR, _stat.S_IWUSR, _stat.S_IXUSR, _stat.S_IRGRP, _stat.S_IWGRP, _stat.S_IXGRP, _stat.S_IROTH, _stat.S_IWOTH, _stat.S_IXOTH) return ''.join(template[i] if flags&x else '-' for i, x in enumerate(order))" 586,"def compare_timestamp(src, dst): """""" Compares the timestamps of file *src* and *dst*, returning #True if the *dst* is out of date or does not exist. Raises an #OSError if the *src* file does not exist. """""" try: dst_time = os.path.getmtime(dst) except OSError as exc: if exc.errno == errno.ENOENT: return True # dst does not exist src_time = os.path.getmtime(src) return src_time > dst_time" 587,"def init_app(self, app): """"""Initialize the extension."""""" # Set default Flask config option. app.config.setdefault('STATICS_MINIFY', False) # Select resources. self.all_resources = ALL_RESOURCES_MINIFIED if app.config.get('STATICS_MINIFY') else ALL_RESOURCES self.all_variables = ALL_VARIABLES # Add this instance to app.extensions. if not hasattr(app, 'extensions'): app.extensions = dict() if 'statics' in app.extensions: raise ValueError('Already registered extension STATICS.') app.extensions['statics'] = _StaticsState(self, app) # Initialize blueprint. name = 'flask_statics_helper' static_url_path = '{0}/{1}'.format(app.static_url_path, name) self.blueprint = Blueprint(name, __name__, template_folder='templates', static_folder='static', static_url_path=static_url_path) self.blueprint.add_app_template_global(self.all_variables, '_flask_statics_helper_all_variables') self.blueprint.add_app_template_global(self.all_resources, '_flask_statics_helper_all_resources') app.register_blueprint(self.blueprint)" 588,"def measure_board_rms(control_board, n_samples=10, sampling_ms=10, delay_between_samples_ms=0): ''' Read RMS voltage samples from control board high-voltage feedback circuit. ''' try: results = control_board.measure_impedance(n_samples, sampling_ms, delay_between_samples_ms, True, True, []) except RuntimeError: # `RuntimeError` may be raised if, for example, current limit was # reached during measurement. In such cases, return an empty frame. logger.warning('Error encountered during high-voltage RMS ' 'measurement.', exc_info=True) data = pd.DataFrame(None, columns=['board measured V', 'divider resistor index']) else: data = pd.DataFrame({'board measured V': results.V_hv}) data['divider resistor index'] = results.hv_resistor return data" 589,"def find_good(control_board, actuation_steps, resistor_index, start_index, end_index): ''' Use a binary search over the range of provided actuation_steps to find the maximum actuation voltage that is measured by the board feedback circuit using the specified feedback resistor. ''' lower = start_index upper = end_index while lower < upper - 1: index = lower + (upper - lower) / 2 v = actuation_steps[index] control_board.set_waveform_voltage(v) data = measure_board_rms(control_board) valid_data = data[data['divider resistor index'] >= 0] if (valid_data['divider resistor index'] < resistor_index).sum(): # We have some measurements from another resistor. upper = index else: lower = index control_board.set_waveform_voltage(actuation_steps[lower]) data = measure_board_rms(control_board) return lower, data" 590,"def resistor_max_actuation_readings(control_board, frequencies, oscope_reading_func): ''' For each resistor in the high-voltage feedback resistor bank, read the board measured voltage and the oscilloscope measured voltage for an actuation voltage that nearly saturates the feedback resistor. 
By searching for an actuation voltage near saturation, the signal-to-noise ratio is minimized. ''' # Set board amplifier gain to 1. # __NB__ This is likely _far_ lower than the actual gain _(which may be a # factor of several hundred)_.. control_board.set_waveform_voltage(0) control_board.auto_adjust_amplifier_gain = False control_board.amplifier_gain = 1. # Set waveform voltage to a low value and obtain the corresponding # oscilloscope reading to calculate an approximate gain of the amplifier. target_voltage = 0.1 control_board.set_waveform_voltage(target_voltage) oscope_rms = oscope_reading_func() estimated_amplifier_gain = oscope_rms / target_voltage # Based on the maximum amplified RMS voltage, define a set of actuation # voltages to search when performing calibration. max_post_gain_V = 0.8 * control_board.max_waveform_voltage max_actuation_V = max_post_gain_V / estimated_amplifier_gain actuation_steps = np.linspace(0.005, max_actuation_V, num=50) resistor_count = len(control_board.a0_series_resistance) # Define frequency/resistor index pairs to take measurements at. conditions = pd.DataFrame([[r, f] for r in range(resistor_count - 1, -1, -1) for f in frequencies], columns=['resistor index', 'frequency']) # Define function to process each frequency/resistor index pair. def max_actuation_reading(x): ''' Measure maximum board RMS voltage using specified feedback resistor, at the specified frequency. Request corresponding oscilloscope RMS voltage reading. ''' r = x['resistor index'].values[0] f = x['frequency'].values[0] control_board.set_waveform_frequency(f) actuation_index, data = find_good(control_board, actuation_steps, r, 0, len(actuation_steps) - 1) board_measured_rms = data.loc[data['divider resistor index'] >= 0, 'board measured V'].mean() oscope_rms = oscope_reading_func() print 'R=%s, f=%s' % (r, f) return pd.DataFrame([[r, f, actuation_index, board_measured_rms, oscope_rms]], columns=['resistor index', 'frequency', 'actuation index', 'board measured V', 'oscope measured V']) # Return board-measured RMS voltage and oscilloscope-measured RMS voltage # for each frequency/feedback resistor pair. return (conditions.groupby(['resistor index', 'frequency']) .apply(max_actuation_reading).reset_index(drop=True))" 591,"def fit_feedback_params(calibration, max_resistor_readings): ''' Fit model of control board high-voltage feedback resistor and parasitic capacitance values based on measured voltage readings. ''' R1 = 10e6 # Get transfer function to compute the amplitude of the high-voltage input # to the control board _(i.e., the output of the amplifier)_ based on the # attenuated voltage measured by the analog-to-digital converter on the # control board. # # The signature of the transfer function is: # # H(V1, R1, C1, R2, C2, f) # # See the `z_transfer_functions` function docstring for definitions of the # parameters based on the control board major version. 
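# For each feedback resistor, fit the series resistance and parasitic
# capacitance by least squares: minimize the difference between the
# oscilloscope-measured voltage and the amplifier output voltage predicted
# by the transfer function from the board-measured (attenuated) voltage.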
def fit_resistor_params(x): resistor_index = x['resistor index'].values[0] p0 = [calibration.R_hv[resistor_index], calibration.C_hv[resistor_index]] def error(p, df, R1): v1 = compute_from_transfer_function(calibration.hw_version.major, 'V1', V2=df['board measured V'], R1=R1, R2=p[0], C2=p[1], f=df['frequency'].values) e = df['oscope measured V'] - v1 return e p1, success = optimize.leastsq(error, p0, args=(x, R1)) # take the absolute value of the fitted values, since is possible # for the fit to produce negative resistor and capacitor values p1 = np.abs(p1) return pd.DataFrame([p0 + p1.tolist()], columns=['original R', 'original C', 'fitted R', 'fitted C']).T results = (max_resistor_readings [max_resistor_readings['resistor index'] >= 0] .groupby(['resistor index']).apply(fit_resistor_params)) data = results.unstack() data.columns = data.columns.droplevel() return data" 592,"def plot_feedback_params(hw_major_version, max_resistor_readings, feedback_params, axis=None): ''' Plot the effective attenuation _(i.e., gain less than 1)_ of the control board measurements of high-voltage AC input according to: - AC signal frequency. - feedback resistor used _(varies based on amplitude of AC signal)_. Each high-voltage feedback resistor (unintentionally) forms a low-pass filter, resulting in attenuation of the voltage measured on the control board. The plot generated by this function plots each of the following trends for each feedback resistor: - Oscilloscope measurements. - Previous model of attenuation. - Newly fitted model of attenuation, based on oscilloscope readings. ''' R1 = 10e6 # Since the feedback circuit changed in version 2 of the control board, we # use the transfer function that corresponds to the current control board # version that the fitted attenuation model is based on. if axis is None: fig = plt.figure() axis = fig.add_subplot(111) markers = MarkerStyle.filled_markers def plot_resistor_params(args): resistor_index, x = args try: color = axis._get_lines.color_cycle.next() except: # make compatible with matplotlib v1.5 color = axis._get_lines.prop_cycler.next()['color'] F = feedback_params.loc[resistor_index] # Broadcast values in case sympy function simplifies to scalar value. values = np.empty_like(x['frequency']) values[:] = compute_from_transfer_function(hw_major_version, 'V2', V1=1., R1=R1, R2=F['original R'], C2=F['original C'], f=x['frequency']) axis.loglog(x['frequency'], values, color=color, linestyle='--', label='R$_{%d}$ (previous fit)' % resistor_index) values[:] = compute_from_transfer_function(hw_major_version, 'V2', V1=1., R1=R1, R2=F['fitted R'], C2=F['fitted C'], f=x['frequency']) axis.loglog(x['frequency'], values, color=color, linestyle='-', alpha=0.6, label='R$_{%d}$ (new fit)' % resistor_index) attenuation = x['board measured V'] / x['oscope measured V'] axis.plot(x['frequency'], attenuation, color='none', marker=markers[resistor_index % len(markers)], label='R$_{%d}$ (scope measured)' % resistor_index, linestyle='none', markeredgecolor=color, markeredgewidth=2, markersize=8) return 0 map(plot_resistor_params, max_resistor_readings.groupby('resistor index')) legend = axis.legend(ncol=3) legend.draw_frame(False) axis.set_xlabel('Frequency (Hz)') axis.set_ylabel(r'$\frac{V_{BOARD}}' r'{V_{SCOPE}}$', fontsize=25)" 593,"def update_control_board_calibration(control_board, fitted_params): ''' Update the control board with the specified fitted parameters. 
''' # Update the control board with the new fitted capacitor and resistor # values for the reference load analog input (channel 0). control_board.a0_series_resistance = fitted_params['fitted R'].values control_board.a0_series_capacitance = fitted_params['fitted C'].values" 594,"def load(self): """""" Load each path in order. Remember paths already loaded and only load new ones. """""" data = self.dict_class() for path in self.paths: if path in self.paths_loaded: continue try: with open(path, 'r') as file: path_data = yaml.load(file.read()) data = dict_merge(data, path_data) self.paths_loaded.add(path) except IOError: # TODO: Log this correctly once logging is implemented if not path.endswith('.local.yml'): print 'CONFIG NOT FOUND: %s' % (path) self.data = data" 595,"def _initialize(self, settings_module): """""" Initialize the settings from a given settings_module settings_module - path to settings module """""" #Get the global settings values and assign them as self attributes self.settings_list = [] for setting in dir(global_settings): #Only get upper case settings if setting == setting.upper(): setattr(self, setting, getattr(global_settings, setting)) self.settings_list.append(setting) #If a settings module was passed in, import it, and grab settings from it #Overwrite global settings with theses if settings_module is not None: self.SETTINGS_MODULE = settings_module #Try to import the settings module try: mod = import_module(self.SETTINGS_MODULE) except ImportError: error_message = ""Could not import settings at {0}"".format(self.SETTINGS_MODULE) log.exception(error_message) raise ImportError(error_message) #Grab uppercased settings as set them as self attrs for setting in dir(mod): if setting == setting.upper(): if setting == ""INSTALLED_APPS"": self.INSTALLED_APPS += getattr(mod, setting) else: setattr(self, setting, getattr(mod, setting)) self.settings_list.append(setting) #If PATH_SETTINGS is in the settings file, extend the system path to include it if hasattr(self, ""PATH_SETTINGS""): for path in self.PATH_SETTINGS: sys.path.extend(getattr(self,path)) self.settings_list = list(set(self.settings_list))" 596,"def _setup(self): """""" Perform initial setup of the settings class, such as getting the settings module and setting the settings """""" settings_module = None #Get the settings module from the environment variables try: settings_module = os.environ[global_settings.MODULE_VARIABLE] except KeyError: error_message = ""Settings not properly configured. Cannot find the environment variable {0}"".format(global_settings.MODULE_VARIABLE) log.exception(error_message) self._initialize(settings_module) self._configure_logging()" 597,"def _configure_logging(self): """""" Setting up logging from logging config in settings """""" if not self.LOGGING_CONFIG: #Fallback to default logging in global settings if needed dictConfig(self.DEFAULT_LOGGING) else: dictConfig(self.LOGGING_CONFIG)" 598,"def ensure_context(**vars): """"""Ensures that a context is in the stack, creates one otherwise. 
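Illustrative usage (assumes the generator is wrapped as a context manager):

    with ensure_context(lang='en') as ctx:
        ...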
"""""" ctx = _context_stack.top stacked = False if not ctx: ctx = Context() stacked = True _context_stack.push(ctx) ctx.update(vars) try: yield ctx finally: if stacked: _context_stack.pop()" 599,"def request_context(app, request): """"""Creates a Context instance from the given request object """""" vars = {} if request.view_args is not None: vars.update(request.view_args) vars.update({ ""request"": request, ""GET"": AttrDict(request.args.to_dict()), ""POST"" : AttrDict(request.form.to_dict()), ""app"": app, ""config"": app.config, ""session"": session, ""g"": g, ""now"": datetime.datetime.now, ""utcnow"": datetime.datetime.utcnow, ""today"": datetime.date.today}) context = Context(vars) context.vars[""current_context""] = context return context" 600,"def clone(self, **override_vars): """"""Creates a copy of this context"""""" c = Context(self.vars, self.data) c.executed_actions = set(self.executed_actions) c.vars.update(override_vars) return c" 601,"def setup(): """"""import the matplotlib modules and set the style Returns ------- plt: pylab imported pylab module mpl: matplotlib module imported matplotlib module """""" # Latex support can be activated using an environment variable, otherwise # the default settings are: # - for windows: off # - else: on use_latex = False if('DD_USE_LATEX' in os.environ): if os.environ['DD_USE_LATEX'] == '1': use_latex = True else: if platform.system() == ""Windows"": use_latex = False else: use_latex = True already_loaded = 'matplotlib' in sys.modules # just make sure we can access matplotlib as mpl import matplotlib as mpl if not already_loaded: mpl.use('Agg') import matplotlib.pyplot as plt plt.style.use('seaborn') # general settings mpl.rcParams['font.size'] = 7.0 mpl.rcParams['axes.labelsize'] = 7.0 mpl.rcParams['xtick.labelsize'] = 7.0 mpl.rcParams['ytick.labelsize'] = 7.0 mpl.rcParams[""lines.linewidth""] = 1.5 mpl.rcParams[""lines.markeredgewidth""] = 3.0 mpl.rcParams[""lines.markersize""] = 3.0 # mpl.rcParams['font.sans-serif'] = 'Droid Sans' # mpl.rcParams['font.family'] = 'Open Sans' # mpl.rcParams['font.weight'] = 400 mpl.rcParams['mathtext.default'] = 'regular' # mpl.rcParams['font.family'] = 'Droid Sans' if use_latex: mpl.rcParams['text.usetex'] = True mpl.rc( 'text.latex', preamble=''.join(( # r'\usepackage{droidsans}', # r'\usepackage[T1]{fontenc} ', r'\usepackage{sfmath} \renewcommand{\rmfamily}{\sffamily}', r'\renewcommand\familydefault{\sfdefault} ', # r'\usepackage{mathastext} ' )) ) else: mpl.rcParams['text.usetex'] = False import mpl_toolkits.axes_grid1 as axes_grid1 axes_grid1 return plt, mpl" 602,"def mpl_get_cb_bound_below_plot(ax): """""" Return the coordinates for a colorbar axes below the provided axes object. Take into account the changes of the axes due to aspect ratio settings. Parts of this code are taken from the transforms.py file from matplotlib Important: Use only AFTER fig.subplots_adjust(...) Use as: ======= """""" position = ax.get_position() figW, figH = ax.get_figure().get_size_inches() fig_aspect = figH / figW box_aspect = ax.get_data_ratio() pb = position.frozen() pb1 = pb.shrunk_to_aspect(box_aspect, pb, fig_aspect).bounds ax_size = ax.get_position().bounds # the colorbar is set to 0.01 width sizes = [ax_size[0], ax_size[1] - 0.14, pb1[2], 0.03] return sizes" 603,"def main(): """"""Generate an XLS with specified content."""""" table = """"""
<table>
<tr><th>First Name</th><th>Last Name</th></tr>
<tr><td>Paul</td><td>McGrath</td></tr>
<tr><td>Liam</td><td>Brady</td></tr>
<tr><td>John</td><td>Giles</td></tr>
</table>
"""""" docraptor = DocRaptor() print(""Create test_basic.xls"") with open(""test_basic.xls"", ""wb"") as pdf_file: pdf_file.write( docraptor.create( {""document_content"": table, ""document_type"": ""xls"", ""test"": True} ).content )" 604,"def restore_gc_state(): """""" Restore the garbage collector state on leaving the with block. """""" old_isenabled = gc.isenabled() old_flags = gc.get_debug() try: yield finally: gc.set_debug(old_flags) (gc.enable if old_isenabled else gc.disable)()" 605,"def develop_link(options, info): ''' Prepare development environment. Perform the following steps: - Uninstall ``dmf_control_board_firmware`` if installed as Conda package. - Install build and run-time Conda dependencies. - Link working ``.pioenvs`` directory into Conda ``Library`` directory to make development versions of compiled firmware binaries available to Python API. - Link ``dmf_control_board_firmware`` Python package into site packages directory. See Also -------- :func:`develop_unlink` ''' project_dir = ph.path(__file__).realpath().parent # Uninstall ``dmf_control_board_firmware`` if installed as Conda package. info('Check if Conda package is installed...') version_info = ch.conda_version_info('dmf-control-board-firmware') if version_info.get('installed') is not None: info('Uninstall `dmf-control-board-firmware` package...') ch.conda_exec('uninstall', '-y', 'dmf-control-board-firmware', verbose=True) else: info('`dmf-control-board-firmware` package is not installed.') # Install build and run-time Conda dependencies. info('Install build and run-time Conda dependencies...') recipe_dir = project_dir.joinpath('.conda-recipe').realpath() ch.conda_exec('install', '-y', '-n', 'root', 'conda-build', verbose=True) ch.development_setup(recipe_dir, verbose=True) # Link working ``.pioenvs`` directory into Conda ``Library`` directory. info('Link working firmware directories into Conda environment.') pio_bin_dir = pioh.conda_bin_path() fw_bin_dir = pio_bin_dir.joinpath('dmf-control-board-firmware') if not fw_bin_dir.exists(): project_dir.joinpath('.pioenvs').junction(fw_bin_dir) fw_config_ini = fw_bin_dir.joinpath('platformio.ini') if not fw_config_ini.exists(): project_dir.joinpath('platformio.ini').link(fw_config_ini) # Link ``dmf_control_board_firmware`` Python package `conda.pth` in site # packages directory. info('Link working Python directory into Conda environment...') ch.conda_exec('develop', project_dir, verbose=True) info(72 * '-' + '\nFinished')" 606,"def develop_unlink(options, info): ''' Prepare development environment. Perform the following steps: - Unlink working ``.pioenvs`` directory into Conda ``Library`` directory. - Unlink ``dmf_control_board_firmware`` Python package from site packages directory. See Also -------- :func:`develop_link` ''' project_dir = ph.path(__file__).realpath().parent # Unlink working ``.pioenvs`` directory into Conda ``Library`` directory. info('Unlink working firmware directories from Conda environment.') pio_bin_dir = pioh.conda_bin_path() fw_bin_dir = pio_bin_dir.joinpath('dmf-control-board-firmware') if fw_bin_dir.exists(): fw_config_ini = fw_bin_dir.joinpath('platformio.ini') if fw_config_ini.exists(): fw_config_ini.unlink() fw_bin_dir.unlink() # Remove link to ``dmf_control_board_firmware`` Python package in # `conda.pth` in site packages directory. 
info('Unlink working Python directory from Conda environment...') ch.conda_exec('develop', '-u', project_dir, verbose=True) info(72 * '-' + '\nFinished')" 607,"def response(self, parameters): r""""""Return the forward response in base dimensions :math:`\hat{\sigma }(\omega ) = \sigma _\infty \left(1 - \sum_i \frac {m_i}{1 + (j \omega \tau_i)^c_i}\right)` Parameters ---------- pars: Returns ------- response: Nx2 array, first axis denotes frequencies, seconds real and imaginary parts """""" # get a config object self._set_parameters(parameters) terms = self.m / (1 + (1j * self.w * self.tau) ** self.c) # sum up terms specs = np.sum(terms, axis=1) ccomplex = self.sigmai * (1 - specs) response = sip_response.sip_response(self.f, ccomplex=ccomplex) return response" 608,"def dre_dsigmai(self, pars): r"""""" :math:Add formula """""" self._set_parameters(pars) terms = self.m * self.num / self.denom specs = np.sum(terms, axis=1) result = 1 - specs return result" 609,"def dre_dm(self, pars): r"""""" :math:Add formula """""" self._set_parameters(pars) terms = self.num / self.denom result = - self.sigmai * terms return result" 610,"def dre_dtau(self, pars): r"""""" :math:Add formula """""" self._set_parameters(pars) # term 1 num1 = self.c * self.w * self.otc1 * np.cos(self.ang) term1 = num1/self.denom # term 2 num2a = self.otc * np.cos(self.ang) num2b = 1 + num2a denom2 = self.denom ** 2 term2 = num2b / denom2 # term 3 term3 = 2 * self.c * self.w * self.otc1 * np.cos(self.ang) + self.otc2 result = self.sigmai * self.m * (term1 + term2 * term3) return result" 611,"def dre_dc(self, pars): r"""""" :math:Add formula """""" self._set_parameters(pars) # term 1 num1a = np.log(self.w * self.tau) * self.otc * np.sin(self.ang) num1b = self.otc * np.cos(self.ang) * np.pi / 2.0 term1 = (num1a + num1b) / self.denom # term 2 num2 = self.otc * np.sin(self.c / np.pi) * 2 denom2 = self.denom ** 2 term2 = num2 / denom2 # term 3 num3a = 2 * np.log(self.w * self.tau) * self.otc * np.cos(self.ang) num3b = 2 * ((self.w * self.tau) ** 2) * np.pi / 2.0 * np.sin(self.ang) num3c = 2 * np.log(self.w * self.tau) * self.otc2 term3 = num3a - num3b + num3c result = self.sigmai * self.m * (term1 + term2 * term3) return result" 612,"def dim_dsigmai(self, pars): r"""""" :math:Add formula """""" self._set_parameters(pars) result = np.sum(- self.m * self.otc * np.sin(self.ang) / self.denom, axis=1) return result" 613,"def dim_dm(self, pars): r"""""" :math:Add formula """""" self._set_parameters(pars) num1 = self.otc * np.sin(self.ang) result = -self.sigmai * num1 / self.denom return result" 614,"def dim_dtau(self, pars): r"""""" :math:Add formula """""" self._set_parameters(pars) # term 1 num1 = -self.m * (self.w ** self.c) * self.c\ * (self.tau ** (self.c - 1)) * np.sin(self.ang) term1 = self.sigmai * num1 / self.denom # term 2 num2a = -self.m * self.otc * np.sin(self.ang) num2b = 2 * (self.w ** 2.0) * self.c * (self.tau ** (self.c - 1)) *\ np.cos(self.ang) num2c = 2 * self.c * (self.w ** (self.c * 2)) *\ (self.tau ** (2 * self.c - 1)) term2 = self.sigma0 * num2a * (num2b + num2c) / (self.denom ** 2) result = term1 + term2 return result" 615,"def dim_dc(self, pars): r"""""" :math:Add formula """""" self._set_parameters(pars) # term 1 num1a = self.m * np.sin(self.ang) * np.log(self.w * self.tau)\ * self.otc num1b = self.m * self.otc * np.pi / 2 * np.cos(np.pi / 2) term1 = self.sigma0 * (-num1a - num1b) / self.denom # term 2 num2a = -self.m * self.otc * np.cos(self.ang) num2b = -2 * np.log(self.w * self.tau) * self.otc * np.cos(self.ang) 
num2c = 2 * self.otc * np.pi / 2 * np.cos(self.ang) num2d = 2 * np.log(self.w * self.tau) * self.otc2 numerator = num2a * (num2b + num2c) + num2d term2 = self.sigma0 * numerator / (self.denom ** 2) result = term1 + term2 return result" 616,"def add_view_file_mapping(self, pattern, cls): """"""Adds a mapping between a file and a view class. Pattern can be an extension in the form .EXT or a filename. """""" if isinstance(pattern, str): if not pattern.endswith(""*""): _, ext = os.path.splitext(pattern) self.allowed_extensions.add(ext) pattern = re.compile(""^"" + re.escape(pattern).replace(""\\*"", "".+"") + ""$"", re.I) self.view_class_files_map.append((pattern, cls))" 617,"def load_file(self, app, pathname, relpath, pypath): """"""Loads a file and creates a View from it. Files are split between a YAML front-matter and the content (unless it is a .yml file). """""" try: view_class = self.get_file_view_cls(relpath) return create_view_from_file(pathname, source_template=relpath, view_class=view_class) except DeclarativeViewError: pass" 618,"def get_file_view_cls(self, filename): """"""Returns the view class associated to a filename """""" if filename is None: return self.default_view_class for pattern, cls in self.view_class_files_map: if pattern.match(filename): return cls return self.default_view_class" 619,"def children(self, vertex): """""" Return the list of immediate children of the given vertex. """""" return [self.head(edge) for edge in self.out_edges(vertex)]" 620,"def parents(self, vertex): """""" Return the list of immediate parents of this vertex. """""" return [self.tail(edge) for edge in self.in_edges(vertex)]" 621,"def references(self): """""" Return (tail, head) pairs for each edge in the graph. """""" return [ (tail, head) for tail in self.vertices for head in self.children(tail) ]" 622,"def descendants(self, start, generations=None): """""" Return the subgraph of all nodes reachable from the given start vertex, including that vertex. If specified, the optional `generations` argument specifies how many generations to limit to. """""" visited = self.vertex_set() visited.add(start) to_visit = deque([(start, 0)]) while to_visit: vertex, depth = to_visit.popleft() if depth == generations: continue for child in self.children(vertex): if child not in visited: visited.add(child) to_visit.append((child, depth+1)) return self.full_subgraph(visited)" 623,"def ancestors(self, start, generations=None): """""" Return the subgraph of all nodes from which the given vertex is reachable, including that vertex. If specified, the optional `generations` argument specifies how many generations to limit to. """""" visited = self.vertex_set() visited.add(start) to_visit = deque([(start, 0)]) while to_visit: vertex, depth = to_visit.popleft() if depth == generations: continue for parent in self.parents(vertex): if parent not in visited: visited.add(parent) to_visit.append((parent, depth+1)) return self.full_subgraph(visited)" 624,"def _component_graph(self): """""" Compute the graph of strongly connected components. Each strongly connected component is itself represented as a list of pairs, giving information not only about the vertices belonging to this strongly connected component, but also the edges leading from this strongly connected component to other components. Each pair is of the form ('EDGE', v) or ('VERTEX', v) for some vertex v. 
In the first case, that indicates that there's an edge from this strongly connected component to the given vertex v (which will belong to another component); in the second, it indicates that v is a member of this strongly connected component. Each component will begin with a vertex (the *root* vertex of the strongly connected component); the following edges are edges from that vertex. Algorithm is based on that described in ""Path-based depth-first search for strong and biconnected components"" by Harold N. Gabow, Inf.Process.Lett. 74 (2000) 107--114. """""" sccs = [] stack = [] boundaries = [] identified = self.vertex_set() index = self.vertex_dict() to_do = [] def visit_vertex(v): index[v] = len(stack) stack.append(('VERTEX', v)) boundaries.append(index[v]) to_do.append((leave_vertex, v)) to_do.extend((visit_edge, w) for w in self.children(v)) def visit_edge(v): if v in identified: stack.append(('EDGE', v)) elif v in index: while index[v] < boundaries[-1]: boundaries.pop() else: to_do.append((visit_vertex, v)) def leave_vertex(v): if boundaries[-1] == index[v]: root = boundaries.pop() scc = stack[root:] del stack[root:] for item_type, w in scc: if item_type == 'VERTEX': identified.add(w) del index[w] sccs.append(scc) stack.append(('EDGE', v)) # Visit every vertex of the graph. for v in self.vertices: if v not in identified: to_do.append((visit_vertex, v)) while to_do: operation, v = to_do.pop() operation(v) stack.pop() return sccs" 625,"def source_components(self): """""" Return the strongly connected components not reachable from any other component. Any component in the graph is reachable from one of these. """""" raw_sccs = self._component_graph() # Construct a dictionary mapping each vertex to the root of its scc. vertex_to_root = self.vertex_dict() # And keep track of which SCCs have incoming edges. non_sources = self.vertex_set() # Build maps from vertices to roots, and identify the sccs that *are* # reachable from other components. for scc in raw_sccs: root = scc[0][1] for item_type, w in scc: if item_type == 'VERTEX': vertex_to_root[w] = root elif item_type == 'EDGE': non_sources.add(vertex_to_root[w]) sccs = [] for raw_scc in raw_sccs: root = raw_scc[0][1] if root not in non_sources: sccs.append([v for vtype, v in raw_scc if vtype == 'VERTEX']) return [self.full_subgraph(scc) for scc in sccs]" 626,"def strongly_connected_components(self): """""" Return list of strongly connected components of this graph. Returns a list of subgraphs. Algorithm is based on that described in ""Path-based depth-first search for strong and biconnected components"" by Harold N. Gabow, Inf.Process.Lett. 74 (2000) 107--114. """""" raw_sccs = self._component_graph() sccs = [] for raw_scc in raw_sccs: sccs.append([v for vtype, v in raw_scc if vtype == 'VERTEX']) return [self.full_subgraph(scc) for scc in sccs]" 627,"def save(self, *args, **kwargs): """""" **uid**: :code:`person:{slug}` """""" if not self.full_name: self.full_name = '{0}{1}{2}'.format( self.first_name, '{}'.format( ' ' + self.middle_name + ' ' if self.middle_name else ' ', ), self.last_name, '{}'.format(' ' + self.suffix if self.suffix else '') ) self.slug = uuslug( self.full_name, instance=self, max_length=100, separator='-', start_no=2 ) if not self.uid: self.uid = 'person:{}'.format(self.slug) super(Person, self).save(*args, **kwargs)" 628,"def signature(self): """""" Compute the ORDER_HASH of the request. 
The hashable string is composed by getting the values from: MERCHANT ORDER_REF ORDER_DATE ORDER_PNAME[] ORDER_PCODE[] ORDER_PINFO[] ORDER_PRICE[] ORDER_QTY[] ORDER_VAT[] ORDER_SHIPPING PRICES_CURRENCY DISCOUNT DESTINATION_CITY DESTINATION_STATE DESTINATION_COUNTRY PAY_METHOD ORDER_PRICE_TYPE[] SELECTED_INSTALLMENTS_NO TESTORDER in this exact order. Next, we need to concatenate their lenghts with thier values, resulting in a string like: 8PAYUDEMO9789456123192016-10-05 11:12:279CD Player12MobilePhone6Laptop 10PROD_0489110PROD_0740910PROD_0496527Extended Warranty - 5 Years8 Dual SIM1117""Display482.371945.7545230171311220220220103RON2559 Bucuresti9Bucuresti2RO8CCVISAMC5GROSS5GROSS5GROSS4TRUE Using this string and the MERCHANT_KEY, we compute the HMAC. """""" hashable_fields = ['MERCHANT', 'ORDER_REF', 'ORDER_DATE', 'ORDER_SHIPPING', 'PRICES_CURRENCY', 'DISCOUNT', 'DESTINATION_CITY', 'DESTINATION_STATE', 'DESTINATION_COUNTRY', 'PAY_METHOD', 'SELECTED_INSTALLMENTS_NO', 'TESTORDER'] result = text_type() # We need this hack since payU is not consistent # with the order of fields in hash string suffix = text_type() for field in self: if field.name == 'ORDER_HASH': continue field_value = field.value() if field.name in hashable_fields and field_value: encoded_value = text_type('{length}{value}').format( length=len(text_type(field_value).encode('utf-8')), value=field_value ) if field.name == 'TESTORDER' or \ field.name == 'SELECTED_INSTALLMENTS_NO': suffix += encoded_value else: result += encoded_value if field.name == 'ORDER': for detail in PAYU_ORDER_DETAILS: if any([detail in order and order[detail] for order in field_value]): for order in field_value: value = order.get(detail, '') item = text_type('{length}{value}').format( length=len(text_type(value).encode('utf-8')), value=value ) if detail == 'PRICE_TYPE': suffix += item else: result += item result += suffix result = result.encode('utf-8') return hmac.new(PAYU_MERCHANT_KEY, result).hexdigest()" 629,"def _prepare_orders(self, orders): """""" Each order needs to have all it's details filled with default value, or None, in case those are not already filled. """""" for detail in PAYU_ORDER_DETAILS: if not any([detail in order for order in orders]): for order in orders: order[detail] = PAYU_ORDER_DETAILS_DEFAULTS.get(detail, None) return orders" 630,"def staticfiles_url_fetcher(url): """""" Returns the file matching url. This method will handle any URL resources that rendering HTML requires (eg: images pointed my ``img`` tags, stylesheets, etc). The default behaviour will fetch any http(s) files normally, and will also attempt to resolve staticfiles internally (this should mostly affect development scenarios, but also works if static files are served under a relative url). Returns a dictionary with two entries: ``string``, which is the resources data as a string and ``mime_type``, which is the identified mime type for the resource. """""" if url.startswith('/'): base_url = staticfiles_storage.base_url filename = url.replace(base_url, '', 1) path = finders.find(filename) if path: # This should match most cases. Manifest static files with relative # URLs will only be picked up in DEBUG mode here. with open(path, 'rb') as f: data = f.read() else: # This should just match things like Manifest static files with # relative URLs. While this code path will expect `collectstatic` # to have run, it should only be reached on if DEBUG = False. 
# XXX: Only Django >= 2.0 supports using this as a context manager: f = staticfiles_storage.open(filename) data = f.read() f.close() return { 'string': data, 'mime_type': mimetypes.guess_type(url)[0], } else: return default_url_fetcher(url)" 631,"def render_pdf( template, file_, url_fetcher=staticfiles_url_fetcher, context=None, ): """""" Writes the PDF data into ``file_``. Note that ``file_`` can actually be a Django Response object as well. This function may be used as a helper that can be used to save a PDF file to a file (or anything else outside of a request/response cycle), eg:: :param str html: A rendered HTML. :param file file_: A file like object (or a Response) where to output the rendered PDF. """""" context = context or {} html = get_template(template).render(context) HTML( string=html, base_url='not-used://', url_fetcher=url_fetcher, ).write_pdf( target=file_, )" 632,"def encode_bytes(src_buf, dst_file): """"""Encode a buffer length followed by the bytes of the buffer itself. Parameters ---------- src_buf: bytes Source bytes to be encoded. Function asserts that 0 <= len(src_buf) <= 2**16-1. dst_file: file File-like object with write method. Returns ------- int Number of bytes written to `dst_file`. """""" if not isinstance(src_buf, bytes): raise TypeError('src_buf must by bytes.') len_src_buf = len(src_buf) assert 0 <= len_src_buf <= 2**16-1 num_written_bytes = len_src_buf + 2 len_buf = FIELD_U16.pack(len_src_buf) dst_file.write(len_buf) dst_file.write(src_buf) return num_written_bytes" 633,"def decode_bytes(f): """"""Decode a buffer length from a 2-byte unsigned int then read the subsequent bytes. Parameters ---------- f: file File-like object with read method. Raises ------ UnderflowDecodeError When the end of stream is encountered before the end of the encoded bytes. Returns ------- int Number of bytes read from `f`. bytes Value bytes decoded from `f`. """""" buf = f.read(FIELD_U16.size) if len(buf) < FIELD_U16.size: raise UnderflowDecodeError() (num_bytes,) = FIELD_U16.unpack_from(buf) num_bytes_consumed = FIELD_U16.size + num_bytes buf = f.read(num_bytes) if len(buf) < num_bytes: raise UnderflowDecodeError() return num_bytes_consumed, buf" 634,"def encode_utf8(s, f): """"""UTF-8 encodes string `s` to file-like object `f` according to the MQTT Version 3.1.1 specification in section 1.5.3. The maximum length for the encoded string is 2**16-1 (65535) bytes. An assertion error will result if the encoded string is longer. Parameters ---------- s: str String to be encoded. f: file File-like object. Returns ------- int Number of bytes written to f. """""" encode = codecs.getencoder('utf8') encoded_str_bytes, num_encoded_chars = encode(s) num_encoded_str_bytes = len(encoded_str_bytes) assert 0 <= num_encoded_str_bytes <= 2**16-1 num_encoded_bytes = num_encoded_str_bytes + 2 f.write(FIELD_U8.pack((num_encoded_str_bytes & 0xff00) >> 8)) f.write(FIELD_U8.pack(num_encoded_str_bytes & 0x00ff)) f.write(encoded_str_bytes) return num_encoded_bytes" 635,"def decode_utf8(f): """"""Decode a utf-8 string encoded as described in MQTT Version 3.1.1 section 1.5.3 line 177. This is a 16-bit unsigned length followed by a utf-8 encoded string. Parameters ---------- f: file File-like object with read method. Raises ------ UnderflowDecodeError Raised when a read failed to extract enough bytes from the underlying stream to decode the string. Utf8DecodeError When any code point in the utf-8 string is invalid. Returns ------- int Number of bytes consumed. str A string utf-8 decoded from ``f``. 
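Examples
--------
A minimal round-trip sketch (an illustrative addition, not from the original source; it assumes ``encode_utf8`` defined above and that ``FIELD_U16`` is the big-endian 16-bit struct used throughout this module):

    import io

    buf = io.BytesIO()
    encode_utf8('mqtt', buf)          # writes a 2-byte length prefix, then the utf-8 payload
    buf.seek(0)
    num_bytes, s = decode_utf8(buf)   # -> (6, 'mqtt'): 2 length bytes + 4 payload bytes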
"""""" decode = codecs.getdecoder('utf8') buf = f.read(FIELD_U16.size) if len(buf) < FIELD_U16.size: raise UnderflowDecodeError() (num_utf8_bytes,) = FIELD_U16.unpack_from(buf) num_bytes_consumed = FIELD_U16.size + num_utf8_bytes buf = f.read(num_utf8_bytes) if len(buf) < num_utf8_bytes: raise UnderflowDecodeError() try: s, num_chars = decode(buf, 'strict') except UnicodeError as e: raise Utf8DecodeError(e) return num_bytes_consumed, s" 636,"def encode_varint(v, f): """"""Encode integer `v` to file `f`. Parameters ---------- v: int Integer v >= 0. f: file Object containing a write method. Returns ------- int Number of bytes written. """""" assert v >= 0 num_bytes = 0 while True: b = v % 0x80 v = v // 0x80 if v > 0: b = b | 0x80 f.write(FIELD_U8.pack(b)) num_bytes += 1 if v == 0: break return num_bytes" 637,"def decode_varint(f, max_bytes=4): """"""Decode variable integer using algorithm similar to that described in MQTT Version 3.1.1 line 297. Parameters ---------- f: file Object with a read method. max_bytes: int or None If a varint cannot be constructed using `max_bytes` or fewer from f then raises a `DecodeError`. If None then there is no maximum number of bytes. Raises ------- DecodeError When length is greater than max_bytes. UnderflowDecodeError When file ends before enough bytes can be read to construct the varint. Returns ------- int Number of bytes consumed. int Value extracted from `f`. """""" num_bytes_consumed = 0 value = 0 m = 1 while True: buf = f.read(1) if len(buf) == 0: raise UnderflowDecodeError() (u8,) = FIELD_U8.unpack(buf) value += (u8 & 0x7f) * m m *= 0x80 num_bytes_consumed += 1 if u8 & 0x80 == 0: # No further bytes break elif max_bytes is not None and num_bytes_consumed >= max_bytes: raise DecodeError('Variable integer contained more than maximum bytes ({}).'.format(max_bytes)) return num_bytes_consumed, value" 638,"def unpack(self, struct): """"""Read as many bytes as are required to extract struct then unpack and return a tuple of the values. Raises ------ UnderflowDecodeError Raised when a read failed to extract enough bytes from the underlying stream to extract the bytes. Parameters ---------- struct: struct.Struct Returns ------- tuple Tuple of extracted values. """""" v = struct.unpack(self.read(struct.size)) return v" 639,"def unpack_utf8(self): """"""Decode a utf-8 string encoded as described in MQTT Version 3.1.1 section 1.5.3 line 177. This is a 16-bit unsigned length followed by a utf-8 encoded string. Raises ------ UnderflowDecodeError Raised when a read failed to extract enough bytes from the underlying stream to decode the string. DecodeError When any code point in the utf-8 string is invalid. Returns ------- int Number of bytes consumed. str A string utf-8 decoded from the underlying stream. """""" num_bytes_consumed, s = decode_utf8(self.__f) self.__num_bytes_consumed += num_bytes_consumed return num_bytes_consumed, s" 640,"def unpack_bytes(self): """"""Unpack a utf-8 string encoded as described in MQTT Version 3.1.1 section 1.5.3 line 177. This is a 16-bit unsigned length followed by a utf-8 encoded string. Returns ------- int Number of bytes consumed bytes A bytes object extracted from the underlying stream. """""" num_bytes_consumed, b = decode_bytes(self.__f) self.__num_bytes_consumed += num_bytes_consumed return num_bytes_consumed, b" 641,"def unpack_varint(self, max_bytes): """"""Decode variable integer using algorithm similar to that described in MQTT Version 3.1.1 line 297. 
Parameters ---------- max_bytes: int or None If a varint cannot be constructed using `max_bytes` or fewer from f then raises a `DecodeError`. If None then there is no maximum number of bytes. Raises ------- DecodeError When length is greater than max_bytes. UnderflowDecodeError When file ends before enough bytes can be read to construct the varint. Returns ------- int Number of bytes consumed. int Value extracted from `f`. """""" num_bytes_consumed, value = decode_varint(self.__f, max_bytes) self.__num_bytes_consumed += num_bytes_consumed return num_bytes_consumed, value" 642,"def read(self, num_bytes): """"""Read `num_bytes` and return them. Parameters ---------- num_bytes : int Number of bytes to extract from the underlying stream. Raises ------ UnderflowDecodeError Raised when a read failed to extract enough bytes from the underlying stream to extract the bytes. Returns ------- bytes A bytes object extracted from underlying stream. """""" buf = self.__f.read(num_bytes) assert len(buf) <= num_bytes if len(buf) < num_bytes: raise UnderflowDecodeError() self.__num_bytes_consumed += num_bytes return buf" 643,"def read(self, max_bytes=1): """"""Read at most `max_bytes` from internal buffer. Parameters ----------- max_bytes: int Maximum number of bytes to read. Returns -------- bytes Bytes extracted from internal buffer. Length may be less than ``max_bytes``. On end-of file returns a bytes object with zero-length. """""" if self.limit is None: b = self.__f.read(max_bytes) else: if self.__num_bytes_consumed + max_bytes > self.limit: max_bytes = self.limit - self.__num_bytes_consumed b = self.__f.read(max_bytes) self.__num_bytes_consumed += len(b) return b" 644,"def read(self, max_bytes=1): """"""Read at most `max_bytes` from internal buffer. Parameters ----------- max_bytes: int Maximum number of bytes to read. Raises ------ ValueError If read is called after close has been called. Returns -------- bytes Bytes extracted from internal buffer. Length may be less than `max_bytes`. On end-of file returns a bytes object with zero-length. """""" if self.__num_bytes_consumed is None: raise ValueError('I/O operation on closed file.') if self.__num_bytes_consumed + max_bytes >= len(self.__buf): max_bytes = len(self.__buf) - self.__num_bytes_consumed b = self.__buf[self.__num_bytes_consumed:self.__num_bytes_consumed + max_bytes] self.__num_bytes_consumed += max_bytes if isinstance(b, bytearray): b = bytes(b) assert isinstance(b, bytes) return b" 645,"def timeout(self, value): ''' Specifies a timeout on the search query ''' if not self.params: self.params = dict(timeout=value) return self self.params['timeout'] = value return self" 646,"def filtered(self, efilter): ''' Applies a filter to the search ''' if not self.params: self.params={'filter' : efilter} return self if not self.params.has_key('filter'): self.params['filter'] = efilter return self self.params['filter'].update(efilter) return self" 647,"def size(self,value): ''' The number of hits to return. Defaults to 10 ''' if not self.params: self.params = dict(size=value) return self self.params['size'] = value return self" 648,"def from_offset(self, value): ''' The starting from index of the hits to return. Defaults to 0. ''' if not self.params: self.params = dict({'from':value}) return self self.params['from'] = value return self" 649,"def sort(self, *args, **kwargs): ''' http://www.elasticsearch.org/guide/reference/api/search/sort.html Allows to add one or more sort on specific fields. Each sort can be reversed as well. 
The sort is defined on a per field level, with special field name for _score to sort by score. standard arguments are ordered ascending, keyword arguments are fields and you specify the order either asc or desc ''' if not self.params: self.params = dict() self.params['sort'] = list() for arg in args: self.params['sort'].append(arg) for k,v in kwargs.iteritems(): self.params['sort'].append({k : v}) return self" 650,"def sorted(self, fsort): ''' Allows to add one or more sort on specific fields. Each sort can be reversed as well. The sort is defined on a per field level, with special field name for _score to sort by score. ''' if not self.params: self.params = dict() self.params['sort'] = fsort return self" 651,"def search_simple(self, index,itype, key, search_term): ''' ElasticSearch.search_simple(index,itype,key,search_term) Usage: > es = ElasticSearch() > es.search_simple('twitter','users','name','kim') ''' request = self.session url = 'http://%s:%s/%s/%s/_search?q=%s:%s' % (self.host,self.port,index,itype,key,search_term) response = request.get(url) return response" 652,"def search_advanced(self, index, itype, query): ''' Advanced search interface using specified query > query = ElasticQuery().term(user='kimchy') > ElasticSearch().search_advanced('twitter','posts',query) ... Search results ... ''' request = self.session url = 'http://%s:%s/%s/%s/_search' % (self.host,self.port,index,itype) if self.params: query_header = dict(query=query, **self.params) else: query_header = dict(query=query) if self.verbose: print query_header response = request.post(url,query_header) return response" 653,"def doc_create(self,index,itype,value): ''' Creates a document ''' request = self.session url = 'http://%s:%s/%s/%s/' % (self.host, self.port, index, itype) if self.verbose: print value response = request.post(url,value) return response" 654,"def search_index_simple(self,index,key,search_term): ''' Search the index using a simple key and search_term @param index Name of the index @param key Search Key @param search_term The term to be searched for ''' request = self.session url = 'http://%s:%s/%s/_search?q=%s:%s' % (self.host,self.port,index,key,search_term) response = request.get(url) return response" 655,"def search_index_advanced(self, index, query): ''' Advanced search query against an entire index > query = ElasticQuery().query_string(query='imchi') > search = ElasticSearch() ''' request = self.session url = 'http://%s:%s/%s/_search' % (self.host, self.port, index) if self.params: content = dict(query=query, **self.params) else: content = dict(query=query) if self.verbose: print content response = request.post(url,content) return response" 656,"def index_create(self, index, number_of_shards=5,number_of_replicas=1): ''' Creates the specified index > search = ElasticSearch() > search.index_create('twitter') {""ok"":true,""acknowledged"":true} ''' request = self.session content = {'settings' : dict(number_of_shards=number_of_shards, number_of_replicas=number_of_replicas)} if self.verbose: print content url = 'http://%s:%s/%s' % (self.host, self.port, index) response = request.put(url,content) return response" 657,"def index_delete(self, index): ''' Delets the specified index > search = ElasticSearch() > search.index_delete('twitter') {""ok"" : True, ""acknowledged"" : True } ''' request = self.session url = 'http://%s:%s/%s' % (self.host, self.port, index) response = request.delete(url) return response" 658,"def index_open(self, index): ''' Opens the speicified index. 
http://www.elasticsearch.org/guide/reference/api/admin-indices-open-close.html > ElasticSearch().index_open('my_index') ''' request = self.session url = 'http://%s:%s/%s/_open' % (self.host, self.port, index) response = request.post(url,None) return response" 659,"def river_couchdb_create(self, index_name,index_type='',couchdb_db='', river_name='',couchdb_host='localhost', couchdb_port='5984',couchdb_user=None, couchdb_password=None, couchdb_filter=None,script=''): ''' https://github.com/elasticsearch/elasticsearch-river-couchdb Creates a river for the specified couchdb_db. > search = ElasticSearch() > search.river_couchdb_create('feeds','feeds','feeds') {u'_id': u'_meta', u'_index': u'_river', u'_type': u'test_db', u'_version': 1, u'ok': True} ''' request = self.session if not index_type: index_type = index_name if not couchdb_db: couchdb_db = index_name content = { 'type' : 'couchdb', 'couchdb' : { 'host' : couchdb_host, 'port' : couchdb_port, 'db' : couchdb_db, 'filter' : couchdb_filter }, 'index' : { 'index' : index_name, 'type' : index_type } } if couchdb_user and couchdb_password: content['couchdb']['user'] = couchdb_user content['couchdb']['password'] = couchdb_password if script: content['couchdb']['script'] = script if self.verbose: print content url = 'http://%s:%s/_river/%s/_meta' %(self.host, self.port, river_name or index_name) response = request.post(url,content) return response" 660,"def river_couchdb_delete(self, index_name): ''' https://github.com/elasticsearch/elasticsearch-river-couchdb Delete's a river for the specified index WARNING: It DOES NOT delete the index, only the river, so the only effects of this are that the index will no longer poll CouchDB for updates. ''' request = self.session url = 'http://%s:%s/_river/%s' % (self.host, self.port, index_name) response = request.delete(url) return response" 661,"def index_list(self): ''' Lists indices ''' request = self.session url = 'http://%s:%s/_cluster/state/' % (self.host, self.port) response = request.get(url) if request.status_code==200: return response.get('metadata',{}).get('indices',{}).keys() else: return response" 662,"def map(self,index_name, index_type, map_value): ''' Enable a specific map for an index and type ''' request = self.session url = 'http://%s:%s/%s/%s/_mapping' % (self.host, self.port, index_name, index_type) content = { index_type : { 'properties' : map_value } } if self.verbose: print content response = request.put(url,content) return response" 663,"def list_types(index_name, host='localhost',port='9200'): ''' Lists the context types available in an index ''' return ElasticSearch(host=host, port=port).type_list(index_name)" 664,"def type_list(self, index_name): ''' List the types available in an index ''' request = self.session url = 'http://%s:%s/%s/_mapping' % (self.host, self.port, index_name) response = request.get(url) if request.status_code == 200: return response[index_name].keys() else: return response" 665,"def raw(self, module, method='GET', data=None): ''' Submits or requsts raw input ''' request = self.session url = 'http://%s:%s/%s' % (self.host, self.port, module) if self.verbose: print data if method=='GET': response = request.get(url) elif method=='POST': response = request.post(url,data) elif method=='PUT': response = request.put(url,data) elif method=='DELETE': response = request.delete(url) else: return {'error' : 'No such request method %s' % method} return response" 666,"def inverse(self, N): """""" Returns the modular inverse of an integer with respect to the field 
characteristic, P. Use the Extended Euclidean Algorithm: https://en.wikipedia.org/wiki/Extended_Euclidean_algorithm """""" if N == 0: return 0 lm, hm = 1, 0 low, high = N % self.P, self.P while low > 1: r = high//low nm, new = hm - lm * r, high - low * r lm, low, hm, high = nm, new, lm, low return lm % self.P" 667,"def is_on_curve(self, point): """""" Checks whether a point is on the curve. Args: point (AffinePoint): Point to be checked. Returns: bool: True if point is on the curve, False otherwise. """""" X, Y = point.X, point.Y return ( pow(Y, 2, self.P) - pow(X, 3, self.P) - self.a * X - self.b ) % self.P == 0" 668,"def generate_private_key(self): """""" Generates a private key based on the password. SHA-256 is a member of the SHA-2 cryptographic hash functions designed by the NSA. SHA stands for Secure Hash Algorithm. The password is converted to bytes and hashed with SHA-256. The binary output is converted to a hex representation. Args: data (str): The data to be hashed with SHA-256. Returns: bytes: The hexadecimal representation of the hashed binary data. """""" random_string = base64.b64encode(os.urandom(4096)).decode('utf-8') binary_data = bytes(random_string, 'utf-8') hash_object = hashlib.sha256(binary_data) message_digest_bin = hash_object.digest() message_digest_hex = binascii.hexlify(message_digest_bin) return message_digest_hex" 669,"def generate_public_key(self): """""" Generates a public key from the hex-encoded private key using elliptic curve cryptography. The private key is multiplied by a predetermined point on the elliptic curve called the generator point, G, resulting in the corresponding private key. The generator point is always the same for all Bitcoin users. Jacobian coordinates are used to represent the elliptic curve point G. https://en.wikibooks.org/wiki/Cryptography/Prime_Curve/Jacobian_Coordinates The exponentiating by squaring (also known by double-and-add) method is used for the elliptic curve multiplication that results in the public key. https://en.wikipedia.org/wiki/Exponentiation_by_squaring Bitcoin public keys are 65 bytes. The first byte is 0x04, next 32 bytes correspond to the X coordinate, and last 32 bytes correspond to the Y coordinate. They are typically encoded as 130-length hex characters. Args: private_key (bytes): UTF-8 encoded hexadecimal Returns: str: The public key in hexadecimal representation. """""" private_key = int(self.private_key, 16) if private_key >= self.N: raise Exception('Invalid private key.') G = JacobianPoint(self.Gx, self.Gy, 1) public_key = G * private_key x_hex = '{0:0{1}x}'.format(public_key.X, 64) y_hex = '{0:0{1}x}'.format(public_key.Y, 64) return '04' + x_hex + y_hex" 670,"def generate_address(self): """""" Creates a Bitcoin address from the public key. Details of the steps for creating the address are outlined in this link: https://en.bitcoin.it/wiki/Technical_background_of_version_1_Bitcoin_addresses The last step is Base58Check encoding, which is similar to Base64 encoding but slightly different to create a more human-readable string where '1' and 'l' won't get confused. 
More on Base64Check encoding here: https://en.bitcoin.it/wiki/Base58Check_encoding """""" binary_pubkey = binascii.unhexlify(self.public_key) binary_digest_sha256 = hashlib.sha256(binary_pubkey).digest() binary_digest_ripemd160 = hashlib.new('ripemd160', binary_digest_sha256).digest() binary_version_byte = bytes([0]) binary_with_version_key = binary_version_byte + binary_digest_ripemd160 checksum_intermed = hashlib.sha256(binary_with_version_key).digest() checksum_intermed = hashlib.sha256(checksum_intermed).digest() checksum = checksum_intermed[:4] binary_address = binary_digest_ripemd160 + checksum leading_zero_bytes = 0 for char in binary_address: if char == 0: leading_zero_bytes += 1 inp = binary_address + checksum result = 0 while len(inp) > 0: result *= 256 result += inp[0] inp = inp[1:] result_bytes = bytes() while result > 0: curcode = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'[result % 58] result_bytes = bytes([ord(curcode)]) + result_bytes result //= 58 pad_size = 0 - len(result_bytes) padding_element = b'1' if pad_size > 0: result_bytes = padding_element * pad_size + result_bytes result = ''.join([chr(y) for y in result_bytes]) address = '1' * leading_zero_bytes + result return address" 671,"def double(self): """""" Doubles this point. Returns: JacobianPoint: The point corresponding to `2 * self`. """""" X1, Y1, Z1 = self.X, self.Y, self.Z if Y1 == 0: return POINT_AT_INFINITY S = (4 * X1 * Y1 ** 2) % self.P M = (3 * X1 ** 2 + self.a * Z1 ** 4) % self.P X3 = (M ** 2 - 2 * S) % self.P Y3 = (M * (S - X3) - 8 * Y1 ** 4) % self.P Z3 = (2 * Y1 * Z1) % self.P return JacobianPoint(X3, Y3, Z3)" 672,"def to_affine(self): """""" Converts this point to an affine representation. Returns: AffinePoint: The affine reprsentation. """""" X, Y, Z = self.x, self.y, self.inverse(self.z) return ((X * Z ** 2) % P, (Y * Z ** 3) % P)" 673,"def double(self): """""" Doubles this point. Returns: AffinePoint: The point corresponding to `2 * self`. """""" X1, Y1, a, P = self.X, self.Y, self.a, self.P if self.infinity: return self S = ((3 * X1 ** 2 + a) * self.inverse(2 * Y1)) % P X2 = (S ** 2 - (2 * X1)) % P Y2 = (S * (X1 - X2) - Y1) % P return AffinePoint(X2, Y2)" 674,"def slope(self, other): """""" Determines the slope between this point and another point. Args: other (AffinePoint): The second point. Returns: int: Slope between self and other. """""" X1, Y1, X2, Y2 = self.X, self.Y, other.X, other.Y Y3 = Y1 - Y2 X3 = X1 - X2 return (Y3 * self.inverse(X3)) % self.P" 675,"def to_jacobian(self): """""" Converts this point to a Jacobian representation. Returns: JacobianPoint: The Jacobian representation. 
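A small illustrative sketch (hypothetical coordinates; it assumes the two-argument ``AffinePoint`` constructor used in ``double`` above):

    p = AffinePoint(3, 4)       # hypothetical point, not necessarily on the curve
    jp = p.to_jacobian()        # JacobianPoint(X=3, Y=4, Z=1): an affine point has Z = 1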
"""""" if not self: return JacobianPoint(X=0, Y=0, Z=0) return JacobianPoint(X=self.X, Y=self.Y, Z=1)" 676,"def import_model(self, name, path=""floyd.db.models""): """"""imports a model of name from path, returning from local model cache if it has been previously loaded otherwise importing"""""" if name in self._model_cache: return self._model_cache[name] try: model = getattr(__import__(path, None, None, [name]), name) self._model_cache[name] = model except ImportError: return False return model" 677,"def parse_md(self): """"""Takes a post path and returns a dictionary of variables"""""" post_content = _MARKDOWN.convert(self.raw_src) if hasattr(_MARKDOWN, 'Meta'): # 'Meta' in _MARKDOWN and _MARKDOWN.Meta: for key in _MARKDOWN.Meta: print ""\t meta: %s: %s (%s)"" % (key, _MARKDOWN.Meta[key][0], type(_MARKDOWN.Meta[key][0])) if key == 'pubdate': setattr(self, key, datetime.datetime.fromtimestamp(float(_MARKDOWN.Meta[key][0]))) else: setattr(self, key, _MARKDOWN.Meta[key][0]) self.content = post_content self.stub = self.__key__ # set required fields # @TODO required in schema rather than here if not hasattr(self, 'pubdate'): print '\t Notice: setting default pubdate' setattr(self, 'pubdate', datetime.datetime.now())" 678,"def filter(self, **kwargs): # @TODO refactor with models as dicts """"""filter results of dataset eg. Query('Posts').filter(post_type='post') """""" f_field = kwargs.keys()[0] f_value = kwargs[f_field] _newset = [] for m in self._dataset: if hasattr(m, f_field): if getattr(m, f_field) == f_value: _newset.append(m) self._dataset = _newset return self" 679,"def sort_by(self, sb): """"""Sort results"""""" self._dataset = self._dataset.sort(key=lambda x: x.pubdate, reverse=True) return self" 680,"def execute_train_task_with_dependencies(self, task_cls, **kwargs): """""" Run the training, as well as any dependencies of the training task_cls - class of a task """""" log.info(""Task {0}"".format(get_task_name(task_cls))) #Instantiate the task task_inst = task_cls() #Grab arguments from the task instance and set them for arg in task_inst.args: if arg not in kwargs: kwargs[arg] = task_inst.args[arg] #Check for dependencies defined by the task if hasattr(task_inst, ""dependencies""): deps = task_inst.dependencies dep_results = [] #Run the dependencies through recursion (in case of dependencies of dependencies, etc) for dep in deps: log.info(""Dependency {0}"".format(get_task_name(dep))) dep_results.append(self.execute_train_task_with_dependencies(dep.cls, **dep.args)) trained_dependencies = [] #Add executed dependency to trained_dependencies list on the task for i in xrange(0,len(deps)): dep = deps[i] dep_result = dep_results[i] name = dep.name namespace = dep.namespace category = dep.category trained_dependencies.append(TrainedDependency(category=category, namespace=namespace, name = name, inst = dep)) task_inst.trained_dependencies = trained_dependencies #Finally, run the task task_inst.train(**kwargs) return task_inst" 681,"def execute_predict_task(self, task_inst, predict_data, **kwargs): """""" Do a prediction task_inst - instance of a task """""" result = task_inst.predict(predict_data, **task_inst.args) return result" 682,"def train(self, **kwargs): """""" Do the workflow training """""" log.info(""Starting to train..."") if not self.setup_run: self.setup() self.trained_tasks = [] for task in self.tasks: data = self.reformatted_input[task.data_format]['data'] target = self.reformatted_input[task.data_format]['target'] if data is None: raise Exception(""Data cannot be none. 
Check the config file to make sure the right input is being read."") kwargs['data']=data kwargs['target']=target trained_task = self.execute_train_task_with_dependencies(task, **kwargs) self.trained_tasks.append(trained_task) #If the trained task alters the data in any way, pass it down the chain to the next task if hasattr(trained_task, 'data'): self.reformatted_input[task.data_format]['data'] = trained_task.data log.info(""Finished training."")" 683,"def predict(self, **kwargs): """""" Do the workflow prediction (done after training, with new data) """""" reformatted_predict = self.reformat_predict_data() results = {} for task_inst in self.trained_tasks: predict = reformatted_predict[task_inst.data_format]['predict'] kwargs['predict']=predict results.update({get_task_name(task_inst) : self.execute_predict_task(task_inst, predict, **kwargs)}) return results" 684,"def read_input(self, input_cls, filename, **kwargs): """""" Read in input and do some minimal preformatting input_cls - the class to use to read the input filename - input filename """""" input_inst = input_cls() input_inst.read_input(filename) return input_inst.get_data()" 685,"def reformat_file(self, input_file, input_format, output_format): """""" Reformat input data files to a format the tasks can use """""" #Return none if input_file or input_format do not exist if input_file is None or input_format is None: return None #Find the needed input class and read the input stream try: input_cls = self.find_input(input_format) input_inst = input_cls() except TypeError: #Return none if input_cls is a Nonetype return None #If the input file cannot be found, return None try: input_inst.read_input(self.absolute_filepath(input_file)) except IOError: return None formatter = find_needed_formatter(input_format, output_format) if formatter is None: raise Exception(""Cannot find a formatter that can convert from {0} to {1}"".format(self.input_format, output_format)) formatter_inst = formatter() formatter_inst.read_input(input_inst.get_data(), input_format) data = formatter_inst.get_data(output_format) return data" 686,"def reformat_input(self, **kwargs): """""" Reformat input data """""" reformatted_input = {} needed_formats = [] for task_cls in self.tasks: needed_formats.append(task_cls.data_format) self.needed_formats = list(set(needed_formats)) for output_format in self.needed_formats: reformatted_input.update( { output_format : { 'data' : self.reformat_file(self.input_file, self.input_format, output_format), 'target' : self.reformat_file(self.target_file, self.target_format, output_format) } } ) return reformatted_input" 687,"def _create_modulename(cdef_sources, source, sys_version): """""" This is the same as CFFI's create modulename except we don't include the CFFI version. """""" key = '\x00'.join([sys_version[:3], source, cdef_sources]) key = key.encode('utf-8') k1 = hex(binascii.crc32(key[0::2]) & 0xffffffff) k1 = k1.lstrip('0x').rstrip('L') k2 = hex(binascii.crc32(key[1::2]) & 0xffffffff) k2 = k2.lstrip('0').rstrip('L') return '_xprintidle_cffi_{0}{1}'.format(k1, k2)" 688,"def server_identity_is_verified(self): """""" GPGAuth stage0 """""" # Encrypt a uuid token for the server server_verify_token = self.gpg.encrypt(self._nonce0, self.server_fingerprint, always_trust=True) if not server_verify_token.ok: raise GPGAuthStage0Exception( 'Encryption of the nonce0 (%s) ' 'to the server fingerprint (%s) failed.' 
% (self._nonce0, self.server_fingerprint) ) server_verify_response = post_server_verify_token( self, keyid=self.user_fingerprint, server_verify_token=str(server_verify_token) ) if not check_server_verify_response(server_verify_response): raise GPGAuthStage0Exception(""Verify endpoint wrongly formatted"") if server_verify_response.headers.get('X-GPGAuth-Verify-Response') != self._nonce0: raise GPGAuthStage0Exception( 'The server decrypted something different than what we sent ' '(%s <> %s)' % (server_verify_response.headers.get('X-GPGAuth-Verify-Response'), self._nonce0)) logger.info('server_identity_is_verified: OK') return True" 689,"def user_auth_token(self): """""" GPGAuth Stage1 """""" # stage0 is a prequisite if not self.server_identity_is_verified: return False server_login_response = post_log_in( self, keyid=self.user_fingerprint ) if not check_server_login_stage1_response(server_login_response): raise GPGAuthStage1Exception(""Login endpoint wrongly formatted"") # Get the encrypted User Auth Token encrypted_user_auth_token = unquote_plus( server_login_response.headers.get('X-GPGAuth-User-Auth-Token') .replace('\\\\', '\\') ).replace('\\ ', ' ') logger.debug('User token to decrypt: %s', encrypted_user_auth_token) logger.info('Decrypting the user authentication token; ' 'password prompt expected') passphrase = None # For the sake of tests, allow one to set the passphrase onto # the object if hasattr(self, '_user_passphrase'): passphrase = self._user_passphrase user_auth_token = self.gpg.decrypt(encrypted_user_auth_token, always_trust=True, passphrase=passphrase) if not user_auth_token.ok: raise GPGAuthStage1Exception(""Auth token decryption failed: %s"", user_auth_token.status) logger.info('user_auth_token: %s', user_auth_token) return str(user_auth_token)" 690,"def is_authenticated_with_token(self): """""" GPGAuth Stage 2 """""" """""" Send back the token to the server to get auth cookie """""" server_login_response = post_log_in( self, keyid=self.user_fingerprint, user_token_result=self.user_auth_token ) if not check_server_login_stage2_response(server_login_response): raise GPGAuthStage2Exception(""Login endpoint wrongly formatted"") self.cookies.save(ignore_discard=True) logger.info('is_authenticated_with_token: OK') return True" 691,"def publish(self,message,message_type,topic=''): """""" Publish the message on the PUB socket with the given topic name. Args: - message: the message to publish - message_type: the type of message being sent - topic: the topic on which to send the message. Defaults to ''. 
"""""" if message_type == MULTIPART: raise Exception(""Unsupported request type"") super(Publisher,self).send(message,message_type,topic)" 692,"def load(self, cls, run_id): """""" Load a workflow cls - workflow class (to get __name__ from) run_id - id given to the specific run """""" id_code = self.generate_load_identifier(cls, run_id) inst = self.store.load(id_code) return inst" 693,"def save(self, obj, run_id): """""" Save a workflow obj - instance of a workflow to save run_id - unique id to give the run """""" id_code = self.generate_save_identifier(obj, run_id) self.store.save(obj, id_code)" 694,"def setup_tasks(self, tasks): """""" Find task classes from category.namespace.name strings tasks - list of strings """""" task_classes = [] for task in tasks: category, namespace, name = task.split(""."") try: cls = find_in_registry(category=category, namespace=namespace, name=name)[0] except TypeError: log.error(""Could not find the task with category.namespace.name {0}"".format(task)) raise TypeError task_classes.append(cls) self.tasks = task_classes" 695,"def initialize_workflow(self, workflow): """""" Create a workflow workflow - a workflow class """""" self.workflow = workflow() self.workflow.tasks = self.tasks self.workflow.input_file = self.input_file self.workflow.input_format = self.input_format self.workflow.target_file = self.target_file self.workflow.target_format = self.target_format self.workflow.run_id = self.run_id self.workflow.setup()" 696,"def reformat_filepath(self, config_file, filename): """""" Convert relative paths in config file to absolute """""" if not filename.startswith(""/""): filename = self.config_file_format.format(config_file, filename) return filename" 697,"def item_lister(command, _connection, page_size, page_number, sort_by, sort_order, item_class, result_set, **kwargs): """""" A generator function for listing Video and Playlist objects. """""" # pylint: disable=R0913 page = page_number while True: item_collection = _connection.get_list(command, page_size=page_size, page_number=page, sort_by=sort_by, sort_order=sort_order, item_class=item_class, **kwargs) result_set.total_count = item_collection.total_count result_set.page_number = page for item in item_collection.items: yield item if item_collection.total_count < 0 or item_collection.page_size == 0: break if len(item_collection.items) > 0: page += 1 else: break" 698,"def get_manifest(self, asset_xml): """""" Construct and return the xml manifest to deliver along with video file. 
"""""" # pylint: disable=E1101 manifest = '' manifest += ' {type} ------- using {parser}'.format(loc=loc, type=get_pretty_type_str(desired_type), parser=str(parser))" 712,"def is_able_to_parse_detailed(self, desired_type: Type[Any], desired_ext: str, strict: bool) -> Tuple[bool, bool]: """""" Utility method to check if a parser is able to parse a given type, either in * strict mode (desired_type must be one of the supported ones, or the parser should be generic) * inference mode (non-strict) : desired_type may be a parent class of one the parser is able to produce :param desired_type: the type of the object that should be parsed, :param desired_ext: the file extension that should be parsed :param strict: a boolean indicating whether to evaluate in strict mode or not :return: a first boolean indicating if there was a match, and a second boolean indicating if that match was strict (None if no match) """""" # (1) first handle the easy joker+joker case if desired_ext is JOKER and desired_type is JOKER: return True, None # (2) if ext is not a joker we can quickly check if it is supported if desired_ext is not JOKER: check_var(desired_ext, var_types=str, var_name='desired_ext') if desired_ext not in self.supported_exts: # ** no match on extension - no need to go further return False, None # (3) if type=joker and ext is supported => easy if desired_type is JOKER: # ** only extension match is required - ok. return True, None # (4) at this point, ext is JOKER OR supported and type is not JOKER. Check type match check_var(desired_type, var_types=type, var_name='desired_type_of_output') check_var(strict, var_types=bool, var_name='strict') # -- first call custom checker if provided if self.is_able_to_parse_func is not None and not self.is_able_to_parse_func(strict, desired_type): return False, None # -- strict match : either the parser is able to parse Anything, or the type is in the list of supported types if self.is_generic() or (desired_type in self.supported_types): return True, True # exact match # -- non-strict match : if the parser is able to parse a subclass of the desired type, it is ok elif (not strict) \ and any(issubclass(supported, desired_type) for supported in self.supported_types): return True, False # approx match # -- no match at all else: return False, None" 713,"def are_worth_chaining(parser, to_type: Type[S], converter: Converter[S, T]) -> bool: """""" Utility method to check if it makes sense to chain this parser with the given destination type, and the given converter to create a parsing chain. Returns True if it brings value to chain them. To bring value, * the converter's output should not be a parent class of the parser's output. Otherwise the chain does not even make any progress :) * The parser has to allow chaining (with converter.can_chain=True) :param parser: :param to_type: :param converter: :return: """""" if not parser.can_chain: # The base parser prevents chaining return False elif not is_any_type(to_type) and is_any_type(converter.to_type): # we gain the capability to generate any type. So it is interesting. return True elif issubclass(to_type, converter.to_type): # Not interesting : the outcome of the chain would be not better than one of the parser alone return False # Note: we dont say that chaining a generic parser with a converter is useless. Indeed it might unlock some # capabilities for the user (new file extensions, etc.) that would not be available with the generic parser # targetting to_type alone. 
For example parsing object A from its constructor then converting A to B might # sometimes be interesting, rather than parsing B from its constructor else: # Interesting return True" 714,"def create_for_caught_error(parser: _BaseParserDeclarationForRegistries, desired_type: Type[T], obj: PersistedObject, caught: Exception, options: Dict[str, Dict[str, Any]]): """""" Helper method provided because we actually can't put that in the constructor, it creates a bug in Nose tests https://github.com/nose-devs/nose/issues/725 :param parser: :param desired_type: :param obj: :param caught: :param options: :return: """""" try: typ = get_pretty_type_str(desired_type) except: typ = str(desired_type) e = ParsingException('Error while parsing ' + str(obj) + ' as a ' + typ + ' with parser \'' + str(parser) + '\' using options=(' + str(options) + ') : caught \n ' + str(caught.__class__.__name__) + ' : ' + str(caught))\ .with_traceback(caught.__traceback__) # 'from e' was hiding the inner traceback. This is much better for debug e.__cause__ = None # e.__cause__ = caught # store the exception still, to be able to handle it later e.caught = caught return e" 715,"def create_for_wrong_result_type(parser: _BaseParserDeclarationForRegistries, desired_type: Type[T], obj: PersistedObject, result: T, options: Dict[str, Dict[str, Any]]): """""" Helper method provided because we actually can't put that in the constructor, it creates a bug in Nose tests https://github.com/nose-devs/nose/issues/725 :param parser: :param desired_type: :param obj: :param result: :param options: :return: """""" msg = ""Error while parsing {obj} as a {typ} with parser {p} using options=({opts}) - parser returned an object "" \ ""of wrong type {tret}: {ret}"".format(obj=obj, typ=get_pretty_type_str(desired_type), p=parser, opts=options, tret=type(result), ret=result) return WrongTypeCreatedError(msg)" 716,"def execute(self, logger: Logger, options: Dict[str, Dict[str, Any]]) -> T: """""" Called to parse the object as described in this parsing plan, using the provided arguments for the parser. * Exceptions are caught and wrapped into ParsingException * If result does not match expected type, an error is thrown :param logger: the logger to use during parsing (optional: None is supported) :param options: a dictionary of option sets. Each option set is identified with an id in the dictionary. :return: """""" try: res = self._execute(logger, options) except Exception as e: raise ParsingException.create_for_caught_error(self.parser, self.obj_type, self.obj_on_fs_to_parse, e, options) # Check that the returned parsed object has the correct type if res is not None: if robust_isinstance(res, self.obj_type): return res # wrong type : error raise WrongTypeCreatedError.create_for_wrong_result_type(self.parser, self.obj_type, self.obj_on_fs_to_parse, res, options)" 717,"def _execute(self, logger: Logger, options: Dict[str, Dict[str, Any]]) -> T: """""" Implementing classes should perform the parsing here, possibly using custom methods of self.parser. :param logger: :param options: :return: """""" pass" 718,"def _get_applicable_options(self, options: Dict[str, Dict[str, Any]]): """""" Returns the options that are applicable to this particular parser, from the full map of options. It first uses 'get_id_for_options()' to know the id of this parser, and then simply extracts the contents of the options corresponding to this id, or returns an empty dict(). 
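For instance (an illustrative sketch with hypothetical parser ids, not from the original source):

    options = {'MyCsvParser': {'encoding': 'utf-8'}, 'OtherParser': {'strict': True}}
    # a parser whose get_id_for_options() returns 'MyCsvParser' receives {'encoding': 'utf-8'};
    # a parser id absent from the map simply gets an empty dict()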
:param options: a dictionary parser_id > options :return: """""" return get_options_for_id(options, self.get_id_for_options())" 719,"def create_parsing_plan(self, desired_type: Type[T], filesystem_object: PersistedObject, logger: Logger, options: Dict[str, Dict[str, Any]]) -> ParsingPlan[T]: """""" Creates a parsing plan to parse the given filesystem object into the given desired_type. Implementing classes may wish to support additional parameters. :param desired_type: the type of object that should be created as the output of parsing plan execution. :param filesystem_object: the persisted object that should be parsed :param logger: an optional logger to log all parsing plan creation and execution information :param options: a dictionary additional implementation-specific parameters (one dict per parser id). Implementing classes may use 'self._get_applicable_options()' to get the options that are of interest for this parser. :return: """""" pass" 720,"def add(self, f_ipaddr, f_macaddr, f_hostname, f_netbios_name, f_engineer, f_asset_group, f_confirmed): """""" Add a t_hosts record :param f_ipaddr: IP address :param f_macaddr: MAC Address :param f_hostname: Hostname :param f_netbios_name: NetBIOS Name :param f_engineer: Engineer username :param f_asset_group: Asset group :param f_confirmed: Confirmed boolean :return: (True/False, t_hosts.id or response message) """""" return self.send.host_add(f_ipaddr, f_macaddr, f_hostname, f_netbios_name, f_engineer, f_asset_group, f_confirmed)" 721,"def parse_now_field(s): """"""Return a datetime instance from a string generated by now_field. IMPORTANT: the datetime will be in UTC"""""" if not s.startswith('UTC:'): return None # Invalid string s = s[4:] # isoformat can return strings both with and without microseconds - we # account for both try: dt = datetime.datetime.strptime(s, '%Y-%m-%dT%H:%M:%S.%f') except ValueError: dt = datetime.datetime.strptime(s, '%Y-%m-%dT%H:%M:%S') return dt" 722,"def get_ftp(ftp_conf, debug=0): """"""得到一个 已经打开的FTP 实例,和一个 ftp 路径。 :param dict ftp_conf: ftp配置文件,格式如下: >>> { >>> 'server':'127.0.0.1', >>> 'start_path':None, >>> 'user':'admin', >>> 'password':'123456', >>> } :returns: ftp, ftpserverstr :rtype: :class:`ftplib.FTP` , str """""" server = ftp_conf.get('server') user = ftp_conf.get('user') password = ftp_conf.get('password') start_path = ftp_conf.get('start_path') slog.info(""Connecting FTP server %s ......"", server) ftpStr = 'ftp://%s/'%server if start_path: ftpStr = ftpStr+start_path ftp = ftplib.FTP(server, user, password) ftp.set_debuglevel(debug) if start_path: ftp.cwd(start_path) serverFiles = ftp.nlst() slog.info('There are some files in %s:\n[%s]'%(ftpStr, ', '.join(serverFiles))) return ftp, ftpStr" 723,"def upload_file(file_path, remote_path, ftp_conf, remove_file=False): """"""上传第一个指定的文件到 FTP 服务器。 :param str file_path: 待上传文件的绝对路径。 :param str remote_path: 文件在 FTP 服务器上的相对路径(相对于 FTP 服务器的初始路径)。 :param dict ftp_conf: ftp配置文件,详见 :func:`get_ftp` 。 :param bool remove_file: 上传成功后是否删除本地文件。 :returns: FTP 服务器上的文件列表 :rtype: list """""" check_ftp_conf(ftp_conf) ftp, ftpStr = get_ftp(ftp_conf) lf = open(file_path, 'rb') slog.info('Uploading ""%s"" to ""%s/%s"" ......'%(file_path, ftpStr, remote_path)) ftp.storbinary(""STOR %s""%remote_path, lf) filelist = ftp.nlst() ftp.quit() lf.close() if remove_file: os.remove(file_path) slog.info('Upload done.') return filelist" 724,"def retrieve_data(self): """""" Retrives data as a DataFrame. 
"""""" #==== Retrieve data ====# df = self.manager.get_historic_data(self.start.date(), self.end.date()) df.replace(0, np.nan, inplace=True) return df" 725,"def get_min_risk(self, weights, cov_matrix): """""" Minimizes the variance of a portfolio. """""" def func(weights): """"""The objective function that minimizes variance."""""" return np.matmul(np.matmul(weights.transpose(), cov_matrix), weights) def func_deriv(weights): """"""The derivative of the objective function."""""" return ( np.matmul(weights.transpose(), cov_matrix.transpose()) + np.matmul(weights.transpose(), cov_matrix) ) constraints = ({'type': 'eq', 'fun': lambda weights: (weights.sum() - 1)}) solution = self.solve_minimize(func, weights, constraints, func_deriv=func_deriv) # NOTE: `min_risk` is unused, but may be helpful later. # min_risk = solution.fun allocation = solution.x return allocation" 726,"def get_max_return(self, weights, returns): """""" Maximizes the returns of a portfolio. """""" def func(weights): """"""The objective function that maximizes returns."""""" return np.dot(weights, returns.values) * -1 constraints = ({'type': 'eq', 'fun': lambda weights: (weights.sum() - 1)}) solution = self.solve_minimize(func, weights, constraints) max_return = solution.fun * -1 # NOTE: `max_risk` is not used anywhere, but may be helpful in the future. # allocation = solution.x # max_risk = np.matmul( # np.matmul(allocation.transpose(), cov_matrix), allocation # ) return max_return" 727,"def efficient_frontier( self, returns, cov_matrix, min_return, max_return, count ): """""" Returns a DataFrame of efficient portfolio allocations for `count` risk indices. """""" columns = [coin for coin in self.SUPPORTED_COINS] # columns.append('Return') # columns.append('Risk') values = pd.DataFrame(columns=columns) weights = [1/len(self.SUPPORTED_COINS)] * len(self.SUPPORTED_COINS) def func(weights): """"""The objective function that minimizes variance."""""" return np.matmul(np.matmul(weights.transpose(), cov_matrix), weights) def func_deriv(weights): """"""The derivative of the objective function."""""" return ( np.matmul(weights.transpose(), cov_matrix.transpose()) + np.matmul(weights.transpose(), cov_matrix) ) for point in np.linspace(min_return, max_return, count): constraints = ( {'type': 'eq', 'fun': lambda weights: (weights.sum() - 1)}, {'type': 'ineq', 'fun': lambda weights, i=point: ( np.dot(weights, returns.values) - i )} ) solution = self.solve_minimize(func, weights, constraints, func_deriv=func_deriv) columns = {} for index, coin in enumerate(self.SUPPORTED_COINS): columns[coin] = math.floor(solution.x[index] * 100 * 100) / 100 # NOTE: These lines could be helpful, but are commented out right now. # columns['Return'] = round(np.dot(solution.x, returns), 6) # columns['Risk'] = round(solution.fun, 6) values = values.append(columns, ignore_index=True) return values" 728,"def solve_minimize( self, func, weights, constraints, lower_bound=0.0, upper_bound=1.0, func_deriv=False ): """""" Returns the solution to a minimization problem. """""" bounds = ((lower_bound, upper_bound), ) * len(self.SUPPORTED_COINS) return minimize( fun=func, x0=weights, jac=func_deriv, bounds=bounds, constraints=constraints, method='SLSQP', options={'disp': False} )" 729,"def allocate(self): """""" Returns an efficient portfolio allocation for the given risk index. 
"""""" df = self.manager.get_historic_data()[self.SUPPORTED_COINS] #==== Calculate the daily changes ====# change_columns = [] for column in df: if column in self.SUPPORTED_COINS: change_column = '{}_change'.format(column) values = pd.Series( (df[column].shift(-1) - df[column]) / -df[column].shift(-1) ).values df[change_column] = values change_columns.append(change_column) # print(df.head()) # print(df.tail()) #==== Variances and returns ====# columns = change_columns # NOTE: `risks` is not used, but may be used in the future risks = df[columns].apply(np.nanvar, axis=0) # print('\nVariance:\n{}\n'.format(risks)) returns = df[columns].apply(np.nanmean, axis=0) # print('\nExpected returns:\n{}\n'.format(returns)) #==== Calculate risk and expected return ====# cov_matrix = df[columns].cov() # NOTE: The diagonal variances weren't calculated correctly, so here is a fix. cov_matrix.values[[np.arange(len(self.SUPPORTED_COINS))] * 2] = df[columns].apply(np.nanvar, axis=0) weights = np.array([1/len(self.SUPPORTED_COINS)] * len(self.SUPPORTED_COINS)).reshape(len(self.SUPPORTED_COINS), 1) #==== Calculate portfolio with the minimum risk ====# min_risk = self.get_min_risk(weights, cov_matrix) min_return = np.dot(min_risk, returns.values) #==== Calculate portfolio with the maximum return ====# max_return = self.get_max_return(weights, returns) #==== Calculate efficient frontier ====# frontier = self.efficient_frontier( returns, cov_matrix, min_return, max_return, 6 ) return frontier" 730,"def handle_default_options(options): """""" Pass in a Values instance from OptionParser. Handle settings and pythonpath options - Values from OptionParser """""" if options.settings: #Set the percept_settings_module (picked up by settings in conf.base) os.environ['PERCEPT_SETTINGS_MODULE'] = options.settings if options.pythonpath: #Append the pythonpath and the directory one up from the pythonpath to sys.path for importing options.pythonpath = os.path.abspath(os.path.expanduser(options.pythonpath)) up_one_path = os.path.abspath(os.path.join(options.pythonpath, "".."")) sys.path.append(options.pythonpath) sys.path.append(up_one_path) return options" 731,"def create_parser(self, prog_name, subcommand): """""" Create an OptionParser prog_name - Name of a command subcommand - Name of a subcommand """""" parser = OptionParser(prog=prog_name, usage=self.usage(subcommand), option_list=self.option_list) return parser" 732,"def hook(name=None, *args, **kwargs): """"""Decorator to register the function as a hook """""" def decorator(f): if not hasattr(f, ""hooks""): f.hooks = [] f.hooks.append((name or f.__name__, args, kwargs)) return f return decorator" 733,"def register_hooks(func, hooks, obj): """"""Register func on obj via hooks. Hooks should be a tuple of (name, args, kwargs) where name is a method name of obj. If args or kwargs are not empty, the method will be called first and expect a new function as return. """""" for name, args, kwargs in hooks: hook = getattr(obj, name) force_call = kwargs.pop(""_force_call"", False) if force_call or len(args) > 0 or len(kwargs) > 0: hook = hook(*args, **kwargs) hook(func)" 734,"def action(*args, **kwargs): """"""Transforms functions or class methods into actions. 
Optionnaly, you can define a function to be used as the view initializer: @action() def my_action(): pass @my_action.init_view def my_action_init_view(view, options): pass """""" def decorator(f): return ActionFunction(f, *args, **kwargs) return decorator" 735,"def with_actions(actions_or_group_name, actions=None): """"""Executes the list of actions before/after the function Actions should be a list where items are action names as strings or a dict. See frasco.actions.loaders.load_action(). """""" group = None if isinstance(actions_or_group_name, str): group = actions_or_group_name else: actions = actions_or_group_name def decorator(f): if isinstance(f, WithActionsDecorator): dec = f else: dec = WithActionsDecorator(f) dec.actions.extend(load_actions(actions, group=group)) return dec return decorator" 736,"def expose(rule, **options): """"""Decorator to add an url rule to a function """""" def decorator(f): if not hasattr(f, ""urls""): f.urls = [] if isinstance(rule, (list, tuple)): f.urls.extend(rule) else: f.urls.append((rule, options)) return f return decorator" 737,"def plot_channel_sweep(proxy, start_channel): ''' Parameters ---------- proxy : DMFControlBoard start_channel : int Channel number from which to start a channel sweep (should be a multiple of 40, e.g., 0, 40, 80). Returns ------- pandas.DataFrame See description of return of :func:`sweep_channels`. ''' test_loads = TEST_LOADS.copy() test_loads.index += start_channel results = sweep_channels(proxy, test_loads) normalized_measurements = (results['measured capacitance'] / results['expected capacitance']) fig, axis = plt.subplots(figsize=(10, 8)) axis.bar(normalized_measurements.index - 0.3, normalized_measurements, width=0.6, edgecolor='none', facecolor='limegreen') axis.set_xlim(left=test_loads.index.min() - 0.5, right=test_loads.index.max() + 0.5) axis.set_xlabel('channel') axis.set_ylabel(r'$\frac{C_{\tt{measured}}}{C_{\tt{expected}}}$', fontsize=28) return results" 738,"def _create_unicode_map(): """""" Create the inverse map from unicode to betacode. Returns: The hash map to convert unicode characters to the beta code representation. """""" unicode_map = {} for beta, uni in _map.BETACODE_MAP.items(): # Include decomposed equivalent where necessary. norm = unicodedata.normalize('NFC', uni) unicode_map[norm] = beta unicode_map[uni] = beta # Add the final sigmas. final_sigma_norm = unicodedata.normalize('NFC', _FINAL_LC_SIGMA) unicode_map[final_sigma_norm] = 's' unicode_map[_FINAL_LC_SIGMA] = 's' return unicode_map" 739,"def _create_conversion_trie(strict): """""" Create the trie for betacode conversion. Args: text: The beta code text to convert. All of this text must be betacode. strict: Flag to allow for flexible diacritic order on input. Returns: The trie for conversion. """""" t = pygtrie.CharTrie() for beta, uni in _map.BETACODE_MAP.items(): if strict: t[beta] = uni else: # The order of accents is very strict and weak. Allow for many orders of # accents between asterisk and letter or after letter. This does not # introduce ambiguity since each betacode token only has one letter and # either starts with a asterisk or a letter. diacritics = beta[1:] perms = itertools.permutations(diacritics) for perm in perms: perm_str = beta[0] + ''.join(perm) t[perm_str.lower()] = uni t[perm_str.upper()] = uni return t" 740,"def _find_max_beta_token_len(): """""" Finds the maximum length of a single betacode token. Returns: The length of the longest key in the betacode map, which corresponds to the longest single betacode token. 
"""""" max_beta_len = -1 for beta, uni in _map.BETACODE_MAP.items(): if len(beta) > max_beta_len: max_beta_len = len(beta) return max_beta_len" 741,"def beta_to_uni(text, strict=False): """""" Converts the given text from betacode to unicode. Args: text: The beta code text to convert. All of this text must be betacode. strict: Flag to allow for flexible diacritic order on input. Returns: The converted text. """""" # Check if the requested configuration for conversion already has a trie # stored otherwise convert it. param_key = (strict,) try: t = _BETA_CONVERSION_TRIES[param_key] except KeyError: t = _create_conversion_trie(*param_key) _BETA_CONVERSION_TRIES[param_key] = t transform = [] idx = 0 possible_word_boundary = False while idx < len(text): if possible_word_boundary and _penultimate_sigma_word_final(transform): transform[-2] = _FINAL_LC_SIGMA step = t.longest_prefix(text[idx:idx + _MAX_BETA_TOKEN_LEN]) if step: possible_word_boundary = text[idx] in _BETA_PUNCTUATION key, value = step transform.append(value) idx += len(key) else: possible_word_boundary = True transform.append(text[idx]) idx += 1 # Check one last time in case there is some whitespace or punctuation at the # end and check if the last character is a sigma. if possible_word_boundary and _penultimate_sigma_word_final(transform): transform[-2] = _FINAL_LC_SIGMA elif len(transform) > 0 and transform[-1] == _MEDIAL_LC_SIGMA: transform[-1] = _FINAL_LC_SIGMA converted = ''.join(transform) return converted" 742,"def uni_to_beta(text): """""" Convert unicode text to a betacode equivalent. This method can handle tónos or oxeîa characters in the input. Args: text: The text to convert to betacode. This text does not have to all be Greek polytonic text, and only Greek characters will be converted. Note that in this case, you cannot convert to beta and then back to unicode. Returns: The betacode equivalent of the inputted text where applicable. """""" u = _UNICODE_MAP transform = [] for ch in text: try: conv = u[ch] except KeyError: conv = ch transform.append(conv) converted = ''.join(transform) return converted" 743,"def __calculate_order(self, node_dict): """""" Determine a valid ordering of the nodes in which a node is not called before all of it's dependencies. Raise an error if there is a cycle, or nodes are missing. """""" if len(node_dict.keys()) != len(set(node_dict.keys())): raise DependencyTreeException(""Duplicate Keys Exist in node dictionary!"") valid_order = [node for node, dependencies in node_dict.items() if len(dependencies) == 0] remaining_nodes = [node for node in node_dict.keys() if node not in valid_order] while len(remaining_nodes) > 0: node_added = False for node in remaining_nodes: dependencies = [d for d in node_dict[node] if d not in valid_order] if len(dependencies) == 0: valid_order.append(node) remaining_nodes.remove(node) node_added = True if not node_added: # the tree must be invalid, as it was not possible to remove a node. # it's hard to find all the errors, so just spit out the first one you can find. invalid_node = remaining_nodes[0] invalid_dependency = ', '.join(node_dict[invalid_node]) if invalid_dependency not in remaining_nodes: raise DependencyTreeException( ""Missing dependency! 
One or more of ({dependency}) are missing for {dependant}."".format( dependant=invalid_node, dependency=invalid_dependency)) else: raise DependencyTreeException(""The dependency %s is cyclic or dependent on a cyclic dependency"" % invalid_dependency) return valid_order" 744,"def read_input(self, filename, has_header=True): """""" filename is any filename, or something on which open() can be called for example: csv_input = CSVInput() csv_input.read_input(""csvfile.csv"") """""" stream = open(filename) reader = csv.reader(stream) csv_data = [] for (i, row) in enumerate(reader): if i==0: if not has_header: csv_data.append([str(i) for i in xrange(0,len(row))]) csv_data.append(row) self.data = csv_data" 745,"def pprint_out(dct: Dict): """""" Utility methods to pretty-print a dictionary that is typically outputted by parsyfiles (an ordered dict) :param dct: :return: """""" for name, val in dct.items(): print(name + ':') pprint(val, indent=4)" 746,"def warn_import_error(type_of_obj_support: str, caught: ImportError): """""" Utility method to print a warning message about failed import of some modules :param type_of_obj_support: :param caught: :return: """""" msg = StringIO() msg.writelines('Import Error while trying to add support for ' + type_of_obj_support + '. You may continue but ' 'the associated parsers and converters wont be available : \n') traceback.print_tb(caught.__traceback__, file=msg) msg.writelines(str(caught.__class__.__name__) + ' : ' + str(caught) + '\n') warn(msg.getvalue())" 747,"def create_parser_options(lazy_mfcollection_parsing: bool = False) -> Dict[str, Dict[str, Any]]: """""" Utility method to create a default options structure with the lazy parsing inside :param lazy_mfcollection_parsing: :return: the options structure filled with lazyparsing option (for the MultifileCollectionParser) """""" return {MultifileCollectionParser.__name__: {'lazy_parsing': lazy_mfcollection_parsing}}" 748,"def add_parser_options(options: Dict[str, Dict[str, Any]], parser_id: str, parser_options: Dict[str, Dict[str, Any]], overwrite: bool = False): """""" Utility method to add options for a given parser, to the provided options structure :param options: :param parser_id: :param parser_options: :param overwrite: True to silently overwrite. 
Otherwise an error will be thrown :return: """""" if parser_id in options.keys() and not overwrite: raise ValueError('There are already options in this dictionary for parser id ' + parser_id) options[parser_id] = parser_options return options" 749,"def register_default_plugins(root_parser: ParserRegistryWithConverters): """""" Utility method to register all default plugins on the given parser+converter registry :param root_parser: :return: """""" # -------------------- CORE --------------------------- try: # -- primitive types from parsyfiles.plugins_base.support_for_primitive_types import get_default_primitive_parsers, \ get_default_primitive_converters root_parser.register_parsers(get_default_primitive_parsers()) root_parser.register_converters(get_default_primitive_converters()) except ImportError as e: warn_import_error('primitive types', e) try: # -- collections from parsyfiles.plugins_base.support_for_collections import get_default_collection_parsers, \ get_default_collection_converters root_parser.register_parsers(get_default_collection_parsers(root_parser, root_parser)) root_parser.register_converters(get_default_collection_converters(root_parser)) except ImportError as e: warn_import_error('dict', e) try: # -- objects from parsyfiles.plugins_base.support_for_objects import get_default_object_parsers, \ get_default_object_converters root_parser.register_parsers(get_default_object_parsers(root_parser, root_parser)) root_parser.register_converters(get_default_object_converters(root_parser)) except ImportError as e: warn_import_error('objects', e) try: # -- config from parsyfiles.plugins_base.support_for_configparser import get_default_config_parsers, \ get_default_config_converters root_parser.register_parsers(get_default_config_parsers()) root_parser.register_converters(get_default_config_converters(root_parser)) except ImportError as e: warn_import_error('config', e) # ------------------------- OPTIONAL ----------------- try: # -- jprops from parsyfiles.plugins_optional.support_for_jprops import get_default_jprops_parsers root_parser.register_parsers(get_default_jprops_parsers(root_parser, root_parser)) # root_parser.register_converters() except ImportError as e: warn_import_error('jprops', e) try: # -- yaml from parsyfiles.plugins_optional.support_for_yaml import get_default_yaml_parsers root_parser.register_parsers(get_default_yaml_parsers(root_parser, root_parser)) # root_parser.register_converters() except ImportError as e: warn_import_error('yaml', e) try: # -- numpy from parsyfiles.plugins_optional.support_for_numpy import get_default_np_parsers, get_default_np_converters root_parser.register_parsers(get_default_np_parsers()) root_parser.register_converters(get_default_np_converters()) except ImportError as e: warn_import_error('numpy', e) try: # -- pandas from parsyfiles.plugins_optional.support_for_pandas import get_default_pandas_parsers, \ get_default_pandas_converters root_parser.register_parsers(get_default_pandas_parsers()) root_parser.register_converters(get_default_pandas_converters()) except ImportError as e: warn_import_error('pandas', e)" 750,"def parse_item(location: str, item_type: Type[T], item_name_for_log: str = None, file_mapping_conf: FileMappingConfiguration = None, logger: Logger = default_logger, lazy_mfcollection_parsing: bool = False) -> T: """""" Creates a RootParser() and calls its parse_item() method :param location: :param item_type: :param item_name_for_log: :param file_mapping_conf: :param logger: :param lazy_mfcollection_parsing: :return: """""" 
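# Build a default RootParser, assemble the options dict carrying the lazy multifile-collection parsing flag, then delegate the actual parsing to RootParser.parse_item()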
rp = _create_parser_from_default(logger) opts = create_parser_options(lazy_mfcollection_parsing=lazy_mfcollection_parsing) return rp.parse_item(location, item_type, item_name_for_log=item_name_for_log, file_mapping_conf=file_mapping_conf, options=opts)" 751,"def parse_collection(location: str, base_item_type: Type[T], item_name_for_log: str = None, file_mapping_conf: FileMappingConfiguration = None, logger: Logger = default_logger, lazy_mfcollection_parsing: bool = False)\ -> Dict[str, T]: """""" Utility method to create a RootParser() with default configuration and call its parse_collection() method :param location: :param base_item_type: :param item_name_for_log: :param file_mapping_conf: :param logger: :param lazy_mfcollection_parsing: :return: """""" rp = _create_parser_from_default(logger) opts = create_parser_options(lazy_mfcollection_parsing=lazy_mfcollection_parsing) return rp.parse_collection(location, base_item_type, item_name_for_log=item_name_for_log, file_mapping_conf=file_mapping_conf, options=opts)" 752,"def install_basic_multifile_support(self): """""" Utility method for users who created a RootParser with register_default_plugins=False, in order to register only the multifile support :return: """""" if not self.multifile_installed: self.register_parser(MultifileCollectionParser(self)) self.register_parser(MultifileObjectParser(self, self)) self.multifile_installed = True else: raise Exception('Multifile support has already been installed')" 753,"def parse_collection(self, item_file_prefix: str, base_item_type: Type[T], item_name_for_log: str = None, file_mapping_conf: FileMappingConfiguration = None, options: Dict[str, Dict[str, Any]] = None) -> Dict[str, T]: """""" Main method to parse a collection of items of type 'base_item_type'. 
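A minimal illustrative call (the location and item type below are hypothetical, not taken from this project's examples): results = parser.parse_collection('./test_data/items', MyItem)  # 'parser' is a RootParser instance, 'MyItem' is any user type supported by the registered parsers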
:param item_file_prefix: :param base_item_type: :param item_name_for_log: :param file_mapping_conf: :param options: :return: """""" # -- item_name_for_log item_name_for_log = item_name_for_log or '' check_var(item_name_for_log, var_types=str, var_name='item_name_for_log') # creating the wrapping dictionary type collection_type = Dict[str, base_item_type] if len(item_name_for_log) > 0: item_name_for_log = item_name_for_log + ' ' self.logger.debug('**** Starting to parse ' + item_name_for_log + 'collection of <' + get_pretty_type_str(base_item_type) + '> at location ' + item_file_prefix +' ****') # common steps return self._parse__item(collection_type, item_file_prefix, file_mapping_conf, options=options)" 754,"def parse_item(self, location: str, item_type: Type[T], item_name_for_log: str = None, file_mapping_conf: FileMappingConfiguration = None, options: Dict[str, Dict[str, Any]] = None) -> T: """""" Main method to parse an item of type item_type :param location: :param item_type: :param item_name_for_log: :param file_mapping_conf: :param options: :return: """""" # -- item_name_for_log item_name_for_log = item_name_for_log or '' check_var(item_name_for_log, var_types=str, var_name='item_name_for_log') if len(item_name_for_log) > 0: item_name_for_log = item_name_for_log + ' ' self.logger.debug('**** Starting to parse single object ' + item_name_for_log + 'of type <' + get_pretty_type_str(item_type) + '> at location ' + location + ' ****') # common steps return self._parse__item(item_type, location, file_mapping_conf, options=options)" 755,"def _parse__item(self, item_type: Type[T], item_file_prefix: str, file_mapping_conf: FileMappingConfiguration = None, options: Dict[str, Dict[str, Any]] = None) -> T: """""" Common parsing steps to parse an item :param item_type: :param item_file_prefix: :param file_mapping_conf: :param options: :return: """""" # for consistency : if options is None, default to the default values of create_parser_options options = options or create_parser_options() # creating the persisted object (this performs required checks) file_mapping_conf = file_mapping_conf or WrappedFileMappingConfiguration() obj = file_mapping_conf.create_persisted_object(item_file_prefix, logger=self.logger) # print('') self.logger.debug('') # create the parsing plan pp = self.create_parsing_plan(item_type, obj, logger=self.logger) # print('') self.logger.debug('') # parse res = pp.execute(logger=self.logger, options=options) # print('') self.logger.debug('') return res" 756,"def findSubCommand(args): """""" Given a list ['foo','bar', 'baz'], attempts to create a command name in the format 'foo-bar-baz'. If that command exists, we run it. If it doesn't, we check to see if foo-bar exists, in which case we run `foo-bar baz`. We keep taking chunks off the end of the command name and adding them to the argument list until we find a valid command name we can run. 
This allows us to easily make git-style command drivers where for example we have a driver script, foo, and subcommand scripts foo-bar and foo-baz, and when the user types `foo bar foobar` we find the foo-bar script and run it as `foo-bar foobar` :param list|tuple args: list to try and convert to a command args pair :returns: command and arguments list :rtype: tuple :raises StandardError: if the args can't be matched to an executable subcommand """""" # If the only command we find is the first element of args, we've found the # driver script itself and re-executing it will cause an infinite loop, so # don't even look at the first element on its own. for n in range(len(args) - 1): command = '-'.join(args[:(len(args) - n)]) commandArgs = args[len(args) - n:] if isProgram(command): return (command, commandArgs) raise StandardError(""Could not find a %s subcommand executable"" % command)" 757,"def SpamsumDistance(ssA, ssB): ''' returns the spamsum distance between ssA and ssB if they use a different block size, assume maximum distance otherwise returns the LevDistance ''' mA = re.match(r'^(\d+)[:](.*)$', ssA) mB = re.match(r'^(\d+)[:](.*)$', ssB) if mA is None or mB is None: raise ValueError(""do not appear to be spamsum signatures"") if mA.group(1) != mB.group(1): return max([len(mA.group(2)), len(mB.group(2))]) else: return LevDistance(mA.group(2), mB.group(2))" 758,"def terms(cls, tags, minimum_match=None): ''' A query that matches any (configurable number) of the provided terms. This is a simpler syntax query for using a bool query with several term queries in the should clauses. For example: { ""terms"" : { ""tags"" : [ ""blue"", ""pill"" ], ""minimum_match"" : 1 } }''' instance = cls(terms={'tags': tags}) if minimum_match is not None: instance['terms']['minimum_match'] = minimum_match return instance" 759,"def field(cls, field, query, boost=None, enable_position_increments=None): ''' A query that executes a query string against a specific field. It is a simplified version of the query_string query (by setting the default_field to the field this query is executed against). In its simplest form: { ""field"" : { ""name.first"" : ""+something -else"" } } Most of the query_string parameters are allowed with the field query as well, in such a case, the query should be formatted as follows: { ""field"" : { ""name.first"" : { ""query"" : ""+something -else"", ""boost"" : 2.0, ""enable_position_increments"": false } } } ''' instance = cls(field={field: {'query': query}}) if boost is not None: instance['field']['boost'] = boost if enable_position_increments is not None: instance['field']['enable_position_increments'] = enable_position_increments return instance" 760,"def match(cls, field, query, operator=None): ''' A family of match queries that accepts text/numerics/dates, analyzes it, and constructs a query out of it. For example: { ""match"" : { ""message"" : ""this is a test"" } } Note, message is the name of a field; you can substitute the name of any field (including _all) instead. ''' instance = cls(match={field: {'query': query}}) if operator is not None: instance['match'][field]['operator'] = operator return instance" 761,"def bool(cls, must=None, should=None, must_not=None, minimum_number_should_match=None, boost=None): ''' http://www.elasticsearch.org/guide/reference/query-dsl/bool-query.html A query that matches documents matching boolean combinations of other queries. The bool query maps to Lucene BooleanQuery. It is built using one or more boolean clauses, each clause with a typed occurrence.
The occurrence types are: 'must' - The clause(query) must appear in matching documents. 'should' - The clause(query) should appear in the matching document. In a boolean query with no 'must' clauses, one or more 'should' clauses must match a document. The minimum number of 'should' clauses to match can be set using the 'minimum_number_should_match' parameter. 'must_not' - The clause(query) must not appear in the matching documents. Note that it is not possible to search on documents that consist only of 'must_not' clauses. 'minimum_number_should_match' - Minimum number of documents that should match 'boost' - boost value > term = ElasticQuery() > term.term(user='kimchy') > query = ElasticQuery() > query.bool(should=term) > query.query() { 'bool' : { 'should' : { 'term' : {'user':'kimchy'}}}} ''' instance = cls(bool={}) if must is not None: instance['bool']['must'] = must if should is not None: instance['bool']['should'] = should if must_not is not None: instance['bool']['must_not'] = must_not if minimum_number_should_match is not None: instance['bool']['minimum_number_should_match'] = minimum_number_should_match if boost is not None: instance['bool']['boost'] = boost return instance" 762,"def fuzzy(cls, field, value, boost=None, min_similarity=None, prefix_length=None): ''' http://www.elasticsearch.org/guide/reference/query-dsl/fuzzy-query.html A fuzzy query that uses similarity based on the Levenshtein (edit distance) algorithm. ''' instance = cls(fuzzy={field: {'value': value}}) if boost is not None: instance['fuzzy'][field]['boost'] = boost if min_similarity is not None: instance['fuzzy'][field]['min_similarity'] = min_similarity if prefix_length is not None: instance['fuzzy'][field]['prefix_length'] = prefix_length return instance" 763,"def fuzzy_like_this(cls, like_text, fields=None, ignore_tf=None, max_query_terms=None, min_similarity=None, prefix_length=None, boost=None, analyzer=None): ''' http://www.elasticsearch.org/guide/reference/query-dsl/flt-query.html The fuzzy_like_this query finds documents that are ""like"" the provided text by running it against one or more fields. > query = ElasticQuery().fuzzy_like_this('text like this one', fields=['name.first', 'name.last'], max_query_terms=12) > query {'fuzzy_like_this': {'boost': 1.0, 'fields': ['name.first', 'name.last'], 'ignore_tf': False, 'like_text': 'text like this one', 'max_query_terms': 12, 'min_similarity': 0.5, 'prefix_length': 0}} ''' instance = cls(fuzzy_like_this={'like_text': like_text}) if fields is not None: instance['fuzzy_like_this']['fields'] = fields if ignore_tf is not None: instance['fuzzy_like_this']['ignore_tf'] = ignore_tf if max_query_terms is not None: instance['fuzzy_like_this']['max_query_terms'] = max_query_terms if min_similarity is not None: instance['fuzzy_like_this']['min_similarity'] = min_similarity if prefix_length is not None: instance['fuzzy_like_this']['prefix_length'] = prefix_length if boost is not None: instance['fuzzy_like_this']['boost'] = boost if analyzer is not None: instance['fuzzy_like_this']['analyzer'] = analyzer return instance" 764,"def has_child(cls, child_type, query): ''' http://www.elasticsearch.org/guide/reference/query-dsl/has-child-query.html The has_child query accepts a query and the child type to run against, and results in parent documents that have child docs matching the query.
> child_query = ElasticQuery().term(tag='something') > query = ElasticQuery().has_Child('blog_tag', child_query) ''' instance = cls(has_child={'type': child_type, 'query': query}) return instance" 765,"def mlt(cls, like_text, fields=None, percent_terms_to_match=None, min_term_freq=None, max_query_terms=None, stop_words=None, min_doc_freq=None, max_doc_freq=None, min_word_len=None, max_word_len=None, boost_terms=None, boost=None, analyzer=None): ''' http://www.elasticsearch.org/guide/reference/query-dsl/mlt-query.html More like this query find documents that are ""like"" provided text by running it against one or more fields. > query = ElasticQuery().mlt('text like this one', fields=['post.content']) ''' instance = cls(more_like_this={'like_text': like_text}) if fields is not None: instance['more_like_this']['fields'] = fields if percent_terms_to_match is not None: instance['more_like_this']['percent_terms_to_match'] = percent_terms_to_match if min_term_freq is not None: instance['more_like_this']['min_term_freq'] = min_term_freq if max_query_terms is not None: instance['more_like_this']['max_query_terms'] = max_query_terms if stop_words is not None: instance['more_like_this']['stop_words'] = stop_words if min_doc_freq is not None: instance['more_like_this']['min_doc_freq'] = min_doc_freq if max_doc_freq is not None: instance['more_like_this']['max_doc_freq'] = max_doc_freq if min_word_len is not None: instance['more_like_this']['min_word_len'] = min_word_len if max_word_len is not None: instance['more_like_this']['max_word_len'] = max_word_len if boost_terms is not None: instance['more_like_this']['boost_terms'] = boost_terms if boost is not None: instance['more_like_this']['boost'] = boost if analyzer is not None: instance['more_like_this']['analyzer'] = analyzer return instance" 766,"def query_string(cls, query, default_field=None, default_operator=None, analyzer=None, allow_leading_wildcard=None, lowercase_expanded_terms=None, enable_position_increments=None, fuzzy_prefix_length=None, fuzzy_min_sim=None, phrase_slop=None, boost=None, analyze_wildcard=None, auto_generate_phrase_queries=None, minimum_should_match=None): ''' http://www.elasticsearch.org/guide/reference/query-dsl/query-string-query.html A query that uses a query parser in order to parse its content. 
> query = ElasticQuery().query_string('this AND that OR thus', default_field='content') ''' instance = cls(query_string={'query': query}) if default_field is not None: instance['query_string']['default_field'] = default_field if default_operator is not None: instance['query_string']['default_operator'] = default_operator if analyzer is not None: instance['query_string']['analyzer'] = analyzer if allow_leading_wildcard is not None: instance['query_string']['allow_leading_wildcard'] = allow_leading_wildcard if lowercase_expanded_terms is not None: instance['query_string']['lowercase_expanded_terms'] = lowercase_expanded_terms if enable_position_increments is not None: instance['query_string']['enable_position_increments'] = enable_position_increments if fuzzy_prefix_length is not None: instance['query_string']['fuzzy_prefix_length'] = fuzzy_prefix_length if fuzzy_min_sim is not None: instance['query_string']['fuzzy_min_sim'] = fuzzy_min_sim if phrase_slop is not None: instance['query_string']['phrase_slop'] = phrase_slop if boost is not None: instance['query_string']['boost'] = boost if analyze_wildcard is not None: instance['query_string']['analyze_wildcard'] = analyze_wildcard if auto_generate_phrase_queries is not None: instance['query_string']['auto_generate_phrase_queries'] = auto_generate_phrase_queries if minimum_should_match is not None: instance['query_string']['minimum_should_match'] = minimum_should_match return instance" 767,"def an_text_url(identifiant, code): """""" Port of the PHP function used by the National Assembly: public function urlOpaque($identifiant, $codeType = NULL) { $datas = array( 'PRJL' => array('repertoire' => 'projets', 'prefixe' => 'pl', 'suffixe' => ''), 'PION' => array('repertoire' => 'propositions', 'prefixe' => 'pion', 'suffixe' => ''), 'PNRECOMENQ' => array('repertoire' => 'propositions', 'prefixe' => 'pion', 'suffixe' => ''), 'PNREAPPART341' => array('repertoire' => 'propositions', 'prefixe' => 'pion', 'suffixe' => ''), 'PNREMODREGLTAN' => array('repertoire' => 'propositions', 'prefixe' => 'pion', 'suffixe' => ''), 'AVCE' => array('repertoire' => 'projets', 'prefixe' => 'pl', 'suffixe' => '-ace'), 'ETDI' => array('repertoire' => 'projets', 'prefixe' => 'pl', 'suffixe' => '-ei'), 'ACIN' => array('repertoire' => 'projets', 'prefixe' => 'pl', 'suffixe' => '-ai'), 'LETT' => array('repertoire' => 'projets', 'prefixe' => 'pl', 'suffixe' => '-l'), 'PNRETVXINSTITEUROP' => array('repertoire' => 'europe/resolutions', 'prefixe' => 'ppe', 'suffixe' => ''), 'PNRE' => array('repertoire' => 'europe/resolutions', 'prefixe' => 'ppe', 'suffixe' => ''), 'RION' => array('repertoire' => '', 'prefixe' => '', 'suffixe' => ''), 'TCOM' => array('repertoire' => 'ta-commission', 'prefixe' => 'r', 'suffixe' => '-a0'), 'TCOMMODREGLTAN' => array('repertoire' => 'ta-commission', 'prefixe' => 'r', 'suffixe' => '-a0'), 'TCOMTVXINSTITEUROP' => array('repertoire' => 'ta-commission', 'prefixe' => 'r', 'suffixe' => '-a0'), 'TCOMCOMENQ' => array('repertoire' => 'ta-commission', 'prefixe' => 'r', 'suffixe' => '-a0'), 'TADO' => array('repertoire' => 'ta', 'prefixe' => 'ta', 'suffixe' => ''), ); preg_match('/(.{4})([ANS]*)(R[0-9])([LS]*)([0-9]*)([BTACP]*)(.*)/', $identifiant, $matches); $leg = $matches[5]; $typeTa = $matches[6]; $num = $matches[7]; switch ($typeTa) { case 'BTC': $type = 'TCOM'; break; case 'BTA': $type = 'TADO'; break; default: $type = $codeType; } $host = ""http://www.assemblee-nationale.fr/""; return $host . $leg . ""/"" . $datas[$type]['repertoire'] . ""/"" . 
$datas[$type]['prefixe'] . $num . $datas[$type]['suffixe'] . "".pdf""; } """""" datas = { 'PRJL': { 'repertoire': 'projets', 'prefixe': 'pl', 'suffixe': '', }, 'PION': { 'repertoire': 'propositions', 'prefixe': 'pion', 'suffixe': '', }, 'PNRECOMENQ': { 'repertoire': 'propositions', 'prefixe': 'pion', 'suffixe': '', }, 'PNREAPPART341': { 'repertoire': 'propositions', 'prefixe': 'pion', 'suffixe': '', }, 'PNREMODREGLTAN': { 'repertoire': 'propositions', 'prefixe': 'pion', 'suffixe': '', }, 'AVCE': { 'repertoire': 'projets', 'prefixe': 'pl', 'suffixe': '-ace', }, 'ETDI': { 'repertoire': 'projets', 'prefixe': 'pl', 'suffixe': '-ei', }, 'ACIN': { 'repertoire': 'projets', 'prefixe': 'pl', 'suffixe': '-ai', }, 'LETT': { 'repertoire': 'projets', 'prefixe': 'pl', 'suffixe': '-l', }, 'PNRETVXINSTITEUROP': { 'repertoire': 'europe/resolutions', 'prefixe': 'ppe', 'suffixe': '', }, 'PNRE': { 'repertoire': 'propositions', 'prefixe': 'pion', 'suffixe': '', }, 'RION': { 'repertoire': '', 'prefixe': '', 'suffixe': '', }, 'TCOM': { 'repertoire': 'ta-commission', 'prefixe': 'r', 'suffixe': '-a0', }, 'TCOMMODREGLTAN': { 'repertoire': 'ta-commission', 'prefixe': 'r', 'suffixe': '-a0', }, 'TCOMTVXINSTITEUROP': { 'repertoire': 'ta-commission', 'prefixe': 'r', 'suffixe': '-a0', }, 'TCOMCOMENQ': { 'repertoire': 'ta-commission', 'prefixe': 'r', 'suffixe': '-a0', }, 'TADO': { 'repertoire': 'ta', 'prefixe': 'ta', 'suffixe': '', }, # NOT IN NATIONAL ASSEMBLY PHP CODE 'RAPP': { 'repertoire': 'rapports', 'prefixe': 'r', 'suffixe': '', }, 'RINF': { 'repertoire': 'rapports', 'prefixe': 'r', 'suffixe': '', } } match = re.match(r'(.{4})([ANS]*)(R[0-9])([LS]*)([0-9]*)([BTACP]*)(.*)', identifiant) leg = match.group(5) typeTa = match.group(6) num = match.group(7) if typeTa == 'BTC': type = 'TCOM' elif typeTa == 'BTA': type = 'TADO' else: type = code host = ""http://www.assemblee-nationale.fr/"" if type not in datas: # ex: ALCNANR5L15B0002 (allocution du président) raise Exception('Unknown document type for %s' % identifiant) return host + leg + ""/"" + datas[type]['repertoire'] + ""/"" + datas[type]['prefixe'] + num + datas[type]['suffixe'] + "".asp""" 768,"def queryByPortSensor(portiaConfig, edgeId, port, sensor, last=False, params={ 'from': None, 'to': None, 'order': None, 'precision': 'ms', 'limit': None }): """"""Returns a pandas data frame with the portia select resultset"""""" header = {'Accept': 'text/csv'} if last == False: endpoint = '/select/device/{0}/port/{1}/sensor/{2}{3}'.format( edgeId, port, sensor, utils.buildGetParams(params) ) else: endpoint = '/select/device/{0}/port/{1}/sensor/{2}/last{3}'.format( edgeId, port, sensor, utils.buildGetParams(params) ) response = utils.httpGetRequest(portiaConfig, endpoint, header) if response.status_code == 200: try: dimensionSeries = pandas.read_csv( StringIO(response.text), sep=';' ) if portiaConfig['debug']: print( '[portia-debug]: {0} rows'.format( len(dimensionSeries.index) ) ) return dimensionSeries except: raise Exception('couldn\'t create pandas data frame') else: raise Exception('couldn\'t retrieve data')" 769,"def try_parse_num_and_booleans(num_str): """""" Tries to parse the provided string as a number or boolean :param num_str: :return: """""" if isinstance(num_str, str): # bool if num_str.lower() == 'true': return True elif num_str.lower() == 'false': return False # int if num_str.isdigit(): return int(num_str) # float try: return float(num_str) except ValueError: # give up return num_str else: # dont try return num_str" 770,"def 
read_dict_from_properties(desired_type: Type[dict], file_object: TextIOBase, logger: Logger, conversion_finder: ConversionFinder, **kwargs) -> Dict[str, Any]: """""" Helper method to read a dictionary from a .properties file (java-style) using jprops. Since jprops does not provide automatic handling for boolean and numbers, this tries to add the feature. :param file_object: :return: """""" # right now jprops relies on a byte stream. So we convert back our nicely decoded Text stream to a unicode # byte stream ! (urgh) class Unicoder: def __init__(self, file_object): self.f = file_object def __iter__(self): return self def __next__(self): line = self.f.__next__() return line.encode(encoding='utf-8') res = jprops.load_properties(Unicoder(file_object)) # first automatic conversion of strings > numbers res = {key: try_parse_num_and_booleans(val) for key, val in res.items()} # further convert if required return ConversionFinder.convert_collection_values_according_to_pep(res, desired_type, conversion_finder, logger, **kwargs)" 771,"def get_default_jprops_parsers(parser_finder: ParserFinder, conversion_finder: ConversionFinder) -> List[AnyParser]: """""" Utility method to return the default parsers able to parse a dictionary from a properties file. :return: """""" return [SingleFileParserFunction(parser_function=read_dict_from_properties, streaming_mode=True, custom_name='read_dict_from_properties', supported_exts={'.properties', '.txt'}, supported_types={dict}, function_args={'conversion_finder': conversion_finder}), # SingleFileParserFunction(parser_function=read_list_from_properties, # streaming_mode=True, # supported_exts={'.properties', '.txt'}, # supported_types={list}), ]" 772,"def hook(event=None, dependencies=None): """"""Hooking decorator. Just `@hook(event, dependencies)` on your function Kwargs: event (str): String or Iterable with events to hook dependencies (str): String or Iterable with modules whose hooks have to be called before this one for **this** event Wraps :func:`EventList.hook` """""" def wrapper(func): """"""I'm a simple wrapper that manages event hooking"""""" func.__deps__ = dependencies EVENTS.hook(func, event, dependencies) return func return wrapper" 773,"def load(path): """"""Helper function that tries to load a filepath (or python module notation) as a python module and on failure `exec` it. Args: path (str): Path or module to load The function tries to import `example.module` when either `example.module`, `example/module` or `example/module.py` is given. """""" importpath = path.replace(""/"", ""."").replace(""\\"", ""."") if importpath[-3:] == "".py"": importpath = importpath[:-3] try: importlib.import_module(importpath) except (ModuleNotFoundError, TypeError): exec(open(path).read())" 774,"def add_image(self, image_path, annotations): """"""Adds an image and its bounding boxes to the current list of files The bounding boxes are automatically estimated based on the given annotations. 
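For example (illustrative only; the annotation keys shown are just one possible format among those that :py:func:`bounding_box_from_annotation` can handle): training_set.add_image('/path/to/image.png', [{'topleft': (10, 20), 'bottomright': (110, 100)}])  # keys and path are hypothetical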
**Parameters:** ``image_path`` : str The file name of the image, including its full path ``annotations`` : [dict] A list of annotations, i.e., where each annotation can be anything that :py:func:`bounding_box_from_annotation` can handle; this list can be empty, in case the image does not contain any faces """""" self.image_paths.append(image_path) self.bounding_boxes.append([bounding_box_from_annotation(**a) for a in annotations])" 775,"def add_from_db(self, database, files): """"""Adds images and bounding boxes for the given files of a database that follows the :py:ref:`bob.bio.base.database.BioDatabase ` interface. **Parameters:** ``database`` : a derivative of :py:class:`bob.bio.base.database.BioDatabase` The database interface, which provides file names and annotations for the given ``files`` ``files`` : :py:class:`bob.bio.base.database.BioFile` or compatible The files (as returned by :py:meth:`bob.bio.base.database.BioDatabase.objects`) which should be added to the training list """""" for f in files: annotation = database.annotations(f) image_path = database.original_file_name(f) self.add_image(image_path, [annotation])" 776,"def save(self, list_file): """"""Saves the current list of annotations to the given file. **Parameters:** ``list_file`` : str The name of a list file to write the currently stored list into """""" bob.io.base.create_directories_safe(os.path.dirname(list_file)) with open(list_file, 'w') as f: for i in range(len(self.image_paths)): f.write(self.image_paths[i]) for bbx in self.bounding_boxes[i]: f.write(""\t[%f %f %f %f]"" % (bbx.top_f, bbx.left_f, bbx.size_f[0], bbx.size_f[1])) f.write(""\n"")" 777,"def load(self, list_file): """"""Loads the list of annotations from the given file and **appends** it to the current list. ``list_file`` : str The name of a list file to load and append """""" with open(list_file) as f: for line in f: if line and line[0] != '#': splits = line.split() bounding_boxes = [] for i in range(1, len(splits), 4): assert splits[i][0] == '[' and splits[i+3][-1] == ']' bounding_boxes.append(BoundingBox(topleft=(float(splits[i][1:]), float(splits[i+1])), size=(float(splits[i+2]), float(splits[i+3][:-1])))) self.image_paths.append(splits[0]) self.bounding_boxes.append(bounding_boxes)" 778,"def iterate(self, max_number_of_files=None): """"""iterate([max_number_of_files]) -> image, bounding_boxes, image_file Yields the image and the bounding boxes stored in the training set as an iterator. This function loads the images and converts them to gray-scale. It yields the image, the list of bounding boxes and the original image file name. 
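Typical use (illustrative sketch; ``training_set`` is assumed to be a populated instance of this class): for image, bounding_boxes, image_file in training_set.iterate(max_number_of_files=100): ...  # process the gray-scale image and its face bounding boxes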
**Parameters:** ``max_number_of_files`` : int or ``None`` If specified, limit the number of returned data by sub-selection using :py:func:`quasi_random_indices` **Yields:** ``image`` : array_like(2D) The image loaded from file and converted to gray scale ``bounding_boxes`` : [:py:class:`BoundingBox`] A list of bounding boxes, where faces are found in the image; might be empty (in case of pure background images) `` image_file`` : str The name of the original image that was read """""" indices = quasi_random_indices(len(self), max_number_of_files) for index in indices: image = bob.io.base.load(self.image_paths[index]) if len(image.shape) == 3: image = bob.ip.color.rgb_to_gray(image) # return image and bounding box as iterator yield image, self.bounding_boxes[index], self.image_paths[index]" 779,"def _feature_file(self, parallel = None, index = None): """"""Returns the name of an intermediate file for storing features."""""" if index is None: index = 0 if parallel is None or ""SGE_TASK_ID"" not in os.environ else int(os.environ[""SGE_TASK_ID""]) return os.path.join(self.feature_directory, ""Features_%02d.hdf5"" % index)" 780,"def extract(self, sampler, feature_extractor, number_of_examples_per_scale = (100, 100), similarity_thresholds = (0.5, 0.8), parallel = None, mirror = False, use_every_nth_negative_scale = 1): """"""Extracts features from **all** images in **all** scales and writes them to file. This function iterates over all images that are present in the internally stored list, and extracts features using the given ``feature_extractor`` for every image patch that the given ``sampler`` returns. The final features will be stored in the ``feature_directory`` that is set in the constructor. For each image, the ``sampler`` samples patch locations, which cover the whole image in different scales. For each patch locations is tested, how similar they are to the face bounding boxes that belong to that image, using the Jaccard :py:meth:`BoundingBox.similarity`. The similarity is compared to the ``similarity_thresholds``. If it is smaller than the first threshold, the patch is considered as background, when it is greater the the second threshold, it is considered as a face, otherwise it is rejected. Depending on the image resolution and the number of bounding boxes, this will usually result in some positive and thousands of negative patches per image. To limit the total amount of training data, for all scales, only up to a given number of positive and negative patches are kept. Also, to further limit the number of negative samples, only every ``use_every_nth_negative_scale`` scale is considered (for the positives, always all scales are processed). To increase the number (especially of positive) examples, features can also be extracted for horizontally mirrored images. Simply set the ``mirror`` parameter to ``True``. Furthermore, this function is designed to be run using several parallel processes, e.g., using the `GridTK `_. Each of the processes will run on a particular subset of the images, which is defined by the ``SGE_TASK_ID`` environment variable. The ``parallel`` parameter defines the total number of parallel processes that are used. **Parameters:** ``sampler`` : :py:class:`Sampler` The sampler to use to sample patches of the images. Please assure that the sampler is set up such that it samples patch locations which can overlap with the face locations. 
``feature_extractor`` : :py:class:`FeatureExtractor` The feature extractor to be used to extract features from image patches ``number_of_examples_per_scale`` : (int, int) The maximum number of positive and negative examples to extract for each scale of the image ``similarity_thresholds`` : (float, float) The Jaccard similarity threshold, below which patch locations are considered to be negative, and above which patch locations are considered to be positive examples. ``parallel`` : int or ``None`` If given, the total number of parallel processes, which are used to extract features (the current process index is read from the ``SGE_TASK_ID`` environment variable) ``mirror`` : bool Extract positive and negative samples also from horizontally mirrored images? ``use_every_nth_negative_scale`` : int Skip some negative scales to decrease the number of negative examples, i.e., only extract and store negative features, when ``scale_counter % use_every_nth_negative_scale == 0`` .. note:: The ``scale_counter`` is not reset between images, so that we might get features from different scales in subsequent images. """""" feature_file = self._feature_file(parallel) bob.io.base.create_directories_safe(self.feature_directory) if parallel is None or ""SGE_TASK_ID"" not in os.environ or os.environ[""SGE_TASK_ID""] == '1': extractor_file = os.path.join(self.feature_directory, ""Extractor.hdf5"") hdf5 = bob.io.base.HDF5File(extractor_file, ""w"") feature_extractor.save(hdf5) del hdf5 total_positives, total_negatives = 0, 0 indices = parallel_part(range(len(self)), parallel) if not indices: logger.warning(""The index range for the current parallel thread is empty."") else: logger.info(""Extracting features for images in range %d - %d of %d"", indices[0], indices[-1], len(self)) hdf5 = bob.io.base.HDF5File(feature_file, ""w"") for index in indices: hdf5.create_group(""Image-%d"" % index) hdf5.cd(""Image-%d"" % index) logger.debug(""Processing file %d of %d: %s"", index+1, indices[-1]+1, self.image_paths[index]) # load image image = bob.io.base.load(self.image_paths[index]) if image.ndim == 3: image = bob.ip.color.rgb_to_gray(image) # get ground_truth bounding boxes ground_truth = self.bounding_boxes[index] # collect image and GT for originally and mirrored image images = [image] if not mirror else [image, bob.ip.base.flop(image)] ground_truths = [ground_truth] if not mirror else [ground_truth, [gt.mirror_x(image.shape[1]) for gt in ground_truth]] parts = ""om"" # now, sample scale_counter = -1 for image, ground_truth, part in zip(images, ground_truths, parts): for scale, scaled_image_shape in sampler.scales(image): scale_counter += 1 scaled_gt = [gt.scale(scale) for gt in ground_truth] positives = [] negatives = [] # iterate over all possible positions in the image for bb in sampler.sample_scaled(scaled_image_shape): # check if the patch is a positive example positive = False negative = True for gt in scaled_gt: similarity = bb.similarity(gt) if similarity > similarity_thresholds[1]: positive = True break if similarity > similarity_thresholds[0]: negative = False break if positive: positives.append(bb) elif negative and scale_counter % use_every_nth_negative_scale == 0: negatives.append(bb) # per scale, limit the number of positive and negative samples positives = [positives[i] for i in quasi_random_indices(len(positives), number_of_examples_per_scale[0])] negatives = [negatives[i] for i in quasi_random_indices(len(negatives), number_of_examples_per_scale[1])] # extract features feature_extractor.prepare(image, 
scale) # .. negative features if negatives: negative_features = numpy.zeros((len(negatives), feature_extractor.number_of_features), numpy.uint16) for i, bb in enumerate(negatives): feature_extractor.extract_all(bb, negative_features, i) hdf5.set(""Negatives-%s-%.5f"" % (part,scale), negative_features) total_negatives += len(negatives) # positive features if positives: positive_features = numpy.zeros((len(positives), feature_extractor.number_of_features), numpy.uint16) for i, bb in enumerate(positives): feature_extractor.extract_all(bb, positive_features, i) hdf5.set(""Positives-%s-%.5f"" % (part,scale), positive_features) total_positives += len(positives) # cd backwards after each image hdf5.cd("".."") hdf5.set(""TotalPositives"", total_positives) hdf5.set(""TotalNegatives"", total_negatives)" 781,"def sample(self, model = None, maximum_number_of_positives = None, maximum_number_of_negatives = None, positive_indices = None, negative_indices = None): """"""sample([model], [maximum_number_of_positives], [maximum_number_of_negatives], [positive_indices], [negative_indices]) -> positives, negatives Returns positive and negative samples from the set of positives and negatives. This reads the previously extracted feature file (or all of them, in case features were extracted in parallel) and returns features. If the ``model`` is not specified, a random sub-selection of positive and negative features is returned. When the ``model`` is given, all patches are first classified with the given ``model``, and the ones that are mis-classified most are returned. The number of returned positives and negatives can be limited by specifying the ``maximum_number_of_positives`` and ``maximum_number_of_negatives``. This function keeps track of the positives and negatives that it once has returned, so it does not return the same positive or negative feature twice. However, when you have to restart training from a given point, you can set the ``positive_indices`` and ``negative_indices`` parameters, to retrieve the features for the given indices. In this case, no additional features are selected, but the given sets of indices are stored internally. .. note:: The ``positive_indices`` and ``negative_indices`` only have an effect, when ``model`` is ``None``. **Parameters:** ``model`` : :py:class:`bob.learn.boosting.BoostedMachine` or ``None`` If given, the ``model`` is used to predict the training features, and the highest mis-predicted features are returned ``maximum_number_of_positives, maximum_number_of_negatives`` : int The maximum number of positive and negative features to be returned ``positive_indices, negative_indices`` : set(int) or ``None`` The set of positive and negative indices to extract features for, instead of randomly choosing indices; only considered when ``model = None`` **Returns:** ``positives, negatives`` : array_like(2D, uint16) The new set of training features for the positive class (faces) and negative class (background). 
"""""" # get all existing feature files feature_file = self._feature_file(index = 0) if os.path.exists(feature_file): feature_files = [feature_file] else: feature_files = [] i = 1 while True: feature_file = self._feature_file(index = i) if not os.path.exists(feature_file): break feature_files.append(feature_file) i += 1 features = [] labels = [] # make a first iteration through the feature files and count the number of positives and negatives positive_count, negative_count = 0, 0 logger.info(""Reading %d feature files"", len(feature_files)) for feature_file in feature_files: logger.debug("".. Loading file %s"", feature_file) hdf5 = bob.io.base.HDF5File(feature_file) positive_count += hdf5.get(""TotalPositives"") negative_count += hdf5.get(""TotalNegatives"") del hdf5 if model is None: # get a list of indices and store them, so that we don't re-use them next time if positive_indices is None: positive_indices = set(quasi_random_indices(positive_count, maximum_number_of_positives)) if negative_indices is None: negative_indices = set(quasi_random_indices(negative_count, maximum_number_of_negatives)) self.positive_indices |= positive_indices self.negative_indices |= negative_indices # now, iterate through the files again and sample positive_indices = collections.deque(sorted(positive_indices)) negative_indices = collections.deque(sorted(negative_indices)) logger.info(""Extracting %d of %d positive and %d of %d negative samples"" % (len(positive_indices), positive_count, len(negative_indices), negative_count)) positive_count, negative_count = 0, 0 for feature_file in feature_files: hdf5 = bob.io.base.HDF5File(feature_file) for image in sorted(hdf5.sub_groups(recursive=False, relative=True)): hdf5.cd(image) for scale in sorted(hdf5.keys(relative=True)): read = hdf5.get(scale) size = read.shape[0] if scale.startswith(""Positives""): # copy positive data while positive_indices and positive_count <= positive_indices[0] and positive_count + size > positive_indices[0]: assert positive_indices[0] >= positive_count features.append(read[positive_indices.popleft() - positive_count, :]) labels.append(1) positive_count += size else: # copy negative data while negative_indices and negative_count <= negative_indices[0] and negative_count + size > negative_indices[0]: assert negative_indices[0] >= negative_count features.append(read[negative_indices.popleft() - negative_count, :]) labels.append(-1) negative_count += size hdf5.cd("".."") # return features and labels return numpy.array(features), numpy.array(labels) else: positive_count -= len(self.positive_indices) negative_count -= len(self.negative_indices) logger.info(""Getting worst %d of %d positive and worst %d of %d negative examples"", min(maximum_number_of_positives, positive_count), positive_count, min(maximum_number_of_negatives, negative_count), negative_count) # compute the worst features based on the current model worst_positives, worst_negatives = [], [] positive_count, negative_count = 0, 0 for feature_file in feature_files: hdf5 = bob.io.base.HDF5File(feature_file) for image in sorted(hdf5.sub_groups(recursive=False, relative=True)): hdf5.cd(image) for scale in sorted(hdf5.keys(relative=True)): read = hdf5.get(scale) size = read.shape[0] prediction = bob.blitz.array((size,), numpy.float64) # forward features through the model result = model.forward(read, prediction) if scale.startswith(""Positives""): indices = [i for i in range(size) if positive_count + i not in self.positive_indices] worst_positives.extend([(prediction[i], positive_count + i, 
read[i]) for i in indices if prediction[i] <= 0]) positive_count += size else: indices = [i for i in range(size) if negative_count + i not in self.negative_indices] worst_negatives.extend([(prediction[i], negative_count + i, read[i]) for i in indices if prediction[i] >= 0]) negative_count += size hdf5.cd("".."") # cut off good results if maximum_number_of_positives is not None and len(worst_positives) > maximum_number_of_positives: # keep only the positives with the low predictions (i.e., the worst) worst_positives = sorted(worst_positives, key=lambda k: k[0])[:maximum_number_of_positives] if maximum_number_of_negatives is not None and len(worst_negatives) > maximum_number_of_negatives: # keep only the negatives with the high predictions (i.e., the worst) worst_negatives = sorted(worst_negatives, reverse=True, key=lambda k: k[0])[:maximum_number_of_negatives] # mark all indices to be used self.positive_indices |= set(k[1] for k in worst_positives) self.negative_indices |= set(k[1] for k in worst_negatives) # finally, collect features and labels return numpy.array([f[2] for f in worst_positives] + [f[2] for f in worst_negatives]), numpy.array([1]*len(worst_positives) + [-1]*len(worst_negatives))" 782,"def feature_extractor(self): """"""feature_extractor() -> extractor Returns the feature extractor used to extract the positive and negative features. This feature extractor is stored to file during the :py:meth:`extract` method ran, so this function reads that file (from the ``feature_directory`` set in the constructor) and returns its content. **Returns:** ``extractor`` : :py:class:`FeatureExtractor` The feature extractor used to extract the features stored in the ``feature_directory`` """""" extractor_file = os.path.join(self.feature_directory, ""Extractor.hdf5"") if not os.path.exists(extractor_file): raise IOError(""Could not found extractor file %s. Did you already run the extraction process? Did you specify the correct `feature_directory` in the constructor?"" % extractor_file) hdf5 = bob.io.base.HDF5File(extractor_file) return FeatureExtractor(hdf5)" 783,"def get(self, param, default=EMPTY): """""" Returns the nparam value, and returns the default if it doesn't exist. If default is none, an exception will be raised instead. the returned parameter will have been specialized against the global context """""" if not self.has(param): if default is not EMPTY: return default raise ParamNotFoundException(""value for %s not found"" % param) context_dict = copy.deepcopy(self.manifest.get_context_dict()) for k, v in self.raw_dict.items(): context_dict[""%s:%s"" % (self.feature_name, k)] = v cur_value = self.raw_dict[param] prev_value = None max_depth = 5 # apply the context until doing so does not change the value while cur_value != prev_value and max_depth > 0: prev_value = cur_value try: cur_value = str(prev_value) % context_dict except KeyError: e = sys.exc_info()[1] key = e.args[0] if key.startswith('config:'): missing_key = key.split(':')[1] if self.manifest.inputs.is_input(missing_key): val = self.manifest.inputs.get_input(missing_key) context_dict[key] = val else: logger.warn(""Could not specialize %s! Error: %s"" % (self.raw_dict[param], e)) return self.raw_dict[param] except ValueError: # this is an esoteric error, and this implementation # forces a terrible solution. Sorry. # using the standard escaping syntax in python is a mistake. # if a value has a ""%"" inside (e.g. 
a password), a ValueError # is raised, causing an issue return cur_value max_depth -= 1 return cur_value" 784,"def set(self, param, value): """""" sets the param to the value provided """""" self.raw_dict[param] = value self.manifest.set(self.feature_name, param, value)" 785,"def remove(self, param): """""" Remove a parameter from the manifest """""" if self.has(param): del(self.raw_dict[param]) self.manifest.remove_option(self.feature_name, param)" 786,"def set_if_empty(self, param, default): """""" Set the parameter to the default if it doesn't exist """""" if not self.has(param): self.set(param, default)" 787,"def to_dict(self): """""" Returns the context, fully specialized, as a dictionary """""" return dict((k, str(self.get(k))) for k in self.raw_dict)" 788,"def write_to_manifest(self): """""" Overwrites the section of the manifest with the featureconfig's value """""" self.manifest.remove_section(self.feature_name) self.manifest.add_section(self.feature_name) for k, v in self.raw_dict.items(): self.manifest.set(self.feature_name, k, v)" 789,"def mro_resolve(name, bases, dict): """""" Given a tuple of baseclasses and a dictionary that takes precedence over any value in the bases, finds a value with the specified *name* and returns it. Raises #KeyError if the value can not be found. """""" if name in dict: return dict[name] for base in bases: if hasattr(base, name): return getattr(base, name) try: return mro_resolve(name, base.__bases__, {}) except KeyError: pass raise KeyError(name)" 790,"def round_to_05(n, exp=None, mode='s'): """""" Round to the next 0.5-value. This function applies the round function `func` to round `n` to the next 0.5-value with respect to its exponent with base 10 (i.e. 1.3e-4 will be rounded to 1.5e-4) if `exp` is None or with respect to the given exponent in `exp`. Parameters ---------- n: numpy.ndarray number to round exp: int or numpy.ndarray Exponent for rounding. If None, it will be computed from `n` to be the exponents for base 10. mode: {'s', 'l'} rounding mode. If 's', it will be rounded to value whose absolute value is below `n`, if 'l' it will rounded to the value whose absolute value is above `n`. Returns ------- numpy.ndarray rounded `n` Examples -------- The effects of the different parameters are show in the example below:: >>> from psyplot.plotter.simple import round_to_05 >>> a = [-100.3, 40.6, 8.7, -0.00023] >>>round_to_05(a, mode='s') array([ -1.00000000e+02, 4.00000000e+01, 8.50000000e+00, -2.00000000e-04]) >>> round_to_05(a, mode='l') array([ -1.50000000e+02, 4.50000000e+01, 9.00000000e+00, -2.50000000e-04])"""""" n = np.asarray(n) if exp is None: exp = np.floor(np.log10(np.abs(n))) # exponent for base 10 ntmp = np.abs(n)/10.**exp # mantissa for base 10 if mode == 's': n1 = ntmp s = 1. n2 = nret = np.floor(ntmp) else: n1 = nret = np.ceil(ntmp) s = -1. n2 = ntmp return np.where(n1 - n2 > 0.5, np.sign(n)*(nret + s*0.5)*10.**exp, np.sign(n)*nret*10.**exp)" 791,"def convert_radian(coord, *variables): """"""Convert the given coordinate from radian to degree Parameters ---------- coord: xr.Variable The variable to transform ``*variables`` The variables that are on the same unit. Returns ------- xr.Variable The transformed variable if one of the given `variables` has units in radian"""""" if any(v.attrs.get('units') == 'radian' for v in variables): return coord * 180. 
/ np.pi return coord" 792,"def format_coord_func(ax, ref): """"""Create a function that can replace the :func:`matplotlib.axes.Axes.format_coord` Parameters ---------- ax: matplotlib.axes.Axes The axes instance ref: weakref.weakref The reference to the :class:`~psyplot.plotter.Formatoption` instance Returns ------- function The function that can be used to replace `ax.format_coord` """""" orig_format_coord = ax.format_coord def func(x, y): orig_s = orig_format_coord(x, y) fmto = ref() if fmto is None: return orig_s try: orig_s += fmto.add2format_coord(x, y) except Exception: fmto.logger.debug( 'Failed to get plot informations for status bar!', exc_info=1) return orig_s return func" 793,"def replace_coord(self, i): """"""Replace the coordinate for the data array at the given position Parameters ---------- i: int The number of the data array in the raw data (if the raw data is not an interactive list, use 0) Returns xarray.DataArray The data array with the replaced coordinate"""""" da = next(islice(self.data_iterator, i, i+1)) name, coord = self.get_alternative_coord(da, i) other_coords = {key: da.coords[key] for key in set(da.coords).difference(da.dims)} ret = da.rename({da.dims[-1]: name}).assign_coords( **{name: coord}).assign_coords(**other_coords) return ret" 794,"def value2pickle(self): """"""Return the current axis colors"""""" return {key: s.get_edgecolor() for key, s in self.ax.spines.items()}" 795,"def set_default_formatters(self, which=None): """"""Sets the default formatters that is used for updating to None Parameters ---------- which: {None, 'minor', 'major'} Specify which locator shall be set"""""" if which is None or which == 'minor': self.default_formatters['minor'] = self.axis.get_minor_formatter() if which is None or which == 'major': self.default_formatters['major'] = self.axis.get_major_formatter()" 796,"def plotted_data(self): """"""The data that is shown to the user"""""" return InteractiveList( [arr for arr, val in zip(self.iter_data, cycle(slist(self.value))) if val is not None])" 797,"def get_cmap(self, arr=None, cmap=None, N=None): """"""Get the :class:`matplotlib.colors.Colormap` for plotting Parameters ---------- arr: np.ndarray The array to plot cmap: str or matplotlib.colors.Colormap The colormap to use. If None, the :attr:`value` of this formatoption is used N: int The number of colors in the colormap. 
If None, the norm of the :attr:`bounds` formatoption is used and, if necessary, the given array `arr` Returns ------- matplotlib.colors.Colormap The colormap returned by :func:`psy_simple.colors.get_cmap`"""""" N = N or None if cmap is None: cmap = self.value if N is None: try: N = self.bounds.norm.Ncmap except AttributeError: if arr is not None and self.bounds.norm is not None: N = len(np.unique(self.bounds.norm(arr.ravel()))) if N is not None: return get_cmap(cmap, N) return get_cmap(cmap)" 798,"def get_fmt_widget(self, parent, project): """"""Open a :class:`psy_simple.widget.CMapFmtWidget`"""""" from psy_simple.widgets.colors import CMapFmtWidget return CMapFmtWidget(parent, self, project)" 799,"def xcoord(self): """"""The x coordinate :class:`xarray.Variable`"""""" return self.decoder.get_x(self.data, coords=self.data.coords)" 800,"def ycoord(self): """"""The y coordinate :class:`xarray.Variable`"""""" return self.decoder.get_y(self.data, coords=self.data.coords)" 801,"def cell_nodes_x(self): """"""The unstructured x-boundaries with shape (N, m) where m > 2"""""" decoder = self.decoder xcoord = self.xcoord data = self.data xbounds = decoder.get_cell_node_coord( data, coords=data.coords, axis='x') if self.plotter.convert_radian: xbounds = convert_radian(xbounds, xcoord, xbounds) return xbounds.values" 802,"def cell_nodes_y(self): """"""The unstructured y-boundaries with shape (N, m) where m > 2"""""" decoder = self.decoder ycoord = self.ycoord data = self.data ybounds = decoder.get_cell_node_coord( data, coords=data.coords, axis='y') if self.plotter.convert_radian: ybounds = convert_radian(ybounds, ycoord, ybounds) return ybounds.values" 803,"def axis(self): """"""axis of the colorbar with the ticks. Will be overwritten during update process."""""" return getattr( self.colorbar.ax, self.axis_locations[self.position] + 'axis')" 804,"def default_formatters(self): """"""Default locator of the axis of the colorbars"""""" if self._default_formatters: return self._default_formatters else: self.set_default_formatters() return self._default_formatters" 805,"def xcoord(self): """"""The x coordinate :class:`xarray.Variable`"""""" v = next(self.raw_data.psy.iter_base_variables) return self.decoder.get_x(v, coords=self.data.coords)" 806,"def ycoord(self): """"""The y coordinate :class:`xarray.Variable`"""""" v = next(self.raw_data.psy.iter_base_variables) return self.decoder.get_y(v, coords=self.data.coords)" 807,"def add2format_coord(self, x, y): """"""Additional information for the :meth:`format_coord`"""""" u, v = self.data uname, vname = self.data.coords['variable'].values xcoord = self.xcoord ycoord = self.ycoord if self.decoder.is_triangular(self.raw_data[0]): x, y, z1, z2 = self.get_xyz_tri(xcoord, x, ycoord, y, u, v) elif xcoord.ndim == 1: x, y, z1, z2 = self.get_xyz_1d(xcoord, x, ycoord, y, u, v) elif xcoord.ndim == 2: x, y, z1, z2 = self.get_xyz_2d(xcoord, x, ycoord, y, u, v) speed = (z1**2 + z2**2)**0.5 xunit = xcoord.attrs.get('units', '') if xunit: xunit = ' ' + xunit yunit = ycoord.attrs.get('units', '') if yunit: yunit = ' ' + yunit zunit = u.attrs.get('units', '') if zunit: zunit = ' ' + zunit return (', vector data: %s: %.4g%s, %s: %.4g%s, %s: %.4g%s, ' '%s: %.4g%s, absolute: %.4g%s') % ( xcoord.name, x, xunit, ycoord.name, y, yunit, uname, z1, zunit, vname, z2, zunit, speed, zunit)" 808,"def get_xyz_tri(self, xcoord, x, ycoord, y, u, v): """"""Get closest x, y and z for the given `x` and `y` in `data` for 1d coords"""""" return self.get_xyz_2d(xcoord, x, ycoord, y, u, v)" 809,"def 
get_xyz_1d(self, xcoord, x, ycoord, y, u, v): """"""Get closest x, y and z for the given `x` and `y` in `data` for 1d coords"""""" xclose = xcoord.indexes[xcoord.name].get_loc(x, method='nearest') yclose = ycoord.indexes[ycoord.name].get_loc(y, method='nearest') uval = u[yclose, xclose].values vval = v[yclose, xclose].values return xcoord[xclose].values, ycoord[yclose].values, uval, vval" 810,"def get_xyz_2d(self, xcoord, x, ycoord, y, u, v): """"""Get closest x, y and z for the given `x` and `y` in `data` for 2d coords"""""" xy = xcoord.values.ravel() + 1j * ycoord.values.ravel() dist = np.abs(xy - (x + 1j * y)) imin = np.nanargmin(dist) xy_min = xy[imin] return (xy_min.real, xy_min.imag, u.values.ravel()[imin], v.values.ravel()[imin])" 811,"def hist2d(self, da, **kwargs): """"""Make the two dimensional histogram Parameters ---------- da: xarray.DataArray The data source"""""" if self.value is None or self.value == 'counts': normed = False else: normed = True y = da.values x = da.coords[da.dims[0]].values counts, xedges, yedges = np.histogram2d( x, y, normed=normed, **kwargs) if self.value == 'counts': counts = counts / counts.sum().astype(float) return counts, xedges, yedges" 812,"def _statsmodels_bivariate_kde(self, x, y, bws, xsize, ysize, xyranges): """"""Compute a bivariate kde using statsmodels. This function is mainly motivated through seaborn.distributions._statsmodels_bivariate_kde"""""" import statsmodels.nonparametric.api as smnp for i, (coord, bw) in enumerate(zip([x, y], bws)): if isinstance(bw, six.string_types): bw_func = getattr(smnp.bandwidths, ""bw_"" + bw) bws[i] = bw_func(coord) kde = smnp.KDEMultivariate([x, y], ""cc"", bws) x_support = np.linspace(xyranges[0][0], xyranges[0][1], xsize) y_support = np.linspace(xyranges[1][0], xyranges[1][1], ysize) xx, yy = np.meshgrid(x_support, y_support) z = kde.pdf([xx.ravel(), yy.ravel()]).reshape(xx.shape) return x_support, y_support, z" 813,"def check_data(cls, name, dims, is_unstructured=None): """""" A validation method for the data shape Parameters ---------- name: str or list of str The variable names (at maximum :attr:`allowed_vars` variables per array) dims: list with length 1 or list of lists with length 1 The dimension of the arrays. Only 1D-Arrays are allowed is_unstructured: bool or list of bool, optional True if the corresponding array is unstructured. This keyword is ignored Returns ------- %(Plotter.check_data.returns)s """""" if isinstance(name, six.string_types) or not is_iterable(name): name = [name] dims = [dims] N = len(name) if len(dims) != N: return [False] * N, [ 'Number of provided names (%i) and dimensions ' '%(i) are not the same' % (N, len(dims))] * N checks = [True] * N messages = [''] * N for i, (n, d) in enumerate(zip(name, dims)): if n != 0 and not n: checks[i] = False messages[i] = 'At least one variable name is required!' elif ((not isstring(n) and is_iterable(n) and len(n) > cls.allowed_vars) and len(d) != (cls.allowed_dims - len(slist(n)))): checks[i] = False messages[i] = 'Only %i names are allowed per array!' % ( cls.allowed_vars) elif len(d) != cls.allowed_dims: checks[i] = False messages[i] = 'Only %i-dimensional arrays are allowed!' % ( cls.allowed_dims) return checks, messages" 814,"def check_data(cls, name, dims, is_unstructured): """""" A validation method for the data shape Parameters ---------- name: str or list of str The variable names (one variable per array) dims: list with length 1 or list of lists with length 1 The dimension of the arrays. 
Only 1D-Arrays are allowed is_unstructured: bool or list of bool True if the corresponding array is unstructured. Returns ------- %(Plotter.check_data.returns)s """""" if isinstance(name, six.string_types) or not is_iterable(name): name = [name] dims = [dims] is_unstructured = [is_unstructured] N = len(name) if N != 1: return [False] * N, [ 'Number of provided names (%i) must equal 1!' % (N)] * N elif len(dims) != 1: return [False], [ 'Number of provided dimension lists (%i) must equal 1!' % ( len(dims))] elif len(is_unstructured) != 1: return [False], [ ('Number of provided unstructured information (%i) must ' 'equal 1!') % (len(is_unstructured))] if name[0] != 0 and not name[0]: return [False], ['At least one variable name must be provided!'] # unstructured arrays have only 1 dimension dimlen = cls.allowed_dims if is_unstructured[0]: dimlen -= 1 # Check that the array is two-dimensional # # if more than one array name is provided, the dimensions should be # one less than dimlen to have a 2D array if (not isstring(name[0]) and not is_iterable(name[0]) and len(name[0]) != 1 and len(dims[0]) != dimlen - 1): return [False], ['Only one name is allowed per array!'] # otherwise the number of dimensions must equal dimlen if len(dims[0]) != dimlen: return [False], [ 'An array with dimension %i is required, not %i' % ( dimlen, len(dims[0]))] return [True], ['']" 815,"def check_data(cls, name, dims, is_unstructured): """""" A validation method for the data shape Parameters ---------- name: list of str with length 2 The variable names (one for the first, two for the second array) dims: list with length 2 of lists with length 1 The dimension of the arrays. Only 2D-Arrays are allowed (or 1-D if an array is unstructured) is_unstructured: bool or list of bool True if the corresponding array is unstructured. Returns ------- %(Plotter.check_data.returns)s """""" if isinstance(name, six.string_types) or not is_iterable(name): name = [name] dims = [dims] is_unstructured = [is_unstructured] msg = ('Two arrays are required (one for the scalar and ' 'one for the vector field)') if len(name) < 2: return [None], [msg] elif len(name) > 2: return [False], [msg] valid1, msg1 = Simple2DBase.check_data(name[:1], dims[0:1], is_unstructured[:1]) valid2, msg2 = BaseVectorPlotter.check_data(name[1:], dims[1:], is_unstructured[1:]) return valid1 + valid2, msg1 + msg2" 816,"def record_diff(old, new): """"""Return a JSON-compatible structure capable of turning the `new` record back into the `old` record. The parameters must be structures compatible with json.dumps *or* strings compatible with json.loads. 
Note that by design, `old == record_patch(new, record_diff(old, new))`"""""" rec, diff = _norm_json_params(rec, diff) return json_delta.patch(rec, diff, in_place=False)" 818,"def append_diff_hist(diff, diff_hist=list()): """"""Given a diff as generated by record_diff, append a diff record to the list of diff_hist records."""""" diff, diff_hist = _norm_json_params(diff, diff_hist) if not diff_hist: diff_hist = list() diff_hist.append({'diff': diff, 'diff_date': now_field()}) return diff_hist" 819,"def parse_diff_hist(curr_obj, diff_hist): """"""Given a diff_hist as created, appended by append_diff_hist, yield the versions of the object start with curr_obj and working backwards in time. Each instance yielded is of the form (obj, date-string) where obj is the JSON version of the object created by applying a diff in the diff history and date-string is a string representing the date/time that the diff was taken"""""" curr_obj, diff_hist = _norm_json_params(curr_obj, diff_hist) yield (json.dumps(curr_obj), None) last_obj = curr_obj for one in reversed(diff_hist): last_obj = record_patch(last_obj, one['diff']) yield json.dumps(last_obj), one['diff_date']" 820,"def to_dict(self): """""" Converts object into a dictionary. """""" data = { 'id': self.id, 'referenceId': self.reference_id, 'type': self.type, 'displayName': self.display_name, 'remoteUrl': self.remote_url} for key in data.keys(): if data[key] == None: data.pop(key) return data" 821,"def to_dict(self): """""" Converts object into a dictionary. """""" data = { 'url': self.url, 'encodingRate': self.encoding_rate, 'frameHeight': self.frame_height, 'frameWidth': self.frame_width, 'size': self.size, 'remoteUrl': self.remote_url, 'remoteStream': self.remote_stream_name, 'videoDuration': self.video_duration, 'videoCodec': self.video_codec} [data.pop(key) for key in data.keys() if data[key] is None] return data" 822,"def to_dict(self): """""" Converts object into a dictionary. """""" data = { 'name': self.name, 'video_id': self.video_id, 'time': self.time, 'forceStop': self.force_stop, 'type': self.type, 'metadata': self.metadata} for key in data.keys(): if data[key] == None: data.pop(key) return data" 823,"def _find_video(self): """""" Lookup and populate ``pybrightcove.video.Video`` object given a video id or reference_id. """""" data = None if self.id: data = self.connection.get_item( 'find_video_by_id', video_id=self.id) elif self.reference_id: data = self.connection.get_item( 'find_video_by_reference_id', reference_id=self.reference_id) if data: self._load(data)" 824,"def _to_dict(self): """""" Converts object into a dictionary. """""" for i, tag in enumerate(self.tags): if tag in ("""", None): self.tags.pop(i) data = { 'name': self.name, 'referenceId': self.reference_id, 'shortDescription': self.short_description, 'longDescription': self.long_description, 'itemState': self.item_state, 'linkURL': self.link_url, 'linkText': self.link_text, 'tags': self.tags, 'economics': self.economics, 'id': self.id, 'end_date': _make_tstamp(self.end_date), 'start_date': _make_tstamp(self.start_date)} if len(self.renditions) > 0: data['renditions'] = [] for r in self.renditions: data['renditions'].append(r.to_dict()) if len(self.metadata) > 0: data['customFields'] = {} for meta in self.metadata: data['customFields'][meta['key']] = meta['value'] [data.pop(key) for key in data.keys() if data[key] == None] return data" 825,"def to_xml(self): # pylint: disable=R0912 """""" Converts object into an XML string. 
"""""" xml = '' for asset in self.assets: xml += ' 0: self.connection.post(xml=self.to_xml(), assets=self.assets) elif not self.id and self._filename: self.id = self.connection.post('create_video', self._filename, create_multiple_renditions=create_multiple_renditions, preserve_source_rendition=preserve_source_rendition, encode_to=encode_to, video=self._to_dict()) elif not self.id and len(self.renditions) > 0: self.id = self.connection.post('create_video', video=self._to_dict()) elif self.id: data = self.connection.post('update_video', video=self._to_dict()) if data: self._load(data)" 831,"def delete(self, cascade=False, delete_shares=False): """""" Deletes the video. """""" if self.id: self.connection.post('delete_video', video_id=self.id, cascade=cascade, delete_shares=delete_shares) self.id = None" 832,"def get_upload_status(self): """""" Get the status of the video that has been uploaded. """""" if self.id: return self.connection.post('get_upload_status', video_id=self.id)" 833,"def share(self, accounts): """""" Create a share """""" if not isinstance(accounts, (list, tuple)): msg = ""Video.share expects an iterable argument"" raise exceptions.PyBrightcoveError(msg) raise exceptions.PyBrightcoveError(""Not yet implemented"")" 834,"def set_image(self, image, filename=None, resize=False): """""" Set the poster or thumbnail of a this Vidoe. """""" if self.id: data = self.connection.post('add_image', filename, video_id=self.id, image=image.to_dict(), resize=resize) if data: self.image = Image(data=data)" 835,"def find_related(self, _connection=None, page_size=100, page_number=0): """""" List all videos that are related to this one. """""" if self.id: return connection.ItemResultSet('find_related_videos', Video, _connection, page_size, page_number, None, None, video_id=self.id)" 836,"def delete_video(video_id, cascade=False, delete_shares=False, _connection=None): """""" Delete the video represented by the ``video_id`` parameter. """""" c = _connection if not c: c = connection.APIConnection() c.post('delete_video', video_id=video_id, cascade=cascade, delete_shares=delete_shares)" 837,"def get_status(video_id, _connection=None): """""" Get the status of a video given the ``video_id`` parameter. """""" c = _connection if not c: c = connection.APIConnection() return c.post('get_upload_status', video_id=video_id)" 838,"def activate(video_id, _connection=None): """""" Mark a video as Active """""" c = _connection if not c: c = connection.APIConnection() data = c.post('update_video', video={ 'id': video_id, 'itemState': enums.ItemStateEnum.ACTIVE}) return Video(data=data, _connection=c)" 839,"def find_modified(since, filter_list=None, _connection=None, page_size=25, page_number=0, sort_by=enums.DEFAULT_SORT_BY, sort_order=enums.DEFAULT_SORT_ORDER): """""" List all videos modified since a certain date. """""" filters = [] if filter_list is not None: filters = filter_list if not isinstance(since, datetime): msg = 'The parameter ""since"" must be a datetime object.' raise exceptions.PyBrightcoveError(msg) fdate = int(since.strftime(""%s"")) / 60 ## Minutes since UNIX time return connection.ItemResultSet('find_modified_videos', Video, _connection, page_size, page_number, sort_by, sort_order, from_date=fdate, filter=filters)" 840,"def find_all(_connection=None, page_size=100, page_number=0, sort_by=enums.DEFAULT_SORT_BY, sort_order=enums.DEFAULT_SORT_ORDER): """""" List all videos. 
"""""" return connection.ItemResultSet('find_all_videos', Video, _connection, page_size, page_number, sort_by, sort_order)" 841,"def find_by_tags(and_tags=None, or_tags=None, _connection=None, page_size=100, page_number=0, sort_by=enums.DEFAULT_SORT_BY, sort_order=enums.DEFAULT_SORT_ORDER): """""" List videos given a certain set of tags. """""" err = None if not and_tags and not or_tags: err = ""You must supply at least one of either and_tags or or_tags."" if and_tags and not isinstance(and_tags, (tuple, list)): err = ""The and_tags argument for Video.find_by_tags must an "" err += ""iterable"" if or_tags and not isinstance(or_tags, (tuple, list)): err = ""The or_tags argument for Video.find_by_tags must an "" err += ""iterable"" if err: raise exceptions.PyBrightcoveError(err) atags = None otags = None if and_tags: atags = ','.join([str(t) for t in and_tags]) if or_tags: otags = ','.join([str(t) for t in or_tags]) return connection.ItemResultSet('find_videos_by_tags', Video, _connection, page_size, page_number, sort_by, sort_order, and_tags=atags, or_tags=otags)" 842,"def find_by_text(text, _connection=None, page_size=100, page_number=0, sort_by=enums.DEFAULT_SORT_BY, sort_order=enums.DEFAULT_SORT_ORDER): """""" List videos that match the ``text`` in title or description. """""" return connection.ItemResultSet('find_videos_by_text', Video, _connection, page_size, page_number, sort_by, sort_order, text=text)" 843,"def find_by_campaign(campaign_id, _connection=None, page_size=100, page_number=0, sort_by=enums.DEFAULT_SORT_BY, sort_order=enums.DEFAULT_SORT_ORDER): """""" List all videos for a given campaign. """""" return connection.ItemResultSet( 'find_videos_by_campaign_id', Video, _connection, page_size, page_number, sort_by, sort_order, campaign_id=campaign_id)" 844,"def find_by_user(user_id, _connection=None, page_size=100, page_number=0, sort_by=enums.DEFAULT_SORT_BY, sort_order=enums.DEFAULT_SORT_ORDER): """""" List all videos uploaded by a certain user. """""" return connection.ItemResultSet('find_videos_by_user_id', Video, _connection, page_size, page_number, sort_by, sort_order, user_id=user_id)" 845,"def find_by_reference_ids(reference_ids, _connection=None, page_size=100, page_number=0, sort_by=enums.DEFAULT_SORT_BY, sort_order=enums.DEFAULT_SORT_ORDER): """""" List all videos identified by a list of reference ids """""" if not isinstance(reference_ids, (list, tuple)): err = ""Video.find_by_reference_ids expects an iterable argument"" raise exceptions.PyBrightcoveError(err) ids = ','.join(reference_ids) return connection.ItemResultSet( 'find_videos_by_reference_ids', Video, _connection, page_size, page_number, sort_by, sort_order, reference_ids=ids)" 846,"def find_by_ids(ids, _connection=None, page_size=100, page_number=0, sort_by=enums.DEFAULT_SORT_BY, sort_order=enums.DEFAULT_SORT_ORDER): """""" List all videos identified by a list of Brightcove video ids """""" if not isinstance(ids, (list, tuple)): err = ""Video.find_by_ids expects an iterable argument"" raise exceptions.PyBrightcoveError(err) ids = ','.join([str(i) for i in ids]) return connection.ItemResultSet('find_videos_by_ids', Video, _connection, page_size, page_number, sort_by, sort_order, video_ids=ids)" 847,"def read_gpx(xml, gpxns=None): """"""Parse a GPX file into a GpxModel. Args: xml: A file-like-object opened in binary mode - that is containing bytes rather than characters. The root element of the XML should be a element containing a version attribute. GPX versions 1.1 is supported. 
gpxns: The XML namespace for GPX in Clarke notation (i.e. delimited by curly braces). If None (the default), the namespace used in the document will be determined automatically. """""" tree = etree.parse(xml) gpx_element = tree.getroot() return parse_gpx(gpx_element, gpxns=gpxns)" 848,"def parse_gpx(gpx_element, gpxns=None): """"""Parse a GPX file into a GpxModel. Args: xml: A file-like-object opened in binary mode - that is, containing bytes rather than characters. The root element of the XML should be a ``gpx`` element containing a version attribute. GPX version 1.0 is supported. Returns: A GpxModel representing the data from the supplied xml. Raises: ValueError: The supplied XML could not be parsed as GPX. """""" gpxns = gpxns if gpxns is not None else determine_gpx_namespace(gpx_element) if gpx_element.tag != gpxns+'gpx': raise ValueError(""No gpx root element"") get_text = lambda tag: optional_text(gpx_element, gpxns+tag) version = gpx_element.attrib['version'] if not version.startswith('1.0'): raise ValueError(""Not a GPX 1.0 file"") creator = gpx_element.attrib['creator'] name = get_text('name') description = get_text('desc') author_name = get_text('author') email = get_text('email') author = Person(author_name, email) url = get_text('url') urlname = get_text('urlname') links = make_links(url, urlname) time = get_text('time') keywords = get_text('keywords') bounds_element = gpx_element.find(gpxns+'bounds') bounds = nullable(parse_bounds)(bounds_element) metadata = Metadata(name=name, description=description, author=author, links=links, time=time, keywords=keywords, bounds=bounds) waypoint_elements = gpx_element.findall(gpxns+'wpt') waypoints = [parse_waypoint(waypoint_element, gpxns) for waypoint_element in waypoint_elements] route_elements = gpx_element.findall(gpxns+'rte') routes = [parse_route(route_element, gpxns) for route_element in route_elements] track_elements = gpx_element.findall(gpxns+'trk') tracks = [parse_track(track_element, gpxns) for track_element in track_elements] # TODO : Private elements gpx_model = GpxModel(creator, metadata, waypoints, routes, tracks) return gpx_model" 849,"def add_log_handler(log, handler=None, debug=None, fmt=None): """"""Add a handler to a :class:`logging.Logger` instance. :param Logger log: the :class:`logging.Logger` instance to process. :param Handler handler: a :class:`logging.Handler` instance. :param int debug: debug level. :param str fmt: the Formatter for the handler. """""" if debug: log.setLevel(debug) if handler: # if not fmt: # fmt = __LOG_FMT if fmt: handler.setFormatter(fmt) log.addHandler(handler)" 850,"def __wrap(self, func): '''This decorator overrides the default arguments of a function. For each keyword argument in the function, the decorator first checks if the argument has been overridden by the caller, and uses that value instead if so. If not, the decorator consults the Preset object for an override value. If both of the above cases fail, the decorator reverts to the function's native default parameter value. ''' def deffunc(*args, **kwargs): '''The decorated function''' # Get the list of function arguments if hasattr(inspect, 'signature'): # Python 3.5 function_args = inspect.signature(func).parameters else: function_args = inspect.getargspec(func).args # Construct a dict of those kwargs which appear in the function filtered_kwargs = kwargs.copy() # look at all relevant keyword arguments for this function for param in function_args: if param in kwargs: # Did the user override the default? 
filtered_kwargs[param] = kwargs[param] elif param in self._defaults: # Do we have a clobbering value in the default dict? filtered_kwargs[param] = self._defaults[param] # Call the function with the supplied args and the filtered kwarg dict return func(*args, **filtered_kwargs) # pylint: disable=W0142 wrapped = functools.update_wrapper(deffunc, func) # force-mangle the docstring here wrapped.__doc__ = ('WARNING: this function has been modified by the Presets ' 'package.\nDefault parameter values described in the ' 'documentation below may be inaccurate.\n\n{}'.format(wrapped.__doc__)) return wrapped" 851,"def _sumDiceRolls(self, rollList): """"""convert from dice roll structure to a single integer result"""""" if isinstance(rollList, RollList): self.rolls.append(rollList) return rollList.sum() else: return rollList" 852,"def _re_flatten(p): ''' Turn all capturing groups in a regular expression pattern into non-capturing groups. ''' if '(' not in p: return p return re.sub(r'(\\*)(\(\?P<[^>]*>|\((?!\?))', lambda m: m.group(0) if len(m.group(1)) % 2 else m.group(1) + '(?:', p)" 853,"def cookies(self): """""" Cookies parsed into a :class:`FormsDict`. Signed cookies are NOT decoded. Use :meth:`get_cookie` if you expect signed cookies. """""" cookies = SimpleCookie(self.environ.get('HTTP_COOKIE','')).values() if len(cookies) > self.MAX_PARAMS: raise HTTPError(413, 'Too many cookies') return FormsDict((c.key, c.value) for c in cookies)" 854,"def query(self): ''' The :attr:`query_string` parsed into a :class:`FormsDict`. These values are sometimes called ""URL arguments"" or ""GET parameters"", but not to be confused with ""URL wildcards"" as they are provided by the :class:`Router`. ''' get = self.environ['bottle.get'] = FormsDict() pairs = _parse_qsl(self.environ.get('QUERY_STRING', '')) if len(pairs) > self.MAX_PARAMS: raise HTTPError(413, 'Too many parameters') for key, value in pairs: get[key] = value return get" 855,"def copy(self): ''' Returns a copy of self. ''' # TODO copy = Response() copy.status = self.status copy._headers = dict((k, v[:]) for (k, v) in self._headers.items()) return copy" 856,"def annotated_references(obj): """""" Return known information about references held by the given object. Returns a mapping from referents to lists of descriptions. Note that there may be more than one edge leading to any particular referent; hence the need for a list. Descriptions are currently strings. """""" references = KeyTransformDict(transform=id, default_factory=list) for type_ in type(obj).__mro__: if type_ in type_based_references: type_based_references[type_](obj, references) add_attr(obj, ""__dict__"", references) add_attr(obj, ""__class__"", references) if isinstance(obj, type): add_attr(obj, ""__mro__"", references) return references" 857,"def object_annotation(obj): """""" Return a string to be used for Graphviz nodes. The string should be short but as informative as possible. """""" # For basic types, use the repr. 
if isinstance(obj, BASE_TYPES): return repr(obj) if type(obj).__name__ == 'function': return ""function\\n{}"".format(obj.__name__) elif isinstance(obj, types.MethodType): if six.PY2: im_class = obj.im_class if im_class is None: im_class_name = """" else: im_class_name = im_class.__name__ try: func_name = obj.__func__.__name__ except AttributeError: func_name = """" return ""instancemethod\\n{}.{}"".format( im_class_name, func_name, ) else: try: func_name = obj.__func__.__qualname__ except AttributeError: func_name = """" return ""instancemethod\\n{}"".format(func_name) elif isinstance(obj, list): return ""list[{}]"".format(len(obj)) elif isinstance(obj, tuple): return ""tuple[{}]"".format(len(obj)) elif isinstance(obj, dict): return ""dict[{}]"".format(len(obj)) elif isinstance(obj, types.ModuleType): return ""module\\n{}"".format(obj.__name__) elif isinstance(obj, type): return ""type\\n{}"".format(obj.__name__) elif six.PY2 and isinstance(obj, types.InstanceType): return ""instance\\n{}"".format(obj.__class__.__name__) elif isinstance(obj, weakref.ref): referent = obj() if referent is None: return ""weakref (dead referent)"" else: return ""weakref to id 0x{:x}"".format(id(referent)) elif isinstance(obj, types.FrameType): filename = obj.f_code.co_filename if len(filename) > FRAME_FILENAME_LIMIT: filename = ""..."" + filename[-(FRAME_FILENAME_LIMIT-3):] return ""frame\\n{}:{}"".format( filename, obj.f_lineno, ) else: return ""object\\n{}.{}"".format( type(obj).__module__, type(obj).__name__, )" 858,"def disttar(target, source, env): """"""tar archive builder"""""" import tarfile env_dict = env.Dictionary() if env_dict.get(""DISTTAR_FORMAT"") in [""gz"", ""bz2""]: tar_format = env_dict[""DISTTAR_FORMAT""] else: tar_format = """" # split the target directory, filename, and stuffix base_name = str(target[0]).split('.tar')[0] (target_dir, dir_name) = os.path.split(base_name) # create the target directory if it does not exist if target_dir and not os.path.exists(target_dir): os.makedirs(target_dir) # open our tar file for writing print >> sys.stderr, 'DistTar: Writing %s' % str(target[0]) print >> sys.stderr, ' with contents: %s' % [str(s) for s in source] tar = tarfile.open(str(target[0]), ""w:%s"" % tar_format) # write sources to our tar file for item in source: item = str(item) sys.stderr.write(""."") #print ""Adding to TAR file: %s/%s"" % (dir_name,item) tar.add(item,'%s/%s' % (dir_name,item)) # all done sys.stderr.write(""\n"") #print ""Closing TAR file"" tar.close()" 859,"def disttar_suffix(env, sources): """"""tar archive suffix generator"""""" env_dict = env.Dictionary() if env_dict.has_key(""DISTTAR_FORMAT"") and env_dict[""DISTTAR_FORMAT""] in [""gz"", ""bz2""]: return "".tar."" + env_dict[""DISTTAR_FORMAT""] else: return "".tar""" 860,"def generate(env): """""" Add builders and construction variables for the DistTar builder. 
"""""" disttar_action=SCons.Action.Action(disttar, disttar_string) env['BUILDERS']['DistTar'] = Builder( action=disttar_action , emitter=disttar_emitter , suffix = disttar_suffix , target_factory = env.fs.Entry ) env.AppendUnique( DISTTAR_FORMAT = 'gz' )" 861,"def ensure_table(self, cls): """"""Ensure table's existence - as per the gludb spec."""""" id_len = len(uuid()) index_names = cls.index_names() or [] cols = [ 'id char(%d) primary key' % (id_len,), 'value jsonb' ] + [ name + ' text' for name in index_names ] table_name = cls.get_table_name() with self._conn() as conn: with conn.cursor() as cur: cur.execute('create table if not exists %s (%s);' % ( table_name, ','.join(cols) )) for name in index_names: cur.execute('create index if not exists %s on %s(%s);' % ( table_name + '_' + name + '_idx', table_name, name ))" 862,"def find_one(self, cls, id): """"""Find single keyed row - as per the gludb spec."""""" found = self.find_by_index(cls, 'id', id) return found[0] if found else None" 863,"def find_by_index(self, cls, index_name, value): """"""Find all rows matching index query - as per the gludb spec."""""" cur = self._conn().cursor() # psycopg2 supports using Python formatters for queries # we also request our JSON as a string for the from_data calls query = 'select id, value::text from {0} where {1} = %s;'.format( cls.get_table_name(), index_name ) found = [] with self._conn() as conn: with conn.cursor() as cur: cur.execute(query, (value,)) for row in cur: id, data = str(row[0]).strip(), row[1] obj = cls.from_data(data) assert id == obj.id found.append(obj) return found" 864,"def save(self, obj): """"""Save current instance - as per the gludb spec."""""" cur = self._conn().cursor() tabname = obj.__class__.get_table_name() index_names = obj.__class__.index_names() or [] col_names = ['id', 'value'] + index_names value_holders = ['%s'] * len(col_names) updates = ['%s = EXCLUDED.%s' % (cn, cn) for cn in col_names[1:]] if not obj.id: id = uuid() obj.id = id query = 'insert into {0} ({1}) values ({2}) on conflict(id) do update set {3};'.format( tabname, ','.join(col_names), ','.join(value_holders), ','.join(updates), ) values = [obj.id, obj.to_data()] index_vals = obj.indexes() or {} values += [index_vals.get(name, 'NULL') for name in index_names] with self._conn() as conn: with conn.cursor() as cur: cur.execute(query, tuple(values))" 865,"def delete(self, obj): """"""Required functionality."""""" del_id = obj.get_id() if not del_id: return cur = self._conn().cursor() tabname = obj.__class__.get_table_name() query = 'delete from {0} where id = %s;'.format(tabname) with self._conn() as conn: with conn.cursor() as cur: cur.execute(query, (del_id,))" 866,"def authenticated_get(username, password, url, verify=True): """""" Perform an authorized query to the url, and return the result """""" try: response = requests.get(url, auth=(username, password), verify=verify) if response.status_code == 401: raise BadCredentialsException( ""Unable to authenticate user %s to %s with password provided!"" % (username, url)) except requests.exceptions.SSLError: raise CertificateException(""Unable to verify certificate at %s!"" % url) return response.content" 867,"def cleaned_request(request_type, *args, **kwargs): """""" Perform a cleaned requests request """""" s = requests.Session() # this removes netrc checking s.trust_env = False return s.request(request_type, *args, **kwargs)" 868,"def download_to_bytesio(url): """""" Return a bytesio object with a download bar """""" logger.info(""Downloading url: 
{0}"".format(url)) r = cleaned_request('get', url, stream=True) stream = io.BytesIO() total_length = int(r.headers.get('content-length')) for chunk in progress.bar(r.iter_content(chunk_size=1024), expected_size=(total_length/1024) + 1): if chunk: stream.write(chunk) stream.seek(0) return stream" 869,"def add(one, two=4, three=False): ''' This function adds two number. :param one: first number to add :param two: second number to add :rtype: int ''' s = str(int(one) + int(two)) logging.debug('logging sum from hello.py:' + s) print 'printing sum from hello.py:', s return s" 870,"def queue(self): """"""Message queue queue."""""" with self.connection_pool.acquire(block=True) as conn: return Q( self.routing_key, exchange=self.exchange, routing_key=self.routing_key )(conn)" 871,"def exists(self): """"""Test if this queue exists in the AMQP store. Note: This doesn't work with redis as declaring queues has not effect except creating the exchange. :returns: True if the queue exists, else False. :rtype: bool """""" try: queue = self.queue queue.queue_declare(passive=True) except NotFound: return False except ChannelError as e: if e.reply_code == '404': return False raise e return True" 872,"def producer(self, conn): """"""Get a consumer for a connection."""""" return Producer( conn, exchange=self.exchange, routing_key=self.routing_key, auto_declare=True, )" 873,"def consumer(self, conn): """"""Get a consumer for a connection."""""" return Consumer( connection=conn, queue=self.queue.name, exchange=self.exchange.name, exchange_type=self.exchange.type, durable=self.exchange.durable, auto_delete=self.exchange.auto_delete, routing_key=self.routing_key, no_ack=self.no_ack, )" 874,"def create_producer(self): """"""Context manager that yields an instance of ``Producer``."""""" with self.connection_pool.acquire(block=True) as conn: yield self.producer(conn)" 875,"def create_consumer(self): """"""Context manager that yields an instance of ``Consumer``."""""" with self.connection_pool.acquire(block=True) as conn: yield self.consumer(conn)" 876,"def publish(self, events): """"""Publish events."""""" assert len(events) > 0 with self.create_producer() as producer: for event in events: producer.publish(event)" 877,"def consume(self, payload=True): """"""Consume events."""""" with self.create_consumer() as consumer: for msg in consumer.iterqueue(): yield msg.payload if payload else msg" 878,"def get_initial(self, *args, **kwargs): """""" Gathers initial form values from user and profile objects suitable for using as form's initial data. """""" initial = {} for field in self.fields: value = None if hasattr(self.user, field): value = getattr(self.user, field) if hasattr(self.profile, field): value = getattr(self.profile, field) if value: initial.update({ field: value }) if hasattr(self.profile, 'dob'): dob = self.profile.dob if dob: if 'dob_day' in self.fields: initial.update({ 'dob_day': dob.day }) if 'dob_month' in self.fields: initial.update({ 'dob_month': dob.month }) if 'dob_year' in self.fields: initial.update({ 'dob_year': dob.year }) return initial" 879,"def save(self, *args, **kwargs): """""" This method should be called when is_valid is true to save relevant fields to user and profile models. 
"""""" for key, value in self.cleaned_data.items(): if value != None: if hasattr(self.user, key): setattr(self.user, key, value) if hasattr(self.profile, key): setattr(self.profile, key, value) # set password if 'password1' in self.cleaned_data: if self.cleaned_data['password1']: self.user.set_password(self.cleaned_data['password1']) # set dob if 'dob_day' in self.cleaned_data and 'dob_month' in self.\ cleaned_data and 'dob_year' in self.cleaned_data: self.profile.dob = self._gen_dob() self.user.save() self.profile.save()" 880,"def clean_username(self): """""" Validate that the username is alphanumeric and is not already in use. Don't fail if users username is provided. """""" user = None try: user = User.objects.get(username__iexact=self.\ cleaned_data['username']) except User.DoesNotExist: return self.cleaned_data['username'] if user: if user.username == self.user.username: return self.cleaned_data['username'] raise forms.ValidationError(_(\ ""A user with that username already exists.""))" 881,"def clean(self): """""" Verifiy that the values entered into the two password fields match. Note that an error here will end up in ``non_field_errors()`` because it doesn't apply to a single field. """""" if 'dob_day' in self.cleaned_data and 'dob_month' in \ self.cleaned_data and 'dob_year' in self.cleaned_data: try: self._gen_dob() except ValueError: self._errors['dob_day'] = (_(\ ""You provided an invalid date.""),) if 'password1' in self.cleaned_data and 'password2' in \ self.cleaned_data: if self.cleaned_data['password1'] != \ self.cleaned_data['password2']: raise forms.ValidationError(_(\ ""The two password fields didn't match."")) return self.cleaned_data" 882,"def request(self,message,message_type): """""" Send a request message of the given type Args: - message: the message to publish - message_type: the type of message being sent """""" if message_type == MULTIPART: raise Exception(""Unsupported request type"") super(Requestor,self).send(message,message_type)" 883,"def run_as_admin(command, cwd=None, environ=None): """""" Runs a command as an admin in the specified *cwd* and *environ*. On Windows, this creates a temporary directory where this information is stored temporarily so that the new process can launch the proper subprocess. """""" if isinstance(command, str): command = shlex.split(command) if os.name == 'nt': return _run_as_admin_windows(command, cwd, environ) elif os.name == 'posix': command = ['sudo', '-E'] + list(command) sys.exit(subprocess.call(command)) else: raise RuntimeError('Unsupported os: {!r}'.format(os.name))" 884,"def add_body_part(self, key, data, mime_type, size=None): """"""Adds data to the HTTP request body. If more than one part is added, this is assumed to be a mime-multipart request. This method is designed to create MIME 1.0 requests as specified in RFC 1341. Args: data: str or a file-like object containing a part of the request body. mime_type: str The MIME type describing the data size: int Required if the data is a file like object. If the data is a string, the size is calculated so this parameter is ignored. """""" if isinstance(data, str): size = len(data) if hasattr(data, ""fileno""): size = os.fstat(data.fileno())[stat.ST_SIZE] if size is None: # TODO: support chunked transfer if some of the body is of unknown size. 
raise UnknownSize('Each part of the body must have a known size.') if 'Content-Length' in self.headers: content_length = int(self.headers['Content-Length']) else: content_length = 0 # If this is the first part added to the body, then this is not a multipart # request. boundary_string = '\r\n--%s\r\n' % (MIME_BOUNDARY,) self._body_parts.append(boundary_string) content_length += len(boundary_string) + size # Include the mime type of this part. cd = 'Content-Disposition: form-data; name=""%s""' % key mt = mime_type if hasattr(data, ""fileno""): cd += '; filename=""%s""' % data.name.split('/')[-1] mt = mimetypes.guess_type(data.name)[0] or 'application/octet-stream' cd += '\r\n' type_string = 'Content-Type: %s\r\n\r\n' % (mt) self._body_parts.append(cd) self._body_parts.append(type_string) content_length += len(type_string) + len(cd) self._body_parts.append(data) self.headers['Content-Length'] = str(content_length)" 885,"def _copy(self): """"""Creates a deep copy of this request."""""" copied_uri = Uri(self.uri.scheme, self.uri.host, self.uri.port, self.uri.path, self.uri.query.copy()) new_request = HttpRequest(uri=copied_uri, method=self.method, headers=self.headers.copy()) new_request._body_parts = self._body_parts[:] return new_request" 886,"def _get_relative_path(self): """"""Returns the path with the query parameters escaped and appended."""""" param_string = self._get_query_string() if self.path is None: path = '/' else: path = self.path if param_string: return '?'.join([path, param_string]) else: return path" 887,"def modify_request(self, http_request=None): """"""Sets HTTP request components based on the URI."""""" if http_request is None: http_request = HttpRequest() if http_request.uri is None: http_request.uri = Uri() # Determine the correct scheme. if self.scheme: http_request.uri.scheme = self.scheme if self.port: http_request.uri.port = self.port if self.host: http_request.uri.host = self.host # Set the relative uri path if self.path: http_request.uri.path = self.path if self.query: http_request.uri.query = self.query.copy() return http_request" 888,"def parse_uri(uri_string): """"""Creates a Uri object which corresponds to the URI string. This method can accept partial URIs, but it will leave missing members of the Uri unset. """""" parts = urlparse.urlparse(uri_string) uri = Uri() if parts[0]: uri.scheme = parts[0] if parts[1]: host_parts = parts[1].split(':') if host_parts[0]: uri.host = host_parts[0] if len(host_parts) > 1: uri.port = int(host_parts[1]) if parts[2]: uri.path = parts[2] if parts[4]: param_pairs = parts[4].split('&') for pair in param_pairs: pair_parts = pair.split('=') if len(pair_parts) > 1: uri.query[urllib.unquote_plus(pair_parts[0])] = ( urllib.unquote_plus(pair_parts[1])) elif len(pair_parts) == 1: uri.query[urllib.unquote_plus(pair_parts[0])] = None return uri" 889,"def _get_connection(self, uri, headers=None): """"""Opens a socket connection to the server to set up an HTTP request. Args: uri: The full URL for the request as a Uri object. headers: A dict of string pairs containing the HTTP headers for the request. 
"""""" connection = None if uri.scheme == 'https': if not uri.port: connection = httplib.HTTPSConnection(uri.host) else: connection = httplib.HTTPSConnection(uri.host, int(uri.port)) else: if not uri.port: connection = httplib.HTTPConnection(uri.host) else: connection = httplib.HTTPConnection(uri.host, int(uri.port)) return connection" 890,"def _http_request(self, method, uri, headers=None, body_parts=None): """"""Makes an HTTP request using httplib. Args: method: str example: 'GET', 'POST', 'PUT', 'DELETE', etc. uri: str or atom.http_core.Uri headers: dict of strings mapping to strings which will be sent as HTTP headers in the request. body_parts: list of strings, objects with a read method, or objects which can be converted to strings using str. Each of these will be sent in order as the body of the HTTP request. """""" if isinstance(uri, (str, unicode)): uri = Uri.parse_uri(uri) connection = self._get_connection(uri, headers=headers) if self.debug: connection.debuglevel = 1 if connection.host != uri.host: connection.putrequest(method, str(uri)) else: connection.putrequest(method, uri._get_relative_path()) # Overcome a bug in Python 2.4 and 2.5 # httplib.HTTPConnection.putrequest adding # HTTP request header 'Host: www.google.com:443' instead of # 'Host: www.google.com', and thus resulting the error message # 'Token invalid - AuthSub token has wrong scope' in the HTTP response. if (uri.scheme == 'https' and int(uri.port or 443) == 443 and hasattr(connection, '_buffer') and isinstance(connection._buffer, list)): header_line = 'Host: %s:443' % uri.host replacement_header_line = 'Host: %s' % uri.host try: connection._buffer[connection._buffer.index(header_line)] = ( replacement_header_line) except ValueError: # header_line missing from connection._buffer pass # Send the HTTP headers. for header_name, value in headers.iteritems(): connection.putheader(header_name, value) connection.endheaders() # If there is data, send it in the request. if body_parts: for part in body_parts: _send_data_part(part, connection) # Return the HTTP Response from the server. return connection.getresponse()" 891,"def seek(self, offset, mode='set', renew=False): """""" Moves the cursor of the Scanner to or by *offset* depending on the *mode*. Is is similar to a file's `seek()` function, however the *mode* parameter also accepts the string-mode values `'set'`, `'cur'` and `'end'`. Note that even for the `'end'` mode, the *offset* must be negative to actually reach back up from the end of the file. If *renew* is set to True, the line and column counting will always begin from the start of the file. Keep in mind that this could can be very slow because it has to go through each and every character until the desired position is reached. Otherwise, if *renew* is set to False, it will be decided if counting from the start is shorter than counting from the current cursor position. """""" mapping = {os.SEEK_SET: 'set', os.SEEK_CUR: 'cur', os.SEEK_END: 'end'} mode = mapping.get(mode, mode) if mode not in ('set', 'cur', 'end'): raise ValueError('invalid mode: ""{}""'.format(mode)) # Translate the other modes into the 'set' mode. 
if mode == 'end': offset = len(self.text) + offset mode = 'set' elif mode == 'cur': offset = self.index + offset mode = 'set' assert mode == 'set' if offset < 0: offset = 0 elif offset > len(self.text): offset = len(self.text) + 1 if self.index == offset: return # Figure which path is shorter: # 1) Start counting from the beginning of the file, if offset <= abs(self.index - offset): text, index, lineno, colno = self.text, 0, 1, 0 while index != offset: # Find the next newline in the string. nli = text.find('\n', index) if nli >= offset or nli < 0: colno = offset - index index = offset break else: colno = 0 lineno += 1 index = nli + 1 # 2) or step from the current position of the cursor. else: text, index, lineno, colno = self.text, self.index, self.lineno, self.colno if offset < index: # backwards while index != offset: nli = text.rfind('\n', 0, index) if nli < 0 or nli <= offset: if text[offset] == '\n': assert (offset - nli) == 0, (offset, nli) nli = text.rfind('\n', 0, index-1) lineno -= 1 colno = offset - nli - 1 index = offset break else: lineno -= 1 index = nli - 1 else: # forwards while index != offset: nli = text.find('\n', index) if nli < 0 or nli >= offset: colno = offset - index index = offset else: lineno += 1 index = nli + 1 assert lineno >= 1 assert colno >= 0 assert index == offset self.index, self.lineno, self.colno = index, lineno, colno" 892,"def next(self): "" Move on to the next character in the text. "" char = self.char if char == '\n': self.lineno += 1 self.colno = 0 else: self.colno += 1 self.index += 1 return self.char" 893,"def readline(self): "" Reads a full line from the scanner and returns it. "" start = end = self.index while end < len(self.text): if self.text[end] == '\n': end += 1 break end += 1 result = self.text[start:end] self.index = end if result.endswith('\n'): self.colno = 0 self.lineno += 1 else: self.colno += end - start return result" 894,"def match(self, regex, flags=0): """""" Matches the specified *regex* from the current character of the *scanner* and returns the result. The Scanners column and line numbers are updated respectively. # Arguments regex (str, Pattern): The regex to match. flags (int): The flags to use when compiling the pattern. """""" if isinstance(regex, str): regex = re.compile(regex, flags) match = regex.match(self.text, self.index) if not match: return None start, end = match.start(), match.end() lines = self.text.count('\n', start, end) self.index = end if lines: self.colno = end - self.text.rfind('\n', start, end) - 1 self.lineno += lines else: self.colno += end - start return match" 895,"def getmatch(self, regex, group=0, flags=0): """""" The same as #Scanner.match(), but returns the captured group rather than the regex match object, or None if the pattern didn't match. """""" match = self.match(regex, flags) if match: return match.group(group) return None" 896,"def restore(self, cursor): "" Moves the scanner back (or forward) to the specified cursor location. "" if not isinstance(cursor, Cursor): raise TypeError('expected Cursor object', type(cursor)) self.index, self.lineno, self.colno = cursor" 897,"def update(self): """""" Updates the #rules_map dictionary and #skippable_rules list based on the #rules list. Must be called after #rules or any of its items have been modified. The same rule name may appear multiple times. # Raises TypeError: if an item in the `rules` list is not a rule. 
"""""" self.rules_map = {} self.skippable_rules = [] for rule in self.rules: if not isinstance(rule, Rule): raise TypeError('item must be Rule instance', type(rule)) self.rules_map.setdefault(rule.name, []).append(rule) if rule.skip: self.skippable_rules.append(rule)" 898,"def expect(self, *names): """""" Checks if the current #token#s type name matches with any of the specified *names*. This is useful for asserting multiple valid token types at a specific point in the parsing process. # Arguments names (str): One or more token type names. If zero are passed, nothing happens. # Raises UnexpectedTokenError: If the current #token#s type name does not match with any of the specified *names*. """""" if not names: return if not self.token or self.token.type not in names: raise UnexpectedTokenError(names, self.token)" 899,"def accept(self, *names, **kwargs): """""" Extracts a token of one of the specified rule names and doesn't error if unsuccessful. Skippable tokens might still be skipped by this method. # Arguments names (str): One or more token names that are accepted. kwargs: Additional keyword arguments for #next(). # Raises ValueError: if a rule with the specified name doesn't exist. """""" return self.next(*names, as_accept=True, **kwargs)" 900,"def next(self, *expectation, **kwargs): """""" Parses the next token from the input and returns it. The new token can be accessed from the #token attribute after the method was called. If one or more arguments are specified, they must be rule names that are to be expected at the current position. They will be attempted to be matched first (in the specicied order). If the expectation could not be met, an #UnexpectedTokenError is raised. An expected Token will not be skipped, even if its rule defines it so. # Arguments expectation (str): The name of one or more rules that are expected from the current position of the parser. If empty, the first matching token of ALL rules will be returned. In this case, skippable tokens will be skipped. as_accept (bool): If passed True, this method behaves the same as the #accept() method. The default value is #False. weighted (bool): If passed True, the tokens specified with *expectations* are checked first, effectively giving them a higher priority than other they would have from the order in the #rules list. The default value is #False. # Raises ValueError: if an expectation doesn't match with a rule name. UnexpectedTokenError: Ff an expectation is given and the expectation wasn't fulfilled. Only when *as_accept* is set to #False. TokenizationError: if a token could not be generated from the current position of the Scanner. """""" as_accept = kwargs.pop('as_accept', False) weighted = kwargs.pop('weighted', False) for key in kwargs: raise TypeError('unexpected keyword argument {0!r}'.format(key)) if self.token and self.token.type == eof: if not as_accept and expectation and eof not in expectation: raise UnexpectedTokenError(expectation, self.token) elif as_accept and eof in expectation: return self.token elif as_accept: return None return self.token token = None while token is None: # Stop if we reached the end of the input. cursor = self.scanner.cursor if not self.scanner: token = Token(eof, cursor, None, None) break value = None # Try to match the expected tokens. 
if weighted: for rule_name in expectation: if rule_name == eof: continue rules = self.rules_map.get(rule_name) if rules is None: raise ValueError('unknown rule', rule_name) for rule in rules: value = rule.tokenize(self.scanner) if value: break if value: break self.scanner.restore(cursor) # Match the rest of the rules, but only if we're not acting # like the accept() method that doesn't need the next token # for raising an UnexpectedTokenError. if not value: if as_accept and weighted: # Check only skippable rules if we're only trying to accept # a certain token type and may consume any skippable tokens # until then. check_rules = self.skippable_rules else: check_rules = self.rules for rule in check_rules: if weighted and expectation and rule.name in expectation: # Skip rules that we already tried. continue value = rule.tokenize(self.scanner) if value: break self.scanner.restore(cursor) if not value: if as_accept: return None token = Token(None, cursor, self.scanner.char, None) else: assert rule, ""we should have a rule by now"" if type(value) is not Token: if isinstance(value, tuple): value, string_repr = value else: string_repr = None value = Token(rule.name, cursor, value, string_repr) token = value expected = rule.name in expectation if not expected and rule.skip: # If we didn't expect this rule to match, and if it's skippable, # just skip it. :-) token = None elif not expected and as_accept: # If we didn't expect this rule to match but are just accepting # instead of expecting, restore to the original location and stop. self.scanner.restore(cursor) return None self.token = token if as_accept and token and token.type == eof: if eof in expectation: return token return None if token.type is None: raise TokenizationError(token) if not as_accept and expectation and token.type not in expectation: raise UnexpectedTokenError(expectation, token) assert not as_accept or (token and token.type in expectation) return token" 901,"def append(self, event, help=""""): """"""Creates a new event. `event` may be iterable or string Args: event (str): Name of event to declare Kwargs: help (str): Help string for the event Raises: TypeError **Please** describe the event and its calling arguments in the help string. """""" if isinstance(event, str): self._events[event] = HookList(is_waterfall=self.is_waterfall) self._help[event] = (help, getframeinfo(stack()[1][0])) if not help: logger.warning(""Great, don't say anything about your hooks and \ wait for plugin creators to figure it out."") elif isinstance(event, Iterable): # Deprecated. It does not give the ability to give help string # TODO: Remove this for name in event: self.append(name) else: raise TypeError(""Invalid event name!"")" 902,"def hook(self, function, event, dependencies): """"""Tries to load the hook to the event Args: function (func): Function that will be called when the event is called Kwargs: dependencies (str): String or Iterable with modules whose hooks should be called before this one Raises: NameError Note that the dependencies are module-wide, that means that if `parent.foo` and `parent.bar` are both subscribed to `example` event and `child` enumerates `parent` as dependency, **both** `foo` and `bar` must be called in order for the dependency to get resolved. 
"""""" # Hooks all events (recursively) if event is None: for e in self._events.keys(): self.hook(function, e, dependencies) return # Hook multiple, but specific events (recursively) if not isinstance(event, str) and isinstance(event, Iterable): for e in event: self.hook(function, e, dependencies) return # Hook a simple event event_list = self._events.get(event, None) if event_list is None: raise NameError( ""Invalid key provided '%s'. Valid options: %s"" % (event, "", "".join(self._events.keys())) ) return return event_list.hook(function, dependencies)" 903,"def call(path, *args, encoding=""utf-8"", show_command=False): """"""使用 subprocess.check_output 调用 git 命令。 :param str path: git 仓库文件夹路径。 :param \*args: git 的附加参数。 :returns: 错误代码和调用结果。 :rtype: int :rtype: string git 返回的信息,若执行出错则为错误信息。 """""" returncode = 0 output = None try: # 2015-10-10 zrong # 在 windows 上使用 universal_newlines=True # 会导致输出信息为中文时出现编码错误 # 原因是 check_out 中读入 stdout 内容的 read 方法没有传递编码参数 # 因此不再使用 universal_newlines=True 这个参数 # 而改用直接返回 bytes,然后对其解码 arg_list = get_args(path, *args) if show_command: print('git call args:', arg_list) output = subprocess.check_output(arg_list, stderr=subprocess.STDOUT) output = output.decode(encoding=encoding) except subprocess.CalledProcessError as err: returncode = err.returncode output = err.output.decode(encoding=encoding) return returncode, output" 904,"def get_args(path, *args, work_tree=True, bare=False): """"""获取可被 subprogress 执行的 git 参数 list。 :param str path: git 仓库文件夹路径。 :param \*args: git 的附加参数。 :param bare: 是否视为 bare 库 """""" base = [ 'git' ] if path: base.append('-C') base.append(path) if bare: base.append('--bare') base.append(""--git-dir=""+path) else: base.append(""--git-dir=""+os.path.join(path, "".git"")) if work_tree: base.append(""--work-tree=""+path) for arg in args: base.append(arg) return base" 905,"def get_branches(path): """"""获取当前所有分支名称的列表。 :param str path: git 仓库文件夹路径。 :return: 分支名称列表。当前分支位于列表第一项。 :rtype: list """""" code, output = call(path, 'branch', '--list') if code > 0: return None branches = output.split('\n') newbr = [None] for br in branches: if br: if br[0] == '*': newbr[0] = br[2:] else: newbr.append(br[2:]) return newbr" 906,"def clone(giturl, gitpath): """"""clone 一个 git 库。 :param str giturl: git 仓库的 url 地址。 :param str gitpath: git 仓库保存路径。 """""" gitArgs = ['git', 'clone', giturl, gitpath] slog.info(' '.join(gitArgs)) return subprocess.call(gitArgs)" 907,"def get_hash(path, cut=0): """"""获取可被 git 的 HEAD 的 sha1 值。 :param str path: git 仓库文件夹路径。 :param int cut: 包含的 sha1 值的长度。0代表不剪切。 :returns: 剪切过的 sha1 的值。 :rtype: str """""" code, output = call(path, 'rev-parse', 'HEAD') if code > 0: return None # maybe the string is with a linebreak. sha1 = output.strip() if cut > 0: sha1 = sha1[:7] return sha1" 908,"def update_submodules(path, init=True, update=True): """"""更新子模块。 :param str path: git 仓库文件夹路径。 :param bool init: 是否初始化子模块。 :param bool update: 是否更新子模块。 """""" succ = None if init: arg = get_args(path, 'submodule', 'init', work_tree=False) slog.info(' '.join(arg)) succ = subprocess.call(arg) if succ>0: slog.error('git execute error!') return succ if update: arg = get_args(path, ""submodule"", ""update"", work_tree=False) slog.info(' '.join(arg)) succ = subprocess.call(arg) if succ>0: slog.error('git execute error!') return succ return succ" 909,"def print_message(self, message, verbosity_needed=1): """""" Prints the message, if verbosity is high enough. 
"""""" if self.args.verbosity >= verbosity_needed: print(message)" 910,"def error(self, message, code=1): """""" Prints the error, and exits with the given code. """""" sys.stderr.write(message) sys.exit(code)" 911,"def parse_db_settings(self, settings): """""" Parse out database settings from filename or DJANGO_SETTINGS_MODULE. """""" if settings == 'DJANGO_SETTINGS_MODULE': django_settings = os.environ.get('DJANGO_SETTINGS_MODULE') self.print_message(""Getting settings file from DJANGO_SETTINGS_MODULE=%s"" % django_settings) path_pieces = django_settings.split('.') path_pieces[-1] = '%s.py' % path_pieces[-1] settings = os.path.join(*path_pieces) self.print_message(""Parsing settings from settings file '%s'"" % settings) parser = DatabaseSettingsParser() with open(settings) as settings_file: settings_ast = ast.parse(settings_file.read()) parser.visit(settings_ast) try: return parser.database_settings['default'] except KeyError as e: self.error(""Missing key or value for: %s\nSettings must be of the form: %s"" % (e, self.settings_format))" 912,"def initialize_db_args(self, settings, db_key): """""" Initialize connection arguments for postgres commands. """""" self.print_message(""Initializing database settings for %s"" % db_key, verbosity_needed=2) db_member = self.databases[db_key] db_name = settings.get('NAME') if db_name and not db_member['name']: db_member['name'] = db_name db_member['password'] = settings.get('PASSWORD') args = [] for key in ['USER', 'HOST', 'PORT']: value = settings.get(key) if value: self.print_message(""Adding parameter %s"" % key.lower, verbosity_needed=2) args.append('--%s=%s' % (key.lower(), value)) db_member['args'] = args" 913,"def download_file(self, url, filename): """""" Download file from url to filename. """""" self.print_message(""Downloading to file '%s' from URL '%s'"" % (filename, url)) try: db_file = urllib2.urlopen(url) with open(filename, 'wb') as output: output.write(db_file.read()) db_file.close() except Exception as e: self.error(str(e)) self.print_message(""File downloaded"")" 914,"def unzip_file_if_necessary(self, source_file): """""" Unzip file if zipped. """""" if source_file.endswith("".gz""): self.print_message(""Decompressing '%s'"" % source_file) subprocess.check_call([""gunzip"", ""--force"", source_file]) source_file = source_file[:-len("".gz"")] return source_file" 915,"def download_file_from_url(self, source_app, url): """""" Download file from source app or url, and return local filename. """""" if source_app: source_name = source_app else: source_name = urlparse.urlparse(url).netloc.replace('.', '_') filename = self.create_file_name(source_name) self.download_file(url, filename) return filename" 916,"def dump_database(self): """""" Create dumpfile from postgres database, and return filename. """""" db_file = self.create_file_name(self.databases['source']['name']) self.print_message(""Dumping postgres database '%s' to file '%s'"" % (self.databases['source']['name'], db_file)) self.export_pgpassword('source') args = [ ""pg_dump"", ""-Fc"", ""--no-acl"", ""--no-owner"", ""--dbname=%s"" % self.databases['source']['name'], ""--file=%s"" % db_file, ] args.extend(self.databases['source']['args']) subprocess.check_call(args) return db_file" 917,"def drop_database(self): """""" Drop postgres database. 
"""""" self.print_message(""Dropping database '%s'"" % self.databases['destination']['name']) self.export_pgpassword('destination') args = [ ""dropdb"", ""--if-exists"", self.databases['destination']['name'], ] args.extend(self.databases['destination']['args']) subprocess.check_call(args)" 918,"def create_database(self): """""" Create postgres database. """""" self.print_message(""Creating database '%s'"" % self.databases['destination']['name']) self.export_pgpassword('destination') args = [ ""createdb"", self.databases['destination']['name'], ] args.extend(self.databases['destination']['args']) for arg in self.databases['destination']['args']: if arg[:7] == '--user=': args.append('--owner=%s' % arg[7:]) subprocess.check_call(args)" 919,"def replace_postgres_db(self, file_url): """""" Replace postgres database with database from specified source. """""" self.print_message(""Replacing postgres database"") if file_url: self.print_message(""Sourcing data from online backup file '%s'"" % file_url) source_file = self.download_file_from_url(self.args.source_app, file_url) elif self.databases['source']['name']: self.print_message(""Sourcing data from database '%s'"" % self.databases['source']['name']) source_file = self.dump_database() else: self.print_message(""Sourcing data from local backup file %s"" % self.args.file) source_file = self.args.file self.drop_database() self.create_database() source_file = self.unzip_file_if_necessary(source_file) self.print_message(""Importing '%s' into database '%s'"" % (source_file, self.databases['destination']['name'])) args = [ ""pg_restore"", ""--no-acl"", ""--no-owner"", ""--dbname=%s"" % self.databases['destination']['name'], source_file, ] args.extend(self.databases['destination']['args']) subprocess.check_call(args)" 920,"def get_file_url_for_heroku_app(self, source_app): """""" Get latest backup URL from heroku pg:backups (or pgbackups). """""" self.print_message(""Getting backup url for Heroku app '%s'"" % source_app) args = [ ""heroku"", ""pg:backups:url"", ""--app=%s"" % source_app, ] if self.args.use_pgbackups: args = [ ""heroku"", ""pgbackups:url"", ""--app=%s"" % source_app, ] return subprocess.check_output(args).strip().decode('ascii')" 921,"def capture_heroku_database(self): """""" Capture Heroku database backup. """""" self.print_message(""Capturing database backup for app '%s'"" % self.args.source_app) args = [ ""heroku"", ""pg:backups:capture"", ""--app=%s"" % self.args.source_app, ] if self.args.use_pgbackups: args = [ ""heroku"", ""pgbackups:capture"", ""--app=%s"" % self.args.source_app, ""--expire"", ] subprocess.check_call(args)" 922,"def reset_heroku_database(self): """""" Reset Heroku database. """""" self.print_message(""Resetting database for app '%s'"" % self.args.destination_app) args = [ ""heroku"", ""pg:reset"", ""--app=%s"" % self.args.destination_app, ""DATABASE_URL"", ] subprocess.check_call(args)" 923,"def replace_heroku_db(self, file_url): """""" Replace Heroku database with database from specified source. 
"""""" self.print_message(""Replacing database for Heroku app '%s'"" % self.args.destination_app) self.reset_heroku_database() if file_url: self.print_message(""Restoring from URL '%s'"" % file_url) args = [ ""heroku"", ""pg:backups:restore"", file_url, ""--app=%s"" % self.args.destination_app, ""DATABASE"", ""--confirm"", self.args.destination_app, ] if self.args.use_pgbackups: args = [ ""heroku"", ""pgbackups:restore"", ""--app=%s"" % self.args.destination_app, ""DATABASE_URL"", ""--confirm"", self.args.destination_app, file_url, ] subprocess.check_call(args) else: # TODO perhaps add support for file -> heroku by piping to pg:psql self.print_message(""Pushing data from database '%s'"" % self.databases['source']['name']) self.print_message(""NOTE: Any postgres authentication settings you passed to paragres "" ""will be ignored.\nIf desired, you can export PG* variables.\n"" ""You will be prompted for your psql password."") args = [ ""heroku"", ""pg:push"", self.databases['source']['name'], ""DATABASE_URL"", ""--app=%s"" % self.args.destination_app, ] subprocess.check_call(args)" 924,"def run(self): """""" Replace a database with the data from the specified source. """""" self.print_message(""\nBeginning database replacement process.\n"") if self.args.source_settings: settings = self.parse_db_settings(self.args.source_settings) self.initialize_db_args(settings, 'source') if self.args.settings: settings = self.parse_db_settings(self.args.settings) self.initialize_db_args(settings, 'destination') if self.args.capture: self.capture_heroku_database() file_url = self.args.url if self.args.source_app: self.print_message(""Sourcing data from backup for Heroku app '%s'"" % self.args.source_app) file_url = self.get_file_url_for_heroku_app(self.args.source_app) if self.args.destination_app: self.replace_heroku_db(file_url) elif self.databases['destination']['name']: self.replace_postgres_db(file_url) self.print_message(""\nDone.\n\nDon't forget to update the Django Site entry if necessary!"")" 925,"def import_task_modules(): """""" Import all installed apps and add modules to registry """""" top_level_modules = settings.INSTALLED_APPS module_names = [] for module in top_level_modules: #Import package mod = import_module(module) #Find all modules in package path for loader, module_name, is_pkg in pkgutil.walk_packages(mod.__path__): if not module_name.startswith(""__""): #If the module is not __init__, add it to the registry submod_name = ""{0}.{1}"".format(module,module_name) module_names.append(submod_name) #Once everything is imported, the metaclass will register them automatically modules = map(import_module, module_names) return modules" 926,"def find_in_registry(category = None, namespace = None, name = None): """""" Find a given category/namespace/name combination in the registry category - string, see utils.inputs.registrycategories namespace - module namespace, see settings.NAMESPACE name - lowercase name of module """""" selected_registry = registry if category is not None: selected_registry = [re for re in selected_registry if re.category==category] if namespace is not None: selected_registry = [re for re in selected_registry if re.namespace==namespace] if name is not None: selected_registry = [re for re in selected_registry if re.name==name] if len(selected_registry)>0: return [sr.cls for sr in selected_registry] return None" 927,"def list(self, service_rec=None, host_rec=None, hostfilter=None): """""" List a specific service or all services :param service_rec: t_services.id :param 
host_rec: t_hosts.id :param hostfilter: Valid hostfilter or None :return: [(svc.t_services.id, svc.t_services.f_hosts_id, svc.t_hosts.f_ipaddr, svc.t_hosts.f_hostname, svc.t_services.f_proto, svc.t_services.f_number, svc.t_services.f_status, svc.t_services.f_name, svc.t_services.f_banner), ...] """""" return self.send.service_list(service_rec, host_rec, hostfilter)" 928,"def info(self, svc_rec=None, ipaddr=None, proto=None, port=None): """""" Information about a service. :param svc_rec: t_services.id :param ipaddr: IP Address :param proto: Protocol (tcp, udp, info) :param port: Port (0-65535) :return: [ service_id, host_id, ipv4, ipv6, hostname, proto, number, status, name, banner ] """""" return self.send.service_info(svc_rec, ipaddr, proto, port)" 929,"def add(self, ipaddr=None, proto=None, port=None, fields=None): """""" Add a service record :param ipaddr: IP Address :param proto: Protocol (tcp, udp, info) :param port: Port (0-65535) :param fields: Extra fields :return: (True/False, t_services.id or response message) """""" return self.send.service_add(ipaddr, proto, port, fields)" 930,"def delete(self, svc_rec=None, ipaddr=None, proto=None, port=None): """""" Delete a t_services record :param svc_rec: t_services.id :param ipaddr: IP Address or t_hosts.id :param proto: Protocol (tcp, udp, info) :param port: Port (0-65535) :return: [True, Response Message] """""" return self.send.service_del(svc_rec, ipaddr, proto, port)" 931,"def report_list(self, service_id=None, service_port=None, hostfilter=None): """""" Returns a list of ports with IPs, banners and vulnerabilities (warning, slow!) :param service_id: t_services.id :param service_port: Port (tcp/#, udp/#, info/#) :param hostfilter: Valid hostfilter or None :return: { 'port': [t_hosts.f_ipaddr, t_services.f_banner, (t_vulndata.f_vulnid, t_vulndata.f_title, t_vulndata.f_severity, t_vulndata.f_cvss_score), ...} """""" return self.send.service_report_list(service_id, service_port, hostfilter)" 932,"def vulns_list(self, service_id=None, service_port=None, hostfilter=None): """""" List of vulnerabilities for a service :param service_id: t_services.id :param service_port: tcp/#, udp/# or info/# :param hostfilter: Valid hostfilter or None :return: t_services.rows.as_list() """""" return self.send.service_vulns_list(service_id, service_port, hostfilter)" 933,"def connect(nodes): ''' Connect a list of nodes. Connected nodes have an ``output`` member which is the following node in the line. The last node's ``output`` is a :class:`Queue` for easy plumbing. ''' for a, b in zip(nodes[:-1], nodes[1:]): a.output = b b.output = queues.Queue()" 934,"def render_layout(layout_name, content, **context): """"""Uses a jinja template to wrap the content inside a layout. Wraps the content inside a block and adds the extend statement before rendering it with jinja. The block name can be specified in the layout_name after the filename separated by a colon. The default block name is ""content"". """""" layout_block = ""content"" if "":"" in layout_name: layout_name, layout_block = layout_name.split("":"") tpl = '{%% extends ""%s"" %%}{%% block %s %%}%s{%% endblock %%}' % (layout_name, layout_block, content) return render_template_string(tpl, **context)" 935,"def parse_template(app, filename): """"""Parses the given template using the jinja environment of the given app and returns the AST. 
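# Standalone illustration of the layout-wrapping trick used by render_layout()
# above: the content is wrapped in an {% extends %}/{% block %} pair before
# rendering. jinja2 is used directly here so the snippet runs without a Flask
# app; the layout markup is an assumption.
import jinja2

layout = '<html><body>{% block content %}{% endblock %}</body></html>'
env = jinja2.Environment(loader=jinja2.DictLoader({"layout.html": layout}))
content = '<p>Hello {{ name }}</p>'
tpl = '{%% extends "%s" %%}{%% block %s %%}%s{%% endblock %%}' % (
    "layout.html", "content", content)
print(env.from_string(tpl).render(name="world"))
# -> <html><body><p>Hello world</p></body></html>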
ASTs are cached in parse_template.cache """""" if not hasattr(parse_template, ""cache""): parse_template.cache = {} if filename not in parse_template.cache: source = get_template_source(app, filename) parse_template.cache[filename] = app.jinja_env.parse(source, filename=filename) return parse_template.cache[filename]" 936,"def jinja_node_to_python(node): """"""Converts a Jinja2 node to its python equivalent """""" if isinstance(node, nodes.Const): return node.value if isinstance(node, nodes.Neg): return -jinja_node_to_python(node.node) if isinstance(node, nodes.Name): return node.name if isinstance(node, (nodes.List, nodes.Tuple)): value = [] for i in node.items: value.append(jinja_node_to_python(i)) return value if isinstance(node, nodes.Dict): value = {} for pair in node.items: value[pair.key.value] = jinja_node_to_python(pair.value) return value if isinstance(node, nodes.Call): if not isinstance(node.node, nodes.Name) or node.node.name not in (""_"", ""translate"", ""gettext""): raise FormDefinitionError(""Cannot convert function calls from jinja to python other than translation calls"") return lazy_translate(jinja_node_to_python(node.args[0])) raise Exception(""Cannot convert jinja nodes to python"")" 937,"def groups(self): """"""Get the list of Groups (by dn) that the bound CSH LDAP member object is in. """""" group_list = [] all_groups = self.get('memberof') for group_dn in all_groups: if self.__ldap_group_ou__ in group_dn: group_list.append(group_dn) return group_list" 938,"def in_group(self, group, dn=False): """"""Get whether or not the bound CSH LDAP member object is part of a group. Arguments: group -- the CSHGroup object (or distinguished name) of the group to check membership for """""" if dn: return group in self.groups() return group.check_member(self)" 939,"def savgol_filter(x, window_length, polyorder, deriv=0, delta=1.0, axis=-1, mode='interp', cval=0.0): ''' Wrapper for the scipy.signal.savgol_filter function that handles Nan values. See: https://github.com/wheeler-microfluidics/dmf-control-board-firmware/issues/3 Returns ------- y : ndarray, same shape as `x` The filtered data. ''' # linearly interpolate missing values before filtering x = np.ma.masked_invalid(pd.Series(x).interpolate()) try: # start filtering from the first non-zero value since these won't be addressed by # the interpolation above ind = np.isfinite(x).nonzero()[0][0] x[ind:] = signal.savgol_filter(x[ind:], window_length, polyorder, deriv, delta, axis, mode, cval) except IndexError: pass return np.ma.masked_invalid(x)" 940,"def feedback_results_to_measurements_frame(feedback_result): ''' Extract measured data from `FeedbackResults` instance into `pandas.DataFrame`. ''' index = pd.Index(feedback_result.time * 1e-3, name='seconds') df_feedback = pd.DataFrame(np.column_stack([feedback_result.V_fb, feedback_result.V_hv, feedback_result.fb_resistor, feedback_result.hv_resistor]), columns=['V_fb', 'V_hv', 'fb_resistor', 'hv_resistor'], index=index) df_feedback.insert(0, 'frequency', feedback_result.frequency) return df_feedback" 941,"def feedback_results_to_impedance_frame(feedback_result): ''' Extract computed impedance data from `FeedbackResults` instance into `pandas.DataFrame`. 
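# Usage sketch for the NaN-tolerant savgol_filter() wrapper defined above
# (assumes that wrapper and its numpy/pandas/scipy imports are in scope); the
# data is synthetic. Invalid samples are linearly interpolated before smoothing
# and the result comes back as a masked array.
import numpy as np

x = np.array([0.0, 1.0, np.nan, 9.0, 16.0, np.nan, 36.0, 49.0, 64.0])
y = savgol_filter(x, window_length=5, polyorder=2)
print(y)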
''' index = pd.Index(feedback_result.time * 1e-3, name='seconds') df_feedback = pd.DataFrame(np.column_stack([feedback_result.V_actuation() .filled(np.NaN), feedback_result.capacitance() .filled(np.NaN), feedback_result.Z_device() .filled(np.NaN)]), columns=['V_actuation', 'capacitance', 'impedance'], index=index) df_feedback.insert(0, 'frequency', feedback_result.frequency) df_feedback.insert(1, 'voltage', feedback_result.voltage) return df_feedback" 942,"def get_firmwares(): ''' Return `dmf_control_board` compiled Arduino hex file paths. This function may be used to locate firmware binaries that are available for flashing to [Arduino Mega2560][1] boards. [1]: http://arduino.cc/en/Main/arduinoBoardMega2560 ''' return OrderedDict([(board_dir.name, [f.abspath() for f in board_dir.walkfiles('*.hex')]) for board_dir in package_path().joinpath('firmware').dirs()])" 943,"def safe_series_resistor_index_read(f, self, channel, resistor_index=None): ''' This decorator checks the resistor-index from the current context _(i.e., the result of `self.series_resistor_index`)_. If the resistor-index specified by the `resistor_index` keyword argument is different than the current context value, the series-resistor-index is temporarily set to the value of `resistor_index` to execute the wrapped function before restoring back to the original value. ''' if resistor_index is not None: original_resistor_index = self.series_resistor_index(channel) # Save state of resistor-index if resistor_index != original_resistor_index: self.set_series_resistor_index(channel, resistor_index) value = f(self, channel) if (resistor_index is not None and resistor_index != original_resistor_index): # Restore state of resistor-index self.set_series_resistor_index(channel, original_resistor_index) return value" 944,"def remote_command(function, self, *args, **kwargs): ''' Catch `RuntimeError` exceptions raised by remote control board firmware commands and re-raise as more specific `FirmwareError` exception type, which includes command code and return code. ''' try: return function(self, *args, **kwargs) except RuntimeError, exception: error_message = str(exception) match = CRE_REMOTE_ERROR.match(error_message) if match: # Exception message matches format of remote firmware error. command_code = int(match.group('command_int')) return_code = int(match.group('return_code_int')) raise FirmwareError(command_code, return_code) match = CRE_REMOTE_COMMAND_ERROR.match(error_message) if match: # Exception message matches format of remote firmware error. command_code = int(match.group('command_int')) command_name = NAMES_BY_COMMAND_CODE[command_code] raise RuntimeError(CRE_REMOTE_COMMAND_ERROR.sub(command_name, error_message)) # Not a remote firmware error, so raise original exception. raise" 945,"def _upgrade(self): """""" Upgrade the serialized object if necessary. Raises: FutureVersionError: file was written by a future version of the software. 
"""""" logging.debug('[FeedbackResults]._upgrade()') if hasattr(self, 'version'): version = Version.fromstring(self.version) else: version = Version(0) logging.debug('[FeedbackResults] version=%s, class_version=%s' % (str(version), self.class_version)) if version > Version.fromstring(self.class_version): logging.debug('[FeedbackResults] version>class_version') raise FutureVersionError(Version.fromstring(self.class_version), version) elif version < Version.fromstring(self.class_version): if version < Version(0, 1): self.calibration = FeedbackCalibration() if version < Version(0, 2): # flag invalid data points self.version = str(Version(0, 2)) self.fb_resistor[self.V_fb > 5] = -1 self.hv_resistor[self.V_hv > 5] = -1 if version < Version(0, 3): self.attempt = 0 if version < Version(0, 4): del self.sampling_time_ms del self.delay_between_samples_ms self.voltage = self.options.voltage del self.options del self.attempt if version < Version(0, 5): self.area = 0 self.version = str(Version(0, 5)) if version < Version(0, 6): self.amplifier_gain = None self.vgnd_hv = None self.vgnd_fb = None self.version = str(Version(0, 6)) logging.info('[FeedbackResults] upgrade to version %s' % self.version) else: # Else the versions are equal and don't need to be upgraded. pass" 946,"def V_total(self): ''' Compute the input voltage (i.e., ``V1``) based on the measured high-voltage feedback values for ``V2``, using the high-voltage transfer function. See also -------- :meth:`V_actuation` for diagram with ``V1`` and ``V2`` labelled. ''' ind = mlab.find(self.hv_resistor >= 0) V1 = np.empty(self.hv_resistor.shape) V1.fill(np.nan) V1[ind] = compute_from_transfer_function(self.calibration.hw_version .major, 'V1', V2=self.V_hv[ind], R1=10e6, R2=self.calibration.R_hv [self.hv_resistor[ind]], C2=self.calibration.C_hv [self.hv_resistor[ind]], f=self.frequency) # convert to masked array V1 = np.ma.masked_invalid(pd.Series(V1, pd.to_datetime(self.time, unit='s') ).interpolate(method='time').values) V1.fill_value = np.nan V1.data[V1.mask] = V1.fill_value return V1" 947,"def V_actuation(self): ''' Return the voltage drop across the device (i.e., the ``Z1`` load) for each feedback measurement. Consider the feedback circuit diagrams below for the feedback measurement circuits of the two the control board hardware versions. .. code-block:: none # Hardware V1 # # Hardware V2 # V_1 @ frequency V_1 @ frequency ┬ ┯ ┯ │ ┌─┴─┐ ┌─┴─┐ ┌───┐ V_actuation │ │Z_1│ │Z_1│ ┌─┤Z_2├─┐ │ └─┬─┘ └─┬─┘ │ └───┘ │ ┴ ├───O V_2 │ │ │\ ├───O V_2 ┌─┴─┐ └────┴──│-\__│ │Z_2│ ┌──│+/ └─┬─┘ │ │/ ═╧═ │ ¯ ═╧═ ¯ Note that in the case of **hardware version 1**, the input voltage ``V1`` is divided across ``Z1`` *and* the feedback measurement load ``Z2``. Therefore, the effective *actuation* voltage across the DMF device is less than ``V1``. Specifically, the effective *actuation* voltage is ``V1 - V2``. In **hardware version 2**, since the positive terminal of the op-amp is attached to *(virtual)* ground, the negative op-amp terminal is also at ground potential. It follows that the actuation voltage is equal to ``V1`` on **hardware version 2**. ''' if self.calibration.hw_version.major == 1: return self.V_total() - np.array(self.V_fb) else: return self.V_total()" 948,"def Z_device(self, filter_order=None, window_size=None, tol=0.05): ''' Compute the impedance *(including resistive and capacitive load)* of the DMF device *(i.e., dielectric and droplet)*. See :func:`calibrate.compute_from_transfer_function` for details. 
''' ind = mlab.find(self.fb_resistor >= 0) Z1 = np.empty(self.fb_resistor.shape) Z1.fill(np.nan) # convert to masked array Z1 = np.ma.masked_invalid(Z1) R2 = self.calibration.R_fb[self.fb_resistor[ind]] C2 = self.calibration.C_fb[self.fb_resistor[ind]] Z1[ind] = compute_from_transfer_function(self.calibration.hw_version .major, 'Z1', V1=self.V_total()[ind], V2=self.V_fb[ind], R2=R2, C2=C2, f=self.frequency) Z1 = np.ma.masked_invalid(pd.Series(Z1, pd.to_datetime(self.time, unit='s') ).interpolate(method='time').values) Z1.fill_value = np.nan Z1.data[Z1.mask] = Z1.fill_value # if we're filtering and we don't have a window size specified, # automatically determine one if filter_order and window_size is None: window_size = self._get_window_size(tol) # if the filter_order or window size is None or if the window size is # smaller than filter_order + 2, don't filter if (filter_order is None or window_size is None or window_size < filter_order + 2): pass else: # if the window size is less than half the sample length if window_size and window_size < len(Z1) / 2: # suppress polyfit warnings with warnings.catch_warnings(): warnings.simplefilter(""ignore"") Z1 = savgol_filter(Z1, window_size, filter_order) else: # fit a line result = self.mean_velocity(tol=tol) if result['dt'] and \ result['dt'] > 0.1 * self.time[-1] and result['p'][0] > 0: if self.calibration._c_drop: c_drop = self.calibration.c_drop(self.frequency) else: c_drop = self.capacitance()[-1] / self.area if self.calibration._c_filler: c_filler = self.calibration.c_filler(self.frequency) else: c_filler = 0 x = result['p'][0]*self.time + result['p'][1] C = self.area * (x * (c_drop - c_filler) / \ np.sqrt(self.area) + c_filler) Z1 = 1.0 / (2.0 * math.pi * self.frequency * C) Z1[mlab.find(self.time==result['t_end'])[0]+1:] = \ Z1[mlab.find(self.time==result['t_end'])[0]] else: Z1 = np.mean(Z1)*np.ones(Z1.shape) return Z1" 949,"def force(self, Ly=None): ''' Estimate the applied force (in Newtons) on a drop according to the electromechanical model [1]. Ly is the length of the actuated electrode along the y-axis (perpendicular to the direction of motion) in milimeters. By default, use the square root of the actuated electrode area, i.e., Ly=Lx=sqrt(Area) To get the force normalized by electrode width (i.e., in units of N/mm), set Ly=1.0. 1. Chatterjee et al., ""Electromechanical model for actuating liquids in a two-plate droplet microfluidic device,"" Lab on a Chip, no. 9 (2009): 1219-1229. ''' if self.calibration._c_drop: c_drop = self.calibration.c_drop(self.frequency) else: c_drop = self.capacitance()[-1] / self.area if self.calibration._c_filler: c_filler = self.calibration.c_filler(self.frequency) else: c_filler = 0 if Ly is None: Ly = np.sqrt(self.area) return 1e3 * Ly * 0.5 * (c_drop - c_filler) * self.V_actuation()**2" 950,"def capacitance(self, filter_order=None, window_size=None, tol=0.05): ''' Compute the capacitance of the DMF device _(i.e., dielectric and droplet)_ based on the computed impedance value. Note: this assumes impedance is purely capacitive load. TODO: Is this assumption ok? ''' C = np.ma.masked_invalid(1.0 / (2.0 * math.pi * self.frequency * self.Z_device(filter_order=filter_order, window_size=window_size, tol=tol))) C.fill_value = np.nan C.data[C.mask] = C.fill_value return C" 951,"def x_position(self, filter_order=None, window_size=None, tol=0.05, Lx=None): ''' Calculate $x$-position according to: __ | C | ╲╱ a ⋅ | - - c_f | | a | x = ────────────── c_d - c_f where: - $C$ is the measured capacitance. 
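# Worked numeric example of the electromechanical force expression used by
# force() above, F = 1e3 * Ly * 0.5 * (c_drop - c_filler) * V**2 with Ly = 1.0
# (force per unit electrode width). The capacitance-per-area and voltage values
# below are assumptions chosen only to show the arithmetic.
c_drop = 3e-10       # F/mm^2, electrode fully covered by liquid (assumed)
c_filler = 3e-12     # F/mm^2, filler medium such as air (assumed)
v_actuation = 100.0  # V (assumed)
force_per_mm = 1e3 * 1.0 * 0.5 * (c_drop - c_filler) * v_actuation ** 2
print(force_per_mm)  # ~1.49e-3, i.e. ~1.5 mN per mm of electrode width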
- $c_f$ is the capacitance of the filler medium per unit area _(e.g., air)_. - $c_d$ is the capacitance of an electrode completely covered in liquid per unit area. - $a$ is the area of the actuated electrode(s). Note that this equation for $x$ assumes a single drop moving across an electrode with a length along the x-axis of Lx. If no value is provided for Lx, the electrode is assumed to be square, i.e., Lx=Ly=sqrt(area) ''' if self.calibration._c_drop: c_drop = self.calibration.c_drop(self.frequency) else: c_drop = self.capacitance()[-1] / self.area if self.calibration._c_filler: c_filler = self.calibration.c_filler(self.frequency) else: c_filler = 0 if Lx is None: Lx = np.sqrt(self.area) return (self.capacitance(filter_order=filter_order, window_size=window_size, tol=tol) / self.area \ - c_filler) / (c_drop - c_filler) * Lx" 952,"def mean_velocity(self, tol=0.05, Lx=None): ''' Calculate the mean velocity for a step (mm/ms which is equivalent to m/s). Fit a line to the capacitance data and get the slope. ''' dx = None dt = None p = None ind = None t_end = None if self.area == 0: return dict(dx=dx, dt=dt, p=p, ind=ind, t_end=t_end) x = self.x_position(Lx=Lx) # find the first and last valid indices ind_start = mlab.find(x.mask==False)[0] ind_last = mlab.find(x.mask==False)[-1] # if the original x value is within tol % of the final x value, include # all samples if x[ind_start] > (1 - tol) * x[ind_last] or x[ind_last] < 0: ind_stop = ind_last else: # otherwise, stop when x reaches (1 - tol) % of it's final value ind_stop = mlab.find(x > (1 - tol) * x[ind_last])[0] ind = [ind_start, ind_stop] # if we have at least 2 valid samples if len(ind) >=2: dx = np.diff(x[ind])[0] dt = np.diff(self.time[ind])[0] # ms # suppress polyfit warnings with warnings.catch_warnings(): warnings.simplefilter(""ignore"") # fit a line to the data p = np.polyfit(self.time[ind[0]:ind[1]], x[ind[0]:ind[1]], 1) # find time when the the line intercepts x[ind_last] ind_stop = mlab.find(self.time > \ (x[ind_last] - p[1]) / p[0]) if len(ind_stop): t_end = self.time[ind_stop[0]] else: t_end = self.time[-1] return dict(dx=dx, dt=dt, p=p, ind=ind, t_end=t_end)" 953,"def to_frame(self, filter_order=3): """""" Convert data to a `pandas.DataFrame`. Parameters ---------- filter_order : int Filter order to use when filtering Z_device, capacitance, x_position, and dxdt. Data is filtered using a Savitzky-Golay filter with a window size that is adjusted based on the mean velocity of the drop (see _get_window_size). 
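# Minimal illustration of the velocity estimate in mean_velocity() above: fit a
# line to drop position versus time and take the slope. The samples below are
# synthetic; time is in ms and position in mm, so the slope is converted to
# mm/s by multiplying by 1e3, as in the original code.
import numpy as np

time_ms = np.array([0.0, 10.0, 20.0, 30.0, 40.0])
x_mm = np.array([0.00, 0.24, 0.51, 0.74, 1.01])
slope, intercept = np.polyfit(time_ms, x_mm, 1)
print(slope * 1e3)  # mean velocity, roughly 25 mm/s for this synthetic data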
Returns ------- pandas.DataFrame This DataFrame is indexed by a utc_timestamp and contains the following columns: frequency: actuation frequency (Hz) target_voltage: target voltage (V) voltage: measured voltage (V) force: actuation force (uN/mm) area: actuated area (mm^2) Z_device_filtered: filtered device impedance for actuated area (Ohms) capacitance_filtered: filtered device capacitance for actuated area (F) x_position_filtered: filtered x-position of the drop (mm) dxdt_filtered: filtered instantaneous velocity of the drop (mm/s) Z_device: device impedance for actuated area (Ohms) capacitance: device capacitance for actuated area (F) x_position: x-position of the drop (mm) dxdt: instantaneous velocity of the drop (mm/s) dx: difference in the drop's x-position over the course of the step (mm) dt: time the drop is considered to have been ""moving"" (s) mean_velocity: mean drop velocity (mm/s) peak_velocity: peak drop velocity calculated from filtered instantaneous velocity (mm/s) window_size: windows size used for Savitzky-Golay filter (# bins) filter_order: order used for Savitzky-Golay filter (integer) """""" window_size = self._get_window_size() L = np.sqrt(self.area) velocity_results = self.mean_velocity(Lx=L) mean_velocity = None peak_velocity = None dx = 0 dt = 0 dxdt = np.zeros(len(self.time)) dxdt_filtered = np.zeros(len(self.time)) # if the window size is too small for filtering, set filter_order to None if filter_order and window_size and window_size < filter_order + 2: filter_order = None if velocity_results and velocity_results['dx']: mean_velocity = velocity_results['p'][0] * 1e3 dx = velocity_results['dx'] dt = velocity_results['dt'] * 1e-3 # convert to seconds t, dxdt = self.dxdt(Lx=L) # interpolate dxdt to use the same time points as the impedance values. dxdt = np.interp(self.time, t, dxdt) * 1e3 # multiply by 1000 to convert to mm/s dxdt = np.ma.masked_invalid(dxdt) t, dxdt_filtered = self.dxdt(filter_order=filter_order, Lx=L) # interpolate dxdt_filtered to use the same time points as the impedance values. dxdt_filtered = np.interp(self.time, t, dxdt_filtered) * 1e3 # multiply by 1000 to convert to mm/s dxdt_filtered = np.ma.masked_invalid(dxdt_filtered) # calculate peak velocity from filtered data peak_velocity = np.max(dxdt_filtered) index = pd.Index(self.time * 1e-3, name='step_time') df = pd.DataFrame({'target_voltage': self.voltage, # V 'voltage': self.V_actuation(), # V 'force': self.force(Ly=1.0) * 1e6, # uN/mm 'Z_device_filtered': self.Z_device(filter_order=filter_order), # Ohms 'capacitance_filtered': self.capacitance(filter_order=filter_order), # F 'x_position_filtered': self.x_position(filter_order=filter_order), # mm 'dxdt_filtered': dxdt_filtered, # mm/s 'Z_device': self.Z_device(), # Ohms 'capacitance': self.capacitance(), # F 'x_position': self.x_position(), # mm 'dxdt': dxdt, # mm/s }, index=index) df['frequency'] = self.frequency df['area'] = self.area # mm^2 df['dx'] = dx # mm df['dt'] = dt # s df['mean_velocity'] = mean_velocity # mm/s df['peak_velocity'] = peak_velocity # mm/s df['window_size'] = window_size df['filter_order'] = filter_order # re-order columns return df[[u'frequency', u'target_voltage', u'voltage', u'force', u'area', u'Z_device_filtered', u'capacitance_filtered', u'x_position_filtered', u'dxdt_filtered', u'Z_device', u'capacitance', u'x_position', u'dxdt', u'dx', u'dt', u'mean_velocity', u'peak_velocity', u'window_size', u'filter_order']]" 954,"def _upgrade(self): """""" Upgrade the serialized object if necessary. 
Raises: FutureVersionError: file was written by a future version of the software. """""" logging.debug(""[FeedbackResultsSeries]._upgrade()"") version = Version.fromstring(self.version) logging.debug('[FeedbackResultsSeries] version=%s, class_version=%s', str(version), self.class_version) if version > Version.fromstring(self.class_version): logging.debug('[FeedbackResultsSeries] version>class_version') raise FutureVersionError(Version.fromstring(self.class_version), version) elif version < Version.fromstring(self.class_version): if version < Version(0, 1): self.time = [None]*len(self.data) self.version = str(Version(0, 1))" 955,"def c_drop(self, frequency): ''' Capacitance of an electrode covered in liquid, normalized per unit area (i.e., units are F/mm^2). ''' try: return np.interp(frequency, self._c_drop['frequency'], self._c_drop['capacitance'] ) except: pass return self._c_drop" 956,"def c_filler(self, frequency): ''' Capacitance of an electrode covered in filler media (e.g., air or oil), normalized per unit area (i.e., units are F/mm^2). ''' try: return np.interp(frequency, self._c_filler['frequency'], self._c_filler['capacitance'] ) except: pass return self._c_filler" 957,"def _upgrade(self): """""" Upgrade the serialized object if necessary. Raises: FutureVersionError: file was written by a future version of the software. """""" logging.debug(""[FeedbackCalibration]._upgrade()"") version = Version.fromstring(self.version) logging.debug('[FeedbackCalibration] version=%s, class_version=%s', str(version), self.class_version) if version > Version.fromstring(self.class_version): logging.debug('[FeedbackCalibration] version>class_version') raise FutureVersionError(Version.fromstring(self.class_version), version) elif version < Version.fromstring(self.class_version): if version < Version(0, 1): self._c_filler = None self._c_drop = None self.version = str(Version(0, 1)) if version < Version(0, 2): self.hw_version = Version(1) self.version = str(Version(0, 2)) logging.info('[FeedbackCalibration] upgrade to version %s', self.version) if version < Version(0, 2): self.hw_version = Version(1) self.version = str(Version(0, 2)) logging.info('[FeedbackCalibration] upgrade to version %s', self.version) if version < Version(0, 3): self.version = str(Version(0, 3)) logging.info('[FeedbackCalibration] upgrade to version %s', self.version)" 958,"def force_to_voltage(self, force, frequency): ''' Convert a force in uN/mm to voltage. Parameters ---------- force : float Force in **uN/mm**. frequency : float Actuation frequency. Returns ------- float Actuation voltage to apply :data:`force` at an actuation frequency of :data:`frequency`. ''' c_drop = self.calibration.c_drop(frequency) # if c_filler hasn't been set, assume c_filler = 0 if self.calibration._c_filler: c_filler = self.calibration.c_filler(frequency) else: c_filler = 0 return np.sqrt(force * 1e-9/ (0.5 * (c_drop - c_filler)))" 959,"def series_capacitance(self, channel, resistor_index=None): ''' Parameters ---------- channel : int Analog channel index. resistor_index : int, optional Series resistor channel index. If :data:`resistor_index` is not specified, the resistor-index from the current context _(i.e., the result of :attr:`series_resistor_index`)_ is used. Otherwise, the series-resistor is temporarily set to the value of :data:`resistor_index` to read the capacitance before restoring back to the original value. See definition of :meth:`safe_series_resistor_index_read` decorator. 
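# Condensed, self-contained sketch of the _upgrade() migration pattern shared
# by the serialized classes above: refuse files written by a newer version and
# apply migrations stepwise up to the current class version. Plain tuples and
# the fields below stand in for the original Version objects and attributes.
import types

CLASS_VERSION = (0, 2)

class FutureVersionSketchError(Exception):
    pass  # raised when a file was written by a newer version (illustrative)

def upgrade(obj):
    version = getattr(obj, "version", (0, 0))
    if version > CLASS_VERSION:
        raise FutureVersionSketchError(version)
    if version < (0, 1):
        obj.calibration = None   # field introduced in 0.1 (illustrative)
        version = (0, 1)
    if version < (0, 2):
        obj.hw_version = 1       # field introduced in 0.2 (illustrative)
        version = (0, 2)
    obj.version = version
    return obj

state = upgrade(types.SimpleNamespace(version=(0, 0)))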
Returns ------- float Return the current series capacitance value for the specified channel. ''' if resistor_index is None: resistor_index = self.series_resistor_index(channel) value = self._series_capacitance(channel) try: if channel == 0: self.calibration.C_hv[resistor_index] = value else: self.calibration.C_fb[resistor_index] = value except: pass return value" 960,"def series_resistance(self, channel, resistor_index=None): ''' Parameters ---------- channel : int Analog channel index. resistor_index : int, optional Series resistor channel index. If :data:`resistor_index` is not specified, the resistor-index from the current context _(i.e., the result of :attr:`series_resistor_index`)_ is used. Otherwise, the series-resistor is temporarily set to the value of :data:`resistor_index` to set the capacitance before restoring back to the original value. See definition of :meth:`safe_series_resistor_index_read` decorator. Returns ------- float Return the current series resistance value for the specified channel. ''' if resistor_index is None: resistor_index = self.series_resistor_index(channel) value = self._series_resistance(channel) try: if channel == 0: self.calibration.R_hv[resistor_index] = value else: self.calibration.R_fb[resistor_index] = value except: pass return value" 961,"def set_series_capacitance(self, channel, value, resistor_index=None): ''' Set the current series capacitance value for the specified channel. Parameters ---------- channel : int Analog channel index. value : float Series capacitance value. resistor_index : int, optional Series resistor channel index. If :data:`resistor_index` is not specified, the resistor-index from the current context _(i.e., the result of :attr:`series_resistor_index`)_ is used. Otherwise, the series-resistor is temporarily set to the value of :data:`resistor_index` to read the resistance before restoring back to the original value. Returns ------- int Return code from embedded call. ''' if resistor_index is None: resistor_index = self.series_resistor_index(channel) try: if channel == 0: self.calibration.C_hv[resistor_index] = value else: self.calibration.C_fb[resistor_index] = value except: pass return self._set_series_capacitance(channel, value)" 962,"def set_series_resistance(self, channel, value, resistor_index=None): ''' Set the current series resistance value for the specified channel. Parameters ---------- channel : int Analog channel index. value : float Series resistance value. resistor_index : int, optional Series resistor channel index. If :data:`resistor_index` is not specified, the resistor-index from the current context _(i.e., the result of :attr:`series_resistor_index`)_ is used. Otherwise, the series-resistor is temporarily set to the value of :data:`resistor_index` to set the resistance before restoring back to the original value. See definition of :meth:`safe_series_resistor_index_read` decorator. Returns ------- int Return code from embedded call. ''' if resistor_index is None: resistor_index = self.series_resistor_index(channel) try: if channel == 0: self.calibration.R_hv[resistor_index] = value else: self.calibration.R_fb[resistor_index] = value except: pass return self._set_series_resistance(channel, value)" 963,"def connect(self, port=None, baud_rate=115200): ''' Parameters ---------- port : str or list-like, optional Port (or list of ports) to try to connect to as a DMF Control Board. baud_rate : int, optional Returns ------- str Port DMF control board was connected on. 
Raises ------ RuntimeError If connection could not be established. IOError If no ports were specified and Arduino Mega2560 not found on any port. ''' if isinstance(port, types.StringTypes): ports = [port] else: ports = port if not ports: # No port was specified. # # Try ports matching Mega2560 USB vendor/product ID. ports = serial_ports().index.tolist() if not ports: raise IOError(""Arduino Mega2560 not found on any port."") for comport_i in ports: if self.connected(): self.disconnect() self.port = None self._i2c_devices = {} # Try to connect to control board on available ports. try: logger.debug('Try to connect to: %s', comport_i) # Explicitly cast `comport_i` to string since `Base.connect` # Boost Python binding does not support unicode strings. # # Fixes [issue 8][issue-8]. # # [issue-8]: https://github.com/wheeler-microfluidics/dmf-control-board-firmware/issues/8 Base.connect(self, str(comport_i), baud_rate) self.port = comport_i break except BadVGND, exception: logger.warning(exception) break except RuntimeError, exception: continue else: raise RuntimeError('Could not connect to control board on any of ' 'the following ports: %s' % ports) name = self.name() version = self.hardware_version() firmware = self.software_version() serial_number_string = """" try: serial_number_string = "", S/N %03d"" % self.serial_number except: # Firmware does not support `serial_number` attribute. pass logger.info(""Connected to %s v%s (Firmware: %s%s)"" % (name, version, firmware, serial_number_string)) logger.info(""Poll control board for series resistors and "" ""capacitance values."") self._read_calibration_data() try: self.__aref__ = self._aref() logger.info(""Analog reference = %.2f V"" % self.__aref__) except: # Firmware does not support `__aref__` attribute. pass # Check VGND for both analog channels expected = 2 ** 10/2 v = {} channels = [0, 1] damaged = [] for channel in channels: try: v[channel] = np.mean(self.analog_reads(channel, 10)) logger.info(""A%d VGND = %.2f V (%.2f%% of Aref)"", channel, self.__aref__ * v[channel] / (2 ** 10), 100.0 * v[channel] / (2 ** 10)) # Make sure that the VGND is close to the expected value; # otherwise, the op-amp may be damaged (expected error # is <= 10%). if np.abs(v[channel] - expected) / expected > .1: damaged.append(channel) except: # Firmware does not support `__aref__` attribute. break # Scan I2C bus to generate list of connected devices. self._i2c_scan() if damaged: # At least one of the analog input channels appears to be damaged. if len(damaged) == 1: msg = ""Analog channel %d appears"" % damaged[0] else: msg = ""Analog channels %s appear"" % damaged raise BadVGND(msg + "" to be damaged. You may need to replace the "" ""op-amp on the control board."") return self.RETURN_OK" 964,"def persistent_write(self, address, byte, refresh_config=False): ''' Write a single byte to an address in persistent memory. Parameters ---------- address : int Address in persistent memory (e.g., EEPROM). byte : int Value to write to address. refresh_config : bool, optional Is ``True``, :meth:`load_config()` is called afterward to refresh the configuration settings. ''' self._persistent_write(address, byte) if refresh_config: self.load_config(False)" 965,"def persistent_read_multibyte(self, address, count=None, dtype=np.uint8): ''' Read a chunk of data from persistent memory. Parameters ---------- address : int Address in persistent memory (e.g., EEPROM). count : int, optional Number of values to read. If not set, read a single value of the specified :data:`dtype`. 
dtype : numpy.dtype, optional The type of the value(s) to read. Returns ------- dtype or numpy.array(dtype=dtype) If :data:`count` is ``None``, return single value. Otherwise, return array of values. ''' nbytes = np.dtype(dtype).itemsize if count is not None: nbytes *= count # Read enough bytes starting at specified address to match the # requested number of the specified data type. data_bytes = np.array([self.persistent_read(address + i) for i in xrange(nbytes)], dtype=np.uint8) # Cast byte array as array of specified data type. result = data_bytes.view(dtype) # If no count was specified, we return a scalar value rather than the # resultant array. if count is None: return result[0] return result" 966,"def persistent_write_multibyte(self, address, data, refresh_config=False): ''' Write multiple bytes to an address in persistent memory. Parameters ---------- address : int Address in persistent memory (e.g., EEPROM). data : numpy.array Data to write. refresh_config : bool, optional Is ``True``, :meth:`load_config()` is called afterward to refresh the configuration settings. ''' for i, byte in enumerate(data.view(np.uint8)): self.persistent_write(address + i, int(byte)) if refresh_config: self.load_config(False)" 967,"def measure_impedance(self, sampling_window_ms, n_sampling_windows, delay_between_windows_ms, interleave_samples, rms, state): ''' Measure voltage across load of each of the following control board feedback circuits: - Reference _(i.e., attenuated high-voltage amplifier output)_. - Load _(i.e., voltage across DMF device)_. The measured voltage _(i.e., ``V2``)_ can be used to compute the impedance of the measured load, the input voltage _(i.e., ``V1``)_, etc. Parameters ---------- sampling_window_ms : float Length of sampling window (in milleseconds) for each RMS/peak-to-peak voltage measurement. n_sampling_windows : int Number of RMS/peak-to-peak voltage measurements to take. delay_between_windows_ms : float Delay (in milleseconds) between RMS/peak-to-peak voltage measurements. interleave_samples : bool If ``True``, interleave RMS/peak-to-peak measurements for analog channels. For example, ``[, , , , ..., , ]`` where ``i`` and ``j`` correspond to two different analog channels. If ``False``, all measurements for each analog channel are taken together. For example, ``[, ..., , , ..., ]`` where ``i`` and ``j`` correspond to two different analog channels. rms : bool If ``True``, a RMS voltage measurement is collected for each sampling window. Otherwise, peak-to-peak measurements are collected. state : list State of device channels. Length should be equal to the number of device channels. Returns ------- :class:`FeedbackResults` ''' state_ = uint8_tVector() for i in range(0, len(state)): state_.append(int(state[i])) buffer = np.array(Base.measure_impedance(self, sampling_window_ms, n_sampling_windows, delay_between_windows_ms, interleave_samples, rms, state_)) return self.measure_impedance_buffer_to_feedback_result(buffer)" 968,"def sweep_channels(self, sampling_window_ms, n_sampling_windows_per_channel, delay_between_windows_ms, interleave_samples, rms, channel_mask): ''' Measure voltage across load of each of the following control board feedback circuits: - Reference _(i.e., attenuated high-voltage amplifier output)_. - Load _(i.e., voltage across DMF device)_. For each channel in the channel mask. The measured voltage _(i.e., ``V2``)_ can be used to compute the impedance of the measured load, the input voltage _(i.e., ``V1``)_, etc. 
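# Self-contained illustration of the byte-reassembly trick used by
# persistent_read_multibyte() above: bytes read one at a time from persistent
# memory are collected into a uint8 array and reinterpreted as the requested
# dtype with .view(). The byte values below encode the float32 values 1.0 and
# 2.0 on a little-endian host.
import numpy as np

raw_bytes = np.array([0, 0, 128, 63, 0, 0, 0, 64], dtype=np.uint8)
values = raw_bytes.view(np.float32)
print(values)  # [1. 2.]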
Parameters ---------- sampling_window_ms : float Length of sampling window (in milleseconds) for each RMS/peak-to-peak voltage measurement. n_sampling_windows_per_channel : int Number of RMS/peak-to-peak voltage measurements to take. delay_between_windows_ms : float Delay (in milleseconds) between RMS/peak-to-peak voltage measurements. interleave_samples : bool If ``True``, interleave RMS/peak-to-peak measurements for analog channels. For example, ``[, , , , ..., , ]`` where ``i`` and ``j`` correspond to two different analog channels. If ``False``, all measurements for each analog channel are taken together. For example, ``[, ..., , , ..., ]`` where ``i`` and ``j`` correspond to two different analog channels. rms : bool If ``True``, a RMS voltage measurement is collected for each sampling window. Otherwise, peak-to-peak measurements are collected. channel_mask : array-like State of device channels. Length should be equal to the number of device channels. Returns ------- pandas.DataFrame Table containing one actuation RMS measurement and one device load impedance measurement per row and the columns ``frequency``, ``voltage``, ``channel_i``, ``V_actuation``, ``capacitance``, and ``impedance``. Rows are indexed by time since first measurement in frame. ''' channel_cumsum = np.cumsum(channel_mask) # figure out how many channels are in the mask, and how many we can scan # per request n_channels_in_mask = channel_cumsum[-1] max_channels_per_call = (self.MAX_PAYLOAD_LENGTH - 4*4) / \ (3*2) / n_sampling_windows_per_channel # cache the channel mask self._channel_mask_cache = np.array(channel_mask) buffer = np.zeros(4) for i in range(int(math.ceil(n_channels_in_mask / max_channels_per_call))): # figure out which channels to include in this call ind = np.logical_and(channel_cumsum >= i * max_channels_per_call, channel_cumsum < (i + 1) * max_channels_per_call) # copy those channels from the cached mask channel_mask_ = np.zeros(len(self._channel_mask_cache), dtype=int) channel_mask_[ind] = self._channel_mask_cache[ind] # convert it to a uint8_tVector channel_mask_uint8 = uint8_tVector() channel_mask_uint8.extend(channel_mask_) buffer = buffer[:-4] buffer = np.concatenate((buffer, np.array(Base.sweep_channels(self, sampling_window_ms, n_sampling_windows_per_channel, delay_between_windows_ms, interleave_samples, rms, channel_mask_uint8)))) return self.sweep_channels_buffer_to_feedback_result(buffer)" 969,"def sweep_channels_slow(self, sampling_window_ms, n_sampling_windows, delay_between_windows_ms, interleave_samples, use_rms, channel_mask): ''' Measure voltage across load of each of the following control board feedback circuits: - Reference _(i.e., attenuated high-voltage amplifier output)_. - Load _(i.e., voltage across DMF device)_. For each channel in the channel mask. The measured voltage _(i.e., ``V2``)_ can be used to compute the impedance of the measured load, the input voltage _(i.e., ``V1``)_, etc. **N.B.,** Use one firmware call per channel, as opposed to scanning all channels with a single firmware call as in :meth:`sweep_channels` method. Returns ------- pandas.DataFrame Table containing one actuation RMS measurement and one device load impedance measurement per row and the columns ``frequency``, ``voltage``, ``channel_i``, ``V_actuation``, ``capacitance``, and ``impedance``. Rows are indexed by time since first measurement in frame. 
''' channel_count = len(channel_mask) scan_count = sum(channel_mask) frames = [] print '' scan_count_i = 0 # Iterate through channel mask, measuring impedance for each selected # channel in the mask. for channel_i, state_i in enumerate(channel_mask): if state_i: scan_count_i += 1 print '\rMeasure impedance: {} ({}/{})'.format(channel_i, scan_count_i, scan_count), channel_states_i = [0] * channel_count channel_states_i[channel_i] = 1 start_time_i = datetime.utcnow() feedback_results_i = \ self.measure_impedance(sampling_window_ms, n_sampling_windows, delay_between_windows_ms, interleave_samples, use_rms, channel_states_i) # Convert custom feedback results object into a # `pandas.DataFrame`. df_result_i =\ feedback_results_to_impedance_frame(feedback_results_i) df_result_i.insert(2, 'channel_i', channel_i) df_result_i.insert(0, 'utc_start', start_time_i) frames.append(df_result_i) print '' if not frames: df_result = pd.DataFrame(None, columns=['utc_start', 'seconds', 'channel_i', 'frequency', 'V_actuation', 'capacitance', 'impedance']) else: df_result = pd.concat(frames) return df_result" 970,"def i2c_write(self, address, data): ''' Parameters ---------- address : int Address of I2C device. data : array-like Array of bytes to send to device. ''' data_ = uint8_tVector() for i in range(0, len(data)): data_.append(int(data[i])) Base.i2c_write(self, address, data_)" 971,"def read_all_series_channel_values(self, f, channel): ''' Return all values for the specified channel of the type corresponding to the function `f`, where `f` is either `self.series_resistance` or `self.series_capacitance`. ''' values = [] channel_max_param_count = [3, 5] for i in range(channel_max_param_count[channel]): try: values.append(f(channel, i)) except RuntimeError: break return values" 972,"def write_all_series_channel_values(self, read_f, write_f, channel, values): ''' Return all values for the specified channel of the type corresponding to the function `f`, where `f` is either `self.series_resistance` or `self.series_capacitance`. ''' # Create a copy of the new values we intend to write. Otherwise, if # `values` is a reference to the calibration object owned by the # control board, it can be overwritten in the following step which will # prevent the update. # # See http://microfluidics.utoronto.ca/trac/dropbot/ticket/81 values = copy.deepcopy(values) # Read the current values, and only update the values that are # different. original_values = self.read_all_series_channel_values(read_f, channel) # Make sure that the number of supplied values matches the number of # corresponding values read from the channel. 
assert(len(values) == len(original_values)) for i in range(len(original_values)): if values[i] != original_values[i]: write_f(channel, values[i], i)" 973,"def _get_files_modified(): """"""Get the list of modified files that are Python or Jinja2."""""" cmd = ""git diff-index --cached --name-only --diff-filter=ACMRTUXB HEAD"" _, files_modified, _ = run(cmd) extensions = [re.escape(ext) for ext in list(SUPPORTED_FILES) + ["".rst""]] test = ""(?:{0})$"".format(""|"".join(extensions)) return list(filter(lambda f: re.search(test, f), files_modified))" 974,"def _get_git_author(): """"""Return the git author from the git variables."""""" _, stdout, _ = run(""git var GIT_AUTHOR_IDENT"") git_author = stdout[0] return git_author[:git_author.find("">"") + 1]" 975,"def _get_component(filename, default=""global""): """"""Get component name from filename."""""" if hasattr(filename, ""decode""): filename = filename.decode() parts = filename.split(os.path.sep) if len(parts) >= 3: if parts[1] in ""modules legacy ext"".split(): return parts[2] if len(parts) >= 2: if parts[1] in ""base celery utils"".split(): return parts[1] if len(parts) >= 1: if parts[0] in ""grunt docs"".split(): return parts[0] return default" 976,"def _prepare_commit_msg(tmp_file, author, files_modified=None, template=None): """"""Prepare the commit message in tmp_file. It will build the commit message prefilling the component line, as well as the signature using the git author and the modified files. The file remains untouched if it is not empty. """""" files_modified = files_modified or [] template = template or ""{component}:\n\nSigned-off-by: {author}\n{extra}"" if hasattr(template, ""decode""): template = template.decode() with open(tmp_file, ""r"", ""utf-8"") as fh: contents = fh.readlines() msg = filter(lambda x: not (x.startswith(""#"") or x.isspace()), contents) if len(list(msg)): return component = ""unknown"" components = _get_components(files_modified) if len(components) == 1: component = components[0] elif len(components) > 1: component = ""/"".join(components) contents.append( ""# WARNING: Multiple components detected - consider splitting "" ""commit.\r\n"" ) with open(tmp_file, ""w"", ""utf-8"") as fh: fh.write(template.format(component=component, author=author, extra="""".join(contents)))" 977,"def _check_message(message, options): """"""Checking the message and printing the errors."""""" options = options or dict() options.update(get_options()) options.update(_read_local_kwalitee_configuration()) errors = check_message(message, **options) if errors: for error in errors: print(error, file=sys.stderr) return False return True" 978,"def prepare_commit_msg_hook(argv): """"""Hook: prepare a commit message."""""" options = get_options() # Check if the repo has a configuration repo options.update(_read_local_kwalitee_configuration()) _prepare_commit_msg(argv[1], _get_git_author(), _get_files_modified(), options.get('template')) return 0" 979,"def commit_msg_hook(argv): """"""Hook: for checking commit message (prevent commit)."""""" with open(argv[1], ""r"", ""utf-8"") as fh: message = ""\n"".join(filter(lambda x: not x.startswith(""#""), fh.readlines())) options = {""allow_empty"": True} if not _check_message(message, options): click.echo( ""Aborting commit due to commit message errors (override with "" ""'git commit --no-verify')."", file=sys.stderr) raise click.Abort return 0" 980,"def post_commit_hook(argv): """"""Hook: for checking commit message."""""" _, stdout, _ = run(""git log -1 --format=%B HEAD"") message = 
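# Example of the message produced by the commit template used in
# _prepare_commit_msg() above, with an assumed component and author.
template = "{component}:\n\nSigned-off-by: {author}\n{extra}"
print(template.format(component="docs",
                      author="Jane Doe <jane@example.org>",
                      extra=""))
# docs:
#
# Signed-off-by: Jane Doe <jane@example.org>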
""\n"".join(stdout) options = {""allow_empty"": True} if not _check_message(message, options): click.echo( ""Commit message errors (fix with 'git commit --amend')."", file=sys.stderr) return 1 # it should not fail with exit return 0" 981,"def _read_local_kwalitee_configuration(directory="".""): """"""Check if the repo has a ``.kwalitee.yaml`` file."""""" filepath = os.path.abspath(os.path.join(directory, '.kwalitee.yml')) data = {} if os.path.exists(filepath): with open(filepath, 'r') as file_read: data = yaml.load(file_read.read()) return data" 982,"def _pre_commit(files, options): """"""Run the check on files of the added version. They might be different than the one on disk. Equivalent than doing a git stash, check, and git stash pop. """""" errors = [] tmpdir = mkdtemp() files_to_check = [] try: for (file_, content) in files: # write staged version of file to temporary directory dirname, filename = os.path.split(os.path.abspath(file_)) prefix = os.path.commonprefix([dirname, tmpdir]) dirname = os.path.relpath(dirname, start=prefix) dirname = os.path.join(tmpdir, dirname) if not os.path.isdir(dirname): os.makedirs(dirname) filename = os.path.join(dirname, filename) with open(filename, ""wb"") as fh: fh.write(content) files_to_check.append((file_, filename)) for (file_, filename) in files_to_check: errors += list(map(lambda x: ""{0}: {1}"".format(file_, x), check_file(filename, **options) or [])) finally: shutil.rmtree(tmpdir, ignore_errors=True) return errors" 983,"def pre_commit_hook(argv): """"""Hook: checking the staged files."""""" options = get_options() # Check if the repo has a configuration repo options.update(_read_local_kwalitee_configuration()) files = [] for filename in _get_files_modified(): # get the staged version of the file and # write the staged version to temp dir with its full path to # avoid overwriting files with the same name _, stdout, _ = run(""git show :{0}"".format(filename), raw_output=True) files.append((filename, stdout)) errors = _pre_commit(files, options) for error in errors: if hasattr(error, ""decode""): error = error.decode() click.echo(error, file=sys.stderr) if errors: click.echo( ""Aborting commit due to kwalitee errors (override with "" ""'git commit --no-verify')."", file=sys.stderr) raise click.Abort return 0" 984,"def run(command, raw_output=False): """"""Run a command using subprocess. :param command: command line to be run :type command: str :param raw_output: does not attempt to convert the output as unicode :type raw_output: bool :return: error code, output (``stdout``) and error (``stderr``) :rtype: tuple """""" p = Popen(command.split(), stdout=PIPE, stderr=PIPE) (stdout, stderr) = p.communicate() # On python 3, subprocess.Popen returns bytes objects. 
if not raw_output: return ( p.returncode, [line.rstrip() for line in stdout.decode(""utf-8"").splitlines()], [line.rstrip() for line in stderr.decode(""utf-8"").splitlines()] ) else: return (p.returncode, stdout, stderr)" 985,"def mpl_weight2qt(weight): """"""Convert a weight from matplotlib definition to a Qt weight Parameters ---------- weight: int or string Either an integer between 1 and 1000 or a string out of :attr:`weights_mpl2qt` Returns ------- int One type of the PyQt5.QtGui.QFont.Weight"""""" try: weight = weights_mpl2qt[weight] except KeyError: try: weight = float(weight) / 10 except (ValueError, TypeError): weight = QtGui.QFont.Normal else: try: weight = min(filter(lambda w: w >= weight, weights_qt2mpl), key=lambda w: abs(w - weight)) except ValueError: weight = QtGui.QFont.Normal return weight" 986,"def artist_to_qfont(artist): """"""Convert a :class:`matplotlib.text.Text` artist to a QFont object Parameters ---------- artist: matplotlib.text.Text The text artist, e.g. an axes title Returns ------- PyQt5.QtGui.QFont The QFont object"""""" size = int(artist.get_size()) weight = mpl_weight2qt(artist.get_weight()) italic = artist.get_style() == 'italic' for family in artist.get_family(): if family in ['sans-serif', 'cursive', 'monospace', 'serif']: for name in mpl.rcParams['font.' + family]: font = QtGui.QFont(name, size, weight, italic) if font.exactMatch(): break else: font = QtGui.QFont(family, size, weight, italic) return font" 987,"def choose_font(self, font=None): """"""Choose a font for the label through a dialog"""""" fmt_widget = self.parent() if font is None: if self.current_font: font, ok = QFontDialog.getFont( self.current_font, fmt_widget, 'Select %s font' % self.fmto_name, QFontDialog.DontUseNativeDialog) else: font, ok = QFontDialog.getFont(fmt_widget) if not ok: return self.current_font = font properties = self.load_properties() properties.update(self.qfont_to_artist_props(font)) fmt_widget.set_obj(properties) self.refresh()" 988,"def refresh(self): """"""Refresh the widgets from the current font"""""" font = self.current_font # refresh btn_bold self.btn_bold.blockSignals(True) self.btn_bold.setChecked(font.weight() > 50) self.btn_bold.blockSignals(False) # refresh btn_italic self.btn_italic.blockSignals(True) self.btn_italic.setChecked(font.italic()) self.btn_italic.blockSignals(False) # refresh font size self.spin_box.blockSignals(True) self.spin_box.setValue(font.pointSize()) self.spin_box.blockSignals(False)" 989,"def init_app(self, app): """""" Initializes a Flask object `app`: binds the HTML prettifying with app.after_request. :param app: The Flask application object. """""" app.config.setdefault('PRETTIFY', False) if app.config['PRETTIFY']: app.after_request(self._prettify_response)" 990,"def _prettify_response(self, response): """""" Prettify the HTML response. :param response: A Flask Response object. 
"""""" if response.content_type == 'text/html; charset=utf-8': ugly = response.get_data(as_text=True) soup = BeautifulSoup(ugly, 'html.parser') pretty = soup.prettify(formatter='html') response.direct_passthrough = False response.set_data(pretty) return response" 991,"async def _call(self, params): """"""Call the SABnzbd API"""""" if self._session.closed: raise SabnzbdApiException('Session already closed') p = {**self._default_params, **params} try: async with timeout(self._timeout, loop=self._session.loop): async with self._session.get(self._api_url, params=p) as resp: data = await resp.json() if data.get('status', True) is False: self._handle_error(data, params) else: return data except aiohttp.ClientError: raise SabnzbdApiException('Unable to communicate with Sabnzbd API') except asyncio.TimeoutError: raise SabnzbdApiException('SABnzbd API request timed out')" 992,"async def refresh_data(self): """"""Refresh the cached SABnzbd queue data"""""" queue = await self.get_queue() history = await self.get_history() totals = {} for k in history: if k[-4:] == 'size': totals[k] = self._convert_size(history.get(k)) self.queue = {**totals, **queue}" 993,"def _convert_size(self, size_str): """"""Convert units to GB"""""" suffix = size_str[-1] if suffix == 'K': multiplier = 1.0 / (1024.0 * 1024.0) elif suffix == 'M': multiplier = 1.0 / 1024.0 elif suffix == 'T': multiplier = 1024.0 else: multiplier = 1 try: val = float(size_str.split(' ')[0]) return val * multiplier except ValueError: return 0.0" 994,"def _handle_error(self, data, params): """"""Handle an error response from the SABnzbd API"""""" error = data.get('error', 'API call failed') mode = params.get('mode') raise SabnzbdApiException(error, mode=mode)" 995,"def __generate_key(self, config): """""" Generate the ssh key, and return the ssh config location """""" cwd = config.get('ssh_path', self._install_directory()) if config.is_affirmative('create', default=""yes""): if not os.path.exists(cwd): os.makedirs(cwd) if not os.path.exists(os.path.join(cwd, config.get('keyname'))): command = ""ssh-keygen -t %(type)s -f %(keyname)s -N "" % config.to_dict() lib.call(command, cwd=cwd, output_log_level=logging.DEBUG) if not config.has('ssh_path'): config.set('ssh_path', cwd) config.set('ssh_key_path', os.path.join(config.get('ssh_path'), config.get('keyname')))" 996,"def __install_ssh_config(self, config): """""" Install the ssh configuration """""" if not config.is_affirmative('use_global_ssh', default=""no""): ssh_config_injection = self._build_ssh_config(config) if not os.path.exists(ssh_config_path): if self.injections.in_noninjected_file(ssh_config_path, ""Host %s"" % config.get('host')): if config.is_affirmative('override'): self.injections.inject(ssh_config_path, ssh_config_injection) else: self.injections.inject(ssh_config_path, ssh_config_injection) else: self.injections.inject(ssh_config_path, ssh_config_injection) self.injections.commit()" 997,"def _build_ssh_config(self, config): """""" build the ssh injection configuration """""" ssh_config_injection = ssh_config_template % { 'host': config.get('host'), 'hostname': config.get('hostname'), 'ssh_key_path': config.get('ssh_key_path'), 'user': config.get('user') } if config.has('port'): ssh_config_injection += "" Port {0}\n"".format(config.get('port')) return ssh_config_injection" 998,"def extract_followups(task): """""" Retrieve callbacks and errbacks from provided task instance, disables tasks callbacks. 
"""""" callbacks = task.request.callbacks errbacks = task.request.errbacks task.request.callbacks = None return {'link': callbacks, 'link_error': errbacks}" 999,"def gen_procfile(ctx, wsgi, dev): """"""Generates Procfiles which can be used with honcho or foreman. """""" if wsgi is None: if os.path.exists(""wsgi.py""): wsgi = ""wsgi.py"" elif os.path.exists(""app.py""): wsgi = ""app.py"" else: wsgi = ""app.py"" ctx.invoke(gen_apppy) def write_procfile(filename, server_process, debug): processes = [server_process] + current_app.processes procfile = [] for name, cmd in procfile_processes(processes, debug).iteritems(): procfile.append(""%s: %s"" % (name, cmd)) with open(filename, ""w"") as f: f.write(""\n"".join(procfile)) write_procfile(""Procfile"", (""web"", [""gunicorn"", wsgi]), False) if dev: write_procfile(""Procfile.dev"", (""web"", [""frasco"", ""serve""]), True)" 1000,"def add(self, host, filename, data, f_type, f_other_type=None, f_text=''): """""" Add evidence :param host: db.t_hosts.id :param filename: Filename :param data: Content of file :param f_type: Evidence type :param f_other_type: If f_type is 'Other' what type it is :param f_text: Text information about the evidence :return: (True/False, response message) """""" return self.send.evidence_add(host, filename, data, f_type, f_other_type, f_text)" 1001,"def utc_mktime(utc_tuple): """"""Returns number of seconds elapsed since epoch Note that no timezone are taken into consideration. utc tuple must be: (year, month, day, hour, minute, second) """""" if len(utc_tuple) == 6: utc_tuple += (0, 0, 0) return time.mktime(utc_tuple) - time.mktime((1970, 1, 1, 0, 0, 0, 0, 0, 0))" 1002,"def f(self): """""" Time, in 12-hour hours and minutes, with minutes left off if they're zero. Examples: '1', '1:30', '2:05', '2' Proprietary extension. """""" if self.data.minute == 0: return self.g() return u'%s:%s' % (self.g(), self.i())" 1003,"def g(self): ""Hour, 12-hour format without leading zeros; i.e. '1' to '12'"" if self.data.hour == 0: return 12 if self.data.hour > 12: return self.data.hour - 12 return self.data.hour" 1004,"def P(self): """""" Time, in 12-hour hours, minutes and 'a.m.'/'p.m.', with minutes left off if they're zero and the strings 'midnight' and 'noon' if appropriate. Examples: '1 a.m.', '1:30 p.m.', 'midnight', 'noon', '12:30 p.m.' Proprietary extension. """""" if self.data.minute == 0 and self.data.hour == 0: return _('midnight') if self.data.minute == 0 and self.data.hour == 12: return _('noon') return u'%s %s' % (self.f(), self.a())" 1005,"def I(self): ""'1' if Daylight Savings Time, '0' otherwise."" if self.timezone and self.timezone.dst(self.data): return u'1' else: return u'0'" 1006,"def S(self): ""English ordinal suffix for the day of the month, 2 characters; i.e. 'st', 'nd', 'rd' or 'th'"" if self.data.day in (11, 12, 13): # Special case return u'th' last = self.data.day % 10 if last == 1: return u'st' if last == 2: return u'nd' if last == 3: return u'rd' return u'th'" 1007,"def t(self): ""Number of days in the given month; i.e. '28' to '31'"" return u'%02d' % calendar.monthrange(self.data.year, self.data.month)[1]" 1008,"def T(self): ""Time zone of this machine; e.g. 
'EST' or 'MDT'"" name = self.timezone and self.timezone.tzname(self.data) or None if name is None: name = self.format('O') return unicode(name)" 1009,"def U(self): ""Seconds since the Unix epoch (January 1 1970 00:00:00 GMT)"" if getattr(self.data, 'tzinfo', None): return int(calendar.timegm(self.data.utctimetuple())) else: return int(time.mktime(self.data.timetuple()))" 1010,"def W(self): ""ISO-8601 week number of year, weeks starting on Monday"" # Algorithm from http://www.personal.ecu.edu/mccartyr/ISOwdALG.txt week_number = None jan1_weekday = self.data.replace(month=1, day=1).weekday() + 1 weekday = self.data.weekday() + 1 day_of_year = self.z() if day_of_year <= (8 - jan1_weekday) and jan1_weekday > 4: if jan1_weekday == 5 or (jan1_weekday == 6 and calendar.isleap(self.data.year-1)): week_number = 53 else: week_number = 52 else: if calendar.isleap(self.data.year): i = 366 else: i = 365 if (i - day_of_year) < (4 - weekday): week_number = 1 else: j = day_of_year + (7 - weekday) + (jan1_weekday - 1) week_number = j // 7 if jan1_weekday > 4: week_number -= 1 return week_number" 1011,"def z(self): ""Day of the year; i.e. '0' to '365'"" doy = self.year_days[self.data.month] + self.data.day if self.L() and self.data.month > 2: doy += 1 return doy" 1012,"def Z(self): """""" Time zone offset in seconds (i.e. '-43200' to '43200'). The offset for timezones west of UTC is always negative, and for those east of UTC is always positive. """""" if not self.timezone: return 0 offset = self.timezone.utcoffset(self.data) # Only days can be negative, so negative offsets have days=-1 and # seconds positive. Positive offsets have days=0 return offset.days * 86400 + offset.seconds" 1013,"def print_metric(name, count, elapsed): """"""A metric function that prints to standard output :arg str name: name of the metric :arg int count: number of items :arg float elapsed: time in seconds """""" _do_print(name, count, elapsed, file=sys.stdout)" 1014,"def stderr_metric(name, count, elapsed): """"""A metric function that prints to standard error :arg str name: name of the metric :arg int count: number of items :arg float elapsed: time in seconds """""" _do_print(name, count, elapsed, file=sys.stderr)" 1015,"def make_multi_metric(*metrics): """"""Make a new metric function that calls the supplied metrics :arg functions metrics: metric functions :rtype: function """""" def multi_metric(name, count, elapsed): """"""Calls multiple metrics (closure)"""""" for m in metrics: m(name, count, elapsed) return multi_metric" 1016,"def _is_orphan(scc, graph): """""" Return False iff the given scc is reachable from elsewhere. """""" return all(p in scc for v in scc for p in graph.parents(v))" 1017,"def key_cycles(): """""" Collect cyclic garbage, and return the strongly connected components that were keeping the garbage alive. """""" graph = garbage() sccs = graph.strongly_connected_components() return [scc for scc in sccs if _is_orphan(scc, graph)]" 1018,"def _run_command(self, command, **kwargs): """"""Wrapper to pass command to plowshare. :param command: The command to pass to plowshare. :type command: str :param **kwargs: Additional keywords passed into :type **kwargs: dict :returns: Object containing either output of plowshare command or an error message. :rtype: dict :raises: Exception """""" try: return {'output': subprocess.check_output(command, **kwargs)} except Exception as e: return {'error': str(e)}" 1019,"def _hosts_by_success(self, hosts=[]): """"""Order hosts by most successful (least amount of errors) first. 
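The ordinal-suffix rule implemented by S() above fits in a few lines; ordinal_suffix below is an illustrative standalone rewrite, not the method from the snippet.

def ordinal_suffix(day):
    # Days 11-13 are special-cased to 'th'; otherwise the last digit decides.
    if day in (11, 12, 13):
        return "th"
    return {1: "st", 2: "nd", 3: "rd"}.get(day % 10, "th")

assert [ordinal_suffix(d) for d in (1, 2, 3, 11, 21, 22)] == ["st", "nd", "rd", "th", "st", "nd"]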
:param hosts: List of hosts. :type hosts: list :returns: List of hosts sorted by successful connections. :rtype: list """""" hosts = hosts if hosts else self.hosts return sorted(hosts, key=lambda h: self._host_errors[h])" 1020,"def _filter_sources(self, sources): """"""Remove sources with errors and return ordered by host success. :param sources: List of potential sources to connect to. :type sources: list :returns: Sorted list of potential sources without errors. :rtype: list """""" filtered, hosts = [], [] for source in sources: if 'error' in source: continue filtered.append(source) hosts.append(source['host_name']) return sorted(filtered, key=lambda s: self._hosts_by_success(hosts).index(s['host_name']))" 1021,"def upload(self, filename, number_of_hosts): """"""Upload the given file to the specified number of hosts. :param filename: The filename of the file to upload. :type filename: str :param number_of_hosts: The number of hosts to connect to. :type number_of_hosts: int :returns: A list of dicts with 'host_name' and 'url' keys for all successful uploads or an empty list if all uploads failed. :rtype: list """""" return self.multiupload(filename, self.random_hosts(number_of_hosts))" 1022,"def download(self, sources, output_directory, filename): """"""Download a file from one of the provided sources The sources will be ordered by least amount of errors, so most successful hosts will be tried first. In case of failure, the next source will be attempted, until the first successful download is completed or all sources have been depleted. :param sources: A list of dicts with 'host_name' and 'url' keys. :type sources: list :param output_directory: Directory to save the downloaded file in. :type output_directory: str :param filename: Filename assigned to the downloaded file. :type filename: str :returns: A dict with 'host_name' and 'filename' keys if the download is successful, or an empty dict otherwise. :rtype: dict """""" valid_sources = self._filter_sources(sources) if not valid_sources: return {'error': 'no valid sources'} manager = Manager() successful_downloads = manager.list([]) def f(source): if not successful_downloads: result = self.download_from_host( source, output_directory, filename) if 'error' in result: self._host_errors[source['host_name']] += 1 else: successful_downloads.append(result) multiprocessing.dummy.Pool(len(valid_sources)).map(f, valid_sources) return successful_downloads[0] if successful_downloads else {}" 1023,"def download_from_host(self, source, output_directory, filename): """"""Download a file from a given host. This method renames the file to the given string. :param source: Dictionary containing information about host. :type source: dict :param output_directory: Directory to place output in. :type output_directory: str :param filename: The filename to rename to. :type filename: str :returns: Dictionary with information about downloaded file. 
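The ordering used by _hosts_by_success above boils down to sorting hosts by an error counter. A tiny sketch, with a plain dict standing in for the internal counter of the snippet:

# Hosts with fewer recorded errors come first; ties keep insertion order (sorted is stable).
host_errors = {"hostA": 3, "hostB": 0, "hostC": 1}
assert sorted(host_errors, key=host_errors.get) == ["hostB", "hostC", "hostA"]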
:rtype: dict """""" result = self._run_command( [""plowdown"", source[""url""], ""-o"", output_directory, ""--temp-rename""], stderr=open(""/dev/null"", ""w"") ) result['host_name'] = source['host_name'] if 'error' in result: return result temporary_filename = self.parse_output( result['host_name'], result['output']) result['filename'] = os.path.join(output_directory, filename) result.pop('output') os.rename(temporary_filename, result['filename']) return result" 1024,"def multiupload(self, filename, hosts): """"""Upload file to multiple hosts simultaneously The upload will be attempted for each host until the optimal file redundancy is achieved (a percentage of successful uploads) or the host list is depleted. :param filename: The filename of the file to upload. :type filename: str :param hosts: A list of hosts as defined in the master host list. :type hosts: list :returns: A list of dicts with 'host_name' and 'url' keys for all successful uploads or an empty list if all uploads failed. :rtype: list """""" manager = Manager() successful_uploads = manager.list([]) def f(host): if len(successful_uploads) / float(len(hosts)) < \ settings.MIN_FILE_REDUNDANCY: # Optimal redundancy not achieved, keep going result = self.upload_to_host(filename, host) if 'error' in result: self._host_errors[host] += 1 else: successful_uploads.append(result) multiprocessing.dummy.Pool(len(hosts)).map( f, self._hosts_by_success(hosts)) return list(successful_uploads)" 1025,"def upload_to_host(self, filename, hostname): """"""Upload a file to the given host. This method relies on 'plowup' being installed on the system. If it succeeds, this method returns a dictionary with the host name, and the final URL. Otherwise, it returns a dictionary with the host name and an error flag. :param filename: The filename of the file to upload. :type filename: str :param hostname: The host you are uploading the file to. :type hostname: str :returns: Dictionary containing information about upload to host. :rtype: dict """""" result = self._run_command( [""plowup"", hostname, filename], stderr=open(""/dev/null"", ""w"") ) result['host_name'] = hostname if 'error' not in result: result['url'] = self.parse_output(hostname, result.pop('output')) return result" 1026,"def parse_output(self, hostname, output): """"""Parse plowup's output. For now, we just return the last line. :param hostname: Name of host you are working with. :type hostname: str :param output: Dictionary containing information about a plowshare action. :type output: dict :returns: Parsed and decoded output list. :rtype: list """""" if isinstance(output, bytes): output = output.decode('utf-8') return output.split()[-1]" 1027,"def set(self, keyword, default, from_env=True): """""" Set value on self if not already set. If unset, attempt to retrieve from environment variable of same name (unless disabled via 'from_env'). If 'default' value is not a string, evaluate environment variable as a Python type. If no env variables are found, fallback to 'default' value. 
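The environment-override behaviour described in set() above, where a non-string default causes the raw environment value to be evaluated as a Python literal, can be sketched with the standard library; MYAPP_TIMEOUT is a made-up variable name.

# Minimal sketch, assuming the same literal_eval-based casting as the snippet.
import os
from ast import literal_eval

os.environ["MYAPP_TIMEOUT"] = "30"
default = 10  # not a str, so the raw value is evaluated rather than kept as text
raw = os.environ["MYAPP_TIMEOUT"]
value = literal_eval(raw) if not isinstance(default, str) else raw
assert value == 30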
"""""" env_key = '{}{}'.format(self.ENV_PREFIX, keyword.upper()) if hasattr(self, keyword): return getattr(self, keyword) value = default if from_env and (env_key in env): env_val = env.get(env_key) should_eval = not isinstance(default, str) try: value = literal_eval(env_val) if should_eval else env_val except (ValueError, SyntaxError): raise ValueError(""Unable to cast %r to %r"" % ( env_val, type.__name__)) setattr(self, keyword, value) return getattr(self, keyword)" 1028,"def _generate_queues(queues, exchange, platform_queue): """""" Queues known by this worker """""" return set([ Queue('celery', exchange, routing_key='celery'), Queue(platform_queue, exchange, routing_key='#'), ] + [ Queue(q_name, exchange, routing_key=q_name) for q_name in queues ])" 1029,"def _erf(x): """""" Port of cephes ``ndtr.c`` ``erf`` function. See https://github.com/jeremybarnes/cephes/blob/master/cprob/ndtr.c """""" T = [ 9.60497373987051638749E0, 9.00260197203842689217E1, 2.23200534594684319226E3, 7.00332514112805075473E3, 5.55923013010394962768E4, ] U = [ 3.35617141647503099647E1, 5.21357949780152679795E2, 4.59432382970980127987E3, 2.26290000613890934246E4, 4.92673942608635921086E4, ] # Shorcut special cases if x == 0: return 0 if x >= MAXVAL: return 1 if x <= -MAXVAL: return -1 if abs(x) > 1: return 1 - erfc(x) z = x * x return x * _polevl(z, T, 4) / _p1evl(z, U, 5)" 1030,"def _erfc(a): """""" Port of cephes ``ndtr.c`` ``erfc`` function. See https://github.com/jeremybarnes/cephes/blob/master/cprob/ndtr.c """""" # approximation for abs(a) < 8 and abs(a) >= 1 P = [ 2.46196981473530512524E-10, 5.64189564831068821977E-1, 7.46321056442269912687E0, 4.86371970985681366614E1, 1.96520832956077098242E2, 5.26445194995477358631E2, 9.34528527171957607540E2, 1.02755188689515710272E3, 5.57535335369399327526E2, ] Q = [ 1.32281951154744992508E1, 8.67072140885989742329E1, 3.54937778887819891062E2, 9.75708501743205489753E2, 1.82390916687909736289E3, 2.24633760818710981792E3, 1.65666309194161350182E3, 5.57535340817727675546E2, ] # approximation for abs(a) >= 8 R = [ 5.64189583547755073984E-1, 1.27536670759978104416E0, 5.01905042251180477414E0, 6.16021097993053585195E0, 7.40974269950448939160E0, 2.97886665372100240670E0, ] S = [ 2.26052863220117276590E0, 9.39603524938001434673E0, 1.20489539808096656605E1, 1.70814450747565897222E1, 9.60896809063285878198E0, 3.36907645100081516050E0, ] # Shortcut special cases if a == 0: return 1 if a >= MAXVAL: return 0 if a <= -MAXVAL: return 2 x = a if a < 0: x = -a # computationally cheaper to calculate erf for small values, I guess. if x < 1: return 1 - erf(a) z = -a * a z = math.exp(z) if x < 8: p = _polevl(x, P, 8) q = _p1evl(x, Q, 8) else: p = _polevl(x, R, 5) q = _p1evl(x, S, 6) y = (z * p) / q if a < 0: y = 2 - y return y" 1031,"def _polevl(x, coefs, N): """""" Port of cephes ``polevl.c``: evaluate polynomial See https://github.com/jeremybarnes/cephes/blob/master/cprob/polevl.c """""" ans = 0 power = len(coefs) - 1 for coef in coefs: try: ans += coef * x**power except OverflowError: pass power -= 1 return ans" 1032,"def _ndtri(y): """""" Port of cephes ``ndtri.c``: inverse normal distribution function. 
See https://github.com/jeremybarnes/cephes/blob/master/cprob/ndtri.c """""" # approximation for 0 <= abs(z - 0.5) <= 3/8 P0 = [ -5.99633501014107895267E1, 9.80010754185999661536E1, -5.66762857469070293439E1, 1.39312609387279679503E1, -1.23916583867381258016E0, ] Q0 = [ 1.95448858338141759834E0, 4.67627912898881538453E0, 8.63602421390890590575E1, -2.25462687854119370527E2, 2.00260212380060660359E2, -8.20372256168333339912E1, 1.59056225126211695515E1, -1.18331621121330003142E0, ] # Approximation for interval z = sqrt(-2 log y ) between 2 and 8 # i.e., y between exp(-2) = .135 and exp(-32) = 1.27e-14. P1 = [ 4.05544892305962419923E0, 3.15251094599893866154E1, 5.71628192246421288162E1, 4.40805073893200834700E1, 1.46849561928858024014E1, 2.18663306850790267539E0, -1.40256079171354495875E-1, -3.50424626827848203418E-2, -8.57456785154685413611E-4, ] Q1 = [ 1.57799883256466749731E1, 4.53907635128879210584E1, 4.13172038254672030440E1, 1.50425385692907503408E1, 2.50464946208309415979E0, -1.42182922854787788574E-1, -3.80806407691578277194E-2, -9.33259480895457427372E-4, ] # Approximation for interval z = sqrt(-2 log y ) between 8 and 64 # i.e., y between exp(-32) = 1.27e-14 and exp(-2048) = 3.67e-890. P2 = [ 3.23774891776946035970E0, 6.91522889068984211695E0, 3.93881025292474443415E0, 1.33303460815807542389E0, 2.01485389549179081538E-1, 1.23716634817820021358E-2, 3.01581553508235416007E-4, 2.65806974686737550832E-6, 6.23974539184983293730E-9, ] Q2 = [ 6.02427039364742014255E0, 3.67983563856160859403E0, 1.37702099489081330271E0, 2.16236993594496635890E-1, 1.34204006088543189037E-2, 3.28014464682127739104E-4, 2.89247864745380683936E-6, 6.79019408009981274425E-9, ] sign_flag = 1 if y > (1 - EXP_NEG2): y = 1 - y sign_flag = 0 # Shortcut case where we don't need high precision # between -0.135 and 0.135 if y > EXP_NEG2: y -= 0.5 y2 = y ** 2 x = y + y * (y2 * _polevl(y2, P0, 4) / _p1evl(y2, Q0, 8)) x = x * ROOT_2PI return x x = math.sqrt(-2.0 * math.log(y)) x0 = x - math.log(x) / x z = 1.0 / x if x < 8.0: # y > exp(-32) = 1.2664165549e-14 x1 = z * _polevl(z, P1, 8) / _p1evl(z, Q1, 8) else: x1 = z * _polevl(z, P2, 8) / _p1evl(z, Q2, 8) x = x0 - x1 if sign_flag != 0: x = -x return x" 1033,"def erfinv(z): """""" Calculate the inverse error function at point ``z``. This is a direct port of the SciPy ``erfinv`` function, originally written in C. Parameters ---------- z : numeric Returns ------- float References ---------- + https://en.wikipedia.org/wiki/Error_function#Inverse_functions + http://functions.wolfram.com/GammaBetaErf/InverseErf/ Examples -------- >>> round(erfinv(0.1), 12) 0.088855990494 >>> round(erfinv(0.5), 12) 0.476936276204 >>> round(erfinv(-0.5), 12) -0.476936276204 >>> round(erfinv(0.95), 12) 1.38590382435 >>> round(erf(erfinv(0.3)), 3) 0.3 >>> round(erfinv(erf(0.5)), 3) 0.5 >>> erfinv(0) 0 >>> erfinv(1) inf >>> erfinv(-1) -inf """""" if abs(z) > 1: raise ValueError(""`z` must be between -1 and 1 inclusive"") # Shortcut special cases if z == 0: return 0 if z == 1: return inf if z == -1: return -inf # otherwise calculate things. return _ndtri((z + 1) / 2.0) / math.sqrt(2)" 1034,"def get_cmap(name, lut=None): """""" Returns the specified colormap. Parameters ---------- name: str or :class:`matplotlib.colors.Colormap` If a colormap, it returned unchanged. 
%(cmap_note)s lut: int An integer giving the number of entries desired in the lookup table Returns ------- matplotlib.colors.Colormap The colormap specified by `name` See Also -------- show_colormaps: A function to display all available colormaps Notes ----- Different from the :func::`matpltolib.pyplot.get_cmap` function, this function changes the number of colors if `name` is a :class:`matplotlib.colors.Colormap` instance to match the given `lut`."""""" if name in rcParams['colors.cmaps']: colors = rcParams['colors.cmaps'][name] lut = lut or len(colors) return FixedColorMap.from_list(name=name, colors=colors, N=lut) elif name in _cmapnames: colors = _cmapnames[name] lut = lut or len(colors) return FixedColorMap.from_list(name=name, colors=colors, N=lut) else: cmap = mpl_get_cmap(name) # Note: we could include the `lut` in the call of mpl_get_cmap, but # this raises a ValueError for colormaps like 'viridis' in mpl version # 1.5. Besides the mpl_get_cmap function does not modify the lut if # it does not match if lut is not None and cmap.N != lut: cmap = FixedColorMap.from_list( name=cmap.name, colors=cmap(np.linspace(0, 1, lut)), N=lut) return cmap" 1035,"def _get_cmaps(names): """"""Filter the given `names` for colormaps"""""" import matplotlib.pyplot as plt available_cmaps = list( chain(plt.cm.cmap_d, _cmapnames, rcParams['colors.cmaps'])) names = list(names) wrongs = [] for arg in (arg for arg in names if (not isinstance(arg, Colormap) and arg not in available_cmaps)): if isinstance(arg, str): similarkeys = get_close_matches(arg, available_cmaps) if similarkeys != []: warn(""Colormap %s not found in standard colormaps.\n"" ""Similar colormaps are %s."" % (arg, ', '.join(similarkeys))) else: warn(""Colormap %s not found in standard colormaps.\n"" ""Run function without arguments to see all colormaps"" % arg) names.remove(arg) wrongs.append(arg) if not names and not wrongs: names = sorted(m for m in available_cmaps if not m.endswith(""_r"")) return names" 1036,"def show_colormaps(names=[], N=10, show=True, use_qt=None): """"""Function to show standard colormaps from pyplot Parameters ---------- ``*args``: str or :class:`matplotlib.colors.Colormap` If a colormap, it returned unchanged. %(cmap_note)s N: int, optional Default: 11. The number of increments in the colormap. show: bool, optional Default: True. If True, show the created figure at the end with pyplot.show(block=False) use_qt: bool If True, use the :class:`psy_simple.widgets.color.ColormapDialog.show_colormaps`, if False use a matplotlib implementation based on [1]_. If None, use the Qt implementation if it is running in the psyplot GUI. Returns ------- psy_simple.widgets.color.ColormapDialog or matplitlib.figure.Figure Depending on `use_qt`, either an instance of the :class:`psy_simple.widgets.color.ColormapDialog` or the :class:`matplotlib.figure.Figure` References ---------- .. [1] http://matplotlib.org/1.2.1/examples/pylab_examples/show_colormaps.html """""" names = safe_list(names) if use_qt or (use_qt is None and psyplot.with_gui): from psy_simple.widgets.colors import ColormapDialog from psyplot_gui.main import mainwindow return ColormapDialog.show_colormap(names, N, show, parent=mainwindow) import matplotlib.pyplot as plt # This example comes from the Cookbook on www.scipy.org. According to the # history, Andrew Straw did the conversion from an old page, but it is # unclear who the original author is. a = np.vstack((np.linspace(0, 1, 256).reshape(1, -1))) # Get a list of the colormaps in matplotlib. 
Ignore the ones that end with # '_r' because these are simply reversed versions of ones that don't end # with '_r' cmaps = _get_cmaps(names) nargs = len(cmaps) + 1 fig = plt.figure(figsize=(5, 10)) fig.subplots_adjust(top=0.99, bottom=0.01, left=0.2, right=0.99) for i, m in enumerate(cmaps): ax = plt.subplot(nargs, 1, i+1) plt.axis(""off"") plt.pcolormesh(a, cmap=get_cmap(m, N + 1)) pos = list(ax.get_position().bounds) fig.text(pos[0] - 0.01, pos[1], m, fontsize=10, horizontalalignment='right') fig.canvas.set_window_title(""Figure %i: Predefined colormaps"" % fig.number) if show: plt.show(block=False) return fig" 1037,"def _create_stdout_logger(logging_level): """""" create a logger to stdout. This creates logger for a series of module we would like to log information on. """""" out_hdlr = logging.StreamHandler(sys.stdout) out_hdlr.setFormatter(logging.Formatter( '[%(asctime)s] %(message)s', ""%H:%M:%S"" )) out_hdlr.setLevel(logging_level) for name in LOGGING_NAMES: log = logging.getLogger(name) log.addHandler(out_hdlr) log.setLevel(logging_level)" 1038,"def main(): """"""Generate a PDF using the async method."""""" docraptor = DocRaptor() print(""Create PDF"") resp = docraptor.create( { ""document_content"": ""

python-docraptor Async Test
"", ""test"": True, ""async"": True, } ) print(""Status ID: {status_id}"".format(status_id=resp[""status_id""])) status_id = resp[""status_id""] resp = docraptor.status(status_id) print("" {status}"".format(status=resp[""status""])) while resp[""status""] != ""completed"": time.sleep(3) resp = docraptor.status(status_id) print("" {status}"".format(status=resp[""status""])) print(""Download to test_async.pdf"") with open(""test_async.pdf"", ""wb"") as pdf_file: pdf_file.write(docraptor.download(resp[""download_key""]).content) print(""[DONE]"")" 1039,"def get_alternate_types_resolving_forwardref_union_and_typevar(typ, _memo: List[Any] = None) \ -> Tuple[Any, ...]: """""" Returns a tuple of all alternate types allowed by the `typ` type annotation. If typ is a TypeVar, * if the typevar is bound, return get_alternate_types_resolving_forwardref_union_and_typevar(bound) * if the typevar has constraints, return a tuple containing all the types listed in the constraints (with appropriate recursive call to get_alternate_types_resolving_forwardref_union_and_typevar for each of them) * otherwise return (object, ) If typ is a Union, return a tuple containing all the types listed in the union (with appropriate recursive call to get_alternate_types_resolving_forwardref_union_and_typevar for each of them) If typ is a forward reference, it is evaluated and this method is applied to the results. Otherwise (typ, ) is returned Note that this function automatically prevent infinite recursion through forward references such as in `A = Union[str, 'A']`, by keeping a _memo of already met symbols. :param typ: :return: """""" # avoid infinite recursion by using a _memo _memo = _memo or [] if typ in _memo: return tuple() # remember that this was already explored _memo.append(typ) if is_typevar(typ): if hasattr(typ, '__bound__') and typ.__bound__ is not None: # TypeVar is 'bound' to a class if hasattr(typ, '__contravariant__') and typ.__contravariant__: # Contravariant means that only super classes of this type are supported! raise Exception('Contravariant TypeVars are not supported') else: # only subclasses of this are allowed (even if not covariant, because as of today we cant do otherwise) return get_alternate_types_resolving_forwardref_union_and_typevar(typ.__bound__, _memo=_memo) elif hasattr(typ, '__constraints__') and typ.__constraints__ is not None: if hasattr(typ, '__contravariant__') and typ.__contravariant__: # Contravariant means that only super classes of this type are supported! 
raise Exception('Contravariant TypeVars are not supported') else: # TypeVar is 'constrained' to several alternate classes, meaning that subclasses of any of them are # allowed (even if not covariant, because as of today we cant do otherwise) return tuple(typpp for c in typ.__constraints__ for typpp in get_alternate_types_resolving_forwardref_union_and_typevar(c, _memo=_memo)) else: # A non-parametrized TypeVar means 'any' return object, elif is_union_type(typ): # do not use typ.__args__, it may be wrong # the solution below works even in typevar+config cases such as u = Union[T, str][Optional[int]] return tuple(t for typpp in get_args(typ, evaluate=True) for t in get_alternate_types_resolving_forwardref_union_and_typevar(typpp, _memo=_memo)) elif is_forward_ref(typ): return get_alternate_types_resolving_forwardref_union_and_typevar(resolve_forward_ref(typ), _memo=_memo) else: return typ," 1040,"def robust_isinstance(inst, typ) -> bool: """""" Similar to isinstance, but if 'typ' is a parametrized generic Type, it is first transformed into its base generic class so that the instance check works. It is also robust to Union and Any. :param inst: :param typ: :return: """""" if typ is Any: return True if is_typevar(typ): if hasattr(typ, '__constraints__') and typ.__constraints__ is not None: typs = get_args(typ, evaluate=True) return any(robust_isinstance(inst, t) for t in typs) elif hasattr(typ, '__bound__') and typ.__bound__ is not None: return robust_isinstance(inst, typ.__bound__) else: # a raw TypeVar means 'anything' return True else: if is_union_type(typ): typs = get_args(typ, evaluate=True) return any(robust_isinstance(inst, t) for t in typs) else: return isinstance(inst, get_base_generic_type(typ))" 1041,"def get_pretty_type_str(object_type) -> str: """""" Utility method to check if a type is a subclass of typing.{List,Dict,Set,Tuple}. In that case returns a user-friendly character string with the inner item types, such as Dict[str, int]. :param object_type: :return: type.__name__ if type is not a subclass of typing.{List,Dict,Set,Tuple}, otherwise type__name__[list of inner_types.__name__] """""" try: # DO NOT resolve forward references otherwise this can lead to infinite recursion contents_item_type, contents_key_type = _extract_collection_base_type(object_type, resolve_fwd_refs=False) if isinstance(contents_item_type, tuple): return object_type.__name__ + '[' \ + ', '.join([get_pretty_type_str(item_type) for item_type in contents_item_type]) + ']' else: if contents_key_type is not None: return object_type.__name__ + '[' + get_pretty_type_str(contents_key_type) + ', ' \ + get_pretty_type_str(contents_item_type) + ']' elif contents_item_type is not None: return object_type.__name__ + '[' + get_pretty_type_str(contents_item_type) + ']' except Exception as e: pass if is_union_type(object_type): return 'Union[' + ', '.join([get_pretty_type_str(item_type) for item_type in get_args(object_type, evaluate=True)]) + ']' elif is_typevar(object_type): # typevars usually do not display their namespace so str() is compact. And it displays the cov/contrav symbol return str(object_type) else: try: return object_type.__name__ except: return str(object_type)" 1042,"def is_collection(object_type, strict: bool = False) -> bool: """""" Utility method to check if a type is a subclass of typing.{List,Dict,Set,Tuple} or of list, dict, set, tuple. 
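The Union flattening performed by get_alternate_types_resolving_forwardref_union_and_typevar above can be reproduced for the simple, non-recursive case with the standard library; the snippets themselves use typing_inspect's get_args, so the stdlib call below is a stand-in.

# Plain typing.get_args (Python 3.8+) lists the alternatives of a Union.
from typing import Optional, Union, get_args

assert get_args(Union[int, str]) == (int, str)
assert get_args(Optional[int]) == (int, type(None))  # Optional[X] is Union[X, None]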
If strict is set to True, the method will return True only if the class IS directly one of the base collection classes :param object_type: :param strict: if set to True, this method will look for a strict match. :return: """""" if object_type is None or object_type is Any or is_union_type(object_type) or is_typevar(object_type): return False elif strict: return object_type == dict \ or object_type == list \ or object_type == tuple \ or object_type == set \ or get_base_generic_type(object_type) == Dict \ or get_base_generic_type(object_type) == List \ or get_base_generic_type(object_type) == Set \ or get_base_generic_type(object_type) == Tuple else: return issubclass(object_type, Dict) \ or issubclass(object_type, List) \ or issubclass(object_type, Set) \ or issubclass(object_type, Tuple) \ or issubclass(object_type, dict) \ or issubclass(object_type, list) \ or issubclass(object_type, tuple) \ or issubclass(object_type, set)" 1043,"def get_all_subclasses(typ, recursive: bool = True, _memo = None) -> Sequence[Type[Any]]: """""" Returns all subclasses, and supports generic types. It is recursive by default See discussion at https://github.com/Stewori/pytypes/issues/31 :param typ: :param recursive: a boolean indicating whether recursion is needed :param _memo: internal variable used in recursion to avoid exploring subclasses that were already explored :return: """""" _memo = _memo or set() # if we have collected the subclasses for this already, return if typ in _memo: return [] # else remember that we have collected them, and collect them _memo.add(typ) if is_generic_type(typ): # We now use get_origin() to also find all the concrete subclasses in case the desired type is a generic sub_list = get_origin(typ).__subclasses__() else: sub_list = typ.__subclasses__() # recurse result = [] for t in sub_list: # only keep the origins in the list to = get_origin(t) or t try: if to is not typ and to not in result and is_subtype(to, typ, bound_typevars={}): result.append(to) except: # catching an error with is_subtype(Dict, Dict[str, int], bound_typevars={}) pass # recurse if recursive: for typpp in sub_list: for t in get_all_subclasses(typpp, recursive=True, _memo=_memo): # unfortunately we have to check 't not in sub_list' because with generics strange things happen # also is_subtype returns false when the parent is a generic if t not in sub_list and is_subtype(t, typ, bound_typevars={}): result.append(t) return result" 1044,"def eval_forward_ref(typ: _ForwardRef): """""" Climbs the current stack until the given Forward reference has been resolved, or raises an InvalidForwardRefError :param typ: the forward reference to resolve :return: """""" for frame in stack(): m = getmodule(frame[0]) m_name = m.__name__ if m is not None else '' if m_name.startswith('parsyfiles.tests') or not m_name.startswith('parsyfiles'): try: # print(""File {}:{}"".format(frame.filename, frame.lineno)) return typ._eval_type(frame[0].f_globals, frame[0].f_locals) except NameError: pass raise InvalidForwardRefError(typ)" 1045,"def is_valid_pep484_type_hint(typ_hint, allow_forward_refs: bool = False): """""" Returns True if the provided type is a valid PEP484 type hint, False otherwise. Note: string type hints (forward references) are not supported by default, since callers of this function in parsyfiles lib actually require them to be resolved already. 
:param typ_hint: :param allow_forward_refs: :return: """""" # most common case first, to be faster try: if isinstance(typ_hint, type): return True except: pass # optionally, check forward reference try: if allow_forward_refs and is_forward_ref(typ_hint): return True except: pass # finally check unions and typevars try: return is_union_type(typ_hint) or is_typevar(typ_hint) except: return False" 1046,"def is_pep484_nonable(typ): """""" Checks if a given type is nonable, meaning that it explicitly or implicitly declares a Union with NoneType. Nested TypeVars and Unions are supported. :param typ: :return: """""" # TODO rely on typing_inspect if there is an answer to https://github.com/ilevkivskyi/typing_inspect/issues/14 if typ is type(None): return True elif is_typevar(typ) or is_union_type(typ): return any(is_pep484_nonable(tt) for tt in get_alternate_types_resolving_forwardref_union_and_typevar(typ)) else: return False" 1047,"def _extract_collection_base_type(collection_object_type, exception_if_none: bool = True, resolve_fwd_refs: bool = True) -> Tuple[Type, Optional[Type]]: """""" Utility method to extract the base item type from a collection/iterable item type. Throws * a TypeError if the collection_object_type a Dict with non-string keys. * an AttributeError if the collection_object_type is actually not a collection * a TypeInformationRequiredError if somehow the inner type can't be found from the collection type (either if dict, list, set, tuple were used instead of their typing module equivalents (Dict, List, Set, Tuple), or if the latter were specified without inner content types (as in Dict instead of Dict[str, Foo]) :param collection_object_type: :return: a tuple containing the collection's content type (which may itself be a Tuple in case of a Tuple) and the collection's content key type for dicts (or None) """""" contents_item_type = None contents_key_type = None check_var(collection_object_type, var_types=type, var_name='collection_object_type') is_tuple = False if is_tuple_type(collection_object_type): # Tuple is a special construct, is_generic_type does not work is_tuple = True # --old: hack into typing module # if hasattr(collection_object_type, '__args__') and collection_object_type.__args__ is not None: # contents_item_type = collection_object_type.__args__ # --new : using typing_inspect # __args = get_last_args(collection_object_type) # this one works even in typevar+config cases such as t = Tuple[int, Tuple[T, T]][Optional[int]] __args = get_args(collection_object_type, evaluate=True) if len(__args) > 0: contents_item_type = __args elif issubclass(collection_object_type, Mapping): # Dictionary-like if is_generic_type(collection_object_type): # --old: hack into typing module # if hasattr(collection_object_type, '__args__') and collection_object_type.__args__ is not None: # contents_key_type, contents_item_type = collection_object_type.__args__ # --new : using typing_inspect # __args = get_last_args(collection_object_type) # this one works even in typevar+config cases such as d = Dict[int, Tuple[T, T]][Optional[int]] __args = get_args(collection_object_type, evaluate=True) if len(__args) > 0: contents_key_type, contents_item_type = __args if not issubclass(contents_key_type, str): raise TypeError('Collection object has type Dict, but its PEP484 type hints declare ' 'keys as being of type ' + str(contents_key_type) + ' which is not supported. 
Only ' 'str keys are supported at the moment, since we use them as item names') elif issubclass(collection_object_type, Iterable): # List or Set. Should we rather use Container here ? if is_generic_type(collection_object_type): # --old: hack into typing module # if hasattr(collection_object_type, '__args__') and collection_object_type.__args__ is not None: # contents_item_type = collection_object_type.__args__[0] # --new : using typing_inspect # __args = get_last_args(collection_object_type) # this one works even in typevar+config cases such as i = Iterable[Tuple[T, T]][Optional[int]] __args = get_args(collection_object_type, evaluate=True) if len(__args) > 0: contents_item_type, = __args elif issubclass(collection_object_type, dict) or issubclass(collection_object_type, list)\ or issubclass(collection_object_type, tuple) or issubclass(collection_object_type, set): # the error is now handled below with the other under-specified types situations pass else: # Not a collection raise AttributeError('Cannot extract collection base type, object type ' + str(collection_object_type) + ' is not a collection') # Finally return if something was found, otherwise tell it try: if contents_item_type is None or contents_item_type is Parameter.empty: # Empty type hints raise TypeInformationRequiredError.create_for_collection_items(collection_object_type, contents_item_type) elif is_tuple: # --- tuple: Iterate on all sub-types resolved = [] for t in contents_item_type: # Check for empty type hints if contents_item_type is None or contents_item_type is Parameter.empty: raise TypeInformationRequiredError.create_for_collection_items(collection_object_type, t) # Resolve any forward references if needed if resolve_fwd_refs: t = resolve_forward_ref(t) resolved.append(t) # Final type hint compliance if not is_valid_pep484_type_hint(t): raise InvalidPEP484TypeHint.create_for_collection_items(collection_object_type, t) if resolve_fwd_refs: contents_item_type = tuple(resolved) else: # --- Not a tuple # resolve any forward references first if resolve_fwd_refs: contents_item_type = resolve_forward_ref(contents_item_type) # check validity then if not is_valid_pep484_type_hint(contents_item_type): # Invalid type hints raise InvalidPEP484TypeHint.create_for_collection_items(collection_object_type, contents_item_type) except TypeInformationRequiredError as e: # only raise it if the flag says it if exception_if_none: raise e.with_traceback(e.__traceback__) return contents_item_type, contents_key_type" 1048,"def get_validated_attribute_type_info(typ, item_type, attr_name): """""" Routine to validate that typ is a valid non-empty PEP484 type hint. If it is a forward reference, it will be resolved :param typ: :param item_type: :param attr_name: :return: """""" if (typ is None) or (typ is Parameter.empty): raise TypeInformationRequiredError.create_for_object_attributes(item_type, attr_name, typ) # resolve forward references typ = resolve_forward_ref(typ) if not is_valid_pep484_type_hint(typ): raise InvalidPEP484TypeHint.create_for_object_attributes(item_type, attr_name, typ) return typ" 1049,"def get_constructor_attributes_types(item_type) -> Dict[str, Tuple[Type[Any], bool]]: """""" Utility method to return a dictionary of attribute name > attribute type from the constructor of a given type It supports PEP484 and 'attrs' declaration, see https://github.com/python-attrs/attrs. 
:param item_type: :return: a dictionary containing for each attr name, a tuple (type, is_mandatory) """""" res = dict() try: # -- Try to read an 'attr' declaration and to extract types and optionality from parsyfiles.plugins_optional.support_for_attrs import get_attrs_declarations decls = get_attrs_declarations(item_type) # check that types are correct for attr_name, v in decls.items(): typ, is_optional = v # -- Get and check that the attribute type is PEP484 compliant typ = get_validated_attribute_type_info(typ, item_type, attr_name) # -- optional = attrs'Optional validator was used, or a default value was set, or type is pep484 Optional is_optional = is_optional or is_pep484_nonable(typ) # -- store both info in result dict res[attr_name] = (typ, not is_optional) return res except: # ImportError or NotAnAttrsClassError but we obviously cant import the latter. pass # do not specify a type and use 'pass' so as to reset the exception context # -- Fallback to PEP484 # first get the signature of the class constructor s = _get_constructor_signature(item_type) # then extract the type and optionality of each attribute and raise errors if needed for attr_name in s.parameters.keys(): # skip the 'self' attribute if attr_name != 'self': # -- Get and check that the attribute type is PEP484 compliant typ = get_validated_attribute_type_info(s.parameters[attr_name].annotation, item_type, attr_name) # -- is the attribute mandatory ? is_mandatory = (s.parameters[attr_name].default is Parameter.empty) and not is_pep484_nonable(typ) # -- store both info in result dict res[attr_name] = (typ, is_mandatory) return res" 1050,"def create_for_collection_items(item_type, hint): """""" Helper method for collection items :param item_type: :return: """""" # this leads to infinite loops # try: # prt_type = get_pretty_type_str(item_type) # except: # prt_type = str(item_type) return TypeInformationRequiredError(""Cannot parse object of type {t} as a collection: this type has no valid "" ""PEP484 type hint about its contents: found {h}. Please use a standard "" ""PEP484 declaration such as Dict[str, Foo] or List[Foo]"" """".format(t=str(item_type), h=hint))" 1051,"def create_for_object_attributes(item_type, faulty_attribute_name: str, hint): """""" Helper method for constructor attributes :param item_type: :return: """""" # this leads to infinite loops # try: # prt_type = get_pretty_type_str(item_type) # except: # prt_type = str(item_type) return TypeInformationRequiredError(""Cannot create instances of type {t}: constructor attribute '{a}' has an"" "" invalid PEP484 type hint: {h}."".format(t=str(item_type), a=faulty_attribute_name, h=hint))" 1052,"def bounding_box_from_annotation(source=None, padding=None, **kwargs): """"""bounding_box_from_annotation(source, padding, **kwargs) -> bounding_box Creates a bounding box from the given parameters, which are, in general, annotations read using :py:func:`bob.ip.facedetect.read_annotation_file`. 
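The PEP484 fallback in get_constructor_attributes_types above reads the constructor signature. A self-contained sketch of that idea using inspect.signature; Point is a made-up class for illustration.

import inspect

class Point:
    def __init__(self, x: int, y: int = 0):
        self.x, self.y = x, y

sig = inspect.signature(Point.__init__)
params = {n: p for n, p in sig.parameters.items() if n != "self"}
types = {n: p.annotation for n, p in params.items()}
mandatory = {n: p.default is inspect.Parameter.empty for n, p in params.items()}
assert types == {"x": int, "y": int}
assert mandatory == {"x": True, "y": False}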
Different kinds of annotations are supported, given by the ``source`` keyword: * ``direct`` : bounding boxes are directly specified by keyword arguments ``topleft`` and ``bottomright`` * ``eyes`` : the left and right eyes are specified by keyword arguments ``leye`` and ``reye`` * ``left-profile`` : the left eye and the mouth are specified by keyword arguments ``eye`` and ``mouth`` * ``right-profile`` : the right eye and the mouth are specified by keyword arguments ``eye`` and ``mouth`` * ``ellipse`` : the face ellipse as well as face angle and axis radius is provided by keyword arguments ``center``, ``angle`` and ``axis_radius`` If a ``source`` is specified, the according keywords must be given as well. Otherwise, the source is estimated from the given keyword parameters if possible. If 'topleft' and 'bottomright' are given (i.e., the 'direct' source), they are taken as is. Note that the 'bottomright' is NOT included in the bounding box. Please assure that the aspect ratio of the bounding box is 6:5 (height : width). For source 'ellipse', the bounding box is computed to capture the whole ellipse, even if it is rotated. For other sources (i.e., 'eyes'), the center of the two given positions is computed, and the ``padding`` is applied, which is relative to the distance between the two given points. If ``padding`` is ``None`` (the default) the default_paddings of this source are used instead. These padding is required to keep an aspect ratio of 6:5. **Parameters:** ``source`` : str or ``None`` The type of annotations present in the list of keyword arguments, see above. ``padding`` : {'top':float, 'bottom':float, 'left':float, 'right':float} This padding is added to the center between the given points, to define the top left and bottom right positions in the bounding box; values are relative to the distance between the two given points; ignored for some of the ``source``\s ``kwargs`` : key=value Further keyword arguments specifying the annotations. **Returns:** bounding_box : :py:class:`BoundingBox` The bounding box that was estimated from the given annotations. """""" if source is None: # try to estimate the source for s,k in available_sources.items(): # check if the according keyword arguments are given if k[0] in kwargs and k[1] in kwargs: # check if we already assigned a source before if source is not None: raise ValueError(""The given list of keywords (%s) is ambiguous. Please specify a source"" % kwargs) # assign source source = s # check if a source could be estimated from the keywords if source is None: raise ValueError(""The given list of keywords (%s) could not be interpreted"" % kwargs) assert source in available_sources # use default padding if not specified if padding is None: padding = default_paddings[source] keys = available_sources[source] if source == 'ellipse': # compute the tight bounding box for the ellipse angle = kwargs['angle'] axis = kwargs['axis_radius'] center = kwargs['center'] dx = abs(math.cos(angle) * axis[0]) + abs(math.sin(angle) * axis[1]) dy = abs(math.sin(angle) * axis[0]) + abs(math.cos(angle) * axis[1]) top = center[0] - dy bottom = center[0] + dy left = center[1] - dx right = center[1] + dx elif padding is None: # There is no padding to be applied -> take nodes as they are top = kwargs[keys[0]][0] bottom = kwargs[keys[1]][0] left = kwargs[keys[0]][1] right = kwargs[keys[1]][1] else: # apply padding pos_0 = kwargs[keys[0]] pos_1 = kwargs[keys[1]] tb_center = float(pos_0[0] + pos_1[0]) / 2. lr_center = float(pos_0[1] + pos_1[1]) / 2. 
distance = math.sqrt((pos_0[0] - pos_1[0])**2 + (pos_0[1] - pos_1[1])**2) top = tb_center + padding['top'] * distance bottom = tb_center + padding['bottom'] * distance left = lr_center + padding['left'] * distance right = lr_center + padding['right'] * distance return BoundingBox((top, left), (bottom - top, right - left))" 1053,"def expected_eye_positions(bounding_box, padding = None): """"""expected_eye_positions(bounding_box, padding) -> eyes Computes the expected eye positions based on the relative coordinates of the bounding box. This function can be used to translate between bounding-box-based image cropping and eye-location-based alignment. The returned eye locations return the **average** eye locations, no landmark detection is performed. **Parameters:** ``bounding_box`` : :py:class:`BoundingBox` The face bounding box as detected by one of the functions in ``bob.ip.facedetect``. ``padding`` : {'top':float, 'bottom':float, 'left':float, 'right':float} The padding that was used for the ``eyes`` source in :py:func:`bounding_box_from_annotation`, has a proper default. **Returns:** ``eyes`` : {'reye' : (rey, rex), 'leye' : (ley, lex)} A dictionary containing the average left and right eye annotation. """""" if padding is None: padding = default_paddings['eyes'] top, left, right = padding['top'], padding['left'], padding['right'] inter_eye_distance = (bounding_box.size[1]) / (right - left) return { 'reye':(bounding_box.top_f - top*inter_eye_distance, bounding_box.left_f - left/2.*inter_eye_distance), 'leye':(bounding_box.top_f - top*inter_eye_distance, bounding_box.right_f - right/2.*inter_eye_distance) }" 1054,"def parallel_part(data, parallel): """"""parallel_part(data, parallel) -> part Splits off samples from the the given data list and the given number of parallel jobs based on the ``SGE_TASK_ID`` environment variable. **Parameters:** ``data`` : [object] A list of data that should be split up into ``parallel`` parts ``parallel`` : int or ``None`` The total number of parts, in which the data should be split into **Returns:** ``part`` : [object] The desired partition of the ``data`` """""" if parallel is None or ""SGE_TASK_ID"" not in os.environ: return data data_per_job = int(math.ceil(float(len(data)) / float(parallel))) task_id = int(os.environ['SGE_TASK_ID']) first = (task_id-1) * data_per_job last = min(len(data), task_id * data_per_job) return data[first:last]" 1055,"def quasi_random_indices(number_of_total_items, number_of_desired_items = None): """"""quasi_random_indices(number_of_total_items, [number_of_desired_items]) -> index Yields an iterator to a quasi-random list of indices that will contain exactly the number of desired indices (or the number of total items in the list, if this is smaller). This function can be used to retrieve a consistent and reproducible list of indices of the data, in case the ``number_of_total_items`` is lower that the given ``number_of_desired_items``. **Parameters:** ``number_of_total_items`` : int The total number of elements in the collection, which should be sub-sampled ``number_of_desired_items`` : int or ``None`` The number of items that should be used; if ``None`` or greater than ``number_of_total_items``, all indices are yielded **Yields:** ``index`` : int An iterator to indices, which will span ``number_of_total_items`` evenly. 
"""""" # check if we need to compute a sublist at all if number_of_desired_items is None or number_of_desired_items >= number_of_total_items or number_of_desired_items < 0: for i in range(number_of_total_items): yield i else: increase = float(number_of_total_items)/float(number_of_desired_items) # generate a regular quasi-random index list for i in range(number_of_desired_items): yield int((i +.5)*increase)" 1056,"def exception_class(self, exception): """"""Return a name representing the class of an exception."""""" cls = type(exception) if cls.__module__ == 'exceptions': # Built-in exception. return cls.__name__ return ""%s.%s"" % (cls.__module__, cls.__name__)" 1057,"def request_info(self, request): """""" Return a dictionary of information for a given request. This will be run once for every request. """""" # We have to re-resolve the request path here, because the information # is not stored on the request. view, args, kwargs = resolve(request.path) for i, arg in enumerate(args): kwargs[i] = arg parameters = {} parameters.update(kwargs) parameters.update(request.POST.items()) environ = request.META return { ""session"": dict(request.session), 'cookies': dict(request.COOKIES), 'headers': dict(get_headers(environ)), 'env': dict(get_environ(environ)), ""remote_ip"": request.META[""REMOTE_ADDR""], ""parameters"": parameters, ""action"": view.__name__, ""application"": view.__module__, ""method"": request.method, ""url"": request.build_absolute_uri() }" 1058,"def run(self, training_set, trainer, filename = ""bootstrapped_model.hdf5"", force = False): """"""run(training_set, trainer, [filename], [force]) -> model Runs the bootstrapped training of a strong classifier using the given training data and a strong classifier trainer. The training set need to contain extracted features already, as this function will need the features several times. **Parameters:** ``training_set`` : :py:class:`TrainingSet` The training set containing pre-extracted feature files ``trainer`` : :py:class:`bob.learn.boosting.Boosting` A strong boosting trainer to use for selecting the weak classifiers and their weights for each round. ``filename`` : str A filename, where to write the resulting strong classifier to. This filename is also used as a base to compute filenames of intermediate files, which store results of each of the bootstrapping steps. ``force`` : bool If set to ``False`` (the default), the bootstrapping will continue the round, where it has been stopped during the last run (reading the current stage from respective files). If set to ``True``, the training will start from the beginning. **Returns:** ``model`` : :py:class:`bob.learn.boosting.BoostedMachine` The resulting strong classifier, a weighted combination of weak classifiers. 
"""""" feature_extractor = training_set.feature_extractor() training_data = None training_labels = None model = None positive_indices, negative_indices = set(), set() for b in range(self.m_number_of_rounds): # check if old results are present temp_file = ""%s_round_%d.hdf5"" % (os.path.splitext(filename)[0], b+1) if os.path.exists(temp_file) and not force: logger.info(""Loading already computed stage %d from %s."", b+1, temp_file) model, positives, negatives = self._load(bob.io.base.HDF5File(temp_file)) positive_indices |= positives negative_indices |= negatives else: if positive_indices or negative_indices: # load data from previous rounds logger.info(""Getting training data of previous rounds"") training_data, training_labels = training_set.sample(positive_indices = positive_indices, negative_indices = negative_indices) positive_indices, negative_indices = set(), set() # get data for current round logger.info(""Getting new data for bootstrapping round %d"", b+1) new_data, new_labels = training_set.sample(model, self.m_number_of_positive_examples_per_round, self.m_number_of_negative_examples_per_round) if training_data is None: training_data = new_data else: training_data = numpy.append(training_data, new_data, axis=0) if training_labels is None: training_labels = new_labels else: training_labels = numpy.append(training_labels, new_labels, axis=0) logger.info(""Starting training with %d examples"", training_data.shape[0]) model = trainer.train(training_data, training_labels, self.m_number_of_weak_learners_per_round[b], model) # write model and extractor to temporary file to be able to catch up later logger.info(""Saving results for stage %d to file %s"", b+1, temp_file) self._save(bob.io.base.HDF5File(temp_file, 'w'), model, training_set.positive_indices, training_set.negative_indices) feature_extractor.model_indices = model.indices # finally, return the trained model return model" 1059,"def _save(self, hdf5, model, positives, negatives): """"""Saves the given intermediate state of the bootstrapping to file."""""" # write the model and the training set indices to the given HDF5 file hdf5.set(""PositiveIndices"", sorted(list(positives))) hdf5.set(""NegativeIndices"", sorted(list(negatives))) hdf5.create_group(""Model"") hdf5.cd(""Model"") model.save(hdf5) del hdf5" 1060,"def _load(self, hdf5): """"""Loads the intermediate state of the bootstrapping from file."""""" positives = set(hdf5.get(""PositiveIndices"")) negatives = set(hdf5.get(""NegativeIndices"")) hdf5.cd(""Model"") model = bob.learn.boosting.BoostedMachine(hdf5) return model, positives, negatives" 1061,"def _optimize_switches(self, minimumLength=2): ''' converts sequences of compare/cjmp to switch instructions this must happen BEFORE linking minimumLength describes the minum amount of sequential compare/cjmp combinations needed to switch to a switch AUTOMATICALLY called by compile ''' # locate all the targets of switch statements q = util.OneTimeQueue() targets = {} for i in range(len(self)): if isinstance(self[i], Compare) and isinstance(self[i+1], CondJmp): q.append(i) while q: front = q.popleft() i = front targets[i] = {} targets[i][self[i].arg1] = self[i+1].arg1 while isinstance(self[i+2], Compare) and isinstance(self[i+3], CondJmp): i += 2 targets[front][self[i].arg1] = self[i+1].arg1 q.remove(i) if len(targets[front]) < minimumLength: # don't convert single cjmps to switches del targets[front] # now replace sequences with their switch statements # in order for our instruction numbers to be valid, do this # in reverse order 
_keys = targets.keys() _keys.sort() _keys.reverse() for i in _keys: del self[i:i+(len(targets[i])*2)] self.insert(i, Switch(targets[i]))" 1062,"def undelay(self): '''resolves all delayed arguments''' i = 0 while i < len(self): op = self[i] i += 1 if hasattr(op, 'arg1'): if isinstance(op.arg1,DelayedArg): op.arg1 = op.arg1.resolve() if isinstance(op.arg1,CodeBlock): op.arg1.undelay()" 1063,"def setup_logging(log_level=logging.INFO): """"""Set up the logging."""""" logging.basicConfig(level=log_level) fmt = (""%(asctime)s %(levelname)s (%(threadName)s) "" ""[%(name)s] %(message)s"") colorfmt = ""%(log_color)s{}%(reset)s"".format(fmt) datefmt = '%Y-%m-%d %H:%M:%S' # Suppress overly verbose logs from libraries that aren't helpful logging.getLogger('requests').setLevel(logging.WARNING) try: from colorlog import ColoredFormatter logging.getLogger().handlers[0].setFormatter(ColoredFormatter( colorfmt, datefmt=datefmt, reset=True, log_colors={ 'DEBUG': 'cyan', 'INFO': 'green', 'WARNING': 'yellow', 'ERROR': 'red', 'CRITICAL': 'red', } )) except ImportError: pass logger = logging.getLogger('') logger.setLevel(log_level)" 1064,"def get_arguments(): """"""Get parsed arguments."""""" parser = argparse.ArgumentParser(""Lupupy: Command Line Utility"") parser.add_argument( '-u', '--username', help='Username', required=False) parser.add_argument( '-p', '--password', help='Password', required=False) parser.add_argument( '--arm', help='Arm alarm to mode', required=False, default=False, action=""store_true"") parser.add_argument( '-i', '--ip_address', help='IP of the Lupus panel', required=False) parser.add_argument( '--disarm', help='Disarm the alarm', required=False, default=False, action=""store_true"") parser.add_argument( '--home', help='Set to home mode', required=False, default=False, action=""store_true"") parser.add_argument( '--devices', help='Output all devices', required=False, default=False, action=""store_true"") parser.add_argument( '--history', help='Get the history', required=False, default=False, action=""store_true"") parser.add_argument( '--status', help='Get the status of the panel', required=False, default=False, action=""store_true"") parser.add_argument( '--debug', help='Enable debug logging', required=False, default=False, action=""store_true"") parser.add_argument( '--quiet', help='Output only warnings and errors', required=False, default=False, action=""store_true"") return parser.parse_args()" 1065,"def call(): """"""Execute command line helper."""""" args = get_arguments() if args.debug: log_level = logging.DEBUG elif args.quiet: log_level = logging.WARN else: log_level = logging.INFO setup_logging(log_level) lupusec = None if not args.username or not args.password or not args.ip_address: raise Exception(""Please supply a username, password and ip."") def _devicePrint(dev, append=''): _LOGGER.info(""%s%s"", dev.desc, append) try: if args.username and args.password and args.ip_address: lupusec = lupupy.Lupusec(ip_address=args.ip_address, username=args.username, password=args.password) if args.arm: if lupusec.get_alarm().set_away(): _LOGGER.info('Alarm mode changed to armed') else: _LOGGER.warning('Failed to change alarm mode to armed') if args.disarm: if lupusec.get_alarm().set_standby(): _LOGGER.info('Alarm mode changed to disarmed') else: _LOGGER.warning('Failed to change alarm mode to disarmed') if args.home: if lupusec.get_alarm().set_home(): _LOGGER.info('Alarm mode changed to home') else: _LOGGER.warning('Failed to change alarm mode to home') if args.history: 
_LOGGER.info(json.dumps(lupusec.get_history()['hisrows'], indent=4, sort_keys=True)) if args.status: _LOGGER.info('Mode of panel: %s', lupusec.get_alarm().mode) if args.devices: for device in lupusec.get_devices(): _devicePrint(device) except lupupy.LupusecException as exc: _LOGGER.error(exc) finally: _LOGGER.info('--Finished running--')" 1066,"def get_member_ibutton(self, val): """"""Get a CSHMember object. Arguments: val -- the iButton ID of the member Returns: None if the iButton supplied does not correspond to a CSH Member """""" members = self.__con__.search_s( CSHMember.__ldap_user_ou__, ldap.SCOPE_SUBTREE, ""(ibutton=%s)"" % val, ['ipaUniqueID']) if members: return CSHMember( self, members[0][1]['ipaUniqueID'][0].decode('utf-8'), False) return None" 1067,"def get_member_slackuid(self, slack): """"""Get a CSHMember object. Arguments: slack -- the Slack UID of the member Returns: None if the Slack UID provided does not correspond to a CSH Member """""" members = self.__con__.search_s( CSHMember.__ldap_user_ou__, ldap.SCOPE_SUBTREE, ""(slackuid=%s)"" % slack, ['ipaUniqueID']) if members: return CSHMember( self, members[0][1]['ipaUniqueID'][0].decode('utf-8'), False) return None" 1068,"def get_directorship_heads(self, val): """"""Get the head of a directorship Arguments: val -- the cn of the directorship """""" __ldap_group_ou__ = ""cn=groups,cn=accounts,dc=csh,dc=rit,dc=edu"" res = self.__con__.search_s( __ldap_group_ou__, ldap.SCOPE_SUBTREE, ""(cn=eboard-%s)"" % val, ['member']) ret = [] for member in res[0][1]['member']: try: ret.append(member.decode('utf-8')) except UnicodeDecodeError: ret.append(member) except KeyError: continue return [CSHMember(self, dn.split('=')[1].split(',')[0], True) for dn in ret]" 1069,"def enqueue_mod(self, dn, mod): """"""Enqueue a LDAP modification. Arguments: dn -- the distinguished name of the object to modify mod -- an ldap modfication entry to enqueue """""" # mark for update if dn not in self.__pending_mod_dn__: self.__pending_mod_dn__.append(dn) self.__mod_queue__[dn] = [] self.__mod_queue__[dn].append(mod)" 1070,"def flush_mod(self): """"""Flush all pending LDAP modifications."""""" for dn in self.__pending_mod_dn__: try: if self.__ro__: for mod in self.__mod_queue__[dn]: if mod[0] == ldap.MOD_DELETE: mod_str = ""DELETE"" elif mod[0] == ldap.MOD_ADD: mod_str = ""ADD"" else: mod_str = ""REPLACE"" print(""{} VALUE {} = {} FOR {}"".format(mod_str, mod[1], mod[2], dn)) else: self.__con__.modify_s(dn, self.__mod_queue__[dn]) except ldap.TYPE_OR_VALUE_EXISTS: print(""Error! Conflicting Batch Modification: %s"" % str(self.__mod_queue__[dn])) continue except ldap.NO_SUCH_ATTRIBUTE: print(""Error! Conflicting Batch Modification: %s"" % str(self.__mod_queue__[dn])) continue self.__mod_queue__[dn] = None self.__pending_mod_dn__ = []" 1071,"def detect_encoding(value): """"""Returns the character encoding for a JSON string."""""" # https://tools.ietf.org/html/rfc4627#section-3 if six.PY2: null_pattern = tuple(bool(ord(char)) for char in value[:4]) else: null_pattern = tuple(bool(char) for char in value[:4]) encodings = { # Zero is a null-byte, 1 is anything else. 
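# Added note: RFC 4627 relies on the first two characters of a JSON text
# always being ASCII, so the null-byte layout of the first four bytes
# identifies the encoding, e.g. an ASCII character in UTF-32-BE is encoded as
# three null bytes followed by one non-null byte -- the (0, 0, 0, 1) pattern below.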
(0, 0, 0, 1): 'utf-32-be', (0, 1, 0, 1): 'utf-16-be', (1, 0, 0, 0): 'utf-32-le', (1, 0, 1, 0): 'utf-16-le', } return encodings.get(null_pattern, 'utf-8')" 1072,"def _merge_params(url, params): """"""Merge and encode query parameters with an URL."""""" if isinstance(params, dict): params = list(params.items()) scheme, netloc, path, query, fragment = urllib.parse.urlsplit(url) url_params = urllib.parse.parse_qsl(query, keep_blank_values=True) url_params.extend(params) query = _encode_data(url_params) return urllib.parse.urlunsplit((scheme, netloc, path, query, fragment))" 1073,"def json(self, **kwargs): """"""Decodes response as JSON."""""" encoding = detect_encoding(self.content[:4]) value = self.content.decode(encoding) return simplejson.loads(value, **kwargs)" 1074,"def links(self): """"""A dict of dicts parsed from the response 'Link' header (if set)."""""" # ; rel=""next"", ; rel=""last""' # becomes # { # 'last': {'rel': 'last', 'url': 'https://example.com/?page=34'}, # 'next': {'rel': 'next', 'url': 'https://example.com/?page=2'}, # }, result = {} if 'Link' in self.headers: value = self.headers['Link'] for part in re.split(r', *<', value): link = {} vs = part.split(';') # First section is always an url. link['url'] = vs.pop(0).strip('\'"" <>') for v in vs: if '=' in v: key, v = v.split('=') link[key.strip('\'"" ')] = v.strip('\'"" ') rkey = link.get('rel') or link['url'] result[rkey] = link return result" 1075,"def raise_for_status(self): """"""Raises HTTPError if the request got an error."""""" if 400 <= self.status_code < 600: message = 'Error %s for %s' % (self.status_code, self.url) raise HTTPError(message)" 1076,"def unpack_text_io_wrapper(fp, encoding): """""" If *fp* is a #io.TextIOWrapper object, this function returns the underlying binary stream and the encoding of the IO-wrapper object. If *encoding* is not None and does not match with the encoding specified in the IO-wrapper, a #RuntimeError is raised. """""" if isinstance(fp, io.TextIOWrapper): if fp.writable() and encoding is not None and fp.encoding != encoding: msg = 'TextIOWrapper.encoding({0!r}) != {1!r}' raise RuntimeError(msg.format(fp.encoding, encoding)) if encoding is None: encoding = fp.encoding fp = fp.buffer return fp, encoding" 1077,"def metric(cls, name, count, elapsed): """"""A metric function that buffers through numpy :arg str name: name of the metric :arg int count: number of items :arg float elapsed: time in seconds """""" if name is None: warnings.warn(""Ignoring unnamed metric"", stacklevel=3) return with cls.lock: # register with atexit on first call if cls.dump_atexit and not cls.instances: atexit.register(cls.dump) try: self = cls.instances[name] except KeyError: self = cls.instances[name] = cls(name) self.temp.write(self.struct.pack(count, elapsed))" 1078,"def dump(cls): """"""Output all recorded metrics"""""" with cls.lock: if not cls.instances: return atexit.unregister(cls.dump) cls._pre_dump() for self in cls.instances.values(): self._dump() cls._post_dump()" 1079,"def _dump(self): """"""dump data for an individual metric. 
For internal use only."""""" try: self.temp.seek(0) # seek to beginning arr = np.fromfile(self.temp, self.dtype) self.count_arr = arr['count'] self.elapsed_arr = arr['elapsed'] if self.calc_stats: # calculate mean & standard deviation self.count_mean = np.mean(self.count_arr) self.count_std = np.std(self.count_arr) self.elapsed_mean = np.mean(self.elapsed_arr) self.elapsed_std = np.std(self.elapsed_arr) self._output() finally: self.temp.close() self._cleanup()" 1080,"def list(self, host_rec=None, service_rec=None, hostfilter=None): """""" Returns a list of vulnerabilities based on t_hosts.id or t_services.id. If neither are set then statistical results are added :param host_rec: db.t_hosts.id :param service_rec: db.t_services.id :param hostfilter: Valid hostfilter or None :return: [(vulndata) ...] if host_rec or service_rec set :return: [(vulndata, vuln_cnt, [vuln_ip, ...], [services ...]) ...] if nothing sent """""" return self.send.vuln_list(host_rec, service_rec, hostfilter)" 1081,"def ip_info(self, vuln_name=None, vuln_id=None, ip_list_only=True, hostfilter=None): """""" List of all IP Addresses with a vulnerability :param vuln_name: t_vulndata.f_vulnid :param vuln_id: t_vulndata.id :param ip_list_only: IP List only (default) or rest of t_hosts fields :param hostfilter: Valid hostfilter or none :return: [(ip, hostname) ...] or [(ip, hostname, t_service_vulns.f_proof, t_service_vulns.f_status), ...] """""" return self.send.vuln_ip_info(vuln_name, vuln_id, ip_list_only, hostfilter)" 1082,"def service_list(self, vuln_name=None, vuln_id=None, hostfilter=None): """""" Returns a dictionary of vulns with services and IP Addresses :param vuln_name: t_vulndata.f_vulnid :param vuln_id: t_vulndata.id :param hostfilter: Valid hostfilter or none :return: {'vuln-id': {'port': [ ip, hostname ]} ...} ... """""" return self.send.vuln_service_list(vuln_name, vuln_id, hostfilter)" 1083,"def import_code(mod_code, mod_name): """"""Create a module object by code. @param mod_code: the code that the module contains. @param mod_name: module name. """""" mod_obj = imp.new_module(mod_name) mod_obj.__file__ = None exec_(mod_code, mod_obj.__dict__, mod_obj.__dict__) add_to_sys_modules(mod_name=mod_name, mod_obj=mod_obj) return mod_obj" 1084,"def import_name(mod_name): """"""Import a module by module name. @param mod_name: module name. """""" try: mod_obj_old = sys.modules[mod_name] except KeyError: mod_obj_old = None if mod_obj_old is not None: return mod_obj_old __import__(mod_name) mod_obj = sys.modules[mod_name] return mod_obj" 1085,"def import_path(mod_path, mod_name): """"""Import a module by module file path. @param mod_path: module file path. @param mod_name: module name. """""" mod_code = open(mod_path).read() mod_obj = import_code( mod_code=mod_code, mod_name=mod_name, ) if not hasattr(mod_obj, '__file__'): mod_obj.__file__ = mod_path return mod_obj" 1086,"def import_obj( uri, mod_name=None, mod_attr_sep='::', attr_chain_sep='.', retn_mod=False, ): """"""Load an object from a module. @param uri: an uri specifying which object to load. An `uri` consists of two parts: module URI and attribute chain, e.g. `a/b/c.py::x.y.z` or `a.b.c::x.y.z` # Module URI E.g. `a/b/c.py` or `a.b.c`. Can be either a module name or a file path. Whether it is a file path is determined by whether it ends with `.py`. # Attribute chain E.g. `x.y.z`. @param mod_name: module name. Must be given when `uri` specifies a module file path, not a module name. @param mod_attr_sep: the separator between module name and attribute name. 
@param attr_chain_sep: the separator between parts of attribute name. @retn_mod: whether return module object. """""" if mod_attr_sep is None: mod_attr_sep = '::' uri_parts = split_uri(uri=uri, mod_attr_sep=mod_attr_sep) protocol, mod_uri, attr_chain = uri_parts if protocol == 'py': mod_obj = import_name(mod_uri) else: if not mod_name: msg = ( 'Argument `mod_name` must be given when loading by file path.' ) raise ValueError(msg) mod_obj = import_path(mod_uri, mod_name=mod_name) if not attr_chain: if retn_mod: return mod_obj, None else: return mod_obj if attr_chain_sep is None: attr_chain_sep = '.' attr_obj = get_attr_chain( obj=mod_obj, attr_chain=attr_chain, sep=attr_chain_sep, ) if retn_mod: return mod_obj, attr_obj else: return attr_obj" 1087,"def add_to_sys_modules(mod_name, mod_obj=None): """"""Add a module object to `sys.modules`. @param mod_name: module name, used as key to `sys.modules`. If `mod_name` is `a.b.c` while modules `a` and `a.b` are not existing, empty modules will be created for `a` and `a.b` as well. @param mod_obj: a module object. If None, an empty module object will be created. """""" mod_snames = mod_name.split('.') parent_mod_name = '' parent_mod_obj = None for mod_sname in mod_snames: if parent_mod_name == '': current_mod_name = mod_sname else: current_mod_name = parent_mod_name + '.' + mod_sname if current_mod_name == mod_name: current_mod_obj = mod_obj else: current_mod_obj = sys.modules.get(current_mod_name, None) if current_mod_obj is None: current_mod_obj = imp.new_module(current_mod_name) sys.modules[current_mod_name] = current_mod_obj if parent_mod_obj is not None: setattr(parent_mod_obj, mod_sname, current_mod_obj) parent_mod_name = current_mod_name parent_mod_obj = current_mod_obj" 1088,"def split_uri(uri, mod_attr_sep='::'): """"""Split given URI into a tuple of (protocol, module URI, attribute chain). @param mod_attr_sep: the separator between module name and attribute name. """""" uri_parts = uri.split(mod_attr_sep, 1) if len(uri_parts) == 2: mod_uri, attr_chain = uri_parts else: mod_uri = uri_parts[0] attr_chain = None if mod_uri.startswith('py://'): protocol = 'py' mod_uri = mod_uri[5:] elif mod_uri.startswith('file://'): protocol = 'file' mod_uri = mod_uri[7:] # If no protocol prefix is present, and the uri ends with `.py`, then # consider the uri as module file path instead of module name. elif mod_uri.endswith('.py'): protocol = 'file' else: protocol = 'py' info = (protocol, mod_uri, attr_chain) return info" 1089,"def get_attr_chain(obj, attr_chain, sep='.'): """"""Get the last attribute of given attribute chain. E.g. `get_attr_chain(x, 'a.b.c')` is equivalent to `x.a.b.c`. @param obj: an object @param attr_chain: a chain of attribute names @param sep: separator for the chain of attribute names """""" if sep is None: sep = '.' attr_names = attr_chain.split(sep) new_obj = obj for attr_name in attr_names: new_obj = getattr(new_obj, attr_name) return new_obj" 1090,"def is_closed(self): """""" _check if the observation table is closed. Args: None Returns: tuple (bool, str): True if the observation table is closed and false otherwise. If the table is not closed the escaping string is returned. """""" for t in self.smi_vector: found = False for s in self.sm_vector: if self.observation_table[s] == self.observation_table[t]: self.equiv_classes[t] = s found = True break if not found: return False, t return True, None" 1091,"def _fill_table_entry(self, row, col): """""""""" Fill an entry of the observation table. 
Args: row (str): The row of the observation table col (str): The column of the observation table Returns: None """""" prefix = self._membership_query(row) full_output = self._membership_query(row + col) length = len(commonprefix([prefix, full_output])) self.observation_table[row, col] = full_output[length:]" 1092,"def _run_in_hypothesis(self, mma, w_string, index): """""""""" Run the string in the hypothesis automaton for index steps and then return the access string for the state reached concatanated with the rest of the string w. Args: mma (DFA): The hypothesis automaton w_string (str): The examined string to be consumed index (int): The index value for selecting the prefix of w Return: str: The access string """""" state = mma[0] for i in range(index): for arc in state: if mma.isyms.find(arc.ilabel) == w_string[i]: state = mma[arc.nextstate] s_index = arc.nextstate # The id of the state is its index inside the Sm list access_string = self.observation_table.sm_vector[s_index] logging.debug( 'Access string for %d: %s - %d ', index, access_string, s_index) return access_string" 1093,"def _check_suffix(self, w_string, access_string, index): """""" Checks if access string suffix matches with the examined string suffix Args: w_string (str): The examined string to be consumed access_string (str): The access string for the state index (int): The index value for selecting the prefix of w Returns: bool: A boolean valuei indicating if matching was successful """""" prefix_as = self._membership_query(access_string) full_as = self._membership_query(access_string + w_string[index:]) prefix_w = self._membership_query(w_string[:index]) full_w = self._membership_query(w_string) length = len(commonprefix([prefix_as, full_as])) as_suffix = full_as[length:] length = len(commonprefix([prefix_w, full_w])) w_suffix = full_w[length:] if as_suffix != w_suffix: logging.debug('Access string state incorrect') return True logging.debug('Access string state correct.') return False" 1094,"def _find_bad_transition(self, mma, w_string): """""" Checks for bad DFA transitions using the examined string Args: mma (DFA): The hypothesis automaton w_string (str): The examined string to be consumed Returns: str: The prefix of the examined string that matches """""" conj_out = mma.consume_input(w_string) targ_out = self._membership_query(w_string) # TODO: handle different length outputs from conjecture and target # hypothesis. length = min(len(conj_out), len(targ_out)) diff = [i for i in range(length) if conj_out[i] != targ_out[i]] if len(diff) == 0: diff_index = len(targ_out) else: diff_index = diff[0] low = 0 high = len(w_string) while True: i = (low + high) / 2 length = len(self._membership_query(w_string[:i])) if length == diff_index + 1: return w_string[:i] elif length < diff_index + 1: low = i + 1 else: high = i - 1" 1095,"def _process_counter_example(self, mma, w_string): """""""" Process a counterexample in the Rivest-Schapire way. 
Args: mma (DFA): The hypothesis automaton w_string (str): The examined string to be consumed Returns: None """""" w_string = self._find_bad_transition(mma, w_string) diff = len(w_string) same = 0 while True: i = (same + diff) / 2 access_string = self._run_in_hypothesis(mma, w_string, i) is_diff = self._check_suffix(w_string, access_string, i) if is_diff: diff = i else: same = i if diff - same == 1: break exp = w_string[diff:] self.observation_table.em_vector.append(exp) for row in self.observation_table.sm_vector + self.observation_table.smi_vector: self._fill_table_entry(row, exp)" 1096,"def _ot_make_closed(self, access_string): """""" Given a state input_string in Smi that is not equivalent with any state in Sm this method will move that state in Sm create a corresponding Smi state and fill the corresponding entries in the table. Args: access_string (str): State access string Returns: None """""" self.observation_table.sm_vector.append(access_string) for i in self.alphabet: self.observation_table.smi_vector.append(access_string + i) for e in self.observation_table.em_vector: self._fill_table_entry(access_string + i, e)" 1097,"def get_mealy_conjecture(self): """""" Utilize the observation table to construct a Mealy Machine. The library used for representing the Mealy Machine is the python bindings of the openFST library (pyFST). Args: None Returns: MealyMachine: A mealy machine build based on a closed and consistent observation table. """""" mma = MealyMachine() for s in self.observation_table.sm_vector: for i in self.alphabet: dst = self.observation_table.equiv_classes[s + i] # If dst == None then the table is not closed. if dst is None: logging.debug('Conjecture attempt on non closed table.') return None o = self.observation_table[s, i] src_id = self.observation_table.sm_vector.index(s) dst_id = self.observation_table.sm_vector.index(dst) mma.add_arc(src_id, dst_id, i, o) # This works only for Mealy machines for s in mma.states: s.final = True return mma" 1098,"def _init_table(self): """""" Initialize the observation table. """""" self.observation_table.sm_vector.append('') self.observation_table.smi_vector = list(self.alphabet) self.observation_table.em_vector = list(self.alphabet) for i in self.observation_table.em_vector: self._fill_table_entry('', i) for s, e in product(self.observation_table.smi_vector, self.observation_table.em_vector): self._fill_table_entry(s, e)" 1099,"def learn_mealy_machine(self): """""" Implements the high level loop of the algorithm for learning a Mealy machine. Args: None Returns: MealyMachine: The learned mealy machine """""" logging.info('Initializing learning procedure.') self._init_table() logging.info('Generating a closed and consistent observation table.') while True: closed = False # Make sure that the table is closed and consistent while not closed: logging.debug('Checking if table is closed.') closed, string = self.observation_table.is_closed() if not closed: logging.debug('Closing table.') self._ot_make_closed(string) else: logging.debug('Table closed.') # Create conjecture mma = self.get_mealy_conjecture() logging.info('Generated conjecture machine with %d states.', len(list(mma.states))) # _check correctness logging.debug('Running equivalence query.') found, counter_example = self._equivalence_query(mma) # Are we done? if found: logging.info('No counterexample found. 
Hypothesis is correct!') break # Add the new experiments into the table to reiterate the # learning loop logging.info( 'Processing counterexample %input_string with length %d.', counter_example, len(counter_example)) self._process_counter_example(mma, counter_example) logging.info('Learning complete.') return mma" 1100,"def get_headers(environ): """""" Returns only proper HTTP headers. """""" for key, value in environ.iteritems(): key = str(key) if key.startswith('HTTP_') and key not in \ ('HTTP_CONTENT_TYPE', 'HTTP_CONTENT_LENGTH'): yield key[5:].replace('_', '-').title(), value elif key in ('CONTENT_TYPE', 'CONTENT_LENGTH'): yield key.replace('_', '-').title(), value" 1101,"def get_host(environ): """"""Return the real host for the given WSGI environment. This takes care of the `X-Forwarded-Host` header. :param environ: the WSGI environment to get the host of. """""" scheme = environ.get('wsgi.url_scheme') if 'HTTP_X_FORWARDED_HOST' in environ: result = environ['HTTP_X_FORWARDED_HOST'] elif 'HTTP_HOST' in environ: result = environ['HTTP_HOST'] else: result = environ['SERVER_NAME'] if (scheme, str(environ['SERVER_PORT'])) not \ in (('https', '443'), ('http', '80')): result += ':' + environ['SERVER_PORT'] if result.endswith(':80') and scheme == 'http': result = result[:-3] elif result.endswith(':443') and scheme == 'https': result = result[:-4] return result" 1102,"def parse_library(lib_files): """""" Analizuje pliki podane w liście lib_files Zwraca instancję MusicLibrary """""" tracks, playlists = lib_files lib = MusicLibrary() lib_length = len(tracks) i = 0 writer = lib.ix.writer() previous_procent_done_str = """" for f in tracks: track_info = TrackInfo(f) lib.add_track_internal(track_info, writer) current_percent_done_str = ""%d%%"" % (i / lib_length * 100) if current_percent_done_str != previous_procent_done_str: logs.print_info(""Analizowanie biblioteki muzycznej... "" + current_percent_done_str) previous_procent_done_str = current_percent_done_str i += 1.0 logs.print_info(""Analizowanie playlist..."") for f in playlists: with open(f, 'r') as fo: playlist_dict = loads(fo.read()) playlist = Playlist(lib, f, playlist_dict['title'], playlist_dict['tracks']) lib.add_playlist(playlist) writer.commit() logs.print_info(""Optymalizacja index-u..."") lib.ix.optimize() return lib" 1103,"def full_subgraph(self, objects): """""" Return the subgraph of this graph whose vertices are the given ones and whose edges are the edges of the original graph between those vertices. """""" vertices = ElementTransformSet(transform=id) out_edges = KeyTransformDict(transform=id) in_edges = KeyTransformDict(transform=id) for obj in objects: vertices.add(obj) out_edges[obj] = [] in_edges[obj] = [] edges = set() head = {} tail = {} for referrer in vertices: for edge in self._out_edges[referrer]: referent = self._head[edge] if referent not in vertices: continue edges.add(edge) tail[edge] = referrer head[edge] = referent out_edges[referrer].append(edge) in_edges[referent].append(edge) return ObjectGraph._raw( vertices=vertices, edges=edges, out_edges=out_edges, in_edges=in_edges, head=head, tail=tail, )" 1104,"def _raw(cls, vertices, edges, out_edges, in_edges, head, tail): """""" Private constructor for direct construction of an ObjectGraph from its attributes. vertices is the collection of vertices out_edges and in_edges map vertices to lists of edges head and tail map edges to objects. 
"""""" self = object.__new__(cls) self._out_edges = out_edges self._in_edges = in_edges self._head = head self._tail = tail self._vertices = vertices self._edges = edges return self" 1105,"def _from_objects(cls, objects): """""" Private constructor: create graph from the given Python objects. The constructor examines the referents of each given object to build up a graph showing the objects and their links. """""" vertices = ElementTransformSet(transform=id) out_edges = KeyTransformDict(transform=id) in_edges = KeyTransformDict(transform=id) for obj in objects: vertices.add(obj) out_edges[obj] = [] in_edges[obj] = [] # Edges are identified by simple integers, so # we can use plain dictionaries for mapping # edges to their heads and tails. edge_label = itertools.count() edges = set() head = {} tail = {} for referrer in vertices: for referent in gc.get_referents(referrer): if referent not in vertices: continue edge = next(edge_label) edges.add(edge) tail[edge] = referrer head[edge] = referent out_edges[referrer].append(edge) in_edges[referent].append(edge) return cls._raw( vertices=vertices, edges=edges, out_edges=out_edges, in_edges=in_edges, head=head, tail=tail, )" 1106,"def annotated(self): """""" Annotate this graph, returning an AnnotatedGraph object with the same structure. """""" # Build up dictionary of edge annotations. edge_annotations = {} for edge in self.edges: if edge not in edge_annotations: # We annotate all edges from a given object at once. referrer = self._tail[edge] known_refs = annotated_references(referrer) for out_edge in self._out_edges[referrer]: referent = self._head[out_edge] if known_refs[referent]: annotation = known_refs[referent].pop() else: annotation = None edge_annotations[out_edge] = annotation annotated_vertices = [ AnnotatedVertex( id=id(vertex), annotation=object_annotation(vertex), ) for vertex in self.vertices ] annotated_edges = [ AnnotatedEdge( id=edge, annotation=edge_annotations[edge], head=id(self._head[edge]), tail=id(self._tail[edge]), ) for edge in self.edges ] return AnnotatedGraph( vertices=annotated_vertices, edges=annotated_edges, )" 1107,"def export_image(self, filename='refcycle.png', format=None, dot_executable='dot'): """""" Export graph as an image. This requires that Graphviz is installed and that the ``dot`` executable is in your path. The *filename* argument specifies the output filename. The *format* argument lets you specify the output format. It may be any format that ``dot`` understands, including extended format specifications like ``png:cairo``. If omitted, the filename extension will be used; if no filename extension is present, ``png`` will be used. The *dot_executable* argument lets you provide a full path to the ``dot`` executable if necessary. """""" return self.annotated().export_image( filename=filename, format=format, dot_executable=dot_executable, )" 1108,"def owned_objects(self): """""" List of gc-tracked objects owned by this ObjectGraph instance. """""" return ( [ self, self.__dict__, self._head, self._tail, self._out_edges, self._out_edges._keys, self._out_edges._values, self._in_edges, self._in_edges._keys, self._in_edges._values, self._vertices, self._vertices._elements, self._edges, ] + list(six.itervalues(self._out_edges)) + list(six.itervalues(self._in_edges)) )" 1109,"def find_by_typename(self, typename): """""" List of all objects whose type has the given name. 
"""""" return self.find_by(lambda obj: type(obj).__name__ == typename)" 1110,"def set_input(self, key, value): """""" Sets the to """""" if key not in self._inputs: raise InputException(""Key {0} is not a valid input!"".format(key)) self._inputs[key].value = value" 1111,"def get_input(self, key, force=False): """""" Get the value of if it already exists, or prompt for it if not """""" if key not in self._inputs: raise InputException(""Key {0} is not a valid input!"".format(key)) if self._inputs[key].prompt: prompt = self._inputs[key].prompt elif self._inputs[key].is_bool(): prompt = ""{0}?"".format(key) else: prompt = ""please enter your {0}"".format(key) help_text = self._inputs[key].help if hasattr(self._inputs[key], 'help') else None if self._inputs[key].value is EMPTY or force: default_value = None if self._inputs[key].default is not EMPTY: default_value = self._inputs[key].default if self._inputs[key].value is not EMPTY: default_value = self._inputs[key].value input_value = EMPTY while input_value is EMPTY or input_value == '?': if input_value == '?' and help_text: print(help_text) input_value = lib.prompt( prompt, default=default_value, bool_type=self._inputs[key].in_type, secret=self._inputs[key].is_secret) self._inputs[key].value = input_value return self._inputs[key].value" 1112,"def get_unset_inputs(self): """""" Return a set of unset inputs """""" return set([k for k, v in self._inputs.items() if v.is_empty(False)])" 1113,"def prompt_unset_inputs(self, force=False): """""" Prompt for unset input values """""" for k, v in self._inputs.items(): if force or v.is_empty(False): self.get_input(k, force=force)" 1114,"def values(self, with_defaults=True): """""" Return the values dictionary, defaulting to default values """""" return dict(((k, str(v)) for k, v in self._inputs.items() if not v.is_empty(with_defaults)))" 1115,"def write_values(self): """""" Return the dictionary with which to write values """""" return dict(((k, v.value) for k, v in self._inputs.items() if not v.is_secret and not v.is_empty(False)))" 1116,"def add_inputs_from_inputstring(self, input_string): """""" Add inputs using the input string format: gitroot==~/workspace username password? main_branch==comp_main """""" raw_params = input_string.split('\n') param_attributes = (self._parse_param_line(rp) for rp in raw_params if len(rp.strip(' \t')) > 0) for param, attributes in param_attributes: self.add_input(param, attributes)" 1117,"def _parse_param_line(self, line): """""" Parse a single param line. 
"""""" value = line.strip('\n \t') if len(value) > 0: i = Input() if value.find('#') != -1: value, extra_attributes = value.split('#') try: extra_attributes = eval(extra_attributes) except SyntaxError: raise InputException(""Incorrectly formatted input for {0}!"".format(value)) if not isinstance(extra_attributes, dict): raise InputException(""Incorrectly formatted input for {0}!"".format(value)) if 'prompt' in extra_attributes: i.prompt = extra_attributes['prompt'] if 'help' in extra_attributes: i.help = extra_attributes['help'] if 'type' in extra_attributes: i.in_type = extra_attributes['type'] if i.in_type.find('/') != -1: i.in_type, i.out_type = i.in_type.split('/') if 'cast' in extra_attributes: i.out_type = extra_attributes['cast'] if value.find('==') != -1: value, default = value.split('==') i.default = default if value.endswith('?'): value = value[:-1] i.is_secret = True return (value, i) return None" 1118,"def extract_csv(zip_path, destination): """""" Extract the first CSV file found in the given ``zip_path`` ZIP file to the ``destination`` file. Raises :class:`LookupError` if no CSV file can be found in the ZIP. """""" with zipfile.ZipFile(zip_path) as zf: member_to_unzip = None for member in zf.namelist(): if member.endswith('.csv'): member_to_unzip = member break if not member_to_unzip: raise LookupError( ""Couldn't find any CSV file in the archive"" ) with zf.open(member_to_unzip) as zfp, \ open(destination, 'wb') as dfp: dfp.write(zfp.read())" 1119,"def download(self, overwrite=True): """""" Download the zipcodes CSV file. If ``overwrite`` is set to False, the file won't be downloaded if it already exists. """""" if overwrite or not os.path.exists(self.file_path): _, f = tempfile.mkstemp() try: urlretrieve(self.DOWNLOAD_URL, f) extract_csv(f, self.file_path) finally: os.remove(f)" 1120,"def get_locations(self): """""" Return the zipcodes mapping as a list of ``{zipcode: location}`` dicts. The zipcodes file will be downloaded if necessary. """""" if not self.zipcode_mapping: self.download(overwrite=False) zipcode_mapping = {} with UnicodeReader(self.file_path, delimiter=';', encoding='latin1') as csv_reader: # Skip header next(csv_reader) for line in csv_reader: zipcode_mapping[int(line[1])] = Location( official_name=line[0], canton=line[5], municipality=line[3] ) self.zipcode_mapping = zipcode_mapping return self.zipcode_mapping" 1121,"def get_zipcodes_for_canton(self, canton): """""" Return the list of zipcodes for the given canton code. """""" zipcodes = [ zipcode for zipcode, location in self.get_locations().items() if location.canton == canton ] return zipcodes" 1122,"def get_cantons(self): """""" Return the list of unique cantons, sorted by name. """""" return sorted(list(set([ location.canton for location in self.get_locations().values() ])))" 1123,"def get_municipalities(self): """""" Return the list of unique municipalities, sorted by name. """""" return sorted(list(set([ location.municipality for location in self.get_locations().values() ])))" 1124,"def term_vector(self, params): ''' params are either True/False, 'with_offsets', 'with_positions', 'with_positions_offsets' ''' if params == True: self[self.field]['term_vector'] = 'yes' elif params == False: self[self.field]['term_vector'] = 'no' else: self[self.field]['term_vector'] = params return self" 1125,"def _get_formula_class(self, formula): """""" get a formula class object if it exists, else create one, add it to the dict, and pass return it. 
"""""" # recursive import otherwise from sprinter.formula.base import FormulaBase if formula in LEGACY_MAPPINGS: formula = LEGACY_MAPPINGS[formula] formula_class, formula_url = formula, None if ':' in formula: formula_class, formula_url = formula.split("":"", 1) if formula_class not in self._formula_dict: try: self._formula_dict[formula_class] = lib.get_subclass_from_module(formula_class, FormulaBase) except (SprinterException, ImportError): logger.info(""Downloading %s..."" % formula_class) try: self._pip.install_egg(formula_url or formula_class) try: self._formula_dict[formula_class] = lib.get_subclass_from_module(formula_class, FormulaBase) except ImportError: logger.debug(""FeatureDict import Error"", exc_info=sys.exc_info()) raise SprinterException(""Error: Unable to retrieve formula %s!"" % formula_class) except PipException: logger.error(""ERROR: Unable to download %s!"" % formula_class) return self._formula_dict[formula_class]" 1126,"def is_backup_class(cls): """"""Return true if given class supports back up. Currently this means a gludb.data.Storable-derived class that has a mapping as defined in gludb.config"""""" return True if ( isclass(cls) and issubclass(cls, Storable) and get_mapping(cls, no_mapping_ok=True) ) else False" 1127,"def add_package( self, pkg_name, recurse=True, include_bases=True, parent_pkg=None ): """"""Add all classes to the backup in the specified package (including all modules and all sub-packages) for which is_backup_class returns True. Note that self.add_class is used, so base classes will added as well. Parameters: * pkg_name - a string representing the package name. It may be relative _if_ parent_pkg is supplied as well * recurse - (default value of True) if False, sub-packages will _not_ be examined * include_bases - (default value of True) is passed directly to add_class for every class added * parent_pkg - a string representing the parent package of the relative package specified in pkg_name. Note that you should specify parent_pkg _only_ if pkg_name should be interpreted as relative An an example of both relative and absolute package imports, these are equivalent: ```` backup.add_package('toppackage.subpackage') backup.add_package('subpackage', parent_pkg='toppackage') ```` """""" if parent_pkg: pkg = import_module('.' + pkg_name, parent_pkg) else: pkg = import_module(pkg_name) for module_loader, name, ispkg in pkgutil.walk_packages(pkg.__path__): if not ispkg: # Module mod = import_module('.' + name, pkg_name) for name, member in getmembers(mod): if is_backup_class(member): self.add_class(member, include_bases=include_bases) elif recurse: # Package and we're supposed to recurse self.add_package( pkg_name + '.' + name, recurse=True, include_bases=include_bases, parent_pkg=parent_pkg )" 1128,"def add_class(self, cls, include_bases=True): """"""Add the specified class (which should be a class object, _not_ a string). By default all base classes for which is_backup_class returns True will also be added. `include_bases=False` may be spcified to suppress this behavior. The total number of classes added is returned. Note that if is_backup_class does not return True for the class object passed in, 0 will be returned. 
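As an illustrative sketch, ``backup.add_class(MyData)`` (where ``MyData`` is a hypothetical mapped Storable subclass) registers ``MyData`` along with any mapped base classes and returns the number of classes added.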
If you specify include_bases=False, then the maximum value that can be returned is 1."""""" if not is_backup_class(cls): return 0 added = 0 cls_name = backup_name(cls) if cls_name not in self.classes: self.classes[cls_name] = cls self.log(""Added class for backup: %s"", cls_name) added = 1 if include_bases: for candidate_cls in getmro(cls): if is_backup_class(cls): # Note that we don't keep recursing on base classes added += self.add_class(candidate_cls, include_bases=False) return added" 1129,"def log(self, entry, *args): """"""Append the string supplied to the log (a list of strings). If additional arguments are supplied, then first string is assumed to be a format string and the other args are used for string interpolation. For instance `backup.log(""%d + %d == %d"", 1, 1, 2)` would result in the string `'1 + 1 == 2'` being logged"""""" if args: entry = entry % args self.backup_log.append(entry)" 1130,"def run_backup(self): """"""The actual backup is performed. The data for all added classes is extracted and written to a file per class where each line (terminated by a line feed character) is the JSON representing a single object. Those files are all archived in a single gzip'ed tarball which is stored in the AWS S3 bucket specified when the current instance of Backup was created"""""" self.log(""Starting backup at %s"", now_field()) self.log(""Backup config object created at %s"", self.timestamp) # Make sure we're good to go for fld in ['aws_access_key', 'aws_secret_key', 'bucketname']: val = getattr(self, fld, None) if not val: self.log(""Backup cannot start: %s is a required field"", fld) raise ValueError(self.backup_log[-1]) # Start the compressed tarball our data is stored in backup_file = NamedTemporaryFile(suffix="".tar.gz"") backup_tarfile = tarfile.open(fileobj=backup_file, mode='w:gz') for cls_name, cls in self.classes.items(): self.log(""Backing up %s"", cls_name) rec_count = 0 with NamedTemporaryFile() as record_file: for rec in cls.find_all(): write_line(record_file, rec.to_data()) rec_count += 1 record_file.flush() backup_tarfile.add(record_file.name, arcname=cls_name+'.json') self.log(""%s => %d records backed up"", cls_name, rec_count) # Finalize archive backup_tarfile.close() backup_file.flush() backup_size = os.stat(backup_file.name)[6] # Figure out key name for archived file key_name = ('Backup_' + now_field() + '.tar.gz').replace(':', '_') # upload archive to s3 if os.environ.get('DEBUG', False) or os.environ.get('travis', False): # Local or CI - connect to our mock s3 service conn = S3Connection( '', '', is_secure=False, port=8888, host='localhost', calling_format=OrdinaryCallingFormat() ) else: conn = S3Connection(self.aws_access_key, self.aws_secret_key) bucket = conn.get_bucket(self.bucketname) key = Key(bucket) key.key = key_name self.log( ""Sending %s [size=%d bytes] with key name %s"", backup_file.name, backup_size, key_name ) # TODO: should probably look into a multi-part upload for larger backup key.set_contents_from_filename(backup_file.name) self.log(""Sent %s"", backup_file.name) # All done backup_file.close() self.log(""Backup completed"") # return the bucket name and key name for the completed backup return self.bucketname, key_name" 1131,"def list_dir(sourceDir, include_source=None, include_file=True): """"""与 :func:`os.listdir()` 类似,但提供一些筛选功能,且返回生成器对象。 :param str sourceDir: 待处理的文件夹。 :param bool include_source: 遍历结果中是否包含源文件夹的路径。 :param bool include_file: 是否包含文件。True 表示返回的内容中既包含文件,又 包含文件夹;Flase 代表仅包含文件夹。 :return: 一个生成器对象。 """""" for cur_file in 
os.listdir(sourceDir): if cur_file.lower() == "".ds_store"": continue pathWithSource = os.path.join(sourceDir, cur_file) if include_file or os.path.isdir(pathWithSource): if include_source: yield pathWithSource else: yield cur_file" 1132,"def copy_dir(sou_dir, dst_dir, del_dst=False, del_subdst=False): """""":func:`shutil.copytree()` 也能实现类似功能, 但前者要求目标文件夹必须不存在。 而 copy_dir 没有这个要求,它可以将 sou_dir 中的文件合并到 dst_dir 中。 :param str sou_dir: 待复制的文件夹; :param str dst_dir: 目标文件夹; :param bool del_dst: 是否删除目标文件夹。 :param bool del_subdst: 是否删除目标子文件夹。 """""" if del_dst and os.path.isdir(del_dst): shutil.rmtree(dst_dir) os.makedirs(dst_dir, exist_ok=True) for cur_file in list_dir(sou_dir): dst_file = os.path.join(dst_dir, cur_file) cur_file = os.path.join(sou_dir, cur_file) if os.path.isdir(cur_file): if del_subdst and os.path.isdir(dst_file): shutil.rmtree(dst_file) os.makedirs(dst_file, exist_ok=True) copy_dir(cur_file, dst_file) else: shutil.copyfile(cur_file, dst_file)" 1133,"def get_files(path, ext=[], include=True): """"""遍历提供的文件夹的所有子文件夹,饭后生成器对象。 :param str path: 待处理的文件夹。 :param list ext: 扩展名列表。 :param bool include: 若值为 True,代表 ext 提供的是包含列表; 否则是排除列表。 :returns: 一个生成器对象。 """""" has_ext = len(ext)>0 for p, d, fs in os.walk(path): for f in fs: if has_ext: in_ext = False for name in ext: if f.endswith(name): in_ext = True break if (include and in_ext) or \ (not include and not in_ext): yield os.path.join(p,f) else: yield os.path.join(p, f)" 1134,"def read_file(file_path, **kws): """"""读取文本文件的内容。 :param str file_path: 文件路径。 :returns: 文件内容。 :rtype: str """""" kw = {""mode"":""r"", ""encoding"":""utf-8""} if kws: for k,v in kws.items(): kw[k] = v with open(file_path, **kw) as afile: txt = afile.read() return txt" 1135,"def write_file(file_path, txt, **kws): """"""将文本内容写入文件。 :param str file_path: 文件路径。 :param str txt: 待写入的文件内容。 """""" if not os.path.exists(file_path): upDir = os.path.dirname(file_path) if not os.path.isdir(upDir): os.makedirs(upDir) kw = {""mode"":""w"", ""encoding"":""utf-8""} if kws: for k,v in kws.items(): kw[k] = v with open(file_path, **kw) as afile: afile.write(txt)" 1136,"def write_by_templ(templ, target, sub_value, safe=False): """"""根据模版写入文件。 :param str templ: 模版文件所在路径。 :param str target: 要写入的文件所在路径。 :param dict sub_value: 被替换的内容。 """""" templ_txt = read_file(templ) txt = None if safe: txt = Template(templ_txt).safe_substitute(sub_value) else: txt = Template(templ_txt).substitute(sub_value) write_file(target, txt)" 1137,"def get_md5(path): """"""获取文件的 MD5 值。 :param str path: 文件路径。 :returns: MD5 值。 :rtype: str """""" with open(path,'rb') as f: md5obj = hashlib.md5() md5obj.update(f.read()) return md5obj.hexdigest() raise FileNotFoundError(""Error when get md5 for %s!""%path)" 1138,"def create_zip(files, trim_arcname=None, target_file=None, **zipfile_args): """"""创建一个 zip 文件。 :param list files: 要创建zip 的文件列表。 :param int trim_arcname: 若提供这个值,则使用 ZipFile.write(filename, filename[trim_arcname:]) 进行调用。 :returns: zip 文件的路径。 :rtype: str """""" zipname = None azip = None if not target_file: azip = tempfile.NamedTemporaryFile(mode='wb', delete=False) zipname = azip.name else: azip = target_file zipname = target_file.name if hasattr(azip, 'read') else azip slog.info('Package %d files to ""%s""'%(len(files), azip.name)) fileNum = len(files) curFile = 0 zipfile_args['mode'] = 'w' if not zipfile_args.get('compression'): zipfile_args['compression'] = zipfile.ZIP_DEFLATED with zipfile.ZipFile(azip, **zipfile_args) as zipf: for f in files: percent = round(curFile/fileNum*100) 
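# Added note: the carriage return written below moves the cursor back to the
# start of the line, so the progress percentage overwrites itself in place
# rather than printing one line per file.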
sys.stdout.write('\r%d%%'%(percent)) sys.stdout.flush() zipf.write(f, f[trim_arcname:] if trim_arcname else None ) curFile = curFile+1 sys.stdout.write('\r100%\n') sys.stdout.flush() if hasattr(azip, 'close'): azip.close() return zipname" 1139,"def get_max_ver(fmt, filelist): """"""有一堆字符串,文件名均包含 %d.%d.%d 形式版本号,返回其中版本号最大的那个。 我一般用它来检测一堆发行版中版本号最大的那个文件。 :param str fmt: 要检测测字符串形式,例如 rookout-%s.tar.gz ,其中 %s 会被正则替换。 :param list files: 字符串列表。 :returns: 版本号最大的字符串。 :rtype: str """""" x, y, z = 0,0,0 verpat = fmt%'(\d+).(\d+).(\d+)' verre = re.compile(r''+verpat+'', re.M) for f in filelist: match = verre.search(f) if match: x1 = int(match.group(1)) y1 = int(match.group(2)) z1 = int(match.group(3)) if x1 >= x and y1 >= y: x = x1 y = y1 z = z1 verfmt = fmt%('%d.%d.%d') name = verfmt%(x, y, z) if x == 0 and y == 0 and z == 0: slog.info('Can not find the string ""%s"" !'%name) return None return name" 1140,"def merge_dicts(d1, d2): """"""合并两个无限深度的 dict 会自动合并 list 格式 :param dict d1: 被合并的 dict :param dict d2: 待合并的 dict :returns: 一个新的生成器对象 :rtype: generator """""" for k in set(d1.keys()).union(d2.keys()): if k in d1 and k in d2: if isinstance(d1[k], dict) and isinstance(d2[k], dict): yield (k, dict(merge_dicts(d1[k], d2[k]))) elif isinstance(d1[k], list): if isinstance(d2[k], list): d1[k].extend(d2[k]) else: d1[k].append(d2[k]) yield(k, d1) else: # If one of the values is not a dict, you can't continue merging it. # Value from second dict overrides one in first and we move on. yield (k, d2[k]) # Alternatively, replace this with exception raiser to alert you of value conflicts elif k in d1: yield (k, d1[k]) else: yield (k, d2[k])" 1141,"def process( hw_num: int, problems_to_do: Optional[Iterable[int]] = None, prefix: Optional[Path] = None, by_hand: Optional[Iterable[int]] = None, ) -> None: """"""Process the homework problems in ``prefix`` folder. Arguments --------- hw_num The number of this homework problems_to_do, optional A list of the problems to be processed prefix, optional A `~pathlib.Path` to this homework assignment folder by_hand, optional A list of the problems that should be labeled to be completed by hand and have an image with the solution included. """""" if prefix is None: prefix = Path(""."") problems: Iterable[Path] if problems_to_do is None: # The glob syntax here means a the filename must start with # homework-, be followed the homework number, followed by a # dash, then a digit representing the problem number for this # homework number, then any number of characters (in practice # either nothing or, rarely, another digit), then the ipynb # extension. 
Examples: # homework-1-1.ipynb, homework-10-1.ipynb, homework-3-10.ipynb problems = list(prefix.glob(f""homework-{hw_num}-[0-9]*.ipynb"")) else: problems = [prefix / f""homework-{hw_num}-{i}.ipynb"" for i in problems_to_do] problems = sorted(problems, key=lambda k: k.stem[-1]) output_directory: Path = (prefix / ""output"").resolve() fw = FilesWriter(build_directory=str(output_directory)) assignment_zip_name = output_directory / f""homework-{hw_num}.zip"" solution_zip_name = output_directory / f""homework-{hw_num}-soln.zip"" assignment_pdfs: List[BytesIO] = [] solution_pdfs: List[BytesIO] = [] assignment_pdf: bytes solution_pdf: bytes assignment_nb: str solution_nb: str res: Dict[str, Union[str, bool]] = { ""delete_pymarkdown"": True, ""global_content_filter"": {""include_raw"": False}, } for problem in problems: print(""Working on:"", problem) res[""unique_key""] = problem.stem problem_number = int(problem.stem.split(""-"")[-1]) if by_hand is not None and problem_number in by_hand: res[""by_hand""] = True else: res[""by_hand""] = False problem_fname = str(problem.resolve()) # Process assignments res[""remove_solution""] = True assignment_pdf, _ = pdf_exp.from_filename(problem_fname, resources=res) assignment_pdfs.append(BytesIO(assignment_pdf)) assignment_nb, _ = nb_exp.from_filename(problem_fname, resources=res) with ZipFile(assignment_zip_name, mode=""a"") as zip_file: zip_file.writestr(problem.name, assignment_nb) # Process solutions res[""remove_solution""] = False solution_pdf, _ = pdf_exp.from_filename(problem_fname, resources=res) solution_pdfs.append(BytesIO(solution_pdf)) solution_nb, _ = nb_exp.from_filename(problem_fname, resources=res) with ZipFile(solution_zip_name, mode=""a"") as zip_file: zip_file.writestr(problem.stem + ""-soln"" + problem.suffix, solution_nb) resources: Dict[str, Any] = { ""metadata"": { ""name"": f""homework-{hw_num}"", ""path"": str(prefix), ""modified_date"": date.today().strftime(""%B %d, %Y""), }, ""output_extension"": "".pdf"", } fw.write(combine_pdf_as_bytes(assignment_pdfs), resources, f""homework-{hw_num}"") resources[""metadata""][""name""] = f""homework-{hw_num}-soln"" fw.write(combine_pdf_as_bytes(solution_pdfs), resources, f""homework-{hw_num}-soln"")" 1142,"def main(argv: Optional[Sequence[str]] = None) -> None: """"""Parse arguments and process the homework assignment."""""" parser = ArgumentParser(description=""Convert Jupyter Notebook assignments to PDFs"") parser.add_argument( ""--hw"", type=int, required=True, help=""Homework number to convert"", dest=""hw_num"", ) parser.add_argument( ""-p"", ""--problems"", type=int, help=""Problem numbers to convert"", dest=""problems"", nargs=""*"", ) parser.add_argument( ""--by-hand"", type=int, help=""Problem numbers to be completed by hand"", dest=""by_hand"", nargs=""*"", ) args = parser.parse_args(argv) prefix = Path(f""homework/homework-{args.hw_num}"") process(args.hw_num, args.problems, prefix=prefix, by_hand=args.by_hand)" 1143,"def get_object_by_name(content, object_type, name, regex=False): ''' Get the vsphere object associated with a given text name Source: https://github.com/rreubenur/vmware-pyvmomi-examples/blob/master/create_template.py ''' container = content.viewManager.CreateContainerView( content.rootFolder, [object_type], True ) for c in container.view: if regex: if re.match(name, c.name): return c elif c.name == name: return c" 1144,"def get_vm_by_name(content, name, regex=False): ''' Get a VM by its name ''' return get_object_by_name(content, vim.VirtualMachine, name, regex)" 
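A minimal usage sketch for the pyVmomi helpers above; the vCenter host, credentials and VM name pattern are placeholder values, and SSL certificate handling is omitted:

from pyVim.connect import SmartConnect, Disconnect

# connect to vCenter and fetch the content object the helpers expect
si = SmartConnect(host='vcenter.example.com', user='admin', pwd='secret')
try:
    content = si.RetrieveContent()
    # regex lookup: first VM whose name starts with 'web-'
    vm = get_vm_by_name(content, r'^web-', regex=True)
    if vm is not None:
        print(vm.summary.config.name, vm.runtime.powerState)
finally:
    Disconnect(si)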
1145,"def get_all(content, container, object_type): ''' Get all items of a certain type Example: get_all(content, vim.Datastore) return all datastore objects ''' obj_list = list() view_manager = content.viewManager object_view = view_manager.CreateContainerView( container, [object_type], True ) for obj in object_view.view: if isinstance(obj, object_type): obj_list.append(obj) object_view.Destroy() return obj_list" 1146,"def get_datacenter(content, obj): ''' Get the datacenter to whom an object belongs ''' datacenters = content.rootFolder.childEntity for d in datacenters: dch = get_all(content, d, type(obj)) if dch is not None and obj in dch: return d" 1147,"def get_all_vswitches(content): ''' Get all the virtual switches ''' vswitches = [] hosts = get_all_hosts(content) for h in hosts: for s in h.config.network.vswitch: vswitches.append(s) return vswitches" 1148,"def print_vm_info(vm): ''' Print information for a particular virtual machine ''' summary = vm.summary print('Name : ', summary.config.name) print('Path : ', summary.config.vmPathName) print('Guest : ', summary.config.guestFullName) annotation = summary.config.annotation if annotation is not None and annotation != '': print('Annotation : ', annotation) print('State : ', summary.runtime.powerState) if summary.guest is not None: ip = summary.guest.ipAddress if ip is not None and ip != '': print('IP : ', ip) if summary.runtime.question is not None: print('Question : ', summary.runtime.question.text) print('')" 1149,"def module_import(module_path): """"""Imports the module indicated in name Args: module_path: string representing a module path such as 'app.config' or 'app.extras.my_module' Returns: the module matching name of the last component, ie: for 'app.extras.my_module' it returns a reference to my_module Raises: BadModulePathError if the module is not found """""" try: # Import whole module path. module = __import__(module_path) # Split into components: ['contour', # 'extras','appengine','ndb_persistence']. components = module_path.split('.') # Starting at the second component, set module to a # a reference to that component. at the end # module with be the last component. In this case: # ndb_persistence for component in components[1:]: module = getattr(module, component) return module except ImportError: raise BadModulePathError( 'Unable to find module ""%s"".' % (module_path,))" 1150,"def find_contour_yaml(config_file=__file__, names=None): """""" Traverse directory trees to find a contour.yaml file Begins with the location of this file then checks the working directory if not found Args: config_file: location of this file, override for testing Returns: the path of contour.yaml or None if not found """""" checked = set() contour_yaml = _find_countour_yaml(os.path.dirname(config_file), checked, names=names) if not contour_yaml: contour_yaml = _find_countour_yaml(os.getcwd(), checked, names=names) return contour_yaml" 1151,"def _find_countour_yaml(start, checked, names=None): """"""Traverse the directory tree identified by start until a directory already in checked is encountered or the path of countour.yaml is found. 
Checked is present both to make the loop termination easy to reason about and so the same directories do not get rechecked Args: start: the path to start looking in and work upward from checked: the set of already checked directories Returns: the path of the countour.yaml file or None if it is not found """""" extensions = [] if names: for name in names: if not os.path.splitext(name)[1]: extensions.append(name + "".yaml"") extensions.append(name + "".yml"") yaml_names = (names or []) + CONTOUR_YAML_NAMES + extensions directory = start while directory not in checked: checked.add(directory) for fs_yaml_name in yaml_names: yaml_path = os.path.join(directory, fs_yaml_name) if os.path.exists(yaml_path): return yaml_path directory = os.path.dirname(directory) return" 1152,"def _load_yaml_config(path=None): """"""Open and return the yaml contents."""""" countour_yaml_path = path or find_contour_yaml() if countour_yaml_path is None: logging.debug(""countour.yaml not found."") return None with open(countour_yaml_path) as yaml_file: return yaml_file.read()" 1153,"def build_parser(): """""" _build_parser_ Set up CLI parser options, parse the CLI options an return the parsed results """""" parser = argparse.ArgumentParser( description='dockerstache templating util' ) parser.add_argument( '--output', '-o', help='Working directory to render dockerfile and templates', dest='output', default=None ) parser.add_argument( '--input', '-i', help='Working directory containing dockerfile and script mustache templates', dest='input', default=os.getcwd() ) parser.add_argument( '--context', '-c', help='JSON file containing context dictionary to render templates', dest='context', default=None ) parser.add_argument( '--defaults', '-d', help='JSON file containing default context dictionary to render templates', dest='defaults', default=None ) parser.add_argument( '--inclusive', help='include non .mustache files from template', default=False, action='store_true' ) parser.add_argument( '--exclude', '-e', help='exclude files from template in this list', default=[], nargs='+' ) opts = parser.parse_args() return vars(opts)" 1154,"def main(): """""" _main_ Create a CLI parser and use that to run the template rendering process """""" options = build_parser() try: run(**options) except RuntimeError as ex: msg = ( ""An error occurred running dockerstache: {} "" ""please see logging info above for details"" ).format(ex) LOGGER.error(msg) sys.exit(1)" 1155,"def _guess_type_from_validator(validator): """""" Utility method to return the declared type of an attribute or None. It handles _OptionalValidator and _AndValidator in order to unpack the validators. :param validator: :return: the type of attribute declared in an inner 'instance_of' validator (if any is found, the first one is used) or None if no inner 'instance_of' validator is found """""" if isinstance(validator, _OptionalValidator): # Optional : look inside return _guess_type_from_validator(validator.validator) elif isinstance(validator, _AndValidator): # Sequence : try each of them for v in validator.validators: typ = _guess_type_from_validator(v) if typ is not None: return typ return None elif isinstance(validator, _InstanceOfValidator): # InstanceOf validator : found it ! 
return validator.type else: # we could not find the type return None" 1156,"def is_optional(attr): """""" Helper method to find if an attribute is mandatory :param attr: :return: """""" return isinstance(attr.validator, _OptionalValidator) or (attr.default is not None and attr.default is not NOTHING)" 1157,"def get_attrs_declarations(item_type): """""" Helper method to return a dictionary of tuples. Each key is attr_name, and value is (attr_type, attr_is_optional) :param item_type: :return: """""" # this will raise an error if the type is not an attr-created type attribs = fields(item_type) res = dict() for attr in attribs: attr_name = attr.name # -- is the attribute mandatory ? optional = is_optional(attr) # -- get and check the attribute type typ = guess_type_from_validators(attr) # -- store both info in result dict res[attr_name] = (typ, optional) return res" 1158,"def preprocess( self, nb: ""NotebookNode"", resources: dict ) -> Tuple[""NotebookNode"", dict]: """"""Remove any raw cells from the Notebook. By default, exclude raw cells from the output. Change this by including global_content_filter->include_raw = True in the resources dictionary. This preprocessor is necessary because the NotebookExporter doesn't include the exclude_raw config."""""" if not resources.get(""global_content_filter"", {}).get(""include_raw"", False): keep_cells = [] for cell in nb.cells: if cell.cell_type != ""raw"": keep_cells.append(cell) nb.cells = keep_cells return nb, resources" 1159,"def preprocess( self, nb: ""NotebookNode"", resources: dict ) -> Tuple[""NotebookNode"", dict]: """"""Preprocess the entire notebook."""""" if ""remove_solution"" not in resources: raise KeyError(""The resources dictionary must have a remove_solution key."") if resources[""remove_solution""]: keep_cells_idx = [] for index, cell in enumerate(nb.cells): if ""## solution"" in cell.source.lower(): keep_cells_idx.append(index) # The space at the end of the test string here is important elif len(keep_cells_idx) > 0 and cell.source.startswith(""### ""): keep_cells_idx.append(index) keep_cells = nb.cells[: keep_cells_idx[0] + 1] for i in keep_cells_idx[1:]: keep_cells.append(nb.cells[i]) if resources[""by_hand""]: keep_cells.append(by_hand_cell) else: if ""sketch"" in nb.cells[i].source.lower(): keep_cells.append(sketch_cell) else: keep_cells.append(md_expl_cell) keep_cells.append(code_ans_cell) keep_cells.append(md_ans_cell) nb.cells = keep_cells return nb, resources" 1160,"def preprocess( self, nb: ""NotebookNode"", resources: dict ) -> Tuple[""NotebookNode"", dict]: """"""Preprocess the entire Notebook."""""" for index, cell in enumerate(nb.cells): if ""## Solution"" in cell.source: nb.cells[index + 1].source = """" return nb, resources" 1161,"def preprocess( self, nb: ""NotebookNode"", resources: dict ) -> Tuple[""NotebookNode"", dict]: """"""Preprocess the entire Notebook."""""" exam_num = resources[""exam_num""] time = resources[""time""] date = resources[""date""] nb.cells.insert(0, new_markdown_cell(source=""---"")) nb.cells.insert(0, new_markdown_cell(source="""")) nb.cells.insert(0, exam_instructions_cell) first_cell_source = ( ""# ME 2233: Thermodynamic Principles\n\n"" f""# Exam {exam_num} - {time}\n\n# {date}"" ) nb.cells.insert(0, new_markdown_cell(source=first_cell_source)) return nb, resources" 1162,"def parse_from_dict(json_dict): """""" Given a Unified Uploader message, parse the contents and return a MarketOrderList. :param dict json_dict: A Unified Uploader message as a JSON dict. 
:rtype: MarketOrderList :returns: An instance of MarketOrderList, containing the orders within. """""" order_columns = json_dict['columns'] order_list = MarketOrderList( upload_keys=json_dict['uploadKeys'], order_generator=json_dict['generator'], ) for rowset in json_dict['rowsets']: generated_at = parse_datetime(rowset['generatedAt']) region_id = rowset['regionID'] type_id = rowset['typeID'] order_list.set_empty_region(region_id, type_id, generated_at) for row in rowset['rows']: order_kwargs = _columns_to_kwargs( SPEC_TO_KWARG_CONVERSION, order_columns, row) order_kwargs.update({ 'region_id': region_id, 'type_id': type_id, 'generated_at': generated_at, }) order_kwargs['order_issue_date'] = parse_datetime(order_kwargs['order_issue_date']) order_list.add_order(MarketOrder(**order_kwargs)) return order_list" 1163,"def encode_to_json(order_list): """""" Encodes this list of MarketOrder instances to a JSON string. :param MarketOrderList order_list: The order list to serialize. :rtype: str """""" rowsets = [] for items_in_region_list in order_list._orders.values(): region_id = items_in_region_list.region_id type_id = items_in_region_list.type_id generated_at = gen_iso_datetime_str(items_in_region_list.generated_at) rows = [] for order in items_in_region_list.orders: issue_date = gen_iso_datetime_str(order.order_issue_date) # The order in which these values are added is crucial. It must # match STANDARD_ENCODED_COLUMNS. rows.append([ order.price, order.volume_remaining, order.order_range, order.order_id, order.volume_entered, order.minimum_volume, order.is_bid, issue_date, order.order_duration, order.station_id, order.solar_system_id, ]) rowsets.append(dict( generatedAt = generated_at, regionID = region_id, typeID = type_id, rows = rows, )) json_dict = { 'resultType': 'orders', 'version': '0.1', 'uploadKeys': order_list.upload_keys, 'generator': order_list.order_generator, 'currentTime': gen_iso_datetime_str(now_dtime_in_utc()), # This must match the order of the values in the row assembling portion # above this. 'columns': STANDARD_ENCODED_COLUMNS, 'rowsets': rowsets, } return json.dumps(json_dict)" 1164,"def weather(query): """"""weather(query) -- use Name Entity Recogniser (nltk-stanford-ner), to determine location entity in query and fetch weather info for that location (using yahoo apis). """""" print 'Identifying the location . . .' 
try: response = unirest.post(""https://textanalysis.p.mashape.com/nltk-stanford-ner"", headers={ ""X-Mashape-Key"": ""E7WffsNDbNmshj4aVC4NUwj9dT9ep1S2cc3jsnFp5wSCzNBiaP"", ""Content-Type"": ""application/x-www-form-urlencoded"" }, params={ ""text"": query } ) except: print 'Unable to connect to internet' return location = '' for entity in response.body['result'].split(): word,tag = entity.split('/') if(tag == 'LOCATION'): location += ' '+word if(location != ''): print 'Gathering weather information for'+location import urllib2, urllib, json baseurl = ""https://query.yahooapis.com/v1/public/yql?"" yql_query = ""select * from weather.forecast where woeid in \ (select woeid from geo.places(1) where text=\""""+location+""\"")"" yql_url = baseurl + urllib.urlencode({'q':yql_query}) + ""&format=json"" try: result = urllib2.urlopen(yql_url).read() data = json.loads(result) result = data['query']['results']['channel'] print result['location']['city']+' '+result['location']['country']+' '+result['location']['region'] print result['item']['condition']['date'] print result['item']['condition']['text'] print result['item']['condition']['temp']+' '+result['units']['temperature'] except: print 'Unable to connect to internet' else: print 'Unable to get the location.'" 1165,"def generic(query): """""" generic(query) -- process a generic user query using the Stanford NLTK NER and duckduckgo api. """""" try: response = unirest.post(""https://textanalysis.p.mashape.com/nltk-stanford-ner"", headers={ ""X-Mashape-Key"": ""E7WffsNDbNmshj4aVC4NUwj9dT9ep1S2cc3jsnFp5wSCzNBiaP"", ""Content-Type"": ""application/x-www-form-urlencoded"" }, params={ ""text"": query } ) except: print 'Unable to connect to internet' return web_query = '' for entity in response.body['result'].split(): word,tag = entity.split('/') if(tag != 'O'): web_query += ' '+word if(web_query != ''): web_query = web_query.strip().split() duckduckgo.query(web_query) else: print 'I do not know how to process this query at this moment.'" 1166,"def _can_construct_from_str(strict_mode: bool, from_type: Type, to_type: Type) -> bool: """""" Returns true if the provided types are valid for constructor_with_str_arg conversion Explicitly declare that we are not able to convert primitive types (they already have their own converters) :param strict_mode: :param from_type: :param to_type: :return: """""" return to_type not in {int, float, bool}" 1167,"def are_flags_valid(packet_type, flags): """"""True when flags comply with [MQTT-2.2.2-1] requirements based on packet_type; False otherwise. Parameters ---------- packet_type: MqttControlPacketType flags: int Integer representation of 4-bit MQTT header flags field. Values outside of the range [0, 15] will certainly cause the function to return False. Returns ------- bool """""" if packet_type == MqttControlPacketType.publish: rv = 0 <= flags <= 15 elif packet_type in (MqttControlPacketType.pubrel, MqttControlPacketType.subscribe, MqttControlPacketType.unsubscribe): rv = flags == 2 elif packet_type in (MqttControlPacketType.connect, MqttControlPacketType.connack, MqttControlPacketType.puback, MqttControlPacketType.pubrec, MqttControlPacketType.pubcomp, MqttControlPacketType.suback, MqttControlPacketType.unsuback, MqttControlPacketType.pingreq, MqttControlPacketType.pingresp, MqttControlPacketType.disconnect): rv = flags == 0 else: raise NotImplementedError(packet_type) return rv" 1168,"def decode(f): """"""Extract a `MqttFixedHeader` from ``f``. Parameters ---------- f: file Object with read method. 
Raises ------- DecodeError When bytes decoded have values incompatible with a `MqttFixedHeader` object. UnderflowDecodeError When end-of-stream is encountered before the end of the fixed header. Returns ------- int Number of bytes consumed from ``f``. MqttFixedHeader Header object extracted from ``f``. """""" decoder = mqtt_io.FileDecoder(f) (byte_0,) = decoder.unpack(mqtt_io.FIELD_U8) packet_type_u4 = (byte_0 >> 4) flags = byte_0 & 0x0f try: packet_type = MqttControlPacketType(packet_type_u4) except ValueError: raise DecodeError('Unknown packet type 0x{:02x}.'.format(packet_type_u4)) if not are_flags_valid(packet_type, flags): raise DecodeError('Invalid flags for packet type.') num_bytes, num_remaining_bytes = decoder.unpack_varint(4) return decoder.num_bytes_consumed, MqttFixedHeader(packet_type, flags, num_remaining_bytes)" 1169,"def encode_body(self, f): """""" Parameters ---------- f: file File-like object with a write method. Returns ------- int Number of bytes written to ``f``. """""" num_bytes_written = 0 num_bytes_written += self.__encode_name(f) num_bytes_written += self.__encode_protocol_level(f) num_bytes_written += self.__encode_connect_flags(f) num_bytes_written += self.__encode_keep_alive(f) num_bytes_written += mqtt_io.encode_utf8(self.client_id, f) if self.will is not None: num_bytes_written += mqtt_io.encode_utf8(self.will.topic, f) num_bytes_written += mqtt_io.encode_bytes(self.will.message, f) if self.username is not None: num_bytes_written += mqtt_io.encode_utf8(self.username, f) if self.password is not None: num_bytes_written += mqtt_io.encode_utf8(self.password, f) return num_bytes_written" 1170,"def decode_body(cls, header, f): """"""Generates a `MqttSubscribe` packet given a `MqttFixedHeader`. This method asserts that header.packet_type is `subscribe`. Parameters ---------- header: MqttFixedHeader f: file Object with a read method. Raises ------ DecodeError When there are extra bytes at the end of the packet. Returns ------- int Number of bytes consumed from ``f``. MqttSubscribe Object extracted from ``f``. """""" assert header.packet_type == MqttControlPacketType.subscribe decoder = mqtt_io.FileDecoder(mqtt_io.LimitReader(f, header.remaining_len)) packet_id, = decoder.unpack(mqtt_io.FIELD_PACKET_ID) topics = [] while header.remaining_len > decoder.num_bytes_consumed: num_str_bytes, name = decoder.unpack_utf8() max_qos, = decoder.unpack(mqtt_io.FIELD_U8) try: sub_topic = MqttTopic(name, max_qos) except ValueError: raise DecodeError('Invalid QOS {}'.format(max_qos)) topics.append(sub_topic) assert header.remaining_len == decoder.num_bytes_consumed return decoder.num_bytes_consumed, MqttSubscribe(packet_id, topics)" 1171,"def decode_body(cls, header, f): """"""Generates a `MqttSuback` packet given a `MqttFixedHeader`. This method asserts that header.packet_type is `suback`. Parameters ---------- header: MqttFixedHeader f: file Object with a read method. Raises ------ DecodeError When there are extra bytes at the end of the packet. Returns ------- int Number of bytes consumed from ``f``. MqttSuback Object extracted from ``f``. 
"""""" assert header.packet_type == MqttControlPacketType.suback decoder = mqtt_io.FileDecoder(mqtt_io.LimitReader(f, header.remaining_len)) packet_id, = decoder.unpack(mqtt_io.FIELD_PACKET_ID) results = [] while header.remaining_len > decoder.num_bytes_consumed: result, = decoder.unpack(mqtt_io.FIELD_U8) try: results.append(SubscribeResult(result)) except ValueError: raise DecodeError('Unsupported result {:02x}.'.format(result)) assert header.remaining_len == decoder.num_bytes_consumed return decoder.num_bytes_consumed, MqttSuback(packet_id, results)" 1172,"def decode_body(cls, header, f): """"""Generates a `MqttPublish` packet given a `MqttFixedHeader`. This method asserts that header.packet_type is `publish`. Parameters ---------- header: MqttFixedHeader f: file Object with a read method. Raises ------ DecodeError When there are extra bytes at the end of the packet. Returns ------- int Number of bytes consumed from ``f``. MqttPublish Object extracted from ``f``. """""" assert header.packet_type == MqttControlPacketType.publish dupe = bool(header.flags & 0x08) retain = bool(header.flags & 0x01) qos = ((header.flags & 0x06) >> 1) if qos == 0 and dupe: # The DUP flag MUST be set to 0 for all QoS 0 messages # [MQTT-3.3.1-2] raise DecodeError(""Unexpected dupe=True for qos==0 message [MQTT-3.3.1-2]."") decoder = mqtt_io.FileDecoder(mqtt_io.LimitReader(f, header.remaining_len)) num_bytes_consumed, topic_name = decoder.unpack_utf8() if qos != 0: # See MQTT 3.1.1 section 3.3.2.2 # See https://github.com/kcallin/mqtt-codec/issues/5 packet_id, = decoder.unpack(mqtt_io.FIELD_PACKET_ID) else: packet_id = 0 payload_len = header.remaining_len - decoder.num_bytes_consumed payload = decoder.read(payload_len) return decoder.num_bytes_consumed, MqttPublish(packet_id, topic_name, payload, dupe, qos, retain)" 1173,"def decode_body(cls, header, f): """"""Generates a `MqttPubrel` packet given a `MqttFixedHeader`. This method asserts that header.packet_type is `pubrel`. Parameters ---------- header: MqttFixedHeader f: file Object with a read method. Raises ------ DecodeError When there are extra bytes at the end of the packet. Returns ------- int Number of bytes consumed from ``f``. MqttPubrel Object extracted from ``f``. """""" assert header.packet_type == MqttControlPacketType.pubrel decoder = mqtt_io.FileDecoder(mqtt_io.LimitReader(f, header.remaining_len)) packet_id, = decoder.unpack(mqtt_io.FIELD_U16) if header.remaining_len != decoder.num_bytes_consumed: raise DecodeError('Extra bytes at end of packet.') return decoder.num_bytes_consumed, MqttPubrel(packet_id)" 1174,"def decode_body(cls, header, f): """"""Generates a `MqttUnsubscribe` packet given a `MqttFixedHeader`. This method asserts that header.packet_type is `unsubscribe`. Parameters ---------- header: MqttFixedHeader f: file Object with a read method. Raises ------ DecodeError When there are extra bytes at the end of the packet. Returns ------- int Number of bytes consumed from ``f``. MqttUnsubscribe Object extracted from ``f``. 
"""""" assert header.packet_type == MqttControlPacketType.unsubscribe decoder = mqtt_io.FileDecoder(mqtt_io.LimitReader(f, header.remaining_len)) packet_id, = decoder.unpack(mqtt_io.FIELD_PACKET_ID) topics = [] while header.remaining_len > decoder.num_bytes_consumed: num_str_bytes, topic = decoder.unpack_utf8() topics.append(topic) assert header.remaining_len - decoder.num_bytes_consumed == 0 return decoder.num_bytes_consumed, MqttUnsubscribe(packet_id, topics)" 1175,"def decode_body(cls, header, f): """"""Generates a `MqttUnsuback` packet given a `MqttFixedHeader`. This method asserts that header.packet_type is `unsuback`. Parameters ---------- header: MqttFixedHeader f: file Object with a read method. Raises ------ DecodeError When there are extra bytes at the end of the packet. Returns ------- int Number of bytes consumed from ``f``. MqttUnsuback Object extracted from ``f``. """""" assert header.packet_type == MqttControlPacketType.unsuback decoder = mqtt_io.FileDecoder(mqtt_io.LimitReader(f, header.remaining_len)) packet_id, = decoder.unpack(mqtt_io.FIELD_PACKET_ID) if header.remaining_len != decoder.num_bytes_consumed: raise DecodeError('Extra bytes at end of packet.') return decoder.num_bytes_consumed, MqttUnsuback(packet_id)" 1176,"def decode_body(cls, header, f): """"""Generates a `MqttPingreq` packet given a `MqttFixedHeader`. This method asserts that header.packet_type is `pingreq`. Parameters ---------- header: MqttFixedHeader f: file Object with a read method. Raises ------ DecodeError When there are extra bytes at the end of the packet. Returns ------- int Number of bytes consumed from ``f``. MqttPingreq Object extracted from ``f``. """""" assert header.packet_type == MqttControlPacketType.pingreq if header.remaining_len != 0: raise DecodeError('Extra bytes at end of packet.') return 0, MqttPingreq()" 1177,"def decode_body(cls, header, f): """"""Generates a `MqttPingresp` packet given a `MqttFixedHeader`. This method asserts that header.packet_type is `pingresp`. Parameters ---------- header: MqttFixedHeader f: file Object with a read method. Raises ------ DecodeError When there are extra bytes at the end of the packet. Returns ------- int Number of bytes consumed from ``f``. MqttPingresp Object extracted from ``f``. """""" assert header.packet_type == MqttControlPacketType.pingresp if header.remaining_len != 0: raise DecodeError('Extra bytes at end of packet.') return 0, MqttPingresp()" 1178,"def decode_body(cls, header, f): """"""Generates a :class:`MqttDisconnect` packet given a :class:`MqttFixedHeader`. This method asserts that header.packet_type is :const:`MqttControlPacketType.disconnect`. Parameters ---------- header: MqttFixedHeader f: file Object with a read method. Raises ------ DecodeError When there are extra bytes at the end of the packet. Returns ------- int Number of bytes consumed from ``f``. MqttDisconnect Object extracted from ``f``. """""" assert header.packet_type == MqttControlPacketType.disconnect if header.remaining_len != 0: raise DecodeError('Extra bytes at end of packet.') return 0, MqttDisconnect()" 1179,"def getter(name, key=None): """""" Creates a read-only property for the attribute name *name*. If a *key* function is provided, it can be used to post-process the value of the attribute. 
"""""" if not key: key = lambda x: x def wrapper(self): return key(getattr(self, name)) wrapper.__name__ = wrapper.__qualname__ = name return property(wrapper)" 1180,"def connect(self): """""" Sets up your Phabricator session, it's not necessary to call this directly """""" if self.token: self.phab_session = {'token': self.token} return req = self.req_session.post('%s/api/conduit.connect' % self.host, data={ 'params': json.dumps(self.connect_params), 'output': 'json', '__conduit__': True, }) # Parse out the response (error handling ommitted) result = req.json()['result'] self.phab_session = { 'sessionKey': result['sessionKey'], 'connectionID': result['connectionID'], }" 1181,"def request(self, method, params=None): """""" Make a request to a method in the phabricator API :param method: Name of the API method to call :type method: basestring :param params: Optional dict of params to pass :type params: dict """""" if params is None: params = {} if not self.phab_session: self.connect() url = '%s/api/%s' % (self.host, method) params['__conduit__'] = self.phab_session req = self.req_session.post(url, data={ 'params': json.dumps(params), 'output': 'json', }) return json.loads( req.content.decode(), object_pairs_hook=collections.OrderedDict )['result']" 1182,"def get_musiclibrary(): lib_files = music_library.get_file_list(config.library_path) global lib lib = music_library.parse_library(lib_files) """""":type :musiclibrary.MusicLibrary"""""" return lib" 1183,"def install(force=False): """"""Install git hooks."""""" ret, git_dir, _ = run(""git rev-parse --show-toplevel"") if ret != 0: click.echo( ""ERROR: Please run from within a GIT repository."", file=sys.stderr) raise click.Abort git_dir = git_dir[0] hooks_dir = os.path.join(git_dir, HOOK_PATH) for hook in HOOKS: hook_path = os.path.join(hooks_dir, hook) if os.path.exists(hook_path): if not force: click.echo( ""Hook already exists. Skipping {0}"".format(hook_path), file=sys.stderr) continue else: os.unlink(hook_path) source = os.path.join(sys.prefix, ""bin"", ""kwalitee-"" + hook) os.symlink(os.path.normpath(source), hook_path) return True" 1184,"def uninstall(): """"""Uninstall git hooks."""""" ret, git_dir, _ = run(""git rev-parse --show-toplevel"") if ret != 0: click.echo( ""ERROR: Please run from within a GIT repository."", file=sys.stderr) raise click.Abort git_dir = git_dir[0] hooks_dir = os.path.join(git_dir, HOOK_PATH) for hook in HOOKS: hook_path = os.path.join(hooks_dir, hook) if os.path.exists(hook_path): os.remove(hook_path) return True" 1185,"def find_promulgation_date(line): """""" >>> find_promulgation_date(""Loi nº 2010-383 du 16 avril 2010 autorisant l'approbation de l'accord entre..."") '2010-04-16' """""" line = line.split(' du ')[1] return format_date(re.search(r""(\d\d? 
\w\w\w+ \d\d\d\d)"", line).group(1))" 1186,"def historic_doslegs_parse(html, url_an=None, logfile=sys.stderr, nth_dos_in_page=0, parse_previous_works=True, parse_next_works=True): """""" Parse an AN dosleg like http://www.assemblee-nationale.fr/13/dossiers/accord_Montenegro_mobilite_jeunes.asp nth_dos_in_page, parse_previous_works and parse_next_works are for internal logic """""" data = { 'url_dossier_assemblee': clean_url(url_an), 'urgence': False, } def log_error(*error): print('## ERROR ###', *error, file=logfile) def log_warning(*error): print('## WARNING ###', *error, file=logfile) soup = BeautifulSoup(html, 'lxml') legislature, slug = parse_national_assembly_url(data['url_dossier_assemblee']) data['assemblee_slug'] = slug if legislature: data['assemblee_legislature'] = legislature else: # strange link (old dosleg) log_error('NO LEGISLATURE IN AN LINK: ' + data['url_dossier_assemblee']) data['assemblee_id'] = '%s-%s' % (data.get('assemblee_legislature', ''), data['assemblee_slug']) data['steps'] = [] curr_institution = 'assemblee' curr_stage = '1ère lecture' last_section = None # Travaux des commissions/Discussion en séance publique last_step_index = 0 travaux_prep_already = False another_dosleg_inside = None predicted_next_step = None # For unfinished projects, we try to catch the next step previous_works = None url_jo = None html_lines = html.split('\n') for i, line in enumerate(html_lines): def parse_line(): return BeautifulSoup(line, 'lxml') def line_text(): return parse_line().text.strip() def get_last_step(): if len(data['steps']) > 0: return data['steps'][-1] return {} if '' in line or '' in line: data['long_title'] = line_text() if '
Travaux des commissions' in line: last_section = line_text() if 'Travaux préparatoires
' in line: if travaux_prep_already: if parse_next_works and not nth_dos_in_page: log_warning('FOUND ANOTHER DOSLEG INSIDE THE DOSLEG') another_dosleg_inside = '\n'.join(html.split('\n')[last_step_index + 1:]) if not nth_dos_in_page: break travaux_prep_already = False else: travaux_prep_already = True if not parse_next_works and travaux_prep_already and nth_dos_in_page: continue # Senat 1ère lecture, CMP, ... if '' in line: text = line_text() last_section = None if 'Dossier en ligne sur le site du Sénat' in text: data['url_dossier_senat'] = clean_url(parse_line().select( 'a')[-1].attrs['href']) text = text.replace( '(Dossier en ligne sur le site du Sénat)', '') if 'Sénat' in text: curr_institution = 'senat' elif 'Assemblée nationale' in text: curr_institution = 'assemblee' elif 'Commission Mixte Paritaire' in text or 'Lecture texte CMP' in text: curr_institution = 'CMP' curr_stage = 'CMP' elif 'Conseil Constitutionnel' in text: curr_institution = 'conseil constitutionnel' curr_stage = 'constitutionnalité' elif 'Congrès du Parlement' in text: curr_institution = 'congrès' curr_stage = 'congrès' if '1ère lecture' in text: curr_stage = '1ère lecture' elif '2e lecture' in text: curr_stage = '2ème lecture' elif 'Nouvelle lecture' in text: curr_stage = 'nouv. lect.' elif 'Lecture définitive' in text: curr_stage = 'l. définitive' if not curr_stage: curr_stage = text.split('-')[-1].strip().lower() if curr_stage == ""création de la commission d'enquête"": log_warning('COMMISSION D\'ENQUETE') return None if '>Proposition de résolution européenne<' in line: log_warning('PROPOSITION DE RESOLUTION EUROPEENE') return None if '>Accès aux Travaux préparatoires' in line and not previous_works: previous_works = clean_url(urljoin(url_an, parse_line().find('a').attrs['href'])) curr_step = None # conseil. consti. has no step but we should get the link no_step_but_good_link = False if 'Rapport portant également sur les propositions' in line: continue elif re.search(r']* href=[^>]*>(projet de loi|proposition de loi|proposition de résolution)', line, re.I): curr_step = 'depot' if curr_stage == 'CMP': continue elif "">Texte de la commission"" in line or '/ta-commission/' in line: curr_step = 'commission' elif '/ta/' in line or '/leg/tas' in line: if get_last_step().get('stage') != curr_stage: curr_step = 'depot' if curr_stage == 'CMP': curr_step = 'commission' else: curr_step = 'hemicycle' elif ('/rapports/' in line or '/rap/' in line) and last_section and 'commissions' in last_section: if get_last_step().get('step') == 'commission': # log_warning('DOUBLE COMMISSION LINE: %s' % line) continue curr_step = 'commission' elif 'www.conseil-constitutionnel.fr/decision/' in line: no_step_but_good_link = True # no commissions for l. définitive if curr_stage == 'l. 
définitive' and curr_step == 'commission': continue if curr_step or no_step_but_good_link: # if same step previously, replace or not the url if get_last_step().get('step') == curr_step: # log_warning('DOUBLE STEP: %s' % line) # remove last step since we prefer text links instead of reports links # TODO: add report link as bonus_url last_url = get_last_step().get('source_url') if not last_url or ('/rapports/' in last_url or '/rap/' in last_url): data['steps'] = data['steps'][:-1] # looks like the last url was already a text, let's assume it's a multi-depot else: # multi-depot if not CMP # TODO: re-order multi depot if curr_institution == 'senat' and curr_stage != 'CMP': curr_step = 'depot' links = [a.attrs.get('href') for a in parse_line().select('a')] links = [ href for href in links if href and 'fiches_id' not in href and '/senateur/' not in href and 'javascript:' not in href] if not links: log_error('NO LINK IN LINE: %s' % (line,)) continue urls_raps = [] urls_others = [] for href in links: if '/rap/' in href or '/rapports/' in href: urls_raps.append(href) else: urls_others.append(href) cmp_commission_other_url = None if len(urls_others) > 0: url = urls_others[0] # CMP commission should produce two texts, one for each institution if curr_step == 'commission' and curr_stage == 'CMP' and len(urls_others) > 1: cmp_commission_other_url = clean_url(urljoin(url_an, urls_others[1])) else: url = urls_raps[0] url = clean_url(urljoin(url_an, url)) real_institution = curr_institution if curr_stage == 'CMP' and curr_step == 'hemicycle': if 'assemblee-nationale.fr' in url: real_institution = 'assemblee' elif 'senat.fr' in url: real_institution = 'senat' step = { 'institution': real_institution, 'stage': curr_stage, 'source_url': url, } if curr_step: step['step'] = curr_step if cmp_commission_other_url: step['cmp_commission_other_url'] = cmp_commission_other_url # try to detect a date for test_line in (line, html_lines[i-1]): test_line = test_line.replace('1er', '1') date_match = re.search(r'(déposée? le|adoptée? .*? le|modifiée? .*?|rejetée? .*?)\s*(\d\d? \w\w\w+ \d\d\d\d)', test_line, re.I) if date_match: step['date'] = format_date(date_match.group(2)) else: date_match = re.search(r'(mis en ligne le)\s*(\d\d? 
\w\w\w+ \d\d\d\d)', test_line, re.I) if date_match: step['date'] = format_date(date_match.group(2)) if 'date' in step and 'beginning' not in data: data['beginning'] = step['date'] data['steps'].append(step) predicted_next_step = None last_step_index = i if 'publiée au Journal Officiel' in line and not url_jo: links = [clean_url(a.attrs['href']) for a in parse_line().select('a') if 'legifrance' in a.attrs.get('href', '')] if not links: log_error('NO GOOD LINK IN LINE: %s' % (line,)) continue url_jo = links[-1] if 'Le Gouvernement a engagé la procédure accélérée' in line or 'engagement de la procédure accélérée' in line: data['urgence'] = True # Next step prediction via small clues # TODO: this could be done via last_section (we parse two times the same thing) # TODO: this fails for CMP hemicycle senat if curr_stage != 'CMP': if '>Discussion en séance publique<' in line: predicted_next_step = { 'institution': curr_institution, 'stage': curr_stage, 'step': 'hemicycle', } elif '>Travaux des commissions<' in line: predicted_next_step = { 'institution': curr_institution, 'stage': curr_stage, 'step': 'commission', } metas = {} for meta in soup.select('meta'): if 'name' in meta.attrs: metas[meta.attrs['name']] = meta.attrs['content'] if not url_jo: url_jo = metas.get('LIEN_LOI_PROMULGUEE') if url_jo: data['url_jo'] = clean_url(url_jo) promulgation_step = { 'institution': 'gouvernement', 'stage': 'promulgation', 'source_url': data['url_jo'], } if metas.get('LOI_PROMULGUEE'): data['end'] = find_promulgation_date(metas.get('LOI_PROMULGUEE')) promulgation_step['date'] = data['end'] data['steps'].append(promulgation_step) # add predicted next step for unfinished projects elif predicted_next_step: data['steps'].append(predicted_next_step) if 'url_dossier_senat' not in data or 'dossier-legislatif' not in data['url_dossier_senat']: senat_url = find_senat_url(data) if senat_url: data['url_dossier_senat'] = senat_url # append previous works if there are some if previous_works and parse_previous_works: log_warning('MERGING %s WITH PREVIOUS WORKS %s' % (url_an, previous_works)) resp = download_historic_dosleg(previous_works) prev_data = historic_doslegs_parse( resp.text, previous_works, logfile=logfile, nth_dos_in_page=nth_dos_in_page, parse_next_works=False) if prev_data: prev_data = prev_data[nth_dos_in_page] if len(prev_data) > 1 else prev_data[0] data = merge_previous_works_an(prev_data, data) else: log_warning('INVALID PREVIOUS WORKS', previous_works) # is this part of a dosleg previous works ? next_legislature = data['assemblee_legislature'] + 1 if 'assemblee_legislature' in data else 9999 if parse_next_works and next_legislature < 15: # TODO: parse 15th legislature from open data if it exists resp = download_historic_dosleg(url_an.replace('/%d/' % data['assemblee_legislature'], '/%d/' % (data['assemblee_legislature'] + 1))) if resp.status_code == 200: recent_data = historic_doslegs_parse( resp.text, resp.url, logfile=logfile, nth_dos_in_page=nth_dos_in_page, parse_previous_works=False) if recent_data: log_warning('FOUND MORE RECENT WORKS', resp.url) recent_data = recent_data[nth_dos_in_page] if len(recent_data) > 1 else recent_data[0] data = merge_previous_works_an(data, recent_data) if another_dosleg_inside: others = historic_doslegs_parse(another_dosleg_inside, url_an, logfile=logfile, nth_dos_in_page=nth_dos_in_page+1) if others: return [data] + others return [data]" 1187,"def auto_need(form): """"""Automatically ``need()`` the relevant Fanstatic resources for a form. 
This function automatically utilises libraries in the ``js.*`` namespace (such as ``js.jquery``, ``js.tinymce`` and so forth) to allow Fanstatic to better manage these resources (caching, minifications) and avoid duplication across the rest of your application. """""" requirements = form.get_widget_requirements() for library, version in requirements: resources = resource_mapping[library] if not isinstance(resources, list): # pragma: no cover (bw compat only) resources = [resources] for resource in resources: resource.need()" 1188,"def setup_logger(): """""" setup basic logger """""" logger = logging.getLogger('dockerstache') logger.setLevel(logging.INFO) handler = logging.StreamHandler(stream=sys.stdout) handler.setLevel(logging.INFO) logger.addHandler(handler) return logger" 1189,"def named_any(name): """""" Retrieve a Python object by its fully qualified name from the global Python module namespace. The first part of the name, that describes a module, will be discovered and imported. Each subsequent part of the name is treated as the name of an attribute of the object specified by all of the name which came before it. @param name: The name of the object to return. @return: the Python object identified by 'name'. """""" assert name, 'Empty module name' names = name.split('.') topLevelPackage = None moduleNames = names[:] while not topLevelPackage: if moduleNames: trialname = '.'.join(moduleNames) try: topLevelPackage = __import__(trialname) except Exception, ex: moduleNames.pop() else: if len(names) == 1: raise Exception(""No module named %r"" % (name,)) else: raise Exception('%r does not name an object' % (name,)) obj = topLevelPackage for n in names[1:]: obj = getattr(obj, n) return obj" 1190,"def for_name(modpath, classname): ''' Returns a class of ""classname"" from module ""modname"". ''' module = __import__(modpath, fromlist=[classname]) classobj = getattr(module, classname) return classobj()" 1191,"def _convert(self, val): """""" Convert the type if necessary and return if a conversion happened. """""" if isinstance(val, dict) and not isinstance(val, DotDict): return DotDict(val), True elif isinstance(val, list) and not isinstance(val, DotList): return DotList(val), True return val, False" 1192,"def full_subgraph(self, vertices): """""" Return the subgraph of this graph whose vertices are the given ones and whose edges are the edges of the original graph between those vertices. """""" obj_map = {vertex.id: vertex for vertex in vertices} edges = [ edge for vertex_id in obj_map for edge in self._out_edges[vertex_id] if edge.head in obj_map ] return AnnotatedGraph( vertices=obj_map.values(), edges=edges, )" 1193,"def to_json(self): """""" Convert to a JSON string. """""" obj = { ""vertices"": [ { ""id"": vertex.id, ""annotation"": vertex.annotation, } for vertex in self.vertices ], ""edges"": [ { ""id"": edge.id, ""annotation"": edge.annotation, ""head"": edge.head, ""tail"": edge.tail, } for edge in self._edges ], } # Ensure that we always return unicode output on Python 2. return six.text_type(json.dumps(obj, ensure_ascii=False))" 1194,"def from_json(cls, json_graph): """""" Reconstruct the graph from a graph exported to JSON. 
"""""" obj = json.loads(json_graph) vertices = [ AnnotatedVertex( id=vertex[""id""], annotation=vertex[""annotation""], ) for vertex in obj[""vertices""] ] edges = [ AnnotatedEdge( id=edge[""id""], annotation=edge[""annotation""], head=edge[""head""], tail=edge[""tail""], ) for edge in obj[""edges""] ] return cls(vertices=vertices, edges=edges)" 1195,"def export_json(self, filename): """""" Export graph in JSON form to the given file. """""" json_graph = self.to_json() with open(filename, 'wb') as f: f.write(json_graph.encode('utf-8'))" 1196,"def import_json(cls, filename): """""" Import graph from the given file. The file is expected to contain UTF-8 encoded JSON data. """""" with open(filename, 'rb') as f: json_graph = f.read().decode('utf-8') return cls.from_json(json_graph)" 1197,"def to_dot(self): """""" Produce a graph in DOT format. """""" edge_labels = { edge.id: edge.annotation for edge in self._edges } edges = [self._format_edge(edge_labels, edge) for edge in self._edges] vertices = [ DOT_VERTEX_TEMPLATE.format( vertex=vertex.id, label=dot_quote(vertex.annotation), ) for vertex in self.vertices ] return DOT_DIGRAPH_TEMPLATE.format( edges="""".join(edges), vertices="""".join(vertices), )" 1198,"def export_image(self, filename='refcycle.png', format=None, dot_executable='dot'): """""" Export graph as an image. This requires that Graphviz is installed and that the ``dot`` executable is in your path. The *filename* argument specifies the output filename. The *format* argument lets you specify the output format. It may be any format that ``dot`` understands, including extended format specifications like ``png:cairo``. If omitted, the filename extension will be used; if no filename extension is present, ``png`` will be used. The *dot_executable* argument lets you provide a full path to the ``dot`` executable if necessary. """""" # Figure out what output format to use. if format is None: _, extension = os.path.splitext(filename) if extension.startswith('.') and len(extension) > 1: format = extension[1:] else: format = 'png' # Convert to 'dot' format. dot_graph = self.to_dot() # We'll send the graph directly to the process stdin. cmd = [ dot_executable, '-T{}'.format(format), '-o{}'.format(filename), ] dot = subprocess.Popen(cmd, stdin=subprocess.PIPE) dot.communicate(dot_graph.encode('utf-8'))" 1199,"def serve_command(info, host, port, reload, debugger, eager_loading, with_threads): """"""Runs a local development server for the Flask application. This local server is recommended for development purposes only but it can also be used for simple intranet deployments. By default it will not support any sort of concurrency at all to simplify debugging. This can be changed with the --with-threads option which will enable basic multithreading. The reloader and debugger are by default enabled if the debug flag of Flask is enabled and disabled otherwise. """""" from werkzeug.serving import run_simple debug = get_debug_flag() if reload is None: reload = bool(debug) if debugger is None: debugger = bool(debug) if eager_loading is None: eager_loading = not reload app = DispatchingApp(info.load_app, use_eager_loading=eager_loading) # Extra startup messages. This depends a but on Werkzeug internals to # not double execute when the reloader kicks in. if os.environ.get('WERKZEUG_RUN_MAIN') != 'true': # If we have an import path we can print it out now which can help # people understand what's being served. 
If we do not have an # import path because the app was loaded through a callback then # we won't print anything. if info.app_import_path is not None: print(' * Serving Flask app ""%s""' % info.app_import_path) if debug is not None: print(' * Forcing debug mode %s' % (debug and 'on' or 'off')) reloader_path = '.' if info.app_import_path: if os.path.isdir(info.app_import_path): reloader_path = info.app_import_path elif os.path.isfile(info.app_import_path): reloader_path = os.path.dirname(info.app_import_path) extra_files = get_reloader_extra_files(reloader_path) run_simple(host, port, app, use_reloader=reload, extra_files=extra_files, use_debugger=debugger, threaded=with_threads)" 1200,"def load_calls(self, call_type='jsonrpc'): """"""Loads the KvasirAPI calls into API.call based on the call_type variable. Utilizes the `Calls` class to establish an attribute-based access method. For instance a configuration with an instance called 'internal' will create an API.call that can be used like this: API.call.internal.hosts.list() # return all hosts from Kvasir instance 'internal' :param call_type: string of 'jsonrpc' or 'restapi' :return: self.call dictionary """""" valid = False if call_type == 'jsonrpc': #from jsonrpc import Hosts, Services, Accounts, Vulns, OS, NetBIOS, Evidence import jsonrpc as api_calls self.api_calls = api_calls valid = True #if call_type == 'rest' # TODO: Implement restful API functions #from restapi import hosts, services, accounts, vulns, os, netbios, evidence if valid: # if kvasir configuration is valid, go through the instances and build the self.call dict for instance, values in self.configuration.instances_dict.items(): self.call[instance] = Calls() self.call[instance].call_type = call_type self.call[instance].hosts = self.api_calls.Hosts(values, self.configuration.web2py_dir) self.call[instance].services = self.api_calls.Services(values, self.configuration.web2py_dir) self.call[instance].accounts = self.api_calls.Accounts(values, self.configuration.web2py_dir) self.call[instance].vulns = self.api_calls.Vulns(values, self.configuration.web2py_dir) self.call[instance].os = self.api_calls.OpSys(values, self.configuration.web2py_dir) self.call[instance].snmp = self.api_calls.SNMP(values, self.configuration.web2py_dir) self.call[instance].netbios = self.api_calls.NetBIOS(values, self.configuration.web2py_dir) self.call[instance].evidence = self.api_calls.Evidence(values, self.configuration.web2py_dir) self.call[instance].stats = self.api_calls.Stats(values, self.configuration.web2py_dir)" 1201,"def install_brew(target_path): """""" Install brew to the target path """""" if not os.path.exists(target_path): try: os.makedirs(target_path) except OSError: logger.warn(""Unable to create directory %s for brew."" % target_path) logger.warn(""Skipping..."") return extract_targz(HOMEBREW_URL, target_path, remove_common_prefix=True)" 1202,"def scales(self, image): """"""scales(image) -> scale, shape Computes the all possible scales for the given image and yields a tuple of the scale and the scaled image shape as an iterator. 
**Parameters::** ``image`` : array_like(2D or 3D) The image, for which the scales should be computed **Yields:** ``scale`` : float The next scale of the image to be considered ``shape`` : (int, int) or (int, int, int) The shape of the image, when scaled with the current ``scale`` """""" # compute the minimum scale so that the patch size still fits into the given image minimum_scale = max(self.m_patch_box.size_f[0] / image.shape[-2], self.m_patch_box.size_f[1] / image.shape[-1]) if self.m_lowest_scale: maximum_scale = min(minimum_scale / self.m_lowest_scale, 1.) else: maximum_scale = 1. current_scale_power = 0. # iterate over all possible scales while True: # scale the image scale = minimum_scale * math.pow(self.m_scale_factor, current_scale_power) if scale > maximum_scale: # image is smaller than the requested minimum size break current_scale_power -= 1. scaled_image_shape = bob.ip.base.scaled_output_shape(image, scale) # return both the scale and the scaled image size yield scale, scaled_image_shape" 1203,"def sample_scaled(self, shape): """"""sample_scaled(shape) -> bounding_box Yields an iterator that iterates over all sampled bounding boxes in the given (scaled) image shape. **Parameters:** ``shape`` : (int, int) or (int, int, int) The (current) shape of the (scaled) image **Yields:** ``bounding_box`` : :py:class:`BoundingBox` An iterator iterating over all bounding boxes that are valid for the given shape """""" for y in range(0, shape[-2]-self.m_patch_box.bottomright[0], self.m_distance): for x in range(0, shape[-1]-self.m_patch_box.bottomright[1], self.m_distance): # create bounding box for the current shift yield self.m_patch_box.shift((y,x))" 1204,"def sample(self, image): """"""sample(image) -> bounding_box Yields an iterator over all bounding boxes in different scales that are sampled for the given image. **Parameters:** ``image`` : array_like(2D or 3D) The image, for which the bounding boxes should be generated **Yields:** ``bounding_box`` : :py:class:`BoundingBox` An iterator iterating over all bounding boxes for the given ``image`` """""" for scale, scaled_image_shape in self.scales(image): # prepare the feature extractor to extract features from the given image for bb in self.sample_scaled(scaled_image_shape): # extract features for yield bb.scale(1./scale)" 1205,"def iterate(self, image, feature_extractor, feature_vector): """"""iterate(image, feature_extractor, feature_vector) -> bounding_box Scales the given image, and extracts features from all possible bounding boxes. For each of the sampled bounding boxes, this function fills the given pre-allocated feature vector and yields the current bounding box. 
**Parameters:** ``image`` : array_like(2D) The given image to extract features for ``feature_extractor`` : :py:class:`FeatureExtractor` The feature extractor to use to extract the features for the sampled patches ``feature_vector`` : :py:class:`numpy.ndarray` (1D, uint16) The pre-allocated feature vector that will be filled inside this function; needs to be of size :py:attr:`FeatureExtractor.number_of_features` **Yields:** ``bounding_box`` : :py:class:`BoundingBox` The bounding box for which the current features are extracted for """""" for scale, scaled_image_shape in self.scales(image): # prepare the feature extractor to extract features from the given image feature_extractor.prepare(image, scale) for bb in self.sample_scaled(scaled_image_shape): # extract features for feature_extractor.extract_indexed(bb, feature_vector) yield bb.scale(1./scale)" 1206,"def iterate_cascade(self, cascade, image, threshold = None): """"""iterate_cascade(self, cascade, image, [threshold]) -> prediction, bounding_box Iterates over the given image and computes the cascade of classifiers. This function will compute the cascaded classification result for the given ``image`` using the given ``cascade``. It yields a tuple of prediction value and the according bounding box. If a ``threshold`` is specified, only those ``prediction``\s are returned, which exceed the given ``threshold``. .. note:: The ``threshold`` does not overwrite the cascade thresholds `:py:attr:`Cascade.thresholds`, but only threshold the final prediction. Specifying the ``threshold`` here is just slightly faster than thresholding the yielded prediction. **Parameters:** ``cascade`` : :py:class:`Cascade` The cascade that performs the predictions ``image`` : array_like(2D) The image for which the predictions should be computed ``threshold`` : float The threshold, which limits the number of predictions **Yields:** ``prediction`` : float The prediction value for the current bounding box ``bounding_box`` : :py:class:`BoundingBox` An iterator over all possible sampled bounding boxes (which exceed the prediction ``threshold``, if given) """""" for scale, scaled_image_shape in self.scales(image): # prepare the feature extractor to extract features from the given image cascade.prepare(image, scale) for bb in self.sample_scaled(scaled_image_shape): # return the prediction and the bounding box, if the prediction is over threshold prediction = cascade(bb) if threshold is None or prediction > threshold: yield prediction, bb.scale(1./scale)" 1207,"def pass_service(*names): """"""Injects a service instance into the kwargs """""" def decorator(f): @functools.wraps(f) def wrapper(*args, **kwargs): for name in names: kwargs[name] = service_proxy(name) return f(*args, **kwargs) return wrapper return decorator" 1208,"def get_conn(): """"""Return a connection to DynamoDB."""""" if os.environ.get('DEBUG', False) or os.environ.get('travis', False): # In DEBUG mode - use the local DynamoDB # This also works for travis since we'll be running dynalite conn = DynamoDBConnection( host='localhost', port=8000, aws_access_key_id='TEST', aws_secret_access_key='TEST', is_secure=False ) else: # Regular old production conn = DynamoDBConnection() return conn" 1209,"def map_index_val(index_val): """"""Xform index_val so that it can be stored/queried."""""" if index_val is None: return DynamoMappings.NONE_VAL index_val = str(index_val) if not index_val: return DynamoMappings.EMPTY_STR_VAL return index_val" 1210,"def table_schema_call(self, target, cls): """"""Perform a table schema 
call. We call the callable target with the args and keywords needed for the table defined by cls. This is how we centralize the Table.create and Table ctor calls. """""" index_defs = [] for name in cls.index_names() or []: index_defs.append(GlobalIncludeIndex( gsi_name(name), parts=[HashKey(name)], includes=['value'] )) return target( cls.get_table_name(), connection=get_conn(), schema=[HashKey('id')], global_indexes=index_defs or None )" 1211,"def ensure_table(self, cls): """"""Required functionality."""""" exists = True conn = get_conn() try: descrip = conn.describe_table(cls.get_table_name()) assert descrip is not None except ResourceNotFoundException: # Expected - this is what we get if there is no table exists = False except JSONResponseError: # Also assuming no table exists = False if not exists: table = self.table_schema_call(Table.create, cls) assert table is not None" 1212,"def find_one(self, cls, id): """"""Required functionality."""""" try: db_result = self.get_class_table(cls).lookup(id) except ItemNotFound: # according to docs, this shouldn't be required, but it IS db_result = None if not db_result: return None obj = cls.from_data(db_result['value']) return obj" 1213,"def find_all(self, cls): """"""Required functionality."""""" final_results = [] table = self.get_class_table(cls) for db_result in table.scan(): obj = cls.from_data(db_result['value']) final_results.append(obj) return final_results" 1214,"def find_by_index(self, cls, index_name, value): """"""Required functionality."""""" query_args = { index_name + '__eq': DynamoMappings.map_index_val(value), 'index': gsi_name(index_name) } final_results = [] for db_result in self.get_class_table(cls).query_2(**query_args): obj = cls.from_data(db_result['value']) final_results.append(obj) return final_results" 1215,"def save(self, obj): """"""Required functionality."""""" if not obj.id: obj.id = uuid() stored_data = { 'id': obj.id, 'value': obj.to_data() } index_vals = obj.indexes() or {} for key in obj.__class__.index_names() or []: val = index_vals.get(key, '') stored_data[key] = DynamoMappings.map_index_val(val) table = self.get_class_table(obj.__class__) item = Item(table, data=stored_data) item.save(overwrite=True)" 1216,"def process_event(self, name, subject, data): """""" Process a single event. :param name: :param subject: :param data: """""" method_mapping = Registry.get_event(name) if not method_mapping: log.info('@{}.process_event no subscriber for event `{}`' .format(self.__class__.__name__, name)) return for event, methods in method_mapping.items(): event_instance = event(subject, data) log.info('@{}.process_event `{}` for subject `{}`'.format( self.__class__.__name__, event_instance.__class__.__name__, subject )) for method in methods: with self._context_manager: log.info('>> Calling subscriber `{}`' .format(method.__name__)) method(event_instance)" 1217,"def thread(self): """""" Start a thread for this consumer. 
"""""" log.info('@{}.thread starting'.format(self.__class__.__name__)) thread = threading.Thread(target=thread_wrapper(self.consume), args=()) thread.daemon = True thread.start()" 1218,"def create(parser: Parser, obj: PersistedObject = None): """""" Helper method provided because we actually can't put that in the constructor, it creates a bug in Nose tests https://github.com/nose-devs/nose/issues/725 :param parser: :param obj: :return: """""" if obj is not None: return _InvalidParserException('Error ' + str(obj) + ' cannot be parsed using ' + str(parser) + ' since ' + ' this parser does not support ' + obj.get_pretty_file_mode()) else: return _InvalidParserException('Error this parser is neither SingleFile nor MultiFile !')" 1219,"def _parse_multifile(self, desired_type: Type[T], obj: PersistedObject, parsing_plan_for_children: Dict[str, ParsingPlan], logger: Logger, options: Dict[str, Dict[str, Any]]) -> T: """""" First parse all children from the parsing plan, then calls _build_object_from_parsed_children :param desired_type: :param obj: :param parsing_plan_for_children: :param logger: :param options: :return: """""" pass" 1220,"def execute(self, logger: Logger, options: Dict[str, Dict[str, Any]]) -> T: """""" Overrides the parent method to add log messages. :param logger: the logger to use during parsing (optional: None is supported) :param options: :return: """""" in_root_call = False if logger is not None: # log only for the root object, not for the children that will be created by the code below if not hasattr(_BaseParsingPlan.thrd_locals, 'flag_exec') \ or _BaseParsingPlan.thrd_locals.flag_exec == 0: # print('Executing Parsing Plan for ' + str(self)) logger.debug('Executing Parsing Plan for [{location}]' ''.format(location=self.obj_on_fs_to_parse.get_pretty_location(append_file_ext=False))) _BaseParsingPlan.thrd_locals.flag_exec = 1 in_root_call = True # Common log message logger.debug('(P) ' + get_parsing_plan_log_str(self.obj_on_fs_to_parse, self.obj_type, log_only_last=not in_root_call, parser=self.parser)) try: res = super(_BaseParsingPlan, self).execute(logger, options) if logger.isEnabledFor(DEBUG): logger.info('(P) {loc} -> {type} SUCCESS !' ''.format(loc=self.obj_on_fs_to_parse.get_pretty_location( blank_parent_part=not GLOBAL_CONFIG.full_paths_in_logs, compact_file_ext=True), type=get_pretty_type_str(self.obj_type))) else: logger.info('SUCCESS parsed [{loc}] as a [{type}] successfully. Parser used was [{parser}]' ''.format(loc=self.obj_on_fs_to_parse.get_pretty_location(compact_file_ext=True), type=get_pretty_type_str(self.obj_type), parser=str(self.parser))) if in_root_call: # print('Completed parsing successfully') logger.debug('Completed parsing successfully') return res finally: # remove threadlocal flag if needed if in_root_call: _BaseParsingPlan.thrd_locals.flag_exec = 0" 1221,"def _execute(self, logger: Logger, options: Dict[str, Dict[str, Any]]) -> T: """""" Implementation of the parent class method. Checks that self.parser is a _BaseParser, and calls the appropriate parsing method. 
:param logger: :param options: :return: """""" if isinstance(self.parser, _BaseParser): if (not self.is_singlefile) and self.parser.supports_multifile(): return self.parser._parse_multifile(self.obj_type, self.obj_on_fs_to_parse, self._get_children_parsing_plan(), logger, options) elif self.is_singlefile and self.parser.supports_singlefile(): return self.parser._parse_singlefile(self.obj_type, self.get_singlefile_path(), self.get_singlefile_encoding(), logger, options) else: raise _InvalidParserException.create(self.parser, self.obj_on_fs_to_parse) else: raise TypeError('Parser attached to this _BaseParsingPlan is not a ' + str(_BaseParser))" 1222,"def create_parsing_plan(self, desired_type: Type[T], filesystem_object: PersistedObject, logger: Logger, _main_call: bool = True): """""" Implements the abstract parent method by using the recursive parsing plan impl. Subclasses wishing to produce their own parsing plans should rather override _create_parsing_plan in order to benefit from this same log msg. :param desired_type: :param filesystem_object: :param logger: :param _main_call: internal parameter for recursive calls. Should not be changed by the user. :return: """""" in_root_call = False # -- log msg only for the root call, not for the children that will be created by the code below if _main_call and (not hasattr(AnyParser.thrd_locals, 'flag_init') or AnyParser.thrd_locals.flag_init == 0): # print('Building a parsing plan to parse ' + str(filesystem_object) + ' into a ' + # get_pretty_type_str(desired_type)) logger.debug('Building a parsing plan to parse [{location}] into a {type}' ''.format(location=filesystem_object.get_pretty_location(append_file_ext=False), type=get_pretty_type_str(desired_type))) AnyParser.thrd_locals.flag_init = 1 in_root_call = True # -- create the parsing plan try: pp = self._create_parsing_plan(desired_type, filesystem_object, logger, log_only_last=(not _main_call)) finally: # remove threadlocal flag if needed if in_root_call: AnyParser.thrd_locals.flag_init = 0 # -- log success only if in root call if in_root_call: # print('Parsing Plan created successfully') logger.debug('Parsing Plan created successfully') # -- finally return return pp" 1223,"def _create_parsing_plan(self, desired_type: Type[T], filesystem_object: PersistedObject, logger: Logger, log_only_last: bool = False): """""" Adds a log message and creates a recursive parsing plan. :param desired_type: :param filesystem_object: :param logger: :param log_only_last: a flag to only log the last part of the file path (default False) :return: """""" logger.debug('(B) ' + get_parsing_plan_log_str(filesystem_object, desired_type, log_only_last=log_only_last, parser=self)) return AnyParser._RecursiveParsingPlan(desired_type, filesystem_object, self, logger)" 1224,"def _get_parsing_plan_for_multifile_children(self, obj_on_fs: PersistedObject, desired_type: Type[T], logger: Logger) -> Dict[str, ParsingPlan[T]]: """""" This method is called by the _RecursiveParsingPlan when created. Implementing classes should return a dictionary containing a ParsingPlan for each child they plan to parse using this framework. Note that for the files that will be parsed using a parsing library it is not necessary to return a ParsingPlan. In other words, implementing classes should return here everything they need for their implementation of _parse_multifile to succeed. 
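For example (child names purely illustrative), an implementation for an object persisted as <obj>/a.txt and <obj>/b.csv could return a dictionary such as:

    {'a': <ParsingPlan for a.txt>, 'b': <ParsingPlan for b.csv>}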
Indeed during parsing execution, the framework will call their _parse_multifile method with that same dictionary as an argument (argument name is 'parsing_plan_for_children', see _BaseParser). :param obj_on_fs: :param desired_type: :param logger: :return: """""" pass" 1225,"def _parse_singlefile(self, desired_type: Type[T], file_path: str, encoding: str, logger: Logger, options: Dict[str, Dict[str, Any]]) -> T: """""" Implementation of the parent method : since this is a multifile parser, this is not implemented. :param desired_type: :param file_path: :param encoding: :param logger: :param options: :return: """""" raise Exception('Not implemented since this is a MultiFileParser')" 1226,"def create(parser_func: Union[ParsingMethodForStream, ParsingMethodForFile], caught: Exception): """""" Helper method provided because we actually can't put that in the constructor, it creates a bug in Nose tests https://github.com/nose-devs/nose/issues/725 :param parser_func: :param caught: :return: """""" msg = 'Caught TypeError while calling parsing function \'' + str(parser_func.__name__) + '\'. ' \ 'Note that the parsing function signature should be ' + parsing_method_stream_example_signature_str \ + ' (streaming=True) or ' + parsing_method_file_example_signature_str + ' (streaming=False).' \ 'Caught error message is : ' + caught.__class__.__name__ + ' : ' + str(caught) return CaughtTypeError(msg).with_traceback(caught.__traceback__)" 1227,"def _parse_singlefile(self, desired_type: Type[T], file_path: str, encoding: str, logger: Logger, options: Dict[str, Dict[str, Any]]) -> T: """""" Relies on the inner parsing function to parse the file. If _streaming_mode is True, the file will be opened and closed by this method. Otherwise the parsing function will be responsible to open and close. 
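Concretely, the two calling conventions used below are (argument lists abbreviated):

    streaming mode     -> self._parser_func(desired_type, file_stream, logger, **opts)
    non-streaming mode -> self._parser_func(desired_type, file_path, encoding, logger, **opts)

with **self.function_args added to the call when it is set.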
:param desired_type: :param file_path: :param encoding: :param options: :return: """""" opts = get_options_for_id(options, self.get_id_for_options()) if self._streaming_mode: # We open the stream, and let the function parse from it file_stream = None try: # Open the file with the appropriate encoding file_stream = open(file_path, 'r', encoding=encoding) # Apply the parsing function if self.function_args is None: return self._parser_func(desired_type, file_stream, logger, **opts) else: return self._parser_func(desired_type, file_stream, logger, **self.function_args, **opts) except TypeError as e: raise CaughtTypeError.create(self._parser_func, e) finally: if file_stream is not None: # Close the File in any case file_stream.close() else: # the parsing function will open the file itself if self.function_args is None: return self._parser_func(desired_type, file_path, encoding, logger, **opts) else: return self._parser_func(desired_type, file_path, encoding, logger, **self.function_args, **opts)" 1228,"def queryByPortSensor(portiaConfig, edgeId, port, sensor, strategy=SummaryStrategies.PER_HOUR, interval=1, params={ 'from': None, 'to': None, 'order': None, 'precision': 'ms', 'fill':'none', 'min': True, 'max': True, 'sum': True, 'avg': True, 'median': False, 'mode': False, 'stddev': False, 'spread': False }): """"""Returns a pandas data frame with the portia select resultset"""""" header = {'Accept': 'text/csv'} endpoint = '/summary/device/{0}/port/{1}/sensor/{2}/{3}/{4}{5}'.format( edgeId, port, sensor, resolveStrategy(strategy), interval, utils.buildGetParams(params) ) response = utils.httpGetRequest(portiaConfig, endpoint, header) if response.status_code == 200: try: dimensionSeries = pandas.read_csv( StringIO(response.text), sep=';' ) if portiaConfig['debug']: print( '[portia-debug]: {0} rows'.format( len(dimensionSeries.index) ) ) return dimensionSeries except: raise Exception('couldn\'t create pandas data frame') else: raise Exception('couldn\'t retrieve data')" 1229,"def _process_counter_example(self, mma, w_string): """""""" Process a counterexample in the Rivest-Schapire way. Args: mma (DFA): The hypothesis automaton w_string (str): The examined string to be consumed Returns: None """""" diff = len(w_string) same = 0 membership_answer = self._membership_query(w_string) while True: i = (same + diff) / 2 access_string = self._run_in_hypothesis(mma, w_string, i) if membership_answer != self._membership_query(access_string + w_string[i:]): diff = i else: same = i if diff - same == 1: break exp = w_string[diff:] self.observation_table.em_vector.append(exp) for row in self.observation_table.sm_vector + self.observation_table.smi_vector: self._fill_table_entry(row, exp) return 0" 1230,"def get_dfa_conjecture(self): """""" Utilize the observation table to construct a Mealy Machine. The library used for representing the Mealy Machine is the python bindings of the openFST library (pyFST). Args: None Returns: MealyMachine: A mealy machine build based on a closed and consistent observation table. """""" dfa = DFA(self.alphabet) for s in self.observation_table.sm_vector: for i in self.alphabet: dst = self.observation_table.equiv_classes[s + i] # If dst == None then the table is not closed. 
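# learn_dfa() only builds a conjecture after the closing loop (is_closed() /
# _ot_make_closed()) has succeeded, so this branch acts as a defensive check.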
if dst == None: logging.debug('Conjecture attempt on non closed table.') return None obsrv = self.observation_table[s, i] src_id = self.observation_table.sm_vector.index(s) dst_id = self.observation_table.sm_vector.index(dst) dfa.add_arc(src_id, dst_id, i, obsrv) # Mark the final states in the hypothesis automaton. i = 0 for s in self.observation_table.sm_vector: dfa[i].final = self.observation_table[s, self.epsilon] i += 1 return dfa" 1231,"def _init_table(self): """""" Initialize the observation table. """""" self.observation_table.sm_vector.append(self.epsilon) self.observation_table.smi_vector = list(self.alphabet) self.observation_table.em_vector.append(self.epsilon) self._fill_table_entry(self.epsilon, self.epsilon) for s in self.observation_table.smi_vector: self._fill_table_entry(s, self.epsilon)" 1232,"def learn_dfa(self, mma=None): """""" Implements the high level loop of the algorithm for learning a Mealy machine. Args: mma (DFA): The input automaton Returns: MealyMachine: A string and a model for the Mealy machine to be learned. """""" logging.info('Initializing learning procedure.') if mma: self._init_table_from_dfa(mma) else: self._init_table() logging.info('Generating a closed and consistent observation table.') while True: closed = False # Make sure that the table is closed while not closed: logging.debug('Checking if table is closed.') closed, string = self.observation_table.is_closed() if not closed: logging.debug('Closing table.') self._ot_make_closed(string) else: logging.debug('Table closed.') # Create conjecture dfa = self.get_dfa_conjecture() logging.info('Generated conjecture machine with %d states.',len(list(dfa.states))) # _check correctness logging.debug('Running equivalence query.') found, counter_example = self._equivalence_query(dfa) # Are we done? if found: logging.info('No counterexample found. 
Hypothesis is correct!') break # Add the new experiments into the table to reiterate the # learning loop logging.info('Processing counterexample %s with length %d.', counter_example, len(counter_example)) self._process_counter_example(dfa, counter_example) logging.info('Learning complete.') logging.info('Learned em_vector table is the following:') logging.info(self.observation_table.em_vector) return '', dfa" 1233,"def print_error_to_io_stream(err: Exception, io: TextIOBase, print_big_traceback : bool = True): """""" Utility method to print an exception's content to a stream :param err: :param io: :param print_big_traceback: :return: """""" if print_big_traceback: traceback.print_tb(err.__traceback__, file=io, limit=-GLOBAL_CONFIG.multiple_errors_tb_limit) else: traceback.print_tb(err.__traceback__, file=io, limit=-1) io.writelines(' ' + str(err.__class__.__name__) + ' : ' + str(err))" 1234,"def should_hide_traceback(e): """""" Returns True if we can hide the error traceback in the warnings messages """""" if type(e) in {WrongTypeCreatedError, CascadeError, TypeInformationRequiredError}: return True elif type(e).__name__ in {'InvalidAttributeNameForConstructorError', 'MissingMandatoryAttributeFiles'}: return True else: return False" 1235,"def _get_parsing_plan_for_multifile_children(self, obj_on_fs: PersistedObject, desired_type: Type[Any], logger: Logger) -> Dict[str, Any]: """""" Implementation of AnyParser API """""" raise Exception('This should never happen, since this parser relies on underlying parsers')" 1236,"def _parse_multifile(self, desired_type: Type[T], obj: PersistedObject, parsing_plan_for_children: Dict[str, AnyParser._RecursiveParsingPlan], logger: Logger, options: Dict[str, Dict[str, Any]]) -> T: """""" Implementation of AnyParser API """""" raise Exception('This should never happen, since this parser relies on underlying parsers')" 1237,"def add_parser_to_cascade(self, parser: AnyParser, typ: Type = None): """""" Adds the provided parser to this cascade. If this is the first parser, it will configure the cascade according to the parser capabilities (single and multifile support, extensions). Subsequent parsers will have to support the same capabilities at least, to be added. 
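Illustrative example (types and extensions invented for the example): if the first parser registered supports singlefile parsing of {dict} from {'.json'}, a parser added later must support at least singlefile mode, the dict type (unless a specific typ is given) and the '.json' extension; otherwise a ValueError is raised.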
:param parser: :param typ: :return: """""" # the first parser added will configure the cascade if not self.configured: self.supported_exts = parser.supported_exts self.supported_types = parser.supported_types # check if new parser is compliant with previous ones if self.supports_singlefile(): if not parser.supports_singlefile(): raise ValueError( 'Cannot add this parser to this parsing cascade : it does not match the rest of the cascades ' 'configuration (singlefile support)') if self.supports_multifile(): if not parser.supports_multifile(): raise ValueError( 'Cannot add this parser to this parsing cascade : it does not match the rest of the cascades ' 'configuration (multifile support)') if AnyObject not in parser.supported_types: if typ is None: # in that case the expected types for this parser will be self.supported_types if AnyObject in self.supported_types: raise ValueError( 'Cannot add this parser to this parsing cascade : it does not match the rest of the cascades ' 'configuration (the cascade supports any type while the parser only supports ' + str(parser.supported_types) + ')') else: missing_types = set(self.supported_types) - set(parser.supported_types) if len(missing_types) > 0: raise ValueError( 'Cannot add this parser to this parsing cascade : it does not match the rest of the ' 'cascades configuration (supported types should at least contain the supported types ' 'already in place. The parser misses type(s) ' + str(missing_types) + ')') else: # a parser is added but with a specific type target (parallel cascade) if typ == AnyObject: raise ValueError( 'Cannot add this parser to this parsing cascade : it does not match the expected type ""Any"", ' 'it only supports ' + str(parser.supported_types)) # else: # if get_base_generic_type(typ) not in parser.supported_types: # raise ValueError( # 'Cannot add this parser to this parsing cascade : it does not match the expected type ' + # str(typ) + ', it only supports ' + str(parser.supported_types)) missing_exts = set(self.supported_exts) - set(parser.supported_exts) if len(missing_exts) > 0: raise ValueError( 'Cannot add this parser to this parsing cascade : it does not match the rest of the cascades ' 'configuration (supported extensions should at least contain the supported extensions already in ' 'place. The parser misses extension(s) ' + str(missing_exts) + ')') # finally add it self._parsers_list.append((typ, parser))" 1238,"def _create_parsing_plan(self, desired_type: Type[T], filesystem_object: PersistedObject, logger: Logger, log_only_last: bool = False) -> ParsingPlan[T]: """""" Creates a parsing plan to parse the given filesystem object into the given desired_type. 
This overrides the method in AnyParser, in order to provide a 'cascading' parsing plan :param desired_type: :param filesystem_object: :param logger: :param log_only_last: a flag to only log the last part of the file path (default False) :return: """""" # build the parsing plan logger.debug('(B) ' + get_parsing_plan_log_str(filesystem_object, desired_type, log_only_last=log_only_last, parser=self)) return CascadingParser.CascadingParsingPlan(desired_type, filesystem_object, self, self._parsers_list, logger=logger)" 1239,"def _parse_singlefile(self, desired_type: Type[T], file_path: str, encoding: str, logger: Logger, options: Dict[str, Dict[str, Any]]) -> T: """""" Implementation of AnyParser API """""" # first use the base parser to parse something compliant with the conversion chain first = self._base_parser._parse_singlefile(self._converter.from_type, file_path, encoding, logger, options) # then apply the conversion chain return self._converter.convert(desired_type, first, logger, options)" 1240,"def _get_parsing_plan_for_multifile_children(self, obj_on_fs: PersistedObject, desired_type: Type[Any], logger: Logger) -> Dict[str, Any]: """""" Implementation of AnyParser API """""" return self._base_parser._get_parsing_plan_for_multifile_children(obj_on_fs, self._converter.from_type, logger)" 1241,"def _parse_multifile(self, desired_type: Type[T], obj: PersistedObject, parsing_plan_for_children: Dict[str, ParsingPlan], logger: Logger, options: Dict[str, Dict[str, Any]]) -> T: """""" Implementation of AnyParser API """""" # first use the base parser # first = self._base_parser._parse_multifile(desired_type, obj, parsing_plan_for_children, logger, options) first = self._base_parser._parse_multifile(self._converter.from_type, obj, parsing_plan_for_children, logger, options) # then apply the conversion chain return self._converter.convert(desired_type, first, logger, options)" 1242,"def are_worth_chaining(base_parser: Parser, to_type: Type[S], converter: Converter[S,T]) -> bool: """""" Utility method to check if it makes sense to chain this parser configured with the given to_type, with this converter. 
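In sketch form: for a ConversionChain every inner converter must individually pass Parser.are_worth_chaining(base_parser, to_type, conv); a plain Converter is checked with a single such call.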
It is an extension of ConverterChain.are_worth_chaining :param base_parser: :param to_type: :param converter: :return: """""" if isinstance(converter, ConversionChain): for conv in converter._converters_list: if not Parser.are_worth_chaining(base_parser, to_type, conv): return False # all good return True else: return Parser.are_worth_chaining(base_parser, to_type, converter)" 1243,"def set_mode(self, mode): """"""Set Lupusec alarm mode."""""" _LOGGER.debug('State change called from alarm device') if not mode: _LOGGER.info('No mode supplied') elif mode not in CONST.ALL_MODES: _LOGGER.warning('Invalid mode') response_object = self._lupusec.set_mode(CONST.MODE_TRANSLATION[mode]) if response_object['result'] != 1: _LOGGER.warning('Mode setting unsuccessful') self._json_state['mode'] = mode _LOGGER.info('Mode set to: %s', mode) return True" 1244,"def parse_amendements_summary(url, json_response): """""" json schema : { infoGenerales: { nb_resultats, debut, nb_docs }, data_table: 'id|numInit|titreDossierLegislatif|urlDossierLegislatif|' 'instance|numAmend|urlAmend|designationArticle|' 'designationAlinea|dateDepot|signataires|sort' } NB : the json response does not contain the dispositif and expose, that's why we call it ""amendement's summary"" """""" amendements = [] fields = [convert_camelcase_to_underscore(field) for field in json_response['infoGenerales']['description_schema'].split('|')] for row in json_response['data_table']: values = row.split('|') amd = AmendementSummary(**dict(zip(fields, values))) amd.legislature = re.search(r'www.assemblee-nationale.fr/(\d+)/', amd.url_amend).groups()[0] amendements.append(amd) return AmendementSearchResult(**{ 'url': url, 'total_count': json_response['infoGenerales']['nb_resultats'], 'start': json_response['infoGenerales']['debut'], 'size': json_response['infoGenerales']['nb_docs'], 'results': amendements })" 1245,"def get(self, **kwargs): """""" :param texteRecherche: :param numAmend: :param idArticle: :param idAuteur: :param idDossierLegislatif: :param idExamen: :param idExamens: :param periodeParlementaire: :param dateDebut: :param dateFin: :param rows: :param start: :param sort: """""" params = self.default_params.copy() params.update(kwargs) start = time.time() response = requests.get(self.base_url, params=params) end = time.time() LOGGER.debug( 'fetched amendements with search params: %s in %0.2f s', params, end - start ) return parse_amendements_summary(response.url, response.json())" 1246,"def to_utf8(value): """"""Returns a string encoded using UTF-8. This function comes from `Tornado`_. :param value: A unicode or string to be encoded. :returns: The encoded string. """""" if isinstance(value, unicode): return value.encode('utf-8') assert isinstance(value, str) return value" 1247,"def to_unicode(value): """"""Returns a unicode string from a string, using UTF-8 to decode if needed. This function comes from `Tornado`_. :param value: A unicode or string to be decoded. :returns: The decoded string. """""" if isinstance(value, str): return value.decode('utf-8') assert isinstance(value, unicode) return value" 1248,"def find_all_commands(management_dir): """""" Find all valid commands in a directory management_dir : directory path return - List of commands """""" try: #Find all commands in the directory that are not __init__.py and end in .py. 
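# (for example, 'runserver.py' and 'migrate.py' would be kept while '__init__.py' is
#  skipped; the file names here are hypothetical)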
Then, remove the trailing .py return [f[:-3] for f in os.listdir(management_dir) if f.endswith('.py') and not f.startswith(""__"")] except OSError: #If nothing is found, return empty return []" 1249,"def find_commands_module(app_name): """""" Find the commands module in each app (if it exists) and return the path app_name : The name of an app in the INSTALLED_APPS setting return - path to the app """""" parts = app_name.split('.') parts.append('commands') parts.reverse() part = parts.pop() path = None #Load the module if needed try: f, path, descr = imp.find_module(part, path) except ImportError as e: if os.path.basename(os.getcwd()) != part: raise e else: try: if f: f.close() except UnboundLocalError: log.error(""Could not import module {0} at path {1}. Sys.path is {2}"".format(part, path, sys.path)) #Go down level by and level and try to load the module at each level while parts: part = parts.pop() f, path, descr = imp.find_module(part, [path] if path else None) if f: f.close() return path" 1250,"def get_commands(): """""" Get all valid commands return - all valid commands in dictionary form """""" commands = {} #Try to load the settings file (settings can be specified on the command line) and get the INSTALLED_APPS try: from percept.conf.base import settings apps = settings.INSTALLED_APPS except KeyError: apps = [] #For each app, try to find the command module (command folder in the app) #Then, try to load all commands in the directory for app_name in apps: try: path = find_commands_module(app_name) commands.update(dict([(name, app_name) for name in find_all_commands(path)])) except ImportError as e: pass return commands" 1251,"def execute(self): """""" Run the command with the command line arguments """""" #Initialize the option parser parser = LaxOptionParser( usage=""%prog subcommand [options] [args]"", option_list=BaseCommand.option_list #This will define what is allowed input to the parser (ie --settings=) ) #Parse the options options, args = parser.parse_args(self.argv) #Handle --settings and --pythonpath properly options = handle_default_options(options) try: #Get the name of the subcommand subcommand = self.argv[1] except IndexError: #If the subcommand name cannot be found, set it to help subcommand = 'help' #If the subcommand is help, print the usage of the parser, and available command names if subcommand == 'help': if len(args) <= 2: parser.print_help() sys.stdout.write(self.help_text + '\n') else: #Otherwise, run the given command self.fetch_command(subcommand).run_from_argv(self.argv)" 1252,"def help_text(self): """""" Formats and prints the help text from the command list """""" help_text = '\n'.join(sorted(get_commands().keys())) help_text = ""\nCommands:\n"" + help_text return help_text" 1253,"def missing(self, field, last=True): ''' Numeric fields support specific handling for missing fields in a doc. The missing value can be _last, _first, or a custom value (that will be used for missing docs as the sort value). 
missing('price') > {""price"" : {""missing"": ""_last"" } } missing('price',False) > {""price"" : {""missing"": ""_first""} } ''' if last: self.append({field: {'missing': '_last'}}) else: self.append({field: {'missing': '_first'}}) return self" 1254,"def ensure_table(self, cls): """"""Ensure table's existence - as per the gludb spec."""""" cur = self._conn().cursor() table_name = cls.get_table_name() index_names = cls.index_names() or [] cols = ['id text primary key', 'value text'] for name in index_names: cols.append(name + ' text') cur.execute('create table if not exists %s (%s)' % ( table_name, ','.join(cols) )) for name in index_names: cur.execute('create index if not exists %s on %s(%s)' % ( table_name + '_' + name + '_idx', table_name, name )) self._conn().commit() cur.close()" 1255,"def find_by_index(self, cls, index_name, value): """"""Find all rows matching index query - as per the gludb spec."""""" cur = self._conn().cursor() query = 'select id,value from %s where %s = ?' % ( cls.get_table_name(), index_name ) found = [] for row in cur.execute(query, (value,)): id, data = row[0], row[1] obj = cls.from_data(data) assert id == obj.id found.append(obj) cur.close() return found" 1256,"def delete(self, obj): """"""Required functionality."""""" del_id = obj.get_id() if not del_id: return cur = self._conn().cursor() tabname = obj.__class__.get_table_name() query = 'delete from %s where id = ?' % tabname cur.execute(query, (del_id,)) self._conn().commit() cur.close()" 1257,"def register(self): """""" Registers a new device with the name entity_id. This device has permissions for services like subscribe, publish and access historical data. """""" register_url = self.base_url + ""api/0.1.0/register"" register_headers = { ""apikey"": str(self.owner_api_key), ""resourceID"": str(self.entity_id), ""serviceType"": ""publish,subscribe,historicData"" } with self.no_ssl_verification(): r = requests.get(register_url, {}, headers=register_headers) response = r.content.decode(""utf-8"") if ""APIKey"" in str(r.content.decode(""utf-8"")): response = json.loads(response[:-331] + ""}"") # Temporary fix to a middleware bug, should be removed in future response[""Registration""] = ""success"" else: response = json.loads(response) response[""Registration""] = ""failure"" return response" 1258,"def no_ssl_verification(self): """""" Requests module fails due to lets encrypt ssl encryption. Will be fixed in the future release."""""" try: from functools import partialmethod except ImportError: # Python 2 fallback: https://gist.github.com/carymrobbins/8940382 from functools import partial class partialmethod(partial): def __get__(self, instance, owner): if instance is None: return self return partial(self.func, instance, *(self.args or ()), **(self.keywords or {})) old_request = requests.Session.request requests.Session.request = partialmethod(old_request, verify=False) warnings.filterwarnings('ignore', 'Unverified HTTPS request') yield warnings.resetwarnings() requests.Session.request = old_request" 1259,"def publish(self, data): """""" This function allows an entity to publish data to the middleware. Args: data (string): contents to be published by this entity. 
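The request body posted to the publish endpoint has the shape (values illustrative):

    {'exchange': 'amq.topic', 'key': '<entity_id>', 'body': '<data>'}

and the returned dict carries a 'status' of 'success' or 'failure' plus the raw 'response'.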
"""""" if self.entity_api_key == """": return {'status': 'failure', 'response': 'No API key found in request'} publish_url = self.base_url + ""api/0.1.0/publish"" publish_headers = {""apikey"": self.entity_api_key} publish_data = { ""exchange"": ""amq.topic"", ""key"": str(self.entity_id), ""body"": str(data) } with self.no_ssl_verification(): r = requests.post(publish_url, json.dumps(publish_data), headers=publish_headers) response = dict() if ""No API key"" in str(r.content.decode(""utf-8"")): response[""status""] = ""failure"" r = json.loads(r.content.decode(""utf-8""))['message'] elif 'publish message ok' in str(r.content.decode(""utf-8"")): response[""status""] = ""success"" r = r.content.decode(""utf-8"") else: response[""status""] = ""failure"" r = r.content.decode(""utf-8"") response[""response""] = str(r) return response" 1260,"def db(self, entity, query_filters=""size=10""): """""" This function allows an entity to access the historic data. Args: entity (string): Name of the device to listen to query_filters (string): Elastic search response format string example, ""pretty=true&size=10"" """""" if self.entity_api_key == """": return {'status': 'failure', 'response': 'No API key found in request'} historic_url = self.base_url + ""api/0.1.0/historicData?"" + query_filters historic_headers = { ""apikey"": self.entity_api_key, ""Content-Type"": ""application/json"" } historic_query_data = json.dumps({ ""query"": { ""match"": { ""key"": entity } } }) with self.no_ssl_verification(): r = requests.get(historic_url, data=historic_query_data, headers=historic_headers) response = dict() if ""No API key"" in str(r.content.decode(""utf-8"")): response[""status""] = ""failure"" else: r = r.content.decode(""utf-8"") response = r return response" 1261,"def bind(self, devices_to_bind): """""" This function allows an entity to list the devices to subscribe for data. This function must be called at least once, before doing a subscribe. Subscribe function will listen to devices that are bound here. Args: devices_to_bind (list): an array of devices to listen to. Example bind([""test100"",""testDemo""]) """""" if self.entity_api_key == """": return {'status': 'failure', 'response': 'No API key found in request'} url = self.base_url + ""api/0.1.0/subscribe/bind"" headers = {""apikey"": self.entity_api_key} data = { ""exchange"": ""amq.topic"", ""keys"": devices_to_bind, ""queue"": self.entity_id } with self.no_ssl_verification(): r = requests.post(url, json=data, headers=headers) response = dict() if ""No API key"" in str(r.content.decode(""utf-8"")): response[""status""] = ""failure"" r = json.loads(r.content.decode(""utf-8""))['message'] elif 'bind queue ok' in str(r.content.decode(""utf-8"")): response[""status""] = ""success"" r = r.content.decode(""utf-8"") else: response[""status""] = ""failure"" r = r.content.decode(""utf-8"") response[""response""] = str(r) return response" 1262,"def unbind(self, devices_to_unbind): """""" This function allows an entity to unbound devices that are already bound. 
Args: devices_to_unbind (list): an array of devices that are to be unbound ( stop listening) Example unbind([""test10"",""testDemo105""]) """""" if self.entity_api_key == """": return {'status': 'failure', 'response': 'No API key found in request'} url = self.base_url + ""api/0.1.0/subscribe/unbind"" headers = {""apikey"": self.entity_api_key} data = { ""exchange"": ""amq.topic"", ""keys"": devices_to_unbind, ""queue"": self.entity_id } with self.no_ssl_verification(): r = requests.delete(url, json=data, headers=headers) print(r) response = dict() if ""No API key"" in str(r.content.decode(""utf-8"")): response[""status""] = ""failure"" r = json.loads(r.content.decode(""utf-8""))['message'] elif 'unbind' in str(r.content.decode(""utf-8"")): response[""status""] = ""success"" r = r.content.decode(""utf-8"") else: response[""status""] = ""failure"" r = r.content.decode(""utf-8"") response[""response""] = str(r) return response" 1263,"def subscribe(self, devices_to_bind=[]): """""" This function allows an entity to subscribe for data from the devices specified in the bind operation. It creates a thread with an event loop to manager the tasks created in start_subscribe_worker. Args: devices_to_bind (list): an array of devices to listen to """""" if self.entity_api_key == """": return {'status': 'failure', 'response': 'No API key found in request'} self.bind(devices_to_bind) loop = asyncio.new_event_loop() t1 = threading.Thread(target=self.start_subscribe_worker, args=(loop,)) t1.daemon = True t1.start()" 1264,"def start_subscribe_worker(self, loop): """""" Switch to new event loop as a thread and run until complete. """""" url = self.base_url + ""api/0.1.0/subscribe"" task = loop.create_task(self.asynchronously_get_data(url + ""?name={0}"".format(self.entity_id))) asyncio.set_event_loop(loop) loop.run_until_complete(task) self.event_loop = loop" 1265,"async def asynchronously_get_data(self, url): """""" Asynchronously get data from Chunked transfer encoding of https://smartcity.rbccps.org/api/0.1.0/subscribe. (Only this function requires Python 3. Rest of the functions can be run in python2. Args: url (string): url to subscribe """""" headers = {""apikey"": self.entity_api_key} try: async with aiohttp.ClientSession(connector=aiohttp.TCPConnector(verify_ssl=False)) as session: async with session.get(url, headers=headers, timeout=3000) as response: while True: # loop over for each chunk of data chunk = await response.content.readchunk() if not chunk: break if platform == ""linux"" or platform == ""linux2"": # In linux systems, readchunk() returns a tuple chunk = chunk[0] resp = dict() resp[""data""] = chunk.decode() current_milli_time = lambda: int(round(time() * 1000)) resp[""timestamp""] = str(current_milli_time()) self.subscribe_data = resp except Exception as e: print(""\n********* Oops: "" + url + "" "" + str(type(e)) + str(e) + "" *********\n"") print('\n********* Closing TCP: {} *********\n'.format(url))" 1266,"def stop_subscribe(self): """""" This function is used to stop the event loop created when subscribe is called. But this function doesn't stop the thread and should be avoided until its completely developed. """""" asyncio.gather(*asyncio.Task.all_tasks()).cancel() self.event_loop.stop() self.event_loop.close()" 1267,"def timesince(d, now=None): """""" Takes two datetime objects and returns the time between d and now as a nicely formatted string, e.g. ""10 minutes"". If d occurs after now, then ""0 minutes"" is returned. Units used are years, months, weeks, days, hours, and minutes. 
Seconds and microseconds are ignored. Up to two adjacent units will be displayed. For example, ""2 weeks, 3 days"" and ""1 year, 3 months"" are possible outputs, but ""2 weeks, 3 hours"" and ""1 year, 5 days"" are not. Adapted from http://blog.natbat.co.uk/archive/2003/Jun/14/time_since """""" chunks = ( (60 * 60 * 24 * 365, lambda n: ungettext('year', 'years', n)), (60 * 60 * 24 * 30, lambda n: ungettext('month', 'months', n)), (60 * 60 * 24 * 7, lambda n : ungettext('week', 'weeks', n)), (60 * 60 * 24, lambda n : ungettext('day', 'days', n)), (60 * 60, lambda n: ungettext('hour', 'hours', n)), (60, lambda n: ungettext('minute', 'minutes', n)) ) # Convert datetime.date to datetime.datetime for comparison. if not isinstance(d, datetime.datetime): d = datetime.datetime(d.year, d.month, d.day) if now and not isinstance(now, datetime.datetime): now = datetime.datetime(now.year, now.month, now.day) if not now: if d.tzinfo: now = datetime.datetime.now(LocalTimezone(d)) else: now = datetime.datetime.now() # ignore microsecond part of 'd' since we removed it from 'now' delta = now - (d - datetime.timedelta(0, 0, d.microsecond)) since = delta.days * 24 * 60 * 60 + delta.seconds if since <= 0: # d is in the future compared to now, stop processing. return u'0 ' + ugettext('minutes') for i, (seconds, name) in enumerate(chunks): count = since // seconds if count != 0: break s = ugettext('%(number)d %(type)s') % {'number': count, 'type': name(count)} if i + 1 < len(chunks): # Now get the second item seconds2, name2 = chunks[i + 1] count2 = (since - (seconds * count)) // seconds2 if count2 != 0: s += ugettext(', %(number)d %(type)s') % {'number': count2, 'type': name2(count2)} return s" 1268,"def timeuntil(d, now=None): """""" Like timesince, but returns a string measuring the time until the given time. 
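For example, with d ninety minutes in the future, timeuntil(d) returns '1 hour, 30 minutes', formatted exactly as timesince would format it.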
"""""" if not now: if getattr(d, 'tzinfo', None): now = datetime.datetime.now(LocalTimezone(d)) else: now = datetime.datetime.now() return timesince(now, d)" 1269,"def updateCache(self, service, url, new_data, new_data_dt): """""" :param new_data: a string representation of the data :param new_data_dt: a timezone aware datetime object giving the timestamp of the new_data :raise MemcachedException: if update failed """""" key = self._get_key(service, url) # clear existing data try: value = self.client.get(key) if value: data = pickle.loads(value, encoding=""utf8"") if ""time_stamp"" in data: cached_data_dt = parse(data[""time_stamp""]) if new_data_dt > cached_data_dt: self.client.delete(key) # may raise MemcachedException logger.info( ""IN cache (key: {}), older DELETE"".format(key)) else: logger.info( ""IN cache (key: {}), newer KEEP"".format(key)) return else: logger.info(""NOT IN cache (key: {})"".format(key)) except MemcachedException as ex: logger.error( ""Clear existing data (key: {}) ==> {}"".format(key, str(ex))) return # store new value in cache cdata, time_to_store = self._make_cache_data( service, url, new_data, {}, 200, new_data_dt) self.client.set(key, cdata, time=time_to_store) # may raise MemcachedException logger.info( ""MemCached SET (key {}) for {:d} seconds"".format( key, time_to_store))" 1270,"def delete_all_eggs(self): """""" delete all the eggs in the directory specified """""" path_to_delete = os.path.join(self.egg_directory, ""lib"", ""python"") if os.path.exists(path_to_delete): shutil.rmtree(path_to_delete)" 1271,"def install_egg(self, egg_name): """""" Install an egg into the egg directory """""" if not os.path.exists(self.egg_directory): os.makedirs(self.egg_directory) self.requirement_set.add_requirement( InstallRequirement.from_line(egg_name, None)) try: self.requirement_set.prepare_files(self.finder) self.requirement_set.install(['--prefix=' + self.egg_directory], []) except DistributionNotFound: self.requirement_set.requirements._keys.remove(egg_name) raise PipException()" 1272,"def call(cls, iterable, *a, **kw): """""" Calls every item in *iterable* with the specified arguments. """""" return cls(x(*a, **kw) for x in iterable)" 1273,"def map(cls, iterable, func, *a, **kw): """""" Iterable-first replacement of Python's built-in `map()` function. """""" return cls(func(x, *a, **kw) for x in iterable)" 1274,"def filter(cls, iterable, cond, *a, **kw): """""" Iterable-first replacement of Python's built-in `filter()` function. """""" return cls(x for x in iterable if cond(x, *a, **kw))" 1275,"def unique(cls, iterable, key=None): """""" Yields unique items from *iterable* whilst preserving the original order. """""" if key is None: key = lambda x: x def generator(): seen = set() seen_add = seen.add for item in iterable: key_val = key(item) if key_val not in seen: seen_add(key_val) yield item return cls(generator())" 1276,"def chunks(cls, iterable, n, fill=None): """""" Collects elements in fixed-length chunks. """""" return cls(itertools.zip_longest(*[iter(iterable)] * n, fillvalue=fill))" 1277,"def concat(cls, iterables): """""" Similar to #itertools.chain.from_iterable(). """""" def generator(): for it in iterables: for element in it: yield element return cls(generator())" 1278,"def chain(cls, *iterables): """""" Similar to #itertools.chain.from_iterable(). """""" def generator(): for it in iterables: for element in it: yield element return cls(generator())" 1279,"def attr(cls, iterable, attr_name): """""" Applies #getattr() on all elements of *iterable*. 
"""""" return cls(getattr(x, attr_name) for x in iterable)" 1280,"def of_type(cls, iterable, types): """""" Filters using #isinstance(). """""" return cls(x for x in iterable if isinstance(x, types))" 1281,"def partition(cls, iterable, pred): """""" Use a predicate to partition items into false and true entries. """""" t1, t2 = itertools.tee(iterable) return cls(itertools.filterfalse(pred, t1), filter(pred, t2))" 1282,"def count(cls, iterable): """""" Returns the number of items in an iterable. """""" iterable = iter(iterable) count = 0 while True: try: next(iterable) except StopIteration: break count += 1 return count" 1283,"def column_names(self, table): """"""An iterable of column names, for a particular table or view."""""" table_info = self.execute( u'PRAGMA table_info(%s)' % quote(table)) return (column['name'] for column in table_info)" 1284,"def execute(self, sql, *args, **kwargs): ''' Run raw SQL on the database, and receive relaxing output. This is sort of the foundational method that most of the others build on. ''' try: self.cursor.execute(sql, *args) except self.sqlite3.InterfaceError, msg: raise self.sqlite3.InterfaceError(unicode(msg) + '\nTry converting types or pickling.') rows = self.cursor.fetchall() self.__commit_if_necessary(kwargs) if None == self.cursor.description: return None else: colnames = [d[0].decode('utf-8') for d in self.cursor.description] rawdata = [OrderedDict(zip(colnames,row)) for row in rows] return rawdata" 1285,"def create_index(self, columns, table_name, if_not_exists = True, unique = False, **kwargs): 'Create a unique index on the column(s) passed.' index_name = simplify(table_name) + u'_' + u'_'.join(map(simplify, columns)) if unique: sql = u'CREATE UNIQUE INDEX %s ON %s (%s)' else: sql = u'CREATE INDEX %s ON %s (%s)' first_param = u'IF NOT EXISTS ' + index_name if if_not_exists else index_name params = (first_param, quote(table_name), ','.join(map(quote, columns))) self.execute(sql % params, **kwargs)" 1286,"def create_table(self, data, table_name, error_if_exists = False, **kwargs): 'Create a table based on the data, but don\'t insert anything.' converted_data = convert(data) if len(converted_data) == 0 or converted_data[0] == []: raise ValueError(u'You passed no sample values, or all the values you passed were null.') else: startdata = OrderedDict(converted_data[0]) # Select a non-null item for k, v in startdata.items(): if v != None: break else: v = None if_not_exists = u'' if error_if_exists else u'IF NOT EXISTS' # Do nothing if all items are null. if v != None: try: # This is vulnerable to injection. sql = u''' CREATE TABLE %s %s ( %s %s );''' % (if_not_exists, quote(table_name), quote(k), get_column_type(startdata[k])) self.execute(sql, commit = False) except: raise else: self.commit() for row in converted_data: self.__check_and_add_columns(table_name, row)" 1287,"def get_var(self, key): 'Retrieve one saved variable from the database.' vt = quote(self.__vars_table) data = self.execute(u'SELECT * FROM %s WHERE `key` = ?' % vt, [key], commit = False) if data == []: raise NameError(u'The DumpTruck variables table doesn\'t have a value for %s.' 
% key) else: tmp = quote(self.__vars_table_tmp) row = data[0] self.execute(u'DROP TABLE IF EXISTS %s' % tmp, commit = False) # This is vulnerable to injection self.execute(u'CREATE TEMPORARY TABLE %s (`value` %s)' % (tmp, row['type']), commit = False) # This is ugly self.execute(u'INSERT INTO %s (`value`) VALUES (?)' % tmp, [row['value']], commit = False) value = self.dump(tmp)[0]['value'] self.execute(u'DROP TABLE %s' % tmp, commit = False) return value" 1288,"def save_var(self, key, value, **kwargs): 'Save one variable to the database.' # Check whether Highwall's variables table exists self.__check_or_create_vars_table() column_type = get_column_type(value) tmp = quote(self.__vars_table_tmp) self.execute(u'DROP TABLE IF EXISTS %s' % tmp, commit = False) # This is vulnerable to injection self.execute(u'CREATE TABLE %s (`value` %s)' % (tmp, column_type), commit = False) # This is ugly self.execute(u'INSERT INTO %s (`value`) VALUES (?)' % tmp, [value], commit = False) table = (quote(self.__vars_table), tmp) params = [key, column_type] self.execute(u''' INSERT OR REPLACE INTO %s (`key`, `type`, `value`) SELECT ? AS key, ? AS type, value FROM %s ''' % table, params) self.execute(u'DROP TABLE %s' % tmp, commit = False) self.__commit_if_necessary(kwargs)" 1289,"def tablesAndViews(self): """"""Return a sequence of (name,type) pairs where type is either ""table"" or ""view""."""""" result = self.execute( u'SELECT name,type FROM sqlite_master WHERE type in (""table"", ""view"")', commit=False) return ((row['name'],row['type']) for row in result)" 1290,"def drop(self, table_name = 'dumptruck', if_exists = False, **kwargs): 'Drop a table.' return self.execute(u'DROP TABLE %s %s;' % ('IF EXISTS' if if_exists else '', quote(table_name)), **kwargs)" 1291,"def __install_perforce(self, config): """""" install perforce binary """""" if not system.is_64_bit(): self.logger.warn(""Perforce formula is only designed for 64 bit systems! Not install executables..."") return False version = config.get('version', 'r13.2') key = 'osx' if system.is_osx() else 'linux' perforce_packages = package_dict[version][key] d = self.directory.install_directory(self.feature_name) if not os.path.exists(d): os.makedirs(d) self.logger.info(""Downloading p4 executable..."") with open(os.path.join(d, ""p4""), 'wb+') as fh: fh.write(lib.cleaned_request('get', url_prefix + perforce_packages['p4']).content) self.directory.symlink_to_bin(""p4"", os.path.join(d, ""p4"")) self.p4_command = os.path.join(d, ""p4"") self.logger.info(""Installing p4v..."") if system.is_osx(): return self._install_p4v_osx(url_prefix + perforce_packages['p4v']) else: return self._install_p4v_linux(url_prefix + perforce_packages['p4v'])" 1292,"def _install_p4v_osx(self, url, overwrite=False): """""" Install perforce applications and binaries for mac """""" package_exists = False root_dir = os.path.expanduser(os.path.join(""~"", ""Applications"")) package_exists = len([x for x in P4V_APPLICATIONS if os.path.exists(os.path.join(root_dir, x))]) if not package_exists or overwrite: lib.extract_dmg(url, root_dir) else: self.logger.warn(""P4V exists already in %s! 
Not overwriting..."" % root_dir) return True" 1293,"def _install_p4v_linux(self, url): """""" Install perforce applications and binaries for linux """""" lib.extract_targz(url, self.directory.install_directory(self.feature_name), remove_common_prefix=True) bin_path = os.path.join(self.directory.install_directory(self.feature_name), 'bin') if os.path.exists(bin_path): for f in os.listdir(bin_path): self.directory.symlink_to_bin(f, os.path.join(bin_path, f)) return True" 1294,"def __write_p4settings(self, config): """""" write perforce settings """""" self.logger.info(""Writing p4settings..."") root_dir = os.path.expanduser(config.get('root_path')) p4settings_path = os.path.join(root_dir, "".p4settings"") if os.path.exists(p4settings_path): if self.target.get('overwrite_p4settings', False): self.logger.info(""Overwriting existing p4settings..."") os.remove(p4settings_path) else: return with open(p4settings_path, ""w+"") as p4settings_file: p4settings_file.write(p4settings_template % config.to_dict()) if config.get('write_password_p4settings', 'no'): p4settings_file.write(""\nP4PASSWD=%s"" % config['password'])" 1295,"def __configure_client(self, config): """""" write the perforce client """""" self.logger.info(""Configuring p4 client..."") client_dict = config.to_dict() client_dict['root_path'] = os.path.expanduser(config.get('root_path')) os.chdir(client_dict['root_path']) client_dict['hostname'] = system.NODE client_dict['p4view'] = config['p4view'] % self.environment.target.get_context_dict() client = re.sub('//depot', ' //depot', p4client_template % client_dict) self.logger.info(lib.call(""%s client -i"" % self.p4_command, stdin=client, env=self.p4environ, cwd=client_dict['root_path']))" 1296,"def __install_eggs(self, config): """""" Install eggs for a particular configuration """""" egg_carton = (self.directory.install_directory(self.feature_name), 'requirements.txt') eggs = self.__gather_eggs(config) self.logger.debug(""Installing eggs %s..."" % eggs) self.__load_carton(egg_carton, eggs) self.__prepare_eggs(egg_carton, config)" 1297,"def __add_paths(self, config): """""" add the proper resources into the environment """""" bin_path = os.path.join(self.directory.install_directory(self.feature_name), 'bin') whitelist_executables = self._get_whitelisted_executables(config) for f in os.listdir(bin_path): for pattern in BLACKLISTED_EXECUTABLES: if re.match(pattern, f): continue if whitelist_executables and f not in whitelist_executables: continue self.directory.symlink_to_bin(f, os.path.join(bin_path, f))" 1298,"def analyse_body_paragraph(body_paragraph, labels=None): """"""Analyse commit body paragraph and return (label, message). >>> analyse_body_paragraph('* BETTER Foo and bar.', >>> ... {'BETTER': 'Improvements'}) ('BETTER', 'Foo and bar.') >>> analyse_body_paragraph('* Foo and bar.') (None, 'Foo and bar.') >>> analyse_body_paragraph('Foo and bar.') (None, None) """""" # try to find leading label first: for label, dummy in labels: if body_paragraph.startswith('* ' + label): return (label, body_paragraph[len(label) + 3:].replace('\n ', ' ')) # no conformed leading label found; do we have leading asterisk? if body_paragraph.startswith('* '): return (None, body_paragraph[2:].replace('\n ', ' ')) # no leading asterisk found; ignore this paragraph silently: return (None, None)" 1299,"def remove_ticket_directives(message): """"""Remove ticket directives like ""(closes #123). 
>>> remove_ticket_directives('(closes #123)') '(#123)' >>> remove_ticket_directives('(foo #123)') '(foo #123)' """""" if message: message = re.sub(r'closes #', '#', message) message = re.sub(r'addresses #', '#', message) message = re.sub(r'references #', '#', message) return message" 1300,"def amended_commits(commits): """"""Return those git commit sha1s that have been amended later."""""" # which SHA1 are declared as amended later? amended_sha1s = [] for message in commits.values(): amended_sha1s.extend(re.findall(r'AMENDS\s([0-f]+)', message)) return amended_sha1s" 1301,"def enrich_git_log_dict(messages, labels): """"""Enrich git log with related information on tickets."""""" for commit_sha1, message in messages.items(): # detect module and ticket numbers for each commit: component = None title = message.split('\n')[0] try: component, title = title.split("":"", 1) component = component.strip() except ValueError: pass # noqa paragraphs = [analyse_body_paragraph(p, labels) for p in message.split('\n\n')] yield { 'sha1': commit_sha1, 'component': component, 'title': title.strip(), 'tickets': re.findall(r'\s(#\d+)', message), 'paragraphs': [ (label, remove_ticket_directives(message)) for label, message in paragraphs ], }" 1302,"def release(obj, commit='HEAD', components=False): """"""Generate release notes."""""" options = obj.options repository = obj.repository try: sha = 'oid' commits = _pygit2_commits(commit, repository) except ImportError: try: sha = 'hexsha' commits = _git_commits(commit, repository) except ImportError: click.echo('To use this feature, please install pygit2. ' 'GitPython will also work but is not recommended ' '(python <= 2.7 only).', file=sys.stderr) return 2 messages = OrderedDict([(getattr(c, sha), c.message) for c in commits]) for commit_sha1 in amended_commits(messages): if commit_sha1 in messages: del messages[commit_sha1] full_messages = list( enrich_git_log_dict(messages, options.get('commit_msg_labels')) ) indent = ' ' if components else '' wrapper = textwrap.TextWrapper( width=70, initial_indent=indent + '- ', subsequent_indent=indent + ' ', ) for label, section in options.get('commit_msg_labels'): if section is None: continue bullets = [] for commit in full_messages: bullets += [ {'text': bullet, 'component': commit['component']} for lbl, bullet in commit['paragraphs'] if lbl == label and bullet is not None ] if len(bullets) > 0: click.echo(section) click.echo('~' * len(section)) click.echo() if components: def key(cmt): return cmt['component'] for component, bullets in itertools.groupby( sorted(bullets, key=key), key): bullets = list(bullets) if len(bullets) > 0: click.echo('+ {}'.format(component)) click.echo() for bullet in bullets: click.echo(wrapper.fill(bullet['text'])) click.echo() else: for bullet in bullets: click.echo(wrapper.fill(bullet['text'])) click.echo() return 0" 1303,"def incr(**vars): """"""Increments context variables """""" for k, v in vars: current_context.vars.setdefault(k, 0) current_context[k] += v" 1304,"def set_default_var(**vars): """"""Sets context variables using the key/value provided in the options """""" for k, v in vars.iteritems(): current_context.vars.setdefault(k, v)" 1305,"def incr_obj(obj, **attrs): """"""Increments context variables """""" for name, value in attrs.iteritems(): v = getattr(obj, name, None) if not hasattr(obj, name) or v is None: v = 0 setattr(obj, name, v + value)" 1306,"def redirect(view=None, url=None, **kwargs): """"""Redirects to the specified view or url """""" if view: if url: kwargs[""url""] = url 
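# build the target URL from the endpoint name (remaining kwargs are passed to flask.url_for),
# then exit the current context with a redirect response to that URL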
url = flask.url_for(view, **kwargs) current_context.exit(flask.redirect(url))" 1307,"def lines(query): """"""lines(query) -- print the number of lines in a given file """""" filename = support.get_file_name(query) if(os.path.isfile(filename)): with open(filename) as openfile: print len(openfile.readlines()) else: print 'File not found : ' + filename" 1308,"def words(query): """"""lines(query) -- print the number of words in a given file """""" filename = support.get_file_name(query) if(os.path.isfile(filename)): with open(filename) as openfile: print len(openfile.read().split()) else: print 'File not found : ' + filename" 1309,"def file_info(query): """"""file_info(query) -- print some human readable information of a given file """""" filename = support.get_file_name(query) if(os.path.isfile(filename)): stat_info = os.stat(filename) owner_name = pwd.getpwuid(stat_info.st_uid).pw_name print 'owner : ' + owner_name file_size = support.get_readable_filesize(stat_info.st_size) print 'size : ' + file_size print 'created : ' + time.ctime(stat_info.st_ctime) print 'last modified : ' + time.ctime(stat_info.st_mtime) else: print 'file not found'" 1310,"def make_executable(query): """"""make_executable(query) -- give executable permissions to a given file """""" filename = support.get_file_name(query) if(os.path.isfile(filename)): os.system('chmod +x '+filename) else: print 'file not found'" 1311,"def add_to_path(query): """""" add_to_path(query) -- add user given path to environment PATH variable. """""" new_entry = support.get_path(query) if(new_entry): print 'Adding '+new_entry+' to PATH variable.' print '''1 : confirm 2 : cancel ''' choice = int(raw_input('>> ')) if(choice == 1): home_dir = os.path.expanduser('~') bashrc = open(os.path.join(home_dir, "".bashrc""), ""a"") bashrc.write('\n\nexport PATH=\""'+new_entry+':$PATH\""\n') bashrc.close() os.system('source '+os.path.join(os.path.expanduser('~'),'.bashrc')) print 'Success!!' print os.system('echo $PATH') else: print 'We were unable to extract the \'path\' from your query.'" 1312,"def system_info(query): """"""system_info(query) -- print system specific information like OS, kernel, architecture etc. """""" proc = subprocess.Popen([""uname -o""], stdout=subprocess.PIPE, shell=True) (out, err) = proc.communicate() print ""operating system : ""+str(out), proc = subprocess.Popen([""uname""], stdout=subprocess.PIPE, shell=True) (out, err) = proc.communicate() print ""kernel : ""+str(out), proc = subprocess.Popen([""uname -r""], stdout=subprocess.PIPE, shell=True) (out, err) = proc.communicate() print ""kernel release : ""+str(out), proc = subprocess.Popen([""uname -m""], stdout=subprocess.PIPE, shell=True) (out, err) = proc.communicate() print ""architecture : ""+str(out), proc = subprocess.Popen([""uname -n""], stdout=subprocess.PIPE, shell=True) (out, err) = proc.communicate() print ""network node name : ""+str(out)," 1313,"def statsd_metric(name, count, elapsed): """"""Metric that records to statsd & graphite"""""" with statsd.pipeline() as pipe: pipe.incr(name, count) pipe.timing(name, int(round(1000 * elapsed)))" 1314,"def setup(template_paths={}, autoescape=False, cache_size=100, auto_reload=True, bytecode_cache=True): """"""Setup Jinja enviroment eg. 
sketch.jinja.setup({ 'app': self.config.paths['app_template_basedir'], 'sketch': self.config.paths['sketch_template_dir'], }) :param template_paths: Dictionary of paths to templates (template_name => template_path) :param autoescape: Autoescape :param cache_size: :param auto_reload: :param bytecode_cache: """""" global _jinja_env, _jinja_loaders if not _jinja_env: _jinja_env = JinjaEnviroment( autoescape=autoescape, cache_size=cache_size, auto_reload=auto_reload, bytecode_cache=None) # @TODO alter so Marshall is not used # if bytecode_cache and GAE_CACHE: # _jinja_env.bytecode_cache = GAEMemcacheBytecodeCache() if type(template_paths) == type(''): template_paths = {'site': template_paths} if len(template_paths) < 1: logging.exception('Sketch: jinja.setup: no template sets configured') return False if len(template_paths) == 1: template_set_name = template_paths.keys()[0] tp = template_paths[template_set_name] if tp in _jinja_loaders: _jinja_env.loader = _jinja_loaders[tp] else: _jinja_env.loader = _jinja_loaders[tp] = jinja2.FileSystemLoader(tp) return True if len(template_paths) > 1: loaders = {} for dirn, path in template_paths.items(): loaders[dirn] = jinja2.FileSystemLoader(path) _jinja_env.loader = SubdirLoader(loaders) return True logging.error('Sketch: jinja.setup: no template sets configured (fallthrough)') logging.error(_jinja_loaders)" 1315,"def render(template_name, template_vars={}, template_set='site', template_theme=None, template_extension='html', template_content=None): """"""Given a template path, a template name and template variables will return rendered content using jinja2 library :param template_path: Path to template directory :param template_name: Name of template :param vars: (Optional) Template variables """""" global _jinja_env if not _jinja_env: raise 'Jinja env not setup' try: _jinja_env.filters['timesince'] = timesince _jinja_env.filters['timeuntil'] = timeuntil _jinja_env.filters['date'] = date_format _jinja_env.filters['time'] = time_format _jinja_env.filters['shortdate'] = short_date _jinja_env.filters['isodate'] = iso_date _jinja_env.filters['rfcdate'] = rfc2822_date _jinja_env.filters['tformat'] = datetimeformat _jinja_env.filters['timestamp'] = timestamp except NameError as errstr: logging.info('Helper import error: %s' % errstr) _template_name = ""%s.%s"" % (template_name, template_extension) template = _jinja_env.get_template(_template_name, parent=template_theme) return template.render(template_vars)" 1316,"def compile_file(env, src_path, dst_path, encoding='utf-8', base_dir=''): """"""Compiles a Jinja2 template to python code. :param env: a Jinja2 Environment instance. :param src_path: path to the source file. :param dst_path: path to the destination file. :param encoding: template encoding. :param base_dir: the base path to be removed from the compiled template filename. """""" src_file = file(src_path, 'r') source = src_file.read().decode(encoding) name = src_path.replace(base_dir, '') raw = env.compile(source, name=name, filename=name, raw=True) src_file.close() dst_file = open(dst_path, 'w') dst_file.write(raw) dst_file.close()" 1317,"def compile_dir(env, src_path, dst_path, pattern=r'^.*\.html$', encoding='utf-8', base_dir=None): """"""Compiles a directory of Jinja2 templates to python code. :param env: a Jinja2 Environment instance. :param src_path: path to the source directory. :param dst_path: path to the destination directory. :param encoding: template encoding. :param base_dir: the base path to be removed from the compiled template filename. 
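Typical use (paths illustrative): compile_dir(env, 'templates/', 'compiled/') walks 'templates/' recursively, mirrors its sub-directories under 'compiled/' and writes the compiled source of every file matching *pattern* to the corresponding destination file.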
"""""" from os import path, listdir, mkdir file_re = re.compile(pattern) if base_dir is None: base_dir = src_path for filename in listdir(src_path): src_name = path.join(src_path, filename) dst_name = path.join(dst_path, filename) if path.isdir(src_name): mkdir(dst_name) compile_dir(env, src_name, dst_name, encoding=encoding, base_dir=base_dir) elif path.isfile(src_name) and file_re.match(filename): compile_file(env, src_name, dst_name, encoding=encoding, base_dir=base_dir)" 1318,"def _pre_dump(cls): """"""Output all recorded stats"""""" shutil.rmtree(cls.outdir, ignore_errors=True) os.makedirs(cls.outdir) super(PlotMetric, cls)._pre_dump()" 1319,"def _histogram(self, which, mu, sigma, data): """"""plot a histogram. For internal use only"""""" weights = np.ones_like(data)/len(data) # make bar heights sum to 100% n, bins, patches = plt.hist(data, bins=25, weights=weights, facecolor='blue', alpha=0.5) plt.title(r'%s %s: $\mu=%.2f$, $\sigma=%.2f$' % (self.name, which.capitalize(), mu, sigma)) plt.xlabel('Items' if which == 'count' else 'Seconds') plt.ylabel('Frequency') plt.gca().yaxis.set_major_formatter(FuncFormatter(lambda y, position: ""{:.1f}%"".format(y*100)))" 1320,"def _scatter(self): """"""plot a scatter plot of count vs. elapsed. For internal use only"""""" plt.scatter(self.count_arr, self.elapsed_arr) plt.title('{}: Count vs. Elapsed'.format(self.name)) plt.xlabel('Items') plt.ylabel('Seconds')" 1321,"def bind(self, __fun, *args, **kwargs): """""" Bind a worker function to the future. This worker function will be executed when the future is executed. """""" with self._lock: if self._running or self._completed or self._cancelled: raise RuntimeError('Future object can not be reused') if self._worker: raise RuntimeError('Future object is already bound') self._worker = functools.partial(__fun, *args, **kwargs) return self" 1322,"def add_done_callback(self, fun): """""" Adds the callback *fun* to the future so that it be invoked when the future completed. The future completes either when it has been completed after being started with the :meth:`start` method (independent of whether an error occurs or not) or when either :meth:`set_result` or :meth:`set_exception` is called. If the future is already complete, *fun* will be invoked directly. The function *fun* must accept the future as its sole argument. """""" with self._lock: if self._completed: fun() else: self._done_callbacks.append(fun)" 1323,"def enqueue(self): """""" Mark the future as being enqueued in some kind of executor for futures. Calling :meth:`start()` with the *as_thread* parameter as :const:`True` will raise a :class:`RuntimeError` after this method has been called. This method will also validate the state of the future. """""" with self._lock: if self._enqueued: raise RuntimeError('Future object is already enqueued') if self._running: raise RuntimeError('Future object is already running') if self._completed: raise RuntimeError('Future object can not be restarted') if not self._worker: raise RuntimeError('Future object is not bound') self._enqueued = True" 1324,"def start(self, as_thread=True): """""" Execute the future in a new thread or in the current thread as specified by the *as_thread* parameter. :param as_thread: Execute the future in a new, separate thread. If this is set to :const:`False`, the future will be executed in the calling thread. 
"""""" with self._lock: if as_thread: self.enqueue() # Validate future state if self._cancelled: return self._running = True if as_thread: self._thread = threading.Thread(target=self._run) self._thread.start() return self self._run()" 1325,"def result(self, timeout=None, do_raise=True): """""" Retrieve the result of the future, waiting for it to complete or at max *timeout* seconds. :param timeout: The number of maximum seconds to wait for the result. :param do_raise: Set to False to prevent any of the exceptions below to be raised and return :const:`None` instead. :raise Cancelled: If the future has been cancelled. :raise Timeout: If the *timeout* has been exceeded. :raise BaseException: Anything the worker has raised. :return: Whatever the worker bound to the future returned. """""" with self._lock: self.wait(timeout, do_raise=do_raise) if self._exc_info: if not do_raise: return None # Its more important to re-raise the exception from the worker. self._exc_retrieved = True reraise(*self._exc_info) if self._cancelled: if not do_raise: return None raise self.Cancelled() return self._result" 1326,"def exception(self, timeout=None, do_raise=True): """""" Returns the exception value by the future's worker or :const:`None`. :param timeout: :param do_raise: :param Cancelled: :param Timeout: :return: :const:`None` or an exception value. """""" with self._lock: self.wait(timeout, do_raise=do_raise) if not self._exc_info: return None self._exc_retrieved = True if self._cancelled: raise self.Cancelled() return self._exc_info[1]" 1327,"def cancel(self, mark_completed_as_cancelled=False): """""" Cancel the future. If the future has not been started yet, it will never start running. If the future is already running, it will run until the worker function exists. The worker function can check if the future has been cancelled using the :meth:`cancelled` method. If the future has already been completed, it will not be marked as cancelled unless you set *mark_completed_as_cancelled* to :const:`True`. :param mark_completed_as_cancelled: If this is :const:`True` and the future has already completed, it will be marked as cancelled anyway. """""" with self._lock: if not self._completed or mark_completed_as_cancelled: self._cancelled = True callbacks = self._prepare_done_callbacks() callbacks()" 1328,"def set_result(self, result): """""" Allows you to set the result of the future without requiring the future to actually be executed. This can be used if the result is available before the future is run, allowing you to keep the future as the interface for retrieving the result data. :param result: The result of the future. :raise RuntimeError: If the future is already enqueued. """""" with self._lock: if self._enqueued: raise RuntimeError('can not set result of enqueued Future') self._result = result self._completed = True callbacks = self._prepare_done_callbacks() callbacks()" 1329,"def set_exception(self, exc_info): """""" This method allows you to set an exception in the future without requring that exception to be raised from the futures worker. This method can be called on an unbound future. :param exc_info: Either an exception info tuple or an exception value. In the latter case, the traceback will be automatically generated from the parent frame. :raise RuntimeError: If the future is already enqueued. 
"""""" if not isinstance(exc_info, tuple): if not isinstance(exc_info, BaseException): raise TypeError('expected BaseException instance') try: # TODO: Filld the traceback so it appears as if the exception # was actually raised by the caller? (Not sure if possible) raise exc_info except: exc_info = sys.exc_info() exc_info = (exc_info[0], exc_info[1], exc_info[2]) with self._lock: if self._enqueued: raise RuntimeError('can not set exception of enqueued Future') self._exc_info = exc_info self._completed = True callbacks = self._prepare_done_callbacks() callbacks()" 1330,"def wait(self, timeout=None, do_raise=False): """""" Wait for the future to complete. If *timeout* is specified, it must be a floating point number representing the maximum number of seconds to wait. :param timeout: The maximum number of seconds to wait for the future to complete. :param do_raise: Raise :class:`Timeout` when a timeout occurred. :raise Timeout: If a timeout occurred and *do_raise* was True. :return: :const:`True` if the future completed, :const:`False` if a timeout occurred and *do_raise* was set to False. """""" if timeout is not None: timeout = float(timeout) start = time.clock() with self._lock: while not self._completed and not self._cancelled: if timeout is not None: time_left = timeout - (time.clock() - start) else: time_left = None if time_left is not None and time_left <= 0.0: if do_raise: raise self.Timeout() else: return False self._lock.wait(time_left) return True" 1331,"def enqueue(self, future): """""" Enqueue a future to be processed by one of the threads in the pool. The future must be bound to a worker and not have been started yet. """""" future.enqueue() with self._lock: if self._shutdown: raise RuntimeError('ThreadPool has been shut down and can no ' 'longer accept futures.') self._queue.append(future) if len(self._running) == len(self._workers): self._new_worker() self._lock.notify_all()" 1332,"def submit(self, __fun, *args, **kwargs): """""" Creates a new future and enqueues it. Returns the future. """""" future = Future().bind(__fun, *args, **kwargs) self.enqueue(future) return future" 1333,"def cancel(self, cancel_running=True, mark_completed_as_cancelled=False): """""" Cancel all futures queued in the pool. If *cancel_running* is True, futures that are currently running in the pool are cancelled as well. """""" with self._lock: for future in self._queue: future.cancel(mark_completed_as_cancelled) if cancel_running: for future in self._running: future.cancel(mark_completed_as_cancelled) self._queue.clear()" 1334,"def shutdown(self, wait=True): """""" Shut down the pool. If *wait* is True, it will wait until all futures are completed. Alternatively, you can use the #wait() method to wait with timeout supported. """""" with self._lock: self._shutdown = True self._lock.notify_all() if wait: self.wait()" 1335,"def wait(self, timeout=None): """""" Wait until all futures are completed. You should call this method only after calling #shutdown(). Returns #False if all futures are complete, #False if there are still some running. 
"""""" tbegin = _get_timeout_begin(timeout) with self._lock: while self._queue or self._running: remainder = _get_timeout_remainder(tbegin, timeout) if remainder is not None and remainder <= 0.0: return False # timeout self._lock.wait(remainder) if self._shutdown: for worker in self._workers: worker.join() return True" 1336,"def timeit(func): """""" Returns the number of seconds that a function took along with the result """""" @wraps(func) def timer_wrapper(*args, **kwargs): """""" Inner function that uses the Timer context object """""" with Timer() as timer: result = func(*args, **kwargs) return result, timer return timer_wrapper" 1337,"def timeout(seconds): """""" Raises a TimeoutError if a function does not terminate within specified seconds. """""" def _timeout_error(signal, frame): raise TimeoutError(""Operation did not finish within \ {} seconds"".format(seconds)) def timeout_decorator(func): @wraps(func) def timeout_wrapper(*args, **kwargs): signal.signal(signal.SIGALRM, _timeout_error) signal.alarm(seconds) try: return func(*args, **kwargs) finally: signal.alarm(0) return timeout_wrapper return timeout_decorator" 1338,"def create(location: str, extensions_found: List[str] = None): # -> NoParserFoundForObject: """""" Helper method provided because we actually can't put that in the constructor, it creates a bug in Nose tests https://github.com/nose-devs/nose/issues/725 :param location: :param extensions_found: :return: """""" if not extensions_found: return ObjectPresentMultipleTimesOnFileSystemError('Object : ' + location + ' is present multiple ' 'times on the file system.') else: return ObjectPresentMultipleTimesOnFileSystemError('Object : ' + location + ' is present multiple ' 'times on the file system , with extensions : ' + str(extensions_found) + '. Only one version of each ' 'object should be provided. If you need multiple files' ' to create this object, you should create a multifile' ' object instead (with each file having its own name and' ' a shared prefix)')" 1339,"def create(location: str, simpleobjects_found = None, complexobject_attributes_found = None): # -> ObjectNotFoundOnFileSystemError: """""" Helper method provided because we actually can't put that in the constructor, it creates a bug in Nose tests https://github.com/nose-devs/nose/issues/725 :param location: :return: """""" if len(complexobject_attributes_found) > 0 or len(simpleobjects_found) > 0: return ObjectNotFoundOnFileSystemError('Mandatory object : ' + location + ' could not be found on the file' ' system, either as a multifile or as a singlefile with any ' 'extension, but it seems that this is because you have left the ' 'extension in the location name. Please remove the file extension ' 'from the location name and try again') else: return ObjectNotFoundOnFileSystemError('Mandatory object : ' + location + ' could not be found on the file' ' system, either as a multifile or as a singlefile with any ' 'extension.')" 1340,"def get_unique_object_contents(self, location: str) -> Tuple[bool, str, Union[str, Dict[str, str]]]: """""" Utility method to find a unique singlefile or multifile object. This method throws * ObjectNotFoundOnFileSystemError if no file is found * ObjectPresentMultipleTimesOnFileSystemError if the object is found multiple times (for example with several file extensions, or as a file AND a folder) * IllegalContentNameError if a multifile child name is None or empty string. 
It relies on the abstract methods of this class (find_simpleobject_file_occurrences and find_multifile_object_children) to find the various files present. :param location: a location identifier compliant with the provided file mapping configuration :return: [True, singlefile_ext, singlefile_path] if a unique singlefile object is present ; False, MULTIFILE_EXT, complexobject_attributes_found] if a unique multifile object is present, with complexobject_attributes_found being a dictionary {name: location} """""" # First check what is present on the filesystem according to the filemapping simpleobjects_found = self.find_simpleobject_file_occurrences(location) complexobject_attributes_found = self.find_multifile_object_children(location, no_errors=True) # Then handle the various cases if len(simpleobjects_found) > 1 \ or (len(simpleobjects_found) == 1 and len(complexobject_attributes_found) > 0): # the object is present several times > error u = simpleobjects_found u.update(complexobject_attributes_found) raise ObjectPresentMultipleTimesOnFileSystemError.create(location, list(u.keys())) elif len(simpleobjects_found) == 1: # a singlefile object > create the output is_single_file = True ext = list(simpleobjects_found.keys())[0] singlefile_object_file_path = simpleobjects_found[ext] return is_single_file, ext, singlefile_object_file_path elif len(complexobject_attributes_found) > 0: # a multifile object > create the output is_single_file = False ext = MULTIFILE_EXT if '' in complexobject_attributes_found.keys() or None in complexobject_attributes_found.keys(): raise IllegalContentNameError.create(location, complexobject_attributes_found[MULTIFILE_EXT]) return is_single_file, ext, complexobject_attributes_found else: # handle special case of multifile object with no children (if applicable) if self.is_multifile_object_without_children(location): is_single_file = False ext = MULTIFILE_EXT return is_single_file, ext, dict() else: # try if by any chance the issue is that location has an extension loc_without_ext = splitext(location)[0] simpleobjects_found = self.find_simpleobject_file_occurrences(loc_without_ext) complexobject_attributes_found = self.find_multifile_object_children(loc_without_ext, no_errors=True) # the object was not found in a form that can be parsed raise ObjectNotFoundOnFileSystemError.create(location, simpleobjects_found, complexobject_attributes_found)" 1341,"def find_multifile_object_children(self, parent_location: str, no_errors: bool = False) -> Dict[str, str]: """""" Implementing classes should return a dictionary of , containing the named elements in this multifile object. :param parent_location: the absolute file prefix of the parent item. :return: a dictionary of {item_name : item_prefix} """""" pass" 1342,"def get_pretty_location(self, blank_parent_part: bool = False, append_file_ext: bool = True, compact_file_ext: bool = False): """""" Utility method to return a string representing the location, mode and extension of this file. 
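Illustrative output (a hedged sketch; the 'data/config' location is made up and the extension suffix depends on get_pretty_file_ext(), which is not shown here):
    obj.get_pretty_location(append_file_ext=False)                           # 'data/config'
    obj.get_pretty_location(blank_parent_part=True, append_file_ext=False)   # '  |--config'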
:return: """""" if append_file_ext: if compact_file_ext: suffix = self.ext if self.is_singlefile else '' else: suffix = ' (' + self.get_pretty_file_ext() + ')' else: suffix = '' if blank_parent_part: # TODO sep should be replaced with the appropriate separator in flat mode idx = self.location.rfind(sep) return (' ' * (idx-1-len(sep))) + '|--' + self.location[(idx+1):] + suffix else: return self.location + suffix" 1343,"def get_pretty_child_location(self, child_name, blank_parent_part: bool = False): """""" Utility method to return a string representation of the location of a child :param child_name: :param blank_parent_part: :return: """""" if blank_parent_part: idx = len(self.location) return (' ' * (idx-3)) + '|--' + child_name else: # TODO sep should be replaced with the appropriate separator in flat mode return self.location + sep + child_name" 1344,"def create_persisted_object(self, location: str, logger: Logger) -> PersistedObject: """""" Creates a PersistedObject representing the object at location 'location', and recursively creates all of its children :param location: :param logger: :return: """""" #print('Checking all files under ' + location) logger.debug('Checking all files under [{loc}]'.format(loc=location)) obj = FileMappingConfiguration.RecursivePersistedObject(location=location, file_mapping_conf=self, logger=logger) #print('File checks done') logger.debug('File checks done') return obj" 1345,"def find_multifile_object_children(self, parent_location, no_errors: bool = False) -> Dict[str, str]: """""" Implementation of the parent abstract method. In this mode, root_path should be a valid folder, and each item is a subfolder (multifile) or a file (singlefile): location/ |-singlefile_sub_item1. |-singlefile_sub_item2. |-multifile_sub_item3/ |- ... :param parent_location: the absolute file prefix of the parent item. it may be a folder (non-flat mode) or a folder + a file name prefix (flat mode) :param no_errors: a boolean used in internal recursive calls in order to catch errors. Should not be changed by users. :return: a dictionary of {item_name : item_prefix} """""" # (1) Assert that folder_path is a folder if not isdir(parent_location): if no_errors: return dict() else: raise ValueError('Cannot find a multifileobject at location \'' + parent_location + '\' : location is ' 'not a valid folder') else: # (2) List folders (multifile objects or collections) all_subfolders = [dir_ for dir_ in listdir(parent_location) if isdir(join(parent_location, dir_))] items = {item_name: join(parent_location, item_name) for item_name in all_subfolders} # (3) List singlefiles *without* their extension items.update({ item_name: join(parent_location, item_name) for item_name in [file_name[0:file_name.rindex(EXT_SEPARATOR)] for file_name in listdir(parent_location) if isfile(join(parent_location, file_name)) and EXT_SEPARATOR in file_name] }) # (4) return all return items" 1346,"def is_multifile_object_without_children(self, location: str) -> bool: """""" Returns True if an item with this location is present as a multifile object without children. For this implementation, this means that there is a folder without any files in it :param location: :return: """""" return isdir(location) and len(self.find_multifile_object_children(location)) == 0" 1347,"def get_multifile_object_child_location(self, parent_item_prefix: str, child_name: str) -> str: """""" Implementation of the parent abstract method. 
In this mode the attribute is a file inside the parent object folder :param parent_item_prefix: the absolute file prefix of the parent item. :return: the file prefix for this attribute """""" check_var(parent_item_prefix, var_types=str, var_name='parent_item_prefix') check_var(child_name, var_types=str, var_name='item_name') # assert that folder_path is a folder if not isdir(parent_item_prefix): raise ValueError( 'Cannot get attribute item in non-flat mode, parent item path is not a folder : ' + parent_item_prefix) return join(parent_item_prefix, child_name)" 1348,"def find_multifile_object_children(self, parent_location, no_errors: bool = False) -> Dict[str, str]: """""" Implementation of the parent abstract method. In this mode, each item is a set of files with the same prefix than location, separated from the attribute name by the character sequence . The location may also be directly a folder, in which case the sub items dont have a prefix. example if location = '/' parent_folder/ |-file_prefixsinglefile_sub_item1. |-file_prefixsinglefile_sub_item2. |-file_prefixmultifile_sub_item3singlesub1. |-file_prefixmultifile_sub_item3singlesub2. example if location = '/ parent_folder/ |-singlefile_sub_item1. |-singlefile_sub_item2. |-multifile_sub_item3singlesub1. |-multifile_sub_item3singlesub2. :param parent_location: the absolute file prefix of the parent item. It may be a folder (special case of the root folder) but typically is just a file prefix :param no_errors: :return: a dictionary of , """""" if parent_location == '': parent_location = '.' # (1) Find the base directory and base name if isdir(parent_location): # special case: parent location is the root folder where all the files are. parent_dir = parent_location base_prefix = '' start_with = '' else: parent_dir = dirname(parent_location) if parent_dir is '': parent_dir = '.' 
# TODO one day we'll rather want to have a uniform definition of 'location' across filemappings # Indeed as of today, location is not abstract from the file mapping implementation, since we # ""just"" use basename() rather than replacing os separators with our separator: base_prefix = basename(parent_location) # --> so it should already include self.separator to be valid start_with = self.separator # (2) list children files that are singlefiles content_files = [content_file for content_file in listdir(parent_dir) # -> we are in flat mode : should be a file not a folder : if isfile(join(parent_dir,content_file)) # -> we are looking for children of a specific item : and content_file.startswith(base_prefix) # -> we are looking for multifile child items only : and content_file != base_prefix # -> they should start with the separator (or with nothing in case of the root folder) : and (content_file[len(base_prefix):]).startswith(start_with) # -> they should have a valid extension : and (content_file[len(base_prefix + start_with):]).count(EXT_SEPARATOR) >= 1 ] # (3) build the resulting dictionary of item_name > item_prefix item_prefixes = dict() for item_file in content_files: end_name = item_file.find(self.separator, len(base_prefix + start_with)) if end_name == -1: end_name = item_file.find(EXT_SEPARATOR, len(base_prefix + start_with)) item_name = item_file[len(base_prefix + start_with):end_name] item_prefixes[item_name] = join(parent_dir, base_prefix + start_with + item_name) return item_prefixes" 1349,"def is_multifile_object_without_children(self, location: str) -> bool: """""" Returns True if an item with this location is present as a multifile object without children. For this implementation, this means that there is a file with the appropriate name but without extension :param location: :return: """""" # (1) Find the base directory and base name if isdir(location): # special case: parent location is the root folder where all the files are. return len(self.find_multifile_object_children(location)) == 0 else: # TODO same comment than in find_multifile_object_children if exists(location): # location is a file without extension. We can accept that as being a multifile object without children return True else: return False" 1350,"def get_multifile_object_child_location(self, parent_location: str, child_name: str): """""" Implementation of the parent abstract method. In this mode the attribute is a file with the same prefix, separated from the parent object name by the character sequence :param parent_location: the absolute file prefix of the parent item. :param child_name: :return: the file prefix for this attribute """""" check_var(parent_location, var_types=str, var_name='parent_path') check_var(child_name, var_types=str, var_name='item_name') # a child location is built by adding the separator between the child name and the parent location return parent_location + self.separator + child_name" 1351,"def find_simpleobject_file_occurrences(self, location) -> Dict[str, str]: """""" Implementation of the parent abstract method. :param location: :return: a dictionary{ext : file_path} """""" parent_dir = dirname(location) if parent_dir is '': parent_dir = '.' 
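# the remaining base name is the prefix that every candidate file must share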
base_prefix = basename(location) # trick : is sep_for_flat is a dot, we have to take into account that there is also a dot for the extension min_sep_count = (1 if self.separator == EXT_SEPARATOR else 0) possible_object_files = {object_file[len(base_prefix):]: join(parent_dir, object_file) for object_file in listdir(parent_dir) if isfile(parent_dir + '/' + object_file) and object_file.startswith(base_prefix) # file must be named base_prefix.something and object_file != base_prefix and object_file[len(base_prefix)] == EXT_SEPARATOR and (object_file[len(base_prefix):]).count(EXT_SEPARATOR) == 1 # and no other item separator should be present in the something and (object_file[len(base_prefix):]).count(self.separator) == min_sep_count} return possible_object_files" 1352,"def special_links_replace(text, urls): ''' Replace simplified Regulations and Guidelines links into actual links. 'urls' dictionary is expected to provide actual links to the targeted Regulations and Guidelines, as well as to the PDF file. ''' match_number = r'([A-Za-z0-9]+)' + r'(\+*)' reference_list = [(r'regulations:article:' + match_number, urls['regulations']), (r'regulations:regulation:' + match_number, urls['regulations']), (r'guidelines:article:' + match_number, urls['guidelines']), (r'guidelines:guideline:' + match_number, urls['guidelines']), ] anchor_list = [(r'regulations:contents', urls['regulations'] + r'#contents'), (r'guidelines:contents', urls['guidelines'] + r'#contents'), (r'regulations:top', urls['regulations'] + r'#'), (r'guidelines:top', urls['guidelines'] + r'#'), (r'link:pdf', urls['pdf'] + '.pdf'), ] retval = text for match, repl in reference_list: retval = re.sub(match, repl + r'#\1\2', retval) for match, repl in anchor_list: retval = re.sub(match, repl, retval) return retval" 1353,"def list2html(text): ''' Very simple replacement for lists, no nesting, not even two lists in the same 'text'... (yet sufficient for the current regulations) Assumes list is in a paragraph. ''' match = r'- (.+)\n' replace = r'

<li>\1</li>\n' text = re.sub(match, replace, text) # Set start of list text = text.replace('<li>', '<ul>\n<li>', 1) # Set end of list tmp = text.rsplit('</li>', 1) return '</li>\n</ul>'.join(tmp)" 1354,"def link2html(text): ''' Turns md links to html ''' match = r'\[([^\]]+)\]\(([^)]+)\)' replace = r'<a href=""\2"">\1</a>' return re.sub(match, replace, text)" 1355,"def simple_md2html(text, urls): ''' Convert a text from md to html ''' retval = special_links_replace(text, urls) # Create a par break for double newlines retval = re.sub(r'\n\n', r'</p>\n<p>', retval) # Create a visual br for every new line retval = re.sub(r'\n', r'<br />\n', retval) # Do we really need this ? Help reduce the diff to only '\n' diff. retval = re.sub(r'""', r'&quot;', retval) retval = list2html(retval) return link2html(retval)" 1356,"def generate_ul(self, a_list): ''' Determines if we should generate the 'ul' around the list 'a_list' ''' return len(a_list) > 0 and (isinstance(a_list[0], Rule) or isinstance(a_list[0], LabelDecl))" 1357,"def get_version_info(): """"""Extract version information as a dictionary from version.py."""""" version_info = {} with open(os.path.join(""refcycle"", ""version.py""), 'r') as f: version_code = compile(f.read(), ""version.py"", 'exec') exec(version_code, version_info) return version_info" 1358,"def div_filter(key: str, value: list, format: str, meta: Any) -> Optional[list]: """"""Filter the JSON ``value`` for alert divs. Arguments --------- key Key of the structure value Values in the structure format Output format of the processing meta Meta information """""" if key != ""Div"" or format != ""latex"": return None [[_, classes, _], contents] = value try: alert_type = [name.split(""-"")[1] for name in classes if ""-"" in name][0] except IndexError: return None if alert_type not in ALLOWED_ALERT_TYPES.__members__: return None filtered = [RawBlock(""latex"", rf""\begin{{{alert_type}box}}"")] filtered.extend(contents) filtered.append(RawBlock(""latex"", rf""\end{{{alert_type}box}}"")) return filtered" 1359,"def convert_div(text: str, format: Optional[str] = None) -> ""applyJSONFilters"": """"""Apply the `div_filter` action to the text."""""" return applyJSONFilters([div_filter], text, format=format)" 1360,"def raw_html_filter(key: str, value: list, format: str, meta: Any) -> Optional[list]: """"""Filter the JSON ``value`` for raw html to convert to LaTeX. Arguments --------- key Key of the structure value Values in the structure format Output format of the processing meta Meta information """""" if key == ""RawInline"" and format == ""latex"" and value[0] == ""html"": if value[1] == ""<sup>"": filtered = [RawInline(""latex"", r""\textsuperscript{"")] elif value[1] == ""</sup>"": filtered = [RawInline(""latex"", ""}"")] elif value[1] == ""<sub>"": filtered = [RawInline(""latex"", r""\textsubscript{"")] elif value[1] == ""</sub>"": filtered = [RawInline(""latex"", ""}"")] else: return None return filtered return None" 1361,"def convert_raw_html(text: str, format: Optional[str] = None) -> ""applyJSONFilters"": """"""Apply the `raw_html_filter` action to the text."""""" return applyJSONFilters([raw_html_filter], text, format=format)" 1362,"def add(self, element): """"""Add an element to this set."""""" key = self._transform(element) if key not in self._elements: self._elements[key] = element" 1363,"def discard(self, element): """"""Remove an element. Do not raise an exception if absent."""""" key = self._transform(element) if key in self._elements: del self._elements[key]" 1364,"def add_items_to_message(msg, log_dict): """"""Utility function to add dictionary items to a log message."""""" out = msg for key, value in log_dict.items(): out += "" {}={}"".format(key, value) return out" 1365,"def log_event(event, logger=root_logger, **log_dict): """""" Utility function for logging an event (e.g. for metric analysis). If no logger is given, fall back to the root logger.
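Illustrative usage (a hedged sketch; the event name and extra fields are made-up values):
    log_event('cache_miss', key='user:42', backend='redis')
    # -> logs: event=cache_miss key=user:42 backend=redis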
"""""" msg = ""event={}"".format(event) msg = add_items_to_message(msg, log_dict) log_dict.update({'event': event}) logger.info(msg, extra=log_dict)" 1366,"def metric(cls, name, count, elapsed): """"""A metric function that writes multiple CSV files :arg str name: name of the metric :arg int count: number of items :arg float elapsed: time in seconds """""" if name is None: warnings.warn(""Ignoring unnamed metric"", stacklevel=3) return with cls.lock: if not cls.instances: # first call shutil.rmtree(cls.outdir, ignore_errors=True) os.makedirs(cls.outdir) if cls.dump_atexit: atexit.register(cls.dump) try: self = cls.instances[name] except KeyError: self = cls.instances[name] = cls(name) self.writer.writerow((count, ""%f""%elapsed))" 1367,"def dump(cls): """"""Output all recorded metrics"""""" with cls.lock: if not cls.instances: return atexit.unregister(cls.dump) for self in cls.instances.values(): self.fh.close()" 1368,"def metric(self, name, count, elapsed): """"""A metric function that writes a single CSV file :arg str name: name of the metric :arg int count: number of items :arg float elapsed: time in seconds """""" if name is None: warnings.warn(""Ignoring unnamed metric"", stacklevel=3) return with self.lock: self.writer.writerow((name, count, ""%f""%elapsed))" 1369,"def dump(self): """"""Output all recorded metrics"""""" with self.lock: atexit.unregister(self.dump) self.fh.close()" 1370,"def read(parts): """""" Build an absolute path from parts array and and return the contents of the resulting file. Assume UTF-8 encoding. """""" cur_dir = os.path.abspath(os.path.dirname(__file__)) with codecs.open(os.path.join(cur_dir, *parts), ""rb"", ""utf-8"") as f: return f.read()" 1371,"def find_meta(meta): """""" Extract __*meta*__ from META_FILE. """""" meta_match = re.search( r""^__{meta}__ = ['\""]([^'\""]*)['\""]"".format(meta=meta), META_FILE, re.M ) if meta_match: return meta_match.group(1) raise RuntimeError(""Unable to find __{meta}__ string."".format(meta=meta))" 1372,"def ensure_clean_git(operation='operation'): """""" Verify that git has no uncommitted changes """""" if os.system('git diff-index --quiet HEAD --'): print(""Unstaged or uncommitted changes detected. 
{} aborted."".format( operation.capitalize())) sys.exit()" 1373,"def load(self): """""" _load_ Load the defaults file if specified and overlay the json file on top of that """""" if self._defaults_file is not None: if not os.path.exists(self._defaults_file): msg = ""Unable to find defaults file: {}"".format(self._defaults_file) LOGGER.error(msg) raise RuntimeError(msg) with open(self._defaults_file, 'r') as handle: self._defaults = json.load(handle) self.update(self._defaults) if self._settings_file is None: msg = ""No context file has been provided"" LOGGER.error(msg) raise RuntimeError(msg) if not os.path.exists(self._settings_file): msg = ""Unable to find settings file: {}"".format(self._settings_file) LOGGER.error(msg) raise RuntimeError(msg) with open(self._settings_file, 'r') as handle: settings = json.load(handle) update(self, settings) return" 1374,"def check_var(var, var_types:Union[type, List[type]] =None, var_name=None, enforce_not_none:bool = True, allowed_values:Set = None, min_value = None, min_strict:bool = False, max_value = None, max_strict:bool = False, min_len:int = None, min_len_strict:bool = False, max_len:int = None, max_len_strict:bool = False): """""" Helper method to check that an object has certain properties: * not none * a certain type * in some accepted values * in some accepted range :param var: the object to check :param var_types: the type(s) to enforce. If None, type will not be enforced :param var_name: the name of the varioable to be used in error messages :param enforce_not_none: boolean, default True. Whether to enforce that var is not None. :param allowed_values: an optional set of allowed values :param min_value: an optional minimum value :param min_strict: if True, only values strictly greater than the minimum value will be accepted :param max_value: an optional maximum value :param max_strict: if True, only values strictly lesser than the minimum value will be accepted :return: """""" var_name = var_name or 'object' if enforce_not_none and (var is None): # enforce not none raise MissingMandatoryParameterException('Error, ' + var_name + '"" is mandatory, it should be non-None') if not (var is None) and not (var_types is None): # enforce type if not isinstance(var_types, list): var_types = [var_types] match = False for var_type in var_types: # just in case, even though users should use FunctionType or MethodType which is the true type if var_type is Callable: if callable(var): match = True break else: if isinstance(var, var_type): match = True break if not match: raise TypeError('Error, ' + var_name + '"" should be one of type(s) ' + str(var_types) + ', found: ' + str(type(var))) if var is not None: if allowed_values is not None: # enforce allowed values if var not in allowed_values: raise TypeError('Error, ' + var_name + '"" should be one of ""' + str(allowed_values) + '"", found: ' + str(var)) if min_value is not None: # enforce min value if min_strict: if not (var > min_value): raise TypeError( 'Error, ' + var_name + '"" should be strictly greater than ""' + str(min_value) + '"", found: ' + str(var)) else: if not (var >= min_value): raise TypeError( 'Error, ' + var_name + '"" should be greater than ""' + str(min_value) + '"", found: ' + str(var)) if max_value is not None: # enforce max value if max_strict: if not (var < max_value): raise TypeError( 'Error, ' + var_name + '"" should be strictly lesser than ""' + str(max_value) + '"", found: ' + str(var)) else: if not (var <= max_value): raise TypeError( 'Error, ' + var_name + '"" should be lesser 
than ""' + str(max_value) + '"", found: ' + str(var)) if min_len is not None: # enforce min length if min_len_strict: if not (len(var) > min_len): raise TypeError( 'Error, ' + var_name + '"" length should be strictly greater than ""' + str(min_len) + '"", found: ' + str(len(var))) else: if not (len(var) >= min_len): raise TypeError( 'Error, ' + var_name + '"" length should be greater than ""' + str(min_len) + '"", found: ' + str(len(var))) if max_len is not None: # enforce max length if max_len_strict: if not (len(var) < max_len): raise TypeError( 'Error, ' + var_name + '"" length should be strictly lesser than ""' + str(max_len) + '"", found: ' + str(len(var))) else: if not (len(var) <= max_len): raise TypeError( 'Error, ' + var_name + '"" length should be lesser than ""' + str(max_len) + '"", found: ' + str(len(var)))" 1375,"def hasmethod(obj, meth): """""" Checks if an object, obj, has a callable method, meth return True or False """""" if hasattr(obj, meth): return callable(getattr(obj,meth)) return False" 1376,"def hasvar(obj, var): """""" Checks if object, obj has a variable var return True or False """""" if hasattr(obj, var): return not callable(getattr(obj, var)) return False" 1377,"def getmethattr(obj, meth): """""" Returns either the variable value or method invocation """""" if hasmethod(obj, meth): return getattr(obj, meth)() elif hasvar(obj, meth): return getattr(obj, meth) return None" 1378,"def assure_obj_child_dict(obj, var): """"""Assure the object has the specified child dict """""" if not var in obj or type(obj[var]) != type({}): obj[var] = {} return obj" 1379,"def warmup(f): """""" Decorator to run warmup before running a command """""" @wraps(f) def wrapped(self, *args, **kwargs): if not self.warmed_up: self.warmup() return f(self, *args, **kwargs) return wrapped" 1380,"def install_required(f): """""" Return an exception if the namespace is not already installed """""" @wraps(f) def wrapped(self, *args, **kwargs): if self.directory.new: raise SprinterException(""Namespace %s is not yet installed!"" % self.namespace) return f(self, *args, **kwargs) return wrapped" 1381,"def install(self): """""" Install the environment """""" self.phase = PHASE.INSTALL if not self.directory.new: self.logger.info(""Namespace %s directory already exists!"" % self.namespace) self.source = load_manifest(self.directory.manifest_path) return self.update() try: self.logger.info(""Installing environment %s..."" % self.namespace) self.directory.initialize() self.install_sandboxes() self.instantiate_features() self.grab_inputs() self._specialize() for feature in self.features.run_order: self.run_action(feature, 'sync') self.inject_environment_config() self._finalize() except Exception: self.logger.debug("""", exc_info=sys.exc_info()) self.logger.info(""An error occured during installation!"") if not self.ignore_errors: self.clear_all() self.logger.info(""Removing installation %s..."" % self.namespace) self.directory.remove() et, ei, tb = sys.exc_info() reraise(et, ei, tb)" 1382,"def update(self, reconfigure=False): """""" update the environment """""" try: self.phase = PHASE.UPDATE self.logger.info(""Updating environment %s..."" % self.namespace) self.install_sandboxes() self.instantiate_features() # We don't grab inputs, only on install # updates inputs are grabbed on demand # self.grab_inputs(reconfigure=reconfigure) if reconfigure: self.grab_inputs(reconfigure=True) else: self._copy_source_to_target() self._specialize(reconfigure=reconfigure) for feature in self.features.run_order: 
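# run each feature's 'sync' action against the freshly specialized target manifest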
self.run_action(feature, 'sync') self.inject_environment_config() self._finalize() except Exception: self.logger.debug("""", exc_info=sys.exc_info()) et, ei, tb = sys.exc_info() reraise(et, ei, tb)" 1383,"def remove(self): """""" remove the environment """""" try: self.phase = PHASE.REMOVE self.logger.info(""Removing environment %s..."" % self.namespace) self.instantiate_features() self._specialize() for feature in self.features.run_order: try: self.run_action(feature, 'sync') except FormulaException: # continue trying to remove any remaining features. pass self.clear_all() self.directory.remove() self.injections.commit() if self.error_occured: self.logger.error(warning_template) self.logger.error(REMOVE_WARNING) except Exception: self.logger.debug("""", exc_info=sys.exc_info()) et, ei, tb = sys.exc_info() reraise(et, ei, tb)" 1384,"def deactivate(self): """""" deactivate the environment """""" try: self.phase = PHASE.DEACTIVATE self.logger.info(""Deactivating environment %s..."" % self.namespace) self.directory.rewrite_config = False self.instantiate_features() self._specialize() for feature in self.features.run_order: self.logger.info(""Deactivating %s..."" % feature[0]) self.run_action(feature, 'deactivate') self.clear_all() self._finalize() except Exception: self.logger.debug("""", exc_info=sys.exc_info()) et, ei, tb = sys.exc_info() reraise(et, ei, tb)" 1385,"def validate(self): """""" Validate the target environment """""" self.phase = PHASE.VALIDATE self.logger.info(""Validating %s..."" % self.namespace) self.instantiate_features() context_dict = {} if self.target: for s in self.target.formula_sections(): context_dict[""%s:root_dir"" % s] = self.directory.install_directory(s) context_dict['config:root_dir'] = self.directory.root_dir context_dict['config:node'] = system.NODE self.target.add_additional_context(context_dict) for feature in self.features.run_order: self.run_action(feature, 'validate', run_if_error=True)" 1386,"def clear_all(self): """""" clear all files that were to be injected """""" self.injections.clear_all() for config_file in CONFIG_FILES: self.injections.clear(os.path.join(""~"", config_file))" 1387,"def write_debug_log(self, file_path): """""" Write the debug log to a file """""" with open(file_path, ""wb+"") as fh: fh.write(system.get_system_info().encode('utf-8')) # writing to debug stream self._debug_stream.seek(0) fh.write(self._debug_stream.read().encode('utf-8')) fh.write(""The following errors occured:\n"".encode('utf-8')) for error in self._errors: fh.write((error + ""\n"").encode('utf-8')) for k, v in self._error_dict.items(): if len(v) > 0: fh.write((""Error(s) in %s with formula %s:\n"" % k).encode('utf-8')) for error in v: fh.write((error + ""\n"").encode('utf-8'))" 1388,"def write_manifest(self): """""" Write the manifest to the file """""" if os.path.exists(self.directory.manifest_path): self.main_manifest.write(open(self.directory.manifest_path, ""w+""))" 1389,"def message_failure(self): """""" return a failure message, if one exists """""" if not isinstance(self.main_manifest, Manifest): return None return self.main_manifest.get('config', 'message_failure', default=None)" 1390,"def warmup(self): """""" initialize variables necessary to perform a sprinter action """""" self.logger.debug(""Warming up..."") try: if not isinstance(self.source, Manifest) and self.source: self.source = load_manifest(self.source) if not isinstance(self.target, Manifest) and self.target: self.target = load_manifest(self.target) self.main_manifest = self.target or 
self.source except lib.BadCredentialsException: e = sys.exc_info()[1] self.logger.error(str(e)) raise SprinterException(""Fatal error! Bad credentials to grab manifest!"") if not getattr(self, 'namespace', None): if self.target: self.namespace = self.target.namespace elif not self.namespace and self.source: self.namespace = self.source.namespace else: raise SprinterException(""No environment name has been specified!"") self.directory_root = self.custom_directory_root if not self.directory: if not self.directory_root: self.directory_root = os.path.join(self.root, self.namespace) self.directory = Directory(self.directory_root, shell_util_path=self.shell_util_path) if not self.injections: self.injections = Injections(wrapper=""%s_%s"" % (self.sprinter_namespace.upper(), self.namespace), override=""SPRINTER_OVERRIDES"") if not self.global_injections: self.global_injections = Injections(wrapper=""%s"" % self.sprinter_namespace.upper() + ""GLOBALS"", override=""SPRINTER_OVERRIDES"") # append the bin, in the case sandboxes are necessary to # execute commands further down the sprinter lifecycle os.environ['PATH'] = self.directory.bin_path() + "":"" + os.environ['PATH'] self.warmed_up = True" 1391,"def _inject_config_source(self, source_filename, files_to_inject): """""" Inject existing environmental config with namespace sourcing. Returns a tuple of the first file name and path found. """""" # src_path = os.path.join(self.directory.root_dir, source_filename) # src_exec = ""[ -r %s ] && . %s"" % (src_path, src_path) src_exec = ""[ -r {0} ] && . {0}"".format(os.path.join(self.directory.root_dir, source_filename)) # The ridiculous construction above is necessary to avoid failing tests(!) for config_file in files_to_inject: config_path = os.path.expanduser(os.path.join(""~"", config_file)) if os.path.exists(config_path): self.injections.inject(config_path, src_exec) break else: config_file = files_to_inject[0] config_path = os.path.expanduser(os.path.join(""~"", config_file)) self.logger.info(""No config files found to source %s, creating ~/%s!"" % (source_filename, config_file)) self.injections.inject(config_path, src_exec) return (config_file, config_path)" 1392,"def _finalize(self): """""" command to run at the end of sprinter's run """""" self.logger.info(""Finalizing..."") self.write_manifest() if self.directory.rewrite_config: # always ensure .rc is written (sourcing .env) self.directory.add_to_rc('') # prepend brew for global installs if system.is_osx() and self.main_manifest.is_affirmative('config', 'use_global_packagemanagers'): self.directory.add_to_env('__sprinter_prepend_path ""%s"" PATH' % '/usr/local/bin') self.directory.add_to_env('__sprinter_prepend_path ""%s"" PATH' % self.directory.bin_path()) self.directory.add_to_env('__sprinter_prepend_path ""%s"" LIBRARY_PATH' % self.directory.lib_path()) self.directory.add_to_env('__sprinter_prepend_path ""%s"" C_INCLUDE_PATH' % self.directory.include_path()) self.directory.finalize() self.injections.commit() self.global_injections.commit() if not os.path.exists(os.path.join(self.root, "".global"")): self.logger.debug(""Global directory doesn't exist! 
creating..."") os.makedirs(os.path.join(self.root, "".global"")) self.logger.debug(""Writing shell util file..."") with open(self.shell_util_path, 'w+') as fh: fh.write(shell_utils_template) if self.error_occured: raise SprinterException(""Error occured!"") if self.message_success(): self.logger.info(self.message_success()) self.logger.info(""Done!"") self.logger.info(""NOTE: Please remember to open new shells/terminals to use the modified environment"")" 1393,"def _build_logger(self, level=logging.INFO): """""" return a logger. if logger is none, generate a logger from stdout """""" self._debug_stream = StringIO() logger = logging.getLogger('sprinter') # stdout log out_hdlr = logging.StreamHandler(sys.stdout) out_hdlr.setLevel(level) logger.addHandler(out_hdlr) # debug log debug_hdlr = logging.StreamHandler(self._debug_stream) debug_hdlr.setFormatter(logging.Formatter('%(asctime)s %(message)s')) debug_hdlr.setLevel(logging.DEBUG) logger.addHandler(debug_hdlr) logger.setLevel(logging.DEBUG) return logger" 1394,"def run_action(self, feature, action, run_if_error=False, raise_exception=True): """""" Run an action, and log it's output in case of errors """""" if len(self._error_dict[feature]) > 0 and not run_if_error: return error = None instance = self.features[feature] try: getattr(instance, action)() # catch a generic exception within a feature except Exception as e: e = sys.exc_info()[1] self.logger.info(""An exception occurred with action %s in feature %s!"" % (action, feature)) self.logger.debug(""Exception"", exc_info=sys.exc_info()) error = str(e) self.log_feature_error(feature, str(e)) # any error in a feature should fail immediately - unless it occurred # from the remove() method in which case continue the rest of the # feature removal from there if error is not None and raise_exception: exception_msg = ""%s action failed for feature %s: %s"" % (action, feature, error) if self.phase == PHASE.REMOVE: raise FormulaException(exception_msg) else: raise SprinterException(exception_msg) return error" 1395,"def _specialize(self, reconfigure=False): """""" Add variables and specialize contexts """""" # add in the 'root_dir' directories to the context dictionaries for manifest in [self.source, self.target]: context_dict = {} if manifest: for s in manifest.formula_sections(): context_dict[""%s:root_dir"" % s] = self.directory.install_directory(s) context_dict['config:root_dir'] = self.directory.root_dir context_dict['config:node'] = system.NODE manifest.add_additional_context(context_dict) self._validate_manifest() for feature in self.features.run_order: if not reconfigure: self.run_action(feature, 'resolve') # if a target doesn't exist, no need to prompt. instance = self.features[feature] if instance.target: self.run_action(feature, 'prompt')" 1396,"def _copy_source_to_target(self): """""" copy source user configuration to target """""" if self.source and self.target: for k, v in self.source.items('config'): # always have source override target. 
self.target.set_input(k, v)" 1397,"def grab_inputs(self, reconfigure=False): """""" Resolve the source and target config section """""" self._copy_source_to_target() if self.target: self.target.grab_inputs(force=reconfigure)" 1398,"def connect(host, username, password, port=443, verify=False, debug=False): ''' Connect to a vCenter via the API :param host: Hostname or IP of the vCenter :type host: str or unicode :param username: Username :type user: str or unicode :param password: Password :type user: str or unicode :param port: Port on which the vCenter API is running (default: 443) :type port: int :param verify: Whether to verify SSL certs upon connection (default: False) :type verify: bool :param debug: Debug option (default: False) :type debug: bool :return: Content :rtype: vim.ServiceInstanceContent ''' context = ssl.SSLContext(ssl.PROTOCOL_TLSv1_2) if not verify: # Disable warnings about unsigned certificates context.verify_mode = ssl.CERT_NONE requests.packages.urllib3.disable_warnings() try: si = SmartConnect( host=host, user=username, pwd=password, port=port, sslContext=context ) # Register auto disconnect atexit.register(Disconnect, si) # Return content return si.RetrieveContent() except IOError as e: print('I/O error({0}): {1}'.format(e.errno, e.strerror)) except vmodl.MethodFault as e: print('Connection could not be established', file=sys.stderr) raise ConnectionError('Connection could not be established') print('Caught vmodl fault: ', e.msg, file=sys.stderr) if debug: traceback.print_exc() except Exception as e: print('Caught exception:', str(e), file=sys.stderr) if debug: traceback.print_exc()" 1399,"def parse(ignore_file='.gitignore', git_dir='.git', additional_files=(), global_=True, root_dir=None, defaults=True): """""" Collects a list of all ignore patterns configured in a local Git repository as specified in the Git documentation. See https://git-scm.com/docs/gitignore#_description The returned #IgnoreListCollection is guaranteed to contain at least one #IgnoreList with #IgnoreList.root pointing to the specified *root_dir* (which defaults to the parent directory of *git_dir*) as the first element. """""" result = IgnoreListCollection() if root_dir is None: if git_dir is None: raise ValueError(""root_dir or git_dir must be specified"") root_dir = os.path.dirname(os.path.abspath(git_dir)) def parse(filename, root=None): if os.path.isfile(filename): if root is None: root = os.path.dirname(os.path.abspath(filename)) with open(filename) as fp: result.parse(fp, root) result.append(IgnoreList(root_dir)) if ignore_file is not None: parse(ignore_file) for filename in additional_files: parse(filename) if git_dir is not None: parse(os.path.join(git_dir, 'info', 'exclude'), root_dir) if global_: # TODO: Read the core.excludeFiles configuration value. parse(os.path.expanduser('~/.gitignore'), root_dir) if defaults: result.append(get_defaults(root_dir)) return result" 1400,"def walk(patterns, dirname): """""" Like #os.walk(), but filters the files and directories that are excluded by the specified *patterns*. # Arguments patterns (IgnoreList, IgnoreListCollection): Can also be any object that implements the #IgnoreList.match() interface. dirname (str): The directory to walk. 
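Illustrative usage (a hedged sketch; assumes the current working directory is a git checkout):
    patterns = parse(root_dir='.')
    for root, dirs, files in walk(patterns, '.'):
        print(root, files)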
"""""" join = os.path.join for root, dirs, files in os.walk(dirname, topdown=True): dirs[:] = [d for d in dirs if patterns.match(join(root, d), True) != MATCH_IGNORE] files[:] = [f for f in files if patterns.match(join(root, f), False) != MATCH_IGNORE] yield root, dirs, files" 1401,"def parse(self, lines): """""" Parses the `.gitignore` file represented by the *lines*. """""" if isinstance(lines, str): lines = lines.split('\n') sub = _re.sub for line in lines: if line.endswith('\n'): line = line[:-1] line = line.lstrip() if not line.startswith('#'): invert = False if line.startswith('!'): line = line[1:] invert = True while line.endswith(' ') and line[-2:] != '\ ': line = line[:-1] line = sub(r'\\([!# ])', r'\1', line) if '/' in line and not line.startswith('/'): # Patterns with a slash can only be matched absolute. line = '/' + line self.patterns.append(Pattern(line, invert))" 1402,"def match(self, filename, isdir): """""" Match the specified *filename*. If *isdir* is False, directory-only patterns will be ignored. Returns one of - #MATCH_DEFAULT - #MATCH_IGNORE - #MATCH_INCLUDE """""" fnmatch = _fnmatch.fnmatch ignored = False filename = self.convert_path(filename) basename = os.path.basename(filename) for pattern in self.patterns: if pattern.dir_only and not isdir: continue if (not ignored or pattern.invert) and pattern.match(filename): if pattern.invert: # This file is definitely NOT ignored, no matter what other patterns match return MATCH_INCLUDE ignored = True if ignored: return MATCH_IGNORE else: return MATCH_DEFAULT" 1403,"def parse(self, lines, root): """""" Shortcut for #IgnoreList.parse() and #IgnoreListCollection.append(). """""" lst = IgnoreList(root) lst.parse(lines) self.append(lst)" 1404,"def match(self, filename, isdir=False): """""" Match all the #IgnoreList#s` in this collection. Returns one of - #MATCH_DEFAULT - #MATCH_IGNORE - #MATCH_INCLUDE """""" for lst in self: result = lst.match(filename, isdir) if result != MATCH_DEFAULT: return result return MATCH_DEFAULT" 1405,"def reply(self,message,message_type): """""" Send a reply message of the given type Args: - message: the message to publish - message_type: the type of message being sent """""" if message_type == MULTIPART: raise Exception(""Unsupported reply type"") super(Replier,self).send(message,message_type)" 1406,"def parse_domain(url): """""" parse the domain from the url """""" domain_match = lib.DOMAIN_REGEX.match(url) if domain_match: return domain_match.group()" 1407,"def get_credentials(options, environment): """""" Get credentials or prompt for them from options """""" if options['--username'] or options['--auth']: if not options['--username']: options[''] = lib.prompt(""Please enter the username for %s..."" % environment) if not options['--password']: options[''] = lib.prompt(""Please enter the password for %s..."" % environment, secret=True) return options" 1408,"def check_type(self, value): """""" Raises a #TypeError if *value* is not an instance of the field's #type. """""" if self.null and value is None: return if self.type is not None and not isinstance(value, self.type): msg = '{0!r} expected type {1}' raise TypeError(msg.format(self.full_name, self.type.__name__))" 1409,"def get_default(self): """""" Return the default value of the field. Returns either #default, the return value of #default_factory or raises a #RuntimeError if the field has no default value. 
"""""" if self.default is not NotImplemented: return self.default elif self.default_factory is not None: return self.default_factory() else: raise RuntimeError('{0!r} has no default value'.format(self.full_name))" 1410,"def full_name(self): """""" The full name of the field. This is the field's entities name concatenated with the field's name. If the field is unnamed or not bound to an entity, the result respectively contains None. """""" entity = self.entity.__name__ if self.entity is not None else None name = self.name if self.name is not None else None if entity and name: return entity + '.' + name elif entity: return entity + '.' elif name: return '.' + name else: return '.'" 1411,"def type_name(self): """""" Returns the full type identifier of the field. """""" res = self.type.__name__ if self.type.__module__ not in ('__builtin__', 'builtins'): res = self.type.__module__ + '.' + res return res" 1412,"def get_subclass_from_module(module, parent_class): """""" Get a subclass of parent_class from the module at module get_subclass_from_module performs reflection to find the first class that extends the parent_class in the module path, and returns it. """""" try: r = __recursive_import(module) member_dict = dict(inspect.getmembers(r)) sprinter_class = parent_class for v in member_dict.values(): if inspect.isclass(v) and issubclass(v, parent_class) and v != parent_class: if sprinter_class is parent_class: sprinter_class = v if sprinter_class is None: raise SprinterException(""No subclass %s that extends %s exists in classpath!"" % (module, str(parent_class))) return sprinter_class except ImportError: e = sys.exc_info()[1] raise e" 1413,"def __recursive_import(module_name): """""" Recursively looks for and imports the names, returning the module desired >>> __recursive_import(""sprinter.formula.unpack"") # doctest: +ELLIPSIS currently module with relative imports don't work. """""" names = module_name.split(""."") path = None module = None while len(names) > 0: if module: path = module.__path__ name = names.pop(0) (module_file, pathname, description) = imp.find_module(name, path) module = imp.load_module(name, module_file, pathname, description) return module" 1414,"def err_exit(msg, rc=1): """"""Print msg to stderr and exit with rc. """""" print(msg, file=sys.stderr) sys.exit(rc)" 1415,"def popen(self, cmd): """"""Execute an external command and return (rc, output). """""" process = Popen(cmd, shell=True, stdout=PIPE, env=self.env) stdoutdata, stderrdata = process.communicate() return process.returncode, stdoutdata" 1416,"def read_file(self, infile): """"""Read a reST file into a string. """""" try: with open(infile, 'rt') as file: return file.read() except UnicodeDecodeError as e: err_exit('Error reading %s: %s' % (infile, e)) except (IOError, OSError) as e: err_exit('Error reading %s: %s' % (infile, e.strerror or e))" 1417,"def write_file(self, html, outfile): """"""Write an HTML string to a file. """""" try: with open(outfile, 'wt') as file: file.write(html) except (IOError, OSError) as e: err_exit('Error writing %s: %s' % (outfile, e.strerror or e))" 1418,"def convert_string(self, rest): """"""Convert a reST string to an HTML string. """""" try: html = publish_string(rest, writer_name='html') except SystemExit as e: err_exit('HTML conversion failed with error: %s' % e.code) else: if sys.version_info[0] >= 3: return html.decode('utf-8') return html" 1419,"def apply_styles(self, html, styles): """"""Insert style information into the HTML string. 
"""""" index = html.find('') if index >= 0: return ''.join((html[:index], styles, html[index:])) return html" 1420,"def publish_string(self, rest, outfile, styles=''): """"""Render a reST string as HTML. """""" html = self.convert_string(rest) html = self.strip_xml_header(html) html = self.apply_styles(html, styles) self.write_file(html, outfile) return outfile" 1421,"def publish_file(self, infile, outfile, styles=''): """"""Render a reST file as HTML. """""" rest = self.read_file(infile) return self.publish_string(rest, outfile, styles)" 1422,"def upgrade(self): """"""Upgrade the config file. """""" warn('Upgrading ' + self.filename) if self.backup_config(self.filename): return self.write_default_config(self.filename) return False" 1423,"def backup_config(self, filename): """"""Backup the current config file. """""" backup_name = filename + '-' + self.version warn('Moving current configuration to ' + backup_name) try: shutil.copy2(filename, backup_name) return True except (IOError, OSError) as e: print('Error copying %s: %s' % (filename, e.strerror or e), file=sys.stderr) return False" 1424,"def write_default_config(self, filename): """"""Write the default config file. """""" try: with open(filename, 'wt') as file: file.write(DEFAULT_CONFIG) return True except (IOError, OSError) as e: print('Error writing %s: %s' % (filename, e.strerror or e), file=sys.stderr) return False" 1425,"def set_defaults(self, config_file): """"""Set defaults. """""" self.defaults = Defaults(config_file) self.python = Python() self.setuptools = Setuptools() self.docutils = Docutils() self.styles = self.defaults.styles self.browser = self.defaults.browser self.list = False" 1426,"def reset_defaults(self, config_file): """"""Reset defaults. """""" if not exists(config_file): err_exit('No such file: %(config_file)s' % locals()) if not isfile(config_file): err_exit('Not a file: %(config_file)s' % locals()) if not os.access(config_file, os.R_OK): err_exit('File cannot be read: %(config_file)s' % locals()) self.set_defaults(config_file)" 1427,"def write_defaults(self): """"""Create default config file and reload. """""" self.defaults.write() self.reset_defaults(self.defaults.filename)" 1428,"def upgrade_defaults(self): """"""Upgrade config file and reload. """""" self.defaults.upgrade() self.reset_defaults(self.defaults.filename)" 1429,"def parse_options(self, args, depth=0): """"""Parse command line options. 
"""""" style_names = tuple(self.defaults.known_styles) style_opts = tuple('--'+x for x in style_names) try: options, remaining_args = getopt.gnu_getopt(args, 'b:c:hls:v', ('help', 'style=', 'version', 'list-styles', 'browser=', 'config-file=') + style_names) except getopt.GetoptError as e: err_exit('viewdoc: %s\n%s' % (e.msg, USAGE)) for name, value in options: if name in ('-s', '--style'): self.styles = self.defaults.known_styles.get(value, '') elif name in style_opts: self.styles = self.defaults.known_styles.get(name[2:], '') elif name in ('-b', '--browser'): self.browser = value elif name in ('-l', '--list-styles'): self.list = True elif name in ('-h', '--help'): msg_exit(HELP) elif name in ('-v', '--version'): msg_exit(VERSION) elif name in ('-c', '--config-file') and depth == 0: self.reset_defaults(expanduser(value)) return self.parse_options(args, depth+1) if len(remaining_args) > 1: err_exit('viewdoc: too many arguments\n%s' % USAGE) if not isfile(self.defaults.filename) and depth == 0: self.write_defaults() return self.parse_options(args, depth+1) if self.defaults.version < CONFIG_VERSION and depth == 0: self.upgrade_defaults() return self.parse_options(args, depth+1) if self.list: self.list_styles() return remaining_args" 1430,"def list_styles(self): """"""Print available styles and exit. """""" known = sorted(self.defaults.known_styles) if not known: err_exit('No styles', 0) for style in known: if style == self.defaults.default_style: print(style, '(default)') else: print(style) sys.exit(0)" 1431,"def render_file(self, filename): """"""Convert a reST file to HTML. """""" dirname, basename = split(filename) with changedir(dirname): infile = abspath(basename) outfile = abspath('.%s.html' % basename) self.docutils.publish_file(infile, outfile, self.styles) return outfile" 1432,"def render_long_description(self, dirname): """"""Convert a package's long description to HTML. """""" with changedir(dirname): self.setuptools.check_valid_package() long_description = self.setuptools.get_long_description() outfile = abspath('.long-description.html') self.docutils.publish_string(long_description, outfile, self.styles) return outfile" 1433,"def open_in_browser(self, outfile): """"""Open the given HTML file in a browser. """""" if self.browser == 'default': webbrowser.open('file://%s' % outfile) else: browser = webbrowser.get(self.browser) browser.open('file://%s' % outfile)" 1434,"def run(self): """"""Render and display Python package documentation. """""" os.environ['JARN_RUN'] = '1' self.python.check_valid_python() args = self.parse_options(self.args) if args: arg = args[0] else: arg = os.curdir if arg: arg = expanduser(arg) if isfile(arg): outfile = self.render_file(arg) elif isdir(arg): outfile = self.render_long_description(arg) else: err_exit('No such file or directory: %s' % arg) self.open_in_browser(outfile)" 1435,"def preprocess_cell( self, cell: ""NotebookNode"", resources: dict, cell_index: int ) -> Tuple[""NotebookNode"", dict]: """"""Apply a transformation on each cell. Parameters ---------- cell : NotebookNode cell Notebook cell being processed resources : dictionary Additional resources used in the conversion process. Allows preprocessors to pass variables into the Jinja engine. 
cell_index : int Index of the cell being processed (see base.py) """""" # Get files directory if it has been specified output_files_dir = resources.get(""output_files_dir"", None) # Make sure outputs key exists if not isinstance(resources[""outputs""], dict): resources[""outputs""] = {} # Loop through all of the attachments in the cell for name, attach in cell.get(""attachments"", {}).items(): orig_name = name name = re.sub(r""%[\w\d][\w\d]"", ""-"", name) for mime, data in attach.items(): if mime not in self.extract_output_types: continue # Binary files are base64-encoded, SVG is already XML if mime in {""image/png"", ""image/jpeg"", ""application/pdf""}: # data is b64-encoded as text (str, unicode), # we want the original bytes data = a2b_base64(data) elif sys.platform == ""win32"": data = data.replace(""\n"", ""\r\n"").encode(""UTF-8"") else: data = data.encode(""UTF-8"") filename = self.output_filename_template.format( cell_index=cell_index, name=name, unique_key=resources.get(""unique_key"", """"), ) if output_files_dir is not None: filename = os.path.join(output_files_dir, filename) if name.endswith("".gif"") and mime == ""image/png"": filename = filename.replace("".gif"", "".png"") # In the resources, make the figure available via # resources['outputs']['filename'] = data resources[""outputs""][filename] = data # now we need to change the cell source so that it links to the # filename instead of `attachment:` attach_str = ""attachment:"" + orig_name if attach_str in cell.source: cell.source = cell.source.replace(attach_str, filename) return cell, resources" 1436,"def combine_pdf_as_bytes(pdfs: List[BytesIO]) -> bytes: """"""Combine PDFs and return a byte-string with the result. Arguments --------- pdfs A list of BytesIO representations of PDFs """""" writer = PdfWriter() for pdf in pdfs: writer.addpages(PdfReader(pdf).pages) bio = BytesIO() writer.write(bio) bio.seek(0) output = bio.read() bio.close() return output" 1437,"def split(self, granularity_after_split, exclude_partial=True): """""" Split a period into a given granularity. Optionally include partial periods at the start and end of the period. """""" if granularity_after_split == Granularity.DAY: return self.get_days() elif granularity_after_split == Granularity.WEEK: return self.get_weeks(exclude_partial) elif granularity_after_split == Granularity.MONTH: return self.get_months(exclude_partial) elif granularity_after_split == Granularity.QUARTER: return self.get_quarters(exclude_partial) elif granularity_after_split == Granularity.HALF_YEAR: return self.get_half_years(exclude_partial) elif granularity_after_split == Granularity.YEAR: return self.get_years(exclude_partial) else: raise Exception(""Invalid granularity: %s"" % granularity_after_split)" 1438,"def fit_fb_calibration(df, calibration): ''' Fit feedback calibration data to solve for values of `C_fb[:]` and `R_fb[:]`. Returns a `pandas.DataFrame` indexed by the feedback resistor/capacitance index, and with the following columns: - Model: Either with parasitic capacitance term or not. - N: Number of samples used for fit. - F: F-value - p-value: p-value from Chi squared test. - R_fb: Feedback resistor value based on fit. - R-CI %: Confidence interval for feedback resistor value. - C_fb: Feedback capacitor value based on fit (0 if no-capacitance model is used). - C-CI %: Confidence interval for feedback capacitance value. __N.B.__ This function does not actually _update_ the calibration, it only performs the fit. See `apply_calibration`. 
''' # Set initial guesses for the feedback parameters. R_fb = pd.Series([2e2, 2e3, 2e4, 2e5, 2e6]) C_fb = pd.Series(len(calibration.C_fb) * [50e-12]) # Error function. def error(p0, df, calibration): # Impedance of the reference resistor on the HV attenuator circuit. Z = 10e6 R_fb = p0[0] # If the parameter vector only contains one variable, the capacitance # is zero if len(p0) == 2: C_fb = p0[1] else: C_fb = 0 R_hv = calibration.R_hv[df.hv_resistor.values] C_hv = calibration.C_hv[df.hv_resistor.values] # Solve feedback transfer function for the actuation voltage, _(i.e., # `V1`)_, based on the high-voltage measurements. # Note that the transfer function definition depends on the hardware # version. V_actuation = compute_from_transfer_function(calibration.hw_version .major, 'V1', V2=df.V_hv, R1=Z, R2=R_hv, C2=C_hv, f=df.frequency) # Solve feedback transfer function for the expected impedance feedback # voltage, _(i.e., `V2`)_, based on the actuation voltage, the proposed # values for `R2` and `C2`, and the reported `C1` value from the # feedback measurements. # Note that the transfer function definition depends on the hardware # version. # __NB__ If we do not specify a value for `R1`, a symbolic value of # infinity is used. However, in this case, we have `R1` in both the # numerator and denominator. The result is a value of zero returned # regardless of the values of the other arguments. We avoid this issue # by specifying a *very large* value for `R1`. # TODO Update comment if this works... V_impedance = compute_from_transfer_function(calibration.hw_version .major, 'V2', V1=V_actuation, C1=df.test_capacitor, R2=R_fb, C2=C_fb, f=df.frequency) return df.V_fb - V_impedance # Perform a nonlinear least-squares fit of the data. def fit_model(p0, df, calibration): p1, cov_x, infodict, mesg, ier = scipy.optimize.leastsq( error, p0, args=(df, calibration), full_output=True) p1 = np.abs(p1) E = error(p1, df, calibration) return p1, E, cov_x CI = [] feedback_records = [] # Fit feedback parameters for each feedback resistor. for i in range(len(calibration.R_fb)): # Only include data points for the given feedback resistor (and where # `hv_resistor` is a valid index). df_i = df.loc[(df.fb_resistor == i)].dropna() if df_i.shape[0] < 2: CI.append([0, 0]) continue # Fit the data assuming no parasitic capacitance (model 1). 
p0_1 = [R_fb[i]] p1_1, E_1, cov_x_1 = fit_model(p0_1, df_i, calibration) df_1 = (len(E_1) - len(p0_1)) chi2_1 = np.sum(E_1 ** 2) chi2r_1 = chi2_1 / (df_1 - 1) # fit the data including parasitic capacitance (model 2) p0_2 = [R_fb[i], C_fb[i]] p1_2, E_2, cov_x_2 = fit_model(p0_2, df_i, calibration) df_2 = (len(E_2) - len(p0_2)) chi2_2 = np.sum(E_2 ** 2) chi2r_2 = chi2_2 / (df_2 - 1) # do an F-test to compare the models F = (chi2_1 - chi2_2) / chi2r_2 p_value = scipy.stats.f.cdf(F, 1, df_2-1) # if the p_value is > 0.95, we assume that the capacitive term is # necessary if p_value > .95 and cov_x_2 is not None: model = 'w/Parasitic C' chi2r = chi2r_2 R_fb_i = p1_2[0] C_fb_i = p1_2[1] CI.append((100 * np.sqrt(chi2r_2 * np.diag(cov_x_2)) / p1_2)) else: # otherwise, set the capacitance to zero model = 'w/o Parasitic C' chi2r = chi2r_2 R_fb_i = p1_1[0] C_fb_i = 0 if cov_x_1 is None: cov_x_1 = [0] CI.append((100 * np.sqrt(chi2r_1 * np.diag(cov_x_1)) / p1_1).tolist() + [0]) feedback_records.append([int(i), model, df_i.shape[0], R_fb_i, CI[i][0], C_fb_i, CI[i][1], F, (1e3 * np.sqrt(chi2r)), p_value]) calibration_df = pd.DataFrame(feedback_records, columns=['fb_resistor', 'Model', 'N', 'R_fb', 'R-CI %', 'C_fb', 'C-CI %', 'F', 'sqrt(Chi2r*sigma^2)', 'p-value']) return calibration_df" 1439,"def apply_calibration(df, calibration_df, calibration): ''' Apply calibration values from `fit_fb_calibration` result to `calibration` object. ''' from dmf_control_board_firmware import FeedbackResults for i, (fb_resistor, R_fb, C_fb) in calibration_df[['fb_resistor', 'R_fb', 'C_fb']].iterrows(): calibration.R_fb[int(fb_resistor)] = R_fb calibration.C_fb[int(fb_resistor)] = C_fb cleaned_df = df.dropna() grouped = cleaned_df.groupby(['frequency', 'test_capacitor', 'repeat_index']) for (f, channel, repeat_index), group in grouped: r = FeedbackResults(group.V_actuation.iloc[0], f, 5.0, group.V_hv.values, group.hv_resistor.values, group.V_fb.values, group.fb_resistor.values, calibration) # Update the measured capacitance values based on the updated # calibration model. df.loc[group.index, 'C'] = r.capacitance()" 1440,"def config_dict(config): """""" Given a Sphinx config object, return a dictionary of config values. """""" return dict( (key, getattr(config, key)) for key in config.values )" 1441,"def from_defn(cls, defn): ""Return the first Repl subclass that works with this"" instances = (subcl(defn) for subcl in cls.__subclasses__()) return next(filter(None, instances))" 1442,"def from_definition(cls, defn, names={}): """""" A definition may contain the following members: - using: a dictionary of variables available for substitution - replace: a list of replacement definitions. 
"""""" repls = map(Repl.from_defn, defn.get('replace', [])) self = cls(repls) vars(self).update(names) vars(self).update(defn.get('using', {})) return self" 1443,"def data(self, index, role=Qt.DisplayRole): """"""Cell content"""""" if not index.isValid(): return None if role == Qt.DisplayRole or role == Qt.EditRole: return ' ' if role == Qt.BackgroundColorRole: color = self.color_da[index.row(), index.column()].values return QtGui.QColor.fromRgbF(*color) return None" 1444,"def headerData(self, section, orientation, role=Qt.DisplayRole): """"""Set header data"""""" if role != Qt.DisplayRole: return None if orientation == Qt.Vertical: return six.text_type(self.color_da.cmap[section].values) return super(ColormapModel, self).headerData(section, orientation, role)" 1445,"def get_colormap(cls, names=[], N=10, *args, **kwargs): """"""Open a :class:`ColormapDialog` and get a colormap Parameters ---------- %(ColormapModel.parameters)s Other Parameters ---------------- ``*args, **kwargs`` Anything else that is passed to the ColormapDialog Returns ------- str or matplotlib.colors.Colormap Either the name of a standard colormap available via :func:`psy_simple.colors.get_cmap` or a colormap """""" names = safe_list(names) obj = cls(names, N, *args, **kwargs) vbox = obj.layout() buttons = QDialogButtonBox( QDialogButtonBox.Ok | QDialogButtonBox.Cancel, parent=obj) buttons.button(QDialogButtonBox.Ok).setEnabled(False) vbox.addWidget(buttons) buttons.accepted.connect(obj.accept) buttons.rejected.connect(obj.reject) obj.table.selectionModel().selectionChanged.connect( lambda indices: buttons.button(QDialogButtonBox.Ok).setEnabled( bool(indices))) accepted = obj.exec_() if accepted: return obj.table.chosen_colormap" 1446,"def show_colormap(cls, names=[], N=10, show=True, *args, **kwargs): """"""Show a colormap dialog Parameters ---------- %(show_colormaps.parameters.no_use_qt)s"""""" names = safe_list(names) obj = cls(names, N, *args, **kwargs) vbox = obj.layout() buttons = QDialogButtonBox(QDialogButtonBox.Close, parent=obj) buttons.rejected.connect(obj.close) vbox.addWidget(buttons) if show: obj.show() return obj" 1447,"def cmd_list(args): """"""List all element in pen"""""" for penlist in penStore.data: puts(penlist + "" ("" + str(len(penStore.data[penlist])) + "")"")" 1448,"def cmd_all(args): """"""List everything recursively"""""" for penlist in penStore.data: puts(penlist) with indent(4, ' -'): for penfile in penStore.data[penlist]: puts(penfile)" 1449,"def cmd_create(args): """"""Creates a list"""""" name = args.get(0) if name: penStore.createList(name) else: puts(""not valid"")" 1450,"def cmd_touch_note(args): """"""Create a note"""""" major = args.get(0) minor = args.get(1) if major in penStore.data: if minor is None: # show items in list for note in penStore.data[major]: puts(note) elif minor in penStore.data[major]: penStore.openNote(major, minor) else: penStore.createNote(major, minor) penStore.openNote(major, minor) else: puts(""No list of that name."")" 1451,"def cmd_delete(args): """"""Deletes a node"""""" major = args.get(0) minor = args.get(1) if major is not None: if major in penStore.data: if minor is None: if len(penStore.data[major]) > 0: if raw_input(""are you sure (y/n)? "") not in ['y', 'Y', 'yes', 'Yes']: return ExitStatus.ABORT penStore.deleteList(major) puts(""list deleted"") elif minor in penStore.data[major]: penStore.deleteNote(major, minor) puts(""note deleted"") else: puts(""no such note, sorry! (%s)"" % minor) else: puts(""no such list, sorry! 
(%s)"" % major) else: print """""" - pen: delete help ------------------------------------------------------------ pen delete deletes list and all of its notes pen delete deletes note """"""" 1452,"def restclient_admin_required(view_func): """""" View decorator that checks whether the user is permitted to view proxy restclients. Calls login_required in case the user is not authenticated. """""" def wrapper(request, *args, **kwargs): template = 'access_denied.html' if hasattr(settings, 'RESTCLIENTS_ADMIN_AUTH_MODULE'): auth_func = import_string(settings.RESTCLIENTS_ADMIN_AUTH_MODULE) else: context = {'error_msg': ( ""Your application must define an authorization function as "" ""RESTCLIENTS_ADMIN_AUTH_MODULE in settings.py."")} return render(request, template, context=context, status=401) service = args[0] if len(args) > 0 else None url = args[1] if len(args) > 1 else None if auth_func(request, service, url): return view_func(request, *args, **kwargs) return render(request, template, status=401) return login_required(function=wrapper)" 1453,"def open_file(filepath): """""" Open file with the default system app. Copied from https://stackoverflow.com/a/435669/1224456 """""" if sys.platform.startswith('darwin'): subprocess.Popen(('open', filepath)) elif os.name == 'nt': os.startfile(filepath) elif os.name == 'posix': subprocess.Popen(('xdg-open', filepath))" 1454,"def destination_heuristic(data): """""" A heuristic to get the folder with all other files from bib, using majority vote. """""" counter = collections.Counter() for entry in data: file_field = entry['fields'].get('file') if not file_field: continue path = os.path.dirname(file_field) counter[path] += 1 if not counter: # No paths found raise click.ClickException( 'Path finding heuristics failed: no paths in the database' ) # Find the paths that appears most often sorted_paths = sorted(counter, reverse=True) groupby = itertools.groupby(sorted_paths, key=len) _, group = next(groupby) # We know that there's at least one candidate. Make sure it's # the only one candidate = next(group) try: next(group) except StopIteration: return candidate else: raise click.ClickException( 'Path finding heuristics failed: ' 'there are multiple equally valid paths in the database' )" 1455,"def remove_entry(data, entry): ''' Remove an entry in place. ''' file_field = entry['fields'].get('file') if file_field: try: os.remove(file_field) except IOError: click.echo('This entry\'s file was missing') data.remove(entry)" 1456,"def string_to_basename(s): ''' Converts to lowercase, removes non-alpha characters, and converts spaces to hyphens. ''' s = s.strip().lower() s = re.sub(r'[^\w\s-]', '', s) return re.sub(r'[\s-]+', '-', s)" 1457,"def editor(*args, **kwargs): ''' Wrapper for `click.edit` that raises an error when None is returned. ''' result = click.edit(*args, **kwargs) if result is None: msg = 'Editor exited without saving, command aborted' raise click.ClickException(msg) return result" 1458,"def terms(self, facet_name, field, size=10, order=None, all_terms=False, exclude=[], regex='', regex_flags=''): ''' Allow to specify field facets that return the N most frequent terms. Ordering: Allow to control the ordering of the terms facets, to be ordered by count, term, reverse_count or reverse_term. The default is count. All Terms: Allow to get all the terms in the terms facet, ones that do not match a hit, will have a count of 0. Note, this should not be used with fields that have many terms. 
Excluding Terms: It is possible to specify a set of terms that should be excluded from the terms facet request result. Regex Patterns: The terms API allows to define regex expression that will control which terms will be included in the faceted list. ''' self[facet_name] = dict(terms=dict(field=field, size=size)) if order: self[facet_name][terms]['order'] = order if all_terms: self[facet_name][terms]['all_terms'] = True if exclude: self[facet_name][terms]['exclude'] = exclude if regex: self[facet_name][terms]['regex'] = regex if regex_flags: self[facet_name][terms]['regex_flags'] = regex_flags return self" 1459,"def range(self, facet_name, field, ranges=[]): ''' Range facet allow to specify a set of ranges and get both the number of docs (count) that fall within each range, and aggregated data either based on the field, or using another field. http://www.elasticsearch.org/guide/reference/api/search/facets/range-facet.html > ElasticFacet().range('range1', 'field_name', [ slice(50), slice(20,70), slice(50,-1) ]) { ""range1"" : { ""range"" : { ""field"" : ""field_name"", ""ranges"" : [ { ""to"" : 50 }, { ""from"" : 20, ""to"" : 70 }, { ""from"" : 70, ""to"" : 120 }, { ""from"" : 150 } ] } } } ''' self[facet_name] = {'range': {'field': field, 'ranges': []}} for s in ranges: if not isinstance(s, slice): continue entry = dict() if s.start: entry['from'] = s.start if s.stop != -1: entry['to'] = s.stop self[facet_name]['range']['ranges'].append(entry) return self" 1460,"def parse_gpx(gpx_element, gpxns=None): """"""Parse a GPX file into a GpxModel. Args: gpx_element: The root element of an XML document containing a version attribute. GPX versions 1.0 and 1.1 are supported. gpxns: The XML namespace for GPX in Clarke notation (i.e. delimited by curly braces). Returns: A GpxModel representing the data from the supplies xml. Raises: ValueError: The supplied XML could not be parsed as GPX. """""" gpxns = gpxns if gpxns is not None else determine_gpx_namespace(gpx_element) if gpx_element.tag != gpxns+'gpx': raise ValueError(""No gpx root element"") version = gpx_element.attrib['version'] if version == '1.0': return parse_gpx_1_0(gpx_element, gpxns=gpxns) elif version == '1.1': return parse_gpx_1_1(gpx_element, gpxns=gpxns) else: raise ValueError(""Cannot parse GPX version {0}"".format(version))" 1461,"def backup_file(filename): """""" create a backup of the file desired """""" if not os.path.exists(filename): return BACKUP_SUFFIX = "".sprinter.bak"" backup_filename = filename + BACKUP_SUFFIX shutil.copyfile(filename, backup_filename)" 1462,"def inject(self, filename, content): """""" add the injection content to the dictionary """""" # ensure content always has one trailing newline content = _unicode(content).rstrip() + ""\n"" if filename not in self.inject_dict: self.inject_dict[filename] = """" self.inject_dict[filename] += content" 1463,"def commit(self): """""" commit the injections desired, overwriting any previous injections in the file. 
"""""" self.logger.debug(""Starting injections..."") self.logger.debug(""Injections dict is:"") self.logger.debug(self.inject_dict) self.logger.debug(""Clear list is:"") self.logger.debug(self.clear_set) for filename, content in self.inject_dict.items(): content = _unicode(content) self.logger.debug(""Injecting values into %s..."" % filename) self.destructive_inject(filename, content) for filename in self.clear_set: self.logger.debug(""Clearing injection from %s..."" % filename) self.destructive_clear(filename)" 1464,"def injected(self, filename): """""" Return true if the file has already been injected before. """""" full_path = os.path.expanduser(filename) if not os.path.exists(full_path): return False with codecs.open(full_path, 'r+', encoding=""utf-8"") as fh: contents = fh.read() return self.wrapper_match.search(contents) is not None" 1465,"def destructive_inject(self, filename, content): """""" Injects the injections desired immediately. This should generally be run only during the commit phase, when no future injections will be done. """""" content = _unicode(content) backup_file(filename) full_path = self.__generate_file(filename) with codecs.open(full_path, 'r', encoding=""utf-8"") as f: new_content = self.inject_content(f.read(), content) with codecs.open(full_path, 'w+', encoding=""utf-8"") as f: f.write(new_content)" 1466,"def __generate_file(self, file_path): """""" Generate the file at the file_path desired. Creates any needed directories on the way. returns the absolute path of the file. """""" file_path = os.path.expanduser(file_path) if not os.path.exists(os.path.dirname(file_path)): self.logger.debug(""Directories missing! Creating directories for %s..."" % file_path) os.makedirs(os.path.dirname(file_path)) if not os.path.exists(file_path): open(file_path, ""w+"").close() return file_path" 1467,"def in_noninjected_file(self, file_path, content): """""" Checks if a string exists in the file, sans the injected """""" if os.path.exists(file_path): file_content = codecs.open(file_path, encoding=""utf-8"").read() file_content = self.wrapper_match.sub(u"""", file_content) else: file_content = """" return file_content.find(content) != -1" 1468,"def inject_content(self, content, inject_string): """""" Inject inject_string into a text buffer, wrapped with #{{ wrapper }} comments if condition lambda is not satisfied or is None. Remove old instances of injects if they exist. """""" inject_string = _unicode(inject_string) content = self.wrapper_match.sub("""", _unicode(content)) if self.override_match: sprinter_overrides = self.override_match.search(content) if sprinter_overrides: content = self.override_match.sub("""", content) sprinter_overrides = sprinter_overrides.groups()[0] else: sprinter_overrides = """" content += """""" %s %s %s """""" % (self.wrapper, inject_string.rstrip(), self.wrapper) if self.override_match: content += sprinter_overrides.rstrip() + ""\n"" return content" 1469,"def clear_content(self, content): """""" Clear the injected content from the content buffer, and return the results """""" content = _unicode(content) return self.wrapper_match.sub("""", content)" 1470,"def get_all_orders_ungrouped(self): """""" Uses a generator to return all orders within. :py:class:`MarketOrder` objects are yielded directly, instead of being grouped in :py:class:`MarketItemsInRegionList` instances. .. note:: This is a generator! :rtype: generator :returns: Generates a list of :py:class:`MarketOrder` instances. 
"""""" for olist in self._orders.values(): for order in olist.orders: yield order" 1471,"def add_order(self, order): """""" Adds a MarketOrder instance to the list of market orders contained within this order list. Does some behind-the-scenes magic to get it all ready for serialization. :param MarketOrder order: The order to add to this order list. """""" # This key is used to group the orders based on region. key = '%s_%s' % (order.region_id, order.type_id) if not self._orders.has_key(key): # We don't have any orders for this yet. Prep the region+item # combo by instantiating a new MarketItemsInRegionList for # the MarketOrders. self.set_empty_region( order.region_id, order.type_id, order.generated_at ) # The MarketOrder gets stuffed into the MarketItemsInRegionList for this # item+region combo. self._orders[key].add_order(order)" 1472,"def set_empty_region(self, region_id, type_id, generated_at, error_if_orders_present=True): """""" Prepares for the given region+item combo by instantiating a :py:class:`MarketItemsInRegionList` instance, which will track region ID, type ID, and generated time. This is mostly used for the JSON deserialization process in case there are no orders for the given region+item combo. :param int region_id: The region ID. :param int type_id: The item's type ID. :param datetime.datetime generated_at: The time that the order set was generated. :keyword bool error_if_orders_present: If True, raise an exception if an order already exists for this item+region combo when this is called. This failsafe may be disabled by passing False here. """""" key = '%s_%s' % (region_id, type_id) if error_if_orders_present and self._orders.has_key(key): raise ItemAlreadyPresentError( ""Orders already exist for the given region and type ID. "" ""Pass error_if_orders_present=False to disable this failsafe, "" ""if desired."" ) self._orders[key] = MarketItemsInRegionList( region_id, type_id, generated_at)" 1473,"def add_entry(self, entry): """""" Adds a MarketHistoryEntry instance to the list of market history entries contained within this instance. Does some behind-the-scenes magic to get it all ready for serialization. :param MarketHistoryEntry entry: The history entry to add to instance. """""" # This key is used to group the orders based on region. key = '%s_%s' % (entry.region_id, entry.type_id) if not self._history.has_key(key): # We don't have any orders for this yet. Prep the region+item # combo by instantiating a new MarketItemsInRegionList for # the MarketOrders. self.set_empty_region( entry.region_id, entry.type_id, entry.generated_at ) # The MarketOrder gets stuffed into the MarketItemsInRegionList for this # item+region combo. self._history[key].add_entry(entry)" 1474,"def set_empty_region(self, region_id, type_id, generated_at, error_if_entries_present=True): """""" Prepares for the given region+item combo by instantiating a :py:class:`HistoryItemsInRegionList` instance, which will track region ID, type ID, and generated time. This is mostly used for the JSON deserialization process in case there are no orders for the given region+item combo. :param int region_id: The region ID. :param int type_id: The item's type ID. :param datetime.datetime generated_at: The time that the order set was generated. :keyword bool error_if_entries_present: If True, raise an exception if an entry already exists for this item+region combo when this is called. This failsafe may be disabled by passing False here. 
"""""" key = '%s_%s' % (region_id, type_id) if error_if_entries_present and self._history.has_key(key): raise ItemAlreadyPresentError( ""Orders already exist for the given region and type ID. "" ""Pass error_if_orders_present=False to disable this failsafe, "" ""if desired."" ) self._history[key] = HistoryItemsInRegionList( region_id, type_id, generated_at)" 1475,"def _find_file(self, file_name: str, lookup_dir: Path) -> Path or None: '''Find a file in a directory by name. Check subdirectories recursively. :param file_name: Name of the file :lookup_dir: Starting directory :returns: Path to the found file or None if the file was not found :raises: FileNotFoundError ''' self.logger.debug('Trying to find the file {file_name} inside the directory {lookup_dir}') result = None for item in lookup_dir.rglob('*'): if item.name == file_name: result = item break else: raise FileNotFoundError(file_name) self.logger.debug('File found: {result}') return result" 1476,"def _sync_repo(self, repo_url: str, revision: str or None = None) -> Path: '''Clone a Git repository to the cache dir. If it has been cloned before, update it. :param repo_url: Repository URL :param revision: Revision: branch, commit hash, or tag :returns: Path to the cloned repository ''' repo_name = repo_url.split('/')[-1].rsplit('.', maxsplit=1)[0] repo_path = (self._cache_path / repo_name).resolve() self.logger.debug(f'Synchronizing with repo; URL: {repo_url}, revision: {revision}') try: self.logger.debug(f'Cloning repo {repo_url} to {repo_path}') run( f'git clone {repo_url} {repo_path}', shell=True, check=True, stdout=PIPE, stderr=STDOUT ) except CalledProcessError as exception: if repo_path.exists(): self.logger.debug('Repo already cloned; pulling from remote') try: run( 'git pull', cwd=repo_path, shell=True, check=True, stdout=PIPE, stderr=STDOUT ) except CalledProcessError as exception: self.logger.warning(str(exception)) else: self.logger.error(str(exception)) if revision: run( f'git checkout {revision}', cwd=repo_path, shell=True, check=True, stdout=PIPE, stderr=STDOUT ) return repo_path" 1477,"def _shift_headings(self, content: str, shift: int) -> str: '''Shift Markdown headings in a string by a given value. The shift can be positive or negative. :param content: Markdown content :param shift: Heading shift :returns: Markdown content with headings shifted by ``shift`` ''' def _sub(heading): new_heading_level = len(heading.group('hashes')) + shift self.logger.debug(f'Shift heading level to {new_heading_level}, heading title: {heading.group(""title"")}') if new_heading_level <= 6: return f'{""#"" * new_heading_level} {heading.group(""title"")}{heading.group(""tail"")}' else: self.logger.debug('New heading level is out of range, using bold paragraph text instead of heading') return f'**{heading.group(""title"")}**{heading.group(""tail"")}' return self._heading_pattern.sub(_sub, content)" 1478,"def _find_top_heading_level(self, content: str) -> int: '''Find the highest level heading (i.e. having the least '#'s) in a Markdown string. 
:param content: Markdown content :returns: Maximum heading level detected; if no heading is found, 0 is returned ''' result = float('inf') for heading in self._heading_pattern.finditer(content): heading_level = len(heading.group('hashes')) if heading_level < result: result = heading_level self.logger.debug(f'Maximum heading level: {result}') return result if result < float('inf') else 0" 1479,"def _cut_from_heading_to_heading( self, content: str, from_heading: str, to_heading: str or None = None, options={} ) -> str: '''Cut part of Markdown string between two headings, set internal heading level, and remove top heading. If only the starting heading is defined, cut to the next heading of the same level. Heading shift and top heading elimination are optional. :param content: Markdown content :param from_heading: Starting heading :param to_heading: Ending heading (will not be incuded in the output) :param options: ``sethead``, ``nohead`` :returns: Part of the Markdown content between headings with internal headings adjusted ''' self.logger.debug(f'Cutting from heading: {from_heading}, to heading: {to_heading}, options: {options}') from_heading_pattern = re.compile(r'^\#{1,6}\s+' + rf'{from_heading}\s*$', flags=re.MULTILINE) if not from_heading_pattern.findall(content): return '' from_heading_line = from_heading_pattern.findall(content)[0] from_heading_level = len(self._heading_pattern.match(from_heading_line).group('hashes')) self.logger.debug(f'From heading level: {from_heading_level}') result = from_heading_pattern.split(content)[1] if to_heading: to_heading_pattern = re.compile(r'^\#{1,6}\s+' + rf'{to_heading}\s*$', flags=re.MULTILINE) else: to_heading_pattern = re.compile( rf'^\#{{1,{from_heading_level}}}[^\#]+?$', flags=re.MULTILINE ) result = to_heading_pattern.split(result)[0] if not options.get('nohead'): result = from_heading_line + result if options.get('sethead'): if options['sethead'] > 0: result = self._shift_headings( result, options['sethead'] - from_heading_level ) return result" 1480,"def _cut_to_heading( self, content: str, to_heading: str or None = None, options={} ) -> str: '''Cut part of Markdown string from the start to a certain heading, set internal heading level, and remove top heading. If not heading is defined, the whole string is returned. Heading shift and top heading elimination are optional. 
:param content: Markdown content :param to_heading: Ending heading (will not be incuded in the output) :param options: ``sethead``, ``nohead`` :returns: Part of the Markdown content from the start to ``to_heading``, with internal headings adjusted ''' self.logger.debug(f'Cutting to heading: {to_heading}, options: {options}') content_buffer = StringIO(content) first_line = content_buffer.readline() if self._heading_pattern.fullmatch(first_line): from_heading_line = first_line from_heading_level = len(self._heading_pattern.match(from_heading_line).group('hashes')) result = content_buffer.read() else: from_heading_line = '' from_heading_level = self._find_top_heading_level(content) result = content self.logger.debug(f'From heading level: {from_heading_level}') if to_heading: to_heading_pattern = re.compile(r'^\#{1,6}\s+' + rf'{to_heading}\s*$', flags=re.MULTILINE) result = to_heading_pattern.split(result)[0] if not options.get('nohead'): result = from_heading_line + result if options.get('sethead'): if options['sethead'] > 0: result = self._shift_headings( result, options['sethead'] - from_heading_level ) return result" 1481,"def _adjust_image_paths(self, content: str, md_file_path: Path) -> str: '''Locate images referenced in a Markdown string and replace their paths with the absolute ones. :param content: Markdown content :param md_file_path: Path to the Markdown file containing the content :returns: Markdown content with absolute image paths ''' def _sub(image): image_caption = image.group('caption') image_path = md_file_path.parent / Path(image.group('path')) self.logger.debug( f'Updating image reference; user specified path: {image.group(""path"")}, ' + f'absolute path: {image_path}, caption: {image_caption}' ) return f'![{image_caption}]({image_path.absolute().as_posix()})' return self._image_pattern.sub(_sub, content)" 1482,"def _get_src_file_path(self, markdown_file_path: Path) -> Path: '''Translate the path of Markdown file that is located inside the temporary working directory into the path of the corresponding Markdown file that is located inside the source directory of Foliant project. :param markdown_file_path: Path to Markdown file that is located inside the temporary working directory :returns: Mapping of Markdown file path to the source directory ''' path_relative_to_working_dir = markdown_file_path.relative_to(self.working_dir.resolve()) self.logger.debug( 'Currently processed Markdown file path relative to working dir: ' + f'{path_relative_to_working_dir}' ) path_mapped_to_src_dir = ( self.project_path.resolve() / self.config['src_dir'] / path_relative_to_working_dir ) self.logger.debug( 'Currently processed Markdown file path mapped to source dir: ' + f'{path_mapped_to_src_dir}' ) return path_mapped_to_src_dir" 1483,"def _get_included_file_path(self, user_specified_path: str, current_processed_file_path: Path) -> Path: '''Resolve user specified path to the local included file. 
:param user_specified_path: User specified string that represents the path to a local file :param current_processed_file_path: Path to the currently processed Markdown file that contains include statements :returns: Local path of the included file relative to the currently processed Markdown file ''' self.logger.debug(f'Currently processed Markdown file: {current_processed_file_path}') included_file_path = (current_processed_file_path.parent / user_specified_path).resolve() self.logger.debug(f'User-specified included file path: {included_file_path}') if ( self.working_dir.resolve() in current_processed_file_path.parents and self.working_dir.resolve() not in included_file_path.parents ): self.logger.debug( 'Currently processed file is located inside the working dir, ' + 'but included file is located outside the working dir. ' + 'So currently processed file path should be rewritten with the path of corresponding file ' + 'that is located inside the source dir' ) included_file_path = ( self._get_src_file_path(current_processed_file_path).parent / user_specified_path ).resolve() else: self.logger.debug( 'Using these paths without changes' ) self.logger.debug(f'Finally, included file path: {included_file_path}') return included_file_path" 1484,"def _process_include( self, file_path: Path, from_heading: str or None = None, to_heading: str or None = None, options={} ) -> str: '''Replace a local include statement with the file content. Necessary adjustments are applied to the content: cut between certain headings, strip the top heading, set heading level. :param file_path: Path to the included file :param from_heading: Include starting from this heading :param to_heading: Include up to this heading (not including the heading itself) :param options: ``sethead``, ``nohead`` :returns: Included file content ''' self.logger.debug( f'Included file path: {file_path}, from heading: {from_heading}, ' + f'to heading: {to_heading}, options: {options}' ) if file_path.name.startswith('^'): file_path = self._find_file(file_path.name[1:], file_path.parent) with open(file_path, encoding='utf8') as incl_file: incl_content = incl_file.read() if from_heading: incl_content = self._cut_from_heading_to_heading( incl_content, from_heading, to_heading, options ) else: incl_content = self._cut_to_heading( incl_content, to_heading, options ) incl_content = self._adjust_image_paths(incl_content, file_path) return incl_content" 1485,"def process_includes(self, markdown_file_path: Path, content: str) -> str: '''Replace all include statements with the respective file contents. 
:param markdown_file_path: Path to curently processed Markdown file :param content: Markdown content :returns: Markdown content with resolved includes ''' markdown_file_path = markdown_file_path.resolve() self.logger.debug(f'Processing Markdown file: {markdown_file_path}') processed_content = '' include_statement_pattern = re.compile( rf'((?]*)?\>.*?\<\/{""|"".join(self.tags)}\>)', flags=re.DOTALL ) content_parts = include_statement_pattern.split(content) for content_part in content_parts: include_statement = self.pattern.fullmatch(content_part) if include_statement: body = self._tag_body_pattern.match(include_statement.group('body').strip()) options = self.get_options(include_statement.group('options')) self.logger.debug(f'Processing include statement; body: {body}, options: {options}') if body.group('repo'): repo = body.group('repo') repo_from_alias = self.options['aliases'].get(repo) revision = None if repo_from_alias: self.logger.debug(f'Alias found: {repo}, resolved as: {repo_from_alias}') if '#' in repo_from_alias: repo_url, revision = repo_from_alias.split('#', maxsplit=1) else: repo_url = repo_from_alias else: repo_url = repo if body.group('revision'): revision = body.group('revision') self.logger.debug(f'Highest priority revision specified in the include statement: {revision}') self.logger.debug(f'File in Git repository referenced; URL: {repo_url}, revision: {revision}') repo_path = self._sync_repo(repo_url, revision) self.logger.debug(f'Local path of the repo: {repo_path}') included_file_path = repo_path / body.group('path') self.logger.debug(f'Resolved path to the included file: {included_file_path}') processed_content_part = self._process_include( included_file_path, body.group('from_heading'), body.group('to_heading'), options ) else: self.logger.debug('Local file referenced') included_file_path = self._get_included_file_path(body.group('path'), markdown_file_path) self.logger.debug(f'Resolved path to the included file: {included_file_path}') processed_content_part = self._process_include( included_file_path, body.group('from_heading'), body.group('to_heading'), options ) if self.options['recursive'] and self.pattern.search(processed_content_part): self.logger.debug('Recursive call of include statements processing') processed_content_part = self.process_includes(included_file_path, processed_content_part) if options.get('inline'): self.logger.debug('Processing included content part as inline') processed_content_part = re.sub(r'\s+', ' ', processed_content_part).strip() else: processed_content_part = content_part processed_content += processed_content_part return processed_content" 1486,"def create_view_from_dict(name, spec, template=None, cls=ActionsView): """"""Creates a view from an spec dict (typically, the YAML front-matter). 
"""""" kwargs = dict(spec) if template is not None: kwargs.setdefault(""template"", template) actions = load_grouped_actions(kwargs, pop_keys=True) view = cls(name=name, **kwargs) if isinstance(view, ActionsView): view.actions.extend(actions) return view" 1487,"def _parse_argv(argv=copy(sys.argv)): """"""return argv as a parsed dictionary, looks like the following: app --option1 likethis --option2 likethat --flag -> {'option1': 'likethis', 'option2': 'likethat', 'flag': True} """""" cfg = DotDict() cfg_files = [] argv = argv[1:] # Skip command name while argv: arg = argv.pop(0) # split up arg in format --arg=val key_val = re.split('=| ', arg) arg = key_val[0] try: val = key_val[1] except IndexError: if len(argv) > 0 and argv[0][0] != '-': val = argv.pop(0) else: # No val available, probably a flag val = None if arg[0] == '-': key = arg.lstrip('-') if not val: val = True new_cfg = _dict_from_dotted(key, val) cfg = dict_merge(cfg, new_cfg) else: if arg.endswith("".yml""): cfg_files.append(arg) return cfg, cfg_files" 1488,"def _dict_from_dotted(key, val): """"""takes a key value pair like: key: ""this.is.a.key"" val: ""the value"" and returns a dictionary like: {""this"": {""is"": {""a"": {""key"": ""the value"" } } } } """""" split_key = key.split(""."") split_key.reverse() for key_part in split_key: new_dict = DotDict() new_dict[key_part] = val val = new_dict return val" 1489,"def get_logger(name, CFG=None): """"""set up logging for a service using the py 2.7 dictConfig """""" logger = logging.getLogger(name) if CFG: # Make log directory if it doesn't exist for handler in CFG.get('handlers', {}).itervalues(): if 'filename' in handler: log_dir = os.path.dirname(handler['filename']) if not os.path.exists(log_dir): os.makedirs(log_dir) try: #TODO: This requires python 2.7 logging.config.dictConfig(CFG) except AttributeError: print >> sys.stderr, '""logging.config.dictConfig"" doesn\'t seem to be supported in your python' raise return logger" 1490,"def t_TITLE(self, token): ur'\#\s+(?P.+)\n' token.value = token.lexer.lexmatch.group(""title"").decode(""utf8"") token.lexer.lineno += 1 return token" 1491,"def t_LABELDECL(self, token): ur'-\s<label>\s*\[(?P<label>.+?)\]\s*(?P<text>.+?)\n' label = token.lexer.lexmatch.group(""label"").decode(""utf8"") text = token.lexer.lexmatch.group(""text"").decode(""utf8"") token.value = (label, text) token.lexer.lineno += 1 return token" 1492,"def t_ARTICLEHEADER(self, token): # \xef\xbc\x9a is the ""fullwidth colon"" used in Japanese for instance ur'\#\#\s+<article-(?P<number>[A-Z0-9]+)><(?P<newtag>[a-zA-Z0-9-]+)><(?P<oldtag>[a-zA-Z0-9-]+)>[ ]*(?P<name>[^\<]+?)(?P<sep>:\s|\xef\xbc\x9a)(?P<title>[^<\n]+)\n' number = token.lexer.lexmatch.group(""number"").decode(""utf8"") newtag = token.lexer.lexmatch.group(""newtag"").decode(""utf8"") oldtag = token.lexer.lexmatch.group(""oldtag"").decode(""utf8"") name = token.lexer.lexmatch.group(""name"").decode(""utf8"") sep = token.lexer.lexmatch.group(""sep"").decode(""utf8"") title = token.lexer.lexmatch.group(""title"").decode(""utf8"") token.value = (number, newtag, oldtag, name, title, sep) token.lexer.lineno += 1 return token" 1493,"def t_STATESHEADER(self, token): ur'\#\#\s+<states-list>(?P<title>[^<\n]*)\n' title = token.lexer.lexmatch.group(""title"").decode(""utf8"") token.value = title token.lexer.lineno += 1 return token" 1494,"def t_REGULATION(self, token): ur'(?P<indents>\s{4,})*-\s(?P<reg>[a-zA-Z0-9]+)\)\s*(?P<text>.+?[^ ])\n' indents = token.lexer.lexmatch.group(""indents"") indents = len(indents)/4 if 
indents else 0 reg = token.lexer.lexmatch.group(""reg"").decode(""utf8"") text = token.lexer.lexmatch.group(""text"").decode(""utf8"") token.value = (indents, reg, text) token.lexer.lineno += 1 return token" 1495,"def t_GUIDELINE(self, token): ur'-\s(?P<reg>[a-zA-Z0-9]+[+]+)\)\s\[(?P<label>.+?)\]\s*(?P<text>.+?[^ ])\n' reg = token.lexer.lexmatch.group(""reg"").decode(""utf8"") text = token.lexer.lexmatch.group(""text"").decode(""utf8"") label = token.lexer.lexmatch.group(""label"").decode(""utf8"") token.value = (0, reg, text, label) token.lexer.lineno += 1 return token" 1496,"def t_STATE(self, token): ur'-\s\((?P<state>[A-Z]{2}):(?P<continent>[_A-Za-z ]+)(:(?P<friendly_id>[A-Za-z_]+))?\)\s(?P<name>[A-Z].+?[^ ])\n' state = token.lexer.lexmatch.group(""state"").decode(""utf8"") continent = token.lexer.lexmatch.group(""continent"").decode(""utf8"") name = token.lexer.lexmatch.group(""name"").decode(""utf8"") friendly_id = token.lexer.lexmatch.group(""friendly_id"") if friendly_id: friendly_id = friendly_id.decode(""utf8"") else: friendly_id = unidecode(name).replace(""'"", ""_"") token.value = (state, continent, name, friendly_id) token.lexer.lineno += 1 return token" 1497,"def t_TEXT(self, token): ur'(?P<text>[^<#\n ].+?[^ ])(?=\n)' text = token.lexer.lexmatch.group(""text"").decode(""utf8"") token.value = text return token" 1498,"def t_PARBREAK(self, token): ur'\n{2,}' token.lexer.lineno += len(token.value) return token" 1499,"def t_trailingwhitespace(self, token): ur'.+? \n' print ""Error: trailing whitespace at line %s in text '%s'"" % (token.lexer.lineno + 1, token.value[:-1]) token.lexer.lexerror = True token.lexer.skip(1)" 1500,"def register_event(cls, event_name, event, method): """""" Register an event class on it's name with a method to process it. :param event_name: name of the event. :param event: class of the event. :param method: a method used to process this event. """""" log.info('@Registry.register_event `{}` with subscriber `{}`' .format(event_name, method.__name__)) if event_name not in cls._events: cls._events[event_name] = {} if event not in cls._events[event_name]: cls._events[event_name][event] = [] cls._events[event_name][event].append(method)" 1501,"def register_producer(cls, producer): """""" Register a default producer for events to use. :param producer: the default producer to to dispatch events on. """""" log.info('@Registry.register_producer `{}`' .format(producer.__class__.__name__)) cls._producer = (cls._producer or producer)" 1502,"def exec_before_request_actions(actions, **kwargs): """"""Execute actions in the ""before"" and ""before_METHOD"" groups """""" groups = (""before"", ""before_"" + flask.request.method.lower()) return execute_actions(actions, limit_groups=groups, **kwargs)" 1503,"def exec_after_request_actions(actions, response, **kwargs): """"""Executes actions of the ""after"" and ""after_METHOD"" groups. A ""response"" var will be injected in the current context. """""" current_context[""response""] = response groups = (""after_"" + flask.request.method.lower(), ""after"") try: rv = execute_actions(actions, limit_groups=groups, **kwargs) except ReturnValueException as e: rv = e.value if rv: return rv return response" 1504,"def full_exec_request_actions(actions, func=None, render_func=None): """"""Full process to execute before, during and after actions. If func is specified, it will be called after exec_request_actions() unless a ContextExitException was raised. 
If render_func is specified, it will be called after exec_request_actions() only if there is no response. exec_after_request_actions() is always called. """""" response = None try: exec_before_request_actions(actions, catch_context_exit=False) exec_request_actions(actions, catch_context_exit=False) if func: response = func() except ContextExitException as e: response = e.result except ReturnValueException as e: response = e.value if render_func and response is None: response = render_func() return exec_after_request_actions(actions, response)" 1505,"def as_view(url=None, methods=None, view_class=ActionsView, name=None, url_rules=None, **kwargs): """"""Decorator to transform a function into a view class. Be warned that this will replace the function with the view class. """""" def decorator(f): if url is not None: f = expose(url, methods=methods)(f) clsdict = {""name"": name or f.__name__, ""actions"": getattr(f, ""actions"", None), ""url_rules"": url_rules or getattr(f, ""urls"", None)} if isinstance(f, WithActionsDecorator): f = f.func clsdict['func'] = f def constructor(self, **ctorkwargs): for k, v in kwargs.items(): if k not in ctorkwargs or ctorkwargs[k] is None: ctorkwargs[k] = v view_class.__init__(self, func=f, **ctorkwargs) clsdict[""__init__""] = constructor return type(f.__name__, (view_class,), clsdict) return decorator" 1506,"def register(self, target): """"""Registers url_rules on the blueprint """""" for rule, options in self.url_rules: target.add_url_rule(rule, self.name, self.dispatch_request, **options)" 1507,"def view(self, *args, **kwargs): """"""Decorator to automatically apply as_view decorator and register it. """""" def decorator(f): kwargs.setdefault(""view_class"", self.view_class) return self.add_view(as_view(*args, **kwargs)(f)) return decorator" 1508,"def add_action_view(self, name, url, actions, **kwargs): """"""Creates an ActionsView instance and registers it. """""" view = ActionsView(name, url=url, self_var=self, **kwargs) if isinstance(actions, dict): for group, actions in actions.iteritems(): view.actions.extend(load_actions(actions, group=group or None)) else: view.actions.extend(load_actions(actions)) self.add_view(view) return view" 1509,"def process(exam_num: int, time: str, date: str) -> None: """"""Process the exams in the exam_num folder for the time."""""" prefix = Path(f""exams/exam-{exam_num}"") problems = list(prefix.glob(f""exam-{exam_num}-{time}-[0-9].ipynb"")) problems = sorted(problems, key=lambda k: k.stem[-1]) output_directory = (prefix / ""output"").resolve() fw = FilesWriter(build_directory=str(output_directory)) assignment_zip_name = output_directory / f""exam-{exam_num}-{time}.zip"" solution_zip_name = output_directory / f""exam-{exam_num}-{time}-soln.zip"" solution_pdfs: List[BytesIO] = [] exam_date_time = datetime.strptime(time + date, ""%H%M%d-%b-%Y"") res: Dict[str, Union[str, int]] = { ""exam_num"": exam_num, ""time"": exam_date_time.strftime(""%I:%M %p""), ""date"": exam_date_time.strftime(""%b. 
%d, %Y""), ""delete_pymarkdown"": True, } for problem in problems: res[""unique_key""] = problem.stem problem_fname = str(problem.resolve()) if problem.stem.endswith(""1""): assignment_nb, _ = sa_nb_exp.from_filename(problem_fname, resources=res) with ZipFile(assignment_zip_name, mode=""a"") as zip_file: zip_file.writestr(problem.name, assignment_nb) else: assignment_nb, _ = prob_nb_exp.from_filename(problem_fname, resources=res) with ZipFile(assignment_zip_name, mode=""a"") as zip_file: zip_file.writestr(problem.name, assignment_nb) solution_pdf, _ = solution_pdf_exp.from_filename(problem_fname, resources=res) solution_pdfs.append(BytesIO(solution_pdf)) solution_nb, _ = solution_nb_exp.from_filename(problem_fname, resources=res) with ZipFile(solution_zip_name, mode=""a"") as zip_file: zip_file.writestr(problem.name, solution_nb) resources: Dict[str, Any] = { ""metadata"": { ""name"": f""exam-{exam_num}-{time}-soln"", ""path"": str(prefix), ""modified_date"": datetime.today().strftime(""%B %d, %Y""), }, ""output_extension"": "".pdf"", } fw.write( combine_pdf_as_bytes(solution_pdfs), resources, f""exam-{exam_num}-{time}-soln"" )" 1510,"def main(argv: Optional[Sequence[str]] = None) -> None: """"""Parse arguments and process the exam assignment."""""" parser = ArgumentParser(description=""Convert Jupyter Notebook exams to PDFs"") parser.add_argument( ""--exam"", type=int, required=True, help=""Exam number to convert"", dest=""exam_num"", ) parser.add_argument( ""--time"", type=str, required=True, help=""Time of exam to convert"" ) parser.add_argument( ""--date"", type=str, required=True, help=""The date the exam will take place"" ) args = parser.parse_args(argv) process(args.exam_num, args.time, args.date)" 1511,"def get_profile_model(): """""" Returns configured user profile model or None if not found """""" auth_profile_module = getattr(settings, 'AUTH_PROFILE_MODULE', None) profile_model = None if auth_profile_module: # get the profile model. TODO: super flacky, refactor app_label, model = auth_profile_module.split('.') profile_model = getattr(__import__(""%s.models"" % app_label, \ globals(), locals(), [model, ], -1), model, None) return profile_model" 1512,"def is_on(self): """""" Get sensor state. Assume offline or open (worst case). """""" return self.status not in (CONST.STATUS_OFF, CONST.STATUS_OFFLINE, CONST.STATUS_CLOSED, CONST.STATUS_OPEN)" 1513,"def extract_meta(self, text): """""" Takes input as the entire file. Reads the first yaml document as metadata. 
and the rest of the document as text """""" first_line = True metadata = [] content = [] metadata_parsed = False for line in text.split('\n'): if first_line: first_line = False if line.strip() != '---': raise MetaParseException('Invalid metadata') else: continue if line.strip() == '' and not metadata_parsed: continue if line.strip() == '---' and not metadata_parsed: # reached the last line metadata_parsed = True elif not metadata_parsed: metadata.append(line) else: content.append(line) content = '\n'.join(content) try: metadata = yaml.load('\n'.join(metadata)) except: raise content = text metadata = yaml.load('') return content, metadata" 1514,"def set_defaults(self): """""" Add each model entry with it's default """""" for key, value in self.spec.items(): setattr(self, key.upper(), value.get(""default"", None))" 1515,"def load_env(self): """""" Load the model fron environment variables """""" for key, value in self.spec.items(): if value['type'] in (dict, list): envar = (self.env_prefix + ""_"" + key).upper() try: envvar = env.json(envar, default=getattr(self, key.upper(), value.get('default'))) except ConfigurationError as _err: #pragma: no cover print(_err) self.log.critical(f""Error parsing json from env var. {os.environ.get(envar)}"") print(envar) raise else: envvar = env((self.env_prefix + ""_"" + key).upper(), default=getattr(self, key.upper(), value.get('default')), cast=value['type']) setattr(self, key.upper(), envvar)" 1516,"def parse_args(self): """""" Parse the cli args Returns: args (namespace): The args """""" parser = ArgumentParser(description='', formatter_class=RawTextHelpFormatter) parser.add_argument(""--generate"", action=""store"", dest='generate', choices=['command', 'docker-run', 'docker-compose', 'ini', 'env', 'kubernetes', 'readme', 'drone-plugin'], help=""Generate a template "") parser.add_argument(""--settings"", action=""store"", dest='settings', help=""Specify a settings file. (ie settings.dev)"") for key, value in self.spec.items(): if value['type'] in [str, int, float]: parser.add_argument(f""--{key.lower()}"", action=""store"", dest=key, type=value['type'], choices=value.get(""choices""), help=self.help(value)) elif value['type'] == bool: parser.add_argument(f""--{key.lower()}"", action=""store"", dest=key, type=lambda x:bool(strtobool(x)), choices=value.get(""choices""), help=self.help(value)) elif value['type'] == list: parser.add_argument(f""--{key.lower()}"", action=""store"", dest=key, nargs='+', choices=value.get(""choices""), help=self.help(value)) elif value['type'] == dict: parser.add_argument(f""--{key.lower()}"", action=""store"", dest=key, type=json.loads, choices=value.get(""choices""), help=self.help(value)) args, _unknown = parser.parse_known_args() return args" 1517,"def add_args(self, args): """""" Add the args Args: args (namespace): The commandline args """""" for key, value in vars(args).items(): if value is not None: setattr(self, key.upper(), value)" 1518,"def load_ini(self, ini_file): """""" Load the contents from the ini file Args: ini_file (str): The file from which the settings should be loaded """""" if ini_file and not os.path.exists(ini_file): self.log.critical(f""Settings file specified but not found. 
{ini_file}"") sys.exit(1) if not ini_file: ini_file = f""{self.cwd}/settings.ini"" if os.path.exists(ini_file): config = configparser.RawConfigParser(allow_no_value=True) config.read(ini_file) for key, value in self.spec.items(): entry = None if value['type'] == str: entry = config.get(""settings"", option=key.lower(), fallback=None) elif value['type'] == bool: entry = config.getboolean(""settings"", option=key.lower(), fallback=None) elif value['type'] == int: entry = config.getint(""settings"", option=key.lower(), fallback=None) elif value['type'] == float: entry = config.getfloat(""settings"", option=key.lower(), fallback=None) elif value['type'] in [list, dict]: entries = config.get(""settings"", option=key.lower(), fallback=None) if entries: try: entry = json.loads(entries) except json.decoder.JSONDecodeError as _err: #pragma: no cover self.log.critical(f""Error parsing json from ini file. {entries}"") sys.exit(1) if entry is not None: setattr(self, key.upper(), entry)" 1519,"def check_required(self): """""" Check all required settings have been provided """""" die = False for key, value in self.spec.items(): if not getattr(self, key.upper()) and value['required']: print(f""{key} is a required setting. "" ""Set via command-line params, env or file. "" ""For examples, try '--generate' or '--help'."") die = True if die: sys.exit(1)" 1520,"def generate(self): """""" Generate sample settings """""" otype = getattr(self, 'GENERATE') if otype: if otype == 'env': self.generate_env() elif otype == ""command"": self.generate_command() elif otype == ""docker-run"": self.generate_docker_run() elif otype == ""docker-compose"": self.generate_docker_compose() elif otype == ""kubernetes"": self.generate_kubernetes() elif otype == 'ini': self.generate_ini() elif otype == 'readme': self.generate_readme() elif otype == 'drone-plugin': self.generate_drone_plugin() sys.exit(0)" 1521,"def generate_env(self): """""" Generate sample environment variables """""" for key in sorted(list(self.spec.keys())): if self.spec[key]['type'] in (dict, list): value = f""\'{json.dumps(self.spec[key].get('example', ''))}\'"" else: value = f""{self.spec[key].get('example', '')}"" print(f""export {self.env_prefix}_{key.upper()}={value}"")" 1522,"def generate_command(self): """""" Generate a sample command """""" example = [] example.append(f""{sys.argv[0]}"") for key in sorted(list(self.spec.keys())): if self.spec[key]['type'] == list: value = "" "".join(self.spec[key].get('example', '')) elif self.spec[key]['type'] == dict: value = f""\'{json.dumps(self.spec[key].get('example', ''))}\'"" else: value = self.spec[key].get('example', '') string = f"" --{key.lower()} {value}"" example.append(string) print("" \\\n"".join(example))" 1523,"def generate_docker_run(self): """""" Generate a sample docker run """""" example = [] example.append(""docker run -it"") for key in sorted(list(self.spec.keys())): if self.spec[key]['type'] in (dict, list): value = f""\'{json.dumps(self.spec[key].get('example', ''))}\'"" else: value = f""{self.spec[key].get('example', '')}"" string = f"" -e {self.env_prefix}_{key.upper()}={value}"" example.append(string) example.append("" <container-name>"") print("" \\\n"".join(example))" 1524,"def generate_docker_compose(self): """""" Generate a sample docker compose """""" example = {} example['app'] = {} example['app']['environment'] = [] for key in sorted(list(self.spec.keys())): if self.spec[key]['type'] in (dict, list): value = f""\'{json.dumps(self.spec[key].get('example', ''))}\'"" else: value = 
f""{self.spec[key].get('example', '')}"" example['app']['environment'].append(f""{self.env_prefix}_{key.upper()}={value}"") print(yaml.dump(example, default_flow_style=False))" 1525,"def generate_ini(self): """""" Generate a sample ini """""" example = [] example.append(""[settings]"") for key in sorted(list(self.spec.keys())): if self.spec[key]['type'] in [list, dict]: value = json.dumps(self.spec[key].get('example', '')) else: value = self.spec[key].get('example', '') string = f""{key.lower()}={value}"" example.append(string) print(""\n"".join(example))" 1526,"def generate_kubernetes(self): """""" Generate a sample kubernetes """""" example = {} example['spec'] = {} example['spec']['containers'] = [] example['spec']['containers'].append({""name"": '', ""image"": '', ""env"": []}) for key, value in self.spec.items(): if value['type'] in (dict, list): kvalue = f""\'{json.dumps(value.get('example', ''))}\'"" else: kvalue = f""{value.get('example', '')}"" entry = {""name"": f""{self.env_prefix}_{key.upper()}"", ""value"": kvalue} example['spec']['containers'][0]['env'].append(entry) print(yaml.dump(example, default_flow_style=False))" 1527,"def generate_drone_plugin(self): """""" Generate a sample drone plugin configuration """""" example = {} example['pipeline'] = {} example['pipeline']['appname'] = {} example['pipeline']['appname']['image'] = """" example['pipeline']['appname']['secrets'] = """" for key, value in self.spec.items(): if value['type'] in (dict, list): kvalue = f""\'{json.dumps(value.get('example', ''))}\'"" else: kvalue = f""{value.get('example', '')}"" example['pipeline']['appname'][key.lower()] = kvalue print(yaml.dump(example, default_flow_style=False))" 1528,"def generate_readme(self): """""" Generate a readme with all the generators """""" print(""## Examples of settings runtime params"") print(""### Command-line parameters"") print(""```"") self.generate_command() print(""```"") print(""### Environment variables"") print(""```"") self.generate_env() print(""```"") print(""### ini file"") print(""```"") self.generate_ini() print(""```"") print(""### docker run"") print(""```"") self.generate_docker_run() print(""```"") print(""### docker compose"") print(""```"") self.generate_docker_compose() print(""```"") print(""### kubernetes"") print(""```"") self.generate_kubernetes() print(""```"") print(""### drone plugin"") print(""```"") self.generate_drone_plugin() print(""```"")" 1529,"def file_exists(self, subdir, prefix, suffix): """"""Returns true if the resource file exists, else False. Positional arguments: subdir -- sub directory name under the resource's main directory (e.g. css or js, or an empty string if the resource's directory structure is flat). prefix -- file name without the file extension. suffix -- file extension (if self.minify = True, includes .min before the extension). """""" real_path = os.path.join(self.STATIC_DIR, self.DIR, subdir, prefix + suffix) return os.path.exists(real_path)" 1530,"def add_css(self, subdir, file_name_prefix): """"""Add a css file for this resource. If self.minify is True, checks if the .min.css file exists. If not, falls back to non-minified file. If that file also doesn't exist, IOError is raised. Positional arguments: subdir -- sub directory name under the resource's main directory (e.g. css or js, or an empty string). file_name_prefix -- file name without the file extension. 
"""""" suffix_maxify = '.css' suffix_minify = '.min.css' if self.minify and self.file_exists(subdir, file_name_prefix, suffix_minify): self.resources_css.append(posixpath.join(self.DIR, subdir, file_name_prefix + suffix_minify)) elif self.file_exists(subdir, file_name_prefix, suffix_maxify): self.resources_css.append(posixpath.join(self.DIR, subdir, file_name_prefix + suffix_maxify)) else: file_path = os.path.join(self.STATIC_DIR, self.DIR, subdir, file_name_prefix + suffix_maxify) raise IOError('Resource file not found: {0}'.format(file_path))" 1531,"def read_dataframe_from_xls(desired_type: Type[T], file_path: str, encoding: str, logger: Logger, **kwargs) -> pd.DataFrame: """""" We register this method rather than the other because pandas guesses the encoding by itself. Also, it is easier to put a breakpoint and debug by trying various options to find the good one (in streaming mode you just have one try and then the stream is consumed) :param desired_type: :param file_path: :param encoding: :param logger: :param kwargs: :return: """""" return pd.read_excel(file_path, **kwargs)" 1532,"def read_df_or_series_from_csv(desired_type: Type[pd.DataFrame], file_path: str, encoding: str, logger: Logger, **kwargs) -> pd.DataFrame: """""" Helper method to read a dataframe from a csv file. By default this is well suited for a dataframe with headers in the first row, for example a parameter dataframe. :param desired_type: :param file_path: :param encoding: :param logger: :param kwargs: :return: """""" if desired_type is pd.Series: # as recommended in http://pandas.pydata.org/pandas-docs/stable/generated/pandas.Series.from_csv.html # and from http://stackoverflow.com/questions/15760856/how-to-read-a-pandas-series-from-a-csv-file # TODO there should be a way to decide between row-oriented (squeeze=True) and col-oriented (index_col=0) # note : squeeze=true only works for row-oriented, so we dont use it. We rather expect that a row-oriented # dataframe would be convertible to a series using the df to series converter below if 'index_col' not in kwargs.keys(): one_col_df = pd.read_csv(file_path, encoding=encoding, index_col=0, **kwargs) else: one_col_df = pd.read_csv(file_path, encoding=encoding, **kwargs) if one_col_df.shape[1] == 1: return one_col_df[one_col_df.columns[0]] else: raise Exception('Cannot build a series from this csv: it has more than two columns (one index + one value).' ' Probably the parsing chain $read_df_or_series_from_csv => single_row_or_col_df_to_series$' 'will work, though.') else: return pd.read_csv(file_path, encoding=encoding, **kwargs)" 1533,"def get_default_pandas_parsers() -> List[AnyParser]: """""" Utility method to return the default parsers able to parse a dictionary from a file. :return: """""" return [SingleFileParserFunction(parser_function=read_dataframe_from_xls, streaming_mode=False, supported_exts={'.xls', '.xlsx', '.xlsm'}, supported_types={pd.DataFrame}, option_hints=pandas_parsers_option_hints_xls), SingleFileParserFunction(parser_function=read_df_or_series_from_csv, streaming_mode=False, supported_exts={'.csv', '.txt'}, supported_types={pd.DataFrame, pd.Series}, option_hints=pandas_parsers_option_hints_csv), ]" 1534,"def dict_to_df(desired_type: Type[T], dict_obj: Dict, logger: Logger, orient: str = None, **kwargs) -> pd.DataFrame: """""" Helper method to convert a dictionary into a dataframe. It supports both simple key-value dicts as well as true table dicts. 
For this it uses pd.DataFrame constructor or pd.DataFrame.from_dict intelligently depending on the case. The orientation of the resulting dataframe can be configured, or left to default behaviour. Default orientation is different depending on the contents: * 'index' for 2-level dictionaries, in order to align as much as possible with the natural way to express rows in JSON * 'columns' for 1-level (simple key-value) dictionaries, so as to preserve the data types of the scalar values in the resulting dataframe columns if they are different :param desired_type: :param dict_obj: :param logger: :param orient: the orientation of the resulting dataframe. :param kwargs: :return: """""" if len(dict_obj) > 0: first_val = dict_obj[next(iter(dict_obj))] if isinstance(first_val, dict) or isinstance(first_val, list): # --'full' table # default is index orientation orient = orient or 'index' # if orient is 'columns': # return pd.DataFrame(dict_obj) # else: return pd.DataFrame.from_dict(dict_obj, orient=orient) else: # --scalar > single-row or single-col # default is columns orientation orient = orient or 'columns' if orient is 'columns': return pd.DataFrame(dict_obj, index=[0]) else: res = pd.DataFrame.from_dict(dict_obj, orient=orient) res.index.name = 'key' return res.rename(columns={0: 'value'}) else: # for empty dictionaries, orientation does not matter # but maybe we should still create a column 'value' in this empty dataframe ? return pd.DataFrame.from_dict(dict_obj)" 1535,"def single_row_or_col_df_to_series(desired_type: Type[T], single_rowcol_df: pd.DataFrame, logger: Logger, **kwargs)\ -> pd.Series: """""" Helper method to convert a dataframe with one row or one or two columns into a Series :param desired_type: :param single_col_df: :param logger: :param kwargs: :return: """""" if single_rowcol_df.shape[0] == 1: # one row return single_rowcol_df.transpose()[0] elif single_rowcol_df.shape[1] == 2 and isinstance(single_rowcol_df.index, pd.RangeIndex): # two columns but the index contains nothing but the row number : we can use the first column d = single_rowcol_df.set_index(single_rowcol_df.columns[0]) return d[d.columns[0]] elif single_rowcol_df.shape[1] == 1: # one column and one index d = single_rowcol_df return d[d.columns[0]] else: raise ValueError('Unable to convert provided dataframe to a series : ' 'expected exactly 1 row or 1 column, found : ' + str(single_rowcol_df.shape) + '')" 1536,"def single_row_or_col_df_to_dict(desired_type: Type[T], single_rowcol_df: pd.DataFrame, logger: Logger, **kwargs)\ -> Dict[str, str]: """""" Helper method to convert a dataframe with one row or one or two columns into a dictionary :param desired_type: :param single_rowcol_df: :param logger: :param kwargs: :return: """""" if single_rowcol_df.shape[0] == 1: return single_rowcol_df.transpose()[0].to_dict() # return {col_name: single_rowcol_df[col_name][single_rowcol_df.index.values[0]] for col_name in single_rowcol_df.columns} elif single_rowcol_df.shape[1] == 2 and isinstance(single_rowcol_df.index, pd.RangeIndex): # two columns but the index contains nothing but the row number : we can use the first column d = single_rowcol_df.set_index(single_rowcol_df.columns[0]) return d[d.columns[0]].to_dict() elif single_rowcol_df.shape[1] == 1: # one column and one index d = single_rowcol_df return d[d.columns[0]].to_dict() else: raise ValueError('Unable to convert provided dataframe to a parameters dictionary : ' 'expected exactly 1 row or 1 column, found : ' + str(single_rowcol_df.shape) + '')" 1537,"def 
get_default_pandas_converters() -> List[Union[Converter[Any, pd.DataFrame], Converter[pd.DataFrame, Any]]]: """""" Utility method to return the default converters associated to dataframes (from dataframe to other type, and from other type to dataframe) :return: """""" return [ConverterFunction(from_type=pd.DataFrame, to_type=dict, conversion_method=single_row_or_col_df_to_dict), ConverterFunction(from_type=dict, to_type=pd.DataFrame, conversion_method=dict_to_df, option_hints=dict_to_single_row_or_col_df_opts), ConverterFunction(from_type=pd.DataFrame, to_type=pd.Series, conversion_method=single_row_or_col_df_to_series)]" 1538,"def full_subgraph(self, vertices): """""" Return the subgraph of this graph whose vertices are the given ones and whose edges are all the edges of the original graph between those vertices. """""" subgraph_vertices = {v for v in vertices} subgraph_edges = {edge for v in subgraph_vertices for edge in self._out_edges[v] if self._heads[edge] in subgraph_vertices} subgraph_heads = {edge: self._heads[edge] for edge in subgraph_edges} subgraph_tails = {edge: self._tails[edge] for edge in subgraph_edges} return DirectedGraph._raw( vertices=subgraph_vertices, edges=subgraph_edges, heads=subgraph_heads, tails=subgraph_tails, )" 1539,"def _raw(cls, vertices, edges, heads, tails): """""" Private constructor for direct construction of a DirectedGraph from its consituents. """""" self = object.__new__(cls) self._vertices = vertices self._edges = edges self._heads = heads self._tails = tails # For future use, map each vertex to its outward and inward edges. # These could be computed on demand instead of precomputed. self._out_edges = collections.defaultdict(set) self._in_edges = collections.defaultdict(set) for edge in self._edges: self._out_edges[self._tails[edge]].add(edge) self._in_edges[self._heads[edge]].add(edge) return self" 1540,"def from_out_edges(cls, vertices, edge_mapper): """""" Create a DirectedGraph from a collection of vertices and a mapping giving the vertices that each vertex is connected to. """""" vertices = set(vertices) edges = set() heads = {} tails = {} # Number the edges arbitrarily. edge_identifier = itertools.count() for tail in vertices: for head in edge_mapper[tail]: edge = next(edge_identifier) edges.add(edge) heads[edge] = head tails[edge] = tail return cls._raw( vertices=vertices, edges=edges, heads=heads, tails=tails, )" 1541,"def from_edge_pairs(cls, vertices, edge_pairs): """""" Create a DirectedGraph from a collection of vertices and a collection of pairs giving links between the vertices. """""" vertices = set(vertices) edges = set() heads = {} tails = {} # Number the edges arbitrarily. edge_identifier = itertools.count() for tail, head in edge_pairs: edge = next(edge_identifier) edges.add(edge) heads[edge] = head tails[edge] = tail return cls._raw( vertices=vertices, edges=edges, heads=heads, tails=tails, )" 1542,"def annotated(self): """""" Return an AnnotatedGraph with the same structure as this graph. 
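# --- Illustrative usage of the DirectedGraph constructors above (a sketch).
# from_edge_pairs builds the graph from explicit (tail, head) pairs, and
# full_subgraph keeps only the edges whose endpoints both lie in the given set:
#
#     g = DirectedGraph.from_edge_pairs(
#         vertices=[1, 2, 3, 4],
#         edge_pairs=[(1, 2), (2, 3), (3, 1), (3, 4)],
#     )
#     sub = g.full_subgraph([1, 2, 3])
#     # sub contains vertices {1, 2, 3} and the edges 1->2, 2->3 and 3->1;
#     # the edge 3->4 is dropped because 4 lies outside the subgraph.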
"""""" annotated_vertices = { vertex: AnnotatedVertex( id=vertex_id, annotation=six.text_type(vertex), ) for vertex_id, vertex in zip(itertools.count(), self.vertices) } annotated_edges = [ AnnotatedEdge( id=edge_id, annotation=six.text_type(edge), head=annotated_vertices[self.head(edge)].id, tail=annotated_vertices[self.tail(edge)].id, ) for edge_id, edge in zip(itertools.count(), self.edges) ] return AnnotatedGraph( vertices=annotated_vertices.values(), edges=annotated_edges, )" 1543,"def execute_command(working_dir, cmd, env_dict): """""" execute_command: run the command provided in the working dir specified adding the env_dict settings to the execution environment :param working_dir: path to directory to execute command also gets added to the PATH :param cmd: Shell command to execute :param env_dict: dictionary of additional env vars to be passed to the subprocess environment """""" proc_env = os.environ.copy() proc_env[""PATH""] = ""{}:{}:."".format(proc_env[""PATH""], working_dir) proc_env.update(env_dict) proc = subprocess.Popen( cmd, cwd=working_dir, env=proc_env, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, ) status = proc.wait() stdout, stderr = proc.communicate() if status: msg = ( ""Non zero {} exit from command {}\n"" ""Stdout: {}\n"" ""Stderr: {}\n"" ).format(status, cmd, stdout, stderr) LOGGER.error(msg) raise RuntimeError(msg) LOGGER.info(stdout)" 1544,"def load(self): """""" read dotfile and populate self opts will override the dotfile settings, make sure everything is synced in both opts and this object """""" if self.exists(): with open(self.dot_file, 'r') as handle: self.update(json.load(handle)) if self.options['context'] is not None: self['context'] = self.options['context'] else: self.options['context'] = self['context'] if self.options['defaults'] is not None: self['defaults'] = self.options['defaults'] else: self.options['defaults'] = self['defaults'] if self.options['output'] is not None: self['output'] = self.options['output'] if self.options.get('inclusive', False): self['inclusive'] = True if self.options.get('exclude', []): self['exclude'].extend(self.options['exclude']) if self['output'] is None: self['output'] = os.path.join(os.getcwd(), 'dockerstache-output') self['output_path'] = self.abs_output_dir() self['input_path'] = self.abs_input_dir() if self['context'] is not None: self['context_path'] = absolute_path(self['context']) if self['defaults'] is not None: self['defaults_path'] = absolute_path(self['defaults'])" 1545,"def env_dictionary(self): """""" convert the options to this script into an env var dictionary for pre and post scripts """""" none_to_str = lambda x: str(x) if x else """" return {""DOCKERSTACHE_{}"".format(k.upper()): none_to_str(v) for k, v in six.iteritems(self)}" 1546,"def pre_script(self): """""" execute the pre script if it is defined """""" if self['pre_script'] is None: return LOGGER.info(""Executing pre script: {}"".format(self['pre_script'])) cmd = self['pre_script'] execute_command(self.abs_input_dir(), cmd, self.env_dictionary()) LOGGER.info(""Pre Script completed"")" 1547,"def say( text = None, preference_program = ""festival"", background = False, silent = True, filepath = None ): """""" Say specified text to speakers or to file, as specified. Determine the program to use based on the specified program preference and availability, then say the text to speakers or synthesize speech of the text and save it to file, as specified. 
"""""" if not text: if not silent: print(""text not specified"") return False # Determine the program to use based on program preference and program # availability. preference_order_programs = [ ""festival"", ""espeak"", ""pico2wave"", ""deep_throat.py"" ] # Remove the specified preference program from the default program # preferences order and prioritise it. preference_order_programs.remove(preference_program) preference_order_programs.insert(0, preference_program) # Determine first program that is available in the programs order of # preference. preference_order_programs_available =\ [program for program in preference_order_programs \ if shijian.which(program) is not None] if not preference_order_programs_available: if not silent: print(""text-to-speech program unavailable"") return False program = preference_order_programs_available[0] if program != preference_program and not silent: print(""text-to-speech preference program unavailable, using {program}"".format(program = program)) if program == ""festival"": if not filepath: command = """""" echo ""{text}"" | festival --tts """""".format(text = text) else: command = """""" echo ""{text}"" | text2wave -o {filepath} """""".format(text = text, filepath = filepath) elif program == ""espeak"": if not filepath: command = """""" echo ""{text}"" | espeak """""".format(text = text) else: command = """""" echo ""{text}"" | espeak -w {filepath} """""".format(text = text, filepath = filepath) elif program == ""pico2wave"": if not filepath: command = """""" pico2wave --wave=""{filepath}"" ""{text}"" aplay --quiet ""{filepath}"" """""".format(text = text, filepath = shijian.tmp_filepath() + "".wav"") else: command = """""" pico2wave --wave=""{filepath}"" ""{text}"" """""".format(text = text, filepath = filepath) elif program == ""deep_throat.py"": if not filepath: command = """""" echo ""{text}"" | deep_throat.py """""".format(text = text) else: command = """""" deep_throat.py --text=""{text}"" --savetowavefile --outfile=""{filepath}"" """""".format(text = text, filepath = filepath) if filepath: background = False if background: command = command.rstrip().rstrip(""\n"") + "" &"" command = textwrap.dedent(command) engage_command(command = command, background = background)" 1548,"def say_tmp_filepath( text = None, preference_program = ""festival"" ): """""" Say specified text to a temporary file and return the filepath. """""" filepath = shijian.tmp_filepath() + "".wav"" say( text = text, preference_program = preference_program, filepath = filepath ) return filepath" 1549,"def clacks_overhead(fn): """""" A Django view decorator that will add the `X-Clacks-Overhead` header. Usage: @clacks_overhead def my_view(request): return my_response """""" @wraps(fn) def _wrapped(*args, **kw): response = fn(*args, **kw) response['X-Clacks-Overhead'] = 'GNU Terry Pratchett' return response return _wrapped" 1550,"def render(self, request, template, context): """""" Returns a response. By default, this will contain the rendered PDF, but if both ``allow_force_html`` is ``True`` and the querystring ``html=true`` was set it will return a plain HTML. 
"""""" if self.allow_force_html and self.request.GET.get('html', False): html = get_template(template).render(context) return HttpResponse(html) else: response = HttpResponse(content_type='application/pdf') if self.prompt_download: response['Content-Disposition'] = 'attachment; filename=""{}""' \ .format(self.get_download_name()) helpers.render_pdf( template=template, file_=response, url_fetcher=self.url_fetcher, context=context, ) return response" 1551,"def _bfs_path_states(self, graph, start): """""" Find state access strings (DFA shortest paths for every state) using BFS Args: graph (DFA): The DFA states start (int): The DFA initial state Return: list: A list of all the DFA shortest paths for every state """""" pathstates = {} # maintain a queue of paths queue = [] visited = [] # push the first path into the queue queue.append([['', start]]) while queue: # get the first path from the queue path = queue.pop(0) # get the last node from the path node = path[-1][1] # path found """""" if node.stateid not in pathstates and node.stateid != len(list(graph.states)): pathstates[node.stateid] = ''.join( [mnode[0] for mnode in path]) visited.append(node.stateid) # enumerate all adjacent nodes, construct a new path and push it # into the queue for arc in node.arcs: char = graph.isyms.find(arc.ilabel) next_state = graph[arc.nextstate] if next_state.stateid not in visited: new_path = list(path) new_path.append([char, next_state]) queue.append(new_path) return pathstates" 1552,"def _get_accepted(self, graph): """""" Find the accepted states Args: graph (DFA): The DFA states Return: list: Returns the list of the accepted states """""" accepted = [] for state in graph.states: if state.final != TropicalWeight(float('inf')): accepted.append(state) return accepted" 1553,"def _object_set_to_state_list(self, objectset): """""" Args: objectset (list): A list of all the DFA states (as objects) Return: list: A list of all the DFA states (as identifiers) """""" state_list = [] for state in list(objectset): state_list.append(state.stateid) return state_list" 1554,"def _get_group_from_state(self, sid): """""" Args: sid (int): The state identifier Return: int: The group identifier that the state belongs """""" for index, selectgroup in enumerate(self.groups): if sid in selectgroup: return index" 1555,"def _reverse_to_source(self, target, group1): """""" Args: target (dict): A table containing the reverse transitions for each state group1 (list): A group of states Return: Set: A set of states for which there is a transition with the states of the group """""" new_group = [] for dst in group1: new_group += target[dst] return set(new_group)" 1556,"def _partition_group(self, group): """""" Args: group (list): A group of states Return: tuple: A set of two groups """""" for (group1, group2, distinguish_string) in self.bookeeping: if group & group1 != set() and not group.issubset(group1): new_g1 = group & group1 new_g2 = group - group1 return (new_g1, new_g2, distinguish_string) if group & group2 != set() and not group.issubset(group2): new_g1 = group & group2 new_g2 = group - group2 return (new_g1, new_g2, distinguish_string) assert False, ""Unmatched group partition""" 1557,"def _init_smi(self, graph, access_strings_map): """""" Args: graph (DFA): The DFA states access_strings_map (list): a dict containing all the access strings for each state Return: list: SMI transition table """""" smi = [] for selected_state in sorted(graph.states, key=attrgetter('initial'), reverse=True): # Initially gather all transitions of the 
state into a dictionary transitions_map = defaultdict(list) for character in self.alphabet: destination_state = self._delta(graph, selected_state, character) transitions_map[destination_state.stateid].append(character) chars_in_smi = [] sorted_transitions = sorted( transitions_map.items(), key=lambda x: len( x[1])) if len(sorted_transitions) == 1: # Just put 1 symbol is enough all other'input_string will be generalized # by the guardgen algorithm chars_in_smi.append(self.alphabet[0]) else: # Otherwise insert in smi_vector all transitions as explicit except # the one from the sink transition where we add just enough # explicity transitions to make sure that this state will be # selected as the sink state. # # If no transition has a clear advantage in terms of symbols then # just add all transitions in explicit form because it may be the # case the guardgen() will generalize in the wrong transition. for (_, char_list) in sorted_transitions[:-1]: chars_in_smi += char_list sink_chars = len(sorted_transitions[-2][1]) + 1 chars_in_smi.extend(sorted_transitions[-1][1][:sink_chars]) access_string = access_strings_map[selected_state.stateid] smi.extend([access_string + character for character in chars_in_smi]) return smi" 1558,"def _init_using_k_equivalence(self, given_graph, sfa=False): """""" Args: given_graph (DFA): The DFA states sfa (boolean): A boolean for chosing SFA Return: list, list, list: sm_vector, smi_vector, em_vector initialization vectors """""" graph = DFA(self.alphabet) graph.init_from_acceptor(given_graph) graph.fixminimized(self.alphabet) # Access Strings self.access_strings_map = self._bfs_path_states(graph, sorted( graph.states, key=attrgetter('initial'), reverse=True)[0]) # Find Q set_q = set(self._object_set_to_state_list(graph.states)) # We will work with states addresses here instead of states stateid for # more convenience set_f = set(self._object_set_to_state_list(self._get_accepted(graph))) # Perform P := {F, Q-F} set_nf = set_q.copy() - set_f.copy() self.groups = [set_f.copy(), set_nf.copy()] self.bookeeping = [(set_f, set_nf, '')] done = False while not done: done = True new_groups = [] for selectgroup in self.groups: # _check for each letter if it splits the current group for character in self.alphabet: # print 'Testing symbol: ', c target = defaultdict(list) target_states = defaultdict(int) new_g = [set(selectgroup)] for sid in selectgroup: # _check if all transitions using c are going in a state # in the same group. 
If they are going on a different # group then split deststate = self._delta(graph, graph[sid], character) destgroup = self._get_group_from_state( deststate.stateid) target[destgroup].append(sid) target_states[destgroup] = deststate.stateid if len(target) > 1: inv_target_states = { v: k for k, v in target_states.iteritems()} new_g = [set(selectedstate) for selectedstate in target.values()] done = False # Get all the partitions of destgroups queue = [set([x for x in target_states.values()])] while queue: top = queue.pop(0) (group1, group2, distinguish_string) = self._partition_group(top) ng1 = self._reverse_to_source( target, [inv_target_states[x] for x in group1]) ng2 = self._reverse_to_source( target, [inv_target_states[x] for x in group2]) dist_string = character + distinguish_string self.bookeeping.append((ng1, ng2, dist_string)) if len(group1) > 1: queue.append(group1) if len(group2) > 1: queue.append(group2) break new_groups += new_g # End of iteration for the k-equivalence # Assign new groups and check if any change occured self.groups = new_groups sm_vector = [ i for (a, i) in sorted( self.access_strings_map.items(), key=lambda x: len(x[1]))] if not sfa: smi_vector = ['{}{}'.format(a, b) for b in self.alphabet for a in sm_vector] else: smi_vector = self._init_smi(graph, self.access_strings_map) em_vector = [distinguish_string for (_, _, distinguish_string) in self.bookeeping] return sm_vector, smi_vector, em_vector" 1559,"def initialize(self, givengraph, sfa=False): """""" Args: givengraph (DFA): The DFA states sfa (bool): A boolean for chosing SFA Return: list, list, list: sm_vector, smi_vector, em_vector initialization vectors """""" sm_vector, smi_vector, em_vector = self._init_using_k_equivalence( givengraph, sfa) return sm_vector, smi_vector, em_vector" 1560,"def push(self,message,message_type): """""" Send a reply message of the given type Args: - message: the message to publish - message_type: the type of message being sent """""" super(Producer,self).send(message,message_type)" 1561,"def label_weight(base, label_name=None, children=[], parents=[], dependencies=[]): """""" Function that returns a Formatoption class for modifying the fontweight This function returns a :class:`~psyplot.plotter.Formatoption` instance that modifies the weight of the given `base` formatoption Parameters ---------- base: Formatoption The base formatoption instance that is used in the :class:`psyplot.Plotter` subclass to create the label. The instance must have a ``texts`` attribute which stores all the :class:`matplotlib.text.Text` instances. label_name: str The name of the label to use in the documentation. 
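# --- A minimal, self-contained sketch of the k-equivalence idea used by
# _init_using_k_equivalence above (an illustration only -- the real method works
# on the fst-backed DFA objects and also records distinguishing strings).
# Starting from the partition {accepting, non-accepting}, groups are split until
# no symbol can separate two states of the same group.
def k_equivalence_classes(states, alphabet, delta, accepting):
    # delta: dict mapping (state, symbol) -> state
    groups = [set(accepting), set(states) - set(accepting)]
    groups = [g for g in groups if g]
    changed = True
    while changed:
        changed = False
        new_groups = []
        for group in groups:
            # bucket the states of this group by the tuple of groups that their
            # transitions land in, one entry per alphabet symbol
            buckets = {}
            for state in group:
                signature = tuple(
                    next(i for i, g in enumerate(groups) if delta[(state, a)] in g)
                    for a in alphabet
                )
                buckets.setdefault(signature, set()).add(state)
            if len(buckets) > 1:
                changed = True
            new_groups.extend(buckets.values())
        groups = new_groups
    return groups

# Example: states 0 and 1 are equivalent, 2 is the only accepting state.
delta = {(0, 'a'): 1, (0, 'b'): 2, (1, 'a'): 0, (1, 'b'): 2, (2, 'a'): 2, (2, 'b'): 2}
print(k_equivalence_classes([0, 1, 2], ['a', 'b'], delta, [2]))
# -> [{2}, {0, 1}] (the order of the groups may differ)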
If None, it will be ``key``, where ``key`` is the :attr:`psyplot.plotter.Formatoption.key`` attribute of `base` children: list of str The childrens of the resulting formatoption class (besides the `base` formatoption which is included anyway) parents: list of str The parents of the resulting formatoption class (besides the `base` the properties formatoption from `base` (see :func:`label_props`)) dependencies: list of str The dependencies of the formatoption Returns ------- Formatoption The formatoption instance that modifies the fontweight of `base` See Also -------- label_size, label_props, Figtitle, Title"""""" label_name = label_name or base.key cl_children = children cl_parents = parents cl_dependencies = dependencies class LabelWeight(Formatoption): __doc__ = """""" Set the fontweight of the %s Possible types -------------- %%(fontweights)s See Also -------- %s, %s, %s"""""" % (label_name, base.key, base.key + 'size', base.key + 'props') children = [base.key] + \ cl_children parent = [base.key + 'props'] + cl_parents dependencies = cl_dependencies group = 'labels' name = 'Font weight of ' + (base.name or base.key) def update(self, value): for text in getattr(self, base.key).texts: text.set_weight(value) def get_fmt_widget(self, parent, project): """"""Get a widget with the different font weights"""""" from psy_simple.widgets.texts import FontWeightWidget return FontWeightWidget( parent, self, next(iter(getattr(self, base.key).texts), None), base) return LabelWeight(base.key + 'weight')" 1562,"def label_size(base, label_name=None, children=[], parents=[], dependencies=[]): """""" Function that returns a Formatoption class for modifying the fontsite This function returns a :class:`~psyplot.plotter.Formatoption` instance that modifies the size of the given `base` formatoption Parameters ---------- %(label_weight.parameters)s Returns ------- Formatoption The formatoption instance that modifies the fontsize of `base` See Also -------- label_weight, label_props, Figtitle, Title"""""" label_name = label_name or base.key cl_children = children cl_parents = parents cl_dependencies = dependencies class LabelSize(Formatoption): __doc__ = """""" Set the size of the %s Possible types -------------- %%(fontsizes)s See Also -------- %s, %s, %s"""""" % (label_name, base.key, base.key + 'weight', base.key + 'props') children = [base.key] + cl_children parent = [base.key + 'props'] + cl_parents dependencies = cl_dependencies group = 'labels' name = 'Font size of ' + (base.name or base.key) def update(self, value): for text in getattr(self, base.key).texts: text.set_size(value) def get_fmt_widget(self, parent, project): """"""Get a widget with the different font weights"""""" from psy_simple.widgets.texts import FontSizeWidget return FontSizeWidget( parent, self, next(iter(getattr(self, base.key).texts), None), base) return LabelSize(base.key + 'size')" 1563,"def label_props(base, label_name=None, children=[], parents=[], dependencies=[]): """""" Function that returns a Formatoption class for modifying the fontsite This function returns a :class:`~psyplot.plotter.Formatoption` instance that modifies the size of the given `base` formatoption Parameters ---------- %(label_weight.parameters)s children: list of str The childrens of the resulting formatoption class (besides the `base` formatoption, the ``base.key + 'size'`` and ``base.key + 'weight'`` keys, which are included anyway (see :func:`label_size`, :func:`label_weight`)) parents: list of str The parents of the resulting formatoption class Returns 
------- Formatoption The formatoption instance that modifies the fontsize of `base` See Also -------- label_weight, label_props, Figtitle, Title"""""" label_name = label_name or base.key cl_children = children cl_parents = parents cl_dependencies = dependencies class LabelProps(Formatoption): __doc__ = """""" Properties of the %s Specify the font properties of the figure title manually. Possible types -------------- dict Items may be any valid text property See Also -------- %s, %s, %s"""""" % (label_name, base.key, base.key + 'size', base.key + 'weight') children = [base.key, base.key + 'size', base.key + 'weight'] + \ cl_children parents = cl_parents dependencies = cl_dependencies group = 'labels' name = 'Font properties of ' + (base.name or base.key) def __init__(self, *args, **kwargs): super(LabelProps, self).__init__(*args, **kwargs) self.default_props = {} self._todefault = False def set_value(self, value, validate=True, todefault=False): self._todefault = todefault super(LabelProps, self).set_value(value, validate, todefault) def update(self, fontprops): fontprops = fontprops.copy() # store default font properties try: text = next(iter(getattr(self, base.key).texts)) except StopIteration: return # TODO: This handling of the default management is not really # satisfying because you run into troubles when using alternate # property names (e.g. if you use 'ha' and 'horizontalalignment' # at the same time) if not self._todefault: for key in fontprops: if key == 'bbox': default = dict(facecolor='none', edgecolor='none') else: default = getattr(text, 'get_' + key)() self.default_props.setdefault(key, default) else: fontprops = self.default_props.copy() self.default_props.clear() if 'size' not in fontprops and 'fontsize' not in fontprops: fontprops['size'] = getattr(self, base.key + 'size').value if 'weight' not in fontprops and 'fontweight' not in fontprops: fontprops['weight'] = getattr(self, base.key + 'weight').value for text in getattr(self, base.key).texts: text.update(fontprops) self._todefault = False def get_fmt_widget(self, parent, project): """"""Get a widget with the different font weights"""""" from psy_simple.widgets.texts import FontPropertiesWidget return FontPropertiesWidget( parent, self, next(iter(getattr(self, base.key).texts), None), base) return LabelProps(base.key + 'props')" 1564,"def replace(self, s, data, attrs=None): """""" Replace the attributes of the plotter data in a string %(replace_note)s Parameters ---------- s: str String where the replacements shall be made data: InteractiveBase Data object from which to use the coordinates and insert the coordinate and attribute informations attrs: dict Meta attributes that shall be used for replacements. 
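# --- Illustrative usage of the three factories above (a sketch; 'Title' and the
# plotter wiring are assumptions, not from the source). Given a base text
# formatoption on a Plotter subclass, the helpers create the companion
# size/weight/props formatoptions keyed base.key + 'size' / 'weight' / 'props':
#
#     class MyPlotter(Plotter):
#         title = Title('title')
#         titlesize = label_size(title, 'title')
#         titleweight = label_weight(title, 'title')
#         titleprops = label_props(title, 'title')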
If None, it will be gained from `data.attrs` Returns ------- str `s` with inserted informations"""""" # insert labels s = s.format(**self.rc['labels']) # replace attributes attrs = attrs or data.attrs if hasattr(getattr(data, 'psy', None), 'arr_name'): attrs = attrs.copy() attrs['arr_name'] = data.psy.arr_name s = safe_modulo(s, attrs) # replace datetime.datetime like time informations if isinstance(data, InteractiveList): data = data[0] tname = self.any_decoder.get_tname( next(self.plotter.iter_base_variables), data.coords) if tname is not None and tname in data.coords: time = data.coords[tname] if not time.values.ndim: try: # assume a valid datetime.datetime instance s = pd.to_datetime(str(time.values[()])).strftime(s) except ValueError: pass if six.PY2: return s.decode('utf-8') return s" 1565,"def get_fig_data_attrs(self, delimiter=None): """"""Join the data attributes with other plotters in the project This method joins the attributes of the :class:`~psyplot.InteractiveBase` instances in the project that draw on the same figure as this instance does. Parameters ---------- delimiter: str Specifies the delimiter with what the attributes are joined. If None, the :attr:`delimiter` attribute of this instance or (if the latter is also None), the rcParams['texts.delimiter'] item is used. Returns ------- dict A dictionary with all the meta attributes joined by the specified `delimiter`"""""" if self.project is not None: delimiter = next(filter(lambda d: d is not None, [ delimiter, self.delimiter, self.rc['delimiter']])) figs = self.project.figs fig = self.ax.get_figure() if self.plotter._initialized and fig in figs: ret = figs[fig].joined_attrs(delimiter=delimiter, plot_data=True) else: ret = self.get_enhanced_attrs(self.plotter.plot_data) self.logger.debug( 'Can not get the figure attributes because plot has not ' 'yet been initialized!') return ret else: return self.get_enhanced_attrs(self.plotter.plot_data)" 1566,"def get_fmt_widget(self, parent, project): """"""Create a combobox with the attributes"""""" from psy_simple.widgets.texts import LabelWidget return LabelWidget(parent, self, project)" 1567,"def clear_other_texts(self, remove=False): """"""Make sure that no other text is a the same position as this one This method clears all text instances in the figure that are at the same position as the :attr:`_text` attribute Parameters ---------- remove: bool If True, the Text instances are permanently deleted from the figure, otherwise there text is simply set to ''"""""" fig = self.ax.get_figure() # don't do anything if our figtitle is the only Text instance if len(fig.texts) == 1: return for i, text in enumerate(fig.texts): if text == self._text: continue if text.get_position() == self._text.get_position(): if not remove: text.set_text('') else: del fig[i]" 1568,"def transform(self): """"""Dictionary containing the relevant transformations"""""" ax = self.ax return {'axes': ax.transAxes, 'fig': ax.get_figure().transFigure, 'data': ax.transData}" 1569,"def _remove_texttuple(self, pos): """"""Remove a texttuple from the value in the plotter Parameters ---------- pos: tuple (x, y, cs) x and y are the x- and y-positions and cs the coordinate system"""""" for i, (old_x, old_y, s, old_cs, d) in enumerate(self.value): if (old_x, old_y, old_cs) == pos: self.value.pop(i) return raise ValueError(""{0} not found!"".format(pos))" 1570,"def _update_texttuple(self, x, y, s, cs, d): """"""Update the text tuple at `x` and `y` with the given `s` and `d`"""""" pos = (x, y, cs) for i, (old_x, old_y, old_s, 
old_cs, old_d) in enumerate(self.value): if (old_x, old_y, old_cs) == pos: self.value[i] = (old_x, old_y, s, old_cs, d) return raise ValueError(""No text tuple found at {0}!"".format(pos))" 1571,"def share(self, fmto, **kwargs): """"""Share the settings of this formatoption with other data objects Parameters ---------- fmto: Formatoption The :class:`Formatoption` instance to share the attributes with ``**kwargs`` Any other keyword argument that shall be passed to the update method of `fmto` Notes ----- The Text formatoption sets the 'texts_to_remove' keyword to the :attr:`_texts_to_remove` attribute of this instance (if not already specified in ``**kwargs``"""""" kwargs.setdefault('texts_to_remove', self._texts_to_remove) super(Text, self).share(fmto, **kwargs)" 1572,"def save(self, *args, **kwargs): """""" **uid**: :code:`person:{slug}` """""" self.slug = uuslug( self.name, instance=self, max_length=100, separator='-', start_no=2 ) if not self.uid: self.uid = 'organization:{}'.format(self.slug) super(Organization, self).save(*args, **kwargs)" 1573,"def replace_variables(self, source: str, variables: dict) -> str: """"""Replace {{variable-name}} with stored value."""""" try: replaced = re.sub( ""{{(.*?)}}"", lambda m: variables.get(m.group(1), """"), source ) except TypeError: replaced = source return replaced" 1574,"def preprocess_cell( self, cell: ""NotebookNode"", resources: dict, index: int ) -> Tuple[""NotebookNode"", dict]: """"""Preprocess cell. Parameters ---------- cell : NotebookNode cell Notebook cell being processed resources : dictionary Additional resources used in the conversion process. Allows preprocessors to pass variables into the Jinja engine. cell_index : int Index of the cell being processed (see base.py) """""" if cell.cell_type == ""markdown"": variables = cell[""metadata""].get(""variables"", {}) if len(variables) > 0: cell.source = self.replace_variables(cell.source, variables) if resources.get(""delete_pymarkdown"", False): del cell.metadata[""variables""] return cell, resources" 1575,"def emit(self, ast_reg, ast_guide): ''' Default emit method: visit both ASTs and return the codegen ''' if (ast_reg): self.visit(ast_reg) codegen_reg = self.codegen self.codegen = self.cg_type() if (ast_guide): self.visit(ast_guide) return (codegen_reg, self.codegen)" 1576,"def index_dir(self, folder): """""" Creates a nested dictionary that represents the folder structure of folder. Also extracts meta data from all markdown posts and adds to the dictionary. 
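# --- Illustrative behaviour of replace_variables above (a sketch). Every
# {{name}} placeholder is substituted from the variables mapping; unknown names
# collapse to an empty string:
#
#     self.replace_variables('Exam {{exam_num}} for {{name}}', {'exam_num': '2'})
#     # -> 'Exam 2 for '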
"""""" folder_path = folder print('Indexing folder: ' + folder_path) nested_dir = {} folder = folder_path.rstrip(os.sep) start = folder.rfind(os.sep) + 1 for root, dirs, files in os.walk(folder): folders = root[start:].split(os.sep) # subdir = dict.fromkeys(files) subdir = {} for f in files: # Create an entry for every markdown file if os.path.splitext(f)[1] == '.md': with open(os.path.abspath(os.path.join(root, f)), encoding='utf-8') as fp: try: _, meta = self.mrk.extract_meta(fp.read()) except: print(""Skipping indexing "" + f +""; Could not parse metadata"") meta = {'title': f} pass # Value of the entry (the key) is it's metadata subdir[f] = meta parent = nested_dir for fold in folders[:-1]: parent = parent.get(fold) # Attach the config of all children nodes onto the parent parent[folders[-1]] = subdir return nested_dir" 1577,"def _parse_orders(self, orders): """""" Transform orders from list objects to PHP arrays: [ { 'PNAME': 'CD Player', 'PCODE': 'PROD_04891', 'PINFO': 'Extended Warranty - 5 Years', 'PRICE': '82.3', 'PRICE_TYPE': 'GROSS', 'QTY': '7', 'VAT':'20' }, { 'PNAME': 'Mobile Phone', 'PCODE': 'PROD_07409', 'PINFO': 'Dual SIM', 'PRICE': '1945.75', 'PRICE_TYPE': 'GROSS', 'QTY': '3', 'VAT':'20' }, { 'PNAME': 'Laptop', 'PCODE': 'PROD_04965', 'PINFO': '17"" Display', 'PRICE': '5230', 'PRICE_TYPE': 'GROSS', 'QTY': '1', 'VAT':'20' } ] || \/ { 'ORDER_PCODE[0]': 'PROD_04891', 'ORDER_PCODE[1]': 'PROD_07409', 'ORDER_PCODE[2]': 'PROD_04965', 'ORDER_PINFO[0]': 'Extended Warranty - 5 Years', 'ORDER_PINFO[1]': 'Dual SIM', 'ORDER_PINFO[2]': '17"" Display', 'ORDER_PNAME[0]': 'CD Player', 'ORDER_PNAME[1]': 'Mobile Phone', 'ORDER_PNAME[2]': 'Laptop', 'ORDER_PRICE[0]': '82.3', 'ORDER_PRICE[1]': '1945.75', 'ORDER_PRICE[2]': '5230', 'ORDER_PRICE_TYPE[0]': 'GROSS', 'ORDER_PRICE_TYPE[1]': 'GROSS', 'ORDER_PRICE_TYPE[2]': 'GROSS', 'ORDER_QTY[0]': '7', 'ORDER_QTY[1]': '3', 'ORDER_QTY[2]': '1', 'ORDER_VAT[0]': '20', 'ORDER_VAT[1]': '20', 'ORDER_VAT[2]': '20' } """""" result = {} for index, order in enumerate(orders): for detail, value in order.iteritems(): result[""ORDER_%s[%s]"" % (detail, index)] = value return result" 1578,"def average_detections(detections, predictions, relative_prediction_threshold = 0.25): """"""average_detections(detections, predictions, [relative_prediction_threshold]) -> bounding_box, prediction Computes the weighted average of the given detections, where the weights are computed based on the prediction values. **Parameters:** ``detections`` : [:py:class:`BoundingBox`] The overlapping bounding boxes. ``predictions`` : [float] The predictions for the ``detections``. 
``relative_prediction_threshold`` : float between 0 and 1 Limits the bounding boxes to those that have a prediction value higher then ``relative_prediction_threshold * max(predictions)`` **Returns:** ``bounding_box`` : :py:class:`BoundingBox` The bounding box which has been merged from the detections ``prediction`` : float The prediction value of the bounding box, which is a weighted sum of the predictions with minimum overlap """""" # remove the predictions that are too low prediction_threshold = relative_prediction_threshold * max(predictions) detections, predictions = zip(*[[d,p] for d,p in zip(detections, predictions) if p >= prediction_threshold]) # turn remaining predictions into weights s = sum(predictions) weights = [p/s for p in predictions] # compute weighted average of bounding boxes top = sum(w * b.topleft_f[0] for w, b in zip(weights, detections)) left = sum(w * b.topleft_f[1] for w, b in zip(weights, detections)) bottom = sum(w * b.bottomright_f[0] for w, b in zip(weights, detections)) right = sum(w * b.bottomright_f[1] for w, b in zip(weights, detections)) # compute the average prediction value value = sum(w*p for w,p in zip(weights, predictions)) # return the average bounding box return BoundingBox((top, left), (bottom-top, right-left)), value" 1579,"def best_detection(detections, predictions, minimum_overlap = 0.2, relative_prediction_threshold = 0.25): """"""best_detection(detections, predictions, [minimum_overlap], [relative_prediction_threshold]) -> bounding_box, prediction Computes the best detection for the given detections and according predictions. This is achieved by computing a weighted sum of detections that overlap with the best detection (the one with the highest prediction), where the weights are based on the predictions. Only detections with according prediction values > 0 are considered. **Parameters:** ``detections`` : [:py:class:`BoundingBox`] The detected bounding boxes. ``predictions`` : [float] The predictions for the ``detections``. ``minimum_overlap`` : float between 0 and 1 The minimum overlap (in terms of Jaccard :py:meth:`BoundingBox.similarity`) of bounding boxes with the best detection to be considered. 
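# --- A worked sketch of the weighting in average_detections above. With
# predictions [4.0, 2.0, 0.5] and relative_prediction_threshold = 0.25, the
# cut-off is 0.25 * 4.0 = 1.0, so the 0.5 detection is dropped; the remaining
# weights are 4/6 and 2/6, and both the box corners and the returned prediction
# are the correspondingly weighted sums:
#
#     value = 4/6 * 4.0 + 2/6 * 2.0   # = 20/6, roughly 3.33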
``relative_prediction_threshold`` : float between 0 and 1 Limits the bounding boxes to those that have a prediction value higher then ``relative_prediction_threshold * max(predictions)`` **Returns:** ``bounding_box`` : :py:class:`BoundingBox` The bounding box which has been merged from the detections ``prediction`` : float The prediction value of the bounding box, which is a weighted sum of the predictions with minimum overlap """""" # remove all negative predictions since they harm the calculation of the weights detections = [detections[i] for i in range(len(detections)) if predictions[i] > 0] predictions = [predictions[i] for i in range(len(predictions)) if predictions[i] > 0] if not detections: raise ValueError(""No detections with a prediction value > 0 have been found"") # keep only the bounding boxes with the highest overlap detections, predictions = overlapping_detections(detections, numpy.array(predictions), minimum_overlap) return average_detections(detections, predictions, relative_prediction_threshold)" 1580,"def detect_single_face(image, cascade = None, sampler = None, minimum_overlap=0.2, relative_prediction_threshold = 0.25): """"""detect_single_face(image, [cascade], [sampler], [minimum_overlap], [relative_prediction_threshold]) -> bounding_box, quality Detects a single face in the given image, i.e., the one with the highest prediction value. **Parameters:** ``image`` : array_like (2D aka gray or 3D aka RGB) The image to detect a face in. ``cascade`` : str or :py:class:`Cascade` or ``None`` If given, the cascade file name or the loaded cascade to be used. If not given, the :py:func:`default_cascade` is used. ``sampler`` : :py:class:`Sampler` or ``None`` The sampler that defines the sampling of bounding boxes to search for the face. If not specified, a default Sampler is instantiated, which will perform a tight sampling. ``minimum_overlap`` : float between 0 and 1 Computes the best detection using the given minimum overlap, see :py:func:`best_detection` ``relative_prediction_threshold`` : float between 0 and 1 Limits the bounding boxes to those that have a prediction value higher then ``relative_prediction_threshold * max(predictions)`` **Returns:** ``bounding_box`` : :py:class:`BoundingBox` The bounding box containing the detected face. ``quality`` : float The quality of the detected face, a value greater than 0. """""" if cascade is None: cascade = default_cascade() elif isinstance(cascade, str): cascade = Cascade(bob.io.base.HDF5File(cascade)) if sampler is None: sampler = Sampler(patch_size = cascade.extractor.patch_size, distance=2, scale_factor=math.pow(2.,-1./16.), lowest_scale=0.125) if image.ndim == 3: image = bob.ip.color.rgb_to_gray(image) detections = [] predictions = [] # get the detection scores for the image for prediction, bounding_box in sampler.iterate_cascade(cascade, image, None): detections.append(bounding_box) predictions.append(prediction) if not detections: return None # compute average over the best locations bb, quality = best_detection(detections, predictions, minimum_overlap, relative_prediction_threshold) return bb, quality" 1581,"def detect_all_faces(image, cascade = None, sampler = None, threshold = 0, overlaps = 1, minimum_overlap = 0.2, relative_prediction_threshold = 0.25): """"""detect_all_faces(image, [cascade], [sampler], [threshold], [overlaps], [minimum_overlap], [relative_prediction_threshold]) -> bounding_boxes, qualities Detects all faces in the given image, whose prediction values are higher than the given threshold. 
If the given ``minimum_overlap`` is lower than 1, overlapping bounding boxes are grouped, with the ``minimum_overlap`` being the minimum Jaccard similarity between two boxes to be considered to be overlapping. Afterwards, all groups which have less than ``overlaps`` elements are discarded (this measure is similar to the Viola-Jones face detector). Finally, :py:func:`average_detections` is used to compute the average bounding box for each of the groups, including averaging the detection value (which will, hence, usually decrease in value). **Parameters:** ``image`` : array_like (2D aka gray or 3D aka RGB) The image to detect a face in. ``cascade`` : str or :py:class:`Cascade` or ``None`` If given, the cascade file name or the loaded cascade to be used to classify image patches. If not given, the :py:func:`default_cascade` is used. ``sampler`` : :py:class:`Sampler` or ``None`` The sampler that defines the sampling of bounding boxes to search for the face. If not specified, a default Sampler is instantiated. ``threshold`` : float The threshold of the quality of detected faces. Detections with a quality lower than this value will not be considered. Higher thresholds will not detect all faces, while lower thresholds will generate false detections. ``overlaps`` : int The number of overlapping boxes that must exist for a bounding box to be considered. Higher values will remove a lot of false-positives, but might increase the chance of a face to be missed. The default value ``1`` will not limit the boxes. ``minimum_overlap`` : float between 0 and 1 Groups detections based on the given minimum bounding box overlap, see :py:func:`group_detections`. ``relative_prediction_threshold`` : float between 0 and 1 Limits the bounding boxes to those that have a prediction value higher then ``relative_prediction_threshold * max(predictions)`` **Returns:** ``bounding_boxes`` : [:py:class:`BoundingBox`] The bounding box containing the detected face. ``qualities`` : [float] The qualities of the ``bounding_boxes``, values greater than ``threshold``. """""" if cascade is None: cascade = default_cascade() elif isinstance(cascade, str): cascade = Cascade(bob.io.base.HDF5File(cascade)) if sampler is None: sampler = Sampler(patch_size = cascade.extractor.patch_size, distance=2, scale_factor=math.pow(2.,-1./16.), lowest_scale=0.125) if image.ndim == 3: image = bob.ip.color.rgb_to_gray(image) detections = [] predictions = [] # get the detection scores for the image for prediction, bounding_box in sampler.iterate_cascade(cascade, image, threshold): detections.append(bounding_box) predictions.append(prediction) if not detections: # No face detected return None # group overlapping detections if minimum_overlap < 1.: detections, predictions = group_detections(detections, predictions, minimum_overlap, threshold, overlaps) if not detections: return None # average them detections, predictions = zip(*[average_detections(b, q, relative_prediction_threshold) for b,q in zip(detections, predictions)]) return detections, predictions" 1582,"def cycles_created_by(callable): """""" Return graph of cyclic garbage created by the given callable. Return an :class:`~refcycle.object_graph.ObjectGraph` representing those objects generated by the given callable that can't be collected by Python's usual reference-count based garbage collection. This includes objects that will eventually be collected by the cyclic garbage collector, as well as genuinely unreachable objects that will never be collected. 
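# --- Illustrative usage of the two detectors above (a sketch; the image-loading
# call and the threshold value are assumptions and require the bob image/IO
# stack). Both functions return None when nothing is found:
#
#     image = bob.io.base.load('photo.jpg')    # gray or RGB array
#     result = detect_single_face(image)
#     if result is not None:
#         bounding_box, quality = result
#
#     result = detect_all_faces(image, threshold=20)
#     if result is not None:
#         boxes, qualities = result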
`callable` should be a callable that takes no arguments; its return value (if any) will be ignored. """""" with restore_gc_state(): gc.disable() gc.collect() gc.set_debug(gc.DEBUG_SAVEALL) callable() new_object_count = gc.collect() if new_object_count: objects = gc.garbage[-new_object_count:] del gc.garbage[-new_object_count:] else: objects = [] return ObjectGraph(objects)" 1583,"def garbage(): """""" Collect garbage and return an :class:`~refcycle.object_graph.ObjectGraph` based on collected garbage. The collected elements are removed from ``gc.garbage``, but are still kept alive by the references in the graph. Deleting the :class:`~refcycle.object_graph.ObjectGraph` instance and doing another ``gc.collect`` will remove those objects for good. """""" with restore_gc_state(): gc.disable() gc.set_debug(gc.DEBUG_SAVEALL) collected_count = gc.collect() if collected_count: objects = gc.garbage[-collected_count:] del gc.garbage[-collected_count:] else: objects = [] return ObjectGraph(objects)" 1584,"def objects_reachable_from(obj): """""" Return graph of objects reachable from *obj* via ``gc.get_referrers``. Returns an :class:`~refcycle.object_graph.ObjectGraph` object holding all objects reachable from the given one by following the output of ``gc.get_referrers``. Note that unlike the :func:`~refcycle.creators.snapshot` function, the output graph may include non-gc-tracked objects. """""" # Depth-first search. found = ObjectGraph.vertex_set() to_process = [obj] while to_process: obj = to_process.pop() found.add(obj) for referent in gc.get_referents(obj): if referent not in found: to_process.append(referent) return ObjectGraph(found)" 1585,"def snapshot(): """"""Return the graph of all currently gc-tracked objects. Excludes the returned :class:`~refcycle.object_graph.ObjectGraph` and objects owned by it. Note that a subsequent call to :func:`~refcycle.creators.snapshot` will capture all of the objects owned by this snapshot. The :meth:`~refcycle.object_graph.ObjectGraph.owned_objects` method may be helpful when excluding these objects from consideration. """""" all_objects = gc.get_objects() this_frame = inspect.currentframe() selected_objects = [] for obj in all_objects: if obj is not this_frame: selected_objects.append(obj) graph = ObjectGraph(selected_objects) del this_frame, all_objects, selected_objects, obj return graph" 1586,"def extendMarkdown(self, md, md_globals): """""" Every extension requires a extendMarkdown method to tell the markdown renderer how use the extension. """""" md.registerExtension(self) for processor in (self.preprocessors or []): md.preprocessors.add(processor.__name__.lower(), processor(md), '_end') for pattern in (self.inlinepatterns or []): md.inlinePatterns.add(pattern.__name__.lower(), pattern(md), '_end') for processor in (self.postprocessors or []): md.postprocessors.add(processor.__name__.lower(), processor(md), '_end')" 1587,"def run( paths, output=_I_STILL_HATE_EVERYTHING, recurse=core.flat, sort_by=None, ls=core.ls, stdout=stdout, ): """""" Project-oriented directory and file information lister. 
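A hedged sketch for the refcycle creator functions above (cycles_created_by, garbage, snapshot); make_cycle is a hypothetical helper used only to produce some cyclic garbage.

def make_cycle():
    node = []
    node.append(node)                     # self-referential list becomes cyclic garbage

graph = cycles_created_by(make_cycle)     # ObjectGraph of the objects the cycle left behind
leftovers = garbage()                     # ObjectGraph built from whatever gc.collect found
everything = snapshot()                   # all currently gc-tracked objects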
"""""" if output is _I_STILL_HATE_EVERYTHING: output = core.columnized if stdout.isatty() else core.one_per_line if sort_by is None: if output == core.as_tree: def sort_by(thing): return ( thing.parent(), thing.basename().lstrip(string.punctuation).lower(), ) else: def sort_by(thing): return thing def _sort_by(thing): return not getattr(thing, ""_always_sorts_first"", False), sort_by(thing) contents = [ path_and_children for path in paths or (project.from_path(FilePath(""."")),) for path_and_children in recurse(path=path, ls=ls) ] for line in output(contents, sort_by=_sort_by): stdout.write(line) stdout.write(""\n"")" 1588,"def getCustomLogger(name, logLevel, logFormat='%(asctime)s %(levelname)-9s:%(name)s:%(module)s:%(funcName)s: %(message)s'): ''' Set up logging :param str name: What log level to set :param str logLevel: What log level to use :param str logFormat: Format string for logging :rtype: logger ''' assert isinstance(logFormat, basestring), (""logFormat must be a string but is %r"" % logFormat) assert isinstance(logLevel, basestring), (""logLevel must be a string but is %r"" % logLevel) assert isinstance(name, basestring), (""name must be a string but is %r"" % name) validLogLevels = ['CRITICAL', 'DEBUG', 'ERROR', 'INFO', 'WARNING'] if not logLevel: logLevel = 'DEBUG' # If they don't specify a valid log level, err on the side of verbosity if logLevel.upper() not in validLogLevels: logLevel = 'DEBUG' numericLevel = getattr(logging, logLevel.upper(), None) if not isinstance(numericLevel, int): raise ValueError(""Invalid log level: %s"" % logLevel) logging.basicConfig(level=numericLevel, format=logFormat) logger = logging.getLogger(name) return logger" 1589,"def mkdir_p(path): ''' Mimic `mkdir -p` since os module doesn't provide one. :param str path: directory to create ''' assert isinstance(path, basestring), (""path must be a string but is %r"" % path) try: os.makedirs(path) except OSError as exception: if exception.errno != errno.EEXIST: raise" 1590,"def limit_chord_unlock_tasks(app): """""" Set max_retries for chord.unlock tasks to avoid infinitely looping tasks. (see celery/celery#1700 or celery/celery#2725) """""" task = app.tasks['celery.chord_unlock'] if task.max_retries is None: retries = getattr(app.conf, 'CHORD_UNLOCK_MAX_RETRIES', None) task.max_retries = retries" 1591,"def setup_exchanges(app): """""" Setup result exchange to route all tasks to platform queue. """""" with app.producer_or_acquire() as P: # Ensure all queues are noticed and configured with their # appropriate exchange. for q in app.amqp.queues.values(): P.maybe_declare(q)" 1592,"def setup_app(app, throw=True): """""" Ensure application is set up to expected configuration. This function is typically triggered by the worker_init signal, however it must be called manually by codebases that are run only as task producers or from within a Python shell. """""" success = True try: for func in SETUP_FUNCS: try: func(app) except Exception: success = False if throw: raise else: msg = ""Failed to run setup function %r(app)"" logger.exception(msg, func.__name__) finally: setattr(app, 'is_set_up', success)" 1593,"def insert(self, item, priority): """"""Adds item to DEPQ with given priority by performing a binary search on the concurrently rotating deque. Amount rotated R of DEPQ of length n would be n <= R <= 3n/2. 
Performance: O(n)"""""" with self.lock: self_data = self.data rotate = self_data.rotate self_items = self.items maxlen = self._maxlen try: if priority <= self_data[-1][1]: self_data.append((item, priority)) elif priority > self_data[0][1]: self_data.appendleft((item, priority)) else: length = len(self_data) + 1 mid = length // 2 shift = 0 while True: if priority <= self_data[0][1]: rotate(-mid) shift += mid mid //= 2 if mid == 0: mid += 1 else: rotate(mid) shift -= mid mid //= 2 if mid == 0: mid += 1 if self_data[-1][1] >= priority > self_data[0][1]: self_data.appendleft((item, priority)) # When returning to original position, never shift # more than half length of DEPQ i.e. if length is # 100 and we rotated -75, rotate -25, not 75 if shift > length // 2: shift = length % shift rotate(-shift) else: rotate(shift) break try: self_items[item] += 1 except TypeError: self_items[repr(item)] += 1 except IndexError: self_data.append((item, priority)) try: self_items[item] = 1 except TypeError: self_items[repr(item)] = 1 if maxlen is not None and maxlen < len(self_data): self._poplast()" 1594,"def addfirst(self, item, new_priority=None): """"""Adds item to DEPQ as highest priority. The default starting priority is 0, the default new priority is self.high(). Performance: O(1)"""""" with self.lock: self_data = self.data try: priority = self_data[0][1] if new_priority is not None: if new_priority < priority: raise ValueError('Priority must be >= ' 'highest priority.') else: priority = new_priority except IndexError: priority = 0 if new_priority is None else new_priority self_data.appendleft((item, priority)) self_items = self.items maxlen = self._maxlen try: self_items[item] += 1 except TypeError: self_items[repr(item)] += 1 if maxlen is not None and maxlen < len(self_data): self._poplast()" 1595,"def addlast(self, item, new_priority=None): """"""Adds item to DEPQ as lowest priority. The default starting priority is 0, the default new priority is self.low(). Performance: O(1)"""""" with self.lock: self_data = self.data maxlen = self._maxlen if maxlen is not None and maxlen == len(self_data): return try: priority = self_data[-1][1] if new_priority is not None: if new_priority > priority: raise ValueError('Priority must be <= ' 'lowest priority.') else: priority = new_priority except IndexError: priority = 0 if new_priority is None else new_priority self_data.append((item, priority)) self_items = self.items try: self_items[item] += 1 except TypeError: self_items[repr(item)] += 1" 1596,"def popfirst(self): """"""Removes item with highest priority from DEPQ. Returns tuple(item, priority). Performance: O(1)"""""" with self.lock: try: tup = self.data.popleft() except IndexError as ex: ex.args = ('DEPQ is already empty',) raise self_items = self.items try: self_items[tup[0]] -= 1 if self_items[tup[0]] == 0: del self_items[tup[0]] except TypeError: r = repr(tup[0]) self_items[r] -= 1 if self_items[r] == 0: del self_items[r] return tup" 1597,"def _poplast(self): """"""For avoiding lock during inserting to keep maxlen"""""" try: tup = self.data.pop() except IndexError as ex: ex.args = ('DEPQ is already empty',) raise self_items = self.items try: self_items[tup[0]] -= 1 if self_items[tup[0]] == 0: del self_items[tup[0]] except TypeError: r = repr(tup[0]) self_items[r] -= 1 if self_items[r] == 0: del self_items[r] return tup" 1598,"def first(self): """"""Gets item with highest priority. 
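A hedged sketch of the DEPQ methods shown above (insert, addfirst, popfirst, first, high); the bare DEPQ() construction is an assumption inferred from the attributes those methods use.

dq = DEPQ()
dq.insert('task-b', 1)
dq.insert('task-a', 5)
dq.addfirst('urgent')            # inherits the current highest priority (5)
print(dq.first(), dq.high())     # 'urgent' 5
print(dq.popfirst())             # ('urgent', 5)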
Performance: O(1)"""""" with self.lock: try: return self.data[0][0] except IndexError as ex: ex.args = ('DEPQ is empty',) raise" 1599,"def last(self): """"""Gets item with lowest priority. Performance: O(1)"""""" with self.lock: try: return self.data[-1][0] except IndexError as ex: ex.args = ('DEPQ is empty',) raise" 1600,"def high(self): """"""Gets highest priority. Performance: O(1)"""""" with self.lock: try: return self.data[0][1] except IndexError as ex: ex.args = ('DEPQ is empty',) raise" 1601,"def low(self): """"""Gets lowest priority. Performance: O(1)"""""" with self.lock: try: return self.data[-1][1] except IndexError as ex: ex.args = ('DEPQ is empty',) raise" 1602,"def clear(self): """"""Empties DEPQ. Performance: O(1)"""""" with self.lock: self.data.clear() self.items.clear()" 1603,"def set_maxlen(self, length): """"""Sets maxlen"""""" with self.lock: self._maxlen = length while len(self.data) > length: self._poplast()" 1604,"def count(self, item): """"""Returns number of occurrences of item in DEPQ. Performance: O(1)"""""" try: return self.items.get(item, 0) except TypeError: return self.items.get(repr(item), 0)" 1605,"def remove(self, item, count=1): """"""Removes occurrences of given item in ascending priority. Default number of removals is 1. Useful for tasks that no longer require completion, inactive clients, certain algorithms, etc. Returns a list of tuple(item, priority). Performance: O(n)"""""" with self.lock: try: count = int(count) except ValueError as ex: ex.args = ('{} cannot be represented as an ' 'integer'.format(count),) raise except TypeError as ex: ex.args = ('{} cannot be represented as an ' 'integer'.format(count),) raise removed = [] self_items = self.items try: item_freq = self_items[item] item_repr = item if item_freq == 0: return removed except TypeError: item_freq = self_items[repr(item)] item_repr = repr(item) if item_freq == 0: return removed if count == -1: count = item_freq self_data = self.data rotate = self_data.rotate pop = self_data.pop counter = 0 for i in range(len(self_data)): if count > counter and item == self_data[-1][0]: removed.append(pop()) counter += 1 continue rotate() if item_freq <= count: del self_items[item_repr] else: self_items[item_repr] -= count return removed" 1606,"def DatabaseEnabled(cls): """"""Given persistence methods to classes with this annotation. All this really does is add some functions that forward to the mapped database class. """""" if not issubclass(cls, Storable): raise ValueError( ""%s is not a subclass of gludb.datab.Storage"" % repr(cls) ) cls.ensure_table = classmethod(_ensure_table) cls.find_one = classmethod(_find_one) cls.find_all = classmethod(_find_all) cls.find_by_index = classmethod(_find_by_index) cls.save = _save cls.delete = _delete return cls" 1607,"def _find_playlist(self): """""" Internal method to populate the object given the ``id`` or ``reference_id`` that has been set in the constructor. """""" data = None if self.id: data = self.connection.get_item( 'find_playlist_by_id', playlist_id=self.id) elif self.reference_id: data = self.connection.get_item( 'find_playlist_by_reference_id', reference_id=self.reference_id) if data: self._load(data)" 1608,"def _to_dict(self): """""" Internal method that serializes object into a dictionary. 
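A hedged sketch of the DatabaseEnabled class decorator above; it assumes the gludb Storable base class is in scope and a backend has already been configured, and MyRecord is purely illustrative.

@DatabaseEnabled
class MyRecord(Storable):        # Storable is the base class the decorator checks for
    pass

MyRecord.ensure_table()          # classmethods forwarded to the mapped database class
record = MyRecord()
record.save()
fetched = MyRecord.find_one(record.id)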
"""""" data = { 'name': self.name, 'referenceId': self.reference_id, 'shortDescription': self.short_description, 'playlistType': self.type, 'id': self.id} if self.videos: for video in self.videos: if video.id not in self.video_ids: self.video_ids.append(video.id) if self.video_ids: data['videoIds'] = self.video_ids [data.pop(key) for key in data.keys() if data[key] == None] return data" 1609,"def _load(self, data): """""" Internal method that deserializes a ``pybrightcove.playlist.Playlist`` object. """""" self.raw_data = data self.id = data['id'] self.reference_id = data['referenceId'] self.name = data['name'] self.short_description = data['shortDescription'] self.thumbnail_url = data['thumbnailURL'] self.videos = [] self.video_ids = data['videoIds'] self.type = data['playlistType'] for video in data.get('videos', []): self.videos.append(pybrightcove.video.Video( data=video, connection=self.connection))" 1610,"def save(self): """""" Create or update a playlist. """""" d = self._to_dict() if len(d.get('videoIds', [])) > 0: if not self.id: self.id = self.connection.post('create_playlist', playlist=d) else: data = self.connection.post('update_playlist', playlist=d) if data: self._load(data)" 1611,"def delete(self, cascade=False): """""" Deletes this playlist. """""" if self.id: self.connection.post('delete_playlist', playlist_id=self.id, cascade=cascade) self.id = None" 1612,"def find_all(connection=None, page_size=100, page_number=0, sort_by=DEFAULT_SORT_BY, sort_order=DEFAULT_SORT_ORDER): """""" List all playlists. """""" return pybrightcove.connection.ItemResultSet(""find_all_playlists"", Playlist, connection, page_size, page_number, sort_by, sort_order)" 1613,"def find_by_ids(ids, connection=None, page_size=100, page_number=0, sort_by=DEFAULT_SORT_BY, sort_order=DEFAULT_SORT_ORDER): """""" List playlists by specific IDs. """""" ids = ','.join([str(i) for i in ids]) return pybrightcove.connection.ItemResultSet('find_playlists_by_ids', Playlist, connection, page_size, page_number, sort_by, sort_order, playlist_ids=ids)" 1614,"def find_by_reference_ids(reference_ids, connection=None, page_size=100, page_number=0, sort_by=DEFAULT_SORT_BY, sort_order=DEFAULT_SORT_ORDER): """""" List playlists by specific reference_ids. """""" reference_ids = ','.join([str(i) for i in reference_ids]) return pybrightcove.connection.ItemResultSet( ""find_playlists_by_reference_ids"", Playlist, connection, page_size, page_number, sort_by, sort_order, reference_ids=reference_ids)" 1615,"def find_for_player_id(player_id, connection=None, page_size=100, page_number=0, sort_by=DEFAULT_SORT_BY, sort_order=DEFAULT_SORT_ORDER): """""" List playlists for a for given player id. 
"""""" return pybrightcove.connection.ItemResultSet( ""find_playlists_for_player_id"", Playlist, connection, page_size, page_number, sort_by, sort_order, player_id=player_id)" 1616,"def is_any_type_set(sett: Set[Type]) -> bool: """""" Helper method to check if a set of types is the {AnyObject} singleton :param sett: :return: """""" return len(sett) == 1 and is_any_type(min(sett))" 1617,"def get_validated_types(object_types: Set[Type], set_name: str) -> Set[Type]: """""" Utility to validate a set of types : * None is not allowed as a whole or within the set, * object and Any are converted into AnyObject * if AnyObject is in the set, it must be the only element :param object_types: the set of types to validate :param set_name: a name used in exceptions if any :return: the fixed set of types """""" check_var(object_types, var_types=set, var_name=set_name) res = {get_validated_type(typ, set_name + '[x]') for typ in object_types} if AnyObject in res and len(res) > 1: raise ValueError('The set of types contains \'object\'/\'Any\'/\'AnyObject\', so no other type must be present ' 'in the set') else: return res" 1618,"def get_validated_type(object_type: Type[Any], name: str, enforce_not_joker: bool = True) -> Type[Any]: """""" Utility to validate a type : * None is not allowed, * 'object', 'AnyObject' and 'Any' lead to the same 'AnyObject' type * JOKER is either rejected (if enforce_not_joker is True, default) or accepted 'as is' :param object_type: the type to validate :param name: a name used in exceptions if any :param enforce_not_joker: a boolean, set to False to tolerate JOKER types :return: the fixed type """""" if object_type is object or object_type is Any or object_type is AnyObject: return AnyObject else: # -- !! Do not check TypeVar or Union : this is already handled at higher levels -- if object_type is JOKER: # optionally check if JOKER is allowed if enforce_not_joker: raise ValueError('JOKER is not allowed for object_type') else: # note: we dont check var earlier, since 'typing.Any' is not a subclass of type anymore check_var(object_type, var_types=type, var_name=name) return object_type" 1619,"def get_options_for_id(options: Dict[str, Dict[str, Any]], identifier: str): """""" Helper method, from the full options dict of dicts, to return either the options related to this parser or an empty dictionary. It also performs all the var type checks :param options: :param identifier: :return: """""" check_var(options, var_types=dict, var_name='options') res = options[identifier] if identifier in options.keys() else dict() check_var(res, var_types=dict, var_name='options[' + identifier + ']') return res" 1620,"def is_able_to_convert_detailed(self, strict: bool, from_type: Type[Any], to_type: Type[Any]) \ -> Tuple[bool, bool, bool]: """""" Utility method to check if a parser is able to convert a given type to the given type, either in * strict mode : provided_type and desired_type must be equal to this converter's from_type and to_type respectively (or the to_type does not match but this converter is generic * inference mode (non-strict) : provided_type may be a subclass of from_type, and to_type may be a subclass of desired_type If a custom function was provided at construction time, it is called to enable converters to reject some conversions based on source and/or dest type provided. :param strict: a boolean indicating if matching should be in strict mode or not :param from_type: :param to_type: :return: a tuple of 3 booleans : (does match?, strict source match? 
(None if no match), strict dest match? (None if no match)) """""" # (1) first handle the easy joker+joker case if from_type is JOKER and to_type is JOKER: return True, None, None # Don't validate types -- this is called too often at the initial RootParser instance creation time, # and this is quite 'internal' so the risk is very low # # check_var(strict, var_types=bool, var_name='strict') # if from_type is not JOKER: # check_var(from_type, var_types=type, var_name='from_type') # if to_type is not JOKER: # check_var(to_type, var_types=type, var_name='to_type') # -- first call custom checker if provided if self.is_able_to_convert_func is not None: # TODO Maybe one day, rather push the JOKER to the function ? not sure that it will be more explicit.. if not self.is_able_to_convert_func(strict, from_type=None if from_type is JOKER else from_type, to_type=None if to_type is JOKER else to_type): return False, None, None # -- from_type strict match if (from_type is JOKER) or (from_type is self.from_type) or is_any_type(from_type): # -- check to type strict if (to_type is JOKER) or self.is_generic() or (to_type is self.to_type): return True, True, True # strict to_type match # -- check to type non-strict elif (not strict) and issubclass(self.to_type, to_type): return True, True, False # approx to_type match # -- from_type non-strict match elif (not strict) and issubclass(from_type, self.from_type): # -- check to type strict if (to_type is JOKER) or self.is_generic() or (to_type is self.to_type): return True, False, True # exact to_type match # -- check to type non-strict elif (not strict) and issubclass(self.to_type, to_type): return True, False, False # approx to_type match # -- otherwise no match return False, None, None" 1621,"def are_worth_chaining(left_converter, right_converter) -> bool: """""" Utility method to check if it makes sense to chain these two converters. Returns True if it brings value to chain the first converter with the second converter. To bring value, * the second converter's input should not be a parent class of the first converter's input (in that case, it is always more interesting to use the second converter directly for any potential input) * the second converter's output should not be a parent class of the first converter's input or output. Otherwise the chain does not even make any progress :) * The first converter has to allow chaining (with converter.can_chain=True) :param left_converter: :param right_converter: :return: """""" if not left_converter.can_chain: return False elif not is_any_type(left_converter.to_type) and is_any_type(right_converter.to_type): # we gain the capability to generate any type. So it is interesting. return True elif issubclass(left_converter.from_type, right_converter.to_type) \ or issubclass(left_converter.to_type, right_converter.to_type) \ or issubclass(left_converter.from_type, right_converter.from_type): # Not interesting : the outcome of the chain would be not better than one of the converters alone return False # Note: we dont say that chaining a generic converter with a converter is useless. Indeed it might unlock some # capabilities for the user (new file extensions, etc.) that would not be available with the generic parser # targetting to_type alone. 
For example parsing object A from its constructor then converting A to B might # sometimes be interesting, rather than parsing B from its constructor else: # interesting return True" 1622,"def can_be_appended_to(self, left_converter, strict: bool) -> bool: """""" Utility method to check if this (self) converter can be appended after the output of the provided converter. This method does not check if it makes sense, it just checks if the output type of the left converter is compliant with the input type of this converter. Compliant means: * strict mode : type equality * non-strict mode : output type of left_converter should be a subclass of input type of this converter In addition, the custom function provided in constructor may be used to reject conversion (see is_able_to_convert for details) :param left_converter: :param strict: boolean to :return: """""" is_able_to_take_input = self.is_able_to_convert(strict, from_type=left_converter.to_type, to_type=JOKER) if left_converter.is_generic(): return is_able_to_take_input \ and left_converter.is_able_to_convert(strict, from_type=JOKER, to_type=self.from_type) else: return is_able_to_take_input" 1623,"def get_applicable_options(self, options: Dict[str, Dict[str, Any]]): """""" Returns the options that are applicable to this particular converter, from the full map of options. It first uses 'get_id_for_options()' to know the id of this parser, and then simply extracts the contents of the options corresponding to this id, or returns an empty dict(). :param options: a dictionary converter_id > options :return: """""" return get_options_for_id(options, self.get_id_for_options())" 1624,"def _convert(self, desired_type: Type[T], source_obj: S, logger: Logger, options: Dict[str, Dict[str, Any]]) -> T: """""" Implementing classes should implement this method to perform the conversion itself :param desired_type: the destination type of the conversion :param source_obj: the source object that should be converter :param logger: a logger to use if any is available, or None :param options: additional options map. Implementing classes may use 'self.get_applicable_options()' to get the options that are of interest for this converter. :return: """""" pass" 1625,"def create_not_able_to_convert(source: S, converter: Converter, desired_type: Type[T]): """""" Helper method provided because we actually can't put that in the constructor, it creates a bug in Nose tests https://github.com/nose-devs/nose/issues/725 :param source: :param converter: :param desired_type: :return: """""" base_msg = 'Converter ' + str(converter) + ' is not able to ingest source value \'' + str(source) + '\''\ ' of type \'' + get_pretty_type_str(type(source)) + '\' and/or convert it to type \'' \ + get_pretty_type_str(desired_type) + '\'.' base_msg += ' This can happen in a chain when the previous step in the chain is generic and actually produced '\ ' an output of the wrong type/content' return ConversionException(base_msg)" 1626,"def create(converter_func: ConversionMethod, caught: Exception): """""" Helper method provided because we actually can't put that in the constructor, it creates a bug in Nose tests https://github.com/nose-devs/nose/issues/725 :param converter_func: :param caught: :return: """""" msg = 'Caught TypeError while calling conversion function \'' + str(converter_func.__name__) + '\'. 
' \ 'Note that the conversion function signature should be \'' + conversion_method_example_signature_str \ + '\' (unpacked options mode - default) or ' + multioptions_conversion_method_example_signature_str \ + ' (unpack_options = False).' \ + 'Caught error message is : ' + caught.__class__.__name__ + ' : ' + str(caught) return CaughtTypeError(msg).with_traceback(caught.__traceback__)" 1627,"def _convert(self, desired_type: Type[T], source_obj: S, logger: Logger, options: Dict[str, Dict[str, Any]]) -> T: """""" Delegates to the user-provided method. Passes the appropriate part of the options according to the function name. :param desired_type: :param source_obj: :param logger: :param options: :return: """""" try: if self.unpack_options: opts = self.get_applicable_options(options) if self.function_args is not None: return self.conversion_method(desired_type, source_obj, logger, **self.function_args, **opts) else: return self.conversion_method(desired_type, source_obj, logger, **opts) else: if self.function_args is not None: return self.conversion_method(desired_type, source_obj, logger, options, **self.function_args) else: return self.conversion_method(desired_type, source_obj, logger, options) except TypeError as e: raise CaughtTypeError.create(self.conversion_method, e)" 1628,"def is_able_to_convert_detailed(self, strict: bool, from_type: Type[Any], to_type: Type[Any]): """""" Overrides the parent method to delegate left check to the first (left) converter of the chain and right check to the last (right) converter of the chain. This includes custom checking if they have any... see Converter.is_able_to_convert for details :param strict: :param from_type: :param to_type: :return: """""" # check if first and last converters are happy if not self._converters_list[0].is_able_to_convert(strict, from_type=from_type, to_type=JOKER): return False, None, None elif not self._converters_list[-1].is_able_to_convert(strict, from_type=JOKER, to_type=to_type): return False, None, None else: # behave as usual. This is probably useless but lets be sure. return super(ConversionChain, self).is_able_to_convert_detailed(strict, from_type, to_type)" 1629,"def remove_first(self, inplace: bool = False): """""" Utility method to remove the first converter of this chain. If inplace is True, this object is modified and None is returned. Otherwise, a copy is returned :param inplace: boolean indicating whether to modify this object (True) or return a copy (False) :return: None or a copy with the first converter removed """""" if len(self._converters_list) > 1: if inplace: self._converters_list = self._converters_list[1:] # update the current source type self.from_type = self._converters_list[0].from_type return else: new = copy(self) new._converters_list = new._converters_list[1:] # update the current source type new.from_type = new._converters_list[0].from_type return new else: raise ValueError('cant remove first: would make it empty!')" 1630,"def add_conversion_steps(self, converters: List[Converter], inplace: bool = False): """""" Utility method to add converters to this chain. If inplace is True, this object is modified and None is returned. 
Otherwise, a copy is returned :param converters: the list of converters to add :param inplace: boolean indicating whether to modify this object (True) or return a copy (False) :return: None or a copy with the converters added """""" check_var(converters, var_types=list, min_len=1) if inplace: for converter in converters: self.add_conversion_step(converter, inplace=True) else: new = copy(self) new.add_conversion_steps(converters, inplace=True) return new" 1631,"def add_conversion_step(self, converter: Converter[S, T], inplace: bool = False): """""" Utility method to add a converter to this chain. If inplace is True, this object is modified and None is returned. Otherwise, a copy is returned :param converter: the converter to add :param inplace: boolean indicating whether to modify this object (True) or return a copy (False) :return: None or a copy with the converter added """""" # it the current chain is generic, raise an error if self.is_generic() and converter.is_generic(): raise ValueError('Cannot chain this generic converter chain to the provided converter : it is generic too!') # if the current chain is able to transform its input into a valid input for the new converter elif converter.can_be_appended_to(self, self.strict): if inplace: self._converters_list.append(converter) # update the current destination type self.to_type = converter.to_type return else: new = copy(self) new._converters_list.append(converter) # update the current destination type new.to_type = converter.to_type return new else: raise TypeError('Cannnot register a converter on this conversion chain : source type \'' + get_pretty_type_str(converter.from_type) + '\' is not compliant with current destination type of the chain : \'' + get_pretty_type_str(self.to_type) + ' (this chain performs ' + ('' if self.strict else 'non-') + 'strict mode matching)')" 1632,"def insert_conversion_steps_at_beginning(self, converters: List[Converter], inplace: bool = False): """""" Utility method to insert converters at the beginning ofthis chain. If inplace is True, this object is modified and None is returned. Otherwise, a copy is returned :param converters: the list of converters to insert :param inplace: boolean indicating whether to modify this object (True) or return a copy (False) :return: None or a copy with the converters added """""" if inplace: for converter in reversed(converters): self.insert_conversion_step_at_beginning(converter, inplace=True) return else: new = copy(self) for converter in reversed(converters): # do inplace since it is a copy new.insert_conversion_step_at_beginning(converter, inplace=True) return new" 1633,"def _convert(self, desired_type: Type[T], obj: S, logger: Logger, options: Dict[str, Dict[str, Any]]) -> T: """""" Apply the converters of the chain in order to produce the desired result. Only the last converter will see the 'desired type', the others will be asked to produce their declared to_type. :param desired_type: :param obj: :param logger: :param options: :return: """""" for converter in self._converters_list[:-1]: # convert into each converters destination type obj = converter.convert(converter.to_type, obj, logger, options) # the last converter in the chain should convert to desired type return self._converters_list[-1].convert(desired_type, obj, logger, options)" 1634,"def are_worth_chaining(first_converter: Converter, second_converter: Converter) -> bool: """""" This is a generalization of Converter.are_worth_chaining(), to support ConversionChains. 
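A purely illustrative sketch of the application order implemented by ConversionChain._convert above, using plain callables as stand-ins for the real converter objects.

steps = [int, float, str]      # stand-ins for the chained converters
obj = '42'
for step in steps[:-1]:
    obj = step(obj)            # intermediate steps produce their own declared to_type
result = steps[-1](obj)        # only the last step targets the caller's desired type
print(result)                  # '42.0'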
:param first_converter: :param second_converter: :return: """""" if isinstance(first_converter, ConversionChain): if isinstance(second_converter, ConversionChain): # BOTH are chains for sec_conv in second_converter._converters_list: for fir_conv in first_converter._converters_list: if not Converter.are_worth_chaining(fir_conv, sec_conv): return False else: for fir_conv in first_converter._converters_list: if not Converter.are_worth_chaining(fir_conv, second_converter): return False else: if isinstance(second_converter, ConversionChain): for sec_conv in second_converter._converters_list: if not Converter.are_worth_chaining(first_converter, sec_conv): return False else: # Neither is a chain if not Converter.are_worth_chaining(first_converter, second_converter): return False # finally return True if nothing proved otherwise return True" 1635,"def chain(first_converter, second_converter, strict: bool): """""" Utility method to chain two converters. If any of them is already a ConversionChain, this method ""unpacks"" it first. Note: the created conversion chain is created with the provided 'strict' flag, that may be different from the ones of the converters (if compliant). For example you may chain a 'strict' chain with a 'non-strict' chain, to produce a 'non-strict' chain. :param first_converter: :param second_converter: :param strict: :return: """""" if isinstance(first_converter, ConversionChain): if isinstance(second_converter, ConversionChain): # BOTH are chains if (first_converter.strict == strict) and (second_converter.strict == strict): return first_converter.add_conversion_steps(second_converter._converters_list) else: if not strict: # create a non-strict chain return ConversionChain(initial_converters=first_converter._converters_list, strict_chaining=False) \ .add_conversion_steps(second_converter._converters_list) else: raise ValueError('Trying to chain conversion chains with different strict modes than expected') else: # FIRST is a chain if strict == first_converter.strict: return first_converter.add_conversion_step(second_converter) else: if not strict: # create a non-strict chain return ConversionChain(initial_converters=[second_converter], strict_chaining=False) \ .insert_conversion_steps_at_beginning(first_converter._converters_list) else: raise ValueError('Trying to chain after a conversion chain that has different strict mode than ' 'expected') else: if isinstance(second_converter, ConversionChain): # SECOND is a chain if strict == second_converter.strict: return second_converter.insert_conversion_step_at_beginning(first_converter) else: if not strict: # create a non-strict chain return ConversionChain(initial_converters=[first_converter], strict_chaining=False) \ .add_conversion_steps(second_converter._converters_list) else: raise ValueError( 'Trying to chain before a conversion chain that has different strict mode than ' 'expected') else: # Neither is a chain return ConversionChain([first_converter, second_converter], strict)" 1636,"def main(as_module=False): """"""This is copy/paste of flask.cli.main to instanciate our own group """""" this_module = __package__ args = sys.argv[1:] if as_module: if sys.version_info >= (2, 7): name = 'python -m ' + this_module.rsplit('.', 1)[0] else: name = 'python -m ' + this_module # This module is always executed as ""python -m flask.run"" and as such # we need to ensure that we restore the actual command line so that # the reloader can properly operate. 
sys.argv = ['-m', this_module] + sys.argv[1:] else: name = None cli.main(args=args, prog_name=name)" 1637,"def init_app(self, app, entry_point_group='invenio_queues.queues'): """"""Flask application initialization."""""" self.init_config(app) app.extensions['invenio-queues'] = _InvenioQueuesState( app, app.config['QUEUES_CONNECTION_POOL'], entry_point_group=entry_point_group ) return app" 1638,"def parse_result(result): """"""parse_result(json result) -- print the web query according to the type of result from duckduckgo. """""" if(result['Type'] == 'D'): print """"""There is more than one answer for this. Try making your query\ more specific. For example, if you want to learn about apple the company\ and not apple the fruit, try something like apple inc or apple computers. """""" elif(result['Type'] == 'A'): print result['AbstractText'] print '\nResults from DuckDuckGo' elif(result['Type'] == 'C'): for entry in result['RelatedTopics']: print entry['Text'] print ""\n"" else: print ""I do not know how to process this query at the moment.""" 1639,"def query(string): """"""query(user string) -- make http request to duckduckgo api, to get result in json format, then call parse_result. """""" url = ""https://api.duckduckgo.com/?q="" formating = ""&format=json"" query_string = url+'+'.join(string)+formating try: result = json.loads(requests.get(query_string).text) except: print ""I'm sorry! Something went wrong. Maybe we could try again later."" return parse_result(result)" 1640,"def parse_gpx(gpx_element, gpx_extensions_parser=None, metadata_extensions_parser=None, waypoint_extensions_parser=None, route_extensions_parser=None, track_extensions_parser=None, segment_extensions_parser=None, gpxns=None): """"""Parse a GPX file into a GpxModel. Args: gpx_element: gpx_element: The root <gpx> element of an XML document containing a version attribute. GPX versions 1.1 is supported. gpx_extensions_parser: An optional callable which accepts an Element with the 'extensions' tag and returns a list of model objects representing the extensions. If not specified, extensions are ignored. metadata_extensions_parser: An optional callable which accepts an Element with the 'extensions' tag and returns a list of model objects representing the extensions. If not specified, extensions are ignored. waypoint_extensions_parser: An optional callable which accepts an Element with the 'extensions' tag and returns a list of model objects representing the extensions. If not specified, extensions are ignored. route_extensions_parser: An optional callable which accepts an Element with the 'extensions' tag and returns a list of model objects representing the extensions. If not specified, extensions are ignored. track_extensions_parser: An optional callable which accepts an Element with the 'extensions' tag and returns a list of model objects representing the extensions. If not specified, extensions are ignored. segment_extensions_parser: An optional callable which accepts an Element with the 'extensions' tag and returns a list of model objects representing the extensions. If not specified, extensions are ignored. Returns: A GpxModel representing the data from the supplies xml. Raises: ValueError: The supplied XML could not be parsed as GPX. 
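A hedged sketch for the DuckDuckGo query helper above; the original is Python 2 code (print statements), and the search terms are placeholders.

query(['guido', 'van', 'rossum'])    # prints the abstract returned by api.duckduckgo.com
query(['apple'])                     # ambiguous queries print the 'be more specific' hint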
"""""" gpxns = gpxns if gpxns is not None else determine_gpx_namespace(gpx_element) if gpx_element.tag != gpxns+'gpx': raise ValueError(""No gpx root element"") creator = gpx_element.attrib['creator'] version = gpx_element.attrib['version'] if not version.startswith('1.1'): raise ValueError(""Not a GPX 1.1 file"") metadata_element = gpx_element.find(gpxns+'metadata') metadata = nullable(parse_metadata)(metadata_element, gpxns) waypoint_elements = gpx_element.findall(gpxns+'wpt') waypoints = [parse_waypoint(waypoint_element, gpxns) for waypoint_element in waypoint_elements] route_elements = gpx_element.findall(gpxns+'rte') routes = [parse_route(route_element, gpxns) for route_element in route_elements] track_elements = gpx_element.findall(gpxns+'trk') tracks = [parse_track(track_element, gpxns) for track_element in track_elements] extensions_element = gpx_element.find(gpxns+'extensions') extensions = nullable(parse_gpx_extensions)(extensions_element, gpxns) gpx_model = GpxModel(creator, metadata, waypoints, routes, tracks, extensions) return gpx_model" 1641,"def listens_to(name, sender=None, weak=True): """"""Listens to a named signal """""" def decorator(f): if sender: return signal(name).connect(f, sender=sender, weak=weak) return signal(name).connect(f, weak=weak) return decorator" 1642,"def LoadInstallations(counter): """"""Load installed packages and export the version map. This function may be called multiple times, but the counters will be increased each time. Since Prometheus counters are never decreased, the aggregated results will not make sense. """""" process = subprocess.Popen([""pip"", ""list"", ""--format=json""], stdout=subprocess.PIPE) output, _ = process.communicate() installations = json.loads(output) for i in installations: counter.labels(i[""name""], i[""version""]).inc()" 1643,"def RESTrequest(*args, **kwargs): """"""return and save the blob of data that is returned from kegg without caring to the format"""""" verbose = kwargs.get('verbose', False) force_download = kwargs.get('force', False) save = kwargs.get('force', True) # so you can copy paste from kegg args = list(chain.from_iterable(a.split('/') for a in args)) args = [a for a in args if a] request = 'http://rest.kegg.jp/' + ""/"".join(args) print_verbose(verbose, ""richiedo la pagina: "" + request) filename = ""KEGG_"" + ""_"".join(args) try: if force_download: raise IOError() print_verbose(verbose, ""loading the cached file "" + filename) with open(filename, 'r') as f: data = pickle.load(f) except IOError: print_verbose(verbose, ""downloading the library,it may take some time"") import urllib2 try: req = urllib2.urlopen(request) data = req.read() if save: with open(filename, 'w') as f: print_verbose(verbose, ""saving the file to "" + filename) pickle.dump(data, f) # clean the error stacktrace except urllib2.HTTPError as e: raise e return data" 1644,"def command_help_long(self): """""" Return command help for use in global parser usage string @TODO update to support self.current_indent from formatter """""" indent = "" "" * 2 # replace with current_indent help = ""Command must be one of:\n"" for action_name in self.parser.valid_commands: help += ""%s%-10s %-70s\n"" % (indent, action_name, self.parser.commands[action_name].desc_short.capitalize()) help += '\nSee \'%s help COMMAND\' for help and information on a command' % self.parser.prog return help" 1645,"def _print(self, helpstr, file=None): """""". 
"""""" if file is None: file = sys.stdout encoding = self._get_encoding(file) file.write(helpstr.encode(encoding, ""replace""))" 1646,"def run(self): """""" Run the multiopt parser """""" self.parser = MultioptOptionParser( usage=""%prog <command> [options] [args]"", prog=self.clsname, version=self.version, option_list=self.global_options, description=self.desc_short, commands=self.command_set, epilog=self.footer ) try: self.options, self.args = self.parser.parse_args(self.argv) except Exception, e: print str(e) pass if len(self.args) < 1: self.parser.print_lax_help() return 2 self.command = self.args.pop(0) showHelp = False if self.command == 'help': if len(self.args) < 1: self.parser.print_lax_help() return 2 else: self.command = self.args.pop() showHelp = True if self.command not in self.valid_commands: self.parser.print_cmd_error(self.command) return 2 self.command_set[self.command].set_cmdname(self.command) subcmd_parser = self.command_set[self.command].get_parser(self.clsname, self.version, self.global_options) subcmd_options, subcmd_args = subcmd_parser.parse_args(self.args) if showHelp: subcmd_parser.print_help_long() return 1 try: self.command_set[self.command].func(subcmd_options, *subcmd_args) except (CommandError, TypeError), e: # self.parser.print_exec_error(self.command, str(e)) subcmd_parser.print_exec_error(self.command, str(e)) print # @TODO show command help # self.parser.print_lax_help() return 2 return 1" 1647,"def list(self, community=None, hostfilter=None, host=None): """""" Returns a list of SNMP information for a community, hostfilter or host :param snmpstring: A specific SNMP string to list :param hostfilter: Valid hostfilter or None :param host: t_hosts.id or t_hosts.f_ipaddr :return: [ [ record_id, ipaddr, hostname, community, access, version ] ... 
] """""" return self.send.snmp_list(community, hostfilter, host)" 1648,"def add(self, host=None, f_community=None, f_access=None, f_version=None): """""" Add an SNMP community string to a host :param host: t_hosts.id or t_hosts.f_ipaddr :param f_community: Community string to add :param f_access: READ or WRITE :param f_version: v1, v2c or v3 :return: (True/False, t_snmp.id/Error string) """""" return self.send.snmp_add(host, f_community, f_access, f_version)" 1649,"def delete_collection(db_name, collection_name, host='localhost', port=27017): """"""Almost exclusively for testing."""""" client = MongoClient(""mongodb://%s:%d"" % (host, port)) client[db_name].drop_collection(collection_name)" 1650,"def ensure_table(self, cls): """"""Required functionality."""""" coll_name = cls.get_table_name() try: db = self.mongo_client.get_default_database() db.create_collection(coll_name) except CollectionInvalid: pass # Expected if collection already exists # Make sure we have indexes coll = self.get_collection(coll_name) for idx_name in cls.index_names(): coll.ensure_index(idx_name)" 1651,"def find_one(self, cls, id): """"""Required functionality."""""" one = self._find(cls, {""_id"": id}) if not one: return None return one[0]" 1652,"def find_by_index(self, cls, index_name, value): """"""Required functionality."""""" return self._find(cls, {index_name: str(value)})" 1653,"def save(self, obj): """"""Required functionality."""""" if not obj.id: obj.id = uuid() stored_data = { '_id': obj.id, 'value': json.loads(obj.to_data()) } index_vals = obj.indexes() or {} for key in obj.__class__.index_names() or []: val = index_vals.get(key, '') stored_data[key] = str(val) coll = self.get_collection(obj.__class__.get_table_name()) coll.update({""_id"": obj.id}, stored_data, upsert=True)" 1654,"def delete(self, obj): """"""Required functionality."""""" del_id = obj.get_id() if not del_id: return coll = self.get_collection(obj.__class__.get_table_name()) coll.delete_one({""_id"": del_id})" 1655,"def new(cls, __name, __fields, **defaults): ''' Creates a new class that can represent a record with the specified *fields*. This is equal to a mutable namedtuple. The returned class also supports keyword arguments in its constructor. :param __name: The name of the recordclass. :param __fields: A string or list of field names. :param defaults: Default values for fields. The defaults may list field names that haven't been listed in *fields*. ''' name = __name fields = __fields fieldset = set(fields) if isinstance(fields, str): if ',' in fields: fields = fields.split(',') else: fields = fields.split() else: fields = list(fields) for key in defaults.keys(): if key not in fields: fields.append(key) class _record(cls): __slots__ = fields __defaults__ = defaults _record.__name__ = name return _record" 1656,"def _check_1st_line(line, **kwargs): """"""First line check. Check that the first line has a known component name followed by a colon and then a short description of the commit. 
:param line: first line :type line: str :param components: list of known component names :type line: list :param max_first_line: maximum length of the first line :type max_first_line: int :return: errors as in (code, line number, *args) :rtype: list """""" components = kwargs.get(""components"", ()) max_first_line = kwargs.get(""max_first_line"", 50) errors = [] lineno = 1 if len(line) > max_first_line: errors.append((""M190"", lineno, max_first_line, len(line))) if line.endswith("".""): errors.append((""M191"", lineno)) if ':' not in line: errors.append((""M110"", lineno)) else: component, msg = line.split(':', 1) if component not in components: errors.append((""M111"", lineno, component)) return errors" 1657,"def _check_bullets(lines, **kwargs): """"""Check that the bullet point list is well formatted. Each bullet point shall have one space before and after it. The bullet character is the ""*"" and there is no space before it but one after it meaning the next line are starting with two blanks spaces to respect the indentation. :param lines: all the lines of the message :type lines: list :param max_lengths: maximum length of any line. (Default 72) :return: errors as in (code, line number, *args) :rtype: list """""" max_length = kwargs.get(""max_length"", 72) labels = {l for l, _ in kwargs.get(""commit_msg_labels"", tuple())} def _strip_ticket_directives(line): return re.sub(r'( \([^)]*\)){1,}$', '', line) errors = [] missed_lines = [] skipped = [] for (i, line) in enumerate(lines[1:]): if line.startswith('*'): dot_found = False if len(missed_lines) > 0: errors.append((""M130"", i + 2)) if lines[i].strip() != '': errors.append((""M120"", i + 2)) if _strip_ticket_directives(line).endswith('.'): dot_found = True label = _re_bullet_label.search(line) if label and label.group('label') not in labels: errors.append((""M122"", i + 2, label.group('label'))) for (j, indented) in enumerate(lines[i + 2:]): if indented.strip() == '': break if not re.search(r""^ {2}\S"", indented): errors.append((""M121"", i + j + 3)) else: skipped.append(i + j + 1) stripped_line = _strip_ticket_directives(indented) if stripped_line.endswith('.'): dot_found = True elif stripped_line.strip(): dot_found = False if not dot_found: errors.append((""M123"", i + 2)) elif i not in skipped and line.strip(): missed_lines.append((i + 2, line)) if len(line) > max_length: errors.append((""M190"", i + 2, max_length, len(line))) return errors, missed_lines" 1658,"def _check_signatures(lines, **kwargs): """"""Check that the signatures are valid. There should be at least three signatures. If not, one of them should be a trusted developer/reviewer. Formatting supported being: [signature] full name <email@address> :param lines: lines (lineno, content) to verify. :type lines: list :param signatures: list of supported signature :type signatures: list :param alt_signatures: list of alternative signatures, not counted :type alt_signatures: list :param trusted: list of trusted reviewers, the e-mail address. :type trusted: list :param min_reviewers: minimal number of reviewers needed. 
(Default 3) :type min_reviewers: int :return: errors as in (code, line number, *args) :rtype: list """""" trusted = kwargs.get(""trusted"", ()) signatures = tuple(kwargs.get(""signatures"", ())) alt_signatures = tuple(kwargs.get(""alt_signatures"", ())) min_reviewers = kwargs.get(""min_reviewers"", 3) matching = [] errors = [] signatures += alt_signatures test_signatures = re.compile(""^({0})"".format(""|"".join(signatures))) test_alt_signatures = re.compile(""^({0})"".format(""|"".join(alt_signatures))) for i, line in lines: if signatures and test_signatures.search(line): if line.endswith("".""): errors.append((""M191"", i)) if not alt_signatures or not test_alt_signatures.search(line): matching.append(line) else: errors.append((""M102"", i)) if not matching: errors.append((""M101"", 1)) errors.append((""M100"", 1)) elif len(matching) < min_reviewers: pattern = re.compile('|'.join(map(lambda x: '<' + re.escape(x) + '>', trusted))) trusted_matching = list(filter(None, map(pattern.search, matching))) if len(trusted_matching) == 0: errors.append((""M100"", 1)) return errors" 1659,"def check_message(message, **kwargs): """"""Check the message format. Rules: - the first line must start by a component name - and a short description (52 chars), - then bullet points are expected - and finally signatures. :param components: compontents, e.g. ``('auth', 'utils', 'misc')`` :type components: `list` :param signatures: signatures, e.g. ``('Signed-off-by', 'Reviewed-by')`` :type signatures: `list` :param alt_signatures: alternative signatures, e.g. ``('Tested-by',)`` :type alt_signatures: `list` :param trusted: optional list of reviewers, e.g. ``('john.doe@foo.org',)`` :type trusted: `list` :param max_length: optional maximum line length (by default: 72) :type max_length: int :param max_first_line: optional maximum first line length (by default: 50) :type max_first_line: int :param allow_empty: optional way to allow empty message (by default: False) :type allow_empty: bool :return: errors sorted by line number :rtype: `list` """""" if kwargs.pop(""allow_empty"", False): if not message or message.isspace(): return [] lines = re.split(r""\r\n|\r|\n"", message) errors = _check_1st_line(lines[0], **kwargs) err, signature_lines = _check_bullets(lines, **kwargs) errors += err errors += _check_signatures(signature_lines, **kwargs) def _format(code, lineno, args): return ""{0}: {1} {2}"".format(lineno, code, _messages_codes[code].format(*args)) return list(map(lambda x: _format(x[0], x[1], x[2:]), sorted(errors, key=lambda x: x[0])))" 1660,"def _register_pyflakes_check(): """"""Register the pyFlakes checker into PEP8 set of checks."""""" from flake8_isort import Flake8Isort from flake8_blind_except import check_blind_except # Resolving conflicts between pep8 and pyflakes. 
codes = { ""UnusedImport"": ""F401"", ""ImportShadowedByLoopVar"": ""F402"", ""ImportStarUsed"": ""F403"", ""LateFutureImport"": ""F404"", ""Redefined"": ""F801"", ""RedefinedInListComp"": ""F812"", ""UndefinedName"": ""F821"", ""UndefinedExport"": ""F822"", ""UndefinedLocal"": ""F823"", ""DuplicateArgument"": ""F831"", ""UnusedVariable"": ""F841"", } for name, obj in vars(pyflakes.messages).items(): if name[0].isupper() and obj.message: obj.tpl = ""{0} {1}"".format(codes.get(name, ""F999""), obj.message) pep8.register_check(_PyFlakesChecker, codes=['F']) # FIXME parser hack parser = pep8.get_parser('', '') Flake8Isort.add_options(parser) options, args = parser.parse_args([]) # end of hack pep8.register_check(Flake8Isort, codes=['I']) pep8.register_check(check_blind_except, codes=['B90'])" 1661,"def is_file_excluded(filename, excludes): """"""Check if the file should be excluded. :param filename: file name :param excludes: list of regex to match :return: True if the file should be excluded """""" # check if you need to exclude this file return any([exclude and re.match(exclude, filename) is not None for exclude in excludes])" 1662,"def check_pep8(filename, **kwargs): """"""Perform static analysis on the given file. :param filename: path of file to check. :type filename: str :param ignore: codes to ignore, e.g. ``('E111', 'E123')`` :type ignore: `list` :param select: codes to explicitly select. :type select: `list` :param pyflakes: run the pyflakes checks too (default ``True``) :type pyflakes: bool :return: errors :rtype: `list` .. seealso:: :py:class:`pycodestyle.Checker` """""" options = { ""ignore"": kwargs.get(""ignore""), ""select"": kwargs.get(""select""), } if not _registered_pyflakes_check and kwargs.get(""pyflakes"", True): _register_pyflakes_check() checker = pep8.Checker(filename, reporter=_Report, **options) checker.check_all() errors = [] for error in sorted(checker.report.errors, key=lambda x: x[0]): errors.append(""{0}:{1}: {3}"".format(*error)) return errors" 1663,"def check_pydocstyle(filename, **kwargs): """"""Perform static analysis on the given file docstrings. :param filename: path of file to check. :type filename: str :param ignore: codes to ignore, e.g. ('D400',) :type ignore: `list` :param match: regex the filename has to match to be checked :type match: str :param match_dir: regex everydir in path should match to be checked :type match_dir: str :return: errors :rtype: `list` .. seealso:: `PyCQA/pydocstyle <https://github.com/GreenSteam/pydocstyle/>`_ """""" ignore = kwargs.get(""ignore"") match = kwargs.get(""match"", None) match_dir = kwargs.get(""match_dir"", None) errors = [] if match and not re.match(match, os.path.basename(filename)): return errors if match_dir: # FIXME here the full path is checked, be sure, if match_dir doesn't # match the path (usually temporary) before the actual application path # it may not run the checks when it should have. 
path = os.path.split(os.path.abspath(filename))[0] while path != ""/"": path, dirname = os.path.split(path) if not re.match(match_dir, dirname): return errors checker = pydocstyle.PEP257Checker() with open(filename) as fp: try: for error in checker.check_source(fp.read(), filename): if ignore is None or error.code not in ignore: # Removing the colon ':' after the error code message = re.sub(""(D[0-9]{3}): ?(.*)"", r""\1 \2"", error.message) errors.append(""{0}: {1}"".format(error.line, message)) except tokenize.TokenError as e: errors.append(""{1}:{2} {0}"".format(e.args[0], *e.args[1])) except pydocstyle.AllError as e: errors.append(str(e)) return errors" 1664,"def check_license(filename, **kwargs): """"""Perform a license check on the given file. The license format should be commented using # and live at the top of the file. Also, the year should be the current one. :param filename: path of file to check. :type filename: str :param year: default current year :type year: int :param ignore: codes to ignore, e.g. ``('L100', 'L101')`` :type ignore: `list` :param python_style: False for JavaScript or CSS files :type python_style: bool :return: errors :rtype: `list` """""" year = kwargs.pop(""year"", datetime.now().year) python_style = kwargs.pop(""python_style"", True) ignores = kwargs.get(""ignore"") template = ""{0}: {1} {2}"" if python_style: re_comment = re.compile(r""^#.*|\{#.*|[\r\n]+$"") starter = ""# "" else: re_comment = re.compile(r""^/\*.*| \*.*|[\r\n]+$"") starter = "" *"" errors = [] lines = [] file_is_empty = False license = """" lineno = 0 try: with codecs.open(filename, ""r"", ""utf-8"") as fp: line = fp.readline() blocks = [] while re_comment.match(line): if line.startswith(starter): line = line[len(starter):].lstrip() blocks.append(line) lines.append((lineno, line.strip())) lineno, line = lineno + 1, fp.readline() file_is_empty = line == """" license = """".join(blocks) except UnicodeDecodeError: errors.append((lineno + 1, ""L190"", ""utf-8"")) license = """" if file_is_empty and not license.strip(): return errors match_year = _re_copyright_year.search(license) if match_year is None: errors.append((lineno + 1, ""L101"")) elif int(match_year.group(""year"")) != year: theline = match_year.group(0) lno = lineno for no, l in lines: if theline.strip() == l: lno = no break errors.append((lno + 1, ""L102"", year, match_year.group(""year""))) else: program_match = _re_program.search(license) program_2_match = _re_program_2.search(license) program_3_match = _re_program_3.search(license) if program_match is None: errors.append((lineno, ""L100"")) elif (program_2_match is None or program_3_match is None or (program_match.group(""program"").upper() != program_2_match.group(""program"").upper() != program_3_match.group(""program"").upper())): errors.append((lineno, ""L103"")) def _format_error(lineno, code, *args): return template.format(lineno, code, _licenses_codes[code].format(*args)) def _filter_codes(error): if not ignores or error[1] not in ignores: return error return list(map(lambda x: _format_error(*x), filter(_filter_codes, errors)))" 1665,"def check_file(filename, **kwargs): """"""Perform static analysis on the given file. .. seealso:: - :data:`.SUPPORTED_FILES` - :func:`.check_pep8` - :func:`.check_pydocstyle` - and :func:`.check_license` :param filename: path of file to check. 
:type filename: str :return: errors sorted by line number or None if file is excluded :rtype: `list` """""" excludes = kwargs.get(""excludes"", []) errors = [] if is_file_excluded(filename, excludes): return None if filename.endswith("".py""): if kwargs.get(""pep8"", True): errors += check_pep8(filename, **kwargs) if kwargs.get(""pydocstyle"", True): errors += check_pydocstyle(filename, **kwargs) if kwargs.get(""license"", True): errors += check_license(filename, **kwargs) elif re.search(""\.(tpl|html)$"", filename): errors += check_license(filename, **kwargs) elif re.search(""\.(js|jsx|css|less)$"", filename): errors += check_license(filename, python_style=False, **kwargs) def try_to_int(value): try: return int(value.split(':', 1)[0]) except ValueError: return 0 return sorted(errors, key=try_to_int)" 1666,"def check_author(author, **kwargs): """"""Check the presence of the author in the AUTHORS/THANKS files. Rules: - the author full name and email must appear in AUTHORS file :param authors: name of AUTHORS files :type authors: `list` :param path: path to the repository home :type path: str :return: errors :rtype: `list` """""" errors = [] authors = kwargs.get(""authors"") if not authors: errors.append('1:A100: ' + _author_codes['A100']) return errors exclude_author_names = kwargs.get(""exclude_author_names"") if exclude_author_names and author in exclude_author_names: return [] path = kwargs.get(""path"") if not path: path = os.getcwd() for afile in authors: if not os.path.exists(path + os.sep + afile): errors.append('1:A101: ' + _author_codes['A101'].format(afile)) if errors: return errors status = subprocess.Popen(['grep', '-q', author] + [path + os.sep + afile for afile in authors], stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=path).wait() if status: errors.append('1:A102: ' + _author_codes['A102'].format(author)) return errors" 1667,"def get_options(config=None): """"""Build the options from the config object."""""" if config is None: from . 
import config config.get = lambda key, default=None: getattr(config, key, default) base = { ""components"": config.get(""COMPONENTS""), ""signatures"": config.get(""SIGNATURES""), ""commit_msg_template"": config.get(""COMMIT_MSG_TEMPLATE""), ""commit_msg_labels"": config.get(""COMMIT_MSG_LABELS""), ""alt_signatures"": config.get(""ALT_SIGNATURES""), ""trusted"": config.get(""TRUSTED_DEVELOPERS""), ""pep8"": config.get(""CHECK_PEP8"", True), ""pydocstyle"": config.get(""CHECK_PYDOCSTYLE"", True), ""license"": config.get(""CHECK_LICENSE"", True), ""pyflakes"": config.get(""CHECK_PYFLAKES"", True), ""ignore"": config.get(""IGNORE""), ""select"": config.get(""SELECT""), ""match"": config.get(""PYDOCSTYLE_MATCH""), ""match_dir"": config.get(""PYDOCSTYLE_MATCH_DIR""), ""min_reviewers"": config.get(""MIN_REVIEWERS""), ""colors"": config.get(""COLORS"", True), ""excludes"": config.get(""EXCLUDES"", []), ""authors"": config.get(""AUTHORS""), ""exclude_author_names"": config.get(""EXCLUDE_AUTHOR_NAMES""), } options = {} for k, v in base.items(): if v is not None: options[k] = v return options" 1668,"def run(self): """"""Yield the error messages."""""" for msg in self.messages: col = getattr(msg, 'col', 0) yield msg.lineno, col, (msg.tpl % msg.message_args), msg.__class__" 1669,"def error(self, line_number, offset, text, check): """"""Run the checks and collect the errors."""""" code = super(_Report, self).error(line_number, offset, text, check) if code: self.errors.append((line_number, offset + 1, code, text, check))" 1670,"def prompt(prompt_string, default=None, secret=False, boolean=False, bool_type=None): """""" Prompt user for a string, with a default value * secret converts to password prompt * boolean converts return value to boolean, checking for starting with a Y """""" if boolean or bool_type in BOOLEAN_DEFAULTS: if bool_type is None: bool_type = 'y_n' default_msg = BOOLEAN_DEFAULTS[bool_type][is_affirmative(default)] else: default_msg = "" (default {val}): "" prompt_string += (default_msg.format(val=default) if default else "": "") if secret: val = getpass(prompt_string) else: val = input(prompt_string) val = (val if val else default) if boolean: val = val.lower().startswith('y') return val" 1671,"def prop_unc(jc): """""" Propagate uncertainty. :param jc: the Jacobian and covariance matrix :type jc: sequence This method is mainly designed to be used as the target for a multiprocessing pool. """""" j, c = jc return np.dot(np.dot(j, c), j.T)" 1672,"def partial_derivative(f, x, n, nargs, delta=DELTA): """""" Calculate partial derivative using central finite difference approximation. :param f: function :param x: sequence of arguments :param n: index of argument derivateve is with respect to :param nargs: number of arguments :param delta: optional step size, default is :math:`\\epsilon^{1/3}` where :math:`\\epsilon` is machine precision """""" dx = np.zeros((nargs, len(x[n]))) # scale delta by (|x| + 1.0) to avoid noise from machine precision dx[n] += np.where(x[n], x[n] * delta, delta) # apply central difference approximation x_dx = zip(*[xi + (dxi, -dxi) for xi, dxi in zip(x, dx)]) return (f(x_dx[0]) - f(x_dx[1])) / dx[n] / 2.0" 1673,"def jacobian(func, x, nf, nobs, *args, **kwargs): """""" Estimate Jacobian matrices :math:`\\frac{\\partial f_i}{\\partial x_{j,k}}` where :math:`k` are independent observations of :math:`x`. The independent variable, :math:`x`, must be a numpy array with exactly 2 dimensions. 
The first dimension is the number of independent arguments, and the second dimensions is the number of observations. The function must return a Numpy array with exactly 2 dimensions. The first is the number of returns and the second dimension corresponds to the number of observations. If the input argument is 2-D then the output should also be 2-D Constant arguments can be passed as additional positional arguments or keyword arguments. If any constant argument increases the number of observations of the return value, tile the input arguments to match. Use :func:`numpy.atleast_2d` or :func:`numpy.reshape` to get the correct dimensions for scalars. :param func: function :param x: independent variables grouped by observation :param nf: number of return in output (1st dimension) :param nobs: number of observations in output (2nd dimension) :return: Jacobian matrices for each observation """""" nargs = len(x) # degrees of freedom f = lambda x_: func(x_, *args, **kwargs) j = np.zeros((nargs, nf, nobs)) # matrix of zeros for n in xrange(nargs): j[n] = partial_derivative(f, x, n, nargs) # better to transpose J once than transpose partial derivative each time # j[:,:,n] = df.T return j.T" 1674,"def jflatten(j): """""" Flatten 3_D Jacobian into 2-D. """""" nobs, nf, nargs = j.shape nrows, ncols = nf * nobs, nargs * nobs jflat = np.zeros((nrows, ncols)) for n in xrange(nobs): r, c = n * nf, n * nargs jflat[r:(r + nf), c:(c + nargs)] = j[n] return jflat" 1675,"def jtosparse(j): """""" Generate sparse matrix coordinates from 3-D Jacobian. """""" data = j.flatten().tolist() nobs, nf, nargs = j.shape indices = zip(*[(r, c) for n in xrange(nobs) for r in xrange(n * nf, (n + 1) * nf) for c in xrange(n * nargs, (n + 1) * nargs)]) return csr_matrix((data, indices), shape=(nobs * nf, nobs * nargs))" 1676,"def unc_wrapper_args(*covariance_keys): """""" Wrap function, calculate its Jacobian and calculate the covariance of the outputs given the covariance of the specified inputs. :param covariance_keys: indices and names of arguments corresponding to covariance :return: wrapped function bound to specified covariance keys This is the outer uncertainty wrapper that allows you to specify the arguments in the original function that correspond to the covariance. The inner wrapper takes the original function to be wrapped. :: def f(a, b, c, d, kw1='foo', *args, **kwargs): pass # arguments a, c, d and kw1 correspond to the covariance matrix f_wrapped = unc_wrapper_args(0, 2, 3, 'kw1')(f) cov = np.array([[0.0001, 0., 0., 0.], [0., 0.0001, 0., 0.], [0., 0., 0.0001, 0.], [0., 0., 0., 0.0001]) y, cov, jac = f_wrapped(a, b, c, d, kw1='bar', __covariance__=cov) The covariance keys can be indices of positional arguments or the names of keywords argument used in calling the function. If no covariance keys are specified then the arguments that correspond to the covariance shoud be grouped into a sequence. If ``None`` is anywhere in ``covariance_keys`` then all of the arguments will be used to calculate the Jacobian. The covariance matrix must be a symmetrical matrix with positive numbers on the diagonal that correspond to the square of the standard deviation, second moment around the mean or root-mean-square(RMS) of the function with respect to the arguments specified as covariance keys. The other elements are the covariances corresponding to the arguments intersecting at that element. 
Pass the covariance matrix with the keyword ``__covariance__`` and it will be popped from the dictionary of keyword arguments provided to the wrapped function. The wrapped function will return the evaluation of the original function, its Jacobian, which is the sensitivity of the return output to each argument specified as a covariance key and the covariance propagated using the first order terms of a Taylor series expansion around the arguments. An optional keyword argument ``__method__`` can also be passed to the wrapped function (not the wrapper) that specifies the method used to calculate the dot product. The default method is ``'loop'``. The other methods are ``'dense'``, ``'sparse'`` and ``'pool'``. If the arguments specified as covariance keys are arrays, they should all be the same size. These dimensions will be considered as separate observations. Another argument, not in the covariance keys, may also create observations. The resulting Jacobian will have dimensions of number of observations (nobs) by number of return output (nf) by number of covariance keys (nargs). The resulting covariance will be nobs x nf x nf. """""" def wrapper(f): @wraps(f) def wrapped_function(*args, **kwargs): cov = kwargs.pop('__covariance__', None) # pop covariance method = kwargs.pop('__method__', 'loop') # pop covariance # covariance keys cannot be defaults, they must be in args or kwargs cov_keys = covariance_keys # convert args to kwargs by index kwargs.update({n: v for n, v in enumerate(args)}) args = () # empty args if None in cov_keys: # use all keys cov_keys = kwargs.keys() # group covariance keys if len(cov_keys) > 0: # uses specified keys x = [np.atleast_1d(kwargs.pop(k)) for k in cov_keys] else: # arguments already grouped x = kwargs.pop(0) # use first argument # remaining args args_dict = {} def args_from_kwargs(kwargs_): """"""unpack positional arguments from keyword arguments"""""" # create mapping of positional arguments by index args_ = [(n, v) for n, v in kwargs_.iteritems() if not isinstance(n, basestring)] # sort positional arguments by index idx, args_ = zip(*sorted(args_, key=lambda m: m[0])) # remove args_ and their indices from kwargs_ args_dict_ = {n: kwargs_.pop(n) for n in idx} return args_, args_dict_ if kwargs: args, args_dict = args_from_kwargs(kwargs) def f_(x_, *args_, **kwargs_): """"""call original function with independent variables grouped"""""" args_dict_ = args_dict if cov_keys: kwargs_.update(zip(cov_keys, x_), **args_dict_) if kwargs_: args_, _ = args_from_kwargs(kwargs_) return np.array(f(*args_, **kwargs_)) # assumes independent variables already grouped return f(x_, *args_, **kwargs_) # evaluate function and Jacobian avg = f_(x, *args, **kwargs) # number of returns and observations if avg.ndim > 1: nf, nobs = avg.shape else: nf, nobs = avg.size, 1 jac = jacobian(f_, x, nf, nobs, *args, **kwargs) # calculate covariance if cov is not None: # covariance must account for all observations # scale covariances by x squared in each direction if cov.ndim == 3: x = np.array([np.repeat(y, nobs) if len(y)==1 else y for y in x]) LOGGER.debug('x:\n%r', x) cov = np.array([c * y * np.row_stack(y) for c, y in zip(cov, x.T)]) else: # x are all only one dimension x = np.asarray(x) cov = cov * x * x.T assert jac.size / nf / nobs == cov.size / len(x) cov = np.tile(cov, (nobs, 1, 1)) # propagate uncertainty using different methods if method.lower() == 'dense': j, c = jflatten(jac), jflatten(cov) cov = prop_unc((j, c)) # sparse elif method.lower() == 'sparse': j, c = jtosparse(jac), 
jtosparse(cov) cov = j.dot(c).dot(j.transpose()) cov = cov.todense() # pool elif method.lower() == 'pool': try: p = Pool() cov = np.array(p.map(prop_unc, zip(jac, cov))) finally: p.terminate() # loop is the default else: cov = np.array([prop_unc((jac[o], cov[o])) for o in xrange(nobs)]) # dense and spares are flattened, unravel them into 3-D list of # observations if method.lower() in ['dense', 'sparse']: cov = np.array([ cov[(nf * o):(nf * (o + 1)), (nf * o):(nf * (o + 1))] for o in xrange(nobs) ]) # unpack returns for original function with ungrouped arguments if None in cov_keys or len(cov_keys) > 0: return tuple(avg.tolist() + [cov, jac]) # independent variables were already grouped return avg, cov, jac return wrapped_function return wrapper" 1677,"def assign_handler(query, category): """"""assign_handler(query, category) -- assign the user's query to a particular category, and call the appropriate handler. """""" if(category == 'count lines'): handler.lines(query) elif(category == 'count words'): handler.words(query) elif(category == 'weather'): web.weather(query) elif(category == 'no match'): web.generic(query) elif(category == 'file info'): handler.file_info(query) elif(category == 'executable'): handler.make_executable(query) elif(category == 'search'): handler.search(query) elif(category == 'path'): handler.add_to_path(query) elif(category == 'uname'): handler.system_info(query) else: print 'I\'m not able to understand your query'" 1678,"def get_file_name(query): """"""get_file_name(query) -> filename -- return the filename found in a given, found by matching a regular expression. """""" match = re.search(r'\S*\.[\d\w]{1,4}', query) if(match): filename = match.group() return filename else: start = match.start() end = match.end() spaces = re.finditer(r' ', query) space_index = [] for space in spaces: space_index.append(space.start()) space_index.pop() for i in space_index: filename = query[i+1:end] if(os.path.isfile(filename)): return filename return None" 1679,"def get_path(query): """"""get_path(query) -> pathname -- return the path found in a given, found by matching a regular expression. """""" match = re.search(r'/(.*/)+(\S*(\.[\d\w]{1,4})?)', query) if(os.path.isfile(match.group()) or os.path.isdir(match.group())): return match.group() else: return None" 1680,"def get_readable_filesize(size): """"""get_readable_filesize(size) -> filesize -- return human readable filesize from given size in bytes. 
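For example, ``get_readable_filesize(512)`` returns ``'512 bytes'`` and ``get_readable_filesize(1536)`` returns ``'1.5 KB'``.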
"""""" if(size < 1024): return str(size)+' bytes' temp = size/1024.0 level = 1 while(temp >= 1024 and level< 3): temp = temp/1024 level += 1 if(level == 1): return str(round(temp,2))+' KB' elif(level == 2): return str(round(temp,2))+' MB' else: return str(round(temp,2))+' GB'" 1681,"def list(self, svc_rec=None, hostfilter=None, compromised=False): """""" List user accounts :param svc_rec: db.t_services.id :param hostfilter: :param compromised: Show only compromised accounts :return: [acct.t_accounts.f_services_id, acct.t_hosts.f_ipaddr, acct.t_hosts.f_hostname, acct.t_accounts.id, acct.t_accounts.f_username, acct.t_accounts.f_fullname, acct.t_accounts.f_password, acct.t_accounts.f_compromised, acct.t_accounts.f_hash1, acct.t_accounts.f_hash1_type, acct.t_accounts.f_hash2, acct.t_accounts.f_hash2_type, acct.t_accounts.f_source, acct.t_accounts.f_uid, acct.t_accounts.f_gid, acct.t_accounts.f_level, acct.t_accounts.f_domain, acct.t_accounts.f_message, acct.t_accounts.f_lockout, acct.t_accounts.f_duration, acct.t_accounts.f_active, acct.t_accounts.f_description, acct.t_services.f_proto, acct.t_services.f_number, ] """""" return self.send.accounts_list(svc_rec, hostfilter, compromised)" 1682,"def upload_file(self, service_rec=None, host_service=None, filename=None, pw_data=None, f_type=None, add_to_evidence=True): """""" Upload a password file :param service_rec: db.t_services.id :param host_service: db.t_hosts.id :param filename: Filename :param pw_data: Content of file :param f_type: Type of file :param add_to_evidence: True/False to add to t_evidence :return: (True/False, Response Message) """""" return self.send.accounts_upload_file(service_rec, host_service, filename, pw_data, f_type, add_to_evidence)" 1683,"def _columns_to_kwargs(conversion_table, columns, row): """""" Given a list of column names, and a list of values (a row), return a dict of kwargs that may be used to instantiate a MarketHistoryEntry or MarketOrder object. :param dict conversion_table: The conversion table to use for mapping spec names to kwargs. :param list columns: A list of column names. :param list row: A list of values. """""" kwdict = {} counter = 0 for column in columns: # Map the column name to the correct MarketHistoryEntry kwarg. kwarg_name = conversion_table[column] # Set the kwarg to the correct value from the row. kwdict[kwarg_name] = row[counter] counter += 1 return kwdict" 1684,"def parse_datetime(time_str): """""" Wraps dateutil's parser function to set an explicit UTC timezone, and to make sure microseconds are 0. Unified Uploader format and EMK format bother don't use microseconds at all. :param str time_str: The date/time str to parse. :rtype: datetime.datetime :returns: A parsed, UTC datetime. """""" try: return dateutil.parser.parse( time_str ).replace(microsecond=0).astimezone(UTC_TZINFO) except ValueError: # This was some kind of unrecognizable time string. 
raise ParseError(""Invalid time string: %s"" % time_str)" 1685,"def merge(self, po_file, source_files): """"""从源码中获取所有条目,合并到 po_file 中。 :param string po_file: 待写入的 po 文件路径。 :param list source_files : 所有待处理的原文件路径 list。 """""" # Create a temporary file to write pot file pot_file = tempfile.NamedTemporaryFile(mode='wb', prefix='rookout_', delete=False) pot_filename = pot_file.name slog.info('Create POT file [%s].', pot_filename) xargs = [self._xgettext, ""--package-name=main"", ""--package-version=0.1"", ""--default-domain=main"", ""--from-code=UTF-8"", ""-C"", ""-k_"", ""--output"", pot_filename] txt = subprocess.check_output(xargs+source_files, stderr=subprocess.STDOUT, universal_newlines=True) if len(txt) > 0: raise(ChildProcessError(txt)) slog.info('Start merge [%s] to [%s].', pot_filename, po_file) xargs = [self._msgmerge, ""-U"", po_file, pot_filename] txt = subprocess.check_output(xargs, universal_newlines=True) slog.info(txt) pot_file.close() os.remove(pot_filename)" 1686,"def fmt(self, po_file, mo_file): """"""将 po 文件转换成 mo 文件。 :param string po_file: 待转换的 po 文件路径。 :param string mo_file: 目标 mo 文件的路径。 """""" if not os.path.exists(po_file): slog.error('The PO file [%s] is non-existen!'%po_file) return txt = subprocess.check_output([self._msgfmt, '--check', ""--strict"", '--verbose', ""--output-file"", mo_file, po_file], stderr=subprocess.STDOUT, universal_newlines=True) slog.info(txt)" 1687,"def add_suffix(filename, suffix): """""" ADD suffix TO THE filename (NOT INCLUDING THE FILE EXTENSION) """""" path = filename.split(""/"") parts = path[-1].split(""."") i = max(len(parts) - 2, 0) parts[i] = parts[i] + suffix path[-1] = ""."".join(parts) return ""/"".join(path)" 1688,"def find(self, pattern): """""" :param pattern: REGULAR EXPRESSION TO MATCH NAME (NOT INCLUDING PATH) :return: LIST OF File OBJECTS THAT HAVE MATCHING NAME """""" output = [] def _find(dir): if re.match(pattern, dir._filename.split(""/"")[-1]): output.append(dir) if dir.is_directory(): for c in dir.children: _find(c) _find(self) return output" 1689,"def set_extension(self, ext): """""" RETURN NEW FILE WITH GIVEN EXTENSION """""" path = self._filename.split(""/"") parts = path[-1].split(""."") if len(parts) == 1: parts.append(ext) else: parts[-1] = ext path[-1] = ""."".join(parts) return File(""/"".join(path))" 1690,"def set_name(self, name): """""" RETURN NEW FILE WITH GIVEN EXTENSION """""" path = self._filename.split(""/"") parts = path[-1].split(""."") if len(parts) == 1: path[-1] = name else: path[-1] = name + ""."" + parts[-1] return File(""/"".join(path))" 1691,"def backup_name(self, timestamp=None): """""" RETURN A FILENAME THAT CAN SERVE AS A BACKUP FOR THIS FILE """""" suffix = datetime2string(coalesce(timestamp, datetime.now()), ""%Y%m%d_%H%M%S"") return File.add_suffix(self._filename, suffix)" 1692,"def read(self, encoding=""utf8""): """""" :param encoding: :return: """""" with open(self._filename, ""rb"") as f: if self.key: return get_module(""mo_math.crypto"").decrypt(f.read(), self.key) else: content = f.read().decode(encoding) return content" 1693,"def read_zipfile(self, encoding='utf8'): """""" READ FIRST FILE IN ZIP FILE :param encoding: :return: STRING """""" from zipfile import ZipFile with ZipFile(self.abspath) as zipped: for num, zip_name in enumerate(zipped.namelist()): return zipped.open(zip_name).read().decode(encoding)" 1694,"def append(self, content, encoding='utf8'): """""" add a line to file """""" if not self.parent.exists: self.parent.create() with open(self._filename, ""ab"") as 
output_file: if not is_text(content): Log.error(u""expecting to write unicode only"") output_file.write(content.encode(encoding)) output_file.write(b""\n"")" 1695,"def url_param2value(param): """""" CONVERT URL QUERY PARAMETERS INTO DICT """""" if param == None: return Null if param == None: return Null def _decode(v): output = [] i = 0 while i < len(v): c = v[i] if c == ""%"": d = hex2chr(v[i + 1:i + 3]) output.append(d) i += 3 else: output.append(c) i += 1 output = text_type("""".join(output)) try: return json2value(output) except Exception: pass return output query = Data() for p in param.split('&'): if not p: continue if p.find(""="") == -1: k = p v = True else: k, v = p.split(""="") v = _decode(v) u = query.get(k) if u is None: query[k] = v elif is_list(u): u += [v] else: query[k] = [u, v] return query" 1696,"def value2url_param(value): """""" :param value: :return: ascii URL """""" if value == None: Log.error(""Can not encode None into a URL"") if is_data(value): value_ = wrap(value) output = ""&"".join([ value2url_param(k) + ""="" + (value2url_param(v) if is_text(v) else value2url_param(value2json(v))) for k, v in value_.leaves() ]) elif is_text(value): output = """".join(_map2url[c] for c in value.encode('utf8')) elif is_binary(value): output = """".join(_map2url[c] for c in value) elif hasattr(value, ""__iter__""): output = "","".join(value2url_param(v) for v in value) else: output = str(value) return output" 1697,"def configfile_from_path(path, strict=True): """"""Get a ConfigFile object based on a file path. This method will inspect the file extension and return the appropriate ConfigFile subclass initialized with the given path. Args: path (str): The file path which represents the configuration file. strict (bool): Whether or not to parse the file in strict mode. Returns: confpy.loaders.base.ConfigurationFile: The subclass which is specialized for the given file path. Raises: UnrecognizedFileExtension: If there is no loader for the path. """""" extension = path.split('.')[-1] conf_type = FILE_TYPES.get(extension) if not conf_type: raise exc.UnrecognizedFileExtension( ""Cannot parse file of type {0}. Choices are {1}."".format( extension, FILE_TYPES.keys(), ) ) return conf_type(path=path, strict=strict)" 1698,"def configuration_from_paths(paths, strict=True): """"""Get a Configuration object based on multiple file paths. Args: paths (iter of str): An iterable of file paths which identify config files on the system. strict (bool): Whether or not to parse the files in strict mode. Returns: confpy.core.config.Configuration: The loaded configuration object. Raises: NamespaceNotRegistered: If a file contains a namespace which is not defined. OptionNotRegistered: If a file contains an option which is not defined but resides under a valid namespace. UnrecognizedFileExtension: If there is no loader for a path. """""" for path in paths: cfg = configfile_from_path(path, strict=strict).config return cfg" 1699,"def set_environment_var_options(config, env=None, prefix='CONFPY'): """"""Set any configuration options which have an environment var set. Args: config (confpy.core.config.Configuration): A configuration object which has been initialized with options. env (dict): Optional dictionary which contains environment variables. The default is os.environ if no value is given. prefix (str): The string prefix prepended to all environment variables. This value will be set to upper case. The default is CONFPY. 
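For example, with the default prefix an environment variable named ``CONFPY_DATABASE_PORT`` (hypothetical section and option names) would set the ``port`` option of the ``database`` section.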
Returns: confpy.core.config.Configuration: A configuration object with environment variables set. The pattern to follow when setting environment variables is: <PREFIX>_<SECTION>_<OPTION> Each value should be upper case and separated by underscores. """""" env = env or os.environ for section_name, section in config: for option_name, _ in section: var_name = '{0}_{1}_{2}'.format( prefix.upper(), section_name.upper(), option_name.upper(), ) env_var = env.get(var_name) if env_var: setattr(section, option_name, env_var) return config" 1700,"def set_cli_options(config, arguments=None): """"""Set any configuration options which have a CLI value set. Args: config (confpy.core.config.Configuration): A configuration object which has been initialized with options. arguments (iter of str): An iterable of strings which contains the CLI arguments passed. If nothing is give then sys.argv is used. Returns: confpy.core.config.Configuration: A configuration object with CLI values set. The pattern to follow when setting CLI values is: <section>_<option> Each value should be lower case and separated by underscores. """""" arguments = arguments or sys.argv[1:] parser = argparse.ArgumentParser() for section_name, section in config: for option_name, _ in section: var_name = '{0}_{1}'.format( section_name.lower(), option_name.lower(), ) parser.add_argument('--{0}'.format(var_name)) args, _ = parser.parse_known_args(arguments) args = vars(args) for section_name, section in config: for option_name, _ in section: var_name = '{0}_{1}'.format( section_name.lower(), option_name.lower(), ) value = args.get(var_name) if value: setattr(section, option_name, value) return config" 1701,"def check_for_missing_options(config): """"""Iter over a config and raise if a required option is still not set. Args: config (confpy.core.config.Configuration): The configuration object to validate. Raises: MissingRequiredOption: If any required options are not set in the configuration object. Required options with default values are considered set and will not cause this function to raise. """""" for section_name, section in config: for option_name, option in section: if option.required and option.value is None: raise exc.MissingRequiredOption( ""Option {0} in namespace {1} is required."".format( option_name, section_name, ) ) return config" 1702,"def parse_options(files, env_prefix='CONFPY', strict=True): """"""Parse configuration options and return a configuration object. Args: files (iter of str): File paths which identify configuration files. These files are processed in order with values in later files overwriting values in earlier files. env_prefix (str): The static prefix prepended to all options when set as environment variables. The default is CONFPY. strict (bool): Whether or not to parse the files in strict mode. Returns: confpy.core.config.Configuration: The loaded configuration object. Raises: MissingRequiredOption: If a required option is not defined in any file. NamespaceNotRegistered: If a file contains a namespace which is not defined. OptionNotRegistered: If a file contains an option which is not defined but resides under a valid namespace. UnrecognizedFileExtension: If there is no loader for a path. 
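Example, assuming ``defaults.json`` and ``overrides.ini`` are supported configuration files (hypothetical names):: cfg = parse_options(['defaults.json', 'overrides.ini']) Values from ``overrides.ini`` take precedence over ``defaults.json``, and ``CONFPY``-prefixed environment variables and CLI flags override both.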
"""""" return check_for_missing_options( config=set_cli_options( config=set_environment_var_options( config=configuration_from_paths( paths=files, strict=strict, ), prefix=env_prefix, ), ) )" 1703,"def buy_product(self, product_pk): """""" determina si el customer ha comprado un producto """""" if self.invoice_sales.filter(line_invoice_sales__line_order__product__pk=product_pk).exists() \ or self.ticket_sales.filter(line_ticket_sales__line_order__product__pk=product_pk).exists(): return True else: return False" 1704,"def template(self): """""" Get the template from: YAML or class """""" # First try props if self.props.template: return self.props.template else: # Return the wtype of the widget, and we'll presume that, # like resources, there's a .html file in that directory return self.wtype" 1705,"def render(self, sphinx_app: Sphinx, context): """""" Given a Sphinx builder and context with sphinx_app in it, generate HTML """""" # Called from kaybee.plugins.widgets.handlers.render_widgets builder: StandaloneHTMLBuilder = sphinx_app.builder resource = sphinx_app.env.resources[self.docname] context['sphinx_app'] = sphinx_app context['widget'] = self context['resource'] = resource # make_context is optionally implemented on the concrete class # for each widget self.make_context(context, sphinx_app) # NOTE: Can use builder.templates.render_string template = self.template + '.html' html = builder.templates.render(template, context) return html" 1706,"def desc(t=None, reg=True): """""" Describe Class Dependency :param reg: should we register this class as well :param t: custom type as well :return: """""" def decorated_fn(cls): if not inspect.isclass(cls): return NotImplemented('For now we can only describe classes') name = t or camel_case_to_underscore(cls.__name__)[0] if reg: di.injector.register(name, cls) else: di.injector.describe(name, cls) return cls return decorated_fn" 1707,"def add(self, key, value=None): """""" Adds the new key to this enumerated type. :param key | <str> """""" if value is None: value = 2 ** (len(self)) self[key] = value setattr(self, key, self[key]) return value" 1708,"def all(self): """""" Returns all the values joined together. :return <int> """""" out = 0 for key, value in self.items(): out |= value return out" 1709,"def base(self, value, recurse=True): """""" Returns the root base for the given value from this enumeration. :param value | <variant> recurse | <bool> """""" while value in self._bases: value = self._bases[value] if not recurse: break return value" 1710,"def displayText(self, value, blank='', joiner=', '): """""" Returns the display text for the value associated with the inputted text. This will result in a comma separated list of labels for the value, or the blank text provided if no text is found. :param value | <variant> blank | <str> joiner | <str> :return <str> """""" if value is None: return '' labels = [] for key, my_value in sorted(self.items(), key=lambda x: x[1]): if value & my_value: labels.append(self._labels.get(my_value, text.pretty(key))) return joiner.join(labels) or blank" 1711,"def extend(self, base, key, value=None): """""" Adds a new definition to this enumerated type, extending the given base type. This will create a new key for the type and register it as a new viable option from the system, however, it will also register its base information so you can use enum.base to retrieve the root type. 
:param base | <variant> | value for this enumeration key | <str> | new key for the value value | <variant> | if None is supplied, it will be auto-assigned :usage |>>> from projex.enum import enum |>>> Types = enum('Integer', 'Boolean') |>>> Types.Integer |1 |>>> Types.Boolean |2 |>>> Types.extend(Types.Integer, 'BigInteger') |>>> Types.BigInteger |4 |>>> Types.base(Types.BigInteger) |1 """""" new_val = self.add(key, value) self._bases[new_val] = base" 1712,"def fromSet(self, values): """""" Generates a flag value based on the given set of values. :param values: <set> :return: <int> """""" value = 0 for flag in values: value |= self(flag) return value" 1713,"def label(self, value): """""" Returns a pretty text version of the key for the inputted value. :param value | <variant> :return <str> """""" return self._labels.get(value) or text.pretty(self(value))" 1714,"def labels(self): """""" Return a list of ""user friendly"" labels. :return <list> [ <str>, .. ] """""" return [self._labels.get(value) or text.pretty(key) for key, value in sorted(self.items(), key=lambda x: x[1])]" 1715,"def setLabel(self, value, label): """""" Sets the label text for the inputted value. This will override the default pretty text label that is used for the key. :param value | <variant> label | <str> """""" if label: self._labels[value] = label else: self._labels.pop(value, None)" 1716,"def text(self, value, default=''): """""" Returns the text for the inputted value. :return <str> """""" for key, val in self.items(): if val == value: return key return default" 1717,"def toSet(self, flags): """""" Generates a flag value based on the given set of values. :param values: <set> :return: <int> """""" return {key for key, value in self.items() if value & flags}" 1718,"def valueByLabel(self, label): """""" Determine a given value based on the inputted label. :param label <str> :return <int> """""" keys = self.keys() labels = [text.pretty(key) for key in keys] if label in labels: return self[keys[labels.index(label)]] return 0" 1719,"def cli(ctx, name,all): """"""Show example for doing some task in bubble(experimental)"""""" ctx.gbc.say('all_example_functions',stuff=all_examples_functions, verbosity=1000) for example in all_examples_functions: if all or (name and example['name'] == name): if all: ctx.gbc.say('example',stuff=example, verbosity=100) name = example['name'] #click.echo_via_pager(example['fun']()) click.echo(""#""*80) click.echo(""### start of bubble example: ""+name) click.echo(""#""*80) click.echo(example['fun']()) click.echo(""#""*80) click.echo(""### end of bubble example: ""+name) click.echo(""#""*80) click.echo() else: click.echo(""available example: "" + example['name'])" 1720,"def check_if_alive(self): """"""Check if the content is available on the host server. Returns `True` if available, else `False`. This method is `lazy`-evaluated or only executes when called. 
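Concretely, it builds the status URL from ``STATUS_LINK`` (falling back to the instance URL), performs a GET request and returns ``True`` only for an HTTP 200 response; URL or HTTP errors yield ``False``.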
:rtype: bool """""" try: from urllib2 import urlopen, URLError, HTTPError except ImportError: from urllib.request import urlopen, URLError, HTTPError if len(self.instance.STATUS_LINK): check_url = self.instance.STATUS_LINK % ({'content_uid': self.get_content_uid()}) else: # fallback check_url = self.instance.url try: response = urlopen(check_url) except (HTTPError, URLError): return False except ValueError: raise URLError('Invalid URL: %s'.format(check_url)) else: return True if response.code == 200 else False" 1721,"def load_config_file(self): """"""Parse configuration file and get config values."""""" config_parser = SafeConfigParser() config_parser.read(self.CONFIG_FILE) if config_parser.has_section('handlers'): self._config['handlers_package'] = config_parser.get('handlers', 'package') if config_parser.has_section('auth'): self._config['consumer_key'] = config_parser.get('auth', 'consumer_key') self._config['consumer_secret'] = config_parser.get('auth', 'consumer_secret') self._config['token_key'] = config_parser.get('auth', 'token_key') self._config['token_secret'] = config_parser.get('auth', 'token_secret') if config_parser.has_section('stream'): self._config['user_stream'] = config_parser.get('stream', 'user_stream').lower() == 'true' else: self._config['user_stream'] = False if config_parser.has_option('general', 'min_seconds_between_errors'): self._config['min_seconds_between_errors'] = config_parser.get('general', 'min_seconds_between_errors') if config_parser.has_option('general', 'sleep_seconds_on_consecutive_errors'): self._config['sleep_seconds_on_consecutive_errors'] = config_parser.get( 'general', 'sleep_seconds_on_consecutive_errors')" 1722,"def load_config_from_cli_arguments(self, *args, **kwargs): """""" Get config values of passed in CLI options. :param dict kwargs: CLI options """""" self._load_config_from_cli_argument(key='handlers_package', **kwargs) self._load_config_from_cli_argument(key='auth', **kwargs) self._load_config_from_cli_argument(key='user_stream', **kwargs) self._load_config_from_cli_argument(key='min_seconds_between_errors', **kwargs) self._load_config_from_cli_argument(key='sleep_seconds_on_consecutive_errors', **kwargs)" 1723,"def validate_configs(self): """""" Check that required config are set. :raises :class:`~responsebot.common.exceptions.MissingConfigError`: if a required config is missing """""" # Check required arguments, validate values for conf in self.REQUIRED_CONFIGS: if conf not in self._config: raise MissingConfigError('Missing required configuration %s' % conf)" 1724,"def get(self, id): """""" Gets the dict data and builds the item object. """""" data = self.db.get_data(self.get_path, id=id) return self._build_item(**data['Data'][self.name])" 1725,"def save(self, entity): """"""Maps entity to dict and returns future"""""" assert isinstance(entity, Entity), "" entity must have an instance of Entity"" return self.__collection.save(entity.as_dict())" 1726,"def find_one(self, **kwargs): """"""Returns future. Executes collection's find_one method based on keyword args maps result ( dict to instance ) and return future Example:: manager = EntityManager(Product) product_saved = yield manager.find_one(_id=object_id) """""" future = TracebackFuture() def handle_response(result, error): if error: future.set_exception(error) else: instance = self.__entity() instance.map_dict(result) future.set_result(instance) self.__collection.find_one(kwargs, callback=handle_response) return future" 1727,"def find(self, **kwargs): """"""Returns List(typeof=). 
Executes collection's find method based on keyword args maps results ( dict to list of entity instances). Set max_limit parameter to limit the amount of data send back through network Example:: manager = EntityManager(Product) products = yield manager.find(age={'$gt': 17}, max_limit=100) """""" max_limit = None if 'max_limit' in kwargs: max_limit = kwargs.pop('max_limit') cursor = self.__collection.find(kwargs) instances = [] for doc in (yield cursor.to_list(max_limit)): instance = self.__entity() instance.map_dict(doc) instances.append(instance) return instances" 1728,"def update(self, entity): """""" Executes collection's update method based on keyword args. Example:: manager = EntityManager(Product) p = Product() p.name = 'new name' p.description = 'new description' p.price = 300.0 yield manager.update(p) """""" assert isinstance(entity, Entity), ""Error: entity must have an instance of Entity"" return self.__collection.update({'_id': entity._id}, {'$set': entity.as_dict()})" 1729,"async def get_poll(poll_id): """""" Get a strawpoll. Example: poll = strawpy.get_poll('11682852') :param poll_id: :return: strawpy.Strawpoll object """""" async with aiohttp.get('{api_url}/{poll_id}'.format(api_url=api_url, poll_id=poll_id)) as r: return await StrawPoll(r)" 1730,"async def create_poll(title, options, multi=True, permissive=True, captcha=False, dupcheck='normal'): """""" Create a strawpoll. Example: new_poll = strawpy.create_poll('Is Python the best?', ['Yes', 'No']) :param title: :param options: :param multi: :param permissive: :param captcha: :param dupcheck: :return: strawpy.Strawpoll object """""" query = { 'title': title, 'options': options, 'multi': multi, 'permissive': permissive, 'captcha': captcha, 'dupcheck': dupcheck } async with aiohttp.post(api_url, data=json.dumps(query)) as r: return await StrawPoll(r)" 1731,"def raise_status(response): """"""Raise an exception if the request did not return a status code of 200. :param response: Request response body """""" if response.status != 200: if response.status == 401: raise StrawPollException('Unauthorized', response) elif response.status == 403: raise StrawPollException('Forbidden', response) elif response.status == 404: raise StrawPollException('Not Found', response) else: response.raise_for_status()" 1732,"def results_with_percent(self): """""" Zip options, votes and percents (as integers) together. :return: List of tuples (option, votes, percent) """""" percents = [int(float(v) / sum(self.votes) * 100) if sum(self.votes) > 0 else 0 for v in self.votes] return zip(self.options, self.votes, percents)" 1733,"def open(self, results=False): """""" Open the strawpoll in a browser. Can specify to open the main or results page. 
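Example: ``poll.open(results=True)`` opens the poll's results page in the default web browser, while ``poll.open()`` opens the voting page.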
:param results: True/False """""" webbrowser.open(self.results_url if results else self.url)" 1734,"def main(): """""" Testing function for DFA brzozowski algebraic method Operation """""" argv = sys.argv if len(argv) < 2: targetfile = 'target.y' else: targetfile = argv[1] print 'Parsing ruleset: ' + targetfile, flex_a = Flexparser() mma = flex_a.yyparse(targetfile) print 'OK' print 'Perform minimization on initial automaton:', mma.minimize() print 'OK' print 'Perform Brzozowski on minimal automaton:', brzozowski_a = Brzozowski(mma) mma_regex = brzozowski_a.get_regex() print mma_regex" 1735,"def _bfs_sort(self, start): """""" maintain a map of states distance using BFS Args: start (fst state): The initial DFA state Returns: list: An ordered list of DFA states using path distance """""" pathstates = {} # maintain a queue of nodes to be visited. Both current and previous # node must be included. queue = [] # push the first path into the queue queue.append([0, start]) pathstates[start.stateid] = 0 while queue: # get the first node from the queue leaf = queue.pop(0) node = leaf[1] pathlen = leaf[0] # enumerate all adjacent nodes, construct a new path and push it # into the queue for arc in node.arcs: next_state = self.mma[arc.nextstate] if next_state.stateid not in pathstates: queue.append([pathlen + 1, next_state]) pathstates[next_state.stateid] = pathlen + 1 orderedstatesdict = OrderedDict( sorted( pathstates.items(), key=lambda x: x[1], reverse=False)) for state in self.mma.states: orderedstatesdict[state.stateid] = state orderedstates = [x[1] for x in list(orderedstatesdict.items())] return orderedstates" 1736,"def star(self, input_string): """""" Kleene star operation Args: input_string (str): The string that the kleene star will be made Returns: str: The applied Kleene star operation on the input string """""" if input_string != self.epsilon and input_string != self.empty: return ""("" + input_string + "")*"" else: return """"" 1737,"def _brzozowski_algebraic_method_init(self): """"""Initialize Brzozowski Algebraic Method"""""" # Initialize B for state_a in self.mma.states: if state_a.final: self.B[state_a.stateid] = self.epsilon else: self.B[state_a.stateid] = self.empty # Initialize A for state_b in self.mma.states: self.A[state_a.stateid, state_b.stateid] = self.empty for arc in state_a.arcs: if arc.nextstate == state_b.stateid: self.A[state_a.stateid, state_b.stateid] = \ self.mma.isyms.find(arc.ilabel)" 1738,"def _brzozowski_algebraic_method_solve(self): """"""Perform Brzozowski Algebraic Method"""""" orderedstates = self._bfs_sort( sorted( self.mma.states, key=attrgetter('initial'), reverse=True)[0]) for n in range(len(orderedstates) - 1, 0, -1): # print ""n:"" + repr(n) if self.A[ orderedstates[n].stateid, orderedstates[n].stateid] != self.empty: # B[n] := star(A[n,n]) . B[n] if self.B[orderedstates[n].stateid] != self.empty: self.B[orderedstates[n].stateid] = \ self.star(self.A[orderedstates[n].stateid, orderedstates[n].stateid]) \ + self.B[orderedstates[n].stateid] else: self.B[orderedstates[n].stateid] = self.star( self.A[orderedstates[n].stateid, orderedstates[n].stateid]) for j in range(0, n): # A[n,j] := star(A[n,n]) . 
A[n,j] if self.A[ orderedstates[n].stateid, orderedstates[j].stateid] != self.empty: self.A[ orderedstates[n].stateid, orderedstates[j].stateid] = \ self.star(self.A[orderedstates[n].stateid, orderedstates[n].stateid]) \ + self.A[orderedstates[n].stateid, orderedstates[j].stateid] else: self.A[orderedstates[n].stateid, orderedstates[j].stateid] = self.star( self.A[orderedstates[n].stateid, orderedstates[n].stateid]) for i in range(0, n): # B[i] += A[i,n] . B[n] newnode = None if self.A[orderedstates[i].stateid, orderedstates[n].stateid] != self.empty \ and self.B[orderedstates[n].stateid] != self.empty: newnode = self.A[orderedstates[i].stateid, orderedstates[ n].stateid] + self.B[orderedstates[n].stateid] elif self.A[orderedstates[i].stateid, orderedstates[n].stateid] != self.empty: newnode = self.A[ orderedstates[i].stateid, orderedstates[n].stateid] elif self.B[orderedstates[n].stateid] != self.empty: newnode = self.B[orderedstates[n].stateid] if self.B[orderedstates[i].stateid] != self.empty: if newnode is not None: self.B[orderedstates[i].stateid] += newnode else: self.B[orderedstates[i].stateid] = newnode for j in range(0, n): # A[i,j] += A[i,n] . A[n,j] newnode = None if self.A[ orderedstates[i].stateid, orderedstates[n].stateid] != self.empty \ and self.A[orderedstates[n].stateid, orderedstates[j].stateid] \ != self.empty: newnode = self.A[orderedstates[i].stateid, orderedstates[ n].stateid] + self.A[orderedstates[n].stateid, orderedstates[j].stateid] elif self.A[orderedstates[i].stateid, orderedstates[n].stateid] != self.empty: newnode = self.A[ orderedstates[i].stateid, orderedstates[n].stateid] elif self.A[orderedstates[n].stateid, orderedstates[j].stateid] != self.empty: newnode = self.A[ orderedstates[n].stateid, orderedstates[j].stateid] if self.A[ orderedstates[i].stateid, orderedstates[j].stateid] != self.empty: if newnode is not None: self.A[ orderedstates[i].stateid, orderedstates[j].stateid] += newnode else: self.A[ orderedstates[i].stateid, orderedstates[j].stateid] = newnode" 1739,"def load_mmd(): """"""Loads libMultiMarkdown for usage"""""" global _MMD_LIB global _LIB_LOCATION try: lib_file = 'libMultiMarkdown' + SHLIB_EXT[platform.system()] _LIB_LOCATION = os.path.abspath(os.path.join(DEFAULT_LIBRARY_DIR, lib_file)) if not os.path.isfile(_LIB_LOCATION): _LIB_LOCATION = ctypes.util.find_library('MultiMarkdown') _MMD_LIB = ctypes.cdll.LoadLibrary(_LIB_LOCATION) except: _MMD_LIB = None" 1740,"def _expand_source(source, dname, fmt): """"""Expands source text to include headers, footers, and expands Multimarkdown transclusion directives. 
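A transclusion directive such as ``{{chapter_one.md}}`` (hypothetical file name) is replaced with the contents of that file, resolved relative to ``dname``.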
Keyword arguments: source -- string containing the Multimarkdown text to expand dname -- directory name to use as the base directory for transclusion references fmt -- format flag indicating which format to use to convert transclusion statements """""" _MMD_LIB.g_string_new.restype = ctypes.POINTER(GString) _MMD_LIB.g_string_new.argtypes = [ctypes.c_char_p] src = source.encode('utf-8') gstr = _MMD_LIB.g_string_new(src) _MMD_LIB.prepend_mmd_header(gstr) _MMD_LIB.append_mmd_footer(gstr) manif = _MMD_LIB.g_string_new(b"""") _MMD_LIB.transclude_source.argtypes = [ctypes.POINTER(GString), ctypes.c_char_p, ctypes.c_char_p, ctypes.c_int, ctypes.POINTER(GString)] _MMD_LIB.transclude_source(gstr, dname.encode('utf-8'), None, fmt, manif) manifest_txt = manif.contents.str full_txt = gstr.contents.str _MMD_LIB.g_string_free(manif, True) _MMD_LIB.g_string_free(gstr, True) manifest_txt = [ii for ii in manifest_txt.decode('utf-8').split('\n') if ii] return full_txt.decode('utf-8'), manifest_txt" 1741,"def has_metadata(source, ext): """"""Returns a flag indicating if a given block of MultiMarkdown text contains metadata."""""" _MMD_LIB.has_metadata.argtypes = [ctypes.c_char_p, ctypes.c_int] _MMD_LIB.has_metadata.restype = ctypes.c_bool return _MMD_LIB.has_metadata(source.encode('utf-8'), ext)" 1742,"def convert(source, ext=COMPLETE, fmt=HTML, dname=None): """"""Converts a string of MultiMarkdown text to the requested format. Transclusion is performed if the COMPATIBILITY extension is not set, and dname is set to a valid directory Keyword arguments: source -- string containing MultiMarkdown text ext -- extension bitfield to pass to conversion process fmt -- flag indicating output format to use dname -- Path to use for transclusion - if None, transclusion functionality is bypassed """""" if dname and not ext & COMPATIBILITY: if os.path.isfile(dname): dname = os.path.abspath(os.path.dirname(dname)) source, _ = _expand_source(source, dname, fmt) _MMD_LIB.markdown_to_string.argtypes = [ctypes.c_char_p, ctypes.c_ulong, ctypes.c_int] _MMD_LIB.markdown_to_string.restype = ctypes.c_char_p src = source.encode('utf-8') return _MMD_LIB.markdown_to_string(src, ext, fmt).decode('utf-8')" 1743,"def convert_from(fname, ext=COMPLETE, fmt=HTML): """""" Reads in a file and performs MultiMarkdown conversion, with transclusion ocurring based on the file directory. Returns the converted string. Keyword arguments: fname -- Filename of document to convert ext -- extension bitfield to pass to conversion process fmt -- flag indicating output format to use """""" dname = os.path.abspath(os.path.dirname(fname)) with open(fname, 'r') as fp: src = fp.read() return convert(src, ext, fmt, dname)" 1744,"def manifest(txt, dname): """"""Extracts file manifest for a body of text with the given directory."""""" _, files = _expand_source(txt, dname, HTML) return files" 1745,"def keys(source, ext=COMPLETE): """"""Extracts metadata keys from the provided MultiMarkdown text. 
Keyword arguments: source -- string containing MultiMarkdown text ext -- extension bitfield for extracting MultiMarkdown """""" _MMD_LIB.extract_metadata_keys.restype = ctypes.c_char_p _MMD_LIB.extract_metadata_keys.argtypes = [ctypes.c_char_p, ctypes.c_ulong] src = source.encode('utf-8') all_keys = _MMD_LIB.extract_metadata_keys(src, ext) all_keys = all_keys.decode('utf-8') if all_keys else '' key_list = [ii for ii in all_keys.split('\n') if ii] return key_list" 1746,"def value(source, key, ext=COMPLETE): """"""Extracts value for the specified metadata key from the given extension set. Keyword arguments: source -- string containing MultiMarkdown text ext -- extension bitfield for processing text key -- key to extract """""" _MMD_LIB.extract_metadata_value.restype = ctypes.c_char_p _MMD_LIB.extract_metadata_value.argtypes = [ctypes.c_char_p, ctypes.c_ulong, ctypes.c_char_p] src = source.encode('utf-8') dkey = key.encode('utf-8') value = _MMD_LIB.extract_metadata_value(src, ext, dkey) return value.decode('utf-8') if value else ''" 1747,"def tweet(self, text, in_reply_to=None, filename=None, file=None): """""" Post a new tweet. :param text: the text to post :param in_reply_to: The ID of the tweet to reply to :param filename: If `file` param is not provided, read file from this path :param file: A file object, which will be used instead of opening `filename`. `filename` is still required, for MIME type detection and to use as a form field in the POST data :return: Tweet object """""" if filename is None: return Tweet(self._client.update_status(status=text, in_reply_to_status_id=in_reply_to)._json) else: return Tweet(self._client.update_with_media(filename=filename, file=file, status=text, in_reply_to_status_id=in_reply_to)._json)" 1748,"def retweet(self, id): """""" Retweet a tweet. :param id: ID of the tweet in question :return: True if success, False otherwise """""" try: self._client.retweet(id=id) return True except TweepError as e: if e.api_code == TWITTER_PAGE_DOES_NOT_EXISTS_ERROR: return False raise" 1749,"def get_tweet(self, id): """""" Get an existing tweet. :param id: ID of the tweet in question :return: Tweet object. None if not found """""" try: return Tweet(self._client.get_status(id=id)._json) except TweepError as e: if e.api_code == TWITTER_TWEET_NOT_FOUND_ERROR: return None raise" 1750,"def get_user(self, id): """""" Get a user's info. :param id: ID of the user in question :return: User object. None if not found """""" try: return User(self._client.get_user(user_id=id)._json) except TweepError as e: if e.api_code == TWITTER_USER_NOT_FOUND_ERROR: return None raise" 1751,"def remove_tweet(self, id): """""" Delete a tweet. :param id: ID of the tweet in question :return: True if success, False otherwise """""" try: self._client.destroy_status(id=id) return True except TweepError as e: if e.api_code in [TWITTER_PAGE_DOES_NOT_EXISTS_ERROR, TWITTER_DELETE_OTHER_USER_TWEET]: return False raise" 1752,"def follow(self, user_id, notify=False): """""" Follow a user. :param user_id: ID of the user in question :param notify: whether to notify the user about the following :return: user that are followed """""" try: return User(self._client.create_friendship(user_id=user_id, follow=notify)._json) except TweepError as e: if e.api_code in [TWITTER_ACCOUNT_SUSPENDED_ERROR]: return self.get_user(user_id) raise" 1753,"def unfollow(self, user_id): """""" Follow a user. 
:param user_id: ID of the user in question :return: The user that were unfollowed """""" return User(self._client.destroy_friendship(user_id=user_id)._json)" 1754,"def create_list(self, name, mode='public', description=None): """""" Create a list :param name: Name of the new list :param mode: :code:`'public'` (default) or :code:`'private'` :param description: Description of the new list :return: The new list object :rtype: :class:`~responsebot.models.List` """""" return List(tweepy_list_to_json(self._client.create_list(name=name, mode=mode, description=description)))" 1755,"def destroy_list(self, list_id): """""" Destroy a list :param list_id: list ID number :return: The destroyed list object :rtype: :class:`~responsebot.models.List` """""" return List(tweepy_list_to_json(self._client.destroy_list(list_id=list_id)))" 1756,"def update_list(self, list_id, name=None, mode=None, description=None): """""" Update a list :param list_id: list ID number :param name: New name for the list :param mode: :code:`'public'` (default) or :code:`'private'` :param description: New description of the list :return: The updated list object :rtype: :class:`~responsebot.models.List` """""" return List(tweepy_list_to_json( self._client.update_list(list_id=list_id, name=name, mode=mode, description=description)) )" 1757,"def list_timeline(self, list_id, since_id=None, max_id=None, count=20): """""" List the tweets of specified list. :param list_id: list ID number :param since_id: results will have ID greater than specified ID (more recent than) :param max_id: results will have ID less than specified ID (older than) :param count: number of results per page :return: list of :class:`~responsebot.models.Tweet` objects """""" statuses = self._client.list_timeline(list_id=list_id, since_id=since_id, max_id=max_id, count=count) return [Tweet(tweet._json) for tweet in statuses]" 1758,"def get_list(self, list_id): """""" Get info of specified list :param list_id: list ID number :return: :class:`~responsebot.models.List` object """""" return List(tweepy_list_to_json(self._client.get_list(list_id=list_id)))" 1759,"def add_list_member(self, list_id, user_id): """""" Add a user to list :param list_id: list ID number :param user_id: user ID number :return: :class:`~responsebot.models.List` object """""" return List(tweepy_list_to_json(self._client.add_list_member(list_id=list_id, user_id=user_id)))" 1760,"def remove_list_member(self, list_id, user_id): """""" Remove a user from a list :param list_id: list ID number :param user_id: user ID number :return: :class:`~responsebot.models.List` object """""" return List(tweepy_list_to_json(self._client.remove_list_member(list_id=list_id, user_id=user_id)))" 1761,"def list_members(self, list_id): """""" List users in a list :param list_id: list ID number :return: list of :class:`~responsebot.models.User` objects """""" return [User(user._json) for user in self._client.list_members(list_id=list_id)]" 1762,"def is_list_member(self, list_id, user_id): """""" Check if a user is member of a list :param list_id: list ID number :param user_id: user ID number :return: :code:`True` if user is member of list, :code:`False` otherwise """""" try: return bool(self._client.show_list_member(list_id=list_id, user_id=user_id)) except TweepError as e: if e.api_code == TWITTER_USER_IS_NOT_LIST_MEMBER_SUBSCRIBER: return False raise" 1763,"def subscribe_list(self, list_id): """""" Subscribe to a list :param list_id: list ID number :return: :class:`~responsebot.models.List` object """""" return 
List(tweepy_list_to_json(self._client.subscribe_list(list_id=list_id)))" 1764,"def unsubscribe_list(self, list_id): """""" Unsubscribe to a list :param list_id: list ID number :return: :class:`~responsebot.models.List` object """""" return List(tweepy_list_to_json(self._client.unsubscribe_list(list_id=list_id)))" 1765,"def list_subscribers(self, list_id): """""" List subscribers of a list :param list_id: list ID number :return: :class:`~responsebot.models.User` object """""" return [User(user._json) for user in self._client.list_subscribers(list_id=list_id)]" 1766,"def is_subscribed_list(self, list_id, user_id): """""" Check if user is a subscribed of specified list :param list_id: list ID number :param user_id: user ID number :return: :code:`True` if user is subscribed of list, :code:`False` otherwise """""" try: return bool(self._client.show_list_subscriber(list_id=list_id, user_id=user_id)) except TweepError as e: if e.api_code == TWITTER_USER_IS_NOT_LIST_MEMBER_SUBSCRIBER: return False raise" 1767,"def auth(config): """""" Perform authentication with Twitter and return a client instance to communicate with Twitter :param config: ResponseBot config :type config: :class:`~responsebot.utils.config_utils.ResponseBotConfig` :return: client instance to execute twitter action :rtype: :class:`~responsebot.responsebot_client.ResponseBotClient` :raises: :class:`~responsebot.common.exceptions.AuthenticationError`: If failed to authenticate :raises: :class:`~responsebot.common.exceptions.APIQuotaError`: If API call rate reached limit """""" auth = tweepy.OAuthHandler(config.get('consumer_key'), config.get('consumer_secret')) auth.set_access_token(config.get('token_key'), config.get('token_secret')) api = tweepy.API(auth) try: api.verify_credentials() except RateLimitError as e: raise APIQuotaError(e.args[0][0]['message']) except TweepError as e: raise AuthenticationError(e.args[0][0]['message']) else: logging.info('Successfully authenticated as %s' % api.me().screen_name) return ResponseBotClient(config=config, client=api)" 1768,"def json2py(json_obj): """""" Converts the inputted JSON object to a python value. :param json_obj | <variant> """""" for key, value in json_obj.items(): if type(value) not in (str, unicode): continue # restore a datetime if re.match('^\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}:\d+$', value): value = datetime.datetime.strptime(value, '%Y-%m-%d %H:%M:%S:%f') elif re.match('^\d{4}-\d{2}-\d{2}$', value): year, month, day = map(int, value.split('-')) value = datetime.date(year, month, day) elif re.match('^\d{2}:\d{2}:\d{2}:\d+$', value): hour, minute, second, micro = map(int, value.split(':')) value = datetime.time(hour, minute, second, micro) else: found = False for decoder in _decoders: success, new_value = decoder(value) if success: value = new_value found = True break if not found: continue json_obj[key] = value return json_obj" 1769,"def jsonify(py_data, default=None, indent=4, sort_keys=True): """""" Converts the inputted Python data to JSON format. :param py_data | <variant> """""" return json.dumps(py_data, default=py2json, indent=indent, sort_keys=sort_keys)" 1770,"def py2json(py_obj): """""" Converts the inputted python object to JSON format. 
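For example, a ``datetime.date`` is converted to its ISO 8601 string, a ``set`` becomes a ``list``, and a ``decimal.Decimal`` becomes a string.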
:param py_obj | <variant> """""" method = getattr(py_obj, '__json__', None) if method: return method() elif type(py_obj) == datetime.datetime: return py_obj.isoformat() elif type(py_obj) == datetime.date: return py_obj.isoformat() elif type(py_obj) == datetime.time: return py_obj.isoformat() elif type(py_obj) == set: return list(py_obj) elif type(py_obj) == decimal.Decimal: return str(py_obj) else: # look through custom plugins for encoder in _encoders: success, value = encoder(py_obj) if success: return value opts = (py_obj, type(py_obj)) raise TypeError('Unserializable object {} of type {}'.format(*opts))" 1771,"def register(encoder=None, decoder=None): """""" Registers an encoder method and/or a decoder method for processing custom values. Encoder and decoders should take a single argument for the value to encode or decode, and return a tuple of (<bool> success, <variant> value). A successful decode or encode should return True and the value. :param encoder | <callable> || None decoder | <callable> || None """""" if encoder: _encoders.append(encoder) if decoder: _decoders.append(decoder)" 1772,"def xmlresponse(py_data): """""" Generates an XML formatted method response for the given python data. :param py_data | <variant> """""" xroot = ElementTree.Element('methodResponse') xparams = ElementTree.SubElement(xroot, 'params') xparam = ElementTree.SubElement(xparams, 'param') type_map = {'bool': 'boolean', 'float': 'double', 'str': 'string', 'unicode': 'string', 'datetime': 'dateTime.iso8601', 'date': 'date.iso8601', 'time': 'time.iso8601'} def xobj(xparent, py_obj): # convert a list of information if type(py_obj) in (tuple, list): xarr = ElementTree.SubElement(xparent, 'array') xdata = ElementTree.SubElement(xarr, 'data') for val in py_obj: xval = ElementTree.SubElement(xdata, 'value') xobj(xval, val) # convert a dictionary of information elif type(py_obj) == dict: xstruct = ElementTree.SubElement(xparent, 'struct') for key, val in py_obj.items(): xmember = ElementTree.SubElement(xstruct, 'member') xname = ElementTree.SubElement(xmember, 'name') xname.text = key xval = ElementTree.SubElement(xmember, 'value') xobj(xval, val) # convert a None value elif py_obj is None: ElementTree.SubElement(xparent, 'nil') # convert a basic value else: typ = type(py_obj).__name__ typ = type_map.get(typ, typ) xitem = ElementTree.SubElement(xparent, typ) # convert a datetime/date/time if isinstance(py_obj, datetime.date) or \ isinstance(py_obj, datetime.time) or \ isinstance(py_obj, datetime.datetime): if py_obj.tzinfo and pytz: data = py_obj.astimezone(pytz.utc).replace(tzinfo=None) xitem.text = data.isoformat() else: xitem.text = py_obj.isoformat() # convert a boolean elif type(py_obj) == bool: xitem.text = nstr(int(py_obj)) # convert a non-string object elif not type(py_obj) in (str, unicode): xitem.text = nstr(py_obj) # convert a string object else: xitem.text = py_obj xobj(xparam, py_data) projex.text.xmlindent(xroot) return ElementTree.tostring(xroot)" 1773,"def _read_file(fname): """""" Args: fname (str): Name of the grammar file to be parsed Return: list: The grammar rules """""" with open(fname) as input_file: re_grammar = [x.strip('\n') for x in input_file.readlines()] return re_grammar" 1774,"def main(): """""" Function for PDA to CNF Operation :type argv: list :param argv: Parameters """""" if len(argv) < 3: print 'Usage for getting CFG: %s CFG_fileA CFG ' % argv[0] print 'Usage for getting STR: %s CFG_fileA STR ' \ 'Optimize[0 or 1] splitstring[0 or 1] ' % argv[0] print '' print 'For example: 
python pdacnf.py grammar.y STR 1 0' print ' python pdacnf.py grammar.y STR 1 1' print ' python pdacnf.py grammar.y CFG' return alphabet = createalphabet() mode = argv[2] optimized = 0 splitstring = 0 if mode == 'STR': optimized = int(argv[3]) splitstring = int(argv[4]) cfgtopda = CfgPDA(alphabet) print '* Parsing Grammar:', mma = cfgtopda.yyparse(argv[1]) print 'OK' print ' - Total PDA states are ' + repr(len(mma.s)) print '* Simplify State IDs:', simple_a = SimplifyStateIDs() mma.s, biggestid, newaccepted = simple_a.get(mma.s) if newaccepted: print 'OK' else: print 'OK' print '* Eliminate READ states:', replace = ReadReplace(mma.s, biggestid) mma.s = replace.replace_read() print 'OK' print ' - Total PDA states now are ' + repr(len(mma.s)) maxstate = replace.nextstate() - 1 print '* Reduce PDA:', simple_b = ReducePDA() mma.s = simple_b.get(mma.s) print 'OK' print ' - Total PDA states now are ' + repr(len(mma.s)) print '* PDA to CFG transformation:', cnfgenerator = PdaCnf(mma.s) grammar = cnfgenerator.get_rules(optimized) print 'OK' print ' - Total CFG rules generated: ' + repr(len(grammar)) if mode == 'STR': gen = CFGGenerator(CNFGenerator(grammar), optimized=optimized, splitstring=splitstring, maxstate=maxstate) print gen.generate() else: print grammar" 1775,"def get(self, statediag, dfaaccepted): """""" # - Remove all the POP (type - 2) transitions to state 0,non DFA accepted # for symbol @closing # - Generate the accepted transitions - Replace DFA accepted States with a push - pop symbol and two extra states Args: statediag (list): The states of the PDA dfaaccepted (list):The list of DFA accepted states Returns: list: A cleaned, smaller list of DFA states """""" newstatediag = {} newstate = PDAState() newstate.id = 'AI,I' # BECAREFUL WHEN SIMPLIFYING... newstate.type = 1 newstate.sym = '@wrapping' transitions = {} transitions[(0, 0)] = [0] newstate.trans = transitions i = 0 newstatediag[i] = newstate # print 'accepted:' # print dfaaccepted for stateid in statediag: state = statediag[stateid] # print state.id if state.type == 2: for state2id in dfaaccepted: # print state.id[1] if state.id[1] == state2id: # print 'adding...' 
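# wire this DFA-accepted POP state to the new 'AI,I' wrapping state by having it pop the '@wrapping' symbol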
state.trans['AI,I'] = ['@wrapping'] # print state.trans break i = i + 1 newstatediag[i] = state return newstatediag" 1776,"def bfs(self, graph, start): """""" Performs a BFS operation for eliminating useless loop transitions Args: graph (PDA): the PDA object start (PDA state): The PDA initial state Returns: list: A cleaned, smaller list of DFA states """""" newstatediag = {} # maintain a queue of paths queue = [] visited = [] # push the first path into the queue queue.append(start) while queue: # get the first path from the queue state = queue.pop(0) # get the last node from the path # visited visited.append(state.id) # enumerate all adjacent nodes, construct a new path and push it # into the queue for key in state.trans: if state.trans[key] != []: if key not in visited: for nextstate in graph: if graph[nextstate].id == key: queue.append(graph[nextstate]) break i = 0 for state in graph: if graph[state].id in visited: newstatediag[i] = graph[state] i = i + 1 return newstatediag" 1777,"def get(self, statediag): """""" Args: statediag (list): The states of the PDA Returns: list: A reduced list of states using BFS """""" if len(statediag) < 1: print 'PDA is empty and can not be reduced' return statediag newstatediag = self.bfs(statediag, statediag[0]) return newstatediag" 1778,"def get(self, statediag, accepted=None): """""" Replaces the complex state IDs generated by the product operation with simple sequential numbers. A dictionary is maintained in order to map the existing IDs. Args: statediag (list): The states of the PDA accepted (list): the list of DFA accepted states Returns: list: """""" count = 0 statesmap = {} newstatediag = {} for state in statediag: # Simplify state IDs if statediag[state].id not in statesmap: statesmap[statediag[state].id] = count mapped = count count = count + 1 else: mapped = statesmap[statediag[state].id] # Simplify transitions IDs transitions = {} for nextstate in statediag[state].trans: if nextstate not in statesmap: statesmap[nextstate] = count transmapped = count count = count + 1 else: transmapped = statesmap[nextstate] transitions[transmapped] = statediag[state].trans[nextstate] newstate = PDAState() newstate.id = mapped newstate.type = statediag[state].type newstate.sym = statediag[state].sym newstate.trans = transitions newstatediag[mapped] = newstate newaccepted = None if accepted is not None: newaccepted = [] for accepted_state in accepted: if (0, accepted_state) in statesmap: newaccepted.append(statesmap[(0, accepted_state)]) return newstatediag, count, newaccepted" 1779,"def _generate_state(self, trans): """""" Creates a new POP state (type - 2) with the same transitions. The POPed symbol is the unique number of the state. Args: trans (dict): Transition dictionary Returns: Int: The state identifier """""" state = PDAState() state.id = self.nextstate() state.type = 2 state.sym = state.id state.trans = trans.copy() self.toadd.append(state) return state.id" 1780,"def replace_read(self): """""" Replaces all READ (type - 3) states with a PUSH (type - 1) and a POP (type - 2). The actual state is replaced with the PUSH, and a new POP is created.
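The new POP state pops its own unique identifier, which is the same symbol the rewritten PUSH state pushes, so the original READ behaviour is preserved.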
"""""" for statenum in self.statediag: state = self.statediag[statenum] if state.type == 3: # READ state state.type = 1 destination_and_symbol = self._generate_state(state.trans) state.sym = destination_and_symbol state.trans = {} state.trans[destination_and_symbol] = [0] statenumber_identifier = len(self.statediag) + 1 for state in self.toadd: self.statediag[statenumber_identifier] = state statenumber_identifier = statenumber_identifier + 1 return self.statediag" 1781,"def insert_self_to_empty_and_insert_all_intemediate(self, optimized): """""" For each state qi of the PDA, we add the rule Aii -> e For each triplet of states qi, qj and qk, we add the rule Aij -> Aik Akj. Args: optimized (bool): Enable or Disable optimization - Do not produce O(n^3) """""" for state_a in self.statediag: self.rules.append('A' +repr(state_a.id) +',' + repr(state_a.id) + ': @empty_set') # If CFG is not requested, avoid the following O(n^3) rule. # It can be solved and a string can be generated faster with BFS of DFS if optimized == 0: for state_b in self.statediag: if state_b.id != state_a.id: for state_c in self.statediag: if state_c.id != state_a.id \ and state_b.id != state_c.id: self.rules.append('A' + repr(state_a.id) + ',' + repr(state_c.id) + ': A' + repr(state_a.id) + ',' + repr(state_b.id) + ' A' + repr(state_b.id) + ',' + repr(state_c.id) + '')" 1782,"def insert_symbol_pushpop(self): """""" For each stack symbol t E G, we look for a pair of states, qi and qj, such that the PDA in state qi can read some input a E S and push t on the stack and in state state qj can read some input b E S and pop t off the stack. In that case, we add the rule Aik -> a Alj b where (ql,t) E d(qi,a,e) and (qk,e) E d(qj,b,t). """""" for state_a in self.statediag: if state_a.type == 1: found = 0 for state_b in self.statediag: if state_b.type == 2 and state_b.sym == state_a.sym: found = 1 for j in state_a.trans: if state_a.trans[j] == [0]: read_a = '' else: new = [] for selected_transition in state_a.trans[j]: if selected_transition == ' ': new.append('&') else: new.append(selected_transition) read_a = "" | "".join(new) for i in state_b.trans: if state_b.trans[i] == [0]: read_b = '' else: new = [] for selected_transition in state_b.trans[i]: if selected_transition == ' ': new.append('&') else: new.append(selected_transition) read_b = "" | "".join(new) self.rules.append( 'A' + repr(state_a.id) + ',' + repr(i) + ':' + read_a + ' A' + repr(j) + ',' + repr(state_b.id) + ' ' + read_b) if found == 0: # A special case is required for State 2, where the POPed symbols # are part of the transitions array and not defined for ""sym"" variable. for state_b in self.statediag: if state_b.type == 2 and state_b.sym == 0: for i in state_b.trans: if state_a.sym in state_b.trans[i]: for j in state_a.trans: if state_a.trans[j] == [0]: read_a = '' else: read_a = "" | "".join( state_a.trans[j]) self.rules.append( 'A' + repr(state_a.id) + ',' + repr(i) + ':' + read_a + ' A' + repr(j) + ',' + repr(state_b.id)) # print # 'A'+`state_a.id`+','+`i`+':'+read_a+' # A'+`j`+','+`state_b.id` found = 1 if found == 0: print ""ERROR: symbol "" + repr(state_a.sym) \ + "". It was not found anywhere in the graph.""" 1783,"def get_rules(self, optimized): """""" Args: optimized (bool): Enable or Disable optimization - Do not produce O(n^3) Return: list: The CFG rules """""" self.insert_start_to_accepting() # If CFG is not requested, avoid the following O(n^3) rule. 
# It can be solved and a string can be generated faster with BFS of DFS if optimized == 0: self.insert_self_to_empty_and_insert_all_intemediate(optimized) self.insert_symbol_pushpop() return self.rules" 1784,"def provision(self, instance_id: str, service_details: ProvisionDetails, async_allowed: bool) -> ProvisionedServiceSpec: """"""Provision the new instance see openbrokerapi documentation Returns: ProvisionedServiceSpec """""" if service_details.plan_id == self._backend.config.UUID_PLANS_EXISTING_CLUSTER: # Provision the instance on an Existing Atlas Cluster # Find or create the instance instance = self._backend.find(instance_id) # Create the instance if needed return self._backend.create(instance, service_details.parameters, existing=True) # Plan not supported raise ErrPlanUnsupported(service_details.plan_id)" 1785,"def unbind(self, instance_id: str, binding_id: str, details: UnbindDetails): """"""Unbinding the instance see openbrokerapi documentation Raises: ErrBindingDoesNotExist: Binding does not exist. """""" # Find the instance instance = self._backend.find(instance_id) # Find the binding binding = self._backend.find(binding_id, instance) if not binding.isProvisioned(): # The binding does not exist raise ErrBindingDoesNotExist() # Delete the binding self._backend.unbind(binding)" 1786,"def bind(self, instance_id: str, binding_id: str, details: BindDetails) -> Binding: """"""Binding the instance see openbrokerapi documentation """""" # Find the instance instance = self._backend.find(instance_id) # Find or create the binding binding = self._backend.find(binding_id, instance) # Create the binding if needed return self._backend.bind(binding, details.parameters)" 1787,"def deprovision(self, instance_id: str, details: DeprovisionDetails, async_allowed: bool) -> DeprovisionServiceSpec: """"""Deprovision an instance see openbrokerapi documentation Raises: ErrInstanceDoesNotExist: Instance does not exist. """""" # Find the instance instance = self._backend.find(instance_id) if not instance.isProvisioned(): # the instance does not exist raise ErrInstanceDoesNotExist() return self._backend.delete(instance)" 1788,"def _ddns(self, ip): """""" curl -X POST https://dnsapi.cn/Record.Ddns -d 'login_token=LOGIN_TOKEN&format=json&domain_id=2317346&record_id=16894439&record_line=默认&sub_domain=www' :return: """""" headers = {""Accept"": ""text/json"", ""User-Agent"": ""ddns/0.1.0 (imaguowei@gmail.com)""} data = { 'login_token': self.login_token, 'format': ""json"", 'domain_id': self.domain_id, 'record_id': self.record_id, 'sub_domain': self.sub_domain, 'record_line': '默认', 'value': ip } res = requests.post(Ddns.DNSPOD_API, data, headers=headers) logger.debug(res.json()) return res.json()['status']['code'] == '1'" 1789,"def post(self, path, data={}): '''Perform POST Request ''' response = requests.post(API_URL + path, data=json.dumps(data), headers=self._set_headers()) return self._check_response(response, self.post, path, data)" 1790,"def delete(self, path, data={}): '''Perform DELETE Request''' if len(data) != 0: parameter_string = '' for k,v in data.items(): parameter_string += '{}={}'.format(k,v) parameter_string += '&' path += '?' + parameter_string response = requests.delete(API_URL + path, headers=self._set_headers()) return self._check_response(response, self.delete, path, data)" 1791,"def parsed(self): """"""Get the ConfigParser object which represents the content. This property is cached and only parses the content once. 
"""""" if not self._parsed: self._parsed = ConfigParser() self._parsed.readfp(io.StringIO(self.content)) return self._parsed" 1792,"def create_cache(directory, compress_level=6, value_type_is_binary=False, **kwargs): """""" Create a html cache. Html string will be automatically compressed. :param directory: path for the cache directory. :param compress_level: 0 ~ 9, 9 is slowest and smallest. :param kwargs: other arguments. :return: a `diskcache.Cache()` """""" cache = diskcache.Cache( directory, disk=CompressedDisk, disk_compress_level=compress_level, disk_value_type_is_binary=value_type_is_binary, **kwargs ) return cache" 1793,"def timeticks(tdiff): """""" NOTE do NOT use ""interval"" or ticks are misaligned! use ""bysecond"" only! """""" if isinstance(tdiff, xarray.DataArray): # len==1 tdiff = timedelta(seconds=tdiff.values / np.timedelta64(1, 's')) assert isinstance(tdiff, timedelta), 'expecting datetime.timedelta' if tdiff > timedelta(hours=2): return None, None elif tdiff > timedelta(minutes=20): return MinuteLocator(byminute=range(0, 60, 5)), MinuteLocator(byminute=range(0, 60, 2)) elif (timedelta(minutes=10) < tdiff) & (tdiff <= timedelta(minutes=20)): return MinuteLocator(byminute=range(0, 60, 2)), MinuteLocator(byminute=range(0, 60, 1)) elif (timedelta(minutes=5) < tdiff) & (tdiff <= timedelta(minutes=10)): return MinuteLocator(byminute=range(0, 60, 1)), SecondLocator(bysecond=range(0, 60, 30)) elif (timedelta(minutes=1) < tdiff) & (tdiff <= timedelta(minutes=5)): return SecondLocator(bysecond=range(0, 60, 30)), SecondLocator(bysecond=range(0, 60, 10)) elif (timedelta(seconds=30) < tdiff) & (tdiff <= timedelta(minutes=1)): return SecondLocator(bysecond=range(0, 60, 10)), SecondLocator(bysecond=range(0, 60, 2)) else: return SecondLocator(bysecond=range(0, 60, 2)), SecondLocator(bysecond=range(0, 60, 1))" 1794,"def cli(ctx, bubble_home, config, verbose, barverbose, profile): """"""Bubble: command line tool for bubbling information between services .oOo.oOo.oOo.oOo.oOo.oOo.oOo.oOo.oOo.oOo.oOo.oOo.oOo.oOo.oOo.oOo.\n Making single point to point API connections:\n _________________>>>>>>>>>pump>>>>>>>>>>>>>_____________________\n (source-service)->pull->(transform)->push->(target-service)\n _________________>>>>>>>>>pump>>>>>>>>>>>>>_____________________\n bubble can:\n * pull data from the source client\n * transform the data with flexible mapping and filtering rules\n * rules can use (custom) rule functions\n * push the result to the target client\n A Bubble can process a list of basic python dicts(LOD), which are persisted in files or a database, for each step and stage that produced it. The only requirement for the service clients is that they have a:\n * source sevice: pull method which provides a LOD\n * target sevice: push method which accepts a dict\n A Bubble tries hard not to forget any step that has taken place, the results of any completed step is stored in a file, in the remember directory inside the Bubble. Without rules and bubble will ""just"" copy.\n Commands marked with (experimental) might work, but have not fully ""behave"" tested yet. For help on a specific command you can use: bubble <cmd> --help Create a bubble, make the information flow and start bubbling.\n .oOo.oOo.oOo.oOo.oOo.oOo.oOo.oOo.oOo.oOo.oOo.oOo.oOo.oOo.oOo.oOo.\n """""" # Create a bubble object and remember it as the context object. # From this point onwards other commands can refer to it by using the # @pass_bubble decorator. 
cis = ctx.invoked_subcommand initing = False if cis == 'stats': nagios = False try: monitor = ctx.args[ctx.args.index('--monitor') + 1] if monitor == 'nagios': nagios = True except (ValueError, IndexError): pass if nagios: verbose = 0 BUBBLE_CLI_GLOBALS['profiling'] = profile if profile: start_profile() global VERBOSE VERBOSE = verbose global VERBOSE_BAR VERBOSE_BAR = barverbose if bubble_home != '.': bubble_home_abs = os.path.abspath(bubble_home) else: bubble_home_abs = os.path.abspath(os.getcwd()) if cis == 'init': initing = True if initing: if not os.path.exists(bubble_home_abs): os.makedirs(bubble_home_abs) if os.path.exists(bubble_home_abs): os.chdir(bubble_home_abs) ctx.obj = BubbleCli(home=bubble_home_abs, verbose=verbose, verbose_bar=barverbose) else: click.echo('Bubble home path does not exist: ' + bubble_home_abs) raise click.Abort() BUBBLE_CLI_GLOBALS['full_command'] = ' '.join(sys.argv) for key, value in config: ctx.obj.set_config(key, value) if not ctx.obj.bubble and not initing: ctx.obj.say_yellow('There is no bubble in %s' % bubble_home_abs, verbosity=10) ctx.obj.say('You can start one with: bubble init', verbosity=10)" 1795,"def consume(self, msg): """"""Called with each incoming fedmsg. From here we trigger an rpm-ostree compose by touching a specific file under the `touch_dir`. Then our `doRead` method is called with the output of the rpm-ostree-toolbox treecompose, which we monitor to determine when it has completed. """""" self.log.info(msg) body = msg['body'] topic = body['topic'] repo = None if 'rawhide' in topic: arch = body['msg']['arch'] self.log.info('New rawhide %s compose ready', arch) repo = 'rawhide' elif 'branched' in topic: arch = body['msg']['arch'] branch = body['msg']['branch'] self.log.info('New %s %s branched compose ready', branch, arch) log = body['msg']['log'] if log != 'done': self.log.warn('Compose not done?') return repo = branch elif 'updates.fedora' in topic: self.log.info('New Fedora %(release)s %(repo)s compose ready', body['msg']) repo = 'f%(release)s-%(repo)s' % body['msg'] else: self.log.warn('Unknown topic: %s', topic) release = self.releases[repo] reactor.callInThread(self.compose, release)" 1796,"def parse_addr(text): ""Parse a 1- to 3-part address spec."" if text: parts = text.split(':') length = len(parts) if length== 3: return parts[0], parts[1], int(parts[2]) elif length == 2: return None, parts[0], int(parts[1]) elif length == 1: return None, '', int(parts[0]) return None, None, None" 1797,"def start(self): ""Start the service"" # register signals gevent.signal(signal.SIGINT, self._shutdown) # spawn the flush trigger def _flush_impl(): while 1: gevent.sleep(self._stats.interval) # rotate stats stats = self._stats self._reset_stats() # send the stats to the sink which in turn broadcasts # the stats packet to one or more hosts. 
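# a failing sink must not kill the flush loop, so any exception is caught and its traceback is logged before the next interval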
try: self._sink.send(stats) except Exception, ex: trace = traceback.format_tb(sys.exc_info()[-1]) self.error(''.join(trace)) self._flush_task = gevent.spawn(_flush_impl) # start accepting connections self._sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP) self._sock.bind(self._bindaddr) while 1: try: data, _ = self._sock.recvfrom(MAX_PACKET) for p in data.split('\n'): if p: self._process(p) except Exception, ex: self.error(str(ex))" 1798,"def _process(self, data): ""Process a single packet and update the internal tables."" parts = data.split(':') if self._debug: self.error('packet: %r' % data) if not parts: return # interpret the packet and update stats stats = self._stats key = parts[0].translate(KEY_TABLE, KEY_DELETIONS) if self._key_prefix: key = '.'.join([self._key_prefix, key]) for part in parts[1:]: srate = 1.0 fields = part.split('|') length = len(fields) if length < 2: continue value = fields[0] stype = fields[1].strip() with stats_lock: # timer (milliseconds) if stype == 'ms': stats.timers[key].append(float(value if value else 0)) # counter with optional sample rate elif stype == 'c': if length == 3 and fields[2].startswith('@'): srate = float(fields[2][1:]) value = float(value if value else 1) * (1 / srate) stats.counts[key] += value elif stype == 'g': value = float(value if value else 1) stats.gauges[key] = value" 1799,"def step_note_that(context, remark): """""" Used as generic step that provides an additional remark/hint and enhance the readability/understanding without performing any check. .. code-block:: gherkin Given that today is ""April 1st"" But note that ""April 1st is Fools day (and beware)"" """""" log = getattr(context, ""log"", None) if log: log.info(u""NOTE: %s;"" % remark)" 1800,"def section(self, resources): """""" Which section is this in, if any """""" section = [p for p in self.parents(resources) if p.rtype == 'section'] if section: return section[0] return None" 1801,"def in_navitem(self, resources, nav_href): """""" Given href of nav item, determine if resource is in it """""" # The navhref might end with '/index' so remove it if so if nav_href.endswith('/index'): nav_href = nav_href[:-6] return self.docname.startswith(nav_href)" 1802,"def is_published(self): """""" Return true if this resource has published date in the past """""" now = datetime.now() published = self.props.published if published: return published < now return False" 1803,"def request(self, method, path, query=None, content=None): """""" Sends an HTTP request. This constructs a full URL, encodes and decodes HTTP bodies, and handles invalid responses in a pythonic way. 
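For example, request('get', '/version') (as used by start() below) returns the decoded JSON body, in that case the RAPI protocol version as an integer.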
@type method: string @param method: HTTP method to use @type path: string @param path: HTTP URL path @type query: list of two-tuples @param query: query arguments to pass to urllib.urlencode @type content: str or None @param content: HTTP body content @rtype: object @return: JSON-Decoded response @raises GanetiApiError: If an invalid response is returned """""" kwargs = { ""headers"": headers, ""timeout"": self.timeout, ""verify"": False, } if self.username and self.password: kwargs[""auth""] = self.username, self.password if content is not None: kwargs[""data""] = self._json_encoder.encode(content) if query: prepare_query(query) kwargs[""params""] = query url = self._base_url + path # print ""Sending request to %s %s"" % (url, kwargs) try: r = requests.request(method, url, **kwargs) except requests.ConnectionError: raise GanetiApiError(""Couldn't connect to %s"" % self._base_url) except requests.Timeout: raise GanetiApiError(""Timed out connecting to %s"" % self._base_url) if r.status_code != requests.codes.ok: raise NotOkayError(str(r.status_code), code=r.status_code) if r.content: return json.loads(r.content) else: return None" 1804,"def start(self): """""" Confirm that we may access the target cluster. """""" version = self.request(""get"", ""/version"") if version != 2: raise GanetiApiError(""Can't work with Ganeti RAPI version %d"" % version) logging.info(""Accessing Ganeti RAPI, version %d"" % version) self.version = version try: features = self.request(""get"", ""/2/features"") except NotOkayError, noe: if noe.code == 404: # Okay, let's calm down, this is totally reasonable. Certain # older Ganeti RAPIs don't have a list of features. features = [] else: # No, wait, panic was the correct thing to do. raise logging.info(""RAPI features: %r"" % (features,)) self.features = features" 1805,"def _create_driver(self, **kwargs): """""" Create webdriver, assign it to ``self.driver``, and run webdriver initiation process, which is usually used for manual login. """""" if self.driver is None: self.driver = self.create_driver(**kwargs) self.init_driver_func(self.driver)" 1806,"def get_html(self, url, params=None, cache_cb=None, **kwargs): """""" Get html of an url. """""" url = add_params(url, params) cache_consumed, value = self.try_read_cache(url) if cache_consumed: html = value else: self._create_driver() self.driver.get(url) html = self.driver.page_source if self.should_we_update_cache(html, cache_cb, cache_consumed): self.cache.set( url, html, expire=kwargs.get(""cache_expire"", self.cache_expire), ) return html" 1807,"def deserialize_time(data): """"""Return a time instance based on the values of the data param"""""" parsed = parser.parse(data) return parsed.time().replace(tzinfo=parsed.tzinfo)" 1808,"def freeze(): ''' Show arguments to require() to recreate what has been installed ''' installations = {} for dist in get_installed_distributions(): req = pip.FrozenRequirement.from_dist(dist, [], find_tags=False) installations[req.name] = req return [str(installation).rstrip() for installation in sorted(installations.values(), key=lambda x: x.name.lower())]" 1809,"def require(*args, **kwargs): ''' Install a set of packages using pip This is designed to be an interface for IPython notebooks that replicates the requirements.txt pip format. This lets notebooks specify which versions of packages they need inside the notebook itself. This function is the general-purpose interface that lets the caller specify any version string for any package. 
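For example, require('requests>=2.0.0', numpy='==1.8.1') would install both packages, since keyword arguments are joined into strings such as 'numpy==1.8.1' before being handed to pip (the package names here are purely illustrative).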
''' # If called with no arguments, returns requirements list if not args and not kwargs: return freeze() # Construct array of requirements requirements = list(args) extra = ['{}{}'.format(kw, kwargs[kw]) for kw in kwargs] requirements.extend(extra) args = ['install', '-q'] args.extend(requirements) pip.main(args)" 1810,"def handle(self, *args, **options): """""" Compares current database with a migrations. Creates a temporary database, applies all the migrations to it, and then dumps the schema from both current and temporary, diffs them, then report the diffs to the user. """""" self.db = options.get(""database"", DEFAULT_DB_ALIAS) self.current_name = connections[self.db].settings_dict[""NAME""] self.compare_name = options.get(""db_name"") self.lines = options.get(""lines"") self.ignore = int(options.get('ignore')) if not self.compare_name: self.compare_name = ""%s_compare"" % self.current_name command = NASHVEGAS.get(""dumpdb"", ""pg_dump -s {dbname}"") print ""Getting schema for current database..."" current_sql = Popen( command.format(dbname=self.current_name), shell=True, stdout=PIPE ).stdout.readlines() print ""Getting schema for fresh database..."" self.setup_database() connections[self.db].close() connections[self.db].settings_dict[""NAME""] = self.compare_name try: call_command(""syncdb"", interactive=False, verbosity=0, migrations=False) new_sql = Popen( command.format(dbname=self.compare_name).split(), stdout=PIPE ).stdout.readlines() finally: connections[self.db].close() connections[self.db].settings_dict[""NAME""] = self.current_name self.teardown_database() print ""Outputing diff between the two..."" print """".join(difflib.unified_diff(normalize_sql(current_sql, self.ignore), normalize_sql(new_sql, self.ignore), n=int(self.lines)))" 1811,"def render_widgets(kb_app: kb, sphinx_app: Sphinx, doctree: doctree, fromdocname: str, ): """""" Go through docs and replace widget directive with rendering """""" builder: StandaloneHTMLBuilder = sphinx_app.builder for node in doctree.traverse(widget): # Render the output w = sphinx_app.env.widgets.get(node.name) context = builder.globalcontext.copy() # Add in certain globals context['resources'] = sphinx_app.env.resources context['references'] = sphinx_app.env.references output = w.render(sphinx_app, context) # Put the output into the node contents listing = [nodes.raw('', output, format='html')] node.replace_self(listing)" 1812,"def attr_string(filterKeys=(), filterValues=(), **kwargs): """"""Build a string consisting of 'key=value' substrings for each keyword argument in :kwargs: @param filterKeys: list of key names to ignore @param filterValues: list of values to ignore (e.g. None will ignore all key=value pairs that has that value. """""" return ', '.join([str(k)+'='+repr(v) for k, v in kwargs.items() if k not in filterKeys and v not in filterValues])" 1813,"def auth_string(self): """""" Get the auth string. 
If the token is expired and auto refresh is enabled, a new token will be fetched :return: the auth string :rtype: str """""" if not self._token: self.execute() if not self._token.expired: return 'Bearer {}'.format(self._token.access_token) if self.auto_refresh: self.execute() return 'Bearer {}'.format(self._token.access_token) raise TokenExpired()" 1814,"async def main(): """"""Get the data from a *hole instance."""""" async with aiohttp.ClientSession() as session: data = Hole('192.168.0.215', loop, session) await data.get_data() # Get the raw data print(json.dumps(data.data, indent=4, sort_keys=True)) print(""Status:"", data.status) print(""Domains being blocked:"", data.domains_being_blocked)" 1815,"async def enable(): """"""Enable a *hole instance."""""" async with aiohttp.ClientSession() as session: data = Hole('192.168.0.215', loop, session, api_token=API_TOKEN) await data.enable()" 1816,"def admin_penalty(self, column=None, value=None, **kwargs): """""" An enforcement action that results in levying the permit holder with a penalty or fine. It is used to track judicial hearing dates, penalty amounts, and type of administrative penalty order. >>> PCS().admin_penalty('enfor_action_date', '16-MAR-01') """""" return self._resolve_call('PCS_ADMIN_PENALTY_ORDER', column, value, **kwargs)" 1817,"def audit(self, column=None, value=None, **kwargs): """""" Pretreatment Compliance Inspections (PCI) or Pretreatment Audits collect information resulting from inspections pertaining to a Publicly Owned Treatment Works (POTWs) that receive pollutants from indirect dischargers. >>> PCS().audit('insp_date', '16-MAR-01') """""" return self._resolve_call('PCS_PCI_AUDIT', column, value, **kwargs)" 1818,"def code_description(self, column=None, value=None, **kwargs): """""" The Permit Compliance System (PCS) records milestones, events, and many other parameters in code format. To provide text descriptions that explain the code meanings, the PCS_CODE_DESC provides complete information on all types of codes, and for each type, the text description of each possible code value. >>> PCS().code_description('code', 110) """""" return self._resolve_call('PCS_CODE_DESC', column, value, **kwargs)" 1819,"def compliance_schedule(self, column=None, value=None, **kwargs): """""" A sequence of activities with associated milestones which pertains to a given permit. >>> PCS().compliance_schedule('cmpl_schd_evt', '62099') """""" return self._resolve_call('PCS_CMPL_SCHD', column, value, **kwargs)" 1820,"def compliance_violation(self, column=None, value=None, **kwargs): """""" A compliance schedule violation reflects the non-achievement of a given compliance schedule event including the type of violation and type of resolution. >>> PCS().compliance_violation('cs_rnc_detect_date', '16-MAR-04') """""" return self._resolve_call('PCS_CMPL_SCHD_VIOL', column, value, **kwargs)" 1821,"def dmr_measurement(self, column=None, value=None, **kwargs): """""" Measurements of effluents reported on the Discharge Monitoring Report (DMR). The violations are detected by comparing the measurement values against the corresponding effluent limit. >>> PCS().dmr_measurement('season_num', 2) """""" return self._resolve_call('PCS_DMR_MEASUREMENT', column, value, **kwargs)" 1822,"def enforcement_action(self, column=None, value=None, **kwargs): """""" A disciplinary action taken against a permit facility. The action may be applicable to one or more violations.
>>> PCS().enforcement_action('ea_code', '09') """""" return self._resolve_call('PCS_ENFOR_ACTION', column, value, **kwargs)" 1823,"def hearing(self, column=None, value=None, **kwargs): """""" An evidentiary hearing. >>> PCS().hearing('event_date', '23-MAY-01') """""" return self._resolve_call('PCS_EVIDENTIARY_HEARING_EVENT', column, value, **kwargs)" 1824,"def industrial_user(self, column=None, value=None, **kwargs): """""" Information from the PCI_AUDIT table pertaining to industrial users, i.e. the number of significant industrial users. >>> PCS().industrial_user('insp_date', '16-MAR-01') """""" return self._resolve_call('PCS_INDUSTRIAL_USER_INFO', column, value, **kwargs)" 1825,"def inspection(self, column=None, value=None, **kwargs): """""" An official visit to the permit facility on a periodic basis which consists of the following inspection types: NPDES, Biomonitoring, Pretreatment, and Industrial User. >>> PCS().inspection('insp_date', '16-MAR-01') """""" return self._resolve_call('PCS_INSPECTION', column, value, **kwargs)" 1826,"def permit_event(self, column=None, value=None, **kwargs): """""" A permit event tracks the lifecycle of a permit from issuance to expiration. Examples include 'Application Received' and 'Permit Issued', etc. >>> PCS().permit_event('event_actual_date', '16-MAR-04') """""" return self._resolve_call('PCS_PERMIT_EVENT', column, value, **kwargs)" 1827,"def pipe_schedule(self, column=None, value=None, **kwargs): """""" Particular discharge points at a permit facility that are governed by effluent limitations and monitoring and submission requirements. >>> PCS().pipe_schedule('state_submission_units', 'M') """""" return self._resolve_call('PCS_PIPE_SCHED', column, value, **kwargs)" 1828,"def single_violation(self, column=None, value=None, **kwargs): """""" A single event violation is a one-time event that occurred on a fixed date, and is associated with one permitted facility. >>> PCS().single_violation('single_event_viol_date', '16-MAR-01') """""" return self._resolve_call('PCS_SINGLE_EVENT_VIOL', column, value, **kwargs)" 1829,"def sludge(self, column=None, value=None, **kwargs): """""" Sludge information describes the volume of sludge produced at a facility, identification information on a sludge handler, and classification/permitting information on a facility that handles sludge, such as a pretreatment POTW. >>> PCS().sludge('county_name', 'San Francisco') """""" return self._resolve_call('PCS_SLUDGE', column, value, **kwargs)" 1830,"def typify(value: Union[dict, list, set, str]): """""" Enhance block operation with native types. Typify takes a blockchain operation or dict/list/value, and then it parses and converts string types into native data types where appropriate. """""" if type(value) == dict: return walk_values(typify, value) if type(value) in [list, set]: return list(map(typify, value)) if type(value) == str: if re.match('^\d+\.\d+ (STEEM|SBD|VESTS)$', value): return keep_in_dict(dict(Amount(value)), ['amount', 'asset']) if re.match('^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}$', value): return parse_time(value) return value" 1831,"def json_expand(json_op): """""" For custom_json ops. """""" if type(json_op) == dict and 'json' in json_op: return update_in(json_op, ['json'], safe_json_loads) return json_op" 1832,"def delete(self, subnet_id): """""" This is a bad delete function, because one VPC can have more than one subnet. It is OK if you only use CAL to manage cloud resources; we will update it ASAP.
"""""" # 1 : show subnet subnet = self.client.describe_subnets( SubnetIds=[subnet_id]).get('Subnets')[0] vpc_id = subnet.get('VpcId') # 2 : delete subnet self.client.delete_subnet(SubnetId=subnet_id) # 3 : delete vpc return self.client.delete_vpc(VpcId=vpc_id)" 1833,"def generate(self): """""" Generates a new random string from the start symbol Args: None Returns: str: The generated string """""" result = self._gen(self.optimized, self.splitstring) if self.splitstring and result is not None: result = result[1:] return result" 1834,"def _clean_terminals(self): """""" Because of the optimization, there are some non existing terminals on the generated list. Remove them by checking for terms in form Ax,x """""" new_terminals = [] for term in self.grammar.grammar_terminals: x_term = term.rfind('@') y_term = term.rfind('A') if y_term > x_term: x_term = y_term ids = term[x_term + 1:].split(',') if len(ids) < 2: """"""It'input_string a normal terminal, not a state"""""" new_terminals.append(term) self.grammar.grammar_terminals = new_terminals" 1835,"def _check_self_to_empty(self, stateid): """""" Because of the optimization, the rule for empty states is missing A check takes place live Args: stateid (int): The state identifier Returns: bool: A true or false response """""" x_term = stateid.rfind('@') y_term = stateid.rfind('A') if y_term > x_term: x_term = y_term ids = stateid[x_term + 1:].split(',') if len(ids) < 2: return 0 if ids[0] == ids[1]: # print 'empty' return 1 return 0" 1836,"def _check_intemediate(self, myntr, maxstate): """""" For each state Apq which is a known terminal, this function searches for rules Apr -> Apq Aqr and Arq -> Arp Apq where Aqr is also a known terminal or Arp is also a known terminal. It is mainly used as an optimization in order to avoid the O(n^3) for generating all the Apq -> Apr Arq rules during the PDA to CFG procedure. Args: myntr (str): The examined non terminal that was poped out of the queue maxstate (int): The maxstate is used for generating in a dynamic way the CNF rules that were not included due to the optimization. As a result, the algorithm generates these rules only if required. Returns: bool: Returns true if the algorithm was applied at least one time """""" # print 'BFS Dictionary Update - Intermediate' x_term = myntr.rfind('@') y_term = myntr.rfind('A') if y_term > x_term: x_term = y_term ids = myntr[x_term + 1:].split(',') if len(ids) < 2: return 0 i = ids[0] j = ids[1] r = 0 find = 0 while r < maxstate: if r != i and r != j: if 'A' + i + ',' + \ repr(r) not in self.resolved \ and 'A' + j + ',' + repr(r) in self.resolved: self.resolved[ 'A' + i + ',' + repr(r)] = self.resolved[myntr] \ + self.resolved['A' + j + ',' + repr(r)] if self._checkfinal('A' + i + ',' + repr(r)): return self.resolved['A' + i + ',' + repr(r)] if 'A' + i + ',' + repr(r) not in self.bfs_queue: self.bfs_queue.append('A' + i + ',' + repr(r)) find = 1 if 'A' + repr(r) + ',' + j not in self.resolved and 'A' + \ repr(r) + ',' + i in self.resolved: self.resolved[ 'A' + repr(r) + ',' + j] = self.resolved['A' + repr(r) + ',' + i] \ + self.resolved[myntr] if self._checkfinal('A' + repr(r) + ',' + j): return self.resolved['A' + repr(r) + ',' + j] if 'A' + repr(r) + ',' + j not in self.bfs_queue: self.bfs_queue.append('A' + repr(r) + ',' + j) find = 1 r = r + 1 if find == 1: return 1 return 0" 1837,"def _check_self_replicate(self, myntr): """""" For each Rule B -> c where c is a known terminal, this function searches for B occurences in rules with the form A -> B and sets A -> c. 
"""""" # print 'BFS Dictionary Update - Self Replicate' find = 0 for nonterm in self.grammar.grammar_nonterminals_map: for i in self.grammar.grammar_nonterminals_map[nonterm]: if self.grammar.grammar_rules[i][0] not in self.resolved and not isinstance( self.grammar.grammar_rules[i][1], (set, tuple)) \ and self.grammar.grammar_rules[i][1] == myntr: self.resolved[self.grammar.grammar_rules[i][0]] = self.resolved[myntr] if self._checkfinal(self.grammar.grammar_rules[i][0]): return self.resolved[self.grammar.grammar_rules[i][0]] if self.grammar.grammar_rules[i][0] not in self.bfs_queue: self.bfs_queue.append(self.grammar.grammar_rules[i][0]) find = 1 if find == 1: return 1 return 0" 1838,"def _check_self_nonterminals(self, optimized): """""" For each Rule A -> BC where B and C are known terminals (B -> c1 and C -> c2), this function searches replaces A to the corresponding terminals A -> c1c2 """""" # print 'BFS Dictionary Update - Self Non Terminals' find = 0 for nt in self.grammar.grammar_nonterminals_map: for i in self.grammar.grammar_nonterminals_map[nt]: if (self.grammar.grammar_rules[i][0] not in self.resolved\ or self.grammar.grammar_rules[i][0] == 'S') \ and isinstance(self.grammar.grammar_rules[i][1], (set, tuple)): # All rules are in CNF form, so first check the A -> BC rules part_a = None if optimized and self._check_self_to_empty( self.grammar.grammar_rules[i][1][0]): part_a = '' elif self.grammar.grammar_rules[i][1][0] in self.resolved: part_a = self.resolved[self.grammar.grammar_rules[i][1][0]] part_b = None if optimized and self._check_self_to_empty( self.grammar.grammar_rules[i][1][1]): part_b = '' elif self.grammar.grammar_rules[i][1][1] in self.resolved: part_b = self.resolved[self.grammar.grammar_rules[i][1][1]] if part_a is not None and part_b is not None: self.resolved[self.grammar.grammar_rules[i][0]] = part_a + part_b # print 'Non Terminals Resolving # '+self.g.Rules[i][0]+"": ""+ # self.Resolved[self.g.Rules[i][0]] if self._checkfinal(self.grammar.grammar_rules[i][0]): return self.resolved[self.grammar.grammar_rules[i][0]] if self.grammar.grammar_rules[i][0] not in self.bfs_queue: self.bfs_queue.append(self.grammar.grammar_rules[i][0]) find = 1 if find == 1: return 1 return 0" 1839,"def _gen(self, optimized, splitstring): """"""Generates a new random object generated from the nonterminal Args: optimized (bool): mode of operation - if enabled not all CNF rules are included (mitigate O(n^3)) splitstring (bool): A boolean for enabling or disabling Returns: str: The generated string """""" # Define Dictionary that holds resolved rules # (only in form A -> terminals sequence) self.resolved = {} # First update Resolved dictionary by adding rules # that contain only terminals (resolved rules) for nt in self.grammar.grammar_nonterminals_map: for i in self.grammar.grammar_nonterminals_map[nt]: if self.grammar.grammar_rules[i][0] not in self.resolved\ and not isinstance(self.grammar.grammar_rules[i][1], (set, tuple)): if self.grammar.grammar_rules[i][1] != '@empty_set' \ and self.grammar.grammar_rules[i][1] in self.grammar.grammar_terminals: if splitstring: self.resolved[ self.grammar.grammar_rules[i][0]] = self.grammar.grammar_rules[i][1] else: if self.grammar.grammar_rules[i][1] == '&': self.resolved[self.grammar.grammar_rules[i][0]] = ' ' else: self.resolved[ self.grammar.grammar_rules[i][0]] = self.grammar.grammar_rules[i][1] # print 'ResolvingA '+self.g.Rules[i][0]+"": ""+ # self.g.Rules[i][1] if self._checkfinal(self.grammar.grammar_rules[i][0]): return 
self.resolved[self.grammar.grammar_rules[i][0]] if self.grammar.grammar_rules[i][0] not in self.bfs_queue: self.bfs_queue.append(self.grammar.grammar_rules[i][0]) if self.grammar.grammar_rules[i][1] == '@empty_set': self.resolved[self.grammar.grammar_rules[i][0]] = '' # print 'ResolvingB '+self.g.Rules[i][0]+"": "" self.bfs_queue.append(self.grammar.grammar_rules[i][0]) if optimized and self._check_self_to_empty( self.grammar.grammar_rules[i][1]): self.resolved[self.grammar.grammar_rules[i][0]] = '' # print 'ResolvingC '+self.g.Rules[i][0]+"": "" if self.grammar.grammar_rules[i][0] not in self.bfs_queue: self.bfs_queue.append(self.grammar.grammar_rules[i][0]) # Then try to use the rules from Resolved dictionary and check # if there is another rule that can be resolved. # This should be done in a while loop change = 1 while change: change = 0 if not change: ret = self._check_self_nonterminals(optimized) if ret == 1: change = 1 elif ret != 0: return ret if not change: while not change and len(self.bfs_queue) > 0: myntr = self.bfs_queue.pop() ret = self._check_self_replicate(myntr) if ret == 1: change = 1 elif ret != 0: return ret if optimized and self._check_intemediate( myntr, self.maxstate): change = 1 break" 1840,"def project_workspace_addsitedir(sitedir): """""" Similar to site.addsitedir() but prefers new sitedir over existing ones. Therefore, prefers local packages over installed packages. .. note:: This allows to support *.pth files and zip-/egg-imports similar to an installed site-packages directory. """""" assert os.path.isdir(sitedir) try: from site import addsitedir except ImportError: # -- USE: Python2.7 site.py package from pysite import addsitedir next_package_pos = len(sys.path) addsitedir(sitedir) # -- POST-PROCESS: Move new packages from end to begin of sys.path list. pos = 0 new_packages = sys.path[next_package_pos:] del sys.path[next_package_pos:] sys.path[pos:pos] = new_packages" 1841,"def create(self, name, description=None, units=None, agg_method=""priority_fill"", overwrite=False): """""" Create, or get if exists, a Symbol. Parameters ---------- name : str A symbol's name is a primary key, used across the Trump ORM. description : str, optional An arbitrary string, used to store user information related to the symbol. units : str, optional This is a string used to denote the units of the final data Series. agg_method : str, optional The aggregation method, used to calculate the final feed. Defaults to priority_fill. overwrite : bool, optional Set to True, to force deletion an existing symbol. defaults to False. Returns ------- Symbol """""" sym = self.try_to_get(name) if sym is not None: if overwrite: print ""Deleting {}"".format(sym.name) self.ses.delete(sym) self.ses.commit() else: msg = 'Symbol {} already exists.\n' + \ 'Consider setting overwrite to True.' msg = msg.format(name) raise Exception(msg) sym = Symbol(name, description, units, agg_method) self.ses.add(sym) print ""Creating {}"".format(sym.name) sym.add_alias(name) sym.handle = SymbolHandle(sym=sym) self.ses.commit() return sym" 1842,"def delete(self, symbol): """""" Deletes a Symbol. Parameters ---------- symbol : str or Symbol """""" if isinstance(symbol, (str, unicode)): sym = self.get(symbol) elif isinstance(symbol, Symbol): sym = symbol else: raise Exception(""Invalid symbol {}"".format((repr(symbol)))) # Has to handle the case where the table would exist already # and where it wouldn't. 
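# reflect the symbol's datatable from the database and drop it if present; if it was never created, NoSuchTableError is caught and the deletion continues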
try: sym.datatable = Table(sym.name, Base.metadata, autoload=True) sym.datatable.drop(self.eng, checkfirst=True) except NoSuchTableError: print ""No worries, {} never existed to begin with."".format(sym.name) self.ses.delete(sym) self.ses.commit()" 1843,"def exists(self, symbol): """"""Checks to if a symbol exists, by name. Parameters ---------- symbol : str or Symbol Returns ------- bool """""" if isinstance(symbol, str): sym = symbol elif isinstance(symbol, Symbol): sym = symbol.name syms = self.ses.query(Symbol).filter(Symbol.name == sym).all() if len(syms) == 0: return False else: return True" 1844,"def get(self, symbol): """""" Gets a Symbol based on name, which is expected to exist. Parameters ---------- symbol : str or Symbol Returns ------- Symbol Raises ------ Exception If it does not exist. Use .try_to_get(), if the symbol may or may not exist. """""" syms = self.try_to_get(symbol) if syms is None: raise Exception(""Symbol {} does not exist"".format(symbol)) else: return syms" 1845,"def try_to_get(self, symbol): """""" Gets a Symbol based on name, which may or may not exist. Parameters ---------- symbol : str Returns ------- Symbol or None. Note ---- Use .get(), if the symbol should exist, and an exception is needed if it doesn't. """""" syms = self.ses.query(Symbol).filter(Symbol.name == symbol).all() if len(syms) == 0: return None else: return syms[0]" 1846,"def search_meta(self, attr, value=None, stronly=False): """""" Get a list of Symbols by searching a specific meta attribute, and optionally the value. Parameters ---------- attr : str The meta attribute to query. value : None, str or list The meta attribute to query. If you pass a float, or an int, it'll be converted to a string, prior to searching. stronly : bool, optional, default True Return only a list of symbol names, as opposed to the (entire) Symbol objects. Returns ------- List of Symbols or empty list """""" if stronly: qry = self.ses.query(Symbol.name).join(SymbolMeta) else: qry = self.ses.query(Symbol).join(SymbolMeta) crits = [] if value is None: crits.append(SymbolMeta.attr == attr) else: if isinstance(value, str): values = [value] elif isinstance(value, (tuple, list)): values = value for v in values: crits.append(and_(SymbolMeta.attr == attr, SymbolMeta.value.like(value))) if len(crits): qry = qry.filter(or_(*crits)) qry = qry.order_by(Symbol.name) if stronly: return [sym[0] for sym in qry.distinct()] else: return [sym for sym in qry.distinct()]" 1847,"def search(self, usrqry=None, name=False, desc=False, tags=False, meta=False, stronly=False, dolikelogic=True): """""" Get a list of Symbols by searching a combination of a Symbol's name, description, tags or meta values. Parameters ---------- usrqry : str The string used to query. Appending '%' will use SQL's ""LIKE"" functionality. name : bool, optional, default False Search by symbol name. desc : bool, optional, default False Search by symbol descriptions. tags : bool, optional, default False Search by symbol tags. meta : bool, optional, default False Search within a symbol's meta attribute's value. stronly : bool, optional, default True Return only a list of symbol names, as opposed to the (entire) Symbol objects. dolikelogic : Append '%' to either side of the string, if the string doesn't already have % specified. 
Returns ------- List of Symbols or empty list """""" if stronly: qry = self.ses.query(Symbol.name) else: qry = self.ses.query(Symbol) if tags: qry = qry.join(SymbolTag) if meta: qry = qry.join(SymbolMeta) if dolikelogic: if usrqry is not None: if '%' not in usrqry: usrqry = '%' + usrqry + '%' crits = [] if name: crits.append(Symbol.name.like(usrqry)) if tags: crits.append(SymbolTag.tag.like(usrqry)) if desc: crits.append(Symbol.description.like(usrqry)) if meta: crits.append(SymbolMeta.value.like(usrqry)) if len(crits): qry = qry.filter(or_(*crits)) qry = qry.order_by(Symbol.name) if stronly: return [sym[0] for sym in qry.distinct()] else: return [sym for sym in qry.distinct()]" 1848,"def search_tag(self, tag, symbols=True, feeds=False): """""" Get a list of Symbols by searching a tag or partial tag. Parameters ---------- tag : str The tag to search. Appending '%' will use SQL's ""LIKE"" functionality. symbols : bool, optional Search for Symbol's based on their tags. feeds : bool, optional Search for Symbol's based on their Feeds' tags. Returns ------- List of Symbols or empty list """""" syms = [] if isinstance(tag, (str, unicode)): tags = [tag] else: tags = tag if symbols: crits = [] for tag in tags: if ""%"" in tag: crit = SymbolTag.tag.like(tag) else: crit = SymbolTag.tag == tag crits.append(crit) qry = self.ses.query(SymbolTag) qry = qry.filter(or_(*crits)) syms = qry.all() syms = [tagged.symbol for tagged in syms] if feeds: crits = [] for tag in tags: if ""%"" in tag: crit = FeedTag.tag.like(tag) else: crit = FeedTag.tag == tag crits.append(crit) qry = self.ses.query(Symbol).select_from(FeedTag) qry = qry.join(FeedTag.feed).join(Feed.symbol) qry = qry.filter(or_(*crits)) fds = qry.distinct() syms = syms + [sym for sym in fds] return list(set(syms)) return syms" 1849,"def search_meta_specific(self, **avargs): """"""Search list of Symbol objects by by querying specific meta attributes and their respective values. Parameters ---------- avargs The attributes and values passed as key word arguments. If more than one criteria is specified, AND logic is applied. Appending '%' to values will use SQL's ""LIKE"" functionality. Example ------- >>> sm.search_meta(geography='Canada', sector='Gov%') Returns ------- List of Symbols or empty list """""" qry = self.ses.query(Symbol).join(SymbolMeta.symbol) for attr, value in avargs.iteritems(): SMA = aliased(SymbolMeta) if ""%"" in value: acrit = SMA.value.like(value) else: acrit = SMA.value == value crit = and_(acrit, SMA.attr == attr) qry = qry.filter(crit).join(SMA, SMA.symname == SymbolMeta.symname) qry = qry.order_by(Symbol.name) return qry.all()" 1850,"def tag_counts(self): """""" Get a list of tags and the number of each. Returns ------- List of tuples, in order (tag, # of Symbols w/Tag) """""" qry = self.ses.query(SymbolTag.tag, func.count(SymbolTag.tag)) qry = qry.group_by(SymbolTag.tag) qry = qry.order_by(SymbolTag.tag) tags = list(qry.all()) return tags" 1851,"def bulk_cache_of_tag(self, tag): """""" Caches all the symbols by a certain tag. For now, there is no different, than caching each symbol individually. In the future, this functionality could have speed improvements. Parameters ---------- tag : str Use '%' to enable SQL's ""LIKE"" functionality. 
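For example, bulk_cache_of_tag('fx%') would cache every Symbol carrying a tag that starts with 'fx' and collect the individual SymbolReports into a single TrumpReport (the tag value here is purely illustrative).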
Returns ------- TrumpReport """""" syms = self.search_tag(tag) name = 'Bulk Cache of Symbols tagged {}'.format(tag) tr = TrumpReport(name) for sym in syms: sr = sym.cache() tr.add_symbolreport(sr) return tr" 1852,"def build_view_from_tag(self, tag): """""" Build a view of group of Symbols based on their tag. Parameters ---------- tag : str Use '%' to enable SQL's ""LIKE"" functionality. Note ---- This function is written without SQLAlchemy, so it only tested on Postgres. """""" syms = self.search_tag(tag) names = [sym.name for sym in syms] subs = [""SELECT indx, '{}' AS symbol, final FROM {}"".format(s, s) for s in names] qry = "" UNION ALL "".join(subs) qry = ""CREATE VIEW {} AS {};"".format(tag, qry) self.ses.execute(""DROP VIEW IF EXISTS {};"".format(tag)) self.ses.commit() self.ses.execute(qry) self.ses.commit()" 1853,"def _add_orfs(self, which, symbol, ind, val, dt_log=None, user=None, comment=None): """""" Appends a single indexed-value pair, to a symbol object, to be used during the final steps of the aggregation of the datatable. See add_override and add_fail_safe. Parameters ---------- which : str Fail Safe or Override? symbol : Symbol or str The Symbol to apply the fail safe ind : obj The index value where the fail safe should be applied val : obj The data value which will be used in the fail safe dt_log : datetime A log entry, for saving when this fail safe was created. user : str A string representing which user made the fail safe comment : str A string to store any notes related to this fail safe. """""" if not isinstance(symbol, (str, unicode)): symbol = symbol.name if not dt_log: dt_log = dt.datetime.now() if which.lower() == 'override': qry = self.ses.query(func.max(Override.ornum).label('max_ornum')) override = True elif which.lower() == 'failsafe': qry = self.ses.query(func.max(FailSafe.fsnum).label('max_fsnum')) override = False qry = qry.filter_by(symname = symbol) cur_num = qry.one() if cur_num[0] is None: next_num = 0 else: next_num = cur_num[0] + 1 if override: tmp = Override(symname=symbol, ind=ind, val=val, dt_log=dt_log, user=user, comment=comment, ornum=next_num) else: tmp = FailSafe(symname=symbol, ind=ind, val=val, dt_log=dt_log, user=user, comment=comment, fsnum=next_num) self.ses.add(tmp) self.ses.commit()" 1854,"def add_override(self, symbol, ind, val, dt_log=None, user=None, comment=None): """""" Appends a single indexed-value pair, to a symbol object, to be used during the final steps of the aggregation of the datatable. With default settings Overrides, get applied with highest priority. Parameters ---------- symbol : Symbol or str The Symbol to override ind : obj The index value where the override should be applied val : obj The data value which will be used in the override dt_log : datetime A log entry, for saving when this override was created. user : str A string representing which user made the override comment : str A string to store any notes related to this override. """""" self._add_orfs('override', symbol, ind, val, dt_log, user, comment)" 1855,"def get_converted(self, symbol, units='CAD', system=None, tag=None): """""" Uses a Symbol's Dataframe, to build a new Dataframe, with the data converted to the new units Parameters ---------- symbol : str or tuple of the form (Dataframe, str) String representing a symbol's name, or a dataframe with the data required to be converted. If supplying a dataframe, units must be passed. 
units : str, optional Specify the units to convert the symbol to, default to CAD system : str, optional If None, the default system specified at instantiation is used. System defines which conversion approach to take. tag : str, optional Tags define which set of conversion data is used. If None, the default tag specified at instantiation is used. """""" if isinstance(symbol, (str, unicode)): sym = self.get(symbol) df = sym.df curu = sym.units requ = units elif isinstance(symbol, tuple): df = symbol[0] curu = symbol[1] requ = units else: raise TypeError(""Expected str or (DataFrame, str), found {}"".format(type(symbol))) system = system or self.default_system tag = tag or self.default_tag conv = self.converters[system][tag] newdf = conv.convert(df, curu, requ) newdf = pd.merge(df, newdf, left_index=True, right_index=True) newdf = newdf[df.columns[0] + ""_y""].to_frame() newdf.columns = df.columns return newdf" 1856,"def last_cache(self,result='COMPLETE'): """""" The date and time of the previous cache. Parameters ---------- result : string, default 'COMPLETE' A string to choose which point in the log, should be returned. - COMPLETE - the last time a cache was completed - STARTED - the last time a cache was started Returns ------- datetime.datetime """""" crit = and_(SymbolLogEvent.event == 'CACHE', SymbolLogEvent.evresult == result) qry = self.log.filter(crit) qry = qry.order_by(SymbolLogEvent.evtime.desc()) t = qry.first() if t: return t.evtime else: return None" 1857,"def set_indexing(self, index_template): """""" Update a symbol's indexing strategy Parameters ---------- index_template : bIndex or bIndex-like An index template used to overwrite all details about the symbol's current index. """""" objs = object_session(self) if self.index.indimp != index_template.imp_name: self._refresh_datatable_schema() self.index.name = index_template.name self.index.indimp = index_template.imp_name self.index.case = index_template.case self.index.setkwargs(**index_template.kwargs) objs.commit()" 1858,"def add_meta(self, **metadict): """"""Add meta information to a Symbol. Parameters ---------- metadict Attributes are passed as keywords, with their associated values as strings. For meta attributes with spaces, use an unpacked dict. """""" objs = object_session(self) for attr,val in metadict.iteritems(): newmeta = SymbolMeta(self, attr, val) self.meta.append(newmeta) objs.commit()" 1859,"def add_validator(self, val_template): """""" Creates and adds a SymbolValidity object to the Symbol. Parameters ---------- validity_template : bValidity or bValidity-like a validity template. """""" validator = val_template.validator args = [] for arg in SymbolValidity.argnames: if arg in val_template.__dict__.keys(): args.append(getattr(val_template, arg)) objs = object_session(self) qry = objs.query(func.max(SymbolValidity.vid).label('max_vid')) qry = qry.filter_by(symname = self.name) cur_vid = qry.one()[0] if cur_vid is None: next_vid = 0 else: next_vid = cur_vid + 1 self.validity.append(SymbolValidity(self, next_vid, validator, *args)) objs.commit()" 1860,"def cache(self, checkvalidity=True, staleonly=False, allowraise=True): """""" Re-caches the Symbol's datatable by querying each Feed. Parameters ---------- checkvalidity : bool, optional Optionally, check validity post-cache. Improve speed by turning to False. 
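Example for get_converted() above, a hedged usage sketch: the symbol name, the
units and the ``usd_df`` dataframe are illustrative.

>>> cad_df = sm.get_converted('oil_price', units='CAD')
>>> cad_df = sm.get_converted((usd_df, 'USD'), units='CAD')  # convert a raw dataframe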
staleonly : bool, default False Set to True, for speed up, by looking at staleness allowraise : bool, default True AND with the Symbol.handle and Feed.handle's 'raise', set to False, to do a list of symbols. Note, this won't silence bugs in Trump, eg. unhandled edge cases. So, those still need to be handled by the application. Returns ------- SymbolReport """""" note = ""staleonly = {}"".format(staleonly) self._log_an_event('CACHE','START',note) docache = True if staleonly: lc = self.last_cache() if lc: freshthresh = self.freshthresh nw = dt.datetime.now() freshness = (nw - lc).total_seconds() / 60.0 if freshness <= freshthresh: docache = False smrp = SymbolReport(self.name) if docache: data = [] cols = ['final', 'override_feed000', 'failsafe_feed999'] if len(self.feeds) == 0: err_msg = ""Symbol has no Feeds. Can't cache a feed-less Symbol."" raise Exception(err_msg) try: datt = datadefs[self.dtype.datadef] indtt = indexingtypes[self.index.indimp] indkwargs = self.index.getkwargs() indt = indtt(self.index.case, **indkwargs) rp = ReportPoint('datadef', 'class', datt) smrp.add_reportpoint(rp) for afeed in self.feeds: fdrp = afeed.cache(allowraise) smrp.add_feedreport(fdrp) tmp = datt(afeed.data).converted tmp = indt.process_post_feed_cache(tmp) data.append(tmp) cols.append(afeed.data.name) except: point = ""caching"" smrp = self._generic_exception(point, smrp, allowraise) try: data = pd.concat(data, axis=1) except: point = ""concatenation"" smrp = self._generic_exception(point, smrp, allowraise) # We shouldn't need to do anything here, as the concatenation # should be smooth... # preindlen = len(data) # # # if preindlen > 0 : # #indt = indtt(data, self.index.case, indkwargs) # #data = indt.final_dataframe() # data = indt.process_post_concat(data) # # postindlen = len(data) # if postindlen == 0 and preindlen > 0: # raise Exception(""Indexing Implementer likely poorly designed"") # else: # postindlen = 0 def build_hi_df(which, colname): objs = object_session(self) qry = objs.query(which.ind, func.max(which.dt_log).label('max_dt_log')) qry = qry.filter_by(symname = self.name) grb = qry.group_by(which.ind).subquery() qry = objs.query(which) ords = qry.join((grb, and_(which.ind == grb.c.ind, which.dt_log == grb.c.max_dt_log))).all() if len(ords): orind = [row.ind for row in ords] orval = [row.val for row in ords] ordf = indt.build_ordf(orind, orval, colname) else: ordf = pd.DataFrame(columns=[colname]) return ordf ordf = build_hi_df(Override, 'override_feed000') fsdf = build_hi_df(FailSafe, 'failsafe_feed999') orfsdf = pd.merge(ordf, fsdf, how='outer', left_index=True, right_index=True) data = pd.merge(orfsdf, data, how='outer', left_index=True, right_index=True) data = indt.process_post_orfs(data) try: data = data.fillna(value=pd.np.nan) data = data[sorted_feed_cols(data)] data['final'] = FeedAggregator(self.agg_method).aggregate(data) except: point = ""aggregation"" smrp = self._generic_exception(point, smrp, allowraise) # SQLAQ There are several states to deal with at this point # A) the datatable exists but a feed has been added # B) the datatable doesn't exist and needs to be created # C) the datatable needs to be updated for more or less feeds # D) the datatable_exists flag is incorrect because all edge cases # haven't been handled yet. # # My logic is that once Trump is more functional, I'll be able to # eliminate this hacky solution. But, SQLAlchemy might have # a more elegant answer. A check, of somekind prior to deletion? 
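# Hedged standalone sketch of the staleness test performed near the top of
# cache() when staleonly=True (freshthresh is expressed in minutes):
import datetime as dt

def is_fresh(last_cache, freshthresh_minutes):
    # True means the previous cache is recent enough to be skipped
    if last_cache is None:
        return False
    age_minutes = (dt.datetime.now() - last_cache).total_seconds() / 60.0
    return age_minutes <= freshthresh_minutes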
# if not self.datatable_exists: # self._init_datatable() #older version of _init_datatable # delete(self.datatable).execute() # self._init_datatable() #older version of _init_datatable # Is this the best way to check? # if engine.dialect.has_table(session.connection(), self.name): # delete(self.datatable).execute() self._refresh_datatable_schema() if len(data) > 0: data.index.name = 'indx' data = data.reset_index() datarecords = data.to_dict(orient='records') objs = object_session(self) objs.execute(self.datatable.insert(), datarecords) objs.commit() if checkvalidity: try: isvalid, reports = self.check_validity(report=True) for rep in reports: smrp.add_reportpoint(rep) if not isvalid: raise Exception('{} is not valid'.format(self.name)) except: point = ""validity_check"" smrp = self._generic_exception(point, smrp, allowraise) self._log_an_event('CACHE','COMPLETE', ""Fresh!"") else: self._log_an_event('CACHE','FRESH', ""Was still fresh"") return smrp" 1861,"def check_validity(self, checks=None, report=True): """""" Runs a Symbol's validity checks. Parameters ---------- checks : str, [str,], optional Only run certain checks. report : bool, optional If set to False, the method will return only the result of the check checks (True/False). Set to True, to have a SymbolReport returned as well. Returns ------- Bool, or a Tuple of the form (Bool, SymbolReport) """""" if report: reportpoints = [] allchecks = [] checks_specified=False if isinstance(checks, (str, unicode)): checks = [checks] checks_specified = True elif isinstance(checks, (list, tuple)): checks_specified = True else: checks = [] for val in self.validity: if (val.validator in checks) or (not checks_specified): ValCheck = validitychecks[val.validator] anum = ValCheck.__init__.func_code.co_argcount - 2 args = [] for arg in SymbolValidity.argnames: args.append(getattr(val, arg)) valid = ValCheck(self.datatable_df, *args[:anum]) res = valid.result allchecks.append(res) rp = ReportPoint('validation', val.validator, res, str(args[:anum])) reportpoints.append(rp) if report: return all(allchecks), reportpoints else: return all(allchecks)" 1862,"def describe(self): """""" describes a Symbol, returns a string """""" lines = [] lines.append(""Symbol = {}"".format(self.name)) if len(self.tags): tgs = "", "".join(x.tag for x in self.tags) lines.append("" tagged = {}"".format(tgs)) if len(self.aliases): als = "", "".join(x.alias for x in self.aliases) lines.append("" aliased = {}"".format(als)) if len(self.feeds): lines.append("" feeds:"") for fed in self.feeds: lines.append("" {}. {}"".format(fed.fnum, fed.ftype)) return ""\n"".join(lines)" 1863,"def del_tags(self, tags): """""" remove a tag or tags from a symbol Parameters ---------- tags : str or [str,] Tags to be removed """""" # SQLA Adding a SymbolTag object, feels awkward/uneccessary. # Should I be implementing this functionality a different way? if isinstance(tags, (str, unicode)): tags = [tags] objs = object_session(self) docommit = False for symboltag in self.tags: if symboltag.tag in tags: objs.delete(symboltag) docommit = True if docommit: objs.commit()" 1864,"def add_tags(self, tags): """""" add a tag or tags to a symbol Parameters ---------- tags : str or [str,] Tags to be added """""" # SQLA Adding a SymbolTag object, feels awkward/uneccessary. # Should I be implementing this functionality a different way? 
if isinstance(tags, (str, unicode)): tags = [tags] objs = object_session(self) tmps = [SymbolTag(tag=t, sym=self) for t in tags] objs.add_all(tmps) objs.commit()" 1865,"def _log_an_event(self, event, evresult='No Result', note='No Note'): """""" log an event Parameters ---------- event : string evresult : string note : string """""" objs = object_session(self) evnt = SymbolLogEvent(event, evresult, note, sym=self.name) objs.add(evnt) objs.commit()" 1866,"def add_feed(self, feedlike, **kwargs): """""" Add a feed to the Symbol Parameters ---------- feedlike : Feed or bFeed-like The feed template, or Feed object to be added. kwargs Munging instructions """""" if 'fnum' in kwargs: fnum = kwargs['fnum'] del kwargs['fnum'] else: fnum = None if isinstance(feedlike, bFeed): munging = feedlike.munging if 'munging' in kwargs: explicit_munging = kwargs['munging'].as_odict for key in explicit_munging: munging[key] = explicit_munging[key] fed = Feed(self, feedlike.ftype, feedlike.sourcing, munging, feedlike.meta, fnum) elif isinstance(feedlike, Feed): fed = feedlike else: raise Exception(""Invalid Feed {}"".format(repr(feedlike))) self.feeds.append(fed) objs = object_session(self) objs.add(fed) objs.commit()" 1867,"def add_alias(self, alias): """""" Add an alias to a Symbol Parameters ---------- alias : str The alias """""" objs = object_session(self) if isinstance(alias, list): raise NotImplementedError elif isinstanceofany(alias, (str, unicode)): a = SymbolAlias(self, alias) self.aliases.append(a) objs.add(a)" 1868,"def _final_data(self): """""" Returns ------- A list of tuples representing rows from the datatable's index and final column, sorted accordingly. """""" dtbl = self.datatable objs = object_session(self) if isinstance(dtbl, Table): return objs.query(dtbl.c.indx, dtbl.c.final).all() else: raise Exception(""Symbol has no datatable, likely need to cache first."")" 1869,"def _max_min(self): """""" Returns ------- A tuple consisting of (max, min) of the index. """""" dtbl = self.datatable objs = object_session(self) if isinstance(dtbl, Table): return objs.query(func.max(dtbl.c.indx).label(""max_indx""), func.min(dtbl.c.indx).label(""min_indx"")).one() else: raise Exception(""Symbol has no datatable"")" 1870,"def _all_datatable_data(self): """""" Returns ------- A list of tuples representing rows from all columns of the datatable, sorted accordingly. """""" dtbl = self.datatable objs = object_session(self) imcols = [dtbl.c.indx, dtbl.c.final, dtbl.c.override_feed000, dtbl.c.failsafe_feed999] cols = imcols[:3] + [c for c in dtbl.c if c not in (imcols)] + [imcols[3]] if isinstance(dtbl, Table): return objs.query(*cols).order_by(dtbl.c.indx).all() else: raise Exception(""Symbol has no datatable"")" 1871,"def df(self): """""" Note: this accessor is read-only. It should be copied, if accessed in an application, more than once. Returns ------- Dataframe of the symbol's final data. 
"""""" data = self._final_data() if len(data) == 0: adf = pd.DataFrame(columns = [self.index.name, self.name]) return adf.set_index(self.index.name) adf = pd.DataFrame(data) if len(adf.columns) != 2: msg = ""Symbol ({}) needs to be cached prior to building a Dataframe"" msg = msg.format(self.name) raise Exception(msg) adf.columns = [self.index.name, self.name] return self._finish_df(adf, 'FINAL')" 1872,"def datatable_df(self): """""" returns the dataframe representation of the symbol's final data """""" data = self._all_datatable_data() adf = pd.DataFrame(data) adf.columns = self.dt_all_cols return self._finish_df(adf, 'ALL')" 1873,"def _init_datatable(self): """""" Instantiates the .datatable attribute, pointing to a table in the database that stores all the cached data """""" try: self.datatable = Table(self.name, Base.metadata, autoload=True) except NoSuchTableError: print ""Creating datatable, cause it doesn't exist"" self.datatable = self._datatable_factory() self.datatable.create() self.datatable_exists = True" 1874,"def _datatable_factory(self): """""" creates a SQLAlchemy Table object with the appropriate number of columns given the number of feeds """""" feed_cols = ['feed{0:03d}'.format(i + 1) for i in range(self.n_feeds)] feed_cols = ['override_feed000'] + feed_cols + ['failsafe_feed999'] ind_sqlatyp = indexingtypes[self.index.indimp].sqlatyp dat_sqlatyp = datadefs[self.dtype.datadef].sqlatyp atbl = Table(self.name, Base.metadata, Column('indx', ind_sqlatyp, primary_key=True), Column('final', dat_sqlatyp), *(Column(fed_col, dat_sqlatyp) for fed_col in feed_cols), extend_existing=True) self.dt_feed_cols = feed_cols[:] self.dt_all_cols = ['indx', 'final'] + feed_cols[:] return atbl" 1875,"def update_handle(self, chkpnt_settings): """""" Update a feeds's handle checkpoint settings :param chkpnt_settings, dict: a dictionary where the keys are stings representing individual handle checkpoint names, for a Feed (eg. api_failure, feed_type, monounique...) See FeedHandle.__table__.columns for the current list. The values can be either integer or BitFlags. :return: None """""" # Note, for now, this function is nearly identical # to the Symbol version. Careful when augmenting, # to get the right one. objs = object_session(self) # override with anything passed in for checkpoint in chkpnt_settings: if checkpoint in FeedHandle.__table__.columns: settings = chkpnt_settings[checkpoint] setattr(self.handle, checkpoint, settings) objs.commit()" 1876,"def add_tags(self, tags): """""" add a tag or tags to a Feed """""" if isinstance(tags, (str, unicode)): tags = [tags] objs = object_session(self) tmps = [FeedTag(tag=t, feed=self) for t in tags] objs.add_all(tmps) objs.commit()" 1877,"def initiate_browser(self): # Create a unique tempdir for downloaded files tempdir = os.getenv(TEMPDIR_ENVVAR, DEFAULT_TEMPDIR) tempsubdir = uuid4().hex # TODO: Remove this directory when finished! self.tempdir = os.path.join(tempdir, tempsubdir) try: # Try and create directory before checking if it exists, # to avoid race condition os.makedirs(self.tempdir) except OSError: if not os.path.isdir(self.tempdir): raise profile = webdriver.FirefoxProfile() # Set download location, avoid download dialogues if possible # Different settings needed for different Firefox versions # This will be a long list... 
profile.set_preference('browser.download.folderList', 2) profile.set_preference('browser.download.manager.showWhenStarting', False) profile.set_preference('browser.download.manager.closeWhenDone', True) profile.set_preference('browser.download.dir', self.tempdir) profile.set_preference(""browser.helperApps.neverAsk.saveToDisk"", ""application/octet-stream;application/vnd.ms-excel"") profile.set_preference(""browser.helperApps.alwaysAsk.force"", False) profile.set_preference(""browser.download.manager.useWindow"", False) self.browser = webdriver.Firefox(profile) self.browser.get('http://webbstat.av.se') detailed_cls = ""Document_TX_GOTOTAB_Avancerad"" """""" The button for expanded detailed options. This also happens to be a good indicator as to wheter all content is loaded. """""" # Wait for a content element, and 3 extra seconds just in case WebDriverWait(self.browser, PAGELOAD_TIMEOUT)\ .until(EC.presence_of_element_located((By.CLASS_NAME, detailed_cls))) self.browser.implicitly_wait(3) self.browser\ .find_element_by_class_name(detailed_cls)\ .find_element_by_tag_name(""td"")\ .click() # Wait for a content element, and 3 extra seconds just in case WebDriverWait(self.browser, PAGELOAD_TIMEOUT)\ .until(EC.presence_of_element_located((By.CLASS_NAME, detailed_cls))) self.browser.implicitly_wait(3)" 1878,"def _fetch_dimensions(self, dataset): """""" Declaring available dimensions like this is not mandatory, but nice, especially if they differ from dataset to dataset. If you are using a built in datatype, you can specify the dialect you are expecting, to have values normalized. This scraper will look for Swedish month names (e.g. 'Januari'), but return them according to the Statscraper standard ('january'). """""" yield Dimension(u""region"", label=""municipality or county"", datatype=""region"", dialect=""arbetsmiljoverket"") yield Dimension(u""period"", label=""Year or month"")" 1879,"def _fetch_itemslist(self, item): """""" We define two collection: - Number of work injuries (""Arbetsolycka"") - Number of workrelated diseases (""Arbetssjukdom"") Each contains four datasets: - Per municipality and year - Per county and year - Per municipality and month - Per municipality and year """""" if item.is_root: for c in [""Arbetsolycka"", ""Arbetssjukdom""]: yield Collection(c, blob=(c, None, None)) else: c = item.id for r in [u""kommun"", u""län""]: for p in [u""år"", u""månad""]: yield Dataset(u""%s-%s-%s"" % (c, r, p), blob=(c, r, p), label=u""%s, antal per %s och %s"" % (c, r, p))" 1880,"def make_log_record_output(category, level, message, format=None, datefmt=None, **kwargs): """""" Create the output for a log record, like performed by :mod:`logging` module. :param category: Name of the logger (as string or None). :param level: Log level (as number). :param message: Log message to use. :returns: Log record output (as string) """""" if not category or (category == ""__ROOT__""): category = ""root"" levelname = logging.getLevelName(level) record_data = dict(name=category, levelname=levelname, msg=message) record_data.update(kwargs) record = logging.makeLogRecord(record_data) formatter = logging.Formatter(format, datefmt=datefmt) return formatter.format(record)" 1881,"def step_I_create_logrecords_with_table(context): """""" Step definition that creates one more log records by using a table. .. 
code-block: gherkin When I create log records with: | category | level | message | | foo | ERROR | Hello Foo | | foo.bar | WARN | Hello Foo.Bar | Table description ------------------ | Column | Type | Required | Description | | category | string | yes | Category (or logger) to use. | | level | LogLevel | yes | Log level to use. | | message | string | yes | Log message to use. | .. code-block: python import logging from behave.configuration import LogLevel for row in table.rows: logger = logging.getLogger(row.category) level = LogLevel.parse_type(row.level) logger.log(level, row.message) """""" assert context.table, ""REQUIRE: context.table"" context.table.require_columns([""category"", ""level"", ""message""]) for row in context.table.rows: category = row[""category""] if category == ""__ROOT__"": category = None level = LogLevel.parse_type(row[""level""]) message = row[""message""] make_log_record(category, level, message)" 1882,"def step_I_create_logrecord_with_table(context): """""" Create an log record by using a table to provide the parts. .. seealso: :func:`step_I_create_logrecords_with_table()` """""" assert context.table, ""REQUIRE: context.table"" assert len(context.table.rows) == 1, ""REQUIRE: table.row.size == 1"" step_I_create_logrecords_with_table(context)" 1883,"def step_command_output_should_contain_log_records(context): """""" Verifies that the command output contains the specified log records (in any order). .. code-block: gherkin Then the command output should contain the following log records: | category | level | message | | bar | CURRENT | xxx | """""" assert context.table, ""REQUIRE: context.table"" context.table.require_columns([""category"", ""level"", ""message""]) format = getattr(context, ""log_record_format"", context.config.logging_format) for row in context.table.rows: output = LogRecordTable.make_output_for_row(row, format) context.execute_steps(u''' Then the command output should contain: """""" {expected_output} """""" '''.format(expected_output=output))" 1884,"def step_command_output_should_not_contain_log_records(context): """""" Verifies that the command output contains the specified log records (in any order). .. code-block: gherkin Then the command output should contain the following log records: | category | level | message | | bar | CURRENT | xxx | """""" assert context.table, ""REQUIRE: context.table"" context.table.require_columns([""category"", ""level"", ""message""]) format = getattr(context, ""log_record_format"", context.config.logging_format) for row in context.table.rows: output = LogRecordTable.make_output_for_row(row, format) context.execute_steps(u''' Then the command output should not contain: """""" {expected_output} """""" '''.format(expected_output=output))" 1885,"def step_command_output_should_contain_log_records_from_categories(context): """""" Verifies that the command output contains the specified log records (in any order). .. 
code-block: gherkin Given I define a log record schema: | category | level | message | | root | ERROR | __LOG_MESSAGE__ | Then the command output should contain log records from categories: | category | | bar | """""" assert context.table, ""REQUIRE: context.table"" context.table.require_column(""category"") record_schema = context.log_record_row_schema LogRecordTable.annotate_with_row_schema(context.table, record_schema) step_command_output_should_contain_log_records(context) context.table.remove_columns([""level"", ""message""])" 1886,"def step_command_output_should_not_contain_log_records_from_categories(context): """""" Verifies that the command output contains not log records from the provided log categories (in any order). .. code-block: gherkin Given I define the log record schema: | category | level | message | | root | ERROR | __LOG_MESSAGE__ | Then the command output should not contain log records from categories: | category | | bar | """""" assert context.table, ""REQUIRE: context.table"" context.table.require_column(""category"") record_schema = context.log_record_row_schema LogRecordTable.annotate_with_row_schema(context.table, record_schema) step_command_output_should_not_contain_log_records(context) context.table.remove_columns([""level"", ""message""])" 1887,"def step_file_should_contain_log_records(context, filename): """""" Verifies that the command output contains the specified log records (in any order). .. code-block: gherkin Then the file ""xxx.log"" should contain the log records: | category | level | message | | bar | CURRENT | xxx | """""" assert context.table, ""REQUIRE: context.table"" context.table.require_columns([""category"", ""level"", ""message""]) format = getattr(context, ""log_record_format"", context.config.logging_format) for row in context.table.rows: output = LogRecordTable.make_output_for_row(row, format) context.text = output step_file_should_contain_multiline_text(context, filename)" 1888,"def step_file_should_not_contain_log_records(context, filename): """""" Verifies that the command output contains the specified log records (in any order). .. code-block: gherkin Then the file ""xxx.log"" should not contain the log records: | category | level | message | | bar | CURRENT | xxx | """""" assert context.table, ""REQUIRE: context.table"" context.table.require_columns([""category"", ""level"", ""message""]) format = getattr(context, ""log_record_format"", context.config.logging_format) for row in context.table.rows: output = LogRecordTable.make_output_for_row(row, format) context.text = output step_file_should_not_contain_multiline_text(context, filename)" 1889,"def step_use_log_record_configuration(context): """""" Define log record configuration parameters. .. code-block: gherkin Given I use the log record configuration: | property | value | | format | | | datefmt | | """""" assert context.table, ""REQUIRE: context.table"" context.table.require_columns([""property"", ""value""]) for row in context.table.rows: property_name = row[""property""] value = row[""value""] if property_name == ""format"": context.log_record_format = value elif property_name == ""datefmt"": context.log_record_datefmt = value else: raise KeyError(""Unknown property=%s"" % property_name)" 1890,"def annotate_with_row_schema(table, row_schema): """""" Annotate/extend a table of log-records with additional columns from the log-record schema if columns are missing. 
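A minimal sketch of the logging.makeLogRecord()/Formatter pattern that
make_log_record_output() (defined earlier) relies on; the category, level and
message values are illustrative:

    import logging
    record = logging.makeLogRecord({'name': 'foo.bar',
                                    'levelname': 'ERROR',
                                    'msg': 'Hello Foo.Bar'})
    line = logging.Formatter('%(levelname)s:%(name)s:%(message)s').format(record)
    # line == 'ERROR:foo.bar:Hello Foo.Bar'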
:param table: Table w/ log-records (as :class:`behave.model.Table`) :param row_schema: Log-record row schema (as dict). """""" for column, value in row_schema.items(): if column not in table.headings: table.add_column(column, default_value=value)" 1891,"def smart_decode(binary, errors=""strict""): """""" Automatically find the right codec to decode binary data to string. :param binary: binary data :param errors: one of 'strict', 'ignore' and 'replace' :return: string """""" d = chardet.detect(binary) encoding = d[""encoding""] confidence = d[""confidence""] text = binary.decode(encoding, errors=errors) return text, encoding, confidence" 1892,"def decode(self, binary, url, encoding=None, errors=""strict""): """""" Decode binary to string. :param binary: binary content of a http request. :param url: endpoint of the request. :param encoding: manually specify the encoding. :param errors: errors handle method. :return: str """""" if encoding is None: domain = util.get_domain(url) if domain in self.domain_encoding_table: encoding = self.domain_encoding_table[domain] html = binary.decode(encoding, errors=errors) else: html, encoding, confidence = smart_decode( binary, errors=errors) # cache domain name and encoding self.domain_encoding_table[domain] = encoding else: html = binary.decode(encoding, errors=errors) return html" 1893,"def modify_number_pattern(number_pattern, **kwargs): """"""Modifies a number pattern by specified keyword arguments."""""" params = ['pattern', 'prefix', 'suffix', 'grouping', 'int_prec', 'frac_prec', 'exp_prec', 'exp_plus'] for param in params: if param in kwargs: continue kwargs[param] = getattr(number_pattern, param) return NumberPattern(**kwargs)" 1894,"def format_currency_field(__, prec, number, locale): """"""Formats a currency field."""""" locale = Locale.parse(locale) currency = get_territory_currencies(locale.territory)[0] if prec is None: pattern, currency_digits = None, True else: prec = int(prec) pattern = locale.currency_formats['standard'] pattern = modify_number_pattern(pattern, frac_prec=(prec, prec)) currency_digits = False return format_currency(number, currency, pattern, locale=locale, currency_digits=currency_digits)" 1895,"def format_decimal_field(__, prec, number, locale): """"""Formats a decimal field: .. sourcecode:: 1234 ('D') -> 1234 -1234 ('D6') -> -001234 """""" prec = 0 if prec is None else int(prec) if number < 0: prec += 1 return format(number, u'0%dd' % prec)" 1896,"def format_float_field(__, prec, number, locale): """"""Formats a fixed-point field."""""" format_ = u'0.' if prec is None: format_ += u'#' * NUMBER_DECIMAL_DIGITS else: format_ += u'0' * int(prec) pattern = parse_pattern(format_) return pattern.apply(number, locale)" 1897,"def format_number_field(__, prec, number, locale): """"""Formats a number field."""""" prec = NUMBER_DECIMAL_DIGITS if prec is None else int(prec) locale = Locale.parse(locale) pattern = locale.decimal_formats.get(None) return pattern.apply(number, locale, force_frac=(prec, prec))" 1898,"def format_percent_field(__, prec, number, locale): """"""Formats a percent field."""""" prec = PERCENT_DECIMAL_DIGITS if prec is None else int(prec) locale = Locale.parse(locale) pattern = locale.percent_formats.get(None) return pattern.apply(number, locale, force_frac=(prec, prec))" 1899,"def format_hexadecimal_field(spec, prec, number, locale): """"""Formats a hexadeciaml field."""""" if number < 0: # Take two's complement. 
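# (Explanatory note, not in the original: the mask built below keeps the
#  smallest whole number of bytes that can represent the magnitude, e.g.
#  number = -1 gives a mask of 0xff and a result of 255, while number = -300
#  needs two bytes, giving -300 & 0xffff == 65236 == 0xfed4.)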
number &= (1 << (8 * int(math.log(-number, 1 << 8) + 1))) - 1 format_ = u'0%d%s' % (int(prec or 0), spec) return format(number, format_)" 1900,"def format_field(self, value, format_spec): """"""Format specifiers are described in :func:`format_field` which is a static function. """""" if format_spec: spec, arg = format_spec[0], format_spec[1:] arg = arg or None else: spec = arg = None return self._format_field(spec, arg, value, self.numeric_locale)" 1901,"def delegate(attribute_name, method_names): """""" Decorator factory to delegate methods to an attribute. Decorate a class to map every method in `method_names` to the attribute `attribute_name`. """""" call_attribute_method = partial(_call_delegated_method, attribute_name) def decorate(class_): for method in method_names: setattr(class_, method, partialmethod(call_attribute_method, method)) return class_ return decorate" 1902,"def prepare_query(query): """""" Prepare a query object for the RAPI. RAPI has lots of curious rules for coercing values. This function operates on dicts in-place and has no return value. @type query: dict @param query: Query arguments """""" for name in query: value = query[name] # None is sent as an empty string. if value is None: query[name] = """" # Booleans are sent as 0 or 1. elif isinstance(value, bool): query[name] = int(value) # XXX shouldn't this just check for basestring instead? elif isinstance(value, dict): raise ValueError(""Invalid query data type %r"" % type(value).__name__)" 1903,"def itemgetters(*args): """""" Get a handful of items from an iterable. This is just map(itemgetter(...), iterable) with a list comprehension. """""" f = itemgetter(*args) def inner(l): return [f(x) for x in l] return inner" 1904,"def create_container(self, container, **kwargs): """"""Create container :param container(string): container name (Container is equivalent to Bucket term in Amazon). :param **kwargs(dict): extend args for specific driver. """""" try: LOG.debug('create_container() with %s is success.', self.driver) return self.driver.create_container(container, **kwargs) except DriverException as e: LOG.exception('create_container() with %s raised\ an exception %s.', self.driver, e)" 1905,"def delete_container(self, container): """"""Delete container :param container: container name (Container is equivalent to Bucket term in Amazon). """""" try: LOG.debug('delete_container() with %s is success.', self.driver) return self.driver.delete_container(container) except DriverException as e: LOG.exception('delete_container() with %s raised\ an exception %s.', self.driver, e)" 1906,"def stat_container(self, container): """"""Stat container metadata :param container: container name (Container is equivalent to Bucket term in Amazon). """""" LOG.debug('stat_container() with %s is success.', self.driver) return self.driver.stat_container(container)" 1907,"def update_container(self, container, metadata, **kwargs): """"""Update container metadata :param container: container name (Container is equivalent to Bucket term in Amazon). :param metadata(dict): additional metadata to include in the request. :param **kwargs(dict): extend args for specific driver. """""" LOG.debug('update_object() with %s is success.', self.driver) return self.driver.update_container(container, metadata, **kwargs)" 1908,"def upload_object(self, container, obj, contents, content_length=None, metadata=None, **kwargs): """"""Upload object :param container: container name (Container is equivalent to Bucket term in Amazon). 
:param obj: object name (Object is equivalent to Key term in Amazon). :param contents: object content. :param content_length(int): content length. :param metadata (dict): addition infomation. :param **kwargs(dict): extend args for specific driver. """""" try: LOG.debug('upload_object() with %s is success.', self.driver) return self.driver.upload_object(container, obj, contents=contents, content_length=content_length, metadata=metadata, **kwargs) except DriverException as e: LOG.exception('upload_object() with %s raised\ an exception %s.', self.driver, e)" 1909,"def stat_object(self, container, obj): """"""Stat object metadata :param container: container name (Container is equivalent to Bucket term in Amazon). :param obj: object name (Object is equivalent to Key term in Amazon). """""" LOG.debug('stat_object() with %s is success.', self.driver) return self.driver.stat_object(container, obj)" 1910,"def delete_object(self, container, obj, **kwargs): """"""Delete object in container :param container: container name (Container is equivalent to Bucket term in Amazon). :param obj: object name (Object is equivalent to Key term in Amazon). """""" try: LOG.debug('delete_object() with %s is success.', self.driver) return self.driver.delete_object(container, obj, **kwargs) except DriverException as e: LOG.exception('download_object() with %s raised\ an exception %s.', self.driver, e)" 1911,"def list_container_objects(self, container, prefix=None, delimiter=None): """"""List container objects :param container: container name (Container is equivalent to Bucket term in Amazon). :param prefix: prefix query :param delimiter: string to delimit the queries on """""" LOG.debug('list_container_objects() with %s is success.', self.driver) return self.driver.list_container_objects(container, prefix, delimiter)" 1912,"def update_object(self, container, obj, metadata, **kwargs): """"""Update object metadata :param container: container name (Container is equivalent to Bucket term in Amazon). :param obj: object name (Object is equivalent to Key term in Amazon). :param metadata(dict): additional metadata to include in the request. """""" try: LOG.debug('update_object() with %s is success.', self.driver) return self.driver.update_object(container, obj, metadata, **kwargs) except DriverException as e: LOG.exception('copy_object() with %s raised\ an exception %s.', self.driver, e)" 1913,"def copy_object(self, container, obj, metadata=None, destination=None, **kwargs): """"""Copy object :param container: container name (Container is equivalent to Bucket term in Amazon). :param obj: object name (Object is equivalent to Key term in Amazon). :param destination: The container and object name of the destination object in the form of /container/object; if None, the copy will use the source as the destination. :param metadata(dict): additional metadata(headers) to include in the request :param **kwargs(dict): extend args for specific driver. 
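Hedged usage sketch for the driver wrapper above; ``store`` is an instance of
the wrapping class and the container/object names are illustrative.

    store.create_container('reports')
    store.upload_object('reports', '2015/summary.csv',
                        contents=b'a,b\n1,2\n', content_length=8,
                        metadata={'author': 'etl'})
    store.list_container_objects('reports', prefix='2015/')
    store.copy_object('reports', '2015/summary.csv',
                      destination='/archive/2015-summary.csv')
    store.delete_object('reports', '2015/summary.csv')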
"""""" try: LOG.debug('copy_object() with %s is success.', self.driver) return self.driver.copy_object(container, obj, metadata=metadata, destination=destination, **kwargs) except DriverException as e: LOG.exception('copy_object() with %s raised\ an exception %s.', self.driver, e)" 1914,"def permission_required(*actions, obj=None, raise_exception=False): """"""Permission checking decorator -- works like the ``permission_required`` decorator in the default Django authentication system, except that it takes a sequence of actions to check, an object must be supplied, and the user must have permission to perform all of the actions on the given object for the permissions test to pass. *Not actually sure how useful this is going to be: in any case where obj is not None, it's going to be tricky to get the object into the decorator. Class-based views are definitely best here...* """""" def checker(user): ok = False if user.is_authenticated() and check_perms(user, actions, [obj]): ok = True if raise_exception and not ok: raise PermissionDenied else: return ok def decorator(view_func): @wraps(view_func, assigned=available_attrs(view_func)) def _wrapped_view(request, *args, **kwargs): if checker(request.user): return view_func(request, *args, **kwargs) return _wrapped_view return decorator" 1915,"def get_path_fields(cls, base=[]): """"""Get object fields used for calculation of django-tutelary object paths. """""" pfs = [] for pf in cls.TutelaryMeta.path_fields: if pf == 'pk': pfs.append(base + ['pk']) else: f = cls._meta.get_field(pf) if isinstance(f, models.ForeignKey): pfs += get_path_fields(f.target_field.model, base=base + [pf]) else: pfs.append(base + [f.name]) return pfs" 1916,"def get_perms_object(obj, action): """"""Get the django-tutelary path for an object, based on the fields listed in ``TutelaryMeta.pfs``. """""" def get_one(pf): if isinstance(pf, str): return pf else: return str(reduce(lambda o, f: getattr(o, f), pf, obj)) return Object([get_one(pf) for pf in obj.__class__.TutelaryMeta.pfs])" 1917,"def make_get_perms_object(perms_objs): """"""Make a function to delegate permission object rendering to some other (foreign key) field of an object. """""" def retfn(obj, action): if action in perms_objs: if perms_objs[action] is None: return None else: return get_perms_object(getattr(obj, perms_objs[action]), action) else: return get_perms_object(obj, action) return retfn" 1918,"def permissioned_model(cls, perm_type=None, path_fields=None, actions=None): """"""Function to set up a model for permissioning. Can either be called directly, passing a class and suitable values for ``perm_type``, ``path_fields`` and ``actions``, or can be used as a class decorator, taking values for ``perm_type``, ``path_fields`` and ``actions`` from the ``TutelaryMeta`` subclass of the decorated class. 
"""""" if not issubclass(cls, models.Model): raise DecoratorException( 'permissioned_model', ""class '"" + cls.__name__ + ""' is not a Django model"" ) added = False try: if not hasattr(cls, 'TutelaryMeta'): if perm_type is None or path_fields is None or actions is None: raise DecoratorException( 'permissioned_model', (""missing argument: all of perm_type, path_fields and "" + ""actions must be supplied"") ) added = True cls.TutelaryMeta = type('TutelaryMeta', (object,), dict(perm_type=perm_type, path_fields=path_fields, actions=actions)) cls.TutelaryMeta.pfs = ([cls.TutelaryMeta.perm_type] + get_path_fields(cls)) perms_objs = {} for a in cls.TutelaryMeta.actions: an = a ap = {} if isinstance(a, tuple): an = a[0] ap = a[1] Action.register(an) if isinstance(ap, dict) and 'permissions_object' in ap: po = ap['permissions_object'] if po is not None: try: t = cls._meta.get_field(po).__class__ if t not in [models.ForeignKey, models.OneToOneField]: raise PermissionObjectException(po) except: raise PermissionObjectException(po) perms_objs[an] = po if len(perms_objs) == 0: cls.get_permissions_object = get_perms_object else: cls.get_permissions_object = make_get_perms_object(perms_objs) return cls except: if added: del cls.TutelaryMeta raise" 1919,"def _getArrays(items, attr, defaultValue): """"""Return arrays with equal size of item attributes from a list of sorted ""items"" for fast and convenient data processing. :param attr: list of item attributes that should be added to the returned array. :param defaultValue: if an item is missing an attribute, the ""defaultValue"" is added to the array instead. :returns: {'attribute1': numpy.array([attributeValue1, ...]), ...} """""" arrays = dict([(key, []) for key in attr]) for item in items: for key in attr: arrays[key].append(getattr(item, key, defaultValue)) for key in [_ for _ in viewkeys(arrays)]: arrays[key] = numpy.array(arrays[key]) return arrays" 1920,"def _getItems(container, containerKeys=None, sort=False, reverse=False, selector=lambda item: True): """"""Generator that yields filtered and/or sorted items from the specified ""container"". :param container: The container has to be a dictionary of dictionaries that contain some kind of items. Depending on the specified parameters all or a subset of these items are yielded. ``{containerKey1: {key1: item1, key2: item2, ...}, ...}`` :param containerKeys: valid keys of the ""container"", if None all keys are considered. :type containerKeys: a single dictionary key or a list of keys :param sort: if ""sort"" is specified the returned list of items is sorted according to the item attribute specified by ""sort"", if the attribute is not present the item is skipped. :param reverse: bool, ``True`` reverses the sort order :param selector: a function which is called with each item and returns True (include item) or False (discard item). 
If not specified all items are returned :returns: items from container that passed the selector function """""" if containerKeys is None: containerKeys = [_ for _ in viewkeys(container)] else: containerKeys = aux.toList(containerKeys) if sort: sortIdentifier = list() for containerKey in containerKeys: for identifier in [_ for _ in viewkeys(container[containerKey])]: item = container[containerKey][identifier] if selector(item): try: sortIdentifier.append((getattr(item, sort), containerKey, identifier ) ) except AttributeError: pass sortIdentifier.sort(key=ITEMGETTER(0), reverse=reverse) for _, containerKey, identifier in sortIdentifier: yield container[containerKey][identifier] else: for containerKey in containerKeys: for identifier in [_ for _ in viewkeys(container[containerKey])]: item = container[containerKey][identifier] if selector(item): yield item" 1921,"def _containerSetPath(container, folderpath, specfiles): """"""Helper function for :class:`MsrunContainer`, :class:`SiiContainer` and :class:`FiContainer`. Changes the folderpath of the specified specfiles in container.info: ``container.info[specfile]['path'] = folderpath``. :param container: a container like class that has an attribute ``.info`` :param folderpath: a filedirectory :param specfiles: a list of ms-run names """""" if not os.path.exists(folderpath): warntext = 'Error while calling ""_containerSetPath()"": The specified '\ 'directory ""%s"" does not exist!' %(folderpath, ) warnings.warn(warntext) for specfile in specfiles: if specfile in container.info: container.info[specfile]['path'] = folderpath else: warntext = 'Error while calling ""_containerSetPath()"": The '\ 'specfile ""%s"" is not present in the container!'\ %(specfile, ) warnings.warn(warntext)" 1922,"def _mzmlListAttribToTuple(oldList): """"""Turns the param entries of elements in a list elements into tuples, used in :func:`MzmlScan._fromJSON()` and :func:`MzmlPrecursor._fromJSON()`. .. note:: only intended for a list of elements that contain params. For example the mzML element ``selectedIonList`` or ``scanWindowList``. :param oldList: [[paramList, paramList, ...], ...] :returns: [[paramTuple, paramTuple, ...], ...] """""" newList = list() for oldParamList in oldList: newParamLIst = [tuple(param) for param in oldParamList] newList.append(newParamLIst) return newList" 1923,"def addMsrunContainers(mainContainer, subContainer): """"""Adds the complete content of all specfile entries from the subContainer to the mainContainer. However if a specfile of ``subContainer.info`` is already present in ``mainContainer.info`` its contents are not added to the mainContainer. :param mainContainer: :class:`MsrunContainer` :param subContainer: :class:`MsrunContainer` .. warning:: does not generate new items, all items added to the ``mainContainer`` are still present in the ``subContainer`` and changes made to elements of one container also affects the elements of the other one (ie elements share same memory location). 
"""""" typeToContainer = {'rm': 'rmc', 'ci': 'cic', 'smi': 'smic', 'sai': 'saic', 'si': 'sic' } for specfile in subContainer.info: if specfile in mainContainer.info: continue mainContainer.addSpecfile(specfile, subContainer.info[specfile]['path']) for datatype, status in listitems(subContainer.info[specfile]['status']): if not status: continue datatypeContainer = typeToContainer[datatype] dataTypeContainer = getattr(mainContainer, datatypeContainer) subContainerData = getattr(subContainer, datatypeContainer )[specfile] dataTypeContainer[specfile] = subContainerData mainContainer.info[specfile]['status'][datatype] = True" 1924,"def getArrays(self, attr=None, specfiles=None, sort=False, reverse=False, selector=None, defaultValue=None): """"""Return a condensed array of data selected from :class:`Si` instances from ``self.sic`` for fast and convenient data processing. :param attr: list of :class:`Si` item attributes that should be added to the returned array. The attributes ""id"" and ""specfile"" are always included, in combination they serve as a unique id. :param defaultValue: if an item is missing an attribute, the ""defaultValue"" is added to the array instead. :param specfiles: filenames of ms-run files, if specified return only items from those files :type specfiles: str or [str, str, ...] :param sort: if ""sort"" is specified the returned list of items is sorted according to the :class:`Si` attribute specified by ""sort"", if the attribute is not present the item is skipped. :param reverse: bool, set True to reverse sort order :param selector: a function which is called with each :class:`Si` item and has to return True (include item) or False (discard item). Default function is: ``lambda si: True`` :returns: {'attribute1': numpy.array(), 'attribute2': numpy.array(), ... } """""" selector = (lambda si: True) if selector is None else selector attr = attr if attr is not None else [] attr = set(['id', 'specfile'] + aux.toList(attr)) items = self.getItems(specfiles, sort, reverse, selector) return _getArrays(items, attr, defaultValue)" 1925,"def getItems(self, specfiles=None, sort=False, reverse=False, selector=None): """"""Generator that yields filtered and/or sorted :class:`Si` instances from ``self.sic``. :param specfiles: filenames of ms-run files - if specified return only items from those files :type specfiles: str or [str, str, ...] :param sort: if ""sort"" is specified the returned list of items is sorted according to the :class:`Si` attribute specified by ""sort"", if the attribute is not present the item is skipped. :param reverse: bool, ``True`` reverses the sort order :param selector: a function which is called with each ``Si`` item and returns True (include item) or False (discard item). Default function is: ``lambda si: True`` :returns: items from container that passed the selector function """""" selector = (lambda si: True) if selector is None else selector if specfiles is None: specfiles = [_ for _ in viewkeys(self.info)] else: specfiles = aux.toList(specfiles) return _getItems(self.sic, specfiles, sort, reverse, selector)" 1926,"def addSpecfile(self, specfiles, path): """"""Prepares the container for loading ``mrc`` files by adding specfile entries to ``self.info``. Use :func:`MsrunContainer.load()` afterwards to actually import the files :param specfiles: the name of an ms-run file or a list of names :type specfiles: str or [str, str, ...] 
:param path: filedirectory used for loading and saving ``mrc`` files """""" for specfile in aux.toList(specfiles): if specfile not in self.info: self._addSpecfile(specfile, path) else: warntext = 'Error while calling ""MsrunContainer.addSpecfile()""'\ ': ""%s"" is already present ""MsrunContainer.info""'\ % (specfile, ) warnings.warn(warntext)" 1927,"def _addSpecfile(self, specfile, path): """"""Adds a new specfile entry to MsrunContainer.info. See also :class:`MsrunContainer.addSpecfile()`. :param specfile: the name of an ms-run file :param path: filedirectory used for loading and saving ``mrc`` files """""" datatypeStatus = {'rm': False, 'ci': False, 'smi': False, 'sai': False, 'si': False } self.info[specfile] = {'path': path, 'status': datatypeStatus}" 1928,"def setPath(self, folderpath, specfiles=None): """"""Changes the folderpath of the specified specfiles. The folderpath is used for saving and loading of ``mrc`` files. :param specfiles: the name of an ms-run file or a list of names. If None all specfiles are selected. :type specfiles: None, str, [str, str] :param folderpath: a filedirectory """""" if specfiles is None: specfiles = [_ for _ in viewkeys(self.info)] else: specfiles = aux.toList(specfiles) _containerSetPath(self, folderpath, specfiles)" 1929,"def removeData(self, specfiles=None, rm=False, ci=False, smi=False, sai=False, si=False): """"""Removes the specified datatypes of the specfiles from the msrunContainer. To completely remove a specfile use :func:`MsrunContainer.removeSpecfile`, which also removes the complete entry from ``self.info``. :param specfiles: the name of an ms-run file or a list of names. If None all specfiles are selected. :type specfiles: None, str, [str, str] :param rm: bool, True to select ``self.rmc`` :param ci: bool, True to select ``self.cic`` :param smi: bool, True to select ``self.smic`` :param sai: bool, True to select ``self.saic`` :param si: bool, True to select ``self.sic`` """""" if specfiles is None: specfiles = [_ for _ in viewkeys(self.info)] else: specfiles = aux.toList(specfiles) #TODO: add a check if specfiles are present in the container typeToContainer = {'rm': 'rmc', 'ci': 'cic', 'smi': 'smic', 'sai': 'saic', 'si': 'sic' } datatypes = self._processDatatypes(rm, ci, smi, sai, si) for specfile in specfiles: for datatype in datatypes: datatypeContainer = typeToContainer[datatype] dataContainer = getattr(self, datatypeContainer) try: del dataContainer[specfile] except KeyError: pass finally: self.info[specfile]['status'][datatype] = False" 1930,"def removeSpecfile(self, specfiles): """"""Completely removes the specified specfiles from the ``msrunContainer``. :param specfiles: the name of an ms-run file or a list of names. If None all specfiles are selected. :type specfiles: str, [str, str] """""" for specfile in aux.toList(specfiles): for datatypeContainer in ['rmc', 'cic', 'smic', 'saic', 'sic']: dataContainer = getattr(self, datatypeContainer) try: del dataContainer[specfile] except KeyError: pass del self.info[specfile]" 1931,"def _processDatatypes(self, rm, ci, smi, sai, si): """"""Helper function that returns a list of datatype strings, depending on the parameters boolean value. :param rm: bool, True to add ``rm`` :param ci: bool, True to add ``ci`` :param smi: bool, True to add ``smi`` :param sai: bool, True to add ``sai`` :param si: bool, True to add ``si`` :returns: [datatype1, ...] 
"""""" datatypes = list() for datatype, value in [('rm', rm), ('ci', ci), ('smi', smi), ('sai', sai), ('si', si)]: if value: datatypes.append(datatype) return datatypes" 1932,"def save(self, specfiles=None, rm=False, ci=False, smi=False, sai=False, si=False, compress=True, path=None): """"""Writes the specified datatypes to ``mrc`` files on the hard disk. .. note:: If ``.save()`` is called and no ``mrc`` files are present in the specified path new files are generated, otherwise old files are replaced. :param specfiles: the name of an ms-run file or a list of names. If None all specfiles are selected. :type specfiles: None, str, [str, str] :param rm: bool, True to select ``self.rmc`` (run metadata) :param ci: bool, True to select ``self.cic`` (chromatogram items) :param smi: bool, True to select ``self.smic`` (spectrum metadata items) :param sai: bool, True to select ``self.saic`` (spectrum array items) :param si: bool, True to select ``self.sic`` (spectrum items) :param compress: bool, True to use zip file compression :param path: filedirectory to which the ``mrc`` files are written. By default the parameter is set to ``None`` and the filedirectory is read from ``self.info[specfile]['path']`` """""" if specfiles is None: specfiles = [_ for _ in viewkeys(self.info)] else: specfiles = aux.toList(specfiles) datatypes = self._processDatatypes(rm, ci, smi, sai, si) if len(datatypes) == 0: datatypes = ['rm', 'ci', 'smi', 'sai', 'si'] for specfile in specfiles: if specfile not in self.info: warntext = 'Error while calling ""MsrunContainer.save()"": ""%s"" '\ 'is not present in ""MsrunContainer.info""!'\ % (specfile, ) warnings.warn(warntext) continue else: msrunInfo = self.info[specfile] specfilePath = msrunInfo['path'] if path is None else path with aux.PartiallySafeReplace() as msr: for datatype in datatypes: filename = specfile + '.mrc_' + datatype filepath = aux.joinpath(specfilePath, filename) with msr.open(filepath, 'w+b') as openfile: if datatype == 'rm': self._writeRmc(openfile, specfile) elif datatype == 'ci': self._writeCic(openfile, specfile, compress) elif datatype == 'si': self._writeSic(openfile, specfile, compress) elif datatype == 'smi': self._writeSmic(openfile, specfile, compress) elif datatype == 'sai': self._writeSaic(openfile, specfile, compress)" 1933,"def _writeCic(self, filelike, specfile, compress): """"""Writes the ``.cic`` container entry of the specified specfile to the ``mrc_cic`` format. For details see :func:`maspy.auxiliary.writeBinaryItemContainer()` :param filelike: path to a file (str) or a file-like object :param specfile: name of an ms-run file present in ``self.info`` :param compress: bool, True to use zip file compression """""" aux.writeBinaryItemContainer(filelike, self.cic[specfile], compress)" 1934,"def _writeSaic(self, filelike, specfile, compress): """"""Writes the ``.ssic`` container entry of the specified specfile to the ``mrc_saic`` format. For details see :func:`maspy.auxiliary.writeBinaryItemContainer()` :param filelike: path to a file (str) or a file-like object :param specfile: name of an ms-run file present in ``self.info`` :param compress: bool, True to use zip file compression """""" aux.writeBinaryItemContainer(filelike, self.saic[specfile], compress)" 1935,"def _writeSmic(self, filelike, specfile, compress): """"""Writes the ``.smic`` container entry of the specified specfile to the ``mrc_smic`` format. 
For details see :func:`maspy.auxiliary.writeJsonZipfile()` :param filelike: path to a file (str) or a file-like object :param specfile: name of an ms-run file present in ``self.info`` :param compress: bool, True to use zip file compression """""" aux.writeJsonZipfile(filelike, self.smic[specfile], compress)" 1936,"def _writeSic(self, filelike, specfile, compress): """"""Writes the ``.sic`` container entry of the specified specfile to the ``mrc_sic`` format. For details see :func:`maspy.auxiliary.writeJsonZipfile()` :param filelike: path to a file (str) or a file-like object :param specfile: name of an ms-run file present in ``self.info`` :param compress: bool, True to use zip file compression """""" aux.writeJsonZipfile(filelike, self.sic[specfile], compress)" 1937,"def _writeRmc(self, filelike, specfile): """"""Writes the ``.rmc`` container entry of the specified specfile as an human readable and pretty formatted xml string. :param filelike: path to a file (str) or a file-like object :param specfile: name of an ms-run file present in ``self.info`` """""" xmlString = ETREE.tostring(self.rmc[specfile], pretty_print=True) filelike.write(xmlString)" 1938,"def load(self, specfiles=None, rm=False, ci=False, smi=False, sai=False, si=False): """"""Import the specified datatypes from ``mrc`` files on the hard disk. :param specfiles: the name of an ms-run file or a list of names. If None all specfiles are selected. :type specfiles: None, str, [str, str] :param rm: bool, True to import ``mrc_rm`` (run metadata) :param ci: bool, True to import ``mrc_ci`` (chromatogram items) :param smi: bool, True to import ``mrc_smi`` (spectrum metadata items) :param sai: bool, True to import ``mrc_sai`` (spectrum array items) :param si: bool, True to import ``mrc_si`` (spectrum items) """""" if specfiles is None: specfiles = [_ for _ in viewkeys(self.info)] else: specfiles = aux.toList(specfiles) #Select only specfiles which are present in the ``self.info``. selectedSpecfiles = list() for specfile in specfiles: if specfile not in self.info: warntext = 'Error while calling ""MsrunContainer.load()"": ""%s"" '\ 'not present in MsrunContainer.info' % specfile warnings.warn(warntext) else: selectedSpecfiles.append(specfile) datatypes = self._processDatatypes(rm, ci, smi, sai, si) if len(datatypes) == 0: datatypes = ['rm', 'ci', 'smi', 'sai', 'si'] for specfile in selectedSpecfiles: msrunInfo = self.info[specfile] specfilePath = msrunInfo['path'] if 'rm' in datatypes: rmPath = aux.joinpath(specfilePath, specfile+'.mrc_rm') with open(rmPath, 'rb') as openfile: xmlString = openfile.read() self.rmc[specfile] = ETREE.fromstring(xmlString) msrunInfo['status']['rm'] = True if 'ci' in datatypes: ciPath = aux.joinpath(specfilePath, specfile+'.mrc_ci') self.cic[specfile] = aux.loadBinaryItemContainer(ciPath, Ci.jsonHook) msrunInfo['status']['ci'] = True if 'smi' in datatypes: smiPath = aux.joinpath(specfilePath, specfile+'.mrc_smi') with zipfile.ZipFile(smiPath, 'r') as containerZip: #Convert the zipfile data into a str object,necessary since #containerZip.read() returns a bytes object. 
jsonString = io.TextIOWrapper(containerZip.open('data'), encoding='utf-8' ).read() self.smic[specfile] = json.loads(jsonString, object_hook=Smi.jsonHook ) msrunInfo['status']['smi'] = True if 'sai' in datatypes: saiPath = aux.joinpath(specfilePath, specfile+'.mrc_sai') self.saic[specfile] = aux.loadBinaryItemContainer(saiPath, Sai.jsonHook ) msrunInfo['status']['sai'] = True if 'si' in datatypes: siPath = aux.joinpath(specfilePath, specfile+'.mrc_si') with zipfile.ZipFile(siPath, 'r') as containerZip: #Convert the zipfile data into a str object, necessary since #containerZip.read() returns a bytes object. jsonString = io.TextIOWrapper(containerZip.open('data'), encoding='utf-8' ).read() self.sic[specfile] = json.loads(jsonString, object_hook=Si.jsonHook ) msrunInfo['status']['si'] = True" 1939,"def _reprJSON(self): """"""Returns a JSON serializable represenation of a ``Ci`` class instance. Use :func:`maspy.core.Ci._fromJSON()` to generate a new ``Ci`` instance from the return value. :returns: a JSON serializable python object """""" return {'__Ci__': (self.id, self.specfile, self.dataProcessingRef, self.precursor, self.product, self.params, self.attrib, self.arrayInfo ) }" 1940,"def _fromJSON(cls, jsonobject): """"""Generates a new instance of :class:`maspy.core.Ci` from a decoded JSON object (as generated by :func:`maspy.core.Ci._reprJSON()`). :param jsonobject: decoded JSON object :returns: a new instance of :class:`Ci` """""" newInstance = cls(jsonobject[0], jsonobject[1]) attribDict = {} attribDict['dataProcessingRef'] = jsonobject[2] attribDict['precursor'] = jsonobject[3] attribDict['product'] = jsonobject[4] attribDict['params'] = [tuple(param) for param in jsonobject[5]] attribDict['attrib'] = jsonobject[6] attribDict['arrayInfo'] = dict() for arrayType, jsonEntry in viewitems(jsonobject[7]): arrayEntry = {'dataProcessingRef': jsonEntry['dataProcessingRef'], 'params': [tuple(_) for _ in jsonEntry['params']] } attribDict['arrayInfo'][arrayType] = arrayEntry for key, value in viewitems(attribDict): setattr(newInstance, key, value) return newInstance" 1941,"def jsonHook(encoded): """"""Custom JSON decoder that allows construction of a new ``Ci`` instance from a decoded JSON object. :param encoded: a JSON decoded object literal (a dict) :returns: ""encoded"" or one of the these objects: :class:`Ci`, :class:`MzmlProduct`, :class:`MzmlPrecursor` """""" if '__Ci__' in encoded: return Ci._fromJSON(encoded['__Ci__']) elif '__MzmlProduct__' in encoded: return MzmlProduct._fromJSON(encoded['__MzmlProduct__']) elif '__MzmlPrecursor__' in encoded: return MzmlPrecursor._fromJSON(encoded['__MzmlPrecursor__']) else: return encoded" 1942,"def _fromJSON(cls, jsonobject): """"""Generates a new instance of :class:`maspy.core.Sai` from a decoded JSON object (as generated by :func:`maspy.core.Sai._reprJSON()`). :param jsonobject: decoded JSON object :returns: a new instance of :class:`Sai` """""" newInstance = cls(jsonobject[0], jsonobject[1]) for arrayType, jsonEntry in viewitems(jsonobject[2]): arrayEntry = {'dataProcessingRef': jsonEntry['dataProcessingRef'], 'params': [tuple(_) for _ in jsonEntry['params']] } newInstance.arrayInfo[arrayType] = arrayEntry return newInstance" 1943,"def _reprJSON(self): """"""Returns a JSON serializable represenation of a ``Smi`` class instance. Use :func:`maspy.core.Sai._fromJSON()` to generate a new ``Smi`` instance from the return value. 
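A minimal sketch of the round trip that the ``_reprJSON()`` / ``jsonHook()`` pair above enables, assuming ``ci`` is an existing, populated ``Ci`` instance (the container code above passes the same ``object_hook`` when reading ``mrc_ci`` files): >>> import json >>> encoded = json.dumps(ci._reprJSON())                      # wraps the item in a {'__Ci__': ...} dict >>> restored = json.loads(encoded, object_hook=Ci.jsonHook)   # rebuilds a Ci instance from the wrapper >>> restored.id == ci.id True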
:returns: a JSON serializable python object """""" return {'__Smi__': (self.id, self.specfile, self.attributes, self.params, self.scanListParams, self.scanList, self.precursorList, self.productList ) }" 1944,"def _fromJSON(cls, jsonobject): """"""Generates a new instance of :class:`maspy.core.Smi` from a decoded JSON object (as generated by :func:`maspy.core.Smi._reprJSON()`). :param jsonobject: decoded JSON object :returns: a new instance of :class:`Smi` """""" newInstance = cls(None, None) attribDict = {} attribDict['id'] = jsonobject[0] attribDict['specfile'] = jsonobject[1] attribDict['attributes'] = jsonobject[2] attribDict['params'] = [tuple(param) for param in jsonobject[3]] attribDict['scanListParams'] = [tuple(param) for param in jsonobject[4]] attribDict['scanList'] = jsonobject[5] attribDict['precursorList'] = jsonobject[6] attribDict['productList'] = jsonobject[7] for key, value in viewitems(attribDict): setattr(newInstance, key, value) return newInstance" 1945,"def jsonHook(encoded): """"""Custom JSON decoder that allows construction of a new ``Smi`` instance from a decoded JSON object. :param encoded: a JSON decoded object literal (a dict) :returns: ""encoded"" or one of the these objects: :class:`Smi`, :class:`MzmlScan`, :class:`MzmlProduct`, :class:`MzmlPrecursor` """""" if '__Smi__' in encoded: return Smi._fromJSON(encoded['__Smi__']) elif '__MzmlScan__' in encoded: return MzmlScan._fromJSON(encoded['__MzmlScan__']) elif '__MzmlProduct__' in encoded: return MzmlProduct._fromJSON(encoded['__MzmlProduct__']) elif '__MzmlPrecursor__' in encoded: return MzmlPrecursor._fromJSON(encoded['__MzmlPrecursor__']) else: return encoded" 1946,"def _fromJSON(cls, jsonobject): """"""Generates a new instance of :class:`maspy.core.Si` from a decoded JSON object (as generated by :func:`maspy.core.Si._reprJSON()`). :param jsonobject: decoded JSON object :returns: a new instance of :class:`Si` """""" newInstance = cls(None, None) newInstance.__dict__.update(jsonobject) return newInstance" 1947,"def _fromJSON(cls, jsonobject): """"""Generates a new instance of :class:`maspy.core.MzmlScan` from a decoded JSON object (as generated by :func:`maspy.core.MzmlScan._reprJSON()`). :param jsonobject: decoded JSON object :returns: a new instance of :class:`MzmlScan` """""" scanWindowList = _mzmlListAttribToTuple(jsonobject[0]) params = [tuple(param) for param in jsonobject[1]] return cls(scanWindowList, params)" 1948,"def _fromJSON(cls, jsonobject): """"""Generates a new instance of :class:`maspy.core.MzmlProduct` from a decoded JSON object (as generated by :func:`maspy.core.MzmlProduct._reprJSON()`). :param jsonobject: decoded JSON object :returns: a new instance of :class:`MzmlProduct` """""" isolationWindow =[tuple(param) for param in jsonobject] return cls(isolationWindow)" 1949,"def _reprJSON(self): """"""Returns a JSON serializable represenation of a ``MzmlPrecursor`` class instance. Use :func:`maspy.core.MzmlPrecursor._fromJSON()` to generate a new ``MzmlPrecursor`` instance from the return value. :returns: a JSON serializable python object """""" return {'__MzmlPrecursor__': (self.spectrumRef, self.activation, self.isolationWindow, self.selectedIonList ) }" 1950,"def _fromJSON(cls, jsonobject): """"""Generates a new instance of :class:`maspy.core.MzmlPrecursor` from a decoded JSON object (as generated by :func:`maspy.core.MzmlPrecursor._reprJSON()`). 
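Tying these per-item JSON hooks back to the container-level files, a usage sketch for the ``save()`` / ``load()`` methods shown earlier; ``msrunContainer`` is assumed to be a populated ``MsrunContainer`` and 'run01' a hypothetical specfile name present in its ``info``: >>> msrunContainer.save('run01', si=True, smi=True)   # writes run01.mrc_si and run01.mrc_smi >>> msrunContainer.load('run01', si=True)             # re-imports only the spectrum items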
:param jsonobject: decoded JSON object :returns: a new instance of :class:`MzmlPrecursor` """""" spectrumRef = jsonobject[0] activation = [tuple(param) for param in jsonobject[1]] isolationWindow =[tuple(param) for param in jsonobject[2]] selectedIonList = _mzmlListAttribToTuple(jsonobject[3]) return cls(spectrumRef, activation, isolationWindow, selectedIonList)" 1951,"def getItems(self, specfiles=None, sort=False, reverse=False, selector=None): """"""Generator that yields filtered and/or sorted :class:`Sii` instances from ``self.container``. :param specfiles: filenames of ms-run files - if specified return only items from those files :type specfiles: str or [str, str, ...] :param sort: if ""sort"" is specified the returned list of items is sorted according to the :class:`Sii` attribute specified by ""sort"", if the attribute is not present the item is skipped. :param reverse: bool, ``True`` reverses the sort order :param selector: a function which is called with each ``Sii`` item and has to return True (include item) or False (discard item). By default only items with ``Sii.isValid == True`` are returned. :returns: items from container that passed the selector function """""" selector = (lambda sii: sii.isValid) if selector is None else selector if specfiles is None: specfiles = [_ for _ in viewkeys(self.info)] else: specfiles = aux.toList(specfiles) return _getListItems(self.container, specfiles, sort, reverse, selector)" 1952,"def getValidItem(self, specfile, identifier): """"""Returns a ``Sii`` instance from ``self.container`` if it is valid, if all elements of ``self.container[specfile][identifier] are ``Sii.isValid == False`` then ``None`` is returned. :param specfile: a ms-run file name :param identifier: item identifier ``Sii.id`` :returns: ``Sii`` or ``None`` """""" for item in self.container[specfile][identifier]: if item.isValid: return item else: return None" 1953,"def _addSpecfile(self, specfile, path): """"""Adds a new specfile entry to SiiContainer.info. See also :class:`SiiContainer.addSpecfile()`. :param specfile: the name of an ms-run file :param path: filedirectory for loading and saving the ``siic`` files """""" self.info[specfile] = {'path': path, 'qcAttr': None, 'qcCutoff': None, 'qcLargerBetter': None, 'rankAttr': None, 'rankLargerBetter': None } self.container[specfile] = dict()" 1954,"def removeSpecfile(self, specfiles): """"""Completely removes the specified specfiles from the ``SiiContainer``. :param specfiles: the name of an ms-run file or a list of names. """""" for specfile in aux.toList(specfiles): del self.container[specfile] del self.info[specfile]" 1955,"def save(self, specfiles=None, compress=True, path=None): """"""Writes the specified specfiles to ``siic`` files on the hard disk. .. note:: If ``.save()`` is called and no ``siic`` files are present in the specified path new files are generated, otherwise old files are replaced. :param specfiles: the name of an ms-run file or a list of names. If None all specfiles are selected. :param compress: bool, True to use zip file compression :param path: filedirectory to which the ``siic`` files are written. 
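A short usage sketch for this container, assuming ``siiContainer`` is a populated ``SiiContainer``, 'run01' a hypothetical specfile listed in its ``info`` and 'scan=1337' a hypothetical item identifier: >>> sii = siiContainer.getValidItem('run01', 'scan=1337')   # first Sii with isValid == True, or None >>> siiContainer.save(specfiles='run01', compress=True)     # writes run01.siic to self.info['run01']['path']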
By default the parameter is set to ``None`` and the filedirectory is read from ``self.info[specfile]['path']`` """""" if specfiles is None: specfiles = [_ for _ in viewkeys(self.info)] else: specfiles = aux.toList(specfiles) for specfile in specfiles: if specfile not in self.info: warntext = 'Error while calling ""SiiContainer.save()"": ""%s"" is'\ ' not present in ""SiiContainer.info""!'\ % (specfile, ) warnings.warn(warntext) continue else: path = self.info[specfile]['path'] if path is None else path with aux.PartiallySafeReplace() as msr: filename = specfile + '.siic' filepath = aux.joinpath(path, filename) with msr.open(filepath, mode='w+b') as openfile: self._writeContainer(openfile, specfile, compress)" 1956,"def addSiInfo(self, msrunContainer, specfiles=None, attributes=['obsMz', 'rt', 'charge']): """"""Transfer attributes to :class:`Sii` elements from the corresponding :class`Si` in :class:`MsrunContainer.sic <MsrunContainer>`. If an attribute is not present in the ``Si`` the attribute value in the ``Sii``is set to ``None``. Attribute examples: 'obsMz', 'rt', 'charge', 'tic', 'iit', 'ms1Id' :param msrunContainer: an instance of :class:`MsrunContainer` which has imported the corresponding specfiles :param specfiles: the name of an ms-run file or a list of names. If None all specfiles are selected. :param attributes: a list of ``Si`` attributes that should be transfered. """""" if specfiles is None: specfiles = [_ for _ in viewkeys(self.info)] else: specfiles = aux.toList(specfiles) for specfile in specfiles: if specfile not in self.info: warntext = 'Error while calling ""SiiContainer.addSiInfo()"": '\ '""%s"" is not present in ""SiiContainer.info""!'\ % (specfile, ) warnings.warn(warntext) elif specfile not in msrunContainer.info: warntext = 'Error while calling ""SiiContainer.addSiInfo()"": '\ '""%s"" is not present in ""MsrunContainer.info""'\ % (specfile, ) warnings.warn(warntext) else: for identifier in self.container[specfile]: si = msrunContainer.sic[specfile][identifier] for sii in self.container[specfile][identifier]: for attribute in attributes: setattr(sii, attribute, getattr(si, attribute, None) )" 1957,"def calcMz(self, specfiles=None, guessCharge=True, obsMzKey='obsMz'): """"""Calculate the exact mass for ``Sii`` elements from the ``Sii.peptide`` sequence. :param specfiles: the name of an ms-run file or a list of names. If None all specfiles are selected. :param guessCharge: bool, True if the charge should be guessed if the attribute ``charge`` is missing from ``Sii``. Uses the calculated peptide mass and the observed m/z value to calculate the charge. :param obsMzKey: attribute name of the observed m/z value in ``Sii``. 
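The charge guessing used by ``calcMz()`` is plain arithmetic on the peptide mass and the observed m/z; a self-contained sketch with made-up numbers (the proton mass constant is taken from ``maspy.constants.atomicMassProton`` in the real code): >>> massProton = 1.00727646688                     # assumed value of the proton mass constant >>> peptideMass, obsMz = 1500.75, 751.38           # hypothetical calculated mass and observed m/z >>> round(peptideMass / (obsMz - massProton), 0)   # same formula as the _guessCharge lambda 2.0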
"""""" #TODO: important to test function, since changes were made _calcMass = maspy.peptidemethods.calcPeptideMass _calcMzFromMass = maspy.peptidemethods.calcMzFromMass _massProton = maspy.constants.atomicMassProton _guessCharge = lambda mass, mz: round(mass / (mz - _massProton), 0) if specfiles is None: specfiles = [_ for _ in viewkeys(self.info)] else: specfiles = aux.toList(specfiles) tempMasses = dict() for specfile in specfiles: if specfile not in self.info: warntext = 'Error while calling ""SiiContainer.calcMz()"": '\ '""%s"" is not present in ""SiiContainer.info""!'\ % (specfile, ) warnings.warn(warntext) else: for sii in self.getItems(specfiles=specfile): peptide = sii.peptide if peptide not in tempMasses: if hasattr(sii, 'diPeptide'): tempMasses[peptide] = (_calcMass(sii.peptide1) + _calcMass(sii.peptide2) ) else: tempMasses[peptide] = _calcMass(peptide) peptideMass = tempMasses[peptide] if sii.charge is not None: sii.excMz = _calcMzFromMass(peptideMass, sii.charge) elif guessCharge: guessedCharge = _guessCharge(peptideMass, getattr(sii, obsMzKey) ) sii.excMz = _calcMzFromMass(peptideMass, guessedCharge) sii.charge = guessedCharge else: sii.excMz = None del(tempMasses)" 1958,"def getArrays(self, attr=None, specfiles=None, sort=False, reverse=False, selector=None, defaultValue=None): """"""Return a condensed array of data selected from :class:`Fi` instances from ``self.container`` for fast and convenient data processing. :param attr: list of :class:`Fi` item attributes that should be added to the returned array. The attributes ""id"" and ""specfile"" are always included, in combination they serve as a unique id. :param defaultValue: if an item is missing an attribute, the ""defaultValue"" is added to the array instead. :param specfiles: filenames of ms-run files - if specified return only items from those files :type specfiles: str or [str, str, ...] :param sort: if ""sort"" is specified the returned list of items is sorted according to the :class:`Fi` attribute specified by ""sort"", if the attribute is not present the item is skipped. :param reverse: bool, set True to reverse sort order :param selector: a function which is called with each `Fi` item and has to return True (include item) or False (discard item). Default function is: ``lambda si: True``. By default only items with ``Fi.isValid == True`` are returned. :returns: {'attribute1': numpy.array(), 'attribute2': numpy.array(), ... } """""" selector = (lambda fi: fi.isValid) if selector is None else selector attr = attr if attr is not None else [] attr = set(['id', 'specfile'] + aux.toList(attr)) items = self.getItems(specfiles, sort, reverse, selector) return _getArrays(items, attr, defaultValue)" 1959,"def getItems(self, specfiles=None, sort=False, reverse=False, selector=None): """"""Generator that yields filtered and/or sorted :class:`Fi` instances from ``self.container``. :param specfiles: filenames of ms-run files - if specified return only items from those files :type specfiles: str or [str, str, ...] :param sort: if ""sort"" is specified the returned list of items is sorted according to the :class:`Fi` attribute specified by ""sort"", if the attribute is not present the item is skipped. :param reverse: bool, ``True`` reverses the sort order :param selector: a function which is called with each ``Fi`` item and has to return True (include item) or False (discard item). By default only items with ``Fi.isValid == True`` are returned. 
:returns: items from container that passed the selector function """""" selector = (lambda fi: fi.isValid) if selector is None else selector if specfiles is None: specfiles = [_ for _ in viewkeys(self.info)] else: specfiles = aux.toList(specfiles) return _getItems(self.container, specfiles, sort, reverse, selector)" 1960,"def _writeContainer(self, filelike, specfile, compress): """"""Writes the ``self.container`` entry of the specified specfile to the ``fic`` format. :param filelike: path to a file (str) or a file-like object :param specfile: name of an ms-run file present in ``self.info`` :param compress: bool, True to use zip file compression .. note:: In addition it could also dump the ``self.info`` entry to the zipfile with the filename ``info``, but this is not used at the moment. For details see :func:`maspy.auxiliary.writeJsonZipfile()` """""" aux.writeJsonZipfile(filelike, self.container[specfile], compress=compress )" 1961,"def load(self, specfiles=None): """"""Imports the specified ``fic`` files from the hard disk. :param specfiles: the name of an ms-run file or a list of names. If None all specfiles are selected. :type specfiles: None, str, [str, str] """""" if specfiles is None: specfiles = [_ for _ in viewkeys(self.info)] else: specfiles = aux.toList(specfiles) for specfile in specfiles: if specfile not in self.info: warntext = 'Error while calling ""FiContainer.load()"": ""%s"" is'\ ' not present in ""FiContainer.info""!'\ % (specfile, ) warnings.warn(warntext) continue else: fiPath = aux.joinpath(self.info[specfile]['path'], specfile+'.fic' ) with zipfile.ZipFile(fiPath, 'r') as containerZip: #Convert the zipfile data into a str object, necessary since #containerZip.read() returns a bytes object. jsonString = io.TextIOWrapper(containerZip.open('data'), encoding='utf-8' ).read() #infoString = io.TextIOWrapper(containerZip.open('info'), # encoding='utf-8' # ).read() self.container[specfile] = json.loads(jsonString, object_hook=Fi.jsonHook )" 1962,"def removeAnnotation(self, specfiles=None): """"""Remove all annotation information from :class:`Fi` elements. :param specfiles: the name of an ms-run file or a list of names. If None all specfiles are selected. :type specfiles: None, str, [str, str] """""" if specfiles is None: specfiles = [_ for _ in viewkeys(self.info)] else: specfiles = aux.toList(specfiles) for specfile in aux.toList(specfiles): for item in viewvalues(self.container[specfile]): item.isMatched = False item.isAnnotated = False item.siIds = list() item.siiIds = list() item.peptide = None item.sequence = None item.bestScore = None" 1963,"def as_dict(self): """"""Returns a JSON-serializeable object representing this tree."""""" def conv(v): if isinstance(v, SerializableAttributesHolder): return v.as_dict() elif isinstance(v, list): return [conv(x) for x in v] elif isinstance(v, dict): return {x:conv(y) for (x,y) in v.items()} else: return v return {k.replace('_', '-'): conv(v) for (k, v) in self._attributes.items()}" 1964,"def from_json(cls, data): """"""Decode a JSON string and inflate a node instance."""""" # Decode JSON string assert isinstance(data, str) data = json.loads(data) assert isinstance(data, dict) return cls.from_dict(data)" 1965,"def _pred(aclass): """""" :param aclass :return: boolean """""" isaclass = inspect.isclass(aclass) return isaclass and aclass.__module__ == _pred.__module__" 1966,"def extract_keywords(func): """""" Parses the keywords from the given function. 
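A self-contained sketch of what ``extract_keywords`` returns (note the function above relies on the Python 2 ``func_code`` / ``func_defaults`` attributes): >>> def connect(host, port=5432, timeout=30): ...     pass >>> extract_keywords(connect)   # names of the arguments that have default values ('port', 'timeout')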
:param func | <function> """""" if hasattr(func, 'im_func'): func = func.im_func try: return func.func_code.co_varnames[-len(func.func_defaults):] except (TypeError, ValueError, IndexError): return tuple()" 1967,"def _get_adv_trans_stats(self, cmd, return_tdo=False): """"""Utility function to fetch the transfer statistics for the last advanced transfer. Checking the stats appears to sync the controller. For details on the advanced transfer please refer to the documentation at http://diamondman.github.io/Adapt/cable_digilent_adept.html#bulk-requests """""" t = time() code, res = self.bulkCommand(b'\x03\x02%c\x00'%(0x80|cmd), 10) if self._scanchain and self._scanchain._print_statistics: print(""GET STATS TIME"", time()-t)#pragma: no cover if len(res) == 4: count = struct.unpack('<I', res)[0] return count elif len(res) == 8: written, read = struct.unpack('<II', res) return written, read return res" 1968,"def jtag_enable(self): """""" Enables JTAG output on the controller. JTAG operations executed before this function is called will return useless data or fail. Usage: >>> from proteusisc import getAttachedControllers, bitarray >>> c = getAttachedControllers()[0] >>> c.jtag_enable() >>> c.write_tms_bits(bitarray(""001011111""), return_tdo=True) >>> c.jtag_disable() """""" status, _ = self.bulkCommand(_BMSG_ENABLE_JTAG) if status == 0: self._jtagon = True elif status == 3: self._jtagon = True raise JTAGAlreadyEnabledError() else: raise JTAGEnableFailedError(""Error enabling JTAG. Error code: %s."" %status)" 1969,"def jtag_disable(self): """""" Disables JTAG output on the controller. JTAG operations executed immediately after this function will return useless data or fail. Usage: >>> from proteusisc import getAttachedControllers, bitarray >>> c = getAttachedControllers()[0] >>> c.jtag_enable() >>> c.write_tms_bits(bitarray(""001011111""), return_tdo=True) >>> c.jtag_disable() """""" if not self._jtagon: return status, _ = self.bulkCommand(_BMSG_DISABLE_JTAG) if status == 0: self._jtagon = False elif status == 3: raise JTAGControlError(""Error Code %s""%status) self.close_handle()" 1970,"def write_tms_bits(self, data, return_tdo=False, TDI=False): """""" Command controller to write TMS data (with constant TDI bit) to the physical scan chain. Optionally return TDO bits sent back from scan the chain. Args: data - bits to send over TMS line of scan chain (bitarray) return_tdo (bool) - return the devices bitarray response TDI (bool) - whether TDI should send a bitarray of all 0's of same length as `data` (i.e False) or all 1's (i.e. True) Returns: None by default or the (bitarray) response of the device after receiving data, if return_tdo is True. Usage: >>> from proteusisc import getAttachedControllers, bitarray >>> c = getAttachedControllers()[0] >>> c.jtag_enable() >>> c.write_tms_bits(bitarray(""001011111""), return_tdo=True) >>> c.jtag_disable() """""" self._check_jtag() self._update_scanchain(data) self.bulkCommandDefault(_BMSG_WRITE_TMS % (return_tdo, TDI, len(data).to_bytes(4, 'little'))) self.bulkWriteData(build_byte_align_buff(data).tobytes()[::-1]) tdo_bits = self._read_tdo(len(data)) if return_tdo else None self._get_adv_trans_stats(0x0B, return_tdo) return tdo_bits" 1971,"def write_tdi_bits(self, buff, return_tdo=False, TMS=True): """""" Command controller to write TDI data (with constant TMS bit) to the physical scan chain. Optionally return TDO bits sent back from scan the chain. 
Args: data - bits to send over TDI line of scan chain (bitarray) return_tdo (bool) - return the devices bitarray response TMS (bool) - whether TMS should send a bitarray of all 0's of same length as `data` (i.e False) or all 1's (i.e. True) Returns: None by default or the (bitarray) response of the device after receiving data, if return_tdo is True. Usage: >>> from proteusisc import getAttachedControllers, bitarray >>> c = getAttachedControllers()[0] >>> c.jtag_enable() >>> c.write_tdi_bits(bitarray(""11111""), return_tdo=True) >>> c.jtag_disable() """""" self._check_jtag() tms_bits = bitarray([TMS]*len(buff)) self._update_scanchain(tms_bits) self.bulkCommandDefault(_BMSG_WRITE_TDI % (return_tdo, TMS, len(buff).to_bytes(4, 'little'))) self.bulkWriteData(build_byte_align_buff(buff).tobytes()[::-1]) tdo_bits = self._read_tdo(len(buff)) if return_tdo else None self._get_adv_trans_stats(0x08, return_tdo) return tdo_bits" 1972,"def write_tms_tdi_bits(self, tmsdata, tdidata, return_tdo=False): """""" Command controller to write arbitrary TDI and TMS data to the physical scan chain. Optionally return TDO bits sent back from the scan chain. Args: tmsdata - bits to send over TMS line of scan chain (bitarray) must be the same length ad tdidata tdidata - bits to send over TDI line of scan chain (bitarray) must be the same length ad tmsdata return_tdo (bool) - return the devices bitarray response Returns: None by default or the (bitarray) response of the device after receiving data, if return_tdo is True. Usage: >>> from proteusisc import getAttachedControllers, bitarray >>> c = getAttachedControllers()[0] >>> c.jtag_enable() >>> c.write_tms_tdi_bits(bitarray(""00001""), bitarray(""11111""), return_tdo=True) >>> c.jtag_disable() """""" self._check_jtag() if len(tmsdata) != len(tdidata): raise Exception(""TMSdata and TDIData must be the same length"") self._update_scanchain(tmsdata) count = len(tmsdata) t = time() outdata = bitarray([val for pair in zip(tmsdata, tdidata) for val in pair]) outdata = build_byte_align_buff(outdata).tobytes()[::-1] if self._scanchain and self._scanchain._print_statistics: print(""TDI/TDI DATA PREP TIME"", time()-t)#pragma: no cover t = time() self.bulkCommandDefault(_BMSG_WRITE_TMS_TDI % \ (return_tdo, count.to_bytes(4, 'little'))) self.bulkWriteData(outdata) if self._scanchain and self._scanchain._print_statistics: print(""TRANSFER TIME"", time()-t) t = time() tdo_bits = self._read_tdo(count) if return_tdo else None if self._scanchain and self._scanchain._print_statistics: print(""TDO READ TIME"", time()-t)#pragma: no cover self._get_adv_trans_stats(0x0A, return_tdo) return tdo_bits" 1973,"def read_tdo_bits(self, count, TMS=True, TDI=False): """""" Command controller to issue [count] bit transfers to the physicsl scan chain, with a constant TMS and TDI value, and reading back the returned TDO bits. Args: count (int) - Number of bits to read from TDO and write to TMS/TDI TMS (bool) - constant value to write to TMS for each bit read from TDO. TDI (bool) - constant value to write to TDI for each bit read from TDO. Returns: Returns the response (bitarray) from the physical scanchain's TDO line. 
Usage: >>> from proteusisc import getAttachedControllers >>> c = getAttachedControllers()[0] >>> c.jtag_enable() >>> data = c.read_tdo_bits(32) >>> c.jtag_disable() """""" self._check_jtag() self._update_scanchain(bool(TMS)) self.bulkCommandDefault( _BMSG_READ_TDO % (TMS, TDI, count.to_bytes(4, 'little'))) res = self._read_tdo(count) self._get_adv_trans_stats(_BMSG_READ_TDO[2], True) return res" 1974,"def importProteinDatabase(filePath, proteindb=None, decoyTag='[decoy]', contaminationTag='[cont]', headerParser=None, forceId=False, cleavageRule='[KR]', minLength=5, maxLength=40, missedCleavage=2, ignoreIsoleucine=False, removeNtermM=True): """"""Generates a :class:`ProteinDatabase` by in silico digestion of proteins from a fasta file. :param filePath: File path :param proteindb: optional an existing :class:`ProteinDatabase` can be specified, otherwise a new instance is generated and returned :param decoyTag: If a fasta file contains decoy protein entries, they should be specified with a sequence tag :param contaminationTag: If a fasta file contains contamination protein entries, they should be specified with a sequence tag :param headerParser: optional a headerParser can be specified #TODO: describe how a parser looks like :param forceId: bool, if True and no id can be extracted from the fasta header the whole header sequence is used as a protein id instead of raising an exception. :param cleavageRule: cleavage rule expressed in a regular expression, see :attr:`maspy.constants.expasy_rules` :param missedCleavage: number of allowed missed cleavage sites :param removeNtermM: bool, True to consider also peptides with the N-terminal Methionine of the protein removed :param minLength: int, only yield peptides with length >= minLength :param maxLength: int, only yield peptides with length <= maxLength :param ignoreIsoleucine: bool, if True treat Isoleucine and Leucine in peptide sequences as indistinguishable See also :func:`maspy.peptidemethods.digestInSilico` """""" proteindb = ProteinDatabase() if proteindb is None else proteindb fastaRead = _readFastaFile(filePath) for header, sequence in fastaRead: proteinTags = list() if header.startswith(decoyTag): isDecoy = True header = header.replace(decoyTag, '') proteinTags.append(decoyTag) else: isDecoy = False if header.startswith(contaminationTag): isCont = True header = header.replace(contaminationTag, '') proteinTags.append(contaminationTag) else: isCont = False headerInfo = _extractFastaHeader(header, headerParser, forceId) proteinId = ''.join(itertools.chain(proteinTags, [headerInfo['id']])) if 'name' in headerInfo: proteinName = ''.join(itertools.chain(proteinTags, [headerInfo['name']] ) ) else: proteinName = proteinId if proteinId not in proteindb.proteins: protein = ProteinSequence(proteinId, sequence) protein.name = proteinName protein.fastaHeader = header protein.fastaInfo = headerInfo proteindb.proteins[protein.id] = protein #Perform the insilico digestion _digestion = maspy.peptidemethods.digestInSilico(sequence, cleavageRule, missedCleavage, removeNtermM, minLength, maxLength ) #Add peptides to the protein database for unmodPeptide, info in _digestion: if ignoreIsoleucine: unmodPeptideNoIsoleucine = unmodPeptide.replace('I', 'L') if unmodPeptideNoIsoleucine in proteindb.peptides: currPeptide = proteindb.peptides[unmodPeptideNoIsoleucine] else: currPeptide = PeptideSequence(unmodPeptideNoIsoleucine, mc=info['missedCleavage'] ) proteindb.peptides[unmodPeptideNoIsoleucine] = currPeptide if unmodPeptide not in proteindb.peptides: 
proteindb.peptides[unmodPeptide] = currPeptide else: if unmodPeptide in proteindb.peptides: currPeptide = proteindb.peptides[unmodPeptide] else: currPeptide = PeptideSequence(unmodPeptide, mc=info['missedCleavage'] ) proteindb.peptides[unmodPeptide] = currPeptide if proteinId not in currPeptide.proteins: currPeptide.proteins.add(proteinId) #TODO: change that a peptide can appear multiple times in a # protein sequence. currPeptide.proteinPositions[proteinId] = (info['startPos'], info['endPos'] ) #Add peptide entries to the protein entries, define wheter a peptide can be #uniquely assigend to a single protein (.isUnique = True). for peptide, peptideEntry in viewitems(proteindb.peptides): numProteinMatches = len(peptideEntry.proteins) if numProteinMatches == 1: peptideEntry.isUnique = True elif numProteinMatches > 1: peptideEntry.isUnique = False else: raise Exception('No protein matches in proteindb for peptide' + 'sequence: ' + peptide) for proteinId in peptideEntry.proteins: if peptideEntry.isUnique: proteindb.proteins[proteinId].uniquePeptides.add(peptide) else: proteindb.proteins[proteinId].sharedPeptides.add(peptide) #Check protein entries if the digestions generated at least one peptide that #is uniquely assigned to the protein (.isUnique = True) for proteinEntry in viewvalues(proteindb.proteins): if len(proteinEntry.uniquePeptides) > 0: proteinEntry.isUnique = True else: proteinEntry.isUnique = False #Note: TODO, altough isoleucin is ignored, the protein entry should only #show the actually present ILE / LEU occurence, not any possibilities return proteindb" 1975,"def _readFastaFile(filepath): """"""Read a FASTA file and yields tuples of 'header' and 'sequence' entries. :param filepath: file path of the FASTA file :yields: FASTA entries in the format ('header', 'sequence'). The 'header' string does not contain the '>' and trailing white spaces. The 'sequence' string does not contain trailing white spaces, a '*' at the end of the sequence is removed. See also :func:`importProteinDatabase` and :func:`maspy.peptidemethods.digestInSilico`. """""" processSequences = lambda i: ''.join([s.rstrip() for s in i]).rstrip('*') processHeaderLine = lambda line: line[1:].rstrip() with io.open(filepath) as openfile: #Iterate through lines until the first header is encountered try: line = next(openfile) while line[0] != '>': line = next(openfile) header = processHeaderLine(line) sequences = list() except StopIteration: errorText = 'File does not contain fasta entries.' raise maspy.errors.FileFormatError(errorText) for line in openfile: if line[0] == '>': yield header, processSequences(sequences) header = processHeaderLine(line) sequences = list() else: sequences.append(line) #Yield last entry if sequences: yield header, processSequences(sequences)" 1976,"def _extractFastaHeader(fastaHeader, parser=None, forceId=False): """"""Parses a fasta header and returns extracted information in a dictionary. Unless a custom parser is specified, a ``Pyteomics`` function is used, which provides parsers for the formats of UniProtKB, UniRef, UniParc and UniMES (UniProt Metagenomic and Environmental Sequences), described at `www.uniprot.org <http://www.uniprot.org/help/fasta-headers>_`. :param fastaHeader: str, protein entry header from a fasta file :param parser: is a function that takes a fastaHeader string and returns a dictionary, containing at least the key ""id"". If None the parser function from pyteomics ``pyteomics.fasta.parse()`` is used. 
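A usage sketch for the digestion entry point above; the FASTA path, the peptide sequence and the protein id looked up afterwards are all hypothetical: >>> proteindb = importProteinDatabase('/data/yeast.fasta', cleavageRule='[KR]', ...                                   missedCleavage=1, ignoreIsoleucine=True) >>> proteindb.peptides['LVNELTEFAK'].isUnique    # True if the peptide maps to exactly one protein >>> proteindb.proteins['P60709'].uniquePeptides  # set of peptides unique to this protein entry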
:param forceId: bool, if True and no id can be extracted from the fasta header the whole header sequence is used as a protein id instead of raising an exception. :returns: dict, describing a fasta header """""" if parser is None: try: headerInfo = pyteomics.fasta.parse(fastaHeader) except pyteomics.auxiliary.PyteomicsError as pyteomicsError: #If forceId is set True, it uses the whole header as an id if forceId: headerInfo = {'id': fastaHeader} else: raise pyteomicsError else: headerInfo = parser(fastaHeader) return headerInfo" 1977,"def fastaParseSgd(header): """"""Custom parser for fasta headers in the SGD format, see www.yeastgenome.org. :param header: str, protein entry header from a fasta file :returns: dict, parsed header """""" rePattern = '([\S]+)\s([\S]+).+(\"".+\"")' ID, name, description = re.match(rePattern, header).groups() info = {'id':ID, 'name':name, 'description':description} return info" 1978,"def _reprJSON(self): """"""Returns a JSON serializable represenation of a ``PeptideSequence`` class instance. Use :func:`maspy.proteindb.PeptideSequence._fromJSON()` to generate a new ``PeptideSequence`` instance from the return value. :returns: a JSON serializable python object """""" return {'__PepSeq__': [self.sequence, self.missedCleavage, self.isUnique, list(self.proteins), self.proteinPositions]}" 1979,"def _fromJSON(cls, jsonobject): """"""Generates a new instance of :class:`maspy.proteindb.PeptideSequence` from a decoded JSON object (as generated by :func:`maspy.proteindb.PeptideSequence._reprJSON()`). :param jsonobject: decoded JSON object :returns: a new instance of :class:`PeptideSequence` """""" newInstance = cls(jsonobject[0], jsonobject[1]) newInstance.isUnique = jsonobject[2] newInstance.proteins = set(jsonobject[3]) newInstance.proteinPositions = jsonobject[4] return newInstance" 1980,"def _reprJSON(self): """"""Returns a JSON serializable represenation of a ``ProteinSequence`` class instance. Use :func:`maspy.proteindb.ProteinSequence._fromJSON()` to generate a new ``ProteinSequence`` instance from the return value. :returns: a JSON serializable python object """""" jsonDict = self.__dict__ jsonDict['uniquePeptides'] = list(jsonDict['uniquePeptides']) jsonDict['sharedPeptides'] = list(jsonDict['sharedPeptides']) return {'__ProtSeq__': jsonDict}" 1981,"def _fromJSON(cls, jsonobject): """"""Generates a new instance of :class:`maspy.proteindb.ProteinSequence` from a decoded JSON object (as generated by :func:`maspy.proteindb.ProteinSequence._reprJSON()`). :param jsonobject: decoded JSON object :returns: a new instance of :class:`ProteinSequence` """""" newInstance = cls(None, None) newInstance.__dict__.update(jsonobject) newInstance.uniquePeptides = set(newInstance.uniquePeptides) newInstance.sharedPeptides = set(newInstance.sharedPeptides) return newInstance" 1982,"def save(self, path, compress=True): """"""Writes the ``.proteins`` and ``.peptides`` entries to the hard disk as a ``proteindb`` file. .. note:: If ``.save()`` is called and no ``proteindb`` file is present in the specified path a new files is generated, otherwise the old file is replaced. :param path: filedirectory to which the ``proteindb`` file is written. 
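A round-trip sketch for the persistence methods here; the directory is hypothetical and ``proteindb.info['name']`` is assumed to be set already: >>> proteindb.save('/data/proteindbs')   # writes <info['name']>.proteindb into that directory >>> restored = ProteinDatabase.load('/data/proteindbs', proteindb.info['name']) >>> len(restored.peptides) == len(proteindb.peptides) True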
The output file name is specified by ``self.info['name']`` :param compress: bool, True to use zip file compression """""" with aux.PartiallySafeReplace() as msr: filename = self.info['name'] + '.proteindb' filepath = aux.joinpath(path, filename) with msr.open(filepath, mode='w+b') as openfile: self._writeContainer(openfile, compress=compress)" 1983,"def _writeContainer(self, filelike, compress=True): """"""Writes the ``.proteins`` and ``.peptides`` entries to the ``proteindb`` format. In addition it also dumps the ``self.info`` entry to the zipfile with the filename ``info``. For details see :func:`maspy.auxiliary.writeJsonZipfile()` :param filelike: path to a file (str) or a file-like object :param compress: bool, True to use zip file compression """""" aux.writeJsonZipfile(filelike, self.proteins, compress, 'w', 'proteins') aux.writeJsonZipfile(filelike, self.peptides, compress, 'a', 'peptides') zipcomp = zipfile.ZIP_DEFLATED if compress else zipfile.ZIP_STORED with zipfile.ZipFile(filelike, 'a', allowZip64=True) as containerFile: infodata = {key: value for key, value in viewitems(self.info) if key != 'path' } containerFile.writestr('info', json.dumps(infodata, zipcomp))" 1984,"def load(cls, path, name): """"""Imports the specified ``proteindb`` file from the hard disk. :param path: filedirectory of the ``proteindb`` file :param name: filename without the file extension "".proteindb"" .. note:: this generates rather large files, which actually take longer to import than to newly generate. Maybe saving / loading should be limited to the protein database whitout in silico digestion information. """""" filepath = aux.joinpath(path, name + '.proteindb') with zipfile.ZipFile(filepath, 'r', allowZip64=True) as containerZip: #Convert the zipfile data into a str object, necessary since #containerZip.read() returns a bytes object. proteinsString = io.TextIOWrapper(containerZip.open('proteins'), encoding='utf-8' ).read() peptidesString = io.TextIOWrapper(containerZip.open('peptides'), encoding='utf-8' ).read() infoString = io.TextIOWrapper(containerZip.open('info'), encoding='utf-8' ).read() newInstance = cls() newInstance.proteins = json.loads(proteinsString, object_hook=ProteinSequence.jsonHook) newInstance.peptides = json.loads(peptidesString, object_hook=PeptideSequence.jsonHook) newInstance.info.update(json.loads(infoString)) return newInstance" 1985,"def _calculateCoverageMasks(proteindb, peptidedb): """"""Calcualte the sequence coverage masks for all proteindb elements. Private method used by :class:`ProteinDatabase`. A coverage mask is a numpy boolean array with the length of the protein sequence. Each protein position that has been covered in at least one peptide is set to True. Coverage masks are calculated for unique and for shared peptides. Peptides are matched to proteins according to positions derived by the digestion of the FASTA file. Alternatively peptides could also be matched to proteins just by sequence as it is done in :func:`pyteomics.parser.coverage`, but this is not the case here. 
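Once the masks are set, sequence coverage is just the fraction of True positions; a small sketch assuming ``proteinEntry`` is a ``ProteinSequence`` processed by ``_calculateCoverageMasks()``: >>> mask = proteinEntry.coverageMaskUnique     # numpy boolean array, one entry per residue >>> round(100.0 * mask.sum() / mask.size, 1)   # percent of residues covered by unique peptides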
:param proteindb: a dictionary containing :class:`ProteinSequence` entries, for example ``ProteinDatabase.proteins`` :param proteindb: a dictionary containing :class:`PeptideSequence` entries, for example ``ProteinDatabase.peptides`` Sets two attributes for each ``ProteinSequence`` entry: ``.coverageMaskUnique`` = coverage mask of unique peptides ``.coverageMaskShared`` = coverage mask of shared peptides """""" for proteinId, proteinEntry in viewitems(proteindb): coverageMaskUnique = numpy.zeros(proteinEntry.length(), dtype='bool') for peptide in proteinEntry.uniquePeptides: startPos, endPos = peptidedb[peptide].proteinPositions[proteinId] coverageMaskUnique[startPos-1:endPos] = True coverageMaskShared = numpy.zeros(proteinEntry.length(), dtype='bool') for peptide in proteinEntry.sharedPeptides: startPos, endPos = peptidedb[peptide].proteinPositions[proteinId] coverageMaskShared[startPos-1:endPos] = True setattr(proteinEntry, 'coverageMaskUnique', coverageMaskUnique) setattr(proteinEntry, 'coverageMaskShared', coverageMaskShared)" 1986,"def fetch_keywords(codedata) : """""" Fetch keywords by shaman.KeywordFetcher Get average probabilities of keyword and language """""" # Read row in codedata and count keywords in codes with langauge tmp = {} language_counts = {} for index, (language, code) in enumerate(codedata) : if language not in shaman.SUPPORTING_LANGUAGES : continue if language not in tmp : tmp[language] = {} language_counts[language] = 0 language_counts[language] += 1 for keyword in shaman.KeywordFetcher.fetch( code ) : # if keyword exists in fetched data, add '1' to keyword data tmp[language][keyword] = tmp[language].get(keyword, 0) + 1 print('Fetch keyword %d/%d ' % (index, len(codedata)), end='\r') # Get dataset indexed by keyword ret = {} for language in tmp : for keyword, count in tmp[ language ].items() : if keyword not in ret : ret[ keyword ] = {} ret[ keyword ][ language ] = (count / language_counts[ language ]) # Probability print('Fetch keyword completed ') return ret" 1987,"def match_patterns(codedata) : """""" Match patterns by shaman.PatternMatcher Get average ratio of pattern and language """""" ret = {} for index1, pattern in enumerate(shaman.PatternMatcher.PATTERNS) : print('Matching pattern %d ""%s""' % (index1+1, pattern)) matcher = shaman.PatternMatcher(pattern) tmp = {} for index2, (language, code) in enumerate(codedata) : if language not in shaman.SUPPORTING_LANGUAGES : continue if len(code) <= 20 or len(code) > 100000 : continue if language not in tmp : tmp[language] = [] ratio = matcher.getratio(code) tmp[language].append(ratio) print('Matching patterns %d/%d ' % (index2, len(codedata)), end='\r') ret[pattern] = {} for language, data in tmp.items() : ret[pattern][language] = sum(tmp[language]) / max(len(tmp[language]), 1) print('Matching patterns completed ') return ret" 1988,"def facility(self, column=None, value=None, **kwargs): """""" Check information related to Radiation facilities. >>> RADInfo().facility('state_code', 'CA') """""" return self._resolve_call('RAD_FACILITY', column, value, **kwargs)" 1989,"def facility_type(self, column=None, value=None, **kwargs): """""" Basic identifying information for a RADInfo facility, including the improved facility information maintained by the Facility Registry System (FRS). >>> RADInfo().facility_type('cit_ref_code', '40CFR300') """""" return self._resolve_call('RAD_FACILITY_TYPE', column, value, **kwargs)" 1990,"def geo(self, column=None, value=None, **kwargs): """""" Locate a facility through geographic location. 
>>> RADInfo().geo('geometric_type_code', '001') """""" return self._resolve_call('RAD_GEO_LOCATION', column, value, **kwargs)" 1991,"def regulation(self, column=None, value=None, **kwargs): """""" Provides relevant information about applicable regulations. >>> RADInfo().regulation('title_id', 40) """""" return self._resolve_call('RAD_REGULATION', column, value, **kwargs)" 1992,"def regulatory_program(self, column=None, value=None, **kwargs): """""" Identifies the regulatory authority governing a facility, and, by virtue of that identification, also identifies the regulatory program of interest and the type of facility. >>> RADInfo().regulatory_program('sec_cit_ref_flag', 'N') """""" return self._resolve_call('RAD_REGULATORY_PROG', column, value, **kwargs)" 1993,"def collect_basic_info(): """""" collect basic info about the system, os, python version... """""" s = sys.version_info _collect(json.dumps({'sys.version_info':tuple(s)})) _collect(sys.version) return sys.version" 1994,"def call(function): """""" decorator that collect function call count. """""" message = 'call:%s.%s' % (function.__module__,function.__name__) @functools.wraps(function) def wrapper(*args, **kwargs): _collect(message) return function(*args, **kwargs) return wrapper" 1995,"def _parse_ip_addr_show(raw_result): """""" Parse the 'ip addr list dev' command raw output. :param str raw_result: os raw result string. :rtype: dict :return: The parsed result of the show interface command in a \ dictionary of the form: :: { 'os_index' : '0', 'dev' : 'eth0', 'falgs_str': 'BROADCAST,MULTICAST,UP,LOWER_UP', 'mtu': 1500, 'state': 'down', 'link_type' 'ether', 'mac_address': '00:50:56:01:2e:f6', 'inet': '20.1.1.2', 'inet_mask': '24', 'inet6': 'fe80::42:acff:fe11:2', 'inte6_mask': '64' } """""" # does link exist? show_re = ( r'""(?P<dev>\S+)""\s+does not exist' ) re_result = search(show_re, raw_result) result = None if not (re_result): # match top two lines for serveral 'always there' variables show_re = ( r'\s*(?P<os_index>\d+):\s+(?P<dev>\S+):\s+<(?P<falgs_str>.*)?>.*?' r'mtu\s+(?P<mtu>\d+).+?state\s+(?P<state>\w+).*' r'\s*link/(?P<link_type>\w+)\s+(?P<mac_address>\S+)' ) re_result = search(show_re, raw_result, DOTALL) result = re_result.groupdict() # seek inet if its there show_re = ( r'((inet )\s*(?P<inet>[^/]+)/(?P<inet_mask>\d{1,2}))' ) re_result = search(show_re, raw_result) if (re_result): result.update(re_result.groupdict()) # seek inet6 if its there show_re = ( r'((?<=inet6 )(?P<inet6>[^/]+)/(?P<inet6_mask>\d{1,2}))' ) re_result = search(show_re, raw_result) if (re_result): result.update(re_result.groupdict()) # cleanup dictionary before returning for key, value in result.items(): if value is not None: if value.isdigit(): result[key] = int(value) return result" 1996,"def _parse_ip_stats_link_show(raw_result): """""" Parse the 'ip -s link show dev <dev>' command raw output. :param str raw_result: vtysh raw result string. 
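A small sketch of the counting decorator defined above; it assumes ``call`` (and the module-level ``_collect`` helper it uses) is importable where the snippet runs: >>> @call ... def fetch_data(): ...     return 42 >>> fetch_data()   # also records 'call:<module>.fetch_data' via _collect 42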
:rtype: dict :return: The parsed result of the show interface command in a \ dictionary of the form: :: { 'rx_bytes': 0, 'rx_packets': 0, 'rx_errors': 0, 'rx_dropped': 0, 'rx_overrun': 0, 'rx_mcast': 0, 'tx_bytes': 0, 'tx_packets': 0, 'tx_errors': 0, 'tx_dropped': 0, 'tx_carrier': 0, 'tx_collisions': 0, } """""" show_re = ( r'.+?RX:.*?\n' r'\s*(?P<rx_bytes>\d+)\s+(?P<rx_packets>\d+)\s+(?P<rx_errors>\d+)\s+' r'(?P<rx_dropped>\d+)\s+(?P<rx_overrun>\d+)\s+(?P<rx_mcast>\d+)' r'.+?TX:.*?\n' r'\s*(?P<tx_bytes>\d+)\s+(?P<tx_packets>\d+)\s+(?P<tx_errors>\d+)\s+' r'(?P<tx_dropped>\d+)\s+(?P<tx_carrier>\d+)\s+(?P<tx_collisions>\d+)' ) re_result = match(show_re, raw_result, DOTALL) result = None if (re_result): result = re_result.groupdict() for key, value in result.items(): if value is not None: if value.isdigit(): result[key] = int(value) return result" 1997,"def interface(enode, portlbl, addr=None, up=None, shell=None): """""" Configure a interface. All parameters left as ``None`` are ignored and thus no configuration action is taken for that parameter (left ""as-is""). :param enode: Engine node to communicate with. :type enode: topology.platforms.base.BaseNode :param str portlbl: Port label to configure. Port label will be mapped to real port automatically. :param str addr: IPv4 or IPv6 address to add to the interface: - IPv4 address and netmask to assign to the interface in the form ``'192.168.20.20/24'``. - IPv6 address and subnets to assign to the interface in the form ``'2001::1/120'``. :param bool up: Bring up or down the interface. :param str shell: Shell name to execute commands. If ``None``, use the Engine Node default shell. """""" assert portlbl port = enode.ports[portlbl] if addr is not None: assert ip_interface(addr) cmd = 'ip addr add {addr} dev {port}'.format(addr=addr, port=port) response = enode(cmd, shell=shell) assert not response if up is not None: cmd = 'ip link set dev {port} {state}'.format( port=port, state='up' if up else 'down' ) response = enode(cmd, shell=shell) assert not response" 1998,"def remove_ip(enode, portlbl, addr, shell=None): """""" Remove an IP address from an interface. All parameters left as ``None`` are ignored and thus no configuration action is taken for that parameter (left ""as-is""). :param enode: Engine node to communicate with. :type enode: topology.platforms.base.BaseNode :param str portlbl: Port label to configure. Port label will be mapped to real port automatically. :param str addr: IPv4 or IPv6 address to remove from the interface: - IPv4 address to remove from the interface in the form ``'192.168.20.20'`` or ``'192.168.20.20/24'``. - IPv6 address to remove from the interface in the form ``'2001::1'`` or ``'2001::1/120'``. :param str shell: Shell name to execute commands. If ``None``, use the Engine Node default shell. """""" assert portlbl assert ip_interface(addr) port = enode.ports[portlbl] cmd = 'ip addr del {addr} dev {port}'.format(addr=addr, port=port) response = enode(cmd, shell=shell) assert not response" 1999,"def add_route(enode, route, via, shell=None): """""" Add a new static route. :param enode: Engine node to communicate with. :type enode: topology.platforms.base.BaseNode :param str route: Route to add, an IP in the form ``'192.168.20.20/24'`` or ``'2001::0/24'`` or ``'default'``. :param str via: Via for the route as an IP in the form ``'192.168.20.20/24'`` or ``'2001::0/24'``. :param shell: Shell name to execute commands. If ``None``, use the Engine Node default shell. 
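A combined usage sketch for the interface helpers above, assuming ``hs1`` is a topology engine node and '1' one of its port labels: >>> interface(hs1, '1', addr='192.168.20.20/24', up=True)   # assign an address and bring the port up >>> add_route(hs1, 'default', '192.168.20.1')               # default route via the gateway >>> remove_ip(hs1, '1', '192.168.20.20/24')                 # remove the address again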
:type shell: str or None """""" via = ip_address(via) version = '-4' if (via.version == 6) or \ (route != 'default' and ip_network(route).version == 6): version = '-6' cmd = 'ip {version} route add {route} via {via}'.format( version=version, route=route, via=via ) response = enode(cmd, shell=shell) assert not response" 2000,"def add_link_type_vlan(enode, portlbl, name, vlan_id, shell=None): """""" Add a new virtual link with the type set to VLAN. Creates a new vlan device {name} on device {port}. Will raise an exception if value is already assigned. :param enode: Engine node to communicate with. :type enode: topology.platforms.base.BaseNode :param str portlbl: Port label to configure. Port label will be mapped automatically. :param str name: specifies the name of the new virtual device. :param str vlan_id: specifies the VLAN identifier. :param str shell: Shell name to execute commands. If ``None``, use the Engine Node default shell. """""" assert name if name in enode.ports: raise ValueError('Port {name} already exists'.format(name=name)) assert portlbl assert vlan_id port = enode.ports[portlbl] cmd = 'ip link add link {dev} name {name} type vlan id {vlan_id}'.format( dev=port, name=name, vlan_id=vlan_id) response = enode(cmd, shell=shell) assert not response, 'Cannot add virtual link {name}'.format(name=name) enode.ports[name] = name" 2001,"def remove_link_type_vlan(enode, name, shell=None): """""" Delete a virtual link. Deletes a vlan device with the name {name}. Will raise an expection if the port is not already present. :param enode: Engine node to communicate with. :type enode: topology.platforms.base.BaseNode :param str name: specifies the name of the new virtual device. :param str shell: Shell name to execute commands. If ``None``, use the Engine Node default shell. """""" assert name if name not in enode.ports: raise ValueError('Port {name} doesn\'t exists'.format(name=name)) cmd = 'ip link del link dev {name}'.format(name=name) response = enode(cmd, shell=shell) assert not response, 'Cannot remove virtual link {name}'.format(name=name) del enode.ports[name]" 2002,"def show_interface(enode, dev, shell=None): """""" Show the configured parameters and stats of an interface. :param enode: Engine node to communicate with. :type enode: topology.platforms.base.BaseNode :param str dev: Unix network device name. Ex 1, 2, 3.. 
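A usage sketch for the VLAN and inspection helpers nearby, with the same hypothetical ``hs1`` node: >>> add_link_type_vlan(hs1, '1', 'vlan100', '100')   # creates device vlan100 on the port mapped to label '1' >>> info = show_interface(hs1, 'vlan100') >>> info['mtu'], info['rx_packets']                  # combined 'ip addr' and 'ip -s link' fields >>> remove_link_type_vlan(hs1, 'vlan100')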
:rtype: dict :return: A combined dictionary as returned by both :func:`topology_lib_ip.parser._parse_ip_addr_show` :func:`topology_lib_ip.parser._parse_ip_stats_link_show` """""" assert dev cmd = 'ip addr list dev {ldev}'.format(ldev=dev) response = enode(cmd, shell=shell) first_half_dict = _parse_ip_addr_show(response) d = None if (first_half_dict): cmd = 'ip -s link list dev {ldev}'.format(ldev=dev) response = enode(cmd, shell=shell) second_half_dict = _parse_ip_stats_link_show(response) d = first_half_dict.copy() d.update(second_half_dict) return d" 2003,"def build_mmd(target_folder=DEFAULT_LIBRARY_DIR): """"""Build and install the MultiMarkdown shared library."""""" mmd_dir = tempfile.mkdtemp() mmd_repo = pygit2.clone_repository('https://github.com/jasedit/MultiMarkdown-5', mmd_dir, checkout_branch='fix_windows') mmd_repo.init_submodules() mmd_repo.update_submodules() build_dir = os.path.join(mmd_dir, 'build') old_pwd = os.getcwd() os.chdir(build_dir) cmake_cmd = ['cmake', '-DCMAKE_BUILD_TYPE=Release', '-DSHAREDBUILD=1', '..'] if platform.system() == 'Windows': is_64bit = platform.architecture()[0] == '64bit' generator = 'Visual Studio 14 2015{0}'.format(' Win64' if is_64bit else '') cmake_cmd.insert(-1, '-G') cmake_cmd.insert(-1, '{0}'.format(generator)) subprocess.call(cmake_cmd) PLATFORM_BUILDS[platform.system()]() lib_file = 'libMultiMarkdown' + SHLIB_EXT[platform.system()] if not os.path.exists(target_folder): os.mkdir(target_folder) src = os.path.join(build_dir, SHLIB_PREFIX[platform.system()], lib_file) dest = os.path.join(target_folder, lib_file) shutil.copyfile(src, dest) os.chdir(old_pwd) shutil.rmtree(mmd_dir, ignore_errors=True)" 2004,"def cli(ctx, amount, index, stage, deststage, stepresult, tostep, select, where, order, position): """"""Promote data from one stage to another(experimental) First collect the correct information with export, and promote result by adjusting command to promote and adding missing options. 
"""""" if not ctx.bubble: msg = 'There is no bubble present, will not promote' ctx.say_yellow(msg) raise click.Abort() if stage not in STAGES: ctx.say_yellow('There is no known stage:' + stage) raise click.Abort() if stepresult not in exportables: ctx.say_yellow('stepresult not one of: ' + ', '.join(exportables)) raise click.Abort() ctx.gbc.say('promote:args', stuff=(ctx, amount, index, stage, deststage, stepresult, tostep, select, where, order, position)) data_gen = bubble_lod_load(ctx, stepresult, stage) ctx.gbc.say('data_gen:', stuff=data_gen, verbosity=20) part = get_gen_slice(ctx.gbc, data_gen, amount, index) ctx.gbc.say('selected part:', stuff=part, verbosity=20) aliases = get_pairs(ctx.gbc, select, missing_colon=True) if position: ctx.gbc.say('adding position to selection of columns:', stuff=aliases, verbosity=20) aliases.insert(0, {'key': buts('index'), 'val': 'BUBBLE_IDX'}) ctx.gbc.say('added position to selection of columns:', stuff=aliases, verbosity=20) wheres = get_pairs(ctx.gbc, where) # TODO: use aliases as lookup for wheres data = tablib.Dataset() data.headers = [sel['val'] for sel in aliases] ctx.gbc.say('select wheres:' + str(wheres), verbosity=20) ctx.gbc.say('select aliases:' + str(aliases), verbosity=20) ctx.gbc.say('select data.headers:' + str(data.headers), verbosity=20) # TODO: get this selecting stuff into a shared function from export try: for ditem in part: row = [] ctx.gbc.say('curr dict', stuff=ditem, verbosity=101) flitem = flat(ctx, ditem) ctx.gbc.say('curr flat dict', stuff=flitem, verbosity=101) row_ok = True for wp in wheres: # TODO: negative selects: k:None, k:False,k:Zero,k:Null,k:0,k:-1,k:'',k:"""", # TODO: negative selects: k:BUBBLE_NO_KEY,k:BUBBLE_NO_VAL if not wp['val'] in str(flitem[wp['key']]): row_ok = False if not row_ok: continue for sel in aliases: if sel['key'] in flitem: row.append(flitem[sel['key']]) else: # temporary to check, not use case for buts() bnp = '____BTS_NO_PATH_' tempv = get_flat_path(ctx, flitem, sel['key'] + '.*', bnp) if tempv != bnp: row.append(tempv) else: row.append('None') # TODO maybe 'NONE', or just '' or something like: # magic.export_format_none data.append(row) except Exception as excpt: ctx.say_red('Cannot promote data', stuff=excpt) raise click.Abort() if order: olast2 = order[-2:] ctx.gbc.say('order:' + order + ' last2:' + olast2, verbosity=100) if olast2 not in [':+', ':-']: data = data.sort(order, False) else: if olast2 == ':+': data = data.sort(order[:-2], False) if olast2 == ':-': data = data.sort(order[:-2], True)" 2005,"def _setup(): """""" Sets up the global import environment variables by registering the sub-folders for projex as import locations. When defining your custom manager, you will want to overload this method to do any sort of global initialization that you wish before continuing. :warning This method is called by the _setup method, and should not be called directly. """""" projex_path = os.getenv('PROJEX_PATH') if not projex_path: return base_path = os.path.dirname(__file__) logger.debug('Loading PROJEX_PATH: %s' % projex_path) # load the defaults from the install directory # load the paths from the environment paths = projex_path.split(os.path.pathsep) paths += [ os.path.join(base_path, 'userplug'), os.path.join(base_path, 'stdplug'), os.path.join(base_path, 'lib'), ] sys.path = paths + sys.path" 2006,"def appendPath(self, path): """""" Appends the inputted path to the end of the sys.path variable, provided the path does not already exist in it. 
:param path :type str :return bool: success """""" # normalize the path path = os.path.normcase(nstr(path)).strip() if path and path != '.' and path not in sys.path: sys.path.append(path) self._addedpaths.append(path) return True return False" 2007,"def expandvars(self, text, environ=None, cache=None): """""" Recursively expands the text variables, vs. the os.path \ method which only works at one level. The cache value should be \ left blank as it is used to protect against recursion. :param text | <str> environ | <dict> || None cache | <dict> { <str>: <str>, .. } :return <str> """""" if not environ: environ = os.environ # make sure we have data if not text: return '' # check for circular dependencies cache = cache or {} # return the cleaned variable output = nstr(text) keys = re.findall('\$(\w+)|\${(\w+)\}|\%(\w+)\%', text) for first, second, third in keys: repl = '' key = '' if first: repl = '$%s' % first key = first elif second: repl = '${%s}' % second key = second elif third: repl = '%%%s%%' % third key = third else: continue value = environ.get(key) if value: if key not in cache: cache[key] = value value = self.expandvars(value, environ, cache) else: err = '%s environ variable causes an infinite loop.' % key logger.warning(err) value = cache[key] else: value = repl output = output.replace(repl, value) return os.path.expanduser(output)" 2008,"def pushPath(self, path): """""" Pushes the inputted path at the front of the sys.path variable, making it the first path python uses when importing a module. :param path :type str :return bool: success """""" # normalize the path path = os.path.normcase(nstr(path)).strip() if path and path != '.' and path not in sys.path: sys.path.append(path) self._addedpaths.insert(0, path) return True return False" 2009,"def requires(self, *modules): """""" Registers the system paths for the inputted modules so that they can be imported properly. By default, this will check to see if the key PROJEX_[MODULE]_PATH exists in the environment, and if so, insert that path to the front of the sys.path for import. Out of the box installations will register import paths to default projex folders and won't need to define these path variables. (lib/,stdplug/,userplug) :param *modules ( <str>, .. ) :usage |>>> import projex |>>> projex.logger.setLevel( projex.logging.DEBUG ) |>>> projex.environ().requires( 'orb', 'anansi' ) |DEBUG: EnvManager.requires: PROJEX_ORB_PATH |DEBUG: EnvManager.requires: PROJEX_ANANSI_PATH |>>> import opb |>>> import openwebdk """""" self._setup() for module in modules: if '-' in module: parts = module.split('-') module = parts[0] version = '-'.join(parts) else: version = '' if module in self._loadedRequires: continue self._loadedRequires.append(module) path_key = 'PROJEX_%s_PATH' % nstr(module).upper() env_path = os.getenv(path_key) logger.debug('Looking up %s: %s' % (path_key, env_path)) # push the path for the particular module if found in the env if env_path: self.pushPath(env_path)" 2010,"def refactor(module, name, repl): """""" Replaces the name in the module dictionary with the inputted replace \ value. :param module | <str> || <module> name | <str> repl | <variant> :return <bool> """""" name = nstr(name) # import a module when refactoring based on a string if isinstance(module, basestring): try: module = __import__(module) except ImportError: logger.exception('Could not import module: %s' % module) return False try: glbls = module.__dict__ except AttributeError: err = '%s cannot support refactoring.' 
% module.__name__ logger.exception(err) return False if name in glbls: # refactor the value glbls[name] = repl return True else: err = '%s is not a member of %s.' % (name, module.__name__) logger.warning(err) return False" 2011,"def current(): """""" Returns the current environment manager for the projex system. :return <EnvManager> """""" if not EnvManager._current: path = os.environ.get('PROJEX_ENVMGR_PATH') module = os.environ.get('PROJEX_ENVMGR_MODULE') clsname = os.environ.get('PROJEX_ENVMGR_CLASS') cls = EnvManager if module and clsname: # check if the user specified an import path if path: logger.info('Adding env manager path: %s' % path) sys.path.insert(0, path) logger.info('Loading env manager: %s.%s' % (module, clsname)) try: __import__(module) mod = sys.modules[module] cls = getattr(mod, clsname) except ImportError: logger.error('Could not import env manager %s', module) except KeyError: logger.error('Could not import env manager %s', module) except AttributeError: msg = '%s is not a valid class of %s' % (clsname, module) logger.error(msg) EnvManager._current = cls() return EnvManager._current" 2012,"def fileImport(filepath, ignore=None): """""" Imports the module located at the given filepath. :param filepath | <str> ignore | [<str>, ..] || None :return <module> || None """""" basepath, package = EnvManager.packageSplit(filepath) if not (basepath and package): return None # make sure this is not part of the ignored package list if ignore and package in ignore: return None basepath = os.path.normcase(basepath) if basepath not in sys.path: sys.path.insert(0, basepath) logger.debug('Importing: %s' % package) try: __import__(package) module = sys.modules[package] except ImportError: logger.exception('ImportError: %s' % package) return None except KeyError: logger.exception('Could not find sys.modules package: %s' % package) return None except StandardError: logger.exception('Unknown error occurred not import %s' % package) return None return module" 2013,"def packageSplit(filepath): """""" Determines the python path, and package information for the inputted filepath. 
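An illustrative sketch (the paths are hypothetical and assume the package folder contains an __init__.py):

>>> EnvManager.packageSplit('/opt/code/projex/envmanager.py')
('/opt/code', 'projex.envmanager')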
:param filepath | <str> :return (<str> path, <str> package) """""" filepath = nstr(filepath).strip().strip('.') if not filepath: return '', '' basepath, module = os.path.split(nstr(filepath)) module = os.path.splitext(module)[0] pathsplit = os.path.normpath(basepath).split(os.path.sep) packagesplit = [] if module and module != '__init__': packagesplit.append(module) testpath = os.path.sep.join(pathsplit + ['__init__.py']) while os.path.exists(testpath): packagesplit.insert(0, pathsplit[-1]) pathsplit = pathsplit[:-1] testpath = os.path.sep.join(pathsplit + ['__init__.py']) return os.path.sep.join(pathsplit), '.'.join(packagesplit)" 2014,"def save(keystorerc=None, keystore=None, files=[], verbose=False): '''create a keystore, compress and encrypt to file''' config = None if keystorerc: config = config_reader.read(keystorerc) if not config: print('No configuration found.', file=sys.stderr) sys.exit(-1) elif keystore and len(files) > 0: config = { 'keystore': keystore, 'files': files } if 'verbose' in config and config['verbose']: verbose = True keystore_path = None if 'keystore' not in config: print('.keystorerc needs to specify a keystore file path.', file=sys.stderr) sys.exit(-1) keystore_path = os.path.expanduser(config['keystore']) if os.path.isdir(keystore_path): print('keystore cannot be a folder: {}'.format(config['keystore']), file=sys.stderr) sys.exit(-1) elif not os.path.isfile(keystore_path): # If keystore file does not exist already, attempt to create one try: pathlib.Path(keystore_path).touch() except OSError as err: print('keystore cannot be accessed: {}\n{}'.format(config['keystore'], err), file=sys.stderr) sys.exit(-1) # iterate through keys and add them here keystore = {} try: for p in config['files']: expanded_path = os.path.expanduser(p) path = pathlib.Path(expanded_path) if verbose: print('Inspecting {}:'.format(expanded_path)) if not path.exists(): print('Error: File or folder does not exist: {}'.format(p), file=sys.stderr) sys.exit(-1) if path.is_dir(): for dirpath, dirnames, filenames in os.walk(expanded_path): for name in filenames: fullpath = os.path.join(dirpath, name) if verbose: print('Adding {} ...'.format(fullpath)) with open(fullpath, 'rb') as keyfile: b64_bytes = base64.encodebytes(keyfile.read()).decode('utf-8') keystore[fullpath] = b64_bytes elif path.is_file(): fullpath = expanded_path if verbose: print('Adding {} ...'.format(fullpath)) with open(fullpath, 'rb') as keyfile: b64_bytes = base64.encodebytes(keyfile.read()).decode('utf-8') keystore[fullpath] = b64_bytes if verbose: print('Added {} key(s) to keystore.\n'.format(len(keystore))) # prompt user for a one-time passphase for encryption do_passphrases_match = False passphrase = None print('This passphrase is used to decrypt your keystore. Please remember it.') while not do_passphrases_match: passphrase = getpass.getpass('Please enter a passphrase: ') passphrase_verify = getpass.getpass('Please verify your passphrase: ') do_passphrases_match = passphrase != '' and passphrase == passphrase_verify if passphrase == '': print('Passphrase cannot be empty.') elif not do_passphrases_match: print('Passphrases do not match. Please try again.') if verbose: print('Passphrase accepted. 
Encrypting ...') # serialise, compress, encrypt serial_keystore = json.dumps(keystore) compressed_keystore = gzip.compress(serial_keystore.encode('utf-8')) try: encrypted_keystore = simplecrypt.encrypt(passphrase, compressed_keystore) except simplecrypt.EncryptionException as err: print('You managed to bump into a very, very rare issue with AES.\nPlease contact the author. {}'.format(err), file=sys.stder) sys.exit(-1) # save encrypted keystore to file keystore_path = os.path.expanduser(keystore_path) if verbose: print('Writing to keystore file {}'.format(keystore_path)) with open(keystore_path, 'wb') as keystore_file: keystore_file.write(encrypted_keystore) if verbose: print('Keystore successfully created: ') # if verbose: print(encrypted_keystore) except KeyError as err: print('.keystorerc config is missing `files` attribute: {}'.format(err), file=sys.stderr) sys.exit(-1) except TypeError as err: print('Error: {}'.format(err), file=sys.stderr) traceback.print_exc() sys.exit(-1) except OSError as err: print('The file system gave an error: {}'.format(err), file=sys.stderr) sys.exit(-1) except Exception as err: print('Serious error. Please report this bug to the author: {}'.format(err), file=sys.stderr) sys.exit(-1)" 2015,"def join_all(self, *parts): """""" Join all parts with domain. Example domain: https://www.python.org :param parts: Other parts, example: ""/doc"", ""/py27"" :return: url """""" url = util.join_all(self.domain, *parts) return url" 2016,"def add_params(self, endpoint, params): """""" Combine query endpoint and params. """""" assert endpoint.startswith(self.domain) return util.add_params(endpoint, params)" 2017,"def generate_requirements_files(self, base_dir='.'): """""" Generate set of requirements files for config """""" print(""Creating requirements files\n"") # TODO How to deal with requirements that are not simple, e.g. a github url shared = self._get_shared_section() requirements_dir = self._make_requirements_directory(base_dir) for section in self.config.sections(): if section == 'metadata': continue requirements = {} for option in self.config.options(section): requirements[option] = self.config.get(section, option) if not requirements: # No need to write out an empty file continue filename = os.path.join(requirements_dir, '%s.txt' % section) self._write_requirements_file(shared, section, requirements, filename)" 2018,"def _write_default_sections(self): """""" Starting from scratch, so create a default rc file """""" self.config.add_section('metadata') self.config.set('metadata', 'shared', 'common') self.config.add_section('common') self.config.add_section('development') self.config.add_section('production')" 2019,"def _parse_requirements(self, input): """""" Parse a list of requirements specifications. Lines that look like ""foobar==1.0"" are parsed; all other lines are silently ignored. Returns a tuple of tuples, where each inner tuple is: (package, version) """""" results = [] for line in input: (package, version) = self._parse_line(line) if package: results.append((package, version)) return tuple(results)" 2020,"def create_rc_file(self, packages): """""" Create a set of requirements files for config """""" print(""Creating rcfile '%s'\n"" % self.rc_filename) # TODO bug with == in config file if not self.config.sections(): self._write_default_sections() sections = {} section_text = [] for i, section in enumerate(self.config.sections()): if section == 'metadata': continue sections[i] = section section_text.append('%s. 
%s' % (i, section)) section_text = ' / '.join(section_text) self._remap_stdin() package_names = set() lines = packages.readlines() requirements = self._parse_requirements(lines) for (package, version) in requirements: package_names.add(package) section, configured_version = self._get_option(package) # Package already exists in configuration if section: # If there is a configured version, update it. If not, leave it unversioned. if configured_version: if configured_version != version: print(""Updating '%s' version from '%s' to '%s'"" % (package, configured_version, version)) self.config.set(section, package, version) continue section = self._get_section(package, sections, section_text) self._set_option(section, package, version) for section in self.config.sections(): if section == 'metadata': continue for option in self.config.options(section): if option not in package_names: print(""Removing package '%s'"" % option) self.config.remove_option(section, option) rc_file = open(self.rc_filename, 'w+') self.config.write(rc_file) rc_file.close()" 2021,"def upgrade_packages(self, packages): """""" Upgrade all specified packages to latest version """""" print(""Upgrading packages\n"") package_list = [] requirements = self._parse_requirements(packages.readlines()) for (package, version) in requirements: package_list.append(package) if package_list: args = [ ""pip"", ""install"", ""-U"", ] args.extend(package_list) subprocess.check_call(args) else: print(""No packages to upgrade"")" 2022,"def determine_extra_packages(self, packages): """""" Return all packages that are installed, but missing from ""packages"". Return value is a tuple of the package names """""" args = [ ""pip"", ""freeze"", ] installed = subprocess.check_output(args, universal_newlines=True) installed_list = set() lines = installed.strip().split('\n') for (package, version) in self._parse_requirements(lines): installed_list.add(package) package_list = set() for (package, version) in self._parse_requirements(packages.readlines()): package_list.add(package) removal_list = installed_list - package_list return tuple(removal_list)" 2023,"def remove_extra_packages(self, packages, dry_run=False): """""" Remove all packages missing from list """""" removal_list = self.determine_extra_packages(packages) if not removal_list: print(""No packages to be removed"") else: if dry_run: print(""The following packages would be removed:\n %s\n"" % ""\n "".join(removal_list)) else: print(""Removing packages\n"") args = [ ""pip"", ""uninstall"", ""-y"", ] args.extend(list(removal_list)) subprocess.check_call(args)" 2024,"def rewrap(self, **kwargs): """"""Inplace constructor. Depending on `self.inplace`, rewrap `obj`, or just update internal vars, possibly including the `obj`. """""" if self.inplace: for key, val in kwargs.items(): setattr(self, key, val) return self else: for key in ['obj', 'default', 'skipmissing', 'inplace', 'empty']: kwargs.setdefault(key, getattr(self, key)) return pluckable(**kwargs)" 2025,"def _filtered_list(self, selector): """"""Iterate over `self.obj` list, extracting `selector` from each element. The `selector` can be a simple integer index, or any valid key (hashable object). """""" res = [] for elem in self.obj: self._append(elem, selector, res) return res" 2026,"def _sliced_list(self, selector): """"""For slice selectors operating on lists, we need to handle them differently, depending on ``skipmissing``. In explicit mode, we may have to expand the list with ``default`` values. 
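For example (a sketch): slicing a two-element list with ``[0:4]`` while ``skipmissing`` is False pads the result with two ``default`` values, so four elements come back.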
"""""" if self.skipmissing: return self.obj[selector] # TODO: can be optimized by observing list bounds keys = xrange(selector.start or 0, selector.stop or sys.maxint, selector.step or 1) res = [] for key in keys: self._append(self.obj, key, res, skipmissing=False) return res" 2027,"def _extract_from_object(self, selector): """"""Extracts all values from `self.obj` object addressed with a `selector`. Selector can be a ``slice``, or a singular value extractor in form of a valid dictionary key (hashable object). Object (operated on) can be anything with an itemgetter or attrgetter, including, but limited to `dict`, and `list`. Itemgetter is preferred over attrgetter, except when called as `.key`. If `selector` is a singular value extractor (like a string, integer, etc), a single value (for a given key) is returned if key exists, an empty list if not. If `selector` is a ``slice``, each key from that range is extracted; failing-back, again, to an empty list. """""" if isinstance(selector, slice): # we must expand the slice manually, in order to be able to apply to # for example, to mapping types, or general objects # (e.g. slice `4::2` will filter all even numerical keys/attrs >=4) start = selector.start or 0 step = selector.step or 1 if selector.stop is None: if hasattr(self.obj, ""keys""): # filter keys by slice keys = \ [k for k in self.obj.keys() if isinstance(k, baseinteger) \ and k >= start and (k - start) % step == 0] elif hasattr(self.obj, ""__len__""): # object we slice should have a length (__len__ method), keys = xrange(start, len(self.obj), step) else: # otherwise, we don't know how to slice, so just skip it, # instead of failing keys = [] else: keys = xrange(start, selector.stop, step) else: keys = [selector] res = [] for key in keys: self._append(self.obj, key, res) return res" 2028,"def items(self): """"""Behave like `dict.items` for mapping types (iterator over (key, value) pairs), and like `iter` for sequence types (iterator over values). """""" if self.empty: return iter([]) val = self.value if hasattr(val, ""iteritems""): return val.iteritems() elif hasattr(val, ""items""): return val.items() else: return iter(self)" 2029,"def _mkpda(self, nonterms, productions, productions_struct, terminals, splitstring=1): """""" This function generates a PDA from a CNF grammar as described in: - http://www.oit.edu/faculty/sherry.yang/CST229/Lectures/7_pda.pdf - http://www.eng.utah.edu/~cs3100/lectures/l18/pda-notes.pdf If all of the grammar productions are in the Chomsky Normal Form, then follow the template for constructing a pushdown symautomata: 1. Start 2. Push S 3. Pop 4. Case: Nonterminal A: For every production rule of this form: A: BC, Push C and then Push B Args: nonterms (list): Non terminals list productions (dict): productions in the CNF form: A -> a or A -> b0b1, or S -> e productions_struct (dict): productions in the CNF form in structure form object.a for A -> a, object.b0 and object.b1 for A -> b0b1 and object.type where type is 1 for A-->a and 2 for A-->b0b1 terminals (list): All terminals splitstring (bool): If enabled an extra space is added after each symbol. 
Returns: PDA: The generated PDA """""" pda = PDA(self.alphabet) pda.nonterminals = nonterms pda.terminals = terminals pda.s[pda.n] = PDAState() pda.s[pda.n].id = pda.n pda.s[pda.n].sym = '@closing' pda.s[pda.n].type = 1 pda.s[pda.n].trans[1] = [0] pda.n = pda.n + 1 pda.s[pda.n] = PDAState() pda.s[pda.n].id = pda.n pda.s[pda.n].type = 1 pda.s[pda.n].sym = nonterms[0] pda.s[pda.n].trans[2] = [0] pda.n = pda.n + 1 pda.s[pda.n] = PDAState() pda.s[pda.n].id = pda.n pda.s[pda.n].type = 2 pda.s[pda.n].trans[0] = ['@closing'] counter = 0 i = 0 while i < len(nonterms): j = 0 while j < len(productions[nonterms[i]]): if productions_struct[counter].type == 1: # ADD AND CONNECT STATE pda.n = pda.n + 1 pda.s[pda.n] = PDAState() pda.s[pda.n].id = pda.n if pda.n not in pda.s[2].trans: pda.s[2].trans[pda.n] = [] pda.s[2].trans[pda.n].append(nonterms[i]) if splitstring == 0: # FILL NEW STATE READ pda.s[pda.n].type = 3 pda.s[pda.n].trans[2] = [productions_struct[counter].a] else: # THE FOLLOWIN SWITCH IS DUE TO THE REQUIREMENT OF # HAVING STRINGS SPLITTED TO SYMBOLS AND CAN INTERSECT # WITH DFA if productions_struct[counter].a not in terminals or \ len(productions_struct[counter].a) == 1: # FILL NEW STATE READ pda.s[pda.n].type = 3 pda.s[pda.n].trans[pda.n + 1] = [productions_struct[counter].a.lower()] pda.n = pda.n + 1 pda.s[pda.n] = PDAState() pda.s[pda.n].id = pda.n pda.s[pda.n].type = 3 pda.s[pda.n].trans[2] = [' '] else: pda.s[pda.n].type = 3 pda.s[pda.n].trans[pda.n + 1] = \ [productions_struct[counter].a[0].lower()] k = 1 while k < len(productions_struct[counter].a) - 1: pda.n = pda.n + 1 pda.s[pda.n] = PDAState() pda.s[pda.n].id = pda.n pda.s[pda.n].type = 3 pda.s[pda.n].trans[pda.n +1] = \ [productions_struct[counter].a[k].lower()] k = k + 1 pda.n = pda.n + 1 pda.s[pda.n] = PDAState() pda.s[pda.n].id = pda.n pda.s[pda.n].type = 3 pda.s[pda.n].trans[pda.n + 1] = \ [productions_struct[counter].a[-1].lower()] pda.n = pda.n + 1 pda.s[pda.n] = PDAState() pda.s[pda.n].id = pda.n pda.s[pda.n].type = 3 pda.s[pda.n].trans[2] = [' '] else: # ADD AND CONNECT PUSH STATE pda.n = pda.n + 1 pda.s[pda.n] = PDAState() pda.s[pda.n].id = pda.n if pda.n not in pda.s[2].trans: pda.s[2].trans[pda.n] = [] pda.s[2].trans[pda.n].append(nonterms[i]) # FILL NEW STATE pda.s[pda.n].type = 1 pda.s[pda.n].sym = productions_struct[counter].b1 pda.s[pda.n].trans[(pda.n) + 1] = [0] # ADD AND CONNECT PUSH STATE (ALREADY CONNECTED) pda.n = pda.n + 1 pda.s[pda.n] = PDAState() pda.s[pda.n].id = pda.n # FILL NEW STATE pda.s[pda.n].type = 1 pda.s[pda.n].sym = productions_struct[counter].b0 pda.s[pda.n].trans[2] = [0] j = j + 1 counter = counter + 1 i = i + 1 return pda" 2030,"def forceutc(t: Union[str, datetime.datetime, datetime.date, np.datetime64]) -> Union[datetime.datetime, datetime.date]: """""" Add UTC to datetime-naive and convert to UTC for datetime aware input: python datetime (naive, utc, non-utc) or Numpy datetime64 #FIXME add Pandas and AstroPy time classes output: utc datetime """""" # need to passthrough None for simpler external logic. 
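# Illustrative sketch (values are hypothetical):
#   forceutc('2015-07-04 12:00:00')          -> 2015-07-04 12:00:00+00:00
#   forceutc(datetime.datetime(2015, 7, 4))  -> 2015-07-04 00:00:00+00:00 (naive input is treated as UTC)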
# %% polymorph to datetime if isinstance(t, str): t = parse(t) elif isinstance(t, np.datetime64): t = t.astype(datetime.datetime) elif isinstance(t, datetime.datetime): pass elif isinstance(t, datetime.date): return t elif isinstance(t, (np.ndarray, list, tuple)): return np.asarray([forceutc(T) for T in t]) else: raise TypeError('datetime only input') # %% enforce UTC on datetime if t.tzinfo is None: # datetime-naive t = t.replace(tzinfo=UTC) else: # datetime-aware t = t.astimezone(UTC) # changes timezone, preserving absolute time. E.g. noon EST = 5PM UTC return t" 2031,"def on_assert_failed_print_details(actual, expected): """""" Print text details in case of assertation failed errors. .. sourcecode:: python with on_assert_failed_print_details(actual_text, expected_text): assert actual == expected """""" try: yield except AssertionError: # diff = difflib.unified_diff(expected.splitlines(), actual.splitlines(), # ""expected"", ""actual"") diff = difflib.ndiff(expected.splitlines(), actual.splitlines()) diff_text = u""\n"".join(diff) print(u""DIFF (+ ACTUAL, - EXPECTED):\n{0}\n"".format(diff_text)) if DEBUG: print(u""expected:\n{0}\n"".format(expected)) print(u""actual:\n{0}\n"".format(actual)) raise" 2032,"def on_error_print_details(actual, expected): """""" Print text details in case of assertation failed errors. .. sourcecode:: python with on_error_print_details(actual_text, expected_text): ... # Do something """""" try: yield except Exception: diff = difflib.ndiff(expected.splitlines(), actual.splitlines()) diff_text = u""\n"".join(diff) print(u""DIFF (+ ACTUAL, - EXPECTED):\n{0}\n"".format(diff_text)) if DEBUG: print(u""expected:\n{0}\n"".format(expected)) print(u""actual:\n{0}"".format(actual)) raise" 2033,"def step_a_new_working_directory(context): """""" Creates a new, empty working directory """""" command_util.ensure_context_attribute_exists(context, ""workdir"", None) command_util.ensure_workdir_exists(context) shutil.rmtree(context.workdir, ignore_errors=True)" 2034,"def step_use_curdir_as_working_directory(context): """""" Uses the current directory as working directory """""" context.workdir = os.path.abspath(""."") command_util.ensure_workdir_exists(context)" 2035,"def step_a_file_named_filename_and_encoding_with(context, filename, encoding): """"""Creates a textual file with the content provided as docstring."""""" __encoding_is_valid = True assert context.text is not None, ""ENSURE: multiline text is provided."" assert not os.path.isabs(filename) assert __encoding_is_valid command_util.ensure_workdir_exists(context) filename2 = os.path.join(context.workdir, filename) pathutil.create_textfile_with_contents(filename2, context.text, encoding)" 2036,"def step_a_file_named_filename_with(context, filename): """"""Creates a textual file with the content provided as docstring."""""" step_a_file_named_filename_and_encoding_with(context, filename, ""UTF-8"") # -- SPECIAL CASE: For usage with behave steps. if filename.endswith("".feature""): command_util.ensure_context_attribute_exists(context, ""features"", []) context.features.append(filename)" 2037,"def step_an_empty_file_named_filename(context, filename): """""" Creates an empty file. """""" assert not os.path.isabs(filename) command_util.ensure_workdir_exists(context) filename2 = os.path.join(context.workdir, filename) pathutil.create_textfile_with_contents(filename2, """")" 2038,"def step_i_run_command(context, command): """""" Run a command as subprocess, collect its output and returncode. 
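EXAMPLE (illustrative): ... when I run ""behave --version""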
"""""" command_util.ensure_workdir_exists(context) context.command_result = command_shell.run(command, cwd=context.workdir) command_util.workdir_save_coverage_files(context.workdir) if False and DEBUG: print(u""run_command: {0}"".format(command)) print(u""run_command.output {0}"".format(context.command_result.output))" 2039,"def step_it_should_pass_with(context): ''' EXAMPLE: ... when I run ""behave ..."" then it should pass with: """""" TEXT """""" ''' assert context.text is not None, ""ENSURE: multiline text is provided."" step_command_output_should_contain(context) assert_that(context.command_result.returncode, equal_to(0), context.command_result.output)" 2040,"def step_it_should_fail_with(context): ''' EXAMPLE: ... when I run ""behave ..."" then it should fail with: """""" TEXT """""" ''' assert context.text is not None, ""ENSURE: multiline text is provided."" step_command_output_should_contain(context) assert_that(context.command_result.returncode, is_not(equal_to(0)))" 2041,"def step_command_output_should_contain_text(context, text): ''' EXAMPLE: ... Then the command output should contain ""TEXT"" ''' expected_text = text if ""{__WORKDIR__}"" in expected_text or ""{__CWD__}"" in expected_text: expected_text = textutil.template_substitute(text, __WORKDIR__ = posixpath_normpath(context.workdir), __CWD__ = posixpath_normpath(os.getcwd()) ) actual_output = context.command_result.output with on_assert_failed_print_details(actual_output, expected_text): textutil.assert_normtext_should_contain(actual_output, expected_text)" 2042,"def step_command_output_should_not_contain_text(context, text): ''' EXAMPLE: ... then the command output should not contain ""TEXT"" ''' expected_text = text if ""{__WORKDIR__}"" in text or ""{__CWD__}"" in text: expected_text = textutil.template_substitute(text, __WORKDIR__ = posixpath_normpath(context.workdir), __CWD__ = posixpath_normpath(os.getcwd()) ) actual_output = context.command_result.output with on_assert_failed_print_details(actual_output, expected_text): textutil.assert_normtext_should_not_contain(actual_output, expected_text)" 2043,"def step_command_output_should_contain_exactly_text(context, text): """""" Verifies that the command output of the last command contains the expected text. .. code-block:: gherkin When I run ""echo Hello"" Then the command output should contain ""Hello"" """""" expected_text = text if ""{__WORKDIR__}"" in text or ""{__CWD__}"" in text: expected_text = textutil.template_substitute(text, __WORKDIR__ = posixpath_normpath(context.workdir), __CWD__ = posixpath_normpath(os.getcwd()) ) actual_output = context.command_result.output textutil.assert_text_should_contain_exactly(actual_output, expected_text)" 2044,"def compile(self, prog, features=Features.ALL): """"""Currently this compiler simply returns an interpreter instead of compiling TODO: Write this compiler to increase LPProg run speed and to prevent exceeding maximum recursion depth Args: prog (str): A string containing the program. features (FeatureSet): The set of features to enable during compilation. 
Returns: LPProg """""" return LPProg(Parser(Tokenizer(prog, features), features).program(), features)" 2045,"def cprint(self, cstr): """""" Clear line, then reprint on same line :param cstr: string to print on current line """""" cstr = str(cstr) # Force it to be a string cstr_len = len(cstr) prev_cstr_len = len(self._prev_cstr) num_spaces = 0 if cstr_len < prev_cstr_len: num_spaces = abs(prev_cstr_len - cstr_len) try: print(cstr + "" "" * num_spaces, end='\r') self._prev_cstr = cstr except UnicodeEncodeError: print('Processing...', end='\r') self._prev_cstr = 'Processing...'" 2046,"def get_sql_for_new_models(apps=None, using=DEFAULT_DB_ALIAS): """""" Unashamedly copied and tweaked from django.core.management.commands.syncdb """""" connection = connections[using] # Get a list of already installed *models* so that references work right. tables = connection.introspection.table_names() seen_models = connection.introspection.installed_models(tables) created_models = set() pending_references = {} if apps: apps = [models.get_app(a) for a in apps] else: apps = models.get_apps() # Build the manifest of apps and models that are to be synchronized all_models = [ (app.__name__.split('.')[-2], [ m for m in models.get_models(app, include_auto_created=True) if router.allow_syncdb(using, m) ]) for app in apps ] def model_installed(model): opts = model._meta converter = connection.introspection.table_name_converter db_table_in = (converter(opts.db_table) in tables) auto_create_in = ( opts.auto_created and converter(opts.auto_created._meta.db_table) in tables ) return not (db_table_in or auto_create_in) manifest = SortedDict( (app_name, filter(model_installed, model_list)) for app_name, model_list in all_models ) statements = [] sql = None for app_name, model_list in manifest.items(): for model in model_list: # Create the model's database table, if it doesn't already exist. sql, references = connection.creation.sql_create_model( model, no_style(), seen_models ) seen_models.add(model) created_models.add(model) statements.append(""### New Model: %s.%s"" % ( app_name, str(model).replace(""'>"", """").split(""."")[-1] )) for refto, refs in references.items(): pending_references.setdefault(refto, []).extend(refs) if refto in seen_models: sql.extend( connection.creation.sql_for_pending_references( refto, no_style(), pending_references ) ) sql.extend( connection.creation.sql_for_pending_references( model, no_style(), pending_references ) ) statements.extend(sql) custom_sql = None for app_name, model_list in manifest.items(): for model in model_list: if model in created_models: custom_sql = custom_sql_for_model( model, no_style(), connection ) if custom_sql: statements.extend(custom_sql) index_sql = None for app_name, model_list in manifest.items(): for model in model_list: if model in created_models: index_sql = connection.creation.sql_indexes_for_model( model, no_style() ) if index_sql: statements.extend(index_sql) return statements" 2047,"def get_file_list(path, max_depth=1, cur_depth=0): """""" Recursively returns a list of all files up to ``max_depth`` in a directory. 
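A hypothetical usage sketch (the path is illustrative); the function yields paths lazily:

    for filename in get_file_list('migrations/', max_depth=2):
        print(filename)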
"""""" if os.path.exists(path): for name in os.listdir(path): if name.startswith('.'): continue full_path = os.path.join(path, name) if os.path.isdir(full_path): if cur_depth == max_depth: continue file_list = get_file_list(full_path, max_depth, cur_depth + 1) for result in file_list: yield result else: yield full_path" 2048,"def get_applied_migrations(databases=None): """""" Returns a dictionary containing lists of all applied migrations where the key is the database alias. """""" if not databases: databases = get_capable_databases() else: # We only loop through databases that are listed as ""capable"" all_databases = list(get_capable_databases()) databases = list( itertools.ifilter(lambda x: x in all_databases, databases) ) results = defaultdict(list) for db in databases: for x in Migration.objects.using(db).order_by(""migration_label""): results[db].append(x.migration_label) return results" 2049,"def get_all_migrations(path, databases=None): """""" Returns a dictionary of database => [migrations] representing all migrations contained in ``path``. """""" # database: [(number, full_path)] possible_migrations = defaultdict(list) try: in_directory = sorted(get_file_list(path)) except OSError: import traceback print ""An error occurred while reading migrations from %r:"" % path traceback.print_exc() return {} # Iterate through our results and discover which migrations are # actually runnable for full_path in in_directory: child_path, script = os.path.split(full_path) name, ext = os.path.splitext(script) # the database component is default if this is in the root directory # is <directory> if in a subdirectory if path == child_path: db = DEFAULT_DB_ALIAS else: db = os.path.split(child_path)[-1] # filter by database if set if databases and db not in databases: continue match = MIGRATION_NAME_RE.match(name) if match is None: raise MigrationError(""Invalid migration file prefix %r "" ""(must begin with a number)"" % name) number = int(match.group(1)) if ext in ["".sql"", "".py""]: possible_migrations[db].append((number, full_path)) return possible_migrations" 2050,"def get_pending_migrations(path, databases=None, stop_at=None): """""" Returns a dictionary of database => [migrations] representing all pending migrations. 
"""""" if stop_at is None: stop_at = float(""inf"") # database: [(number, full_path)] possible_migrations = get_all_migrations(path, databases) # database: [full_path] applied_migrations = get_applied_migrations(databases) # database: [full_path] to_execute = defaultdict(list) for database, scripts in possible_migrations.iteritems(): applied = applied_migrations[database] pending = to_execute[database] for number, migration in scripts: path, script = os.path.split(migration) if script not in applied and number <= stop_at: pending.append(script) return dict((k, v) for k, v in to_execute.iteritems() if v)" 2051,"def updateFgiAnnotationFromFi(fgiContainer, fiContainer, largerBetter): """""" #TODO: docstring :param fgiContainer: :param fiContainer: :param largerBetter: """""" for fgi in listvalues(fgiContainer.container): annotations = list() for specfile, fiId in zip(fgi.specfiles, fgi.featureIds): fi = fiContainer.getItem(specfile, fiId) if not fi.isAnnotated: continue annotations.append([fi.score, fi.peptide, fi.sequence]) annotations.sort(reverse=largerBetter) if len(annotations) > 0: fgi.isAnnotated = True fgi.score = annotations[0][0] fgi.peptide = annotations[0][1] fgi.sequence = annotations[0][2] else: fgi.isAnnotated = False" 2052,"def continuityGrouping(values, limit): """""" #TODO docstring :param values: ``numpy.array`` containg ``int`` or ``float``, must be sorted :param limit: the maximal difference between two values, if this number is exceeded a new group is generated :returns: a list containing array start and end positions of continuous groups """""" lastValue = values[0] lastPos = 0 groupStartPos = 0 groupPos = list() for currPos, currValue in enumerate(values): if currValue - lastValue > limit: groupPos.append((groupStartPos, lastPos)) groupStartPos = currPos lastPos = currPos lastValue = currValue groupPos.append((groupStartPos, lastPos)) return groupPos" 2053,"def massTimeContinuityGroups(arrays, mKey, tKey, mLimit, tLimit): """""" #TODO docstring :param arrays: a dictionary containing ``numpy.arrays``, must be sorted according to the ""mKey"" (mass key) value. 
:param mKey: ""arrays"" key that contains the mass ``numpy.array`` :param tKey: ""arrays"" key that contains the time ``numpy.array`` :param mLimit: maximal mass difference for separating continuity groups :param tLimit: maximal time difference for separating continuity groups :returns: a list containing array positions of continuous groups."""""" arrayPositions = numpy.array(range(listvalues(arrays)[0].size)) finalGroupPositions = list() for start, end in continuityGrouping(arrays[mKey], mLimit): if start == end: finalGroupPositions.append(arrayPositions[start:end+1]) continue #Perform time continuity grouping on the mass continuity groups preSelectionT = arrays[tKey][start:end+1] preSelectionM = arrays[mKey][start:end+1] preSelectionPositions = arrayPositions[start:end+1] _sort = numpy.argsort(preSelectionT) preGroups = continuityGrouping(preSelectionT[_sort], tLimit) #Perform a second round of mass continuity grouping finalGroupPrePos = list() for _start, _end in preGroups: preGroupPos = sorted(_sort[_start:_end+1]) secGroups = continuityGrouping(preSelectionM[preGroupPos], mLimit) for fStart, fEnd in secGroups: finalGroupPrePos.append(preGroupPos[fStart:fEnd+1]) #Add the final group positions for _pos in finalGroupPrePos: finalGroupPositions.append(preSelectionPositions[_pos]) return finalGroupPositions" 2054,"def getContGroupArrays(arrays, groupPositions, arrayKeys=None): """"""Convinience function to generate a subset of arrays from specified array positions. :param arrays: a dictionary containing ``numpy.arrays`` :param groupPositions: arrays positions that should be included in the subset of arrays :param arrayKeys: a list of ""arrays"" keys that should be included in the subset of arrays, if None all keys are selected :returns: a dictionary containing ``numpy.arrays`` """""" if arrayKeys is None: arrayKeys = list(viewkeys(arrays)) matchingArrays = dict() for key in arrayKeys: matchingArrays[key] = arrays[key][groupPositions] return matchingArrays" 2055,"def calcDistMatchArr(matchArr, tKey, mKey): """"""Calculate the euclidean distance of all array positions in ""matchArr"". :param matchArr: a dictionary of ``numpy.arrays`` containing at least two entries that are treated as cartesian coordinates. :param tKey: #TODO: docstring :param mKey: #TODO: docstring :returns: #TODO: docstring {'eucDist': numpy.array([eucDistance, eucDistance, ...]), 'posPairs': numpy.array([[pos1, pos2], [pos1, pos2], ...]) } """""" #Calculate all sorted list of all eucledian feature distances matchArrSize = listvalues(matchArr)[0].size distInfo = {'posPairs': list(), 'eucDist': list()} _matrix = numpy.swapaxes(numpy.array([matchArr[tKey], matchArr[mKey]]), 0, 1) for pos1 in range(matchArrSize-1): for pos2 in range(pos1+1, matchArrSize): distInfo['posPairs'].append((pos1, pos2)) distInfo['posPairs'] = numpy.array(distInfo['posPairs']) distInfo['eucDist'] = scipy.spatial.distance.pdist(_matrix) distSort = numpy.argsort(distInfo['eucDist']) for key in list(viewkeys(distInfo)): distInfo[key] = distInfo[key][distSort] return distInfo" 2056,"def proximityGrouping(matchArr, distInfo, distLimit, categoryKey): """""" #TODO: docstring. Group according to the distance value provided by ``distInfo['eucDist']`` with the limitation that each ... category value can occur only once per group. :param matchArr: #TODO: docstring :param distInfo: #TODO: docstring, must be sorted, provide keys ""posPairs"" and ""eucDist"". 
As generated by :func:`calcDistMatchArr()` :param distLimit: #TODO: docstring :param categoryKey: #TODO: docstring :returns: #TODO: docstring """""" #Group fi according to their proximity matchArrSize = listvalues(matchArr)[0].size linkageGroups = {p: [p] for p in range(matchArrSize)} posToGroup = {p: p for p in range(matchArrSize)} groupCategories = {p: set([s]) for p, s in zip(range(matchArrSize), matchArr[categoryKey] ) } for (pos1, pos2), dist in zip(distInfo['posPairs'], distInfo['eucDist']): if dist > distLimit: break id1 = posToGroup[pos1] id2 = posToGroup[pos2] if groupCategories[id1].intersection(groupCategories[id2]): continue linkageGroups[id1].extend(linkageGroups[id2]) groupCategories[id1].update(groupCategories[id2]) for _pos in linkageGroups[id2]: posToGroup[_pos] = id1 del linkageGroups[id2] del groupCategories[id2] return linkageGroups" 2057,"def fiGroupFromLinkageGroup(matchArr, arrPos, groupId, timeKey, massKey): """""" #TODO: docstring """""" fgi = Fgi(groupId) matchArr['isAnnotated'][arrPos] minT = numpy.min(matchArr[timeKey][arrPos]) maxT = numpy.max(matchArr[timeKey][arrPos]) minM = numpy.min(matchArr[massKey][arrPos]) maxM = numpy.max(matchArr[massKey][arrPos]) fgi.isValid = True fgi.specfiles = matchArr['specfile'][arrPos].tolist() fgi.featureIds = matchArr['id'][arrPos].tolist() fgi.isAnnotated = numpy.any(matchArr['isAnnotated'][arrPos]) fgi.coordinates = ((minT, maxT), (minM, maxM)) #fgi.clusterType = clusterType return fgi" 2058,"def generateFeatureGroups(fgiContainer, linkageGroups, matchArr, timeKey, massKey, logMassKey, massScalingFactor): """""" #TODO: docstring :param fgiContainer: :param linkageGroups: :returns: a list of ids of the newly generated :class:`Fgi` """""" #Generate feature groups from the linked features newFgiIds = list() for linkageGroup in viewvalues(linkageGroups): fgiId = fgiContainer._getNextFgiId() fgi = fiGroupFromLinkageGroup(matchArr, linkageGroup, fgiId, timeKey, massKey ) fgiContainer.container[fgiId] = fgi fgi.metrics = clusterMetrics(matchArr[timeKey][linkageGroup], matchArr[logMassKey][linkageGroup], massScalingFactor=massScalingFactor ) fgi.rt = fgi.metrics['meanTime'] fgi.mz = fgi.metrics['meanMass'] newFgiIds.append(fgiId) return newFgiIds" 2059,"def clusterMetrics(timeValues, massValues, massScalingFactor=1): """""" #TODO: docstring """""" metrics = dict() metrics['meanTime'] = numpy.mean(timeValues) metrics['meanMass'] = numpy.mean(massValues) metrics['devTime'] = timeValues - metrics['meanTime'] metrics['devMass'] = massValues - metrics['meanMass'] #metrics['devMass'] = (1-metrics['meanMass']/massValues) metrics['spreadTime'] = numpy.max(timeValues) - numpy.min(timeValues) metrics['spreadMass'] = numpy.max(massValues) - numpy.min(massValues) #metrics['spreadMass'] = (1-numpy.min(massValues) / numpy.max(massValues)) metrics['devEuc'] = numpy.sqrt(numpy.power(metrics['devTime'], 2) + numpy.power(metrics['devMass']*massScalingFactor, 2) ) metrics['meanEuc'] = numpy.mean(metrics['devEuc']) metrics['devTime'] = metrics['devTime'].tolist() metrics['devMass'] = metrics['devMass'].tolist() metrics['devEuc'] = metrics['devEuc'].tolist() return metrics" 2060,"def lfqFeatureGrouping(fiContainer, timeLimit=40, massLimit=10*1e-6, eucLimit=None, timeKey='rt', massKey='mz', massScalingFactor=None, categoryKey='specfile', charges=None, matchArraySelector=None, specfiles=None): """""" #TODO: docstring :param fiContainer: #TODO: docstring :param timeLimit: #TODO: docstring :param massLimit: #TODO: docstring :param eucLimit: #TODO: 
docstring :param timeKey: #TODO: docstring :param massKey: #TODO: docstring :param massScalingFactor: #TODO: docstring :param categoryKey: #TODO: docstring :param charges: #TODO: docstring :param matchArraySelector: #TODO: docstring :param specfiles: limit grouping to these specfiles :returns: #TODO docstring, :class:`FgiContainer` """""" # --- perform the whole feature grouping process --- # targetChargeStates = range(1, 6) if charges is None else charges if matchArraySelector is None: matchArraySelector = lambda arr: numpy.any(arr['isAnnotated']) if massScalingFactor is None: massScalingFactor = timeLimit / massLimit if eucLimit is None: eucLimit = timeLimit if specfiles is None: specfiles = sorted(viewkeys(fiContainer.info)) #'massToleranceMode': 'relative' #'timeToleranceMode': 'absolute' fgiContainer = FgiContainer(specfiles) logMassLimit = log2RelativeMassLimit(massLimit) logMassKey = 'logMass' logToleranceFactor = massLimit / log2RelativeMassLimit(massLimit) logMassScalingFactor = massScalingFactor * logToleranceFactor """""" Note: because ""a"" is similar to ""b"" a = (1- 400 / 400.001) * massScalingFactor b = (numpy.log2(400.001) - numpy.log2(400)) * logMassScalingFactor """""" fiArrayKeys = [massKey, timeKey, 'isAnnotated', 'isMatched'] for _charge in targetChargeStates: # - Prepare feature arrays - # fiSelector = lambda fi: fi.charge == _charge and fi.isValid fiArrays = fiContainer.getArrays(fiArrayKeys, specfiles, sort=massKey, selector=fiSelector) fiArrays['logMass'] = numpy.log2(fiArrays[massKey]) if listvalues(fiArrays)[0].size == 0: continue # - group features which are in close mass and time proximity - # continuousGroups = massTimeContinuityGroups(fiArrays, logMassKey, timeKey, logMassLimit, timeLimit ) # - perform proximity grouping - # matchArrayKeys = list(viewkeys(fiArrays)) for groupId in range(len(continuousGroups)): #Grab the arrays of the current feature continuity group groupPositions = continuousGroups[groupId] matchArr = getContGroupArrays(fiArrays, groupPositions, matchArrayKeys ) if not matchArraySelector(matchArr): continue #Calculate a sorted list of all euclidean feature distances matchArr['mNorm'] = matchArr[logMassKey] * logMassScalingFactor distInfo = calcDistMatchArr(matchArr, timeKey, 'mNorm') #Group fi according to their proximity linkageGroups = proximityGrouping(matchArr, distInfo, eucLimit, categoryKey ) #Generate feature groups from the linked features fgiIds = generateFeatureGroups(fgiContainer, linkageGroups, matchArr, timeKey, massKey, logMassKey, logMassScalingFactor ) #Set charge manually for fgiId in fgiIds: fgiContainer.container[fgiId].charge = _charge #Mark overlapping groups as not valid (fgi.isValid = False) fgiDoOverlap = findFgiOverlaps(fgiContainer, fgiIds) #Add feature intensities to the feature groups fgiContainer.updateIntensities(fiContainer) return fgiContainer" 2061,"def getArrays(self, attr=None, sort=False, reverse=False, selector=None, defaultValue=None, report='lfq'): """""" #TODO: docstring """""" selector = (lambda fgi: fgi.isValid) if selector is None else selector attr = attr if attr is not None else [] attr = set(['id', 'intensities'] + aux.toList(attr)) items = self.getItems(sort, reverse, selector) arrays = _getArrays(items, attr, defaultValue) for specfile in self._matrixTemplate: arrays[specfile] = list() for intensities in arrays['intensities']: for specfile, intensitiy in zip(self._matrixTemplate, intensities): arrays[specfile].append(intensitiy) for specfile in self._matrixTemplate: arrays[specfile] = 
numpy.array(arrays[specfile], dtype=numpy.float64 ) del arrays['intensities'] return arrays" 2062,"def getItems(self, sort=False, reverse=False, selector=None): """""" #TODO: docstring """""" selector = (lambda fgi: fgi.isValid) if selector is None else selector _container = {'_': self.container} return _getItems(_container, '_', sort, reverse, selector)" 2063,"def load(self, path, name): """"""Imports the specified ``fgic`` file from the hard disk. :param path: filedirectory to which the ``fgic`` file is written. :param name: filename, without file extension """""" filename = name + '.fgic' filepath = aux.joinpath(path, filename) with zipfile.ZipFile(filepath, 'r') as containerZip: #Convert the zipfile data into a str object, necessary since #containerZip.read() returns a bytes object. jsonString = io.TextIOWrapper(containerZip.open('data'), encoding='utf-8' ).read() infoString = io.TextIOWrapper(containerZip.open('info'), encoding='utf-8' ).read() self.container = json.loads(jsonString, object_hook=Fgi.jsonHook) self.info.update(json.loads(infoString)) self._matrixTemplate = self.info['_matrixTemplate'] del self.info['_matrixTemplate']" 2064,"def updateIntensities(self, fiContainer, iKey='intensity'): """""" #TODO: docstring :param fiContainer: :param iKey: Attribute name of :class:`Fi` that contains the feature intensity or an abundance measure. Default ""intensity"" """""" for fgi in listvalues(self.container): intensities = list() specfileIds = {i: j for i, j in zip(fgi.specfiles, fgi.featureIds)} for specfile in self._matrixTemplate: if specfile in specfileIds: fi = fiContainer.getItem(specfile, specfileIds[specfile]) intensities.append(getattr(fi, iKey)) else: intensities.append(None) fgi.intensities = intensities" 2065,"def command(argv, scope): """""" Looks up a particular command from the inputted arguments for the given \ scope. :param argv | [<str>, ..] scope | <dict> :return <climethod> || None """""" if inspect.ismodule(scope): scope = vars(scope) for cmd in scope.values(): if not isinstance(cmd, climethod): continue if cmd.__name__ in argv: return cmd return None" 2066,"def commands(scope): """""" Looks up all climethod instances from the inputted scope. :return [<climethod>, ..] """""" if inspect.ismodule(scope): scope = vars(scope) return [cmd for cmd in scope.values() if isinstance(cmd, climethod)]" 2067,"def generate(module): """""" Generates a new interface from the inputted module. :param module | <module> :return <Interface> """""" inter = Interface(PROGRAM_NAME) inter.register(module, True) return inter" 2068,"def parser(scope, usage=''): """""" Generates a default parser for the inputted scope. :param scope | <dict> || <module> usage | <str> callable | <str> :return <OptionParser> """""" subcmds = [] for cmd in commands(scope): subcmds.append(cmd.usage()) if subcmds: subcmds.sort() usage += '\n\nSub-Commands:\n ' usage += '\n '.join(subcmds) parse = PARSER_CLASS(usage=usage) parse.prog = PROGRAM_NAME return parse" 2069,"def process(argv, scope, interface=None): """""" Processes any commands within the scope that matches the inputted arguments. If a subcommand is found, then it is run, and the system exists with the return value from the command. :param argv | [<str>, ..] 
scope | <dict> :return (<dict> options, <tuple> arguments) """""" cmd = command(argv, scope) if cmd: sys.exit(cmd.run(argv)) name = PROGRAM_NAME if interface: name = interface.name() _parser = parser(scope, '{0} [options] [<subcommand>] [<arg>]'.format(name)) options, args = _parser.parse_args(argv) return options.__dict__, args" 2070,"def usage(self): """""" Returns the usage string for this method. :return <str> """""" arg_list = ' '.join(self.cmd_args).upper() name = self.interface.name() return '%s [options] %s %s' % (name, self.__name__, arg_list)" 2071,"def parser(self): """""" Creates a parser for the method based on the documentation. :return <OptionParser> """""" usage = self.usage() if self.__doc__: usage += '\n' + nstr(self.__doc__) parse = PARSER_CLASS(usage=usage) shorts = {v: k for k, v in self.short_keys.items()} for key, default in self.cmd_opts.items(): # default key, cannot be duplicated if key == 'help': continue try: short = '-' + shorts[key] except KeyError: short = '' if default is True: action = 'store_false' elif default is False: action = 'store_true' else: action = 'store' # add the option parse.add_option(short, '--%s' % key, action=action, default=default) return parse" 2072,"def run(self, argv): """""" Parses the inputted options and executes the method. :param argv | [<str>, ..] """""" (opts, args) = self.parser().parse_args(argv) func_args = args[args.index(self.__name__) + 1:] func_kwds = opts.__dict__ return self.__call__(*func_args, **func_kwds)" 2073,"def register(self, obj, autogenerate=False): """""" Registers the inputted object to this scope. :param obj | <module> || <function> || <climethod> """""" scope = self._scope # register a module if type(obj).__name__ == 'module': for key, value in vars(obj).items(): # register a climethod if isinstance(value, climethod): value.interface = self scope[key] = value # register a function elif inspect.isfunction(value) and autogenerate: meth = climethod(value) meth.interface = self scope[key] = meth # register a climethod elif isinstance(obj, climethod): obj.interface = self scope[obj.__name__] = obj # register a function elif inspect.isfunction(obj) and autogenerate: meth = climethod(obj) meth.interface = self scope[meth.__name__] = meth" 2074,"def clone(cls, srcpath, destpath, encoding='utf-8'): """"""Clone an existing repository to a new bare repository."""""" cmd = [GIT, 'clone', '--quiet', '--bare', srcpath, destpath] subprocess.check_call(cmd) return cls(destpath, encoding)" 2075,"def create(cls, path, encoding='utf-8'): """"""Create a new bare repository"""""" cmd = [GIT, 'init', '--quiet', '--bare', path] subprocess.check_call(cmd) return cls(path, encoding)" 2076,"def get_as_dict(self): u"""""" Exports self as ordinary dict(), replacing recursively all instances of ElasticDict() to dict() :rtype: dict() """""" def convert(val): if isinstance(val, tuple): return tuple(convert(v) for v in val) elif isinstance(val, list): return [convert(v) for v in val] elif isinstance(val, (dict, ElasticDict)): return {k: convert(v) for k, v in val.iteritems()} else: return val return convert(self.__dict__)" 2077,"def create_from(value): u"""""" Create an instance of ElasticDict() where all nested dict()'s are replaced to ElasticDict() :rtype: ElasticDict (if value is dict()), else type(value) """""" def convert(val): if isinstance(val, tuple): return tuple(convert(v) for v in val) elif isinstance(val, list): return [convert(v) for v in val] elif isinstance(val, (dict, ElasticDict)): return ElasticDict({k: convert(v) for 
k, v in val.iteritems()}) else: return val return convert(value)" 2078,"def cache(self, dependency: Dependency, value): """""" Store an instance of dependency in the cache. Does nothing if dependency is NOT a threadlocal or a singleton. :param dependency: The ``Dependency`` to cache :param value: The value to cache for dependency :type dependency: Dependency """""" if dependency.threadlocal: setattr(self._local, dependency.name, value) elif dependency.singleton: self._singleton[dependency.name] = value" 2079,"def cached(self, dependency): """""" Get a cached instance of dependency. :param dependency: The ``Dependency`` to retrievie value for :type dependency: ``Dependency`` :return: The cached value """""" if dependency.threadlocal: return getattr(self._local, dependency.name, None) elif dependency.singleton: return self._singleton.get(dependency.name)" 2080,"def _set(self, name, factory, singleton=False, threadlocal=False): """""" Add a dependency factory to the registry :param name: Name of dependency :param factory: function/callable that returns dependency :param singleton: When True, makes the dependency a singleton. Factory will only be called on first use, subsequent uses receive a cached value. :param threadlocal: When True, register dependency as a threadlocal singleton, Same functionality as ``singleton`` except :class:`Threading.local` is used to cache return values. """""" name = name or factory.__name__ factory._giveme_registered_name = name dep = Dependency(name, factory, singleton, threadlocal) self._registry[name] = dep" 2081,"def get(self, name: str): """""" Get an instance of dependency, this can be either a cached instance or a new one (in which case the factory is called) """""" dep = None try: dep = self._registry[name] except KeyError: raise DependencyNotFoundError(name) from None value = self.cached(dep) if value is None: value = dep.factory() self.cache(dep, value) return value" 2082,"def register(self, function=None, *, singleton=False, threadlocal=False, name=None): """""" Add an object to the injector's registry. Can be used as a decorator like so: >>> @injector.register ... def my_dependency(): ... or a plain function call by passing in a callable injector.register(my_dependency) :param function: The function or callable to add to the registry :param name: Set the name of the dependency. Defaults to the name of `function` :param singleton: When True, register dependency as a singleton, this means that `function` is called on first use and its return value cached for subsequent uses. Defaults to False :param threadlocal: When True, register dependency as a threadlocal singleton, Same functionality as ``singleton`` except :class:`Threading.local` is used to cache return values. :type function: callable :type singleton: bool :type threadlocal: bool :type name: string """""" def decorator(function=None): self._set(name, function, singleton, threadlocal) return function if function: return decorator(function) return decorator" 2083,"def inject(self, function=None, **names): """""" Inject dependencies into `funtion`'s arguments when called. >>> @injector.inject ... def use_dependency(dependency_name): ... >>> use_dependency() The `Injector` will look for registered dependencies matching named arguments and automatically pass them to the given function when it's called. 
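Overriding the lookup name (the names here are illustrative):

>>> @injector.inject(db='user_db')
... def get_user(db, user_id):
...     return db.get(user_id)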
:param function: The function to inject into :type function: callable :param \**names: in the form of ``argument='name'`` to override the default behavior which matches dependency names with argument names. """""" def decorator(function): @wraps(function) def wrapper(*args, **kwargs): sig = signature(function) params = sig.parameters bound = sig.bind_partial(*args, **kwargs) bound.apply_defaults() injected_kwargs = {} for key, value in params.items(): if key not in bound.arguments: name = names.get(key) if name: # Raise error when dep named explicitly # and missing injected_kwargs[key] = self.get(name) else: try: injected_kwargs[key] = self.get(key) except DependencyNotFoundError as e: warnings.warn( ambigious_not_found_msg.format(key), DependencyNotFoundWarning ) injected_kwargs.update(bound.kwargs) return function(*bound.args, **injected_kwargs) return wrapper if function: return decorator(function) return decorator" 2084,"def resolve(self, dependency): """""" Resolve dependency as instance attribute of given class. >>> class Users: ... db = injector.resolve(user_db) ... ... def get_by_id(self, user_id): ... return self.db.get(user_id) When the attribute is first accessed, it will be resolved from the corresponding dependency function """""" if isinstance(dependency, str): name = dependency else: name = dependency._giveme_registered_name return DeferredProperty( partial(self.get, name) )" 2085,"def _fetch_itemslist(self, current_item): """""" Get a all available apis """""" if current_item.is_root: html = requests.get(self.base_url).text soup = BeautifulSoup(html, 'html.parser') for item_html in soup.select("".row .col-md-6""): try: label = item_html.select_one(""h2"").text except Exception: continue yield API(label, blob=item_html) else: # parameter = current_item.parent # data = requests.get(parameter.url) for resource in current_item.json[""resource""]: label = u""{}, {}"".format(resource[""title""], resource[""summary""]) yield SMHIDataset(label, blob=resource)" 2086,"def _fetch_data(self, dataset, query={}, include_inactive_stations=False): """""" Should yield dataset rows """""" data = [] parameter = dataset station_dim = dataset.dimensions[""station""] all_stations = station_dim.allowed_values # Step 1: Prepare query if ""station"" not in query: if include_inactive_stations: # Get all stations query[""station""] = list(all_stations) else: # Get only active stations query[""station""] = list(station_dim.active_stations()) else: if not isinstance(query[""station""], list): query[""station""] = [query[""station""]] # Make sure that the queried stations actually exist query[""station""] = [ all_stations.get_by_label(x) for x in query[""station""]] if ""period"" not in query: # TODO: I'd prepare to do dataset.get(""period"").allowed_values here query[""period""] = PERIODS elif not isinstance(query[""period""], list): query[""period""] = [query[""period""]] for period in query[""period""]: if period not in PERIODS: msg = u""{} is not an allowed period"".format(period) raise Exception(msg) # Step 3: Get data n_queries = len(query[""station""]) * len(query[""period""]) counter = 0 print(""Fetching data with {} queries."".format(n_queries)) for station in query[""station""]: for period in query[""period""]: url = dataset.url\ .replace("".json"", ""/station/{}/period/{}/data.csv""\ .format(station.key, period)) print(""/GET {} "".format(url)) r = requests.get(url) if r.status_code == 200: raw_data = DataCsv().from_string(r.content).to_dictlist() # TODO: This is a very hard coded parse 
function # Expects fixed start row and number of cols for row in raw_data: #timepoint = datetime.strptime(timepoint_str, ""%Y-%m-%d %H:%M:%S"") value_col = parameter.id.split("","")[0] value = float(row[value_col]) row[""parameter""] = parameter.id row[""station""] = station.label row[""station_key""] = station.key row[""period""] = period row.pop(value_col,None) datapoint = Result(value, row) yield datapoint elif r.status_code == 404: print(""Warning no data at {}"".format(url)) else: raise Exception(""Connection error for {}"".format(url))" 2087,"def _get_example_csv(self): """"""For dimension parsing """""" station_key = self.json[""station""][0][""key""] period = ""corrected-archive"" url = self.url\ .replace("".json"", ""/station/{}/period/{}/data.csv""\ .format(station_key, period)) r = requests.get(url) if r.status_code == 200: return DataCsv().from_string(r.content) else: raise Exception(""Error connecting to api"")" 2088,"def batch(iterable, length): """""" Returns a series of iterators across the inputted iterable method, broken into chunks based on the inputted length. :param iterable | <iterable> | (list, tuple, set, etc.) length | <int> :credit http://en.sharejs.com/python/14362 :return <generator> :usage |>>> import projex.iters |>>> for batch in projex.iters.batch(range(100), 10): |... print list(batch) |[0, 1, 2, 3, 4, 5, 6, 7, 8, 9] |[10, 11, 12, 13, 14, 15, 16, 17, 18, 19] |[20, 21, 22, 23, 24, 25, 26, 27, 28, 29] |[30, 31, 32, 33, 34, 35, 36, 37, 38, 39] |[40, 41, 42, 43, 44, 45, 46, 47, 48, 49] |[50, 51, 52, 53, 54, 55, 56, 57, 58, 59] |[60, 61, 62, 63, 64, 65, 66, 67, 68, 69] |[70, 71, 72, 73, 74, 75, 76, 77, 78, 79] |[80, 81, 82, 83, 84, 85, 86, 87, 88, 89] |[90, 91, 92, 93, 94, 95, 96, 97, 98, 99] """""" source_iter = iter(iterable) while True: batch_iter = itertools.islice(source_iter, length) yield itertools.chain([batch_iter.next()], batch_iter)" 2089,"def group(iterable): """""" Creates a min/max grouping for the inputted list of numbers. This will shrink a list into the group sets that are available. :param iterable | <iterable> | (list, tuple, set, etc.) :return <generator> [(<int> min, <int> max), ..] """""" numbers = sorted(list(set(iterable))) for _, grouper in itertools.groupby(numbers, key=lambda i, c=itertools.count(): i - next(c)): subset = list(grouper) yield subset[0], subset[-1]" 2090,"def plural(formatter, value, name, option, format): """"""Chooses different textension for locale-specific pluralization rules. Spec: `{:[p[lural]][(locale)]:msgstr0|msgstr1|...}` Example:: >>> smart.format(u'There {num:is an item|are {} items}.', num=1} There is an item. >>> smart.format(u'There {num:is an item|are {} items}.', num=10} There are 10 items. """""" # Extract the plural words from the format string. words = format.split('|') # This extension requires at least two plural words. if not name and len(words) == 1: return # This extension only formats numbers. try: number = decimal.Decimal(value) except (ValueError, decimal.InvalidOperation): return # Get the locale. locale = Locale.parse(option) if option else formatter.locale # Select word based on the plural tag index. 
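# Illustrative note (assuming CLDR-style plural categories, e.g. via Babel):
# an English locale exposes the tags ('one', 'other'), so get_plural_tag_index
# is expected to return 0 when the number is 1 and 1 otherwise, which is how the
# docstring examples above choose between 'is an item' and 'are {} items'.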
index = get_plural_tag_index(number, locale) return formatter.format(words[index], value)" 2091,"def get_choice(value): """"""Gets a key to choose a choice from any value."""""" if value is None: return 'null' for attr in ['__name__', 'name']: if hasattr(value, attr): return getattr(value, attr) return str(value)" 2092,"def choose(formatter, value, name, option, format): """"""Adds simple logic to format strings. Spec: `{:c[hoose](choice1|choice2|...):word1|word2|...[|default]}` Example:: >>> smart.format(u'{num:choose(1|2|3):one|two|three|other}, num=1) u'one' >>> smart.format(u'{num:choose(1|2|3):one|two|three|other}, num=4) u'other' """""" if not option: return words = format.split('|') num_words = len(words) if num_words < 2: return choices = option.split('|') num_choices = len(choices) # If the words has 1 more item than the choices, the last word will be # used as a default choice. if num_words not in (num_choices, num_choices + 1): n = num_choices raise ValueError('specify %d or %d choices' % (n, n + 1)) choice = get_choice(value) try: index = choices.index(choice) except ValueError: if num_words == num_choices: raise ValueError('no default choice supplied') index = -1 return formatter.format(words[index], value)" 2093,"def list_(formatter, value, name, option, format): """"""Repeats the items of an array. Spec: `{:[l[ist]:]item|spacer[|final_spacer[|two_spacer]]}` Example:: >>> fruits = [u'apple', u'banana', u'coconut'] >>> smart.format(u'{fruits:list:{}|, |, and | and }', fruits=fruits) u'apple, banana, and coconut' >>> smart.format(u'{fruits:list:{}|, |, and | and }', fruits=fruits[:2]) u'apple and banana' """""" if not format: return if not hasattr(value, '__getitem__') or isinstance(value, string_types): return words = format.split(u'|', 4) num_words = len(words) if num_words < 2: # Require at least two words for item format and spacer. return num_items = len(value) item_format = words[0] # NOTE: SmartFormat.NET treats a not nested item format as the format # string to format each items. For example, `x` will be treated as `{:x}`. # But the original tells us this behavior has been deprecated so that # should be removed. So SmartFormat for Python doesn't implement the # behavior. spacer = u'' if num_words < 2 else words[1] final_spacer = spacer if num_words < 3 else words[2] two_spacer = final_spacer if num_words < 4 else words[3] buf = io.StringIO() for x, item in enumerate(value): if x == 0: pass elif x < num_items - 1: buf.write(spacer) elif x == 1: buf.write(two_spacer) else: buf.write(final_spacer) buf.write(formatter.format(item_format, item, index=x)) return buf.getvalue()" 2094,"def add_seconds(datetime_like_object, n, return_date=False): """""" Returns a time that n seconds after a time. :param datetimestr: a datetime object or a datetime str :param n: number of seconds, value can be negative **中文文档** 返回给定日期N秒之后的时间。 """""" a_datetime = parser.parse_datetime(datetime_like_object) a_datetime = a_datetime + timedelta(seconds=n) if return_date: # pragma: no cover return a_datetime.date() else: return a_datetime" 2095,"def add_months(datetime_like_object, n, return_date=False): """""" Returns a time that n months after a time. Notice: for example, the date that one month after 2015-01-31 supposed to be 2015-02-31. But there's no 31th in Feb, so we fix that value to 2015-02-28. 
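Example (a sketch; assumes a plain date string is accepted by parser.parse_datetime):
    >>> add_months('2015-01-31', 1)
    datetime.datetime(2015, 2, 28, 0, 0)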
:param datetimestr: a datetime object or a datetime str :param n: number of months, value can be negative :param return_date: returns a date object instead of datetime **中文文档** 返回给定日期N月之后的时间。 """""" a_datetime = parser.parse_datetime(datetime_like_object) month_from_ordinary = a_datetime.year * 12 + a_datetime.month month_from_ordinary += n year, month = divmod(month_from_ordinary, 12) # try assign year, month, day try: a_datetime = datetime( year, month, a_datetime.day, a_datetime.hour, a_datetime.minute, a_datetime.second, a_datetime.microsecond, tzinfo=a_datetime.tzinfo, ) # 肯定是由于新的月份的日子不够, 所以肯定是月底, # 那么直接跳到下一个月的第一天, 再回退一天 except ValueError: month_from_ordinary += 1 year, month = divmod(month_from_ordinary, 12) a_datetime = datetime( year, month, 1, a_datetime.hour, a_datetime.minute, a_datetime.second, a_datetime.microsecond, tzinfo=a_datetime.tzinfo, ) a_datetime = add_days(a_datetime, -1) if return_date: # pragma: no cover return a_datetime.date() else: return a_datetime" 2096,"def add_years(datetime_like_object, n, return_date=False): """""" Returns a time that n years after a time. :param datetimestr: a datetime object or a datetime str :param n: number of years, value can be negative :param return_date: returns a date object instead of datetime **中文文档** 返回给定日期N年之后的时间。 """""" a_datetime = parser.parse_datetime(datetime_like_object) # try assign year, month, day try: a_datetime = datetime( a_datetime.year + n, a_datetime.month, a_datetime.day, a_datetime.hour, a_datetime.minute, a_datetime.second, a_datetime.microsecond, tzinfo=a_datetime.tzinfo, ) except ValueError: # Must be xxxx-02-29 a_datetime = datetime( a_datetime.year + n, 2, 28, a_datetime.hour, a_datetime.minute, a_datetime.second, a_datetime.microsecond) if return_date: # pragma: no cover return a_datetime.date() else: return a_datetime" 2097,"def _floor_to(dt, hour, minute, second): """""" Route the given datetime to the latest time with the hour, minute, second before it. """""" new_dt = dt.replace(hour=hour, minute=minute, second=second) if new_dt <= dt: return new_dt else: return new_dt - timedelta(days=1)" 2098,"def _round_to(dt, hour, minute, second): """""" Route the given datetime to the latest time with the hour, minute, second before it. """""" new_dt = dt.replace(hour=hour, minute=minute, second=second) if new_dt == dt: return new_dt elif new_dt < dt: before = new_dt after = new_dt + timedelta(days=1) elif new_dt > dt: before = new_dt - timedelta(days=1) after = new_dt d1 = dt - before d2 = after - dt if d1 < d2: return before elif d1 > d2: return after else: return before" 2099,"def round_to(dt, hour, minute, second, mode=""round""): """""" Round the given datetime to specified hour, minute and second. :param mode: 'floor' or 'ceiling' .. versionadded:: 0.0.5 message **中文文档** 将给定时间对齐到最近的一个指定了小时, 分钟, 秒的时间上。 """""" mode = mode.lower() if mode not in _round_to_options: raise ValueError( ""'mode' has to be one of %r!"" % list(_round_to_options.keys())) return _round_to_options[mode](dt, hour, minute, second)" 2100,"def _log(self, content): """""" Write a string to the log """""" self._buffer += content if self._auto_flush: self.flush()" 2101,"def reset(self): """""" Erase the log and reset the timestamp """""" self._buffer = '' self._chars_flushed = 0 self._game_start_timestamp = datetime.datetime.now()" 2102,"def logpath(self): """""" Return the logfile path and filename as a string. The file with name self.logpath() is written to on flush(). 
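For example (hypothetical values): a game between players named ross and josh, with timestamp_str() returning '2016-01-02T0304', would log to <log_dir>/2016-01-02T0304-ross-josh.catan.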
The filename contains the log's timestamp and the names of players in the game. The logpath changes when reset() or _set_players() are called, as they change the timestamp and the players, respectively. """""" name = '{}-{}.catan'.format(self.timestamp_str(), '-'.join([p.name for p in self._players])) path = os.path.join(self._log_dir, name) if not os.path.exists(self._log_dir): os.mkdir(self._log_dir) return path" 2103,"def flush(self): """""" Append the latest updates to file, or optionally to stdout instead. See the constructor for logging options. """""" latest = self._latest() self._chars_flushed += len(latest) if self._use_stdout: file = sys.stdout else: file = open(self.logpath(), 'a') print(latest, file=file, flush=True, end='') if not self._use_stdout: file.close()" 2104,"def log_game_start(self, players, terrain, numbers, ports): """""" Begin a game. Erase the log, set the timestamp, set the players, and write the log header. The robber is assumed to start on the desert (or off-board). :param players: iterable of catan.game.Player objects :param terrain: list of 19 catan.board.Terrain objects. :param numbers: list of 19 catan.board.HexNumber objects. :param ports: list of catan.board.Port objects. """""" self.reset() self._set_players(players) self._logln('{} v{}'.format(__name__, __version__)) self._logln('timestamp: {0}'.format(self.timestamp_str())) self._log_players(players) self._log_board_terrain(terrain) self._log_board_numbers(numbers) self._log_board_ports(ports) self._logln('...CATAN!')" 2105,"def log_player_roll(self, player, roll): """""" :param player: catan.game.Player :param roll: integer or string, the sum of the dice """""" self._logln('{0} rolls {1}{2}'.format(player.color, roll, ' ...DEUCES!' if int(roll) == 2 else ''))" 2106,"def log_player_buys_road(self, player, location): """""" :param player: catan.game.Player :param location: string, see hexgrid.location() """""" self._logln('{0} buys road, builds at {1}'.format( player.color, location ))" 2107,"def log_player_buys_settlement(self, player, location): """""" :param player: catan.game.Player :param location: string, see hexgrid.location() """""" self._logln('{0} buys settlement, builds at {1}'.format( player.color, location ))" 2108,"def log_player_buys_city(self, player, location): """""" :param player: catan.game.Player :param location: string, see hexgrid.location() """""" self._logln('{0} buys city, builds at {1}'.format( player.color, location ))" 2109,"def log_player_trades_with_port(self, player, to_port, port, to_player): """""" :param player: catan.game.Player :param to_port: list of tuples, [(int, game.board.Terrain), (int, game.board.Terrain)] :param port: catan.board.Port :param to_player: list of tuples, [(int, game.board.Terrain), (int, game.board.Terrain)] """""" self._log('{0} trades '.format(player.color)) # to_port items self._log('[') for i, (num, res) in enumerate(to_port): if i > 0: self._log(', ') self._log('{0} {1}'.format(num, res.value)) self._log(']') self._log(' to port {0} for '.format(port.type.value)) # to_player items self._log('[') for i, (num, res) in enumerate(to_player): if i > 0: self._log(', ') self._log('{0} {1}'.format(num, res.value)) self._log(']') self._log('\n')" 2110,"def log_player_trades_with_other_player(self, player, to_other, other, to_player): """""" :param player: catan.game.Player :param to_other: list of tuples, [(int, game.board.Terrain), (int, game.board.Terrain)] :param other: catan.board.Player :param to_player: list of tuples, [(int, 
game.board.Terrain), (int, game.board.Terrain)] """""" self._log('{0} trades '.format(player.color)) # to_other items self._log('[') for i, (num, res) in enumerate(to_other): if i > 0: self._log(', ') self._log('{0} {1}'.format(num, res.value)) self._log(']') self._log(' to player {0} for '.format(other.color)) # to_player items self._log('[') for i, (num, res) in enumerate(to_player): if i > 0: self._log(', ') self._log('{0} {1}'.format(num, res.value)) self._log(']') self._log('\n')" 2111,"def log_player_plays_knight(self, player, location, victim): """""" :param player: catan.game.Player :param location: string, see hexgrid.location() :param victim: catan.game.Player """""" self._logln('{0} plays knight'.format(player.color)) self.log_player_moves_robber_and_steals(player, location, victim)" 2112,"def log_player_plays_road_builder(self, player, location1, location2): """""" :param player: catan.game.Player :param location1: string, see hexgrid.location() :param location2: string, see hexgrid.location() """""" self._logln('{0} plays road builder, builds at {1} and {2}'.format( player.color, location1, location2 ))" 2113,"def log_player_plays_year_of_plenty(self, player, resource1, resource2): """""" :param player: catan.game.Player :param resource1: catan.board.Terrain :param resource2: catan.board.Terrain """""" self._logln('{0} plays year of plenty, takes {1} and {2}'.format( player.color, resource1.value, resource2.value ))" 2114,"def log_player_plays_monopoly(self, player, resource): """""" :param player: catan.game.Player :param resource: catan.board.Terrain """""" self._logln('{0} plays monopoly on {1}'.format( player.color, resource.value ))" 2115,"def log_player_ends_turn(self, player): """""" :param player: catan.game.Player """""" seconds_delta = (datetime.datetime.now() - self._latest_timestamp).total_seconds() self._logln('{0} ends turn after {1}s'.format(player.color, round(seconds_delta))) self._latest_timestamp = datetime.datetime.now()" 2116,"def _log_board_terrain(self, terrain): """""" Tiles are logged counterclockwise beginning from the top-left. See module hexgrid (https://github.com/rosshamish/hexgrid) for the tile layout. :param terrain: list of catan.board.Terrain objects """""" self._logln('terrain: {0}'.format(' '.join(t.value for t in terrain)))" 2117,"def _log_board_numbers(self, numbers): """""" Numbers are logged counterclockwise beginning from the top-left. See module hexgrid (https://github.com/rosshamish/hexgrid) for the tile layout. :param numbers: list of catan.board.HexNumber objects. """""" self._logln('numbers: {0}'.format(' '.join(str(n.value) for n in numbers)))" 2118,"def _log_board_ports(self, ports): """""" A board with no ports is allowed. 
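For illustration (hypothetical port values), a logged line has the form:
    ports: wood(1 NW) any3(2 W) brick(4 SW)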
In the logfile, ports must be sorted - ascending by tile identifier (primary) - alphabetical by edge direction (secondary) :param ports: list of catan.board.Port objects """""" ports = sorted(ports, key=lambda port: (port.tile_id, port.direction)) self._logln('ports: {0}'.format(' '.join('{}({} {})'.format(p.type.value, p.tile_id, p.direction) for p in ports)))" 2119,"def _log_players(self, players): """""" :param players: list of catan.game.Player objects """""" self._logln('players: {0}'.format(len(players))) for p in self._players: self._logln('name: {0}, color: {1}, seat: {2}'.format(p.name, p.color, p.seat))" 2120,"def _set_players(self, _players): """""" Players will always be set in seat order (1,2,3,4) """""" self._players = list() _players = list(_players) _players.sort(key=lambda p: p.seat) for p in _players: self._players.append(p)" 2121,"def _SetGuide(self, guideName): """""" Select guide corresponding to guideName Parameters ---------- guideName : string Name of guide to use. Note ---------- Supported guide names are: EPGUIDES """""" if(guideName == epguides.EPGuidesLookup.GUIDE_NAME): self._guide = epguides.EPGuidesLookup() else: raise Exception(""[RENAMER] Unknown guide set for TVRenamer selection: Got {}, Expected {}"".format(guideName, epguides.EPGuidesLookup.GUIDE_NAME))" 2122,"def _GetUniqueFileShowNames(self, tvFileList): """""" Return a list containing all unique show names from tvfile.TVFile object list. Parameters ---------- tvFileList : list List of tvfile.TVFile objects. Returns ---------- set The set of show names from the tvfile.TVFile list. """""" showNameList = [tvFile.fileInfo.showName for tvFile in tvFileList] return(set(showNameList))" 2123,"def _GetShowID(self, stringSearch, origStringSearch = None): """""" Search for given string as an existing entry in the database file name table or, if no match is found, as a show name from the TV guide. If an exact match is not found in the database the user can accept or decline the best match from the TV guide or can provide an alternate match to lookup. Parameters ---------- stringSearch : string String to look up in database or guide. origStringSearch : string [optional: default = None] Original search string, used by recusive function calls. Returns ---------- tvfile.ShowInfo or None If no show id could be found this returns None, otherwise it returns a tvfile.ShowInfo object containing show name and show id. """""" showInfo = tvfile.ShowInfo() if origStringSearch is None: goodlogging.Log.Info(""RENAMER"", ""Looking up show ID for: {0}"".format(stringSearch)) origStringSearch = stringSearch goodlogging.Log.IncreaseIndent() showInfo.showID = self._db.SearchFileNameTable(stringSearch) if showInfo.showID is None: goodlogging.Log.Info(""RENAMER"", ""No show ID match found for '{0}' in database"".format(stringSearch)) showNameList = self._guide.ShowNameLookUp(stringSearch) if self._skipUserInput is True: if len(showNameList) == 1: showName = showNameList[0] goodlogging.Log.Info(""RENAMER"", ""Automatic selection of showname: {0}"".format(showName)) else: showName = None goodlogging.Log.Info(""RENAMER"", ""Show skipped - could not make automatic selection of showname"") else: showName = util.UserAcceptance(showNameList) if showName in showNameList: libEntry = self._db.SearchTVLibrary(showName = showName) if libEntry is None: if self._skipUserInput is True: response = 'y' else: goodlogging.Log.Info(""RENAMER"", ""No show by this name found in TV library database. 
Is this a new show for the database?"") response = goodlogging.Log.Input(""RENAMER"", ""Enter 'y' (yes), 'n' (no) or 'ls' (list existing shows): "") response = util.ValidUserResponse(response, ('y', 'n', 'ls')) if response.lower() == 'ls': dbLibList = self._db.SearchTVLibrary() if dbLibList is None: goodlogging.Log.Info(""RENAMER"", ""TV library is empty"") response = 'y' else: dbShowNameList = [i[1] for i in dbLibList] dbShowNameStr = ', '.join(dbShowNameList) goodlogging.Log.Info(""RENAMER"", ""Existing shows in database are: {0}"".format(dbShowNameStr)) response = goodlogging.Log.Input(""RENAMER"", ""Is this a new show? [y/n]: "") response = util.ValidUserResponse(response, ('y', 'n')) if response.lower() == 'y': showInfo.showID = self._db.AddShowToTVLibrary(showName) showInfo.showName = showName else: try: dbShowNameList except NameError: dbLibList = self._db.SearchTVLibrary() if dbLibList is None: goodlogging.Log.Info(""RENAMER"", ""No show ID found - TV library is empty"") return None dbShowNameList = [i[1] for i in dbLibList] while showInfo.showID is None: matchShowList = util.GetBestMatch(showName, dbShowNameList) showName = util.UserAcceptance(matchShowList) if showName is None: goodlogging.Log.Info(""RENAMER"", ""No show ID found - could not match to existing show"") return None elif showName in matchShowList: showInfo.showID = self._db.SearchTVLibrary(showName = showName)[0][0] showInfo.showName = showName else: showInfo.showID = libEntry[0][0] self._db.AddToFileNameTable(origStringSearch, showInfo.showID) goodlogging.Log.DecreaseIndent() return showInfo elif showName is None: goodlogging.Log.DecreaseIndent() return None else: goodlogging.Log.DecreaseIndent() return self._GetShowID(showName, origStringSearch) else: goodlogging.Log.Info(""RENAMER"", ""Match found: show ID = {0}"".format(showInfo.showID)) if origStringSearch != stringSearch: self._db.AddToFileNameTable(origStringSearch, showInfo.showID) goodlogging.Log.DecreaseIndent() return showInfo" 2124,"def _GetShowInfo(self, stringSearch): """""" Calls GetShowID and does post processing checks on result. Parameters ---------- stringSearch : string String to look up in database or guide. Returns ---------- tvfile.ShowInfo or None If GetShowID returns None or if it returns showInfo with showID = None then this will return None, otherwise it will return the showInfo object. """""" goodlogging.Log.Info(""RENAMER"", ""Looking up show info for: {0}"".format(stringSearch)) goodlogging.Log.IncreaseIndent() showInfo = self._GetShowID(stringSearch) if showInfo is None: goodlogging.Log.DecreaseIndent() return None elif showInfo.showID is None: goodlogging.Log.DecreaseIndent() return None elif showInfo.showName is None: showInfo.showName = self._db.SearchTVLibrary(showID = showInfo.showID)[0][1] goodlogging.Log.Info(""RENAMER"", ""Found show name: {0}"".format(showInfo.showName)) goodlogging.Log.DecreaseIndent() return showInfo else: goodlogging.Log.DecreaseIndent() return showInfo" 2125,"def _MoveFileToLibrary(self, oldPath, newPath): """""" Move file from old file path to new file path. This follows certain conditions: - If file already exists at destination do rename inplace. - If file destination is on same file system and doesn't exist rename and move. - If source and destination are on different file systems do rename in-place, and if forceCopy is true copy to dest and move orig to archive directory. Parameters ---------- oldPath : string Old file path. newPath : string New file path. 
Returns ---------- boolean If old and new file paths are the same or if the new file path already exists this returns False. If file rename is skipped for any reason this returns None otherwise if rename completes okay it returns True. """""" if oldPath == newPath: return False goodlogging.Log.Info(""RENAMER"", ""PROCESSING FILE: {0}"".format(oldPath)) if os.path.exists(newPath): goodlogging.Log.Info(""RENAMER"", ""File skipped - file aleady exists in TV library at {0}"".format(newPath)) return False newDir = os.path.dirname(newPath) os.makedirs(newDir, exist_ok=True) try: os.rename(oldPath, newPath) except OSError as ex: if ex.errno is errno.EXDEV: goodlogging.Log.Info(""RENAMER"", ""Simple rename failed - source and destination exist on different file systems"") goodlogging.Log.Info(""RENAMER"", ""Renaming file in-place"") newFileName = os.path.basename(newPath) origFileDir = os.path.dirname(oldPath) renameFilePath = os.path.join(origFileDir, newFileName) if oldPath != renameFilePath: renameFilePath = util.CheckPathExists(renameFilePath) goodlogging.Log.Info(""RENAMER"", ""Renaming from {0} to {1}"".format(oldPath, renameFilePath)) else: goodlogging.Log.Info(""RENAMER"", ""File already has the correct name ({0})"".format(newFileName)) try: os.rename(oldPath, renameFilePath) except Exception as ex2: goodlogging.Log.Info(""RENAMER"", ""File rename skipped - Exception ({0}): {1}"".format(ex2.args[0], ex2.args[1])) else: if self._forceCopy is True: goodlogging.Log.Info(""RENAMER"", ""Copying file to new file system {0} to {1}"".format(renameFilePath, newPath)) try: shutil.copy2(renameFilePath, newPath) except shutil.Error as ex3: err = ex3.args[0] goodlogging.Log.Info(""RENAMER"", ""File copy failed - Shutil Error: {0}"".format(err)) else: util.ArchiveProcessedFile(renameFilePath, self._archiveDir) return True else: goodlogging.Log.Info(""RENAMER"", ""File copy skipped - copying between file systems is disabled (enabling this functionality is slow)"") else: goodlogging.Log.Info(""RENAMER"", ""File rename skipped - Exception ({0}): {1}"".format(ex.args[0], ex.args[1])) except Exception as ex: goodlogging.Log.Info(""RENAMER"", ""File rename skipped - Exception ({0}): {1}"".format(ex.args[0], ex.args[1])) else: goodlogging.Log.Info(""RENAMER"", ""RENAME COMPLETE: {0}"".format(newPath)) return True" 2126,"def _CreateNewSeasonDir(self, seasonNum): """""" Creates a new season directory name in the form 'Season <NUM>'. If skipUserInput is True this will be accepted by default otherwise the user can choose to accept this, use the base show directory or enter a different name. Parameters ---------- seasonNum : int Season number. Returns ---------- string or None If the user accepts the generated directory name or gives a new name this will be returned. If it the user chooses to use the base directory an empty string is returned. If the user chooses to skip at this input stage None is returned. 
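Example (a sketch, with skipUserInput enabled so the generated name is accepted automatically):
    >>> self._CreateNewSeasonDir(3)
    'Season 3'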
"""""" seasonDirName = ""Season {0}"".format(seasonNum) goodlogging.Log.Info(""RENAMER"", ""Generated directory name: '{0}'"".format(seasonDirName)) if self._skipUserInput is False: response = goodlogging.Log.Input(""RENAMER"", ""Enter 'y' to accept this directory, 'b' to use base show directory, 'x' to skip this file or enter a new directory name to use: "") response = util.CheckEmptyResponse(response) else: response = 'y' if response.lower() == 'b': return '' elif response.lower() == 'y': return seasonDirName elif response.lower() == 'x': return None else: return response" 2127,"def _LookUpSeasonDirectory(self, showID, showDir, seasonNum): """""" Look up season directory. First attempt to find match from database, otherwise search TV show directory. If no match is found in the database the user can choose to accept a match from the TV show directory, enter a new directory name to use or accept an autogenerated name. Parameters ---------- showID : int Show ID number showDir : string Path to show file directory seasonNum : int Season number Returns ---------- string Name of season directory to use. This can be a blank string to use the root show directory, an autogenerated string or a user given string. """""" goodlogging.Log.Info(""RENAMER"", ""Looking up season directory for show {0}"".format(showID)) goodlogging.Log.IncreaseIndent() # Look up existing season folder from database seasonDirName = self._db.SearchSeasonDirTable(showID, seasonNum) if seasonDirName is not None: goodlogging.Log.Info(""RENAMER"", ""Found season directory match from database: {0}"".format(seasonDirName)) else: # Look up existing season folder in show directory goodlogging.Log.Info(""RENAMER"", ""Looking up season directory (Season {0}) in {1}"".format(seasonNum, showDir)) if os.path.isdir(showDir) is False: goodlogging.Log.Info(""RENAMER"", ""Show directory ({0}) is not an existing directory"".format(showDir)) seasonDirName = self._CreateNewSeasonDir(seasonNum) else: matchDirList = [] for dirName in os.listdir(showDir): subDir = os.path.join(showDir, dirName) if os.path.isdir(subDir): seasonResult = re.findall(""Season"", dirName) if len(seasonResult) > 0: numResult = re.findall(""[0-9]+"", dirName) numResult = set(numResult) if len(numResult) == 1: if int(numResult.pop()) == int(seasonNum): matchDirList.append(dirName) if self._skipUserInput is True: if len(matchDirList) == 1: userAcceptance = matchDirList[0] goodlogging.Log.Info(""RENAMER"", ""Automatic selection of season directory: {0}"".format(seasonDirName)) else: userAcceptance = None goodlogging.Log.Info(""RENAMER"", ""Could not make automatic selection of season directory"") else: listDirPrompt = ""enter 'ls' to list all items in show directory"" userAcceptance = util.UserAcceptance(matchDirList, promptComment = listDirPrompt, xStrOverride = ""to create new season directory"") if userAcceptance in matchDirList: seasonDirName = userAcceptance elif userAcceptance is None: seasonDirName = self._CreateNewSeasonDir(seasonNum) else: recursiveSelectionComplete = False promptOnly = False dirLookup = userAcceptance while recursiveSelectionComplete is False: dirList = os.listdir(showDir) if dirLookup.lower() == 'ls': dirLookup = '' promptOnly = True if len(dirList) == 0: goodlogging.Log.Info(""RENAMER"", ""Show directory is empty"") else: goodlogging.Log.Info(""RENAMER"", ""Show directory contains: {0}"".format(', '.join(dirList))) else: matchDirList = util.GetBestMatch(dirLookup, dirList) response = util.UserAcceptance(matchDirList, promptComment = 
listDirPrompt, promptOnly = promptOnly, xStrOverride = ""to create new season directory"") promptOnly = False if response in matchDirList: seasonDirName = response recursiveSelectionComplete = True elif response is None: seasonDirName = self._CreateNewSeasonDir(seasonNum) recursiveSelectionComplete = True else: dirLookup = response # Add season directory to database if seasonDirName is not None: self._db.AddSeasonDirTable(showID, seasonNum, seasonDirName) goodlogging.Log.DecreaseIndent() return seasonDirName" 2128,"def _CreateNewShowDir(self, showName): """""" Create new directory name for show. An autogenerated choice, which is the showName input that has been stripped of special characters, is proposed which the user can accept or they can enter a new name to use. If the skipUserInput variable is True the autogenerated value is accepted by default. Parameters ---------- showName : string Name of TV show Returns ---------- string or None Either the autogenerated directory name, the user given directory name or None if the user chooses to skip at this input stage. """""" stripedDir = util.StripSpecialCharacters(showName) goodlogging.Log.Info(""RENAMER"", ""Suggested show directory name is: '{0}'"".format(stripedDir)) if self._skipUserInput is False: response = goodlogging.Log.Input('RENAMER', ""Enter 'y' to accept this directory, 'x' to skip this show or enter a new directory to use: "") else: response = 'y' if response.lower() == 'x': return None elif response.lower() == 'y': return stripedDir else: return response" 2129,"def _GenerateLibraryPath(self, tvFile, libraryDir): """""" Creates a full path for TV file in TV library. This initially attempts to directly match a show directory in the database, if this fails it searches the library directory for the best match. The user can then select an existing match or can propose a new directory to use as the show root directory. The season directory is also generated and added to the show and library directories. This is then used by the tvFile GenerateNewFilePath method to create a new path for the file. Parameters ---------- tvFile : tvfile.TVFile Contains show and file info. libraryDir : string Root path of TV library directory. Returns ---------- tvfile.TVFile This is an updated version of the input object. 
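Illustrative layout (hypothetical show and season): the directory handed to tvFile.GenerateNewFilePath ends up like
    <libraryDir>/Some Show/Season 2/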
"""""" goodlogging.Log.Info(""RENAMER"", ""Looking up library directory in database for show: {0}"".format(tvFile.showInfo.showName)) goodlogging.Log.IncreaseIndent() showID, showName, showDir = self._db.SearchTVLibrary(showName = tvFile.showInfo.showName)[0] if showDir is None: goodlogging.Log.Info(""RENAMER"", ""No directory match found in database - looking for best match in library directory: {0}"".format(libraryDir)) dirList = os.listdir(libraryDir) listDir = False matchName = tvFile.showInfo.showName while showDir is None: if len(dirList) == 0: goodlogging.Log.Info(""RENAMER"", ""TV Library directory is empty"") response = None else: if listDir is True: goodlogging.Log.Info(""RENAMER"", ""TV library directory contains: {0}"".format(', '.join(dirList))) else: matchDirList = util.GetBestMatch(matchName, dirList) listDir = False if self._skipUserInput is True: if len(matchDirList) == 1: response = matchDirList[0] goodlogging.Log.Info(""RENAMER"", ""Automatic selection of show directory: {0}"".format(response)) else: response = None goodlogging.Log.Info(""RENAMER"", ""Could not make automatic selection of show directory"") else: listDirPrompt = ""enter 'ls' to list all items in TV library directory"" response = util.UserAcceptance(matchDirList, promptComment = listDirPrompt, promptOnly = listDir, xStrOverride = ""to create new show directory"") if response is None: showDir = self._CreateNewShowDir(tvFile.showInfo.showName) if showDir is None: goodlogging.Log.DecreaseIndent() return tvFile elif response.lower() == 'ls': listDir = True elif response in matchDirList: showDir = response else: matchName = response self._db.UpdateShowDirInTVLibrary(showID, showDir) # Add base directory to show path showDir = os.path.join(libraryDir, showDir) goodlogging.Log.DecreaseIndent() # Lookup and add season directory to show path seasonDir = self._LookUpSeasonDirectory(showID, showDir, tvFile.showInfo.seasonNum) if seasonDir is None: return tvFile else: showDir = os.path.join(showDir, seasonDir) # Call tvFile function to generate file name tvFile.GenerateNewFilePath(showDir) return tvFile" 2130,"def Run(self): """""" Renames all TV files from the constructor given file list. It follows a number of key steps: 1) Extract a list of unique show titles from file name and lookup actual show names from database or TV guide. 2) Update each file with showID and showName. 3) Get episode name for all remaining files in valid list. 4) Print file details and generate new file paths. 5) Rename files. 6) List skipped and incompatible files. 
"""""" # ------------------------------------------------------------------------ # Get list of unique fileInfo show names and find matching actual show # names from database or TV guide # ------------------------------------------------------------------------ showNameMatchDict = {} uniqueFileShowList = self._GetUniqueFileShowNames(self._fileList) if len(uniqueFileShowList) > 0: goodlogging.Log.Seperator() for fileShowName in uniqueFileShowList: showNameMatchDict[fileShowName] = self._GetShowInfo(fileShowName) goodlogging.Log.NewLine() # ------------------------------------------------------------------------ # Update each file with showID and showName # ------------------------------------------------------------------------ incompatibleFileList = [] validShowFileList = [] for tvFile in self._fileList: if showNameMatchDict[tvFile.fileInfo.showName] is None: incompatibleFileList.append(tvFile) else: tvFile.showInfo.showID = showNameMatchDict[tvFile.fileInfo.showName].showID tvFile.showInfo.showName = showNameMatchDict[tvFile.fileInfo.showName].showName validShowFileList.append(tvFile) # ------------------------------------------------------------------------ # Get episode name for all remaining files in valid list # ------------------------------------------------------------------------ if len(validShowFileList) > 0: goodlogging.Log.Seperator() validEpisodeNameFileList = [] goodlogging.Log.Info(""RENAMER"", ""Looking up episode names:\n"") for tvFile in validShowFileList: tvFile.showInfo.episodeName = self._guide.EpisodeNameLookUp(tvFile.showInfo.showName, tvFile.showInfo.seasonNum, tvFile.showInfo.episodeNum) if tvFile.showInfo.episodeName is None: incompatibleFileList.append(tvFile) else: validEpisodeNameFileList.append(tvFile) goodlogging.Log.Info(""RENAMER"", ""{0} S{1}E{2}: {3}"".format(tvFile.showInfo.showName, tvFile.showInfo.seasonNum, tvFile.showInfo.episodeNum, tvFile.showInfo.episodeName)) goodlogging.Log.NewLine() # ------------------------------------------------------------------------ # Print file details and generate new file paths # ------------------------------------------------------------------------ goodlogging.Log.Seperator() renameFileList = [] skippedFileList = [] goodlogging.Log.Info(""RENAMER"", ""Generating library paths:\n"") if len(validEpisodeNameFileList) == 0: goodlogging.Log.Info(""RENAMER"", ""No compatible files were detected"") else: for tvFile in validEpisodeNameFileList: tvFile.Print() goodlogging.Log.NewLine() if self._inPlaceRename is False: tvFile = self._GenerateLibraryPath(tvFile, self._tvDir) else: tvFile.GenerateNewFilePath() if tvFile.fileInfo.newPath is None: incompatibleFileList.append(tvFile) elif tvFile.fileInfo.origPath != tvFile.fileInfo.newPath: renameFileList.append(tvFile) else: skippedFileList.append(tvFile) goodlogging.Log.NewLine() # ------------------------------------------------------------------------ # Rename files # ------------------------------------------------------------------------ goodlogging.Log.Seperator() goodlogging.Log.Info(""RENAMER"", ""Renamable files:\n"") if len(renameFileList) == 0: goodlogging.Log.Info(""RENAMER"", ""No renamable files were detected"") else: showName = None renameFileList.sort() for tvFile in renameFileList: if showName is None or showName != tvFile.showInfo.showName: showName = tvFile.showInfo.showName goodlogging.Log.Info(""RENAMER"", ""{0}"".format(showName)) goodlogging.Log.IncreaseIndent() goodlogging.Log.Info(""RENAMER"", ""FROM: {0}"".format(tvFile.fileInfo.origPath)) 
goodlogging.Log.Info(""RENAMER"", ""TO: {0}"".format(tvFile.fileInfo.newPath)) goodlogging.Log.DecreaseIndent() goodlogging.Log.NewLine() if self._skipUserInput is False: response = goodlogging.Log.Input('RENAMER', ""***WARNING*** CONTINUE WITH RENAME PROCESS? [y/n]: "") response = util.ValidUserResponse(response, ('y','n')) else: response = 'y' if response == 'n': goodlogging.Log.Info(""RENAMER"", ""Renaming process skipped"") elif response == 'y': goodlogging.Log.NewLine() if self._inPlaceRename is False: goodlogging.Log.Info(""RENAMER"", ""Adding files to TV library:\n"") else: goodlogging.Log.Info(""RENAMER"", ""Renaming files:\n"") for tvFile in renameFileList: self._MoveFileToLibrary(tvFile.fileInfo.origPath, tvFile.fileInfo.newPath) goodlogging.Log.NewLine() # ------------------------------------------------------------------------ # List skipped files # ------------------------------------------------------------------------ if len(skippedFileList) > 0: goodlogging.Log.Seperator() goodlogging.Log.Info(""RENAMER"", ""Skipped files:"") goodlogging.Log.IncreaseIndent() for tvFile in skippedFileList: if tvFile.fileInfo.origPath == tvFile.fileInfo.newPath: goodlogging.Log.Info(""RENAMER"", ""{0} (No rename required)"".format(tvFile.fileInfo.origPath)) else: goodlogging.Log.Info(""RENAMER"", ""{0} (Unknown reason)"".format(tvFile.fileInfo.origPath)) goodlogging.Log.DecreaseIndent() # ------------------------------------------------------------------------ # List incompatible files # ------------------------------------------------------------------------ if len(incompatibleFileList) > 0: goodlogging.Log.Seperator() goodlogging.Log.Info(""RENAMER"", ""Incompatible files:"") goodlogging.Log.IncreaseIndent() for tvFile in incompatibleFileList: if tvFile.showInfo.showName is None: goodlogging.Log.Info(""RENAMER"", ""{0} (Missing show name)"".format(tvFile.fileInfo.origPath)) elif tvFile.showInfo.episodeName is None: goodlogging.Log.Info(""RENAMER"", ""{0} (Missing episode name)"".format(tvFile.fileInfo.origPath)) elif tvFile.fileInfo.newPath is None: goodlogging.Log.Info(""RENAMER"", ""{0} (Failed to create new file path)"".format(tvFile.fileInfo.origPath)) else: goodlogging.Log.Info(""RENAMER"", ""{0} (Unknown reason)"".format(tvFile.fileInfo.origPath)) goodlogging.Log.DecreaseIndent()" 2131,"def get_api_publisher(self, social_user): """""" message: <str> image: <file> as object_attachment owner_id: <str> """""" def _post(owner_id=None, **kwargs): api = self.get_api(social_user, owner_id) return api.post('{}/feed'.format(owner_id or 'me'), params=kwargs) return _post" 2132,"def catch(ignore=[], was_doing=""something important"", helpfull_tips=""you should use a debugger"", gbc=None): """""" Catch, prepare and log error :param exc_cls: error class :param exc: exception :param tb: exception traceback """""" exc_cls, exc, tb=sys.exc_info() if exc_cls in ignore: msg='exception in ignorelist' gbc.say('ignoring caught:'+str(exc_cls)) return 'exception in ignorelist' ex_message = traceback.format_exception_only(exc_cls, exc)[-1] ex_message = ex_message.strip() # TODO: print(ex_message) error_frame = tb while error_frame.tb_next is not None: error_frame = error_frame.tb_next file = error_frame.tb_frame.f_code.co_filename line = error_frame.tb_lineno stack = traceback.extract_tb(tb) formated_stack = [] for summary in stack: formated_stack.append({ 'file': summary[0], 'line': summary[1], 'func': summary[2], 'text': summary[3] }) event = { 'was_doing':was_doing, 'message': ex_message, 'errorLocation': 
{ 'file': file, 'line': line, 'full': file + ' -> ' + str(line) }, 'stack': formated_stack #, #'time': time.time() } try: #logging.info('caught:'+pformat(event)) gbc.cry('caught:'+pformat(event)) print('Bubble3: written error to log') print('Bubble3: tips for fixing this:') print(helpfull_tips) except Exception as e: print('Bubble3: cant log error cause of %s' % e)" 2133,"def from_name(api_url, name, dry_run=False): """""" doesn't require a token config param as all of our data is currently public """""" return DataSet( '/'.join([api_url, name]).rstrip('/'), token=None, dry_run=dry_run )" 2134,"def secured_clipboard(item): """"""This clipboard only allows 1 paste """""" expire_clock = time.time() def set_text(clipboard, selectiondata, info, data): # expire after 15 secs if 15.0 >= time.time() - expire_clock: selectiondata.set_text(item.get_secret()) clipboard.clear() def clear(clipboard, data): """"""Clearing of the buffer is deferred this only gets called if the paste is actually triggered """""" pass targets = [(""STRING"", 0, 0) ,(""TEXT"", 0, 1) ,(""COMPOUND_TEXT"", 0, 2) ,(""UTF8_STRING"", 0, 3)] cp = gtk.clipboard_get() cp.set_with_data(targets, set_text, clear)" 2135,"def get_active_window(): """"""Get the currently focused window """""" active_win = None default = wnck.screen_get_default() while gtk.events_pending(): gtk.main_iteration(False) window_list = default.get_windows() if len(window_list) == 0: print ""No Windows Found"" for win in window_list: if win.is_active(): active_win = win.get_name() return active_win" 2136,"def get(self): """"""Get quota from Cloud Provider."""""" # get all network quota from Cloud Provider. attrs = (""networks"", ""security_groups"", ""floating_ips"", ""routers"", ""internet_gateways"") for attr in attrs: setattr(self, attr, eval(""self.get_{}()"". format(attr)))" 2137,"def join_css_class(css_class, *additional_css_classes): """""" Returns the union of one or more CSS classes as a space-separated string. Note that the order will not be preserved. """""" css_set = set(chain.from_iterable( c.split(' ') for c in [css_class, *additional_css_classes] if c)) return ' '.join(css_set)" 2138,"def _init_middlewares(self): """"""Initialize hooks and middlewares If you have another Middleware, like BrokeMiddleware for e.x You can append this to middleware: self.middleware.append(BrokeMiddleware()) """""" self.middleware = [DeserializeMiddleware()] self.middleware += \ [FuncMiddleware(hook) for hook in self.before_hooks()] self.middleware.append(SerializeMiddleware())" 2139,"def _init_routes_and_middlewares(self): """"""Initialize hooks and URI routes to resources."""""" self._init_middlewares() self._init_endpoints() self.app = falcon.API(middleware=self.middleware) self.app.add_error_handler(Exception, self._error_handler) for version_path, endpoints in self.catalog: for route, resource in endpoints: self.app.add_route(version_path + route, resource)" 2140,"def _error_handler(self, exc, request, response, params): """"""Handler error"""""" if isinstance(exc, falcon.HTTPError): raise exc LOG.exception(exc) raise falcon.HTTPInternalServerError('Internal server error', six.text_type(exc))" 2141,"def _get_server_cls(self, host): """"""Return an appropriate WSGI server class base on provided host :param host: The listen host for the zaqar API server. 
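Example (a sketch; assumes the default WSGIServer address family is AF_INET):
    >>> self._get_server_cls('127.0.0.1') is simple_server.WSGIServer
    True
    >>> self._get_server_cls('::1').address_family == socket.AF_INET6
    True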
"""""" server_cls = simple_server.WSGIServer if netutils.is_valid_ipv6(host): if getattr(server_cls, 'address_family') == socket.AF_INET: class server_cls(server_cls): address_family = socket.AF_INET6 return server_cls" 2142,"def listen(self): """"""Self-host using 'bind' and 'port' from the WSGI config group."""""" msgtmpl = (u'Serving on host %(host)s:%(port)s') host = CONF.wsgi.wsgi_host port = CONF.wsgi.wsgi_port LOG.info(msgtmpl, {'host': host, 'port': port}) server_cls = self._get_server_cls(host) httpd = simple_server.make_server(host, port, self.app, server_cls) httpd.serve_forever()" 2143,"def _create_map(self): """"""Initialize Brzozowski Algebraic Method"""""" # at state i is represented by the regex self.B[i] for state_a in self.mma.states: self.A[state_a.stateid] = {} # Create a map state to state, with the transition symbols for arc in state_a.arcs: if arc.nextstate in self.A[state_a.stateid]: self.A[state_a.stateid][arc.nextstate].append(self.mma.isyms.find(arc.ilabel)) else: self.A[state_a.stateid][arc.nextstate] = [self.mma.isyms.find(arc.ilabel)] if state_a.final: self.A[state_a.stateid]['string'] = ['']" 2144,"def get_promise(self): """"""Return the special set of promises for run_instruction. Run Instruction has to support multiple promises (one for reading data, and one for reading back the status from IR. All other primitives have a single promise, so fitting multiple into this system causes some API consistencies. This should be reviewed to see if a more coherent alternative is available. """""" if self._promise is None: promise = [] if self.read: promise.append(TDOPromise(self._chain, 0, self.bitcount)) else: promise.append(None) if self.read_status: promise.append(TDOPromise(self._chain, 0, self.dev._desc._ir_length)) else: promise.append(None) self._promise = promise return self._promise" 2145,"def return_primitive(fn): """""" Decorator which wraps a single argument function to ignore any arguments of primitive type (simply returning them unmodified). 
"""""" @wraps(fn) def wrapped_fn(x): if isinstance(x, PRIMITIVE_TYPES): return x return fn(x) return wrapped_fn" 2146,"def _get_dataset(self, dataset, name, color): """""" Encode a dataset """""" global palette html = ""{"" html += '\t""label"": ""' + name + '"",' if color is not None: html += '""backgroundColor"": ""' + color + '"",\n' else: html += '""backgroundColor"": ' + palette + ',\n' html += '""data"": ' + self._format_list(dataset) + ',\n' html += ""}"" return html" 2147,"def get(self, slug, xdata, ydatasets, label, opts, style, ctype): """""" Returns html for a chart """""" xdataset = self._format_list(xdata) width = ""100%"" height = ""300px"" if opts is not None: if ""width"" in opts: width = str(opts[""width""]) if ""height"" in opts: height = str(opts[""height""]) stylestr = '<style>#container_' + slug + \ ' { width:' + width + ' !important; height:' + \ height + ' !important}</style>\n' html = stylestr html += '<div id=""container_' + slug + \ '""><canvas id=""canvas_' + slug + '""></canvas></div>\n' html += '<script>\n' html += 'var data = {\n' html += 'labels: ' + xdataset + ',\n' html += 'datasets:[\n' colors = None if ""color"" in style: colors = style[""color""] i = 0 for dataset in ydatasets: name = dataset[""name""] data = dataset[""data""] html += self._get_dataset(data, name, colors) if i < len(ydatasets) - 1: html += "","" i += 1 html += ']\n' html += '}\n' html += 'window.onload = function() {' html += 'var ctx = document.getElementById(""canvas_' + \ slug + '"").getContext(""2d"");' html += 'window.myChart = new Chart(ctx, {' html += 'type: ""' + ctype + '"",' html += 'data: data,' html += 'options: {' html += 'spanGaps: false,' html += 'responsive: true,' html += 'maintainAspectRatio: false,' if ""legend"" in opts: html += 'legend: {' html += 'position: ""' + opts[""legend""] + '"",' html += '},' else: html += 'legend: {' html += 'display: false,' html += '},' if ""title"" in opts: html += 'title: {' html += 'display: true,' html += 'text: ""' + opts[""title""] + '""' html += '}' html += '}' html += '});' html += '};' html += '</script>\n' return html" 2148,"def _format_list(self, data): """""" Format a list to use in javascript """""" dataset = ""["" i = 0 for el in data: if pd.isnull(el): dataset += ""null"" else: dtype = type(data[i]) if dtype == int or dtype == float: dataset += str(el) else: dataset += '""' + el + '""' if i < len(data) - 1: dataset += ', ' dataset += ""]"" return dataset" 2149,"def status(self, status, headers=None): ''' Respond with given status and no content :type status: int :param status: status code to return :type headers: dict :param headers: dictionary of headers to add to response :returns: itself :rtype: Rule ''' self.response = _Response(status, headers) return self" 2150,"def text(self, text, status=200, headers=None): ''' Respond with given status and text content :type text: str :param text: text to return :type status: int :param status: status code to return :type headers: dict :param headers: dictionary of headers to add to response :returns: itself :rtype: Rule ''' self.response = _Response(status, headers, text.encode('utf8')) return self" 2151,"def json(self, json_doc, status=200, headers=None): ''' Respond with given status and JSON content. 
Will also set ``'Content-Type'`` to ``'applicaion/json'`` if header is not specified explicitly :type json_doc: dict :param json_doc: dictionary to respond with converting to JSON string :type status: int :param status: status code to return :type headers: dict :param headers: dictionary of headers to add to response ''' headers = headers or {} if 'content-type' not in headers: headers['content-type'] = 'application/json' return self.text(json.dumps(json_doc), status, headers)" 2152,"def matches(self, method, path, headers, bytes=None): ''' Checks if rule matches given request parameters :type method: str :param method: HTTP method, e.g. ``'GET'``, ``'POST'``, etc. Can take any custom string :type path: str :param path: request path including query parameters, e.g. ``'/users?name=John%20Doe'`` :type bytes: bytes :param bytes: request body :returns: ``True`` if this rule matches given params :rtype: bool ''' return self._expectation.matches(method, path, headers, bytes)" 2153,"def always(self, method, path=None, headers=None, text=None, json=None): ''' Sends response every time matching parameters are found util :func:`Server.reset` is called :type method: str :param method: request method: ``'GET'``, ``'POST'``, etc. can be some custom string :type path: str :param path: request path including query parameters :type headers: dict :param headers: dictionary of headers to expect. If omitted any headers will do :type text: str :param text: request text to expect. If ommited any text will match :type json: dict :param json: request json to expect. If ommited any json will match, if present text param will be ignored :rtype: Rule :returns: newly created expectation rule ''' rule = Rule(method, path, headers, text, json) return self._add_rule_to(rule, self._always_rules)" 2154,"def on(self, method, path=None, headers=None, text=None, json=None): ''' Sends response to matching parameters one time and removes it from list of expectations :type method: str :param method: request method: ``'GET'``, ``'POST'``, etc. can be some custom string :type path: str :param path: request path including query parameters :type headers: dict :param headers: dictionary of headers to expect. If omitted any headers will do :type text: str :param text: request text to expect. If ommited any text will match :type json: dict :param json: request json to expect. If ommited any json will match, if present text param will be ignored :rtype: Rule :returns: newly created expectation rule ''' rule = Rule(method, path, headers, text, json) return self._add_rule_to(rule, self._rules)" 2155,"def start(self): ''' Starts a server on the port provided in the :class:`Server` constructor in a separate thread :rtype: Server :returns: server instance for chaining ''' self._handler = _create_handler_class(self._rules, self._always_rules) self._server = HTTPServer(('', self._port), self._handler) self._thread = Thread(target=self._server.serve_forever, daemon=True) self._thread.start() self.running = True return self" 2156,"def stop(self): ''' Shuts the server down and waits for server thread to join ''' self._server.shutdown() self._server.server_close() self._thread.join() self.running = False" 2157,"def assert_no_pending(self, target_rule=None): ''' Raises a :class:`PendingRequestsLeftException` error if server has target rule non-resolved. When target_rule argument is ommitted raises if server has any pending expectations. 
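Illustrative use (a sketch):
    rule = server.on('GET', '/users')
    ...  # exercise code that should request /users
    server.assert_no_pending(rule)  # raises if /users was never requested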
Useful in ``tearDown()`` test method to verify that test had correct expectations :type target_rule: Rule :param target_rule: will raise if this rule is left pending :raises: :class:`PendingRequestsLeftException` ''' if target_rule: if target_rule in self._rules: raise PendingRequestsLeftException() elif self._rules: raise PendingRequestsLeftException()" 2158,"def build(path, query=None, fragment=''): """""" Generates a URL based on the inputted path and given query options and fragment. The query should be a dictionary of terms that will be generated into the URL, while the fragment is the anchor point within the target path that will be navigated to. If there are any wildcards within the path that are found within the query, they will be inserted into the path itself and removed from the query string. :example |>>> import skyline.gui |>>> skyline.gui.build_url('sky://projects/%(project)s', | {'project': 'Test', 'asset': 'Bob'}) |'sky://projects/Test/?asset=Bob' :param path | <str> query | <dict> || None fragment | <str> || None :return <str> | url """""" url = nstr(path) # replace the optional arguments in the url keys = projex.text.findkeys(path) if keys: if query is None: query = {} opts = {} for key in keys: opts[key] = query.pop(key, '%({})s'.format(key)) url %= opts # add the query if query: if type(query) is dict: mapped_query = {} for key, value in query.items(): mapped_query[nstr(key)] = nstr(value) query_str = urllib.urlencode(mapped_query) else: query_str = nstr(query) url += '?' + query_str # include the fragment if fragment: url += '#' + fragment return url" 2159,"def parse(url): """""" Parses out the information for this url, returning its components expanded out to Python objects. :param url | <str> :return (<str> path, <dict> query, <str> fragment) """""" result = urlparse.urlparse(nstr(url)) path = result.scheme + '://' + result.netloc if result.path: path += result.path query = {} # extract the python information from the query if result.query: url_query = urlparse.parse_qs(result.query) for key, value in url_query.items(): if type(value) == list and len(value) == 1: value = value[0] query[key] = value return path, query, result.fragment" 2160,"def register(scheme): """""" Registers a new scheme to the urlparser. 
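A small usage sketch (the skyline.gui module path is assumed here, mirroring the build example above; the key point is simply calling register with the new scheme name):
:example
|>>> import skyline.gui
|>>> skyline.gui.register('sky')
|>>> # 'sky' urls are now handled by parse() and build() like built-in schemes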
:param schema | <str> """""" scheme = nstr(scheme) urlparse.uses_fragment.append(scheme) urlparse.uses_netloc.append(scheme) urlparse.uses_params.append(scheme) urlparse.uses_query.append(scheme) urlparse.uses_relative.append(scheme)" 2161,"def send(self, stats): ""Format stats and send to one or more Graphite hosts"" buf = cStringIO.StringIO() now = int(time.time()) num_stats = 0 # timer stats pct = stats.percent timers = stats.timers for key, vals in timers.iteritems(): if not vals: continue # compute statistics num = len(vals) vals = sorted(vals) vmin = vals[0] vmax = vals[-1] mean = vmin max_at_thresh = vmax if num > 1: idx = round((pct / 100.0) * num) tmp = vals[:int(idx)] if tmp: max_at_thresh = tmp[-1] mean = sum(tmp) / idx key = 'stats.timers.%s' % key buf.write('%s.mean %f %d\n' % (key, mean, now)) buf.write('%s.upper %f %d\n' % (key, vmax, now)) buf.write('%s.upper_%d %f %d\n' % (key, pct, max_at_thresh, now)) buf.write('%s.lower %f %d\n' % (key, vmin, now)) buf.write('%s.count %d %d\n' % (key, num, now)) num_stats += 1 # counter stats counts = stats.counts for key, val in counts.iteritems(): buf.write('stats.%s %f %d\n' % (key, val / stats.interval, now)) buf.write('stats_counts.%s %f %d\n' % (key, val, now)) num_stats += 1 # counter stats gauges = stats.gauges for key, val in gauges.iteritems(): buf.write('stats.%s %f %d\n' % (key, val, now)) buf.write('stats_counts.%s %f %d\n' % (key, val, now)) num_stats += 1 buf.write('statsd.numStats %d %d\n' % (num_stats, now)) # TODO: add support for N retries for host in self._hosts: # flush stats to graphite try: sock = socket.create_connection(host) sock.sendall(buf.getvalue()) sock.close() except Exception, ex: self.error(E_SENDFAIL % ('graphite', host, ex))" 2162,"def coerce(self, value): """"""Convert any value into a string value. Args: value (any): The value to coerce. Returns: str: The string representation of the value. """""" if isinstance(value, compat.basestring): return value return str(value)" 2163,"def coerce(self, value): """"""Convert a value into a pattern matched string value. All string values are matched against a regex before they are considered acceptable values. Args: value (any): The value to coerce. Raises: ValueError: If the value is not an acceptable value. Returns: str: The pattern matched value represented. """""" if not isinstance(value, compat.basestring): value = str(value) if not self._re.match(value): raise ValueError( ""The value {0} does not match the pattern {1}"".format( value, self.pattern, ) ) return value" 2164,"def get_payload(self, *args, **kwargs): """"""Receive all passed in args, kwargs, and combine them together with any required params"""""" if not kwargs: kwargs = self.default_params else: kwargs.update(self.default_params) for item in args: if isinstance(item, dict): kwargs.update(item) if hasattr(self, 'type_params'): kwargs.update(self.type_params(*args, **kwargs)) return kwargs" 2165,"async def read_frame(self) -> DataFrame: """"""Read a single frame from the local buffer. If no frames are available but the stream is still open, waits until more frames arrive. Otherwise, raises StreamConsumedError. When a stream is closed, a single `None` is added to the data frame Queue to wake up any waiting `read_frame` coroutines. 
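A minimal consumption sketch (``stream`` is assumed to be an instance of this class):
    async def drain(stream):
        frames = []
        try:
            while True:
                frames.append(await stream.read_frame())
        except StreamConsumedError:
            pass
        return frames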
"""""" if self._data_frames.qsize() == 0 and self.closed: raise StreamConsumedError(self.id) frame = await self._data_frames.get() self._data_frames.task_done() if frame is None: raise StreamConsumedError(self.id) return frame" 2166,"def read_frame_nowait(self) -> Optional[DataFrame]: """"""Read a single frame from the local buffer immediately. If no frames are available but the stream is still open, returns None. Otherwise, raises StreamConsumedError. """""" try: frame = self._data_frames.get_nowait() except asyncio.QueueEmpty: if self.closed: raise StreamConsumedError(self.id) return None self._data_frames.task_done() if frame is None: raise StreamConsumedError(self.id) return frame" 2167,"def plucks(obj, selector, default=None): """"""Safe itemgetter for structured objects. Happily operates on all (nested) objects that implement the item getter, i.e. the `[]` operator. The `selector` is ~ ``(<key>|<index>|<slice>|\*)(\.(<key>|<index>|<slice>|\*))*``. Parts (keys) in the selector path are separated with a dot. If the key looks like a number it's interpreted as such, i.e. as an index (so beware of numeric string keys in `dict`s). Python slice syntax is supported with keys like: ``2:7``, ``:5``, ``::-1``. A special key is ``*``, equivalent to the slice-all op ``:``. Note its usage does not serve functional, but annotational purpose -- feel free to leave it out (check the last example below). Examples: obj = { 'users': [{ 'uid': 1234, 'name': { 'first': 'John', 'last': 'Smith', } }, { 'uid': 2345, 'name': { 'last': 'Bono' } }] } plucks(obj, 'users.1.name') -> {'last': 'Bono'} plucks(obj, 'users.*.name.last') -> ['Smith', 'Bono'] plucks(obj, 'users.name.first') -> ['John'] Note: since the dot `.` is used as a separator, keys can not contain dots. """""" def _filter(iterable, index): res = [] for obj in iterable: try: res.append(obj[index]) except: pass return res def _int(val): try: return int(val) except: return None def _parsekey(key): m = re.match(r""^(?P<index>-?\d+)$"", key) if m: return int(m.group('index')) m = re.match(r""^(?P<start>-?\d+)?""\ r""(:(?P<stop>-?\d+)?(:(?P<step>-?\d+)?)?)?$"", key) if m: return slice(_int(m.group('start')), _int(m.group('stop')), _int(m.group('step'))) if key == '*': return slice(None) return key miss = False for key in selector.split('.'): index = _parsekey(key) if miss: if isinstance(index, basestring): obj = {} else: obj = [] try: if isinstance(index, basestring): if isinstance(obj, list): obj = _filter(obj, index) else: obj = obj[index] else: obj = obj[index] miss = False except: miss = True if miss: return default else: return obj" 2168,"def pluck(obj, selector, default=None, skipmissing=True): """"""Alternative implementation of `plucks` that accepts more complex selectors. It's a wrapper around `pluckable`, so a `selector` can be any valid Python expression comprising attribute getters (``.attr``) and item getters (``[1, 4:8, ""key""]``). Example: pluck(obj, ""users[2:5, 10:15].name.first"") equal to: pluckable(obj).users[2:5, 10:15].name.first.value """""" if not selector: return obj if selector[0] != '[': selector = '.%s' % selector wrapped_obj = pluckable(obj, default=default, skipmissing=skipmissing, inplace=True) return eval(""wrapped_obj%s.value"" % selector)" 2169,"def merge(a, b, op=None, recurse_list=False, max_depth=None): """"""Immutable merge ``a`` structure with ``b`` using binary operator ``op`` on leaf nodes. All nodes at, or below, ``max_depth`` are considered to be leaf nodes. 
Merged structure is returned, input data structures are not modified. If ``recurse_list=True``, leaf lists of equal length will be merged on a list-element level. Lists are considered to be leaf nodes by default (``recurse_list=False``), and they are merged with user-provided ``op``. Note the difference:: merge([1, 2], [3, 4]) ==> [1, 2, 3, 4] merge([1, 2], [3, 4], recurse_list=True) ==> [4, 6] """""" if op is None: op = operator.add if max_depth is not None: if max_depth < 1: return op(a, b) else: max_depth -= 1 if isinstance(a, dict) and isinstance(b, dict): result = {} for key in set(chain(a.keys(), b.keys())): if key in a and key in b: result[key] = merge(a[key], b[key], op=op, recurse_list=recurse_list, max_depth=max_depth) elif key in a: result[key] = deepcopy(a[key]) elif key in b: result[key] = deepcopy(b[key]) return result elif isinstance(a, list) and isinstance(b, list): if recurse_list and len(a) == len(b): # merge subelements result = [] for idx in range(len(a)): result.append(merge(a[idx], b[idx], op=op, recurse_list=recurse_list, max_depth=max_depth)) return result else: # merge lists return op(a, b) # all other merge ops should be handled by ``op``. # default ``operator.add`` will handle addition of numeric types, but fail # with TypeError for incompatible types (eg. str + None, etc.) return op(a, b)" 2170,"def readProfile(filename): """""" input: string, name of a file containing a profile description output: asp.TermSet, with atoms matching the contents of the input file Parses a profile description, and returns a TermSet object. """""" p = profile_parser.Parser() accu = TermSet() file = open(filename,'r') s = file.readline() while s!="""": try: accu = p.parse(s,filename) except EOFError: break s = file.readline() return accu" 2171,"def _param_deprecation_warning(schema, deprecated, context): """"""Raises warning about using the 'old' names for some parameters. The new naming scheme just has two underscores on each end of the word for consistency """""" for i in deprecated: if i in schema: msg = 'When matching {ctx}, parameter {word} is deprecated, use __{word}__ instead' msg = msg.format(ctx = context, word = i) warnings.warn(msg, Warning)" 2172,"def check(schema, data, trace=False): """"""Verify some json. Args: schema - the description of a general-case 'valid' json object. data - the json data to verify. Returns: bool: True if data matches the schema, False otherwise. Raises: TypeError: If the schema is of an unknown data type. ValueError: If the schema contains a string with an invalid value. If the schema attempts to reference a non-existent named schema. """""" if trace == True: trace = 1 else: trace = None return _check(schema, data, trace=trace)" 2173,"def has_perm(self, user, perm, obj=None, *args, **kwargs): """"""Test user permissions for a single action and object. :param user: The user to test. :type user: ``User`` :param perm: The action to test. :type perm: ``str`` :param obj: The object path to test. :type obj: ``tutelary.engine.Object`` :returns: ``bool`` -- is the action permitted? """""" try: if not self._obj_ok(obj): if hasattr(obj, 'get_permissions_object'): obj = obj.get_permissions_object(perm) else: raise InvalidPermissionObjectException return user.permset_tree.allow(Action(perm), obj) except ObjectDoesNotExist: return False" 2174,"def permitted_actions(self, user, obj=None): """"""Determine list of permitted actions for an object or object pattern. :param user: The user to test. 
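A standalone sketch of the leaf-merge idea behind merge() above, without the recurse_list and max_depth options; the sample dicts are made up.

import operator
from itertools import chain

def merge_leaves(a, b, op=operator.add):
    # recursively merge two dicts, applying op to leaves present in both
    if isinstance(a, dict) and isinstance(b, dict):
        return {key: merge_leaves(a[key], b[key], op) if key in a and key in b
                else a.get(key, b.get(key))
                for key in set(chain(a, b))}
    return op(a, b)

print(merge_leaves({'x': 1, 'y': {'z': 2}}, {'x': 10, 'y': {'z': 5}}))
# -> {'x': 11, 'y': {'z': 7}}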
:type user: ``User`` :param obj: A function mapping from action names to object paths to test. :type obj: callable :returns: ``list(tutelary.engine.Action)`` -- permitted actions. """""" try: if not self._obj_ok(obj): raise InvalidPermissionObjectException return user.permset_tree.permitted_actions(obj) except ObjectDoesNotExist: return []" 2175,"def validate(self,options): """""" Validate the options or exit() """""" if not options.port: self.parser.error(""'port' is required"") if options.port == options.monitor_port: self.parser.error(""'port' and 'monitor-port' must not be the same."") if options.buffer_size <= 0: self.parser.error(""'buffer_size' must be > 0."") try: codecs.getencoder(options.char_encoding) except LookupError: self.parser.error(""invalid 'char-encoding' %s"" % options.char_encoding) if not options.host: options.host = socket.gethostname()" 2176,"def list(self, name, platform='', genre=''): """""" The name argument is required for this method as per the API server specification. This method also provides the platform and genre optional arguments as filters. """""" data_list = self.db.get_data(self.list_path, name=name, platform=platform, genre=genre) data_list = data_list.get('Data') or {} games = data_list.get('Game') or [] return [self._build_item(**i) for i in games]" 2177,"def list(self): """""" No argument is required for this method as per the API server specification. """""" data_list = self.db.get_data(self.list_path) data_list = data_list.get('Data') or {} platforms = (data_list.get('Platforms') or {}).get('Platform') or [] return [self._build_item(**i) for i in platforms]" 2178,"def games(self, platform): """""" It returns a list of games given the platform *alias* (usually is the game name separated by ""-"" instead of white spaces). """""" platform = platform.lower() data_list = self.db.get_data(self.games_path, platform=platform) data_list = data_list.get('Data') or {} return [Game(self.db.game, **i) for i in data_list.get('Game') or {}]" 2179,"def remove_none_dict_values(obj): """""" Remove None values from dict. """""" if isinstance(obj, (list, tuple, set)): return type(obj)(remove_none_dict_values(x) for x in obj) elif isinstance(obj, dict): return type(obj)((k, remove_none_dict_values(v)) for k, v in obj.items() if v is not None) else: return obj" 2180,"def update_history(cloud_hero): """""" Send each command to the /history endpoint. """""" user_command = ' '.join(sys.argv) timestamp = int(time.time()) command = (user_command, timestamp) cloud_hero.send_history([command])" 2181,"def get_scenfit(instance, OS, FP, FC, EP): '''returns the scenfit of data and model described by the ``TermSet`` object [instance]. ''' sem = [sign_cons_prg, bwd_prop_prg] if OS : sem.append(one_state_prg) if FP : sem.append(fwd_prop_prg) if FC : sem.append(founded_prg) if EP : sem.append(elem_path_prg) inst = instance.to_file() prg = sem + scenfit + [inst] coptions = '--opt-strategy=5' solver = GringoClasp(clasp_options=coptions) solution = solver.run(prg,collapseTerms=True,collapseAtoms=False) opt = solution[0].score[0] os.unlink(inst) return opt" 2182,"def get_scenfit_labelings(instance,nm, OS, FP, FC, EP): ''' returns a list of atmost [nm] ``TermSet`` representing scenfit labelings to the system described by the ``TermSet`` object [instance]. 
''' sem = [sign_cons_prg, bwd_prop_prg] if OS : sem.append(one_state_prg) if FP : sem.append(fwd_prop_prg) if FC : sem.append(founded_prg) if EP : sem.append(elem_path_prg) inst = instance.to_file() prg = sem + scenfit + [inst] coptions = '--opt-strategy=5' solver = GringoClasp(clasp_options=coptions) solution = solver.run(prg,collapseTerms=True,collapseAtoms=False) opt = solution[0].score[0] prg = prg + [show_labels_prg, show_err_prg] coptions = str(nm)+' --project --opt-strategy=5 --opt-mode=optN --opt-bound='+str(opt) solver2 = GringoClasp(clasp_options=coptions) models = solver2.run(prg,collapseTerms=True,collapseAtoms=False) os.unlink(inst) return models" 2183,"def get_opt_add_remove_edges_greedy(instance): ''' only apply with elementary path consistency notion ''' sem = [sign_cons_prg, elem_path_prg, fwd_prop_prg, bwd_prop_prg] inst = instance.to_file() prg = [ inst, remove_edges_prg, min_repairs_prg, show_rep_prg ] + sem + scenfit coptions = '1 --project --opt-strategy=5 --opt-mode=optN --quiet=1' solver = GringoClasp(clasp_options=coptions) models = solver.run(prg, collapseTerms=True, collapseAtoms=False) bscenfit = models[0].score[0] brepscore = models[0].score[1] #print('model: ',models[0]) #print('bscenfit: ',bscenfit) #print('brepscore: ',brepscore) strt_edges = TermSet() fedges = [(strt_edges, bscenfit, brepscore)] tedges = [] dedges = [] coptions = '0 --project --opt-strategy=5 --opt-mode=optN --quiet=1' solver = GringoClasp(clasp_options=coptions) while fedges: # sys.stdout.flush() # print (""TODO: "",len(fedges)) (oedges, oscenfit, orepscore) = fedges.pop() # print('(oedges,oscenfit, orepscore):',(oedges,oscenfit, orepscore)) # print('len(oedges):',len(oedges)) # extend till no better solution can be found end = True # assume this time its the end f_oedges = TermSet(oedges).to_file() prg = [ inst, f_oedges, remove_edges_prg, best_one_edge_prg, min_repairs_prg, show_rep_prg ] + sem + scenfit models = solver.run(prg, collapseTerms=True, collapseAtoms=False) nscenfit = models[0].score[0] nrepscore = models[0].score[1]+2*(len(oedges)) # print('nscenfit: ',nscenfit) # print('nrepscore: ',nrepscore) if (nscenfit < oscenfit) or nrepscore < orepscore: # better score or more that 1 scenfit # print('maybe better solution:') # print('#models: ',len(models)) for m in models: #print('MMM ',models) nend = TermSet() for a in m : if a.pred() == 'rep' : if a.arg(0)[0:7]=='addeddy' : # print('new addeddy to',a.arg(0)[8:-1]) nend = String2TermSet('edge_end('+(a.arg(0)[8:-1])+')') # search starts of the addeddy # print('search best edge starts') f_end = TermSet(nend).to_file() prg = [ inst, f_oedges, remove_edges_prg, f_end, best_edge_start_prg, min_repairs_prg, show_rep_prg ] + sem + scenfit starts = solver.run(prg, collapseTerms=True, collapseAtoms=False) os.unlink(f_end) # print(starts) for s in starts: n2scenfit = s.score[0] n2repscore = s.score[1]+2*(len(oedges)) # print('n2scenfit: ', n2scenfit) # print('n2repscore: ', n2repscore) if (n2scenfit < oscenfit) or n2repscore < orepscore: # better score or more that 1 scenfit # print('better solution:') if (n2scenfit<bscenfit): bscenfit = n2scenfit # update bscenfit brepscore = n2repscore if (n2scenfit == bscenfit) : if (n2repscore<brepscore) : brepscore = n2repscore nedge = TermSet() for a in s : if a.pred() == 'rep' : if a.arg(0)[0:7]=='addedge' : # print('new edge ',a.arg(0)[8:-1]) nedge = String2TermSet('obs_elabel('+(a.arg(0)[8:-1])+')') end = False nedges = oedges.union(nedge) if (nedges,n2scenfit,n2repscore) not in fedges and nedges not 
in dedges: fedges.append((nedges,n2scenfit,n2repscore)) dedges.append(nedges) if end : if (oedges,oscenfit,orepscore) not in tedges and oscenfit == bscenfit and orepscore == brepscore: # print('LAST tedges append',oedges) tedges.append((oedges,oscenfit,orepscore)) # end while os.unlink(f_oedges) # take only the results with the best scenfit redges=[] for (tedges,tscenfit,trepairs) in tedges: if tscenfit == bscenfit: redges.append((tedges,trepairs)) os.unlink(inst) return (bscenfit,redges)" 2184,"def get_opt_repairs_add_remove_edges_greedy(instance,nm, edges): ''' only apply with elementary path consistency notion ''' sem = [sign_cons_prg, elem_path_prg, fwd_prop_prg, bwd_prop_prg] inst = instance.to_file() f_edges = TermSet(edges).to_file() prg = [ inst, f_edges, remove_edges_prg, min_repairs_prg, show_rep_prg, ] + sem + scenfit coptions = str(nm)+' --project --opt-strategy=5 --opt-mode=optN --quiet=1' solver = GringoClasp(clasp_options=coptions) models = solver.run(prg, collapseTerms=True, collapseAtoms=False) #print(models) #nscenfit = models[0].score[0] #nrepscore = models[0].score[1] #print('scenfit: ', nscenfit) #print('repscore: ', nrepscore) os.unlink(f_edges) os.unlink(inst) return models" 2185,"def validate_implementation_for_auto_decode_and_soupify(func): """""" Validate that :func:`auto_decode_and_soupify` is applicable to this function. If not applicable, a ``NotImplmentedError`` will be raised. """""" arg_spec = inspect.getargspec(func) for arg in [""response"", ""html"", ""soup""]: if arg not in arg_spec.args: raise NotImplementedError( (""{func} method has to take the keyword syntax input: "" ""{arg}"").format(func=func, arg=arg) )" 2186,"def auto_decode_and_soupify(encoding=None, errors=decoder.ErrorsHandle.strict): """""" This decorator assume that there are three argument in keyword syntax: - ``response``: ``requests.Response`` or ``scrapy.http.Reponse`` - ``html``: html string - ``soup``: ``bs4.BeautifulSoup`` 1. if ``soup`` is not available, it will automatically be generated from ``html``. 2. if ``html`` is not available, it will automatically be generated from ``response``. Usage:: @auto_decode_and_soupify() def parse(response, html, soup): ... **中文文档** 此装饰器会自动检测函数中名为 ``response``, ``html``, ``soup`` 的参数, 并在 ``html``, ``soup`` 未给出的情况下, 自动生成所期望的值. 被此装饰器装饰的函数必须 要有以上提到的三个参数. 并且在使用时, 必须使用keyword的形式进行输入. """""" def deco(func): func_hash = hash(func) if not _auto_decode_and_soupify_implementation_ok_mapper \ .get(func_hash, False): validate_implementation_for_auto_decode_and_soupify(func) _auto_decode_and_soupify_implementation_ok_mapper[func_hash] = True def wrapper(*args, **kwargs): try: response = kwargs.get(""response"") html = kwargs.get(""html"") soup = kwargs.get(""soup"") except KeyError as e: raise NotImplementedError( (""{func} method has to take the keyword syntax input: "" ""{e}"").format(func=func, e=e) ) if html is None: binary = access_binary(response) try: html = decoder.decode( binary=binary, url=response.url, encoding=encoding, errors=errors, ) except Exception as e: # pragma: no cover raise DecodeError(str(e)) kwargs[""html""] = html if soup is None: soup = soupify(html) kwargs[""soup""] = soup return func(*args, **kwargs) return wrapper return deco" 2187,"def check(a, b): """""" Checks to see if the two values are equal to each other. :param a | <str> b | <str> :return <bool> """""" aencrypt = encrypt(a) bencrypt = encrypt(b) return a == b or a == bencrypt or aencrypt == b" 2188,"def decodeBase64(text, encoding='utf-8'): """""" Decodes a base 64 string. 
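A quick round trip with the standard base64 module, in the spirit of the encode/decode helpers in this block; the sample text and encoding are arbitrary.

import base64

text = 'projex'
encoded = base64.b64encode(text.encode('utf-8'))
decoded = base64.b64decode(encoded).decode('utf-8')
assert decoded == text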
:param text | <str> encoding | <str> :return <str> """""" text = projex.text.toBytes(text, encoding) return projex.text.toUnicode(base64.b64decode(text), encoding)" 2189,"def decrypt(text, key=None): """""" Decrypts the inputted text using the inputted key. :param text | <str> key | <str> :return <str> """""" if key is None: key = ENCRYPT_KEY bits = len(key) text = base64.b64decode(text) iv = text[:16] cipher = AES.new(key, AES.MODE_CBC, iv) return unpad(cipher.decrypt(text[16:]))" 2190,"def decryptfile(filename, key=None, outfile=None, chunk=64 * 1024): """""" Decrypts a file using AES (CBC mode) with the given key. If no file is supplied, then the inputted file will be modified in place. The chunk value will be the size with which the function uses to read and encrypt the file. Larger chunks can be faster for some files and machines. The chunk MUST be divisible by 16. :param text | <str> key | <str> outfile | <str> || None chunk | <int> """""" if key is None: key = ENCRYPT_KEY if not outfile: outfile = os.path.splitext(filename)[0] with open(filename, 'rb') as input: origsize = struct.unpack('<Q', input.read(struct.calcsize('Q')))[0] iv = input.read(16) cipher = AES.new(key, AES.MODE_CBC, iv) with open(outfile, 'wb') as output: while True: data = input.read(chunk) if len(data) == 0: break data = cipher.decrypt(data) data = unpad(data) output.write(data) output.truncate(origsize)" 2191,"def encodeBase64(text, encoding='utf-8'): """""" Decodes a base 64 string. :param text | <str> encoding | <str> :return <str> """""" text = projex.text.toBytes(text, encoding) return base64.b64encode(text)" 2192,"def encrypt(text, key=None): """""" Encrypts the inputted text using the AES cipher. If the PyCrypto module is not included, this will simply encode the inputted text to base64 format. :param text | <str> key | <str> :return <str> """""" if key is None: key = ENCRYPT_KEY bits = len(key) text = pad(text, bits) iv = Random.new().read(16) cipher = AES.new(key, AES.MODE_CBC, iv) return base64.b64encode(iv + cipher.encrypt(text))" 2193,"def encryptfile(filename, key=None, outfile=None, chunk=64 * 1024): """""" Encrypts a file using AES (CBC mode) with the given key. If no file is supplied, then the inputted file will be modified in place. The chunk value will be the size with which the function uses to read and encrypt the file. Larger chunks can be faster for some files and machines. The chunk MUST be divisible by 16. :param text | <str> key | <str> outfile | <str> || None chunk | <int> """""" if key is None: key = ENCRYPT_KEY if not outfile: outfile = filename + '.enc' iv = Random.new().read(16) cipher = AES.new(key, AES.MODE_CBC, iv) filesize = os.path.getsize(filename) with open(filename, 'rb') as input: with open(outfile, 'wb') as output: output.write(struct.pack('<Q', filesize)) output.write(iv) while True: data = input.read(chunk) if len(data) == 0: break data = pad(data, len(key)) output.write(cipher.encrypt(data))" 2194,"def generateKey(password, bits=32): """""" Generates a new encryption key based on the inputted password. :param password | <str> bits | <int> | 16 or 32 bits :return <str> """""" if bits == 32: hasher = hashlib.sha256 elif bits == 16: hasher = hashlib.md5 else: raise StandardError('Invalid hash type') return hasher(password).digest()" 2195,"def generateToken(bits=32): """""" Generates a random token based on the given parameters. 
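A condensed sketch of password-to-key derivation with hashlib, mirroring the 16/32-byte choice in generateKey() above; the password is illustrative.

import hashlib

def make_key(password, bits=32):
    # 32 -> 256-bit key via SHA-256, 16 -> 128-bit key via MD5
    hasher = hashlib.sha256 if bits == 32 else hashlib.md5
    return hasher(password.encode('utf-8')).digest()

assert len(make_key('correct horse battery staple')) == 32
assert len(make_key('correct horse battery staple', bits=16)) == 16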
:return <str> """""" if bits == 64: hasher = hashlib.sha256 elif bits == 32: hasher = hashlib.md5 else: raise StandardError('Unknown bit level.') return hasher(nstr(random.getrandbits(256))).hexdigest()" 2196,"def pad(text, bits=32): """""" Pads the inputted text to ensure it fits the proper block length for encryption. :param text | <str> bits | <int> :return <str> """""" return text + (bits - len(text) % bits) * chr(bits - len(text) % bits)" 2197,"def Client(version=__version__, resource=None, provider=None, **kwargs): """"""Initialize client object based on given version. :params version: version of CAL, define at setup.cfg :params resource: resource type (network, compute, object_storage, block_storage) :params provider: provider object :params cloud_config: cloud auth config :params **kwargs: specific args for resource :return: class Client HOW-TO: The simplest way to create a client instance is initialization:: >> from calplus import client >> calplus = client.Client(version='1.0.0', resource='compute', provider=provider_object, some_needed_args_for_ComputeClient) """""" versions = _CLIENTS.keys() if version not in versions: raise exceptions.UnsupportedVersion( 'Unknown client version or subject' ) if provider is None: raise exceptions.ProviderNotDefined( 'Not define Provider for Client' ) support_types = CONF.providers.driver_mapper.keys() if provider.type not in support_types: raise exceptions.ProviderTypeNotFound( 'Unknow provider.' ) resources = _CLIENTS[version].keys() if not resource: raise exceptions.ResourceNotDefined( 'Not define Resource, choose one: compute, network,\ object_storage, block_storage.' ) elif resource.lower() not in resources: raise exceptions.ResourceNotFound( 'Unknow resource: compute, network,\ object_storage, block_storage.' ) LOG.info('Instantiating {} client ({})' . format(resource, version)) return _CLIENTS[version][resource]( provider.type, provider.config, **kwargs)" 2198,"def accession(self): """""" Parse accession number from commonly supported formats. If the defline does not match one of the following formats, the entire description (sans leading caret) will be returned. * >gi|572257426|ref|XP_006607122.1| * >gnl|Tcas|XP_008191512.1 * >lcl|PdomMRNAr1.2-10981.1 """""" accession = None if self.defline.startswith('>gi|'): match = re.match('>gi\|\d+\|[^\|]+\|([^\|\n ]+)', self.defline) if match: accession = match.group(1) elif self.defline.startswith('>gnl|'): match = re.match('>gnl\|[^\|]+\|([^\|\n ]+)', self.defline) if match: accession = match.group(1) elif self.defline.startswith('>lcl|'): match = re.match('>lcl\|([^\|\n ]+)', self.defline) if match: accession = match.group(1) return accession" 2199,"def format_seq(self, outstream=None, linewidth=70): """""" Print a sequence in a readable format. 
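The fixed-width wrapping that format_seq() implements, reduced to a one-expression sketch; the sequence and width are illustrative.

def wrap_seq(seq, width=70):
    # return the sequence split into fixed-width lines
    return '\n'.join(seq[i:i + width] for i in range(0, len(seq), width))

print(wrap_seq('ACGT' * 30, width=40))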
:param outstream: if `None`, formatted sequence is returned as a string; otherwise, it is treated as a file-like object and the formatted sequence is printed to the outstream :param linewidth: width for wrapping sequences over multiple lines; set to 0 for no wrapping """""" if linewidth == 0 or len(self.seq) <= linewidth: if outstream is None: return self.seq else: print(self.seq, file=outstream) return i = 0 seq = '' while i < len(self.seq): if outstream is None: seq += self.seq[i:i+linewidth] + '\n' else: print(self.seq[i:i+linewidth], file=outstream) i += linewidth if outstream is None: return seq" 2200,"def get_validator(filter_data): """""" ask every matcher whether it can serve such filter data :param filter_data: :return: """""" for matcher_type, m in matchers.items(): if hasattr(m, 'can_handle') and m.can_handle(filter_data): filter_data = m.handle(filter_data) return filter_data" 2201,"def run(): """"""Run the examples"""""" # NOTE(kiennt): Until now, this example isn't finished yet, # because we don't have any completed driver # Get a network client with openstack driver. network_client = client.Client(version=_VERSION, resource=_RESOURCES[0], provider=_PROVIDER) # net = network_client.create('daikk', '10.0.0.0/24') # list_subnet = network_client.list() # network_client.show(list_subnet[0].get(""id"")) network_client.delete(""4b983028-0f8c-4b63-b10c-6e8420bb7903"")" 2202,"def sort(self, attr): """"""Sort the ratings based on an attribute"""""" self.entries = Sorter(self.entries, self.category, attr).sort_entries() return self" 2203,"def get_title(self): """"""Title is either the chart header for a cable ratings page or above the opening description for a broadcast ratings page. """""" if self.category == 'cable': strings = get_strings(self.soup, 'strong') else: strings = get_strings(self.soup, 'b') if len(strings) == 0: strings = get_strings(self.soup, 'strong') if len(strings) >= 1 and self.category == 'cable': return strings[0] elif len(strings) > 0 and 'Fast' in strings[-1]: return strings[0] return ''.join(strings)" 2204,"def get_json(self): """"""Serialize ratings object as JSON-formatted string"""""" ratings_dict = { 'category': self.category, 'date': self.date, 'day': self.weekday, 'next week': self.next_week, 'last week': self.last_week, 'entries': self.entries, 'url': self.url } return to_json(ratings_dict)" 2205,"def _get_url_params(self, shorten=True): """"""Returns a list of each parameter to be used for the url format."""""" cable = True if self.category == 'cable' else False url_date = convert_month(self.date, shorten=shorten, cable=cable) return [ BASE_URL, self.weekday.lower(), self.category + '-ratings', url_date.replace(' ', '-') ]" 2206,"def _match_show(self, show): """"""Match a query for a specific show/list of shows"""""" if self.show: return match_list(self.show, show) else: return True" 2207,"def _match_net(self, net): """"""Match a query for a specific network/list of networks"""""" if self.network: return match_list(self.network, net) else: return True" 2208,"def _verify_page(self): """"""Verify the ratings page matches the correct date"""""" title_date = self._get_date_in_title().lower() split_date = self.date.lower().split() split_date[0] = split_date[0][:3] return all(term in title_date for term in split_date)" 2209,"def _get_ratings_page(self): """"""Do a limited search for the correct url."""""" # Use current posted date to build url self._build_url() soup = get_soup(self.url) if soup: return soup # Try building url again with unshortened month 
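A quick check of the gi-style defline pattern used by accession() above; the defline string is fabricated for illustration.

import re

defline = '>gi|572257426|ref|XP_006607122.1| hypothetical protein'
match = re.match(r'>gi\|\d+\|[^|]+\|([^|\n ]+)', defline)
print(match.group(1))  # XP_006607122.1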
self._build_url(shorten=False) soup = get_soup(self.url) if soup: return soup # If not page is found, use search return SearchDaily(self.category, date=self.date).fetch_result()" 2210,"def _build_url(self, shorten=True): """"""Build the url for a cable ratings page"""""" self.url = URL_FORMAT.format(*self._get_url_params(shorten=shorten))" 2211,"def fetch_entries(self): """"""Fetch data and parse it to build a list of cable entries."""""" data = [] for row in self.get_rows(): # Stop fetching data if limit has been met if exceeded_limit(self.limit, len(data)): break entry = row.find_all('td') entry_dict = {} show = entry[0].string net = entry[1].string if not self._match_query(show, net): continue entry_dict['show'] = show entry_dict['net'] = net entry_dict['time'] = entry[2].string if ',' in entry[3].string: entry_dict['viewers'] = entry[3].string.replace(',', '.') else: entry_dict['viewers'] = '0.' + entry[3].string entry_dict['rating'] = entry[4].string # Add data to create cable entry data.append(Entry(**entry_dict)) return data" 2212,"def _build_url(self, shorten=True): """"""Build the url for a broadcast ratings page"""""" url_order = self._get_url_params(shorten=shorten) # For fast ratings, switch weekday and category in url if self.category != 'final': url_order[1], url_order[2] = url_order[2], url_order[1] self.url = URL_FORMAT.format(*url_order)" 2213,"def get_rows(self): """"""Get the rows from a broadcast ratings chart"""""" table = self.soup.find_all('tr')[1:-3] return [row for row in table if row.contents[3].string]" 2214,"def fetch_entries(self): """"""Fetch data and parse it to build a list of broadcast entries."""""" current_time = '' data = [] for row in self.get_rows(): # Stop fetching data if limit has been met if exceeded_limit(self.limit, len(data)): break entry = row.find_all('td') entry_dict = {} show_time = entry[0].string if show_time and show_time != current_time: current_time = show_time if not show_time: show_time = current_time entry_dict['time'] = show_time show_string = entry[1].string.split('(') show = show_string[0][:-1] net = self._get_net(show_string) if not self._match_query(show, net): continue entry_dict['show'] = show entry_dict['net'] = net entry_dict['viewers'] = entry[3].string.strip('*') entry_dict['rating'], entry_dict['share'] = self._get_rating(entry) # Add data to initialize broadcast entry data.append(Entry(**entry_dict)) return data" 2215,"def get_averages(self): """"""Get the broadcast network averages for that day. 
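A toy table row in the shape the cable fetch_entries() above expects, showing the cell order and the viewers normalization; the markup is fabricated.

from bs4 import BeautifulSoup

html = ('<table><tr><td>Some Show</td><td>ESPN</td>'
        '<td>8:00 PM</td><td>1,234</td><td>0.4</td></tr></table>')
row = BeautifulSoup(html, 'html.parser').find('tr')
show, net, time_slot, viewers, rating = [td.string for td in row.find_all('td')]
viewers = viewers.replace(',', '.') if ',' in viewers else '0.' + viewers
print(show, net, time_slot, viewers, rating)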
Returns a dictionary: key: network name value: sub-dictionary with 'viewers', 'rating', and 'share' as keys """""" networks = [unescape_html(n.string) for n in self.soup.find_all('td', width='77')] table = self.soup.find_all('td', style=re.compile('^font')) # Each element is a list split as [rating, share] rateshares = [r.string.split('/') for r in table[:5] if r.string] viewers = [v.string for v in table[5:] if v.string] averages = {} # Load the averages dict for index, network in enumerate(networks): viewer = convert_float(unescape_html(viewers[index])) rating = convert_float(unescape_html(rateshares[index][0])) share = convert_float(unescape_html(rateshares[index][1])) averages[network] = {'viewer': viewer, 'rating': rating, 'share': share} return averages" 2216,"def _get_net(self, entry): """"""Get the network for a specific row"""""" try: net = entry[1] return net[net.find('(')+1:net.find(')')] except IndexError: return None" 2217,"def _get_rating(self, entry): """"""Get the rating and share for a specific row"""""" r_info = '' for string in entry[2].strings: r_info += string rating, share = r_info.split('/') return (rating, share.strip('*'))" 2218,"def _visit(self, L, marked, tempmarked): """""" Sort features topologically. This recursive function uses depth-first search to find an ordering of the features in the feature graph that is sorted both topologically and with respect to genome coordinates. Implementation based on Wikipedia's description of the algorithm in Cormen's *Introduction to Algorithms*. http://en.wikipedia.org/wiki/Topological_sorting#Algorithms There are potentially many valid topological sorts of a feature graph, but only one that is also sorted with respect to genome coordinates (excluding different orderings of, for example, exons and CDS features with the same coordinates). Iterating through feature children in reversed order (in this functions' inner-most loop) seems to be the key to sorting with respect to genome coordinates. """""" assert not self.is_pseudo if self in tempmarked: raise Exception('feature graph is cyclic') if self not in marked: tempmarked[self] = True features = list() if self.siblings is not None and self.is_toplevel: features.extend(reversed(self.siblings)) if self.children is not None: features.extend(reversed(self.children)) if len(features) > 0: for feature in features: feature._visit(L, marked, tempmarked) marked[self] = True del tempmarked[self] L.insert(0, self)" 2219,"def add_child(self, child, rangecheck=False): """"""Add a child feature to this feature."""""" assert self.seqid == child.seqid, \ ( 'seqid mismatch for feature {} ({} vs {})'.format( self.fid, self.seqid, child.seqid ) ) if rangecheck is True: assert self._strand == child._strand, \ ('child of feature {} has a different strand'.format(self.fid)) assert self._range.contains(child._range), \ ( 'child of feature {} is not contained within its span ' '({}-{})'.format(self.fid, child.start, child.end) ) if self.children is None: self.children = list() self.children.append(child) self.children.sort()" 2220,"def pseudoify(self): """""" Derive a pseudo-feature parent from the given multi-feature. The provided multi-feature does not need to be the representative. The newly created pseudo-feature has the same seqid as the provided multi- feature, and spans its entire range. Otherwise, the pseudo-feature is empty. It is used only for convenience in sorting. 
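The depth-first ordering used by _visit() above, as a standalone sketch over a plain dict graph; node names are illustrative.

def topo_sort(graph):
    # graph: dict mapping node -> list of children; parents end up before children
    order, done, in_progress = [], set(), set()

    def visit(node):
        if node in in_progress:
            raise ValueError('graph is cyclic')
        if node in done:
            return
        in_progress.add(node)
        for child in reversed(graph.get(node, [])):
            visit(child)
        in_progress.discard(node)
        done.add(node)
        order.insert(0, node)

    for node in graph:
        visit(node)
    return order

print(topo_sort({'gene': ['mRNA'], 'mRNA': ['exon', 'CDS']}))
# -> ['gene', 'mRNA', 'exon', 'CDS']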
"""""" assert self.is_toplevel assert self.is_multi assert len(self.multi_rep.siblings) > 0 rep = self.multi_rep start = min([s.start for s in rep.siblings + [rep]]) end = max([s.end for s in rep.siblings + [rep]]) parent = Feature(None) parent._pseudo = True parent._seqid = self._seqid parent.set_coord(start, end) parent._strand = self._strand for sibling in rep.siblings + [rep]: parent.add_child(sibling, rangecheck=True) parent.children = sorted(parent.children) rep.siblings = sorted(rep.siblings) return parent" 2221,"def slug(self): """""" A concise slug for this feature. Unlike the internal representation, which is 0-based half-open, the slug is a 1-based closed interval (a la GFF3). """""" return '{:s}@{:s}[{:d}, {:d}]'.format(self.type, self.seqid, self.start + 1, self.end)" 2222,"def add_sibling(self, sibling): """""" Designate this a multi-feature representative and add a co-feature. Some features exist discontinuously on the sequence, and therefore cannot be declared with a single GFF3 entry (which can encode only a single interval). The canonical encoding for these types of features is called a multi-feature, in which a single feature is declared on multiple lines with multiple entries all sharing the same feature type and ID attribute. This is commonly done with coding sequence (CDS) features. In this package, each multi-feature has a single ""representative"" feature object, and all other objects/entries associated with that multi-feature are attached to it as ""siblings"". Invoking this method will designate the calling feature as the multi-feature representative and add the argument as a sibling. """""" assert self.is_pseudo is False if self.siblings is None: self.siblings = list() self.multi_rep = self sibling.multi_rep = self self.siblings.append(sibling)" 2223,"def source(self, newsource): """"""When modifying source, also update children with matching source."""""" oldsource = self.source for feature in self: if feature.source == oldsource: feature._source = newsource" 2224,"def type(self, newtype): """"""If the feature is a multifeature, update all entries."""""" self._type = newtype if self.is_multi: for sibling in self.multi_rep.siblings: sibling._type = newtype" 2225,"def transform(self, offset, newseqid=None): """"""Transform the feature's coordinates by the given offset."""""" for feature in self: feature._range.transform(offset) if newseqid is not None: feature.seqid = newseqid" 2226,"def add_attribute(self, attrkey, attrvalue, append=False, oldvalue=None): """""" Add an attribute to this feature. Feature attributes are stored as nested dictionaries. Each feature can only have one ID, so ID attribute mapping is 'string' to 'string'. All other attributes can have multiple values, so mapping is 'string' to 'dict of strings'. By default, adding an attribute that already exists will cause the old value to be overwritten. If the `append` option is true, the new attribute value will not overwrite the old value, but will be appended as a second value. (Note: ID attributes can have only 1 value.) If the `oldvalue` option is set, the new value will replace the old value. This is necessary for updating an attribute that has multiple values without completely overwriting all old values. (Note: The `append` option is ignored when `oldvalue` is set.) 
"""""" # Handle ID/Parent relationships if attrkey == 'ID': if self.children is not None: oldid = self.get_attribute('ID') for child in self.children: child.add_attribute('Parent', attrvalue, oldvalue=oldid) self._attrs[attrkey] = attrvalue if self.is_multi: self.multi_rep._attrs[attrkey] = attrvalue for sibling in self.multi_rep.siblings: sibling._attrs[attrkey] = attrvalue return # Handle all other attribute types if oldvalue is not None: if attrkey in self._attrs: assert oldvalue in self._attrs[attrkey] del self._attrs[attrkey][oldvalue] if attrkey not in self._attrs or append is False: self._attrs[attrkey] = dict() self._attrs[attrkey][attrvalue] = True" 2227,"def get_attribute(self, attrkey, as_string=False, as_list=False): """""" Get the value of an attribute. By default, returns a string for ID and attributes with a single value, and a list of strings for attributes with multiple values. The `as_string` and `as_list` options can be used to force the function to return values as a string (comma-separated in case of multiple values) or a list. """""" assert not as_string or not as_list if attrkey not in self._attrs: return None if attrkey == 'ID': return self._attrs[attrkey] attrvalues = list(self._attrs[attrkey]) attrvalues.sort() if len(attrvalues) == 1 and not as_list: return attrvalues[0] elif as_string: return ','.join(attrvalues) return attrvalues" 2228,"def parse_attributes(self, attrstring): """""" Parse an attribute string. Given a string with semicolon-separated key-value pairs, populate a dictionary with the given attributes. """""" if attrstring in [None, '', '.']: return dict() attributes = dict() keyvaluepairs = attrstring.split(';') for kvp in keyvaluepairs: if kvp == '': continue key, value = kvp.split('=') if key == 'ID': assert ',' not in value attributes[key] = value continue values = value.split(',') valdict = dict((val, True) for val in values) attributes[key] = valdict return attributes" 2229,"def attribute_crawl(self, key): """""" Grab all attribute values associated with the given feature. Traverse the given feature (and all of its descendants) to find all values associated with the given attribute key. >>> import tag >>> reader = tag.GFF3Reader(tag.pkgdata('otau-no-seqreg.gff3')) >>> features = tag.select.features(reader) >>> for feature in features: ... names = feature.attribute_crawl('Name') ... print(sorted(list(names))) ['Ot01g00060', 'XM_003074019.1', 'XP_003074065.1'] ['Ot01g00070', 'XM_003074020.1', 'XP_003074066.1'] ['Ot01g00080', 'XM_003074021.1', 'XP_003074067.1'] ['Ot01g00090', 'XM_003074022.1', 'XP_003074068.1'] ['Ot01g00100', 'XM_003074023.1', 'XP_003074069.1'] ['Ot01g00110', 'XM_003074024.1', 'XP_003074070.1'] """""" union = set() for feature in self: values = feature.get_attribute(key, as_list=True) if values is not None: union.update(set(values)) return union" 2230,"def ncbi_geneid(self): """""" Retrieve this feature's NCBI GeneID if it's present. NCBI GFF3 files contain gene IDs encoded in **Dbxref** attributes (example: `Dbxref=GeneID:103504972`). This function locates and returns the GeneID if present, or returns `None` otherwise. """""" values = self.get_attribute('Dbxref', as_list=True) if values is None: return None for value in values: if value.startswith('GeneID:'): key, geneid = value.split(':') return geneid return None" 2231,"def cdslen(self): """""" Translated length of this feature. Undefined for non-mRNA features. 
"""""" if self.type != 'mRNA': return None return sum([len(c) for c in self.children if c.type == 'CDS'])" 2232,"def parse_querystring(msg): 'parse a querystring into keys and values' for part in msg.querystring.strip().lstrip('?').split('&'): key, value = part.split('=') yield key, value" 2233,"def AddClusterTags(r, tags, dry_run=False): """""" Adds tags to the cluster. @type tags: list of str @param tags: tags to add to the cluster @type dry_run: bool @param dry_run: whether to perform a dry run @rtype: int @return: job id """""" query = { ""dry-run"": dry_run, ""tag"": tags, } return r.request(""put"", ""/2/tags"", query=query)" 2234,"def DeleteClusterTags(r, tags, dry_run=False): """""" Deletes tags from the cluster. @type tags: list of str @param tags: tags to delete @type dry_run: bool @param dry_run: whether to perform a dry run """""" query = { ""dry-run"": dry_run, ""tag"": tags, } return r.request(""delete"", ""/2/tags"", query=query)" 2235,"def GetInstances(r, bulk=False): """""" Gets information about instances on the cluster. @type bulk: bool @param bulk: whether to return all information about all instances @rtype: list of dict or list of str @return: if bulk is True, info about the instances, else a list of instances """""" if bulk: return r.request(""get"", ""/2/instances"", query={""bulk"": 1}) else: instances = r.request(""get"", ""/2/instances"") return r.applier(itemgetters(""id""), instances)" 2236,"def GetInstanceInfo(r, instance, static=None): """""" Gets information about an instance. @type instance: string @param instance: Instance name @rtype: string @return: Job ID """""" if static is None: return r.request(""get"", ""/2/instances/%s/info"" % instance) else: return r.request(""get"", ""/2/instances/%s/info"" % instance, query={""static"": static})" 2237,"def CreateInstance(r, mode, name, disk_template, disks, nics, **kwargs): """""" Creates a new instance. More details for parameters can be found in the RAPI documentation. @type mode: string @param mode: Instance creation mode @type name: string @param name: Hostname of the instance to create @type disk_template: string @param disk_template: Disk template for instance (e.g. plain, diskless, file, or drbd) @type disks: list of dicts @param disks: List of disk definitions @type nics: list of dicts @param nics: List of NIC definitions @type dry_run: bool @keyword dry_run: whether to perform a dry run @type no_install: bool @keyword no_install: whether to create without installing OS(true=don't install) @rtype: int @return: job id """""" if INST_CREATE_REQV1 not in r.features: raise GanetiApiError(""Cannot create Ganeti 2.1-style instances"") query = {} if kwargs.get(""dry_run""): query[""dry-run""] = 1 if kwargs.get(""no_install""): query[""no-install""] = 1 # Make a version 1 request. body = { _REQ_DATA_VERSION_FIELD: 1, ""mode"": mode, ""name"": name, ""disk_template"": disk_template, ""disks"": disks, ""nics"": nics, } conflicts = set(kwargs.iterkeys()) & set(body.iterkeys()) if conflicts: raise GanetiApiError(""Required fields can not be specified as"" "" keywords: %s"" % "", "".join(conflicts)) kwargs.pop(""dry_run"", None) body.update(kwargs) return r.request(""post"", ""/2/instances"", query=query, content=body)" 2238,"def DeleteInstance(r, instance, dry_run=False): """""" Deletes an instance. 
@type instance: str @param instance: the instance to delete @rtype: int @return: job id """""" return r.request(""delete"", ""/2/instances/%s"" % instance, query={""dry-run"": dry_run})" 2239,"def ActivateInstanceDisks(r, instance, ignore_size=False): """""" Activates an instance's disks. @type instance: string @param instance: Instance name @type ignore_size: bool @param ignore_size: Whether to ignore recorded size @return: job id """""" return r.request(""put"", ""/2/instances/%s/activate-disks"" % instance, query={""ignore_size"": ignore_size})" 2240,"def RecreateInstanceDisks(r, instance, disks=None, nodes=None): """"""Recreate an instance's disks. @type instance: string @param instance: Instance name @type disks: list of int @param disks: List of disk indexes @type nodes: list of string @param nodes: New instance nodes, if relocation is desired @rtype: string @return: job id """""" body = {} if disks is not None: body[""disks""] = disks if nodes is not None: body[""nodes""] = nodes return r.request(""post"", ""/2/instances/%s/recreate-disks"" % instance, content=body)" 2241,"def GrowInstanceDisk(r, instance, disk, amount, wait_for_sync=False): """""" Grows a disk of an instance. More details for parameters can be found in the RAPI documentation. @type instance: string @param instance: Instance name @type disk: integer @param disk: Disk index @type amount: integer @param amount: Grow disk by this amount (MiB) @type wait_for_sync: bool @param wait_for_sync: Wait for disk to synchronize @rtype: int @return: job id """""" body = { ""amount"": amount, ""wait_for_sync"": wait_for_sync, } return r.request(""post"", ""/2/instances/%s/disk/%s/grow"" % (instance, disk), content=body)" 2242,"def AddInstanceTags(r, instance, tags, dry_run=False): """""" Adds tags to an instance. @type instance: str @param instance: instance to add tags to @type tags: list of str @param tags: tags to add to the instance @type dry_run: bool @param dry_run: whether to perform a dry run @rtype: int @return: job id """""" query = { ""tag"": tags, ""dry-run"": dry_run, } return r.request(""put"", ""/2/instances/%s/tags"" % instance, query=query)" 2243,"def DeleteInstanceTags(r, instance, tags, dry_run=False): """""" Deletes tags from an instance. @type instance: str @param instance: instance to delete tags from @type tags: list of str @param tags: tags to delete @type dry_run: bool @param dry_run: whether to perform a dry run """""" query = { ""tag"": tags, ""dry-run"": dry_run, } return r.request(""delete"", ""/2/instances/%s/tags"" % instance, query=query)" 2244,"def RebootInstance(r, instance, reboot_type=None, ignore_secondaries=False, dry_run=False): """""" Reboots an instance. @type instance: str @param instance: instance to rebot @type reboot_type: str @param reboot_type: one of: hard, soft, full @type ignore_secondaries: bool @param ignore_secondaries: if True, ignores errors for the secondary node while re-assembling disks (in hard-reboot mode only) @type dry_run: bool @param dry_run: whether to perform a dry run """""" query = { ""ignore_secondaries"": ignore_secondaries, ""dry-run"": dry_run, } if reboot_type: if reboot_type not in (""hard"", ""soft"", ""full""): raise GanetiApiError(""reboot_type must be one of 'hard',"" "" 'soft', or 'full'"") query[""type""] = reboot_type return r.request(""post"", ""/2/instances/%s/reboot"" % instance, query=query)" 2245,"def ShutdownInstance(r, instance, dry_run=False, no_remember=False, timeout=120): """""" Shuts down an instance. 
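All of these wrappers reduce to r.request(verb, path, query=..., content=...); purely to illustrate the underlying HTTP shape, here is a rough requests-based equivalent of the shutdown call. The base URL, auth and TLS handling are assumptions, not part of this module.

import requests

def shutdown_instance(base_url, instance, dry_run=False, timeout=120, auth=None):
    # PUT /2/instances/<name>/shutdown with the dry-run flag in the query
    # string and the timeout in the JSON body, mirroring the ShutdownInstance
    # wrapper defined here
    return requests.put(
        '%s/2/instances/%s/shutdown' % (base_url, instance),
        params={'dry-run': int(dry_run)},
        json={'timeout': timeout},
        auth=auth,
        verify=False,  # assumption: self-signed RAPI certificate
    )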
@type instance: str @param instance: the instance to shut down @type dry_run: bool @param dry_run: whether to perform a dry run @type no_remember: bool @param no_remember: if true, will not record the state change @rtype: string @return: job id """""" query = { ""dry-run"": dry_run, ""no-remember"": no_remember, } content = { ""timeout"": timeout, } return r.request(""put"", ""/2/instances/%s/shutdown"" % instance, query=query, content=content)" 2246,"def StartupInstance(r, instance, dry_run=False, no_remember=False): """""" Starts up an instance. @type instance: str @param instance: the instance to start up @type dry_run: bool @param dry_run: whether to perform a dry run @type no_remember: bool @param no_remember: if true, will not record the state change @rtype: string @return: job id """""" query = { ""dry-run"": dry_run, ""no-remember"": no_remember, } return r.request(""put"", ""/2/instances/%s/startup"" % instance, query=query)" 2247,"def ReinstallInstance(r, instance, os=None, no_startup=False, osparams=None): """""" Reinstalls an instance. @type instance: str @param instance: The instance to reinstall @type os: str or None @param os: The operating system to reinstall. If None, the instance's current operating system will be installed again @type no_startup: bool @param no_startup: Whether to start the instance automatically """""" if INST_REINSTALL_REQV1 in r.features: body = { ""start"": not no_startup, } if os is not None: body[""os""] = os if osparams is not None: body[""osparams""] = osparams return r.request(""post"", ""/2/instances/%s/reinstall"" % instance, content=body) # Use old request format if osparams: raise GanetiApiError(""Server does not support specifying OS"" "" parameters for instance reinstallation"") query = { ""nostartup"": no_startup, } if os: query[""os""] = os return r.request(""post"", ""/2/instances/%s/reinstall"" % instance, query=query)" 2248,"def ReplaceInstanceDisks(r, instance, disks=None, mode=REPLACE_DISK_AUTO, remote_node=None, iallocator=None, dry_run=False): """""" Replaces disks on an instance. @type instance: str @param instance: instance whose disks to replace @type disks: list of ints @param disks: Indexes of disks to replace @type mode: str @param mode: replacement mode to use (defaults to replace_auto) @type remote_node: str or None @param remote_node: new secondary node to use (for use with replace_new_secondary mode) @type iallocator: str or None @param iallocator: instance allocator plugin to use (for use with replace_auto mode) @type dry_run: bool @param dry_run: whether to perform a dry run @rtype: int @return: job id """""" if mode not in REPLACE_DISK: raise GanetiApiError(""Invalid mode %r not one of %r"" % (mode, REPLACE_DISK)) query = { ""mode"": mode, ""dry-run"": dry_run, } if disks: query[""disks""] = "","".join(str(idx) for idx in disks) if remote_node: query[""remote_node""] = remote_node if iallocator: query[""iallocator""] = iallocator return r.request(""post"", ""/2/instances/%s/replace-disks"" % instance, query=query)" 2249,"def ExportInstance(r, instance, mode, destination, shutdown=None, remove_instance=None, x509_key_name=None, destination_x509_ca=None): """""" Exports an instance. 
@type instance: string @param instance: Instance name @type mode: string @param mode: Export mode @rtype: string @return: Job ID """""" body = { ""destination"": destination, ""mode"": mode, } if shutdown is not None: body[""shutdown""] = shutdown if remove_instance is not None: body[""remove_instance""] = remove_instance if x509_key_name is not None: body[""x509_key_name""] = x509_key_name if destination_x509_ca is not None: body[""destination_x509_ca""] = destination_x509_ca return r.request(""put"", ""/2/instances/%s/export"" % instance, content=body)" 2250,"def MigrateInstance(r, instance, mode=None, cleanup=None): """""" Migrates an instance. @type instance: string @param instance: Instance name @type mode: string @param mode: Migration mode @type cleanup: bool @param cleanup: Whether to clean up a previously failed migration """""" body = {} if mode is not None: body[""mode""] = mode if cleanup is not None: body[""cleanup""] = cleanup return r.request(""put"", ""/2/instances/%s/migrate"" % instance, content=body)" 2251,"def FailoverInstance(r, instance, iallocator=None, ignore_consistency=False, target_node=None): """"""Does a failover of an instance. @type instance: string @param instance: Instance name @type iallocator: string @param iallocator: Iallocator for deciding the target node for shared-storage instances @type ignore_consistency: bool @param ignore_consistency: Whether to ignore disk consistency @type target_node: string @param target_node: Target node for shared-storage instances @rtype: string @return: job id """""" body = { ""ignore_consistency"": ignore_consistency, } if iallocator is not None: body[""iallocator""] = iallocator if target_node is not None: body[""target_node""] = target_node return r.request(""put"", ""/2/instances/%s/failover"" % instance, content=body)" 2252,"def RenameInstance(r, instance, new_name, ip_check, name_check=None): """""" Changes the name of an instance. @type instance: string @param instance: Instance name @type new_name: string @param new_name: New instance name @type ip_check: bool @param ip_check: Whether to ensure instance's IP address is inactive @type name_check: bool @param name_check: Whether to ensure instance's name is resolvable """""" body = { ""ip_check"": ip_check, ""new_name"": new_name, } if name_check is not None: body[""name_check""] = name_check return r.request(""put"", ""/2/instances/%s/rename"" % instance, content=body)" 2253,"def WaitForJobChange(r, job_id, fields, prev_job_info, prev_log_serial): """""" Waits for job changes. @type job_id: int @param job_id: Job ID for which to wait """""" body = { ""fields"": fields, ""previous_job_info"": prev_job_info, ""previous_log_serial"": prev_log_serial, } return r.request(""get"", ""/2/jobs/%s/wait"" % job_id, content=body)" 2254,"def CancelJob(r, job_id, dry_run=False): """""" Cancels a job. @type job_id: int @param job_id: id of the job to delete @type dry_run: bool @param dry_run: whether to perform a dry run """""" return r.request(""delete"", ""/2/jobs/%s"" % job_id, query={""dry-run"": dry_run})" 2255,"def GetNodes(r, bulk=False): """""" Gets all nodes in the cluster. 
@type bulk: bool @param bulk: whether to return all information about all instances @rtype: list of dict or str @return: if bulk is true, info about nodes in the cluster, else list of nodes in the cluster """""" if bulk: return r.request(""get"", ""/2/nodes"", query={""bulk"": 1}) else: nodes = r.request(""get"", ""/2/nodes"") return r.applier(itemgetters(""id""), nodes)" 2256,"def EvacuateNode(r, node, iallocator=None, remote_node=None, dry_run=False, early_release=False, mode=None, accept_old=False): """""" Evacuates instances from a Ganeti node. @type node: str @param node: node to evacuate @type iallocator: str or None @param iallocator: instance allocator to use @type remote_node: str @param remote_node: node to evaucate to @type dry_run: bool @param dry_run: whether to perform a dry run @type early_release: bool @param early_release: whether to enable parallelization @type accept_old: bool @param accept_old: Whether caller is ready to accept old-style (pre-2.5) results @rtype: string, or a list for pre-2.5 results @return: Job ID or, if C{accept_old} is set and server is pre-2.5, list of (job ID, instance name, new secondary node); if dry_run was specified, then the actual move jobs were not submitted and the job IDs will be C{None} @raises GanetiApiError: if an iallocator and remote_node are both specified """""" if iallocator and remote_node: raise GanetiApiError(""Only one of iallocator or remote_node can"" "" be used"") query = { ""dry-run"": dry_run, } if iallocator: query[""iallocator""] = iallocator if remote_node: query[""remote_node""] = remote_node if NODE_EVAC_RES1 in r.features: # Server supports body parameters body = { ""early_release"": early_release, } if iallocator is not None: body[""iallocator""] = iallocator if remote_node is not None: body[""remote_node""] = remote_node if mode is not None: body[""mode""] = mode else: # Pre-2.5 request format body = None if not accept_old: raise GanetiApiError(""Server is version 2.4 or earlier and"" "" caller does not accept old-style"" "" results (parameter accept_old)"") # Pre-2.5 servers can only evacuate secondaries if mode is not None and mode != NODE_EVAC_SEC: raise GanetiApiError(""Server can only evacuate secondary instances"") if iallocator is not None: query[""iallocator""] = iallocator if remote_node is not None: query[""remote_node""] = remote_node if query: query[""early_release""] = 1 return r.request(""post"", ""/2/nodes/%s/evacuate"" % node, query=query, content=body)" 2257,"def MigrateNode(r, node, mode=None, dry_run=False, iallocator=None, target_node=None): """""" Migrates all primary instances from a node. 
@type node: str @param node: node to migrate @type mode: string @param mode: if passed, it will overwrite the live migration type, otherwise the hypervisor default will be used @type dry_run: bool @param dry_run: whether to perform a dry run @type iallocator: string @param iallocator: instance allocator to use @type target_node: string @param target_node: Target node for shared-storage instances @rtype: int @return: job id """""" query = { ""dry-run"": dry_run, } if NODE_MIGRATE_REQV1 in r.features: body = {} if mode is not None: body[""mode""] = mode if iallocator is not None: body[""iallocator""] = iallocator if target_node is not None: body[""target_node""] = target_node else: # Use old request format if target_node is not None: raise GanetiApiError(""Server does not support specifying"" "" target node for node migration"") body = None if mode is not None: query[""mode""] = mode return r.request(""post"", ""/2/nodes/%s/migrate"" % node, query=query, content=body)" 2258,"def SetNodeRole(r, node, role, force=False, auto_promote=False): """""" Sets the role for a node. @type node: str @param node: the node whose role to set @type role: str @param role: the role to set for the node @type force: bool @param force: whether to force the role change @type auto_promote: bool @param auto_promote: Whether node(s) should be promoted to master candidate if necessary @rtype: int @return: job id """""" query = { ""force"": force, ""auto_promote"": auto_promote, } return r.request(""put"", ""/2/nodes/%s/role"" % node, query=query, content=role)" 2259,"def PowercycleNode(r, node, force=False): """""" Powercycles a node. @type node: string @param node: Node name @type force: bool @param force: Whether to force the operation @rtype: string @return: job id """""" query = { ""force"": force, } return r.request(""post"", ""/2/nodes/%s/powercycle"" % node, query=query)" 2260,"def GetNodeStorageUnits(r, node, storage_type, output_fields): """""" Gets the storage units for a node. @type node: str @param node: the node whose storage units to return @type storage_type: str @param storage_type: storage type whose units to return @type output_fields: str @param output_fields: storage type fields to return @rtype: int @return: job id where results can be retrieved """""" query = { ""storage_type"": storage_type, ""output_fields"": output_fields, } return r.request(""get"", ""/2/nodes/%s/storage"" % node, query=query)" 2261,"def ModifyNodeStorageUnits(r, node, storage_type, name, allocatable=None): """""" Modifies parameters of storage units on the node. @type node: str @param node: node whose storage units to modify @type storage_type: str @param storage_type: storage type whose units to modify @type name: str @param name: name of the storage unit @type allocatable: bool or None @param allocatable: Whether to set the ""allocatable"" flag on the storage unit (None=no modification, True=set, False=unset) @rtype: int @return: job id """""" query = { ""storage_type"": storage_type, ""name"": name, } if allocatable is not None: query[""allocatable""] = allocatable return r.request(""put"", ""/2/nodes/%s/storage/modify"" % node, query=query)" 2262,"def RepairNodeStorageUnits(r, node, storage_type, name): """""" Repairs a storage unit on the node. 
@type node: str @param node: node whose storage units to repair @type storage_type: str @param storage_type: storage type to repair @type name: str @param name: name of the storage unit to repair @rtype: int @return: job id """""" query = { ""storage_type"": storage_type, ""name"": name, } return r.request(""put"", ""/2/nodes/%s/storage/repair"" % node, query=query)" 2263,"def AddNodeTags(r, node, tags, dry_run=False): """""" Adds tags to a node. @type node: str @param node: node to add tags to @type tags: list of str @param tags: tags to add to the node @type dry_run: bool @param dry_run: whether to perform a dry run @rtype: int @return: job id """""" query = { ""tag"": tags, ""dry-run"": dry_run, } return r.request(""put"", ""/2/nodes/%s/tags"" % node, query=query, content=tags)" 2264,"def DeleteNodeTags(r, node, tags, dry_run=False): """""" Delete tags from a node. @type node: str @param node: node to remove tags from @type tags: list of str @param tags: tags to remove from the node @type dry_run: bool @param dry_run: whether to perform a dry run @rtype: int @return: job id """""" query = { ""tag"": tags, ""dry-run"": dry_run, } return r.request(""delete"", ""/2/nodes/%s/tags"" % node, query=query)" 2265,"def GetGroups(r, bulk=False): """""" Gets all node groups in the cluster. @type bulk: bool @param bulk: whether to return all information about the groups @rtype: list of dict or str @return: if bulk is true, a list of dictionaries with info about all node groups in the cluster, else a list of names of those node groups """""" if bulk: return r.request(""get"", ""/2/groups"", query={""bulk"": 1}) else: groups = r.request(""get"", ""/2/groups"") return r.applier(itemgetters(""name""), groups)" 2266,"def CreateGroup(r, name, alloc_policy=None, dry_run=False): """""" Creates a new node group. @type name: str @param name: the name of node group to create @type alloc_policy: str @param alloc_policy: the desired allocation policy for the group, if any @type dry_run: bool @param dry_run: whether to perform a dry run @rtype: int @return: job id """""" query = { ""dry-run"": dry_run, } body = { ""name"": name, ""alloc_policy"": alloc_policy } return r.request(""post"", ""/2/groups"", query=query, content=body)" 2267,"def DeleteGroup(r, group, dry_run=False): """""" Deletes a node group. @type group: str @param group: the node group to delete @type dry_run: bool @param dry_run: whether to perform a dry run @rtype: int @return: job id """""" query = { ""dry-run"": dry_run, } return r.request(""delete"", ""/2/groups/%s"" % group, query=query)" 2268,"def RenameGroup(r, group, new_name): """""" Changes the name of a node group. @type group: string @param group: Node group name @type new_name: string @param new_name: New node group name @rtype: int @return: job id """""" body = { ""new_name"": new_name, } return r.request(""put"", ""/2/groups/%s/rename"" % group, content=body)" 2269,"def AssignGroupNodes(r, group, nodes, force=False, dry_run=False): """""" Assigns nodes to a group. @type group: string @param group: Node group name @type nodes: list of strings @param nodes: List of nodes to assign to the group @rtype: int @return: job id """""" query = { ""force"": force, ""dry-run"": dry_run, } body = { ""nodes"": nodes, } return r.request(""put"", ""/2/groups/%s/assign-nodes"" % group, query=query, content=body)" 2270,"def AddGroupTags(r, group, tags, dry_run=False): """""" Adds tags to a node group. 
@type group: str @param group: group to add tags to @type tags: list of string @param tags: tags to add to the group @type dry_run: bool @param dry_run: whether to perform a dry run @rtype: string @return: job id """""" query = { ""dry-run"": dry_run, ""tag"": tags, } return r.request(""put"", ""/2/groups/%s/tags"" % group, query=query)" 2271,"def DeleteGroupTags(r, group, tags, dry_run=False): """""" Deletes tags from a node group. @type group: str @param group: group to delete tags from @type tags: list of string @param tags: tags to delete @type dry_run: bool @param dry_run: whether to perform a dry run @rtype: string @return: job id """""" query = { ""dry-run"": dry_run, ""tag"": tags, } return r.request(""delete"", ""/2/groups/%s/tags"" % group, query=query)" 2272,"def Query(r, what, fields, qfilter=None): """""" Retrieves information about resources. @type what: string @param what: Resource name, one of L{constants.QR_VIA_RAPI} @type fields: list of string @param fields: Requested fields @type qfilter: None or list @param qfilter: Query filter @rtype: string @return: job id """""" body = { ""fields"": fields, } if qfilter is not None: body[""qfilter""] = body[""filter""] = qfilter return r.request(""put"", ""/2/query/%s"" % what, content=body)" 2273,"def QueryFields(r, what, fields=None): """""" Retrieves available fields for a resource. @type what: string @param what: Resource name, one of L{constants.QR_VIA_RAPI} @type fields: list of string @param fields: Requested fields @rtype: string @return: job id """""" query = {} if fields is not None: query[""fields""] = "","".join(fields) return r.request(""get"", ""/2/query/%s/fields"" % what, query=query)" 2274,"def createalphabet(alphabetinput=None): """""" Creates a sample alphabet containing printable ASCII characters """""" if alphabetinput and os.path.isfile(alphabetinput): return _load_alphabet(alphabetinput) elif alphabetinput: alpha = [] setlist = alphabetinput.split(',') for alphaset in setlist: a = int(alphaset.split('-')[0]) b = int(alphaset.split('-')[1]) for i in range(a, b): alpha.append(str(unichr(i))) return alpha alpha = [] for i in range(32, 127): alpha.append(str(unichr(i))) return alpha" 2275,"def _instant_search(self): """"""Determine possible keys after a push or pop """""" _keys = [] for k,v in self.searchables.iteritems(): if self.string in v: _keys.append(k) self.candidates.append(_keys)" 2276,"def best_guess(self): """"""Return the gnomekeyring position of the closest matching """""" best_guess_ever = (0, 0) # (key, string) points = defaultdict(float) points[0] = 0 if len(self.string) > 0: for key in self.candidate_keys: guess = self.searchables[key] if guess == self.string: points[key] += 100 break # skip, entry longer then guess if len(self.string) > len(guess): continue # begins with if guess.startswith(self.string): points[key] += 1 # contained in if self.string in guess: points[key] += 1 # percentage of user search string in best guess if points[key] > 0: points[key] += float(len(self.string))/len(guess) for k,v in points.iteritems(): if points[best_guess_ever[0]] < points[k]: best_guess_ever = (k, self.searchables[k]) return best_guess_ever" 2277,"def find_html_files(self, destination): """""" Finds all html files in the given destination. """""" for root, dirs, files in os.walk(destination): for f in files: if f.endswith('.html'): yield os.path.join(root, f)" 2278,"def minify_file(self, target): """""" Minifies the target html file. 
"""""" html = open(target, 'rb').read() enc = chardet.detect(html)['encoding'] with codecs.open(target, 'r+', enc) as f: result = htmlmin.minify(f.read(), **self.options) f.seek(0) f.write(result) f.truncate()" 2279,"def on_after_build_all(self, builder, **extra): """""" after-build-all lektor event """""" # NOTE(vesuvium): compatibility for lektor 2.X and 3.X try: is_enabled = self.is_enabled(builder.build_flags) except AttributeError: is_enabled = self.is_enabled(builder.extra_flags) if not is_enabled: return reporter.report_generic('Starting HTML minification') for htmlfile in self.find_html_files(builder.destination_path): self.minify_file(htmlfile) reporter.report_generic('HTML minification finished')" 2280,"def InterpretWaveform(raw, integersOnly=False, headersOnly=False, noTimeArray=False): """""" Take the raw binary from a file saved from the LeCroy, read from a file using the 2 lines: with open(filename, ""rb"") as file: raw = file.read() And extracts various properties of the saved time trace. Parameters ---------- raw : bytes Bytes object containing the binary contents of the saved raw/trc file integersOnly : bool, optional If True, only returns the unprocessed integers (read from the ADC) rather than the signal in volts. Defaults to False. headersOnly : bool, optional If True, only returns the file header. Defaults to False. noTimeArray : bool, optional If true returns timeStart, timeStop and timeStep and doesn't create the time array Returns ------- WAVEDESC : dict dictionary containing some properties of the time trace and oscilloscope settings extracted from the header file. x : ndarray / tuple The array of time values recorded by the oscilloscope or, if noTimeArray is True, returns a tuplef of (timeStart, timeStop, timeStep) y : ndarray The array of voltage values recorded by the oscilloscope integers : ndarray The array of raw integers recorded from the ADC and stored in the binary file MissingData : bool bool stating if any data was missing """""" MissingData = False from struct import unpack if raw[0:1] != b'#': cmd = raw.split(b',')[0] # ""C1:WF ALL"" or similar wave = raw[len(cmd)+1:] # Remove the above command text (and trailing else: wave = raw del raw # if wave[0:1] != b'#': # warnings.warn('Waveform format not as expected, time trace may be missing data') # MissingData = True n = int(wave[1:2]) # number of digits in length of data N = int(wave[2:2+n]) # number describing length of data if wave.endswith(b'\n'): wave = wave[:-1] wave = wave[2+n:] # if N != len(wave): # warnings.warn('Length of waveform not as expected, time trace may be missing data') # MissingData = True # Code to parse WAVEDESC generated by parsing template, returned from scope query ""TEMPLATE?"" # Note that this is not well tested and will not handle unusual settings WAVEDESC = dict() WAVEDESC['DESCRIPTOR_NAME'] = wave[0:16].strip(b'\x00') WAVEDESC['TEMPLATE_NAME'] = wave[16:32].strip(b'\x00') WAVEDESC['COMM_TYPE'] = {0: 'byte',1: 'word'}[unpack(b""<H"", wave[32:34])[0]] WAVEDESC['COMM_ORDER'] = {0: 'HIFIRST',1: 'LOFIRST'}[unpack(""<H"", wave[34:36])[0]] WAVEDESC['WAVE_DESCRIPTOR'] = unpack('<l', wave[36:40])[0] WAVEDESC['USER_TEXT'] = unpack('<l', wave[40:44])[0] WAVEDESC['RES_DESC1'] = unpack('<l', wave[44:48])[0] WAVEDESC['TRIGTIME_ARRAY'] = unpack('<l', wave[48:52])[0] WAVEDESC['RIS_TIME_ARRAY'] = unpack('<l', wave[52:56])[0] WAVEDESC['RES_ARRAY1'] = unpack('<l', wave[56:60])[0] WAVEDESC['WAVE_ARRAY_1'] = unpack('<l', wave[60:64])[0] WAVEDESC['WAVE_ARRAY_2'] = unpack('<l', wave[64:68])[0] 
WAVEDESC['RES_ARRAY2'] = unpack('<l', wave[68:72])[0] WAVEDESC['RES_ARRAY3'] = unpack('<l', wave[72:76])[0] WAVEDESC['INSTRUMENT_NAME'] = wave[76:92].strip(b'\x00') WAVEDESC['INSTRUMENT_NUMBER'] = unpack('<l', wave[92:96])[0] WAVEDESC['TRACE_LABEL'] = wave[96:112].strip(b'\x00') WAVEDESC['RESERVED1'] = unpack('<h', wave[112:114])[0] WAVEDESC['RESERVED2'] = unpack('<h', wave[114:116])[0] WAVEDESC['WAVE_ARRAY_COUNT'] = unpack('<l', wave[116:120])[0] WAVEDESC['PNTS_PER_SCREEN'] = unpack('<l', wave[120:124])[0] WAVEDESC['FIRST_VALID_PNT'] = unpack('<l', wave[124:128])[0] WAVEDESC['LAST_VALID_PNT'] = unpack('<l', wave[128:132])[0] WAVEDESC['FIRST_POINT'] = unpack('<l', wave[132:136])[0] WAVEDESC['SPARSING_FACTOR'] = unpack('<l', wave[136:140])[0] WAVEDESC['SEGMENT_INDEX'] = unpack('<l', wave[140:144])[0] WAVEDESC['SUBARRAY_COUNT'] = unpack('<l', wave[144:148])[0] WAVEDESC['SWEEPS_PER_ACQ'] = unpack('<l', wave[148:152])[0] WAVEDESC['POINTS_PER_PAIR'] = unpack('<h', wave[152:154])[0] WAVEDESC['PAIR_OFFSET'] = unpack('<h', wave[154:156])[0] WAVEDESC['VERTICAL_GAIN'] = unpack('<f', wave[156:160])[0] WAVEDESC['VERTICAL_OFFSET'] = unpack('<f', wave[160:164])[0] WAVEDESC['MAX_VALUE'] = unpack('<f', wave[164:168])[0] WAVEDESC['MIN_VALUE'] = unpack('<f', wave[168:172])[0] WAVEDESC['NOMINAL_BITS'] = unpack('<h', wave[172:174])[0] WAVEDESC['NOM_SUBARRAY_COUNT'] = unpack('<h', wave[174:176])[0] WAVEDESC['HORIZ_INTERVAL'] = unpack('<f', wave[176:180])[0] WAVEDESC['HORIZ_OFFSET'] = unpack('<d', wave[180:188])[0] WAVEDESC['PIXEL_OFFSET'] = unpack('<d', wave[188:196])[0] WAVEDESC['VERTUNIT'] = wave[196:244].strip(b'\x00') WAVEDESC['HORUNIT'] = wave[244:292].strip(b'\x00') WAVEDESC['HORIZ_UNCERTAINTY'] = unpack('<f', wave[292:296])[0] WAVEDESC['TRIGGER_TIME'] = wave[296:312] # Format time_stamp not implemented WAVEDESC['ACQ_DURATION'] = unpack('<f', wave[312:316])[0] WAVEDESC['RECORD_TYPE'] = {0: 'single_sweep',1: 'interleaved',2: 'histogram',3: 'graph',4: 'filter_coefficient',5: 'complex',6: 'extrema',7: 'sequence_obsolete',8: 'centered_RIS',9: 'peak_detect'}[unpack(""<H"", wave[316:318])[0]] WAVEDESC['PROCESSING_DONE'] = {0: 'no_processing',1: 'fir_filter',2: 'interpolated',3: 'sparsed',4: 'autoscaled',5: 'no_result',6: 'rolling',7: 'cumulative'}[unpack(""<H"", wave[318:320])[0]] WAVEDESC['RESERVED5'] = unpack('<h', wave[320:322])[0] WAVEDESC['RIS_SWEEPS'] = unpack('<h', wave[322:324])[0] WAVEDESC['TIMEBASE'] = {0: '1_ps/div',1: '2_ps/div',2: '5_ps/div',3: '10_ps/div',4: '20_ps/div',5: '50_ps/div',6: '100_ps/div',7: '200_ps/div',8: '500_ps/div',9: '1_ns/div',10: '2_ns/div',11: '5_ns/div',12: '10_ns/div',13: '20_ns/div',14: '50_ns/div',15: '100_ns/div',16: '200_ns/div',17: '500_ns/div',18: '1_us/div',19: '2_us/div',20: '5_us/div',21: '10_us/div',22: '20_us/div',23: '50_us/div',24: '100_us/div',25: '200_us/div',26: '500_us/div',27: '1_ms/div',28: '2_ms/div',29: '5_ms/div',30: '10_ms/div',31: '20_ms/div',32: '50_ms/div',33: '100_ms/div',34: '200_ms/div',35: '500_ms/div',36: '1_s/div',37: '2_s/div',38: '5_s/div',39: '10_s/div',40: '20_s/div',41: '50_s/div',42: '100_s/div',43: '200_s/div',44: '500_s/div',45: '1_ks/div',46: '2_ks/div',47: '5_ks/div',100: 'EXTERNAL'}[unpack(""<H"", wave[324:326])[0]] WAVEDESC['VERT_COUPLING'] = {0: 'DC_50_Ohms',1: 'ground',2: 'DC_1MOhm',3: 'ground',4: 'AC_1MOhm'}[unpack(""<H"", wave[326:328])[0]] WAVEDESC['PROBE_ATT'] = unpack('<f', wave[328:332])[0] WAVEDESC['FIXED_VERT_GAIN'] = {0: '1_uV/div',1: '2_uV/div',2: '5_uV/div',3: '10_uV/div',4: '20_uV/div',5: '50_uV/div',6: 
'100_uV/div',7: '200_uV/div',8: '500_uV/div',9: '1_mV/div',10: '2_mV/div',11: '5_mV/div',12: '10_mV/div',13: '20_mV/div',14: '50_mV/div',15: '100_mV/div',16: '200_mV/div',17: '500_mV/div',18: '1_V/div',19: '2_V/div',20: '5_V/div',21: '10_V/div',22: '20_V/div',23: '50_V/div',24: '100_V/div',25: '200_V/div',26: '500_V/div',27: '1_kV/div'}[unpack(""<H"", wave[332:334])[0]] WAVEDESC['BANDWIDTH_LIMIT'] = {0: 'off',1: 'on'}[unpack(""<H"", wave[334:336])[0]] WAVEDESC['VERTICAL_VERNIER'] = unpack('<f', wave[336:340])[0] WAVEDESC['ACQ_VERT_OFFSET'] = unpack('<f', wave[340:344])[0] WAVEDESC['WAVE_SOURCE'] = {0: 'CHANNEL_1',1: 'CHANNEL_2',2: 'CHANNEL_3',3: 'CHANNEL_4',9: 'UNKNOWN'}[unpack(""<H"", wave[344:346])[0]] if len(wave[346:]) != WAVEDESC['WAVE_ARRAY_1']: warnings.warn('Binary data not the expected length, time trace may be missing data') MissingData = True if headersOnly: return WAVEDESC, MissingData else: from numpy import fromstring, int16, arange if MissingData != True: integers = fromstring(wave[346:], dtype=int16) else: integers = fromstring(wave[346:][:-1], dtype=int16) if integersOnly: return (WAVEDESC, integers, MissingData) elif noTimeArray: y = integers * WAVEDESC['VERTICAL_GAIN'] - WAVEDESC['VERTICAL_OFFSET'] x = arange(len(integers)) * WAVEDESC['HORIZ_INTERVAL'] + WAVEDESC['HORIZ_OFFSET'] timeStart = x[0] timeStop = x[-1] timeStep = x[1]-x[0] return (WAVEDESC, (timeStart, timeStop, timeStep), y, integers, MissingData) else: y = integers * WAVEDESC['VERTICAL_GAIN'] - WAVEDESC['VERTICAL_OFFSET'] x = arange(len(integers)) * WAVEDESC['HORIZ_INTERVAL'] + WAVEDESC['HORIZ_OFFSET'] return (WAVEDESC, x, y, integers, MissingData)" 2281,"def raw(self, channel=1): """""" Reads the raw input from the oscilloscope. Parameters ---------- channel : int channel number of read Returns ------- rawData : bytes raw binary data read from the oscilloscope """""" self.waitOPC() self.write('COMM_FORMAT DEF9,WORD,BIN') self.write('C%u:WAVEFORM?' % channel) return self.read_raw()" 2282,"def features(entrystream, type=None, traverse=False): """""" Pull features out of the specified entry stream. :param entrystream: a stream of entries :param type: retrieve only features of the specified type; set to :code:`None` to retrieve all features :param traverse: by default, only top-level features are selected; set to :code:`True` to search each feature graph for the specified feature type """""" for feature in entry_type_filter(entrystream, tag.Feature): if traverse: if type is None: message = 'cannot traverse without a specific feature type' raise ValueError(message) if type == feature.type: yield feature else: for subfeature in feature: if type == subfeature.type: yield subfeature else: if not type or type == feature.type: yield feature" 2283,"def window(featurestream, seqid, start=None, end=None, strict=True): """""" Pull features out of the designated genomic interval. This function uses 0-based half-open intervals, not the 1-based closed intervals used by GFF3. 
:param featurestream: a stream of feature entries :param seqid: ID of the sequence from which to select features :param start: start of the genomic interval :param end: end of the genomic interval :param strict: when set to :code:`True`, only features completely contained within the interval are selected; when set to :code:`False`, any feature overlapping the interval is selected """""" region = None if start and end: region = tag.Range(start, end) for feature in featurestream: if feature.seqid != seqid: continue if region: if strict: if region.contains(feature._range): yield feature else: if region.overlap(feature._range): yield feature else: yield feature" 2284,"def directives(entrystream, type=None): """""" Pull directives out of the specified entry stream. :param entrystream: a stream of entries :param type: retrieve only directives of the specified type; set to :code:`None` to retrieve all directives """""" for directive in entry_type_filter(entrystream, tag.Directive): if not type or type == directive.type: yield directive" 2285,"def validate_driver(f): """"""Check driver on"""""" def check_driver(request): drivers = get_all_driver() drivers = filter(drivers, request) if drivers: return f(request, drivers) else: raise Exception('Driver is not found') return check_driver" 2286,"def cli(ctx, stage, port): """"""Web interface(experimental)."""""" if not ctx.bubble: ctx.say_yellow('There is no bubble present, will not listen') raise click.Abort() gbc = ctx.gbc WEB = None if stage in STAGES: STAGE = ctx.cfg.CFG[stage] if 'SERVER' in STAGE: SERVER=STAGE.SERVER if 'WEB' in SERVER: WEB=SERVER.WEB if not WEB: ctx.say_red('There is no SERVER.WEB in stage:' + stage) ctx.say_yellow('please check configuration in ' + ctx.home + '/config/config.yaml') raise click.Abort() web_server = get_server(gbc, WEB, ctx.home) try: # TODO: bg & # src_listening = web_server.start_web(ctx=gbc, web_server.start_web(ctx=gbc, port=port, stage=stage) except Exception as e: ctx.say_red( 'cannot start web server e ' + WEB) ctx.say_red(str(e)) raise click.Abort('cannot listen')" 2287,"def get_plural_tag_index(number, locale): """"""Gets the plural tag index of a number on the plural rule of a locale:: >>> get_plural_tag_index(1, 'en_US') 0 >>> get_plural_tag_index(2, 'en_US') 1 >>> get_plural_tag_index(100, 'en_US') 1 """""" locale = Locale.parse(locale) plural_rule = locale.plural_form used_tags = plural_rule.tags | set([_fallback_tag]) tag, index = plural_rule(number), 0 for _tag in _plural_tags: if _tag == tag: return index if _tag in used_tags: index += 1" 2288,"def strings_to_(strings: Iterable[str], f: Callable) -> Iterable[Any]: """""" Convert a list of strings to a list of certain form, specified by *f*. :param strings: a list of string :param f: a function that converts your string :return: type undefined, but specified by `to_type` .. doctest:: >>> strings_to_(['0.333', '0.667', '0.250'], float) [0.333, 0.667, 0.25] """""" if not all_string_like(strings): raise TypeError('All have to be strings!') # ``type(strs)`` is the container of *strs*. return type(strings)(map(f, strings))" 2289,"def strings_to_integers(strings: Iterable[str]) -> Iterable[int]: """""" Convert a list of strings to a list of integers. :param strings: a list of string :return: a list of converted integers .. 
doctest:: >>> strings_to_integers(['1', '1.0', '-0.2']) [1, 1, 0] """""" return strings_to_(strings, lambda x: int(float(x)))" 2290,"def string_to_double_precision_float(s: str) -> float: """""" Double precision float in Fortran file will have form 'x.ydz' or 'x.yDz', this cannot be convert directly to float by Python ``float`` function, so I wrote this function to help conversion. For example, :param s: a string denoting a double precision number :return: a Python floating point number .. doctest:: >>> string_to_double_precision_float('1d-82') 1e-82 >>> string_to_double_precision_float('1.0D-82') 1e-82 >>> string_to_double_precision_float('0.8D234') 8e+233 >>> string_to_double_precision_float('.8d234') 8e+233 """""" first, second, exponential = re.match( ""(-?\d*)\.?(-?\d*)d(-?\d+)"", s, re.IGNORECASE).groups() return float(first + '.' + second + 'e' + exponential)" 2291,"def string_to_general_float(s: str) -> float: """""" Convert a string to corresponding single or double precision scientific number. :param s: a string could be '0.1', '1e-5', '1.0D-5', or any other validated number :return: a float or raise an error .. doctest:: >>> string_to_general_float('1.0D-5') 1e-05 >>> string_to_general_float('1Dx') Traceback (most recent call last): ... ValueError: The string '1Dx' does not corresponds to a double precision number! >>> string_to_general_float('.8d234') 8e+233 >>> string_to_general_float('0.1') 0.1 """""" if 'D' in s.upper(): # Possible double precision number try: return string_to_double_precision_float(s) except ValueError: raise ValueError( ""The string '{0}' does not corresponds to a double precision number!"".format(s)) else: return float(s)" 2292,"def match_one_string(pattern: str, s: str, *args): """""" Make sure you know only none or one string will be matched! If you are not sure, use `match_one_pattern` instead. :param pattern: :param s: :param args: :return: .. doctest:: >>> p = ""\d+"" >>> s = ""abc 123 def"" >>> match_one_string(p, s, int) 123 >>> print(match_one_string(p, ""abc"")) Pattern ""\d+"" not found, or more than one found in string abc! None >>> print(match_one_string(p, ""abc 123 def 456"")) Pattern ""\d+"" not found, or more than one found in string abc 123 def 456! None """""" try: # `match` is either an empty list or a list of string. match, = re.findall(pattern, s) if len(args) == 0: # If no wrapper argument is given, return directly the matched string return match elif len(args) == 1: # If wrapper argument is given, i.e., not empty, then apply wrapper to the match wrapper, = args return wrapper(match) else: raise TypeError( 'Multiple wrappers are given! Only one should be given!') except ValueError: print(""Pattern \""{0}\"" not found, or more than one found in string {1}!"".format( pattern, s))" 2293,"def match_one_pattern(pattern: str, s: str, *args: Optional[Callable], **flags): """""" Find a pattern in a certain string. If found and a wrapper is given, then return the wrapped matched-string; if no wrapper is given, return the pure matched string. If no match is found, return None. :param pattern: a pattern, can be a string or a regular expression :param s: a string :param args: at most 1 argument can be given :param flags: the same flags as ``re.findall``'s :return: .. doctest:: >>> p = ""\d+"" >>> s = ""abc 123 def 456"" >>> match_one_pattern(p, s) ['123', '456'] >>> match_one_pattern(p, s, int) [123, 456] >>> match_one_pattern(p, ""abc 123 def"") ['123'] >>> print(match_one_pattern('s', 'abc')) Pattern ""s"" not found in string abc! 
None >>> match_one_pattern('s', 'Ssa', flags=re.IGNORECASE) ['S', 's'] """""" match: Optional[List[str]] = re.findall(pattern, s, **flags) # `match` is either an empty list or a list of strings. if match: if len(args) == 0: # If no wrapper argument is given, return directly the matched string return match elif len(args) == 1: # If wrapper argument is given, i.e., not empty, then apply wrapper to the match wrapper, = args return [wrapper(m) for m in match] else: raise TypeError( 'Multiple wrappers are given! Only one should be given!') else: # If no match is found print(""Pattern \""{0}\"" not found in string {1}!"".format(pattern, s)) return None" 2294,"def all_string_like(iterable: Iterable[object]) -> bool: """""" If any element of an iterable is not a string, return `True`. :param iterable: Can be a set, a tuple, a list, etc. :return: Whether any element of an iterable is not a string. .. doctest:: >>> all_string_like(['a', 'b', 'c', 3]) False >>> all_string_like(('a', 'b', 'c', 'd')) True """""" return all(is_string_like(_) for _ in iterable)" 2295,"def source_filename(self, docname: str, srcdir: str): """""" Get the full filename to referenced image """""" docpath = Path(srcdir, docname) parent = docpath.parent imgpath = parent.joinpath(self.filename) # Does this exist? if not imgpath.exists(): msg = f'Image does not exist at ""{imgpath}""' raise SphinxError(msg) return imgpath" 2296,"def env_updated(self, kb_app, sphinx_app: Sphinx, sphinx_env: BuildEnvironment, resource ): """""" Make images and enter them in Sphinx's output writer """""" docname = resource.docname srcdir = sphinx_app.env.srcdir source_imgpath = self.source_filename(docname, srcdir) # Copy the image to the Sphinx build directory build_dir = sphinx_app.outdir docpath = Path(docname) parent = docpath.parent target_imgpath = str(Path(build_dir, parent, self.filename)) # Does the target dir exist yet in the build dir? Probably not. If # not, make it target_dir = Path(build_dir, parent) if not target_dir.exists(): target_dir.mkdir(parents=True, exist_ok=True) shutil.copy(source_imgpath, target_imgpath)" 2297,"def catalog(self, table='', column=''): """"""Lookup the values available for querying."""""" lookup_table = self.lookup_table if lookup_table is not None: if table: if column: column = column.upper() return lookup_table[table][column] return lookup_table[table] # Show what methods are available. return self.lookup_methods return None" 2298,"def _resolve_call(self, table, column='', value='', **kwargs): """"""Internal method to resolve the API wrapper call."""""" if not column: return self.catalog(table) elif not value: return self.catalog(table, column) # We have all the table, column, and value, and now need to # ensure they're all strings and uppercase. column = column.upper() value = str(value).upper() data = self.call_api(table, column, value, **kwargs) if isinstance(data, dict): # Data is actually the first value. 
data = data.values()[0] return data" 2299,"def call_api(self, table, column, value, **kwargs): """"""Exposed method to connect and query the EPA's API."""""" try: output_format = kwargs.pop('output_format') except KeyError: output_format = self.output_format url_list = [self.base_url, table, column, quote(value), 'rows'] rows_count = self._number_of_rows(**kwargs) url_list.append(rows_count) url_string = '/'.join(url_list) xml_data = urlopen(url_string).read() data = self._format_data(output_format, xml_data) return data" 2300,"def _number_of_rows(self, start=0, count=100, **kwargs): """"""Internal method to format the number of rows the EPA API returns."""""" first = str(start) last = str(start + count) string_format = ':'.join([first, last]) return string_format" 2301,"def get_reference(self, rtype: str, label: str): """""" Return reference filed under rtype/label The references are organized by field/label, e.g. category/cat1. This lets us use a shorthand notation to go the resource, e.g. ref:category:cat1 instead of folder1/folder2/cat1. """""" # We are doing this instead of dictionary access in case we change # the storage later to a multidict thingy for optimization. reftype = self.data.get(rtype) if reftype: # The reftype might be ""python"" or ""sphinx"" or something else # from an Intersphinx registry, not something internal to # Kaybee. return reftype[label]" 2302,"def add_reference(self, reftype: str, label: str, target): """""" Add reference object in references under rtype/label=target """""" # The self.data[reftype] dict springs into being during the # register_references event handler at startup, which looks in the # kb registry for all registered reference names. self.data[reftype][label] = target" 2303,"def resource_references(self, resource) -> Mapping[str, List[Any]]: """""" Resolve and return reference resources pointed to by object Fields in resource.props can flag that they are references by using the references type. This method scans the model, finds any fields that are references, and returns the reference resources pointed to by those references. Note that we shouldn't get to the point of dangling references. Our custom Sphinx event should raise a references error during the build process (though maybe it is just a warning?) """""" references = dict() for reference_label in resource.props.references: references[reference_label] = [] # Iterate over each value on this field, e.g. # tags: tag1, tag2, tag3 for target_label in resource.props.references.get(reference_label): # Ask the site to get the object target = self.get_reference(reference_label, target_label) references[reference_label].append(target) return references" 2304,"def start(self, retry_limit=None): """""" Try to connect to Twitter's streaming API. :param retry_limit: The maximum number of retries in case of failures. 
Default is None (unlimited) :raises :class:`~tweepy.error.TweepyError`: If there's some critical API error """""" # Run tweepy stream wrapper_listener = TweepyWrapperListener(listener=self.listener) stream = tweepy.Stream(auth=self.client.tweepy_api.auth, listener=wrapper_listener) retry_counter = 0 while retry_limit is None or retry_counter <= retry_limit: try: retry_counter += 1 if not self.client.config.get('user_stream'): logging.info('Listening to public stream') stream.filter(follow=self.filter.follow, track=self.filter.track) else: if self.filter.follow: logging.warning('Follow filters won\'t be used in user stream') logging.info('Listening to user stream') stream.userstream(track=self.filter.track) except AttributeError as e: # Known Tweepy's issue https://github.com/tweepy/tweepy/issues/576 if ""'NoneType' object has no attribute 'strip'"" in str(e): pass else: raise" 2305,"def mappingBasedGrouping(protToPeps): """"""Performs protein grouping based only on protein to peptide mappings. :param protToPeps: dict, for each protein (=key) contains a set of associated peptides (=value). For Example {protein: {peptide, ...}, ...} #TODO: REFACTORING!!! returns a ProteinInference object """""" inference = ProteinInference(protToPeps) pepToProts = inference.pepToProts proteinClusters = _findProteinClusters(protToPeps, pepToProts) proteins = {} for clusterId, proteinCluster in enumerate(proteinClusters, 1): clusterProtToPeps = {p: protToPeps[p] for p in proteinCluster} #Find sameset proteins, define unique and non unique sameset proteins #NOTE: already unique proteins could be excluded to find sameset proteins samesetProteins = _findSamesetProteins(clusterProtToPeps) mergedProtToPeps = _mergeProteinEntries(samesetProteins, clusterProtToPeps) mergedPepToProts = _invertMapping(mergedProtToPeps) uniqueProteins = _findUniqueMappingValues(mergedPepToProts) remainingProteins = set(mergedProtToPeps).difference(uniqueProteins) # Remove subset proteins and check if remaining proteins become unique subsetProteinInfo = _findSubsetProteins(remainingProteins, mergedProtToPeps, mergedPepToProts) subsetProteins = [p for p, _ in subsetProteinInfo] subsetRemovedProtToPeps = _reducedProtToPeps(mergedProtToPeps, subsetProteins) subsetRemovedPepToProts = _invertMapping(subsetRemovedProtToPeps) uniqueSubsetRemoved = _findUniqueMappingValues(subsetRemovedPepToProts) remainingProteins = remainingProteins.difference(subsetProteins) remainingProteins = remainingProteins.difference(uniqueSubsetRemoved) # Find redundant proteins # subsumableProteins = _findRedundantProteins(subsetRemovedProtToPeps, subsetRemovedPepToProts) remainingNonRedundant = remainingProteins.difference(subsumableProteins) groupInitiatingProteins = uniqueSubsetRemoved.union(remainingNonRedundant) # - Generate protein groups and assign proteins to groups - # #Generate protein groups clusterGroupIds = set() for protein in groupInitiatingProteins: proteinIds = AUX.toList(protein) groupId = inference.addProteinGroup(proteinIds[0]) inference.addLeadingToGroups(proteinIds, groupId) clusterGroupIds.add(groupId) #Add redundant proteins here (must be subsumable I guess) for protein in subsumableProteins: proteinIds = AUX.toList(protein) connectedProteins = _mappingGetValueSet( mergedPepToProts, mergedProtToPeps[protein] ) flatConnectedProteins = _flattenMergedProteins(connectedProteins) groupIds = _mappingGetValueSet( inference._proteinToGroupIds, flatConnectedProteins ) inference.addSubsumableToGroups(proteinIds, groupIds) assert len(groupIds) > 1 #Add 
subgroup proteins to the respective groups #NOTE: proteins that are only a subset of subsumable proteins are not #to be added as subset proteins to a group but as subsumable proteins. for protein, supersetProteins in subsetProteinInfo: proteinIds = AUX.toList(protein) #If the protein is a subset of at least one protein, that is not a #subsumable protein, then it should be added to the group as subset. leadingSuperProteins = supersetProteins.intersection( groupInitiatingProteins) if leadingSuperProteins: flatSupersetProteins = _flattenMergedProteins( leadingSuperProteins) superGroupIds = _mappingGetValueSet( inference._proteinToGroupIds, flatSupersetProteins ) inference.addSubsetToGroups(proteinIds, superGroupIds) #However, if all its super proteins are subsumable, the protein #itself is a subsumable protein. else: flatSupersetProteins = _flattenMergedProteins(supersetProteins) superGroupIds = _mappingGetValueSet( inference._proteinToGroupIds, flatSupersetProteins ) inference.addSubsumableToGroups(proteinIds, superGroupIds) subsumableProteins.update(proteinIds) assert superGroupIds # - Define peptide properties - # groupToPeps = dict() allSubsumablePeps = set() for groupId in clusterGroupIds: group = inference.groups[groupId] if group.subsumableProteins: subsumablePeptides = _mappingGetValueSet( protToPeps, group.subsumableProteins ) allSubsumablePeps.update(subsumablePeptides) groupPeptides = _mappingGetValueSet(protToPeps, group.proteins) groupToPeps[groupId] = groupPeptides pepToGroups = _invertMapping(groupToPeps) #Get unique peptides from peptide to protein mapping uniquePeptides = _findUniqueMappingKeys(mergedPepToProts) #Shared peptides have a groupPeptideCount > 1 nonSharedPeptides = _findUniqueMappingKeys(pepToGroups) sharedPeptides = set(pepToGroups).difference(nonSharedPeptides) #Subsumable peptides are peptides from subsumable proteins that #are not shared peptides of multiple groups subsumablePeptides = allSubsumablePeps.difference(sharedPeptides) #groupUniquePeptides are the remaining ones (not shared with subsumable #proteins, groupPeptideCount == 1, not unique peptides) groupUniquePeptides = nonSharedPeptides.difference(subsumablePeptides) groupUniquePeptides = groupUniquePeptides.difference(uniquePeptides) inference._uniquePeptides.update(uniquePeptides) inference._groupUniquePeptides.update(groupUniquePeptides) inference._groupSubsumablePeptides.update(subsumablePeptides) inference._sharedPeptides.update(sharedPeptides) # - Generate protein entries and add them to the inference object - # subsetProteinInfoDict = dict(subsetProteinInfo) for protein, peptides in viewitems(mergedProtToPeps): _uniquePeptides = peptides.intersection(uniquePeptides) _groupUniquePeptides = peptides.intersection(groupUniquePeptides) _subsumablePeptides = peptides.intersection(subsumablePeptides) _sharedPeptides = peptides.intersection(sharedPeptides) proteinIds = AUX.toList(protein) for proteinId in proteinIds: proteinEntry = Protein(proteinId, peptides) if protein in groupInitiatingProteins: proteinEntry.isLeading = True elif protein in subsumableProteins: proteinEntry.isSubsumable = True if protein in subsetProteins: superset = subsetProteinInfoDict[protein] proteinEntry.isSubset = _flattenMergedProteins(superset) if len(proteinIds) > 1: proteinEntry.isSameset = set(proteinIds) inference.proteins[proteinId] = proteinEntry #Add peptides to protein entry proteinEntry.uniquePeptides = _uniquePeptides proteinEntry.groupUniquePeptides = _groupUniquePeptides proteinEntry.groupSubsumablePeptides = 
_subsumablePeptides proteinEntry.sharedPeptides = _sharedPeptides # - Save cluster information - # for proteinId in proteinCluster: inference._proteinToClusterId[proteinId] = clusterId inference.clusters[clusterId] = clusterGroupIds allProteins = set() for proteinGroup in viewvalues(inference.groups): allProteins.update(proteinGroup.proteins) allProteins.update(proteinGroup.subsumableProteins) assert len(allProteins) == len(protToPeps) return inference" 2306,"def _findProteinClusters(protToPeps, pepToProts): """"""Find protein clusters in the specified protein to peptide mappings. A protein cluster is a group of proteins that are somehow directly or indirectly connected by shared peptides. :param protToPeps: dict, for each protein (=key) contains a set of associated peptides (=value). For Example {protein: {peptide, ...}, ...} :param pepToProts: dict, for each peptide (=key) contains a set of parent proteins (=value). For Example {peptide: {protein, ...}, ...} :returns: a list of protein clusters, each cluster is a set of proteins """""" clusters = list() resolvingProteins = set(protToPeps) while resolvingProteins: protein = resolvingProteins.pop() proteinCluster = set([protein]) peptides = set(protToPeps[protein]) parsedPeptides = set() while len(peptides) != len(parsedPeptides): for peptide in peptides: proteinCluster.update(pepToProts[peptide]) parsedPeptides.update(peptides) for protein in proteinCluster: peptides.update(protToPeps[protein]) clusters.append(proteinCluster) resolvingProteins = resolvingProteins.difference(proteinCluster) return clusters" 2307,"def _findSamesetProteins(protToPeps, proteins=None): """"""Find proteins that are mapped to an identical set of peptides. :param protToPeps: dict, for each protein (=key) contains a set of associated peptides (=value). For Example {protein: {peptide, ...}, ...} :param proteins: iterable, proteins that are tested for having equal evidence. If not specified all proteins are tested :returns: a list of sorted protein tuples that share equal peptide evidence """""" proteins = viewkeys(protToPeps) if proteins is None else proteins equalEvidence = ddict(set) for protein in proteins: peptides = protToPeps[protein] equalEvidence[tuple(sorted(peptides))].add(protein) equalProteins = list() for proteins in viewvalues(equalEvidence): if len(proteins) > 1: equalProteins.append(tuple(sorted(proteins))) return equalProteins" 2308,"def _findSubsetProteins(proteins, protToPeps, pepToProts): """"""Find proteins which peptides are a sub-set, but not a same-set to other proteins. :param proteins: iterable, proteins that are tested for being a subset :param pepToProts: dict, for each peptide (=key) contains a set of parent proteins (=value). For Example {peptide: {protein, ...}, ...} :param protToPeps: dict, for each protein (=key) contains a set of associated peptides (=value). For Example {protein: {peptide, ...}, ...} :returns: a list of pairs of protein and their superset proteins. [(protein, {superset protein, ...}), ...] 
"""""" proteinsEqual = lambda prot1, prot2: protToPeps[prot1] == protToPeps[prot2] subGroups = list() for protein in proteins: peptideCounts = Counter() for peptide in protToPeps[protein]: proteins = pepToProts[peptide] peptideCounts.update(proteins) peptideCount = peptideCounts.pop(protein) superGroups = set() for sharingProtein, sharedPeptides in peptideCounts.most_common(): if peptideCount == sharedPeptides: if not proteinsEqual(protein, sharingProtein): superGroups.add(sharingProtein) else: break if superGroups: subGroups.append((protein, superGroups)) return subGroups" 2309,"def _findRedundantProteins(protToPeps, pepToProts, proteins=None): """"""Returns a set of proteins with redundant peptide evidence. After removing the redundant proteins from the ""protToPeps"" and ""pepToProts"" mapping, all remaining proteins have at least one unique peptide. The remaining proteins are a ""minimal"" set of proteins that are able to explain all peptides. However, this is not guaranteed to be the optimal solution with the least number of proteins. In addition it is possible that multiple solutions with the same number of ""minimal"" proteins exist. Procedure for finding the redundant proteins: 1. Generate a list of proteins that do not contain any unique peptides, a unique peptide has exactly one protein entry in ""pepToProts"". 2. Proteins are first sorted in ascending order of the number of peptides. Proteins with an equal number of peptides are sorted in descending order of their sorted peptide frequencies (= proteins per peptide). If two proteins are still equal, they are sorted alpha numerical in descending order according to their protein names. For example in the case of a tie between proteins ""A"" and ""B"", protein ""B"" would be removed. 3. Parse this list of sorted non unique proteins; If all its peptides have a frequency value of greater 1; mark the protein as redundant; remove its peptides from the peptide frequency count, continue with the next entry. 4. Return the set of proteins marked as redundant. :param pepToProts: dict, for each peptide (=key) contains a set of parent proteins (=value). For Example {peptide: {protein, ...}, ...} :param protToPeps: dict, for each protein (=key) contains a set of associated peptides (=value). For Example {protein: {peptide, ...}, ...} :param proteins: iterable, proteins that are tested for being redundant. If None all proteins in ""protToPeps"" are parsed. :returns: a set of redundant proteins, i.e. 
proteins that are not necessary to explain all peptides """""" if proteins is None: proteins = viewkeys(protToPeps) pepFrequency = _getValueCounts(pepToProts) protPepCounts = _getValueCounts(protToPeps) getCount = operator.itemgetter(1) getProt = operator.itemgetter(0) #TODO: quick and dirty solution #NOTE: add a test for merged proteins proteinTuples = list() for protein in proteins: if isinstance(protein, tuple): proteinTuples.append(protein) else: proteinTuples.append(tuple([protein])) sort = list() for protein in sorted(proteinTuples, reverse=True): if len(protein) == 1: protein = protein[0] protPepFreq = [pepFrequency[pep] for pep in protToPeps[protein]] if min(protPepFreq) > 1: sortValue = (len(protPepFreq)*-1, sorted(protPepFreq, reverse=True)) sort.append((protein, sortValue)) sortedProteins = map(getProt, sorted(sort, key=getCount, reverse=True)) redundantProteins = set() for protein in sortedProteins: for pep in protToPeps[protein]: if pepFrequency[pep] <= 1: break else: protPepFrequency = Counter(protToPeps[protein]) pepFrequency.subtract(protPepFrequency) redundantProteins.add(protein) return redundantProteins" 2310,"def _mergeProteinEntries(proteinLists, protToPeps): """"""Returns a new ""protToPeps"" dictionary with entries merged that are present in proteinLists. NOTE: The key of the merged entry is a tuple of the sorted protein keys. This behaviour might change in the future; the tuple might be replaced by simply one of the protein entries which is then representative for all. :param proteinLists: a list of protein groups that will be merged [{protein, ...}, ...] :param protToPeps: dict, for each protein (=key) contains a set of associated peptides (=value). For Example {protein: {peptide, ...}, ...} :returns: dict, {protein: set([peptid, ...])} """""" mergedProtToPeps = dict(protToPeps) for proteins in proteinLists: for protein in proteins: peptides = mergedProtToPeps.pop(protein) mergedProtein = tuple(sorted(proteins)) mergedProtToPeps[mergedProtein] = peptides return mergedProtToPeps" 2311,"def _reducedProtToPeps(protToPeps, proteins): """"""Returns a new, reduced ""protToPeps"" dictionary that does not contain entries present in ""proteins"". :param protToPeps: dict, for each protein (=key) contains a set of associated peptides (=value). For Example {protein: {peptide, ...}, ...} :param proteins: a list of proteinSet :returns: dict, protToPeps not containing entries from ""proteins"" """""" return {k: v for k, v in viewitems(protToPeps) if k not in proteins}" 2312,"def _findUniqueMappingValues(mapping): """"""Find mapping entries that are unique for one key (value length of 1). .. Note: This function can be used to find unique proteins by providing a peptide to protein mapping. :param mapping: dict, for each key contains a set of entries :returns: a set of unique mapping values """""" uniqueMappingValues = set() for entries in viewvalues(mapping): if len(entries) == 1: uniqueMappingValues.update(entries) return uniqueMappingValues" 2313,"def _findUniqueMappingKeys(mapping): """"""Find mapping keys that only have one entry (value length of 1. :param mapping: dict, for each key contains a set of entries :returns: a set of unique mapping keys """""" uniqueMappingKeys = set() for key, entries in viewitems(mapping): if len(entries) == 1: uniqueMappingKeys.add(key) return uniqueMappingKeys" 2314,"def _invertMapping(mapping): """"""Converts a protein to peptide or peptide to protein mapping. 
:param mapping: dict, for each key contains a set of entries :returns: an inverted mapping in which each value element points to the set of original keys. """""" invertedMapping = ddict(set) for key, values in viewitems(mapping): for value in values: invertedMapping[value].add(key) return invertedMapping" 2315,"def _getValueCounts(mapping): """"""Returns a counter object; contains for each key of the mapping the counts of the respective value element (= set length). :param mapping: dict, for each key contains a set of entries. :returns: a counter """""" return Counter({k: len(v) for k, v in viewitems(mapping)})" 2316,"def _mappingGetValueSet(mapping, keys): """"""Return a combined set of values from the mapping. :param mapping: dict, for each key contains a set of entries :returns: a set of combined entries """""" setUnion = set() for k in keys: setUnion = setUnion.union(mapping[k]) return setUnion" 2317,"def _flattenMergedProteins(proteins): """"""Return a set where merged protein entries in proteins are flattened. :param proteins: an iterable of proteins, can contain merged protein entries in the form of tuple([protein1, protein2]). :returns: a set of protein entries, where all entries are strings """""" proteinSet = set() for protein in proteins: if isinstance(protein, tuple): proteinSet.update(protein) else: proteinSet.add(protein) return proteinSet" 2318,"def getGroups(self, proteinId): """"""Return a list of protein groups a protein is associated with."""""" return [self.groups[gId] for gId in self._proteinToGroupIds[proteinId]]" 2319,"def addProteinGroup(self, groupRepresentative): """"""Adds a new protein group and returns the groupId. The groupId is defined using an internal counter, which is incremented every time a protein group is added. The groupRepresentative is added as a leading protein. :param groupRepresentative: the protein representing the group :returns: the protein group's groupId """""" groupId = self._getNextGroupId() self.groups[groupId] = ProteinGroup(groupId, groupRepresentative) self.addLeadingToGroups(groupRepresentative, groupId) return groupId" 2320,"def addLeadingToGroups(self, proteinIds, groupIds): """"""Add one or multiple leading proteins to one or multiple protein groups. :param proteinIds: a proteinId or a list of proteinIds, a proteinId must be a string. :param groupIds: a groupId or a list of groupIds, a groupId must be a string. """""" for groupId in AUX.toList(groupIds): self.groups[groupId].addLeadingProteins(proteinIds) self._addProteinIdsToGroupMapping(proteinIds, groupId)" 2321,"def addSubsetToGroups(self, proteinIds, groupIds): """"""Add one or multiple subset proteins to one or multiple protein groups. :param proteinIds: a proteinId or a list of proteinIds, a proteinId must be a string. :param groupIds: a groupId or a list of groupIds, a groupId must be a string. """""" for groupId in AUX.toList(groupIds): self.groups[groupId].addSubsetProteins(proteinIds) self._addProteinIdsToGroupMapping(proteinIds, groupId)" 2322,"def addSubsumableToGroups(self, proteinIds, groupIds): """"""Add one or multiple subsumable proteins to one or multiple protein groups. :param proteinIds: a proteinId or a list of proteinIds, a proteinId must be a string. :param groupIds: a groupId or a list of groupIds, a groupId must be a string. 
"""""" for groupId in AUX.toList(groupIds): self.groups[groupId].addSubsumableProteins(proteinIds) self._addProteinIdsToGroupMapping(proteinIds, groupId)" 2323,"def _addProteinIdsToGroupMapping(self, proteinIds, groupId): """"""Add a groupId to one or multiple entries of the internal proteinToGroupId mapping. :param proteinIds: a proteinId or a list of proteinIds, a proteinId must be a string. :param groupId: str, a groupId """""" for proteinId in AUX.toList(proteinIds): self._proteinToGroupIds[proteinId].add(groupId)" 2324,"def _addProteins(self, proteinIds, containerNames): """"""Add one or multiple proteinIds to the respective container. :param proteinIds: a proteinId or a list of proteinIds, a proteinId must be a string. :param containerNames: list, entries must be one or multiple of 'leading', 'subset', 'subsumableProteins' or 'proteins' :param addToProteins: bool, if True the proteinIds are added to the """""" proteinIds = AUX.toList(proteinIds) for containerName in containerNames: proteinContainer = getattr(self, containerName) proteinContainer.update(proteinIds)" 2325,"def satisfies(self, other): """"""Check if the capabilities of a primitive are enough to satisfy a requirement. Should be called on a Requirement that is acting as a capability of a primitive. This method returning true means that the capability advertised here is enough to handle representing the data described by the Requirement passed in as 'other'. Here is a chart showing what satisfies what. other A C 0 1 |Y N N N N s A|Y Y Y Y Y e C|Y - Y Y Y l 0|Y * * Y N f 1|Y * * N Y ' ' = No Care A = arbitrary C = Constant 0 = ZERO 1 = ONE Y = YES N = NO - = Could satisfy with multiple instances * = Not yet determined behavior. Used for bitbanging controllers. """""" if other.isnocare: return True if self.isnocare: return False if self.arbitrary: return True if self.constant and not other.arbitrary: return True if self.value is other.value and not other.arbitrary\ and not other.constant: return True return False" 2326,"def _list(self, foldername=""INBOX"", reverse=False, since=None): """"""Do structured list output. Sorts the list by date, possibly reversed, filtered from 'since'. The returned list is: foldername, message key, message object """""" folder = self.folder \ if foldername == ""INBOX"" \ else self._getfolder(foldername) def sortcmp(d): try: return d[1].date except: return -1 lst = folder.items() if not since else folder.items_since(since) sorted_lst = sorted(lst, key=sortcmp, reverse=1 if reverse else 0) itemlist = [(folder, key, msg) for key,msg in sorted_lst] return itemlist" 2327,"def ls(self, foldername=""INBOX"", reverse=False, since=None, grep=None, field=None, stream=sys.stdout): """"""Do standard text list of the folder to the stream. 'foldername' is the folder to list.. INBOX by default. 'since' allows the listing to be date filtered since that date. It should be a float, a time since epoch. 
'grep' allows text matching on the whole record 'field' allows only 1 field to be output """""" if foldername == """": foldername = ""INBOX"" msg_list = self._list(foldername, reverse, since) for folder, mk, m in msg_list: try: # I am very unsure about this defaulting of foldername output_items = ( ""%s%s%s"" % (folder.folder or foldername or ""INBOX"", SEPERATOR, mk), m.date, m.get_from()[0:50] if m.get_from() else """", m.get_flags(), re.sub(""\n"", """", m.get_subject() or """") ) output_string = ""% -20s % 20s % 50s [%s] %s"" % output_items if not grep or (grep and grep in output_string): if field: print(output_items[int(field)], file=stream) else: print(output_string, file=stream) except IOError as e: if e.errno == errno.EPIPE: # Broken pipe we can ignore return self.logger.exception(""whoops!"") except Exception as e: self.logger.exception(""whoops!"")" 2328,"def lisp(self, foldername=""INBOX"", reverse=False, since=None, stream=sys.stdout): """"""Do JSON list of the folder to the stream. 'since' allows the listing to be date filtered since that date. It should be a float, a time since epoch. """""" def fromval(hdr): if hdr: return parseaddr(hdr) for folder, mk, m in self._list(foldername, reverse, since): try: print(json.dumps({ 'folder': folder.folder or foldername or ""INBOX"", 'key': ""%s%s%s"" % (folder.folder or foldername or ""INBOX"", SEPERATOR, mk), 'date': str(m.date), ""flags"": m.get_flags(), 'from': fromval(m.get_from()), 'subject': re.sub(""\n|\'|\"""", _escape, m.get_subject() or """") }), file=stream) except IOError as e: if e.errno == errno.EPIPE: # Broken pipe we can ignore return self.logger.exception(""whoops!"") except Exception as e: self.logger.exception(""whoops!"")" 2329,"def lsfolders(self, stream=sys.stdout): """"""List the subfolders"""""" for f in self.folder.folders(): print(f.folder.strip("".""), file=stream)" 2330,"def _get(self, msgid): """"""Yields the message header against each part from the message."""""" foldername, msgkey = msgid.split(SEPERATOR) folder = self.folder if foldername == ""INBOX"" else self._getfolder(foldername) # Now look up the message msg = folder[msgkey] msg.is_seen = True hdr = list(msg.items()) for p in msg.walk(): yield hdr,p return" 2331,"def gettext(self, msgid, stream=sys.stdout, splitter=""--text follows this line--\n""): """"""Get the first text part we can find and print it as a message. This is a simple cowpath, most of the time you want the first plain part. 'msgid' is the message to be used 'stream' is printed to with the header, splitter, first-textpart 'splitter' is text used to split the header from the body, Emacs uses this """""" for hdr,part in self._get(msgid): if part.get_content_type() == ""text/plain"": for name,val in hdr: # Use the subtype, since we're printing just that - tidy it up first if name.lower() == ""content-type"": val = part[""content-type""] val = "" "".join([l.strip() for l in val.split(""\n"")]) print(""%s: %s"" % (name,val), file=stream) print(splitter, file=stream) payload = part.get_payload(decode=True) # There seems to be a problem with the parser not doing charsets for parts chartype = part.get_charset() \ or _get_charset(part.get(""Content-Type"", """")) \ or ""us-ascii"" print(payload.decode(chartype), file=stream) break" 2332,"def getrawpart(self, msgid, stream=sys.stdout): """"""Get the first part from the message and print it raw. 
"""""" for hdr, part in self._get(msgid): pl = part.get_payload(decode=True) if pl != None: print(pl, file=stream) break" 2333,"def getrawpartid(self, msgid, partid, stream=sys.stdout): """"""Get a specific part from the message and print it raw. """""" parts = [part for hdr,part in self._get(msgid)] part = parts[int(partid)] pl = part.get_payload(decode=True) if pl != None: print(pl, file=stream)" 2334,"def getraw(self, msgid, stream=sys.stdout): """"""Get the whole message and print it. """""" foldername, msgkey = msgid.split(SEPERATOR) folder = self.folder if foldername == ""INBOX"" else self._getfolder(foldername) msg = folder[msgkey] print(msg.content)" 2335,"def getstruct(self, msgid, as_json=False, stream=sys.stdout): """"""Get and print the whole message. as_json indicates whether to print the part list as JSON or not. """""" parts = [part.get_content_type() for hdr, part in self._get(msgid)] if as_json: print(json.dumps(parts), file=stream) else: for c in parts: print(c, file=stream)" 2336,"def _extract_alphabet(self, grammar): """""" Extract an alphabet from the given grammar. """""" alphabet = set([]) for terminal in grammar.Terminals: alphabet |= set([x for x in terminal]) self.alphabet = list(alphabet)" 2337,"def _mpda(self, re_grammar, splitstring=0): """""" Args: re_grammar (list): A list of grammar rules splitstring (bool): A boolean for enabling or disabling the splitting of symbols using a space Returns: PDA: The generated PDA """""" cnfgrammar = CNFGenerator(re_grammar) if not self.alphabet: self._extract_alphabet(cnfgrammar) cnftopda = CnfPda(self.alphabet) productions = {} nonterminals = [] nonterminals.append(cnfgrammar.init_symbol) for key in list(cnfgrammar.grammar_nonterminals): if key != cnfgrammar.init_symbol: nonterminals.append(key) for key in list(cnfgrammar.grammar_nonterminals): j = 0 productions[key] = {} # print 'testing '+key for pair in cnfgrammar.grammar_rules: cnf_form = list(pair) if cnf_form[0] == key: productions[key][j] = {} if isinstance(cnf_form[1], type(())): # print list(p[1]) productions[key][j]['b0'] = list(cnf_form[1])[0] productions[key][j]['b1'] = list(cnf_form[1])[1] else: # print p[1] productions[key][j]['a'] = cnf_form[1] j = j + 1 return cnftopda.initialize( nonterminals, productions, list( cnfgrammar.grammar_terminals), splitstring)" 2338,"def yyparse(self, cfgfile, splitstring=0): """""" Args: cfgfile (str): The path for the file containing the CFG rules splitstring (bool): A boolean for enabling or disabling the splitting of symbols using a space Returns: PDA: The generated PDA """""" re_grammar = self._read_file(cfgfile) mma = self._mpda(re_grammar, splitstring) return mma" 2339,"def natural(a, b): """""" Sorts the inputted items by their natural order, trying to extract a \ number from them to sort by. 
:param a <str> b <str> :return <int> 1 || 0 || -1 :usage |>>> from projex import sorting |>>> a = [ 'test1', 'test2', 'test10', 'test20', 'test09' ] |>>> a.sort() |>>> print a |['test09', 'test1', 'test10', 'test2', 'test20'] |>>> a.sort( sorting.natural ) |>>> print a |['test1', 'test2', 'test09', 'test10', 'test20'] """""" stra = nstr(a).lower() strb = nstr(b).lower() # test to see if the two are identical if stra == strb: return 0 # look up all the pairs of items aresults = EXPR_NATURAL.findall(stra) bresults = EXPR_NATURAL.findall(strb) # make sure we have the same number of results bcount = len(bresults) for i in range(len(aresults)): # make sure we don't exceed the number of elements in b if bcount <= i: break atext, anum = aresults[i] btext, bnum = bresults[i] # compare the text components if atext != btext: return cmp(atext, btext) if not anum: anum = 0 if not bnum: bnum = 0 # compare the numeric components anum = int(anum) bnum = int(bnum) if anum != bnum: return cmp(anum, bnum) # b has less characters than a, so should sort before return 1" 2340,"def versional(a, b): """""" Sorts the inputted items by their natural order, trying to extract a \ number from them to sort by. :param a <str> b <str> :return <int> 1 || 0 || -1 :usage |>>> from projex import sorting |>>> a = [ 'test-1.1.2', 'test-1.02', 'test-1.2', 'test-1.18' ] |>>> a.sort() |>>> print a |['test-1.02', 'test-1.1.2', 'test-1.18', 'test-1.2'] |>>> a.sort( sorting.natural ) |>>> print a |['test-1.1.2', 'test-1.02', 'test-1.2', 'test-1.18'] |>>> a.sort( sorting.versional ) |>>> print a |['test-1.1.2', 'test-1.02', 'test-1.18', 'test-1.2'] """""" stra = nstr(a).lower() strb = nstr(b).lower() # look up all the pairs of items aresults = EXPR_VERSIONAL.findall(stra) bresults = EXPR_VERSIONAL.findall(strb) # make sure we have the same number of results bcount = len(bresults) for i in range(len(aresults)): # make sure we don't exceed the number of elements in b if bcount <= i: break atext, anum = aresults[i] btext, bnum = bresults[i] # compare the text components if atext != btext: return cmp(atext, btext) if not anum: anum = 0 if not bnum: bnum = 0 # compare the numeric components if atext == '.': anum = int(float('.' + anum) * 10000) bnum = int(float('.' + bnum) * 10000) else: anum = int(anum) bnum = int(bnum) if anum != bnum: return cmp(anum, bnum) # b has less characters than a, so should sort before return 1" 2341,"def action(self, column=None, value=None, **kwargs): """""" The underlying GICS table provides codes and descriptions identifying the current status or disposition of a grant project. >>> GICS().action('action_code', 'A') """""" return self._resolve_call('GIC_ACTION', column, value, **kwargs)" 2342,"def applicant(self, column=None, value=None, **kwargs): """""" Find the applicant information for a grant. >>> GICS().applicant('zip_code', 94105) """""" return self._resolve_call('GIC_APPLICANT', column, value, **kwargs)" 2343,"def assistance(self, column=None, value=None, **kwargs): """""" Provides the Catalog of Federal Domestic Assistance (CFDA) codes and names. """""" return self._resolve_call('GIC_ASST_PGM', column, value, **kwargs)" 2344,"def authority(self, column=None, value=None, **kwargs): """"""Provides codes and associated authorizing statutes."""""" return self._resolve_call('GIC_AUTHORITY', column, value, **kwargs)" 2345,"def construction(self, column=None, value=None, **kwargs): """""" Identifies monetary, descriptive, and milestone information for Wastewater Treatment construction grants. 
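The natural() and versional() comparators above return cmp-style results (-1, 0 or 1). Python 3 removed both the cmp() builtin and list.sort(cmp=...), so such comparators are wrapped with functools.cmp_to_key; a minimal sketch with a stand-in comparator, since the real ones live in this module:

from functools import cmp_to_key

def cmp_stub(a, b):
    # Stand-in with the same -1/0/1 contract as natural()/versional().
    return (a > b) - (a < b)

items = ['test1', 'test10', 'test2']
items.sort(key=cmp_to_key(cmp_stub))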
>>> GICS().construction('complete_percent', 91) """""" return self._resolve_call('GIC_CONSTRUCTION', column, value, **kwargs)" 2346,"def eligible_cost(self, column=None, value=None, **kwargs): """""" The assistance dollar amounts by eligible cost category. >>> GICS().eligible_cost('amount', 100000) """""" return self._resolve_call('GIC_ELIGIBLE_COST', column, value, **kwargs)" 2347,"def grant(self, column=None, value=None, **kwargs): """""" Provides various award, project, and grant personnel information. >>> GICS().grant('project_city_name', 'San Francisco') """""" return self._resolve_call('GIC_GRANT', column, value, **kwargs)" 2348,"def grant_assistance(self, column=None, value=None, **kwargs): """"""Many-to-many table connecting grants and assistance."""""" return self._resolve_call('GIC_GRANT_ASST_PGM', column, value, **kwargs)" 2349,"def grant_authority(self, column=None, value=None, **kwargs): """"""Many-to-many table connecting grants and authority."""""" return self._resolve_call('GIC_GRANT_AUTH', column, value, **kwargs)" 2350,"def lab_office(self, column=None, value=None, **kwargs): """"""Abbreviations, names, and locations of labratories and offices."""""" return self._resolve_call('GIC_LAB_OFFICE', column, value, **kwargs)" 2351,"def milestone(self, column=None, value=None, **kwargs): """""" Status codes and related dates of certain grants, >>> GICS().milestone('milestone_date', '16-MAR-01') """""" return self._resolve_call('GIC_MILESTONE', column, value, **kwargs)" 2352,"def record_type(self, column=None, value=None, **kwargs): """""" Codes and descriptions indicating whether an award is for a new project or for the continuation of a currently funded one. >>> GICS().record_type('record_type_code', 'A') """""" return self._resolve_call('GIC_RECORD_TYPE', column, value, **kwargs)" 2353,"def srf_cap(self, column=None, value=None, **kwargs): """""" Fiscal dollar amounts for State Revolving Fund Capitalization Grants. >>> GICS().srf_cap('grant_number', '340001900') """""" return self._resolve_call('GIC_SRF_CAP', column, value, **kwargs)" 2354,"def status(self, column=None, value=None, **kwargs): """""" Provides codes and descriptions of project milestones. >>> GICS().status('status_code', 'AF') """""" return self._resolve_call('GIC_STATUS', column, value, **kwargs)" 2355,"def recClearTag(element): """"""Applies maspy.xml.clearTag() to the tag attribute of the ""element"" and recursively to all child elements. :param element: an :instance:`xml.etree.Element` """""" children = element.getchildren() if len(children) > 0: for child in children: recClearTag(child) element.tag = clearTag(element.tag)" 2356,"def recRemoveTreeFormating(element): """"""Removes whitespace characters, which are leftovers from previous xml formatting. :param element: an instance of lxml.etree._Element str.strip() is applied to the ""text"" and the ""tail"" attribute of the element and recursively to all child elements. """""" children = element.getchildren() if len(children) > 0: for child in children: recRemoveTreeFormating(child) if element.text is not None: if len(element.text.strip()) == 0: element.text = None else: element.text = element.text.strip() if element.tail is not None: if len(element.tail.strip()) == 0: element.tail = None else: element.tail = element.tail.strip()" 2357,"def recCopyElement(oldelement): """"""Generates a copy of an xml element and recursively of all child elements. :param oldelement: an instance of lxml.etree._Element :returns: a copy of the ""oldelement"" .. 
warning:: doesn't copy ``.text`` or ``.tail`` of xml elements """""" newelement = ETREE.Element(oldelement.tag, oldelement.attrib) if len(oldelement.getchildren()) > 0: for childelement in oldelement.getchildren(): newelement.append(recCopyElement(childelement)) return newelement" 2358,"def cvParamFromDict(attributes): """"""Python representation of a mzML cvParam = tuple(accession, value, unitAccession). :param attributes: #TODO: docstring :returns: #TODO: docstring """""" keys = ['accession', 'value', 'unitAccession'] return tuple(attributes[key] if key in attributes else None for key in keys)" 2359,"def userParamFromDict(attributes): """"""Python representation of a mzML userParam = tuple(name, value, unitAccession, type) :param attributes: #TODO: docstring :returns: #TODO: docstring """""" keys = ['name', 'value', 'unitAccession', 'type'] return tuple(attributes[key] if key in attributes else None for key in keys)" 2360,"def getParam(xmlelement): """"""Converts an mzML xml element to a param tuple. :param xmlelement: #TODO docstring :returns: a param tuple or False if the xmlelement is not a parameter ('userParam', 'cvParam' or 'referenceableParamGroupRef') """""" elementTag = clearTag(xmlelement.tag) if elementTag in ['userParam', 'cvParam', 'referenceableParamGroupRef']: if elementTag == 'cvParam': param = cvParamFromDict(xmlelement.attrib) elif elementTag == 'userParam': param = userParamFromDict(xmlelement.attrib) else: param = refParamGroupFromDict(xmlelement.attrib) else: param = False return param" 2361,"def extractParams(xmlelement): """""" #TODO docstring :param xmlelement: #TODO docstring :returns: #TODO docstring """""" params = list() children = list() for child in xmlelement.getchildren(): param = getParam(child) if param: params.append(param) else: children.append(child) return params, children" 2362,"def xmlAddParams(parentelement, params): """"""Generates new mzML parameter xml elements and adds them to the 'parentelement' as xml children elements. :param parentelement: :class:`xml.etree.Element`, an mzML element :param params: a list of mzML parameter tuples ('cvParam', 'userParam' or 'referencableParamGroup') """""" if not params: return None for param in params: if len(param) == 3: cvAttrib = {'cvRef': param[0].split(':')[0], 'accession': param[0], 'name':oboTranslator.getNameWithId(param[0]) } if param[1]: cvAttrib.update({'value': param[1]}) else: cvAttrib.update({'value': ''}) if param[2]: unitName = oboTranslator.getNameWithId(param[2]) cvAttrib.update({'unitAccession': param[2], 'unitCvRef': param[2].split(':')[0], 'unitName': unitName }) paramElement = ETREE.Element('cvParam', **cvAttrib) elif len(param) == 4: userAttrib = {'name': param[0]} if param[1]: userAttrib.update({'value': param[1]}) else: userAttrib.update({'value': ''}) if param[2]: userAttrib.update({'unitAccession': param[2], 'unitCvRef': param[2].split(':')[0] }) if param[3]: userAttrib.update({'type': param[3]}) paramElement = ETREE.Element('userParam', **userAttrib) elif param[0] == 'ref': refAttrib = {'ref': param[1]} paramElement = ETREE.Element('referenceableParamGroupRef', **refAttrib ) parentelement.append(paramElement)" 2363,"def interpretBitEncoding(bitEncoding): """"""Returns a floattype string and a numpy array type. 
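As a hedged illustration of the param-tuple convention used by cvParamFromDict() and xmlAddParams() above: a cvParam attribute dict collapses to (accession, value, unitAccession), with None standing in for missing keys. The accession below is only an example value.

attrib = {'accession': 'MS:1000514', 'value': ''}
keys = ['accession', 'value', 'unitAccession']
param = tuple(attrib[key] if key in attrib else None for key in keys)
# param == ('MS:1000514', '', None)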
:param bitEncoding: Must be either '64' or '32' :returns: (floattype, numpyType) """""" if bitEncoding == '64': floattype = 'd' # 64-bit numpyType = numpy.float64 elif bitEncoding == '32': floattype = 'f' # 32-bit numpyType = numpy.float32 else: errorText = ''.join(['bitEncoding \'', bitEncoding, '\' not defined. ', 'Must be \'64\' or \'32\'' ]) raise TypeError(errorText) return (floattype, numpyType)" 2364,"def decodeBinaryData(binaryData, arrayLength, bitEncoding, compression): """"""Function to decode a mzML byte array into a numpy array. This is the inverse function of :func:`encodeBinaryData`. Concept inherited from :func:`pymzml.spec.Spectrum._decode` of the python library `pymzML <https://pymzml.github.io/>`_. :param binaryData: #TODO: docstring :param arrayLength: #TODO: docstring :param binEncoding: #TODO: docstring :param compression: #TODO: docstring :returns: #TODO: docstring """""" #TODO: should raise an error if a wrong compression is specified bitEncodedData = binaryData.encode(""utf-8"") bitDecodedData = B64DEC(bitEncodedData) floattype, numpyType = interpretBitEncoding(bitEncoding) if compression == 'zlib': decompressedData = zlib.decompress(bitDecodedData) else: decompressedData = bitDecodedData fmt = '{endian}{arraylength}{floattype}'.format(endian='<', arraylength=arrayLength, floattype=floattype ) dataArray = numpy.array(UNPACK(fmt, decompressedData), dtype=numpyType) return dataArray" 2365,"def encodeBinaryData(dataArray, bitEncoding, compression): """"""Function to encode a ``numpy.array`` into a mzML byte array. This is the inverse function of :func:`decodeBinaryData`. :param dataArray: #TODO: docstring :param bitEncoding: #TODO: docstring :param compression: #TODO: docstring :returns: #TODO: docstring """""" #TODO: should raise an error if a wrong compression is specified arrayLength = len(dataArray) floattype, __ = interpretBitEncoding(bitEncoding) fmt = '{endian}{arraylength}{floattype}'.format(endian='<', arraylength=arrayLength, floattype=floattype ) packedData = PACK(fmt, *dataArray) if compression == 'zlib': compressedData = zlib.compress(packedData) else: compressedData = packedData encodedData = B64ENC(compressedData) return encodedData, arrayLength" 2366,"def findBinaryDataType(params): """""" #TODO: docstring from: http://www.peptideatlas.org/tmp/mzML1.1.0.html#binaryDataArray a binaryDataArray ""MUST supply a *child* term of MS:1000518 (binary data type) only once"" :param params: #TODO: docstring :returns: #TODO: docstring """""" binaryDataType = None cvParam = None for param in params: if param[0] in binaryDataArrayTypes: binaryDataType = binaryDataArrayTypes[param[0]] cvParam = param break return binaryDataType, cvParam" 2367,"def extractBinaries(binaryDataArrayList, arrayLength): """""" #TODO: docstring :param binaryDataArrayList: #TODO: docstring :param arrayLength: #TODO: docstring :returns: #TODO: docstring """""" extractedArrays = dict() arrayInfo = dict() for binaryData in binaryDataArrayList: if findParam(binaryData['params'], 'MS:1000523') is not None: bitEncoding = '64' else: bitEncoding = '32' if findParam(binaryData['params'], 'MS:1000574') is not None: compression = 'zlib' else: compression = None dataType, dataTypeParam = findBinaryDataType(binaryData['params']) if binaryData['binary']: extractedArrays[dataType] = decodeBinaryData(binaryData['binary'], arrayLength, bitEncoding, compression ) else: __, numpyType = interpretBitEncoding(bitEncoding) extractedArrays[dataType] = numpy.array([], dtype=numpyType) binaryData['binary'] = None 
arrayInfo[dataType] = {'dataProcessingRef': None, 'params': binaryData['params'] } if 'dataProcessingRef' in binaryData: arrayInfo[dataType]['dataProcessingRef'] = \ binaryData['dataProcessingRef'] return extractedArrays, arrayInfo" 2368,"def sublistReader(xmlelement): """""" #TODO: docstring """""" #Note: actually I'm not 100% sure how this function behaves elements = list() params, children = extractParams(xmlelement) for child in children: currElement = dict() currElement.update(child.attrib) childparams, subchildren = extractParams(child) if childparams: currElement['params'] = childparams for subchild in subchildren: subchildTag = clearTag(subchild.tag) if 'List' in subchildTag: listelements, listparams = sublistReader(subchild) simplelist = [listelement['params'] for listelement in listelements] currElement[subchildTag] = simplelist else: subchildparams, _ = extractParams(subchild) currElement[subchildTag] = subchildparams if subchildTag == 'binary' and subchild.text: currElement[subchildTag] = subchild.text.strip() elements.append(currElement) return elements, params" 2369,"def next(self): """""" #TODO: docstring :returns: #TODO: docstring """""" try: self.event, self.element = next(self.iterator) self.elementTag = clearTag(self.element.tag) except StopIteration: clearParsedElements(self.element) raise StopIteration return self.event, self.element, self.elementTag" 2370,"def loadMetadata(self): """""" #TODO: docstring """""" #TODO: change that spectra dont have to be iterated to extract metadata #node if self._parsed: raise TypeError('Mzml file already parsed.') [None for _ in self._parseMzml()] self._parsed = True" 2371,"def parseSpectra(self): """""" #TODO: docstring :returns: #TODO: docstring """""" #Note: the spectra need to be iterated completely to save the #metadataNode if self._parsed: raise TypeError('Mzml file already parsed.') self._parsed = True return self._parseMzml()" 2372,"def _parseMzml(self): """""" #TODO: docstring """""" #TODO: this is already pretty nested, reduce that eg by using a function # processRunNode for event, element, elementTag in self: if elementTag == 'mzML': metadataNode = ETREE.Element(self.elementTag, self.element.attrib ) _, _, targetTag = next(self) break while targetTag != 'mzML': if targetTag == 'run': runNode = ETREE.Element('run', self.element.attrib) next(self) while self.event != 'end' or self.elementTag != 'run': if self.elementTag == 'spectrumList': #Add spectrumListNode specListAttrib = {'defaultDataProcessingRef': self.element.attrib['defaultDataProcessingRef'] } specListNode = ETREE.Element('spectrumList', specListAttrib) runNode.append(specListNode) #Parse and yield spectrum xml elements while self.event != 'end' or self.elementTag != 'spectrumList': if self.event == 'end' and self.elementTag == 'spectrum': yield self.element clearParsedElements(self.element) next(self) elif self.elementTag == 'chromatogramList': #Add chromatogramListNode chromListAttrib = {'defaultDataProcessingRef': self.element.attrib['defaultDataProcessingRef'] } chromListNode = ETREE.Element('chromatogramList', chromListAttrib ) runNode.append(chromListNode) #Parse and store chromatogram xml elements while self.event != 'end' or self.elementTag != 'chromatogramList': if self.event == 'end' and self.elementTag == 'chromatogram': self.chromatogramList.append(self.element) #Alternatively also the chromatogram xml #elements could be yielded: # yield self.element # clearParsedElements(self.element) next(self) else: runNode.append(self.element) next(self) 
metadataNode.append(runNode) break else: while self.event != 'end' or self.elementTag != targetTag: next(self) metadataNode.append(self.element) _, _, targetTag = next(self) recClearTag(metadataNode) recRemoveTreeFormating(metadataNode) self.metadataNode = recCopyElement(metadataNode) self.openfile.close()" 2373,"def calc_partition_function(mass, omega_array, temperature_array): """""" Calculates the partition function of your system at each point in time. Parameters ---------- mass : float The mass of the particle in kg omega_array : array array which represents omega at every point in your time trace and should therefore have the same length as the Hamiltonian temperature_array : array array which represents the temperature at every point in your time trace and should therefore have the same length as the Hamiltonian Returns: ------- Partition function : array The Partition Function at every point in time over a given trap-frequency and temperature change. """""" Kappa_t= mass*omega_array**2 return _np.sqrt(4*_np.pi**2*_scipy.constants.Boltzmann**2*temperature_array**2/(mass*Kappa_t))" 2374,"def calc_entropy(phase_space_density_array): """""" Calculates the entropy of your system at each point in time for your given phase space density evolution in time. Parameters ---------- phase_space_density_array : array array which represents the phase space density at every point in time Returns: ------- entropy : array The entropy of the particle at every point in time via the phase space density method. """""" entropy = -_scipy.constants.Boltzmann*_np.log(phase_space_density_array) return entropy" 2375,"def calc_hamiltonian(self, mass, omega_array): """""" Calculates the standard (pot+kin) Hamiltonian of your system. Parameters ---------- mass : float The mass of the particle in kg omega_array : array array which represents omega at every point in your time trace and should therefore have the same length as self.position_data Requirements ------------ self.position_data : array Already filtered for the degree of freedom of intrest and converted into meters. Returns ------- Hamiltonian : array The calculated Hamiltonian """""" Kappa_t= mass*omega_array**2 self.E_pot = 0.5*Kappa_t*self.position_data**2 self.E_kin = 0.5*mass*(_np.insert(_np.diff(self.position_data), 0, (self.position_data[1]-self.position_data[0]))*self.SampleFreq)**2 self.Hamiltonian = self.E_pot + self.E_kin return self.Hamiltonian" 2376,"def calc_phase_space_density(self, mass, omega_array, temperature_array): """""" Calculates the partition function of your system at each point in time. Parameters ---------- mass : float The mass of the particle in kg omega_array : array array which represents omega at every point in your time trace and should therefore have the same length as the Hamiltonian temperature_array : array array which represents the temperature at every point in your time trace and should therefore have the same length as the Hamiltonian Requirements ------------ self.position_data : array Already filtered for the degree of freedom of intrest and converted into meters. Returns: ------- Phasespace-density : array The Partition Function at every point in time over a given trap-frequency and temperature change. """""" return self.calc_hamiltonian(mass, omega_array)/calc_partition_function(mass, omega_array,temperature_array)" 2377,"def extract_thermodynamic_quantities(self,temperature_array): """""" Calculates the thermodynamic quantities of your system at each point in time. 
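A short numeric sketch of the calc_partition_function() formula above for a harmonic trap, using standard numpy/scipy imports; the particle mass and trap frequency are illustrative values only, not taken from any measurement.

import numpy as np
import scipy.constants

mass = 1.0e-18                              # kg, illustrative particle mass
omega = 2 * np.pi * 50e3 * np.ones(10)      # rad/s, constant trap frequency
temperature = 300.0 * np.ones(10)           # K

kappa = mass * omega**2
Z = np.sqrt(4 * np.pi**2 * scipy.constants.Boltzmann**2
            * temperature**2 / (mass * kappa))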
Calculated Quantities: self.Q (heat),self.W (work), self.Delta_E_kin, self.Delta_E_pot self.Delta_E (change of Hamiltonian), Parameters ---------- temperature_array : array array which represents the temperature at every point in your time trace and should therefore have the same length as the Hamiltonian Requirements ------------ execute calc_hamiltonian on the DataObject first Returns: ------- Q : array The heat exchanged by the particle at every point in time over a given trap-frequency and temperature change. W : array The work ""done"" by the particle at every point in time over a given trap-frequency and temperature change. """""" beta = 1/(_scipy.constants.Boltzmann*temperature_array) self.Q = self.Hamiltonian*(_np.insert(_np.diff(beta),0,beta[1]-beta[0])*self.SampleFreq) self.W = self.Hamiltonian-self.Q self.Delta_E_kin = _np.diff(self.E_kin)*self.SampleFreq self.Delta_E_pot = _np.diff(self.E_pot)*self.SampleFreq self.Delta_E = _np.diff(self.Hamiltonian)*self.SampleFreq return self.Q, self.W" 2378,"def calc_mean_and_variance_of_variances(self, NumberOfOscillations): """""" Calculates the mean and variance of a set of varainces. This set is obtained by splitting the timetrace into chunks of points with a length of NumberOfOscillations oscillations. Parameters ---------- NumberOfOscillations : int The number of oscillations each chunk of the timetrace used to calculate the variance should contain. Returns ------- Mean : float Variance : float """""" SplittedArraySize = int(self.SampleFreq/self.FTrap.n) * NumberOfOscillations VoltageArraySize = len(self.voltage) SnippetsVariances = _np.var(self.voltage[:VoltageArraySize-_np.mod(VoltageArraySize,SplittedArraySize)].reshape(-1,SplittedArraySize),axis=1) return _np.mean(SnippetsVariances), _np.var(SnippetsVariances)" 2379,"def register_template_directory(kb_app: kb, sphinx_app: Sphinx, sphinx_env: BuildEnvironment, docnames=List[str], ): """""" Add this resource's templates dir to template paths """""" template_bridge = sphinx_app.builder.templates actions = ResourceAction.get_callbacks(kb_app) for action in actions: f = os.path.dirname(inspect.getfile(action)) template_bridge.loaders.append(SphinxFileSystemLoader(f))" 2380,"def add_directives(kb_app: kb, sphinx_app: Sphinx, sphinx_env: BuildEnvironment, docnames=List[str], ): """""" For each resource type, register a new Sphinx directive """""" for k, v in list(kb_app.config.resources.items()): sphinx_app.add_directive(k, ResourceDirective)" 2381,"def stamp_title(kb_app: kb, sphinx_app: Sphinx, doctree: doctree): """""" Walk the tree and extra RST title into resource.title """""" # First, find out which resource this is. Won't be easy. resources = sphinx_app.env.resources confdir = sphinx_app.confdir source = PurePath(doctree.attributes['source']) # Get the relative path inside the docs dir, without .rst, then # get the resource docname = str(source.relative_to(confdir)).split('.rst')[0] resource = resources.get(docname) if resource: # Stamp the title on the resource title = get_rst_title(doctree) resource.title = title" 2382,"def init_app(self, app, config_prefix=""PYBANKID""): """"""Initialize the `app` for use with this :class:`~PyBankID`. This is called automatically if `app` is passed to :meth:`~PyBankID.__init__`. The app is configured according to the configuration variables ``PREFIX_CERT_PATH``, ``PREFIX_KEY_PATH`` and ``PREFIX_TEST_SERVER``, where ""PREFIX"" defaults to ""PYBANKID"". 
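A hedged configuration sketch for the initialisation described here: the config keys follow the PREFIX_* pattern above with the default PYBANKID prefix, while the certificate paths are placeholders and the commented constructor call is an assumption about this extension's import name.

from flask import Flask

app = Flask(__name__)
app.config['PYBANKID_CERT_PATH'] = '/path/to/certificate.pem'   # placeholder
app.config['PYBANKID_KEY_PATH'] = '/path/to/key.pem'            # placeholder
app.config['PYBANKID_TEST_SERVER'] = True
# PyBankID(app)   # assumed constructor; it would call init_app() as described above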
:param flask.Flask app: the application to configure for use with this :class:`~PyBankID` :param str config_prefix: determines the set of configuration variables used to configure this :class:`~PyBankID`. """""" if ""pybankid"" not in app.extensions: app.extensions[""pybankid""] = {} if config_prefix in app.extensions[""pybankid""]: raise Exception('duplicate config_prefix ""{0}""'.format(config_prefix)) app.config.setdefault(self._config_key(""CERT_PATH""), """") app.config.setdefault(self._config_key(""KEY_PATH""), """") app.config.setdefault(self._config_key(""TEST_SERVER""), False) # Adding the three url endpoints. app.add_url_rule( ""/authenticate/<personal_number>"", view_func=self._authenticate ) app.add_url_rule(""/sign/<personal_number>"", view_func=self._sign) app.add_url_rule(""/collect/<order_ref>"", view_func=self._collect) if hasattr(app, ""teardown_appcontext""): app.teardown_appcontext(self.teardown) else: app.teardown_request(self.teardown)" 2383,"def client(self): """"""The automatically created :py:class:`bankid.client.BankIDClient` object. :return: The BankID client. :rtype: :py:class:`bankid.jsonclient.BankIDJSONClient` """""" ctx = stack.top attr_name = self._config_key(""client"") if ctx is not None: if not hasattr(ctx, attr_name): setattr( ctx, attr_name, BankIDClient( ( current_app.config.get(self._config_key(""CERT_PATH"")), current_app.config.get(self._config_key(""KEY_PATH"")), ), current_app.config.get(self._config_key(""TEST_SERVER"")), ), ) return getattr(ctx, attr_name)" 2384,"def handle_exception(error): """"""Simple method for handling exceptions raised by `PyBankID`. :param flask_pybankid.FlaskPyBankIDError error: The exception to handle. :return: The exception represented as a dictionary. :rtype: dict """""" response = jsonify(error.to_dict()) response.status_code = error.status_code return response" 2385,"def create_from_pybankid_exception(cls, exception): """"""Class method for initiating from a `PyBankID` exception. :param bankid.exceptions.BankIDError exception: :return: The wrapped exception. :rtype: :py:class:`~FlaskPyBankIDError` """""" return cls( ""{0}: {1}"".format(exception.__class__.__name__, str(exception)), _exception_class_to_status_code.get(exception.__class__), )" 2386,"def to_dict(self): """"""Create a dict representation of this exception. :return: The dictionary representation. :rtype: dict """""" rv = dict(self.payload or ()) rv[""message""] = self.message return rv" 2387,"def integers(num, minimum, maximum, base=10): # TODO: Ensure numbers within bounds """"""Random integers within specified interval. The integer generator generates truly random integers in the specified interval. Parameters ---------- num : int, bounds=[1, 1E4] Total number of integers in returned array. minimum : int, bounds=[-1E9, 1E9] Minimum value (inclusive) of returned integers. maximum : int, bounds=[-1E9, 1E9] Maximum value (inclusive) of returned integers. base: int, values=[2, 8, 10, 16], default=10 Base used to print numbers in array, the default is decimal representation (base=10). Returns ------- integers : array A 1D numpy array containing integers between the specified bounds. 
Examples -------- Generate an array of 10 integers with values between -100 and 100, inclusive: >>> integers(10, -100, 100) A coin toss, where heads=1 and tails=0, with multiple flips (flips should be an odd number): >>> sum(integers(5, 0, 1)) """""" function = 'integers' num, minimum, maximum = list(map(int, [num, minimum, maximum])) # INPUT ERROR CHECKING # Check input values are within range if (1 <= num <= 10 ** 4) is False: print('ERROR: %s is out of range' % num) return if (-10 ** 9 <= minimum <= 10 ** 9) is False: print('ERROR: %s is out of range' % minimum) return if (-10 ** 9 <= maximum <= 10 ** 9) is False: print('ERROR: %s is out of range' % maximum) return if maximum < minimum: print('ERROR: %s is less than %s' % (maximum, minimum)) return base = int(base) if base not in [2, 8, 10, 16]: raise Exception('Base not in range!') opts = {'num': num, 'min': minimum, 'max': maximum, 'col': 1, 'base': base, 'format': 'plain', 'rnd': 'new'} integers = get_http(RANDOM_URL, function, opts) integers_arr = str_to_arr(integers) return integers_arr" 2388,"def sequence(minimum, maximum): """"""Randomize a sequence of integers."""""" function = 'sequences' opts = {'min': minimum, 'max': maximum, 'col': 1, 'format': 'plain', 'rnd': 'new'} deal = get_http(RANDOM_URL, function, opts) deal_arr = str_to_arr(deal) return deal_arr" 2389,"def string(num, length, digits=False, upper=True, lower=True, unique=False): """"""Random strings."""""" function = 'strings' # Convert arguments to random.org style # for a discussion on the method see: http://bit.ly/TKGkOF digits = convert(digits) upper = convert(upper) lower = convert(lower) unique = convert(unique) opts = {'num': num, 'len': length, 'digits': digits, 'upperalpha': upper, 'loweralpha': lower, 'format': 'plain', 'rnd': 'new'} seq = get_http(RANDOM_URL, function, opts) seq = seq.strip().split('\n') # convert to list # seq_arr = str_to_arr(seq) return seq" 2390,"def quota(ip=None): """"""Check your quota."""""" # TODO: Add arbitrary user defined IP check url = 'http://www.random.org/quota/?format=plain' data = urlopen(url) credit = int(data.read().strip()) if data.code == 200: return credit else: return ""ERROR: Server responded with code %s"" % data.code" 2391,"def get_http(base_url, function, opts): """"""HTTP request generator."""""" url = (os.path.join(base_url, function) + '/?' + urlencode(opts)) data = urlopen(url) if data.code != 200: raise ValueError(""Random.rg returned server code: "" + str(data.code)) return data.read()" 2392,"def read(*p): """"""Build a file path from paths and return the contents."""""" with open(os.path.join(*p), 'r') as fi: return fi.read()" 2393,"def execute(self, processProtocol, command, env={}, path=None, uid=None, gid=None, usePTY=0, childFDs=None): """"""Form a command and start a process in the desired environment. """""" raise NotImplementedError()" 2394,"def run(self, command, env={}, path=None, uid=None, gid=None, usePTY=0, childFDs=None): """"""Execute a command and return the results of the completed run. """""" deferred = defer.Deferred() processProtocol = _SummaryProcessProtocol(deferred) d = defer.maybeDeferred(self.execute, processProtocol, command, env, path, uid, gid, usePTY, childFDs) d.addErrback(deferred.errback) return deferred" 2395,"def getOutput(self, command, env={}, path=None, uid=None, gid=None, usePTY=0, childFDs=None): """"""Execute a command and get the output of the finished process. 
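A hedged sketch of the request URL that get_http() above assembles for the integers service; it only builds the URL and makes no network call, and the value of the RANDOM_URL constant is assumed here.

import os
from urllib.parse import urlencode

RANDOM_URL = 'https://www.random.org'   # assumed value of the module constant
opts = {'num': 5, 'min': 0, 'max': 1, 'col': 1, 'base': 10,
        'format': 'plain', 'rnd': 'new'}
url = os.path.join(RANDOM_URL, 'integers') + '/?' + urlencode(opts)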
"""""" deferred = defer.Deferred() processProtocol = _SummaryProcessProtocol(deferred) self.execute(processProtocol, command, env, path, uid, gid, usePTY, childFDs) @deferred.addCallback def getStdOut(tuple_): stdout, _stderr, _returnCode = tuple_ return stdout return deferred" 2396,"def getExitCode(self, command, env={}, path=None, uid=None, gid=None, usePTY=0, childFDs=None): """"""Execute a command and get the return code of the finished process. """""" deferred = defer.Deferred() processProtocol = _SummaryProcessProtocol(deferred) self.execute(processProtocol, command, env, path, uid, gid, usePTY, childFDs) @deferred.addCallback def getStdOut(tuple_): _stdout, _stderr, exitCode = tuple_ return exitCode return deferred" 2397,"def validate_task(original_task): """""" Validates task and adds default values for missing options using the following steps. 1. If there is no input list specified or if it is None, the input spec is assumed to be ['*']. 2. If there are not outputs specified, or if the output spec is None or an empty list, the output spec is assumed to be ['*']. 3. If the input or output spec is not iterable, they are converted into single element tuples. If they are any iterable, they are converted into tuples. 4. The task['fn'] option must be callable. 5. If number of outputs is more than one, task['fn'] must be a generator function. 6. Generator functions are not supported for output spec of '*'. Returns new task with updated options """""" task = original_task._asdict() # Default values for inputs and outputs if 'inputs' not in task or task['inputs'] is None: task['inputs'] = ['*'] # Outputs list cannot be empty if ('outputs' not in task or task['outputs'] is None or len(task['outputs']) == 0): task['outputs'] = ['*'] # Convert to tuples (even for single values) if not hasattr(task['inputs'], '__iter__') or isinstance(task['inputs'], str): task['inputs'] = (task['inputs'],) else: task['inputs'] = tuple(task['inputs']) if not hasattr(task['outputs'], '__iter__') or isinstance(task['outputs'], str): task['outputs'] = (task['outputs'],) else: task['outputs'] = tuple(task['outputs']) if not callable(task['fn']): raise TypeError('Task function must be a callable object') if (len(task['outputs']) > 1 and not inspect.isgeneratorfunction(task['fn'])): raise TypeError('Multiple outputs are only supported with \ generator functions') if inspect.isgeneratorfunction(task['fn']): if task['outputs'][0] == '*': raise TypeError('Generator functions cannot be used for tasks with \ output specification ""*""') return Task(**task)" 2398,"def run_task(task, workspace): """""" Runs the task and updates the workspace with results. 
Parameters ---------- task - dict Task Description Examples: {'task': task_func, 'inputs': ['a', 'b'], 'outputs': 'c'} {'task': task_func, 'inputs': '*', 'outputs': '*'} {'task': task_func, 'inputs': ['*','a'], 'outputs': 'b'} Returns a new workspace with results """""" data = copy.copy(workspace) task = validate_task(task) # Prepare input to task inputs = [input_parser(key, data) for key in task.inputs] if inspect.isgeneratorfunction(task.fn): # Multiple output task # Assuming number of outputs are equal to number of return values data.update(zip(task.outputs, task.fn(*inputs))) else: # Single output task results = task.fn(*inputs) if task.outputs[0] != '*': results = {task.outputs[0]: results} elif not isinstance(results, dict): raise TypeError('Result should be a dict for output type *') data.update(results) return data" 2399,"def run_hook(name, workspace, hooks): """"""Runs all hooks added under the give name. Parameters ---------- name - str Name of the hook to invoke workspace - dict Workspace that the hook functions operate on hooks - dict of lists Mapping with hook names and callback functions """""" data = copy.copy(workspace) for hook_listener in hooks.get(name, []): # Hook functions may mutate the data and returns nothing hook_listener(data) return data" 2400,"def add_task(self, fn, inputs=None, outputs=None): """""" Adds a task to the workflow. Returns self to facilitate chaining method calls """""" # self.tasks.append({'task': task, 'inputs': inputs, 'outputs': outputs}) self.tasks.append(Task(fn, inputs, outputs)) return self" 2401,"def add_hook(self, name, function): """""" Adds a function to be called for hook of a given name. The function gets entire workspace as input and does not return anything. Example: def hook_fcn(workspace): pass """""" if not callable(function): return ValueError('Hook function should be callable') if name not in self.hooks: self.hooks[name] = [] self.hooks[name].append(function) return self" 2402,"def dns(self): """"""DNS details."""""" dns = { 'elb': self.dns_elb(), 'elb_region': self.dns_elb_region(), 'global': self.dns_global(), 'region': self.dns_region(), 'instance': self.dns_instance(), } return dns" 2403,"def s3_app_bucket(self, include_region=False): """"""Generate s3 application bucket name. Args: include_region (bool): Include region in the name generation. """""" if include_region: s3_app_bucket = self.format['s3_app_region_bucket'].format(**self.data) else: s3_app_bucket = self.format['s3_app_bucket'].format(**self.data) return s3_app_bucket" 2404,"def shared_s3_app_bucket(self, include_region=False): """"""Generate shared s3 application bucket name. Args: include_region (bool): Include region in the name generation. 
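A hedged usage sketch of the task helpers above, assuming Task, validate_task and run_task are importable from this module and that input_parser() resolves plain keys from the workspace; the function and workspace values are illustrative.

def clean(raw):
    return raw.strip()

task = validate_task(Task(fn=clean, inputs='raw', outputs=None))
# task.inputs  == ('raw',)   # a bare string becomes a one-element tuple
# task.outputs == ('*',)     # missing/None outputs default to ('*',)

workspace = run_task(Task(clean, ['raw'], ['text']), {'raw': '  hello  '})
# workspace['text'] == 'hello'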
"""""" if include_region: shared_s3_app_bucket = self.format['shared_s3_app_region_bucket'].format(**self.data) else: shared_s3_app_bucket = self.format['shared_s3_app_bucket'].format(**self.data) return shared_s3_app_bucket" 2405,"def iam(self): """"""Generate iam details."""""" iam = { 'group': self.format['iam_group'].format(**self.data), 'lambda_role': self.format['iam_lambda_role'].format(**self.data), 'policy': self.format['iam_policy'].format(**self.data), 'profile': self.format['iam_profile'].format(**self.data), 'role': self.format['iam_role'].format(**self.data), 'user': self.format['iam_user'].format(**self.data), 'base': self.format['iam_base'].format(**self.data), } return iam" 2406,"def archaius(self): """"""Generate archaius bucket path."""""" bucket = self.format['s3_bucket'].format(**self.data) path = self.format['s3_bucket_path'].format(**self.data) archaius_name = self.format['s3_archaius_name'].format(**self.data) archaius = {'s3': archaius_name, 'bucket': bucket, 'path': path} return archaius" 2407,"def jenkins(self): """"""Generate jenkins job details."""""" job_name = self.format['jenkins_job_name'].format(**self.data) job = {'name': job_name} return job" 2408,"def gitlab(self): """"""Generate gitlab details."""""" main_name = self.format['git_repo'].format(**self.data) qe_name = self.format['git_repo_qe'].format(**self.data) config_name = self.format['git_repo_configs'].format(**self.data) git = { 'config': config_name, 'main': main_name, 'qe': qe_name, } return git" 2409,"def render(plain, urlHandler=None, templatePaths=None, options=None, defaultTag='div', wikiStyle='basic'): """""" Renders the inputted plain text wiki information into HTML rich text. :param plain | <str> | \ Include some additional documentation urlHandler | <UlrHandler> || None :return <str> html """""" if not plain: return '' __style = WIKI_STYLES.styles.get(wikiStyle, WIKI_STYLES.styles['basic']) # process the wiki text with the mako template system if not urlHandler: urlHandler = UrlHandler.current() # render the text out from mako template plain = projex.makotext.render(plain, options=options, templatePaths=templatePaths, silent=True) # generate wiki doc info lines = re.split('\n\r|\r\n|\n|\r', plain) curr_section = '' curr_section_level = 0 html = [] skip = [] nowiki_stack = [] nowiki_mode = 'pre' code_stack = [] table_stack = [] list_stack = [] section_stack = [] toc_data = [] align_div = '' list_indent = None ignore_list_stack = False # add the default tag html.append(__style['wiki_open'].format(tag=defaultTag)) for i, line in enumerate(lines): ignore_list_stack = False sline = line.strip() #---------------------------------------------------------------------- # INDENTATION CHECKS #---------------------------------------------------------------------- # check to see if we are continuing a list entry if list_indent: line_indent = len(re.match('\s*', line).group()) if line_indent < list_indent: list_indent = None html.append(__style['list_item_close']) else: ignore_list_stack = True if not sline: html.append(__style['spacer']) continue if i in skip: continue #---------------------------------------------------------------------- # ALIGNMENT #---------------------------------------------------------------------- # check for a center option center = EXPR_CENTER.match(sline) right = EXPR_RIGHT.match(sline) left = EXPR_LEFT.match(sline) if center: style = center.groups()[0] line = center.groups()[1].strip() if align_div and align_div != 'center': html.append(__style['align_close']) align_div = 
'' if not align_div: if style == '--': html.append(__style['align_center']) else: html.append(__style['align_center_floated']) align_div = 'center' else: html.append(__style['newline']) # check for a right align option elif right: style = right.groups()[0] line = right.groups()[1] if align_div and align_div != 'right': html.append(__style['align_close']) align_div = '' if not align_div: if style == '--': html.append(__style['align_right']) else: html.append(__style['align_right_floated']) align_div = 'right' else: html.append(__style['newline']) # check for a left align option elif left: style = left.groups()[1] line = left.groups()[0] if align_div and align_div != 'left': html.append(__style['align_close']) align_div = '' if not align_div: if style == '--': html.append(__style['align_left']) else: html.append(__style['align_left_floated']) align_div = 'left' else: html.append(__style['newline']) # otherwise, clear alignment elif align_div: html.append(__style['align_close']) align_div = '' #---------------------------------------------------------------------- # INDENTATION CHECKS #---------------------------------------------------------------------- # make sure we're on the same level if curr_section and sline and (len(line) - len(line.lstrip())) < curr_section_level: html += section_stack section_stack = [] curr_section = '' curr_section_level = 0 count = i while sline.endswith('\\') and count + 1 < len(lines): sline += ' ' + lines[count + 1].strip() skip.append(count) count += 1 #---------------------------------------------------------------------- # IGNORE WIKI INFORMATION #---------------------------------------------------------------------- # check to see what is wiki protected if sline.startswith('<nowiki'): mode = re.search('mode=""(\w*)""', sline) if mode: nowiki_mode = nstr(mode.group(1)) else: nowiki_mode = None if not ignore_list_stack: html += list_stack list_stack = [] html += table_stack table_stack = [] if nowiki_mode is None: html.append(__style['nowiki_open']) nowiki_stack.append(__style['nowiki_close']) else: nowiki_stack.append('') continue elif sline == '</nowiki>': html += nowiki_stack nowiki_stack = [] continue elif nowiki_stack: if nowiki_mode == 'safe': html.append(line) else: html.append(xml.sax.saxutils.escape(line)) continue #---------------------------------------------------------------------- # TABLES #---------------------------------------------------------------------- parts = line.split(' | ') if len(parts) == 1: html += table_stack table_stack = [] # replace standard items for key, repl in PRE_ESCAPE_REPLACE.items(): line = line.replace(key, repl) # strip out nowiki lines nowiki_dict = {} count = 0 for section in EXPR_NOWIKI.findall(line)[::2]: nowiki_dict['nowiki_%i' % count] = section newtext = '%%(nowiki_%i)s' % count line = line.replace('<nowiki>%s</nowiki>' % section, newtext) count += 1 #---------------------------------------------------------------------- # SECTIONS #---------------------------------------------------------------------- # check for a div section section = EXPR_SECTION.match(sline) if section: html += code_stack code_stack = [] name = section.group(2) if name != curr_section: html += section_stack section_stack = [] if name not in SECTION_ALERTS: section_stack.append(__style['section_close']) display = projex.text.capitalizeWords(name) mapped = SECTION_MAP.get(name, display) html.append(__style['section_open'].format(name=name, title=mapped)) else: display = projex.text.capitalizeWords(name) mapped = SECTION_MAP.get(name, 
display) section_stack.append(__style['section_alert_close']) url, success = urlHandler.resolve('img:%s.png' % name) html.append(__style['section_alert_open'].format(name=name, title=mapped)) curr_section = name else: html.append(__style['newline']) sline = sline.replace(section.group(), '') line = line.replace(section.group(), ' ' * len(section.group())) curr_section_level = len(line) - len(line.lstrip()) #---------------------------------------------------------------------- # CODE #---------------------------------------------------------------------- # check for code code = EXPR_CODE.match(sline) if code: templ = '' code_line = code.groups()[0] if not code_stack: lang = 'python' lang_search = EXPR_LANG.search(code_line) if lang_search: lang = lang_search.groups()[0] code_line = code_line.replace(lang_search.group(), '') templ = __style['code_open'].format(lang=lang) code_stack.append(__style['code_close']) escaped = xml.sax.saxutils.escape(code_line) if not ignore_list_stack: html += list_stack list_stack = [] html += table_stack table_stack = [] html.append(templ + escaped) continue # exit out of the code mode else: html += code_stack code_stack = [] #---------------------------------------------------------------------- # make sure we have no html data in the line if not sline: html.append(__style['paragraph_close']) html.append(__style['paragraph_open']) continue # check for horizontal rules if EXPR_HR.match(sline): style = '' html.append(__style['hr'].format(style=style)) continue #---------------------------------------------------------------------- # HEADERS #---------------------------------------------------------------------- # check for headers header = EXPR_HEADER.match(sline) if header: hopen, title, hclose = header.groups() hopencount = len(hopen) title = title.strip() if hopencount == len(hclose): name = projex.text.underscore(title) add = __style['header'].format(name=name, title=title, size=len(hopen)) spacing = '#' * hopencount opts = (spacing, name, title) toc_data.append('%s. [[#%s|%s]]' % opts) if not ignore_list_stack: html += list_stack list_stack = [] html += table_stack table_stack = [] html.append(add) continue line = xml.sax.saxutils.escape(line) for key, repl in POST_ESCAPE_REPLACE.items(): line = line.replace(key, repl) #---------------------------------------------------------------------- # CLASS TYPES #---------------------------------------------------------------------- # resolve any class links for result in EXPR_CLASS_LINK.findall(line): opts = result.split() for o, cls in enumerate(opts): # ignore base classes, need modules if '.' 
not in cls: continue url, success = urlHandler.resolveClass(cls) if success: opts[o] = __style['link_class'].format(url=url, text=cls.split('.')[-1]) info = __style['span_class'].format(crumbs=' '.join(opts)) line = line.replace('<' + result + '>', info) #---------------------------------------------------------------------- # GENERAL FORMATTING #---------------------------------------------------------------------- # replace formatting options for section in EXPR_UNDERLINE.findall(line)[::2]: text = __style['underline'].format(text=section) line = line.replace(""___%s___"" % section, text) for section in EXPR_INLINE_CODE.findall(line)[::2]: text = __style['inline_code'].format(text=section) line = line.replace(""`%s`"" % section, text) for section in EXPR_STRIKEOUT.findall(line)[::2]: text = __style['strikeout'].format(text=section) line = line.replace(""---%s---"" % section, text) for section in EXPR_BOLD.findall(line)[::2]: text = __style['bold'].format(text=section) line = line.replace(""'''%s'''"" % section, text) for section in EXPR_ITALIC.findall(line)[::2]: text = __style['italic'].format(text=section) line = line.replace(""''%s''"" % section, text) #---------------------------------------------------------------------- # IMAGES #---------------------------------------------------------------------- # resolve any images for grp, url in EXPR_IMG.findall(line): urlsplit = url.split('|') last_word = re.findall('\w+', urlsplit[0])[-1] if len(urlsplit) == 1: urlsplit.append('') url, _ = urlHandler.resolveImage(urlsplit[0]) line = line.replace(grp, __style['img'].format(url=url, style=urlsplit[1], title=last_word)) #---------------------------------------------------------------------- # COLORS #---------------------------------------------------------------------- # resolve any colors for grp, coloring in EXPR_COLOR.findall(line): splt = coloring.split('|') if len(splt) == 1: splt.append('') line = line.replace(grp, __style['color'].format(color=splt[0], text=splt[1])) #---------------------------------------------------------------------- # SPANS #---------------------------------------------------------------------- # resolve any spans for grp, coloring in EXPR_SPAN.findall(line): splt = coloring.split('|') if len(splt) == 1: splt.append('') templ = '<span style=""%s"">%s</span>' % (splt[0], splt[1]) line = line.replace(grp, __style['span'].format(style=splt[0], text=splt[1])) #---------------------------------------------------------------------- # LINKS #---------------------------------------------------------------------- # resolve any external urls for result in EXPR_EXTLINK.findall(line): grp = result[0] url = result[1] urlsplit = url.split() if len(urlsplit) == 1: urlsplit.append(urlsplit[0]) url = urlsplit[0] urltext = ' '.join(urlsplit[1:]) line = line.replace(grp, __style['link_ext'].format(url=url, text=urltext)) # resolve any internal urls for grp, url in EXPR_INTLINK.findall(line): urlsplit = url.split('|') if len(urlsplit) == 1: last_word = re.findall('\w+', urlsplit[0])[-1] urlsplit.append(last_word) url = urlsplit[0] title = '|'.join(urlsplit[1:]) found = True tagsplit = url.split('#') if len(tagsplit) == 1: url = url tag = '' else: url = tagsplit[0] tag = '#'.join(tagsplit[1:]) # make sure the url exists if url: url, exists = urlHandler.resolve(url) if not exists: found = False # join together the resolved url and the tag if tag: url = url + '#' + tag # generate the link if found: templ = __style['link_found'].format(url=url, text=title) else: templ = 
__style['link_not_found'].format(url=url, text=title) line = line.replace(grp, templ) #---------------------------------------------------------------------- # LISTS #---------------------------------------------------------------------- # process lists results = EXPR_LIST.match(line) if results: level, linetext = results.groups() level_count = len(level) level_type = 'unordered' if level[-1] == '*' else 'ordered' while level_count > len(list_stack): html.append(__style[level_type + '_list_open']) list_stack.append(__style[level_type + '_list_close']) while len(list_stack) > level_count: html.append(list_stack[-1]) list_stack = list_stack[:-1] space_line = line.replace(level + '.', ' ' * (len(level) + 1)) list_indent = len(re.match('\s*', space_line).group()) html.append(__style['list_item_open']) html.append(linetext) continue elif not ignore_list_stack: html += list_stack list_stack = [] #---------------------------------------------------------------------- # TABLES #---------------------------------------------------------------------- parts = line.split(' | ') if len(parts) > 1: if not table_stack: table_stack.append(__style['table_close']) html.append(__style['table_open']) cell_type = 'td' styles = '' cells = [] for part in parts: results = EXPR_TABLE_CELL.search(part) if not results: cells.append(__style['table_cell'].format(tag='td', style='', text=part.strip())) else: grp, cell_type, styles = results.groups() if not styles: styles = '' else: styles = styles.strip('[]') part = part.replace(grp, '').strip() opts = (cell_type, styles, part, cell_type) cells.append(__style['table_cell'].format(tag=cell_type, style=styles, text=part)) line = __style['table_row'].format(text=''.join(cells)) html.append((line % nowiki_dict)) else: html += table_stack table_stack = [] html.append(line % nowiki_dict) if align_div: html.append(__style['align_close']) if list_indent: html.append(__style['list_item_close']) html += table_stack html += list_stack html += code_stack html += nowiki_stack html += section_stack html.append(__style['wiki_close'].format(tag=defaultTag)) html_txt = '\n'.join(html) # resolve any table of contents for toc, options in EXPR_TOC.findall(html_txt): toc_wiki = '\n\t'.join(toc_data) toc_html = __style['toc_open'] toc_html += render(toc_wiki, urlHandler, templatePaths, options, 'div', wikiStyle) toc_html += __style['toc_close'] html_txt = html_txt.replace(toc, toc_html) # replace \[ and \] options html_txt = html_txt.replace('\[', '[').replace('\]', ']') return html_txt" 2410,"def get_value_matched_by_regex(field_name, regex_matches, string): """"""Ensure value stored in regex group exists."""""" try: value = regex_matches.group(field_name) if value is not None: return value except IndexError: pass raise MissingFieldError(string, field_name)" 2411,"def positive_int(val): """"""Parse `val` into a positive integer."""""" if isinstance(val, float): raise ValueError('""{}"" must not be a float'.format(val)) val = int(val) if val >= 0: return val raise ValueError('""{}"" must be positive'.format(val))" 2412,"def strictly_positive_int_or_none(val): """"""Parse `val` into either `None` or a strictly positive integer."""""" val = positive_int_or_none(val) if val is None or val > 0: return val raise ValueError('""{}"" must be strictly positive'.format(val))" 2413,"def oboTermParser(filepath): """"""Read a obo file and yield '[Term]' entries. :param filepath: file path of the .obo file :yields: lists containing all lines from a obo '[Term]' entry. 
Lines are not processed and still contain the newline character. """""" with io.open(filepath) as openfile: lineIter = iter([i.rstrip() for i in openfile.readlines()]) #Iterate through lines until the first obo ""[Term]"" is encountered try: line = next(lineIter) while line != '[Term]': line = next(lineIter) header = line #Remove entryLines = list() except StopIteration: errorText = 'File does not contain obo ""[Term]"" entries.' raise maspy.errors.FileFormatError(errorText) for line in lineIter: #Skip empty lines between entries if not line: continue if line == '[Term]': yield entryLines header = line #Remove entryLines = list() else: entryLines.append(line) #Yield last entry if entryLines: yield entryLines" 2414,"def _attributeLinesToDict(attributeLines): """"""Converts a list of obo 'Term' lines to a dictionary. :param attributeLines: a list of obo 'Term' lines. Each line contains a key and a value part which are separated by a ':'. :return: a dictionary containing the attributes of an obo 'Term' entry. NOTE: Some attributes can occur multiple times in one single term, for example 'is_a' or 'relationship'. However, currently only the last occurence is stored. """""" attributes = dict() for line in attributeLines: attributeId, attributeValue = line.split(':', 1) attributes[attributeId.strip()] = attributeValue.strip() return attributes" 2415,"def _termIsObsolete(oboTerm): """"""Determine wheter an obo 'Term' entry is marked as obsolete. :param oboTerm: a dictionary as return by :func:`maspy.ontology._attributeLinesToDict()` :return: bool """""" isObsolete = False if u'is_obsolete' in oboTerm: if oboTerm[u'is_obsolete'].lower() == u'true': isObsolete = True return isObsolete" 2416,"def load(self, filepath): """"""Import '[Term]' entries from an .obo file."""""" for attributeLines in oboTermParser(filepath): oboTerm = _attributeLinesToDict(attributeLines) if oboTerm['id'] not in self.oboTerms: self.oboTerms[oboTerm['id']] = oboTerm else: oldOboTerm = self.oboTerms[oboTerm['id']] oldTermIsObsolete = _termIsObsolete(oldOboTerm) newTermIsObsolete = _termIsObsolete(oboTerm) if oldTermIsObsolete and not newTermIsObsolete: self.oboTerms[oboTerm['id']] = oboTerm else: #At least one of two terms with identical id must be obsolete assert oldTermIsObsolete or newTermIsObsolete" 2417,"def discover_handler_classes(handlers_package): """""" Looks for handler classes within handler path module. Currently it's not looking deep into nested module. :param handlers_package: module path to handlers :type handlers_package: string :return: list of handler classes """""" if handlers_package is None: return # Add working directory into PYTHONPATH to import developer packages sys.path.insert(0, os.getcwd()) package = import_module(handlers_package) # Continue searching for module if package is not a module if hasattr(package, '__path__'): for _, modname, _ in pkgutil.iter_modules(package.__path__): import_module('{package}.{module}'.format(package=package.__name__, module=modname)) return registered_handlers" 2418,"def request(self, method, path, query=None, content=None): """""" Sends an HTTP request. This constructs a full URL, encodes and decodes HTTP bodies, and handles invalid responses in a pythonic way. 
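A hedged illustration of the attribute-line convention consumed by _attributeLinesToDict() above; the '[Term]' lines are a made-up example in standard OBO syntax, and, as noted in that docstring, only the last occurrence of a repeated key (here is_a) is kept.

entryLines = [
    'id: XX:0000001',
    'name: example term',
    'is_a: XX:0000010 ! first parent',
    'is_a: XX:0000020 ! second parent',
]
attributes = {}
for line in entryLines:
    attributeId, attributeValue = line.split(':', 1)
    attributes[attributeId.strip()] = attributeValue.strip()
# attributes['id']   == 'XX:0000001'
# attributes['is_a'] == 'XX:0000020 ! second parent'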
@type method: string @param method: HTTP method to use @type path: string @param path: HTTP URL path @type query: list of two-tuples @param query: query arguments to pass to urllib.urlencode @type content: str or None @param content: HTTP body content @rtype: object @return: JSON-Decoded response @raises GanetiApiError: If an invalid response is returned """""" if not path.startswith(""/""): raise ClientError(""Implementation error: Called with bad path %s"" % path) body = None if content is not None: data = self._json_encoder.encode(content) body = StringProducer(data) url = self._base_url + path if query: prepare_query(query) params = urlencode(query, doseq=True) url += ""?%s"" % params log.msg(""Sending request to %s %s %s"" % (url, self.headers, body), system=""Gentleman"") d = self._agent.request(method, url, headers=self.headers, bodyProducer=body) protocol = JsonResponseProtocol(d) @d.addErrback def connectionFailed(failure): failure.trap(ConnectionRefusedError) raise GanetiApiError(""Connection refused!"") @d.addCallback def cb(response): if response.code != 200: raise NotOkayError(code=response.code) response.deliverBody(protocol) return protocol.getData()" 2419,"def start(self): """""" Confirm that we may access the target cluster. """""" version = yield self.request(""get"", ""/version"") if version != 2: raise GanetiApiError(""Can't work with Ganeti RAPI version %d"" % version) log.msg(""Accessing Ganeti RAPI, version %d"" % version, system=""Gentleman"") self.version = version try: features = yield self.request(""get"", ""/2/features"") except NotOkayError, noe: if noe.code == 404: # Okay, let's calm down, this is totally reasonable. Certain # older Ganeti RAPIs don't have a list of features. features = [] else: # No, wait, panic was the correct thing to do. raise log.msg(""RAPI features: %r"" % (features,), system=""Gentleman"") self.features = features" 2420,"def get_multi_word_keywords(features): """"""This returns an OrderedDict containing the multi word keywords in order of length. This is so the tokenizer will match the longer matches before the shorter matches """""" keys = { 'is not': Token(TokenTypes.NOT_EQUAL, 'is not'), } return OrderedDict(sorted(list(keys.items()), key=lambda t: len(t[0]), reverse=True))" 2421,"def inside_try(func, options={}): """""" decorator to silence exceptions, for logging we want a ""safe"" fail of the functions """""" if six.PY2: name = func.func_name else: name = func.__name__ @wraps(func) def silenceit(*args, **kwargs): """""" the function func to be silenced is wrapped inside a try catch and returned, exceptions are logged exceptions are returned in an error dict takes all kinds of arguments and passes to the original func """""" excpt = None try: return func(*args, **kwargs) # pylint: disable=W0703 # inside_try.silenceit: Catching too general exception Exception # that's the idea! 
except Exception as excpt: # first tell the object in charge if 'ctx' in kwargs: ctx = kwargs['ctx'] else: # otherwise tell object defined in options # if we can be sure there is a context ctx = get_try_option(None, 'ctx') if not ctx: # tell a new object ctx = Bubble('Inside Try') # ctx.set_verbose(100); #todo: move to magic head = name + ': silenced function inside_try:Error:' if get_try_option(ctx, 'count_it'): ctx.gbc.cry(head + 'counting') if get_try_option(ctx, 'print_it'): ctx.gbc.cry(head + 'printing:' + str(excpt)) if get_try_option(ctx, 'print_args'): ctx.gbc.cry(head + 'printing ak:' + str(excpt)) ctx.gbc.cry('args', stuff=args) ctx.gbc.cry('kwargs', stuff=kwargs) if get_try_option(ctx, 'inspect_it'): ctx.gbc.cry(head + 'inspecting:', stuff=excpt) for s in inspect.stack(): ctx.gbc.cry(head + ':stack:', stuff=s) if get_try_option(ctx, 'log_it'): ctx.gbc.cry(head + 'logging') for s in inspect.stack(): ctx.gbc.cry(head + ':stack:', stuff=s) if get_try_option(ctx, 'reraise_it'): ctx.gbc.cry(head + 'reraising') raise excpt # always return error return {'error': str(excpt), 'silenced': name, 'args': args, 'kwargs': kwargs} return silenceit" 2422,"def start(self): """""" Start the server and run forever. """""" Server().start(self.options,self.handler_function, self.__class__.component_type)" 2423,"def collect(basepath, exclude=None, processPlugins=True): """""" Collects all the packages associated with the inputted filepath. :param module | <module> :return ([<str> pkg, ..], [(<str> path, <str> relpath), ..] data) """""" if exclude is None: exclude = ['.py', '.pyc', '.pyo', '.css', '.exe'] imports = [] datas = [] # walk the folder structure looking for all packages and data files basename = os.path.basename(basepath) basepath = os.path.abspath(basepath) baselen = len(basepath) - len(basename) plugfiles = [] for root, folders, files in os.walk(basepath): if '.svn' in root or '.git' in root: continue # mark the plugins file for load plugdata = None if processPlugins and '__plugins__.py' in files: filename = os.path.join(root, '__plugins__.py') package = projex.packageFromPath(filename) + '.__plugins__' pkgpath = projex.packageRootPath(filename) if pkgpath not in sys.path: sys.path.insert(0, pkgpath) # import the plugins module __import__(package) pkg = sys.modules[package] recurse = getattr(pkg, '__recurse__', False) plugdata = {'recurse': recurse, 'packages': [], 'path': root} plugfiles.append(plugdata) # look for any recursion plugins else: for data in plugfiles: if data['recurse'] and root.startswith(data['path']): plugdata = data break if plugdata is not None: packages = plugdata['packages'] # include package plugins for folder in folders: pkgpath = os.path.join(root, folder, '__init__.py') if os.path.exists(pkgpath): packages.append(projex.packageFromPath(pkgpath)) for file_ in files: module, ext = os.path.splitext(file_) # look for python modules if ext == '.py': package_path = projex.packageFromPath(os.path.join(root, file_)) if not package_path: continue if module != '__init__': package_path += '.' 
+ module imports.append(package_path) # test to see if this is a plugin file if plugdata is not None and module not in ('__init__', '__plugins__'): plugdata['packages'].append(package_path) # look for data elif ext not in exclude: src = os.path.join(root, file_) targ = os.path.join(root[baselen:]) datas.append((src, targ)) # save the plugin information for plugdata in plugfiles: fname = os.path.join(plugdata['path'], '__plugins__.py') packages = plugdata['packages'] plugs = ',\n'.join(map(lambda x: ""r'{0}'"".format(x), packages)) data = [ '__recurse__ = {0}'.format(plugdata['recurse']), '__toc__ = [{0}]'.format(plugs) ] # write the data to the system f = open(fname, 'w') f.write('\n'.join(data)) f.close() return imports, datas" 2424,"def get_reverse(self): """"""By default, Cable entries are sorted by rating and Broadcast ratings are sorted by time. By default, float attributes are sorted from highest to lowest and non-float attributes are sorted alphabetically (show, net) or chronologically (time). """""" if self.sort in FLOAT_ATTRIBUTES: return True elif self.sort in NONFLOAT_ATTRIBUTES: return False else: raise InvalidSortError(self.sort)" 2425,"def sort_func(self, entry): """"""Return the key attribute to determine how data is sorted. Time will need to be converted to 24 hour time. In instances when float attributes will have an 'n/a' string, return 0. """""" key = entry[self.sort] if self.sort in FLOAT_ATTRIBUTES and not isinstance(key, float): return 0 # If value is 'n/a' string elif self.sort == 'time': return convert_time(key) elif self.sort == 'date': return convert_date(key) return key" 2426,"def sort_entries(self): """"""Get whether reverse is True or False. Return the sorted data."""""" return sorted(self.data, key=self.sort_func, reverse=self.get_reverse())" 2427,"def visible_fields(self): """""" Returns the reduced set of visible fields to output from the form. This method respects the provided ``fields`` configuration _and_ exlcudes all fields from the ``exclude`` configuration. If no ``fields`` where provided when configuring this fieldset, all visible fields minus the excluded fields will be returned. :return: List of bound field instances or empty tuple. """""" form_visible_fields = self.form.visible_fields() if self.render_fields: fields = self.render_fields else: fields = [field.name for field in form_visible_fields] filtered_fields = [field for field in fields if field not in self.exclude_fields] return [field for field in form_visible_fields if field.name in filtered_fields]" 2428,"def get_fieldsets(self, fieldsets=None): """""" This method returns a generator which yields fieldset instances. The method uses the optional fieldsets argument to generate fieldsets for. If no fieldsets argument is passed, the class property ``fieldsets`` is used. When generating the fieldsets, the method ensures that at least one fielset will be the primary fieldset which is responsible for rendering the non field errors and hidden fields. :param fieldsets: Alternative set of fieldset kwargs. If passed this set is prevered of the ``fieldsets`` property of the form. :return: generator which yields fieldset instances. """""" fieldsets = fieldsets or self.fieldsets if not fieldsets: raise StopIteration # Search for primary marker in at least one of the fieldset kwargs. 
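# If no fieldset was explicitly marked as primary, the first fieldset yielded below is promoted to primary so that non field errors and hidden fields are always rendered exactly once.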
has_primary = any(fieldset.get('primary') for fieldset in fieldsets) for fieldset_kwargs in fieldsets: fieldset_kwargs = copy.deepcopy(fieldset_kwargs) fieldset_kwargs['form'] = self if not has_primary: fieldset_kwargs['primary'] = True has_primary = True yield self.get_fieldset(**fieldset_kwargs)" 2429,"def generate_binding_credentials(self, binding): """"""Generate binding credentials This function will permit to define the configuration to connect to the instance. Those credentials will be stored on a secret and exposed to a a Pod. We should at least returns the 'username' and 'password'. Args: binding (AtlasServiceBinding.Binding): A binding Returns: dict: All credentials and secrets. Raises: ErrClusterConfig: Connection string to the cluster is not available. """""" uri = self.clusters.get(binding.instance.get_cluster(), None) if not uri: raise ErrClusterConfig(binding.instance.get_cluster()) # partial credentials creds = {""username"" : self.generate_binding_username(binding), ""password"" : pwgen(32, symbols=False), ""database"" : binding.instance.get_dbname()} # uri uri = uri % ( creds[""username""], creds[""password""], creds[""database""]) creds[""uri""] = uri # return creds return creds" 2430,"def generate_binding_permissions(self, binding, permissions): """"""Generate Users pemissions on the database Defining roles to the database for the users. We can pass extra information into parameters of the binding if needed (see binding.parameters). Args: binding (AtlasServiceBinding.Binding): A binding permissions (atlasapi.specs.DatabaseUsersPermissionsSpecs): Permissions for Atlas Returns: atlasapi.specs.DatabaseUsersPermissionsSpecs: Permissions for the new user """""" permissions.add_roles(binding.instance.get_dbname(), [RoleSpecs.dbAdmin, RoleSpecs.readWrite]) return permissions" 2431,"def get_chunks(Array, Chunksize): """"""Generator that yields chunks of size ChunkSize"""""" for i in range(0, len(Array), Chunksize): yield Array[i:i + Chunksize]" 2432,"def read_data_from_bin_file(fileName): """""" Loads the binary data stored in the a binary file and extracts the data for each channel that was saved, along with the sample rate and length of the data array. Parameters ---------- fileContent : bytes bytes object containing the data from a .bin file exported from the saleae data logger. Returns ------- ChannelData : list List containing a list which contains the data from each channel LenOf1Channel : int The length of the data in each channel NumOfChannels : int The number of channels saved SampleTime : float The time between samples (in seconds) SampleRate : float The sample rate (in Hz) """""" with open(fileName, mode='rb') as file: # b is important -> binary fileContent = file.read() (ChannelData, LenOf1Channel, NumOfChannels, SampleTime) = read_data_from_bytes(fileContent) return ChannelData, LenOf1Channel, NumOfChannels, SampleTime" 2433,"def read_data_from_bytes(fileContent): """""" Takes the binary data stored in the binary string provided and extracts the data for each channel that was saved, along with the sample rate and length of the data array. Parameters ---------- fileContent : bytes bytes object containing the data from a .bin file exported from the saleae data logger. 
Returns ------- ChannelData : list List containing a list which contains the data from each channel LenOf1Channel : int The length of the data in each channel NumOfChannels : int The number of channels saved SampleTime : float The time between samples (in seconds) SampleRate : float The sample rate (in Hz) """""" TotalDataLen = struct.unpack('Q', fileContent[:8])[0] # Unsigned long long NumOfChannels = struct.unpack('I', fileContent[8:12])[0] # unsigned Long SampleTime = struct.unpack('d', fileContent[12:20])[0] AllChannelData = struct.unpack(""f"" * ((len(fileContent) -20) // 4), fileContent[20:]) # ignore the heading bytes (= 20) # The remaining part forms the body, to know the number of bytes in the body do an integer division by 4 (since 4 bytes = 32 bits = sizeof(float) LenOf1Channel = int(TotalDataLen/NumOfChannels) ChannelData = list(get_chunks(AllChannelData, LenOf1Channel)) return ChannelData, LenOf1Channel, NumOfChannels, SampleTime" 2434,"def interpret_waveform(fileContent, RelativeChannelNo): """""" Extracts the data for just 1 channel and computes the corresponding time array (in seconds) starting from 0. Important Note: RelativeChannelNo is NOT the channel number on the Saleae data logger it is the relative number of the channel that was saved. E.g. if you save channels 3, 7 and 10, the corresponding RelativeChannelNos would be 0, 1 and 2. Parameters ---------- fileContent : bytes bytes object containing the data from a .bin file exported from the saleae data logger. RelativeChannelNo : int The relative order/position of the channel number in the saved binary file. See Important Note above! Returns ------- time : ndarray A generated time array corresponding to the data list Data : list The data from the relative channel requested SampleTime : float The time between samples (in seconds) """""" (ChannelData, LenOf1Channel, NumOfChannels, SampleTime) = read_data_from_bytes(fileContent) if RelativeChannelNo > NumOfChannels-1: raise ValueError(""There are {} channels saved, you attempted to read relative channel number {}. 
Pick a relative channel number between {} and {}"".format(NumOfChannels, RelativeChannelNo, 0, NumOfChannels-1)) data = ChannelData[RelativeChannelNo] del(ChannelData) time = _np.arange(0, SampleTime*LenOf1Channel, SampleTime) return (0,SampleTime*LenOf1Channel,SampleTime), data" 2435,"def getApi(): """"""Get Api for /health Returns: Blueprint: section for healt check """""" api = Blueprint('health', __name__, url_prefix='/') @api.route('health', methods=['GET']) def health(): '''Health check''' return jsonify({ ""status"" : True}) return api" 2436,"def get_coord_box(centre_x, centre_y, distance): """"""Get the square boundary coordinates for a given centre and distance"""""" """"""Todo: return coordinates inside a circle, rather than a square"""""" return { 'top_left': (centre_x - distance, centre_y + distance), 'top_right': (centre_x + distance, centre_y + distance), 'bottom_left': (centre_x - distance, centre_y - distance), 'bottom_right': (centre_x + distance, centre_y - distance), }" 2437,"def fleet_ttb(unit_type, quantity, factories, is_techno=False, is_dict=False, stasis_enabled=False): """""" Calculate the time taken to construct a given fleet """""" unit_weights = { UNIT_SCOUT: 1, UNIT_DESTROYER: 13, UNIT_BOMBER: 10, UNIT_CRUISER: 85, UNIT_STARBASE: 1, } govt_weight = 80 if is_dict else 100 prod_weight = 85 if is_techno else 100 weighted_qty = unit_weights[unit_type] * quantity ttb = (weighted_qty * govt_weight * prod_weight) * (2 * factories) # TTB is 66% longer with stasis enabled return ttb + (ttb * 0.66) if stasis_enabled else ttb" 2438,"def parse_fasta(data): # pragma: no cover """""" Load sequences in Fasta format. This generator function yields a Sequence object for each sequence record in a GFF3 file. Implementation stolen shamelessly from http://stackoverflow.com/a/7655072/459780. """""" name, seq = None, [] for line in data: line = line.rstrip() if line.startswith('>'): if name: yield Sequence(name, ''.join(seq)) name, seq = line, [] else: seq.append(line) if name: yield Sequence(name, ''.join(seq))" 2439,"def _resolve_features(self): """"""Resolve Parent/ID relationships and yield all top-level features."""""" for parentid in self.featsbyparent: parent = self.featsbyid[parentid] for child in self.featsbyparent[parentid]: parent.add_child(child, rangecheck=self.strict) # Replace top-level multi-feature reps with a pseudo-feature for n, record in enumerate(self.records): if not isinstance(record, Feature): continue if not record.is_multi: continue assert record.multi_rep == record newrep = sorted(record.siblings + [record])[0] if newrep != record: for sib in sorted(record.siblings + [record]): sib.multi_rep = newrep if sib != newrep: newrep.add_sibling(sib) record.siblings = None parent = newrep.pseudoify() self.records[n] = parent if not self.assumesorted: for seqid in self.inferred_regions: if seqid not in self.declared_regions: seqrange = self.inferred_regions[seqid] srstring = '##sequence-region {:s} {:d} {:d}'.format( seqid, seqrange.start + 1, seqrange.end ) seqregion = Directive(srstring) self.records.append(seqregion) for record in sorted(self.records): yield record self._reset()" 2440,"def _reset(self): """"""Clear internal data structure."""""" self.records = list() self.featsbyid = dict() self.featsbyparent = dict() self.countsbytype = dict()" 2441,"def get_by_label(self, label): """""" Return the first item with a specific label, or None. 
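For example, ``self.get_by_label('foo')`` returns the first element whose ``label`` attribute equals ``'foo'``, or ``None`` when there is no such element.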
"""""" return next((x for x in self if x.label == label), None)" 2442,"def getGenericAnswers(self, name, instruction, prompts): """"""Called when the server requests keyboard interactive authentication """""" responses = [] for prompt, _echo in prompts: password = self.getPassword(prompt) responses.append(password) return defer.succeed(responses)" 2443,"def pairwise(iterable): """""" Generate consecutive pairs of elements from the given iterable. """""" iterator = iter(iterable) try: first = next(iterator) except StopIteration: return for element in iterator: yield first, element first = element" 2444,"def pick_cert_for_twisted(netloc, possible): """""" Pick the right client key/certificate to use for the given server and return it in the form Twisted wants. :param NetLocation netloc: The location of the server to consider. :param dict[TLSCredentials] possible: The available credentials from which to choose. :return: A two-tuple. If no credentials were found, the elements are ``None`` and ``[]``. Otherwise, the first element is a ``twisted.internet.ssl.PrivateCertificate`` instance representing the client certificate to use and the second element is a ``tuple`` of ``twisted.internet.ssl.Certificate`` instances representing the rest of the chain necessary to validate the client certificate. """""" try: creds = possible[netloc] except KeyError: return (None, ()) key = ssl.KeyPair.load(creds.key.as_bytes(), FILETYPE_PEM) return ( ssl.PrivateCertificate.load( creds.chain.certificates[0].as_bytes(), key, FILETYPE_PEM, ), tuple( ssl.Certificate.load(cert.as_bytes(), FILETYPE_PEM) for cert in creds.chain.certificates[1:] ), )" 2445,"def pick_trust_for_twisted(netloc, possible): """""" Pick the right ""trust roots"" (certificate authority certificates) for the given server and return it in the form Twisted wants. Kubernetes certificates are often self-signed or otherwise exist outside of the typical certificate authority cartel system common for normal websites. This function tries to find the right authority to use. :param NetLocation netloc: The location of the server to consider. :param dict[pem.Certificate] possible: The available certificate authority certificates from which to choose. :return: A provider of ``twisted.internet.interfaces.IOpenSSLTrustRoot`` if there is a known certificate authority certificate for the given server. Otherwise, ``None``. """""" try: trust_cert = possible[netloc] except KeyError: return None cert = ssl.Certificate.load(trust_cert.as_bytes(), FILETYPE_PEM) return ssl.trustRootFromCertificates([cert])" 2446,"def https_policy_from_config(config): """""" Create an ``IPolicyForHTTPS`` which can authenticate a Kubernetes API server. :param KubeConfig config: A Kubernetes configuration containing an active context identifying a cluster. The resulting ``IPolicyForHTTPS`` will authenticate the API server for that cluster. :return IPolicyForHTTPS: A TLS context which requires server certificates signed by the certificate authority certificate associated with the active context's cluster. """""" server = config.cluster[""server""] base_url = URL.fromText(native_string_to_unicode(server)) ca_certs = pem.parse(config.cluster[""certificate-authority""].bytes()) if not ca_certs: raise ValueError(""No certificate authority certificate found."") ca_cert = ca_certs[0] try: # Validate the certificate so we have early failures for garbage data. 
ssl.Certificate.load(ca_cert.as_bytes(), FILETYPE_PEM) except OpenSSLError as e: raise ValueError( ""Invalid certificate authority certificate found."", str(e), ) netloc = NetLocation(host=base_url.host, port=base_url.port) policy = ClientCertificatePolicyForHTTPS( credentials={}, trust_roots={ netloc: ca_cert, }, ) return policy" 2447,"def authenticate_with_certificate_chain(reactor, base_url, client_chain, client_key, ca_cert): """""" Create an ``IAgent`` which can issue authenticated requests to a particular Kubernetes server using a client certificate. :param reactor: The reactor with which to configure the resulting agent. :param twisted.python.url.URL base_url: The base location of the Kubernetes API. :param list[pem.Certificate] client_chain: The client certificate (and chain, if applicable) to use. :param pem.Key client_key: The private key to use with the client certificate. :param pem.Certificate ca_cert: The certificate authority to respect when verifying the Kubernetes server certificate. :return IAgent: An agent which will authenticate itself to a particular Kubernetes server and which will verify that server or refuse to interact with it. """""" if base_url.scheme != u""https"": raise ValueError( ""authenticate_with_certificate() makes sense for HTTPS, not {!r}"".format( base_url.scheme ), ) netloc = NetLocation(host=base_url.host, port=base_url.port) policy = ClientCertificatePolicyForHTTPS( credentials={ netloc: TLSCredentials( chain=Chain(certificates=Certificates(client_chain)), key=client_key, ), }, trust_roots={ netloc: ca_cert, }, ) return Agent(reactor, contextFactory=policy)" 2448,"def authenticate_with_certificate(reactor, base_url, client_cert, client_key, ca_cert): """""" See ``authenticate_with_certificate_chain``. :param pem.Certificate client_cert: The client certificate to use. """""" return authenticate_with_certificate_chain( reactor, base_url, [client_cert], client_key, ca_cert, )" 2449,"def authenticate_with_serviceaccount(reactor, **kw): """""" Create an ``IAgent`` which can issue authenticated requests to a particular Kubernetes server using a service account token. :param reactor: The reactor with which to configure the resulting agent. :param bytes path: The location of the service account directory. The default should work fine for normal use within a container. :return IAgent: An agent which will authenticate itself to a particular Kubernetes server and which will verify that server or refuse to interact with it. """""" config = KubeConfig.from_service_account(**kw) policy = https_policy_from_config(config) token = config.user[""token""] agent = HeaderInjectingAgent( _to_inject=Headers({u""authorization"": [u""Bearer {}"".format(token)]}), _agent=Agent(reactor, contextFactory=policy), ) return agent" 2450,"def first_time_setup(self): """"""First time running Open Sesame? Create keyring and an auto-unlock key in default keyring. Make sure these things don't already exist. 
"""""" if not self._auto_unlock_key_position(): pw = password.create_passwords()[0] attrs = {'application': self.keyring} gkr.item_create_sync(self.default_keyring ,gkr.ITEM_GENERIC_SECRET ,self.keyring ,attrs ,pw ,True) found_pos = self._auto_unlock_key_position() item_info = gkr.item_get_info_sync(self.default_keyring, found_pos) gkr.create_sync(self.keyring, item_info.get_secret())" 2451,"def _auto_unlock_key_position(self): """"""Find the open sesame password in the default keyring """""" found_pos = None default_keyring_ids = gkr.list_item_ids_sync(self.default_keyring) for pos in default_keyring_ids: item_attrs = gkr.item_get_attributes_sync(self.default_keyring, pos) app = 'application' if item_attrs.has_key(app) and item_attrs[app] == ""opensesame"": found_pos = pos break return found_pos" 2452,"def get_position_searchable(self): """"""Return dict of the position and corrasponding searchable str """""" ids = gkr.list_item_ids_sync(self.keyring) position_searchable = {} for i in ids: item_attrs = gkr.item_get_attributes_sync(self.keyring, i) position_searchable[i] = item_attrs['searchable'] return position_searchable" 2453,"def _match_exists(self, searchable): """"""Make sure the searchable description doesn't already exist """""" position_searchable = self.get_position_searchable() for pos,val in position_searchable.iteritems(): if val == searchable: return pos return False" 2454,"def save_password(self, password, **attrs): """"""Save the new password, save the old password with the date prepended """""" pos_of_match = self._match_exists(attrs['searchable']) if pos_of_match: old_password = self.get_password(pos_of_match).get_secret() gkr.item_delete_sync(self.keyring, pos_of_match) desc = str(int(time.time())) + ""_"" + attrs['searchable'] gkr.item_create_sync(self.keyring ,gkr.ITEM_GENERIC_SECRET ,desc ,{} ,old_password ,True) desc = attrs['searchable'] pos = gkr.item_create_sync(self.keyring ,gkr.ITEM_GENERIC_SECRET ,desc ,attrs ,password ,True) return pos" 2455,"def get_descriptor_for_idcode(idcode): """"""Use this method to find bsdl descriptions for devices. The caching on this method drastically lower the execution time when there are a lot of bsdl files and more than one device. May move it into a metaclass to make it more transparent."""""" idcode = idcode&0x0fffffff id_str = ""XXXX""+bin(idcode)[2:].zfill(28) descr_file_path = _check_cache_for_idcode(id_str) if descr_file_path: with open(descr_file_path, 'r') as f: dat = json.load(f) if dat.get(""_file_version"",-1) == JTAGDeviceDescription.version: return JTAGDeviceDescription(dat.get('idcode'), dat.get('name'), dat.get('ir_length'), dat.get('instruction_opcodes'), dat.get('registers'), dat.get('instruction_register_map')) print("" Device detected (""+id_str+""). Fetching missing descriptor..."") sid = get_sid(id_str) details = get_details(sid) attribs = decode_bsdl(sid) #VERIFYING PARSED DATA FROM 2 SOURCES. MESSY BUT USEFUL. 
instruction_length = 0 if attribs.get('INSTRUCTION_LENGTH') ==\ details.get('INSTRUCTION_LENGTH'): instruction_length = attribs.get('INSTRUCTION_LENGTH') elif attribs.get('INSTRUCTION_LENGTH') and\ details.get('INSTRUCTION_LENGTH'): raise Exception(""INSTRUCTION_LENGTH can not be determined"") elif attribs.get('INSTRUCTION_LENGTH'): instruction_length = attribs.get('INSTRUCTION_LENGTH') else: instruction_length = details.get('INSTRUCTION_LENGTH') for instruction_name in details.get('instructions'): if instruction_name not in\ attribs.get('INSTRUCTION_OPCODE',[]): raise Exception(""INSTRUCTION_OPCODE sources do not match"") #print(attribs['IDCODE_REGISTER']) descr = JTAGDeviceDescription(attribs['IDCODE_REGISTER'].upper(), details['name'], instruction_length, attribs['INSTRUCTION_OPCODE'], attribs['REGISTERS'], attribs['INSTRUCTION_TO_REGISTER']) #CACHE DESCR AS FILE! if not os.path.isdir(base_descr_dir): os.makedirs(base_descr_dir) descr_file_path = os.path.join(base_descr_dir, attribs['IDCODE_REGISTER']\ .upper()+'.json') with open(descr_file_path, 'w') as f: json.dump(descr._dump(), f) return descr" 2456,"def parse_url(self): """"""Parse a git/ssh/http(s) url."""""" url = urlparse(self.url).path # handle git url = url.split('.git')[0] if ':' in url: url = url.split(':')[1] # Ony capture last two list items try: project, repo = url.split('/')[-2:] except ValueError: raise ParserError('""{}"" is not a valid repository URL.'.format(self.url)) return project, repo" 2457,"def _fetch_dimensions(self, dataset): """""" Iterate through semesters, counties and municipalities. """""" yield Dimension(u""school"") yield Dimension(u""year"", datatype=""year"") yield Dimension(u""semester"", datatype=""academic_term"", dialect=""swedish"") # HT/VT yield Dimension(u""municipality"", datatype=""year"", domain=""sweden/municipalities"")" 2458,"def _merge_configs(configs): """""" Merge one or more ``KubeConfig`` objects. :param list[KubeConfig] configs: The configurations to merge. :return KubeConfig: A single configuration object with the merged configuration. """""" result = { u""contexts"": [], u""users"": [], u""clusters"": [], u""current-context"": None, } for config in configs: for k in {u""contexts"", u""users"", u""clusters""}: try: values = config.doc[k] except KeyError: pass else: result[k].extend(values) if result[u""current-context""] is None: try: result[u""current-context""] = config.doc[u""current-context""] except KeyError: pass return KubeConfig(result)" 2459,"def _merge_configs_from_env(kubeconfigs): """""" Merge configuration files from a ``KUBECONFIG`` environment variable. :param bytes kubeconfigs: A value like the one given to ``KUBECONFIG`` to specify multiple configuration files. :return KubeConfig: A configuration object which has merged all of the configuration from the specified configuration files. Merging is performed according to https://kubernetes.io/docs/concepts/configuration/organize-cluster-access-kubeconfig/#merging-kubeconfig-files """""" paths = list( FilePath(p) for p in kubeconfigs.split(pathsep) if p ) config = _merge_configs(list( KubeConfig.from_file(p.path) for p in paths )) return config" 2460,"def network_kubernetes_from_context( reactor, context=None, path=None, environ=None, default_config_path=FilePath(expanduser(u""~/.kube/config"")), ): """""" Create a new ``IKubernetes`` provider based on a kube config file. :param reactor: A Twisted reactor which will be used for I/O and scheduling. 
:param unicode context: The name of the kube config context from which to load configuration details. Or, ``None`` to respect the current context setting from the configuration. :param FilePath path: The location of the kube config file to use. :param dict environ: An environment dictionary in which to look up ``KUBECONFIG``. If ``None``, the real process environment will be inspected. This is used only if ``path`` is ``None``. :return IKubernetes: The Kubernetes service described by the named context. """""" if path is None: if environ is None: from os import environ try: kubeconfigs = environ[u""KUBECONFIG""] except KeyError: config = KubeConfig.from_file(default_config_path.path) else: config = _merge_configs_from_env(kubeconfigs) else: config = KubeConfig.from_file(path.path) if context is None: context = config.doc[u""current-context""] context = config.contexts[context] cluster = config.clusters[context[u""cluster""]] user = config.users[context[u""user""]] if isinstance(cluster[u""server""], bytes): base_url = URL.fromText(cluster[u""server""].decode(""ascii"")) else: base_url = URL.fromText(cluster[u""server""]) [ca_cert] = parse(cluster[u""certificate-authority""].bytes()) client_chain = parse(user[u""client-certificate""].bytes()) [client_key] = parse(user[u""client-key""].bytes()) agent = authenticate_with_certificate_chain( reactor, base_url, client_chain, client_key, ca_cert, ) return network_kubernetes( base_url=base_url, agent=agent, )" 2461,"def collection_location(obj): """""" Get the URL for the collection of objects like ``obj``. :param obj: Either a type representing a Kubernetes object kind or an instance of such a type. :return tuple[unicode]: Some path segments to stick on to a base URL to construct the location of the collection of objects like the one given. """""" # TODO kind is not part of IObjectLoader and we should really be loading # apiVersion off of this object too. kind = obj.kind apiVersion = obj.apiVersion prefix = version_to_segments[apiVersion] collection = kind.lower() + u""s"" if IObject.providedBy(obj): # Actual objects *could* have a namespace... namespace = obj.metadata.namespace else: # Types representing a kind couldn't possibly. namespace = None if namespace is None: # If there's no namespace, look in the un-namespaced area. return prefix + (collection,) # If there is, great, look there. 
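# e.g. an instance of kind 'Pod' in namespace 'default' ends up under prefix + (u'namespaces', u'default', u'pods').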
return prefix + (u""namespaces"", namespace, collection)" 2462,"def enter_new_scope(ctx): """""" we inside new scope with it onw :param ctx: :return: """""" ctx = ctx.clone() ctx.waiting_for = ctx.compiled_story().children_matcher() return ctx" 2463,"async def execute(ctx): """""" execute story part at the current context and make one step further :param ctx: :return: """""" tail_depth = len(ctx.stack()) - 1 story_part = ctx.get_current_story_part() logger.debug('# going to call: {}'.format(story_part.__name__)) waiting_for = story_part(ctx.message) if inspect.iscoroutinefunction(story_part): waiting_for = await waiting_for logger.debug('# got result {}'.format(waiting_for)) # story part could run callable story and return its context if isinstance(waiting_for, story_context.StoryContext): # for such cases is very important to know `tail_depth` # because story context from callable story already has # few stack items above our tail ctx = waiting_for.clone() ctx.waiting_for = callable.WaitForReturn() else: ctx = ctx.clone() ctx.waiting_for = waiting_for tail_data = ctx.message['session']['stack'][tail_depth]['data'] tail_step = ctx.message['session']['stack'][tail_depth]['step'] if ctx.is_waiting_for_input(): if isinstance(ctx.waiting_for, callable.EndOfStory): if isinstance(ctx.waiting_for.data, dict): new_data = {**ctx.get_user_data(), **ctx.waiting_for.data} else: new_data = ctx.waiting_for.data ctx.message = { **ctx.message, 'session': { **ctx.message['session'], 'data': new_data, }, } tail_step += 1 elif isinstance(ctx.waiting_for, loop.ScopeMatcher): # jumping in a loop tail_data = matchers.serialize(ctx.waiting_for) elif isinstance(ctx.waiting_for, loop.BreakLoop): tail_step += 1 else: tail_data = matchers.serialize( matchers.get_validator(ctx.waiting_for) ) tail_step += 1 ctx.message = modify_stack_in_message(ctx.message, lambda stack: stack[:tail_depth] + [{ 'data': tail_data, 'step': tail_step, 'topic': stack[tail_depth]['topic'], }] + stack[tail_depth + 1:]) logger.debug('# mutated ctx after execute') logger.debug(ctx) return ctx" 2464,"def iterate_storyline(ctx): """""" iterate the last storyline from the last visited story part :param ctx: :return: """""" logger.debug('# start iterate') compiled_story = ctx.compiled_story() if not compiled_story: return for step in range(ctx.current_step(), len(compiled_story.story_line)): ctx = ctx.clone() tail = ctx.stack_tail() ctx.message = modify_stack_in_message(ctx.message, lambda stack: stack[:-1] + [{ 'data': tail['data'], 'step': step, 'topic': tail['topic'], }]) logger.debug('# [{}] iterate'.format(step)) logger.debug(ctx) ctx = yield ctx" 2465,"def scope_in(ctx): """""" - build new scope on the top of stack - and current scope will wait for it result :param ctx: :return: """""" logger.debug('# scope_in') logger.debug(ctx) ctx = ctx.clone() compiled_story = None if not ctx.is_empty_stack(): compiled_story = ctx.get_child_story() logger.debug('# child') logger.debug(compiled_story) # we match child story loop once by message # what should prevent multiple matching by the same message ctx.matched = True ctx.message = modify_stack_in_message(ctx.message, lambda stack: stack[:-1] + [{ 'data': matchers.serialize(callable.WaitForReturn()), 'step': stack[-1]['step'], 'topic': stack[-1]['topic'] }]) try: if not compiled_story and ctx.is_scope_level_part(): compiled_story = ctx.get_current_story_part() except story_context.MissedStoryPart: pass if not compiled_story: compiled_story = ctx.compiled_story() logger.debug('# [>] going deeper') 
ctx.message = modify_stack_in_message(ctx.message, lambda stack: stack + [ stack_utils.build_empty_stack_item(compiled_story.topic)]) logger.debug(ctx) return ctx" 2466,"def scope_out(ctx): """""" drop the last stack item if: - we have reached the end of the stack - and are not waiting for any input :param ctx: :return: """""" logger.debug('# scope_out') logger.debug(ctx) # we have reached the end of the story line # so we can collapse the previous scope and its related stack item if ctx.is_tail_of_story() and ctx.could_scope_out(): logger.debug('# [<] return') ctx = ctx.clone() ctx.message['session']['stack'] = ctx.message['session']['stack'][:-1] if not ctx.is_empty_stack() and \ (ctx.is_scope_level_part() or \ ctx.is_breaking_a_loop()): # isinstance(ctx.get_current_story_part(), loop.StoriesLoopNode) and \ # isinstance(ctx.waiting_for, callable.EndOfStory) or \ ctx.message = modify_stack_in_message(ctx.message, lambda stack: stack[:-1] + [{ 'data': stack[-1]['data'], 'step': stack[-1]['step'] + 1, 'topic': stack[-1]['topic'], }]) if ctx.is_breaking_a_loop() and not ctx.is_scope_level(): ctx.waiting_for = None logger.debug(ctx) return ctx" 2467,"def str2date(self, date_str): """""" Parse date from string. If no template matches your string, please go to https://github.com/MacHu-GWU/rolex-project/issues and submit your datetime string. I'll update the templates ASAP. This method is faster than :meth:`dateutil.parser.parse`. :param date_str: a string representing a date :type date_str: str :return: a date object The default template is tried first; if it fails, every known template is tried. Once a template succeeds it is saved as the new default, so when parsing many strings that share a single format only the first attempt is slow and all later calls are fast. """""" # try default date template try: a_datetime = datetime.strptime( date_str, self._default_date_template) return a_datetime.date() except: pass # try every date template for template in date_template_list: try: a_datetime = datetime.strptime(date_str, template) self._default_date_template = template return a_datetime.date() except: pass # raise error raise ValueError(""Unable to parse date from: %r!"" % date_str)" 2468,"def _str2datetime(self, datetime_str): """""" Parse datetime from string. If no template matches your string, please go to https://github.com/MacHu-GWU/rolex-project/issues and submit your datetime string. I'll update the templates ASAP. This method is faster than :meth:`dateutil.parser.parse`. :param datetime_str: a string representing a datetime :type datetime_str: str :return: a datetime object The default template is tried first; if it fails, every known template is tried. Once a template succeeds it is saved as the new default, so when parsing many strings that share a single format only the first attempt is slow and all later calls are fast. If every template fails, the parser falls back to :meth:`dateutil.parser.parse` and keeps using it for subsequent calls; :meth:`Parser.reset()` restores the default behaviour. """""" # try default datetime template try: a_datetime = datetime.strptime( datetime_str, self._default_datetime_template) return a_datetime except: pass # try every datetime template for template in datetime_template_list: try: a_datetime = datetime.strptime(datetime_str, template) self._default_datetime_template = template return a_datetime except: pass # raise error a_datetime = parse(datetime_str) self.str2datetime = parse return a_datetime" 2469,"def parse_date(self, value): """""" A lazy method to parse anything to date. 
If input data type is: - string: parse date from it - integer: use from ordinal - datetime: use date part - date: just return it """""" if isinstance(value, sixmini.string_types): return self.str2date(value) elif value is None: raise TypeError(""Unable to parse date from %r"" % value) elif isinstance(value, sixmini.integer_types): return date.fromordinal(value) elif isinstance(value, datetime): return value.date() elif isinstance(value, date): return value else: raise ValueError(""Unable to parse date from %r"" % value)" 2470,"def parse_datetime(self, value): """""" A lazy method to parse anything to datetime. If input data type is: - string: parse datetime from it - integer: use from ordinal - date: use date part and set hour, minute, second to zero - datetime: just return it """""" if isinstance(value, sixmini.string_types): return self.str2datetime(value) elif value is None: raise TypeError(""Unable to parse datetime from %r"" % value) elif isinstance(value, sixmini.integer_types): return from_utctimestamp(value) elif isinstance(value, float): return from_utctimestamp(value) elif isinstance(value, datetime): return value elif isinstance(value, date): return datetime(value.year, value.month, value.day) else: raise ValueError(""Unable to parse datetime from %r"" % value)" 2471,"def define(self): """"""If DFA is empty, create a sink state"""""" if len(self.states) == 0: for char in self.alphabet: self.add_arc(0, 0, char) self[0].final = False" 2472,"def add_state(self): """"""Adds a new state"""""" sid = len(self.states) self.states.append(DFAState(sid)) return sid" 2473,"def add_arc(self, src, dst, char): """"""Adds a new Arc Args: src (int): The source state identifier dst (int): The destination state identifier char (str): The character for the transition Returns: None """""" # assert type(src) == type(int()) and type(dst) == type(int()), \ # ""State type should be integer."" # assert char in self.I # #print self.states #print src for s_idx in [src, dst]: if s_idx >= len(self.states): for i in range(len(self.states), s_idx + 1): self.states.append(DFAState(i)) for arc in self.states[src].arcs: if arc.ilabel == self.isyms.__getitem__(char) or char == EPSILON: self.nfa = True break self.states[src].arcs.append( DFAArc(src, dst, self.isyms.__getitem__(char)))" 2474,"def complement(self, alphabet): """""" Returns the complement of DFA Args: alphabet (list): The input alphabet Returns: None """""" states = sorted(self.states, key=attrgetter('initial'), reverse=True) for state in states: if state.final: state.final = False else: state.final = True" 2475,"def init_from_acceptor(self, acceptor): """""" Adds a sink state Args: alphabet (list): The input alphabet Returns: None """""" self.states = copy.deepcopy(acceptor.states) self.alphabet = copy.deepcopy(acceptor.alphabet) self.osyms = copy.deepcopy(acceptor.osyms) self.isyms = copy.deepcopy(acceptor.isyms)" 2476,"def load(self, txt_fst_file_name): """""" Save the transducer in the text file format of OpenFST. 
The format is specified as follows: arc format: src dest ilabel olabel [weight] final state format: state [weight] lines may occur in any order except initial state must be first line Args: txt_fst_file_name (str): The input file Returns: None """""" with open(txt_fst_file_name, 'r') as input_filename: for line in input_filename: line = line.strip() split_line = line.split() if len(split_line) == 1: self[int(split_line[0])].final = True else: self.add_arc(int(split_line[0]), int(split_line[1]), split_line[2].decode('hex'))" 2477,"def intersect(self, other): """"""Constructs an unminimized DFA recognizing the intersection of the languages of two given DFAs. Args: other (DFA): The other DFA that will be used for the intersect operation Returns: DFA: The resulting DFA """""" operation = bool.__and__ self.cross_product(other, operation) return self" 2478,"def symmetric_difference(self, other): """"""Constructs an unminimized DFA recognizing the symmetric difference of the languages of two given DFAs. Args: other (DFA): The other DFA that will be used for the symmetric difference operation Returns: DFA: The resulting DFA """""" operation = bool.__xor__ self.cross_product(other, operation) return self" 2479,"def union(self, other): """"""Constructs an unminimized DFA recognizing the union of the languages of two given DFAs. Args: other (DFA): The other DFA that will be used for the union operation Returns: DFA: The resulting DFA """""" operation = bool.__or__ self.cross_product(other, operation) return self" 2480,"def _epsilon_closure(self, state): """""" Returns the \epsilon-closure for the state given as input. """""" closure = set([state.stateid]) stack = [state] while True: if not stack: break s = stack.pop() for arc in s: if self.isyms.find(arc.ilabel) != EPSILON or \ arc.nextstate in closure: continue closure.add(arc.nextstate) stack.append(self.states[arc.nextstate]) return closure" 2481,"def determinize(self): """""" Transforms a Non Deterministic Automaton into a Deterministic one Args: None Returns: DFA: The resulting DFA Creating an equivalent DFA is done using the standard algorithm. A nice description can be found in the book: Harry R. Lewis and Christos H. Papadimitriou. 1998. Elements of the Theory of Computation. """""" # Compute the \epsilon-closure for all states and save it in a dictionary epsilon_closure = {} for state in self.states: sid = state.stateid epsilon_closure[sid] = self._epsilon_closure(state) # Get a transition diagram to speed up computations trans_table = {} for state in self.states: trans_table[state.stateid] = defaultdict(set) for arc in state: char = self.isyms.find(arc.ilabel) trans_table[state.stateid][char].add(arc.nextstate) # is_final function: # Given a set of nfa states representing a dfa_state return 1 if the # corresponding DFA state is a final state, i.e. if any of the # corresponding NFA states are final. is_final = lambda nfa_states, dfa_state: True \ if sum([ int(nfa_states[x].final) for x in dfa_state ]) >= 1 \ else False # Precomputation is over, start executing the conversion algorithm state_idx = 1 nfa_states = copy.deepcopy(self.states) self.states = [] # Initialize the new DFA state list self.add_state() new_initial = epsilon_closure[nfa_states[0].stateid] self.states[0].final = is_final(nfa_states, new_initial) dfa_state_idx_map = { frozenset(new_initial) : 0 } stack = [new_initial] while True: # Iterate until all added DFA states are processed. 
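# Worklist of the subset construction: every entry on the stack is a set of NFA state ids that represents a single DFA state.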
if not stack: break # This is a set of states from the NFA src_dfa_state = stack.pop() src_dfa_state_idx = dfa_state_idx_map[frozenset(src_dfa_state)] for char in self.alphabet: # Compute the set of target states target_dfa_state = set([]) for nfa_state in src_dfa_state: next_states = \ set([y for x in trans_table[nfa_state][char] \ for y in epsilon_closure[x] ]) target_dfa_state.update(next_states) # If the computed state set is not part of our new DFA add it, # along with the transition for the current character. if frozenset(target_dfa_state) not in dfa_state_idx_map: self.add_state() dfa_state_idx_map[frozenset(target_dfa_state)] = state_idx self.states[state_idx].final = is_final(nfa_states, target_dfa_state) state_idx += 1 stack.append(target_dfa_state) dst_state_idx = dfa_state_idx_map[frozenset(target_dfa_state)] self.add_arc(src_dfa_state_idx, dst_state_idx, char) return self" 2482,"def invert(self): """"""Inverts the DFA final states"""""" for state in self.states: if state.final: state.final = False else: state.final = True" 2483,"def hopcroft(self): """""" Performs the Hopcroft minimization algorithm Args: None Returns: DFA: The minimized input DFA """""" def _getset(testset, partition): """""" Checks if a set is in a partition Args: testset (set): The examined set partition (list): A list of sets Returns: bool: A value indicating if it is a member or not """""" for part in partition: if set(testset) == set(part): return True return None def _create_transitions_representation(graph): """""" In order to speedup the transition iteration using the alphabet, the function creates an index Args: graph (DFA): The input dfa state (DFA state): The examined state Returns: dict: The generated transition map """""" return {x.stateid:{self.isyms.find(arc.ilabel): arc.nextstate \ for arc in x} for x in graph.states} def _create_reverse_transitions_representation(graph): """""" In order to speedup the transition iteration using the alphabet, the function creates an index Args: graph (DFA): The input dfa state (DFA state): The examined state Returns: dict: The generated transition map """""" return {x.stateid: {self.isyms.find(arc.ilabel): arc.nextstate \ for arc in x} for x in graph.states} def _reverse_to_source(target, group1): """""" Args: target (dict): A table containing the reverse transitions for each state group1 (list): A group of states Return: Set: A set of states for which there is a transition with the states of the group """""" new_group = [] for dst in group1: new_group += target[dst] return set(new_group) def _get_group_from_state(groups, sid): """""" Args: sid (int): The state identifier Return: int: The group identifier that the state belongs """""" for index, selectgroup in enumerate(groups): if sid in selectgroup: return index def _delta(graph, cur_state, char): """""" Function describing the transitions Args: graph (DFA): The DFA states cur_state (DFA state): The DFA current state char (str):: The char that will be used for the transition Return: DFA Node: The next state """""" for arc in cur_state.arcs: if graph.isyms.find(arc.ilabel) == char: return graph[arc.nextstate] def _partition_group(bookeeping, group): """""" Args: group (list): A group of states Return: tuple: A set of two groups """""" for (group1, group2) in bookeeping: if group & group1 != set() and not group.issubset(group1): new_g1 = group & group1 new_g2 = group - group1 return (new_g1, new_g2) if group & group2 != set() and not group.issubset(group2): new_g1 = group & group2 new_g2 = group - group2 
return (new_g1, new_g2) assert False, ""Unmatched group partition"" def _object_set_to_state_list(objectset): """""" Args: objectset (list): A list of all the DFA states (as objects) Return: list: A list of all the DFA states (as identifiers) """""" return [state.stateid for state in objectset] def _get_accepted(graph): """""" Find the accepted states Args: graph (DFA): The DFA states Return: list: Returns the list of the accepted states """""" return [state for state in graph \ if state.final != TropicalWeight(float('inf'))] graph = self # Find Q set_q = set(_object_set_to_state_list(graph.states)) # We will work with states addresses here instead of states stateid for # more convenience set_f = set(_object_set_to_state_list(_get_accepted(graph))) # Perform P := {F, Q-F} set_nf = set_q.copy() - set_f.copy() groups = [set_f.copy(), set_nf.copy()] bookeeping = [(set_f, set_nf)] done = False while not done: done = True new_groups = [] for selectgroup in groups: # _check for each letter if it splits the current group for character in self.alphabet: # print 'Testing symbol: ', c target = defaultdict(list) target_states = defaultdict(int) new_g = [set(selectgroup)] for sid in selectgroup: # _check if all transitions using c are going in a state # in the same group. If they are going on a different # group then split deststate = _delta(graph, graph[sid], character) destgroup = _get_group_from_state(groups, deststate.stateid) target[destgroup].append(sid) target_states[destgroup] = deststate.stateid if len(target) > 1: inv_target_states = { v: k for k, v in target_states.iteritems()} new_g = [set(selectedstate) for selectedstate in target.values()] done = False # Get all the partitions of destgroups queue = [set([x for x in target_states.values()])] while queue: top = queue.pop(0) (group1, group2) = _partition_group(bookeeping, top) ng1 = _reverse_to_source( target, [inv_target_states[x] for x in group1]) ng2 = _reverse_to_source( target, [inv_target_states[x] for x in group2]) bookeeping.append((ng1, ng2)) if len(group1) > 1: queue.append(group1) if len(group2) > 1: queue.append(group2) break new_groups += new_g # End of iteration for the k-equivalence # Assign new groups and check if any change occured groups = new_groups # Make a copy of the old states, and prepare the # automaton to host the minimum states oldstates = copy.deepcopy(self.states) self.states = [] self.define() def findpart(stateid, partitions): """"""Searches for the groupt that the state identifier belongs to. Args: stateid (int): The state identifier partitions (list): The list of the groups Returns: set: The group that the stateid belongs to. """""" for group in partitions: if stateid in group: return frozenset(group) return frozenset(set( )) def add_state_if_not_exists(group, statesmap, final): """""" Adds a new state in the final dfa. It initialy checks if the group of states is already registered to the automaton. If it is registered, the state identifier is returned, or else, a new state is added. 
Args: group (frozenset): The group that the state identifier belongs statesmap (dict): A dictionary that maintains the state identifiers for each forzenset final (bool): A value indicating if the current state is final Returns: int: The new state identifier """""" if group not in statesmap: sid = self.add_state() self[sid].final = final statesmap[group] = sid return statesmap[group] statesmap = {} self.states = [] group = findpart(0, groups) sid = add_state_if_not_exists(frozenset(list(group)), statesmap, oldstates[0].final) self[sid].initial = True for group in groups: if len(group) == 0: continue sid = add_state_if_not_exists(frozenset(group), statesmap, oldstates[list(group)[0]].final) state = next(iter(group)) for arc in oldstates[state]: dst_group = findpart(arc.nextstate, groups) dst_sid = add_state_if_not_exists( dst_group, statesmap, oldstates[arc.nextstate].final) self.add_arc(sid, dst_sid, graph.isyms.find(arc.ilabel))" 2484,"def cross_product(self, dfa_2, accept_method): """"""A generalized cross-product constructor over two DFAs. The third argument is a binary boolean function f; a state (q1, q2) in the final DFA accepts if f(A[q1],A[q2]), where A indicates the acceptance-value of the state. Args: dfa_2: The second dfa accept_method: The boolean action Returns: None """""" dfa_1states = copy.deepcopy(self.states) dfa_2states = dfa_2.states self.states = [] states = {} def _create_transitions_representation(graph, state): """""" In order to speedup the transition iteration using the alphabet, the function creates an index Args: graph (DFA): The input dfa state (DFA state): The examined state Returns: dict: The generated transition map """""" return {self.isyms.find(arc.ilabel): graph[arc.nextstate] for arc in state} def _add_state_if_nonexistent(state_a, state_b): """""" Adds a new state in the final dfa, which is the combination of the input states. The initial and final flag is also placed on the new state. If the state already exists, its identifier is being returned. 
Args: state_a: The fist state identifier state_b: The second state identifier Returns: int: The new state identifier """""" if (state_a.stateid, state_b.stateid) not in states: states[(state_a.stateid, state_b.stateid)] \ = self.add_state() self[states[(state_a.stateid, state_b.stateid)]].initial \ = state_a.initial and state_b.initial self[states[(state_a.stateid, state_b.stateid)]].final \ = accept_method(state_a.final, state_b.final) return states[(state_a.stateid, state_b.stateid)] for state1, state2 in product(dfa_1states, dfa_2states): sid1 = _add_state_if_nonexistent(state1, state2) transitions_s1 = _create_transitions_representation(dfa_1states, state1) transitions_s2 = _create_transitions_representation(dfa_2states, state2) for char in self.alphabet: sid2 = _add_state_if_nonexistent( transitions_s1[char], transitions_s2[char]) self.add_arc(sid1, sid2, char)" 2485,"def as_list(self): """""" returns a list version of the object, based on it's attributes """""" if hasattr(self, 'cust_list'): return self.cust_list if hasattr(self, 'attr_check'): self.attr_check() cls_bltns = set(dir(self.__class__)) ret = [a for a in dir(self) if a not in cls_bltns and getattr(self, a)] return ret" 2486,"def as_dict(self): """""" returns an dict version of the object, based on it's attributes """""" if hasattr(self, 'cust_dict'): return self.cust_dict if hasattr(self, 'attr_check'): self.attr_check() cls_bltns = set(dir(self.__class__)) return {a: getattr(self, a) for a in dir(self) if a not in cls_bltns}" 2487,"def as_odict(self): """""" returns an odict version of the object, based on it's attributes """""" if hasattr(self, 'cust_odict'): return self.cust_odict if hasattr(self, 'attr_check'): self.attr_check() odc = odict() for attr in self.attrorder: odc[attr] = getattr(self, attr) return odc" 2488,"def fetch_and_parse(url, bodyLines): """"""Takes a url, and returns a dictionary of data with 'bodyLines' lines"""""" pageHtml = fetch_page(url) return parse(url, pageHtml, bodyLines)" 2489,"def find(pos, user): ''' Get a valid CashDiary for today from the given POS, it will return: - None: if no CashDiary is available today and older one was already closed - New CashDiary: if no CashDiary is available today but there is an older one which it was opened - Existing CashDiary: if a CashDiary is available today (open or close) ''' # Get checkpoint ck = dateparse.parse_time(getattr(settings, ""CASHDIARY_CLOSES_AT"", '03:00')) year = timezone.now().year month = timezone.now().month day = timezone.now().day hour = ck.hour minute = ck.minute second = ck.second checkpoint = timezone.datetime(year, month, day, hour, minute, second) # Get cashdiary = CashDiary.objects.filter(pos=pos, opened_date__gte=checkpoint).order_by(""-opened_date"").first() if not cashdiary: # No cashdiary found for today, check older one oldercashdiary = CashDiary.objects.filter(pos=pos, opened_date__lt=checkpoint).order_by(""-opened_date"").first() if oldercashdiary: if oldercashdiary.closed_user: cashdiary = None else: # Older cashdiary is not closed, we have to close it and open a new one amount_cash = oldercashdiary.amount_cash() amount_cards = oldercashdiary.amount_cards() # The older cashdiary is still opened, we have to close it and create a new one oldercashdiary.closed_cash = amount_cash oldercashdiary.closed_cards = amount_cards oldercashdiary.closed_user = user oldercashdiary.closed_date = timezone.now() oldercashdiary.save() # Open new cashdiary cashdiary = CashDiary() cashdiary.pos = pos cashdiary.opened_cash = amount_cash 
cashdiary.opened_cards = amount_cards cashdiary.opened_user = user cashdiary.opened_date = timezone.now() cashdiary.save() else: # initial new cashdiary cashdiary = CashDiary() cashdiary.pos = pos cashdiary.opened_cash = Decimal('0') cashdiary.opened_cards = Decimal('0') cashdiary.opened_user = user cashdiary.opened_date = timezone.now() cashdiary.save() # Return the found CashDiary return cashdiary" 2490,"def copy_rec(source, dest): """"""Copy files between diferent directories. Copy one or more files to an existing directory. This function is recursive, if the source is a directory, all its subdirectories are created in the destination. Existing files in destination are overwrited without any warning. Args: source (str): File or directory name. dest (str): Directory name. Raises: FileNotFoundError: Destination directory doesn't exist. """""" if os.path.isdir(source): for child in os.listdir(source): new_dest = os.path.join(dest, child) os.makedirs(new_dest, exist_ok=True) copy_rec(os.path.join(source, child), new_dest) elif os.path.isfile(source): logging.info(' Copy ""{}"" to ""{}""'.format(source, dest)) shutil.copy(source, dest) else: logging.info(' Ignoring ""{}""'.format(source))" 2491,"def build(self): """""" Builds this object into the desired output information. """""" signed = bool(self.options() & Builder.Options.Signed) # remove previous build information buildpath = self.buildPath() if not buildpath: raise errors.InvalidBuildPath(buildpath) # setup the environment for key, value in self.environment().items(): log.info('SET {0}={1}'.format(key, value)) os.environ[key] = value if os.path.exists(buildpath): shutil.rmtree(buildpath) # generate the build path for the installer os.makedirs(buildpath) # create the output path outpath = self.outputPath() if not os.path.exists(outpath): os.makedirs(outpath) # copy license information src = self.licenseFile() if src and os.path.exists(src): targ = os.path.join(buildpath, 'license.txt') shutil.copyfile(src, targ) # generate revision information if self.options() & Builder.Options.GenerateRevision: self.generateRevision() # generate documentation information if self.options() & Builder.Options.GenerateDocs: self.generateDocumentation(buildpath) # generate setup file if self.options() & Builder.Options.GenerateSetupFile: setuppath = os.path.join(self.sourcePath(), '..') egg = (self.options() & Builder.Options.GenerateEgg) != 0 self.generateSetupFile(setuppath, egg=egg) # generate executable information if self.options() & Builder.Options.GenerateExecutable: if not self.generateExecutable(signed=signed): return # generate zipfile information if self.options() & Builder.Options.GenerateZipFile: self.generateZipFile(self.outputPath()) # generate installer information if self.options() & Builder.Options.GenerateInstaller: self.generateInstaller(buildpath, signed=signed)" 2492,"def generateExecutable(self, outpath='.', signed=False): """""" Generates the executable for this builder in the output path. 
:param outpath | <str> """""" if not (self.runtime() or self.specfile()): return True if not self.distributionPath(): return True if os.path.exists(self.distributionPath()): shutil.rmtree(self.distributionPath()) if os.path.isfile(self.sourcePath()): basepath = os.path.normpath(os.path.dirname(self.sourcePath())) else: basepath = os.path.normpath(self.sourcePath()) # store the plugin table of contents self.generatePlugins(basepath) # generate the specfile if necessary specfile = self.specfile() # generate the spec file options opts = { 'name': self.name(), 'exname': self.executableName(), 'product': self.productName(), 'runtime': self.runtime(), 'srcpath': self.sourcePath(), 'buildpath': self.buildPath(), 'hookpaths': ',\n'.join(wrap_str(self.hookPaths())), 'hiddenimports': ',\n'.join(wrap_str(self.hiddenImports())), 'distpath': self.distributionPath(), 'platform': sys.platform, 'excludes': ',\n'.join(wrap_str(self.executableExcludes())) } if not specfile: datasets = [] for typ, data in self.executableData(): if typ == 'tree': args = { 'path': data[0], 'prefix': data[1], 'excludes': ','.join(wrap_str(data[2])) } datasets.append(templ.SPECTREE.format(**args)) else: args = {} args.update(data) args.setdefault('type', typ) datasets.append(templ.SPECDATA.format(**args)) opts['datasets'] = '\n'.join(datasets) opts.update(self._executableOptions) if self.executableCliName(): opts['cliname'] = self.executableCliName() opts['collect'] = templ.SPECFILE_CLI.format(**opts) else: opts['collect'] = templ.SPECFILE_COLLECT.format(**opts) if opts['onefile']: data = templ.SPECFILE_ONEFILE.format(**opts) else: data = templ.SPECFILE.format(**opts) # generate the spec file for building specfile = os.path.join(self.buildPath(), self.name() + '.spec') f = open(specfile, 'w') f.write(data) f.close() cmd = os.path.expandvars(self.executableOption('cmd')) success = cmdexec(cmd.format(spec=specfile)) == 0 if signed: binfile = os.path.join(opts['distpath'], opts['product'], opts['exname'] + '.exe') self.sign(binfile) return success" 2493,"def generateRevision(self): """""" Generates the revision file for this builder. """""" revpath = self.sourcePath() if not os.path.exists(revpath): return # determine the revision location revfile = os.path.join(revpath, self.revisionFilename()) mode = '' # test for svn revision try: args = ['svn', 'info', revpath] proc = subprocess.Popen(args, stdout=subprocess.PIPE) mode = 'svn' except WindowsError: try: args = ['git', 'rev-parse', 'HEAD', revpath] proc = subprocess.Popen(args, stdout=subprocess.PIPE) mode = 'git' except WindowsError: return # process SVN revision rev = None if mode == 'svn': for line in proc.stdout: data = re.match('^Revision: (\d+)', line) if data: rev = int(data.group(1)) break if rev is not None: try: f = open(revfile, 'w') f.write('__revision__ = {0}\n'.format(rev)) f.close() except IOError: pass" 2494,"def generateInstaller(self, outpath='.', signed=False): """""" Generates the installer for this builder. 
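As an overview (based on the body below): the method renders an autogen.nsi script into the output path and then executes it with the NSIS command configured in the installer options.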
:param outpath | <str> """""" log.info('Generating Installer....') # generate the options for the installer opts = { 'name': self.name(), 'exname': self.executableName(), 'version': self.version(), 'company': self.company(), 'language': self.language(), 'license': self.license(), 'platform': sys.platform, 'product': self.productName(), 'outpath': self.outputPath(), 'instpath': self.installPath(), 'instname': self.installName(), 'buildpath': self.buildPath(), 'srcpath': self.sourcePath(), 'nsis_exe': os.environ['NSIS_EXE'], 'signed': '', 'signcmd': '' } basetempl = '' if self.runtime() and os.path.exists(self.distributionPath()): opts['compilepath'] = os.path.join(self.distributionPath(), self.executableName()) basetempl = templ.NSISAPP elif os.path.isfile(self.sourcePath()): opts['compilepath'] = self.sourcePath() opts['install'] = templ.NSISMODULE.format(**opts) basetempl = templ.NSISLIB else: opts['compilepath'] = self.sourcePath() opts['install'] = templ.NSISPACKAGE.format(**opts) basetempl = templ.NSISLIB # sign the uninstaller if signed and self.signcmd(): cmd = self.signcmd().format(filename='', cert=self.certificate()) cmd = os.path.expandvars(cmd) cmd = cmd.replace('""""', '') opts['signed'] = '!define SIGNED' opts['signcmd'] = cmd opts.update(self._installerOptions) # expand the plugin paths pre_section_plugins = [] post_section_plugins = [] install_plugins = [] uninstall_plugins = [] for filename in self.installerOption('pre_section_plugins', []): with open(filename, 'r') as f: pre_section_plugins.append(f.read().format(**opts)) for filename in self.installerOption('post_section_plugins', []): with open(filename, 'r') as f: post_section_plugins.append(f.read().format(**opts)) for filename in self.installerOption('install_section_plugins', []): with open(filename, 'r') as f: install_plugins.append(f.read().format(**opts)) for filename in self.installerOption('uninstall_section_plugins', []): with open(filename, 'r') as f: uninstall_plugins.append(f.read().format(**opts)) opts['install_plugins'] = '\n'.join(install_plugins) opts['uninstall_plugins'] = '\n'.join(uninstall_plugins) opts['pre_section_plugins'] = '\n'.join(pre_section_plugins) opts['post_section_plugins'] = '\n'.join(post_section_plugins) opts['choose_directory'] = templ.NSISCHOOSEDIRECTORY if opts['choose_dir'] else '' req_license = self._installerOptions.pop('require_license_approval', False) if req_license: opts['require_license_approval'] = templ.NSISLICENSERADIO else: opts['require_license_approval'] = '' outfile = os.path.join(os.path.abspath(outpath), 'autogen.nsi') opts['__file__'] = outfile # update the additional directories addtl = [] for directory, source in self._installDirectories.items(): directory = os.path.expandvars(directory.format(**opts)) directory = os.path.normpath(directory) if source: source = os.path.expandvars(source.format(**opts)) source = os.path.abspath(source) addtl.append(' SetOutPath ""{0}""'.format(directory)) addtl.append(' File /nonfatal /r ""{0}""'.format(source)) else: addtl.append(' CreateDirectory ""{0}""'.format(directory)) opts['addtl_commands'] = '\n'.join(addtl) data = basetempl.format(**opts) # create the output file f = open(outfile, 'w') f.write(data) f.close() installerfile = os.path.join(self.outputPath(), self.installName()) installerfile += '-{0}.exe'.format(sys.platform) # run the installer cmd = os.path.expandvars(self.installerOption('cmd')) success = cmdexec(cmd.format(script=outfile)) # sign the installer if signed: self.sign(installerfile) log.info('Executing
installer...') cmdexec(installerfile)" 2495,"def generateSetupFile(self, outpath='.', egg=False): """""" Generates the setup file for this builder. """""" outpath = os.path.abspath(outpath) outfile = os.path.join(outpath, 'setup.py') opts = { 'name': self.name(), 'distname': self.distributionName(), 'version': self.version(), 'author': self.author(), 'author_email': self.authorEmail(), 'keywords': self.keywords(), 'license': self.license(), 'brief': self.brief(), 'description': self.description(), 'url': self.companyUrl() } wrap_dict = lambda x: map(lambda k: ""r'{0}': [{1}]"".format(k[0], ',\n'.join(wrap_str(k[1]))), x.items()) opts['dependencies'] = ',\n'.join(wrap_str(self.dependencies())) opts['classifiers'] = ',\n'.join(wrap_str(self.classifiers())) if os.path.isfile(self.sourcePath()): basepath = os.path.normpath(os.path.dirname(self.sourcePath())) else: basepath = os.path.normpath(self.sourcePath()) self.generatePlugins(basepath) exts = set() for root, folders, files in os.walk(basepath): for file_ in files: _, ext = os.path.splitext(file_) if ext not in ('.py', '.pyc', '.pyo'): exts.add('*' + ext) exts = list(exts) text = templ.SETUPFILE.format(**opts) # generate the file if not os.path.exists(outfile): f = open(outfile, 'w') f.write(text) f.close() # generate the manifest file manfile = os.path.join(outpath, 'MANIFEST.in') if not os.path.exists(manfile): f = open(manfile, 'w') f.write('include *.md *.txt *.ini *.cfg *.rst\n') f.write('recursive-include {0} {1}\n'.format(self.name(), ' '.join(exts))) f.close() # generate the egg if egg: cmd = 'cd {0} && $PYTHON setup.py bdist_egg'.format(outpath) cmd = os.path.expandvars(cmd) cmdexec(cmd)" 2496,"def generateZipFile(self, outpath='.'): """""" Generates the zip file for this builder. """""" fname = self.installName() + '.zip' outfile = os.path.abspath(os.path.join(outpath, fname)) # clears out the exiting archive if os.path.exists(outfile): try: os.remove(outfile) except OSError: log.warning('Could not remove zipfile: %s', outfile) return False # generate the zip file zfile = zipfile.ZipFile(outfile, 'w') # zip up all relavent fields from the code base if os.path.isfile(self.sourcePath()): zfile.write(self.sourcePath(), os.path.basename(self.sourcePath())) else: basepath = os.path.abspath(os.path.join(self.sourcePath(), '..')) baselen = len(basepath) + 1 for root, folders, filenames in os.walk(basepath): # ignore hidden folders if '.svn' in root or '.git' in root: continue # ignore setuptools build info part = root[baselen:].split(os.path.sep)[0] if part in ('build', 'dist') or part.endswith('.egg-info'): continue # include files for filename in filenames: ext = os.path.splitext(filename)[1] if ext in self.ignoreFileTypes(): continue arcroot = root[baselen:].replace('\\', '/') arcname = os.path.join(arcroot, filename) log.info('Archiving %s...', arcname) zfile.write(os.path.join(root, filename), arcname) zfile.close() return True" 2497,"def installName(self): """""" Returns the name for the installer this builder will generate. :return <str> """""" opts = {'name': self.name(), 'version': self.version()} if self.revision(): opts['revision'] = '.{0}'.format(self.revision()) else: opts['revision'] = '' if self._installName: return self._installName.format(**opts) else: return '{name}-{version}{revision}'.format(**opts)" 2498,"def licenseFile(self): """""" Returns the license file for this builder. 
:return <str> """""" if self._licenseFile: return self._licenseFile elif self._license: f = projex.resources.find('licenses/{0}.txt'.format(self.license())) return f else: return ''" 2499,"def loadXml(self, xdata, filepath=''): """""" Loads properties from the xml data. :param xdata | <xml.etree.ElementTree.Element> """""" # build options opts = {'platform': sys.platform} mkpath = lambda x: _mkpath(filepath, x, **opts) # lookup environment variables xenv = xdata.find('environment') if xenv is not None: env = {} log.info('loading environment...') for xkey in xenv: text = xkey.text if text: env[xkey.tag] = os.path.expandvars(text) else: env[xkey.tag] = '' self.setEnvironment(env) # lookup general settings xsettings = xdata.find('settings') if xsettings is not None: for xsetting in xsettings: key = xsetting.tag val = xsetting.text attr = '_' + key if hasattr(self, attr): setattr(self, attr, val) # lookup options xoptions = xdata.find('options') if xoptions is not None: options = 0 for xopt in xoptions: key = xopt.tag value = xopt.text if value.lower() == 'true': try: options |= Builder.Options[key] except KeyError: continue self._options = options # lookup path options xpaths = xdata.find('paths') if xpaths is not None: for xpath in xpaths: key = xpath.tag path = xpath.text if key.endswith('Paths'): path = map(mkpath, path.split(';')) else: path = mkpath(path) setattr(self, '_' + key, path) # lookup executable options xexe = xdata.find('executable') if xexe is not None: exe_tags = {'runtime': '_runtime', 'exe': '_executableName', 'cli': '_executableCliName', 'product': '_productName'} for tag, prop in exe_tags.items(): xtag = xexe.find(tag) if xtag is not None: value = xtag.text if value.startswith('.'): value = mkpath(value) setattr(self, prop, value) # load exclude options xexcludes = xexe.find('excludes') if xexcludes is not None: excludes = [] for xexclude in xexcludes: excludes.append(xexclude.text) self.setExecutableExcludes(excludes) # load build data xexedata = xexe.find('data') if xexedata is not None: data = [] for xentry in xexedata: if xentry.tag == 'tree': path = xentry.get('path', '') if path: path = mkpath(path) else: path = self.sourcePath() prefix = xentry.get('prefix', os.path.basename(path)) excludes = xentry.get('excludes', '').split(';') if excludes: data.append(('tree', (path, prefix, excludes))) else: for xitem in xentry: data.append((xentry.tag, xitem.attrs)) self.setExecutableData(data) # load hidden imports xhiddenimports = xexe.find('hiddenimports') if xhiddenimports is not None: imports = [] for ximport in xhiddenimports: imports.append(ximport.text) self.setHiddenImports(imports) # load options xopts = xexe.find('options') if xopts is not None: for xopt in xopts: if xopt.text.startswith('.'): value = mkpath(xopt.text) else: value = xopt.text self._executableOptions[xopt.tag] = value # lookup installer options xinstall = xdata.find('installer') if xinstall is not None: install_tags = {'name': '_installName'} for tag, prop in install_tags.items(): xtag = xinstall.find(tag) if xtag is not None: value = xtag.text if value.startswith('.'): value = mkpath(value) setattr(self, prop, value) xopts = xinstall.find('options') if xopts is not None: for xopt in xopts: if xopt.text.startswith('.'): value = mkpath(xopt.text) else: value = xopt.text self._installerOptions[xopt.tag] = value xdirectories = xinstall.find('additional_directories') if xdirectories is not None: for xdir in xdirectories: self._installDirectories[xdir.get('path')] = xdir.get('source', '')" 2500,"def 
loadYaml(self, ydata, filepath=''): """""" Loads properties from the yaml data. :param ydata | <dict> """""" # build options opts = {'platform': sys.platform} mkpath = lambda x: _mkpath(filepath, x, **opts) # lookup environment variables env = {} for key, text in ydata.get('environment', {}).items(): if text: env[key] = os.path.expandvars(text) else: env[key] = '' self.setEnvironment(env) # lookup general settings for key, val in ydata.get('settings', {}).items(): attr = '_' + key if hasattr(self, attr): setattr(self, attr, val) # lookup options yoptions = ydata.get('options') if yoptions is not None: options = 0 for key, value in yoptions.items(): if value: try: options |= Builder.Options[key] except KeyError: continue self._options = options # lookup path options for key, path in ydata.get('paths', {}).items(): if key.endswith('Paths'): path = map(mkpath, path.split(';')) else: path = mkpath(path) setattr(self, '_' + key, path) # lookup executable options yexe = ydata.get('executable') if yexe is not None: exe_tags = {'runtime': '_runtime', 'exe': '_executableName', 'cli': '_executableCliName', 'product': '_productName'} for tag, prop in exe_tags.items(): if tag in yexe: value = yexe.pop(tag) if value.startswith('.'): value = mkpath(value) setattr(self, prop, value) # load exclude options self.setExecutableExcludes(yexe.get('excludes', [])) # load build data yexedata = yexe.get('data', {}) if yexedata: data = [] for key, value in yexedata.items(): if key == 'tree': path = value.get('path', '') if path: path = mkpath(path) else: path = self.sourcePath() prefix = value.get('prefix', os.path.basename(path)) excludes = value.get('excludes', '').split(';') if excludes: data.append(('tree', (path, prefix, excludes))) else: for item in value: data.append((key, item)) self.setExecutableData(data) # load hidden imports self.setHiddenImports(yexe.get('hiddenimports', [])) # load options for key, value in yexe.get('options', {}).items(): value = nstr(value) if value.startswith('.'): value = mkpath(value) self._executableOptions[key] = value # lookup installer options yinstall = ydata.get('installer') if yinstall is not None: install_tags = {'name': '_installName'} for tag, prop in install_tags.items(): if tag in yinstall: value = yinstall.pop(tag, None) if value.startswith('.'): value = mkpath(value) setattr(self, prop, value) for key, value in yinstall.get('options', {}).items(): if type(value) in (unicode, str) and value.startswith('.'): value = mkpath(value) self._installerOptions[key] = value for path in yinstall.get('additional_directories', []): self._installDirectories[path.get('path', '')] = path.get('source', '')" 2501,"def sign(self, filename): """""" Signs the filename with the certificate associated with this builder. :param filename | <str> :return <bool> | success """""" sign = self.signcmd() certificate = self.certificate() if not sign: log.error('No signcmd defined.') return False elif not certificate and '{cert}' in sign: log.error('No sign certificated defined.') return False log.info('Signing {0}...'.format(filename)) sign = os.path.expandvars(sign) filename = os.path.expandvars(filename) cert = os.path.expandvars(certificate) # let the previous process finish fully, or we might get some file errors time.sleep(2) return cmdexec(sign.format(filename=filename, cert=cert)) == 0" 2502,"def plugin(name, module=''): """""" Returns the plugin for the given name. By default, the base Builder instance will be returned. 
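Example (illustrative; assumes a builder class was registered beforehand under the hypothetical name 'package'): builder_cls = Builder.plugin('package')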
:param name | <str> """""" if module: mod = projex.importfile(module) if mod: return getattr(mod, nstr(name), None) return Builder._plugins.get(nstr(name))" 2503,"def register(plugin, name=None): """""" Registers the given builder as a plugin to the system. :param plugin | <subclass of PackageBuilder> name | <str> || None """""" if name is None: name = plugin.__name__ Builder._plugins[nstr(name)] = plugin" 2504,"def fromXml(cls, xdata, filepath=''): """""" Generates a new builder from the given xml data and then loads its information. :param xdata | <xml.etree.ElementTree.Element> :return <Builder> || None """""" builder = cls() builder.loadXml(xdata, filepath=filepath) return builder" 2505,"def fromYaml(cls, ydata, filepath=''): """""" Generates a new builder from the given yaml data and then loads its information. :param ydata | <dict> :return <Builder> || None """""" builder = cls() builder.loadYaml(ydata, filepath=filepath) return builder" 2506,"def fromFile(filename): """""" Parses the inputted xml file information and generates a builder for it. :param filename | <str> :return <Builder> || None """""" xdata = None ydata = None # try parsing an XML file try: xdata = ElementTree.parse(filename).getroot() except StandardError: xdata = None if xdata is None: # try parsing a yaml file if yaml: with open(filename, 'r') as f: text = f.read() try: ydata = yaml.load(text) except StandardError: return None else: log.warning('Could not process yaml builder!') # load a yaml definition if type(ydata) == dict: typ = ydata.get('type') module = ydata.get('module') builder = Builder.plugin(typ, module) if builder: return builder.fromYaml(ydata, os.path.dirname(filename)) else: log.warning('Could not find builder: {0}'.format(typ)) # load an xml definition elif xdata is not None: typ = xdata.get('type') module = xdata.get('module') builder = Builder.plugin(typ, module) if builder: return builder.fromXml(xdata, os.path.dirname(filename)) else: log.warning('Could not find builder: {0}'.format(typ)) return None" 2507,"def fromXml(cls, xdata, filepath=''): """""" Generates a new builder from the given xml data and then loads its information. :param xdata | <xml.etree.ElementTree.Element> :return <Builder> || None """""" module = None pkg_data = xdata.find('package') if pkg_data is not None: path = pkg_data.find('path').text name = pkg_data.find('name').text if filepath: path = os.path.join(filepath, path) path = os.path.abspath(path) sys.path.insert(0, path) sys.modules.pop(name, None) try: __import__(name) module = sys.modules[name] except (ImportError, KeyError): return None else: return None # generate the builder builder = cls(module) builder.loadXml(xdata, filepath=filepath) return builder" 2508,"def fromYaml(cls, ydata, filepath=''): """""" Generates a new builder from the given xml data and then loads its information. :param ydata | <xml.etree.ElementTree.Element> :return <Builder> || None """""" module = None pkg_data = ydata.get('package') if pkg_data is not None: path = pkg_data.get('path', '') name = pkg_data.get('name', '') if filepath: path = os.path.join(filepath, path) path = os.path.abspath(path) sys.path.insert(0, path) sys.modules.pop(name, None) try: __import__(name) module = sys.modules[name] except (ImportError, KeyError): return None else: return None # generate the builder builder = cls(module) builder.loadYaml(ydata, filepath=filepath) return builder" 2509,"def to_object(item): """""" Convert a dictionary to an object (recursive). 
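Example (illustrative, assuming IterableObject exposes the wrapped keys as attributes): to_object({'a': {'b': 1}}).a.b == 1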
"""""" def convert(item): if isinstance(item, dict): return IterableObject({k: convert(v) for k, v in item.items()}) if isinstance(item, list): def yield_convert(item): for index, value in enumerate(item): yield convert(value) return list(yield_convert(item)) else: return item return convert(item)" 2510,"def to_dict(item): """""" Convert an object to a dictionary (recursive). """""" def convert(item): if isinstance(item, IterableObject): if isinstance(item.source, dict): return {k: convert(v.source) if hasattr(v, 'source') else convert(v) for k, v in item} else: return convert(item.source) elif isinstance(item, dict): return {k: convert(v) for k, v in item.items()} elif isinstance(item, list): def yield_convert(item): for index, value in enumerate(item): yield convert(value) return list(yield_convert(item)) else: return item return convert(item)" 2511,"def step_undefined_step_snippet_should_exist_for(context, step): """""" Checks if an undefined-step snippet is provided for a step in behave command output (last command). EXAMPLE: Then an undefined-step snippet should exist for ""Given an undefined step"" """""" undefined_step_snippet = make_undefined_step_snippet(step) context.execute_steps(u'''\ Then the command output should contain: """""" {undefined_step_snippet} """""" '''.format(undefined_step_snippet=text_indent(undefined_step_snippet, 4)))" 2512,"def step_undefined_step_snippet_should_not_exist_for(context, step): """""" Checks if an undefined-step snippet is provided for a step in behave command output (last command). """""" undefined_step_snippet = make_undefined_step_snippet(step) context.execute_steps(u'''\ Then the command output should not contain: """""" {undefined_step_snippet} """""" '''.format(undefined_step_snippet=text_indent(undefined_step_snippet, 4)))" 2513,"def step_undefined_step_snippets_should_exist_for_table(context): """""" Checks if undefined-step snippets are provided. EXAMPLE: Then undefined-step snippets should exist for: | Step | | When an undefined step is used | | Then another undefined step is used | """""" assert context.table, ""REQUIRES: table"" for row in context.table.rows: step = row[""Step""] step_undefined_step_snippet_should_exist_for(context, step)" 2514,"def step_undefined_step_snippets_should_not_exist_for_table(context): """""" Checks if undefined-step snippets are not provided. EXAMPLE: Then undefined-step snippets should not exist for: | Step | | When an known step is used | | Then another known step is used | """""" assert context.table, ""REQUIRES: table"" for row in context.table.rows: step = row[""Step""] step_undefined_step_snippet_should_not_exist_for(context, step)" 2515,"async def create_connection( host, port, *, loop=None, secure=True, ssl_context=None, **kwargs, ): """"""Open an HTTP/2 connection to the specified host/port. 
"""""" loop = loop or asyncio.get_event_loop() secure = True if port == 443 else secure connection = HTTP2ClientConnection(host, loop=loop, secure=secure) if not isinstance(ssl_context, SSLContext): ssl_context = default_ssl_context() await loop.create_connection( lambda: connection, host=host, port=port, ssl=ssl_context, ) return connection" 2516,"def mixin (cls): """""" A decorator which adds event methods to a class giving it the ability to bind to and trigger events :param cls: the class to add the event logic to :type cls: class :return: the modified class :rtype: class """""" cls._events = {} cls.bind = Pyevent.bind.__func__ cls.unbind = Pyevent.unbind.__func__ cls.trigger = Pyevent.trigger.__func__ return cls" 2517,"def bind (self, event, callback): """""" Bind an event to a call function and ensure that it is called for the specified event :param event: the event that should trigger the callback :type event: str :param callback: the function that should be called :rtype callback: function """""" if self._events.has_key(event): self._events[event].append(callback) else: self._events[event] = [callback]" 2518,"def unbind (self, event, callback): """""" Unbind the callback from the event and ensure that it is never called :param event: the event that should be unbound :type event: str :param callback: the function that should be unbound :rtype callback: function """""" if self._events.has_key(event) and len(self._events[event]) > 0: for _callback in self._events[event]: if _callback == callback: self._events[event].remove(callback) if len(self._events[event]) == 0: del self._events[event]" 2519,"def trigger (self, event, *args, **kwargs): """""" Cause the callbacks associated with the event to be called :param event: the event that occurred :type event: str :param data: optional data to pass to the callback :type data: anything that should be passed to the callback """""" if self._events.has_key(event): for _callback in self._events[event]: try: _callback(args, kwargs) except TypeError: _callback()" 2520,"def get_wordlist(lang, wl_dir, po_path): #print(""Looking for Wordlist in:\nlang {}\nwl_dir {}\npo_path {}"".format(lang, wl_dir, po_path)) po_path = os.path.abspath(po_path) """""" If wl_dir is given, there may be a file called ""<lang>.txt"". If this is the case, this should be the wordlist we are looking for. """""" if wl_dir is not None: wl_path = os.path.join(wl_dir, lang + '.txt') if os.path.isfile(wl_path): return wl_path """""" If wl_dir is not given, the wordlist should live in a file named ""wordlist.txt"" either in the locales_dir for the default language or in the same directory as the .po-files """""" if po_path.endswith(""po""): # translated language po_dir = os.path.dirname(po_path) for f in os.scandir(po_dir): if f.name == ""wordlist.txt"": #print(""found wordlist in"", f.path) return f.path #print(""Checked po-dir, None Found"") """""" If no file was found so far, the po-files seem to lie in <lang>/LC_MESSAGES, and the wordlist should be in the directory above. """""" if os.path.basename(po_dir) == ""LC_MESSAGES"": for f in os.scandir(os.path.join(po_dir, "".."")): if f.name == ""wordlist.txt"": #print(""found wordlist in"", f.path) return f.path #print(""Checked LC_MESSAGES-dir. 
none found"") #print(""Checked lang-specific files"") if os.path.isdir(po_path): # default language for f in os.scandir(po_path): if f.name == ""wordlist.txt"": #print(""found wordlist in"", f.path) return f.path #print(""If this shows up, no wordlist was found"") return None" 2521,"def _read_options(paths,fname_def=None): """"""Builds a configuration reader function"""""" def reader_func(fname=fname_def, sect=None, sett=None, default=None): """"""Reads the configuration for trump"""""" cur_dir = os.path.dirname(os.path.realpath(__file__)) config_dir = os.path.join(cur_dir, *paths) config_files = [(f[:-4], f) for f in os.listdir(config_dir) if f[-4:] == "".cfg""] sample_files = [(f[:-11], f) for f in os.listdir(config_dir) if f[-11:] == "".cfg_sample""] if fname: config_files = [f for f in config_files if f[0] == fname] sample_files = [f for f in sample_files if f[0] == fname] config_files = dict(config_files) sample_files = dict(sample_files) cfg_files = sample_files for fn, f in config_files.iteritems(): cfg_files[fn] = f sample_files_exposed = [] confg = {} for src, fil in cfg_files.iteritems(): confg[src] = {} cfpr = ConfigParser.ConfigParser() cfpr.read(os.path.join(config_dir, fil)) for sec in cfpr.sections(): confg[src][sec] = dict(cfpr.items(sec)) if "".cfg_sample"" in fil: sample_files_exposed.append(fil) if len(sample_files_exposed) > 0: msg = "", "".join(sample_files_exposed) body = ""{} sample configuration files have been exposed. "" \ ""Rename *.cfg_sample to *.cfg, and populate the "" \ ""correct settings in the config and settings "" \ ""directories to avoid this warning."" msg = body.format(msg) warnings.warn(msg) keys = [] if fname: keys.append(fname) if sect: keys.append(sect) if sett: keys.append(sett) try: return get_from_nested(keys, confg) except KeyError: if default is not None: return default else: raise return reader_func" 2522,"def returnLabelStateMassDifferences(peptide, labelDescriptor, labelState=None, sequence=None): """"""Calculates the mass difference for alternative possible label states of a given peptide. See also :class:`LabelDescriptor`, :func:`returnLabelState()` :param peptide: Peptide to calculate alternative label states :param labelDescriptor: :class:`LabelDescriptor` describes the label setup of an experiment :param labelState: label state of the peptide, if None it is calculated by :func:`returnLabelState()` :param sequence: unmodified amino acid sequence of the ""peptide"", if None it is generated by :func:`maspy.peptidemethods.removeModifications()` :returns: {alternativeLabelSate: massDifference, ...} or {} if the peptide label state is -1. .. note:: The massDifference plus the peptide mass is the expected mass of an alternatively labeled peptide """""" if labelState is None: labelState = returnLabelState(peptide, labelDescriptor) if sequence is None: sequence = maspy.peptidemethods.removeModifications(peptide) if labelState < 0: # special case for mixed label... 
# return dict() # define type and number of labels of the peptide labelModNumbers = dict() _positions = expectedLabelPosition(peptide, labelDescriptor.labels[labelState], sequence=sequence) for labelStateModList in viewvalues(_positions): for labelMod in labelStateModList: labelModNumbers.setdefault(labelMod, int()) labelModNumbers[labelMod] += 1 # calculate the combined labels mass of the peptide labelMass = int() for labelMod, modCounts in viewitems(labelModNumbers): labelMass += maspy.constants.aaModMass[labelMod] * modCounts # calculate mass differences to all other possible label states labelStateMassDifferences = dict() for possibleLabelState in viewkeys(labelDescriptor.labels): if possibleLabelState == labelState: continue labelModNumbers = dict() _positions = expectedLabelPosition(peptide, labelDescriptor.labels[possibleLabelState], sequence=sequence) for labelStateModList in viewvalues(_positions): for labelMod in labelStateModList: labelModNumbers.setdefault(labelMod, int()) labelModNumbers[labelMod] += 1 possibleLabelMass = int() for labelMod, modCounts in viewitems(labelModNumbers): possibleLabelMass += maspy.constants.aaModMass[labelMod] * modCounts possibleLabelMassDifference = possibleLabelMass - labelMass labelStateMassDifferences[possibleLabelState] = possibleLabelMassDifference return labelStateMassDifferences" 2523,"def returnLabelState(peptide, labelDescriptor, labelSymbols=None, labelAminoacids=None): """"""Calculates the label state of a given peptide for the label setup described in labelDescriptor :param peptide: peptide which label state should be calcualted :param labelDescriptor: :class:`LabelDescriptor`, describes the label setup of an experiment. :param labelSymbols: modifications that show a label, as returned by :func:`modSymbolsFromLabelInfo`. :param labelAminoacids: amino acids that can bear a label, as returned by :func:`modAminoacidsFromLabelInfo`. 
:returns: integer that shows the label state: >=0: predicted label state of the peptide -1: peptide sequence can't bear any labelState modifications -2: peptide modifications don't fit to any predicted labelState -3: peptide modifications fit to a predicted labelState, but not all predicted labelStates are distinguishable """""" if labelSymbols is None: labelSymbols = modSymbolsFromLabelInfo(labelDescriptor) if labelAminoacids is None: labelAminoacids = modAminoacidsFromLabelInfo(labelDescriptor) sequence = maspy.peptidemethods.removeModifications(peptide) modPositions = maspy.peptidemethods.returnModPositions(peptide, indexStart=0, removeModString=False) labelState = None #No amino acids in sequence which can bear a label modification #Note: at the moment presence of excluding modifications are ignored _validator = lambda seq, aa: (True if seq.find(aa) == -1 else False) if all([_validator(sequence, aa) for aa in labelAminoacids]): #No terminal label modifications specified by labelDescriptor if 'nTerm' not in labelAminoacids and 'cTerm' not in labelAminoacids: labelState = -1 # Check if the peptide mofidifcations fit to any predicted label state if labelState is None: peptideLabelPositions = dict() for labelSymbol in labelSymbols: if labelSymbol in viewkeys(modPositions): for sequencePosition in modPositions[labelSymbol]: peptideLabelPositions.setdefault(sequencePosition, list()) peptideLabelPositions[sequencePosition].append(labelSymbol) for sequencePosition in list(viewkeys(peptideLabelPositions)): peptideLabelPositions[sequencePosition] = \ sorted(peptideLabelPositions[sequencePosition]) predictedLabelStates = dict() for predictedLabelState, labelStateInfo in viewitems(labelDescriptor.labels): expectedLabelMods = expectedLabelPosition(peptide, labelStateInfo, sequence=sequence, modPositions=modPositions) predictedLabelStates[predictedLabelState] = expectedLabelMods if peptideLabelPositions == expectedLabelMods: #If another expectedLabel state has already been matched, then #there is an ambiguity between label states ... labelState = predictedLabelState if labelState is None: # Peptide mofidifcations don't fit to any predicted label state labelState = -2 elif labelState != -1: # Check if all predicted label states are distinguishable _comb = set(itertools.combinations(range(len(predictedLabelStates)), 2)) for state1, state2 in _comb: if predictedLabelStates[state1] == predictedLabelStates[state2]: labelState = -3 break return labelState" 2524,"def modSymbolsFromLabelInfo(labelDescriptor): """"""Returns a set of all modiciation symbols which were used in the labelDescriptor :param labelDescriptor: :class:`LabelDescriptor` describes the label setup of an experiment :returns: #TODO: docstring """""" modSymbols = set() for labelStateEntry in viewvalues(labelDescriptor.labels): for labelPositionEntry in viewvalues(labelStateEntry['aminoAcidLabels']): for modSymbol in aux.toList(labelPositionEntry): if modSymbol != '': modSymbols.add(modSymbol) return modSymbols" 2525,"def modAminoacidsFromLabelInfo(labelDescriptor): """"""Returns a set of all amino acids and termini which can bear a label, as described in ""labelDescriptor"". 
:param labelDescriptor: :class:`LabelDescriptor` describes the label setup of an experiment :returns: #TODO: docstring """""" modAminoacids = set() for labelStateEntry in viewvalues(labelDescriptor.labels): for labelPositionEntry in viewkeys(labelStateEntry['aminoAcidLabels']): for modAminoacid in aux.toList(labelPositionEntry): if modAminoacid != '': modAminoacids.add(modAminoacid) return modAminoacids" 2526,"def expectedLabelPosition(peptide, labelStateInfo, sequence=None, modPositions=None): """"""Returns a modification description of a certain label state of a peptide. :param peptide: Peptide sequence used to calculat the expected label state modifications :param labelStateInfo: An entry of :attr:`LabelDescriptor.labels` that describes a label state :param sequence: unmodified amino acid sequence of :var:`peptide`, if None it is generated by :func:`maspy.peptidemethods.removeModifications()` :param modPositions: dictionary describing the modification state of ""peptide"", if None it is generated by :func:`maspy.peptidemethods.returnModPositions()` :returns: {sequence position: sorted list of expected label modifications on that position, ... } """""" if modPositions is None: modPositions = maspy.peptidemethods.returnModPositions(peptide, indexStart=0 ) if sequence is None: sequence = maspy.peptidemethods.removeModifications(peptide) currLabelMods = dict() for labelPosition, labelSymbols in viewitems(labelStateInfo['aminoAcidLabels']): labelSymbols = aux.toList(labelSymbols) if labelSymbols == ['']: pass elif labelPosition == 'nTerm': currLabelMods.setdefault(0, list()) currLabelMods[0].extend(labelSymbols) else: for sequencePosition in aux.findAllSubstrings(sequence, labelPosition): currLabelMods.setdefault(sequencePosition, list()) currLabelMods[sequencePosition].extend(labelSymbols) if labelStateInfo['excludingModifications'] is not None: for excludingMod, excludedLabelSymbol in viewitems(labelStateInfo['excludingModifications']): if excludingMod not in modPositions: continue for excludingModPos in modPositions[excludingMod]: if excludingModPos not in currLabelMods: continue if excludedLabelSymbol not in currLabelMods[excludingModPos]: continue if len(currLabelMods[excludingModPos]) == 1: del(currLabelMods[excludingModPos]) else: excludedModIndex = currLabelMods[excludingModPos].index(excludedLabelSymbol) currLabelMods[excludingModPos].pop(excludedModIndex) for sequencePosition in list(viewkeys(currLabelMods)): currLabelMods[sequencePosition] = sorted(currLabelMods[sequencePosition]) return currLabelMods" 2527,"def addLabel(self, aminoAcidLabels, excludingModifications=None): """"""Adds a new labelstate. :param aminoAcidsLabels: Describes which amino acids can bear which labels. Possible keys are the amino acids in one letter code and 'nTerm', 'cTerm'. Possible values are the modifications ids from :attr:`maspy.constants.aaModMass` as strings or a list of strings. An example for one expected label at the n-terminus and two expected labels at each Lysine: ``{'nTerm': 'u:188', 'K': ['u:188', 'u:188']}`` :param excludingModifications: optional, A Dectionary that describes which modifications can prevent the addition of labels. Keys and values have to be the modifications ids from :attr:`maspy.constants.aaModMass`. The key specifies the modification that prevents the label modification specified by the value. 
For example for each modification 'u:1' that is present at an amino acid or terminus of a peptide the number of expected labels at this position is reduced by one: ``{'u:1':'u:188'}`` """""" if excludingModifications is not None: self.excludingModifictions = True labelEntry = {'aminoAcidLabels': aminoAcidLabels, 'excludingModifications': excludingModifications } self.labels[self._labelCounter] = labelEntry self._labelCounter += 1" 2528,"def get_gen_slice(ctx=Bubble(), iterable=[], amount=-1, index=-1): """"""very crude way of slicing a generator"""""" ctx.gbc.say('get_gen_slice', stuff=iterable, verbosity=10) i = -1 # TODO # i = 0 #NATURAL INDEX, this will break all features with exports and -p if amount > 0: if index < 0: index = 0 else: for item in iterable: i += 1 item[buts('index')] = i ctx.gbc.say('Get gen NO slice:item %d' % i, verbosity=100) ctx.gbc.say('Get gen NO slice:a:%d i:%d' % (amount, index), verbosity=100) ctx.gbc.say('Get gen NO slice:item', stuff=item, verbosity=1000) yield item until = index + amount if six.PY2: sli = xrange(index, until) else: sli = range(index, until) ctx.gbc.say('Get gen slice:range %s' % str(sli), verbosity=1000) # TODO: iterable should be empty if not slicing # if valid slice ... for item in iterable: i += 1 if i in sli: ctx.gbc.say('Get gen slice:item %d' % i, verbosity=100) ctx.gbc.say('Get gen slice:a:%d i:%d' % (amount, index), verbosity=100) ctx.gbc.say('Get gen slice:item', stuff=item, verbosity=1000) item[buts('index')] = i yield item elif i > until: break else: pass" 2529,"def deploy_schema_to_db(self, mode='safe', files_deployment=None, vcs_ref=None, vcs_link=None, issue_ref=None, issue_link=None, compare_table_scripts_as_int=False, config_path=None, config_dict=None, config_object=None, source_code_path=None, auto_commit=False): """""" Deploys schema :param files_deployment: if specific script to be deployed, only find them :param mode: :param vcs_ref: :param vcs_link: :param issue_ref: :param issue_link: :param compare_table_scripts_as_int: :param config_path: :param config_dict: :param config_object: :param source_code_path: :param auto_commit: :return: dictionary of the following format: { code: 0 if all fine, otherwise something else, message: message on the output function_scripts_requested: list of function files requested for deployment function_scripts_deployed: list of function files deployed type_scripts_requested: list of type files requested for deployment type_scripts_deployed: list of type files deployed view_scripts_requested: list of view files requested for deployment view_scripts_deployed: list of view files deployed trigger_scripts_requested: list of trigger files requested for deployment trigger_scripts_deployed: list of trigger files deployed table_scripts_requested: list of table files requested for deployment table_scripts_deployed: list of table files deployed requested_files_count: count of requested files to deploy deployed_files_count: count of deployed files } :rtype: dict """""" return_value = {} if files_deployment: return_value['function_scripts_requested'] = files_deployment return_value['type_scripts_requested'] = [] return_value['view_scripts_requested'] = [] return_value['trigger_scripts_requested'] = [] return_value['table_scripts_requested'] = [] if auto_commit: if mode == 'safe' and files_deployment: self._logger.debug(""Auto commit mode is on. 
Be careful."") else: self._logger.error(""Auto commit deployment can only be done with file "" ""deployments and in safe mode for security reasons"") raise ValueError(""Auto commit deployment can only be done with file "" ""deployments and in safe mode for security reasons"") # set source code path if exists self._source_code_path = self._source_code_path or source_code_path # set configuration if either of config_path, config_dict, config_object are set. # Otherwise use configuration from class initialisation if config_object: self._config = config_object elif config_path or config_dict: self._config = pgpm.lib.utils.config.SchemaConfiguration(config_path, config_dict, self._source_code_path) # Check if in git repo if not vcs_ref: if pgpm.lib.utils.vcs.is_git_directory(self._source_code_path): vcs_ref = pgpm.lib.utils.vcs.get_git_revision_hash(self._source_code_path) self._logger.debug('commit reference to be deployed is {0}'.format(vcs_ref)) else: self._logger.debug('Folder is not a known vcs repository') self._logger.debug('Configuration of package {0} of version {1} loaded successfully.' .format(self._config.name, self._config.version.raw)) # TODO: change to to_string once discussed # .format(self._config.name, self._config.version.to_string())) # Get scripts type_scripts_dict = self._get_scripts(self._config.types_path, files_deployment, ""types"", self._source_code_path) if not files_deployment: return_value['type_scripts_requested'] = [key for key in type_scripts_dict] function_scripts_dict = self._get_scripts(self._config.functions_path, files_deployment, ""functions"", self._source_code_path) if not files_deployment: return_value['function_scripts_requested'] = [key for key in function_scripts_dict] view_scripts_dict = self._get_scripts(self._config.views_path, files_deployment, ""views"", self._source_code_path) if not files_deployment: return_value['view_scripts_requested'] = [key for key in view_scripts_dict] trigger_scripts_dict = self._get_scripts(self._config.triggers_path, files_deployment, ""triggers"", self._source_code_path) if not files_deployment: return_value['trigger_scripts_requested'] = [key for key in trigger_scripts_dict] # before with table scripts only file name was an identifier. Now whole relative path the file # (relative to config.json) # table_scripts_dict_denormalised = self._get_scripts(self._config.tables_path, files_deployment, # ""tables"", self._source_code_path) # table_scripts_dict = {os.path.split(k)[1]: v for k, v in table_scripts_dict_denormalised.items()} table_scripts_dict = self._get_scripts(self._config.tables_path, files_deployment, ""tables"", self._source_code_path) if not files_deployment: return_value['table_scripts_requested'] = [key for key in table_scripts_dict] if self._conn.closed: self._conn = psycopg2.connect(self._connection_string, connection_factory=pgpm.lib.utils.db.MegaConnection) cur = self._conn.cursor() # be cautious, dangerous thing if auto_commit: self._conn.autocommit = True # Check if DB is pgpm enabled if not pgpm.lib.utils.db.SqlScriptsHelper.schema_exists(cur, self._pgpm_schema_name): self._logger.error('Can\'t deploy schemas to DB where pgpm was not installed. ' 'First install pgpm by running pgpm install') self._conn.close() sys.exit(1) # check installed version of _pgpm schema. 
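# Note (added): any version mismatch between this deployment script and the installed _pgpm schema aborts the deployment below.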
pgpm_v_db_tuple = pgpm.lib.utils.db.SqlScriptsHelper.get_pgpm_db_version(cur, self._pgpm_schema_name) pgpm_v_db = distutils.version.StrictVersion(""."".join(pgpm_v_db_tuple)) pgpm_v_script = distutils.version.StrictVersion(pgpm.lib.version.__version__) if pgpm_v_script > pgpm_v_db: self._logger.error('{0} schema version is outdated. Please run pgpm install --upgrade first.' .format(self._pgpm_schema_name)) self._conn.close() sys.exit(1) elif pgpm_v_script < pgpm_v_db: self._logger.error('Deployment script\'s version is lower than the version of {0} schema ' 'installed in DB. Update pgpm script first.'.format(self._pgpm_schema_name)) self._conn.close() sys.exit(1) # Resolve dependencies list_of_deps_ids = [] if self._config.dependencies: _is_deps_resolved, list_of_deps_ids, _list_of_unresolved_deps = \ self._resolve_dependencies(cur, self._config.dependencies) if not _is_deps_resolved: self._logger.error('There are unresolved dependencies. Deploy the following package(s) and try again:') for unresolved_pkg in _list_of_unresolved_deps: self._logger.error('{0}'.format(unresolved_pkg)) self._conn.close() sys.exit(1) # Prepare and execute preamble _deployment_script_preamble = pkgutil.get_data('pgpm', 'lib/db_scripts/deploy_prepare_config.sql') self._logger.debug('Executing a preamble to deployment statement') cur.execute(_deployment_script_preamble) # Get schema name from project configuration schema_name = '' if self._config.scope == pgpm.lib.utils.config.SchemaConfiguration.SCHEMA_SCOPE: if self._config.subclass == 'versioned': schema_name = '{0}_{1}'.format(self._config.name, self._config.version.raw) self._logger.debug('Schema {0} will be updated'.format(schema_name)) elif self._config.subclass == 'basic': schema_name = '{0}'.format(self._config.name) if not files_deployment: self._logger.debug('Schema {0} will be created/replaced'.format(schema_name)) else: self._logger.debug('Schema {0} will be updated'.format(schema_name)) # Create schema or update it if exists (if not in production mode) and set search path if files_deployment: # if specific scripts to be deployed if self._config.scope == pgpm.lib.utils.config.SchemaConfiguration.SCHEMA_SCOPE: if not pgpm.lib.utils.db.SqlScriptsHelper.schema_exists(cur, schema_name): self._logger.error('Can\'t deploy scripts to schema {0}. Schema doesn\'t exist in database' .format(schema_name)) self._conn.close() sys.exit(1) else: pgpm.lib.utils.db.SqlScriptsHelper.set_search_path(cur, schema_name) self._logger.debug('Search_path was changed to schema {0}'.format(schema_name)) else: if self._config.scope == pgpm.lib.utils.config.SchemaConfiguration.SCHEMA_SCOPE: if not pgpm.lib.utils.db.SqlScriptsHelper.schema_exists(cur, schema_name): pgpm.lib.utils.db.SqlScriptsHelper.create_db_schema(cur, schema_name) elif mode == 'safe': self._logger.error('Schema already exists. It won\'t be overriden in safe mode. ' 'Rerun your script with ""-m moderate"", ""-m overwrite"" or ""-m unsafe"" flags') self._conn.close() sys.exit(1) elif mode == 'moderate': old_schema_exists = True old_schema_rev = 0 while old_schema_exists: old_schema_exists = pgpm.lib.utils.db.SqlScriptsHelper.schema_exists( cur, schema_name + '_' + str(old_schema_rev)) if old_schema_exists: old_schema_rev += 1 old_schema_name = schema_name + '_' + str(old_schema_rev) self._logger.debug('Schema already exists. It will be renamed to {0} in moderate mode. Renaming...' 
.format(old_schema_name)) _rename_schema_script = ""ALTER SCHEMA {0} RENAME TO {1};\n"".format(schema_name, old_schema_name) cur.execute(_rename_schema_script) # Add metadata to pgpm schema pgpm.lib.utils.db.SqlScriptsHelper.set_search_path(cur, self._pgpm_schema_name) cur.callproc('_set_revision_package'.format(self._pgpm_schema_name), [self._config.name, self._config.subclass, old_schema_rev, self._config.version.major, self._config.version.minor, self._config.version.patch, self._config.version.pre]) self._logger.debug('Schema {0} was renamed to {1}. Meta info was added to {2} schema' .format(schema_name, old_schema_name, self._pgpm_schema_name)) pgpm.lib.utils.db.SqlScriptsHelper.create_db_schema(cur, schema_name) elif mode == 'unsafe': _drop_schema_script = ""DROP SCHEMA {0} CASCADE;\n"".format(schema_name) cur.execute(_drop_schema_script) self._logger.debug('Dropping old schema {0}'.format(schema_name)) pgpm.lib.utils.db.SqlScriptsHelper.create_db_schema(cur, schema_name) if self._config.scope == pgpm.lib.utils.config.SchemaConfiguration.SCHEMA_SCOPE: pgpm.lib.utils.db.SqlScriptsHelper.set_search_path(cur, schema_name) # Reordering and executing types return_value['type_scripts_deployed'] = [] if len(type_scripts_dict) > 0: types_script = '\n'.join([''.join(value) for key, value in type_scripts_dict.items()]) type_drop_scripts, type_ordered_scripts, type_unordered_scripts = self._reorder_types(types_script) if type_drop_scripts: for statement in type_drop_scripts: if statement: cur.execute(statement) if type_ordered_scripts: for statement in type_ordered_scripts: if statement: cur.execute(statement) if type_unordered_scripts: for statement in type_unordered_scripts: if statement: cur.execute(statement) self._logger.debug('Types loaded to schema {0}'.format(schema_name)) return_value['type_scripts_deployed'] = [key for key in type_scripts_dict] else: self._logger.debug('No type scripts to deploy') # Executing Table DDL scripts executed_table_scripts = [] return_value['table_scripts_deployed'] = [] if len(table_scripts_dict) > 0: if compare_table_scripts_as_int: sorted_table_scripts_dict = collections.OrderedDict(sorted(table_scripts_dict.items(), key=lambda t: int(t[0].rsplit('.', 1)[0]))) else: sorted_table_scripts_dict = collections.OrderedDict(sorted(table_scripts_dict.items(), key=lambda t: t[0].rsplit('.', 1)[0])) self._logger.debug('Running Table DDL scripts') for key, value in sorted_table_scripts_dict.items(): pgpm.lib.utils.db.SqlScriptsHelper.set_search_path(cur, self._pgpm_schema_name) cur.callproc('_is_table_ddl_executed'.format(self._pgpm_schema_name), [ key, self._config.name, self._config.subclass, self._config.version.major, self._config.version.minor, self._config.version.patch, self._config.version.pre ]) is_table_executed = cur.fetchone()[0] if self._config.scope == pgpm.lib.utils.config.SchemaConfiguration.SCHEMA_SCOPE: pgpm.lib.utils.db.SqlScriptsHelper.set_search_path(cur, schema_name) elif self._config.scope == pgpm.lib.utils.config.SchemaConfiguration.DATABASE_SCOPE: cur.execute(""SET search_path TO DEFAULT ;"") if (not is_table_executed) or (mode == 'unsafe'): # if auto commit mode than every statement is called separately. 
# this is done this way as auto commit is normally used when non transaction statements are called # then this is needed to avoid ""cannot be executed from a function or multi-command string"" errors if auto_commit: for statement in sqlparse.split(value): if statement: cur.execute(statement) else: cur.execute(value) self._logger.debug(value) self._logger.debug('{0} executed for schema {1}'.format(key, schema_name)) executed_table_scripts.append(key) return_value['table_scripts_deployed'].append(key) else: self._logger.debug('{0} is not executed for schema {1} as it has already been executed before. ' .format(key, schema_name)) else: self._logger.debug('No Table DDL scripts to execute') # Executing functions return_value['function_scripts_deployed'] = [] if len(function_scripts_dict) > 0: self._logger.debug('Running functions definitions scripts') for key, value in function_scripts_dict.items(): # if auto commit mode than every statement is called separately. # this is done this way as auto commit is normally used when non transaction statements are called # then this is needed to avoid ""cannot be executed from a function or multi-command string"" errors if auto_commit: for statement in sqlparse.split(value): if statement: cur.execute(statement) else: cur.execute(value) return_value['function_scripts_deployed'].append(key) self._logger.debug('Functions loaded to schema {0}'.format(schema_name)) else: self._logger.debug('No function scripts to deploy') # Executing views return_value['view_scripts_deployed'] = [] if len(view_scripts_dict) > 0: self._logger.debug('Running views definitions scripts') for key, value in view_scripts_dict.items(): # if auto commit mode than every statement is called separately. # this is done this way as auto commit is normally used when non transaction statements are called # then this is needed to avoid ""cannot be executed from a function or multi-command string"" errors if auto_commit: for statement in sqlparse.split(value): if statement: cur.execute(statement) else: cur.execute(value) return_value['view_scripts_deployed'].append(key) self._logger.debug('Views loaded to schema {0}'.format(schema_name)) else: self._logger.debug('No view scripts to deploy') # Executing triggers return_value['trigger_scripts_deployed'] = [] if len(trigger_scripts_dict) > 0: self._logger.debug('Running trigger definitions scripts') for key, value in trigger_scripts_dict.items(): # if auto commit mode than every statement is called separately. # this is done this way as auto commit is normally used when non transaction statements are called # then this is needed to avoid ""cannot be executed from a function or multi-command string"" errors if auto_commit: for statement in sqlparse.split(value): if statement: cur.execute(statement) else: cur.execute(value) return_value['trigger_scripts_deployed'].append(key) self._logger.debug('Triggers loaded to schema {0}'.format(schema_name)) else: self._logger.debug('No trigger scripts to deploy') # alter schema privileges if needed if (not files_deployment) and mode != 'overwrite' \ and self._config.scope == pgpm.lib.utils.config.SchemaConfiguration.SCHEMA_SCOPE: pgpm.lib.utils.db.SqlScriptsHelper.revoke_all(cur, schema_name, 'public') if self._config.usage_roles: pgpm.lib.utils.db.SqlScriptsHelper.grant_usage_privileges( cur, schema_name, ', '.join(self._config.usage_roles)) self._logger.debug('User(s) {0} was (were) granted usage permissions on schema {1}.' 
.format("", "".join(self._config.usage_roles), schema_name)) if self._config.owner_role: pgpm.lib.utils.db.SqlScriptsHelper.set_search_path(cur, self._pgpm_schema_name) cur.callproc('_alter_schema_owner', [schema_name, self._config.owner_role]) self._logger.debug('Ownership of schema {0} and all its objects was changed and granted to user {1}.' .format(schema_name, self._config.owner_role)) # Add metadata to pgpm schema pgpm.lib.utils.db.SqlScriptsHelper.set_search_path(cur, self._pgpm_schema_name) cur.callproc('_upsert_package_info'.format(self._pgpm_schema_name), [self._config.name, self._config.subclass, self._config.version.major, self._config.version.minor, self._config.version.patch, self._config.version.pre, self._config.version.metadata, self._config.description, self._config.license, list_of_deps_ids, vcs_ref, vcs_link, issue_ref, issue_link]) self._logger.debug('Meta info about deployment was added to schema {0}' .format(self._pgpm_schema_name)) pgpm_package_id = cur.fetchone()[0] if len(table_scripts_dict) > 0: for key in executed_table_scripts: cur.callproc('_log_table_evolution'.format(self._pgpm_schema_name), [key, pgpm_package_id]) # Commit transaction self._conn.commit() self._conn.close() deployed_files_count = len(return_value['function_scripts_deployed']) + \ len(return_value['type_scripts_deployed']) + \ len(return_value['view_scripts_deployed']) + \ len(return_value['trigger_scripts_deployed']) + \ len(return_value['table_scripts_deployed']) requested_files_count = len(return_value['function_scripts_requested']) + \ len(return_value['type_scripts_requested']) + \ len(return_value['view_scripts_requested']) + \ len(return_value['trigger_scripts_requested']) + \ len(return_value['table_scripts_requested']) return_value['deployed_files_count'] = deployed_files_count return_value['requested_files_count'] = requested_files_count if deployed_files_count == requested_files_count: return_value['code'] = self.DEPLOYMENT_OUTPUT_CODE_OK return_value['message'] = 'OK' else: return_value['code'] = self.DEPLOYMENT_OUTPUT_CODE_NOT_ALL_DEPLOYED return_value['message'] = 'Not all requested files were deployed' return return_value" 2530,"def _get_scripts(self, scripts_path_rel, files_deployment, script_type, project_path): """""" Gets scripts from specified folders """""" scripts_dict = {} if scripts_path_rel: self._logger.debug('Getting scripts with {0} definitions'.format(script_type)) scripts_dict = pgpm.lib.utils.misc.collect_scripts_from_sources(scripts_path_rel, files_deployment, project_path, False, self._logger) if len(scripts_dict) == 0: self._logger.debug('No {0} definitions were found in {1} folder'.format(script_type, scripts_path_rel)) else: self._logger.debug('No {0} folder was specified'.format(script_type)) return scripts_dict" 2531,"def _resolve_dependencies(self, cur, dependencies): """""" Function checks if dependant packages are installed in DB """""" list_of_deps_ids = [] _list_of_deps_unresolved = [] _is_deps_resolved = True for k, v in dependencies.items(): pgpm.lib.utils.db.SqlScriptsHelper.set_search_path(cur, self._pgpm_schema_name) cur.execute(""SELECT _find_schema('{0}', '{1}')"" .format(k, v)) pgpm_v_ext = tuple(cur.fetchone()[0][1:-1].split(',')) try: list_of_deps_ids.append(int(pgpm_v_ext[0])) except: pass if not pgpm_v_ext[0]: _is_deps_resolved = False _list_of_deps_unresolved.append(""{0}: {1}"".format(k, v)) return _is_deps_resolved, list_of_deps_ids, _list_of_deps_unresolved" 2532,"def _reorder_types(self, types_script): """""" Takes type scripts and 
reorders them to avoid Type doesn't exist exception """""" self._logger.debug('Running types definitions scripts') self._logger.debug('Reordering types definitions scripts to avoid ""type does not exist"" exceptions') _type_statements = sqlparse.split(types_script) # TODO: move up to classes _type_statements_dict = {} # dictionary that store statements with type and order. type_unordered_scripts = [] # scripts to execute without order type_drop_scripts = [] # drop scripts to execute first for _type_statement in _type_statements: _type_statement_parsed = sqlparse.parse(_type_statement) if len(_type_statement_parsed) > 0: # can be empty parsed object so need to check # we need only type declarations to be ordered if _type_statement_parsed[0].get_type() == 'CREATE': _type_body_r = r'\bcreate\s+\b(?:type|domain)\s+\b(\w+\.\w+|\w+)\b' _type_name = re.compile(_type_body_r, flags=re.IGNORECASE).findall(_type_statement)[0] _type_statements_dict[str(_type_name)] = \ {'script': _type_statement, 'deps': []} elif _type_statement_parsed[0].get_type() == 'DROP': type_drop_scripts.append(_type_statement) else: type_unordered_scripts.append(_type_statement) # now let's add dependant types to dictionary with types # _type_statements_list = [] # list of statements to be ordered for _type_key in _type_statements_dict.keys(): for _type_key_sub, _type_value in _type_statements_dict.items(): if _type_key != _type_key_sub: if pgpm.lib.utils.misc.find_whole_word(_type_key)(_type_value['script']): _type_value['deps'].append(_type_key) # now let's add order to type scripts and put them ordered to list _deps_unresolved = True _type_script_order = 0 _type_names = [] type_ordered_scripts = [] # ordered list with scripts to execute while _deps_unresolved: for k, v in _type_statements_dict.items(): if not v['deps']: _type_names.append(k) v['order'] = _type_script_order _type_script_order += 1 if not v['script'] in type_ordered_scripts: type_ordered_scripts.append(v['script']) else: _dep_exists = True for _dep in v['deps']: if _dep not in _type_names: _dep_exists = False if _dep_exists: _type_names.append(k) v['order'] = _type_script_order _type_script_order += 1 if not v['script'] in type_ordered_scripts: type_ordered_scripts.append(v['script']) else: v['order'] = -1 _deps_unresolved = False for k, v in _type_statements_dict.items(): if v['order'] == -1: _deps_unresolved = True return type_drop_scripts, type_ordered_scripts, type_unordered_scripts" 2533,"def find_table_links(self): """""" When given a url, this function will find all the available table names for that EPA dataset. """""" html = urlopen(self.model_url).read() doc = lh.fromstring(html) href_list = [area.attrib['href'] for area in doc.cssselect('map area')] tables = self._inception_table_links(href_list) return tables" 2534,"def _inception_table_links(self, href_list): """""" Sometimes the EPA likes to nest their models and tables -- model within a model within a model -- so this internal method tries to clear all that up. """""" tables = set() for link in href_list: if not link.startswith('http://'): link = self.agency_url + link html = urlopen(link).read() doc = lh.fromstring(html) area = doc.cssselect('map area') if area: # Then this is a model containing models. tables.update((a.attrib['href'] for a in area)) else: # The link is a table without additional models. 
tables.update(link) return tables" 2535,"def find_definition_urls(self, set_of_links): """"""Find the available definition URLs for the columns in a table."""""" definition_dict = {} re_link_name = re.compile('.*p_table_name=(\w+)&p_topic.*') for link in set_of_links: if link.startswith('http://'): table_dict = {} html = urlopen(link).read() doc = lh.fromstring(html) unordered_list = doc.cssselect('#main ul')[-1] for li in unordered_list.iterchildren(): a = li.find('a') table_dict.update({a.text: a.attrib['href']}) link_name = re_link_name.sub(r'\1', link).upper() definition_dict.update({link_name: table_dict}) return definition_dict" 2536,"def create_agency(self): """"""Create an agency text file of definitions."""""" agency = self.agency links = self.find_table_links() definition_dict = self.find_definition_urls(links) with open(agency + '.txt', 'w') as f: f.write(str(definition_dict))" 2537,"def loop_through_agency(self): """"""Loop through an agency to grab the definitions for its tables."""""" agency = self.agency with open(agency + '.txt') as f: data = eval(f.read()) for table in data: for column in data[table]: value_link = data[table][column] data[table][column] = self.grab_definition(value_link) data = json.dumps(data) with open(agency + '_values.json', 'w') as f: f.write(str(data))" 2538,"def grab_definition(self, url): """""" Grab the column definition of a table from the EPA using a combination of regular expressions and lxml. """""" re_description = re.compile('Description:(.+?\\n)') re_table_name = re.compile(""(\w+ Table.+)"") if url.startswith('//'): url = 'http:' + url elif url.startswith('/'): url = 'http://www.epa.gov' + url try: html = urlopen(url).read() doc = lh.fromstring(html) main = doc.cssselect('#main')[0] text = main.text_content() definition = re_description.search(text).group(1).strip() except (AttributeError, IndexError, TypeError, HTTPError): print url else: value = re_table_name.sub('', definition) return value return url" 2539,"def add_arc(self, src, dst, char): """"""Adds a new Arc Args: src (int): The source state identifier dst (int): The destination state identifier char (str): The character for the transition Returns: None """""" if src not in self.automaton.states(): self.add_state() arc = fst.Arc(self.isyms[char], self.osyms[char], fst.Weight.One(self.automaton.weight_type()), dst) self.automaton.add_arc(src, arc)" 2540,"def fixminimized(self, alphabet): """""" After pyfst minimization, all unused arcs are removed, and all sink states are removed. However this may break compatibility. 
Args: alphabet (list): The input alphabet Returns: None """""" insymbols = fst.SymbolTable() outsymbols = fst.SymbolTable() num = 1 for char in self.alphabet: self.isyms.__setitem__(char, num) self.osyms.__setitem__(char, num) insymbols.add_symbol(char, num) outsymbols.add_symbol(char, num) num = num + 1 self.automaton.set_input_symbols(insymbols) self.automaton.set_output_symbols(outsymbols) endstate = self.add_state() for state in self.states: for char in alphabet: found = 0 for arc in state.arcs: if self.isyms.find(arc.ilabel) == char: found = 1 break if found == 0: self.add_arc(state.stateid, endstate, char) self[endstate].final = False for char in alphabet: self.add_arc(endstate, endstate, char)" 2541,"def complement(self, alphabet): """""" Returns the complement of DFA Args: alphabet (list): The input alphabet Returns: None """""" self._addsink(alphabet) for state in self.automaton.states(): if self.automaton.final(state) == fst.Weight.One(self.automaton.weight_type()): self.automaton.set_final(state, fst.Weight.Zero(self.automaton.weight_type())) else: self.automaton.set_final(state, fst.Weight.One(self.automaton.weight_type()))" 2542,"def init_from_acceptor_bycopying(self, acceptor): """""" Adds a sink state Args: alphabet (list): The input alphabet Returns: None """""" for state in acceptor.states: for arc in state.arcs: self.add_arc(state.stateid, arc.nextstate, acceptor.isyms.find(arc.ilabel)) if state.final: print state.stateid,' is final' self[state.stateid].final = True;" 2543,"def intersect(self, other): """"""Constructs an unminimized DFA recognizing the intersection of the languages of two given DFAs. Args: other (DFA): The other DFA that will be used for the intersect operation Returns: Returns: DFA: The resulting DFA """""" self.automaton = fst.intersect(self.automaton, other.automaton) return self" 2544,"def main(*argv, filesystem=None, do_exit=True, stdout=None, stderr=None): """"""Main method for the cli. We allow the filesystem to be overridden for test purposes."""""" try: mdcli = MdCLI() mdcli.filesystem = filesystem mdcli.stdout = stdout or sys.stdout mdcli.stderr = stderr or sys.stderr retval = mdcli.main(*argv, loop=LOOP_NEVER) if do_exit: sys.exit(retval) else: return retval except KeyboardInterrupt: pass" 2545,"def get_optparser(self): """"""Override to allow specification of the maildir"""""" p = Cmdln.get_optparser(self) p.add_option( ""-M"", ""--maildir"", action=""store"", dest=""maildir"" ) p.add_option( ""-V"", ""--verbose"", action=""store_true"", dest=""verbose"" ) return p" 2546,"def do_lsfolders(self, subcmd, opts): """"""${cmd_name}: list the sub folders of the maildir. 
${cmd_usage} """""" client = MdClient(self.maildir, filesystem=self.filesystem) client.lsfolders(stream=self.stdout)" 2547,"def do_ls(self, subcmd, opts, folder=""""): """"""${cmd_name}: list messages in the specified folder ${cmd_usage} ${cmd_option_list} SINCE can be used with epoch times, for example: md ls -s $(date '+%s') """""" client = MdClient(self.maildir, filesystem=self.filesystem) client.ls( foldername = folder, stream = self.stdout, reverse = getattr(opts, ""reverse"", False), grep = getattr(opts, ""grep"", None), field = getattr(opts, ""field"", None), since = float(getattr(opts, ""since"", -1)) )" 2548,"def do_lisp(self, subcmd, opts, folder=""""): """"""${cmd_name}: list messages in the specified folder in JSON format ${cmd_usage} """""" client = MdClient(self.maildir, filesystem=self.filesystem) client.lisp( foldername=folder, stream=self.stdout, reverse=getattr(opts, ""reverse"", False), since=float(getattr(opts, ""since"", -1)) )" 2549,"def do_make(self, subcmd, opts, path): """"""${cmd_name}: make a maildir at the specified path. ${cmd_usage} If the path is relative then create under MAILDIR else create at the absolute location. """""" # Do we need to make this "".path"" if it's relative? d = path if path[0] == ""/"" else joinpath(self.maildir, ""."" + path) os.makedirs(joinpath(d, ""cur"")) os.makedirs(joinpath(d, ""new"")) os.makedirs(joinpath(d, ""tmp"")) os.makedirs(joinpath(d, ""store""))" 2550,"def do_rm(self, subcmd, opts, message): """"""${cmd_name}: remove the specified message ${cmd_usage} """""" maildir = self.maildir client = MdClient(maildir, filesystem=self.filesystem) try: client.remove(message) except KeyError: return 1" 2551,"def do_mv(self, subcmd, opts, message, folder): """"""${cmd_name}: move the specified message to the specified folder ${cmd_usage} """""" client = MdClient(self.maildir, filesystem=self.filesystem) client.move(message, folder)" 2552,"def do_text(self, subcmd, opts, message): """"""${cmd_name}: get the best text part of the specified message ${cmd_usage} """""" client = MdClient(self.maildir, filesystem=self.filesystem) client.gettext(message, self.stdout)" 2553,"def do_raw(self, subcmd, opts, message): """"""${cmd_name}: dump the complete raw message ${cmd_usage} """""" client = MdClient(self.maildir) client.getraw(message, self.stdout)" 2554,"def do_rawpart(self, subcmd, opts, message): """"""${cmd_name}: dump a part from the specified message ${cmd_usage} ${cmd_option_list} """""" client = MdClient(self.maildir, filesystem=self.filesystem) partid = getattr(opts, ""part"", None) if not partid: client.getrawpart(message, self.stdout) else: client.getrawpartid(message, partid, self.stdout)" 2555,"def do_struct(self, subcmd, opts, message): """"""${cmd_name}: get the structure of the specified message ${cmd_usage} ${cmd_option_list} """""" client = MdClient(self.maildir, filesystem=self.filesystem) as_json = getattr(opts, ""json"", False) client.getstruct(message, as_json=as_json, stream=self.stdout)" 2556,"def do_file(self, subcmd, opts, message): """"""${cmd_name}: download the whole file of the message. ${cmd_usage} """""" client = MdClient(self.maildir, filesystem=self.filesystem) client.get(message, self.stdout)" 2557,"def do_pull(self, subcmd, opts, remote_maildir): """"""${cmd_name}: pull the remote maildir into the local maildir. ${cmd_usage} ${cmd_option_list} The REMOTE_MAILDIR is a url which specifies where the dir is. 
A few different forms are supported: ssh://user@hostname/path is a remote directory at path, accessed via ssh file://path is a local maildir directory at path """""" import mdlib.pull m = re.match( ""(?P<protocol>[a-z]+)://(?P<urlpart>.*)"", remote_maildir ) if not m: print(""md pull: the remote maildir url was unrecognized"", file=self.stderr) return local_maildir = self.maildir noop = getattr(opts, ""noop"", False) or False verbose = getattr(self.options, ""verbose"", False) or False filterfile = getattr(opts, ""filter"", None) or None try: filterfd = open(filterfile) except: filterfd = None # Some error loading the filterfile if verbose: print(""md pull: could not load filter file"", file=self.stderr) data = m.groupdict() if data.get(""protocol"") == ""ssh"": m = re.match( ""(?P<user>[a-zA-Z0-9-]+@)*(?P<hostname>[a-zA-Z0-9.-]+)(?P<path>[a-zA-Z0-9./-]+)"", data.get(""urlpart"") ) if not m: print(""md pull: %s was not a remote maildir"" % remote_maildir, file=self.stderr) return data = m.groupdict() host = data.get(""hostname"", None) \ if not data.get(""user"", None) \ else ""%s@%s"" % (data.get(""user""), data.get(""hostname"")) remote_maildir = data.get(""path"") mdlib.pull.sshpull(host, remote_maildir, local_maildir, noop, verbose, filterfd) elif data.get(""protocol"") == ""file"": maildir = data.get(""urlpart"") mdlib.pull.filepull(maildir, local_maildir, noop, verbose, filterfd) else: print(""md pull: %s not a recognized protocol"" % protocol, file=self.stderr) # Finally try and close the filterfd if filterfd: try: filterfd.close() except: if verbose: print(""md pull: couldn't close open filter file"", file=self.stderr)" 2558,"def do_newfilter(self, subcmd, opts): """"""${cmd_name}: make a filterfile and spit it to stdout. """""" from mdlib.filterprocessor import RULES print(RULES, file=self.stdout)" 2559,"def do_storecheck(self, subcmd, opts): """"""${cmd_name}: checks the store for files that may not be in the maildirs. """""" from os.path import basename from os.path import dirname from os.path import exists as existspath from os.path import islink from os.path import join as joinpath maildir = self.maildir cur = joinpath(maildir, ""cur"") new = joinpath(maildir, ""new"") store = joinpath(maildir, ""store"") found_list = [] # Loop through the folders checking that everything maps back to the store for scandir in [cur, new]: for f in os.listdir(scandir): filename = joinpath(scandir, f) try: assert islink(filename) store_location = os.readlink(filename) assert existspath(store_location) and dirname(store_location) == store except AssertionError: print(""%s was not a link into the store"" % ( ""/"".join([ filename.split(""/"")[-2], filename.split(""/"")[-1] ]) ), file=self.stdout) else: found_list.append(basename(store_location)) for storefile in os.listdir(store): if storefile not in found_list: print( ""%s found in store but not folders"" % joinpath(""store"", storefile), file=self.stdout )" 2560,"def form(context, form, **kwargs): """""" The `form` template tag will render a tape-form enabled form using the template provided by `get_layout_template` method of the form using the context generated by `get_layout_context` method of the form. Usage:: {% load tapeforms %} {% form my_form %} You can override the used layout template using the keyword argument `using`:: {% load tapeforms %} {% form my_form using='other_form_layout_template.html' %} :param form: The Django form to render. :return: Rendered form (errors + hidden fields + fields) as HTML. 
"""""" if not isinstance(form, (forms.BaseForm, TapeformFieldset)): raise template.TemplateSyntaxError( 'Provided form should be a `Form` instance, actual type: {0}'.format( form.__class__.__name__)) return render_to_string( form.get_layout_template(kwargs.get('using', None)), form.get_layout_context(), )" 2561,"def formfield(context, bound_field, **kwargs): """""" The `formfield` template tag will render a form field of a tape-form enabled form using the template provided by `get_field_template` method of the form together with the context generated by `get_field_context` method of the form. Usage:: {% load tapeforms %} {% formfield my_form.my_field %} You can override the used field template using the keyword argument `using`:: {% load tapeforms %} {% formfield my_form.my_field using='other_field_template.html' %} :param bound_field: The `BoundField` from a Django form to render. :return: Rendered field (label + widget + other stuff) as HTML. """""" if not isinstance(bound_field, forms.BoundField): raise template.TemplateSyntaxError( 'Provided field should be a `BoundField` instance, actual type: {0}'.format( bound_field.__class__.__name__)) return render_to_string( bound_field.form.get_field_template(bound_field, kwargs.get('using', None)), bound_field.form.get_field_context(bound_field), )" 2562,"def wrap_as_node(self, func): 'wrap a function as a node' name = self.get_name(func) @wraps(func) def wrapped(*args, **kwargs): 'wrapped version of func' message = self.get_message_from_call(*args, **kwargs) self.logger.info('calling ""%s"" with %r', name, message) result = func(message) # functions can return multiple values (""emit"" multiple times) # by yielding instead of returning. Handle this case by making # a list of the results and processing them all after the # generator successfully exits. If we were to process them as # they came out of the generator, we might get a partially # processed input sent down the graph. This may be possible in # the future via a flag. if isinstance(result, GeneratorType): results = [ self.wrap_result(name, item) for item in result if item is not NoResult ] self.logger.debug( '%s returned generator yielding %d items', func, len(results) ) [self.route(name, item) for item in results] return tuple(results) # the case of a direct return is simpler. wrap, route, and # return the value. else: if result is NoResult: return result result = self.wrap_result(name, result) self.logger.debug( '%s returned single value %s', func, result ) self.route(name, result) return result return wrapped" 2563,"def node(self, fields, subscribe_to=None, entry_point=False, ignore=None, **wrapper_options): '''\ Decorate a function to make it a node. .. note:: decorating as a node changes the function signature. Nodes should accept a single argument, which will be a :py:class:`emit.message.Message`. Nodes can be called directly by providing a dictionary argument or a set of keyword arguments. Other uses will raise a ``TypeError``. :param fields: fields that this function returns :type fields: ordered iterable of :py:class:`str` :param subscribe_to: functions in the graph to subscribe to. These indicators can be regular expressions. :type subscribe_to: :py:class:`str` or iterable of :py:class:`str` :param ignore: functions in the graph to ignore (also uses regular expressions.) Useful for ignoring specific functions in a broad regex. 
:type ignore: :py:class:`str` or iterable of :py:class:`str` :param entry_point: Set to ``True`` to mark this as an entry point - that is, this function will be called when the router is called directly. :type entry_point: :py:class:`bool` In addition to all of the above, you can define a ``wrap_node`` function on a subclass of Router, which will need to receive node and an options dictionary. Any extra options passed to node will be passed down to the options dictionary. See :py:class:`emit.router.CeleryRouter.wrap_node` as an example. :returns: decorated and wrapped function, or decorator if called directly ''' def outer(func): 'outer level function' # create a wrapper function self.logger.debug('wrapping %s', func) wrapped = self.wrap_as_node(func) if hasattr(self, 'wrap_node'): self.logger.debug('wrapping node ""%s"" in custom wrapper', wrapped) wrapped = self.wrap_node(wrapped, wrapper_options) # register the task in the graph name = self.get_name(func) self.register( name, wrapped, fields, subscribe_to, entry_point, ignore ) return wrapped return outer" 2564,"def resolve_node_modules(self): 'import the modules specified in init' if not self.resolved_node_modules: try: self.resolved_node_modules = [ importlib.import_module(mod, self.node_package) for mod in self.node_modules ] except ImportError: self.resolved_node_modules = [] raise return self.resolved_node_modules" 2565,"def get_message_from_call(self, *args, **kwargs): '''\ Get message object from a call. :raises: :py:exc:`TypeError` (if the format is not what we expect) This is where arguments to nodes are turned into Messages. Arguments are parsed in the following order: - A single positional argument (a :py:class:`dict`) - No positional arguments and a number of keyword arguments ''' if len(args) == 1 and isinstance(args[0], dict): # then it's a message self.logger.debug('called with arg dictionary') result = args[0] elif len(args) == 0 and kwargs != {}: # then it's a set of kwargs self.logger.debug('called with kwargs') result = kwargs else: # it's neither, and we don't handle that self.logger.error( 'get_message_from_call could not handle ""%r"", ""%r""', args, kwargs ) raise TypeError('Pass either keyword arguments or a dictionary argument') return self.message_class(result)" 2566,"def register(self, name, func, fields, subscribe_to, entry_point, ignore): ''' Register a named function in the graph :param name: name to register :type name: :py:class:`str` :param func: function to remember and call :type func: callable ``fields``, ``subscribe_to`` and ``entry_point`` are the same as in :py:meth:`Router.node`. 
''' self.fields[name] = fields self.functions[name] = func self.register_route(subscribe_to, name) if ignore: self.register_ignore(ignore, name) if entry_point: self.add_entry_point(name) self.logger.info('registered %s', name)" 2567,"def add_entry_point(self, destination): '''\ Add an entry point :param destination: node to route to initially :type destination: str ''' self.routes.setdefault('__entry_point', set()).add(destination) return self.routes['__entry_point']" 2568,"def register_route(self, origins, destination): ''' Add routes to the routing dictionary :param origins: a number of origins to register :type origins: :py:class:`str` or iterable of :py:class:`str` or None :param destination: where the origins should point to :type destination: :py:class:`str` Routing dictionary takes the following form:: {'node_a': set(['node_b', 'node_c']), 'node_b': set(['node_d'])} ''' self.names.add(destination) self.logger.debug('added ""%s"" to names', destination) origins = origins or [] # remove None if not isinstance(origins, list): origins = [origins] self.regexes.setdefault(destination, [re.compile(origin) for origin in origins]) self.regenerate_routes() return self.regexes[destination]" 2569,"def register_ignore(self, origins, destination): ''' Add routes to the ignore dictionary :param origins: a number of origins to register :type origins: :py:class:`str` or iterable of :py:class:`str` :param destination: where the origins should point to :type destination: :py:class:`str` Ignore dictionary takes the following form:: {'node_a': set(['node_b', 'node_c']), 'node_b': set(['node_d'])} ''' if not isinstance(origins, list): origins = [origins] self.ignore_regexes.setdefault(destination, [re.compile(origin) for origin in origins]) self.regenerate_routes() return self.ignore_regexes[destination]" 2570,"def regenerate_routes(self): 'regenerate the routes after a new route is added' for destination, origins in self.regexes.items(): # we want only the names that match the destination regexes. resolved = [ name for name in self.names if name is not destination and any(origin.search(name) for origin in origins) ] ignores = self.ignore_regexes.get(destination, []) for origin in resolved: destinations = self.routes.setdefault(origin, set()) if any(ignore.search(origin) for ignore in ignores): self.logger.info('ignoring route ""%s"" -> ""%s""', origin, destination) try: destinations.remove(destination) self.logger.debug('removed ""%s"" -> ""%s""', origin, destination) except KeyError: pass continue if destination not in destinations: self.logger.info('added route ""%s"" -> ""%s""', origin, destination) destinations.add(destination)" 2571,"def route(self, origin, message): '''\ Using the routing dictionary, dispatch a message to all subscribers :param origin: name of the origin node :type origin: :py:class:`str` :param message: message to dispatch :type message: :py:class:`emit.message.Message` or subclass ''' # side-effect: we have to know all the routes before we can route. But # we can't resolve them while the object is initializing, so we have to # do it just in time to route. 
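For orientation before the dispatch loop below, the routing dictionary that route() walks has the shape documented in register_route. A minimal standalone sketch (the node names and message are made up for illustration, not part of the Router class):

# Shape taken from the register_route docstring; node names are hypothetical.
routes = {'node_a': {'node_b', 'node_c'}, 'node_b': {'node_d'}}

def fan_out(origin, message):
    # Mirrors route()/dispatch(): call every subscriber of the origin node.
    for destination in routes.get(origin, set()):
        print('routing "%s" -> "%s" with %r' % (origin, destination, message))

fan_out('node_a', {'value': 1})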
self.resolve_node_modules() if not self.routing_enabled: return subs = self.routes.get(origin, set()) for destination in subs: self.logger.debug('routing ""%s"" -> ""%s""', origin, destination) self.dispatch(origin, destination, message)" 2572,"def dispatch(self, origin, destination, message): '''\ dispatch a message to a named function :param destination: destination to dispatch to :type destination: :py:class:`str` :param message: message to dispatch :type message: :py:class:`emit.message.Message` or subclass ''' func = self.functions[destination] self.logger.debug('calling %r directly', func) return func(_origin=origin, **message)" 2573,"def wrap_result(self, name, result): ''' Wrap a result from a function with it's stated fields :param name: fields to look up :type name: :py:class:`str` :param result: return value from function. Will be converted to tuple. :type result: anything :raises: :py:exc:`ValueError` if name has no associated fields :returns: :py:class:`dict` ''' if not isinstance(result, tuple): result = tuple([result]) try: return dict(zip(self.fields[name], result)) except KeyError: msg = '""%s"" has no associated fields' self.logger.exception(msg, name) raise ValueError(msg % name)" 2574,"def get_name(self, func): ''' Get the name to reference a function by :param func: function to get the name of :type func: callable ''' if hasattr(func, 'name'): return func.name return '%s.%s' % ( func.__module__, func.__name__ )" 2575,"def coerce(self, value): """"""Convert text values into boolean values. True values are (case insensitive): 'yes', 'true', '1'. False values are (case insensitive): 'no', 'false', '0'. Args: value (str or bool): The value to coerce. Raises: TypeError: If the value is not a bool or string. ValueError: If the value is not bool or an acceptable value. Returns: bool: The True/False value represented. 
"""""" if isinstance(value, bool): return value if not hasattr(value, 'lower'): raise TypeError('Value is not bool or string.') if value.lower() in ('yes', 'true', '1'): return True if value.lower() in ('no', 'false', '0'): return False raise ValueError('Could not coerce {0} to a bool.'.format(value))" 2576,"def main(): """""" Testing function for PDA - DFA Diff Operation """""" if len(argv) < 2: print 'Usage: ' print ' Get A String %s CFG_fileA FST_fileB' % argv[0] return alphabet = createalphabet() cfgtopda = CfgPDA(alphabet) print '* Parsing Grammar:', mma = cfgtopda.yyparse(argv[1]) print 'OK' flex_a = Flexparser(alphabet) print '* Parsing Regex:', mmb = flex_a.yyparse(argv[2]) print mmb print 'OK' print '* Minimize Automaton:', mmb.minimize() print 'OK' print mmb print '* Diff:', ops = PdaDiff(mma, mmb, alphabet) mmc = ops.diff() print 'OK' print '* Get String:', print ops.get_string()" 2577,"def _delta(self, graph, cur_state, char): """""" Args: graph (Fst Acceptor): The DFA cur_state (Fst State): The current State char (Char): The input character Returns: (Fst State): The destination state """""" for arc in cur_state.arcs: if graph.isyms.find(arc.ilabel) == char: return graph[arc.nextstate] return None" 2578,"def _intesect(self): """"""The intesection of a PDA and a DFA"""""" p1automaton = self.mma p2automaton = self.mmb p3automaton = PDA(self.alphabet) self._break_terms() p1counter = 0 p3counter = 0 p2states = list(p2automaton.states) print 'PDA States: ' + repr(p1automaton.n) print 'DFA States: ' + repr(len(list(p2states))) ignorechars = p1automaton.nonterminals+ [0] + ['@closing'] del(ignorechars[ignorechars.index('S')]) while p1counter < p1automaton.n + 1: p1state = p1automaton.s[p1counter] p2counter = 0 while p2counter < len(list(p2states)): p2state = p2states[p2counter] tempstate = PDAState() tempstate.id = (p1state.id, p2state.stateid) tempstate.sym = p1state.sym tempstate.type = p1state.type tempstate.trans = {} found = 0 for char in self.alphabet: if char in ignorechars: continue # DFA has single destination from a state p2dest = self._delta(p2automaton, p2state, char) # PDA may have multiple destinations from a state # print p1state.trans if p2dest is not None: for potential in p1state.trans: if char in p1state.trans[potential]: found = 1 p1dest = potential if (p1dest, p2dest.stateid) not in tempstate.trans: tempstate.trans[ (p1dest, p2dest.stateid)] = [] # print 'Appending A Transition to # ('+`p1dest`+','+`p2dest.stateid`+') for # input '+`char` tempstate.trans[ (p1dest, p2dest.stateid)].append(char) # THEN THE NONTERMINALS + 0 3 transitions # print p1state.trans # print p1automaton.nonterminals if found == 0 and p1state.type == 3 and len(p1state.trans) >0: assert 1==1,'Check Failed: A READ state with transitions' \ ' did not participate in the cross product' if p2dest is not None: for nonterm in p1automaton.nonterminals + \ [0] + ['@closing']: for potential in p1state.trans: if nonterm in p1state.trans[potential]: p1dest = potential if (p1dest, p2state.stateid) not in tempstate.trans: tempstate.trans[ (p1dest, p2state.stateid)] = [] # print 'Appending B Transition to # ('+`p1dest`+','+`p2state.stateid`+') for # input '+`nonterm` tempstate.trans[ (p1dest, p2state.stateid)].append(nonterm) p3automaton.s[p3counter] = tempstate p3counter = p3counter + 1 p2counter = p2counter + 1 p1counter = p1counter + 1 # print 'Total States Appended '+`len(p3automaton.input_string)` p3automaton.n = p3counter - 1 p3automaton.accepted = [] for state in p2automaton.states: if state.final != 
TropicalWeight(float('inf')): p3automaton.accepted.append(state.stateid) return p3automaton" 2579,"def diff(self): """"""The Difference between a PDA and a DFA"""""" self.mmb.complement(self.alphabet) self.mmb.minimize() print 'start intersection' self.mmc = self._intesect() print 'end intersection' return self.mmc" 2580,"def get_string(self): """""" Returns a string from the Diff resutl. Depending on the method, either the string will be generated directly from the PDA using the state removal method, or the PDA will be first translated to a CFG and then a string will be generated from the CFG Args: None Returns: A string from the Diff """""" return_string = None if not self.mmc: return """" method = 'PDASTRING' if method == 'PDASTRING': stringgen = PdaString() print '* Reduce PDA using DFA BFS (remove unreachable states):' newpda = self.mmc.s handle = IntersectionHandling() newpda = handle.get(newpda, self.mmc.accepted) reduce_b = ReducePDA() newpda = reduce_b.get(newpda) #simply = SimplifyStateIDs() #newpda, biggestid, newaccepted = simply.get( # newpda, self.mmc.accepted) print ""- Total PDA states after reduction are "" + repr(len(newpda)) return_string = stringgen.init(newpda, self.mmc.accepted) if return_string is not None: return_string = return_string[0] elif method == 'PDACFGSTRING': optimized = 1 dt1 = datetime.datetime.fromtimestamp(time.time()) print '* Initiating PDA simplification' print ' - Total PDA states are ' + repr(len(self.mmc.s)) handle = IntersectionHandling() newpda = handle.get(self.mmc.s, self.mmc.accepted) newpda = self.mmc.s simply = SimplifyStateIDs() newpda, biggestid, newaccepted = simply.get( newpda, self.mmc.accepted) print ' - Total PDA states after id clearence are ' + repr(len(newpda)) replace = ReadReplace(newpda, biggestid) newpda = replace.replace_read() print ' - Total PDA states after read elimination are ' + repr(len(newpda)) maxstate = replace.nextstate() - 1 print '* Reduce PDA using DFA BFS (remove unreachable states):' reduce_b = ReducePDA() newpda = reduce_b.get(newpda) print ""- Total PDA states after reduction are "" + repr(len(newpda)) dt2 = datetime.datetime.fromtimestamp(time.time()) rdelta = dateutil.relativedelta.relativedelta(dt2, dt1) print ""* PDA was simplyfied in %d days, %d hours, %d minutes and %d seconds"" % ( rdelta.days, rdelta.hours, rdelta.minutes, rdelta.seconds) dt1 = datetime.datetime.fromtimestamp(time.time()) print '* Initiating CNF from PDA generation' cnfgenerator = PdaCnf(newpda, newaccepted) dt2 = datetime.datetime.fromtimestamp(time.time()) rdelta = dateutil.relativedelta.relativedelta(dt2, dt1) print ""* CNF was generated in %d days, %d hours, %d minutes and %d seconds"" % ( rdelta.days, rdelta.hours, rdelta.minutes, rdelta.seconds) dt1 = datetime.datetime.fromtimestamp(time.time()) print '* Initiating string from CFG generation' grammar = cnfgenerator.get_rules(optimized) print ' - Total grammar rules are ' + repr(len(grammar)) gen = CFGGenerator(CNFGenerator(grammar), optimized=optimized, splitstring=0, maxstate=maxstate) return_string = gen.generate() dt2 = datetime.datetime.fromtimestamp(time.time()) rdelta = dateutil.relativedelta.relativedelta(dt2, dt1) print ""* A string was generated in %d days, %d hours, %d minutes and %d seconds"" % ( rdelta.days, rdelta.hours, rdelta.minutes, rdelta.seconds) print return_string else: return_string = None return return_string" 2581,"def refresh_devices(self): '''Queries hub for list of devices, and creates new device objects''' try: response = 
self.api.get(""/api/v2/devices"", {'properties':'all'}) for device_data in response['DeviceList']: self.devices.append(Device(device_data, self)) except APIError as e: print(""API error: "") for key,value in e.data.iteritems: print(str(key) + "": "" + str(value))" 2582,"def refresh_details(self): '''Query hub and refresh all details of a device, but NOT status, includes grouplist not present in refresh_all_devices''' try: return self.api_iface._api_get(""/api/v2/devices/"" + str(self.device_id)) except APIError as e: print(""API error: "") for key,value in e.data.iteritems: print(str(key) + "": "" + str(value))" 2583,"def send_command(self, command): '''Send a command to a device''' data = {""command"": command, ""device_id"": self.device_id} try: response = self.api_iface._api_post(""/api/v2/commands"", data) return Command(response, self) except APIError as e: print(""API error: "") for key,value in e.data.iteritems: print(str(key) + "": "" + str(value))" 2584,"def _update_details(self,data): '''Intakes dict of details, and sets necessary properties in device''' # DeviceName, IconID, HouseID, DeviceID always present self.device_id = data['DeviceID'] self.device_name = data['DeviceName'] self.properties = data" 2585,"def _update_details(self,data): '''Intakes dict of details, and sets necessary properties in command''' for api_name in self._properties: if api_name in data: setattr(self, ""_"" + api_name, data[api_name]) else: # Only set to blank if not initialized try: getattr(self, ""_"" + api_name) except AttributeError: setattr(self, ""_"" + api_name, '')" 2586,"def query_status(self): '''Query the hub for the status of this command''' try: data = self.api_iface._api_get(self.link) self._update_details(data) except APIError as e: print(""API error: "") for key,value in e.data.iteritems: print(str(key) + "": "" + str(value))" 2587,"def tracks(self): """""" Tracks list context :return: Tracks list context """""" if self._tracks is None: self._tracks = TrackList(self.version, self.id) return self._tracks" 2588,"def list(self, ids, market=values.UNSET): """""" List albums :param List[str] ids: List of albums ids :param str market: Market locale :return: Page of Albums :rtype: AlbumPage """""" params = values.of({ 'ids': ','.join(ids), 'market': market }) response = self.version.request('GET', '/albums', params=params) return AlbumPage(self.version, response.json(), 'albums')" 2589,"def to_string(self): """""" stringifies version :return: string of version """""" if self.major == -1: major_str = 'x' else: major_str = self.major if self.minor == -1: minor_str = 'x' else: minor_str = self.minor if self.patch == -1: patch_str = 'x' else: patch_str = self.patch return '{0}_{1}_{2}'.format(major_str, minor_str, patch_str)" 2590,"def find(self, binding_id, instance): """"""find an instance Create a new instance and populate it with data stored if it exists. 
Args: binding_id (string): UUID of the binding instance (AtlasServiceInstance.Instance): instance Returns: AtlasServiceBinding: A binding """""" binding = AtlasServiceBinding.Binding(binding_id, instance) self.backend.storage.populate(binding) return binding" 2591,"def bind(self, binding, parameters): """""" Create the binding Args: binding (AtlasServiceBinding.Binding): Existing or New binding parameters (dict): Parameters for the binding Returns: Binding: Status Raises: ErrBindingAlreadyExists: If binding exists but with different parameters """""" if not binding.isProvisioned(): # Update binding parameters binding.parameters = parameters # Credentials creds = self.backend.config.generate_binding_credentials(binding) # Binding p = self.backend.config.generate_binding_permissions( binding, DatabaseUsersPermissionsSpecs(creds[""username""],creds[""password""]) ) try: self.backend.atlas.DatabaseUsers.create_a_database_user(p) except ErrAtlasConflict: # The user already exists. This is not an issue because this is possible that we # created it in a previous call that failed later on the broker. pass self.backend.storage.store(binding) # Bind done return Binding(BindState.SUCCESSFUL_BOUND, credentials = creds) elif binding.parameters == parameters: if self.backend.config.isGenerateBindingCredentialsPredictible(): # Identical and credentials generation is predictible so we can return credentials again. creds = self.backend.config.generate_binding_credentials(binding) return Binding(BindState.IDENTICAL_ALREADY_EXISTS, credentials = creds) # Identical but credentials generation is NOT predictible. So we are breaking the spec to avoid # wrong data injection. In this case we trigger a conflicting parameters for the existing binding depsite # this is not the case. raise ErrBindingAlreadyExists() else: # Different parameters ... raise ErrBindingAlreadyExists()" 2592,"def unbind(self, binding): """""" Unbind the instance Args: binding (AtlasServiceBinding.Binding): Existing or New binding """""" username = self.backend.config.generate_binding_username(binding) try: self.backend.atlas.DatabaseUsers.delete_a_database_user(username) except ErrAtlasNotFound: # The user does not exist. This is not an issue because this is possible that we # removed it in a previous call that failed later on the broker. # This cover a manually deleted user case too. pass self.backend.storage.remove(binding)" 2593,"def describe(items, show_methods=True, show_properties=True): """"""Detecting attributes, inherits and relations :param items: list of objects to describe :param show_methods: do detection of methods :param show_properties: do detection of properties Return tuple (objects, relations, inherits) Where objects is list:: [{ 'name': '<Mapper class name or table name>', 'cols': [ ('<Column type class name>', '<Column name>'), ... ], 'props': ['<Property name>'], 'methods': ['<Method name>', ...], }, ...] Relations is:: [{ 'from': '<From mapper class name>', 'by': '<By mapper foreign key column name>', 'to': '<To mapper class name>', }, ...] 
Example usage:: import sadisplay from app import models desc = sadisplay.describe([ getattr(model, attr) for attr in dir(model) ]) desc = sadisplay.describe([models.User, models.Group]) """""" class EntryItem(object): """"""Class adaptor for mapped classes and tables"""""" name = None methods = [] columns = [] inherits = None properties = [] bases = tuple() def __init__(self, mapper=None, table=None): if mapper is not None: self.name = mapper.class_.__name__ self.columns = mapper.columns self.methods = mapper.class_.__dict__.items() self.inherits = mapper.inherits self.properties = mapper.iterate_properties self.bases = mapper.class_.__bases__ self.class_ = mapper.class_ self.table_name = str(mapper.mapped_table) elif table is not None: self.name = table.name self.table_name = table.name # prepend schema if exists for foreign key matching if hasattr(table, ""schema"") and table.schema: self.table_name = table.schema + ""."" + self.table_name self.columns = table.columns else: pass def __repr__(self): return '<{s.__class__.__name__} {s.name}>'.format(s=self) def __eq__(self, other): if other.inherits or self.inherits: return self.name == other.name return self.table_name == other.table_name objects = [] relations = [] inherits = [] entries = [] for item in items: try: mapper = class_mapper(item) except (exc.ArgumentError, orm.exc.UnmappedClassError): if isinstance(item, Table): entity = EntryItem(table=item) else: continue else: entity = EntryItem(mapper=mapper) if entity not in entries: entries.append(entity) for entry in entries: result_item = { 'name': entry.name, 'cols': [ (c.type.__class__.__name__, c.name) for c in entry.columns ], 'props': [], 'methods': [], } if show_methods and entry.methods: if entry.inherits: base_methods = entry.inherits.class_.__dict__.keys() else: # Create the DummyClass subclass of mapper bases # for detecting mapper own methods suffix = '%s' % str(uuid.uuid4()) params = { '__tablename__': 'dummy_table_%s' % suffix, 'dummy_id_col': Column(Integer, primary_key=True) } DummyClass = type('Dummy%s' % suffix, entry.bases, params) base_methods = DummyClass.__dict__.keys() # Filter mapper methods for name, func in entry.methods: if name[0] != '_' and name not in base_methods: if isinstance(func, types.FunctionType): result_item['methods'].append(name) if show_properties and entry.properties: for item in entry.properties: if not isinstance(item, ColumnProperty): result_item['props'].append(item.key) # ordering for key in ('methods', 'props'): result_item[key].sort() objects.append(result_item) # Detect relations by ForeignKey for col in entry.columns: for fk in col.foreign_keys: table = fk.column.table for m in entries: try: if str(table) == str(m.table_name): relations.append({ 'from': entry.name, 'by': col.name, 'to': m.name, }) except AttributeError: pass if entry.inherits: inh = { 'child': entry.name, 'parent': EntryItem(mapper=entry.inherits).name, } inherits.append(inh) # Delete relation by inherits for i, rel in enumerate(relations): if inh['child'] == rel['from'] and inh['parent'] == rel['to']: relations.pop(i) return objects, relations, inherits" 2594,"def extension(names): """"""Makes a function to be an extension."""""" for name in names: if not NAME_PATTERN.match(name): raise ValueError('invalid extension name: %s' % name) def decorator(f, names=names): return Extension(f, names=names) return decorator" 2595,"def register(self, extensions): """"""Registers extensions."""""" for ext in reversed(extensions): for name in ext.names: try: 
self._extensions[name].appendleft(ext) except KeyError: self._extensions[name] = deque([ext])" 2596,"def eval_extensions(self, value, name, option, format): """"""Evaluates extensions in the registry. If some extension handles the format string, it returns a string. Otherwise, returns ``None``. """""" try: exts = self._extensions[name] except KeyError: raise ValueError('no suitable extension: %s' % name) for ext in exts: rv = ext(self, value, name, option, format) if rv is not None: return rv" 2597,"def matchToFeatures(fiContainer, specContainer, specfiles=None, fMassKey='mz', sMassKey='obsMz', isotopeErrorList=(0), precursorTolerance=5, toleranceUnit='ppm', rtExpansionUp=0.10, rtExpansionDown=0.05, matchCharge=True, scoreKey='pep', largerBetter=False): """"""Annotate :class:`Fi <maspy.core.Fi>` (Feature items) by matching :class:`Si <maspy.core.Si>` (Spectrum items) or :class:`Sii <maspy.core.Sii>` (Spectrum identification items). :param fiContainer: :class:`maspy.core.FeatureContainer`, contains ``Fi``. :param specContainer: :class:`maspy.core.MsrunContainer` or :class:`maspy.core.SiiContainer`, contains ``Si`` or ``Sii``. :param specfiles: filenames of ms-run files, if specified consider only items from those files :type specfiles: str, list or None :param fMassKey: mass attribute key in :attr:`Fi.__dict__` :param sMassKey: mass attribute key in :attr:`Si.__dict__` or :attr:`Sii.__dict__` (eg 'obsMz', 'excMz') :param isotopeErrorList: allowed isotope errors relative to the spectrum mass, for example ""0"" or ""1"". If no feature has been matched with isotope error 0, the spectrum mass is increased by the mass difference of carbon isotopes 12 and 13 and matched again. The different isotope error values are tested in the specified order therefore ""0"" should normally be the first value of the list. :type isotopeErrorList: list or tuple of int :param precursorTolerance: the largest allowed mass deviation of ``Si`` or ``Sii`` relative to ``Fi`` :param toleranceUnit: defines how the ``precursorTolerance`` is applied to the mass value of ``Fi``. ``""ppm"": mass * (1 +/- tolerance*1E-6)`` or ``""da"": mass +/- value`` :param rtExpansionUp: relative upper expansion of ``Fi`` retention time area. ``limitHigh = Fi.rtHigh + (Fi.rtHigh - Fi.rtLow) * rtExpansionUp`` :param rtExpansionDown: relative lower expansion of ``Fi`` retention time area. ``limitLow = Fi.rtLow - (Fi.rtHigh - Fi.rtLow) * rtExpansionDown`` :param matchCharge: bool, True if ``Fi`` and ``Si`` or ``Sii`` must have the same ``charge`` state to be matched. :param scoreKey: ``Sii`` attribute name used for scoring the identification reliability :param largerBetter: bool, True if higher score value means a better identification reliability .. note: Concerning the feature retention area expansion. If ``Si`` or ``Sii`` is matched to multiple ``Fi`` the rt expansion is removed and the matching is repeated. .. note: If the ``specContainer`` is a ``SiiContainer`` then matched ``Fi`` are annotated with :attr:`Sii.peptide`, if multiple ``Sii`` are matched to ``Fi`` the one with the best score is used. 
#TODO: this function is nested pretty badly and should maybe be rewritten #TODO: replace tolerance unit ""ppm"" by tolerance mode ""relative"" and change repsective calculations """""" isotopeErrorList = aux.toList(isotopeErrorList) if specContainer.__class__.__name__ == 'MsrunContainer': listKeySpecIds = 'siIds' else: listKeySpecIds = 'siiIds' specContainerSpecfiles = [_ for _ in viewkeys(specContainer.info)] if specfiles is not None: specfiles = aux.toList(specfiles) else: specfiles = [_ for _ in viewkeys(fiContainer.info)] specfiles = list(set(specfiles).intersection(set(specContainerSpecfiles))) for specfile in specfiles: multiMatchCounter = int() isotopeErrorMatchCounter = int() specArrays = specContainer.getArrays([sMassKey, 'rt', 'charge', 'msLevel'], specfiles=specfile ) featureArrays = fiContainer.getArrays(['rtHigh', 'rtLow', 'charge', fMassKey], specfiles=specfile, sort=fMassKey ) featureArrays['rtHighExpanded'] = (featureArrays['rtHigh'] + (featureArrays['rtHigh'] - featureArrays['rtLow']) * rtExpansionUp ) featureArrays['rtLowExpanded'] = (featureArrays['rtLow'] - (featureArrays['rtHigh'] - featureArrays['rtLow']) * rtExpansionDown ) specFeatureDict = dict() ## key = scanNr, value = set(featureKeys) featureSpecDict = dict() ## key = featureKey, value = set(scanNrs) for specPos, specId in enumerate(specArrays['id']): specZ = specArrays['charge'][specPos] if specZ is None: continue specMass = specArrays[sMassKey][specPos] specRt = specArrays['rt'][specPos] matchComplete = False isotopeErrorPos = 0 while not matchComplete: isotopeError = isotopeErrorList[isotopeErrorPos] # calculate mass limits for each isotope error if toleranceUnit.lower() == 'ppm': specMassHigh = ((specMass + isotopeError * 1.003355 / specZ) * (1 + precursorTolerance*1E-6) ) specMassLow = ((specMass + isotopeError * 1.003355 / specZ) * (1 - precursorTolerance*1E-6) ) elif toleranceUnit.lower() == 'da': specMassHigh = ((specMass + isotopeError * 1.003355 / specZ) + precursorTolerance ) specMassLow = ((specMass + isotopeError * 1.003355 / specZ) - precursorTolerance ) posL = bisect.bisect_left(featureArrays[fMassKey], specMassLow ) posR = bisect.bisect_right(featureArrays[fMassKey], specMassHigh ) if matchCharge: chargeMask = (featureArrays['charge'][posL:posR] == specZ) fRtHighKey = 'rtHighExpanded' fRtLowKey = 'rtLowExpanded' for fRtHighKey, fRtLowKey in [('rtHighExpanded', 'rtLowExpanded'), ('rtHigh', 'rtLow') ]: rtMask = ((featureArrays[fRtLowKey][posL:posR] <= specRt) & (featureArrays[fRtHighKey][posL:posR] >= specRt) ) if matchCharge: matchedFeatureIds = featureArrays['id'][posL:posR][rtMask & chargeMask] else: matchedFeatureIds = featureArrays['id'][posL:posR][rtMask] if len(matchedFeatureIds) <= 1: break # if exactly one feature has been matched, if len(matchedFeatureIds) > 0: if len(matchedFeatureIds) == 1: matchComplete = True if isotopeErrorList[isotopeErrorPos] != 0: isotopeErrorMatchCounter += 1 else: #Stop if Spectrum can be matched to multiple features multiMatchCounter += 1 break isotopeErrorPos += 1 if isotopeErrorPos >= len(isotopeErrorList): #Stop if all allowed isotope errors have been tested break if matchComplete: for featureId in matchedFeatureIds: getattr(fiContainer.container[specfile][featureId], listKeySpecIds ).append(specId) fiContainer.container[specfile][featureId].isMatched = True specFeatureDict[specId] = featureId featureSpecDict[featureId] = specId stats = dict() stats['totalFeatures'] = len(featureArrays['id']) stats['matchedFeatures'] = len(featureSpecDict) 
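The candidate lookup in the matching loop above relies on bisect over the m/z-sorted feature array. A minimal self-contained illustration of that windowing (the masses and tolerance are made-up values, not taken from any run):

import bisect
import numpy

feature_mz = numpy.array([400.010, 450.204, 450.206, 502.770])  # sorted by m/z
spec_mz = 450.205
tolerance_ppm = 5
mass_low = spec_mz * (1 - tolerance_ppm * 1e-6)
mass_high = spec_mz * (1 + tolerance_ppm * 1e-6)
pos_l = bisect.bisect_left(feature_mz, mass_low)
pos_r = bisect.bisect_right(feature_mz, mass_high)
candidates = feature_mz[pos_l:pos_r]  # -> array([450.204, 450.206])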
stats['relMatchedFeatures'] = round(100*stats['matchedFeatures']/stats['totalFeatures'], 1) stats['totalSpectra'] = len(specArrays['id'][(specArrays['msLevel'] != 1)]) stats['matchedSpectra'] = len(specFeatureDict) stats['relMatchedSpectra'] = round(100*stats['matchedSpectra']/stats['totalSpectra'], 1) print('------', specfile, '------') print('Annotated features:\t\t\t', stats['matchedFeatures'], '/', stats['totalFeatures'], '=', stats['relMatchedFeatures'], '%') print('Spectra matched to features:\t\t', stats['matchedSpectra'], '/', stats['totalSpectra'], '=', stats['relMatchedSpectra'], '%') if multiMatchCounter != 0: print('Discarded because of multiple matches:\t', multiMatchCounter) if isotopeErrorMatchCounter != 0: print('Isotope error matched spectra:\t\t', isotopeErrorMatchCounter) #annotate feature with sii information (peptide, sequence, score) if isinstance(specContainer, maspy.core.SiiContainer): for featureId in viewkeys(featureSpecDict): matches = list() for specId in fiContainer.container[specfile][featureId].siiIds: _sii = specContainer.getValidItem(specfile, specId) score = getattr(_sii, scoreKey) peptide = _sii.peptide sequence = _sii.sequence matches.append([score, peptide, sequence]) matches.sort(reverse=largerBetter) fiContainer.container[specfile][featureId].isAnnotated = True fiContainer.container[specfile][featureId].score = matches[0][0] fiContainer.container[specfile][featureId].peptide = matches[0][1] fiContainer.container[specfile][featureId].sequence = matches[0][2]" 2598,"def rtCalibration(fiContainer, allowedRtDev=60, allowedMzDev=2.5, reference=None, specfiles=None, showPlots=False, plotDir=None, minIntensity=1e5): """"""Performs a retention time calibration between :class:`FeatureItem` of multiple specfiles. :ivar fiContainer: Perform alignment on :class:`FeatureItem` in :attr:`FeatureContainer.specfiles` :ivar allowedRtDev: maxium retention time difference of two features in two runs to be matched :ivar allowedMzDev: maxium relative m/z difference (in ppm) of two features in two runs to be matched :ivar showPlots: boolean, True if a plot should be generated which shows to results of the calibration :ivar plotDir: if not None and showPlots is True, the plots are saved to this location. 
:ivar reference: Can be used to specifically specify a reference specfile :ivar specfiles: Limit alignment to those specfiles in the fiContainer :ivar minIntensity: consider only features with an intensity above this value """""" #TODO: long function, maybe split into subfunctions specfiles = [_ for _ in viewkeys(fiContainer.info)] if specfiles is None else specfiles matchCharge = True refMzKey = 'mz' mzKey = 'mz' if reference is not None: if reference in specfiles: specfiles = [reference] + list(set(specfiles).difference(set([reference]))) else: print('Specified reference specfile not present, using reference: ', specfiles[0]) for featureItem in fiContainer.getItems(specfiles=specfiles): if not hasattr(featureItem, 'obsRt'): setattr(featureItem, 'obsRt', featureItem.rt) referenceArrays = None for specfile in specfiles: featureArrays = fiContainer.getArrays(['rt', 'charge', 'mz', 'intensity'], specfiles=specfile, sort='rt' ) if minIntensity is not None: intensityMask = (featureArrays['intensity'] > minIntensity) for key in list(viewkeys(featureArrays)): featureArrays[key] = featureArrays[key][intensityMask] if referenceArrays is None: referenceArrays = featureArrays if showPlots: print('Reference: '+specfile) continue rtPosList = list() rtDevList = list() mzDevRelList = list() mzDevAbsList = list() for featurePos in range(len(featureArrays[mzKey])): currRt = featureArrays['rt'][featurePos] currMz = featureArrays[mzKey][featurePos] currZ = featureArrays['charge'][featurePos] mzLimitUp = currMz*(1+allowedMzDev*1E-6) mzLimitLow = currMz*(1-allowedMzDev*1E-6) rtLimitUp = currRt+allowedRtDev rtLimitLow = currRt-allowedRtDev posL = bisect.bisect_left(referenceArrays['rt'], rtLimitLow) posU = bisect.bisect_right(referenceArrays['rt'], rtLimitUp) refMask = (referenceArrays[refMzKey][posL:posU] <= mzLimitUp) & (referenceArrays[refMzKey][posL:posU] >= mzLimitLow) if matchCharge: refMask = refMask & (referenceArrays['charge'][posL:posU] == currZ) currMzDev = abs(referenceArrays[refMzKey][posL:posU][refMask] - currMz) bestHitMask = currMzDev.argsort() for refRt, refMz in zip(referenceArrays['rt'][posL:posU][refMask][bestHitMask], referenceArrays[refMzKey][posL:posU][refMask][bestHitMask]): rtPosList.append(currRt) rtDevList.append(currRt - refRt) mzDevRelList.append((1 - currMz / refMz)*1E6) mzDevAbsList.append(currMz - refMz) break rtPosList = numpy.array(rtPosList) rtDevList = numpy.array(rtDevList) splineInitialKnots = int(max(rtPosList) - min(rtPosList)) dataFit = aux.DataFit(rtDevList, rtPosList) dataFit.splineInitialKnots = splineInitialKnots dataFit.splineTerminalExpansion = 0.2 dataFit.processInput(dataAveraging='median', windowSize=10) dataFit.generateSplines() if showPlots: corrDevArr = rtDevList - dataFit.corrArray(rtPosList) timePoints = [min(rtPosList) + x for x in range(int(max(rtPosList)-min(rtPosList)))] corrValues = dataFit.corrArray(timePoints) fig, ax = plt.subplots(3, 2, sharex=False, sharey=False, figsize=(20, 18)) fig.suptitle(specfile) ax[0][0].hist(rtDevList, bins=100, color='grey', alpha=0.5, label='observed') ax[0][0].hist(corrDevArr, bins=100, color='red', alpha=0.5, label='corrected') ax[0][0].set_title('Retention time deviation') ax[0][0].legend() ax[0][0].set_xlim(allowedRtDev*-1, allowedRtDev) ax[0][1].hist(mzDevRelList, bins=100, color='grey') ax[0][1].set_title('Mz deviation [ppm]') ax[1][0].scatter(rtPosList, rtDevList, color='grey', alpha=0.1, label='observed') ax[1][0].plot(timePoints,corrValues, color='red', alpha=0.5, label='correction function') 
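The correction curve plotted here comes from the aux.DataFit spline fit generated above; its internals are not shown in this module, so the following is only a rough sketch of the same idea using scipy (an assumption for illustration, not the actual DataFit implementation):

import numpy
from scipy.interpolate import UnivariateSpline

rt = numpy.linspace(0, 100, 200)                        # observed retention times (synthetic)
dev = 0.02 * rt + numpy.random.normal(0, 0.1, rt.size)  # drift relative to the reference run
spline = UnivariateSpline(rt, dev, k=3, s=rt.size)      # smoothing spline as the correction curve
corrected_dev = dev - spline(rt)                        # residual deviation after alignment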
ax[1][0].set_title('Retention time deviation over time') ax[1][0].legend() ax[1][0].set_ylim(allowedRtDev*-1, allowedRtDev) ax[1][1].scatter(rtPosList, mzDevRelList, color='grey', alpha=0.1) ax[1][1].set_title('Mz deviation over time') ax[1][1].set_ylim(allowedMzDev*-1, allowedMzDev) ax[2][0].scatter(rtPosList, corrDevArr, color='grey', alpha=0.1) ax[2][0].set_title('Aligned retention time deviation over time') ax[2][0].set_ylim(allowedRtDev*-1, allowedRtDev) if plotDir is not None: plotloc = aux.joinpath(plotDir, specfile+'.rtAlign.png') fig.savefig(plotloc) else: fig.show() featureArrays = fiContainer.getArrays(['rt'], specfiles=specfile, sort='rt') featureArrays['corrRt'] = featureArrays['rt'] - dataFit.corrArray(featureArrays['rt']) for featureId, corrRt, rt in zip(featureArrays['id'], featureArrays['corrRt'], featureArrays['rt']): fiContainer.container[specfile][featureId].rt = corrRt" 2599,"def GetShowDetails(self): """""" Extract show name, season number and episode number from file name. Supports formats S<NUM>E<NUM> or <NUM>x<NUM> for season and episode numbers where letters are case insensitive and number can be one or more digits. It expects season number to be unique however it can handle either single or multipart episodes (consecutive values only). All information preceeding season number is used for the show name lookup. This string is forced to lowercase and stripped of special characters Returns ---------- boolean False if an incompatible file name is found, otherwise return True. """""" fileName = os.path.splitext(os.path.basename(self.fileInfo.origPath))[0] # Episode Number episodeNumSubstring = set(re.findall(""(?<=[0-9])[xXeE][0-9]+(?:[xXeE_.-][0-9]+)*"", fileName)) if len(episodeNumSubstring) != 1: goodlogging.Log.Info(""TVFILE"", ""Incompatible filename no episode match detected: {0}"".format(self.fileInfo.origPath)) return False episodeNumSet = set(re.findall(""(?<=[xXeE_.-])[0-9]+"", episodeNumSubstring.pop())) episodeNumList = [int(i) for i in episodeNumSet] episodeNumList.sort() episodeNum = ""{0}"".format(episodeNumList[0]) if len(episodeNumList) > 1: episodeNumReference = episodeNumList[0] for episodeNumIter in episodeNumList[1:]: if episodeNumIter == (episodeNumReference+1): strNum = ""{0}"".format(episodeNumIter) if len(strNum) == 1: strNum = ""0{0}"".format(strNum) self.showInfo.multiPartEpisodeNumbers.append(strNum) episodeNumReference = episodeNumIter else: break if len(episodeNum) == 1: episodeNum = ""0{0}"".format(episodeNum) self.showInfo.episodeNum = episodeNum # Season Number seasonNumSet = set(re.findall(""[sS]([0-9]+)"", fileName)) preceedingS = True if len(seasonNumSet) == 1: seasonNum = seasonNumSet.pop() else: seasonNumSet = set(re.findall(""([0-9]+)[xX](?:[0-9]+[xX])*"", fileName)) preceedingS = False if len(seasonNumSet) == 1: seasonNum = seasonNumSet.pop() else: goodlogging.Log.Info(""TVFILE"", ""Incompatible filename no season match detected: {0}"".format(self.fileInfo.origPath)) return False if len(seasonNum) == 1: seasonNum = ""0{0}"".format(seasonNum) self.showInfo.seasonNum = seasonNum # Show Name if preceedingS is True: showNameList = re.findall(""(.+?)\s*[_.-]*\s*[sS][0-9]+[xXeE][0-9]+.*"", fileName) else: showNameList = re.findall(""(.+?)\s*[_.-]*\s*[0-9]+[xXeE][0-9]+.*"", fileName) if len(showNameList) == 1: showName = util.StripSpecialCharacters(showNameList[0].lower(), stripAll=True) else: goodlogging.Log.Info(""TVFILE"", ""Incompatible filename no show name detected: {0}"".format(self.fileInfo.origPath)) return False 
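The season, episode and show-name extraction above can be exercised in isolation with the same regular expressions; a short sketch with a hypothetical file name:

import re

fileName = 'My.Show.S01E02.HDTV'  # hypothetical example
episode_part = set(re.findall(r'(?<=[0-9])[xXeE][0-9]+(?:[xXeE_.-][0-9]+)*', fileName))
season = set(re.findall(r'[sS]([0-9]+)', fileName))
show = re.findall(r'(.+?)\s*[_.-]*\s*[sS][0-9]+[xXeE][0-9]+.*', fileName)
# episode_part -> {'E02'}, season -> {'01'}, show -> ['My.Show']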
self.fileInfo.showName = showName return True" 2600,"def GenerateNewFileName(self): """""" Create new file name from show name, season number, episode number and episode name in format ShowName.S<NUM>.E<NUM>.EpisodeName. Returns ---------- string New file name in format ShowName.S<NUM>.E<NUM>.EpisodeName. """""" if self.showInfo.showName is not None and self.showInfo.seasonNum is not None and \ self.showInfo.episodeNum is not None and self.showInfo.episodeName is not None: ext = os.path.splitext(self.fileInfo.origPath)[1] newFileName = ""{0}.S{1}E{2}"".format(self.showInfo.showName, self.showInfo.seasonNum, \ self.showInfo.episodeNum) for episodeNum in self.showInfo.multiPartEpisodeNumbers: newFileName = newFileName + ""_{0}"".format(episodeNum) newFileName = newFileName + "".{0}{1}"".format(self.showInfo.episodeName, ext) newFileName = util.StripSpecialCharacters(newFileName) return newFileName" 2601,"def GenerateNewFilePath(self, fileDir = None): """""" Create new file path. If a fileDir is provided it will be used otherwise the original file path is used. Updates file info object with new path. Parameters ---------- fileDir : string [optional : default = None] Optional file directory """""" newFileName = self.GenerateNewFileName() if newFileName is not None: if fileDir is None: fileDir = os.path.dirname(self.fileInfo.origPath) self.fileInfo.newPath = os.path.join(fileDir, newFileName)" 2602,"def Print(self): """""" Print contents of showInfo and FileInfo object """""" goodlogging.Log.Info(""TVFILE"", ""TV File details are:"") goodlogging.Log.IncreaseIndent() goodlogging.Log.Info(""TVFILE"", ""Original File Path = {0}"".format(self.fileInfo.origPath)) if self.showInfo.showName is not None: goodlogging.Log.Info(""TVFILE"", ""Show Name (from guide) = {0}"".format(self.showInfo.showName)) elif self.fileInfo.showName is not None: goodlogging.Log.Info(""TVFILE"", ""Show Name (from file) = {0}"".format(self.fileInfo.showName)) if self.showInfo.seasonNum is not None and self.showInfo.episodeNum is not None: goodlogging.Log.Info(""TVFILE"", ""Season & Episode = S{0}E{1}"".format(self.showInfo.seasonNum, self.showInfo.episodeNum)) if self.showInfo.episodeName is not None: goodlogging.Log.Info(""TVFILE"", ""Episode Name: = {0}"".format(self.showInfo.episodeName)) if self.fileInfo.newPath is not None: goodlogging.Log.Info(""TVFILE"", ""New File Path = {0}"".format(self.fileInfo.newPath)) goodlogging.Log.DecreaseIndent()" 2603,"def connectProcess(connection, processProtocol, commandLine='', env={}, usePTY=None, childFDs=None, *args, **kwargs): """"""Opens a SSHSession channel and connects a ProcessProtocol to it @param connection: the SSH Connection to open the session channel on @param processProtocol: the ProcessProtocol instance to connect to the process @param commandLine: the command line to execute the process @param env: optional environment variables to set for the process @param usePTY: if set, request a PTY for the process @param childFDs: custom child file descriptors for the process """""" processOpenDeferred = defer.Deferred() process = SSHProcess(processProtocol, commandLine, env, usePTY, childFDs, *args, **kwargs) process.processOpen = processOpenDeferred.callback process.openFailed = processOpenDeferred.errback connection.openChannel(process) return processOpenDeferred" 2604,"def get_api_publisher(self, social_user): """""" owner_id - VK user or group from_group - 1 by group, 0 by user message - text attachments - comma separated links or VK resources ID's and other 
https://vk.com/dev.php?method=wall.post """""" def _post(**kwargs): api = self.get_api(social_user) response = api.wall.post(**kwargs) return response return _post" 2605,"def get_api_publisher(self, social_user): """""" files: {'file0':<file>} message: 'mess' """""" def _post(**kwargs): api = self.get_api(social_user) author = { 'group_id': kwargs.get('group_id'), 'user_id': kwargs.get('user_id'), } server_data = api.photos.getWallUploadServer(**author) attachments = [] for _file in kwargs['files']: upload_data = requests.post( server_data['upload_url'], files={""photo"": _file}).json() upload_data.update(author) photos_data = api.photos.saveWallPhoto(**upload_data) attachments.append('photo{owner_id}_{id}'.format(**photos_data[0])) del kwargs['files'] kwargs['attachments'] = ','.join(attachments) response = api.wall.post(**kwargs) server_data.update(response) return server_data return _post" 2606,"def call_builder_init(cls, kb_app, sphinx_app: Sphinx): """""" On builder init event, commit registry and do callbacks """""" # Find and commit docs project plugins conf_dir = sphinx_app.confdir plugins_dir = sphinx_app.config.kaybee_settings.plugins_dir full_plugins_dir = os.path.join(conf_dir, plugins_dir) if os.path.exists(full_plugins_dir): sys.path.insert(0, conf_dir) plugin_package = importlib.import_module(plugins_dir) importscan.scan(plugin_package) else: logger.info(f'## Kaybee: No plugin dir at {plugins_dir}') dectate.commit(kb_app) for callback in cls.get_callbacks(kb_app, SphinxEvent.BI): callback(kb_app, sphinx_app)" 2607,"def call_purge_doc(cls, kb_app, sphinx_app: Sphinx, sphinx_env: BuildEnvironment, docname: str): """""" On env-purge-doc, do callbacks """""" for callback in EventAction.get_callbacks(kb_app, SphinxEvent.EPD): callback(kb_app, sphinx_app, sphinx_env, docname)" 2608,"def call_env_before_read_docs(cls, kb_app, sphinx_app: Sphinx, sphinx_env: BuildEnvironment, docnames: List[str]): """""" On env-read-docs, do callbacks"""""" for callback in EventAction.get_callbacks(kb_app, SphinxEvent.EBRD): callback(kb_app, sphinx_app, sphinx_env, docnames)" 2609,"def call_env_doctree_read(cls, kb_app, sphinx_app: Sphinx, doctree: doctree): """""" On doctree-read, do callbacks"""""" for callback in EventAction.get_callbacks(kb_app, SphinxEvent.DREAD): callback(kb_app, sphinx_app, doctree)" 2610,"def call_doctree_resolved(cls, kb_app, sphinx_app: Sphinx, doctree: doctree, fromdocname: str): """""" On doctree-resolved, do callbacks"""""" for callback in EventAction.get_callbacks(kb_app, SphinxEvent.DRES): callback(kb_app, sphinx_app, doctree, fromdocname)" 2611,"def call_env_updated(cls, kb_app, sphinx_app: Sphinx, sphinx_env: BuildEnvironment): """""" On the env-updated event, do callbacks """""" for callback in EventAction.get_callbacks(kb_app, SphinxEvent.EU): callback(kb_app, sphinx_app, sphinx_env)" 2612,"def call_html_collect_pages(cls, kb_app, sphinx_app: Sphinx): """""" On html-collect-pages, do callbacks"""""" EventAction.get_callbacks(kb_app, SphinxEvent.HCP) for callback in EventAction.get_callbacks(kb_app, SphinxEvent.HCP): yield callback(kb_app, sphinx_app)" 2613,"def call_env_check_consistency(cls, kb_app, builder: StandaloneHTMLBuilder, sphinx_env: BuildEnvironment): """""" On env-check-consistency, do callbacks"""""" for callback in EventAction.get_callbacks(kb_app, SphinxEvent.ECC): callback(kb_app, builder, sphinx_env)" 2614,"def call_missing_reference(cls, kb_app, sphinx_app: Sphinx, sphinx_env: BuildEnvironment, node, contnode, ): """""" On doctree-resolved, do 
callbacks"""""" for callback in EventAction.get_callbacks(kb_app, SphinxEvent.MR): return callback(kb_app, sphinx_app, sphinx_env, node, contnode)" 2615,"def call_html_page_context(cls, kb_app, sphinx_app: Sphinx, pagename: str, templatename: str, context, doctree: doctree ): """""" On doctree-resolved, do callbacks"""""" # We need to let one, and only one, callback return the name of # the template. Detect multiple and raise an exception. new_templatename = None for callback in EventAction.get_callbacks(kb_app, SphinxEvent.HPC): # The protocol: the one controlling callback will return a value # with a dictionary of {'templatename': 'sometemplate'} result = callback(kb_app, sphinx_app, pagename, templatename, context, doctree) if result and isinstance(result, dict) and 'templatename' in result: if new_templatename is not None: raise AssertionError('Multiple handlers returning') new_templatename = result['templatename'] return new_templatename" 2616,"def get_layout_template(self, template_name=None): """""" Returns the layout template to use when rendering the form to HTML. Preference of template selection: 1. Provided method argument `template_name` 2. Form class property `layout_template` 3. Globally defined default template from `defaults.LAYOUT_DEFAULT_TEMPLATE` :param template_name: Optional template to use instead of other configurations. :return: Template name to use when rendering the form. """""" if template_name: return template_name if self.layout_template: return self.layout_template return defaults.LAYOUT_DEFAULT_TEMPLATE" 2617,"def get_layout_context(self): """""" Returns the context which is used when rendering the form to HTML. The generated template context will contain the following variables: * form: `Form` instance * errors: `ErrorList` instance with non field errors and hidden field errors * hidden_fields: All hidden fields to render. * visible_fields: All visible fields to render. :return: Template context for form rendering. """""" errors = self.non_field_errors() for field in self.hidden_fields(): errors.extend(field.errors) return { 'form': self, 'errors': errors, 'hidden_fields': self.hidden_fields(), 'visible_fields': self.visible_fields(), }" 2618,"def full_clean(self, *args, **kwargs): """""" The full_clean method is hijacked to apply special treatment to invalid field inputs. For example adding extra options/classes to widgets. """""" super().full_clean(*args, **kwargs) for field in self.errors: if field != NON_FIELD_ERRORS: self.apply_widget_invalid_options(field)" 2619,"def get_field_template(self, bound_field, template_name=None): """""" Returns the field template to use when rendering a form field to HTML. Preference of template selection: 1. Provided method argument `template_name` 2. Template from `field_template_overrides` selected by field name 3. Template from `field_template_overrides` selected by field class 4. Form class property `field_template` 5. Globally defined default template from `defaults.LAYOUT_FIELD_TEMPLATE` :param bound_field: `BoundField` instance to select a template for. :param template_name: Optional template to use instead of other configurations. :return: Template name to use when rendering the form field. 
"""""" if template_name: return template_name templates = self.field_template_overrides or {} template_name = templates.get(bound_field.name, None) if template_name: return template_name template_name = templates.get(bound_field.field.__class__, None) if template_name: return template_name if self.field_template: return self.field_template return defaults.FIELD_DEFAULT_TEMPLATE" 2620,"def get_field_label_css_class(self, bound_field): """""" Returns the optional label CSS class to use when rendering a field template. By default, returns the Form class property `field_label_css_class`. If the field has errors and the Form class property `field_label_invalid_css_class` is defined, its value is appended to the CSS class. :param bound_field: `BoundField` instance to return CSS class for. :return: A CSS class string or `None` """""" class_name = self.field_label_css_class if bound_field.errors and self.field_label_invalid_css_class: class_name = join_css_class( class_name, self.field_label_invalid_css_class) return class_name or None" 2621,"def get_field_context(self, bound_field): """""" Returns the context which is used when rendering a form field to HTML. The generated template context will contain the following variables: * form: `Form` instance * field: `BoundField` instance of the field * field_id: Field ID to use in `<label for="".."">` * field_name: Name of the form field to render * errors: `ErrorList` instance with errors of the field * required: Boolean flag to signal if the field is required or not * label: The label text of the field * label_css_class: The optional label CSS class, might be `None` * help_text: Optional help text for the form field. Might be `None` * container_css_class: The CSS class for the field container. * widget_class_name: Lowercased version of the widget class name (e.g. 'textinput') * widget_input_type: `input_type` property of the widget instance, falls back to `widget_class_name` if not available. :return: Template context for field rendering. """""" widget = bound_field.field.widget widget_class_name = widget.__class__.__name__.lower() # Check if we have an overwritten id in widget attrs, # if not use auto_id of bound field. field_id = widget.attrs.get('id') or bound_field.auto_id if field_id: field_id = widget.id_for_label(field_id) return { 'form': self, 'field': bound_field, 'field_id': field_id, 'field_name': bound_field.name, 'errors': bound_field.errors, 'required': bound_field.field.required, 'label': bound_field.label, 'label_css_class': self.get_field_label_css_class(bound_field), 'help_text': mark_safe(bound_field.help_text) if bound_field.help_text else None, 'container_css_class': self.get_field_container_css_class(bound_field), 'widget_class_name': widget_class_name, 'widget_input_type': getattr(widget, 'input_type', None) or widget_class_name }" 2622,"def apply_widget_options(self, field_name): """""" Applies additional widget options like changing the input type of DateInput and TimeInput to ""date"" / ""time"" to enable Browser date pickers or other attributes/properties. """""" widget = self.fields[field_name].widget if isinstance(widget, forms.DateInput): widget.input_type = 'date' if isinstance(widget, forms.TimeInput): widget.input_type = 'time' if isinstance(widget, forms.SplitDateTimeWidget): widget.widgets[0].input_type = 'date' widget.widgets[1].input_type = 'time'" 2623,"def apply_widget_template(self, field_name): """""" Applies widget template overrides if available. 
The method uses the `get_widget_template` method to determine if the widget template should be exchanged. If a template is available, the template_name property of the widget instance is updated. :param field_name: A field name of the form. """""" field = self.fields[field_name] template_name = self.get_widget_template(field_name, field) if template_name: field.widget.template_name = template_name" 2624,"def get_widget_template(self, field_name, field): """""" Returns the optional widget template to use when rendering the widget for a form field. Preference of template selection: 1. Template from `widget_template_overrides` selected by field name 2. Template from `widget_template_overrides` selected by widget class By default, returns `None` which means ""use Django's default widget template"". :param field_name: The field name to select a widget template for. :param field: `Field` instance to return a widget template. :return: Template name to use when rendering the widget or `None` """""" templates = self.widget_template_overrides or {} template_name = templates.get(field_name, None) if template_name: return template_name template_name = templates.get(field.widget.__class__, None) if template_name: return template_name return None" 2625,"def apply_widget_css_class(self, field_name): """""" Applies CSS classes to widgets if available. The method uses the `get_widget_css_class` method to determine if the widget CSS class should be changed. If a CSS class is returned, it is appended to the current value of the class property of the widget instance. :param field_name: A field name of the form. """""" field = self.fields[field_name] class_name = self.get_widget_css_class(field_name, field) if class_name: field.widget.attrs['class'] = join_css_class( field.widget.attrs.get('class', None), class_name)" 2626,"def apply_widget_invalid_options(self, field_name): """""" Applies additional widget options for an invalid field. This method is called when there is some error on a field to apply additional options on its widget. It does the following: * Sets the aria-invalid property of the widget for accessibility. * Adds an invalid CSS class, which is determined by the returned value of `get_widget_invalid_css_class` method. If a CSS class is returned, it is appended to the current value of the class property of the widget. :param field_name: A field name of the form. """""" field = self.fields[field_name] class_name = self.get_widget_invalid_css_class(field_name, field) if class_name: field.widget.attrs['class'] = join_css_class( field.widget.attrs.get('class', None), class_name) field.widget.attrs['aria-invalid'] = 'true'" 2627,"def use_quandl_data(self, authtoken): """""" Use quandl data to build conversion table """""" dfs = {} st = self.start.strftime(""%Y-%m-%d"") at = authtoken for pair in self.pairs: symbol = """".join(pair) qsym = ""CURRFX/{}"".format(symbol) dfs[symbol] = qdl.get(qsym,authtoken=at, trim_start=st)['Rate'] self.build_conversion_table(dfs)" 2628,"def use_trump_data(self, symbols): """""" Use trump data to build conversion table symbols : list of symbols: will attempt to use units to build the conversion table, strings represent symbol names. 
"""""" dfs = {sym.units : sym.df[sym.name] for sym in symbols} self.build_conversion_table(dfs)" 2629,"def build_conversion_table(self, dataframes): """""" Build conversion table from a dictionary of dataframes """""" self.data = pd.DataFrame(dataframes) tmp_pairs = [s.split(""/"") for s in self.data.columns] self.data.columns = pd.MultiIndex.from_tuples(tmp_pairs)" 2630,"def match_tweet(self, tweet, user_stream): """""" Check if a tweet matches the defined criteria :param tweet: The tweet in question :type tweet: :class:`~responsebot.models.Tweet` :return: True if matched, False otherwise """""" if user_stream: if len(self.track) > 0: return self.is_tweet_match_track(tweet) return True return self.is_tweet_match_track(tweet) or self.is_tweet_match_follow(tweet)" 2631,"def find(self, _id, instance = None): """""" Find Args: _id (str): instance id or binding Id Keyword Arguments: instance (AtlasServiceInstance.Instance): Existing instance Returns: AtlasServiceInstance.Instance or AtlasServiceBinding.Binding: An instance or binding. """""" if instance is None: # We are looking for an instance return self.service_instance.find(_id) else: # We are looking for a binding return self.service_binding.find(_id, instance)" 2632,"def create(self, instance, parameters, existing=True): """"""Create an instance Args: instance (AtlasServiceInstance.Instance): Existing or New instance parameters (dict): Parameters for the instance Keyword Arguments: existing (bool): True (use an existing cluster), False (create a new cluster) Returns: ProvisionedServiceSpec: Status """""" return self.service_instance.create(instance, parameters, existing)" 2633,"def index(self, req, drivers): """"""List all network List all of netowrks on some special cloud with: :Param req :Type object Request """""" result = [] for driver in drivers: result.append(driver.list_network(req.params)) data = { 'action': ""index"", 'controller': ""network"", 'cloud': req.environ['calplus.cloud'], 'result': result } return data" 2634,"def delete(self, req, driver): """"""Delete a network Delete a specific netowrk with id on special cloud with: :Param req :Type object Request """""" response = driver.delete_network(req.params, id) data = { 'action': ""delete"", 'controller': ""network"", 'id': id, 'cloud': req.environ['calplus.cloud'], 'response': response } return data" 2635,"def update(self, req, driver): """"""Update a network Update a specific netowrk with id on special cloud with: :Param req :Type object Request """""" response = driver.update_network(req.params, id) data = { 'action': ""update"", 'controller': ""network"", 'id': id, 'cloud': req.environ['calplus.cloud'], 'response': response } return data" 2636,"def create(self, req, driver): """"""Create a network Create a new netowrk on special cloud with: :Param req :Type object Request """""" response = driver.create_network(req.params) data = { 'action': ""create"", 'controller': ""network"", 'cloud': req.environ['calplus.cloud'], 'response': response } return data" 2637,"def get(self, req, driver): """"""Get info of a network Get info of a specific netowrk with id on special cloud with: :Param req :Type object Request """""" response = driver.get_network(req.params, id) data = { 'action': ""get"", 'controller': ""network"", 'id': id, 'cloud': req.environ['calplus.cloud'], 'response': response } return data" 2638,"def attach_igw(self, req, driver): """"""Attach network to Internet gateway :Param req :Type object Request """""" igw = driver.get_igw(req.params) if igw is None: igw = 
driver.create_igw(req.params) response = driver.attach_igw(req.params, igw) data = { 'action': 'attach_igw', 'controller': 'network', 'id': id, 'cloud': req.environ['calplus.cloud'], 'response': response } return data" 2639,"def dettach_igw(self, req, driver): """"""Dettach network from Internet gateway :Param req :Type object Request """""" response = driver.dettach_igw(req.params) data = { 'action': 'attach_igw', 'controller': 'network', 'id': id, 'cloud': req.environ['calplus.cloud'], 'response': response } return data" 2640,"def attach_vpngw(self, req, id, driver): """"""Attach network to VPN gateway :Param req :Type object Request """""" vpngw = driver.get_vnpgw(req.params, id) if vpngw is None: vpngw = driver.create_vpngw(req.params, id) response = driver.attach_vpngw(req.params, vpngw) data = { 'action': 'attach_igw', 'controller': 'network', 'id': id, 'cloud': req.environ['calplus.cloud'], 'response': response } return data" 2641,"def connectMSExchange(server): """""" Creates a connection for the inputted server to a Microsoft Exchange server. :param server | <smtplib.SMTP> :usage |>>> import smtplib |>>> import projex.notify |>>> smtp = smtplib.SMTP('mail.server.com') |>>> projex.notify.connectMSExchange(smtp) :return (<bool> success, <str> reason) """""" if not sspi: return False, 'No sspi module found.' # send the SMTP EHLO command code, response = server.ehlo() if code != SMTP_EHLO_OKAY: return False, 'Server did not respond to EHLO command.' sspi_client = sspi.ClientAuth('NTLM') # generate NTLM Type 1 message sec_buffer = None err, sec_buffer = sspi_client.authorize(sec_buffer) # noinspection PyShadowingBuiltins buffer = sec_buffer[0].Buffer ntlm_message = base64.encodestring(buffer).replace('\n', '') # send NTLM Type 1 message -- Authentication Request code, response = server.docmd('AUTH', 'NTLM ' + ntlm_message) # verify the NTLM Type 2 response -- Challenge Message if code != SMTP_AUTH_CHALLENGE: msg = 'Server did not respond as expected to NTLM negotiate message' return False, msg # generate NTLM Type 3 message err, sec_buffer = sspi_client.authorize(base64.decodestring(response)) # noinspection PyShadowingBuiltins buffer = sec_buffer[0].Buffer ntlm_message = base64.encodestring(buffer).replace('\n', '') # send the NTLM Type 3 message -- Response Message code, response = server.docmd('', ntlm_message) if code != SMTP_AUTH_OKAY: return False, response return True, ''" 2642,"def sendEmail(sender, recipients, subject, body, attachments=None, cc=None, bcc=None, contentType='text/html', server=None, useMSExchange=None, encoding='utf-8', raiseErrors=False): """""" Sends an email from the inputted email address to the list of given recipients with the inputted subject and body. This will also attach the inputted list of attachments to the email. The server value will default to mail.<sender_domain> and you can use a ':' to specify a port for the server. :param sender <str> :param recipients <list> [ <str>, .. ] :param subject <str> :param body <str> :param attachments <list> [ <str>, .. ] :param cc <list> [ <str>, .. ] :param bcc <list> [ <str>, .. 
] :param contentType <str> :param server <str> :return <bool> success """""" if attachments is None: attachments = [] if cc is None: cc = [] if bcc is None: bcc = [] if server is None: server = NOTIFY_SERVER if useMSExchange is None: useMSExchange = NOTIFY_SERVER_MSX # normalize the data sender = nstr(sender) recipients = map(nstr, recipients) # make sure we have valid information if not isEmail(sender): err = errors.NotifyError('%s is not a valid email address' % sender) logger.error(err) return False # make sure there are recipients if not recipients: err = errors.NotifyError('No recipients were supplied.') logger.error(err) return False # build the server domain if not server: err = errors.NotifyError('No email server specified') logger.error(err) return False # create the email msg = MIMEMultipart(_subtype='related') msg['Subject'] = projex.text.toUtf8(subject) msg['From'] = sender msg['To'] = ','.join(recipients) msg['Cc'] = ','.join([nstr(addr) for addr in cc if isEmail(addr)]) msg['Bcc'] = ','.join([nstr(addr) for addr in bcc if isEmail(addr)]) msg['Date'] = nstr(datetime.datetime.now()) msg['Content-type'] = 'Multipart/mixed' msg.preamble = 'This is a multi-part message in MIME format.' msg.epilogue = '' # build the body bodyhtml = projex.text.toUtf8(body) eattach = [] # include inline images filepaths = re.findall('<img\s+src=""(file:///[^""]+)""[^/>]*/?>', bodyhtml) for filepath in filepaths: filename = filepath.replace('file:///', '') if os.path.exists(filename) and filename not in attachments: # replace with the attachment id cid = 'cid:%s' % os.path.basename(filename) bodyhtml = bodyhtml.replace(filename, cid) # add the image to the attachments fp = open(nstr(filename), 'rb') msgImage = MIMEImage(fp.read()) fp.close() # add the msg image to the msg content_id = '<%s>' % os.path.basename(filename) inline_link = 'inline; filename=""%s""' % os.path.basename(filename) msgImage.add_header('Content-ID', content_id) msgImage.add_header('Content-Disposition', inline_link) eattach.append(msgImage) attachments.append(filename) # create the body text msgText = MIMEText(bodyhtml, contentType, encoding) msgText['Content-type'] = contentType # include attachments for attach in attachments: fp = open(nstr(attach), 'rb') txt = MIMEBase('application', 'octet-stream') txt.set_payload(fp.read()) fp.close() encode_base64(txt) attachment = 'attachment; filename=""%s""' % os.path.basename(attach) txt.add_header('Content-Disposition', attachment) eattach.append(txt) eattach.insert(0, msgText) # add the attachments to the message for attach in eattach: msg.attach(attach) # create the connection to the email server try: smtp_server = smtplib.SMTP(nstr(server)) except socket.gaierror, err: logger.error(err) if raiseErrors: raise return False except Exception, err: logger.error(err) if raiseErrors: raise return False # connect to a microsoft exchange server if specified if useMSExchange: success, response = connectMSExchange(smtp_server) if not success: logger.debug('Could not connect to MS Exchange: ' + response) try: smtp_server.sendmail(sender, recipients, msg.as_string()) smtp_server.close() except Exception, err: logger.error(err) if raiseErrors: raise return False return True" 2643,"def sendJabber(sender, password, receivers, body, senderDomain=NOTIFY_IM_DOMAIN_SENDER, receiverDomain=NOTIFY_IM_DOMAIN_RECEIVER): """""" Sends an instant message to the inputted receivers from the given user. The senderDomain is an override to be used when no domain is supplied, same for the receiverDomain. 
:param sender <str> :param password <str> :param receivers <list> [ <str>, .. ] :param body <str> :param senderDomain <str> :param receiverDomain <str> :return <bool> success """""" import xmpp # make sure there is a proper domain as part of the sender if '@' not in sender: sender += '@' + senderDomain # create a jabber user connection user = xmpp.protocol.JID(sender) # create a connection to an xmpp client client = xmpp.Client(user.getDomain(), debug=[]) connection = client.connect(secure=0, use_srv=False) if not connection: text = 'Could not create a connection to xmpp (%s)' % sender err = errors.NotifyError(text) logger.error(err) return False # authenticate the session auth = client.auth(user.getNode(), password, user.getResource()) if not auth: text = 'Jabber not authenticated: (%s, %s)' % (sender, password) err = errors.NotifyError(text) logger.error(err) return False count = 0 # send the message to the inputted receivers for receiver in receivers: if '@' not in receiver: receiver += '@' + receiverDomain # create the message msg = xmpp.protocol.Message(receiver, body) # create the html message html_http = {'xmlns': 'http://jabber.org/protocol/xhtml-im'} html_node = xmpp.Node('html', html_http) enc_msg = body.encode('utf-8') xml = '<body xmlns=""http://www.w3.org/1999/xhtml"">%s</body>' % enc_msg html_node.addChild(node=xmpp.simplexml.XML2Node(xml)) msg.addChild(node=html_node) client.send(msg) count += 1 return count > 0" 2644,"def set_entries(self, entries: List[Tuple[str, str]], titles, resources): """""" Provide the template the data for the toc entries """""" self.entries = [] for flag, pagename in entries: title = titles[pagename].children[0] resource = resources.get(pagename, None) if resource and hasattr(resource, 'is_published') and not \ resource.is_published: continue # Even if there is no resource for this tocentry, we can # use the toctree info self.entries.append(dict( title=title, href=pagename, resource=resource )) self.result_count = len(self.entries)" 2645,"def render(self, builder, context, sphinx_app: Sphinx): """""" Given a Sphinx builder and context with site in it, generate HTML """""" context['sphinx_app'] = sphinx_app context['toctree'] = self html = builder.templates.render(self.template + '.html', context) return html" 2646,"def associate_public_ip(self, instance_id, public_ip_id, private_ip=None): """"""Associate a external IP"""""" return self.driver.associate_public_ip( instance_id, public_ip_id, private_ip)" 2647,"def deprecatedmethod(classname='', info=''): """""" Defines a particular method as being deprecated - the method will exist for backwards compatibility, but will contain information as to how update code to become compatible with the current system. Code that is deprecated will only be supported through the end of a minor release cycle and will be cleaned during a major release upgrade. :usage |from projex.decorators import deprecated | |class A(object): | @deprecatedmethod('A', 'Use A.printout instead') | def format( self ): | print 'test' | | def printout( self ): : print 'new test' """""" def decorated(func): @wraps(func) def wrapped(*args, **kwds): frame = last_frame = None try: frame = inspect.currentframe() last_frame = frame.f_back fname = last_frame.f_code.co_filename func_file = func.func_code.co_filename opts = { 'func': func.__name__, 'line': last_frame.f_lineno, 'file': fname, 'class': classname, 'info': info, 'package': projex.packageFromPath(func_file) } msg = 'Deprecated method called from %(file)s, line %(line)d.' 
\ '\n %(package)s.%(class)s.%(func)s is deprecated.' \ ' %(info)s' % opts logger.warning(errors.DeprecatedMethodWarning(msg)) finally: del frame del last_frame return func(*args, **kwds) wrapped.__name__ = func.__name__ wrapped.__doc__ = ':warning This method is deprecated! %s\n\n' % info if func.__doc__: wrapped.__doc__ += func.__doc__ wrapped.__dict__.update(func.__dict__) wrapped.__dict__['func_type'] = 'deprecated method' return wrapped return decorated" 2648,"def profiler(sorting=('tottime',), stripDirs=True, limit=20, path='', autoclean=True): """""" Creates a profile wrapper around a method to time out all the operations that it runs through. For more information, look into the hotshot Profile documentation online for the built-in Python package. :param sorting <tuple> ( <key>, .. ) :param stripDirs <bool> :param limit <int> :param path <str> :param autoclean <bool> :usage |from projex.decorators import profiler | |class A: | @profiler() # must be called as a method | def increment(amount, count = 1): | return amount + count | |a = A() |a.increment(10) | """""" def decorated(func): """""" Wrapper function to handle the profiling options. """""" # create a call to the wrapping @wraps(func) def wrapped(*args, **kwds): """""" Inner method for calling the profiler method. """""" # define the profile name filename = os.path.join(path, '%s.prof' % func.__name__) # create a profiler for the method to run through prof = hotshot.Profile(filename) results = prof.runcall(func, *args, **kwds) prof.close() # log the information about it stats = hotshot.stats.load(filename) if stripDirs: stats.strip_dirs() # we don't want to know about the arguments for this method stats.sort_stats(*sorting) stats.print_stats(limit) # remove the file if desired if autoclean: os.remove(filename) return results return wrapped return decorated" 2649,"def retrymethod(count, sleep=0): """""" Defines a decorator method to wrap a method with a retry mechanism. The wrapped method will be attempt to be called the given number of times based on the count value, waiting the number of seconds defined by the sleep parameter. If the throw option is defined, then the given error will be thrown after the final attempt fails. :param count | <int> sleep | <int> | msecs """""" def decorated(func): @wraps(func) def wrapped(*args, **kwds): # do the retry options for i in range(count - 1): try: return func(*args, **kwds) except StandardError: pass if sleep: time.sleep(sleep) # run as standard return func(*args, **kwds) return wrapped return decorated" 2650,"def launch_server(message_handler, options): """""" Launch a message server :param handler_function: The handler function to execute for each message :param options: Application options for TCP, etc. """""" logger = logging.getLogger(__name__) # if (options.debug): # logger.setLevel(logging.DEBUG) # if not options.monitor_port: # logger.warning( # ""Monitoring not enabled. 
No monitor-port option defined."") # else: # threading.Thread(target=launch_monitor_server, args=(options.host, options.monitor_port, logger)).start() # Create the server, binding to specified host on configured port # logger.info( # 'Starting server on host %s port %d Python version %s.%s.%s' % ((options.host, options.port) + sys.version_info[:3])) # server = ThreadedTCPServer((options.host, options.port), # Activate the server; this will keep running until you # interrupt the program with Ctrl-C try: while True: logger.debug('waiting for more data') if not message_handler.handle(): break logger.warning(""I/O stream closed from client"") except KeyboardInterrupt: logger.info(""I/O stream closed from client exiting..."") os._exit(142) except: logger.exception(""Error encountered handling message"")" 2651,"def validate(self, options): """""" Validate the options or exit() """""" try: codecs.getencoder(options.char_encoding) except LookupError: self.parser.error(""invalid 'char-encoding' %s"" % options.char_encoding)" 2652,"def parse_code(url): """""" Parse the code parameter from the a URL :param str url: URL to parse :return: code query parameter :rtype: str """""" result = urlparse(url) query = parse_qs(result.query) return query['code']" 2653,"def user_token(scopes, client_id=None, client_secret=None, redirect_uri=None): """""" Generate a user access token :param List[str] scopes: Scopes to get :param str client_id: Spotify Client ID :param str client_secret: Spotify Client secret :param str redirect_uri: Spotify redirect URI :return: Generated access token :rtype: User """""" webbrowser.open_new(authorize_url(client_id=client_id, redirect_uri=redirect_uri, scopes=scopes)) code = parse_code(raw_input('Enter the URL that you were redirected to: ')) return User(code, client_id=client_id, client_secret=client_secret, redirect_uri=redirect_uri)" 2654,"def consume_file(self, infile): """"""Load the specified GFF3 file into memory."""""" reader = tag.reader.GFF3Reader(infilename=infile) self.consume(reader)" 2655,"def consume_seqreg(self, seqreg): """"""Load a :code:`##sequence-region` directive into memory."""""" if not isinstance(seqreg, tag.directive.Directive) or \ seqreg.type != 'sequence-region': raise ValueError('expected ##sequence-region directive') if seqreg.seqid in self.declared_regions: msg = 'duplicate sequence region ""{}""'.format(seqreg.seqid) raise ValueError(msg) self.declared_regions[seqreg.seqid] = seqreg.range.copy()" 2656,"def consume_feature(self, feature): """"""Load a :code:`Feature` object into memory."""""" if not isinstance(feature, tag.feature.Feature): raise ValueError('expected Feature object') self[feature.seqid][feature.start:feature.end] = feature if feature.seqid not in self.inferred_regions: self.inferred_regions[feature.seqid] = feature._range.copy() newrange = self.inferred_regions[feature.seqid].merge(feature._range) self.inferred_regions[feature.seqid].start = newrange.start self.inferred_regions[feature.seqid].end = newrange.end" 2657,"def consume(self, entrystream): """""" Load a stream of entries into memory. Only Feature objects and sequence-region directives are loaded, all other entries are discarded. """""" for entry in entrystream: if isinstance(entry, tag.directive.Directive) and \ entry.type == 'sequence-region': self.consume_seqreg(entry) elif isinstance(entry, tag.feature.Feature): self.consume_feature(entry)" 2658,"def query(self, seqid, start, end, strict=True): """""" Query the index for features in the specified range. 
:param seqid: ID of the sequence to query :param start: start of the query interval :param end: end of the query interval :param strict: indicates whether query is strict containment or overlap (:code:`True` and :code:`False`, respectively) """""" return sorted([ intvl.data for intvl in self[seqid].search(start, end, strict) ])" 2659,"def cli(ctx, stage): """"""Show the functions that are available, bubble system and custom."""""" if not ctx.bubble: ctx.say_yellow( 'There is no bubble present, will not show any transformer functions') raise click.Abort() rule_functions = get_registered_rule_functions() ctx.gbc.say('before loading functions:' + str(len(rule_functions))) load_rule_functions(ctx) ctx.gbc.say('after loading functions:' + str(len(rule_functions))) ctx.gbc.say('rule_functions:', stuff=rule_functions, verbosity=10) rule_functions.set_parent(ctx.gbc) for f in rule_functions: ctx.say('fun: ' + f, verbosity=1) ctx.gbc.say('funs: ', stuff=rule_functions.get_rule_functions(), verbosity=100) return True" 2660,"def to_utctimestamp(a_datetime): """""" Calculate number of seconds from UTC 1970-01-01 00:00:00. When: - dt doesn't have tzinfo: assume it's a utc time. - dt has tzinfo: use tzinfo. WARNING, if your datetime object doens't have ``tzinfo``, make sure it's a UTC time, but **NOT a LOCAL TIME**. **中文文档** 计算时间戳, 若: - 不带tzinfo: 则默认为是UTC time。 - 带tzinfo: 则使用tzinfo。 """""" if a_datetime.tzinfo is None: delta = a_datetime - datetime(1970, 1, 1) else: delta = a_datetime - datetime(1970, 1, 1, tzinfo=utc) return delta.total_seconds()" 2661,"def to_utc(a_datetime, keep_utc_tzinfo=False): """""" Convert a time awared datetime to utc datetime. :param a_datetime: a timezone awared datetime. (If not, then just returns) :param keep_utc_tzinfo: whether to retain the utc time zone information. **中文文档** 将一个带时区的时间转化成UTC时间。而对于UTC时间而言, 有没有时区信息都无所谓了。 """""" if a_datetime.tzinfo: utc_datetime = a_datetime.astimezone(utc) # convert to utc time if keep_utc_tzinfo is False: utc_datetime = utc_datetime.replace(tzinfo=None) return utc_datetime else: return a_datetime" 2662,"def utc_to_tz(utc_datetime, tzinfo, keep_tzinfo=False): """""" Convert a UTC datetime to a time awared local time :param utc_datetime: :param tzinfo: :param keep_tzinfo: """""" tz_awared_datetime = utc_datetime.replace(tzinfo=utc).astimezone(tzinfo) if keep_tzinfo is False: tz_awared_datetime = tz_awared_datetime.replace(tzinfo=None) return tz_awared_datetime" 2663,"def repr_data_size(size_in_bytes, precision=2): # pragma: no cover """"""Return human readable string represent of a file size. Doesn""t support size greater than 1EB. For example: - 100 bytes => 100 B - 100,000 bytes => 97.66 KB - 100,000,000 bytes => 95.37 MB - 100,000,000,000 bytes => 93.13 GB - 100,000,000,000,000 bytes => 90.95 TB - 100,000,000,000,000,000 bytes => 88.82 PB ... 
Magnitude of data:: 1000 kB kilobyte 1000 ** 2 MB megabyte 1000 ** 3 GB gigabyte 1000 ** 4 TB terabyte 1000 ** 5 PB petabyte 1000 ** 6 EB exabyte 1000 ** 7 ZB zettabyte 1000 ** 8 YB yottabyte """""" if size_in_bytes < 1024: return ""%s B"" % size_in_bytes magnitude_of_data = [""B"", ""KB"", ""MB"", ""GB"", ""TB"", ""PB"", ""EB"", ""ZB"", ""YB""] index = 0 while 1: index += 1 size_in_bytes, mod = divmod(size_in_bytes, 1024) if size_in_bytes < 1024: break template = ""{0:.%sf} {1}"" % precision s = template.format(size_in_bytes + mod / 1024.0, magnitude_of_data[index]) return s" 2664,"def update_slots(self, event): """""" :type lex_input_event: LexInputEvent :return: None """""" if isinstance(event, LexInputEvent): event_slots = event.currentIntent.slots elif isinstance(event, basestring) or isinstance(event, unicode) or isinstance(event, str): event_slots = deepcopy(json.loads(event)['currentIntent']['slots']) else: event_slots = deepcopy(event['currentIntent']['slots']) for key, val in event_slots.items(): if key not in self.dialogAction.slots._schema.fields: field = Field(key, types.StringType()) self.dialogAction.slots._schema.append_field(field) self.dialogAction.slots[key] = val" 2665,"def render_toctrees(kb_app: kb, sphinx_app: Sphinx, doctree: doctree, fromdocname: str): """""" Look in doctrees for toctree and replace with custom render """""" # Only do any of this if toctree support is turned on in KaybeeSettings. # By default, this is off. settings: KaybeeSettings = sphinx_app.config.kaybee_settings if not settings.articles.use_toctree: return # Setup a template and context builder: StandaloneHTMLBuilder = sphinx_app.builder env: BuildEnvironment = sphinx_app.env # Toctree support. First, get the registered toctree class, if any registered_toctree = ToctreeAction.get_for_context(kb_app) for node in doctree.traverse(toctree): if node.attributes['hidden']: continue custom_toctree = registered_toctree(fromdocname) context = builder.globalcontext.copy() context['sphinx_app'] = sphinx_app # Get the toctree entries. We only handle one level of depth for # now. To go further, we need to recurse like sphinx's # adapters.toctree._toctree_add_classes function entries = node.attributes['entries'] # The challenge here is that some items in a toctree # might not be resources in our ""database"". So we have # to ask Sphinx to get us the titles. custom_toctree.set_entries(entries, env.titles, sphinx_app.env.resources) output = custom_toctree.render(builder, context, sphinx_app) # Put the output into the node contents listing = [nodes.raw('', output, format='html')] node.replace_self(listing)" 2666,"def stamp_excerpt(kb_app: kb, sphinx_app: Sphinx, doctree: doctree): """""" Walk the tree and extract excert into resource.excerpt """""" # First, find out which resource this is. Won't be easy. 
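# Illustrative note (the path below is hypothetical, not from the original source):
# a doctree whose 'source' attribute is <confdir>/articles/intro.rst is reduced to
# the docname 'articles/intro', which is then looked up in sphinx_app.env.resources.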
resources = sphinx_app.env.resources confdir = sphinx_app.confdir source = PurePath(doctree.attributes['source']) # Get the relative path inside the docs dir, without .rst, then # get the resource docname = str(source.relative_to(confdir)).split('.rst')[0] resource = resources.get(docname) if resource: # Stamp the excerpt on the resource excerpt = getattr(resource.props, 'excerpt', False) auto_excerpt = getattr(resource.props, 'auto_excerpt', False) if excerpt: resource.excerpt = excerpt elif not auto_excerpt: resource.excerpt = None else: # Extract the excerpt based on the number of paragraphs # in auto_excerpt resource.excerpt = get_rst_excerpt(doctree, auto_excerpt)" 2667,"def bitfieldify(buff, count): """"""Extract a bitarray out of a bytes array. Some hardware devices read from the LSB to the MSB, but the bit types available prefer to put pad bits on the LSB side, completely changing the data. This function takes in bytes and the number of bits to extract starting from the LSB, and produces a bitarray of those bits. """""" databits = bitarray() databits.frombytes(buff) return databits[len(databits)-count:]" 2668,"def build_byte_align_buff(bits): """"""Pad the left side of a bitarray with 0s to align its length with byte boundaries. Args: bits: A bitarray to be padded and aligned. Returns: A newly aligned bitarray. """""" bitmod = len(bits)%8 if bitmod == 0: rdiff = bitarray() else: #KEEP bitarray rdiff = bitarray(8-bitmod) rdiff.setall(False) return rdiff+bits" 2669,"def create(self, name, cidr, **kwargs): """"""This function will create a user network. Within OpenStack, it will create a network and a subnet Within AWS, it will create a VPC and a subnet :param name: string :param cidr: string E.x: ""10.0.0.0/24"" :param kwargs: dict :return: dict """""" return self.driver.create(name, cidr, **kwargs)" 2670,"def find_whole_word(w): """""" Scan through string looking for a location where this word produces a match, and return a corresponding MatchObject instance. Return None if no position in the string matches the pattern; note that this is different from finding a zero-length match at some point in the string. """""" return re.compile(r'\b({0})\b'.format(w), flags=re.IGNORECASE).search" 2671,"def collect_scripts_from_sources(script_paths, files_deployment, project_path='.', is_package=False, logger=None): """""" Collects postgres scripts from source files :param script_paths: list of strings or a string with a relative path to the directory containing files with scripts :param files_deployment: list of files that need to be harvested. 
Scripts from there will only be taken if the path to the file is in script_paths :param project_path: path to the project source code :param is_package: are files packaged with pip egg :param logger: pass the logger object if needed :return: """""" logger = logger or logging.getLogger(__name__) scripts_dict = {} if script_paths: if not isinstance(script_paths, list): # can be list of paths or a string, anyways converted to list script_paths = [script_paths] if is_package: for script_path in script_paths: for file_info in pkg_resources.resource_listdir('pgpm', script_path): file_content = pkg_resources.resource_string('pgpm', '{0}/{1}'.format(script_path, file_info))\ .decode('utf-8') if file_content: scripts_dict[file_info] = file_content logger.debug('File {0}/{1} collected.'.format(script_path, file_info)) else: logger.debug('File {0}/{1} not collected as it\'s empty.'.format(script_path, file_info)) else: if files_deployment: # if specific script to be deployed, only find them for list_file_name in files_deployment: list_file_full_path = os.path.join(project_path, list_file_name) if os.path.isfile(list_file_full_path): for i in range(len(script_paths)): if script_paths[i] in list_file_full_path: file_content = io.open(list_file_full_path, 'r', -1, 'utf-8-sig', 'ignore').read() if file_content: scripts_dict[list_file_name] = file_content logger.debug('File {0} collected.'.format(list_file_full_path)) else: logger.debug('File {0} not collected as it\'s empty.'.format(list_file_full_path)) else: logger.debug('File {0} is not found in any of {1} folders, please specify a correct path' .format(list_file_full_path, script_paths)) else: for script_path in script_paths: for subdir, dirs, files in os.walk(script_path): files = sorted(files) for file_info in files: if file_info != settings.CONFIG_FILE_NAME and file_info[0] != '.': file_content = io.open(os.path.join(subdir, file_info), 'r', -1, 'utf-8-sig', 'ignore').read() if file_content: scripts_dict[file_info] = file_content logger.debug('File {0} collected'.format(os.path.join(subdir, file_info))) else: logger.debug('File {0} not collected as it\'s empty.' .format(os.path.join(subdir, file_info))) return scripts_dict" 2672,"def parse(self, fp, headersonly=True): """"""Create a message structure from the data in a file."""""" feedparser = FeedParser(self._class) feedparser._set_headersonly() try: mp = mmap.mmap(fp.fileno(), 0, access=mmap.ACCESS_READ) except: mp = fp data = """" # While parsing the header we can convert to us-ascii? while True: line = mp.readline() data = data + line.decode(""us-ascii"") if line == b""\n"": break feedparser.feed(data) # mp[0:5000]) return feedparser.close()" 2673,"def coerce(self, values): """"""Convert an iterable of literals to an iterable of options. Args: values (iterable or string): An iterable of raw values to convert into options. If the value is a string is is assumed to be a comma separated list and will be split before processing. Returns: iterable: An iterable of option values initialized with the raw values from `values`. Raises: TypeError: If `values` is not iterable or string. TypeError: If the underlying option raises a TypeError. ValueError: If the underlying option raises a ValueError. """""" if isinstance(values, compat.basestring): values = tuple(value.strip() for value in values.split(',')) # Create a list of options to store each value. 
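# Illustrative example (the values are hypothetical): a raw string input such as
# 'a, b,c' has at this point been split and stripped into ('a', 'b', 'c'); each
# element is assigned below to its own deep-copied option via __set__.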
opt_iter = tuple(copy.deepcopy(self._option) for value in values) for opt_obj, val in compat.zip(opt_iter, values): opt_obj.__set__(None, val) return opt_iter" 2674,"def get(self, name, default=None): """"""Fetch an option from the dictionary. Args: name (str): The name of the option. default: The value to return if the name is missing. Returns: any: The value stored by the option. This method resolves the option to its value rather than returning the option object itself. Use the 'options()' method or this object's iter to get the raw options. """""" option = self._options.get(name, None) if option is None: return default return option.__get__(self)" 2675,"def set(self, name, value): """"""Set an option value. Args: name (str): The name of the option. value: The value to set the option to. Raises: AttributeError: If the name is not registered. TypeError: If the value is not a string or appropriate native type. ValueError: If the value is a string but cannot be coerced. """""" if name not in self._options: raise AttributeError(""Option {0} does not exist."".format(name)) return self._options[name].__set__(self, value)" 2676,"def register(self, name, option): """"""Register a new option with the namespace. Args: name (str): The name to register the option under. option (option.Option): The option object to register. Raises: TypeError: If the option is not an option.Option object. ValueError: If the name is already registered. """""" if name in self._options: raise ValueError(""Option {0} already exists."".format(name)) if not isinstance(option, opt.Option): raise TypeError(""Options must be of type Option."") self._options[name] = option" 2677,"def set(self, name, value): """"""Set an option value. Args: name (str): The name of the option. value: The value to set the option to. Raises: TypeError: If the value is not a string or appropriate native type. ValueError: If the value is a string but cannot be coerced. If the name is not registered a new option will be created using the option generator. """""" if name not in self._options: self.register(name, self._generator()) return self._options[name].__set__(self, value)" 2678,"def cli(ctx, amount, index, query, stage): """"""Pull data from Source Service Client"""""" if not ctx.bubble: ctx.say_yellow('There is no bubble present, will not pull') raise click.Abort() STAGE = None SRC = None if stage in STAGES and stage in ctx.cfg.CFG: STAGE = ctx.cfg.CFG[stage] if not STAGE: ctx.say_red('There is no STAGE in CFG:' + stage) ctx.say_yellow('please check configuration in ' + ctx.home + '/config/config.yaml') raise click.Abort() if 'SOURCE' in STAGE: SRC = STAGE.SOURCE if not SRC: ctx.say_red('There is no SOURCE in stage:' + stage) ctx.say_yellow('please check configuration in ' + ctx.home + '/config/config.yaml') raise click.Abort() gbc = ctx.GLOBALS['gbc'] src_client = get_client(gbc, SRC.CLIENT, ctx.home) # TODO: client get error count? # make default counters # client.pull(amount,index,counters) # counter:#Good Bad Ugly: BUG, counters # for this the client must be able to keep stats, or update stats in the pull loop. 
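# Sketch only (names and signature are assumptions, not part of the existing client
# API): the Good/Bad/Ugly counters mentioned above could be a plain dict such as
# counters = {'good': 0, 'bad': 0, 'ugly': 0}, passed along as
# sclient.pull(amount, index, counters) and incremented inside the pull loop.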
# bug.counters try: sclient = src_client.BubbleClient(cfg=SRC) sclient.set_parent(gbc) sclient.set_verbose(ctx.get_verbose()) except Exception as e: ctx.say_red( 'cannot create bubble client:' + SRC.CLIENT) ctx.say_red(str(e)) raise click.Abort('cannot pull') full_data = False if amount == -1 and index == -1: full_data = True try: if amount > 0: if index < 0: index = 0 pb_label='Pulling %d+%d '% (index,amount) src_data_gen = sclient.pull(amount, index) else: if query: pb_label='Querying:%s' % query src_data_gen = [sclient.query(query)] full_data = False else: pb_label='Pulling all' src_data_gen = sclient.pull() except Exception as e: ctx.say_red('cannot pull from source client: ' + SRC.CLIENT) ctx.say_red(str(e)) raise click.Abort('cannot pull') click.echo() # TODO: these actually need to be counted some way. # in client, # in storage, # where else? error_count = 0 with click.progressbar(src_data_gen, label=pb_label, show_pos=True, length=amount, show_eta=True, fill_char='◐') as progress_src_data_gen: pfr = bubble_lod_dump(ctx=ctx, step='pulled', stage=stage, full_data=full_data, reset=True, data_gen=progress_src_data_gen) ctx.say('pulled [%d] objects' % pfr['total']) stats = {} stats['pulled_stat_error_count'] = error_count stats['pulled_stat_total_count'] = pfr['total'] update_stats(ctx, stage, stats) return True" 2679,"def GetCompressedFilesInDir(fileDir, fileList, ignoreDirList, supportedFormatList = ['.rar',]): """""" Get all supported files from the given directory. Appends to given file list. Parameters ---------- fileDir : string File directory to search. fileList : list List which any file matches will be added to. ignoreDirList : list List of directories to ignore in recursive lookup (currently unused). supportedFormatList : list [optional : default = ['.rar',]] List of supported file formats to search for. """""" goodlogging.Log.Info(""EXTRACT"", ""Parsing file directory: {0}"".format(fileDir)) if os.path.isdir(fileDir) is True: for globPath in glob.glob(os.path.join(fileDir, '*')): if os.path.splitext(globPath)[1] in supportedFormatList: fileList.append(globPath)" 2680,"def MultipartArchiving(firstPartExtractList, otherPartSkippedList, archiveDir, otherPartFilePath = None): """""" Archive all parts of a multi-part compressed file. If the file has already been extracted (via part 1) then move all subsequent parts directly to the archive directory. If the file has not been extracted and this is not the first part, add it to the skipped list; it is only archived once the first part is sent for archiving. Parameters ---------- firstPartExtractList : list List of base file names for which the first RAR part has already been extracted. otherPartSkippedList : list List of file paths for parts (other than part 1) which have been skipped so far. archiveDir : string Directory to move processed RAR files to. otherPartFilePath : string [optional : default = None] Path of a single part file to archive or queue. If None, all entries in otherPartSkippedList are processed.
"""""" if otherPartFilePath is None: for filePath in list(otherPartSkippedList): MultipartArchiving(firstPartExtractList, otherPartSkippedList, archiveDir, filePath) else: baseFileName = re.findall(""(.+?)[.]part.+?rar"", otherPartFilePath)[0] if baseFileName in firstPartExtractList: util.ArchiveProcessedFile(otherPartFilePath, archiveDir) if otherPartFilePath in otherPartSkippedList: otherPartSkippedList.remove(otherPartFilePath) elif otherPartFilePath not in otherPartSkippedList: otherPartSkippedList.append(otherPartFilePath)" 2681,"def DoRarExtraction(rarArchive, targetFile, dstDir): """""" RAR extraction with exception catching Parameters ---------- rarArchive : RarFile object RarFile object to extract. targetFile : string Target file name. dstDir : string Target directory. Returns ---------- boolean False if rar extraction failed, otherwise True. """""" try: rarArchive.extract(targetFile, dstDir) except BaseException as ex: goodlogging.Log.Info(""EXTRACT"", ""Extract failed - Exception: {0}"".format(ex)) return False else: return True" 2682,"def GetRarPassword(skipUserInput): """""" Get password for rar archive from user input. Parameters ---------- skipUserInput : boolean Set to skip user input. Returns ---------- string or boolean If no password is given then returns False otherwise returns user response string. """""" goodlogging.Log.Info(""EXTRACT"", ""RAR file needs password to extract"") if skipUserInput is False: prompt = ""Enter password, 'x' to skip this file or 'exit' to quit this program: "" response = goodlogging.Log.Input(""EXTRACT"", prompt) response = util.CheckEmptyResponse(response) else: response = 'x' if response.lower() == 'x': goodlogging.Log.Info(""EXTRACT"", ""File extraction skipped without password"") return False elif response.lower() == 'exit': goodlogging.Log.Fatal(""EXTRACT"", ""Program terminated by user 'exit'"") else: return response" 2683,"def CheckPasswordReuse(skipUserInput): """""" Check with user for password reuse. Parameters ---------- skipUserInput : boolean Set to skip user input. Returns ---------- int Integer from -1 to 2 depending on user response. """""" goodlogging.Log.Info(""EXTRACT"", ""RAR files needs password to extract"") if skipUserInput is False: prompt = ""Enter 't' to reuse the last password for just this file, "" \ ""'a' to reuse for all subsequent files, "" \ ""'n' to enter a new password for this file "" \ ""or 's' to enter a new password for all files: "" response = goodlogging.Log.Input(""EXTRACT"", prompt) response = util.ValidUserResponse(response, ('t','a','n','s')) else: response = 'a' if response.lower() == 's': return -1 if response.lower() == 'n': return 0 elif response.lower() == 't': return 1 elif response.lower() == 'a': return 2" 2684,"def Extract(fileList, fileFormatList, archiveDir, skipUserInput): """""" Iterate through given file list and extract all files matching the file format list from each RAR file. After sucessful extraction move RAR files to archive directory. Parameters ---------- fileList : list List of files to attempt to extract. fileFormatList : list List of file formats to extract from each RAR archive. archiveDir : string Directory to move RAR files once extract is complete. skipUserInput : boolean Set to skip any potential user input (if a single option is available it will be selected otherwise the user input will default to take no action). 
"""""" goodlogging.Log.Info(""EXTRACT"", ""Extracting files from compressed archives"") goodlogging.Log.IncreaseIndent() if len(fileList) == 0: goodlogging.Log.Info(""EXTRACT"", ""No files to extract"") goodlogging.Log.DecreaseIndent() return None firstPartExtractList = [] otherPartSkippedList = [] lastPassword = False reuseLastPassword = 0 for filePath in fileList: goodlogging.Log.Info(""EXTRACT"", ""{0}"".format(filePath)) goodlogging.Log.IncreaseIndent() try: rarArchive = rarfile.RarFile(filePath) except ImportError: goodlogging.Log.Info(""EXTRACT"", ""Unable to extract - Python needs the rarfile package to be installed (see README for more details)"") except rarfile.NeedFirstVolume: goodlogging.Log.Info(""EXTRACT"", ""File skipped - this is not the first part of the RAR archive"") MultipartArchiving(firstPartExtractList, otherPartSkippedList, archiveDir, filePath) except BaseException as ex: goodlogging.Log.Info(""EXTRACT"", ""Unable to extract - Exception: {0}"".format(ex)) else: dirPath = os.path.dirname(filePath) fileExtracted = False rarAuthentication = True if rarArchive.needs_password(): if lastPassword and reuseLastPassword in (0, 1): reuseLastPassword = CheckPasswordReuse(skipUserInput) if lastPassword and reuseLastPassword in (1, 2): rarArchive.setpassword(lastPassword) else: rarPassword = GetRarPassword(skipUserInput) if rarPassword: rarArchive.setpassword(rarPassword) lastPassword = rarPassword else: rarAuthentication = False if rarAuthentication: for f in rarArchive.infolist(): if util.FileExtensionMatch(f.filename, fileFormatList): goodlogging.Log.Info(""EXTRACT"", ""Extracting file: {0}"".format(f.filename)) extractPath = os.path.join(dirPath, f.filename) targetPath = os.path.join(dirPath, os.path.basename(f.filename)) if os.path.isfile(targetPath): goodlogging.Log.Info(""EXTRACT"", ""Extraction skipped - file already exists at target: {0}"".format(targetPath)) fileExtracted = True elif os.path.isfile(extractPath): goodlogging.Log.Info(""EXTRACT"", ""Extraction skipped - file already exists at extract directory: {0}"".format(extractPath)) fileExtracted = True else: fileExtracted = DoRarExtraction(rarArchive, f, dirPath) if os.path.isfile(extractPath) and not os.path.isfile(targetPath): os.rename(extractPath, targetPath) util.RemoveEmptyDirectoryTree(os.path.dirname(extractPath)) if fileExtracted is True: util.ArchiveProcessedFile(filePath, archiveDir) try: firstPartFileName = re.findall('(.+?)[.]part1[.]rar', filePath)[0] except IndexError: pass else: firstPartExtractList.append(firstPartFileName) MultipartArchiving(firstPartExtractList, otherPartSkippedList, archiveDir) finally: goodlogging.Log.DecreaseIndent() goodlogging.Log.DecreaseIndent()" 2685,"def register(function=None, *, singleton=False, threadlocal=False, name=None): """""" :deprecated: 1.0.0 Use :class:`giveme.injector.Injector` instead. Register a dependency factory in the dependency manager. The function name is the name of the dependency. This can be used as a decorator. Args: function (callable): The dependency factory function Not needed when used as decorator. singleton (``bool``, optional): If ``True`` the given function is only called once during the application lifetime. Injectees will receive the already created instance when available. Defaults to ``False`` threadlocal (``bool``, optional): Same as singleton except the returned instance is available only to the thread that created it. Defaults to ``False`` name (``str``, optional): Overridden name for the dependency. 
Defaults to the name of the registered function. """""" warnings.warn( ( 'Module level `register` decorator has been deprecated and will ' 'be removed in a future release. ' 'Use the Injector class instead' ), DeprecationWarning ) def decorator(function): return manager.register(function, singleton=singleton, threadlocal=threadlocal, name=name) if function: return decorator(function) else: return decorator" 2686,"def inject(function=None, **overridden_names): """""" :deprecated: 1.0.0 Use :class:`giveme.injector.Injector` instead. Inject dependencies into given function's arguments. By default the injector looks for keyword arguments matching registered dependency names. Example: @register def db_connection(): return create_db_connection() @inject def save_thing(thing, db_connection=None): db_connection.store(thing) Arbitrary arguments may also be mapped to specific dependency names by passing them to the decorator as ``arg='dependency_name'`` Example: @inject(db='db_connection') def save_thing(thing, db=None): # `db_connection` injected as `db` Args: function (callable): The function that accepts a dependency. Implicitly passed when used as a decorator. **overridden_names: Mappings of `function` arguments to dependency names in the form of ``function_argument='dependency name'`` """""" warnings.warn( ( 'Module level `inject` decorator has been deprecated and will ' 'be removed in a future release. ' 'Use the Injector class instead' ), DeprecationWarning ) def decorator(function): @wraps(function) def wrapper(*args, **kwargs): signature = inspect.signature(function) params = signature.parameters if not params: return function(*args, **kwargs) for name, param in params.items(): if param.kind not in (param.KEYWORD_ONLY, param.POSITIONAL_OR_KEYWORD): continue if name in kwargs: # Manual override, ignore it continue try: resolved_name = overridden_names.get(name, name) kwargs[name] = manager.get_value(resolved_name) except KeyError: pass return function(*args, **kwargs) return wrapper if function: return decorator(function) else: return decorator" 2687,"def register(self, func, singleton=False, threadlocal=False, name=None): """""" Register a dependency function """""" func._giveme_singleton = singleton func._giveme_threadlocal = threadlocal if name is None: name = func.__name__ self._registered[name] = func return func" 2688,"def get_value(self, name): """""" Get return value of a dependency factory or a live singleton instance. """""" factory = self._registered.get(name) if not factory: raise KeyError('Name not registered') if factory._giveme_singleton: if name in self._singletons: return self._singletons[name] self._singletons[name] = factory() return self._singletons[name] elif factory._giveme_threadlocal: if hasattr(self._threadlocals, name): return getattr(self._threadlocals, name) setattr(self._threadlocals, name, factory()) return getattr(self._threadlocals, name) return factory()" 2689,"def execute(filelocation, args, outdir, filters=None, executable='msConvert.exe'): """"""Execute the msConvert tool on Windows operating systems. :param filelocation: input file path :param args: str() or list(), msConvert arguments for details see the msConvert help below. :param outdir: path of the output directory :param filters: str() or list(), specify additional parameters and filters, for details see the msConvert help below. :param executable: must specify the complete file path of the msConvert.exe if its location is not in the ``PATH`` environment variable. 
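A simplified, self-contained sketch of the factory-registry pattern behind register/get_value above; this is not giveme's actual API, and the threadlocal branch is omitted.

class TinyRegistry:
    def __init__(self):
        self._factories = {}
        self._singletons = {}

    def register(self, func, singleton=False, name=None):
        # Store the factory under its name and remember the singleton flag.
        name = name or func.__name__
        func._singleton = singleton
        self._factories[name] = func
        return func

    def get_value(self, name):
        # Raises KeyError if the name was never registered.
        factory = self._factories[name]
        if factory._singleton:
            if name not in self._singletons:
                self._singletons[name] = factory()
            return self._singletons[name]
        return factory()

registry = TinyRegistry()
registry.register(lambda: object(), singleton=True, name='db_connection')
assert registry.get_value('db_connection') is registry.get_value('db_connection')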
"""""" procArgs = [executable, filelocation] procArgs.extend(aux.toList(args)) if filters is not None: for arg in aux.toList(filters): procArgs.extend(['--filter', arg]) procArgs.extend(['-o', outdir]) ## run it ## proc = subprocess.Popen(procArgs, stderr=subprocess.PIPE) ## But do not wait till netstat finish, start displaying output immediately ## while True: out = proc.stderr.read(1) if out == '' and proc.poll() != None: break if out != '': sys.stdout.write(out) sys.stdout.flush()" 2690,"def execute(filelocation, outformat, outdir, log=False, executable='RawConverter.exe'): """"""Execute the msConvert tool on Windows operating systems. :param filelocation: input file path :param outformat: output format, must be one of the following: ms1, ms2, ms3, mgf :param outdir: path of the output directory :param log: #TODO :param executable: must specify the complete file path of the RawConverter.exe if its location is not in the ``PATH`` environment variable. .. note: Specifying the complete path to the executable is probably always necessary because RawConverter looks for the file ""AveragineTable.txt"" in the working directory. """""" assert outformat in ['ms1', 'ms2', 'ms3', 'mgf'] args = [executable, filelocation, '--'+outformat, '--out_folder', outdir, '--select_mono_prec'] ## run it ## proc = subprocess.Popen(args, cwd=os.path.dirname(executable), stderr=subprocess.PIPE) ## But do not wait till netstat finish, start displaying output immediately ## while True: out = proc.stderr.read(1) if out == '' and proc.poll() != None: break if out != '': sys.stdout.write(out) sys.stdout.flush()" 2691,"def trace(fun, *a, **k): """""" define a tracer for a rule function for log and statistic purposes """""" @wraps(fun) def tracer(*a, **k): ret = fun(*a, **k) print('trace:fun: %s\n ret=%s\n a=%s\nk%s\n' % (str(fun), str(ret), str(a), str(k))) return ret return tracer" 2692,"def timer(fun, *a, **k): """""" define a timer for a rule function for log and statistic purposes """""" @wraps(fun) def timer(*a, **k): start = arrow.now() ret = fun(*a, **k) end = arrow.now() print('timer:fun: %s\n start:%s,end:%s, took [%s]' % ( str(fun), str(start), str(end), str(end - start))) return ret return timer" 2693,"def get_function(self, fun=None): """"""get function as RuleFunction or return a NoRuleFunction function"""""" sfun = str(fun) self.say('get_function:' + sfun, verbosity=100) if not fun: return NoRuleFunction() # dummy to execute via no_fun if sfun in self._rule_functions: return self._rule_functions[sfun] else: self.add_function(name=sfun, fun=self.rule_function_not_found(fun)) self.cry('fun(%s) not found, returning dummy' % (sfun), verbosity=10) if sfun in self._rule_functions: return self._rule_functions[sfun] else: self.rule_function_not_found(fun)" 2694,"def add_function(self, fun=None, name=None, fun_type=FUN_TYPE): """"""actually replace function"""""" if not name: if six.PY2: name = fun.func_name else: name = fun.__name__ self.say('adding fun(%s)' % name, verbosity=50) self.say('adding fun_type:%s' % fun_type, verbosity=50) if self.function_exists(name): self.cry('overwriting :fun(%s)' % name, verbosity=10) self.say('added :' + name, verbosity=10) self._rule_functions[name] = RuleFunction(name, fun, fun_type) return True" 2695,"def function_exists(self, fun): """""" get function's existense """""" res = fun in self._rule_functions self.say('function exists:' + str(fun) + ':' + str(res), verbosity=10) return res" 2696,"def rule_function_not_found(self, fun=None): """""" any function that does not exist 
will be added as a dummy function that will gather inputs for easing into the possible future implementation """""" sfun = str(fun) self.cry('rule_function_not_found:' + sfun) def not_found(*a, **k): return(sfun + ':rule_function_not_found', k.keys()) return not_found" 2697,"def get_elem_type(elem): """""" Get elem type of soup selection :param elem: a soup element """""" elem_type = None if isinstance(elem, list): if elem[0].get(""type"") == ""radio"": elem_type = ""radio"" else: raise ValueError(u""Unknown element type: {}"".format(elem)) elif elem.name == ""select"": elem_type = ""select"" elif elem.name == ""input"": elem_type = elem.get(""type"") else: raise ValueError(u""Unknown element type: {}"".format(elem)) # To be removed assert elem_type is not None return elem_type" 2698,"def get_option_value(elem): """""" Get the value attribute, or if it doesn't exist the text content. <option value=""foo"">bar</option> => ""foo"" <option>bar</option> => ""bar"" :param elem: a soup element """""" value = elem.get(""value"") if value is None: value = elem.text.strip() if value is None or value == """": msg = u""Error parsing value from {}."".format(elem) raise ValueError(msg) return value" 2699,"def parse_value(val): """""" Parse values from html """""" val = val.replace(""%"", "" "")\ .replace("" "","""")\ .replace("","", ""."")\ .replace(""st"","""").strip() missing = [""Ejdeltagit"", ""N/A""] if val in missing: return val elif val == """": return None return float(val)" 2700,"def _get_html(self, url): """""" Get html from url """""" self.log.info(u""/GET {}"".format(url)) r = requests.get(url) if hasattr(r, 'from_cache'): if r.from_cache: self.log.info(""(from cache)"") if r.status_code != 200: throw_request_err(r) return r.content" 2701,"def _get_json(self, url): """""" Get json from url """""" self.log.info(u""/GET "" + url) r = requests.get(url) if hasattr(r, 'from_cache'): if r.from_cache: self.log.info(""(from cache)"") if r.status_code != 200: throw_request_err(r) return r.json()" 2702,"def regions(self): """""" Get a list of all regions """""" regions = [] elem = self.dimensions[""region""].elem for option_elem in elem.find_all(""option""): region = option_elem.text.strip() regions.append(region) return regions" 2703,"def _get_region_slug(self, id_or_label): """""" Get the regional slug to be used in url ""Norrbotten"" => ""Norrbottens"" :param id_or_label: Id or label of region """""" #region = self.dimensions[""region""].get(id_or_label) region = id_or_label slug = region\ .replace(u"" "",""-"")\ .replace(u""ö"",""o"")\ .replace(u""Ö"",""O"")\ .replace(u""ä"",""a"")\ .replace(u""å"",""a"") + ""s"" EXCEPTIONS = { ""Jamtland-Harjedalens"": ""Jamtlands"", ""Rikets"": ""Sveriges"", } if slug in EXCEPTIONS: slug = EXCEPTIONS[slug] return slug" 2704,"def _parse_result_page(self, url, payload, only_region=False): """""" Get data from a result page :param url: url to query :param payload: payload to pass :return: a dictlist with data """""" data = [] try: if only_region: html = self.scraper._get_html(url) else: html = self.scraper._post_html(url, payload=payload) except RequestException500: self.scraper.log.warning(u""Status code 500 on {} with {}"".format(url, payload)) return None current_selection = self._get_current_selection(html) table = Datatable(html) data = [] for row in table.data: region_or_unit_id, region_or_unit_label = row[""region_or_unit""] if region_or_unit_label in self.regions: row[""region""] = region_or_unit_label row[""unit""] = None else: row[""region""] = None 
row[""unit""] = region_or_unit_label value = row[""value""] row.pop(""value"", None) row.pop(""region_or_unit"", None) for dim in self.dimensions: if dim.id not in row: row[dim.id] = current_selection[dim.id][1] # gets label data.append(Result(value, row)) return data" 2705,"def elem_type(self): """""" :returns: ""select""|""radio""|""checkbox"" """""" if not hasattr(self, ""_elem_type""): self._elem_type = get_elem_type(self.elem) return self._elem_type" 2706,"def default_value(self): """""" The default category when making a query """""" if not hasattr(self, ""_default_value""): if self.elem_type == ""select"": try: # Get option marked ""selected"" def_value = get_option_value(self.elem.select_one(""[selected]"")) except AttributeError: # ...or if that one doesen't exist get the first option def_value = get_option_value(self.elem.select_one(""option"")) elif self.elem_type == ""checkbox"": def_value = self.elem.get(""value"") elif self.elem_type == ""radio"": def_value = [x for x in self.elem if x.has_attr(""checked"")][0].get(""value"") self._default_value = def_value assert def_value is not None return self._default_value" 2707,"def measures(self): """""" Get a list of the measuers of this datatable Measures can be ""Antal Besök inom 7 dagar"", ""Måluppfyllelse vårdgarantin"", etc """""" if self._measures == None: self._measures = get_unique([x[""measure""] for x in self.data]) return self._measures" 2708,"def _parse_values(self): """""" Get values """""" data = [] if self.has_tabs: def _parse_tab_text(tab): # Annoying html in tabs if tab.select_one("".visible_normal""): return tab.select_one("".visible_normal"").text else: return tab.text sub_table_ids = [_parse_tab_text(x) for x in self.soup.select("".table_switch li"")] sub_tables = self.soup.select("".dataTables_wrapper"") assert len(sub_tables) == len(sub_table_ids) assert len(sub_tables) > 0 for measure, table in zip(sub_table_ids, sub_tables): if self.has_horizontal_scroll: _data = self._parse_horizontal_scroll_table(table) for region, col, value in _data: data.append({ ""region_or_unit"": region, ""select_period"": col, # Hardcode warning! 
""measure"": measure, }) else: if self.has_horizontal_scroll: raise NotImplementedError() if self.has_vertical_scroll: table = self.soup.select_one(""#DataTables_Table_0_wrapper"") _data = self._parse_vertical_scroll_table(table) else: table = self.soup.select("".chart.table.scrolling"")[-1] _data = self._parse_regular_table(table) for region, measure, value in _data: data.append({ ""region_or_unit"": region, ""measure"": measure, ""value"": value }) return data" 2709,"def _parse_horizontal_scroll_table(self, table_html): """""" Get list of dicts from horizontally scrollable table """""" row_labels = [parse_text(x.text) for x in table_html.select("".DTFC_LeftBodyWrapper tbody tr"")] row_label_ids = [None] * len(row_labels) cols = [parse_text(x.text) for x in table_html.select("".dataTables_scrollHead th"")] value_rows = table_html.select("".dataTables_scrollBody tbody tr"") values = [] for row_i, value_row in enumerate(value_rows): row_values = [parse_value(x.text) for x in value_row.select(""td"")] values.append(row_values) sheet = Sheet(zip(row_label_ids, row_labels), cols, values) return sheet.long_format" 2710,"def as_dictlist(self): """""" Returns a dictlist with values [ { ""row"": ""row_a"", ""col"": ""col_a"", ""value"": 1, } ] """""" data = [] for row_i, row in enumerate(self.row_index): for col_i, col in enumerate(self.col_index): value = self.values_by_row[row_i][col_i] data.append({ ""row"": row, ""col"": col, ""value"": value, }) return data" 2711,"def is_json_file(filename, show_warnings = False): """"""Check configuration file type is JSON Return a boolean indicating wheather the file is JSON format or not """""" try: config_dict = load_config(filename, file_type = ""json"") is_json = True except: is_json = False return(is_json)" 2712,"def is_yaml_file(filename, show_warnings = False): """"""Check configuration file type is yaml Return a boolean indicating wheather the file is yaml format or not """""" if is_json_file(filename): return(False) try: config_dict = load_config(filename, file_type = ""yaml"") if(type(config_dict) == str): is_yaml = False else: is_yaml = True except: is_yaml = False return(is_yaml)" 2713,"def is_ini_file(filename, show_warnings = False): """"""Check configuration file type is INI Return a boolean indicating wheather the file is INI format or not """""" try: config_dict = load_config(filename, file_type = ""ini"") if config_dict == {}: is_ini = False else: is_ini = True except: is_ini = False return(is_ini)" 2714,"def is_toml_file(filename, show_warnings = False): """"""Check configuration file type is TOML Return a boolean indicating wheather the file is TOML format or not """""" if is_yaml_file(filename): return(False) try: config_dict = load_config(filename, file_type = ""toml"") is_toml = True except: is_toml = False return(is_toml)" 2715,"def get_config_type(filename): """"""Get configuration file type:[JSON, YAML, INI, TOML] Return the configuration filetype: json, yaml, ini, toml or False """""" if is_json_file(filename): return(""json"") elif is_ini_file(filename): return(""ini"") elif is_yaml_file(filename): return(""yaml"") elif is_toml_file(filename): return(""toml"") else: return(False)" 2716,"def load(keystorerc=None, keystore=None, copyto=None, verbose=False): '''decrypt and write out a keystore''' config = None if keystorerc: config = config_reader.read(keystorerc) if not config: print('No configuration found.', file=sys.stderr) sys.exit(-1) elif keystore: config = { 'keystore': keystore, 'files': [] } if 'verbose' in config and 
config['verbose']: verbose = True keystore_path = None if 'keystore' not in config: print('.keystorerc needs to specify a keystore file path.', file=sys.stderr) sys.exit(-1) elif not pathlib.Path(os.path.expanduser(config['keystore'])).is_file(): # If keystore file does not exist, nothing to load and exits print('keystore does not exist: {}'.format(config['keystore']), file=sys.stderr) sys.exit(-1) else: keystore_path = config['keystore'] if copyto and not pathlib.Path(os.path.expanduser(copyto)).is_dir(): print('The folder to copy to does not exist: {}'.format(copyto), file=sys.stderr) sys.exit(-1) # load and attempt to unencrypt keystore by passphrase encrypted_keystore = None try: with open(os.path.expanduser(keystore_path), 'rb') as keystore_file: encrypted_keystore = keystore_file.read() if verbose: print('Located encrypted keystore at {}.'.format(keystore_path)) decrypted = False decrypted_keystore = None while not decrypted: try: passphrase = getpass.getpass('Please enter the passphrase: ') decrypted_keystore = simplecrypt.decrypt(passphrase, encrypted_keystore) decrypted = True except simplecrypt.DecryptionException as err: print('Invalid passphrase. Please try again.') except UnicodeDecodeError as err: print('Keyring cannot be decrypted.\nError: {}'.format(err), file=sys.stderr) sys.exit(-1) except OSError as err: print('keystore cannot be opened: {}'.format(err), file=sys.stderr) sys.exit(-1) # attempt to uncompress the keystore decompressed_keystore = gzip.decompress(decrypted_keystore) # attempt to unserialise the keystore try: keystore = json.loads(decompressed_keystore) except json.decoder.JSONDecodeError as err: print('Please contact the author about this as this is a serious problem. {}'.format(err), file=sys.stderr) sys.exit(-1) if verbose: print('Keystore decrypted successfully.') count = 0 for filepath, key in keystore.items(): expanded_filepath = os.path.expanduser(filepath) if copyto: expanded_filepath = os.path.join(copyto, os.path.basename(filepath)) confirmed = False overwrite = False if not pathlib.Path(expanded_filepath).exists(): confirmed = True overwrite = True while not confirmed: overwrite = input('File {} exists. Are you sure you want to overwrite? (y)/n: '.format(expanded_filepath)) if overwrite == '' or overwrite == 'y' or overwrite == 'Y': overwrite = True confirmed = True elif overwrite == 'n' or overwrite == 'N': overwrite = False confirmed = True else: print('Please enter y or n.') if not overwrite: continue # key ready to be created if verbose: print('Writing key to {} ...'.format(expanded_filepath)) try: with open(expanded_filepath, 'wb') as keyfile: b64_decoded = base64.decodebytes(key.encode('utf-8')) keyfile.write(b64_decoded) count += 1 except OSError as err: print('File system threw an error: {}'.format(err), file=sys.stderr) print('Skipping {}'.format(expanded_filepath)) if verbose: print('Keystore restored {} keys.'.format(count))" 2717,"def _collect_settings(self, apps): """""" Iterate over given apps or INSTALLED_APPS and collect the content of each's settings file, which is expected to be in JSON format. 
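A stdlib-only sketch of the unpack path used by load above once the keystore blob has been decrypted: gzip-decompress, json-load, then base64-decode each stored key. The file path in the round-trip example is made up.

import base64
import gzip
import json

def unpack_keystore(decrypted_blob):
    # decrypted_blob: gzip-compressed JSON mapping of {file_path: base64_key_string}.
    keystore = json.loads(gzip.decompress(decrypted_blob))
    return {path: base64.decodebytes(key.encode('utf-8'))
            for path, key in keystore.items()}

blob = gzip.compress(json.dumps(
    {'~/.ssh/id_rsa': base64.encodebytes(b'secret').decode()}).encode())
print(unpack_keystore(blob))  # {'~/.ssh/id_rsa': b'secret'}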
"""""" contents = {} if apps: for app in apps: if app not in settings.INSTALLED_APPS: raise CommandError(""Application '{0}' not in settings.INSTALLED_APPS"".format(app)) else: apps = settings.INSTALLED_APPS for app in apps: module = import_module(app) for module_dir in module.__path__: json_file = os.path.abspath(os.path.join(module_dir, self.json_file)) if os.path.isfile(json_file): with open(json_file, 'r') as fp: contents[app] = json.load(fp) return contents" 2718,"def required_unique(objects, key): """""" A pyrsistent invariant which requires all objects in the given iterable to have a unique key. :param objects: The objects to check. :param key: A one-argument callable to compute the key of an object. :return: An invariant failure if any two or more objects have the same key computed. An invariant success otherwise. """""" keys = {} duplicate = set() for k in map(key, objects): keys[k] = keys.get(k, 0) + 1 if keys[k] > 1: duplicate.add(k) if duplicate: return (False, u""Duplicate object keys: {}"".format(duplicate)) return (True, u"""")" 2719,"def item_by_name(self, name): """""" Find an item in this collection by its name metadata. :param unicode name: The name of the object for which to search. :raise KeyError: If no object matching the given name is found. :return IObject: The object with the matching name. """""" for obj in self.items: if obj.metadata.name == name: return obj raise KeyError(name)" 2720,"def _fetch_dimensions(self, dataset): """""" Declaring available dimensions like this is not mandatory, but nice, especially if they differ from dataset to dataset. If you are using a built in datatype, you can specify the dialect you are expecting, to have values normalized. This scraper will look for Swedish month names (e.g. 'Januari'), but return them according to the Statscraper standard ('january'). """""" yield Dimension(u""date"", label=""Day of the month"") yield Dimension(u""month"", datatype=""month"", dialect=""swedish"") yield Dimension(u""year"", datatype=""year"")" 2721,"def _dct_from_mro(cls: type, attr_name: str) -> dict: """"""""Get a merged dictionary from `cls` bases attribute `attr_name`. MRO defines importance (closest = strongest)."""""" d = {} for c in reversed(cls.mro()): d.update(getattr(c, attr_name, {})) return d" 2722,"def _sorted_items(mapping: typing.Mapping) -> typing.Generator: """"""Given a mapping where values are iterables, yield items whose values contained references are not used as keys first: Example: >>> dct = {'two': ('two', 'one', 'foo'), 'one': ('hi', 'six', 'net'), 'six': ('three', 'four'), 'foo': ['bar']} >>> for k, v in _sorted_items(dct): ... print(k, v) ... six ('three', 'four') foo ['bar'] one ('hi', 'six', 'net') two ('two', 'one', 'foo') """""" to_yield = set(mapping) while to_yield: for key, values in mapping.items(): if key not in to_yield or (to_yield - {key} & set(values)): # other keys left to yield before this one continue yield key, values to_yield.remove(key)" 2723,"def _init_name_core(self, name: str): """"""Runs whenever a new instance is initialized or `sep` is set."""""" self.__regex = re.compile(rf'^{self._pattern}$') self.name = name" 2724,"def values(self) -> typing.Dict[str, str]: """"""The field values of this object's name as a dictionary in the form of {field: value}."""""" return {k: v for k, v in self._items if v is not None}" 2725,"def get_name(self, **values) -> str: """"""Get a new name string from this object's name values. 
:param values: Variable keyword arguments where the **key** should refer to a field on this object that will use the provided **value** to build the new name. """""" if not values and self.name: return self.name if values: # if values are provided, solve compounds that may be affected for ck, cvs in _sorted_items(self.compounds): if ck in cvs and ck in values: # redefined compound name to outer scope e.g. fifth = (fifth, sixth) continue comp_values = [values.pop(cv, getattr(self, cv)) for cv in cvs] if None not in comp_values: values[ck] = ''.join(rf'{v}' for v in comp_values) return self._get_nice_name(**values)" 2726,"def cast_config(cls, config: typing.Mapping[str, str]) -> typing.Dict[str, str]: """"""Cast `config` to grouped regular expressions."""""" return {k: cls.cast(v, k) for k, v in config.items()}" 2727,"def appdataPath(appname): """""" Returns the generic location for storing application data in a cross platform way. :return <str> """""" # determine Mac OS appdata location if sys.platform == 'darwin': # credit: MHL try: from AppKit import NSSearchPathForDirectoriesInDomains # NSApplicationSupportDirectory = 14 # NSUserDomainMask = 1 # True for expanding the tilde into a fully qualified path basepath = NSSearchPathForDirectoriesInDomains(14, 1, True) return os.path.join(basepath[0], appname) except (ImportError, AttributeError, IndexError): basepath = os.path.expanduser(""~/Library/Application Support"") return os.path.join(basepath, appname) # determine Windows OS appdata location elif sys.platform == 'win32': return os.path.join(os.environ.get('APPDATA'), appname) # determine Linux OS appdata location else: return os.path.expanduser(os.path.join('~', '.' + appname))" 2728,"def _execute_primitives(self, commands): """"""Run a list of executable primitives on this controller, and distribute the returned data to the associated TDOPromises. Args: commands: A list of Executable Primitives to be run in order. """""" for p in commands: if self._scanchain and self._scanchain._debug: print("" Executing"", p)#pragma: no cover p.execute(self)" 2729,"def pretty_version_text(): """"""Return pretty version text listing all plugins."""""" version_lines = [""dtool, version {}"".format(dtool_version)] version_lines.append(""\nBase:"") version_lines.append(""dtoolcore, version {}"".format(dtoolcore.__version__)) version_lines.append(""dtool-cli, version {}"".format(__version__)) # List the storage broker packages. version_lines.append(""\nStorage brokers:"") for ep in iter_entry_points(""dtool.storage_brokers""): package = ep.module_name.split(""."")[0] dyn_load_p = __import__(package) version = dyn_load_p.__version__ storage_broker = ep.load() version_lines.append( ""{}, {}, version {}"".format( storage_broker.key, package.replace(""_"", ""-""), version)) # List the plugin packages. 
modules = [ep.module_name for ep in iter_entry_points(""dtool.cli"")] packages = set([m.split(""."")[0] for m in modules]) version_lines.append(""\nPlugins:"") for p in packages: dyn_load_p = __import__(p) version_lines.append( ""{}, version {}"".format( p.replace(""_"", ""-""), dyn_load_p.__version__)) return ""\n"".join(version_lines)" 2730,"def dtool(debug): """"""Tool to work with datasets."""""" level = logging.WARNING if debug: level = logging.DEBUG logging.basicConfig( format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=level)" 2731,"def add_nic(self, instance_id, net_id): """"""Add a Network Interface Controller"""""" #TODO: upgrade with port_id and fixed_ip in future self.client.servers.interface_attach( instance_id, None, net_id, None) return True" 2732,"def delete_nic(self, instance_id, port_id): """"""Delete a Network Interface Controller"""""" self.client.servers.interface_detach(instance_id, port_id) return True" 2733,"def list_nic(self, instance_id): """"""List all Network Interface Controller """""" #NOTE: interfaces a list of novaclient.v2.servers.Server interfaces = self.client.servers.interface_list(instance_id) return interfaces" 2734,"def associate_public_ip(self, instance_id, public_ip_id, private_ip=None): """"""Associate a external IP"""""" floating_ip = self.client.floating_ips.get(public_ip_id) floating_ip = floating_ip.to_dict() address = floating_ip.get('ip') self.client.servers.add_floating_ip(instance_id, address, private_ip) return True" 2735,"def disassociate_public_ip(self, public_ip_id): """"""Disassociate a external IP"""""" floating_ip = self.client.floating_ips.get(public_ip_id) floating_ip = floating_ip.to_dict() instance_id = floating_ip.get('instance_id') address = floating_ip.get('ip') self.client.servers.remove_floating_ip(instance_id, address) return True" 2736,"def split(self, bitindex): """"""Split a promise into two promises at the provided index. A common operation in JTAG is reading/writing to a register. During the operation, the TMS pin must be low, but during the writing of the last bit, the TMS pin must be high. Requiring all reads or writes to have full arbitrary control over the TMS pin is unrealistic. Splitting a promise into two sub promises is a way to mitigate this issue. The final read bit is its own subpromise that can be associated with a different primitive than the 'rest' of the subpromise. Returns: Two TDOPromise instances: the 'Rest' and the 'Tail'. The 'Rest' is the first chunk of the original promise. The 'Tail' is a single bit sub promise for the final bit in the operation If the 'Rest' would have a length of 0, None is returned """""" if bitindex < 0: raise ValueError(""bitindex must be larger or equal to 0."") if bitindex > len(self): raise ValueError( ""bitindex larger than the array's size. "" ""Len: %s; bitindex: %s""%(len(self), bitindex)) if bitindex == 0: return None, self if bitindex == len(self): return self, None left = TDOPromise(self._chain, self._bitstart, bitindex, _parent=self) #Starts at 0 because offset is for incoming data from #associated primitive, not location in parent. right = TDOPromise(self._chain, 0, len(self)-bitindex, _parent=self) self._components = [] self._addsub(left, 0) self._addsub(right, bitindex) return left, right" 2737,"def _fulfill(self, bits, ignore_nonpromised_bits=False): """"""Supply the promise with the bits from its associated primitive's execution. 
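A small sketch of the debug-flag logging setup used by the dtool entry point above, runnable on its own.

import logging

def configure_logging(debug=False):
    # WARNING by default, DEBUG when the debug flag is passed.
    level = logging.DEBUG if debug else logging.WARNING
    logging.basicConfig(
        format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
        level=level)

configure_logging(debug=True)
logging.getLogger('dtool').debug('debug logging enabled')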
The fulfillment process must walk the promise chain backwards until it reaches the original promise and can supply the final value. The data that comes in can either be all a bit read for every bit written by the associated primitive, or (if the primitive supports it), only the bits that are used by promises. The ignore_nonpromised_bits flag specifies which format the incoming data is in. Args: bits: A bitarray (or compatible) containing the data read from the jtag controller's TDO pin. ignore_nonpromised_bits: A boolean specifying if only promised bits are being returned (and thus the 2nd index of the promise must be used for slicing the incoming data). """""" if self._allsubsfulfilled(): if not self._components: if ignore_nonpromised_bits: self._value = bits[self._bitstartselective: self._bitstartselective + self._bitlength] else: self._value = bits[self._bitstart:self._bitend] else: self._value = self._components[0][0]._value for sub, offset in self._components[1:]: self._value += sub._value if self._parent is not None: self._parent._fulfill(None)" 2738,"def makesubatoffset(self, bitoffset, *, _offsetideal=None): """"""Create a copy of this promise with an offset, and use it as this promise's child. If this promise's primitive is being merged with another primitive, a new subpromise may be required to keep track of the new offset of data coming from the new primitive. Args: bitoffset: An integer offset of the data in the new primitive. _offsetideal: integer offset of the data if terms of bits actually used for promises. Used to calculate the start index to read if the associated primitive has arbitrary TDO control. Returns: A TDOPromise registered with this promise, and with the correct offset. """""" if _offsetideal is None: _offsetideal = bitoffset if bitoffset is 0: return self newpromise = TDOPromise( self._chain, self._bitstart + bitoffset, self._bitlength, _parent=self, bitstartselective=self._bitstartselective+_offsetideal ) self._addsub(newpromise, 0) return newpromise" 2739,"def add(self, promise, bitoffset, *, _offsetideal=None): """"""Add a promise to the promise collection at an optional offset. Args: promise: A TDOPromise to add to this collection. bitoffset: An integer offset for this new promise in the collection. _offsetideal: An integer offset for this new promise in the collection if the associated primitive supports arbitrary TDO control. """""" #This Assumes that things are added in order. #Sorting or checking should likely be added. if _offsetideal is None: _offsetideal = bitoffset if isinstance(promise, TDOPromise): newpromise = promise.makesubatoffset( bitoffset, _offsetideal=_offsetideal) self._promises.append(newpromise) elif isinstance(promise, TDOPromiseCollection): for p in promise._promises: self.add(p, bitoffset, _offsetideal=_offsetideal)" 2740,"def split(self, bitindex): """"""Split a promise into two promises. A tail bit, and the 'rest'. Same operation as the one on TDOPromise, except this works with a collection of promises and splits the appropriate one. Returns: The 'Rest' and the 'Tail'. The 'Rest' is TDOPromiseCollection containing the first chunk of the original TDOPromiseCollection. 
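A toy illustration (not the library's promise classes) of how a read-back bit buffer can be carved up by (start, length) ranges, which is what _fulfill above does with bits[self._bitstart:self._bitend].

def fulfill(bits, promises):
    # bits: sequence of 0/1 values; promises: list of (name, start, length) tuples.
    return {name: bits[start:start + length] for name, start, length in promises}

bits = [1, 0, 1, 1, 0, 0, 1, 0]
print(fulfill(bits, [('idcode', 0, 4), ('status', 4, 4)]))
# {'idcode': [1, 0, 1, 1], 'status': [0, 0, 1, 0]}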
The 'Tail' is a single bit sub promise for the final bit in the operation If the 'Rest' would have a length of 0, None is returned """""" if bitindex < 0: raise ValueError(""bitindex must be larger or equal to 0."") if bitindex == 0: return None, self lastend = 0 split_promise = False for splitindex, p in enumerate(self._promises): if bitindex in range(lastend, p._bitstart): split_promise = False break if bitindex in range(p._bitstart, p._bitend): if bitindex-p._bitstart == 0: split_promise = False else: split_promise = True break lastend = p._bitend else: raise Exception(""Should be impossible"") processed_left = TDOPromiseCollection(self._chain) processed_right = TDOPromiseCollection(self._chain) if split_promise: left, right = p.split(bitindex-p._bitstart) for i in range(splitindex): processed_left.add(self._promises[i], 0) processed_left.add(left, 0) processed_right.add(right, 0) for tmpprim in self._promises[splitindex+1:]: processed_right.add(tmpprim, -bitindex) return processed_left, processed_right else: for i in range(splitindex): processed_left.add(self._promises[i], 0) for i in range(splitindex, len(self._promises)): processed_right.add(self._promises[i], -bitindex) return processed_left, processed_right" 2741,"def makesubatoffset(self, bitoffset, *, _offsetideal=None): """"""Create a copy of this PromiseCollection with an offset applied to each contained promise and register each with their parent. If this promise's primitive is being merged with another primitive, a new subpromise may be required to keep track of the new offset of data coming from the new primitive. Args: bitoffset: An integer offset of the data in the new primitive. _offsetideal: An integer offset to use if the associated primitive supports arbitrary TDO control. Returns: A new TDOPromiseCollection registered with this promise collection, and with the correct offset. 
"""""" if _offsetideal is None: _offsetideal = bitoffset if bitoffset is 0: return self newpromise = TDOPromiseCollection(self._chain) for promise in self._promises: newpromise.add(promise, bitoffset, _offsetideal=_offsetideal) return newpromise" 2742,"def call_jira_rest(self, url, user, password, method=""GET"", data=None): """""" Make JIRA REST call :param data: data for rest call :param method: type of call: GET or POST for now :param url: url to call :param user: user for authentication :param password: password for authentication :return: """""" headers = {'content-type': 'application/json'} self._logger.debug('Connecting to Jira to call the following REST method {0}'.format(url)) if method == ""GET"": response = requests.get(self.base_url + url, auth=requests.auth.HTTPBasicAuth(user, password)) elif method == ""POST"": response = requests.post(self.base_url + url, data=json.dumps(data), auth=requests.auth.HTTPBasicAuth(user, password), headers=headers) else: raise ValueError('method argument supports GET or POST values only') self._logger.debug('REST call successfully finalised') return response.json()" 2743,"def cli(ctx, stage): """"""Show transformer rules"""""" if not ctx.bubble: ctx.say_yellow('There is no bubble present, ' + 'will not show any transformer rules') raise click.Abort() path = ctx.home + '/' RULES = None ctx.say('Stage:'+stage, verbosity=10) if stage in STAGES: if stage in ctx.cfg.CFG: STAGE = ctx.cfg.CFG[stage] ctx.say('Stage found:', stuff=STAGE,verbosity=100) if 'TRANSFORM' in STAGE: TRANSFORM = STAGE.TRANSFORM ctx.say('Transform found:', stuff=TRANSFORM, verbosity=100) if 'RULES' in TRANSFORM: RULES = TRANSFORM.RULES ctx.say('Rules found:', stuff=RULES, verbosity=100) if not RULES: ctx.say_red('There is no TRANSFORM.RULES in stage:' + stage) ctx.say_yellow('please check configuration in ' + ctx.home + '/config/config.yaml') raise click.Abort() if type(RULES) == str and RULES.endswith('.bubble'): ctx.say('loading rules',verbosity=10) rules = get_bubble(ctx, path + RULES) rule_type = 'bubble' transformer = Transformer(rules=rules, rule_type=rule_type, bubble_path=path, verbose=ctx.get_verbose()) rules = transformer._rules.get_rules() ctx.say('current number of rules:' + str(len(rules)), verbosity=1) for r in rules: ctx.say('rule: ' + str(r), verbosity=1) ctx.gbc.say('rules: ', stuff=rules, verbosity=100) else: ctx.say('no rules!') return True" 2744,"def connectExec(connection, protocol, commandLine): """"""Connect a Protocol to a ssh exec session """""" deferred = connectSession(connection, protocol) @deferred.addCallback def requestSubsystem(session): return session.requestExec(commandLine) return deferred" 2745,"def connectShell(connection, protocol): """"""Connect a Protocol to a ssh shell session """""" deferred = connectSession(connection, protocol) @deferred.addCallback def requestSubsystem(session): return session.requestShell() return deferred" 2746,"def connectSubsystem(connection, protocol, subsystem): """"""Connect a Protocol to a ssh subsystem channel """""" deferred = connectSession(connection, protocol) @deferred.addCallback def requestSubsystem(session): return session.requestSubsystem(subsystem) return deferred" 2747,"def connectSession(connection, protocol, sessionFactory=None, *args, **kwargs): """"""Open a SSHSession channel and connect a Protocol to it @param connection: the SSH Connection to open the session channel on @param protocol: the Protocol instance to connect to the session @param sessionFactory: factory method to generate a 
SSHSession instance @note: :args: and :kwargs: are passed to the sessionFactory """""" factory = sessionFactory or defaultSessionFactory session = factory(*args, **kwargs) session.dataReceived = protocol.dataReceived session.closed = lambda: protocol.connectionLost(connectionDone) deferred = defer.Deferred() @deferred.addCallback def connectProtocolAndReturnSession(specificData): protocol.makeConnection(session) return session session.sessionOpen = deferred.callback session.openFailed = deferred.errback connection.openChannel(session) return deferred" 2748,"def defaultSessionFactory(env={}, usePTY=False, *args, **kwargs): """"""Create a SSHChannel of the given :channelType: type """""" return SSHSession(env, usePTY, *args, **kwargs)" 2749,"def requestExec(self, commandLine): """"""Request execution of :commandLine: and return a deferred reply. """""" data = common.NS(commandLine) return self.sendRequest('exec', data, wantReply=True)" 2750,"def requestSubsystem(self, subsystem): """"""Request a subsystem and return a deferred reply. """""" data = common.NS(subsystem) return self.sendRequest('subsystem', data, wantReply=True)" 2751,"def requestPty(self, term=None, rows=0, cols=0, xpixel=0, ypixel=0, modes=''): """"""Request allocation of a pseudo-terminal for a channel @param term: TERM environment variable value (e.g., vt100) @param columns: terminal width, characters (e.g., 80) @param rows: terminal height, rows (e.g., 24) @param width: terminal width, pixels (e.g., 640) @param height: terminal height, pixels (e.g., 480) @param modes: encoded terminal modes The dimension parameters are only informational. Zero dimension parameters are ignored. The columns/rows dimensions override the pixel dimensions (when nonzero). Pixel dimensions refer to the drawable area of the window. """""" #TODO: Needs testing! 
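# Illustrative stdlib-only packing of the SSH string format that common.NS is assumed
# to produce in requestExec/requestSubsystem above (a 4-byte big-endian length prefix
# followed by the bytes); this is a sketch, not Twisted's implementation.
import struct

def ns(value):
    data = value.encode('utf-8') if isinstance(value, str) else value
    return struct.pack('>L', len(data)) + data

print(ns('exec'))  # b'\x00\x00\x00\x04exec'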
term = term or os.environ.get('TERM', '') data = packRequest_pty_req(term, (rows, cols, xpixel, ypixel), modes) return self.sendRequest('pty-req', data)" 2752,"def requestEnv(self, env={}): """"""Send requests to set the environment variables for the channel """""" for variable, value in env.items(): data = common.NS(variable) + common.NS(value) self.sendRequest('env', data)" 2753,"def commandstr(command): """"""Convert command into string."""""" if command == CMD_MESSAGE_ERROR: msg = ""CMD_MESSAGE_ERROR"" elif command == CMD_MESSAGE_LIST: msg = ""CMD_MESSAGE_LIST"" elif command == CMD_MESSAGE_PASSWORD: msg = ""CMD_MESSAGE_PASSWORD"" elif command == CMD_MESSAGE_MP3: msg = ""CMD_MESSAGE_MP3"" elif command == CMD_MESSAGE_DELETE: msg = ""CMD_MESSAGE_DELETE"" elif command == CMD_MESSAGE_VERSION: msg = ""CMD_MESSAGE_VERSION"" elif command == CMD_MESSAGE_CDR_AVAILABLE: msg = ""CMD_MESSAGE_CDR_AVAILABLE"" elif command == CMD_MESSAGE_CDR: msg = ""CMD_MESSAGE_CDR"" else: msg = ""CMD_MESSAGE_UNKNOWN"" return msg" 2754,"def run(): """"""Command for reflection database objects"""""" parser = OptionParser( version=__version__, description=__doc__, ) parser.add_option( '-u', '--url', dest='url', help='Database URL (connection string)', ) parser.add_option( '-r', '--render', dest='render', default='dot', choices=['plantuml', 'dot'], help='Output format - plantuml or dot', ) parser.add_option( '-l', '--list', dest='list', action='store_true', help='Output database list of tables and exit', ) parser.add_option( '-i', '--include', dest='include', help='List of tables to include through "",""', ) parser.add_option( '-e', '--exclude', dest='exclude', help='List of tables to exlude through "",""', ) (options, args) = parser.parse_args() if not options.url: print('-u/--url option required') exit(1) engine = create_engine(options.url) meta = MetaData() meta.reflect(bind=engine) if options.list: print('Database tables:') tables = sorted(meta.tables.keys()) def _g(l, i): try: return tables[i] except IndexError: return '' for i in range(0, len(tables), 2): print(' {0}{1}{2}'.format( _g(tables, i), ' ' * (38 - len(_g(tables, i))), _g(tables, i + 1), )) exit(0) tables = set(meta.tables.keys()) if options.include: tables &= set(map(string.strip, options.include.split(','))) if options.exclude: tables -= set(map(string.strip, options.exclude.split(','))) desc = describe(map(lambda x: operator.getitem(meta.tables, x), tables)) print(getattr(render, options.render)(desc))" 2755,"def get_poll(poll_id): """""" Get a strawpoll. Example: poll = strawpy.get_poll('11682852') :param poll_id: :return: strawpy.Strawpoll object """""" return StrawPoll(requests.get('{api_url}/{poll_id}'.format(api_url=api_url, poll_id=poll_id)))" 2756,"def create_poll(title, options, multi=True, permissive=True, captcha=False, dupcheck='normal'): """""" Create a strawpoll. Example: new_poll = strawpy.create_poll('Is Python the best?', ['Yes', 'No']) :param title: :param options: :param multi: :param permissive: :param captcha: :param dupcheck: :return: strawpy.Strawpoll object """""" query = { 'title': title, 'options': options, 'multi': multi, 'permissive': permissive, 'captcha': captcha, 'dupcheck': dupcheck } return StrawPoll(requests.post('http://strawpoll.me/api/v2/polls', data=json.dumps(query)))" 2757,"def raise_status(response): """"""Raise an exception if the request did not return a status code of 200. 
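A hedged sketch of building the poll-creation payload shown in create_poll above; the strawpoll.me endpoint comes from the surrounding code and the actual HTTP call is left commented out.

import json

def build_poll_payload(title, options, multi=True, permissive=True,
                       captcha=False, dupcheck='normal'):
    return {
        'title': title,
        'options': options,
        'multi': multi,
        'permissive': permissive,
        'captcha': captcha,
        'dupcheck': dupcheck,
    }

payload = build_poll_payload('Is Python the best?', ['Yes', 'No'])
print(json.dumps(payload, sort_keys=True))
# requests.post('http://strawpoll.me/api/v2/polls', data=json.dumps(payload))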
:param response: Request response body """""" if response.status_code != 200: if response.status_code == 401: raise StrawPollException('Unauthorized', response) elif response.status_code == 403: raise StrawPollException('Forbidden', response) elif response.status_code == 404: raise StrawPollException('Not Found', response) else: response.raise_for_status()" 2758,"def refresh(self): """""" Refresh all class attributes. """""" strawpoll_response = requests.get('{api_url}/{poll_id}'.format(api_url=api_url, poll_id=self.id)) raise_status(strawpoll_response) self.status_code = strawpoll_response.status_code self.response_json = strawpoll_response.json() self.id = self.response_json['id'] self.title = self.response_json['title'] self.options = self.response_json['options'] self.votes = self.response_json['votes'] self.captcha = self.response_json['captcha'] self.dupcheck = self.response_json['dupcheck'] self.url = 'https://www.strawpoll.me/{id}'.format(id=self.id) self.results_url = 'https://www.strawpoll.me/{id}/r'.format(id=self.id)" 2759,"def write_json_file(self, path): """""" Serialize this VariantCollection to a JSON representation and write it out to a text file. """""" with open(path, ""w"") as f: f.write(self.to_json())" 2760,"def read_json_file(cls, path): """""" Construct a VariantCollection from a JSON file. """""" with open(path, 'r') as f: json_string = f.read() return cls.from_json(json_string)" 2761,"def dumps(data, escape=False, **kwargs): """"""A wrapper around `json.dumps` that can handle objects that json module is not aware. This function is aware of a list of custom serializers that can be registered by the API user, making it possible to convert any kind of object to types that the json library can handle. """""" if 'sort_keys' not in kwargs: kwargs['sort_keys'] = True converted = json.dumps(data, default=_converter, **kwargs) if escape: # We're escaping the whole dumped string here cause there's no (easy) # way to hook into the native json library and change how they process # values like strings, None objects and some other ""literal"" stuff. # # Also, we're not escaping quotes here cause they're escaped by the # native json library already. So, we just escape basic html entities, # like <, > and &; return cgi.escape(converted) return converted" 2762,"def deserialize(klass, data): """"""Helper function to access a method that creates objects of a given `klass` with the received `data`. """""" handler = DESERIALIZE_REGISTRY.get(klass) if handler: return handler(data) raise TypeError(""There is no deserializer registered to handle "" ""instances of '{}'"".format(klass.__name__))" 2763,"def _convert_from(data): """"""Internal function that will be hooked to the native `json.loads` Find the right deserializer for a given value, taking into account the internal deserializer registry. """""" try: module, klass_name = data['__class__'].rsplit('.', 1) klass = getattr(import_module(module), klass_name) except (ImportError, AttributeError, KeyError): # But I still haven't found what I'm looking for # # Waiting for three different exceptions here. KeyError will # raise if can't find the ""__class__"" entry in the json `data` # dictionary. ImportError happens when the module present in the # dotted name can't be resolved. Finally, the AttributeError # happens when we can find the module, but couldn't find the # class on it. return data return deserialize(klass, data['__value__'])" 2764,"def _converter(data): """"""Internal function that will be passed to the native `json.dumps`. 
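A minimal stdlib sketch of the serializer-registry idea used by dumps and _convert_from above: json.dumps(default=...) consults a registry keyed by type and wraps values in a __class__/__value__ envelope. The datetime.date handler is only an example entry.

import datetime
import json

REGISTRY = {datetime.date: lambda d: d.isoformat()}

def _converter(obj):
    handler = REGISTRY.get(obj.__class__)
    if handler:
        return {'__class__': '{}.{}'.format(obj.__class__.__module__,
                                            obj.__class__.__name__),
                '__value__': handler(obj)}
    raise TypeError(repr(obj) + ' is not JSON serializable')

print(json.dumps({'today': datetime.date(2020, 1, 1)}, default=_converter))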
This function uses the `REGISTRY` of serializers and try to convert a given instance to an object that json.dumps can understand. """""" handler = REGISTRY.get(data.__class__) if handler: full_name = '{}.{}'.format( data.__class__.__module__, data.__class__.__name__) return { '__class__': full_name, '__value__': handler(data), } raise TypeError(repr(data) + "" is not JSON serializable"")" 2765,"def start(self): """""" Try to init the main sub-components (:func:`~responsebot.utils.handler_utils.discover_handler_classes`, \ :func:`~responsebot.utils.auth_utils.auth`, :class:`~responsebot.responsebot_stream.ResponseBotStream`, etc.) """""" logging.info('ResponseBot started') handler_classes = handler_utils.discover_handler_classes(self.config.get('handlers_package')) if len(handler_classes) == 0: logging.warning('No handler found. Did you forget to extend BaseTweethandler? Check --handlers-module') while True: try: client = auth_utils.auth(self.config) listener = ResponseBotListener(client=client, handler_classes=handler_classes) stream = ResponseBotStream(client=client, listener=listener) stream.start() except (APIQuotaError, AuthenticationError, TweepError) as e: self.handle_error(e) else: break" 2766,"def handle_error(self, error): """""" Try to detect repetitive errors and sleep for a while to avoid being marked as spam """""" logging.exception(""try to sleep if there are repeating errors."") error_desc = str(error) now = datetime.datetime.now() if error_desc not in self.error_time_log: self.error_time_log[error_desc] = now return time_of_last_encounter = self.error_time_log[str(error)] time_since_last_encounter = now - time_of_last_encounter if time_since_last_encounter.total_seconds() > self.config.get('min_seconds_between_errors'): self.error_time_log[error_desc] = now return if error_desc not in self.error_sleep_log: time.sleep(self.config.get('sleep_seconds_on_consecutive_errors')) self.error_sleep_log[error_desc] = 1 else: sys.exit()" 2767,"def parse_isodate(datestr): """"""Parse a string that loosely fits ISO 8601 formatted date-time string """""" m = isodate_rx.search(datestr) assert m, 'unrecognized date format: ' + datestr year, month, day = m.group('year', 'month', 'day') hour, minute, second, fraction = m.group('hour', 'minute', 'second', 'fraction') tz, tzhh, tzmm = m.group('tz', 'tzhh', 'tzmm') dt = datetime.datetime(int(year), int(month), int(day), int(hour)) if fraction is None: fraction = 0 else: fraction = float('0.' + fraction) if minute is None: dt = dt.replace(minute=int(60 * fraction)) else: dt = dt.replace(minute=int(minute)) if second is None: dt = dt.replace(second=int(60 * fraction)) else: dt = dt.replace(second=int(second), microsecond=int(1000000 * fraction)) if tz is not None: if tz[0] == 'Z': offset = 0 else: offset = datetime.timedelta(minutes=int(tzmm or 0), hours=int(tzhh)) if tz[0] == '-': offset = -offset dt = dt.replace(tzinfo=UTCOffset(offset)) return dt" 2768,"def ls( self, rev, path, recursive=False, recursive_dirs=False, directory=False, report=() ): """"""List directory or file :param rev: The revision to use. :param path: The path to list. May start with a '/' or not. Directories may end with a '/' or not. :param recursive: Recursively list files in subdirectories. :param recursive_dirs: Used when recursive=True, also list directories. :param directory: If path is a directory, list path itself instead of its contents. :param report: A list or tuple of extra attributes to return that may require extra processing. 
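A toy sketch of the repeated-error throttling in handle_error above: when the same error message recurs within a minimum interval, sleep before retrying. The interval and sleep values here are arbitrary examples.

import datetime
import time

class ErrorThrottle:
    def __init__(self, min_seconds_between_errors=60, sleep_seconds=5):
        self.min_gap = min_seconds_between_errors
        self.sleep_seconds = sleep_seconds
        self.last_seen = {}

    def handle(self, error):
        desc = str(error)
        now = datetime.datetime.now()
        last = self.last_seen.get(desc)
        self.last_seen[desc] = now
        if last is not None and (now - last).total_seconds() <= self.min_gap:
            time.sleep(self.sleep_seconds)  # back off on rapidly repeating errors

throttle = ErrorThrottle(min_seconds_between_errors=60, sleep_seconds=0)
throttle.handle(RuntimeError('rate limited'))
throttle.handle(RuntimeError('rate limited'))  # second occurrence triggers the back-off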
Recognized values are 'size', 'target', 'executable', and 'commit'. Returns a list of dictionaries with the following keys: **type** The type of the file: 'f' for file, 'd' for directory, 'l' for symlink. **name** The name of the file. Not present if directory=True. **size** The size of the file. Only present for files when 'size' is in report. **target** The target of the symlink. Only present for symlinks when 'target' is in report. **executable** True if the file is executable, False otherwise. Only present for files when 'executable' is in report. Raises PathDoesNotExist if the path does not exist. """""" raise NotImplementedError" 2769,"def log( self, revrange=None, limit=None, firstparent=False, merges=None, path=None, follow=False ): """"""Get commit logs :param revrange: Either a single revision or a range of revisions as a 2-element list or tuple. :param int limit: Limit the number of log entries. :param bool firstparent: Only follow the first parent of merges. :param bool merges: True means only merges, False means no merges, None means both merges and non-merges. :param str path: Only match commits containing changes on this path. :param bool follow: Follow file history across renames. :returns: log information :rtype: :class:`CommitLogEntry` or list of :class:`CommitLogEntry` If revrange is None, return a list of all log entries in reverse chronological order. If revrange is a single revision, return a single log entry. If revrange is a 2 element list [A,B] or tuple (A,B), return a list of log entries starting at B and following that branch back to A or one of its ancestors (not inclusive. If A is None, follow branch B back to the beginning of history. If B is None, list all descendants in reverse chronological order. """""" raise NotImplementedError" 2770,"def user_create(self, cloudflare_email, cloudflare_pass, unique_id=None): """""" Create new cloudflare user with selected email and id. Optionally also select unique_id which can be then used to get user information. :param cloudflare_email: new user cloudflare email :type cloudflare_email: str :param cloudflare_pass: new user cloudflare password :type cloudflare_pass: str :param unique_id: new user unique id :type unique_id: str (optional) :returns: :rtype: dict """""" params = { 'act': 'user_create', 'cloudflare_email': cloudflare_email, 'cloudflare_pass': cloudflare_pass } if unique_id: params['unique_id'] = unique_id return self._request(params)" 2771,"def zone_set(self, user_key, zone_name, resolve_to, subdomains): """""" Create new zone for user associated with this user_key. :param user_key: The unique 3auth string,identifying the user's CloudFlare Account. Generated from a user_create or user_auth :type user_key: str :param zone_name: The zone you'd like to run CNAMES through CloudFlare for, e.g. ""example.com"". :type zone_name: str :param resolve_to: The CNAME that CloudFlare should ultimately resolve web connections to after they have been filtered :type resolve_to: str :param subdomains: A comma-separated string of subdomain(s) that CloudFlare should host, e.g. ""www,blog,forums"" :type subdomains: str :returns: :rtype: dict """""" params = { 'act': 'zone_set', 'user_key': user_key, 'zone_name': zone_name, 'resolve_to': resolve_to, 'subdomains': subdomains, } return self._request(params)" 2772,"def full_zone_set(self, user_key, zone_name): """""" Create new zone and all subdomains for user associated with this user_key. :param user_key: The unique 3auth string,identifying the user's CloudFlare Account. 
Generated from a user_create or user_auth :type user_key: str :param zone_name: The zone you'd like to run CNAMES through CloudFlare for, e.g. ""example.com"". :type zone_name: str :returns: :rtype: dict """""" params = { 'act': 'full_zone_set', 'user_key': user_key, 'zone_name': zone_name, } return self._request(params)" 2773,"def user_lookup(self, cloudflare_email=None, unique_id=None): """""" Look up user data based on either the cloudflare_email or the unique_id. :param cloudflare_email: email associated with user :type cloudflare_email: str :param unique_id: unique id associated with user :type unique_id: str :returns: :rtype: dict """""" if not cloudflare_email and not unique_id: raise KeyError( 'Either cloudflare_email or unique_id must be present') params = {'act': 'user_lookup'} if cloudflare_email: params['cloudflare_email'] = cloudflare_email else: params['unique_id'] = unique_id return self._request(params)" 2774,"def user_auth( self, cloudflare_email=None, cloudflare_pass=None, unique_id=None ): """""" Get the user_key based on either the email and password or the unique_id. :param cloudflare_email: email associated with user :type cloudflare_email: str :param cloudflare_pass: pass associated with user :type cloudflare_pass: str :param unique_id: unique id associated with user :type unique_id: str :returns: :rtype: dict """""" if not (cloudflare_email and cloudflare_pass) and not unique_id: raise KeyError( 'Either cloudflare_email and cloudflare_pass or unique_id must be present') params = {'act': 'user_auth'} if cloudflare_email and cloudflare_pass: params['cloudflare_email'] = cloudflare_email params['cloudflare_pass'] = cloudflare_pass if unique_id: params['unique_id'] = unique_id return self._request(params)" 2775,"def zone_list( self, user_key, limit=100, offset=0, zone_name=None, sub_id=None, zone_status='ALL', sub_status='ALL', ): """""" List zones for a user. :param user_key: key for authentication of user :type user_key: str :param limit: limit of zones shown :type limit: int :param offset: offset of zones to be shown :type offset: int :param zone_name: name of zone to lookup :type zone_name: str :param sub_id: subscription id of reseller (only for use by resellers) :type sub_id: str :param zone_status: status of zones to be shown :type zone_status: str (one of: V(active), D(deleted), ALL) :param sub_status: status of subscription of zones to be shown :type sub_status: str (one of: V(active), CNL(cancelled), ALL) :returns: :rtype: dict """""" if zone_status not in ['V', 'D', 'ALL']: raise ValueError('zone_status has to be V, D or ALL') if sub_status not in ['V', 'CNL', 'ALL']: raise ValueError('sub_status has to be V, CNL or ALL') params = { 'act': 'zone_list', 'user_key': user_key, 'limit': limit, 'offset': offset, 'zone_status': zone_status, 'sub_status': sub_status } if zone_name: params['zone_name'] = zone_name if sub_id: params['sub_id'] = sub_id return self._request(params)" 2776,"def attr_exists(self, attr): """"""Returns True if at least one instance of the attribute is found """""" gen = self.attr_gen(attr) n_instances = len(list(gen)) if n_instances > 0: return True else: return False" 2777,"def datasets(self): """"""Method returns a list of dataset paths.
Examples -------- >>> for dataset in h5f.datasets(): print(dataset) '/dataset1/data1/data' '/dataset1/data2/data' '/dataset2/data1/data' '/dataset2/data2/data' """""" HiisiHDF._clear_cache() self.visititems(HiisiHDF._is_dataset) return HiisiHDF.CACHE['dataset_paths']" 2778,"def groups(self): """"""Method returns a list of all group paths Examples -------- >>> for group in h5f.groups(): print(group) '/' '/dataset1' '/dataset1/data1' '/dataset1/data2' """""" HiisiHDF._clear_cache() self.CACHE['group_paths'].append('/') self.visititems(HiisiHDF._is_group) return HiisiHDF.CACHE['group_paths']" 2779,"def attr_gen(self, attr): """"""Returns an attribute generator that yields namedtuples containing path-value pairs Parameters ---------- attr : str Name of the search attribute Returns ------- attr_generator : generator Returns a generator that yields named tuples with field names path and value. Examples -------- >>> gen = h5f.attr_gen('elangle') >>> pair = next(gen) >>> print(pair.path) '/dataset1/where' >>> print(pair.value) 0.5 """""" HiisiHDF._clear_cache() HiisiHDF.CACHE['search_attribute'] = attr HiisiHDF._find_attr_paths('/', self['/']) # Check root attributes self.visititems(HiisiHDF._find_attr_paths) path_attr_gen = (PathValue(attr_path, self[attr_path].attrs.get(attr)) for attr_path in HiisiHDF.CACHE['attribute_paths']) return path_attr_gen" 2780,"def create_from_filedict(self, filedict): """""" Creates an h5 file from a dictionary containing the file structure. Filedict is a regular dictionary whose keys are hdf5 paths and whose values are dictionaries containing the metadata and datasets. Metadata is given as normal key-value pairs and dataset arrays are given using the 'DATASET' key. Datasets must be numpy arrays. The method can also be used to append to an existing hdf5 file. If the file is opened in read only mode, the method does nothing. Examples -------- Create newfile.h5 and fill it with data and metadata >>> h5f = HiisiHDF('newfile.h5', 'w') >>> filedict = {'/':{'attr1':'A'}, '/dataset1/data1/data':{'DATASET':np.zeros(100), 'quantity':'emptyarray'}, 'B':'b'} >>> h5f.create_from_filedict(filedict) """""" if self.mode in ['r+','w', 'w-', 'x', 'a']: for h5path, path_content in filedict.iteritems(): if path_content.has_key('DATASET'): # If the path exists, write only metadata if h5path in self: for key, value in path_content.iteritems(): if key != 'DATASET': self[h5path].attrs[key] = value else: try: group = self.create_group(os.path.dirname(h5path)) except ValueError: group = self[os.path.dirname(h5path)] pass # This pass has no effect? new_dataset = group.create_dataset(os.path.basename(h5path), data=path_content['DATASET']) for key, value in path_content.iteritems(): if key != 'DATASET': new_dataset.attrs[key] = value else: try: group = self.create_group(h5path) except ValueError: group = self[h5path] for key, value in path_content.iteritems(): group.attrs[key] = value" 2781,"def search(self, attr, value, tolerance=0): """"""Find paths with a key value match Parameters ---------- attr : str name of the attribute value : str or numerical value value of the searched attribute Keywords -------- tolerance : float tolerance used when searching for matching numerical attributes. If the value of the attribute found from the file differs from the searched value by less than the tolerance, the attributes are considered to be the same.
Returns ------- results : list a list of all matching paths Examples -------- >>> for result in h5f.search('elangle', 0.5, 0.1): print(result) '/dataset1/where' >>> for result in h5f.search('quantity', 'DBZH'): print(result) '/dataset1/data2/what' '/dataset2/data2/what' '/dataset3/data2/what' '/dataset4/data2/what' '/dataset5/data2/what' """""" found_paths = [] gen = self.attr_gen(attr) for path_attr_pair in gen: # if attribute is numerical use numerical_value_tolerance in # value comparison. If attribute is string require exact match if isinstance(path_attr_pair.value, str): type_name = 'str' else: type_name = path_attr_pair.value.dtype.name if 'int' in type_name or 'float' in type_name: if abs(path_attr_pair.value - value) <= tolerance: found_paths.append(path_attr_pair.path) else: if path_attr_pair.value == value: found_paths.append(path_attr_pair.path) return found_paths" 2782,"def _extractReporterIons(ionArrays, reporterMz, mzTolerance): """"""Find a list of reporter ions and return mz and intensity values. Expected reporter mz values are searched in ""ionArray['mz']"" and reported if the observed relative deviation is less than specified by ""mzTolerance"". In the case of multiple matches, the one with the minimal deviation is picked. If no matching entries are found numpy.nan is returned for the mz value and an intensity of 0. The returned arrays are in the order of ""reporterMz"" values. :param ionArrays: a dictionary containing two numpy arrays of equal size, {""i"": an array of ion intensities, ""mz"" an array of ion mz values} :param reporterMz: a list of reporter mz values :param mzTolerance: maximum allowed relative mz deviation :returns: {'mz': numpy.array(), 'i': numpy.array()} """""" reporterIons = {'mz': [], 'i': []} for reporterMzValue in reporterMz: limHi = reporterMzValue * (1+mzTolerance) limLo = reporterMzValue * (1-mzTolerance) loPos = bisect.bisect_left(ionArrays['mz'], limLo) upPos = bisect.bisect_right(ionArrays['mz'], limHi) matchingValues = ionArrays['mz'][loPos:upPos] if matchingValues.size == 0: reporterIons['i'].append(0) reporterIons['mz'].append(numpy.nan) elif matchingValues.size == 1: reporterIons['i'].append(ionArrays['i'][loPos]) reporterIons['mz'].append(ionArrays['mz'][loPos]) else: mzDeviations = numpy.abs(matchingValues-reporterMzValue) minDeviationPos = numpy.argmin(mzDeviations) bestMatchArrayPos = range(loPos, upPos)[minDeviationPos] reporterIons['i'].append(ionArrays['i'][bestMatchArrayPos]) reporterIons['mz'].append(ionArrays['mz'][bestMatchArrayPos]) reporterIons['mz'] = numpy.array(reporterIons['mz'], dtype=ionArrays['mz'].dtype ) reporterIons['i'] = numpy.array(reporterIons['i'], dtype=ionArrays['i'].dtype ) return reporterIons" 2783,"def _correctIsotopeImpurities(matrix, intensities): """"""Corrects observed reporter ion intensities for isotope impurities. :params matrix: a matrix (2d nested list) containing numbers, each isobaric channel must be present as a COLUMN. Use maspy.isobar._transposeMatrix() if channels are written in rows. :param intensities: numpy array of observed reporter ion intensities. :returns: a numpy array of reporter ion intensities corrected for isotope impurities. """""" correctedIntensities, _ = scipy.optimize.nnls(matrix, intensities) return correctedIntensities" 2784,"def _normalizeImpurityMatrix(matrix): """"""Normalize each row of the matrix so that the sum of the row equals 1. :params matrix: a matrix (2d nested list) containing numbers, each isobaric channel must be present as a row.
:returns: a matrix containing normalized values """""" newMatrix = list() for line in matrix: total = sum(line) if total != 0: newMatrix.append([i / total for i in line]) else: newMatrix.append(line) return newMatrix" 2785,"def _padImpurityMatrix(matrix, preChannels, postChannels): """"""Align the values of an isotope impurity matrix and fill up with 0. NOTE: The length of the rows in the ""matrix"" must be the sum of ""preChannels"" and ""postChannels"" + 1. :params matrix: a matrix (2d nested list) containing numbers, each isobaric channel must be present as a row. :params preChannels: number of matrix columns with a nominal mass shift < 0 (-1, -2,..) in respect to the reporter ion mz value. :params postChannels: number of matrix columns with a nominal mass shift > 0 (+1, +2,..) in respect to the reporter ion mz value. :returns: extended matrix, where the number of rows is unchanged but the length of each row is extend to the number of rows. """""" extendedMatrix = list() lastMatrixI = len(matrix)-1 for i, line in enumerate(matrix): prePadding = itertools.repeat(0., i) postPadding = itertools.repeat(0., lastMatrixI-i) newLine = list(itertools.chain(prePadding, line, postPadding)) extendedMatrix.append(newLine[preChannels:-postChannels]) return extendedMatrix" 2786,"def _processImpurityMatrix(self): """"""Process the impurity matrix so that it can be used to correct observed reporter intensities. """""" processedMatrix = _normalizeImpurityMatrix(self.impurityMatrix) processedMatrix = _padImpurityMatrix( processedMatrix, self.matrixPreChannels, self.matrixPostChannels ) processedMatrix = _transposeMatrix(processedMatrix) return processedMatrix" 2787,"def exception(message): """"""Exception method convenience wrapper."""""" def decorator(method): """"""Inner decorator so we can accept arguments."""""" @wraps(method) def wrapper(self, *args, **kwargs): """"""Innermost decorator wrapper - this is confusing."""""" if self.messages: kwargs['message'] = args[0] if args else kwargs.get('message', message) else: kwargs['message'] = None kwargs['prefix'] = self.prefix kwargs['statsd'] = self.statsd return method(self, **kwargs) return wrapper return decorator" 2788,"def to_dict(self): """"""Convert Exception class to a Python dictionary."""""" val = dict(self.payload or ()) if self.message: val['message'] = self.message return val" 2789,"def init_app(self, app, config=None, statsd=None): """"""Init Flask Extension."""""" if config is not None: self.config = config elif self.config is None: self.config = app.config self.messages = self.config.get('EXCEPTION_MESSAGE', True) self.prefix = self.config.get('EXCEPTION_PREFIX', DEFAULT_PREFIX) self.statsd = statsd" 2790,"def program(self): """"""program : (newline) statement | program statement """""" statements = [] if self.cur_token.type == TokenTypes.NEW_LINE: self.eat(TokenTypes.NEW_LINE) while self.cur_token.type != TokenTypes.EOF: statements += [self.statement()] return Block(statements)" 2791,"def statement(self): """""" statement : assign_statement | expression | control | empty Feature For Loop adds: | loop Feature Func adds: | func | return statement """""" if self.cur_token.type == TokenTypes.VAR: self.tokenizer.start_saving(self.cur_token) self.variable() peek_var = self.cur_token self.tokenizer.replay() self.eat() if peek_var.type == TokenTypes.ASSIGN: return self.assign_statement() else: return self.expression() elif self.cur_token.type in TokenTypes.control(self.features): return self.control() elif self.cur_token.type in 
TokenTypes.loop(self.features): return self.loop() elif self.cur_token.type in TokenTypes.func(self.features): if self.cur_token.type == TokenTypes.FUNC: return self.func() elif self.cur_token.type == TokenTypes.RETURN: return self.return_statement() self.error(""Invalid token or unfinished statement"")" 2792,"def assign_statement(self): """""" assign smt : variable ASSIGN expression(;) Feature Type Array adds: | variable SETITEM expression(;) """""" left = self.variable() op = self.cur_token self.eat(TokenTypes.ASSIGN) right = self.expression() smt = None if Features.TYPE_ARRAY in self.features and isinstance(left, GetArrayItem): # Remake this as a setitem. smt = SetArrayItem(left.left, left.right, right) else: smt = Assign(op, left, right) if self.cur_token.type == TokenTypes.SEMI_COLON: self.eat(TokenTypes.SEMI_COLON) return smt" 2793,"def control(self): """""" control : 'if' ctrl_exp block ('elif' ctrl_exp block)* ('else' block) """""" self.eat(TokenTypes.IF) ctrl = self.expression() block = self.block() ifs = [If(ctrl, block)] else_block = Block() while self.cur_token.type == TokenTypes.ELIF: self.eat(TokenTypes.ELIF) ctrl = self.expression() block = self.block() ifs.append(If(ctrl, block)) if self.cur_token.type == TokenTypes.ELSE: self.eat(TokenTypes.ELSE) else_block = self.block() return ControlBlock(ifs, else_block)" 2794,"def loop(self): """""" loop : 'for' init; ctrl; inc block """""" self.eat(TokenTypes.FOR_LOOP) init = NoOp() if self.cur_token.type != TokenTypes.SEMI_COLON: init = self.assign_statement() else: self.eat(TokenTypes.SEMI_COLON) ctrl = NoOp() if self.cur_token.type != TokenTypes.SEMI_COLON: ctrl = self.expression() self.eat(TokenTypes.SEMI_COLON) inc = NoOp() if self.cur_token.type != TokenTypes.LBRACE: inc = self.assign_statement() block = self.block() return ForLoop(init, ctrl, inc, block)" 2795,"def func(self): """""" func : func name(paramlist) block """""" self.eat(TokenTypes.FUNC) name = Var(self.cur_token) self.eat(TokenTypes.VAR) self.eat(TokenTypes.LPAREN) sig = self.param_list() self.eat(TokenTypes.RPAREN) block = self.block() return FunctionDef(name, Function(sig, block))" 2796,"def param_list(self): """""" paramlist : var, paramlist paramlist : var paramlist : """""" params = [] while self.cur_token.type == TokenTypes.VAR: params.append(Var(self.cur_token)) self.eat(TokenTypes.VAR) if self.cur_token.type == TokenTypes.COMMA: self.eat(TokenTypes.COMMA) return FunctionSig(params)" 2797,"def arg_list(self, ending_char=TokenTypes.RPAREN): """""" arglist : expression, arglist arglist : expression arglist : """""" args = [] while not self.cur_token.type == ending_char: args.append(self.expression()) if self.cur_token.type == TokenTypes.COMMA: self.eat(TokenTypes.COMMA) return args" 2798,"def array_const(self): """""" Feature Type Array adds: array : [ arglist ] """""" self.eat(TokenTypes.LBRACKET) node = Array(self.arg_list(TokenTypes.RBRACKET)) self.eat(TokenTypes.RBRACKET) return node" 2799,"def block(self): """""" block : { (newline) statements } (newline) """""" statements = [] self.eat(TokenTypes.LBRACE) if self.cur_token.type == TokenTypes.NEW_LINE: self.eat(TokenTypes.NEW_LINE) while self.cur_token.type != TokenTypes.RBRACE: statements.append(self.statement()) self.eat(TokenTypes.RBRACE) if self.cur_token.type == TokenTypes.NEW_LINE: self.eat(TokenTypes.NEW_LINE) return Block(statements)" 2800,"def variable(self): """""" variable : variable Feature Type Array adds: variable : variable[expression] Feature Type Func adds: variable : variable(arg_list) 
"""""" var = Var(self.cur_token) self.eat(TokenTypes.VAR) if Features.TYPE_ARRAY in self.features: while self.cur_token.type == TokenTypes.LBRACKET: self.eat(TokenTypes.LBRACKET) # Start passed the logical ops. expr = self.operator_expression(level=2) self.eat(TokenTypes.RBRACKET) var = GetArrayItem(left=var, right=expr) if Features.FUNC in self.features: if self.cur_token.type == TokenTypes.LPAREN: self.eat(TokenTypes.LPAREN) args = self.arg_list() self.eat(TokenTypes.RPAREN) var = Call(var, args) return var" 2801,"def wrap_node(self, node, options): '''\ celery registers tasks by decorating them, and so do we, so the user can pass a celery task and we'll wrap our code with theirs in a nice package celery can execute. ''' if 'celery_task' in options: return options['celery_task'](node) return self.celery_task(node)" 2802,"def checkpoint(key=0, unpickler=pickle.load, pickler=pickle.dump, work_dir=gettempdir(), refresh=False): """""" A utility decorator to save intermediate results of a function. It is the caller's responsibility to specify a key naming scheme such that the output of each function call with different arguments is stored in a separate file. :param key: The key to store the computed intermediate output of the decorated function. if key is a string, it is used directly as the name. if key is a string.Template object, you can specify your file-naming convention using the standard string.Template conventions. Since string.Template uses named substitutions, it can handle only keyword arguments. Therfore, in addition to the standard Template conventions, an additional feature is provided to help with non-keyword arguments. For instance if you have a function definition as f(m, n, arg3='myarg3',arg4='myarg4'). Say you want your key to be: n followed by an _ followed by 'text' followed by arg3 followed by a . followed by arg4. Let n = 3, arg3='out', arg4='txt', then you are interested in getting '3_textout.txt'. This is written as key=Template('{1}_text$arg3.$arg4') The filename is first generated by substituting the kwargs, i.e key_id.substitute(kwargs), this would give the string '{1}_textout.txt' as output. This is further processed by a call to format with args as the argument, where the second argument is picked (since counting starts from 0), and we get 3_textout.txt. if key is a callable function, it is called with the same arguments as that of the function, in a special format. key must be of the form lambda arg, kwarg: ... your definition. arg is an iterable containing the un-named arguments of the function, and kwarg is a dictionary containing the keyword arguments. For instance, the above example can be written as: key = lambda arg, kwarg: '%d_text%s.%s'.format(arg[1], kwarg['arg3'], kwarg['arg4']) Or one can define a function that takes the same arguments: def key_namer(args, kwargs): return '%d_text%s.%s'.format(arg[1], kwarg['arg3'], kwarg['arg4']) This way you can do complex argument processing and name generation. :param pickler: The function that loads the saved object and returns. This should ideally be of the same format as the one that is computed. However, in certain cases, it is enough as long as it provides the information necessary for the caller, even if it is not exactly same as the object returned by the function. :param unpickler: The function that saves the computed object into a file. :param work_dir: The location where the checkpoint files are stored. :param do_refresh: If enabled, this will not skip, effectively disabling the decoration @checkpoint. 
REFRESHING: One of the intended ways to use the refresh feature is as follows: Say you are checkpointing a function f1, f2; have a file or a place where you define refresh variables: defs.py: ------- REFRESH_f1 = True REFRESH_f2 = os.environ['F2_REFRESH'] # can set this externally code.py: ------- @checkpoint(..., refresh=REFRESH_f1) def f1(...): your code. @checkpoint(..., refresh=REFRESH_f2) def f2(...): your code. This way, you have control on what to refresh without modifying the code, by setting the defs either via input or by modifying defs.py. """""" def decorator(func): def wrapped(*args, **kwargs): # If first arg is a string, use it directly. if isinstance(key, str): save_file = os.path.join(work_dir, key) elif isinstance(key, Template): save_file = os.path.join(work_dir, key.substitute(kwargs)) save_file = save_file.format(*args) elif isinstance(key, types.FunctionType): save_file = os.path.join(work_dir, key(args, kwargs)) else: logging.warn('Using 0-th argument as default.') save_file = os.path.join(work_dir, '{0}') save_file = save_file.format(args[key]) logging.info('checkpoint@ %s' % save_file) # cache_file doesn't exist, run the function and save output in checkpoint. if isinstance(refresh, types.FunctionType): do_refresh = refresh() else: do_refresh = refresh if do_refresh or not os.path.exists(path=save_file): # Otherwise compute it save it and return it. # If the program fails, don't checkpoint. try: out = func(*args, **kwargs) except: # a blank raise re-raises the last exception. raise else: # If the program is successful, then go ahead and call the save function. with open(save_file, 'wb') as f: pickler(out, f) return out # Otherwise, load the checkpoint file and send it. else: logging.info(""Checkpoint exists. Loading from: %s"" % save_file) with open(save_file, 'rb') as f: return unpickler(f) # Todo: Sending options to load/save functions. return wrapped return decorator" 2803,"def bfs(graph, start): """""" Finds the shortest string using BFS Args: graph (DFA): The DFA states start (DFA state): The DFA initial state Returns: str: The shortest string """""" # maintain a queue of paths queue = [] visited = [] # maintain a queue of nodes # push the first path into the queue queue.append([['', start]]) while queue: # get the first path from the queue path = queue.pop(0) # get the last node from the path node = path[-1][1] if node.stateid not in visited: visited.append(node.stateid) # path found if node.final != TropicalWeight(float('inf')): return """".join([mnode[0] for mnode in path]) # enumerate all adjacent nodes, construct a new path and push # it into the queue for arc in node.arcs: char = graph.isyms.find(arc.ilabel) next_state = graph[arc.nextstate] # print next_state.stateid if next_state.stateid not in visited: new_path = list(path) new_path.append([char, next_state]) queue.append(new_path)" 2804,"def run(): """"""Display the arguments as a braille graph on standard output."""""" # We override the program name to reflect that this script must be run with # the python executable. parser = argparse.ArgumentParser( prog='python -m braillegraph', description='Print a braille bar graph of the given integers.' ) # This flag sets the end string that we'll print. If we pass end=None to # print(), it will use its default. If we pass end='', it will suppress the # newline character. 
parser.add_argument('-n', '--no-newline', action='store_const', dest='end', const='', default=None, help='do not print the trailing newline character') # Add subparsers for the directions subparsers = parser.add_subparsers(title='directions') horizontal_parser = subparsers.add_parser('horizontal', help='a horizontal graph') horizontal_parser.set_defaults( func=lambda args: horizontal_graph(args.integers) ) horizontal_parser.add_argument('integers', metavar='N', type=int, nargs='+', help='an integer') vertical_parser = subparsers.add_parser('vertical', help='a vertical graph') vertical_parser.set_defaults( func=lambda args: vertical_graph(args.integers, sep=args.sep) ) vertical_parser.add_argument('integers', metavar='N', type=int, nargs='+', help='an integer') # The separator for groups of bars (i.e., ""lines""). If we pass None, # vertical_parser will use its default. vertical_parser.add_argument('-s', '--sep', action='store', default=None, help='separator for groups of bars') args = parser.parse_args() print(args.func(args), end=args.end)" 2805,"def _rnd_date(start, end): """"""Internal random date generator. """""" return date.fromordinal(random.randint(start.toordinal(), end.toordinal()))" 2806,"def rnd_date(start=date(1970, 1, 1), end=None, **kwargs): """""" Generate a random date between ``start`` to ``end``. :param start: Left bound :type start: string or datetime.date, (default date(1970, 1, 1)) :param end: Right bound :type end: string or datetime.date, (default date.today()) :return: a datetime.date object **中文文档** 随机生成一个位于 ``start`` 和 ``end`` 之间的日期。 """""" if end is None: end = date.today() start = parser.parse_date(start) end = parser.parse_date(end) _assert_correct_start_end(start, end) return _rnd_date(start, end)" 2807,"def rnd_date_array(size, start=date(1970, 1, 1), end=None, **kwargs): """""" Array or Matrix of random date generator. :returns: 1d or 2d array of datetime.date """""" if end is None: end = date.today() start = parser.parse_date(start) end = parser.parse_date(end) _assert_correct_start_end(start, end) return _randn(size, _rnd_date, start, end)" 2808,"def rnd_date_list_high_performance(size, start=date(1970, 1, 1), end=None, **kwargs): """""" Generate mass random date. :param size: int, number of :param start: date similar object, int / str / date / datetime :param end: date similar object, int / str / date / datetime, default today's date :param kwargs: args placeholder :return: list of datetime.date """""" if end is None: end = date.today() start_days = to_ordinal(parser.parse_datetime(start)) end_days = to_ordinal(parser.parse_datetime(end)) _assert_correct_start_end(start_days, end_days) if has_np: # pragma: no cover return [ from_ordinal(days) for days in np.random.randint(start_days, end_days, size) ] else: return [ from_ordinal(random.randint(start_days, end_days)) for _ in range(size) ]" 2809,"def rnd_datetime(start=datetime(1970, 1, 1), end=datetime.now()): """""" Generate a random datetime between ``start`` to ``end``. 
:param start: Left bound :type start: string or datetime.datetime, (default datetime(1970, 1, 1)) :param end: Right bound :type end: string or datetime.datetime, (default datetime.now()) :return: a datetime.datetime object **中文文档** 随机生成一个位于 ``start`` 和 ``end`` 之间的时间。 """""" start = parser.parse_datetime(start) end = parser.parse_datetime(end) _assert_correct_start_end(start, end) return _rnd_datetime(start, end)" 2810,"def rnd_datetime_array(size, start=datetime(1970, 1, 1), end=None): """""" Array or Matrix of random datetime generator. :returns: 1d or 2d array of datetime.date """""" if end is None: end = datetime.now() start = parser.parse_datetime(start) end = parser.parse_datetime(end) _assert_correct_start_end(start, end) return _randn(size, _rnd_datetime, start, end)" 2811,"def day_interval(year, month, day, milliseconds=False, return_string=False): """""" Return a start datetime and end datetime of a day. :param milliseconds: Minimum time resolution. :param return_string: If you want string instead of datetime, set True Usage Example:: >>> start, end = rolex.day_interval(2014, 6, 17) >>> start datetime(2014, 6, 17, 0, 0, 0) >>> end datetime(2014, 6, 17, 23, 59, 59) """""" if milliseconds: # pragma: no cover delta = timedelta(milliseconds=1) else: delta = timedelta(seconds=1) start = datetime(year, month, day) end = datetime(year, month, day) + timedelta(days=1) - delta if not return_string: return start, end else: return str(start), str(end)" 2812,"def month_interval(year, month, milliseconds=False, return_string=False): """""" Return a start datetime and end datetime of a month. :param milliseconds: Minimum time resolution. :param return_string: If you want string instead of datetime, set True Usage Example:: >>> start, end = rolex.month_interval(2000, 2) >>> start datetime(2000, 2, 1, 0, 0, 0) >>> end datetime(2000, 2, 29, 23, 59, 59) """""" if milliseconds: # pragma: no cover delta = timedelta(milliseconds=1) else: delta = timedelta(seconds=1) if month == 12: start = datetime(year, month, 1) end = datetime(year + 1, 1, 1) - delta else: start = datetime(year, month, 1) end = datetime(year, month + 1, 1) - delta if not return_string: return start, end else: return str(start), str(end)" 2813,"def year_interval(year, milliseconds=False, return_string=False): """""" Return a start datetime and end datetime of a year. :param milliseconds: Minimum time resolution. :param return_string: If you want string instead of datetime, set True Usage Example:: >>> start, end = rolex.year_interval(2007) >>> start datetime(2007, 1, 1, 0, 0, 0) >>> end datetime(2007, 12, 31, 23, 59, 59) """""" if milliseconds: # pragma: no cover delta = timedelta(milliseconds=1) else: delta = timedelta(seconds=1) start = datetime(year, 1, 1) end = datetime(year + 1, 1, 1) - delta if not return_string: return start, end else: return str(start), str(end)" 2814,"def renderfile(filename, options=None, templatePaths=None, default='', silent=False): """""" Renders a file to text using the mako template system. 
To learn more about mako and its usage, see [[www.makotemplates.org]] :return <str> formatted text """""" if not mako: logger.debug('mako is not installed') return default if not mako: logger.debug('mako is not installed.') return default if templatePaths is None: templatePaths = [] # use the default mako templates basepath = os.environ.get('MAKO_TEMPLATEPATH', '') if basepath: basetempls = basepath.split(os.path.pathsep) else: basetempls = [] templatePaths += basetempls # include the root path templatePaths.insert(0, os.path.dirname(filename)) templatePaths = map(lambda x: x.replace('\\', '/'), templatePaths) # update the default options scope = dict(os.environ) scope['projex_text'] = projex.text scope['date'] = date scope['datetime'] = datetime scope.update(_macros) scope.update(os.environ) if options is not None: scope.update(options) old_env_path = os.environ.get('MAKO_TEMPLATEPATH', '') os.environ['MAKO_TEMPLATEPATH'] = os.path.pathsep.join(templatePaths) logger.debug('rendering mako file: %s', filename) if templatePaths: lookup = mako.lookup.TemplateLookup(directories=templatePaths) templ = mako.template.Template(filename=filename, lookup=lookup) else: templ = mako.template.Template(filename=filename) try: output = templ.render(**scope) except StandardError: output = default if not silent: logger.exception('Error rendering mako text') os.environ['MAKO_TEMPLATEPATH'] = old_env_path return output" 2815,"def render(text, options=None, templatePaths=None, default=None, silent=False, raiseErrors=False): """""" Renders a template text to a resolved text value using the mako template system. Provides a much more robust template option to the projex.text system. While the projex.text method can handle many simple cases with no dependencies, the makotext module makes use of the powerful mako template language. This module provides a simple wrapper to the mako code. To learn more about mako and its usage, see [[www.makotemplates.org]] :param text <str> :param options <dict> { <str> key: <variant> value, .. 
} :return <str> formatted text :usage |import projex.makotext |options = { 'key': 10, 'name': 'eric' } |template = '${name.lower()}_${key}_${date.today()}.txt' |projex.makotext.render( template, options ) """""" if not mako: logger.debug('mako is not installed.') return text if default is None else default if templatePaths is None: templatePaths = [] # use the default mako templates basepath = os.environ.get('MAKO_TEMPLATEPATH', '') if basepath: basetempls = basepath.split(os.path.pathsep) else: basetempls = [] templatePaths += basetempls # update the default options scope = dict(os.environ) scope['projex_text'] = projex.text scope['date'] = date scope['datetime'] = datetime scope.update(_macros) if options is not None: scope.update(options) if templatePaths: lookup = mako.lookup.TemplateLookup(directories=templatePaths) try: templ = mako.template.Template(text, lookup=lookup) except StandardError: output = text if default is None else default if not silent: logger.exception('Error compiling mako text') return output else: try: templ = mako.template.Template(text) except StandardError: output = text if default is None else default if not silent: logger.exception('Error compiling mako text') return output try: output = templ.render(**scope) except StandardError: if raiseErrors: raise output = text if default is None else default if not silent: logger.exception('Error rendering mako text') return output return output" 2816,"def collectfiles(path, filt=None): """""" Collects some files based on the given filename. :param path | <str> filt | <method> :return [(<str> name, <str> filepath), ..] """""" if not os.path.isdir(path): path = os.path.dirname(path) output = [] for name in sorted(os.listdir(path)): filepath = os.path.join(path, name) if os.path.isfile(filepath): if not filt or filt(name): output.append((name, filepath)) return output" 2817,"def get_milestone(self, title): """""" given the title as str, looks for an existing milestone or create a new one, and return the object """""" if not title: return GithubObject.NotSet if not hasattr(self, '_milestones'): self._milestones = {m.title: m for m in self.repo.get_milestones()} milestone = self._milestones.get(title) if not milestone: milestone = self.repo.create_milestone(title=title) return milestone" 2818,"def get_assignee(self, login): """""" given the user login, looks for a user in assignee list of the repo and return it if was found. """""" if not login: return GithubObject.NotSet if not hasattr(self, '_assignees'): self._assignees = {c.login: c for c in self.repo.get_assignees()} if login not in self._assignees: # warning print(""{} doesn't belong to this repo. 
This issue won't be assigned."".format(login)) return self._assignees.get(login)" 2819,"def sender(self, issues): """""" push a list of issues to github """""" for issue in issues: state = self.get_state(issue.state) if issue.number: try: gh_issue = self.repo.get_issue(issue.number) original_state = gh_issue.state if original_state == state: action = 'Updated' elif original_state == 'closed': action = 'Reopened' else: action = 'Closed' gh_issue.edit(title=issue.title, body=issue.body, labels=issue.labels, milestone=self.get_milestone(issue.milestone), assignee=self.get_assignee(issue.assignee), state=self.get_state(issue.state) ) print('{} #{}: {}'.format(action, gh_issue.number, gh_issue.title)) except GithubException: print('Not found #{}: {} (ignored)'.format(issue.number, issue.title)) continue else: gh_issue = self.repo.create_issue(title=issue.title, body=issue.body, labels=issue.labels, milestone=self.get_milestone(issue.milestone), assignee=self.get_assignee(issue.assignee)) print('Created #{}: {}'.format(gh_issue.number, gh_issue.title))" 2820,"def define(self, key, value): """""" Defines the value for the inputted key by setting both its default and \ value to the inputted value. :param key | <str> value | <variant> """""" skey = nstr(key) self._defaults[skey] = value self[skey] = value" 2821,"def toXml(self, xparent): """""" Saves the settings for this dataset to the inputted parent xml. :param xparent | <xml.etree.ElementTree.Element> """""" for key, value in self.items(): elem = ElementTree.SubElement(xparent, 'entry') typ = type(elem).__name__ elem.set('key', key) elem.set('type', typ) if typ in DataSet._xmlTypes: DataSet._xmlTypes[typ][0](elem, value) else: elem.set('value', nstr(value))" 2822,"def fromXml(cls, xparent): """""" Loads the settings for this dataset to the inputted parent xml. :param xparent | <xml.etree.ElementTree.Element> """""" output = cls() for xentry in xparent: key = xentry.get('key') if not key: continue typ = xentry.get('type', 'str') if typ in DataSet._xmlTypes: value = DataSet._xmlTypes[typ][1](xentry) else: value = xentry.get('value', '') output.define(key, value) return output" 2823,"def registerXmlType(typ, encoder, decoder): """""" Registers a data type to encode/decode for xml settings. :param typ | <object> encoder | <method> decoder | <method> """""" DataSet._xmlTypes[nstr(typ)] = (encoder, decoder)" 2824,"def wrap_node(self, node, options): ''' we have the option to construct nodes here, so we can use different queues for nodes without having to have different queue objects. ''' job_kwargs = { 'queue': options.get('queue', 'default'), 'connection': options.get('connection', self.redis_connection), 'timeout': options.get('timeout', None), 'result_ttl': options.get('result_ttl', 500), } return job(**job_kwargs)(node)" 2825,"def writeMzml(specfile, msrunContainer, outputdir, spectrumIds=None, chromatogramIds=None, writeIndex=True): """""" #TODO: docstring :param specfile: #TODO docstring :param msrunContainer: #TODO docstring :param outputdir: #TODO docstring :param spectrumIds: #TODO docstring :param chromatogramIds: #TODO docstring """""" #TODO: maybe change to use aux.openSafeReplace outputFile = io.BytesIO() #TODO: perform check that specfile is present in msrunContainer and at least # the metadatanode. 
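# Fetch the run metadata XML tree stored for this specfile; below, every child node except 'run' is copied verbatim into the output mzML header, while the 'run' element is rebuilt from the container's spectrum and chromatogram items.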
metadataTree = msrunContainer.rmc[specfile] #Generate a list of spectrum ids that should be written to mzML if spectrumIds is None and specfile in msrunContainer.smic: keyTuple = [(int(key), key) for key in viewkeys(msrunContainer.smic[specfile])] spectrumIds = [key for _, key in sorted(keyTuple)] spectrumCounts = len(spectrumIds) #Generate a list of chromatogram ids that should be written to mzML if chromatogramIds is None and specfile in msrunContainer.cic: chromatogramIds = [cId for cId in viewkeys(msrunContainer.cic[specfile])] chromatogramCounts = len(chromatogramIds) spectrumIndexList = list() chromatogramIndexList = list() xmlFile = ETREE.xmlfile(outputFile, encoding='ISO-8859-1', buffered=False) xmlWriter = xmlFile.__enter__() xmlWriter.write_declaration() nsmap = {None: 'http://psi.hupo.org/ms/mzml', 'xsi': 'http://www.w3.org/2001/XMLSchema-instance' } mzmlAttrib = {'{http://www.w3.org/2001/XMLSchema-instance}schemaLocation': \ 'http://psi.hupo.org/ms/mzml http://psidev.info/files/ms/mzML/xsd/mzML1.1.0.xsd', 'version': '1.1.0', 'id': metadataTree.attrib['id'] } if writeIndex: xmlIndexedMzml = xmlWriter.element('indexedmzML', nsmap=nsmap) xmlIndexedMzml.__enter__() xmlWriter.write('\n') xmlMzml = xmlWriter.element('mzML', mzmlAttrib, nsmap=nsmap) xmlMzml.__enter__() xmlWriter.write('\n') for metadataNode in metadataTree.getchildren(): if metadataNode.tag != 'run': xmlWriter.write(maspy.xml.recCopyElement(metadataNode), pretty_print=True ) else: xmlRun = xmlWriter.element(metadataNode.tag, metadataNode.attrib) xmlRun.__enter__() xmlWriter.write('\n') for runChild in metadataNode.getchildren(): if runChild.tag == 'spectrumList': specDefaultProcRef = runChild.attrib['defaultDataProcessingRef'] elif runChild.tag == 'chromatogramList': chromDefaultProcRef = runChild.attrib['defaultDataProcessingRef'] else: #TODO: maybe recCopy? xmlRun.append(runChild) #If any spectra should be written, generate the spectrumList Node. if spectrumCounts > 0: specListAttribs = {'count': str(spectrumCounts), 'defaultDataProcessingRef': specDefaultProcRef } xmlSpectrumList = xmlWriter.element('spectrumList', specListAttribs ) xmlSpectrumList.__enter__() xmlWriter.write('\n') for index, key in enumerate(spectrumIds): smi = msrunContainer.smic[specfile][key] sai = msrunContainer.saic[specfile][key] #Store the spectrum element offset here spectrumIndexList.append((outputFile.tell(), smi.attributes['id'] )) xmlSpectrum = xmlSpectrumFromSmi(index, smi, sai) xmlWriter.write(xmlSpectrum, pretty_print=True) xmlSpectrumList.__exit__(None, None, None) xmlWriter.write('\n') #If any chromatograms should be written, generate the #chromatogramList Node. 
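# As with the spectrumList above, the chromatogramList element is only written when chromatogram ids were collected, and it reuses the defaultDataProcessingRef parsed from the original run element.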
if chromatogramCounts > 0: chromListAttribs = {'count': str(chromatogramCounts), 'defaultDataProcessingRef': chromDefaultProcRef } xmlChromatogramList = xmlWriter.element('chromatogramList', chromListAttribs ) xmlChromatogramList.__enter__() xmlWriter.write('\n') for index, key in enumerate(chromatogramIds): ci = msrunContainer.cic[specfile][key] #Store the chromatogram element offset here chromatogramIndexList.append((outputFile.tell(), ci.id)) xmlChromatogram = xmlChromatogramFromCi(index, ci) xmlWriter.write(xmlChromatogram, pretty_print=True) xmlChromatogramList.__exit__(None, None, None) xmlWriter.write('\n') xmlRun.__exit__(None, None, None) xmlWriter.write('\n') #Close the mzml node xmlMzml.__exit__(None, None, None) #Optional: write the indexedMzml nodes and close the indexedMzml node if writeIndex: xmlWriter.write('\n') indexListOffset = outputFile.tell() _writeMzmlIndexList(xmlWriter, spectrumIndexList, chromatogramIndexList) _writeIndexListOffset(xmlWriter, indexListOffset) _writeMzmlChecksum(xmlWriter, outputFile) xmlIndexedMzml.__exit__(None, None, None) #Close the xml file xmlFile.__exit__(None, None, None) #Write the output mzML file filepath = aux.joinpath(outputdir, specfile+'.mzML') with open(filepath, 'wb') as openfile: openfile.write(outputFile.getvalue())" 2826,"def _writeMzmlIndexList(xmlWriter, spectrumIndexList, chromatogramIndexList): """""" #TODO: docstring :param xmlWriter: #TODO: docstring :param spectrumIndexList: #TODO: docstring :param chromatogramIndexList: #TODO: docstring """""" counts = 0 if spectrumIndexList: counts += 1 if chromatogramIndexList: counts += 1 if counts == 0: return None #Create indexList node xmlIndexList = xmlWriter.element('indexList', {'count': str(counts)}) xmlIndexList.__enter__() xmlWriter.write('\n') _writeIndexListElement(xmlWriter, 'spectrum', spectrumIndexList) _writeIndexListElement(xmlWriter, 'chromatogram', chromatogramIndexList) #Close indexList node xmlIndexList.__exit__(None, None, None) xmlWriter.write('\n')" 2827,"def _writeIndexListElement(xmlWriter, elementName, indexList): """""" #TODO: docstring :param xmlWriter: #TODO: docstring :param elementName: #TODO: docstring :param indexList: #TODO: docstring """""" if indexList: xmlIndex = xmlWriter.element('index', {'name': elementName}) xmlIndex.__enter__() xmlWriter.write('\n') for offset, indexId in indexList: offsetElement = ETREE.Element('offset', {'idRef': indexId}) offsetElement.text = str(offset) xmlWriter.write(offsetElement, pretty_print=True) xmlIndex.__exit__(None, None, None) xmlWriter.write('\n')" 2828,"def _writeMzmlChecksum(xmlWriter, outputFile): """""" #TODO: docstring :param xmlWriter: #TODO: docstring :param outputFile: #TODO: docstring """""" sha = hashlib.sha1(outputFile.getvalue()) sha.update('<fileChecksum>') xmlChecksumElement = ETREE.Element('fileChecksum') xmlChecksumElement.text = sha.hexdigest() xmlWriter.write(xmlChecksumElement, pretty_print=True)" 2829,"def _writeIndexListOffset(xmlWriter, offset): """""" #TODO: docstring :param xmlWriter: #TODO: docstring :param offset: #TODO: docstring """""" xmlIndexListOffset = ETREE.Element('indexListOffset') xmlIndexListOffset.text = str(offset) xmlWriter.write(xmlIndexListOffset, pretty_print=True)" 2830,"def xmlGenScanList(scanList, scanListParams): """""" #TODO: docstring :params scanList: #TODO: docstring :params scanListParams: #TODO: docstring :returns: #TODO: docstring """""" numEntries = len(scanList) xmlScanList = ETREE.Element('scanList', {'count': str(numEntries)}) 
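# Attach the scanList level params first, then append one 'scan' element (with its optional scanWindowList) per entry in scanList.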
maspy.xml.xmlAddParams(xmlScanList, scanListParams) for scan in scanList: #Note: no attributes supported xmlScan = ETREE.Element('scan', {}) maspy.xml.xmlAddParams(xmlScan, scan['params']) #Generate the scanWindowList entry numScanWindows = len(scan['scanWindowList']) if numScanWindows > 0: xmlScanWindowList = ETREE.Element('scanWindowList', {'count': str(numScanWindows)} ) for scanWindow in scan['scanWindowList']: xmlScanWindow = ETREE.Element('scanWindow') maspy.xml.xmlAddParams(xmlScanWindow, scanWindow) xmlScanWindowList.append(xmlScanWindow) xmlScan.append(xmlScanWindowList) xmlScanList.append(xmlScan) return xmlScanList" 2831,"def xmlGenPrecursorList(precursorList): """""" #TODO: docstring :params precursorList: #TODO: docstring :returns: #TODO: docstring """""" numEntries = len(precursorList) xmlPrecursorList = ETREE.Element('precursorList', {'count': str(numEntries)} ) for precursor in precursorList: #Note: no attributes for external referencing supported precursorAttrib = {} if precursor['spectrumRef'] is not None: precursorAttrib.update({'spectrumRef': precursor['spectrumRef']}) xmlPrecursor = ETREE.Element('precursor', precursorAttrib) #Add isolationWindow element if precursor['isolationWindow'] is not None: xmlIsolationWindow = ETREE.Element('isolationWindow') maspy.xml.xmlAddParams(xmlIsolationWindow, precursor['isolationWindow'] ) xmlPrecursor.append(xmlIsolationWindow) #Add selectedIonList element numSelectedIons = len(precursor['selectedIonList']) if numSelectedIons > 0: xmlSelectedIonList = ETREE.Element('selectedIonList', {'count': str(numSelectedIons)} ) for selectedIon in precursor['selectedIonList']: xmlSelectedIon = ETREE.Element('selectedIon') maspy.xml.xmlAddParams(xmlSelectedIon, selectedIon) xmlSelectedIonList.append(xmlSelectedIon) xmlPrecursor.append(xmlSelectedIonList) #Add activation element xmlActivation = ETREE.Element('activation') maspy.xml.xmlAddParams(xmlActivation, precursor['activation']) xmlPrecursor.append(xmlActivation) xmlPrecursorList.append(xmlPrecursor) return xmlPrecursorList" 2832,"def xmlGenBinaryDataArrayList(binaryDataInfo, binaryDataDict, compression='zlib', arrayTypes=None): """""" #TODO: docstring :params binaryDataInfo: #TODO: docstring :params binaryDataDict: #TODO: docstring :params compression: #TODO: docstring :params arrayTypes: #TODO: docstring :returns: #TODO: docstring """""" #Note: any other value for ""compression"" than ""zlib"" results in no # compression #Note: Use arrayTypes parameter to specify the order of the arrays if arrayTypes is None: arrayTypes = [_ for _ in viewkeys(binaryDataInfo)] numEntries = len(binaryDataInfo) xmlBinaryDataArrayList = ETREE.Element('binaryDataArrayList', {'count': str(numEntries)} ) for arrayType in arrayTypes: _, dataTypeParam = maspy.xml.findBinaryDataType(binaryDataInfo[arrayType]['params']) binaryData = binaryDataDict[arrayType] bitEncoding = '64' if binaryData.dtype.str == '<f8' else '32' if binaryData.size > 0: binaryData, arrayLength = maspy.xml.encodeBinaryData(binaryData, bitEncoding, compression ) else: binaryData = '' arrayLength = 0 # --- define binaryDataArray parameters --- # params = list() if bitEncoding == '64': params.append(('MS:1000523', None, None)) else: params.append(('MS:1000521', None, None)) if compression == 'zlib': params.append(('MS:1000574', None, None)) else: params.append(('MS:1000576', None, None)) mandatoryAccessions = ['MS:1000523', 'MS:1000521', 'MS:1000574', 'MS:1000576' ] for param in binaryDataInfo[arrayType]['params']: if param[0] not in 
mandatoryAccessions: params.append(param) #Note: not all attributes supported binaryDataArrayAttrib = {'encodedLength': str(len(binaryData))} for attr in ['dataProcessingRef']: if binaryDataInfo[arrayType][attr] is not None: binaryDataArrayAttrib[attr] = binaryDataInfo[arrayType][attr] xmlBinaryDataArray = ETREE.Element('binaryDataArray', binaryDataArrayAttrib ) maspy.xml.xmlAddParams(xmlBinaryDataArray, params) xmlBinary = ETREE.Element('binary') xmlBinary.text = binaryData xmlBinaryDataArray.append(xmlBinary) xmlBinaryDataArrayList.append(xmlBinaryDataArray) return xmlBinaryDataArrayList" 2833,"def xmlSpectrumFromSmi(index, smi, sai=None, compression='zlib'): """""" #TODO: docstring :param index: The zero-based, consecutive index of the spectrum in the SpectrumList. (mzML specification) :param smi: a SpectrumMetadataItem instance :param sai: a SpectrumArrayItem instance, if none is specified no binaryDataArrayList is written :param compression: #TODO: docstring :returns: #TODO: docstring """""" if sai is not None: arrayLength = [array.size for array in viewvalues(sai.arrays)] if len(set(arrayLength)) != 1: raise Exception('Unequal size for different array in sai.arrays') else: arrayLength = arrayLength[0] else: arrayLength = 0 spectrumAttrib = {'index': str(index), 'id': smi.attributes['id'], 'defaultArrayLength': str(arrayLength)} xmlSpectrum = ETREE.Element('spectrum', **spectrumAttrib) maspy.xml.xmlAddParams(xmlSpectrum, smi.params) #Add the scanList if len(smi.scanList) > 0: xmlSpectrum.append(xmlGenScanList(smi.scanList, smi.scanListParams)) if len(smi.precursorList) > 0: xmlSpectrum.append(xmlGenPrecursorList(smi.precursorList)) if len(smi.productList) > 0: xmlSpectrum.append(xmlGenProductList(smi.productList)) if sai is not None: xmlSpectrum.append(xmlGenBinaryDataArrayList(sai.arrayInfo, sai.arrays, compression=compression )) return xmlSpectrum" 2834,"def xmlChromatogramFromCi(index, ci, compression='zlib'): """""" #TODO: docstring :param index: #TODO: docstring :param ci: #TODO: docstring :param compression: #TODO: docstring :returns: #TODO: docstring """""" arrayLength = [array.size for array in viewvalues(ci.arrays)] if len(set(arrayLength)) != 1: raise Exception('Unequal size for different array in sai.arrays') else: arrayLength = arrayLength[0] chromatogramAttrib = {'index': str(index), 'id': ci.id, 'defaultArrayLength': str(arrayLength)} if 'dataProcessingRef' in ci.attrib: chromatogramAttrib.update({'dataProcessingRef': dataProcessingRef}) xmlChromatogram = ETREE.Element('chromatogram', **chromatogramAttrib) maspy.xml.xmlAddParams(xmlChromatogram, ci.params) #TODO: add appropriate functions for precursor and product if ci.product is not None: raise NotImplementedError() if ci.precursor is not None: raise NotImplementedError() #Sort the array keys, that 'rt' is always the first, necessary for example # for the software ""SeeMS"" to properly display chromatograms. 
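# Reorder so 'rt' comes first; the remaining array types follow in arbitrary set order.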
arrayTypes = set(ci.arrayInfo) if 'rt' in arrayTypes: arrayTypes.remove('rt') arrayTypes = ['rt'] + list(arrayTypes) else: arrayTypes = list(arrayTypes) xmlChromatogram.append(xmlGenBinaryDataArrayList(ci.arrayInfo, ci.arrays, compression=compression, arrayTypes=arrayTypes ) ) return xmlChromatogram" 2835,"def execute(self, query, until_zero=False): """""" Execute a query :param query: query to execute :param until_zero: should query be called until returns 0 :return: """""" if self._conn.closed: self._conn = psycopg2.connect(self._connection_string, connection_factory=pgpm.lib.utils.db.MegaConnection) cur = self._conn.cursor() # be cautious, dangerous thing self._conn.autocommit = True # Check if DB is pgpm enabled if not pgpm.lib.utils.db.SqlScriptsHelper.schema_exists(cur, self._pgpm_schema_name): self._logger.error('Can\'t deploy schemas to DB where pgpm was not installed. ' 'First install pgpm by running pgpm install') self._conn.close() sys.exit(1) # check installed version of _pgpm schema. pgpm_v_db_tuple = pgpm.lib.utils.db.SqlScriptsHelper.get_pgpm_db_version(cur, self._pgpm_schema_name) pgpm_v_db = distutils.version.StrictVersion(""."".join(pgpm_v_db_tuple)) pgpm_v_script = distutils.version.StrictVersion(pgpm.lib.version.__version__) if pgpm_v_script > pgpm_v_db: self._logger.error('{0} schema version is outdated. Please run pgpm install --upgrade first.' .format(self._pgpm_schema_name)) self._conn.close() sys.exit(1) elif pgpm_v_script < pgpm_v_db: self._logger.error('Deployment script\'s version is lower than the version of {0} schema ' 'installed in DB. Update pgpm script first.'.format(self._pgpm_schema_name)) self._conn.close() sys.exit(1) # Executing query if until_zero: self._logger.debug('Running query {0} until it returns 0 (but not more than 10000 times' .format(query)) proc_return_value = None counter = 0 while proc_return_value != 0: cur.execute(query) proc_return_value = cur.fetchone()[0] counter += 1 if counter > 9999: break else: self._logger.debug('Running query {0}'.format(query)) cur.execute(query) # Commit transaction self._conn.commit() self._conn.close() return 0" 2836,"def install_pgpm_to_db(self, user_roles, upgrade=False): """""" Installs package manager """""" if self._conn.closed: self._conn = psycopg2.connect(self._connection_string, connection_factory=pgpm.lib.utils.db.MegaConnection) cur = self._conn.cursor() # get pgpm functions functions_dict = pgpm.lib.utils.misc.collect_scripts_from_sources('lib/db_scripts/functions', False, '.', True, self._logger) triggers_dict = pgpm.lib.utils.misc.collect_scripts_from_sources('lib/db_scripts/triggers', False, '.', True, self._logger) # get current user cur.execute(pgpm.lib.utils.db.SqlScriptsHelper.current_user_sql) current_user = cur.fetchone()[0] # check if current user is a super user cur.execute(pgpm.lib.utils.db.SqlScriptsHelper.is_superuser_sql) is_cur_superuser = cur.fetchone()[0] if not is_cur_superuser: self._logger.debug('User {0} is not a superuser. 
It is recommended that you connect as superuser ' 'when installing pgpm as some operation might need superuser rights' .format(current_user)) # Create schema if it doesn't exist if pgpm.lib.utils.db.SqlScriptsHelper.schema_exists(cur, self._pgpm_schema_name): # Executing pgpm trigger functions if len(triggers_dict) > 0: self._logger.info('Running functions definitions scripts') self._logger.debug(triggers_dict) pgpm.lib.utils.db.SqlScriptsHelper.set_search_path(cur, self._pgpm_schema_name) for key, value in triggers_dict.items(): cur.execute(value) self._logger.debug('Functions loaded to schema {0}'.format(self._pgpm_schema_name)) else: self._logger.debug('No function scripts to deploy') # check installed version of _pgpm schema. pgpm_v_db_tuple = pgpm.lib.utils.db.SqlScriptsHelper.get_pgpm_db_version(cur, self._pgpm_schema_name) pgpm_v_db = distutils.version.StrictVersion(""."".join(pgpm_v_db_tuple)) pgpm_v_script = distutils.version.StrictVersion(pgpm.lib.version.__version__) if pgpm_v_script > pgpm_v_db: if upgrade: self._migrate_pgpm_version(cur, pgpm_v_db, pgpm_v_script, True) else: self._migrate_pgpm_version(cur, pgpm_v_db, pgpm_v_script, False) elif pgpm_v_script < pgpm_v_db: self._logger.error('Deployment script\'s version is lower than the version of {0} schema ' 'installed in DB. Update pgpm script first.'.format(self._pgpm_schema_name)) self._conn.close() sys.exit(1) else: self._logger.error('Can\'t install pgpm as schema {0} already exists'.format(self._pgpm_schema_name)) self._conn.close() sys.exit(1) # Executing pgpm functions if len(functions_dict) > 0: self._logger.info('Running functions definitions scripts') self._logger.debug(functions_dict) pgpm.lib.utils.db.SqlScriptsHelper.set_search_path(cur, self._pgpm_schema_name) for key, value in functions_dict.items(): if value: cur.execute(value) self._logger.debug('Functions loaded to schema {0}'.format(self._pgpm_schema_name)) else: self._logger.debug('No function scripts to deploy') else: # Prepare and execute preamble deployment_script_preamble = pkgutil.get_data(self._main_module_name, 'lib/db_scripts/deploy_prepare_config.sql') self._logger.info('Executing a preamble to install statement') cur.execute(deployment_script_preamble) # Python 3.x doesn't have format for byte strings so we have to convert install_script = pkgutil.get_data(self._main_module_name, 'lib/db_scripts/install.tmpl.sql').decode('utf-8') self._logger.info('Installing package manager') cur.execute(install_script.format(schema_name=self._pgpm_schema_name)) migration_files_list = sorted(pkg_resources.resource_listdir(self._main_module_name, 'lib/db_scripts/migrations/'), key=lambda filename: distutils.version.StrictVersion(filename.split('-')[0])) # Executing pgpm trigger functions if len(triggers_dict) > 0: self._logger.info('Running functions definitions scripts') self._logger.debug(triggers_dict) pgpm.lib.utils.db.SqlScriptsHelper.set_search_path(cur, self._pgpm_schema_name) for key, value in triggers_dict.items(): cur.execute(value) self._logger.debug('Functions loaded to schema {0}'.format(self._pgpm_schema_name)) else: self._logger.debug('No function scripts to deploy') # Executing migration scripts after trigger functions # as they may contain trigger definitions that use functions from pgpm for file_info in migration_files_list: # Python 3.x doesn't have format for byte strings so we have to convert migration_script = pkg_resources.resource_string(self._main_module_name, 'lib/db_scripts/migrations/{0}'.format(file_info))\ 
.decode('utf-8').format(schema_name=self._pgpm_schema_name) self._logger.debug('Running version upgrade script {0}'.format(file_info)) self._logger.debug(migration_script) cur.execute(migration_script) # Executing pgpm functions if len(functions_dict) > 0: self._logger.info('Running functions definitions scripts') self._logger.debug(functions_dict) pgpm.lib.utils.db.SqlScriptsHelper.set_search_path(cur, self._pgpm_schema_name) for key, value in functions_dict.items(): cur.execute(value) self._logger.debug('Functions loaded to schema {0}'.format(self._pgpm_schema_name)) else: self._logger.debug('No function scripts to deploy') # call this function to put in a migration log that there was a migration to the last version # it's a hack basically due to the fact in 0.0.7-0.1.3 migration script migration info was manually inserted # to avoid differences we add migration info to the last version (although it wasn't really a migration) # to be refactored cur.callproc('_add_migration_info', ['0.0.7', pgpm.lib.version.__version__]) # check if users of pgpm are specified pgpm.lib.utils.db.SqlScriptsHelper.revoke_all(cur, self._pgpm_schema_name, 'public') if not user_roles: self._logger.debug('No user was specified to have permisions on _pgpm schema. ' 'This means only user that installed _pgpm will be able to deploy. ' 'We recommend adding more users.') else: # set default privilages to users pgpm.lib.utils.db.SqlScriptsHelper.grant_default_usage_install_privileges( cur, self._pgpm_schema_name, ', '.join(user_roles)) pgpm.lib.utils.db.SqlScriptsHelper.grant_usage_install_privileges( cur, self._pgpm_schema_name, ', '.join(user_roles)) pgpm.lib.utils.db.SqlScriptsHelper.set_search_path(cur, self._pgpm_schema_name) cur.callproc('_upsert_package_info', [self._pgpm_schema_name, self._pgpm_schema_subclass, self._pgpm_version.major, self._pgpm_version.minor, self._pgpm_version.patch, self._pgpm_version.pre, self._pgpm_version.metadata, 'Package manager for Postgres', 'MIT']) # Commit transaction self._conn.commit() self._conn.close() return 0" 2837,"def uninstall_pgpm_from_db(self): """""" Removes pgpm from db and all related metadata (_pgpm schema). Install packages are left as they are :return: 0 if successful and error otherwise """""" drop_schema_cascade_script = 'DROP SCHEMA {schema_name} CASCADE;' if self._conn.closed: self._conn = psycopg2.connect(self._connection_string, connection_factory=pgpm.lib.utils.db.MegaConnection) cur = self._conn.cursor() # get current user cur.execute(pgpm.lib.utils.db.SqlScriptsHelper.current_user_sql) current_user = cur.fetchone()[0] # check if current user is a super user cur.execute(pgpm.lib.utils.db.SqlScriptsHelper.is_superuser_sql) is_cur_superuser = cur.fetchone()[0] if not is_cur_superuser: self._logger.debug('User {0} is not a superuser. 
Only superuser can remove pgpm' .format(current_user)) sys.exit(1) self._logger.debug('Removing pgpm from DB by dropping schema {0}'.format(self._pgpm_schema_name)) cur.execute(drop_schema_cascade_script.format(schema_name=self._pgpm_schema_name)) # Commit transaction self._conn.commit() self._conn.close() return 0" 2838,"def _migrate_pgpm_version(self, cur, version_pgpm_db, version_pgpm_script, migrate_or_leave): """""" Enact migration script from one version of pgpm to another (newer) :param cur: :param migrate_or_leave: True if migrating, False if exiting :return: """""" migrations_file_re = r'^(.*)-(.*).tmpl.sql$' migration_files_list = sorted(pkg_resources.resource_listdir(self._main_module_name, 'lib/db_scripts/migrations/'), key=lambda filename: distutils.version.StrictVersion(filename.split('-')[0])) for file_info in migration_files_list: versions_list = re.compile(migrations_file_re, flags=re.IGNORECASE).findall(file_info) version_a = distutils.version.StrictVersion(versions_list[0][0]) version_b = distutils.version.StrictVersion(versions_list[0][1]) if version_pgpm_script >= version_a and version_b > version_pgpm_db: # Python 3.x doesn't have format for byte strings so we have to convert migration_script = pkg_resources.resource_string(self._main_module_name, 'lib/db_scripts/migrations/{0}'.format(file_info))\ .decode('utf-8').format(schema_name=self._pgpm_schema_name) if migrate_or_leave: self._logger.debug('Running version upgrade script {0}'.format(file_info)) self._logger.debug(migration_script) cur.execute(migration_script) self._conn.commit() pgpm.lib.utils.db.SqlScriptsHelper.set_search_path(cur, self._pgpm_schema_name) cur.callproc('_add_migration_info', [versions_list[0][0], versions_list[0][1]]) self._conn.commit() self._logger.debug('Successfully finished running version upgrade script {0}'.format(file_info)) if not migrate_or_leave: self._logger.error('{0} schema version is outdated. Please run pgpm install --upgrade first.' 
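# Editor's illustrative sketch (not part of the original entry): ``_migrate_pgpm_version``
# above parses migration file names of the form ``<from>-<to>.tmpl.sql``, sorts them by
# their starting version and applies a script only when its target version is newer than
# the installed schema version. A standalone version of that selection, mirroring the
# document's use of ``distutils.version.StrictVersion``; the file names are hypothetical:
import re
from distutils.version import StrictVersion

def applicable_migrations(filenames, db_version, script_version):
    pattern = re.compile(r'^(.*)-(.*)\.tmpl\.sql$', re.IGNORECASE)
    db_v, script_v = StrictVersion(db_version), StrictVersion(script_version)
    selected = []
    for name in sorted(filenames, key=lambda f: StrictVersion(f.split('-')[0])):
        match = pattern.match(name)
        if match and script_v >= StrictVersion(match.group(1)) and StrictVersion(match.group(2)) > db_v:
            selected.append(name)
    return selected

# applicable_migrations(['0.0.7-0.1.3.tmpl.sql', '0.1.3-0.2.0.tmpl.sql'], '0.1.3', '0.2.0')
# -> ['0.1.3-0.2.0.tmpl.sql']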
.format(self._pgpm_schema_name)) self._conn.close() sys.exit(1)" 2839,"def save(self, *args, **kwargs): if self.pk is None: if hasattr(self, 'product'): if not self.description: self.description = self.product self.price_recommended = self.product.price_base elif hasattr(self, 'line_order'): if not self.description: self.description = self.line_order.product self.price_recommended = self.line_order.price_base if hasattr(self, 'tax') and hasattr(self, 'type_tax'): self.tax = self.type_tax.tax if hasattr(self, 'product'): self.tax_label = self.product.product.tax.name if self.product.code: self.code = self.product.code else: self.code = self.product.product.code """""" si al guardar una linea asociada a un documento bloqueado (lock==True), duplicar el documento en una nueva versión """""" self.update_total(force_save=False) if 'force_save' in kwargs: kwargs.pop('force_save') return super(GenLineProduct, self).save(*args, **kwargs)" 2840,"def create_document_from_another(pk, list_lines, MODEL_SOURCE, MODEL_FINAL, MODEL_LINE_SOURCE, MODEL_LINE_FINAL, url_reverse, related_line, related_object, msg_error_relation, msg_error_not_found, unique): """""" pk: pk del documento origen list_lines: listado de pk de lineas de origen MODEL_SOURCE: modelo del documento origen MODEL_FINAL: model del documento final MODEL_LINE_SOURCE: modelo de la linea origen MODEL_LINE_FINAL: modelo de la linea final url_reverse: url del destino related_line: campo del modelo linea final en el que irá asignada la linea origen related_object: campo del modelo linea final en el que irá asignado el objeto final msg_error_relation: Mensaje de error indicando que las lineas ya están relacionadas msg_error_not_found: Mensaje de error indicando que no se encuentra el objeto origen unique: (True/False) Indica si puede haber más de una linea asociada a otras lineas """""" context = {} obj_src = MODEL_SOURCE.objects.filter(pk=pk).first() if list_lines and obj_src: # parse to int list_lines = [int(x) for x in list_lines] # list of lines objects if unique: create = not MODEL_LINE_FINAL.objects.filter(**{""{}__pk__in"".format(related_line): list_lines}).exists() else: create = True """""" si debiendo ser filas unicas no las encuentra en el modelo final, se crea el nuevo documento """""" if create: with transaction.atomic(): obj_final = MODEL_FINAL() obj_final.customer = obj_src.customer obj_final.date = datetime.datetime.now() obj_final.billing_series = obj_src.billing_series if isinstance(obj_final, SalesOrder): obj_final.budget = obj_src obj_final.save() for lb_pk in list_lines: line_src = MODEL_LINE_SOURCE.objects.filter(pk=lb_pk).first() if line_src: line_final = MODEL_LINE_FINAL(**{""{}_id"".format(related_object): obj_final.pk, related_line: line_src}) # line_final.order = obj_final # line_final.line_budget = line_src src_list_fields = [f.name for f in line_src._meta.get_fields()] dst_list_fields = [f.name for f in line_final._meta.get_fields()] if 'product' in src_list_fields and 'product' in dst_list_fields: line_final.product = line_src.product if 'description' in src_list_fields and 'description' in dst_list_fields: line_final.description = line_src.description if 'code' in src_list_fields and 'code' in dst_list_fields: line_final.code = line_src.code # if hasattr(line_src, 'line_order') and hasattr(line_final, 'line_order'): if 'line_order' in src_list_fields and 'line_order' in dst_list_fields: line_final.line_order = line_src.line_order line_final.quantity = line_src.quantity line_final.price_base = line_src.price_base # if 
hasattr(line_src, 'price_recommended') and hasattr(line_final, 'price_recommended'): if 'price_recommended' in src_list_fields and 'price_recommended' in dst_list_fields: line_final.price_recommended = line_src.price_recommended line_final.tax = line_src.tax # line_final.type_tax = line_src.type_tax line_final.discount = line_src.discount if 'removed' in src_list_fields and 'removed' in dst_list_fields: line_final.removed = line_src.removed line_final.save() if hasattr(line_src, 'line_basket_option_sales') and line_src.line_basket_option_sales.exists(): for opt_src in line_src.line_basket_option_sales.all(): opt_dst = SalesLineOrderOption() opt_dst.line_order = line_final opt_dst.product_option = opt_src.product_option opt_dst.product_final = opt_src.product_final opt_dst.quantity = opt_src.quantity opt_dst.save() # bloqueamos el documento origen obj_src.lock = True obj_src.save() # context['url'] = reverse('ordersaless_details', kwargs={'pk': order.pk}) context['url'] = ""{}#/{}"".format(reverse(url_reverse), obj_final.pk) context['obj_final'] = obj_final else: # _(""Hay lineas asignadas a pedidos"") context['error'] = msg_error_relation else: # _('Budget not found') context['error'] = msg_error_not_found return context" 2841,"def create_albaran_automatic(pk, list_lines): """""" creamos de forma automatica el albaran """""" line_bd = SalesLineAlbaran.objects.filter(line_order__pk__in=list_lines).values_list('line_order__pk') if line_bd.count() == 0 or len(list_lines) != len(line_bd[0]): # solo aquellas lineas de pedidos que no estan ya albarandas if line_bd.count() != 0: for x in line_bd[0]: list_lines.pop(list_lines.index(x)) GenLineProduct.create_albaran_from_order(pk, list_lines)" 2842,"def create_invoice_from_albaran(pk, list_lines): """""" la pk y list_lines son de albaranes, necesitamos la info de las lineas de pedidos """""" context = {} if list_lines: new_list_lines = [x[0] for x in SalesLineAlbaran.objects.values_list('line_order__pk').filter( pk__in=[int(x) for x in list_lines] ).exclude(invoiced=True)] if new_list_lines: lo = SalesLineOrder.objects.values_list('order__pk').filter(pk__in=new_list_lines)[:1] if lo and lo[0] and lo[0][0]: new_pk = lo[0][0] context = GenLineProduct.create_invoice_from_order(new_pk, new_list_lines) if 'error' not in context or not context['error']: SalesLineAlbaran.objects.filter( pk__in=[int(x) for x in list_lines] ).exclude(invoiced=True).update(invoiced=True) return context else: error = _('Pedido no encontrado') else: error = _('Lineas no relacionadas con pedido') else: error = _('Lineas no seleccionadas') context['error'] = error return context" 2843,"def create_invoice_from_ticket(pk, list_lines): """""" la pk y list_lines son de ticket, necesitamos la info de las lineas de pedidos """""" context = {} if list_lines: new_list_lines = [x[0] for x in SalesLineTicket.objects.values_list('line_order__pk').filter(pk__in=[int(x) for x in list_lines])] if new_list_lines: lo = SalesLineOrder.objects.values_list('order__pk').filter(pk__in=new_list_lines)[:1] if lo and lo[0] and lo[0][0]: new_pk = lo[0][0] return GenLineProduct.create_invoice_from_order(new_pk, new_list_lines) else: error = _('Pedido no encontrado') else: error = _('Lineas no relacionadas con pedido') else: error = _('Lineas no seleccionadas') context['error'] = error return context" 2844,"def set_options(self, options): """""" options = [{ 'product_option': instance of ProductFinalOption, 'product_final': instance of ProductFinal, 'quantity': Float }, ] """""" with transaction.atomic(): 
for option in options: opt = self.line_basket_option_sales.filter( product_option=option['product_option'] ).first() if opt: # edit change = False if opt.quantity != option['quantity']: opt.quantity = option['quantity'] change = True if opt.product_final != option['product_final']: opt.product_final = option['product_final'] change = True if change: opt.save() else: # new opt = SalesLineBasketOption() # raise Exception(self.pk, self.__dict__, self) # raise Exception(self.pk) opt.line_budget = SalesLineBasket.objects.get(pk=self.pk) opt.product_option = option['product_option'] opt.product_final = option['product_final'] opt.quantity = option['quantity'] opt.save()" 2845,"def findmodules(path, recurse=False): """""" Looks up the modules for the given path and returns a list of the packages. If the recurse flag is set to True, then it will look through the package recursively. :param path | <str> recurse | <bool> :return ([<str>, ..] modules, [<str>, ..] paths) """""" output = set() roots = set() for root, folders, files in os.walk(path): # add packages for folder in folders: pkgpath = os.path.join(root, folder, '__init__.py') if os.path.exists(pkgpath): output.add(packageFromPath(pkgpath)) # add modules rootpth = packageRootPath(root) rootpkg = packageFromPath(root) roots.add(rootpth) for file_ in files: name, ext = os.path.splitext(file_) if ext not in ('.py', '.pyo', '.pyc'): continue if name in ('__init__', '__plugins__'): continue if rootpkg: output.add(rootpkg + '.' + name) else: output.add(name) if not recurse: break return list(output), list(roots)" 2846,"def importfile(filename): """""" Imports a module specifically from a file. :param filename | <str> :return <module> || None """""" pkg = packageFromPath(filename, includeModule=True) root = packageRootPath(filename) if root not in sys.path: sys.path.insert(0, root) __import__(pkg) return sys.modules[pkg]" 2847,"def importmodules(package_or_toc, ignore=None, recurse=False, silent=None): """""" Imports all the sub-modules of a package, a useful technique for developing plugins. By default, this method will walk the directory structure looking for submodules and packages. You can also specify a __toc__ attribute on the package to define the sub-modules that you want to import. :param package_or_toc | <package> || <str> filename ignore | [<str>, ..] || None recurse | <bool> silent | <bool> :usage |>>> import projex |>>> import projex.docgen |>>> projex.importmodules(projex.docgen) |[<projex.docgen.commands>, <projex.docgen.default_config>, ..] :return [<module> || <package>, ..] 
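# Editor's illustrative sketch (not part of the original entry): ``importfile`` above
# imports a module from a file by adding its package root to sys.path and calling
# __import__. For comparison only -- this is a standard-library alternative, not the
# document's implementation -- a single file can also be loaded directly:
import importlib.util

def import_from_path(name, filename):
    spec = importlib.util.spec_from_file_location(name, filename)
    module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(module)
    return module

# plugin = import_from_path('myplugin', '/path/to/myplugin.py')  # hypothetical path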
"""""" if package_or_toc in __IMPORTED: return __IMPORTED.add(package_or_toc) if silent is None: silent = os.environ.get('PROJEX_LOG_IMPORTS', 'False').lower() != 'true' toc = [] output = [] if ignore is None: ignore = [] # import from a set toc file if type(package_or_toc) in (str, unicode): # import a toc file if os.path.isfile(package_or_toc): f = open(package_or_toc, 'r') toc = f.readlines() f.close() # import from a directory elif os.path.isdir(package_or_toc): toc, paths = findmodules(package_or_toc, recurse=recurse) for path in paths: if path in sys.path: sys.path.remove(path) sys.path.insert(0, path) # import a module by string else: use_sub_modules = False if package_or_toc.endswith('.*'): use_sub_modules = True package_or_toc = package_or_toc[:-2] try: __import__(package_or_toc) module = sys.modules[package_or_toc] except ImportError as err: if not silent: logger.error('Unable to import module: %s', package_or_toc) logger.debug(traceback.print_exc()) return [] except KeyError: if not silent: logger.error('Unable to find module: %s', package_or_toc) return [] if use_sub_modules: base = os.path.dirname(module.__file__) for path in os.listdir(base): if path.endswith('.py') and path != '__init__.py': importmodules(package_or_toc + '.' + path.replace('.py', '')) elif os.path.isdir(os.path.join(base, path)): importmodules(package_or_toc + '.' + path) else: return importmodules(module) # import from a given package else: toc = getattr(package_or_toc, '__toc__', []) if not toc: toc = [] recurse = getattr(package_or_toc, '__recurse__', False) try: paths = package_or_toc.__path__ except AttributeError: try: paths = [os.path.dirname(package_or_toc.__file__)] except AttributeError: paths = [] for path in paths: data = findmodules(path, recurse=recurse) toc += data[0] for sub_path in data[1]: if sub_path in sys.path: sys.path.remove(sub_path) sys.path.insert(0, sub_path) setattr(package_or_toc, '__toc__', toc) # import using standard means (successful for when dealing with for modname in sorted(toc): # check against a callable ignore method if callable(ignore) and ignore(modname): continue elif type(modname) in (list, tuple) and modname not in ignore: continue # ignore preset options if modname.endswith('__init__'): continue elif modname.endswith('__plugins__'): continue try: output.append(sys.modules[modname]) continue except KeyError: pass if not silent: logger.debug('Importing: %s' % modname) try: mod = importlib.import_module(modname) sys.modules[modname] = mod output.append(mod) except ImportError, err: if not silent: logger.error('Error importing module: %s', modname) logger.debug(traceback.print_exc()) return output" 2848,"def importobject(module_name, object_name): """""" Imports the object with the given name from the inputted module. :param module_name | <str> object_name | <str> :usage |>>> import projex |>>> modname = 'projex.envmanager' |>>> attr = 'EnvManager' |>>> EnvManager = projex.importobject(modname, attr) :return <object> || None """""" if module_name not in sys.modules: try: __import__(module_name) except ImportError: logger.debug(traceback.print_exc()) logger.error('Could not import module: %s', module_name) return None module = sys.modules.get(module_name) if not module: logger.warning('No module %s found.' % module_name) return None if not hasattr(module, object_name): logger.warning('No object %s in %s.' 
% (object_name, module_name)) return None return getattr(module, object_name)" 2849,"def packageRootPath(path): """""" Returns the root file path that defines a Python package from the inputted path. :param path | <str> :return <str> """""" path = nstr(path) if os.path.isfile(path): path = os.path.dirname(path) parts = os.path.normpath(path).split(os.path.sep) package_parts = [] for i in range(len(parts), 0, -1): filename = os.path.sep.join(parts[:i] + ['__init__.py']) if not os.path.isfile(filename): break package_parts.insert(0, parts[i - 1]) if not package_parts: return path return os.path.abspath(os.path.sep.join(parts[:-len(package_parts)]))" 2850,"def packageFromPath(path, includeModule=False): """""" Determines the python package path based on the inputted path. :param path | <str> :return <str> """""" path = nstr(path) module = '' if os.path.isfile(path): path, fname = os.path.split(path) if fname.endswith('.py') and fname != '__init__.py': module = fname.split('.')[0] parts = os.path.normpath(path).split(os.path.sep) package_parts = [] for i in range(len(parts), 0, -1): filename = os.path.sep.join(parts[:i] + ['__init__.py']) if not os.path.isfile(filename): break package_parts.insert(0, parts[i - 1]) if includeModule and module: package_parts.append(module) return '.'.join(package_parts)" 2851,"def website(app=None, mode='home', subcontext='UserGuide'): """""" Returns the website location for projex software. :param app | <str> || None mode | <str> (home, docs, blog, dev) :return <str> """""" base_url = WEBSITES.get(mode, '') if app and base_url: opts = {'app': app, 'base_url': base_url} base_url = SUBCONTEXT_MAP.get((mode, subcontext), base_url) base_url %= opts return base_url" 2852,"def _check_values(in_values): """""" Check if values need to be converted before they get mogrify'd """""" out_values = [] for value in in_values: # if isinstance(value, (dict, list)): # out_values.append(json.dumps(value)) # else: out_values.append(value) return tuple(out_values)" 2853,"def insert(self, table, data_list, return_cols='id'): """""" Create a bulk insert statement which is much faster (~2x in tests with 10k & 100k rows and n cols) for inserting data then executemany() TODO: Is there a limit of length the query can be? If so handle it. 
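# Editor's illustrative sketch (not part of the original entry): ``packageFromPath``
# above derives a dotted package name by walking up the directory tree for as long as
# an ``__init__.py`` file is present. A simplified standalone version of that idea,
# assuming a regular (non-namespace) package layout:
import os

def dotted_package(path):
    parts = []
    current = os.path.abspath(path)
    while os.path.isfile(os.path.join(current, '__init__.py')):
        current, tail = os.path.split(current)
        parts.insert(0, tail)
    return '.'.join(parts)

# dotted_package('/src/projex/docgen')  # hypothetical layout -> 'projex.docgen'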
"""""" data_list = copy.deepcopy(data_list) # Create deepcopy so the original list does not get modified # Make sure that `data_list` is a list if not isinstance(data_list, list): data_list = [data_list] # Make sure data_list has content if len(data_list) == 0: # No need to continue return [] # Data in the list must be dicts (just check the first one) if not isinstance(data_list[0], dict): logger.critical(""Data must be a list of dicts"") # Do not return here, let the exception handle the error that will be thrown when the query runs # Make sure return_cols is a list if return_cols is None or len(return_cols) == 0 or return_cols[0] is None: return_cols = '' elif not isinstance(return_cols, list): return_cols = [return_cols] if len(return_cols) > 0: return_cols = 'RETURNING ' + ','.join(return_cols) try: with self.getcursor() as cur: query = ""INSERT INTO {table} ({fields}) VALUES {values} {return_cols}""\ .format(table=table, fields='""{0}""'.format('"", ""'.join(data_list[0].keys())), values=','.join(['%s'] * len(data_list)), return_cols=return_cols, ) values = [] for row in [tuple(v.values()) for v in data_list]: values.append(_check_values(row)) query = cur.mogrify(query, values) cur.execute(query) try: return cur.fetchall() except Exception: return None except Exception as e: logger.exception(""Error inserting data"") logger.debug(""Error inserting data: {data}"".format(data=data_list)) raise e.with_traceback(sys.exc_info()[2])" 2854,"def upsert(self, table, data_list, on_conflict_fields, on_conflict_action='update', update_fields=None, return_cols='id'): """""" Create a bulk upsert statement which is much faster (~6x in tests with 10k & 100k rows and n cols) for upserting data then executemany() TODO: Is there a limit of length the query can be? If so handle it. 
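# Editor's illustrative sketch (not part of the original entry): ``insert`` above builds
# one multi-row statement -- ``INSERT ... VALUES %s,%s,... RETURNING id`` -- and lets
# ``cursor.mogrify`` expand each row tuple into a record, which is what makes it faster
# than executemany(). A minimal version of that assembly; the table and columns are
# hypothetical and ``cur`` is an open psycopg2 cursor:
def bulk_insert_returning_ids(cur, table, rows):
    columns = '", "'.join(rows[0].keys())
    placeholders = ','.join(['%s'] * len(rows))
    query = 'INSERT INTO {t} ("{c}") VALUES {v} RETURNING id'.format(
        t=table, c=columns, v=placeholders)
    values = [tuple(row.values()) for row in rows]
    cur.execute(cur.mogrify(query, values))
    return [row[0] for row in cur.fetchall()]

# bulk_insert_returning_ids(cur, 'people', [{'name': 'a'}, {'name': 'b'}])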
"""""" data_list = copy.deepcopy(data_list) # Create deepcopy so the original list does not get modified # Make sure that `data_list` is a list if not isinstance(data_list, list): data_list = [data_list] # Make sure data_list has content if len(data_list) == 0: # No need to continue return [] # Data in the list must be dicts (just check the first one) if not isinstance(data_list[0], dict): logger.critical(""Data must be a list of dicts"") # TODO: raise some error here rather then returning None return None # Make sure on_conflict_fields is a list if not isinstance(on_conflict_fields, list): on_conflict_fields = [on_conflict_fields] # Make sure on_conflict_fields has data if len(on_conflict_fields) == 0 or on_conflict_fields[0] is None: # No need to continue logger.critical(""Must pass in `on_conflict_fields` argument"") # TODO: raise some error here rather then returning None return None # Make sure return_cols is a list if return_cols is None or len(return_cols) == 0 or return_cols[0] is None: return_cols = '' elif not isinstance(return_cols, list): return_cols = [return_cols] if len(return_cols) > 0: return_cols = 'RETURNING ' + ','.join(return_cols) # Make sure update_fields is a list/valid if on_conflict_action == 'update': if not isinstance(update_fields, list): update_fields = [update_fields] # If noting is passed in, set `update_fields` to all (data_list-on_conflict_fields) if len(update_fields) == 0 or update_fields[0] is None: update_fields = list(set(data_list[0].keys()) - set(on_conflict_fields)) # If update_fields is empty here that could only mean that all fields are set as conflict_fields if len(update_fields) == 0: logger.critical(""Not all the fields can be `on_conflict_fields` when doing an update"") # TODO: raise some error here rather then returning None return None # If everything is good to go with the update fields fields_update_tmp = [] for key in data_list[0].keys(): fields_update_tmp.append('""{0}""=""excluded"".""{0}""'.format(key)) conflict_action_sql = 'UPDATE SET {update_fields}'\ .format(update_fields=', '.join(fields_update_tmp)) else: # Do nothing on conflict conflict_action_sql = 'NOTHING' try: with self.getcursor() as cur: query = """"""INSERT INTO {table} ({insert_fields}) VALUES {values} ON CONFLICT ({on_conflict_fields}) DO {conflict_action_sql} {return_cols} """""".format(table=table, insert_fields='""{0}""'.format('"",""'.join(data_list[0].keys())), values=','.join(['%s'] * len(data_list)), on_conflict_fields=','.join(on_conflict_fields), conflict_action_sql=conflict_action_sql, return_cols=return_cols, ) # Get all the values for each row and create a lists of lists values = [] for row in [list(v.values()) for v in data_list]: values.append(_check_values(row)) query = cur.mogrify(query, values) cur.execute(query) try: return cur.fetchall() except Exception: return None except Exception as e: logger.exception(""Error upserting data"") logger.debug(""Error upserting data: {data}"".format(data=data_list)) raise e.with_traceback(sys.exc_info()[2])" 2855,"def update(self, table, data_list, matched_field=None, return_cols='id'): """""" Create a bulk insert statement which is much faster (~2x in tests with 10k & 100k rows and 4 cols) for inserting data then executemany() TODO: Is there a limit of length the query can be? If so handle it. 
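# Editor's note, shown as a sketch rather than the exact output: for a hypothetical
# table ``stats(id, views)`` with a unique ``id``, the statement assembled by ``upsert``
# above has roughly this shape (rows are expanded the same way as in ``insert``):
#
#   INSERT INTO stats ("id","views") VALUES (1, 10),(2, 20)
#   ON CONFLICT (id) DO UPDATE SET "id"="excluded"."id", "views"="excluded"."views"
#   RETURNING id
#
# ``excluded`` is PostgreSQL's pseudo-row holding the values proposed for insertion.
# Note that the update list above is built from every column of the first row, so the
# conflict column is reassigned to the value it already has, which is harmless.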
"""""" data_list = copy.deepcopy(data_list) # Create deepcopy so the original list does not get modified if matched_field is None: # Assume the id field logger.info(""Matched field not defined, assuming the `id` field"") matched_field = 'id' # Make sure that `data_list` is a list if not isinstance(data_list, list): data_list = [data_list] if len(data_list) == 0: # No need to continue return [] # Make sure return_cols is a list if return_cols is None or len(return_cols) == 0 or return_cols[0] is None: return_cols = '' elif not isinstance(return_cols, list): return_cols = [return_cols] if len(return_cols) > 0: return_cols = 'RETURNING ' + ','.join(return_cols) # Data in the list must be dicts (just check the first one) if not isinstance(data_list[0], dict): logger.critical(""Data must be a list of dicts"") # Do not return here, let the exception handle the error that will be thrown when the query runs try: with self.getcursor() as cur: query_list = [] # TODO: change to return data from the database, not just what you passed in return_list = [] for row in data_list: if row.get(matched_field) is None: logger.debug(""Cannot update row. Missing field {field} in data {data}"" .format(field=matched_field, data=row)) logger.error(""Cannot update row. Missing field {field} in data"".format(field=matched_field)) continue # Pull matched_value from data to be updated and remove that key matched_value = row.get(matched_field) del row[matched_field] query = ""UPDATE {table} SET {data} WHERE {matched_field}=%s {return_cols}""\ .format(table=table, data=','.join(""%s=%%s"" % u for u in row.keys()), matched_field=matched_field, return_cols=return_cols ) values = list(row.values()) values.append(matched_value) values = _check_values(values) query = cur.mogrify(query, values) query_list.append(query) return_list.append(matched_value) finial_query = b';'.join(query_list) cur.execute(finial_query) try: return cur.fetchall() except Exception: return None except Exception as e: logger.exception(""Error updating data"") logger.debug(""Error updating data: {data}"".format(data=data_list)) raise e.with_traceback(sys.exc_info()[2])" 2856,"def clone(srcpath, destpath, vcs=None): """"""Clone an existing repository. :param str srcpath: Path to an existing repository :param str destpath: Desired path of new repository :param str vcs: Either ``git``, ``hg``, or ``svn`` :returns VCSRepo: The newly cloned repository If ``vcs`` is not given, then the repository type is discovered from ``srcpath`` via :func:`probe`. """""" vcs = vcs or probe(srcpath) cls = _get_repo_class(vcs) return cls.clone(srcpath, destpath)" 2857,"def probe(path): """"""Probe a repository for its type. :param str path: The path of the repository :raises UnknownVCSType: if the repository type couldn't be inferred :returns str: either ``git``, ``hg``, or ``svn`` This function employs some heuristics to guess the type of the repository. 
"""""" import os from .common import UnknownVCSType if os.path.isdir(os.path.join(path, '.git')): return 'git' elif os.path.isdir(os.path.join(path, '.hg')): return 'hg' elif ( os.path.isfile(os.path.join(path, 'config')) and os.path.isdir(os.path.join(path, 'objects')) and os.path.isdir(os.path.join(path, 'refs')) and os.path.isdir(os.path.join(path, 'branches')) ): return 'git' elif ( os.path.isfile(os.path.join(path, 'format')) and os.path.isdir(os.path.join(path, 'conf')) and os.path.isdir(os.path.join(path, 'db')) and os.path.isdir(os.path.join(path, 'locks')) ): return 'svn' else: raise UnknownVCSType(path)" 2858,"def open(path, vcs=None): """"""Open an existing repository :param str path: The path of the repository :param vcs: If specified, assume the given repository type to avoid auto-detection. Either ``git``, ``hg``, or ``svn``. :raises UnknownVCSType: if the repository type couldn't be inferred If ``vcs`` is not specified, it is inferred via :func:`probe`. """""" import os assert os.path.isdir(path), path + ' is not a directory' vcs = vcs or probe(path) cls = _get_repo_class(vcs) return cls(path)" 2859,"def _check_attributes(self, attributes, extra=None): """"""Check if attributes given to the constructor can be used to instanciate a valid node."""""" extra = extra or () unknown_keys = set(attributes) - set(self._possible_attributes) - set(extra) if unknown_keys: logger.warning('%s got unknown attributes: %s' % (self.__class__.__name__, unknown_keys))" 2860,"def get(self, name, strict=True): """"""Get an attribute of the holder (read-only access)."""""" if not isinstance(name, str) or name.startswith('_'): raise AttributeError(self.__class__.__name__, name) elif strict and name not in self._possible_attributes: raise AttributeError('%s is not a valid attribute of %r.' % (name, self)) elif name in self._attributes: return self._attributes[name] else: raise exceptions.AttributeNotProvided(name)" 2861,"def addMonths(date, months): """""" Returns the new date based on the inputted months. :param date | <datetime.date> months | <int> :return <datetime.date> """""" # map from Qt information if type(date).__name__ in ('QDate', 'QDateTime', 'QTime'): date = date.toPython() mult = months / abs(months) years = mult * (abs(months) / 12) months = mult * (abs(months) % 12) # calculate the new month month = date.month + months if month < 1: years -= 1 month = 12 - month elif 12 < month: years += 1 month %= 12 # calculate the new year year = date.year + years # calculate the new day check = datetime.date(year, month, 1) days = daysInMonth(check) return datetime.date(year, month, min(date.day, days))" 2862,"def addYears(date, years): """""" Returns the new date based on the inputted number of years. :param date | <datetime.date> years | <int> :return <datetime.date> """""" # map from Qt information if type(date).__name__ in ('QDate', 'QDateTime', 'QTime'): date = date.toPython() return datetime.date(date.year + years, date.month, date.day)" 2863,"def daysInMonth(date): """""" Returns the number of the days in the month for the given date. This will take into account leap years based on the inputted date's year. :param date | <datetime.date> :return <int> """""" # map from Qt information if type(date).__name__ in ('QDate', 'QDateTime', 'QTime'): date = date.toPython() month = date.month # look for a leap year if month == 2 and not date.year % 4: return 29 return DaysInMonth.get(month, -1)" 2864,"def daysInYear(date): """""" Returns the number of days in the year for the given date. 
:param date | <datetime.date> || <int> :return <int> """""" # map from Qt information if type(date).__name__ in ('QDate', 'QDateTime', 'QTime'): date = date.toPython() if type(date) != int: year = date.year else: year = date if not year % 4: return 366 return 365" 2865,"def displayName(date, options=None, format='%b %d, %Y'): """""" Returns the display name for the inputted date, given the list of options. :param date | <datetime.date> options | <projex.dates.Names> format | <str> :return <str> """""" # map from Qt information if type(date).__name__ in ('QDate', 'QDateTime', 'QTime'): date = date.toPython() if isinstance(date, datetime.datetime): time = ' @ ' + date.strftime('%I:%M%p').strip('0M').lower() date = date.date() else: time = '' today = datetime.date.today() delta = date - today if delta.days == 0: return 'Today' + time elif delta.days == -1: return 'Yesterday' + time elif delta.days == 1: return 'Tomorrow' + time elif abs(delta.days) < 8: # look for different weeks if date.isocalendar()[1] != today.isocalendar()[1]: qualifier = 'Last ' if delta.days < 0 else 'Next ' else: qualifier = '' return qualifier + date.strftime('%A') + time else: return date.strftime(format)" 2866,"def named(date, options=None): """""" Returns the best named option for the inputted date based on the inputted date name. :param date | <datetime.date> options | <projex.dates.Names> || None :return <projex.dates.Names> """""" # map from Qt information if type(date).__name__ in ('QDate', 'QDateTime', 'QTime'): date = date.toPython() if options is None: options = Names.all() # use the date information if isinstance(date, datetime.datetime): date = date.date() # grab today's information today = datetime.date.today() today_month = today.month today_year, today_week, today_weekday = today.isocalendar() # grab yesterday's information yesterday = today + datetime.timedelta(days=-1) tomorrow = today + datetime.timedelta(days=1) # grab the date information date_month = date.month date_year, date_week, date_weekday = date.isocalendar() # look for today if today == date and Names.Today & options: return Names.Today # look for yesterday elif yesterday == date and Names.Yesterday & options: return Names.Yesterday # look for tomorrow elif tomorrow == date and Names.Tomorrow & options: return Names.Tomorrow # look for same year options elif today_year == date_year: # look for same month options if today_month == date_month: # look for this week if today_week == date_week and Names.ThisWeek & options: return Names.ThisWeek # look for last week elif today_week == date_week + 1 and Names.LastWeek & options: return Names.LastWeek # look for next week elif today_week == date_week - 1 and Names.NextWeek & options: return Names.NextWeek # look for this month elif Names.ThisMonth & options: return Names.ThisMonth # look for last month options elif today_month == date_month + 1 and Names.LastMonth & options: return Names.LastMonth # look for next month options elif today_month == date_month - 1 and Names.NextMonth & options: return Names.NextMonth # look for this year options elif Names.ThisYear & options: return Names.ThisYear # look for last year options elif today_year == date_year + 1 and Names.LastYear & options: return Names.LastYear # look for next year options elif today_year == date_year - 1 and Names.NextYear & options: return Names.NextYear # look for past dates elif date < today and Names.Past & options: return Names.Past # look for future dates elif today < date and Names.Future & options: return Names.Future return 
Names.Sometime" 2867,"def repeating(first, mode=RepeatMode.Weekly, step=1, flags=0, startAt=None, repeatUntil=None, maximum=None): """""" Returns a list of repeating dates from the inputted start date based on the given mode. If an repeatUntil date is supplied, then the results will be capped once the last date is reached, otherwise, the maximum number of results will be returned. :param first | <datetime.date> mode | <RepeatMode> step | <int> | value must be greater than 1 flags | <RepeatFlags> startAt | <datetime.date> || None repeatUntil | <datetime.date> || None maximum | <int> || None :return [<datetime.date>, ..] """""" if repeatUntil is None and maximum is None: maximum = 100 # calculate the dates step = max(1, step) output = [] # create the start at information if startAt is not None and first < startAt: if mode == RepeatMode.Monthly: curr = datetime.date(startAt.year, startAt.month, first.day) elif mode == RepeatMode.Yearly: curr = datetime.date(startAt.year, first.month, first.day) else: curr = first else: curr = first if curr < first: curr = first # determine if any days are flagged any_days = 0 for value in DaysOfWeek.values(): any_days |= value # repeat on a daily basis while True: # increment daily if mode == RepeatMode.Weekly: # repeat for specific days if flags & any_days: start = curr + datetime.timedelta(days=1 - curr.isoweekday()) exit_loop = False for i in range(7): day = start + datetime.timedelta(days=i) if day < first: continue elif repeatUntil is not None and repeatUntil < day: exit_loop = True break flag = DaysOfWeek[day.isoweekday()] # skip this day of the week when repeating if not (flags & flag): continue if startAt is None or startAt <= day: output.append(day) if exit_loop: break else: if repeatUntil is not None and repeatUntil < curr: break if startAt is None or startAt <= curr: output.append(curr) curr = curr + datetime.timedelta(days=7 * step) # break when the end first is hit if repeatUntil is not None and repeatUntil < curr: break # break when the maximum is hit elif maximum is not None and len(output) == maximum: break # increment weekly elif mode == RepeatMode.Weekly: if startAt is None or startAt <= curr: output.append(curr) curr = curr + datetime.timedelta(days=step * 7) # increment monthly elif mode == RepeatMode.Monthly: if startAt is None or startAt <= curr: output.append(curr) # approximate the delta curr = addMonths(curr, step) # check to see if we're repeating on the day of the week in # the month, or the actual day of the month if (flags & RepeatFlags.DayOfTheWeek) != 0: shift = curr.isodayofweek() - first.isoweekday() curr = curr + datetime.timedelta(days=shift) # increment yearly elif mode == RepeatMode.Yearly: if startAt is None or startAt <= curr: output.append(curr) curr = addYears(curr, step) return output" 2868,"def weekdays(start, end): """""" Returns the number of weekdays between the inputted start and end dates. This would be the equivalent of doing (end - start) to get the number of calendar days between the two dates. 
:param start | <datetime.date> end | <datetime.date> :return <int> """""" # don't bother calculating anything for the same inputted date if start == end: return int(start.isoweekday() not in (6, 7)) elif end < start: return -weekdays(end, start) else: strt_weekday = start.isoweekday() end_weekday = end.isoweekday() # calculate in the positive direction if end < start: return -weekdays(end, start) # calculate from the monday after the start if 5 < strt_weekday: start = start + datetime.timedelta(days=8 - strt_weekday) # calculate from the friday before the end if 5 < end_weekday: end = end - datetime.timedelta(days=end_weekday - 5) remainder = end.isoweekday() - start.isoweekday() end = end - datetime.timedelta(days=remainder) # if the end is now before the start, then both dates fell on a weekend if end < start: return 0 # otherwise, if the dates normalized to each other, then return the # remainder elif end == start: return remainder + 1 # remove the number of weekends from the start and end dates days = ((end - start).days + 1) total_days = abs(days) multiplier = days / total_days weekends = int(round(total_days / 7.0) * 2) week_days = ((total_days - weekends) + remainder) * multiplier return week_days" 2869,"def main(args=None): """""" Entry point for the tag CLI. Isolated as a method so that the CLI can be called by other Python code (e.g. for testing), in which case the arguments are passed to the function. If no arguments are passed to the function, parse them from the command line. """""" if args is None: args = tag.cli.parser().parse_args() assert args.cmd in mains mainmethod = mains[args.cmd] mainmethod(args)" 2870,"def _build_request(request): """"""Build message to transfer over the socket from a request."""""" msg = bytes([request['cmd']]) if 'dest' in request: msg += bytes([request['dest']]) else: msg += b'\0' if 'sha' in request: msg += request['sha'] else: for dummy in range(64): msg += b'0' logging.debug(""Request (%d): %s"", len(msg), msg) return msg" 2871,"def main(): """"""Show example using the API."""""" __async__ = True logging.basicConfig(format=""%(levelname)-10s %(message)s"", level=logging.DEBUG) if len(sys.argv) != 2: logging.error(""Must specify configuration file"") sys.exit() config = configparser.ConfigParser() config.read(sys.argv[1]) password = config.get('default', 'password') if __async__: client = Client(config.get('default', 'host'), config.getint('default', 'port'), password, _callback) else: client = Client(config.get('default', 'host'), config.getint('default', 'port'), password) status = client.messages() msg = status[0] print(msg) print(client.mp3(msg['sha'].encode('utf-8'))) while True: continue" 2872,"def start(self): """"""Start thread."""""" if not self._thread: logging.info(""Starting asterisk mbox thread"") # Ensure signal queue is empty try: while True: self.signal.get(False) except queue.Empty: pass self._thread = threading.Thread(target=self._loop) self._thread.setDaemon(True) self._thread.start()" 2873,"def stop(self): """"""Stop thread."""""" if self._thread: self.signal.put(""Stop"") self._thread.join() if self._soc: self._soc.shutdown() self._soc.close() self._thread = None" 2874,"def _connect(self): """"""Connect to server."""""" self._soc = socket.socket(socket.AF_INET, socket.SOCK_STREAM) self._soc.connect((self._ipaddr, self._port)) self._soc.send(_build_request({'cmd': cmd.CMD_MESSAGE_PASSWORD, 'sha': self._password}))" 2875,"def _recv_msg(self): """"""Read a message from the server."""""" command = ord(recv_blocking(self._soc, 
1)) msglen = recv_blocking(self._soc, 4) msglen = ((msglen[0] << 24) + (msglen[1] << 16) + (msglen[2] << 8) + msglen[3]) msg = recv_blocking(self._soc, msglen) return command, msg" 2876,"def _loop(self): """"""Handle data."""""" request = {} connected = False while True: timeout = None sockets = [self.request_queue, self.signal] if not connected: try: self._clear_request(request) self._connect() self._soc.send(_build_request( {'cmd': cmd.CMD_MESSAGE_LIST})) self._soc.send(_build_request( {'cmd': cmd.CMD_MESSAGE_CDR_AVAILABLE})) connected = True except ConnectionRefusedError: timeout = 5.0 if connected: sockets.append(self._soc) readable, _writable, _errored = select.select( sockets, [], [], timeout) if self.signal in readable: break if self._soc in readable: # We have incoming data try: command, msg = self._recv_msg() self._handle_msg(command, msg, request) except (RuntimeError, ConnectionResetError): logging.warning(""Lost connection"") connected = False self._clear_request(request) if self.request_queue in readable: request = self.request_queue.get() self.request_queue.task_done() if not connected: self._clear_request(request) else: if (request['cmd'] == cmd.CMD_MESSAGE_LIST and self._status and (not self._callback or 'sync' in request)): self.result_queue.put( [cmd.CMD_MESSAGE_LIST, self._status]) request = {} else: self._soc.send(_build_request(request))" 2877,"def mp3(self, sha, **kwargs): """"""Get raw MP3 of a message."""""" return self._queue_msg({'cmd': cmd.CMD_MESSAGE_MP3, 'sha': _get_bytes(sha)}, **kwargs)" 2878,"def delete(self, sha, **kwargs): """"""Delete a message."""""" return self._queue_msg({'cmd': cmd.CMD_MESSAGE_DELETE, 'sha': _get_bytes(sha)}, **kwargs)" 2879,"def get_cdr(self, start=0, count=-1, **kwargs): """"""Request range of CDR messages"""""" sha = encode_to_sha(""{:d},{:d}"".format(start, count)) return self._queue_msg({'cmd': cmd.CMD_MESSAGE_CDR, 'sha': sha}, **kwargs)" 2880,"def path(self) -> Path: """"""A Path for this name object joining field names from `self.get_path_pattern_list` with this object's name"""""" args = list(self._iter_translated_field_names(self.get_path_pattern_list())) args.append(self.get_name()) return Path(*args)" 2881,"def fold(self, predicate): """"""Takes a predicate and applies it to each node starting from the leaves and making the return value propagate."""""" childs = {x:y.fold(predicate) for (x,y) in self._attributes.items() if isinstance(y, SerializableTypedAttributesHolder)} return predicate(self, childs)" 2882,"def the_one(cls): """"""Get the single global HelpUrlExpert object."""""" if cls.THE_ONE is None: cls.THE_ONE = cls(settings.HELP_TOKENS_INI_FILE) return cls.THE_ONE" 2883,"def get_config_value(self, section_name, option, default_option=""default""): """""" Read a value from the configuration, with a default. Args: section_name (str): name of the section in the configuration from which the option should be found. option (str): name of the configuration option. default_option (str): name of the default configuration option whose value should be returned if the requested option is not found. Returns: str: the value from the ini file. 
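# Editor's illustrative sketch (not part of the original entry): ``_recv_msg`` above
# reads one command byte, a 4-byte big-endian payload length, then the payload. The
# shift arithmetic is equivalent to int.from_bytes(..., 'big'); ``recv_blocking`` here
# stands in for the helper the document already uses:
def read_frame(recv_blocking, soc):
    command = recv_blocking(soc, 1)[0]
    length = int.from_bytes(recv_blocking(soc, 4), 'big')
    payload = recv_blocking(soc, length)
    return command, payload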
"""""" if self.config is None: self.config = configparser.ConfigParser() self.config.read(self.ini_file_name) if option: try: return self.config.get(section_name, option) except configparser.NoOptionError: log.debug( ""Didn't find a configuration option for '%s' section and '%s' option"", section_name, option, ) return self.config.get(section_name, default_option)" 2884,"def url_for_token(self, token): """"""Find the full URL for a help token."""""" book_url = self.get_config_value(""pages"", token) book, _, url_tail = book_url.partition(':') book_base = settings.HELP_TOKENS_BOOKS[book] url = book_base lang = getattr(settings, ""HELP_TOKENS_LANGUAGE_CODE"", None) if lang is not None: lang = self.get_config_value(""locales"", lang) url += ""/"" + lang version = getattr(settings, ""HELP_TOKENS_VERSION"", None) if version is not None: url += ""/"" + version url += ""/"" + url_tail return url" 2885,"def load_data(Filepath, ObjectType='data', RelativeChannelNo=None, SampleFreq=None, PointsToLoad=-1, calcPSD=True, NPerSegmentPSD=1000000, NormaliseByMonitorOutput=False, silent=False): """""" Parameters ---------- Filepath : string filepath to the file containing the data used to initialise and create an instance of the DataObject class ObjectType : string, optional type to load the data as, takes the value 'default' if not specified. Options are: 'data' : optoanalysis.DataObject 'thermo' : optoanalysis.thermo.ThermoObject RelativeChannelNo : int, optional If loading a .bin file produced by the Saneae datalogger, used to specify the channel number If loading a .dat file produced by the labview NI5122 daq card, used to specifiy the channel number if two channels where saved, if left None with .dat files it will assume that the file to load only contains one channel. If NormaliseByMonitorOutput is True then specifies the monitor channel for loading a .dat file produced by the labview NI5122 daq card. SampleFreq : float, optional If loading a .dat file produced by the labview NI5122 daq card, used to manually specify the sample frequency PointsToLoad : int, optional Number of first points to read. -1 means all points (i.e., the complete file) WORKS WITH NI5122 DATA SO FAR ONLY!!! calcPSD : bool, optional Whether to calculate the PSD upon loading the file, can take some time off the loading and reduce memory usage if frequency space info is not required NPerSegmentPSD : int, optional NPerSegment to pass to scipy.signal.welch to calculate the PSD NormaliseByMonitorOutput : bool, optional If True the particle signal trace will be divided by the monitor output, which is specified by the channel number set in the RelativeChannelNo parameter. WORKS WITH NI5122 DATA SO FAR ONLY!!! Returns ------- Data : DataObject An instance of the DataObject class contaning the data that you requested to be loaded. 
"""""" if silent != True: print(""Loading data from {}"".format(Filepath)) ObjectTypeDict = { 'data' : DataObject, 'thermo' : optoanalysis.thermo.ThermoObject, } try: Object = ObjectTypeDict[ObjectType] except KeyError: raise ValueError(""You entered {}, this is not a valid object type"".format(ObjectType)) data = Object(Filepath, RelativeChannelNo, SampleFreq, PointsToLoad, calcPSD, NPerSegmentPSD, NormaliseByMonitorOutput) try: channel_number, run_number, repeat_number = [int(val) for val in re.findall('\d+', data.filename)] data.channel_number = channel_number data.run_number = run_number data.repeat_number = repeat_number if _does_file_exist(data.filepath.replace(data.filename, '') + ""pressure.log""): print(""pressure.log file exists"") for line in open(data.filepath.replace(data.filename, '') + ""pressure.log"", 'r'): run_number, repeat_number, pressure = line.split(',')[1:] run_number = int(run_number) repeat_number = int(repeat_number) pressure = float(pressure) if (run_number == data.run_number) and (repeat_number == data.repeat_number): data.pmbar = pressure except ValueError: pass try: if _does_file_exist(glob(data.filepath.replace(data.filename, '*' + data.filename[20:-4] + ' - header.dat'))[0]): print(""header file exists"") with open(glob(data.filepath.replace(data.filename, '*' + data.filepath[20:-4] + ' - header.dat'))[0], encoding='ISO-8859-1') as f: lines = f.readlines() data.pmbar = (float(lines[68][-9:-1])+float(lines[69][-9:-1]))/2 except (ValueError, IndexError): pass return data" 2886,"def search_data_std(Channel, RunNos, RepeatNos, directoryPath='.'): """""" Lets you find multiple datasets at once assuming they have a filename which contains a pattern of the form: CH<ChannelNo>_RUN00...<RunNo>_REPEAT00...<RepeatNo> Parameters ---------- Channel : int The channel you want to load RunNos : sequence Sequence of run numbers you want to load RepeatNos : sequence Sequence of repeat numbers you want to load directoryPath : string, optional The path to the directory housing the data The default is the current directory Returns ------- Data_filepaths : list A list containing the filepaths to the matching files """""" files = glob('{}/*'.format(directoryPath)) files_CorrectChannel = [] for file_ in files: if 'CH{}'.format(Channel) in file_: files_CorrectChannel.append(file_) files_CorrectRunNo = [] for RunNo in RunNos: files_match = _fnmatch.filter( files_CorrectChannel, '*RUN*0{}_*'.format(RunNo)) for file_ in files_match: files_CorrectRunNo.append(file_) files_CorrectRepeatNo = [] for RepeatNo in RepeatNos: files_match = _fnmatch.filter( files_CorrectRunNo, '*REPEAT*0{}.*'.format(RepeatNo)) for file_ in files_match: files_CorrectRepeatNo.append(file_) return files_CorrectRepeatNo" 2887,"def multi_load_data(Channel, RunNos, RepeatNos, directoryPath='.', calcPSD=True, NPerSegmentPSD=1000000): """""" Lets you load multiple datasets at once assuming they have a filename which contains a pattern of the form: CH<ChannelNo>_RUN00...<RunNo>_REPEAT00...<RepeatNo> Parameters ---------- Channel : int The channel you want to load RunNos : sequence Sequence of run numbers you want to load RepeatNos : sequence Sequence of repeat numbers you want to load directoryPath : string, optional The path to the directory housing the data The default is the current directory Returns ------- Data : list A list containing the DataObjects that were loaded. 
"""""" matching_files = search_data_std(Channel=Channel, RunNos=RunNos, RepeatNos=RepeatNos, directoryPath=directoryPath) #data = [] #for filepath in matching_files_: # data.append(load_data(filepath, calcPSD=calcPSD, NPerSegmentPSD=NPerSegmentPSD)) cpu_count = _cpu_count() workerPool = _Pool(cpu_count) load_data_partial = _partial(load_data, calcPSD=calcPSD, NPerSegmentPSD=NPerSegmentPSD) data = workerPool.map(load_data_partial, matching_files) workerPool.close() workerPool.terminate() workerPool.join() #with _Pool(cpu_count) as workerPool: #load_data_partial = _partial(load_data, calcPSD=calcPSD, NPerSegmentPSD=NPerSegmentPSD) #data = workerPool.map(load_data_partial, files_CorrectRepeatNo) return data" 2888,"def multi_load_data_custom(Channel, TraceTitle, RunNos, directoryPath='.', calcPSD=True, NPerSegmentPSD=1000000): """""" Lets you load multiple datasets named with the LeCroy's custom naming scheme at once. Parameters ---------- Channel : int The channel you want to load TraceTitle : string The custom trace title of the files. RunNos : sequence Sequence of run numbers you want to load RepeatNos : sequence Sequence of repeat numbers you want to load directoryPath : string, optional The path to the directory housing the data The default is the current directory Returns ------- Data : list A list containing the DataObjects that were loaded. """""" # files = glob('{}/*'.format(directoryPath)) # files_CorrectChannel = [] # for file_ in files: # if 'C{}'.format(Channel) in file_: # files_CorrectChannel.append(file_) # files_CorrectRunNo = [] # for RunNo in RunNos: # files_match = _fnmatch.filter( # files_CorrectChannel, '*C{}'.format(Channel)+TraceTitle+str(RunNo).zfill(5)+'.*') # for file_ in files_match: # files_CorrectRunNo.append(file_) matching_files = search_data_custom(Channel, TraceTitle, RunNos, directoryPath) cpu_count = _cpu_count() workerPool = _Pool(cpu_count) # for filepath in files_CorrectRepeatNo: # print(filepath) # data.append(load_data(filepath)) load_data_partial = _partial(load_data, calcPSD=calcPSD, NPerSegmentPSD=NPerSegmentPSD) data = workerPool.map(load_data_partial, matching_files) workerPool.close() workerPool.terminate() workerPool.join() return data" 2889,"def search_data_custom(Channel, TraceTitle, RunNos, directoryPath='.'): """""" Lets you create a list with full file paths of the files named with the LeCroy's custom naming scheme. Parameters ---------- Channel : int The channel you want to load TraceTitle : string The custom trace title of the files. RunNos : sequence Sequence of run numbers you want to load RepeatNos : sequence Sequence of repeat numbers you want to load directoryPath : string, optional The path to the directory housing the data The default is the current directory Returns ------- Paths : list A list containing the full file paths of the files you were looking for. """""" files = glob('{}/*'.format(directoryPath)) files_CorrectChannel = [] for file_ in files: if 'C{}'.format(Channel) in file_: files_CorrectChannel.append(file_) files_CorrectRunNo = [] for RunNo in RunNos: files_match = _fnmatch.filter( files_CorrectChannel, '*C{}'.format(Channel)+TraceTitle+str(RunNo).zfill(5)+'.*') for file_ in files_match: files_CorrectRunNo.append(file_) print(""loading the following files: {}"".format(files_CorrectRunNo)) paths = files_CorrectRunNo return paths" 2890,"def calc_temp(Data_ref, Data): """""" Calculates the temperature of a data set relative to a reference. The reference is assumed to be at 300K. 
Parameters ---------- Data_ref : DataObject Reference data set, assumed to be 300K Data : DataObject Data object to have the temperature calculated for Returns ------- T : uncertainties.ufloat The temperature of the data set """""" T = 300 * ((Data.A * Data_ref.Gamma) / (Data_ref.A * Data.Gamma)) Data.T = T return T" 2891,"def calc_gamma_components(Data_ref, Data): """""" Calculates the components of Gamma (Gamma0 and delta_Gamma), assuming that the Data_ref is uncooled data (ideally at 3mbar for best fitting). It uses the fact that A_prime=A/Gamma0 should be constant for a particular particle under changes in pressure and therefore uses the reference save to calculate A_prime (assuming the Gamma value found for the uncooled data is actually equal to Gamma0 since only collisions should be causing the damping. Therefore for the cooled data Gamma0 should equal A/A_prime and therefore we can extract Gamma0 and delta_Gamma. A_prime = ConvFactor**2 * (2*k_B*T0/(pi*m)) Parameters ---------- Data_ref : DataObject Reference data set, assumed to be 300K Data : DataObject Data object to have the temperature calculated for Returns ------- Gamma0 : uncertainties.ufloat Damping due to the environment delta_Gamma : uncertainties.ufloat Damping due to other effects (e.g. feedback cooling) """""" A_prime = Data_ref.A/Data_ref.Gamma Gamma0 = Data.A/A_prime delta_Gamma = Data.Gamma - Gamma0 return Gamma0, delta_Gamma" 2892,"def fit_curvefit(p0, datax, datay, function, **kwargs): """""" Fits the data to a function using scipy.optimise.curve_fit Parameters ---------- p0 : array_like initial parameters to use for fitting datax : array_like x data to use for fitting datay : array_like y data to use for fitting function : function funcion to be fit to the data kwargs keyword arguments to be passed to scipy.optimise.curve_fit Returns ------- pfit_curvefit : array Optimal values for the parameters so that the sum of the squared residuals of ydata is minimized perr_curvefit : array One standard deviation errors in the optimal values for the parameters """""" pfit, pcov = \ _curve_fit(function, datax, datay, p0=p0, epsfcn=0.0001, **kwargs) error = [] for i in range(len(pfit)): try: error.append(_np.absolute(pcov[i][i])**0.5) except: error.append(_np.NaN) pfit_curvefit = pfit perr_curvefit = _np.array(error) return pfit_curvefit, perr_curvefit" 2893,"def moving_average(array, n=3): """""" Calculates the moving average of an array. Parameters ---------- array : array The array to have the moving average taken of n : int The number of points of moving average to take Returns ------- MovingAverageArray : array The n-point moving average of the input array """""" ret = _np.cumsum(array, dtype=float) ret[n:] = ret[n:] - ret[:-n] return ret[n - 1:] / n" 2894,"def take_closest(myList, myNumber): """""" Assumes myList is sorted. Returns closest value to myNumber. If two numbers are equally close, return the smallest number. 
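# Editor's illustrative sketch (not part of the original entry): the one-standard-
# deviation errors returned by ``fit_curvefit`` above are the square roots of the
# diagonal of the covariance matrix from scipy.optimize.curve_fit; the element-wise
# loop is equivalent to the vectorised form below:
import numpy as np
from scipy.optimize import curve_fit

def fit_with_errors(function, xdata, ydata, p0):
    popt, pcov = curve_fit(function, xdata, ydata, p0=p0)
    perr = np.sqrt(np.diag(pcov))
    return popt, perr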
Parameters ---------- myList : array The list in which to find the closest value to myNumber myNumber : float The number to find the closest to in MyList Returns ------- closestValue : float The number closest to myNumber in myList """""" pos = _bisect_left(myList, myNumber) if pos == 0: return myList[0] if pos == len(myList): return myList[-1] before = myList[pos - 1] after = myList[pos] if after - myNumber < myNumber - before: return after else: return before" 2895,"def _position_autocorrelation_fitting_eqn(t, Gamma, AngTrapFreq): """""" The value of the fitting equation: exp(-t*Gamma/2) * (cos(t* sqrt(Omega**2 - Gamma**2 /4)) + Gamma* sin(t* sqrt(Omega**2-Gamma**2 /4))/(2* sqrt(Omega**2 - Gamma**2 /4))) [eqn 4.20 taken from DOI: DOI: 10.1007/978-1-4614-6031-2] to be fit to the autocorrelation-exponential decay Parameters ---------- t : float time Gamma : float Big Gamma (in radians), i.e. damping AngTrapFreq : float Angular Trapping Frequency in Radians Returns ------- Value : float The value of the fitting equation """""" return _np.exp(-t*Gamma/2)* ( _np.cos(t* _np.sqrt(AngTrapFreq**2-Gamma**2/4)) + Gamma* _np.sin(t* _np.sqrt(AngTrapFreq**2-Gamma**2/4))/(2* _np.sqrt(AngTrapFreq**2-Gamma**2/4)) )" 2896,"def fit_autocorrelation(autocorrelation, time, GammaGuess, TrapFreqGuess=None, method='energy', MakeFig=True, show_fig=True): """""" Fits exponential relaxation theory to data. Parameters ---------- autocorrelation : array array containing autocorrelation to be fitted time : array array containing the time of each point the autocorrelation was evaluated GammaGuess : float The approximate Big Gamma (in radians) to use initially TrapFreqGuess : float The approximate trapping frequency to use initially in Hz. method : string, optional To choose which autocorrelation fit is needed. 'position' : equation 4.20 from Tongcang Li's 2013 thesis (DOI: 10.1007/978-1-4614-6031-2) 'energy' : proper exponential energy correlation decay (DOI: 10.1103/PhysRevE.94.062151) MakeFig : bool, optional Whether to construct and return the figure object showing the fitting. defaults to True show_fig : bool, optional Whether to show the figure object when it has been created. 
defaults to True Returns ------- ParamsFit - Fitted parameters: 'variance'-method : [Gamma] 'position'-method : [Gamma, AngularTrappingFrequency] ParamsFitErr - Error in fitted parameters: 'varaince'-method : [GammaErr] 'position'-method : [GammaErr, AngularTrappingFrequencyErr] fig : matplotlib.figure.Figure object figure object containing the plot ax : matplotlib.axes.Axes object axes with the data plotted of the: - initial data - final fit """""" datax = time datay = autocorrelation method = method.lower() if method == 'energy': p0 = _np.array([GammaGuess]) Params_Fit, Params_Fit_Err = fit_curvefit(p0, datax, datay, _energy_autocorrelation_fitting_eqn) autocorrelation_fit = _energy_autocorrelation_fitting_eqn(_np.arange(0,datax[-1],1e-7), Params_Fit[0]) elif method == 'position': AngTrapFreqGuess = 2 * _np.pi * TrapFreqGuess p0 = _np.array([GammaGuess, AngTrapFreqGuess]) Params_Fit, Params_Fit_Err = fit_curvefit(p0, datax, datay, _position_autocorrelation_fitting_eqn) autocorrelation_fit = _position_autocorrelation_fitting_eqn(_np.arange(0,datax[-1],1e-7), Params_Fit[0], Params_Fit[1]) if MakeFig == True: fig = _plt.figure(figsize=properties[""default_fig_size""]) ax = fig.add_subplot(111) ax.plot(datax*1e6, datay, '.', color=""darkblue"", label=""Autocorrelation Data"", alpha=0.5) ax.plot(_np.arange(0,datax[-1],1e-7)*1e6, autocorrelation_fit, color=""red"", label=""fit"") ax.set_xlim([0, 30e6/Params_Fit[0]/(2*_np.pi)]) legend = ax.legend(loc=""best"", frameon = 1) frame = legend.get_frame() frame.set_facecolor('white') frame.set_edgecolor('white') ax.set_xlabel(""time (us)"") ax.set_ylabel(r""$\left | \frac{\langle x(t)x(t+\tau) \rangle}{\langle x(t)x(t) \rangle} \right |$"") if show_fig == True: _plt.show() return Params_Fit, Params_Fit_Err, fig, ax else: return Params_Fit, Params_Fit_Err, None, None" 2897,"def PSD_fitting_eqn(A, OmegaTrap, Gamma, omega): """""" The value of the fitting equation: A / ((OmegaTrap**2 - omega**2)**2 + (omega * Gamma)**2) to be fit to the PSD Parameters ---------- A : float Fitting constant A A = γ**2*Γ_0*(2*K_b*T_0)/(π*m) where: γ = conversionFactor Γ_0 = Damping factor due to environment π = pi OmegaTrap : float The trapping frequency in the axis of interest (in angular frequency) Gamma : float The damping factor Gamma = Γ = Γ_0 + δΓ where: Γ_0 = Damping factor due to environment δΓ = extra damping due to feedback or other effects omega : float The angular frequency to calculate the value of the fitting equation at Returns ------- Value : float The value of the fitting equation """""" return A / ((OmegaTrap**2 - omega**2)**2 + omega**2 * (Gamma)**2)" 2898,"def PSD_fitting_eqn_with_background(A, OmegaTrap, Gamma, FlatBackground, omega): """""" The value of the fitting equation: A / ((OmegaTrap**2 - omega**2)**2 + (omega * Gamma)**2) + FlatBackground to be fit to the PSD Parameters ---------- A : float Fitting constant A A = γ**2*Γ_0*(2*K_b*T_0)/(π*m) where: γ = conversionFactor Γ_0 = Damping factor due to environment π = pi OmegaTrap : float The trapping frequency in the axis of interest (in angular frequency) Gamma : float The damping factor Gamma = Γ = Γ_0 + δΓ where: Γ_0 = Damping factor due to environment δΓ = extra damping due to feedback or other effects FlatBackground : float Adds a constant offset to the peak to account for a flat noise background omega : float The angular frequency to calculate the value of the fitting equation at Returns ------- Value : float The value of the fitting equation """""" return A / ((OmegaTrap**2 - omega**2)**2 + 
omega**2 * (Gamma)**2) + FlatBackground" 2899,"def fit_PSD(Data, bandwidth, TrapFreqGuess, AGuess=0.1e10, GammaGuess=400, FlatBackground=None, MakeFig=True, show_fig=True): """""" Fits theory PSD to Data. Assumes highest point of PSD is the trapping frequency. Parameters ---------- Data : DataObject data object to be fitted bandwidth : float bandwidth around trapping frequency peak to fit the theory PSD to TrapFreqGuess : float The approximate trapping frequency to use initially as the centre of the peak AGuess : float, optional The initial value of the A parameter to use in fitting GammaGuess : float, optional The initial value of the Gamma parameter to use in fitting FlatBackground : float, optional If given a number the fitting function assumes a flat background to get more exact Area, which does not factor in noise. defaults to None, which fits a model with no flat background contribution, basically no offset MakeFig : bool, optional Whether to construct and return the figure object showing the fitting. defaults to True show_fig : bool, optional Whether to show the figure object when it has been created. defaults to True Returns ------- ParamsFit - Fitted parameters: [A, TrappingFrequency, Gamma, FlatBackground(optional)] ParamsFitErr - Error in fitted parameters: [AErr, TrappingFrequencyErr, GammaErr, FlatBackgroundErr(optional)] fig : matplotlib.figure.Figure object figure object containing the plot ax : matplotlib.axes.Axes object axes with the data plotted of the: - initial data - initial fit - final fit """""" AngFreqs = 2 * pi * Data.freqs Angbandwidth = 2 * pi * bandwidth AngTrapFreqGuess = 2 * pi * TrapFreqGuess ClosestToAngTrapFreqGuess = take_closest(AngFreqs, AngTrapFreqGuess) index_OmegaTrap = _np.where(AngFreqs == ClosestToAngTrapFreqGuess)[0][0] OmegaTrap = AngFreqs[index_OmegaTrap] f_fit_lower = take_closest(AngFreqs, OmegaTrap - Angbandwidth / 2) f_fit_upper = take_closest(AngFreqs, OmegaTrap + Angbandwidth / 2) indx_fit_lower = int(_np.where(AngFreqs == f_fit_lower)[0][0]) indx_fit_upper = int(_np.where(AngFreqs == f_fit_upper)[0][0]) if indx_fit_lower == indx_fit_upper: raise ValueError(""Bandwidth argument must be higher, region is too thin."") # print(f_fit_lower, f_fit_upper) # print(AngFreqs[indx_fit_lower], AngFreqs[indx_fit_upper]) # find highest point in region about guess for trap frequency - use that # as guess for trap frequency and recalculate region about the trap # frequency index_OmegaTrap = _np.where(Data.PSD == max( Data.PSD[indx_fit_lower:indx_fit_upper]))[0][0] OmegaTrap = AngFreqs[index_OmegaTrap] # print(OmegaTrap) f_fit_lower = take_closest(AngFreqs, OmegaTrap - Angbandwidth / 2) f_fit_upper = take_closest(AngFreqs, OmegaTrap + Angbandwidth / 2) indx_fit_lower = int(_np.where(AngFreqs == f_fit_lower)[0][0]) indx_fit_upper = int(_np.where(AngFreqs == f_fit_upper)[0][0]) logPSD = 10 * _np.log10(Data.PSD) # putting PSD in dB def calc_theory_PSD_curve_fit(freqs, A, TrapFreq, BigGamma, FlatBackground=None): if FlatBackground == None: Theory_PSD = 10 * \ _np.log10(PSD_fitting_eqn(A, TrapFreq, BigGamma, freqs)) # PSD in dB else: Theory_PSD = 10* \ _np.log10(PSD_fitting_eqn_with_background(A, TrapFreq, BigGamma, FlatBackground, freqs)) # PSD in dB if A < 0 or TrapFreq < 0 or BigGamma < 0: return 1e9 else: return Theory_PSD datax = AngFreqs[indx_fit_lower:indx_fit_upper] datay = logPSD[indx_fit_lower:indx_fit_upper] if FlatBackground == None: p0 = _np.array([AGuess, OmegaTrap, GammaGuess]) Params_Fit, Params_Fit_Err = fit_curvefit(p0, datax, datay, 
calc_theory_PSD_curve_fit) else: p0 = _np.array([AGuess, OmegaTrap, GammaGuess, FlatBackground]) Params_Fit, Params_Fit_Err = fit_curvefit(p0, datax, datay, calc_theory_PSD_curve_fit) if MakeFig == True: fig = _plt.figure(figsize=properties[""default_fig_size""]) ax = fig.add_subplot(111) if FlatBackground==None: PSDTheory_fit_initial = 10 * _np.log10( PSD_fitting_eqn(p0[0], p0[1], p0[2], AngFreqs)) PSDTheory_fit = 10 * _np.log10( PSD_fitting_eqn(Params_Fit[0], Params_Fit[1], Params_Fit[2], AngFreqs)) else: PSDTheory_fit_initial = 10 * _np.log10( PSD_fitting_eqn_with_background(p0[0], p0[1], p0[2], p0[3], AngFreqs)) PSDTheory_fit = 10 * _np.log10( PSD_fitting_eqn_with_background(Params_Fit[0], Params_Fit[1], Params_Fit[2], Params_Fit[3], AngFreqs)) ax.plot(AngFreqs / (2 * pi), Data.PSD, color=""darkblue"", label=""Raw PSD Data"", alpha=0.5) ax.plot(AngFreqs / (2 * pi), 10**(PSDTheory_fit_initial / 10), '--', alpha=0.7, color=""purple"", label=""initial vals"") ax.plot(AngFreqs / (2 * pi), 10**(PSDTheory_fit / 10), color=""red"", label=""fitted vals"") ax.set_xlim([(OmegaTrap - 5 * Angbandwidth) / (2 * pi), (OmegaTrap + 5 * Angbandwidth) / (2 * pi)]) ax.plot([(OmegaTrap - Angbandwidth) / (2 * pi), (OmegaTrap - Angbandwidth) / (2 * pi)], [min(10**(logPSD / 10)), max(10**(logPSD / 10))], '--', color=""grey"") ax.plot([(OmegaTrap + Angbandwidth) / (2 * pi), (OmegaTrap + Angbandwidth) / (2 * pi)], [min(10**(logPSD / 10)), max(10**(logPSD / 10))], '--', color=""grey"") ax.semilogy() legend = ax.legend(loc=""best"", frameon = 1) frame = legend.get_frame() frame.set_facecolor('white') frame.set_edgecolor('white') ax.set_xlabel(""Frequency (Hz)"") ax.set_ylabel(""$S_{xx}$ ($V^2/Hz$)"") if show_fig == True: _plt.show() return Params_Fit, Params_Fit_Err, fig, ax else: return Params_Fit, Params_Fit_Err, None, None" 2900,"def extract_parameters(Pressure, PressureErr, A, AErr, Gamma0, Gamma0Err, method=""chang""): """""" Calculates the radius, mass and conversion factor and thier uncertainties. For values to be correct data must have been taken with feedback off and at pressures of around 1mbar (this is because the equations assume harmonic motion and at lower pressures the uncooled particle experiences anharmonic motion (due to exploring furthur outside the middle of the trap). When cooled the value of Gamma (the damping) is a combination of the enviromental damping and feedback damping and so is not the correct value for use in this equation (as it requires the enviromental damping). Environmental damping can be predicted though as A=const*Gamma0. By fitting to 1mbar data one can find the value of the const and therefore Gamma0 = A/const Parameters ---------- Pressure : float Pressure in mbar when the data was taken PressureErr : float Error in the Pressure as a decimal (e.g. 
15% error is 0.15) A : float Fitting constant A A = γ**2*2*Γ_0*(K_b*T_0)/(π*m) where: γ = conversionFactor Γ_0 = Damping factor due to environment π = pi AErr : float Error in Fitting constant A Gamma0 : float The enviromental damping factor Gamma_0 = Γ_0 Gamma0Err : float The error in the enviromental damping factor Gamma_0 = Γ_0 Returns: Params : list [radius, mass, conversionFactor] The extracted parameters ParamsError : list [radiusError, massError, conversionFactorError] The error in the extracted parameters """""" Pressure = 100 * Pressure # conversion to Pascals rho = 1800 # as quoted by Microspheres and Nanospheres # kgm^3 dm = 0.372e-9 # m O'Hanlon, 2003 T0 = 300 # kelvin kB = Boltzmann # m^2 kg s^-2 K-1 eta = 18.27e-6 # Pa s, viscosity of air method = method.lower() if method == ""rashid"": radius = (0.619 * 9 * pi * eta * dm**2) / \ (_np.sqrt(2) * rho * kB * T0) * (Pressure/Gamma0) m_air = 4.81e-26 # molecular mass of air is 28.97 g/mol and Avogadro's Number 6.0221409^23 if method == ""chang"": vbar = (8*kB*T0/(pi*m_air))**0.5 radius = 16/(rho*pi*vbar)*(Pressure/Gamma0)/4 # CORRECTION FACTOR OF 4 APPLIED!!!! # see section 4.1.1 of Muddassar Rashid's 2016 Thesis for # derivation of this # see also page 132 of Jan Giesler's Thesis err_radius = radius * \ _np.sqrt(((PressureErr * Pressure) / Pressure) ** 2 + (Gamma0Err / Gamma0)**2) mass = rho * ((4 * pi * radius**3) / 3) err_mass = mass * 3 * err_radius / radius conversionFactor = _np.sqrt(A * mass / (4 * kB * T0 * Gamma0)) err_conversionFactor = conversionFactor * \ _np.sqrt((AErr / A)**2 + (err_mass / mass) ** 2 + (Gamma0Err / Gamma0)**2) return [radius, mass, conversionFactor], [err_radius, err_mass, err_conversionFactor]" 2901,"def get_ZXY_freqs(Data, zfreq, xfreq, yfreq, bandwidth=5000): """""" Determines the exact z, x and y peak frequencies from approximate frequencies by finding the highest peak in the PSD ""close to"" the approximate peak frequency. By ""close to"" I mean within the range: approxFreq - bandwidth/2 to approxFreq + bandwidth/2 Parameters ---------- Data : DataObject DataObject containing the data for which you want to determine the z, x and y frequencies. zfreq : float An approximate frequency for the z peak xfreq : float An approximate frequency for the z peak yfreq : float An approximate frequency for the z peak bandwidth : float, optional The bandwidth around the approximate peak to look for the actual peak. 
The default value is 5000 Returns ------- trapfreqs : list List containing the trap frequencies in the following order (z, x, y) """""" trapfreqs = [] for freq in [zfreq, xfreq, yfreq]: z_f_fit_lower = take_closest(Data.freqs, freq - bandwidth / 2) z_f_fit_upper = take_closest(Data.freqs, freq + bandwidth / 2) z_indx_fit_lower = int(_np.where(Data.freqs == z_f_fit_lower)[0][0]) z_indx_fit_upper = int(_np.where(Data.freqs == z_f_fit_upper)[0][0]) z_index_OmegaTrap = _np.where(Data.PSD == max( Data.PSD[z_indx_fit_lower:z_indx_fit_upper]))[0][0] # find highest point in region about guess for trap frequency # use that as guess for trap frequency and recalculate region # about the trap frequency z_OmegaTrap = Data.freqs[z_index_OmegaTrap] trapfreqs.append(z_OmegaTrap) return trapfreqs" 2902,"def get_ZXY_data(Data, zf, xf, yf, FractionOfSampleFreq=1, zwidth=10000, xwidth=5000, ywidth=5000, filterImplementation=""filtfilt"", timeStart=None, timeEnd=None, NPerSegmentPSD=1000000, MakeFig=True, show_fig=True): """""" Given a Data object and the frequencies of the z, x and y peaks (and some optional parameters for the created filters) this function extracts the individual z, x and y signals (in volts) by creating IIR filters and filtering the Data. Parameters ---------- Data : DataObject DataObject containing the data for which you want to extract the z, x and y signals. zf : float The frequency of the z peak in the PSD xf : float The frequency of the x peak in the PSD yf : float The frequency of the y peak in the PSD FractionOfSampleFreq : integer, optional The fraction of the sample frequency to sub-sample the data by. This sometimes needs to be done because a filter with the appropriate frequency response may not be generated using the sample rate at which the data was taken. Increasing this number means the x, y and z signals produced by this function will be sampled at a lower rate but a higher number means a higher chance that the filter produced will have a nice frequency response. zwidth : float, optional The width of the pass-band of the IIR filter to be generated to filter Z. xwidth : float, optional The width of the pass-band of the IIR filter to be generated to filter X. ywidth : float, optional The width of the pass-band of the IIR filter to be generated to filter Y. filterImplementation : string, optional filtfilt or lfilter - use scipy.filtfilt or lfilter default: filtfilt timeStart : float, optional Starting time for filtering timeEnd : float, optional Ending time for filtering show_fig : bool, optional If True - plot unfiltered and filtered PSD for z, x and y. If False - don't plot anything Returns ------- zdata : ndarray Array containing the z signal in volts with time. xdata : ndarray Array containing the x signal in volts with time. ydata : ndarray Array containing the y signal in volts with time. timedata : ndarray Array containing the time data to go with the z, x, and y signal. 
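A hedged usage sketch (the file name, peak frequencies and sub-sampling factor below are hypothetical placeholders, not values taken from this document):

>>> data = load_data('measurement.trc')   # assumed DataObject from this library
>>> z, x, y, t, fig, ax = get_ZXY_data(data, zf=75e3, xf=160e3, yf=185e3,
...                                    FractionOfSampleFreq=4, show_fig=False)

Note that the figure and axes objects (or None, None when MakeFig is False) are returned after the time array.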
"""""" if timeStart == None: timeStart = Data.timeStart if timeEnd == None: timeEnd = Data.timeEnd time = Data.time.get_array() StartIndex = _np.where(time == take_closest(time, timeStart))[0][0] EndIndex = _np.where(time == take_closest(time, timeEnd))[0][0] SAMPLEFREQ = Data.SampleFreq / FractionOfSampleFreq if filterImplementation == ""filtfilt"": ApplyFilter = scipy.signal.filtfilt elif filterImplementation == ""lfilter"": ApplyFilter = scipy.signal.lfilter else: raise ValueError(""filterImplementation must be one of [filtfilt, lfilter] you entered: {}"".format( filterImplementation)) input_signal = Data.voltage[StartIndex: EndIndex][0::FractionOfSampleFreq] bZ, aZ = make_butterworth_bandpass_b_a(zf, zwidth, SAMPLEFREQ) print(""filtering Z"") zdata = ApplyFilter(bZ, aZ, input_signal) if(_np.isnan(zdata).any()): raise ValueError( ""Value Error: FractionOfSampleFreq must be higher, a sufficiently small sample frequency should be used to produce a working IIR filter."") bX, aX = make_butterworth_bandpass_b_a(xf, xwidth, SAMPLEFREQ) print(""filtering X"") xdata = ApplyFilter(bX, aX, input_signal) if(_np.isnan(xdata).any()): raise ValueError( ""Value Error: FractionOfSampleFreq must be higher, a sufficiently small sample frequency should be used to produce a working IIR filter."") bY, aY = make_butterworth_bandpass_b_a(yf, ywidth, SAMPLEFREQ) print(""filtering Y"") ydata = ApplyFilter(bY, aY, input_signal) if(_np.isnan(ydata).any()): raise ValueError( ""Value Error: FractionOfSampleFreq must be higher, a sufficiently small sample frequency should be used to produce a working IIR filter."") if MakeFig == True: f, PSD = scipy.signal.welch( input_signal, SAMPLEFREQ, nperseg=NPerSegmentPSD) f_z, PSD_z = scipy.signal.welch(zdata, SAMPLEFREQ, nperseg=NPerSegmentPSD) f_y, PSD_y = scipy.signal.welch(ydata, SAMPLEFREQ, nperseg=NPerSegmentPSD) f_x, PSD_x = scipy.signal.welch(xdata, SAMPLEFREQ, nperseg=NPerSegmentPSD) fig, ax = _plt.subplots(figsize=properties[""default_fig_size""]) ax.plot(f, PSD) ax.plot(f_z, PSD_z, label=""z"") ax.plot(f_x, PSD_x, label=""x"") ax.plot(f_y, PSD_y, label=""y"") ax.legend(loc=""best"") ax.semilogy() ax.set_xlim([zf - zwidth, yf + ywidth]) else: fig = None ax = None if show_fig == True: _plt.show() timedata = time[StartIndex: EndIndex][0::FractionOfSampleFreq] return zdata, xdata, ydata, timedata, fig, ax" 2903,"def get_ZXY_data_IFFT(Data, zf, xf, yf, zwidth=10000, xwidth=5000, ywidth=5000, timeStart=None, timeEnd=None, show_fig=True): """""" Given a Data object and the frequencies of the z, x and y peaks (and some optional parameters for the created filters) this function extracts the individual z, x and y signals (in volts) by creating IIR filters and filtering the Data. Parameters ---------- Data : DataObject DataObject containing the data for which you want to extract the z, x and y signals. zf : float The frequency of the z peak in the PSD xf : float The frequency of the x peak in the PSD yf : float The frequency of the y peak in the PSD zwidth : float, optional The width of the pass-band of the IIR filter to be generated to filter Z. xwidth : float, optional The width of the pass-band of the IIR filter to be generated to filter X. ywidth : float, optional The width of the pass-band of the IIR filter to be generated to filter Y. timeStart : float, optional Starting time for filtering timeEnd : float, optional Ending time for filtering show_fig : bool, optional If True - plot unfiltered and filtered PSD for z, x and y. 
If False - don't plot anything Returns ------- zdata : ndarray Array containing the z signal in volts with time. xdata : ndarray Array containing the x signal in volts with time. ydata : ndarray Array containing the y signal in volts with time. timedata : ndarray Array containing the time data to go with the z, x, and y signal. """""" if timeStart == None: timeStart = Data.timeStart if timeEnd == None: timeEnd = Data.timeEnd time = Data.time.get_array() StartIndex = _np.where(time == take_closest(time, timeStart))[0][0] EndIndex = _np.where(time == take_closest(time, timeEnd))[0][0] SAMPLEFREQ = Data.SampleFreq input_signal = Data.voltage[StartIndex: EndIndex] zdata = IFFT_filter(input_signal, SAMPLEFREQ, zf - zwidth / 2, zf + zwidth / 2) xdata = IFFT_filter(input_signal, SAMPLEFREQ, xf - xwidth / 2, xf + xwidth / 2) ydata = IFFT_filter(input_signal, SAMPLEFREQ, yf - ywidth / 2, yf + ywidth / 2) if show_fig == True: NPerSegment = len(Data.time) if NPerSegment > 1e7: NPerSegment = int(1e7) f, PSD = scipy.signal.welch( input_signal, SAMPLEFREQ, nperseg=NPerSegment) f_z, PSD_z = scipy.signal.welch(zdata, SAMPLEFREQ, nperseg=NPerSegment) f_y, PSD_y = scipy.signal.welch(ydata, SAMPLEFREQ, nperseg=NPerSegment) f_x, PSD_x = scipy.signal.welch(xdata, SAMPLEFREQ, nperseg=NPerSegment) _plt.plot(f, PSD) _plt.plot(f_z, PSD_z, label=""z"") _plt.plot(f_x, PSD_x, label=""x"") _plt.plot(f_y, PSD_y, label=""y"") _plt.legend(loc=""best"") _plt.xlim([zf - zwidth, yf + ywidth]) _plt.xlabel('Frequency (Hz)') _plt.ylabel(r'$S_{xx}$ ($V^2/Hz$)') _plt.semilogy() _plt.title(""filepath = %s"" % (Data.filepath)) _plt.show() timedata = time[StartIndex: EndIndex] return zdata, xdata, ydata, timedata" 2904,"def animate(zdata, xdata, ydata, conversionFactorArray, timedata, BoxSize, timeSteps=100, filename=""particle""): """""" Animates the particle's motion given the z, x and y signal (in Volts) and the conversion factor (to convert between V and nm). Parameters ---------- zdata : ndarray Array containing the z signal in volts with time. xdata : ndarray Array containing the x signal in volts with time. ydata : ndarray Array containing the y signal in volts with time. conversionFactorArray : ndarray Array of 3 values of conversion factors for z, x and y (in units of Volts/Metre) timedata : ndarray Array containing the time data in seconds. 
BoxSize : float The size of the box in which to animate the particle - in nm timeSteps : int, optional Number of time steps to animate filename : string, optional filename to create the mp4 under (<filename>.mp4) """""" timePerFrame = 0.203 print(""This will take ~ {} minutes"".format(timePerFrame * timeSteps / 60)) convZ = conversionFactorArray[0] * 1e-9 convX = conversionFactorArray[1] * 1e-9 convY = conversionFactorArray[2] * 1e-9 ZBoxStart = -BoxSize # 1/conv*(_np.mean(zdata)-0.06) ZBoxEnd = BoxSize # 1/conv*(_np.mean(zdata)+0.06) XBoxStart = -BoxSize # 1/conv*(_np.mean(xdata)-0.06) XBoxEnd = BoxSize # 1/conv*(_np.mean(xdata)+0.06) YBoxStart = -BoxSize # 1/conv*(_np.mean(ydata)-0.06) YBoxEnd = BoxSize # 1/conv*(_np.mean(ydata)+0.06) FrameInterval = 1 # how many timesteps = 1 frame in animation a = 20 b = 0.6 * a myFPS = 7 myBitrate = 1000000 fig = _plt.figure(figsize=(a, b)) ax = fig.add_subplot(111, projection='3d') ax.set_title(""{} us"".format(timedata[0] * 1000000)) ax.set_xlabel('X (nm)') ax.set_xlim([XBoxStart, XBoxEnd]) ax.set_ylabel('Y (nm)') ax.set_ylim([YBoxStart, YBoxEnd]) ax.set_zlabel('Z (nm)') ax.set_zlim([ZBoxStart, ZBoxEnd]) ax.view_init(20, -30) #ax.view_init(0, 0) def setup_plot(): XArray = 1 / convX * xdata[0] YArray = 1 / convY * ydata[0] ZArray = 1 / convZ * zdata[0] scatter = ax.scatter(XArray, YArray, ZArray) return scatter, def animate(i): # print ""\r {}"".format(i), print(""Frame: {}"".format(i), end=""\r"") ax.clear() ax.view_init(20, -30) ax.set_title(""{} us"".format(int(timedata[i] * 1000000))) ax.set_xlabel('X (nm)') ax.set_xlim([XBoxStart, XBoxEnd]) ax.set_ylabel('Y (nm)') ax.set_ylim([YBoxStart, YBoxEnd]) ax.set_zlabel('Z (nm)') ax.set_zlim([ZBoxStart, ZBoxEnd]) XArray = 1 / convX * xdata[i] YArray = 1 / convY * ydata[i] ZArray = 1 / convZ * zdata[i] scatter = ax.scatter(XArray, YArray, ZArray) ax.scatter([XArray], [0], [-ZBoxEnd], c='k', alpha=0.9) ax.scatter([-XBoxEnd], [YArray], [0], c='k', alpha=0.9) ax.scatter([0], [YBoxEnd], [ZArray], c='k', alpha=0.9) Xx, Yx, Zx, Xy, Yy, Zy, Xz, Yz, Zz = [], [], [], [], [], [], [], [], [] for j in range(0, 30): Xlast = 1 / convX * xdata[i - j] Ylast = 1 / convY * ydata[i - j] Zlast = 1 / convZ * zdata[i - j] Alpha = 0.5 - 0.05 * j if Alpha > 0: ax.scatter([Xlast], [0 + j * 10], [-ZBoxEnd], c='grey', alpha=Alpha) ax.scatter([-XBoxEnd], [Ylast], [0 - j * 10], c='grey', alpha=Alpha) ax.scatter([0 - j * 2], [YBoxEnd], [Zlast], c='grey', alpha=Alpha) Xx.append(Xlast) Yx.append(0 + j * 10) Zx.append(-ZBoxEnd) Xy.append(-XBoxEnd) Yy.append(Ylast) Zy.append(0 - j * 10) Xz.append(0 - j * 2) Yz.append(YBoxEnd) Zz.append(Zlast) if j < 15: XCur = 1 / convX * xdata[i - j + 1] YCur = 1 / convY * ydata[i - j + 1] ZCur = 1 / convZ * zdata[i - j + 1] ax.plot([Xlast, XCur], [Ylast, YCur], [Zlast, ZCur], alpha=0.4) ax.plot_wireframe(Xx, Yx, Zx, color='grey') ax.plot_wireframe(Xy, Yy, Zy, color='grey') ax.plot_wireframe(Xz, Yz, Zz, color='grey') return scatter, anim = _animation.FuncAnimation(fig, animate, int( timeSteps / FrameInterval), init_func=setup_plot, blit=True) _plt.rcParams['animation.ffmpeg_path'] = '/usr/bin/ffmpeg' mywriter = _animation.FFMpegWriter(fps=myFPS, bitrate=myBitrate) # , fps = myFPS, bitrate = myBitrate) anim.save('{}.mp4'.format(filename), writer=mywriter) return None" 2905,"def animate_2Dscatter(x, y, NumAnimatedPoints=50, NTrailPoints=20, xlabel="""", ylabel="""", xlims=None, ylims=None, filename=""testAnim.mp4"", bitrate=1e5, dpi=5e2, fps=30, figsize = [6, 6]): """""" Animates x and y - where x and y 
are 1d arrays of x and y positions and it plots x[i:i+NTrailPoints] and y[i:i+NTrailPoints] against each other and iterates through i. """""" fig, ax = _plt.subplots(figsize = figsize) alphas = _np.linspace(0.1, 1, NTrailPoints) rgba_colors = _np.zeros((NTrailPoints,4)) # for red the first column needs to be one rgba_colors[:,0] = 1.0 # the fourth column needs to be your alphas rgba_colors[:, 3] = alphas scatter = ax.scatter(x[0:NTrailPoints], y[0:NTrailPoints], color=rgba_colors) if xlims == None: xlims = (min(x), max(x)) if ylims == None: ylims = (min(y), max(y)) ax.set_xlim(xlims) ax.set_ylim(ylims) ax.set_xlabel(xlabel) ax.set_ylabel(ylabel) def animate(i, scatter): scatter.axes.clear() # clear old scatter object scatter = ax.scatter(x[i:i+NTrailPoints], y[i:i+NTrailPoints], color=rgba_colors, animated=True) # create new scatter with updated data ax.set_xlim(xlims) ax.set_ylim(ylims) ax.set_xlabel(xlabel) ax.set_ylabel(ylabel) return scatter, ani = _animation.FuncAnimation(fig, animate, _np.arange(1, NumAnimatedPoints), interval=25, blit=True, fargs=[scatter]) ani.save(filename, bitrate=bitrate, dpi=dpi, fps=fps) return None" 2906,"def IFFT_filter(Signal, SampleFreq, lowerFreq, upperFreq, PyCUDA = False): """""" Filters data using fft -> zeroing out fft bins -> ifft Parameters ---------- Signal : ndarray Signal to be filtered SampleFreq : float Sample frequency of signal lowerFreq : float Lower frequency of bandpass to allow through filter upperFreq : float Upper frequency of bandpass to allow through filter PyCUDA : bool, optional If True, uses PyCUDA to accelerate the FFT and IFFT via using your NVIDIA-GPU If False, performs FFT and IFFT with conventional scipy.fftpack Returns ------- FilteredData : ndarray Array containing the filtered data """""" if PyCUDA==True: Signalfft=calc_fft_with_PyCUDA(Signal) else: print(""starting fft"") Signalfft = scipy.fftpack.fft(Signal) print(""starting freq calc"") freqs = _np.fft.fftfreq(len(Signal)) * SampleFreq print(""starting bin zeroing"") Signalfft[_np.where(freqs < lowerFreq)] = 0 Signalfft[_np.where(freqs > upperFreq)] = 0 if PyCUDA==True: FilteredSignal = 2 * calc_ifft_with_PyCUDA(Signalfft) else: print(""starting ifft"") FilteredSignal = 2 * scipy.fftpack.ifft(Signalfft) print(""done"") return _np.real(FilteredSignal)" 2907,"def calc_fft_with_PyCUDA(Signal): """""" Calculates the FFT of the passed signal by using the scikit-cuda libary which relies on PyCUDA Parameters ---------- Signal : ndarray Signal to be transformed into Fourier space Returns ------- Signalfft : ndarray Array containing the signal's FFT """""" print(""starting fft"") Signal = Signal.astype(_np.float32) Signal_gpu = _gpuarray.to_gpu(Signal) Signalfft_gpu = _gpuarray.empty(len(Signal)//2+1,_np.complex64) plan = _Plan(Signal.shape,_np.float32,_np.complex64) _fft(Signal_gpu, Signalfft_gpu, plan) Signalfft = Signalfft_gpu.get() #only 2N+1 long Signalfft = _np.hstack((Signalfft,_np.conj(_np.flipud(Signalfft[1:len(Signal)//2])))) print(""fft done"") return Signalfft" 2908,"def calc_ifft_with_PyCUDA(Signalfft): """""" Calculates the inverse-FFT of the passed FFT-signal by using the scikit-cuda libary which relies on PyCUDA Parameters ---------- Signalfft : ndarray FFT-Signal to be transformed into Real space Returns ------- Signal : ndarray Array containing the ifft signal """""" print(""starting ifft"") Signalfft = Signalfft.astype(_np.complex64) Signalfft_gpu = _gpuarray.to_gpu(Signalfft[0:len(Signalfft)//2+1]) Signal_gpu = _gpuarray.empty(len(Signalfft),_np.float32) plan 
= _Plan(len(Signalfft),_np.complex64,_np.float32) _ifft(Signalfft_gpu, Signal_gpu, plan) Signal = Signal_gpu.get()/(2*len(Signalfft)) #normalising as CUDA IFFT is un-normalised print(""ifft done"") return Signal" 2909,"def butterworth_filter(Signal, SampleFreq, lowerFreq, upperFreq): """""" Filters data using by constructing a 5th order butterworth IIR filter and using scipy.signal.filtfilt, which does phase correction after implementing the filter (as IIR filter apply a phase change) Parameters ---------- Signal : ndarray Signal to be filtered SampleFreq : float Sample frequency of signal lowerFreq : float Lower frequency of bandpass to allow through filter upperFreq : float Upper frequency of bandpass to allow through filter Returns ------- FilteredData : ndarray Array containing the filtered data """""" b, a = make_butterworth_b_a(lowerFreq, upperFreq, SampleFreq) FilteredSignal = scipy.signal.filtfilt(b, a, Signal) return _np.real(FilteredSignal)" 2910,"def make_butterworth_b_a(lowcut, highcut, SampleFreq, order=5, btype='band'): """""" Generates the b and a coefficients for a butterworth IIR filter. Parameters ---------- lowcut : float frequency of lower bandpass limit highcut : float frequency of higher bandpass limit SampleFreq : float Sample frequency of filter order : int, optional order of IIR filter. Is 5 by default btype : string, optional type of filter to make e.g. (band, low, high) Returns ------- b : ndarray coefficients multiplying the current and past inputs (feedforward coefficients) a : ndarray coefficients multiplying the past outputs (feedback coefficients) """""" nyq = 0.5 * SampleFreq low = lowcut / nyq high = highcut / nyq if btype.lower() == 'band': b, a = scipy.signal.butter(order, [low, high], btype = btype) elif btype.lower() == 'low': b, a = scipy.signal.butter(order, low, btype = btype) elif btype.lower() == 'high': b, a = scipy.signal.butter(order, high, btype = btype) else: raise ValueError('Filter type unknown') return b, a" 2911,"def make_butterworth_bandpass_b_a(CenterFreq, bandwidth, SampleFreq, order=5, btype='band'): """""" Generates the b and a coefficients for a butterworth bandpass IIR filter. Parameters ---------- CenterFreq : float central frequency of bandpass bandwidth : float width of the bandpass from centre to edge SampleFreq : float Sample frequency of filter order : int, optional order of IIR filter. Is 5 by default btype : string, optional type of filter to make e.g. (band, low, high) Returns ------- b : ndarray coefficients multiplying the current and past inputs (feedforward coefficients) a : ndarray coefficients multiplying the past outputs (feedback coefficients) """""" lowcut = CenterFreq-bandwidth/2 highcut = CenterFreq+bandwidth/2 b, a = make_butterworth_b_a(lowcut, highcut, SampleFreq, order, btype) return b, a" 2912,"def IIR_filter_design(CentralFreq, bandwidth, transitionWidth, SampleFreq, GainStop=40, GainPass=0.01): """""" Function to calculate the coefficients of an IIR filter, IMPORTANT NOTE: make_butterworth_bandpass_b_a and make_butterworth_b_a can produce IIR filters with higher sample rates and are prefereable due to this. 
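For orientation, a minimal sketch comparing the two routes (the centre frequency, bandwidth, transition width and sample rate below are hypothetical placeholders):

>>> b, a = make_butterworth_bandpass_b_a(75e3, 10e3, 1e6)           # 5th-order Butterworth band-pass
>>> b2, a2 = IIR_filter_design(75e3, 10e3, 5e3, 1e6, GainStop=40)   # scipy.signal.iirdesign route

Either coefficient pair can then be applied with scipy.signal.filtfilt or inspected with get_freq_response.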
Parameters ---------- CentralFreq : float Central frequency of the IIR filter to be designed bandwidth : float The width of the passband to be created about the central frequency transitionWidth : float The width of the transition band between the pass-band and stop-band SampleFreq : float The sample frequency (rate) of the data to be filtered GainStop : float, optional The dB of attenuation within the stopband (i.e. outside the passband) GainPass : float, optional The dB attenuation inside the passband (ideally close to 0 for a bandpass filter) Returns ------- b : ndarray coefficients multiplying the current and past inputs (feedforward coefficients) a : ndarray coefficients multiplying the past outputs (feedback coefficients) """""" NyquistFreq = SampleFreq / 2 if (CentralFreq + bandwidth / 2 + transitionWidth > NyquistFreq): raise ValueError( ""Need a higher Sample Frequency for this Central Freq, Bandwidth and transition Width"") CentralFreqNormed = CentralFreq / NyquistFreq bandwidthNormed = bandwidth / NyquistFreq transitionWidthNormed = transitionWidth / NyquistFreq bandpass = [CentralFreqNormed - bandwidthNormed / 2, CentralFreqNormed + bandwidthNormed / 2] bandstop = [CentralFreqNormed - bandwidthNormed / 2 - transitionWidthNormed, CentralFreqNormed + bandwidthNormed / 2 + transitionWidthNormed] print(bandpass, bandstop) b, a = scipy.signal.iirdesign(bandpass, bandstop, GainPass, GainStop) return b, a" 2913,"def get_freq_response(a, b, show_fig=True, SampleFreq=(2 * pi), NumOfFreqs=500, whole=False): """""" This function takes an array of coefficients and finds the frequency response of the filter using scipy.signal.freqz. show_fig sets if the response should be plotted Parameters ---------- b : array_like Coefficients multiplying the x values (inputs of the filter) a : array_like Coefficients multiplying the y values (outputs of the filter) show_fig : bool, optional Verbosity of function (i.e. whether to plot frequency and phase response or whether to just return the values.) Options (Default is 1): False - Do not plot anything, just return values True - Plot Frequency and Phase response and return values SampleFreq : float, optional Sample frequency (in Hz) to simulate (used to convert frequency range to normalised frequency range) NumOfFreqs : int, optional Number of frequencies to use to simulate the frequency and phase response of the filter. Default is 500. 
Whole : bool, optional Sets whether to plot the whole response (0 to sample freq) or just to plot 0 to Nyquist (SampleFreq/2): False - (default) plot 0 to Nyquist (SampleFreq/2) True - plot the whole response (0 to sample freq) Returns ------- freqList : ndarray Array containing the frequencies at which the gain is calculated GainArray : ndarray Array containing the gain in dB of the filter when simulated (20*log_10(A_out/A_in)) PhaseDiffArray : ndarray Array containing the phase response of the filter - phase difference between the input signal and output signal at different frequencies """""" w, h = scipy.signal.freqz(b=b, a=a, worN=NumOfFreqs, whole=whole) freqList = w / (pi) * SampleFreq / 2.0 himag = _np.array([hi.imag for hi in h]) GainArray = 20 * _np.log10(_np.abs(h)) PhaseDiffArray = _np.unwrap(_np.arctan2(_np.imag(h), _np.real(h))) fig1 = _plt.figure() ax1 = fig1.add_subplot(111) ax1.plot(freqList, GainArray, '-', label=""Specified Filter"") ax1.set_title(""Frequency Response"") if SampleFreq == 2 * pi: ax1.set_xlabel((""$\Omega$ - Normalized frequency "" ""($\pi$=Nyquist Frequency)"")) else: ax1.set_xlabel(""frequency (Hz)"") ax1.set_ylabel(""Gain (dB)"") ax1.set_xlim([0, SampleFreq / 2.0]) if show_fig == True: _plt.show() fig2 = _plt.figure() ax2 = fig2.add_subplot(111) ax2.plot(freqList, PhaseDiffArray, '-', label=""Specified Filter"") ax2.set_title(""Phase Response"") if SampleFreq == 2 * pi: ax2.set_xlabel((""$\Omega$ - Normalized frequency "" ""($\pi$=Nyquist Frequency)"")) else: ax2.set_xlabel(""frequency (Hz)"") ax2.set_ylabel(""Phase Difference"") ax2.set_xlim([0, SampleFreq / 2.0]) if show_fig == True: _plt.show() return freqList, GainArray, PhaseDiffArray, fig1, ax1, fig2, ax2" 2914,"def multi_plot_PSD(DataArray, xlim=[0, 500], units=""kHz"", LabelArray=[], ColorArray=[], alphaArray=[], show_fig=True): """""" plot the pulse spectral density for multiple data sets on the same axes. Parameters ---------- DataArray : array-like array of DataObject instances for which to plot the PSDs xlim : array-like, optional 2 element array specifying the lower and upper x limit for which to plot the Power Spectral Density units : string units to use for the x axis LabelArray : array-like, optional array of labels for each data-set to be plotted ColorArray : array-like, optional array of colors for each data-set to be plotted show_fig : bool, optional If True runs plt.show() before returning figure if False it just returns the figure object. 
(the default is True, it shows the figure) Returns ------- fig : matplotlib.figure.Figure object The figure object created ax : matplotlib.axes.Axes object The axes object created """""" unit_prefix = units[:-2] # removed the last 2 chars if LabelArray == []: LabelArray = [""DataSet {}"".format(i) for i in _np.arange(0, len(DataArray), 1)] if ColorArray == []: ColorArray = _np.empty(len(DataArray)) ColorArray = list(ColorArray) for i, ele in enumerate(ColorArray): ColorArray[i] = None if alphaArray == []: alphaArray = _np.empty(len(DataArray)) alphaArray = list(alphaArray) for i, ele in enumerate(alphaArray): alphaArray[i] = None fig = _plt.figure(figsize=properties['default_fig_size']) ax = fig.add_subplot(111) for i, data in enumerate(DataArray): ax.semilogy(unit_conversion(data.freqs, unit_prefix), data.PSD, label=LabelArray[i], color=ColorArray[i], alpha=alphaArray[i]) ax.set_xlabel(""Frequency ({})"".format(units)) ax.set_xlim(xlim) ax.grid(which=""major"") legend = ax.legend(loc=""best"", frameon = 1) frame = legend.get_frame() frame.set_facecolor('white') frame.set_edgecolor('white') ax.set_ylabel(""PSD ($v^2/Hz$)"") _plt.title('filedir=%s' % (DataArray[0].filedir)) if show_fig == True: _plt.show() return fig, ax" 2915,"def multi_plot_time(DataArray, SubSampleN=1, units='s', xlim=None, ylim=None, LabelArray=[], show_fig=True): """""" plot the time trace for multiple data sets on the same axes. Parameters ---------- DataArray : array-like array of DataObject instances for which to plot the PSDs SubSampleN : int, optional Number of intervals between points to remove (to sub-sample data so that you effectively have lower sample rate to make plotting easier and quicker. xlim : array-like, optional 2 element array specifying the lower and upper x limit for which to plot the time signal LabelArray : array-like, optional array of labels for each data-set to be plotted show_fig : bool, optional If True runs plt.show() before returning figure if False it just returns the figure object. (the default is True, it shows the figure) Returns ------- fig : matplotlib.figure.Figure object The figure object created ax : matplotlib.axes.Axes object The axes object created """""" unit_prefix = units[:-1] # removed the last char if LabelArray == []: LabelArray = [""DataSet {}"".format(i) for i in _np.arange(0, len(DataArray), 1)] fig = _plt.figure(figsize=properties['default_fig_size']) ax = fig.add_subplot(111) for i, data in enumerate(DataArray): ax.plot(unit_conversion(data.time.get_array()[::SubSampleN], unit_prefix), data.voltage[::SubSampleN], alpha=0.8, label=LabelArray[i]) ax.set_xlabel(""time (s)"") if xlim != None: ax.set_xlim(xlim) if ylim != None: ax.set_ylim(ylim) ax.grid(which=""major"") legend = ax.legend(loc=""best"", frameon = 1) frame = legend.get_frame() frame.set_facecolor('white') frame.set_edgecolor('white') ax.set_ylabel(""voltage (V)"") if show_fig == True: _plt.show() return fig, ax" 2916,"def multi_subplots_time(DataArray, SubSampleN=1, units='s', xlim=None, ylim=None, LabelArray=[], show_fig=True): """""" plot the time trace on multiple axes Parameters ---------- DataArray : array-like array of DataObject instances for which to plot the PSDs SubSampleN : int, optional Number of intervals between points to remove (to sub-sample data so that you effectively have lower sample rate to make plotting easier and quicker. 
xlim : array-like, optional 2 element array specifying the lower and upper x limit for which to plot the time signal LabelArray : array-like, optional array of labels for each data-set to be plotted show_fig : bool, optional If True runs plt.show() before returning the figure; if False it just returns the figure object. (the default is True, it shows the figure) Returns ------- fig : matplotlib.figure.Figure object The figure object created axs : list of matplotlib.axes.Axes objects The list of axes objects created """""" unit_prefix = units[:-1] # removed the last char NumDataSets = len(DataArray) if LabelArray == []: LabelArray = [""DataSet {}"".format(i) for i in _np.arange(0, len(DataArray), 1)] fig, axs = _plt.subplots(NumDataSets, 1) for i, data in enumerate(DataArray): axs[i].plot(unit_conversion(data.time.get_array()[::SubSampleN], unit_prefix), data.voltage[::SubSampleN], alpha=0.8, label=LabelArray[i]) axs[i].set_xlabel(""time ({})"".format(units)) axs[i].grid(which=""major"") axs[i].legend(loc=""best"") axs[i].set_ylabel(""voltage (V)"") if xlim != None: axs[i].set_xlim(xlim) if ylim != None: axs[i].set_ylim(ylim) if show_fig == True: _plt.show() return fig, axs" 2917,"def arrange_plots_on_one_canvas(FigureAxTupleArray, title='', SubtitleArray = [], show_fig=True): """""" Arranges plots, given in an array of tuples consisting of fig and axs, onto a combined figure laid out as 2 columns by ceil(N/2) rows of subplots, where N is the length of the passed (fig, axs) array Parameters ---------- FigureAxTupleArray : array-like array of Tuples(fig, axs) outputted from the other plotting functions inside optoanalysis title : string, optional string for the global title of the overall combined figure SubtitleArray : array-like, optional array of titles for each figure-set to be plotted, i.e. subplots show_fig : bool, optional If True runs plt.show() before returning the figure; if False it just returns the figure object. (the default is True, it shows the figure) Returns ------- fig : matplotlib.figure.Figure object The combined figure object created """""" if SubtitleArray == []: SubtitleArray = [""Plot {}"".format(i) for i in _np.arange(0, len(FigureAxTupleArray), 1)] SingleFigSize = FigureAxTupleArray[0][0].get_size_inches() combinedFig=_plt.figure(figsize=(2*SingleFigSize[0],_np.ceil(len(FigureAxTupleArray)/2)*SingleFigSize[1])) for index in range(len(FigureAxTupleArray)): individualPlot = FigureAxTupleArray[index] individualPlot[0].set_size_inches((2*SingleFigSize[0],_np.ceil(len(FigureAxTupleArray)/2)*SingleFigSize[1])) ax = individualPlot[1] ax.set_title(SubtitleArray[index]) ax.remove() ax.figure = combinedFig ax.change_geometry(int(_np.ceil(len(FigureAxTupleArray)/2)),2,1+index) combinedFig.axes.append(ax) combinedFig.add_axes(ax) #_plt.close(individualPlot[0]) combinedFig.subplots_adjust(hspace=.4) combinedFig.suptitle(title) if show_fig == True: _plt.show() return combinedFig" 2918,"def calc_PSD(Signal, SampleFreq, NPerSegment=1000000, window=""hann""): """""" Extracts the power spectral density (PSD) from the data. Parameters ---------- Signal : array-like Array containing the signal to have the PSD calculated for SampleFreq : float Sample frequency of the signal array NPerSegment : int, optional Length of each segment used in scipy.welch default = 1000000 window : str or tuple or array_like, optional Desired window to use. See get_window for a list of windows and required parameters. 
If window is array_like it will be used directly as the window and its length will be used for nperseg. default = ""hann"" Returns ------- freqs : ndarray Array containing the frequencies at which the PSD has been calculated PSD : ndarray Array containing the value of the PSD at the corresponding frequency value in V**2/Hz """""" freqs, PSD = scipy.signal.welch(Signal, SampleFreq, window=window, nperseg=NPerSegment) PSD = PSD[freqs.argsort()] freqs.sort() return freqs, PSD" 2919,"def calc_autocorrelation(Signal, FFT=False, PyCUDA=False): """""" Calculates the autocorrelation of a given Signal, either directly via scipy.signal.correlate or via an FFT-based method. Parameters ---------- Signal : array-like Array containing the signal to have the autocorrelation calculated for FFT : bool, optional Uses FFT to accelerate the autocorrelation calculation, but assumes a certain periodicity on the signal to autocorrelate. Zero-padding is added to account for this periodicity assumption. PyCUDA : bool, optional If True, uses PyCUDA to accelerate the FFT and IFFT using your NVIDIA GPU If False, performs FFT and IFFT with conventional scipy.fftpack Returns ------- Autocorrelation : ndarray Array containing the value of the autocorrelation evaluated at the corresponding amount of shifted array-index. """""" if FFT==True: Signal_padded = scipy.fftpack.ifftshift((Signal-_np.average(Signal))/_np.std(Signal)) n, = Signal_padded.shape Signal_padded = _np.r_[Signal_padded[:n//2], _np.zeros_like(Signal_padded), Signal_padded[n//2:]] if PyCUDA==True: f = calc_fft_with_PyCUDA(Signal_padded) else: f = scipy.fftpack.fft(Signal_padded) p = _np.absolute(f)**2 if PyCUDA==True: autocorr = calc_ifft_with_PyCUDA(p) else: autocorr = scipy.fftpack.ifft(p) return _np.real(autocorr)[:n//2]/(_np.arange(n//2)[::-1]+n//2) else: Signal = Signal - _np.mean(Signal) autocorr = scipy.signal.correlate(Signal, Signal, mode='full') return autocorr[autocorr.size//2:]/autocorr[autocorr.size//2]" 2920,"def _GetRealImagArray(Array): """""" Takes the real and imaginary components of each element in an array and returns them in 2 resulting arrays. Parameters ---------- Array : ndarray Input array Returns ------- RealArray : ndarray The real components of the input array ImagArray : ndarray The imaginary components of the input array """""" ImagArray = _np.array([num.imag for num in Array]) RealArray = _np.array([num.real for num in Array]) return RealArray, ImagArray" 2921,"def _GetComplexConjugateArray(Array): """""" Calculates the complex conjugate of each element in an array and returns the resulting array. Parameters ---------- Array : ndarray Input array Returns ------- ConjArray : ndarray The complex conjugate of the input array. """""" ConjArray = _np.array([num.conj() for num in Array]) return ConjArray" 2922,"def fm_discriminator(Signal): """""" Calculates the digital FM discriminator from a real-valued time signal. Parameters ---------- Signal : array-like A real-valued time signal Returns ------- fmDiscriminator : array-like The digital FM discriminator of the argument signal """""" S_analytic = _hilbert(Signal) S_analytic_star = _GetComplexConjugateArray(S_analytic) S_analytic_hat = S_analytic[1:] * S_analytic_star[:-1] R, I = _GetRealImagArray(S_analytic_hat) fmDiscriminator = _np.arctan2(I, R) return fmDiscriminator" 2923,"def _is_this_a_collision(ArgList): """""" Detects whether a particular point lies within the after-effect of a collision (i.e. a phase shift) or not. 
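For intuition, a small hedged sketch (the tone frequency and sample rate are hypothetical): for a clean single-frequency signal the discriminator sits close to 2*pi*f/SampleFreq, so the collision test below looks for points that stray from that mean by more than the tolerance.

>>> import numpy as np
>>> fs, f = 1e6, 50e3
>>> t = np.arange(0, 1e-3, 1/fs)
>>> fmd = fm_discriminator(np.cos(2*np.pi*f*t))
>>> mean_fmd = np.mean(fmd)   # approximately 2*np.pi*f/fs for an unperturbed tone
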
Parameters ---------- ArgList : array_like Contains the following elements: value : float value of the FM discriminator mean_fmd : float the mean value of the FM discriminator tolerance : float The tolerance in percentage that it must be away from the mean value for it to be counted as a collision event. Returns ------- is_this_a_collision : bool True if this is a collision event, false if not. """""" value, mean_fmd, tolerance = ArgList if not _approx_equal(mean_fmd, value, tolerance): return True else: return False" 2924,"def find_collisions(Signal, tolerance=50): """""" Finds collision events in the signal from the shift in phase of the signal. Parameters ---------- Signal : array_like Array containing the values of the signal of interest containing a single frequency. tolerance : float Percentage tolerance, if the value of the FM Discriminator varies from the mean by this percentage it is counted as being during a collision event (or the aftermath of an event). Returns ------- Collisions : ndarray Array of booleans, true if during a collision event, false otherwise. """""" fmd = fm_discriminator(Signal) mean_fmd = _np.mean(fmd) Collisions = [_is_this_a_collision( [value, mean_fmd, tolerance]) for value in fmd] return Collisions" 2925,"def count_collisions(Collisions): """""" Counts the number of unique collisions and gets the collision index. Parameters ---------- Collisions : array_like Array of booleans, containing true if during a collision event, false otherwise. Returns ------- CollisionCount : int Number of unique collisions CollisionIndicies : list Indicies of collision occurance """""" CollisionCount = 0 CollisionIndicies = [] lastval = True for i, val in enumerate(Collisions): if val == True and lastval == False: CollisionIndicies.append(i) CollisionCount += 1 lastval = val return CollisionCount, CollisionIndicies" 2926,"def parse_orgtable(lines): """""" Parse an org-table (input as a list of strings split by newline) into a Pandas data frame. Parameters ---------- lines : string an org-table input as a list of strings split by newline Returns ------- dataframe : pandas.DataFrame A data frame containing the org-table's data """""" def parseline(l): w = l.split('|')[1:-1] return [wi.strip() for wi in w] columns = parseline(lines[0]) data = [] for line in lines[2:]: data.append(map(str, parseline(line))) dataframe = _pd.DataFrame(data=data, columns=columns) dataframe.set_index(""RunNo"") return dataframe" 2927,"def plot_3d_dist(Z, X, Y, N=1000, AxisOffset=0, Angle=-40, LowLim=None, HighLim=None, show_fig=True): """""" Plots Z, X and Y as a 3d scatter plot with heatmaps of each axis pair. 
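A hedged usage sketch (the position arrays are assumed to come from get_ZXY_data or a similar extraction step; the limits below are illustrative only):

>>> fig, ax = plot_3d_dist(z, x, y, N=2000, AxisOffset=10,
...                        LowLim=-150, HighLim=150, show_fig=False)

The first N samples of each axis are scattered in 3D, and a 2D histogram of each axis pair is painted onto the corresponding face of the box.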
Parameters ---------- Z : ndarray Array of Z positions with time X : ndarray Array of X positions with time Y : ndarray Array of Y positions with time N : optional, int Number of time points to plot (Defaults to 1000) AxisOffset : optional, double Offset to add to each axis from the data - used to get a better view of the heat maps (Defaults to 0) LowLim : optional, double Lower limit of x, y and z axis HighLim : optional, double Upper limit of x, y and z axis show_fig : optional, bool Whether to show the produced figure before returning Returns ------- fig : matplotlib.figure.Figure object The figure object created ax : matplotlib.axes.Axes object The subplot object created """""" angle = Angle fig = _plt.figure(figsize=(8, 6)) ax = fig.add_subplot(111, projection='3d') y = Z[0:N] x = X[0:N] z = Y[0:N] ax.scatter(x, y, z, alpha=0.3) xlim = ax.get_xlim() ylim = ax.get_ylim() zlim = ax.get_zlim() if LowLim != None: lowLim = LowLim - AxisOffset else: lowLim = min([xlim[0], ylim[0], zlim[0]]) - AxisOffset if HighLim != None: highLim = HighLim + AxisOffset else: highLim = max([xlim[1], ylim[1], zlim[1]]) + AxisOffset ax.set_xlim([lowLim, highLim]) ax.set_ylim([lowLim, highLim]) ax.set_zlim([lowLim, highLim]) ax.set_xlabel(""x"") ax.set_ylabel(""z"") ax.set_zlabel(""y"") ax.view_init(30, angle) h, yedges, zedges = _np.histogram2d(y, z, bins=50) h = h.transpose() normalized_map = _plt.cm.Blues(h/h.max()) yy, zz = _np.meshgrid(yedges, zedges) xpos = lowLim # Plane of histogram xflat = _np.full_like(yy, xpos) p = ax.plot_surface(xflat, yy, zz, facecolors=normalized_map, rstride=1, cstride=1, shade=False) h, xedges, zedges = _np.histogram2d(x, z, bins=50) h = h.transpose() normalized_map = _plt.cm.Blues(h/h.max()) xx, zz = _np.meshgrid(xedges, zedges) ypos = highLim # Plane of histogram yflat = _np.full_like(xx, ypos) p = ax.plot_surface(xx, yflat, zz, facecolors=normalized_map, rstride=1, cstride=1, shade=False) h, yedges, xedges = _np.histogram2d(y, x, bins=50) h = h.transpose() normalized_map = _plt.cm.Blues(h/h.max()) yy, xx = _np.meshgrid(yedges, xedges) zpos = lowLim # Plane of histogram zflat = _np.full_like(yy, zpos) p = ax.plot_surface(xx, yy, zflat, facecolors=normalized_map, rstride=1, cstride=1, shade=False) if show_fig == True: _plt.show() return fig, ax" 2928,"def multi_plot_3d_dist(ZXYData, N=1000, AxisOffset=0, Angle=-40, LowLim=None, HighLim=None, ColorArray=None, alphaLevel=0.3, show_fig=True): """""" Plots serveral Z, X and Y datasets as a 3d scatter plot with heatmaps of each axis pair in each dataset. Parameters ---------- ZXYData : ndarray Array of arrays containing Z, X, Y data e.g. 
[[Z1, X1, Y1], [Z2, X2, Y2]] N : optional, int Number of time points to plot (Defaults to 1000) AxisOffset : optional, double Offset to add to each axis from the data - used to get a better view of the heat maps (Defaults to 0) LowLim : optional, double Lower limit of x, y and z axis HighLim : optional, double Upper limit of x, y and z axis show_fig : optional, bool Whether to show the produced figure before returning Returns ------- fig : matplotlib.figure.Figure object The figure object created ax : matplotlib.axes.Axes object The subplot object created """""" if ZXYData.shape[1] != 3: raise ValueError(""Parameter ZXYData should be an array of length-3 arrays containing arrays of Z, X and Y data"") if ColorArray != None: if ZXYData.shape[0] != len(ColorArray): raise ValueError(""Parameter ColorArray should be the same lenth as ZXYData"") else: ColorArray = list(mcolours.BASE_COLORS.keys()) #ColorArray = ['b', 'g', 'r'] # ColorMapArray = [_plt.cm.Blues, _plt.cm.Greens, _plt.cm.Reds] if ZXYData.shape[0] > len(ColorArray): raise NotImplementedError(""Only {} datasets can be plotted with automatic colors"".format(len(ColorArray))) angle = Angle fig = _plt.figure(figsize=(8, 6)) ax = fig.add_subplot(111, projection='3d') for datindx, ZXY in enumerate(ZXYData): y = ZXY[0][0:N] x = ZXY[1][0:N] z = ZXY[2][0:N] ax.scatter(x, y, z, alpha=alphaLevel, color=ColorArray[datindx]) xlim = ax.get_xlim() ylim = ax.get_ylim() zlim = ax.get_zlim() if LowLim != None: lowLim = LowLim - AxisOffset else: lowLim = min([xlim[0], ylim[0], zlim[0]]) - AxisOffset if HighLim != None: highLim = HighLim + AxisOffset else: highLim = max([xlim[1], ylim[1], zlim[1]]) + AxisOffset ax.set_xlim([lowLim, highLim]) ax.set_ylim([lowLim, highLim]) ax.set_zlim([lowLim, highLim]) for datindx, ZXY in enumerate(ZXYData): y = ZXY[0][0:N] x = ZXY[1][0:N] z = ZXY[2][0:N] #h, yedges, zedges = _np.histogram2d(y, z, bins=50) #h = h.transpose() #normalized_map = ColorMapArray[datindx](h/h.max()) #yy, zz = _np.meshgrid(yedges, zedges) xpos = lowLim # Plane of histogram #xflat = _np.full_like(yy, xpos) #p = ax.plot_surface(xflat, yy, zz, facecolors=normalized_map, rstride=1, cstride=1, shade=False) xflat = _np.full_like(y, xpos) ax.scatter(xflat, y, z, color=ColorArray[datindx], alpha=alphaLevel) #h, xedges, zedges = _np.histogram2d(x, z, bins=50) #h = h.transpose() #normalized_map = ColorMapArray[datindx](h/h.max()) #xx, zz = _np.meshgrid(xedges, zedges) ypos = highLim # Plane of histogram #yflat = _np.full_like(xx, ypos) #p = ax.plot_surface(xx, yflat, zz, facecolors=normalized_map, rstride=1, cstride=1, shade=False) yflat = _np.full_like(x, ypos) ax.scatter(x, yflat, z, color=ColorArray[datindx], alpha=alphaLevel) #h, yedges, xedges = _np.histogram2d(y, x, bins=50) #h = h.transpose() #normalized_map = ColorMapArray[datindx](h/h.max()) #yy, xx = _np.meshgrid(yedges, xedges) zpos = lowLim # Plane of histogram #zflat = _np.full_like(yy, zpos) #p = ax.plot_surface(xx, yy, zflat, facecolors=normalized_map, rstride=1, cstride=1, shade=False) zflat = _np.full_like(y, zpos) ax.scatter(x, y, zflat, color=ColorArray[datindx], alpha=alphaLevel) ax.set_xlabel(""x"") ax.set_ylabel(""z"") ax.set_zlabel(""y"") ax.view_init(30, angle) if show_fig == True: _plt.show() return fig, ax" 2929,"def steady_state_potential(xdata,HistBins=100): """""" Calculates the steady state potential. Used in fit_radius_from_potentials. Parameters ---------- xdata : ndarray Position data for a degree of freedom HistBins : int Number of bins to use for histogram of xdata. 
Number of position points at which the potential is calculated. Returns ------- position : ndarray positions at which potential has been calculated potential : ndarray value of potential at the positions above """""" import numpy as _np pops=_np.histogram(xdata,HistBins)[0] bins=_np.histogram(xdata,HistBins)[1] bins=bins[0:-1] bins=bins+_np.mean(_np.diff(bins)) #normalise pops pops=pops/float(_np.sum(pops)) return bins,-_np.log(pops)" 2930,"def dynamical_potential(xdata, dt, order=3): """""" Computes potential from spring function Parameters ---------- xdata : ndarray Position data for a degree of freedom, at which to calculate potential dt : float time between measurements order : int order of polynomial to fit Returns ------- Potential : ndarray valued of potential at positions in xdata """""" import numpy as _np adata = calc_acceleration(xdata, dt) xdata = xdata[2:] # removes first 2 values as differentiating twice means # we have acceleration[n] corresponds to position[n-2] z=_np.polyfit(xdata,adata,order) p=_np.poly1d(z) spring_pot=_np.polyint(p) return -spring_pot" 2931,"def calc_acceleration(xdata, dt): """""" Calculates the acceleration from the position Parameters ---------- xdata : ndarray Position data dt : float time between measurements Returns ------- acceleration : ndarray values of acceleration from position 2 to N. """""" acceleration = _np.diff(_np.diff(xdata))/dt**2 return acceleration" 2932,"def fit_radius_from_potentials(z, SampleFreq, Damping, HistBins=100, show_fig=False): """""" Fits the dynamical potential to the Steady State Potential by varying the Radius. z : ndarray Position data SampleFreq : float frequency at which the position data was sampled Damping : float value of damping (in radians/second) HistBins : int number of values at which to evaluate the steady state potential / perform the fitting to the dynamical potential Returns ------- Radius : float Radius of the nanoparticle RadiusError : float One Standard Deviation Error in the Radius from the Fit (doesn't take into account possible error in damping) fig : matplotlib.figure.Figure object figure showing fitted dynamical potential and stationary potential ax : matplotlib.axes.Axes object axes for above figure """""" dt = 1/SampleFreq boltzmann=Boltzmann temp=300 # why halved?? density=1800 SteadyStatePotnl = list(steady_state_potential(z, HistBins=HistBins)) yoffset=min(SteadyStatePotnl[1]) SteadyStatePotnl[1] -= yoffset SpringPotnlFunc = dynamical_potential(z, dt) SpringPotnl = SpringPotnlFunc(z) kBT_Gamma = temp*boltzmann*1/Damping DynamicPotentialFunc = make_dynamical_potential_func(kBT_Gamma, density, SpringPotnlFunc) FitSoln = _curve_fit(DynamicPotentialFunc, SteadyStatePotnl[0], SteadyStatePotnl[1], p0 = 50) print(FitSoln) popt, pcov = FitSoln perr = _np.sqrt(_np.diag(pcov)) Radius, RadiusError = popt[0], perr[0] mass=((4/3)*pi*((Radius*10**-9)**3))*density yfit=(kBT_Gamma/mass) Y = yfit*SpringPotnl fig, ax = _plt.subplots() ax.plot(SteadyStatePotnl[0], SteadyStatePotnl[1], 'bo', label=""Steady State Potential"") _plt.plot(z,Y, 'r-', label=""Dynamical Potential"") ax.legend(loc='best') ax.set_ylabel('U ($k_{B} T $ Joules)') ax.set_xlabel('Distance (mV)') _plt.tight_layout() if show_fig == True: _plt.show() return Radius*1e-9, RadiusError*1e-9, fig, ax" 2933,"def make_dynamical_potential_func(kBT_Gamma, density, SpringPotnlFunc): """""" Creates the function that calculates the potential given the position (in volts) and the radius of the particle. 
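A minimal stand-alone sketch of the double-differentiation route used by calc_acceleration and dynamical_potential above, run on a synthetic harmonic trace; the sample frequency, oscillation frequency and amplitude are made-up illustration values, not values from the library.

import numpy as np

fs = 1e6                                      # assumed sample frequency (Hz)
dt = 1 / fs
t = np.arange(0, 0.01, dt)
x = 0.05 * np.sin(2 * np.pi * 60e3 * t)       # assumed harmonic position trace (volts)

a = np.diff(np.diff(x)) / dt**2               # acceleration, as in calc_acceleration
coeffs = np.polyfit(x[2:], a, 3)              # fit the spring function a(x) to a cubic
spring_pot = -np.polyint(np.poly1d(coeffs))   # integrate and negate, as in dynamical_potential
print(spring_pot(0.0), spring_pot(0.05))      # potential before the kBT*Gamma/mass scaling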
Parameters ---------- kBT_Gamma : float Value of kB*T/Gamma density : float density of the nanoparticle SpringPotnlFunc : function Function which takes the value of position (in volts) and returns the spring potential Returns ------- PotentialFunc : function function that calculates the potential given the position (in volts) and the radius of the particle. """""" def PotentialFunc(xdata, Radius): """""" calculates the potential given the position (in volts) and the radius of the particle. Parameters ---------- xdata : ndarray Positon data (in volts) Radius : float Radius in units of nm Returns ------- Potential : ndarray Dynamical Spring Potential at positions given by xdata """""" mass = ((4/3)*pi*((Radius*10**-9)**3))*density yfit=(kBT_Gamma/mass) Y = yfit*SpringPotnlFunc(xdata) return Y return PotentialFunc" 2934,"def calc_z0_and_conv_factor_from_ratio_of_harmonics(z, z2, NA=0.999): """""" Calculates the Conversion Factor and physical amplitude of motion in nms by comparison of the ratio of the heights of the z signal and second harmonic of z. Parameters ---------- z : ndarray array containing z signal in volts z2 : ndarray array containing second harmonic of z signal in volts NA : float NA of mirror used in experiment Returns ------- z0 : float Physical average amplitude of motion in nms ConvFactor : float Conversion Factor between volts and nms """""" V1 = calc_mean_amp(z) V2 = calc_mean_amp(z2) ratio = V2/V1 beta = 4*ratio laserWavelength = 1550e-9 # in m k0 = (2*pi)/(laserWavelength) WaistSize = laserWavelength/(pi*NA) Zr = pi*WaistSize**2/laserWavelength z0 = beta/(k0 - 1/Zr) ConvFactor = V1/z0 T0 = 300 return z0, ConvFactor" 2935,"def calc_mass_from_z0(z0, w0): """""" Calculates the mass of the particle using the equipartition from the angular frequency of the z signal and the average amplitude of the z signal in nms. Parameters ---------- z0 : float Physical average amplitude of motion in nms w0 : float Angular Frequency of z motion Returns ------- mass : float mass in kgs """""" T0 = 300 mFromEquipartition = Boltzmann*T0/(w0**2 * z0**2) return mFromEquipartition" 2936,"def calc_mass_from_fit_and_conv_factor(A, Damping, ConvFactor): """""" Calculates mass from the A parameter from fitting, the damping from fitting in angular units and the Conversion factor calculated from comparing the ratio of the z signal and first harmonic of z. Parameters ---------- A : float A factor calculated from fitting Damping : float damping in radians/second calcualted from fitting ConvFactor : float conversion factor between volts and nms Returns ------- mass : float mass in kgs """""" T0 = 300 mFromA = 2*Boltzmann*T0/(pi*A) * ConvFactor**2 * Damping return mFromA" 2937,"def get_time_slice(time, z, zdot=None, timeStart=None, timeEnd=None): """""" Get slice of time, z and (if provided) zdot from timeStart to timeEnd. Parameters ---------- time : ndarray array of time values z : ndarray array of z values zdot : ndarray, optional array of zdot (velocity) values. timeStart : float, optional time at which to start the slice. Defaults to beginnging of time trace timeEnd : float, optional time at which to end the slide. Defaults to end of time trace Returns ------- time_sliced : ndarray array of time values from timeStart to timeEnd z_sliced : ndarray array of z values from timeStart to timeEnd zdot_sliced : ndarray array of zdot values from timeStart to timeEnd. 
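The equipartition step in calc_mass_from_z0 above is simple enough to check by hand; the trap frequency and amplitude below are assumed illustration values, with the amplitude expressed in metres for this sketch.

from scipy.constants import Boltzmann, pi

w0 = 2 * pi * 60e3                        # assumed angular trap frequency (rad/s)
z0 = 50e-9                                # assumed r.m.s. amplitude of motion (m)
T0 = 300                                  # bath temperature used throughout this module (K)
mass = Boltzmann * T0 / (w0**2 * z0**2)   # equipartition: m * w0^2 * <z^2> = kB * T
print(mass)                               # roughly 1e-17 kg, i.e. a ~100 nm scale sphere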
None if zdot not provided """""" if timeStart == None: timeStart = time[0] if timeEnd == None: timeEnd = time[-1] StartIndex = _np.where(time == take_closest(time, timeStart))[0][0] EndIndex = _np.where(time == take_closest(time, timeEnd))[0][0] time_sliced = time[StartIndex:EndIndex] z_sliced = z[StartIndex:EndIndex] if zdot != None: zdot_sliced = zdot[StartIndex:EndIndex] else: zdot_sliced = None return time_sliced, z_sliced, zdot_sliced" 2938,"def unit_conversion(array, unit_prefix, current_prefix=""""): """""" Converts an array or value to of a certain unit scale to another unit scale. Accepted units are: E - exa - 1e18 P - peta - 1e15 T - tera - 1e12 G - giga - 1e9 M - mega - 1e6 k - kilo - 1e3 m - milli - 1e-3 u - micro - 1e-6 n - nano - 1e-9 p - pico - 1e-12 f - femto - 1e-15 a - atto - 1e-18 Parameters ---------- array : ndarray Array to be converted unit_prefix : string desired unit (metric) prefix (e.g. nm would be n, ms would be m) current_prefix : optional, string current prefix of units of data (assumed to be in SI units by default (e.g. m or s) Returns ------- converted_array : ndarray Array multiplied such as to be in the units specified """""" UnitDict = { 'E': 1e18, 'P': 1e15, 'T': 1e12, 'G': 1e9, 'M': 1e6, 'k': 1e3, '': 1, 'm': 1e-3, 'u': 1e-6, 'n': 1e-9, 'p': 1e-12, 'f': 1e-15, 'a': 1e-18, } try: Desired_units = UnitDict[unit_prefix] except KeyError: raise ValueError(""You entered {} for the unit_prefix, this is not a valid prefix"".format(unit_prefix)) try: Current_units = UnitDict[current_prefix] except KeyError: raise ValueError(""You entered {} for the current_prefix, this is not a valid prefix"".format(current_prefix)) conversion_multiplication = Current_units/Desired_units converted_array = array*conversion_multiplication return converted_array" 2939,"def extract_slices(z, freq, sample_freq, show_plot=False): """""" Iterates through z trace and pulls out slices of length period_samples and assigns them a phase from -180 to 180. Each slice then becomes a column in the 2d array that is returned. Such that the row (the first index) refers to phase (i.e. dat[0] are all the samples at phase = -180) and the column refers to the oscillation number (i.e. dat[:, 0] is the first oscillation). Parameters ---------- z : ndarray trace of z motion freq : float frequency of motion sample_freq : float sample frequency of the z array show_plot : bool, optional (default=False) if true plots and shows the phase plotted against the positon for each oscillation built on top of each other. Returns ------- phase : ndarray phase (in degrees) for each oscillation phase_slices : ndarray 2d numpy array containing slices as detailed above. 
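A short worked example of the prefix arithmetic that unit_conversion above performs (multiply by the current prefix, divide by the desired prefix); the values are illustrative.

import numpy as np

prefixes = {'E': 1e18, 'P': 1e15, 'T': 1e12, 'G': 1e9, 'M': 1e6, 'k': 1e3, '': 1,
            'm': 1e-3, 'u': 1e-6, 'n': 1e-9, 'p': 1e-12, 'f': 1e-15, 'a': 1e-18}

def convert(array, unit_prefix, current_prefix=''):
    # same multiplication as unit_conversion: current units over desired units
    return np.asarray(array) * prefixes[current_prefix] / prefixes[unit_prefix]

print(convert([1.5e-6, 3.0e-6], 'u'))   # metres -> micrometres: [1.5 3. ]
print(convert(2.0, 'm', 'k'))           # kilometres -> millimetres: 2000000.0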
"""""" dt = 1/sample_freq # dt between samples period = 1/freq # period of oscillation of motion period_samples = round(period/dt) # integer number of discrete samples in a period number_of_oscillations = int(_np.floor(len(z)/period_samples)) # number of oscillations in z trace phase_slices_untransposed = _np.zeros([number_of_oscillations-1, period_samples]) phase = _np.linspace(-180, 180, period_samples) # phase assigned to samples if show_plot == True: fig, ax = _plt.subplots() for i in range(number_of_oscillations-1): # loops through number of oscillations - 1 pulling out period_samples # slices and assigning them a phase from -180 to 180 degrees start = i*period_samples # start index of section end = (i+1)*period_samples # end index of section if show_plot == True: _plt.plot(phase, z[start:end]) phase_slices_untransposed[i] = z[start:end] # enter z section as ith row phase_slices = phase_slices_untransposed.transpose() # swap rows and columns if show_plot == True: _plt.show() return phase, phase_slices" 2940,"def histogram_phase(phase_slices, phase, histbins=200, show_plot=False): """""" histograms the phase slices such as to build a histogram of the position distribution at each phase value. Parameters ---------- phase_slices : ndarray 2d array containing slices from many oscillations at each phase phase : ndarray 1d array of phases corresponding to slices histbins : int, optional (default=200) number of bins to use in histogramming data show_plot : bool, optional (default=False) if true plots and shows the heatmap of the phase against the positon distribution Returns ------- counts_array : ndarray 2d array containing the number of counts varying with phase and position. bin_edges : ndarray positions of bin edges """""" counts_array = _np.zeros([len(phase), histbins]) histedges = [phase_slices.min(), phase_slices.max()] for i, phase_slice in enumerate(phase_slices): # for each value of phase counts, bin_edges = _np.histogram(phase_slice, bins=histbins, range=histedges) # histogram the position distribution at that phase counts_array[i] = counts counts_array = _np.array(counts_array) counts_array_transposed = _np.transpose(counts_array).astype(float) if show_plot == True: fig = _plt.figure(figsize=(12, 6)) ax = fig.add_subplot(111) ax.set_title('Phase Distribution') ax.set_xlabel(""phase (°)"") ax.set_ylabel(""x"") _plt.imshow(counts_array_transposed, cmap='hot', interpolation='nearest', extent=[phase[0], phase[-1], histedges[0], histedges[1]]) ax.set_aspect('auto') _plt.show() return counts_array_transposed, bin_edges" 2941,"def get_wigner(z, freq, sample_freq, histbins=200, show_plot=False): """""" Calculates an approximation to the wigner quasi-probability distribution by splitting the z position array into slices of the length of one period of the motion. This slice is then associated with phase from -180 to 180 degrees. These slices are then histogramed in order to get a distribution of counts of where the particle is observed at each phase. The 2d array containing the counts varying with position and phase is then passed through the inverse radon transformation using the Simultaneous Algebraic Reconstruction Technique approximation from the scikit-image package. 
Parameters ---------- z : ndarray trace of z motion freq : float frequency of motion sample_freq : float sample frequency of the z array histbins : int, optional (default=200) number of bins to use in histogramming data for each phase show_plot : bool, optional (default=False) Whether or not to plot the phase distribution Returns ------- iradon_output : ndarray 2d array of size (histbins x histbins) bin_centres : ndarray positions of the bin centres """""" phase, phase_slices = extract_slices(z, freq, sample_freq, show_plot=False) counts_array, bin_edges = histogram_phase(phase_slices, phase, histbins, show_plot=show_plot) diff = bin_edges[1] - bin_edges[0] bin_centres = bin_edges[:-1] + diff iradon_output = _iradon_sart(counts_array, theta=phase) #_plt.imshow(iradon_output, extent=[bin_centres[0], bin_centres[-1], bin_centres[0], bin_centres[-1]]) #_plt.show() return iradon_output, bin_centres" 2942,"def plot_wigner3d(iradon_output, bin_centres, bin_centre_units="""", cmap=_cm.cubehelix_r, view=(10, -45), figsize=(10, 10)): """""" Plots the wigner space representation as a 3D surface plot. Parameters ---------- iradon_output : ndarray 2d array of size (histbins x histbins) bin_centres : ndarray positions of the bin centres bin_centre_units : string, optional (default="""") Units in which the bin_centres are given cmap : matplotlib.cm.cmap, optional (default=cm.cubehelix_r) color map to use for Wigner view : tuple, optional (default=(10, -45)) view angle for 3d wigner plot figsize : tuple, optional (default=(10, 10)) tuple defining size of figure created Returns ------- fig : matplotlib.figure.Figure object figure showing the wigner function ax : matplotlib.axes.Axes object axes containing the object """""" fig = _plt.figure(figsize=figsize) ax = fig.add_subplot(111, projection='3d') resid1 = iradon_output.sum(axis=0) resid2 = iradon_output.sum(axis=1) x = bin_centres # replace with x y = bin_centres # replace with p (xdot/omega) xpos, ypos = _np.meshgrid(x, y) X = xpos Y = ypos Z = iradon_output ax.set_xlabel(""x ({})"".format(bin_centre_units)) ax.set_xlabel(""y ({})"".format(bin_centre_units)) ax.scatter(_np.min(X)*_np.ones_like(y), y, resid2/_np.max(resid2)*_np.max(Z), alpha=0.7) ax.scatter(x, _np.max(Y)*_np.ones_like(x), resid1/_np.max(resid1)*_np.max(Z), alpha=0.7) # Plot the surface. surf = ax.plot_surface(X, Y, Z, cmap=cmap, linewidth=0, antialiased=False) # Customize the z axis. #ax.set_zlim(-1.01, 1.01) #ax.zaxis.set_major_locator(LinearLocator(10)) #ax.zaxis.set_major_formatter(FormatStrFormatter('%.02f')) # Add a color bar which maps values to colors. fig.colorbar(surf, shrink=0.5, aspect=5) ax.view_init(view[0], view[1]) return fig, ax" 2943,"def plot_wigner2d(iradon_output, bin_centres, cmap=_cm.cubehelix_r, figsize=(6, 6)): """""" Plots the wigner space representation as a 2D heatmap. 
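The reconstruction described in get_wigner above can be sketched end to end on a synthetic sinusoid. Everything below is an illustration: the frequencies, noise level and bin count are invented, and skimage.transform.iradon_sart stands in for the _iradon_sart import used in the module.

import numpy as np
from skimage.transform import iradon_sart

fs, f = 1e6, 50e3                               # assumed sample and oscillation frequencies (Hz)
t = np.arange(0, 0.05, 1 / fs)
z = np.sin(2 * np.pi * f * t) + 0.05 * np.random.randn(len(t))   # synthetic z trace

period_samples = round(fs / f)                  # samples per oscillation
n_osc = len(z) // period_samples
slices = z[:n_osc * period_samples].reshape(n_osc, period_samples).T   # row index = phase
phase = np.linspace(-180, 180, period_samples)

histbins = 100
edges = np.linspace(z.min(), z.max(), histbins + 1)
sinogram = np.stack([np.histogram(s, bins=edges)[0] for s in slices],
                    axis=1).astype(float)       # counts vs (position, phase)
wigner = iradon_sart(sinogram, theta=phase)     # (histbins x histbins) quasi-distribution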
Parameters ---------- iradon_output : ndarray 2d array of size (histbins x histbins) bin_centres : ndarray positions of the bin centres cmap : matplotlib.cm.cmap, optional (default=cm.cubehelix_r) color map to use for Wigner figsize : tuple, optional (default=(6, 6)) tuple defining size of figure created Returns ------- fig : matplotlib.figure.Figure object figure showing the wigner function ax : matplotlib.axes.Axes object axes containing the object """""" xx, yy = _np.meshgrid(bin_centres, bin_centres) resid1 = iradon_output.sum(axis=0) resid2 = iradon_output.sum(axis=1) wigner_marginal_seperation = 0.001 left, width = 0.2, 0.65-0.1 # left = left side of hexbin and hist_x bottom, height = 0.1, 0.65-0.1 # bottom = bottom of hexbin and hist_y bottom_h = height + bottom + wigner_marginal_seperation left_h = width + left + wigner_marginal_seperation cbar_pos = [0.03, bottom, 0.05, 0.02+width] rect_wigner = [left, bottom, width, height] rect_histx = [left, bottom_h, width, 0.2] rect_histy = [left_h, bottom, 0.2, height] # start with a rectangular Figure fig = _plt.figure(figsize=figsize) axWigner = _plt.axes(rect_wigner) axHistx = _plt.axes(rect_histx) axHisty = _plt.axes(rect_histy) pcol = axWigner.pcolor(xx, yy, iradon_output, cmap=cmap) binwidth = bin_centres[1] - bin_centres[0] axHistx.bar(bin_centres, resid2, binwidth) axHisty.barh(bin_centres, resid1, binwidth) _plt.setp(axHistx.get_xticklabels(), visible=False) # sets x ticks to be invisible while keeping gridlines _plt.setp(axHisty.get_yticklabels(), visible=False) # sets x ticks to be invisible while keeping gridlines for tick in axHisty.get_xticklabels(): tick.set_rotation(-90) cbaraxes = fig.add_axes(cbar_pos) # This is the position for the colorbar #cbar = _plt.colorbar(axp, cax = cbaraxes) cbar = fig.colorbar(pcol, cax = cbaraxes, drawedges=False) #, orientation=""horizontal"" cbar.solids.set_edgecolor(""face"") cbar.solids.set_rasterized(True) cbar.ax.set_yticklabels(cbar.ax.yaxis.get_ticklabels(), y=0, rotation=45) #cbar.set_label(cbarlabel, labelpad=-25, y=1.05, rotation=0) plotlimits = _np.max(_np.abs(bin_centres)) axWigner.axis((-plotlimits, plotlimits, -plotlimits, plotlimits)) axHistx.set_xlim(axWigner.get_xlim()) axHisty.set_ylim(axWigner.get_ylim()) return fig, axWigner, axHistx, axHisty, cbar" 2944,"def calc_reduced_chi_squared(y_observed, y_model, observation_error, number_of_fitted_parameters): """""" Calculates the reduced chi-squared, used to compare a model to observations. For example can be used to calculate how good a fit is by using fitted y values for y_model along with observed y values and error in those y values. Reduced chi-squared should be close to 1 for a good fit, lower than 1 suggests you are overestimating the measurement error (observation_error you entered is higher than the true error in the measurement). A value higher than 1 suggests either your model is a bad fit OR you are underestimating the error in the measurement (observation_error you entered is lower than the true error in the measurement). See https://en.wikipedia.org/wiki/Reduced_chi-squared_statistic for more detail. Parameters ---------- y_observed : ndarray array of measured/observed values of some variable y which you are fitting to. 
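The reduced chi-squared recipe described above amounts to one line of numpy; the observed values, model values, error and parameter count below are made-up numbers for illustration.

import numpy as np

y_obs = np.array([1.02, 1.98, 3.05, 3.96, 5.01])     # made-up measurements
y_fit = np.array([1.00, 2.00, 3.00, 4.00, 5.00])     # made-up model predictions
sigma = 0.05                                         # assumed measurement error
n_params = 2                                         # e.g. a slope and an intercept

z = (y_obs - y_fit) / sigma                          # residuals in units of the error
chi2_reduced = np.sum(z**2) / (len(y_obs) - n_params)
print(chi2_reduced)                                  # close to 1 for a good fit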
y_model : ndarray array of y values predicted by your model/fit (predicted y values corresponding to y_observed) observation_error : float error in the measurements/observations of y number_of_fitted_parameters : float number of parameters in your model Returns ------- chi2_reduced : float reduced chi-squared parameter """""" observed = _np.array(y_observed) expected = _np.array(y_model) if observed.shape != expected.shape: raise ValueError(""y_observed should have same number of elements as y_model"") residuals = (observed - expected) z = residuals / observation_error # residuals divided by known error in measurement chi2 = _np.sum(z**2) # chi squared value num_of_observations = len(observed) v = num_of_observations - number_of_fitted_parameters # v = number of degrees of freedom chi2_reduced = chi2/v return chi2_reduced" 2945,"def load_time_data(self, RelativeChannelNo=None, SampleFreq=None, PointsToLoad=-1, NormaliseByMonitorOutput=False): """""" Loads the time and voltage data and the wave description from the associated file. Parameters ---------- RelativeChannelNo : int, optional Channel number for loading saleae data files If loading a .dat file produced by the labview NI5122 daq card, used to specifiy the channel number if two channels where saved, if left None with .dat files it will assume that the file to load only contains one channel. If NormaliseByMonitorOutput is True then RelativeChannelNo specifies the monitor channel for loading a .dat file produced by the labview NI5122 daq card. SampleFreq : float, optional Manual selection of sample frequency for loading labview NI5122 daq files PointsToLoad : int, optional Number of first points to read. -1 means all points (i.e., the complete file) WORKS WITH NI5122 DATA SO FAR ONLY!!! NormaliseByMonitorOutput : bool, optional If True the particle signal trace will be divided by the monitor output, which is specified by the channel number set in the RelativeChannelNo parameter. WORKS WITH NI5122 DATA SO FAR ONLY!!! """""" f = open(self.filepath, 'rb') raw = f.read() f.close() FileExtension = self.filepath.split('.')[-1] if FileExtension == ""raw"" or FileExtension == ""trc"": with _warnings.catch_warnings(): # supress missing data warning and raise a missing # data warning from optoanalysis with the filepath _warnings.simplefilter(""ignore"") waveDescription, timeParams, self.voltage, _, missingdata = optoanalysis.LeCroy.InterpretWaveform(raw, noTimeArray=True) if missingdata: _warnings.warn(""Waveform not of expected length. 
File {} may be missing data."".format(self.filepath)) self.SampleFreq = (1 / waveDescription[""HORIZ_INTERVAL""]) elif FileExtension == ""bin"": if RelativeChannelNo == None: raise ValueError(""If loading a .bin file from the Saleae data logger you must enter a relative channel number to load"") timeParams, self.voltage = optoanalysis.Saleae.interpret_waveform(raw, RelativeChannelNo) self.SampleFreq = 1/timeParams[2] elif FileExtension == ""dat"": #for importing a file written by labview using the NI5122 daq card if SampleFreq == None: raise ValueError(""If loading a .dat file from the NI5122 daq card you must enter a SampleFreq"") if RelativeChannelNo == None: self.voltage = _np.fromfile(self.filepath, dtype='>h',count=PointsToLoad) elif RelativeChannelNo != None: filedata = _np.fromfile(self.filepath, dtype='>h',count=PointsToLoad) if NormaliseByMonitorOutput == True: if RelativeChannelNo == 0: monitorsignal = filedata[:len(filedata):2] self.voltage = filedata[1:len(filedata):2]/monitorsignal elif RelativeChannelNo == 1: monitorsignal = filedata[1:len(filedata):2] self.voltage = filedata[:len(filedata):2]/monitorsignal elif NormaliseByMonitorOutput == False: self.voltage = filedata[RelativeChannelNo:len(filedata):2] timeParams = (0,(len(self.voltage)-1)/SampleFreq,1/SampleFreq) self.SampleFreq = 1/timeParams[2] elif FileExtension == ""tdms"": # for importing a file written by labview form the NI7961 FPGA with the RecordDataPC VI if SampleFreq == None: raise ValueError(""If loading a .tdms file saved from the FPGA you must enter a SampleFreq"") self.SampleFreq = SampleFreq dt = 1/self.SampleFreq FIFO_SIZE = 262143 # this is the maximum size of the DMA FIFO on the NI 7961 FPGA with the NI 5781 DAC card tdms_file = _TdmsFile(self.filepath) channel = tdms_file.object('Measured_Data', 'data') data = channel.data[FIFO_SIZE:] # dump first 1048575 points of data # as this is the values that had already filled the buffer # from before when the record code started running volts_per_unit = 2/(2**14) self.voltage = volts_per_unit*data timeParams = [0, (data.shape[0]-1)*dt, dt] elif FileExtension == 'txt': # .txt file created by LeCroy Oscilloscope data = [] with open(self.filepath, 'r') as csvfile: reader = csv.reader(csvfile) for row in reader: data.append(row) data = _np.array(data[5:]).astype(float).transpose() t0 = data[0][0] tend = data[0][-1] dt = data[0][1] - data[0][0] self.SampleFreq = 1/dt self.voltage = data[1] del(data) timeParams = [t0, tend, dt] else: raise ValueError(""Filetype not supported"") startTime, endTime, Timestep = timeParams self.timeStart = startTime self.timeEnd = endTime self.timeStep = Timestep self.time = frange(startTime, endTime+Timestep, Timestep) return None" 2946,"def get_time_data(self, timeStart=None, timeEnd=None): """""" Gets the time and voltage data. Parameters ---------- timeStart : float, optional The time get data from. By default it uses the first time point timeEnd : float, optional The time to finish getting data from. 
By default it uses the last time point Returns ------- time : ndarray array containing the value of time (in seconds) at which the voltage is sampled voltage : ndarray array containing the sampled voltages """""" if timeStart == None: timeStart = self.timeStart if timeEnd == None: timeEnd = self.timeEnd time = self.time.get_array() StartIndex = _np.where(time == take_closest(time, timeStart))[0][0] EndIndex = _np.where(time == take_closest(time, timeEnd))[0][0] if EndIndex == len(time) - 1: EndIndex = EndIndex + 1 # so that it does not remove the last element return time[StartIndex:EndIndex], self.voltage[StartIndex:EndIndex]" 2947,"def plot_time_data(self, timeStart=None, timeEnd=None, units='s', show_fig=True): """""" plot time data against voltage data. Parameters ---------- timeStart : float, optional The time to start plotting from. By default it uses the first time point timeEnd : float, optional The time to finish plotting at. By default it uses the last time point units : string, optional units of time to plot on the x axis - defaults to s show_fig : bool, optional If True runs plt.show() before returning figure if False it just returns the figure object. (the default is True, it shows the figure) Returns ------- fig : matplotlib.figure.Figure object The figure object created ax : matplotlib.axes.Axes object The subplot object created """""" unit_prefix = units[:-1] # removed the last char if timeStart == None: timeStart = self.timeStart if timeEnd == None: timeEnd = self.timeEnd time = self.time.get_array() StartIndex = _np.where(time == take_closest(time, timeStart))[0][0] EndIndex = _np.where(time == take_closest(time, timeEnd))[0][0] fig = _plt.figure(figsize=properties['default_fig_size']) ax = fig.add_subplot(111) ax.plot(unit_conversion(time[StartIndex:EndIndex], unit_prefix), self.voltage[StartIndex:EndIndex]) ax.set_xlabel(""time ({})"".format(units)) ax.set_ylabel(""voltage (V)"") ax.set_xlim([timeStart, timeEnd]) if show_fig == True: _plt.show() return fig, ax" 2948,"def get_PSD(self, NPerSegment=1000000, window=""hann"", timeStart=None, timeEnd=None, override=False): """""" Extracts the power spectral density (PSD) from the data. Parameters ---------- NPerSegment : int, optional Length of each segment used in scipy.welch default = 1000000 window : str or tuple or array_like, optional Desired window to use. See get_window for a list of windows and required parameters. If window is array_like it will be used directly as the window and its length will be used for nperseg. 
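calc_PSD itself is not part of this excerpt, but the docstring above points at scipy.signal.welch, so an equivalent stand-alone call might look like the following; the sample frequency and the random stand-in trace are assumptions.

import numpy as np
from scipy import signal

sample_freq = 1e6                          # assumed sample frequency (Hz)
voltage = np.random.randn(2_000_000)       # stand-in for the measured voltage trace
freqs, PSD = signal.welch(voltage, fs=sample_freq,
                          window="hann", nperseg=1_000_000)   # PSD in V**2/Hz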
default = ""hann"" Returns ------- freqs : ndarray Array containing the frequencies at which the PSD has been calculated PSD : ndarray Array containing the value of the PSD at the corresponding frequency value in V**2/Hz """""" if timeStart == None and timeEnd == None: freqs, PSD = calc_PSD(self.voltage, self.SampleFreq, NPerSegment=NPerSegment) self.PSD = PSD self.freqs = freqs else: if timeStart == None: timeStart = self.timeStart if timeEnd == None: timeEnd = self.timeEnd time = self.time.get_array() StartIndex = _np.where(time == take_closest(time, timeStart))[0][0] EndIndex = _np.where(time == take_closest(time, timeEnd))[0][0] if EndIndex == len(time) - 1: EndIndex = EndIndex + 1 # so that it does not remove the last element freqs, PSD = calc_PSD(self.voltage[StartIndex:EndIndex], self.SampleFreq, NPerSegment=NPerSegment) if override == True: self.freqs = freqs self.PSD = PSD return freqs, PSD" 2949,"def plot_PSD(self, xlim=None, units=""kHz"", show_fig=True, timeStart=None, timeEnd=None, *args, **kwargs): """""" plot the pulse spectral density. Parameters ---------- xlim : array_like, optional The x limits of the plotted PSD [LowerLimit, UpperLimit] Default value is [0, SampleFreq/2] units : string, optional Units of frequency to plot on the x axis - defaults to kHz show_fig : bool, optional If True runs plt.show() before returning figure if False it just returns the figure object. (the default is True, it shows the figure) Returns ------- fig : matplotlib.figure.Figure object The figure object created ax : matplotlib.axes.Axes object The subplot object created """""" # self.get_PSD() if timeStart == None and timeEnd == None: freqs = self.freqs PSD = self.PSD else: freqs, PSD = self.get_PSD(timeStart=timeStart, timeEnd=timeEnd) unit_prefix = units[:-2] if xlim == None: xlim = [0, unit_conversion(self.SampleFreq/2, unit_prefix)] fig = _plt.figure(figsize=properties['default_fig_size']) ax = fig.add_subplot(111) ax.semilogy(unit_conversion(freqs, unit_prefix), PSD, *args, **kwargs) ax.set_xlabel(""Frequency ({})"".format(units)) ax.set_xlim(xlim) ax.grid(which=""major"") ax.set_ylabel(""$S_{xx}$ ($V^2/Hz$)"") if show_fig == True: _plt.show() return fig, ax" 2950,"def calc_area_under_PSD(self, lowerFreq, upperFreq): """""" Sums the area under the PSD from lowerFreq to upperFreq. Parameters ---------- lowerFreq : float The lower limit of frequency to sum from upperFreq : float The upper limit of frequency to sum to Returns ------- AreaUnderPSD : float The area under the PSD from lowerFreq to upperFreq """""" Freq_startAreaPSD = take_closest(self.freqs, lowerFreq) index_startAreaPSD = int(_np.where(self.freqs == Freq_startAreaPSD)[0][0]) Freq_endAreaPSD = take_closest(self.freqs, upperFreq) index_endAreaPSD = int(_np.where(self.freqs == Freq_endAreaPSD)[0][0]) AreaUnderPSD = sum(self.PSD[index_startAreaPSD: index_endAreaPSD]) return AreaUnderPSD" 2951,"def get_fit(self, TrapFreq, WidthOfPeakToFit, A_Initial=0.1e10, Gamma_Initial=400, silent=False, MakeFig=True, show_fig=True): """""" Function that fits to a peak to the PSD to extract the frequency, A factor and Gamma (damping) factor. Parameters ---------- TrapFreq : float The approximate trapping frequency to use initially as the centre of the peak WidthOfPeakToFit : float The width of the peak to be fitted to. 
This limits the region that the fitting function can see in order to stop it from fitting to the wrong peak A_Initial : float, optional The initial value of the A parameter to use in fitting Gamma_Initial : float, optional The initial value of the Gamma parameter to use in fitting Silent : bool, optional Whether to print any output when running this function defaults to False MakeFig : bool, optional Whether to construct and return the figure object showing the fitting. defaults to True show_fig : bool, optional Whether to show the figure object when it has been created. defaults to True Returns ------- A : uncertainties.ufloat Fitting constant A A = γ**2*2*Γ_0*(K_b*T_0)/(π*m) where: γ = conversionFactor Γ_0 = Damping factor due to environment π = pi OmegaTrap : uncertainties.ufloat The trapping frequency in the z axis (in angular frequency) Gamma : uncertainties.ufloat The damping factor Gamma = Γ = Γ_0 + δΓ where: Γ_0 = Damping factor due to environment δΓ = extra damping due to feedback or other effects fig : matplotlib.figure.Figure object figure object containing the plot ax : matplotlib.axes.Axes object axes with the data plotted of the: - initial data - smoothed data - initial fit - final fit """""" if MakeFig == True: Params, ParamsErr, fig, ax = fit_PSD( self, WidthOfPeakToFit, TrapFreq, A_Initial, Gamma_Initial, MakeFig=MakeFig, show_fig=show_fig) else: Params, ParamsErr, _ , _ = fit_PSD( self, WidthOfPeakToFit, TrapFreq, A_Initial, Gamma_Initial, MakeFig=MakeFig, show_fig=show_fig) if silent == False: print(""\n"") print(""A: {} +- {}% "".format(Params[0], ParamsErr[0] / Params[0] * 100)) print( ""Trap Frequency: {} +- {}% "".format(Params[1], ParamsErr[1] / Params[1] * 100)) print( ""Big Gamma: {} +- {}% "".format(Params[2], ParamsErr[2] / Params[2] * 100)) self.A = _uncertainties.ufloat(Params[0], ParamsErr[0]) self.OmegaTrap = _uncertainties.ufloat(Params[1], ParamsErr[1]) self.Gamma = _uncertainties.ufloat(Params[2], ParamsErr[2]) if MakeFig == True: return self.A, self.OmegaTrap, self.Gamma, fig, ax else: return self.A, self.OmegaTrap, self.Gamma, None, None" 2952,"def get_fit_from_peak(self, lowerLimit, upperLimit, NumPointsSmoothing=1, silent=False, MakeFig=True, show_fig=True): """""" Finds approximate values for the peaks central frequency, height, and FWHM by looking for the heighest peak in the frequency range defined by the input arguments. It then uses the central frequency as the trapping frequency, peak height to approximate the A value and the FWHM to an approximate the Gamma (damping) value. Parameters ---------- lowerLimit : float The lower frequency limit of the range in which it looks for a peak upperLimit : float The higher frequency limit of the range in which it looks for a peak NumPointsSmoothing : float The number of points of moving-average smoothing it applies before fitting the peak. Silent : bool, optional Whether it prints the values fitted or is silent. show_fig : bool, optional Whether it makes and shows the figure object or not. 
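A hypothetical usage sketch for the fitting method above. The methods shown are defined in this module, but the loader name, file name and every numeric value are assumptions, so treat this as a sketch of the intended call pattern rather than documented API.

import optoanalysis   # assumption: these methods belong to this package's data object

data = optoanalysis.load_data("particle_trace.raw")         # hypothetical loader and file
freqs, PSD = data.get_PSD(NPerSegment=1_000_000)
A, OmegaTrap, Gamma, fig, ax = data.get_fit(TrapFreq=60e3,  # assumed trap frequency (Hz)
                                            WidthOfPeakToFit=20e3)
print(A, OmegaTrap, Gamma)                                  # uncertainties ufloats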
Returns ------- OmegaTrap : ufloat Trapping frequency A : ufloat A parameter Gamma : ufloat Gamma, the damping parameter """""" lowerIndex = _np.where(self.freqs == take_closest(self.freqs, lowerLimit))[0][0] upperIndex = _np.where(self.freqs == take_closest(self.freqs, upperLimit))[0][0] if lowerIndex == upperIndex: _warnings.warn(""range is too small, returning NaN"", UserWarning) val = _uncertainties.ufloat(_np.NaN, _np.NaN) return val, val, val, val, val MaxPSD = max(self.PSD[lowerIndex:upperIndex]) centralIndex = _np.where(self.PSD == MaxPSD)[0][0] CentralFreq = self.freqs[centralIndex] approx_A = MaxPSD * 1e16 # 1e16 was calibrated for a number of saves to be approximately the correct conversion factor between the height of the PSD and the A factor in the fitting MinPSD = min(self.PSD[lowerIndex:upperIndex]) # need to get this on log scale HalfMax = MinPSD + (MaxPSD - MinPSD) / 2 try: LeftSideOfPeakIndex = _np.where(self.PSD == take_closest(self.PSD[lowerIndex:centralIndex], HalfMax))[0][0] LeftSideOfPeak = self.freqs[LeftSideOfPeakIndex] except IndexError: _warnings.warn(""range is too small, returning NaN"", UserWarning) val = _uncertainties.ufloat(_np.NaN, _np.NaN) return val, val, val, val, val try: RightSideOfPeakIndex = _np.where(self.PSD == take_closest(self.PSD[centralIndex:upperIndex], HalfMax))[0][0] RightSideOfPeak = self.freqs[RightSideOfPeakIndex] except IndexError: _warnings.warn(""range is too small, returning NaN"", UserWarning) val = _uncertainties.ufloat(_np.NaN, _np.NaN) return val, val, val, val, val FWHM = RightSideOfPeak - LeftSideOfPeak approx_Gamma = FWHM/4 try: A, OmegaTrap, Gamma, fig, ax \ = self.get_fit(CentralFreq, (upperLimit-lowerLimit)/2, A_Initial=approx_A, Gamma_Initial=approx_Gamma, silent=silent, MakeFig=MakeFig, show_fig=show_fig) except (TypeError, ValueError) as e: _warnings.warn(""range is too small to fit, returning NaN"", UserWarning) val = _uncertainties.ufloat(_np.NaN, _np.NaN) return val, val, val, val, val OmegaTrap = self.OmegaTrap A = self.A Gamma = self.Gamma omegaArray = 2 * pi * \ self.freqs[LeftSideOfPeakIndex:RightSideOfPeakIndex] PSDArray = self.PSD[LeftSideOfPeakIndex:RightSideOfPeakIndex] return OmegaTrap, A, Gamma, fig, ax" 2953,"def get_fit_auto(self, CentralFreq, MaxWidth=15000, MinWidth=500, WidthIntervals=500, MakeFig=True, show_fig=True, silent=False): """""" Tries a range of regions to search for peaks and runs the one with the least error and returns the parameters with the least errors. Parameters ---------- CentralFreq : float The central frequency to use for the fittings. MaxWidth : float, optional The maximum bandwidth to use for the fitting of the peaks. MinWidth : float, optional The minimum bandwidth to use for the fitting of the peaks. WidthIntervals : float, optional The intervals to use in going between the MaxWidth and MinWidth. show_fig : bool, optional Whether to plot and show the final (best) fitting or not. 
Returns ------- OmegaTrap : ufloat Trapping frequency A : ufloat A parameter Gamma : ufloat Gamma, the damping parameter fig : matplotlib.figure.Figure object The figure object created showing the PSD of the data with the fit ax : matplotlib.axes.Axes object The axes object created showing the PSD of the data with the fit """""" MinTotalSumSquaredError = _np.infty for Width in _np.arange(MaxWidth, MinWidth - WidthIntervals, -WidthIntervals): try: OmegaTrap, A, Gamma,_ , _ \ = self.get_fit_from_peak( CentralFreq - Width / 2, CentralFreq + Width / 2, silent=True, MakeFig=False, show_fig=False) except RuntimeError: _warnings.warn(""Couldn't find good fit with width {}"".format( Width), RuntimeWarning) val = _uncertainties.ufloat(_np.NaN, _np.NaN) OmegaTrap = val A = val Gamma = val TotalSumSquaredError = ( A.std_dev / A.n)**2 + (Gamma.std_dev / Gamma.n)**2 + (OmegaTrap.std_dev / OmegaTrap.n)**2 #print(""totalError: {}"".format(TotalSumSquaredError)) if TotalSumSquaredError < MinTotalSumSquaredError: MinTotalSumSquaredError = TotalSumSquaredError BestWidth = Width if silent != True: print(""found best"") try: OmegaTrap, A, Gamma, fig, ax \ = self.get_fit_from_peak(CentralFreq - BestWidth / 2, CentralFreq + BestWidth / 2, MakeFig=MakeFig, show_fig=show_fig, silent=silent) except UnboundLocalError: raise ValueError(""A best width was not found, try increasing the number of widths tried by either decreasing WidthIntervals or MinWidth or increasing MaxWidth"") OmegaTrap = self.OmegaTrap A = self.A Gamma = self.Gamma self.FTrap = OmegaTrap/(2*pi) return OmegaTrap, A, Gamma, fig, ax" 2954,"def calc_gamma_from_variance_autocorrelation_fit(self, NumberOfOscillations, GammaGuess=None, silent=False, MakeFig=True, show_fig=True): """""" Calculates the total damping, i.e. Gamma, by splitting the time trace into chunks of NumberOfOscillations oscillations and calculated the variance of each of these chunks. This array of varainces is then used for the autocorrleation. The autocorrelation is fitted with an exponential relaxation function and the function returns the parameters with errors. Parameters ---------- NumberOfOscillations : int The number of oscillations each chunk of the timetrace used to calculate the variance should contain. GammaGuess : float, optional Inital guess for BigGamma (in radians) Silent : bool, optional Whether it prints the values fitted or is silent. MakeFig : bool, optional Whether to construct and return the figure object showing the fitting. defaults to True show_fig : bool, optional Whether to show the figure object when it has been created. 
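calc_autocorrelation is not included in this excerpt; a standard normalised estimator consistent with how it is used above (autocorrelating the per-chunk variances) might look like this, with the sample frequency, trap frequency and chunk length all assumed.

import numpy as np

def calc_autocorrelation_sketch(x):
    # normalised autocorrelation for non-negative lags (assumed equivalent of the
    # calc_autocorrelation helper referenced above, which is not shown here)
    x = np.asarray(x, dtype=float) - np.mean(x)
    acf = np.correlate(x, x, mode="full")[len(x) - 1:]
    return acf / acf[0]

SampleFreq, f_trap, n_osc = 1e6, 60e3, 10          # illustration values
voltage = np.random.randn(1_000_000)               # stand-in for the position trace
chunk = int(SampleFreq / f_trap) * n_osc           # samples per n_osc oscillations
usable = len(voltage) - len(voltage) % chunk
variances = np.var(voltage[:usable].reshape(-1, chunk), axis=1)
acf = calc_autocorrelation_sketch(variances)       # decays roughly as exp(-Gamma * t)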
defaults to True Returns ------- Gamma : ufloat Big Gamma, the total damping in radians fig : matplotlib.figure.Figure object The figure object created showing the autocorrelation of the data with the fit ax : matplotlib.axes.Axes object The axes object created showing the autocorrelation of the data with the fit """""" try: SplittedArraySize = int(self.SampleFreq/self.FTrap.n) * NumberOfOscillations except KeyError: ValueError('You forgot to do the spectrum fit to specify self.FTrap exactly.') VoltageArraySize = len(self.voltage) SnippetsVariances = _np.var(self.voltage[:VoltageArraySize-_np.mod(VoltageArraySize,SplittedArraySize)].reshape(-1,SplittedArraySize),axis=1) autocorrelation = calc_autocorrelation(SnippetsVariances) time = _np.array(range(len(autocorrelation))) * SplittedArraySize / self.SampleFreq if GammaGuess==None: Gamma_Initial = (time[4]-time[0])/(autocorrelation[0]-autocorrelation[4]) else: Gamma_Initial = GammaGuess if MakeFig == True: Params, ParamsErr, fig, ax = fit_autocorrelation( autocorrelation, time, Gamma_Initial, MakeFig=MakeFig, show_fig=show_fig) else: Params, ParamsErr, _ , _ = fit_autocorrelation( autocorrelation, time, Gamma_Initial, MakeFig=MakeFig, show_fig=show_fig) if silent == False: print(""\n"") print( ""Big Gamma: {} +- {}% "".format(Params[0], ParamsErr[0] / Params[0] * 100)) Gamma = _uncertainties.ufloat(Params[0], ParamsErr[0]) if MakeFig == True: return Gamma, fig, ax else: return Gamma, None, None" 2955,"def calc_gamma_from_energy_autocorrelation_fit(self, GammaGuess=None, silent=False, MakeFig=True, show_fig=True): """""" Calculates the total damping, i.e. Gamma, by calculating the energy each point in time. This energy array is then used for the autocorrleation. The autocorrelation is fitted with an exponential relaxation function and the function returns the parameters with errors. Parameters ---------- GammaGuess : float, optional Inital guess for BigGamma (in radians) silent : bool, optional Whether it prints the values fitted or is silent. MakeFig : bool, optional Whether to construct and return the figure object showing the fitting. defaults to True show_fig : bool, optional Whether to show the figure object when it has been created. defaults to True Returns ------- Gamma : ufloat Big Gamma, the total damping in radians fig : matplotlib.figure.Figure object The figure object created showing the autocorrelation of the data with the fit ax : matplotlib.axes.Axes object The axes object created showing the autocorrelation of the data with the fit """""" autocorrelation = calc_autocorrelation(self.voltage[:-1]**2*self.OmegaTrap.n**2+(_np.diff(self.voltage)*self.SampleFreq)**2) time = self.time.get_array()[:len(autocorrelation)] if GammaGuess==None: Gamma_Initial = (time[4]-time[0])/(autocorrelation[0]-autocorrelation[4]) else: Gamma_Initial = GammaGuess if MakeFig == True: Params, ParamsErr, fig, ax = fit_autocorrelation( autocorrelation, time, Gamma_Initial, MakeFig=MakeFig, show_fig=show_fig) else: Params, ParamsErr, _ , _ = fit_autocorrelation( autocorrelation, time, Gamma_Initial, MakeFig=MakeFig, show_fig=show_fig) if silent == False: print(""\n"") print( ""Big Gamma: {} +- {}% "".format(Params[0], ParamsErr[0] / Params[0] * 100)) Gamma = _uncertainties.ufloat(Params[0], ParamsErr[0]) if MakeFig == True: return Gamma, fig, ax else: return Gamma, None, None" 2956,"def calc_gamma_from_position_autocorrelation_fit(self, GammaGuess=None, FreqTrapGuess=None, silent=False, MakeFig=True, show_fig=True): """""" Calculates the total damping, i.e. 
Gamma, by calculating the autocorrleation of the position-time trace. The autocorrelation is fitted with an exponential relaxation function derived in Tongcang Li's 2013 thesis (DOI: 10.1007/978-1-4614-6031-2) and the function (equation 4.20 in the thesis) returns the parameters with errors. Parameters ---------- GammaGuess : float, optional Inital guess for BigGamma (in radians) FreqTrapGuess : float, optional Inital guess for the trapping Frequency in Hz silent : bool, optional Whether it prints the values fitted or is silent. MakeFig : bool, optional Whether to construct and return the figure object showing the fitting. defaults to True show_fig : bool, optional Whether to show the figure object when it has been created. defaults to True Returns ------- Gamma : ufloat Big Gamma, the total damping in radians OmegaTrap : ufloat Trapping frequency in radians fig : matplotlib.figure.Figure object The figure object created showing the autocorrelation of the data with the fit ax : matplotlib.axes.Axes object The axes object created showing the autocorrelation of the data with the fit """""" autocorrelation = calc_autocorrelation(self.voltage) time = self.time.get_array()[:len(autocorrelation)] if GammaGuess==None: Gamma_Initial = (autocorrelation[0]-autocorrelation[int(self.SampleFreq/self.FTrap.n)])/(time[int(self.SampleFreq/self.FTrap.n)]-time[0])*2*_np.pi else: Gamma_Initial = GammaGuess if FreqTrapGuess==None: FreqTrap_Initial = self.FTrap.n else: FreqTrap_Initial = FreqTrapGuess if MakeFig == True: Params, ParamsErr, fig, ax = fit_autocorrelation( autocorrelation, time, Gamma_Initial, FreqTrap_Initial, method='position', MakeFig=MakeFig, show_fig=show_fig) else: Params, ParamsErr, _ , _ = fit_autocorrelation( autocorrelation, time, Gamma_Initial, FreqTrap_Initial, method='position', MakeFig=MakeFig, show_fig=show_fig) if silent == False: print(""\n"") print( ""Big Gamma: {} +- {}% "".format(Params[0], ParamsErr[0] / Params[0] * 100)) print( ""Trap Frequency: {} +- {}% "".format(Params[1], ParamsErr[1] / Params[1] * 100)) Gamma = _uncertainties.ufloat(Params[0], ParamsErr[0]) OmegaTrap = _uncertainties.ufloat(Params[1], ParamsErr[1]) if MakeFig == True: return Gamma, OmegaTrap, fig, ax else: return Gamma, OmegaTrap, None, None" 2957,"def extract_parameters(self, P_mbar, P_Error, method=""chang""): """""" Extracts the Radius, mass and Conversion factor for a particle. Parameters ---------- P_mbar : float The pressure in mbar when the data was taken. P_Error : float The error in the pressure value (as a decimal e.g. 15% = 0.15) Returns ------- Radius : uncertainties.ufloat The radius of the particle in m Mass : uncertainties.ufloat The mass of the particle in kg ConvFactor : uncertainties.ufloat The conversion factor between volts/m """""" [R, M, ConvFactor], [RErr, MErr, ConvFactorErr] = \ extract_parameters(P_mbar, P_Error, self.A.n, self.A.std_dev, self.Gamma.n, self.Gamma.std_dev, method = method) self.Radius = _uncertainties.ufloat(R, RErr) self.Mass = _uncertainties.ufloat(M, MErr) self.ConvFactor = _uncertainties.ufloat(ConvFactor, ConvFactorErr) return self.Radius, self.Mass, self.ConvFactor" 2958,"def extract_ZXY_motion(self, ApproxZXYFreqs, uncertaintyInFreqs, ZXYPeakWidths, subSampleFraction=1, NPerSegmentPSD=1000000, MakeFig=True, show_fig=True): """""" Extracts the x, y and z signals (in volts) from the voltage signal. Does this by finding the highest peaks in the signal about the approximate frequencies, using the uncertaintyinfreqs parameter as the width it searches. 
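Continuing the earlier hypothetical usage sketch: once a spectral fit has populated the A and Gamma attributes, extract_parameters above converts a pressure reading into a radius, mass and conversion factor. The pressure and its fractional error here are invented values.

# assumes `data` from the earlier sketch and that data.get_fit(...) has already been run
Radius, Mass, ConvFactor = data.extract_parameters(P_mbar=3.2, P_Error=0.15)
print(Radius, Mass, ConvFactor)   # uncertainties ufloats: m, kg, V/m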
It then uses the ZXYPeakWidths to construct bandpass IIR filters for each frequency and filtering them. If too high a sample frequency has been used to collect the data scipy may not be able to construct a filter good enough, in this case increasing the subSampleFraction may be nessesary. Parameters ---------- ApproxZXYFreqs : array_like A sequency containing 3 elements, the approximate z, x and y frequency respectively. uncertaintyInFreqs : float The uncertainty in the z, x and y frequency respectively. ZXYPeakWidths : array_like A sequency containing 3 elements, the widths of the z, x and y frequency peaks respectively. subSampleFraction : int, optional How much to sub-sample the data by before filtering, effectively reducing the sample frequency by this fraction. NPerSegmentPSD : int, optional NPerSegment to pass to scipy.signal.welch to calculate the PSD show_fig : bool, optional Whether to show the figures produced of the PSD of the original signal along with the filtered x, y and z. Returns ------- self.zVolts : ndarray The z signal in volts extracted by bandpass IIR filtering self.xVolts : ndarray The x signal in volts extracted by bandpass IIR filtering self.yVolts : ndarray The y signal in volts extracted by bandpass IIR filtering time : ndarray The array of times corresponding to the above 3 arrays fig : matplotlib.figure.Figure object figure object containing a plot of the PSD of the original signal with the z, x and y filtered signals ax : matplotlib.axes.Axes object axes object corresponding to the above figure """""" [zf, xf, yf] = ApproxZXYFreqs zf, xf, yf = get_ZXY_freqs( self, zf, xf, yf, bandwidth=uncertaintyInFreqs) [zwidth, xwidth, ywidth] = ZXYPeakWidths self.zVolts, self.xVolts, self.yVolts, time, fig, ax = get_ZXY_data( self, zf, xf, yf, subSampleFraction, zwidth, xwidth, ywidth, MakeFig=MakeFig, show_fig=show_fig, NPerSegmentPSD=NPerSegmentPSD) return self.zVolts, self.xVolts, self.yVolts, time, fig, ax" 2959,"def filter_data(self, freq, FractionOfSampleFreq=1, PeakWidth=10000, filterImplementation=""filtfilt"", timeStart=None, timeEnd=None, NPerSegmentPSD=1000000, PyCUDA=False, MakeFig=True, show_fig=True): """""" filter out data about a central frequency with some bandwidth using an IIR filter. Parameters ---------- freq : float The frequency of the peak of interest in the PSD FractionOfSampleFreq : integer, optional The fraction of the sample frequency to sub-sample the data by. This sometimes needs to be done because a filter with the appropriate frequency response may not be generated using the sample rate at which the data was taken. Increasing this number means the x, y and z signals produced by this function will be sampled at a lower rate but a higher number means a higher chance that the filter produced will have a nice frequency response. PeakWidth : float, optional The width of the pass-band of the IIR filter to be generated to filter the peak. Defaults to 10KHz filterImplementation : string, optional filtfilt or lfilter - use scipy.filtfilt or lfilter ifft - uses built in IFFT_filter default: filtfilt timeStart : float, optional Starting time for filtering. Defaults to start of time data. timeEnd : float, optional Ending time for filtering. Defaults to end of time data. 
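make_butterworth_bandpass_b_a is not included in this excerpt; an equivalent scipy construction of the band-pass coefficients that filter_data applies with filtfilt might look like this. The centre frequency, bandwidth, filter order and (deliberately modest) sample rate are assumptions, chosen low enough that the resulting IIR filter stays stable, in the spirit of the FractionOfSampleFreq sub-sampling discussed above.

import numpy as np
from scipy import signal

def bandpass_b_a(centre, width, sample_freq, order=3):
    nyq = sample_freq / 2
    low, high = (centre - width / 2) / nyq, (centre + width / 2) / nyq
    return signal.butter(order, [low, high], btype="bandpass")

fs = 200e3                                    # assumed (already sub-sampled) rate (Hz)
x = np.random.randn(200_000)                  # stand-in for the sub-sampled voltage
b, a = bandpass_b_a(60e3, 10e3, fs)
filtered = signal.filtfilt(b, a, x)           # zero-phase filtering, as in filter_data
assert not np.isnan(filtered).any()           # the module makes the same check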
NPerSegmentPSD : int, optional NPerSegment to pass to scipy.signal.welch to calculate the PSD PyCUDA : bool, optional Only important for the 'ifft'-method If True, uses PyCUDA to accelerate the FFT and IFFT via using your NVIDIA-GPU If False, performs FFT and IFFT with conventional scipy.fftpack MakeFig : bool, optional If True - generate figure showing filtered and unfiltered PSD Defaults to True. show_fig : bool, optional If True - plot unfiltered and filtered PSD Defaults to True. Returns ------- timedata : ndarray Array containing the time data FiletedData : ndarray Array containing the filtered signal in volts with time. fig : matplotlib.figure.Figure object The figure object created showing the PSD of the filtered and unfiltered signal ax : matplotlib.axes.Axes object The axes object created showing the PSD of the filtered and unfiltered signal """""" if timeStart == None: timeStart = self.timeStart if timeEnd == None: timeEnd = self.timeEnd time = self.time.get_array() StartIndex = _np.where(time == take_closest(time, timeStart))[0][0] EndIndex = _np.where(time == take_closest(time, timeEnd))[0][0] input_signal = self.voltage[StartIndex: EndIndex][0::FractionOfSampleFreq] SAMPLEFREQ = self.SampleFreq / FractionOfSampleFreq if filterImplementation == ""filtfilt"" or filterImplementation == ""lfilter"": if filterImplementation == ""filtfilt"": ApplyFilter = scipy.signal.filtfilt elif filterImplementation == ""lfilter"": ApplyFilter = scipy.signal.lfilter b, a = make_butterworth_bandpass_b_a(freq, PeakWidth, SAMPLEFREQ) print(""filtering data"") filteredData = ApplyFilter(b, a, input_signal) if(_np.isnan(filteredData).any()): raise ValueError( ""Value Error: FractionOfSampleFreq must be higher, a sufficiently small sample frequency should be used to produce a working IIR filter."") elif filterImplementation == ""ifft"": filteredData = IFFT_filter(input_signal, SAMPLEFREQ, freq-PeakWidth/2, freq+PeakWidth/2, PyCUDA = PyCUDA) else: raise ValueError(""filterImplementation must be one of [filtfilt, lfilter, ifft] you entered: {}"".format(filterImplementation)) if MakeFig == True: f, PSD = scipy.signal.welch( input_signal, SAMPLEFREQ, nperseg=NPerSegmentPSD) f_filtdata, PSD_filtdata = scipy.signal.welch(filteredData, SAMPLEFREQ, nperseg=NPerSegmentPSD) fig, ax = _plt.subplots(figsize=properties[""default_fig_size""]) ax.plot(f, PSD) ax.plot(f_filtdata, PSD_filtdata, label=""filtered data"") ax.legend(loc=""best"") ax.semilogy() ax.set_xlim([freq - PeakWidth, freq + PeakWidth]) else: fig = None ax = None if show_fig == True: _plt.show() timedata = time[StartIndex: EndIndex][0::FractionOfSampleFreq] return timedata, filteredData, fig, ax" 2960,"def plot_phase_space_sns(self, freq, ConvFactor, PeakWidth=10000, FractionOfSampleFreq=1, kind=""hex"", timeStart=None, timeEnd =None, PointsOfPadding=500, units=""nm"", logscale=False, cmap=None, marginalColor=None, gridsize=200, show_fig=True, ShowPSD=False, alpha=0.5, *args, **kwargs): """""" Plots the phase space of a peak in the PSD. Parameters ---------- freq : float The frequenecy of the peak (Trapping frequency of the dimension of interest) ConvFactor : float (or ufloat) The conversion factor between Volts and Meters PeakWidth : float, optional The width of the peak. Defaults to 10KHz FractionOfSampleFreq : int, optional The fraction of the sample freq to use to filter the data. Defaults to 1. 
kind : string, optional kind of plot to draw - pass to jointplot from seaborne timeStart : float, optional Starting time for data from which to calculate the phase space. Defaults to start of time data. timeEnd : float, optional Ending time for data from which to calculate the phase space. Defaults to start of time data. PointsOfPadding : float, optional How many points of the data at the beginning and end to disregard for plotting the phase space, to remove filtering artifacts. Defaults to 500. units : string, optional Units of position to plot on the axis - defaults to nm cmap : matplotlib.colors.ListedColormap, optional cmap to use for plotting the jointplot marginalColor : string, optional color to use for marginal plots gridsize : int, optional size of the grid to use with kind=""hex"" show_fig : bool, optional Whether to show the figure before exiting the function Defaults to True. ShowPSD : bool, optional Where to show the PSD of the unfiltered and the filtered signal used to make the phase space plot. Defaults to False. Returns ------- fig : matplotlib.figure.Figure object figure object containing the phase space plot JP : seaborn.jointplot object joint plot object containing the phase space plot """""" if cmap == None: if logscale == True: cmap = properties['default_log_cmap'] else: cmap = properties['default_linear_cmap'] unit_prefix = units[:-1] _, PosArray, VelArray = self.calc_phase_space(freq, ConvFactor, PeakWidth=PeakWidth, FractionOfSampleFreq=FractionOfSampleFreq, timeStart=timeStart, timeEnd=timeEnd, PointsOfPadding=PointsOfPadding, ShowPSD=ShowPSD) _plt.close('all') PosArray = unit_conversion(PosArray, unit_prefix) # converts m to units required (nm by default) VelArray = unit_conversion(VelArray, unit_prefix) # converts m/s to units required (nm/s by default) VarPos = _np.var(PosArray) VarVel = _np.var(VelArray) MaxPos = _np.max(PosArray) MaxVel = _np.max(VelArray) if MaxPos > MaxVel / (2 * pi * freq): _plotlimit = MaxPos * 1.1 else: _plotlimit = MaxVel / (2 * pi * freq) * 1.1 print(""Plotting Phase Space"") if marginalColor == None: try: marginalColor = tuple((cmap.colors[len(cmap.colors)/2][:-1])) except AttributeError: try: marginalColor = cmap(2) except: marginalColor = properties['default_base_color'] if kind == ""hex"": # gridsize can only be passed if kind=""hex"" JP1 = _sns.jointplot(_pd.Series(PosArray[1:], name=""$z$ ({}) \n filepath=%s"".format(units) % (self.filepath)), _pd.Series(VelArray / (2 * pi * freq), name=""$v_z$/$\omega$ ({})"".format(units)), stat_func=None, xlim=[-_plotlimit, _plotlimit], ylim=[-_plotlimit, _plotlimit], size=max(properties['default_fig_size']), kind=kind, marginal_kws={'hist_kws': {'log': logscale},}, cmap=cmap, color=marginalColor, gridsize=gridsize, alpha=alpha, *args, **kwargs, ) else: JP1 = _sns.jointplot(_pd.Series(PosArray[1:], name=""$z$ ({}) \n filepath=%s"".format(units) % (self.filepath)), _pd.Series(VelArray / (2 * pi * freq), name=""$v_z$/$\omega$ ({})"".format(units)), stat_func=None, xlim=[-_plotlimit, _plotlimit], ylim=[-_plotlimit, _plotlimit], size=max(properties['default_fig_size']), kind=kind, marginal_kws={'hist_kws': {'log': logscale},}, cmap=cmap, color=marginalColor, alpha=alpha, *args, **kwargs, ) fig = JP1.fig if show_fig == True: print(""Showing Phase Space"") _plt.show() return fig, JP1" 2961,"def calc_phase_space(self, freq, ConvFactor, PeakWidth=10000, FractionOfSampleFreq=1, timeStart=None, timeEnd =None, PointsOfPadding=500, ShowPSD=False): """""" Calculates the position and velocity (in m) for use 
in plotting the phase space distribution. Parameters ---------- freq : float The frequenecy of the peak (Trapping frequency of the dimension of interest) ConvFactor : float (or ufloat) The conversion factor between Volts and Meters PeakWidth : float, optional The width of the peak. Defaults to 10KHz FractionOfSampleFreq : int, optional The fraction of the sample freq to use to filter the data. Defaults to 1. timeStart : float, optional Starting time for data from which to calculate the phase space. Defaults to start of time data. timeEnd : float, optional Ending time for data from which to calculate the phase space. Defaults to start of time data. PointsOfPadding : float, optional How many points of the data at the beginning and end to disregard for plotting the phase space, to remove filtering artifacts. Defaults to 500 ShowPSD : bool, optional Where to show the PSD of the unfiltered and the filtered signal used to make the phase space plot. Defaults to False. *args, **kwargs : optional args and kwargs passed to qplots.joint_plot Returns ------- time : ndarray time corresponding to position and velocity PosArray : ndarray Array of position of the particle in time VelArray : ndarray Array of velocity of the particle in time """""" _, Pos, fig, ax = self.filter_data( freq, FractionOfSampleFreq, PeakWidth, MakeFig=ShowPSD, show_fig=ShowPSD, timeStart=timeStart, timeEnd=timeEnd) time = self.time.get_array() if timeStart != None: StartIndex = _np.where(time == take_closest(time, timeStart))[0][0] else: StartIndex = 0 if timeEnd != None: EndIndex = _np.where(time == take_closest(time, timeEnd))[0][0] else: EndIndex = -1 Pos = Pos[PointsOfPadding : -PointsOfPadding+1] time = time[StartIndex:EndIndex][::FractionOfSampleFreq][PointsOfPadding : -PointsOfPadding+1] if type(ConvFactor) == _uncertainties.core.Variable: conv = ConvFactor.n else: conv = ConvFactor PosArray = Pos / conv # converts V to m VelArray = _np.diff(PosArray) * (self.SampleFreq / FractionOfSampleFreq) # calcs velocity (in m/s) by differtiating position return time, PosArray, VelArray" 2962,"def get_value(self, ColumnName, RunNo): """""" Retreives the value of the collumn named ColumnName associated with a particular run number. Parameters ---------- ColumnName : string The name of the desired org-mode table's collumn RunNo : int The run number for which to retreive the pressure value Returns ------- Value : float The value for the column's name and associated run number """""" Value = float(self.ORGTableData[self.ORGTableData.RunNo == '{}'.format( RunNo)][ColumnName]) return Value" 2963,"def steady_state_potential(xdata,HistBins=100): """""" Calculates the steady state potential. Parameters ---------- xdata : ndarray Position data for a degree of freedom HistBins : int Number of bins to use for histogram of xdata. Number of position points at which the potential is calculated. 
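A quick sanity check of the Boltzmann inversion performed by steady_state_potential above: histogramming a synthetic Gaussian position trace and taking minus the log of the occupation probability gives a parabola, i.e. a harmonic potential in units of kB*T up to an additive constant. All numbers are illustrative.

import numpy as np

x = 1e-3 * np.random.randn(100_000)              # synthetic position trace
counts, edges = np.histogram(x, bins=100)
centres = edges[:-1] + np.diff(edges) / 2        # bin centres
mask = counts > 0                                # skip empty tail bins to avoid log(0)
p = counts[mask] / counts.sum()                  # occupation probability
U = -np.log(p)                                   # potential in units of kB*T (+ constant)
positions = centres[mask]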
Returns ------- position : ndarray positions at which potential has been calculated potential : ndarray value of potential at the positions above """""" import numpy as np pops=np.histogram(xdata,HistBins)[0] bins=np.histogram(xdata,HistBins)[1] bins=bins[0:-1] bins=bins+np.mean(np.diff(bins)) #normalise pops pops=pops/float(np.sum(pops)) return bins,-np.log(pops)" 2964,"def dynamical_potential(xdata, dt, order=3): """""" Computes potential from spring function Parameters ---------- xdata : ndarray Position data for a degree of freedom, at which to calculate potential dt : float time between measurements order : int order of polynomial to fit Returns ------- Potential : ndarray valued of potential at positions in xdata """""" import numpy as np adata = CalcAcceleration(xdata, dt) xdata = xdata[2:] # removes first 2 values as differentiating twice means # we have acceleration[n] corresponds to position[n-2] z=np.polyfit(xdata,adata,order) p=np.poly1d(z) spring_pot=np.polyint(p) return -spring_pot" 2965,"def CalcAcceleration(xdata, dt): """""" Calculates the acceleration from the position Parameters ---------- xdata : ndarray Position data dt : float time between measurements Returns ------- acceleration : ndarray values of acceleration from position 2 to N. """""" acceleration = np.diff(np.diff(xdata))/dt**2 return acceleration" 2966,"def FitRadius(z, SampleFreq, Damping, HistBins=100): """""" Fits the dynamical potential to the Steady State Potential by varying the Radius. z : ndarray Position data SampleFreq : float frequency at which the position data was sampled Damping : float value of damping (in radians/second) HistBins : int number of values at which to evaluate the steady state potential / perform the fitting to the dynamical potential Returns ------- Radius : float Radius of the nanoparticle RadiusError : float One Standard Deviation Error in the Radius from the Fit (doesn't take into account possible error in damping) """""" dt = 1/SampleFreq boltzmann=scipy.constants.Boltzmann temp=300 # why halved?? density=1800 SteadyStatePotnl = list(steady_state_potential(z, HistBins=HistBins)) yoffset=min(SteadyStatePotnl[1]) SteadyStatePotnl[1] -= yoffset SpringPotnlFunc = dynamical_potential(z, dt) SpringPotnl = SpringPotnlFunc(z) kBT_Gamma = temp*boltzmann*1/Damping #FitSoln = least_squares(GetResiduals, 50, args=(SteadyStatePotnl, SpringPotnlFunc, kBT_Gamma), full_output=True) #print(FitSoln) #RADIUS = FitSoln['x'][0] DynamicPotentialFunc = MakeDynamicPotentialFunc(kBT_Gamma, density, SpringPotnlFunc) FitSoln = curve_fit(DynamicPotentialFunc, SteadyStatePotnl[0], SteadyStatePotnl[1], p0 = 50) print(FitSoln) popt, pcov = FitSoln perr = np.sqrt(np.diag(pcov)) Radius, RadiusError = popt[0], perr[0] mass=((4/3)*np.pi*((Radius*10**-9)**3))*density yfit=(kBT_Gamma/mass) Y = yfit*SpringPotnl fig, ax = plt.subplots() ax.plot(SteadyStatePotnl[0], SteadyStatePotnl[1], 'bo', label=""Steady State Potential"") plt.plot(z,Y, 'r-', label=""Dynamical Potential"") ax.legend(loc='best') ax.set_ylabel('U ($k_{B} T $ Joules)') ax.set_xlabel('Distance (mV)') plt.tight_layout() plt.show() return Radius, RadiusError" 2967,"def MakeDynamicPotentialFunc(kBT_Gamma, density, SpringPotnlFunc): """""" Creates the function that calculates the potential given the position (in volts) and the radius of the particle. 
Parameters ---------- kBT_Gamma : float Value of kB*T/Gamma density : float density of the nanoparticle SpringPotnlFunc : function Function which takes the value of position (in volts) and returns the spring potential Returns ------- PotentialFunc : function function that calculates the potential given the position (in volts) and the radius of the particle. """""" def PotentialFunc(xdata, Radius): """""" calculates the potential given the position (in volts) and the radius of the particle. Parameters ---------- xdata : ndarray Positon data (in volts) Radius : float Radius in units of nm Returns ------- Potential : ndarray Dynamical Spring Potential at positions given by xdata """""" mass = ((4/3)*np.pi*((Radius*10**-9)**3))*density yfit=(kBT_Gamma/mass) Y = yfit*SpringPotnlFunc(xdata) return Y return PotentialFunc" 2968,"def finished(finished_status, update_interval, status_key, edit_at_key): """""" Create dict query for pymongo that getting all finished task. :param finished_status: int, status code that greater or equal than this will be considered as finished. :param update_interval: int, the record will be updated every x seconds. :param status_key: status code field key, support dot notation. :param edit_at_key: edit_at time field key, support dot notation. :return: dict, a pymongo filter. **中文文档** 状态码大于某个值, 并且, 更新时间在最近一段时间以内. """""" return { status_key: {""$gte"": finished_status}, edit_at_key: { ""$gte"": x_seconds_before_now(update_interval), }, }" 2969,"def unfinished(finished_status, update_interval, status_key, edit_at_key): """""" Create dict query for pymongo that getting all unfinished task. :param finished_status: int, status code that less than this will be considered as unfinished. :param update_interval: int, the record will be updated every x seconds. :param status_key: status code field key, support dot notation. :param edit_at_key: edit_at time field key, support dot notation. :return: dict, a pymongo filter. **中文文档** 状态码小于某个值, 或者, 现在距离更新时间已经超过一定阈值. """""" return { ""$or"": [ {status_key: {""$lt"": finished_status}}, {edit_at_key: {""$lt"": x_seconds_before_now(update_interval)}}, ] }" 2970,"def getCommandLine(self): """"""Insert the precursor and change directory commands """""" commandLine = self.precursor + self.sep if self.precursor else '' commandLine += self.cd + ' ' + self.path + self.sep if self.path else '' commandLine += PosixCommand.getCommandLine(self) return commandLine" 2971,"def _policy_psets(policy_instances): """"""Find all permission sets making use of all of a list of policy_instances. The input is an array of policy instances. """""" if len(policy_instances) == 0: # Special case: find any permission sets that don't have # associated policy instances. return PermissionSet.objects.filter(policyinstance__isnull=True) else: return PermissionSet.objects.filter( policyinstance__policy__in=policy_instances).distinct()" 2972,"def _get_permission_set_tree(user): """""" Helper to return cached permission set tree from user instance if set, else generates and returns analyzed permission set tree. Does not cache set automatically, that must be done explicitely. 
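Example (illustrative sketch only; ``user`` is an assumed Django user object that either has an assigned ``PermissionSet`` or falls back to the anonymous permission set):

>>> tree = _get_permission_set_tree(user)    # analyzed tree, not cached
>>> ensure_permission_set_tree_cached(user)  # cache it explicitly if desired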
"""""" if hasattr(user, CACHED_PSET_PROPERTY_KEY): return getattr(user, CACHED_PSET_PROPERTY_KEY) if user.is_authenticated(): try: return user.permissionset.first().tree() except AttributeError: raise ObjectDoesNotExist return PermissionSet.objects.get(anonymous_user=True).tree()" 2973,"def ensure_permission_set_tree_cached(user): """""" Helper to cache permission set tree on user instance """""" if hasattr(user, CACHED_PSET_PROPERTY_KEY): return try: setattr( user, CACHED_PSET_PROPERTY_KEY, _get_permission_set_tree(user)) except ObjectDoesNotExist: # No permission set pass" 2974,"def clear_user_policies(user): """"""Remove all policies assigned to a user (or the anonymous user if ``user`` is ``None``). """""" if user is None: try: pset = PermissionSet.objects.get(anonymous_user=True) pset.anonymous_user = False pset.save() except ObjectDoesNotExist: return else: pset = user.permissionset.first() if pset: pset.refresh() if user is not None: pset.users.remove(user) if pset.users.count() == 0 and not pset.anonymous_user: pset.delete()" 2975,"def assign_user_policies(user, *policies_roles): """"""Assign a sequence of policies to a user (or the anonymous user is ``user`` is ``None``). (Also installed as ``assign_policies`` method on ``User`` model. """""" clear_user_policies(user) pset = PermissionSet.objects.by_policies_and_roles(policies_roles) pset.refresh() if user is None: pset.anonymous_user = True else: pset.users.add(user) pset.save() cache.set(user_cache_key(user), None)" 2976,"def user_assigned_policies(user): """"""Return sequence of policies assigned to a user (or the anonymous user is ``user`` is ``None``). (Also installed as ``assigned_policies`` method on ``User`` model. """""" key = user_cache_key(user) cached = cache.get(key) if cached is not None: return cached if user is None: pset = PermissionSet.objects.filter(anonymous_user=True).first() else: pset = user.permissionset.first() if pset is None: return [] res = [] skip_role_policies = False skip_role = None skip_role_variables = None for pi in pset.policyinstance_set.select_related('policy', 'role'): if skip_role_policies: if pi.role == skip_role and pi.variables == skip_role_variables: continue else: skip_role_policies = False if pi.role: res.append(pi.role) skip_role = pi.role skip_role_variables = pi.variables skip_role_policies = True else: if pi.variables != '{}': res.append((pi.policy, json.loads(pi.variables))) else: res.append(pi.policy) cache.set(key, res) return res" 2977,"def parsed(self): """"""Get the JSON dictionary object which represents the content. This property is cached and only parses the content once. """""" if not self._parsed: self._parsed = json.loads(self.content) return self._parsed" 2978,"def cleanup_logger(self): """"""Clean up logger to close out file handles. After this is called, writing to self.log will get logs ending up getting discarded. 
"""""" self.log_handler.close() self.log.removeHandler(self.log_handler)" 2979,"def update_configs(self, release): """""" Update the fedora-atomic.git repositories for a given release """""" git_repo = release['git_repo'] git_cache = release['git_cache'] if not os.path.isdir(git_cache): self.call(['git', 'clone', '--mirror', git_repo, git_cache]) else: self.call(['git', 'fetch', '--all', '--prune'], cwd=git_cache) git_dir = release['git_dir'] = os.path.join(release['tmp_dir'], os.path.basename(git_repo)) self.call(['git', 'clone', '-b', release['git_branch'], git_cache, git_dir]) if release['delete_repo_files']: for repo_file in glob.glob(os.path.join(git_dir, '*.repo')): self.log.info('Deleting %s' % repo_file) os.unlink(repo_file)" 2980,"def mock_cmd(self, release, *cmd, **kwargs): """"""Run a mock command in the chroot for a given release"""""" fmt = '{mock_cmd}' if kwargs.get('new_chroot') is True: fmt +=' --new-chroot' fmt += ' --configdir={mock_dir}' return self.call(fmt.format(**release).split() + list(cmd))" 2981,"def init_mock(self, release): """"""Initialize/update our mock chroot"""""" root = '/var/lib/mock/%s' % release['mock'] if not os.path.isdir(root): self.mock_cmd(release, '--init') self.log.info('mock chroot initialized') else: if release.get('mock_clean'): self.mock_cmd(release, '--clean') self.mock_cmd(release, '--init') self.log.info('mock chroot cleaned & initialized') else: self.mock_cmd(release, '--update') self.log.info('mock chroot updated')" 2982,"def generate_mock_config(self, release): """"""Dynamically generate our mock configuration"""""" mock_tmpl = pkg_resources.resource_string(__name__, 'templates/mock.mako') mock_dir = release['mock_dir'] = os.path.join(release['tmp_dir'], 'mock') mock_cfg = os.path.join(release['mock_dir'], release['mock'] + '.cfg') os.mkdir(mock_dir) for cfg in ('site-defaults.cfg', 'logging.ini'): os.symlink('/etc/mock/%s' % cfg, os.path.join(mock_dir, cfg)) with file(mock_cfg, 'w') as cfg: mock_out = Template(mock_tmpl).render(**release) self.log.debug('Writing %s:\n%s', mock_cfg, mock_out) cfg.write(mock_out)" 2983,"def mock_chroot(self, release, cmd, **kwargs): """"""Run a commend in the mock container for a release"""""" return self.mock_cmd(release, '--chroot', cmd, **kwargs)" 2984,"def generate_repo_files(self, release): """"""Dynamically generate our yum repo configuration"""""" repo_tmpl = pkg_resources.resource_string(__name__, 'templates/repo.mako') repo_file = os.path.join(release['git_dir'], '%s.repo' % release['repo']) with file(repo_file, 'w') as repo: repo_out = Template(repo_tmpl).render(**release) self.log.debug('Writing repo file %s:\n%s', repo_file, repo_out) repo.write(repo_out) self.log.info('Wrote repo configuration to %s', repo_file)" 2985,"def ostree_init(self, release): """"""Initialize the OSTree for a release"""""" out = release['output_dir'].rstrip('/') base = os.path.dirname(out) if not os.path.isdir(base): self.log.info('Creating %s', base) os.makedirs(base, mode=0755) if not os.path.isdir(out): self.mock_chroot(release, release['ostree_init'])" 2986,"def ostree_compose(self, release): """"""Compose the OSTree in the mock container"""""" start = datetime.utcnow() treefile = os.path.join(release['git_dir'], 'treefile.json') cmd = release['ostree_compose'] % treefile with file(treefile, 'w') as tree: json.dump(release['treefile'], tree) # Only use new_chroot for the invocation, as --clean and --new-chroot are buggy together right now out, err, rcode = self.mock_chroot(release, cmd, new_chroot=True) ref = 
None commitid = None for line in out.split('\n'): if ' => ' in line: # This line is the: ref => commitid line line = line.replace('\n', '') ref, _, commitid = line.partition(' => ') self.log.info('rpm-ostree compose complete (%s), ref %s, commitid %s', datetime.utcnow() - start, ref, commitid) return ref, commitid" 2987,"def update_ostree_summary(self, release): """"""Update the ostree summary file and return a path to it"""""" self.log.info('Updating the ostree summary for %s', release['name']) self.mock_chroot(release, release['ostree_summary']) return os.path.join(release['output_dir'], 'summary')" 2988,"def sync_in(self, release): """"""Sync the canonical repo to our local working directory"""""" tree = release['canonical_dir'] if os.path.exists(tree) and release.get('rsync_in_objs'): out = release['output_dir'] if not os.path.isdir(out): self.log.info('Creating %s', out) os.makedirs(out) self.call(release['rsync_in_objs']) self.call(release['rsync_in_rest'])" 2989,"def sync_out(self, release): """"""Sync our tree to the canonical location"""""" if release.get('rsync_out_objs'): tree = release['canonical_dir'] if not os.path.isdir(tree): self.log.info('Creating %s', tree) os.makedirs(tree) self.call(release['rsync_out_objs']) self.call(release['rsync_out_rest'])" 2990,"def call(self, cmd, **kwargs): """"""A simple subprocess wrapper"""""" if isinstance(cmd, basestring): cmd = cmd.split() self.log.info('Running %s', cmd) p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, **kwargs) out, err = p.communicate() if out: self.log.info(out) if err: if p.returncode == 0: self.log.info(err) else: self.log.error(err) if p.returncode != 0: self.log.error('returncode = %d' % p.returncode) raise Exception return out, err, p.returncode" 2991,"def merge(self, other): """""" Merge this range object with another (ranges need not overlap or abut). :returns: a new Range object representing the interval containing both ranges. """""" newstart = min(self._start, other.start) newend = max(self._end, other.end) return Range(newstart, newend)" 2992,"def intersect(self, other): """""" Determine the interval of overlap between this range and another. :returns: a new Range object representing the overlapping interval, or `None` if the ranges do not overlap. """""" if not self.overlap(other): return None newstart = max(self._start, other.start) newend = min(self._end, other.end) return Range(newstart, newend)" 2993,"def overlap(self, other): """"""Determine whether this range overlaps with another."""""" if self._start < other.end and self._end > other.start: return True return False" 2994,"def contains(self, other): """"""Determine whether this range contains another."""""" return self._start <= other.start and self._end >= other.end" 2995,"def transform(self, offset): """""" Shift this range by the specified offset. Note: the resulting range must be a valid interval. """""" assert self._start + offset > 0, \ ('offset {} invalid; resulting range [{}, {}) is ' 'undefined'.format(offset, self._start+offset, self._end+offset)) self._start += offset self._end += offset" 2996,"def runningMedian(seq, M): """""" Purpose: Find the median for the points in a sliding window (odd number in size) as it is moved from left to right by one point at a time. Inputs: seq -- list containing items for which a running median (in a sliding window) is to be calculated M -- number of items in window (window size) -- must be an integer > 1 Otputs: medians -- list of medians with size N - M + 1 Note: 1. 
The median of a finite list of numbers is the ""center"" value when this list is sorted in ascending order. 2. If M is an even number the two elements in the window that are close to the center are averaged to give the median (this is not by definition) """""" seq = iter(seq) s = [] m = M // 2 #// does a truncated division like integer division in Python 2 # Set up list s (to be sorted) and load deque with first window of seq s = [item for item in islice(seq,M)] d = deque(s) # Simple lambda function to handle even/odd window sizes median = lambda : s[m] if bool(M&1) else (s[m-1]+s[m])*0.5 # Sort it in increasing order and extract the median (""center"" of the sorted window) s.sort() medians = [median()] # Now slide the window by one point to the right for each new position (each pass through # the loop). Stop when the item in the right end of the deque contains the last item in seq for item in seq: old = d.popleft() # pop oldest from left d.append(item) # push newest in from right del s[bisect_left(s, old)] # locate insertion point and then remove old insort(s, item) # insert newest such that new sort is not required medians.append(median()) return medians" 2997,"def runningMean(seq, N, M): """""" Purpose: Find the mean for the points in a sliding window (fixed size) as it is moved from left to right by one point at a time. Inputs: seq -- list containing items for which a mean (in a sliding window) is to be calculated (N items) N -- length of sequence M -- number of items in sliding window Otputs: means -- list of means with size N - M + 1 """""" # Load deque (d) with first window of seq d = deque(seq[0:M]) means = [sum(d) / len(d)] # contains mean of first window # Now slide the window by one point to the right for each new position (each pass through # the loop). Stop when the item in the right end of the deque contains the last item in seq for item in islice(seq, M, N): old = d.popleft() # pop oldest from left d.append(item) # push newest in from right m = sum(d) / len(d) means.append(m) # mean for current window return means" 2998,"def behave(cmdline, cwd=""."", **kwargs): """""" Run behave as subprocess command and return process/shell instance with results (collected output, returncode). """""" assert isinstance(cmdline, six.string_types) return run(""behave "" + cmdline, cwd=cwd, **kwargs)" 2999,"def run(cls, command, cwd=""."", **kwargs): """""" Make a subprocess call, collect its output and returncode. Returns CommandResult instance as ValueObject. """""" assert isinstance(command, six.string_types) command_result = CommandResult() command_result.command = command use_shell = cls.USE_SHELL if ""shell"" in kwargs: use_shell = kwargs.pop(""shell"") # -- BUILD COMMAND ARGS: if six.PY2 and isinstance(command, six.text_type): # -- PREPARE-FOR: shlex.split() # In PY2, shlex.split() requires bytes string (non-unicode). # In PY3, shlex.split() accepts unicode string. 
command = codecs.encode(command, ""utf-8"") cmdargs = shlex.split(command) # -- TRANSFORM COMMAND (optional) command0 = cmdargs[0] real_command = cls.COMMAND_MAP.get(command0, None) if real_command: cmdargs0 = real_command.split() cmdargs = cmdargs0 + cmdargs[1:] preprocessors = cls.PREPROCESSOR_MAP.get(command0) if preprocessors: cmdargs = cls.preprocess_command(preprocessors, cmdargs, command, cwd) # -- RUN COMMAND: try: process = subprocess.Popen(cmdargs, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True, shell=use_shell, cwd=cwd, **kwargs) out, err = process.communicate() if six.PY2: # py3: we get unicode strings, py2 not default_encoding = 'UTF-8' out = six.text_type(out, process.stdout.encoding or default_encoding) err = six.text_type(err, process.stderr.encoding or default_encoding) process.poll() assert process.returncode is not None command_result.stdout = out command_result.stderr = err command_result.returncode = process.returncode if cls.DEBUG: print(""shell.cwd={0}"".format(kwargs.get(""cwd"", None))) print(""shell.command: {0}"".format("" "".join(cmdargs))) print(""shell.command.output:\n{0};"".format(command_result.output)) except OSError as e: command_result.stderr = u""OSError: %s"" % e command_result.returncode = e.errno assert e.errno != 0 postprocessors = cls.POSTPROCESSOR_MAP.get(command0) if postprocessors: command_result = cls.postprocess_command(postprocessors, command_result) return command_result" 3000,"def get_field_template(self, bound_field, template_name=None): """""" Uses a special field template for widget with multiple inputs. It only applies if no other template than the default one has been defined. """""" template_name = super().get_field_template(bound_field, template_name) if (template_name == self.field_template and isinstance(bound_field.field.widget, ( forms.RadioSelect, forms.CheckboxSelectMultiple))): return 'tapeforms/fields/foundation_fieldset.html' return template_name" 3001,"def printer(self): """"""Prints PDA state attributes"""""" print "" ID "" + repr(self.id) if self.type == 0: print "" Tag: - "" print "" Start State - "" elif self.type == 1: print "" Push "" + repr(self.sym) elif self.type == 2: print "" Pop State "" + repr(self.sym) elif self.type == 3: print "" Read State "" + repr(self.sym) elif self.type == 4: print "" Stop State "" + repr(self.sym) for j in self.trans: if len(self.trans[j]) > 1 or (len(self.trans[j]) == 1): for symbol in self.trans[j]: print "" On Symbol "" + repr(symbol) + "" Transition To State "" + repr(j)" 3002,"def printer(self): """"""Prints PDA states and their attributes"""""" i = 0 while i < self.n + 1: print ""--------- State No --------"" + repr(i) self.s[i].printer() i = i + 1" 3003,"def consume_input(self, mystr, stack=[], state=1, curchar=0, depth=0): """""" Consumes an input and validates if it is accepted Args: mystr (str): the input string to be consumes stack (list): the stack of symbols state (int): the current state of the PDA curchar (int): the index of the consumed character depth (int): the depth of the function call in the stack Returns: bool: A value indicating the correct or erroneous execution """""" mystrsplit = mystr.split(' ') if self.s[state].type == 1: stack.append(self.s[state].sym) if len(self.s[state].trans) > 0: state = self.s[state].trans[0] if self.parse( mystr, stack=stack, state=state, curchar=curchar, depth=depth + 1) == 1: return True return False if self.s[state].type == 2: if len(stack) == 0: return False sym = stack.pop() for key in 
self.s[state].trans: if sym in self.s[state].trans[key]: if self.parse( mystr, stack=stack, state=key, curchar=curchar, depth=depth + 1) == 1: return True return False if self.s[state].type == 3: for key in self.s[state].trans: if mystrsplit[curchar] in self.s[state].trans[key]: # print 'found ' if curchar + 1 == len(mystrsplit) \ and 'closing' in self.s[key].trans: return True elif curchar + 1 == len(mystrsplit): return False # print 'lets try as next state the state ' + repr(key) if self.parse( mystr, stack=stack, state=key, curchar=curchar + 1, depth=depth + 1) == 1: return True return False" 3004,"def _CreateDatabase(self): """""" Create all database tables. """""" goodlogging.Log.Info(""DB"", ""Initialising new database"", verbosity=self.logVerbosity) with sqlite3.connect(self._dbPath) as db: # Configuration tables db.execute(""CREATE TABLE Config ("" ""Name TEXT UNIQUE NOT NULL, "" ""Value TEXT)"") db.execute(""CREATE TABLE IgnoredDir ("" ""DirName TEXT UNIQUE NOT NULL)"") db.execute(""CREATE TABLE SupportedFormat ("" ""FileFormat TEXT UNIQUE NOT NULL)"") # Look-up tables db.execute(""CREATE TABLE TVLibrary ("" ""ShowID INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT, "" ""ShowName TEXT UNIQUE NOT NULL, "" ""ShowDir TEXT UNIQUE)"") db.execute(""CREATE TABLE FileName ("" ""FileName TEXT UNIQUE NOT NULL, "" ""ShowID INTEGER, "" ""FOREIGN KEY (ShowID) REFERENCES ShowName(ShowID))"") db.execute(""CREATE TABLE SeasonDir ("" ""ShowID INTEGER, "" ""Season INTEGER NOT NULL, "" ""SeasonDir TEXT NOT NULL, "" ""FOREIGN KEY (ShowID) REFERENCES ShowName(ShowID),"" ""CONSTRAINT SeasonDirPK PRIMARY KEY (ShowID,Season))"") db.commit() goodlogging.Log.Info(""DB"", ""Database initialisation complete"", verbosity=self.logVerbosity)" 3005,"def _ActionDatabase(self, cmd, args = None, commit = True, error = True): """""" Do action on database. Parameters ---------- cmd : string SQL command. args : tuple [optional : default = None] Arguments to be passed along with the SQL command. e.g. cmd=""SELECT Value FROM Config WHERE Name=?"" args=(fieldName, ) commit : boolean [optional : default = True] If true commit database changes after command is executed. error : boolean [optional : default = True] If False then any sqlite3.OperationalError exceptions will cause this function to return None, otherwise the exception will be raised. Returns ---------- If a valid result is obtained from the database this will be returned. If an error occurs and the error argument is set to False then the return value will be None. """""" goodlogging.Log.Info(""DB"", ""Database Command: {0} {1}"".format(cmd, args), verbosity=self.logVerbosity) with sqlite3.connect(self._dbPath) as db: try: if args is None: result = db.execute(cmd) else: result = db.execute(cmd, args) except sqlite3.OperationalError: if error is True: raise return None else: if commit is True: db.commit() return result.fetchall()" 3006,"def _PurgeTable(self, tableName): """""" Deletes all rows from given table without dropping table. Parameters ---------- tableName : string Name of table. """""" goodlogging.Log.Info(""DB"", ""Deleting all entries from table {0}"".format(tableName), verbosity=self.logVerbosity) self._ActionDatabase(""DELETE FROM {0}"".format(tableName))" 3007,"def GetConfigValue(self, fieldName): """""" Match given field name in Config table and return corresponding value. Parameters ---------- fieldName : string String matching Name column in Config table. 
Returns ---------- string or None If a match is found the corresponding entry in the Value column of the database table is returned, otherwise None is returned (or if multiple matches are found a fatal error is raised). """""" result = self._ActionDatabase(""SELECT Value FROM Config WHERE Name=?"", (fieldName, )) if result is None: return None elif len(result) == 0: return None elif len(result) == 1: goodlogging.Log.Info(""DB"", ""Found database match in config table {0}={1}"".format(fieldName, result[0][0]), verbosity=self.logVerbosity) return result[0][0] elif len(result) > 1: goodlogging.Log.Fatal(""DB"", ""Database corrupted - multiple matches found in config table {0}={1}"".format(fieldName, result))" 3008,"def SetConfigValue(self, fieldName, value): """""" Set value in Config table. If a entry already exists this is updated with the new value, otherwise a new entry is added. Parameters ---------- fieldName : string String to be inserted or matched against Name column in Config table. value : string Entry to be inserted or updated in Value column of Config table. """""" currentConfigValue = self.GetConfigValue(fieldName) if currentConfigValue is None: goodlogging.Log.Info(""DB"", ""Adding {0}={1} to database config table"".format(fieldName, value), verbosity=self.logVerbosity) self._ActionDatabase(""INSERT INTO Config VALUES (?,?)"", (fieldName, value)) else: goodlogging.Log.Info(""DB"", ""Updating {0} in database config table from {1} to {2}"".format(fieldName, currentConfigValue, value), verbosity=self.logVerbosity) self._ActionDatabase(""UPDATE Config SET Value=? WHERE Name=?"", (value, fieldName))" 3009,"def _AddToSingleColumnTable(self, tableName, columnHeading, newValue): """""" Add an entry to a table containing a single column. Checks existing table entries to avoid duplicate entries if the given value already exists in the table. Parameters ---------- tableName : string Name of table to add entry to. columnHeading : string Name of column heading. newValue : string New value to add to table. """""" match = None currentTable = self._GetFromSingleColumnTable(tableName) if currentTable is not None: for currentValue in currentTable: if currentValue == newValue: match = True if match is None: goodlogging.Log.Info(""DB"", ""Adding {0} to {1} table"".format(newValue, tableName), verbosity=self.logVerbosity) self._ActionDatabase(""INSERT INTO {0} VALUES (?)"".format(tableName), (newValue, )) else: goodlogging.Log.Info(""DB"", ""{0} already exists in {1} table"".format(newValue, tableName), verbosity=self.logVerbosity) ############################################################################ # _GetFromSingleColumnTable ############################################################################ """""" Get all entries from a table containing a single column. Parameters ---------- tableName : string Name of table to add entry to. Returns ---------- list or None If either no table or no rows are found this returns None, otherwise a list of all table entries is returned. """"""" 3010,"def AddShowToTVLibrary(self, showName): """""" Add show to TVLibrary table. If the show already exists in the table a fatal error is raised. Parameters ---------- showName : string Show name to add to TV library table. Returns ---------- int Unique show id generated for show when it is added to the table. Used across the database to reference this show. 
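Example (sketch; ``db`` is an assumed instance of this database class and the show is not yet in the library):

>>> show_id = db.AddShowToTVLibrary('New Show')
>>> db.UpdateShowDirInTVLibrary(show_id, 'New.Show')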
"""""" goodlogging.Log.Info(""DB"", ""Adding {0} to TV library"".format(showName), verbosity=self.logVerbosity) currentShowValues = self.SearchTVLibrary(showName = showName) if currentShowValues is None: self._ActionDatabase(""INSERT INTO TVLibrary (ShowName) VALUES (?)"", (showName, )) showID = self._ActionDatabase(""SELECT (ShowID) FROM TVLibrary WHERE ShowName=?"", (showName, ))[0][0] return showID else: goodlogging.Log.Fatal(""DB"", ""An entry for {0} already exists in the TV library"".format(showName))" 3011,"def UpdateShowDirInTVLibrary(self, showID, showDir): """""" Update show directory entry for given show id in TVLibrary table. Parameters ---------- showID : int Show id value. showDir : string Show directory name. """""" goodlogging.Log.Info(""DB"", ""Updating TV library for ShowID={0}: ShowDir={1}"".format(showID, showDir)) self._ActionDatabase(""UPDATE TVLibrary SET ShowDir=? WHERE ShowID=?"", (showDir, showID))" 3012,"def SearchTVLibrary(self, showName = None, showID = None, showDir = None): """""" Search TVLibrary table. If none of the optonal arguments are given it looks up all entries of the table, otherwise it will look up entries which match the given arguments. Note that it only looks up based on one argument - if show directory is given this will be used, otherwise show id will be used if it is given, otherwise show name will be used. Parameters ---------- showName : string [optional : default = None] Show name. showID : int [optional : default = None] Show id value. showDir : string [optional : default = None] Show directory name. Returns ---------- list or None If no result is found this returns None otherwise it will return a the result of the SQL query as a list. In the case that the result is expected to be unique and multiple entries are return a fatal error will be raised. """""" unique = True if showName is None and showID is None and showDir is None: goodlogging.Log.Info(""DB"", ""Looking up all items in TV library"", verbosity=self.logVerbosity) queryString = ""SELECT * FROM TVLibrary"" queryTuple = None unique = False elif showDir is not None: goodlogging.Log.Info(""DB"", ""Looking up from TV library where ShowDir is {0}"".format(showDir), verbosity=self.logVerbosity) queryString = ""SELECT * FROM TVLibrary WHERE ShowDir=?"" queryTuple = (showDir, ) elif showID is not None: goodlogging.Log.Info(""DB"", ""Looking up from TV library where ShowID is {0}"".format(showID), verbosity=self.logVerbosity) queryString = ""SELECT * FROM TVLibrary WHERE ShowID=?"" queryTuple = (showID, ) elif showName is not None: goodlogging.Log.Info(""DB"", ""Looking up from TV library where ShowName is {0}"".format(showName), verbosity=self.logVerbosity) queryString = ""SELECT * FROM TVLibrary WHERE ShowName=?"" queryTuple = (showName, ) result = self._ActionDatabase(queryString, queryTuple, error = False) if result is None: return None elif len(result) == 0: return None elif len(result) == 1: goodlogging.Log.Info(""DB"", ""Found match in TVLibrary: {0}"".format(result), verbosity=self.logVerbosity) return result elif len(result) > 1: if unique is True: goodlogging.Log.Fatal(""DB"", ""Database corrupted - multiple matches found in TV Library: {0}"".format(result)) else: goodlogging.Log.Info(""DB"", ""Found multiple matches in TVLibrary: {0}"".format(result), verbosity=self.logVerbosity) return result" 3013,"def SearchFileNameTable(self, fileName): """""" Search FileName table. Find the show id for a given file name. 
Parameters ---------- fileName : string File name to look up in table. Returns ---------- int or None If a match is found in the database table the show id for this entry is returned, otherwise this returns None. """""" goodlogging.Log.Info(""DB"", ""Looking up filename string '{0}' in database"".format(fileName), verbosity=self.logVerbosity) queryString = ""SELECT ShowID FROM FileName WHERE FileName=?"" queryTuple = (fileName, ) result = self._ActionDatabase(queryString, queryTuple, error = False) if result is None: goodlogging.Log.Info(""DB"", ""No match found in database for '{0}'"".format(fileName), verbosity=self.logVerbosity) return None elif len(result) == 0: return None elif len(result) == 1: goodlogging.Log.Info(""DB"", ""Found file name match: {0}"".format(result), verbosity=self.logVerbosity) return result[0][0] elif len(result) > 1: goodlogging.Log.Fatal(""DB"", ""Database corrupted - multiple matches found in database table for: {0}"".format(result))" 3014,"def AddToFileNameTable(self, fileName, showID): """""" Add entry to FileName table. If the file name and show id combination already exists in the table a fatal error is raised. Parameters ---------- fileName : string File name. showID : int Show id. """""" goodlogging.Log.Info(""DB"", ""Adding filename string match '{0}'={1} to database"".format(fileName, showID), verbosity=self.logVerbosity) currentValues = self.SearchFileNameTable(fileName) if currentValues is None: self._ActionDatabase(""INSERT INTO FileName (FileName, ShowID) VALUES (?,?)"", (fileName, showID)) else: goodlogging.Log.Fatal(""DB"", ""An entry for '{0}' already exists in the FileName table"".format(fileName))" 3015,"def SearchSeasonDirTable(self, showID, seasonNum): """""" Search SeasonDir table. Find the season directory for a given show id and season combination. Parameters ---------- showID : int Show id for given show. seasonNum : int Season number. Returns ---------- string or None If no match is found this returns None, if a single match is found then the season directory name value is returned. If multiple matches are found a fatal error is raised. """""" goodlogging.Log.Info(""DB"", ""Looking up directory for ShowID={0} Season={1} in database"".format(showID, seasonNum), verbosity=self.logVerbosity) queryString = ""SELECT SeasonDir FROM SeasonDir WHERE ShowID=? AND Season=?"" queryTuple = (showID, seasonNum) result = self._ActionDatabase(queryString, queryTuple, error = False) if result is None: goodlogging.Log.Info(""DB"", ""No match found in database"", verbosity=self.logVerbosity) return None elif len(result) == 0: return None elif len(result) == 1: goodlogging.Log.Info(""DB"", ""Found database match: {0}"".format(result), verbosity=self.logVerbosity) return result[0][0] elif len(result) > 1: goodlogging.Log.Fatal(""DB"", ""Database corrupted - multiple matches found in database table for: {0}"".format(result))" 3016,"def AddSeasonDirTable(self, showID, seasonNum, seasonDir): """""" Add entry to SeasonDir table. If a different entry for season directory is found for the given show id and season number combination this raises a fatal error. Parameters ---------- showID : int Show id. seasonNum : int Season number. seasonDir : string Season directory name. 
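Example (sketch; ``db`` is an assumed instance of this class and ``show_id`` a value previously returned by ``AddShowToTVLibrary``):

>>> db.AddSeasonDirTable(show_id, 1, 'Season 1')
>>> db.SearchSeasonDirTable(show_id, 1)  # returns 'Season 1'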
"""""" goodlogging.Log.Info(""DB"", ""Adding season directory ({0}) to database for ShowID={1}, Season={2}"".format(seasonDir, showID, seasonNum), verbosity=self.logVerbosity) currentValue = self.SearchSeasonDirTable(showID, seasonNum) if currentValue is None: self._ActionDatabase(""INSERT INTO SeasonDir (ShowID, Season, SeasonDir) VALUES (?,?,?)"", (showID, seasonNum, seasonDir)) else: if currentValue == seasonDir: goodlogging.Log.Info(""DB"", ""A matching entry already exists in the SeasonDir table"", verbosity=self.logVerbosity) else: goodlogging.Log.Fatal(""DB"", ""A different entry already exists in the SeasonDir table"")" 3017,"def _PrintDatabaseTable(self, tableName, rowSelect = None): """""" Prints contents of database table. An optional argument (rowSelect) can be given which contains a list of column names and values against which to search, allowing a subset of the table to be printed. Gets database column headings using PRAGMA call. Automatically adjusts each column width based on the longest element that needs to be printed Parameters ---------- tableName : int Name of table to print. rowSelect : list of tuples A list of column names and values against to search against. Returns: int The number of table rows printed. """""" goodlogging.Log.Info(""DB"", ""{0}"".format(tableName)) goodlogging.Log.IncreaseIndent() tableInfo = self._ActionDatabase(""PRAGMA table_info({0})"".format(tableName)) dbQuery = ""SELECT * FROM {0}"".format(tableName) dbQueryParams = [] if rowSelect is not None: dbQuery = dbQuery + "" WHERE "" + ' AND '.join(['{0}=?'.format(i) for i, j in rowSelect]) dbQueryParams = [j for i, j in rowSelect] tableData = self._ActionDatabase(dbQuery, dbQueryParams) columnCount = len(tableInfo) columnWidths = [0]*columnCount columnHeadings = [] for count, column in enumerate(tableInfo): columnHeadings.append(column[1]) columnWidths[count] = len(column[1]) for row in tableData: for count, column in enumerate(row): if len(str(column)) > columnWidths[count]: columnWidths[count] = len(column) printStr = ""|"" for count, column in enumerate(columnWidths): printStr = printStr + "" {{0[{0}]:{1}}} |"".format(count, columnWidths[count]) goodlogging.Log.Info(""DB"", printStr.format(columnHeadings)) goodlogging.Log.Info(""DB"", ""-""*(sum(columnWidths)+3*len(columnWidths)+1)) for row in tableData: noneReplacedRow = ['-' if i is None else i for i in row] goodlogging.Log.Info(""DB"", printStr.format(noneReplacedRow)) goodlogging.Log.DecreaseIndent() goodlogging.Log.NewLine() return len(tableData)" 3018,"def PrintAllTables(self): """""" Prints contents of every table. """""" goodlogging.Log.Info(""DB"", ""Database contents:\n"") for table in self._tableDict.keys(): self._PrintDatabaseTable(table)" 3019,"def _UpdateDatabaseFromResponse(self, response, mode): """""" Update database table given a user input in the form ""TABLENAME COL1=VAL1 COL2=VAL2"". Either ADD or DELETE from table depending on mode argument. If the change succeeds the updated table is printed to stdout. Parameters ---------- response : string User input. mode : string Valid values are 'ADD' or 'DEL'. Returns ---------- None Will always return None. There are numerous early returns in the cases where the database update cannot proceed for any reason. 
"""""" # Get tableName from user input (form TABLENAME COL1=VAL1 COL2=VAL2 etc) try: tableName, tableColumns = response.split(' ', 1) except ValueError: goodlogging.Log.Info(""DB"", ""Database update failed - failed to extract table name from response"") return None # Check user input against known table list if tableName not in self._tableDict.keys(): goodlogging.Log.Info(""DB"", ""Database update failed - unkown table name: {0}"".format(tableName)) return None # Build re pattern to extract column from user input (form TABLENAME COL1=VAL1 COL2=VAL2 etc) rowSelect = [] for column in self._tableDict[tableName]: colPatternList = ['(?:{0})'.format(i) for i in self._tableDict[tableName] if i != column] colPatternList.append('(?:$)') colPatternMatch = '|'.join(colPatternList) matchPattern = '{0}.*?{1}=(.+?)\s*(?:{2})'.format(tableName, column, colPatternMatch) match = re.findall(matchPattern, response) # Match should be in form [(VAL1, VAL2, VAL3, etc.)] if len(match) == 1: rowSelect.append((column, match[0])) elif len(match) > 1: goodlogging.Log.Info('DB', 'Database update failed - multiple matches found for table {0} column {1}'.format(tableName, column)) return None if len(rowSelect) == 0: goodlogging.Log.Info('DB', 'Database update failed - no row selection critera found in response') return None # Print selected rows rowCount = self._PrintDatabaseTable(tableName, rowSelect) # Do DELETE flow if mode.upper() == 'DEL': if rowCount == 0: goodlogging.Log.Info(""DB"", ""Database update failed - no rows found for given search critera: {0}"".format(response)) return None deleteConfirmation = goodlogging.Log.Input(""DB"", ""***WARNING*** DELETE THESE ROWS FROM {0} TABLE? [y/n]: "".format(tableName)) deleteConfirmation = util.ValidUserResponse(deleteConfirmation, ('y', 'n')) if deleteConfirmation.lower() == 'n': goodlogging.Log.Info(""DB"", ""Database table row delete cancelled"") return None # Build delete database query (form DELETE FROM TableName WHERE COL1=?, COL2=?) dbQuery = ""DELETE FROM {0}"".format(tableName) \ + "" WHERE "" \ + ' AND '.join(['{0}=?'.format(i) for i, j in rowSelect]) dbQueryParams = [j for i, j in rowSelect] self._ActionDatabase(dbQuery, dbQueryParams) goodlogging.Log.Info(""DB"", ""Deleted {0} row(s) from database table {0}:"".format(rowCount, tableName)) # Do ADD flow elif mode.upper() == 'ADD': if rowCount != 0: goodlogging.Log.Info(""DB"", ""Database update failed - a row already exists for the given critera: {0}"".format(response)) return None # Build insert database query (form INSERT INTO TableName (COL1, COL2) VALUES (?,?)) dbQuery = ""INSERT INTO {0} ("".format(tableName) \ + ', '.join(['{0}'.format(i) for i, j in rowSelect]) \ + "") VALUES ("" \ + ', '.join(['?']*len(rowSelect)) \ + "")"" dbQueryParams = [j for i, j in rowSelect] self._ActionDatabase(dbQuery, dbQueryParams) goodlogging.Log.Info(""DB"", ""Added row to database table {0}:"".format(tableName)) # Print resulting database table self._PrintDatabaseTable(tableName)" 3020,"def ManualUpdateTables(self): """""" Allow user to manually update the database tables. User options from initial prompt are: - 'ls' : print database contents - 'a' : add an row to a database table - 'd' : delete a single table row - 'p' : delete an entire table (purge) - 'f' : finish updates and continue - 'x' : finish updates and exit Selecting add, delete or purge will proceed to a further prompt where the user can enter exactly what information should be added or deleted. 
"""""" goodlogging.Log.Info(""DB"", ""Starting manual database update:\n"") updateFinished = False # Loop until the user continues program flow or exits while not updateFinished: prompt = ""Enter 'ls' to print the database contents, "" \ ""'a' to add a table entry, "" \ ""'d' to delete a single table row, "" \ ""'p' to select a entire table to purge, "" \ ""'f' to finish or "" \ ""'x' to exit: "" response = goodlogging.Log.Input(""DM"", prompt) goodlogging.Log.NewLine() goodlogging.Log.IncreaseIndent() # Exit program if response.lower() == 'x': goodlogging.Log.Fatal(""DB"", ""Program exited by user response"") # Finish updating database elif response.lower() == 'f': updateFinished = True # Print database tables elif response.lower() == 'ls': self.PrintAllTables() # Purge a given table elif response.lower() == 'p': response = goodlogging.Log.Input(""DM"", ""Enter database table to purge or 'c' to cancel: "") # Go back to main update selection if response.lower() == 'c': goodlogging.Log.Info(""DB"", ""Database table purge cancelled"") # Purge table else: if response in self._tableDict.keys(): self._PrintDatabaseTable(response) deleteConfirmation = goodlogging.Log.Input(""DB"", ""***WARNING*** DELETE ALL ROWS FROM {0} TABLE? [y/n]: "".format(response)) deleteConfirmation = util.ValidUserResponse(deleteConfirmation, ('y', 'n')) if deleteConfirmation.lower() == 'n': goodlogging.Log.Info(""DB"", ""Database table purge cancelled"") else: self._PurgeTable(response) goodlogging.Log.Info(""DB"", ""{0} database table purged"".format(response)) else: goodlogging.Log.Info(""DB"", ""Unknown table name ({0}) given to purge"".format(response)) # Add new row to table elif response.lower() == 'a': addFinished = False while not addFinished: prompt = ""Enter new database row (in format TABLE COL1=VAL COL2=VAL etc) "" \ ""or 'c' to cancel: "" response = goodlogging.Log.Input(""DM"", prompt) # Go back to main update selection if response.lower() == 'c': goodlogging.Log.Info(""DB"", ""Database table add cancelled"") addFinished = True # Add row to table else: self._UpdateDatabaseFromResponse(response, 'ADD') # Delete row(s) from table elif response.lower() == 'd': deleteFinished = False while not deleteFinished: prompt = ""Enter database row to delete (in format TABLE COL1=VAL COL2=VAL etc) "" \ ""or 'c' to cancel: "" response = goodlogging.Log.Input(""DM"", prompt) # Go back to main update selection if response.lower() == 'c': goodlogging.Log.Info(""DB"", ""Database table row delete cancelled"") deleteFinished = True # Delete row(s) from table else: self._UpdateDatabaseFromResponse(response, 'DEL') # Unknown user input given else: goodlogging.Log.Info(""DB"", ""Unknown response"") goodlogging.Log.DecreaseIndent() goodlogging.Log.NewLine() goodlogging.Log.Info(""DB"", ""Manual database update complete."") self.PrintAllTables()" 3021,"def _get_minidom_tag_value(station, tag_name): """"""get a value from a tag (if it exists)"""""" tag = station.getElementsByTagName(tag_name)[0].firstChild if tag: return tag.nodeValue return None" 3022,"def _parse(data, obj_name, attr_map): """"""parse xml data into a python map"""""" parsed_xml = minidom.parseString(data) parsed_objects = [] for obj in parsed_xml.getElementsByTagName(obj_name): parsed_obj = {} for (py_name, xml_name) in attr_map.items(): parsed_obj[py_name] = _get_minidom_tag_value(obj, xml_name) parsed_objects.append(parsed_obj) return parsed_objects" 3023,"def get_all_stations(self, station_type=None): """"""Returns information of all stations. 
@param station_type (optional): ['mainline', 'suburban', 'dart'] """""" params = None if station_type and station_type in STATION_TYPE_TO_CODE_DICT: url = self.api_base_url + 'getAllStationsXML_WithStationType' params = { 'stationType': STATION_TYPE_TO_CODE_DICT[station_type] } else: url = self.api_base_url + 'getAllStationsXML' response = requests.get( url, params=params, timeout=10) if response.status_code != 200: return [] return self._parse_station_list(response.content)" 3024,"def get_all_current_trains(self, train_type=None, direction=None): """""" Returns all trains that are due to start in the next 10 minutes @param train_type: ['mainline', 'suburban', 'dart'] """""" params = None if train_type: url = self.api_base_url + 'getCurrentTrainsXML_WithTrainType' params = { 'TrainType': STATION_TYPE_TO_CODE_DICT[train_type] } else: url = self.api_base_url + 'getCurrentTrainsXML' response = requests.get( url, params=params, timeout=10) if response.status_code != 200: return [] trains = self._parse_all_train_data(response.content) if direction is not None: return self._prune_trains(trains, direction=direction) return trains" 3025,"def get_station_by_name(self, station_name, num_minutes=None, direction=None, destination=None, stops_at=None): """""" Returns all trains due to serve station `station_name`. @param station_name @param num_minutes Only trains within this time. Between 5 and 90 @param direction Filter by direction. Northbound or Southbound @param destination Filter by name of the destination stations @param stops_at Filter by name of one of the stops """""" url = self.api_base_url + 'getStationDataByNameXML' params = { 'StationDesc': station_name } if num_minutes: url = url + '_withNumMins' params['NumMins'] = num_minutes response = requests.get( url, params=params, timeout=10) if response.status_code != 200: return [] trains = self._parse_station_data(response.content) if direction is not None or destination is not None: return self._prune_trains(trains, direction=direction, destination=destination, stops_at=stops_at) return trains" 3026,"def _prune_trains(self, trains, direction=None, destination=None, stops_at=None): """""" Only return the data matching direction and / or destination. If stops_at is set this may do a number of extra HTTP requests @param trains list of trains to filter @param direction Filter by train direction. Northbound or Southbound @param destination Filter by name of the destination stations @param stops_at Filter by name of one of the stops """""" pruned_data = [] for train in trains: append = True if direction is not None and train[""direction""] != direction: append = False if destination is not None and train[""destination""] != destination: append = False if append and stops_at is not None: if stops_at != train['destination']: stops = self.get_train_stops(train[""code""]) for stop in stops: append = False if stop[""location""] == stops_at: append = True break if append: pruned_data.append(train) return pruned_data" 3027,"def get_train_stops(self, train_code, date=None): """""" Get details for a train. @param train_code code for the train @param date Date in format ""15 oct 2017"".
If none use today """""" if date is None: date = datetime.date.today().strftime(""%d %B %Y"") url = self.api_base_url + 'getTrainMovementsXML' params = { 'TrainId': train_code, 'TrainDate': date } response = requests.get( url, params=params, timeout=10) if response.status_code != 200: return [] return self._parse_train_movement_data(response.content)" 3028,"def fill_fields(self, **kwargs): """"""Fills the fields referenced by kwargs keys and fill them with the value"""""" for name, value in kwargs.items(): field = getattr(self, name) field.send_keys(value)" 3029,"def selector(self, fieldname): """"""Gets a selector for the given page element as a tuple (by, selector)"""""" finder = self._finders[fieldname] return (finder._by, finder._selector)" 3030,"def authorize_url(client_id=None, redirect_uri=None, state=None, scopes=None, show_dialog=False, http_client=None): """""" Trigger authorization dialog :param str client_id: Client ID :param str redirect_uri: Application Redirect URI :param str state: Application State :param List[str] scopes: Scopes to request :param bool show_dialog: Show the dialog :param http_client: HTTP Client for requests :return str Authorize URL :rtype str """""" params = { 'client_id': client_id or os.environ.get('SPOTIFY_CLIENT_ID'), 'redirect_uri': redirect_uri or os.environ.get('SPOTIFY_REDIRECT_URI'), 'state': state or str(uuid.uuid4()).replace('-', ''), 'scope': ' '.join(scopes) if scopes else '', 'show_dialog': show_dialog, 'response_type': 'code' } query = ['{}={}'.format(k, v) for k, v in params.items()] return '{}?{}'.format('https://accounts.spotify.com/authorize', '&'.join(query))" 3031,"def refresh(self): """""" Refresh the access token """""" data = { 'grant_type': 'refresh_token', 'refresh_token': self._token.refresh_token } response = self.http_client.post(self.URL, data=data, auth=(self.client_id, self.client_secret)) response.raise_for_status() self._token = Token.from_json(response.json())" 3032,"def instance_of(cls): """""" Create an invariant requiring the value is an instance of ``cls``. """""" def check(value): return ( isinstance(value, cls), u""{value!r} is instance of {actual!s}, required {required!s}"".format( value=value, actual=fullyQualifiedName(type(value)), required=fullyQualifiedName(cls), ), ) return check" 3033,"def provider_of(iface): """""" Create an invariant requiring the value provides the zope.interface ``iface``. """""" def check(value): return ( iface.providedBy(value), u""{value!r} does not provide {interface!s}"".format( value=value, interface=fullyQualifiedName(iface), ), ) return check" 3034,"def temp_dir(suffix='', prefix='tmp', parent_dir=None, make_cwd=False): """""" Create a temporary directory and optionally change the current working directory to it. The directory is deleted when the context exits. The temporary directory is created when entering the context manager, and deleted when exiting it: >>> import temporary >>> with temporary.temp_dir() as temp_dir: ... assert temp_dir.is_dir() >>> assert not temp_dir.exists() This time let's make the temporary directory our working directory: >>> import os >>> with temporary.temp_dir(make_cwd=True) as temp_dir: ... assert str(temp_dir) == os.getcwd() >>> assert not str(temp_dir) == os.getcwd() The suffix, prefix, and parent_dir options are passed to the standard ``tempfile.mkdtemp()`` function: >>> with temporary.temp_dir() as p: ... with temporary.temp_dir(suffix='suf', prefix='pre', parent_dir=p) as d: ... assert d.parent == p ... assert d.name.startswith('pre') ... 
assert d.name.endswith('suf') This function can also be used as a decorator, with the in_temp_dir alias: >>> @temporary.in_temp_dir() ... def my_function(): ... assert old_cwd != os.getcwd() ... >>> old_cwd = os.getcwd() >>> my_function() >>> assert old_cwd == os.getcwd() """""" prev_cwd = os.getcwd() parent_dir = parent_dir if parent_dir is None else str(parent_dir) abs_path = tempfile.mkdtemp(suffix, prefix, parent_dir) path = pathlib.Path(abs_path) try: if make_cwd: os.chdir(str(abs_path)) yield path.resolve() finally: if make_cwd: os.chdir(prev_cwd) with temporary.util.allow_missing_file(): shutil.rmtree(str(abs_path))" 3035,"def openSafeReplace(filepath, mode='w+b'): """"""Context manager to open a temporary file and replace the original file on closing. """""" tempfileName = None #Check if the filepath can be accessed and is writable before creating the #tempfile if not _isFileAccessible(filepath): raise IOError('File %s is not writtable' % (filepath, )) with tempfile.NamedTemporaryFile(delete=False, mode=mode) as tmpf: tempfileName = tmpf.name yield tmpf #Check if the filepath can be accessed and is writable before moving the #tempfile if not _isFileAccessible(filepath): raise IOError('File %s is not writtable' % (filepath, )) #Note: here unhandled exceptions may still occur because of race conditions, #messing things up. shutil.move(tempfileName, filepath)" 3036,"def _isFileAccessible(filepath): """"""Returns True if the specified filepath is writable."""""" directory = os.path.dirname(filepath) if not os.access(directory, os.W_OK): #Return False if directory does not exist or is not writable return False if os.path.exists(filepath): if not os.access(filepath, os.W_OK): #Return False if file is not writable return False try: openfile = os.open(filepath, os.O_WRONLY) os.close(openfile) except IOError: #Return False if file is locked return False #Return True if file is writtable return True" 3037,"def writeJsonZipfile(filelike, data, compress=True, mode='w', name='data'): """"""Serializes the objects contained in data to a JSON formated string and writes it to a zipfile. :param filelike: path to a file (str) or a file-like object :param data: object that should be converted to a JSON formated string. Objects and types in data must be supported by the json.JSONEncoder or have the method ``._reprJSON()`` defined. :param compress: bool, True to use zip file compression :param mode: 'w' to truncate and write a new file, or 'a' to append to an existing file :param name: the file name that will be given to the JSON output in the archive """""" zipcomp = zipfile.ZIP_DEFLATED if compress else zipfile.ZIP_STORED with zipfile.ZipFile(filelike, mode, allowZip64=True) as containerFile: containerFile.writestr(name, json.dumps(data, cls=MaspyJsonEncoder), zipcomp )" 3038,"def writeBinaryItemContainer(filelike, binaryItemContainer, compress=True): """"""Serializes the binaryItems contained in binaryItemContainer and writes them into a zipfile archive. Examples of binaryItem classes are :class:`maspy.core.Ci` and :class:`maspy.core.Sai`. A binaryItem class has to define the function ``_reprJSON()`` which returns a JSON formated string representation of the class instance. In addition it has to contain an attribute ``.arrays``, a dictionary which values are ``numpy.array``, that are serialized to bytes and written to the ``binarydata`` file of the zip archive. 
See :func:`_dumpArrayDictToFile()` The JSON formated string representation of the binaryItems, together with the metadata, necessary to restore serialized numpy arrays, is written to the ``metadata`` file of the archive in this form: ``[[serialized binaryItem, [metadata of a numpy array, ...]], ...]`` Use the method :func:`loadBinaryItemContainer()` to restore a binaryItemContainer from a zipfile. :param filelike: path to a file (str) or a file-like object :param binaryItemContainer: a dictionary containing binaryItems :param compress: bool, True to use zip file compression """""" allMetadata = dict() binarydatafile = io.BytesIO() #Note: It would be possible to sort the items here for index, binaryItem in enumerate(viewvalues(binaryItemContainer)): metadataList = _dumpArrayDictToFile(binarydatafile, binaryItem.arrays) allMetadata[index] = [binaryItem._reprJSON(), metadataList] #TODO: Is seek here still necessary? binarydatafile.seek(0) zipcomp = zipfile.ZIP_DEFLATED if compress else zipfile.ZIP_STORED with zipfile.ZipFile(filelike, 'w', allowZip64=True) as containerFile: containerFile.writestr('metadata', json.dumps(allMetadata, cls=MaspyJsonEncoder), zipcomp ) containerFile.writestr('binarydata', binarydatafile.getvalue(), zipcomp)" 3039,"def _dumpArrayDictToFile(filelike, arrayDict): """"""Function to serialize and write ``numpy.array`` contained in a dictionary to a file. See also :func:`_dumpArrayToFile` and :func:`_dumpNdarrayToFile`. :param filelike: can be a file or a file-like object that provides the methods ``.write()`` and ``.tell()``. :param arrayDict: a dictionary which values are ``numpy.array``, that are serialized to bytes and written to the filelike. :returns: a list of metadata dictionaries a metadata dictionary contains information necessary to restore the ``numpy.arrays`` from the file and the corresponding key from the arrayDict as 'arrayKey'. """""" metadataList = list() for arrayKey in sorted(arrayDict): array = arrayDict[arrayKey] if array.ndim == 1: metadata = _dumpArrayToFile(filelike, array) else: metadata = _dumpNdarrayToFile(filelike, array) metadata['arrayKey'] = arrayKey metadataList.append(metadata) return metadataList" 3040,"def _dumpArrayToFile(filelike, array): """"""Serializes a 1-dimensional ``numpy.array`` to bytes, writes the bytes to the filelike object and returns a dictionary with metadata, necessary to restore the ``numpy.array`` from the file. :param filelike: can be a file or a file-like object that provides the methods ``.write()`` and ``.tell()``. :param array: a 1-dimensional ``numpy.array`` :returns: a metadata dictionary :: {'start': start position in the file, 'end': end position in the file, 'size': size of the array, 'dtype': numpy data type of the array } """""" bytedata = array.tobytes('C') start = filelike.tell() end = start + len(bytedata) metadata = {'start': start, 'end': end, 'size': array.size, 'dtype': array.dtype.name } filelike.write(bytedata) return metadata" 3041,"def _dumpNdarrayToFile(filelike, ndarray): """"""Serializes an N-dimensional ``numpy.array`` to bytes, writes the bytes to the filelike object and returns a dictionary with metadata, necessary to restore the ``numpy.array`` from the file. :param filelike: can be a file or a file-like object that provides the methods ``.write()`` and ``.tell()``. 
:param ndarray: a N-dimensional ``numpy.array`` :returns: a metadata dictionary :: {'start': start position in the file, 'end': end position in the file, 'size': size of the array, 'dtype': numpy data type of the array, 'shape': description of the array shape } """""" bytedata = ndarray.tobytes('C') start = filelike.tell() end = start + len(bytedata) metadata = {'start': start, 'end': end, 'size': ndarray.size, 'dtype': ndarray.dtype.name, 'shape': ndarray.shape } filelike.write(bytedata) return metadata" 3042,"def loadBinaryItemContainer(zippedfile, jsonHook): """"""Imports binaryItems from a zipfile generated by :func:`writeBinaryItemContainer`. :param zipfile: can be either a path to a file (a string) or a file-like object :param jsonHook: a custom decoding function for JSON formated strings of the binaryItems stored in the zipfile. :returns: a dictionary containing binaryItems ``{binaryItem.id: binaryItem, ... }`` """""" binaryItemContainer = dict() with zipfile.ZipFile(zippedfile, 'r') as containerZip: #Convert the zipfile data into a str object, necessary since #containerZip.read() returns a bytes object. metadataText = io.TextIOWrapper(containerZip.open('metadata'), encoding='utf-8' ).read() allMetadata = json.loads(metadataText, object_hook=jsonHook) metadataIndex = [str(_) for _ in sorted([int(i) for i in viewkeys(allMetadata) ]) ] binarydataFile = containerZip.open('binarydata') for index in metadataIndex: binaryItem = allMetadata[index][0] for binaryMetadata in allMetadata[index][1]: arrayKey = binaryMetadata['arrayKey'] rawdata = binarydataFile.read(binaryMetadata['end'] - binaryMetadata['start'] ) array = _arrayFromBytes(rawdata, binaryMetadata) binaryItem.arrays[arrayKey] = array binaryItemContainer[binaryItem.id] = binaryItem return binaryItemContainer" 3043,"def _arrayFromBytes(dataBytes, metadata): """"""Generates and returns a numpy array from raw data bytes. :param bytes: raw data bytes as generated by ``numpy.ndarray.tobytes()`` :param metadata: a dictionary containing the data type and optionally the shape parameter to reconstruct a ``numpy.array`` from the raw data bytes. ``{""dtype"": ""float64"", ""shape"": (2, 3)}`` :returns: ``numpy.array`` """""" array = numpy.fromstring(dataBytes, dtype=numpy.typeDict[metadata['dtype']]) if 'shape' in metadata: array = array.reshape(metadata['shape']) return array" 3044,"def searchFileLocation(targetFileName, targetFileExtension, rootDirectory, recursive=True): """"""Search for a filename with a specified file extension in all subfolders of specified rootDirectory, returns first matching instance. :param targetFileName: #TODO: docstring :type targetFileName: str :param rootDirectory: #TODO: docstring :type rootDirectory: str :param targetFileExtension: #TODO: docstring :type targetFileExtension: str :param recursive: bool, specify whether subdirectories should be searched :returns: a filepath (str) or None """""" expectedFileName = targetFileName.split('.')[0] + '.' 
+ targetFileExtension targetFilePath = None if recursive: for dirpath, dirnames, filenames in os.walk(rootDirectory): for filename in filenames: if filename == expectedFileName: targetFilePath = joinpath(dirpath, filename) break if targetFilePath is not None: break else: for filename in os.listdir(rootDirectory): filePath = joinpath(rootDirectory, filename) if not os.path.isfile(filePath): continue if filename == expectedFileName: targetFilePath = filePath break return targetFilePath" 3045,"def matchingFilePaths(targetfilename, directory, targetFileExtension=None, selector=None): """"""Search for files in all subfolders of specified directory, return filepaths of all matching instances. :param targetfilename: filename to search for, only the string before the last ""."" is used for filename matching. Ignored if a selector function is specified. :param directory: search directory, including all subdirectories :param targetFileExtension: string after the last ""."" in the filename, has to be identical if specified. ""."" in targetFileExtension are ignored, thus "".txt"" is treated equal to ""txt"". :param selector: a function which is called with the value of targetfilename and has to return True (include value) or False (discard value). If no selector is specified, equality to targetfilename is used. :returns: list of matching file paths (str) """""" targetFilePaths = list() targetfilename = os.path.splitext(targetfilename)[0] #Guard against targetFileExtension being None before stripping dots targetFileExtension = targetFileExtension.replace('.', '') if targetFileExtension is not None else None matchExtensions = False if targetFileExtension is None else True if selector is None: selector = functools.partial(operator.eq, targetfilename) for dirpath, dirnames, filenames in os.walk(directory): for filename in filenames: filenameNoextension = os.path.splitext(filename)[0] if selector(filenameNoextension): if matchExtensions: if not filename.endswith('.' + targetFileExtension): continue targetFilePaths.append(joinpath(dirpath, filename)) return targetFilePaths" 3046,"def listFiletypes(targetfilename, directory): """"""Looks for all occurrences of a specified filename in a directory and returns a list of all present file extensions of this filename. In this case everything after the first dot is considered to be the file extension: ``""filename.txt"" -> ""txt""``, ``""filename.txt.zip"" -> ""txt.zip""`` :param targetfilename: a filename without any extensions :param directory: only files present in this directory are compared to the targetfilename :returns: a list of file extensions (str) """""" targetextensions = list() for filename in os.listdir(directory): if not os.path.isfile(joinpath(directory, filename)): continue splitname = filename.split('.') basename = splitname[0] extension = '.'.join(splitname[1:]) if basename == targetfilename: targetextensions.append(extension) return targetextensions" 3047,"def findAllSubstrings(string, substring): """""" Returns a list of all substring starting positions in string or an empty list if substring is not present in string. :param string: a template string :param substring: a string, which is looked for in the ``string`` parameter. :returns: a list of substring starting positions in the template string """""" #TODO: solve with regex?
what about '.': #return [m.start() for m in re.finditer('(?='+substring+')', string)] start = 0 positions = [] while True: start = string.find(substring, start) if start == -1: break positions.append(start) #+1 instead of +len(substring) to also find overlapping matches start += 1 return positions" 3048,"def toList(variable, types=(basestring, int, float, )): """"""Converts a variable of type string, int, float to a list, containing the variable as the only element. :param variable: any python object :type variable: (str, int, float, others) :returns: [variable] or variable """""" if isinstance(variable, types): return [variable] else: return variable" 3049,"def calcDeviationLimits(value, tolerance, mode): """"""Returns the upper and lower deviation limits for a value and a given tolerance, either as a relative or an absolute difference. :param value: can be a single value or a list of values if a list of values is given, the minimal value will be used to calculate the lower limit and the maximum value to calculate the upper limit :param tolerance: a number used to calculate the limits :param mode: either ``absolute`` or ``relative``, specifies how the ``tolerance`` should be applied to the ``value``. """""" values = toList(value) if mode == 'relative': lowerLimit = min(values) * (1 - tolerance) upperLimit = max(values) * (1 + tolerance) elif mode == 'absolute': lowerLimit = min(values) - tolerance upperLimit = max(values) + tolerance else: raise Exception('mode %s not specified' %(mode, )) return lowerLimit, upperLimit" 3050,"def returnArrayFilters(arr1, arr2, limitsArr1, limitsArr2): """"""#TODO: docstring :param arr1: #TODO: docstring :param arr2: #TODO: docstring :param limitsArr1: #TODO: docstring :param limitsArr2: #TODO: docstring :returns: #TODO: docstring """""" posL = bisect.bisect_left(arr1, limitsArr1[0]) posR = bisect.bisect_right(arr1, limitsArr1[1]) matchMask = ((arr2[posL:posR] <= limitsArr2[1]) & (arr2[posL:posR] >= limitsArr2[0]) ) return posL, posR, matchMask" 3051,"def applyArrayFilters(array, posL, posR, matchMask): """"""#TODO: docstring :param array: #TODO: docstring :param posL: #TODO: docstring :param posR: #TODO: docstring :param matchMask: #TODO: docstring :returns: ``numpy.array``, a subset of the input ``array``. """""" return numpy.compress(matchMask, array[posL:posR], axis=0)" 3052,"def averagingData(array, windowSize=None, averagingType='median'): """"""#TODO: docstring :param array: #TODO: docstring :param windowSize: #TODO: docstring :param averagingType: ""median"" or ""mean"" :returns: #TODO: docstring """""" assert averagingType in ['median', 'mean'] if windowSize is None: windowSize = int(len(array) / 50) if int(len(array) / 50) > 100 else 100 if averagingType == 'median': averagedData = runningMedian(array, windowSize) elif averagingType == 'mean': averagedData = runningMean(array, len(array), windowSize) return averagedData" 3053,"def returnSplineList(dependentVar, independentVar, subsetPercentage=0.4, cycles=10, minKnotPoints=10, initialKnots=200, splineOrder=2, terminalExpansion=0.1 ): """""" #TODO: docstring Note: Expects sorted arrays. 
:param dependentVar: #TODO: docstring :param independentVar: #TODO: docstring :param subsetPercentage: #TODO: docstring :param cycles: #TODO: docstring :param minKnotPoints: #TODO: docstring :param initialKnots: #TODO: docstring :param splineOrder: #TODO: docstring :param terminalExpansion: expand subsets on both sides :returns: #TODO: docstring """""" expansions = ddict(list) expansionArea = (independentVar[-1] - independentVar[0]) * terminalExpansion #adds 100 data points at both ends of the dependent and independent array for i in range(100): expansions['indUp'].append(independentVar[-1] + expansionArea/100*i) expansions['indDown'].append(independentVar[0] - expansionArea/100*(100-i+1) ) expansions['depUp'].append(dependentVar[-1]) expansions['depDown'].append(dependentVar[0]) dependentVar = numpy.array(expansions['depDown'] + list(dependentVar) + expansions['depUp'], dtype=numpy.float64 ) independentVar = numpy.array(expansions['indDown'] + list(independentVar) + expansions['indUp'], dtype=numpy.float64 ) splineList = list() for cycle in range(cycles): subset = sorted(random.sample(range(len(dependentVar)), int(len(dependentVar) * subsetPercentage) ) ) terminalExpansion dependentSubset = dependentVar[subset] independentSubset = independentVar[subset] minIndVar = independentSubset[minKnotPoints] maxIndVar = independentSubset[-minKnotPoints] knots = [float(i) * (maxIndVar-minIndVar) / initialKnots + minIndVar for i in range(1, initialKnots) ] ## remove knots with less then minKnotPoints data points ## lastKnot = knots[0] newKnotList = [lastKnot] for knotPos in range(1,len(knots)): nextKnot = knots[knotPos] numHits = (len(independentSubset[(independentSubset >= lastKnot) & (independentSubset <= nextKnot)]) ) if numHits >= minKnotPoints: newKnotList.append(nextKnot) lastKnot = nextKnot knots = newKnotList spline = LSQUnivariateSpline(independentSubset, dependentSubset, knots, k=splineOrder) splineList.append(spline) return splineList" 3054,"def tolerantArrayMatching(referenceArray, matchArray, matchTolerance, matchUnit): """"""#TODO: docstring Note: arrays must be sorted :param referenceArray: #TODO: docstring :param matchArray: #TODO: docstring :param matchTolerance: #TODO: docstring :param matchUnit: #TODO: docstring :returns: #TODO: docstring #TODO: change matchUnit to ""absolute"", ""relative"" and remove the ""*1e-6"" """""" if matchUnit == 'relative': lowLimMatchArr = matchArray * (1 - matchTolerance) uppLimMatchArr = matchArray * (1 + matchTolerance) elif matchUnit == 'ppm': lowLimMatchArr = matchArray * (1 - matchTolerance*1e-6) uppLimMatchArr = matchArray * (1 + matchTolerance*1e-6) elif matchUnit == 'da': lowLimMatchArr = matchArray - matchTolerance uppLimMatchArr = matchArray + matchTolerance else: raise Exception('wrong matchUnit type specified (da or ppm): ', matchUnit) lowerLimitMask = numpy.zeros_like(matchArray, dtype=int) upperLimitMask = numpy.zeros_like(matchArray, dtype=int) refPosLow = int() maxReferenceValue = referenceArray[-1] for matchPos, (lowerMatch, upperMatch) in enumerate(zip(lowLimMatchArr, uppLimMatchArr ) ): if lowerMatch < maxReferenceValue: while referenceArray[refPosLow] < lowerMatch: refPosLow += 1 refPosHigh = refPosLow #Try except statement because this case can only happen once at the #end of the array try: while referenceArray[refPosHigh] <= upperMatch: refPosHigh += 1 except IndexError: refPosHigh = len(referenceArray) - 1 lowerLimitMask[matchPos] = refPosLow upperLimitMask[matchPos] = refPosHigh else: refPosLow = len(referenceArray) - 1 refPosHigh 
= len(referenceArray) - 1 lowerLimitMask[matchPos] = refPosLow upperLimitMask[matchPos] = refPosHigh break matchPos += 1 lowerLimitMask[matchPos:len(matchArray)] = refPosLow upperLimitMask[matchPos:len(matchArray)] = refPosHigh return lowerLimitMask, upperLimitMask" 3055,"def open(self, filepath, mode='w+b'): """"""Opens a file - will actually return a temporary file but replace the original file when the context is closed. """""" #Check if the filepath can be accessed and is writable before creating #the tempfile if not _isFileAccessible(filepath): raise IOError('File %s is not writable' % (filepath,)) if filepath in self._files: with open(self._files[filepath], mode=mode) as tmpf: yield tmpf else: tempfilepath = None with tempfile.NamedTemporaryFile(delete=False, mode=mode) as tmpf: tempfilepath = tmpf.name yield tmpf self._files[filepath] = tempfilepath" 3056,"def default(self, obj): """""" :returns: obj._reprJSON() if it is defined, else json.JSONEncoder.default(obj) """""" if hasattr(obj, '_reprJSON'): return obj._reprJSON() #Let the base class default method raise the TypeError return json.JSONEncoder.default(self, obj)" 3057,"def processInput(self, dataAveraging=False, windowSize=None): """""" #TODO: docstring :param dataAveraging: #TODO: docstring :param windowSize: #TODO: docstring """""" self.dependentVar = numpy.array(self.dependentVarInput, dtype=numpy.float64 ) self.independentVar = numpy.array(self.independentVarInput, dtype=numpy.float64 ) sortMask = self.independentVar.argsort() self.dependentVar = self.dependentVar[sortMask] self.independentVar = self.independentVar[sortMask] if dataAveraging: averagedData = averagingData(self.dependentVar, windowSize=windowSize, averagingType=dataAveraging ) averagedData = numpy.array(averagedData, dtype=numpy.float64) missingNumHigh = numpy.floor((self.independentVar.size - averagedData.size ) / 2 ) missingNumLow = ((self.independentVar.size - averagedData.size) - missingNumHigh ) self.dependentVar = averagedData self.independentVar = self.independentVar[missingNumLow: -missingNumHigh]" 3058,"def generateSplines(self): """"""#TODO: docstring """""" _ = returnSplineList(self.dependentVar, self.independentVar, subsetPercentage=self.splineSubsetPercentage, cycles=self.splineCycles, minKnotPoints=self.splineMinKnotPoins, initialKnots=self.splineInitialKnots, splineOrder=self.splineOrder, terminalExpansion=self.splineTerminalExpansion ) self.splines = _" 3059,"def corrArray(self, inputArray): """"""#TODO: docstring :param inputArray: #TODO: docstring :returns: #TODO docstring """""" outputArray = numpy.vstack([numpy.nan_to_num(currSpline(inputArray)) for currSpline in self.splines ]).mean(axis=0) return outputArray" 3060,"def fixminimized(self, alphabet): """""" After pyfst minimization, all unused arcs are removed, and all sink states are removed. However this may break compatibility. 
Args: alphabet (list): The input alphabet Returns: None """""" endstate = len(list(self.states)) for state in self.states: for char in alphabet: found = 0 for arc in state.arcs: if self.isyms.find(arc.ilabel) == char: found = 1 break if found == 0: self.add_arc(state.stateid, endstate, char) self[endstate].final = TropicalWeight(float('inf')) for char in alphabet: self.add_arc(endstate, endstate, char)" 3061,"def _path_to_str(self, path): """""" Convert a path to the string representing the path Args: path (tuple): A tuple of arcs Returns: inp (str): The path concatenated as a string """""" inp = '' for arc in path: i = self.isyms.find(arc.ilabel) # Ignore \epsilon transitions both on input if i != fst.EPSILON: inp += i return inp" 3062,"def init_from_acceptor(self, acceptor): """""" Initialize this automaton from an acceptor Args: acceptor: The acceptor automaton whose states and arcs are copied Returns: None """""" states = sorted( acceptor.states, key=attrgetter('initial'), reverse=True) for state in states: for arc in state.arcs: itext = acceptor.isyms.find(arc.ilabel) if itext in self.alphabet: self.add_arc(state.stateid, arc.nextstate, itext) if state.final: self[state.stateid].final = True if state.initial: self[state.stateid].initial = True" 3063,"def consume_input(self, inp): """""" Return True/False if the machine accepts/rejects the input. Args: inp (str): input string to be consumed Returns: bool: A true or false value depending on if the DFA accepts the provided input """""" cur_state = sorted( self.states, key=attrgetter('initial'), reverse=True)[0] while len(inp) > 0: found = False for arc in cur_state.arcs: if self.isyms.find(arc.ilabel) == inp[0]: cur_state = self[arc.nextstate] inp = inp[1:] found = True break if not found: return False return cur_state.final != TropicalWeight(float('inf'))" 3064,"def random_strings(self, string_length=1): """""" Generate string_length random strings that belong to the automaton. Args: string_length (integer): The size of the random string Returns: list: The generated strings """""" str_list = [] for path in self.uniform_generate(string_length): str_list.append(self._path_to_str(path)) return str_list" 3065,"def save(self, txt_fst_filename): """""" Save the machine in the openFST format in the file denoted by txt_fst_filename. Args: txt_fst_filename (str): The name of the file Returns: None """""" txt_fst = open(txt_fst_filename, 'w+') states = sorted(self.states, key=attrgetter('initial'), reverse=True) for state in states: for arc in state.arcs: itext = self.isyms.find(arc.ilabel) otext = self.osyms.find(arc.ilabel) txt_fst.write( '{}\t{}\t{}\t{}\n'.format( state.stateid, arc.nextstate, itext.encode('hex'), otext.encode('hex'))) if state.final: txt_fst.write('{}\n'.format(state.stateid)) txt_fst.close()" 3066,"def load(self, txt_fst_filename): """""" Load the transducer in the text file format of OpenFST. 
The format is specified as follows: arc format: src dest ilabel olabel [weight] final state format: state [weight] lines may occur in any order except initial state must be first line Args: txt_fst_filename (string): The name of the file Returns: None """""" with open(txt_fst_filename, 'r') as txt_fst: for line in txt_fst: line = line.strip() splitted_line = line.split() if len(splitted_line) == 1: self[int(splitted_line[0])].final = True else: self.add_arc(int(splitted_line[0]), int( splitted_line[1]), splitted_line[2].decode('hex'))" 3067,"def persistent_menu(menu): """""" more: https://developers.facebook.com/docs/messenger-platform/thread-settings/persistent-menu :param menu: :return: """""" if len(menu) > 3: raise Invalid('menu should not exceed 3 call to actions') if any(len(item['call_to_actions']) > 5 for item in menu if item['type'] == 'nested'): raise Invalid('call_to_actions is limited to 5 for sub-levels') for item in menu: if len(item['title']) > 30: raise Invalid('menu item title should not exceed 30 characters') if item['type'] == 'postback' and len(item['payload']) > 1000: raise Invalid('menu item payload should not exceed 1000 characters')" 3068,"def send_text_message(text, quick_replies): """""" more: https://developers.facebook.com/docs/messenger-platform/send-api-reference/text-message and https://developers.facebook.com/docs/messenger-platform/send-api-reference/quick-replies :param text: :param quick_replies: :return: """""" if len(text) > 640: raise ExceedLengthException( 'send message text should not exceed 640 character limit', limit=640, ) if isinstance(quick_replies, list): if len(quick_replies) > 10: raise Invalid('send message quick replies should not exceed 10 limit') for item in quick_replies: if 'content_type' not in item: raise Invalid('send message quick replies should have content_type') if item['content_type'] == 'text': if len(item['title']) > 20: raise Invalid('send message quick replies title should not exceed 20 character limit') if len(item['payload']) > 1000: raise Invalid('send message quick replies payload should not exceed 1000 character limit')" 3069,"def main(): """"""Main Function"""""" alphabet = list(string.lowercase) # + [""<"", "">""] # Create an SFA for the regular expression .*<t>.* sfa = SFA(alphabet) # sfa.add_arc(0,0,SetPredicate([ i for i in alphabet if i != ""<"" ])) # sfa.add_arc(0,1,SetPredicate(list(""<""))) # # sfa.add_arc(1,2,SetPredicate(list(""t""))) # sfa.add_arc(1,0,SetPredicate([ i for i in alphabet if i != ""t"" ])) # # sfa.add_arc(2,3,SetPredicate(list("">""))) # sfa.add_arc(2,0,SetPredicate([ i for i in alphabet if i != "">"" ])) # # sfa.add_arc(3,3,SetPredicate(alphabet)) # # sfa.states[3].final = True sfa.add_arc(0, 7, SetPredicate([i for i in alphabet if i != ""d"" and i != ""input_string""])) sfa.add_arc(1, 7, SetPredicate([i for i in alphabet if i != ""i""])) sfa.add_arc(2, 7, SetPredicate([i for i in alphabet if i != ""p""])) sfa.add_arc(3, 7, SetPredicate([i for i in alphabet if i != ""v""])) sfa.add_arc(5, 7, SetPredicate(list(alphabet))) sfa.add_arc(4, 7, SetPredicate([i for i in alphabet if i != ""a""])) sfa.add_arc(6, 7, SetPredicate([i for i in alphabet if i != ""n""])) sfa.add_arc(7, 7, SetPredicate(list(alphabet))) sfa.add_arc(0, 1, SetPredicate(list(""d""))) sfa.add_arc(1, 3, SetPredicate(list(""i""))) sfa.add_arc(3, 5, SetPredicate(list(""v""))) sfa.add_arc(0, 2, SetPredicate(list(""input_string""))) sfa.add_arc(2, 4, SetPredicate(list(""p""))) sfa.add_arc(4, 6, SetPredicate(list(""a""))) sfa.add_arc(6, 
5, SetPredicate(list(""n""))) sfa.states[5].final = True dfa = sfa.concretize() # dfa.minimize() dfa.save('concrete_re_sfa.dfa') # Consume some input input_string = ""koukouroukou"" print 'SFA-DFA result on {}: {} - {}'.format(input_string, sfa.consume_input(input_string), dfa.consume_input(input_string)) input_string = ""divspan"" print 'SFA-DFA result on {}: {} - {}'.format(input_string, sfa.consume_input(input_string), dfa.consume_input(input_string)) input_string = ""div"" print 'SFA-DFA result on {}: {} - {}'.format(input_string, sfa.consume_input(input_string), dfa.consume_input(input_string)) input_string = ""span"" print 'SFA-DFA result on {}: {} - {}'.format(input_string, sfa.consume_input(input_string), dfa.consume_input(input_string))" 3070,"def refactor(self, symbol, value): """""" Args: symbol: value: Returns: None """""" if value: self.pset.add(symbol) else: self.pset.remove(symbol)" 3071,"def add_state(self): """"""This function adds a new state"""""" sid = len(self.states) self.states.append(SFAState(sid))" 3072,"def add_arc(self, src, dst, char): """""" This function adds a new arc in a SFA state Args: src (int): The source state identifier dst (int): The destination state identifier char (str): The transition symbol Returns: None """""" assert type(src) == type(int()) and type(dst) == type(int()), \ ""State type should be integer."" while src >= len(self.states) or dst >= len(self.states): self.add_state() self.states[src].arcs.append(SFAArc(src, dst, char))" 3073,"def consume_input(self, inp): """""" Return True/False if the machine accepts/reject the input. Args: inp (str): input string to be consumed Retunrs: bool: A true or false value depending on if the DFA accepts the provided input """""" cur_state = self.states[0] for character in inp: found = False for arc in cur_state.arcs: if arc.guard.is_sat(character): cur_state = self.states[arc.dst_state] found = True break if not found: raise RuntimeError('SFA not complete') return cur_state.final" 3074,"def concretize(self): """""" Transforms the SFA into a DFA Args: None Returns: DFA: The generated DFA """""" dfa = DFA(self.alphabet) for state in self.states: for arc in state.arcs: for char in arc.guard: dfa.add_arc(arc.src_state, arc.dst_state, char) for i in xrange(len(self.states)): if self.states[i].final: dfa[i].final = True return dfa" 3075,"def _write(self, ret): """""" This function needs to correspond to this: https://github.com/saltstack/salt/blob/develop/salt/returners/redis_return.py#L88 """""" self.redis.set('{0}:{1}'.format(ret['id'], ret['jid']), json.dumps(ret)) self.redis.lpush('{0}:{1}'.format(ret['id'], ret['fun']), ret['jid']) self.redis.sadd('minions', ret['id']) self.redis.sadd('jids', ret['jid'])" 3076,"def _initAddons(cls, recurse=True): """""" Initializes the addons for this manager. """""" for addon_module in cls.addonModules(recurse): projex.importmodules(addon_module)" 3077,"def addons(cls, recurse=True): """""" Returns a dictionary containing all the available addons for this mixin class. If the optional recurse flag is set to True, then all the base classes will be searched for the given addon as well. 
:param recurse | <bool> :return {<str> name: <variant> addon, ..} """""" cls.initAddons() prop = '_{0}__addons'.format(cls.__name__) out = {} # lookup base classes if recurse: for base in cls.__bases__: if issubclass(base, AddonManager): out.update(base.addons(recurse)) # always use the highest level for any given key out.update(getattr(cls, prop, {})) return out" 3078,"def addonModules(cls, recurse=True): """""" Returns all the modules that this addon class uses to load plugins from. :param recurse | <bool> :return [<str> || <module>, ..] """""" prop = '_{0}__addon_modules'.format(cls.__name__) out = set() # lookup base classes if recurse: for base in cls.__bases__: if issubclass(base, AddonManager): out.update(base.addonModules(recurse)) # always use the highest level for any given key out.update(getattr(cls, prop, set())) return out" 3079,"def byName(cls, name, recurse=True, default=None): """""" Returns the addon whose name matches the inputted name. If the optional recurse flag is set to True, then all the base classes will be searched for the given addon as well. If no addon is found, the default is returned. :param name | <str> recurse | <bool> default | <variant> """""" cls.initAddons() prop = '_{0}__addons'.format(cls.__name__) try: return getattr(cls, prop, {})[name] except KeyError: if recurse: for base in cls.__bases__: if issubclass(base, AddonManager): return base.byName(name, recurse) return default" 3080,"def initAddons(cls, recurse=True): """""" Loads different addon modules for this class. This method should not be overloaded in a subclass as it also manages the loaded state to avoid duplicate loads. Instead, you can re-implement the _initAddons method for custom loading. :param recurse | <bool> """""" key = '_{0}__addons_loaded'.format(cls.__name__) if getattr(cls, key, False): return cls._initAddons(recurse) setattr(cls, key, True)" 3081,"def registerAddon(cls, name, addon, force=False): """""" Registers the inputted addon to the class. :param name | <str> addon | <variant> """""" prop = '_{0}__addons'.format(cls.__name__) cmds = getattr(cls, prop, {}) if name in cmds and not force: raise errors.AddonAlreadyExists(cls, name, addon) cmds[name] = addon try: if issubclass(addon, cls): setattr(addon, '_{0}__addonName'.format(addon.__name__), name) except StandardError: pass setattr(cls, prop, cmds)" 3082,"def registerAddonModule(cls, module): """""" Registers a module to use to import addon subclasses from. :param module | <str> || <module> """""" prop = '_{0}__addon_modules'.format(cls.__name__) mods = getattr(cls, prop, set()) mods.add(module) setattr(cls, prop, mods)" 3083,"def unregisterAddon(cls, name): """""" Unregisters the addon defined by the given name from the class. :param name | <str> """""" prop = '_{0}__addons'.format(cls.__name__) cmds = getattr(cls, prop, {}) cmds.pop(name, None)" 3084,"def unregisterAddonModule(cls, module): """""" Unregisters the module to use to import addon subclasses from. :param module | <str> || <module> """""" prop = '_{0}__addon_modules'.format(cls.__name__) mods = getattr(cls, prop, set()) try: mods.remove(module) except KeyError: pass" 3085,"def emit(self, record): """""" Throws an error based on the information that the logger reported, given the logging level. 
:param record: <logging.LogRecord> """""" if not logging.raiseExceptions: return logger = logging.getLogger(record.name) # raise an exception based on the error logging if logger.level <= record.levelno: err = record.msg[0] if not isinstance(err, Exception): err = ProjexError(nstr(record.msg)) # log the traceback info data = record.__dict__.copy() data['type'] = type(err).__name__ msg = ERROR_MESSAGE % data sys.stderr.write(msg) raise err" 3086,"def cli(ctx, stage): """"""listen to push requests for src and pull requests from target (experimental)"""""" if not ctx.bubble: ctx.say_yellow('There is no bubble present, will not listen') raise click.Abort() SRC = None if stage in STAGES: try: SRC = ctx.cfg.CFG[stage].SOURCE except KeyError: pass if not SRC: ctx.say_red('There is no SOURCE in stage:' + stage) ctx.say_yellow('please check configuration in ' + ctx.home + '/config/config.yaml') raise click.Abort() if 'SERVER' not in SRC: ctx.say_red('There is no SOURCE.SERVER in stage:' + stage) raise click.Abort() src_server = get_server(SRC.SERVER, ctx.home) # connect storage / pipeline to target via transform # write state listening on port etc into def message_handler(**m): print(str(arrow.now), str(m)) return True, 'handled' try: # TODO: bg & # src_listening = src_server.listen(cfg=SRC, src_server.listen(cfg=SRC, push_handler=message_handler, pull_handler=message_handler) except Exception as e: ctx.say_red( 'cannot listen from source client bubble.clients.' + SRC.SERVER) ctx.say_red(str(e)) raise click.Abort('cannot listen')" 3087,"def get_field_label_css_class(self, bound_field): """""" Returns 'form-check-label' if widget is CheckboxInput. For all other fields, no css class is added. """""" # If we render CheckboxInputs, Bootstrap requires a different # field label css class for checkboxes. if isinstance(bound_field.field.widget, forms.CheckboxInput): return 'form-check-label' return super().get_field_label_css_class(bound_field)" 3088,"def get_widget_css_class(self, field_name, field): """""" Returns 'form-check-input' if widget is CheckboxInput or 'form-control-file' if widget is FileInput. For all other fields return the default value from the form property (""form-control""). """""" # If we render CheckboxInputs, Bootstrap requires a different # widget css class for checkboxes. if isinstance(field.widget, forms.CheckboxInput): return 'form-check-input' # Idem for fileinput. if isinstance(field.widget, forms.FileInput): return 'form-control-file' return super().get_widget_css_class(field_name, field)" 3089,"def handle(self): """""" Handle a message :return: True if success, False otherwise """""" if self.component_type == StreamComponent.SOURCE: msg = self.handler_function() return self.__send(msg) logger = self.logger data = self.__receive() if data is None: return False else: logger.debug(""Calling %s "" % self.handler_function) result = self.handler_function(data.decode(self.char_encoding)) if self.component_type == StreamComponent.PROCESSOR: logger.debug(""Sending p3:%s %s %s"" % (PYTHON3, result, str(type(result)))) if not self.__send(result): return False return True" 3090,"def realpath_with_context(path, context): """""" Convert a path into its realpath: * For relative path: use :attr:`context.workdir` as root directory * For absolute path: Pass-through without any changes. :param path: Filepath to convert (as string). :param context: Behave context object (with :attr:`context.workdir`) :return: Converted path. 
"""""" if not os.path.isabs(path): # XXX ensure_workdir_exists(context) assert context.workdir path = os.path.join(context.workdir, os.path.normpath(path)) return path" 3091,"def posixpath_normpath(pathname): """""" Convert path into POSIX path: * Normalize path * Replace backslash with slash :param pathname: Pathname (as string) :return: Normalized POSIX path. """""" backslash = '\\' pathname2 = os.path.normpath(pathname) or ""."" if backslash in pathname2: pathname2 = pathname2.replace(backslash, '/') return pathname2" 3092,"def create_textfile_with_contents(filename, contents, encoding='utf-8'): """""" Creates a textual file with the provided contents in the workdir. Overwrites an existing file. """""" ensure_directory_exists(os.path.dirname(filename)) if os.path.exists(filename): os.remove(filename) outstream = codecs.open(filename, ""w"", encoding) outstream.write(contents) if contents and not contents.endswith(""\n""): outstream.write(""\n"") outstream.flush() outstream.close() assert os.path.exists(filename), ""ENSURE file exists: %s"" % filename" 3093,"def ensure_directory_exists(dirname, context=None): """""" Ensures that a directory exits. If it does not exist, it is automatically created. """""" real_dirname = dirname if context: real_dirname = realpath_with_context(dirname, context) if not os.path.exists(real_dirname): os.makedirs(real_dirname) assert os.path.exists(real_dirname), ""ENSURE dir exists: %s"" % dirname assert os.path.isdir(real_dirname), ""ENSURE isa dir: %s"" % dirname" 3094,"def p_andnode_expression(self, t): '''andnode_expression : LB identlist RB ''' self.accu.add(Term('vertex', [""and(\""""+t[2]+""\"")""])) t[0] = ""and(\""""+t[2]+""\"")""" 3095,"def p_identlist(self, t): '''identlist : IDENT | NOT IDENT | IDENT AND identlist | NOT IDENT AND identlist ''' if len(t)==5 : #print(t[1],t[2],t[3],t[4]) t[0] = t[1]+t[2]+t[3]+t[4] elif len(t)==4 : #print(t[1],t[2],t[3]) t[0] = t[1]+t[2]+t[3] elif len(t)==3 : #print(t[1],t[2]) t[0] = t[1]+t[2] elif len(t)==2 : #print(t[0],t[1]) t[0]=t[1] else: print(""Syntax error at '"",str(t),""'"")" 3096,"def deserialize(self, msg): 'deserialize output to a Python object' self.logger.debug('deserializing %s', msg) return json.loads(msg)" 3097,"def append_request_id(req, resp, resource, params): """"""Append request id which got from response header to resource.req_ids list. """""" def get_headers(resp): if hasattr(resp, 'headers'): return resp.headers if hasattr(resp, '_headers'): return resp._headers return None if(isinstance(resp, Response) or (get_headers(resp) is not None)): # Extract 'x-request-id' from headers if # response is a Response object. request_id = get_headers(resp).get('x-request-id') else: # If resp is of type string or None. 
request_id = resp if resource.req_ids is None: resource.req_ids = [] if request_id not in resource.req_ids: resource.req_ids.append(request_id)" 3098,"def _sanitizer(self, obj): """"""Sanitizer method that will be passed to json.dumps."""""" if isinstance(obj, datetime.datetime): return obj.isoformat() if hasattr(obj, ""to_dict""): return obj.to_dict() return obj" 3099,"def make_uniq_for_step(ctx, ukeys, step, stage, full_data, clean_missing_after_seconds, to_uniq): """"""initially just a copy from UNIQ_PULL"""""" # TODO: # this still seems to work ok for Storage types json/bubble, # for DS we need to reload de dumped step to uniqify if not ukeys: return to_uniq else: uniq_data = bubble_lod_load(ctx, step, stage) ctx.say('Creating uniq identifiers for [' + step + '] information', 0) ctx.gbc.say('uniq_data:', stuff=uniq_data, verbosity=1000) # TODO:make: data->keyed.items uniq_step_res = make_uniq(ctx=ctx, ldict=to_uniq, keyed=uniq_data, uniqstr=ukeys, tag=step, full_data=full_data, remove_missing_after_seconds=clean_missing_after_seconds) ctx.gbc.say('uniq_step_res:', stuff=uniq_step_res, verbosity=1000) to_uniq_newest = get_newest_uniq(ctx.gbc, uniq_step_res) # TODO: selected pulled only from slice of uniq # PROBLEM: slice of pull is not equal to slice of newest uniq, # can only select keys from newest, from slice of pulled # need a uid list from to_transform # to_transform = get_gen_slice(gbc, to_transform_newest, amount, index) # for now not a big problem, as with 'pump' there should be no problem to_uniq = to_uniq_newest # todo make keyed.items->data uniq_res_list = get_uniq_list(ctx.gbc, uniq_step_res) reset = True pfr = bubble_lod_dump(ctx=ctx, step=step, stage=stage, full_data=full_data, reset=reset, data_gen=uniq_res_list) ctx.gbc.say('saved uniq ' + step + ' data res:', stuff=pfr, verbosity=700) return to_uniq" 3100,"def list_nic(self, instance_id): """"""List all Network Interface Controller"""""" output = self.client.describe_instances(InstanceIds=[instance_id]) output = output.get(""Reservations"")[0].get(""Instances"")[0] return output.get(""NetworkInterfaces"")" 3101,"def list_ip(self, instance_id): """"""Add all IPs"""""" output = self.client.describe_instances(InstanceIds=[instance_id]) output = output.get(""Reservations"")[0].get(""Instances"")[0] ips = {} ips['PrivateIp'] = output.get(""PrivateIpAddress"") ips['PublicIp'] = output.get(""PublicIpAddress"") return ips" 3102,"def main(): """""" Testing function for Flex Regular Expressions to FST DFA """""" if len(argv) < 2: print 'Usage: %s fst_file [optional: save_file]' % argv[0] return flex_a = Flexparser() mma = flex_a.yyparse(argv[1]) mma.minimize() print mma if len(argv) == 3: mma.save(argv[2])" 3103,"def _read_transitions(self): """""" Read DFA transitions from flex compiled file Args: None Returns: list: The list of states and the destination for a character """""" states = [] i = 0 regex = re.compile('[ \t\n\r:,]+') found = 0 # For maintaining the state of yy_nxt declaration state = 0 # For maintaining the state of opening and closing tag of yy_nxt substate = 0 # For maintaining the state of opening and closing tag of each set in yy_nxt mapping = [] # For writing each set of yy_next cur_line = None with open(self.outfile) as flex_file: for cur_line in flex_file: if cur_line[0:35] == ""static yyconst flex_int16_t yy_nxt["" or cur_line[0:33] == ""static const flex_int16_t yy_nxt["": found = 1 # print 'Found yy_next declaration' continue if found == 1: if state == 0 and cur_line[0:5] == "" {"": state = 1 continue if 
state == 1 and cur_line[0:7] == "" } ;"": state = 0 break if substate == 0 and cur_line[0:5] == "" {"": mapping = [] substate = 1 continue if substate == 1: if cur_line[0:6] != "" },"": cur_line = """".join(cur_line.split()) if cur_line == '': continue if cur_line[cur_line.__len__() - 1] == ',': splitted_line = regex.split( cur_line[:cur_line.__len__() - 1]) else: splitted_line = regex.split(cur_line) mapping = mapping + splitted_line continue else: cleared = [] for j in mapping: cleared.append(int(j)) states.append(cleared) mapping = [] substate = 0 return states" 3104,"def _read_accept_states(self): """""" Read DFA accepted states from flex compiled file Args: None Returns: list: The list of accepted states """""" states = [] i = 0 regex = re.compile('[ \t\n\r:,]+') found = 0 # For maintaining the state of yy_accept declaration state = 0 # For maintaining the state of opening and closing tag of yy_accept mapping = [] # For writing each set of yy_accept cur_line = None with open(self.outfile) as flex_file: for cur_line in flex_file: if cur_line[0:37] == ""static yyconst flex_int16_t yy_accept"" or cur_line[0:35] == ""static const flex_int16_t yy_accept"": found = 1 continue if found == 1: # print x if state == 0 and cur_line[0:5] == "" {"": mapping.append(0) # there is always a zero there state = 1 continue if state == 1: if cur_line[0:7] != "" } ;"": cur_line = """".join(cur_line.split()) if cur_line == '': continue if cur_line[cur_line.__len__() - 1] == ',': splitted_line = regex.split( cur_line[:cur_line.__len__() - 1]) else: splitted_line = regex.split(cur_line) mapping = mapping + splitted_line continue else: cleared = [] for j in mapping: cleared.append(int(j)) max_value = max(cleared) for i in range(0, len(cleared)): if cleared[i] > 0 and cleared[ i] < (max_value - 1): states.append(i) return states return []" 3105,"def _create_states(self, states_num): """""" Args: states_num (int): Number of States Returns: list: An initialized list """""" states = [] for i in range(0, states_num): states.append(i) return states" 3106,"def _add_sink_state(self, states): """""" This function adds a sing state in the total states Args: states (list): The current states Returns: None """""" cleared = [] for i in range(0, 128): cleared.append(-1) states.append(cleared)" 3107,"def _create_delta(self): """""" This function creates the delta transition Args: startState (int): Initial state of automaton Results: int, func: A number indicating the total states, and the delta function """""" states = self._read_transitions() total_states = len(states) self._add_sink_state(states) nulltrans = self._read_null_transitions() def delta(current_state, character): """""" Sub function describing the transitions Args: current_state (str): The current state character (str): The input character Returns: str: The next state """""" if character != '': newstate = states[current_state][ord(character)] if newstate > 0: return newstate else: return total_states else: return nulltrans[current_state] return total_states + 1, delta" 3108,"def yyparse(self, lexfile): """""" Args: lexfile (str): Flex file to be parsed Returns: DFA: A dfa automaton """""" temp = tempfile.gettempdir() self.outfile = temp+'/'+''.join( random.choice( string.ascii_uppercase + string.digits) for _ in range(5)) + '_lex.yy.c' self._create_automaton_from_regex(lexfile) states_num, delta = self._create_delta() states = self._create_states(states_num) accepted_states = self._read_accept_states() if self.alphabet != []: alphabet = self.alphabet else: 
alphabet = createalphabet() mma = DFA(alphabet) for state in states: if state != 0: for char in alphabet: nextstate = delta(state, char) mma.add_arc(state - 1, nextstate - 1, char) if state in accepted_states: mma[state - 1].final = True if os.path.exists(self.outfile): os.remove(self.outfile) return mma" 3109,"def set_path(ctx, path_str, value, data): """""" Sets the given key in the given dict object to the given value. If the given path is nested, child dicts are created as appropriate. Accepts either a dot-delimited path or an array of path elements as the `path` variable. """""" ctx.gbc.say('set_path:value:' + str(value) + ' at:' + path_str + ' in:', stuff=data, verbosity=1001) path = path_str.split('.') ctx.gbc.say('path:', stuff=path, verbosity=100) if len(path) > 1: destk = '.'.join(path[0:-1]) lp = path[-1] ctx.gbc.say('destk:%s' % destk, verbosity=100) ctx.gbc.say('last:%s' % lp, verbosity=100) ctx.gbc.say('current keys:', stuff=data.keys(), verbosity=1001) if len(path) > 2: destk = unescape(ctx, destk) if destk not in data.keys(): ctx.gbc.say('destk not in current keys:', stuff=data.keys(), verbosity=1001) data[destk] = {} ctx.gbc.say('destk not added:', stuff=data, verbosity=1001) lp = unescape(ctx, lp) data[destk][lp] = value ctx.gbc.say('destk and val added:', stuff=data, verbosity=1001) else: path_str = unescape(ctx, path_str) data[path_str] = value ctx.gbc.say('set_path:res:', stuff=data, verbosity=1001) return data" 3110,"def cli(ctx, setkv, copyk, delk,showtype): """"""Show or change the configuration"""""" if not ctx.bubble: ctx.say_yellow( 'There is no bubble present, will not show or set the config') raise click.Abort() new_cfg = flat(ctx, ctx.cfg) ctx.say('current config', stuff=ctx.cfg, verbosity=10) ctx.say('current flat config with meta', stuff=new_cfg, verbosity=100) new_cfg_no_meta = {} meta_ends = ['_doct_as_key', '_doct_level', '___bts_flat_', '___bts_flat_star_path_', '___bts_flat_star_select_'] lkeys = list(new_cfg.keys()) for k in lkeys: addkey = True # ctx.say('k:'+k) if k.startswith('___bts_'): addkey = False for meta_end in meta_ends: if k.endswith(meta_end): addkey = False if addkey: # ctx.say('adding k:'+k) new_cfg_no_meta[k] = new_cfg[k] else: pass # ctx.say('not adding meta k:'+k) ctx.say('current flat config without metakeys', stuff=new_cfg_no_meta, verbosity=3) if not setkv and not copyk and not delk: ctx.say('current configuration') for k, v in new_cfg_no_meta.items(): tstr='' if showtype: tstr=' type: '+TYPES[str(type(v))] ctx.say(' '+k+': '+str(v)+tstr) modified = 0 if setkv: for key, value,vtype in setkv: ctx.say('setting k:%s,v:%s,t:%s'%(key,value,vtype)) vtval='VALUE_NOT_SET' try: if vtype==""STRING"": vtval=str(value) if vtype==""INTEGER"": vtval=int(value) if vtype==""FLOAT"": vtval=float(value) if vtype==""BOOLEAN"": if value.lower() in TRUES: vtval=True if value.lower() in FALSES: vtval=False if vtval not in [True,False]: ctx.cry(""boolean value must be one of (case insensitive):"", stuff={'True':TRUES,'False':FALSES}) raise TypeError() except Exception as e: ctx.cry('cannot set k:%s,v:%s,t:%s:'%(key,value,vtype)) raise e if vtval != 'VALUE_NOT_SET': new_cfg[str(key)] = vtval modified += 1 else: ctx.cry('cannot set k:%s,v:%s,t:%s:typed value is not set yet'%(key,value,vtype)) if copyk: for srckey, destkey in copyk: if srckey.endswith('.*'): src_val = get_flat_path(ctx, new_cfg, srckey) for k in src_val: # TODO: use magic for sep sep = '.' 
new_cfg[str(destkey + sep + k)] = str(src_val[k]) modified += 1 else: if srckey in new_cfg: new_cfg[str(destkey)] = new_cfg[srckey] modified += 1 if delk: if delk.endswith('.*'): # fix PY3: RuntimeError: dictionary changed size during iteration lkeys = list(new_cfg.keys()) for k in lkeys: if k.startswith(delk[:-2]): del(new_cfg[k]) modified += 1 else: if delk in new_cfg: del(new_cfg[delk]) modified += 1 if modified: ctx.say('new flat config', stuff=new_cfg, verbosity=100) fat_cfg = unflat(ctx, new_cfg) ctx.say('new config, #changes:'+str(modified), verbosity=3) ctx.say('new config', stuff=fat_cfg, verbosity=30) fat_cfg = unflat(ctx, new_cfg) doct_fat_cfg = BubbleDoct(fat_cfg) ctx.say('new config fat doct', stuff=doct_fat_cfg, verbosity=100) res = put_config(ctx, YCFG=BubbleDoct(doct_fat_cfg)) ctx.say('put config res:', stuff=res, verbosity=10) return True" 3111,"def coerce(self, value): """"""Convert text values into integer values. Args: value (str or int): The value to coerce. Raises: TypeError: If the value is not an int or string. ValueError: If the value is not int or an acceptable value. Returns: int: The integer value represented. """""" if isinstance(value, int) or isinstance(value, compat.long): return value return int(value)" 3112,"def has_permission(self): """"""Permission checking for ""normal"" Django."""""" objs = [None] if hasattr(self, 'get_perms_objects'): objs = self.get_perms_objects() else: if hasattr(self, 'get_object'): try: objs = [self.get_object()] except Http404: raise except: pass if objs == [None]: objs = self.get_queryset() if (hasattr(self, 'permission_filter_queryset') and self.permission_filter_queryset is not False and self.request.method == 'GET'): if objs != [None]: self.perms_filter_queryset(objs) return True else: return check_perms(self.request.user, self.get_permission_required(), objs, self.request.method)" 3113,"def check_permissions(self, request): """"""Permission checking for DRF."""""" objs = [None] if hasattr(self, 'get_perms_objects'): objs = self.get_perms_objects() else: if hasattr(self, 'get_object'): try: objs = [self.get_object()] except Http404: raise except: pass if objs == [None]: objs = self.get_queryset() if len(objs) == 0: objs = [None] if (hasattr(self, 'permission_filter_queryset') and self.permission_filter_queryset is not False and self.request.method == 'GET'): if objs != [None]: self.perms_filter_queryset(objs) else: has_perm = check_perms(self.request.user, self.get_permission_required(), objs, self.request.method) if not has_perm: msg = self.get_permission_denied_message( default=""Permission denied."" ) if isinstance(msg, Sequence): msg = msg[0] self.permission_denied(request, message=msg)" 3114,"def _hashed_key(self): """""" Returns 16-digit numeric hash of the redis key """""" return abs(int(hashlib.md5( self.key_prefix.encode('utf8') ).hexdigest(), 16)) % (10 ** ( self._size_mod if hasattr(self, '_size_mod') else 5))" 3115,"def expire_at(self, _time): """""" Sets the expiration time of :prop:key_prefix to @_time @_time: absolute Unix timestamp (seconds since January 1, 1970) """""" return self._client.expireat(self.key_prefix, round(_time))" 3116,"def pexpire_at(self, _time): """""" Sets the expiration time of :prop:key_prefix to @_time @_time: absolute Unix timestamp (milliseconds since January 1, 1970) """""" return self._client.pexpireat(self.key_prefix, round(_time))" 3117,"def _decode(self, obj): """""" Decodes @obj using :prop:encoding if :prop:decode_responses """""" if self.decode_responses and isinstance(obj, 
bytes): try: return obj.decode(self.encoding) except UnicodeDecodeError: return obj return obj" 3118,"def _loads(self, string): """""" If :prop:serialized is True, @string will be unserialized using :prop:serializer """""" if not self.serialized: return self._decode(string) if string is not None: try: return self.serializer.loads(string) except TypeError: #: catches bytes errors with the builtin json library return self.serializer.loads(self._decode(string)) except pickle.UnpicklingError as e: #: incr and decr methods create issues when pickle serialized # It's a terrible idea for a serialized instance # to be performing incr and decr methods, but I think # it makes sense to catch the error regardless decoded = self._decode(string) if decoded.isdigit(): return decoded raise pickle.UnpicklingError(e)" 3119,"def _dumps(self, obj): """""" If :prop:serialized is True, @obj will be serialized using :prop:serializer """""" if not self.serialized: return obj return self.serializer.dumps(obj)" 3120,"def get(self, key, default=None): """""" Gets @key from :prop:key_prefix, defaulting to @default """""" try: return self[key] except KeyError: return default or self._default" 3121,"def incr(self, key, by=1): """""" Increments @key by @by -> #int the value of @key after the increment """""" return self._client.incr(self.get_key(key), by)" 3122,"def decr(self, key, by=1): """""" Decrements @key by @by -> #int the value of @key after the decrement """""" return self._client.decr(self.get_key(key), by)" 3123,"def mget(self, *keys): """""" -> #list of values at the specified @keys """""" keys = list(map(self.get_key, keys)) return list(map(self._loads, self._client.mget(*keys)))" 3124,"def update(self, data): """""" Set given keys to their respective values @data: #dict or :class:RedisMap of |{key: value}| entries to set """""" if not data: return _rk, _dumps = self.get_key, self._dumps data = self._client.mset({ _rk(key): _dumps(value) for key, value in data.items()})" 3125,"def set_ttl(self, key, ttl): """""" Sets time to live for @key to @ttl seconds -> #bool True if the timeout was set """""" return self._client.expire(self.get_key(key), ttl)" 3126,"def set_pttl(self, key, ttl): """""" Sets time to live for @key to @ttl milliseconds -> #bool True if the timeout was set """""" return self._client.pexpire(self.get_key(key), ttl)" 3127,"def expire_at(self, key, _time): """""" Sets the expiration time of @key to @_time @_time: absolute Unix timestamp (seconds since January 1, 1970) """""" return self._client.expireat(self.get_key(key), round(_time))" 3128,"def pop(self, key): """""" Removes @key from the instance, returns its value """""" r = self[key] self.remove(key) return r" 3129,"def remove(self, *keys): """""" Deletes @keys from :prop:_client @*keys: keys to remove -> #int the number of keys that were removed """""" keys = list(map(self.get_key, keys)) return self._client.delete(*keys)" 3130,"def scan(self, match=""*"", count=1000, cursor=0): """""" Iterates the set of keys in :prop:key_prefix in :prop:_client @match: #str pattern to match after the :prop:key_prefix @count: the user specified the amount of work that should be done at every call in order to retrieve elements from the collection @cursor: the next cursor position -> #tuple (#int cursor position in scan, #list of full key names) """""" cursor, data = self._client.scan( cursor=cursor, match=""{}:{}"".format(self.key_prefix, match), count=count) return (cursor, list(map(self._decode, data)))" 3131,"def iter(self, match=""*"", count=1000): 
"""""" Iterates the set of keys in :prop:key_prefix in :prop:_client @match: #str pattern to match after the :prop:key_prefix @count: the user specified the amount of work that should be done at every call in order to retrieve elements from the collection -> yields redis keys within this instance """""" replace_this = self.key_prefix+"":"" for key in self._client.scan_iter( match=""{}:{}"".format(self.key_prefix, match), count=count): yield self._decode(key).replace(replace_this, """", 1)" 3132,"def items(self): """""" Iterates the set of |{key: value}| entries in :prop:key_prefix of :prop:_client -> yields redis (key, value) #tuples within this instance """""" cursor = '0' _loads = self._loads _mget = self._client.mget while cursor != 0: cursor, keys = self.scan(cursor=cursor) if keys: vals = _mget(*keys) for i, key in enumerate(keys): yield ( key.replace( self.key_prefix+"":"", """", 1), _loads(vals[i]) )" 3133,"def clear(self, match=""*"", count=1000): """""" Removes all |{key: value}| entries in :prop:key_prefix of :prop:_client """""" cursor = '0' while cursor != 0: cursor, keys = self.scan(cursor=cursor, match=match, count=count) if keys: self._client.delete(*keys)" 3134,"def size(self): """""" -> #int number of keys in this instance """""" return int(self._client.hget(self._bucket_key, self.key_prefix) or 0)" 3135,"def _bucket_key(self): """""" Returns hash bucket key for the redis key """""" return ""{}.size.{}"".format( self.prefix, (self._hashed_key//1000) if self._hashed_key > 1000 else self._hashed_key)" 3136,"def incr(self, key, by=1): """""" :see::meth:RedisMap.incr """""" pipe = self._client.pipeline(transaction=False) pipe.incr(self.get_key(key), by) if key not in self: pipe.hincrby(self._bucket_key, self.key_prefix, 1) result = pipe.execute() return result[0]" 3137,"def update(self, data): """""" :see::meth:RedisMap.update """""" result = None if data: pipe = self._client.pipeline(transaction=False) for k in data.keys(): pipe.exists(self.get_key(k)) exists = pipe.execute() exists = exists.count(True) _rk, _dumps = self.get_key, self._dumps data = { _rk(key): _dumps(value) for key, value in data.items()} pipe.mset(data) pipe.hincrby(self._bucket_key, self.key_prefix, len(data)-exists) result = pipe.execute()[0] return result" 3138,"def clear(self, match=""*"", count=1000): """""" :see:meth:RedisMap.clear """""" cursor = '0' pipe = self._client.pipeline(transaction=False) while cursor != 0: cursor, keys = self.scan(cursor=cursor, match=match, count=count) if keys: pipe.delete(*keys) pipe.hdel(self._bucket_key, self.key_prefix) pipe.execute() return True" 3139,"def get(self, key, default=None): """""" Gets @key from :prop:key_prefix, defaulting to @default """""" result = self._loads(self._client.get(self.get_key(key))) if result is not None: return result else: return default or self._default" 3140,"def incr(self, field, by=1): """""" :see::meth:RedisMap.incr """""" return self._client.hincrby(self.key_prefix, field, by)" 3141,"def decr(self, field, by=1): """""" :see::meth:RedisMap.decr """""" return self._client.hincrby(self.key_prefix, field, by * -1)" 3142,"def mget(self, *keys): """""" -> #list of values at the specified @keys """""" return list(map( self._loads, self._client.hmget(self.key_prefix, *keys)))" 3143,"def all(self): """""" -> #dict of all |{key: value}| entries in :prop:key_prefix of :prop:_client """""" return { self._decode(k): self._loads(v) for k, v in self._client.hgetall(self.key_prefix).items() }" 3144,"def update(self, data): """""" 
:see::meth:RedisMap.update """""" result = None if data: _dumps = self._dumps data = { key: _dumps(value) for key, value in data.items()} result = self._client.hmset(self.key_prefix, data) return result" 3145,"def scan(self, match=""*"", count=1000, cursor=0): """""" :see::meth:RedisMap.scan """""" cursor, results = self._client.hscan( self.key_prefix, cursor=cursor, match=match, count=count) return (cursor, list(map(self._decode, results)))" 3146,"def iter(self, match=""*"", count=1000): """""" :see::meth:RedisMap.iter """""" for field, value in self._client.hscan_iter( self.key_prefix, match=match, count=count): yield self._decode(field)" 3147,"def items(self, match=""*"", count=1000): """""" :see::meth:RedisMap.items """""" for field, value in self._client.hscan_iter( self.key_prefix, match=match, count=count): yield self._decode(field), self._loads(value)" 3148,"def keys(self): """""" :see::meth:RedisMap.keys """""" for field in self._client.hkeys(self.key_prefix): yield self._decode(field)" 3149,"def values(self): """""" :see::meth:RedisMap.keys """""" for val in self._client.hvals(self.key_prefix): yield self._loads(val)" 3150,"def get(self, key, default=None): """""" Gets @key from :prop:key_prefix, defaulting to @default """""" try: result = self._loads(self._client.hget(self.key_prefix, key)) assert result is not None return result except (AssertionError, KeyError): return default or self._default" 3151,"def reverse_iter(self, start=None, stop=None, count=2000): """""" -> yields items of the list in reverse """""" cursor = '0' count = 1000 start = start if start is not None else (-1 * count) stop = stop if stop is not None else -1 _loads = self._loads while cursor: cursor = self._client.lrange(self.key_prefix, start, stop) for x in reversed(cursor or []): yield _loads(x) start -= count stop -= count" 3152,"def reverse(self): """""" In place reverses the list. Very expensive on large data sets. The reversed list will be persisted to the redis :prop:_client as well. """""" tmp_list = RedisList( randint(0, 100000000), prefix=self.key_prefix, client=self._client, serializer=self.serializer, serialized=self.serialized) cursor = '0' count = 1000 start = (-1 * count) stop = -1 _loads = self._loads while cursor: cursor = self._client.lrange(self.key_prefix, start, stop) if cursor: tmp_list.extend(map(_loads, reversed(cursor))) start -= count stop -= count self._client.rename(tmp_list.key_prefix, self.key_prefix) tmp_list.clear()" 3153,"def pop(self, index=None): """""" Removes and returns the item at @index or from the end of the list -> item at @index """""" if index is None: return self._loads(self._client.rpop(self.key_prefix)) elif index == 0: return self._loads(self._client.lpop(self.key_prefix)) else: _uuid = gen_rand_str(16, 24) r = self[index] self[index] = _uuid self.remove(_uuid) return r" 3154,"def extend(self, items): """""" Adds @items to the end of the list -> #int length of list after operation """""" if items: if self.serialized: items = list(map(self._dumps, items)) self._client.rpush(self.key_prefix, *items)" 3155,"def append(self, item): """""" Adds @item to the end of the list -> #int length of list after operation """""" return self._client.rpush(self.key_prefix, self._dumps(item))" 3156,"def count(self, value): """""" Not recommended for use on large lists due to time complexity, but it works. Use with caution. 
-> #int number of occurrences of @value """""" cnt = 0 for x in self: if x == value: cnt += 1 return cnt" 3157,"def push(self, *items): """""" Prepends the list with @items -> #int length of list after operation """""" if self.serialized: items = list(map(self._dumps, items)) return self._client.lpush(self.key_prefix, *items)" 3158,"def index(self, item): """""" Not recommended for use on large lists due to time complexity, but it works -> #int list index of @item """""" for i, x in enumerate(self.iter()): if x == item: return i return None" 3159,"def insert(self, index, value): """""" Inserts @value before @index in the list. @index: list index to insert @value before @value: item to insert -> #int new length of the list on success, or -1 on failure. """""" _uuid = gen_rand_str(24, 32) item_at_index = self[index] self[index] = _uuid uuid = _uuid _uuid = self._dumps(uuid) pipe = self._client.pipeline(transaction=True) # Needs to be atomic pipe.linsert( self.key_prefix, ""BEFORE"", _uuid, self._dumps(value)) pipe.linsert( self.key_prefix, ""BEFORE"", _uuid, item_at_index) results = pipe.execute() self.remove(uuid) return results[0]" 3160,"def remove(self, item, count=0): """""" Removes @item from the list for @count number of occurrences """""" self._client.lrem(self.key_prefix, count, self._dumps(item))" 3161,"def iter(self, start=0, count=1000): """""" @start: #int cursor start position @count: #int buffer limit per fetch -> yields all of the items in the list """""" cursor = '0' _loads = self._loads stop = start + count while cursor: cursor = self._client.lrange(self.key_prefix, start, stop) for x in cursor or []: yield _loads(x) start += (count + 1) stop += (count + 1)" 3162,"def trim(self, start, end): """""" Trim the list, removing all values not within the slice between @start and @end. @start and @end can be negative numbers just like Python slicing notation. 
@start: #int start position @end: #int end position -> result of :meth:redis.StrictRedis.ltrim """""" return self._client.ltrim(self.key_prefix, start, end)" 3163,"def add(self, member): """""" Adds @member to the set -> #int the number of @members that were added to the set, excluding pre-existing members (1 or 0) """""" return self._client.sadd(self.key_prefix, self._dumps(member))" 3164,"def update(self, members): """""" Adds @members to the set @members: a :class:RedisSet object or #set -> #int the number of @members that were added to the set, excluding pre-existing members """""" if isinstance(members, RedisSet): size = self.size return (self.unionstore( self.key_prefix, members.key_prefix) - size) if self.serialized: members = list(map(self._dumps, members)) if members: return self._client.sadd(self.key_prefix, *members) return 0" 3165,"def union(self, *others): """""" Calculates union between sets @others: one or several :class:RedisSet objects or #str redis set keynames -> #set of new set members """""" others = self._typesafe_others(others) return set(map( self._loads, self._client.sunion(self.key_prefix, *others)))" 3166,"def unioniter(self, *others): """""" The same as :meth:union, but returns iterator instead of #set @others: one or several :class:RedisSet objects or #str redis set keynames -> yields members of the resulting set """""" others = self._typesafe_others(others) for other in self._client.sunion(self.key_prefix, *others): yield self._loads(other)" 3167,"def unionstore(self, destination, *others): """""" The same as :meth:union, but stores the result in @destination @destination: #str keyname or :class:RedisSet @others: one or several #str keynames or :class:RedisSet objects -> #int number of items in the resulting set """""" others = self._typesafe_others(others) destination = self._typesafe(destination) return self._client.sunionstore(destination, self.key_prefix, *others)" 3168,"def intersection(self, *others): """""" Calculates the intersection of all the given sets, that is, members which are present in all given sets. @others: one or several #str keynames or :class:RedisSet objects -> #set of resulting intersection between @others and this set """""" others = self._typesafe_others(others) return set(map( self._loads, self._client.sinter(self.key_prefix, *others)))" 3169,"def interiter(self, *others): """""" The same as :meth:intersection, but returns iterator instead of #set @others: one or several #str keynames or :class:RedisSet objects -> yields members of the resulting set """""" others = self._typesafe_others(others) for other in self._client.sinter(self.key_prefix, *others): yield self._loads(other)" 3170,"def interstore(self, destination, *others): """""" The same as :meth:intersection, but stores the resulting set @destination @destination: #str keyname or :class:RedisSet @others: one or several #str keynames or :class:RedisSet objects -> #int number of members in resulting set """""" others = self._typesafe_others(others) destination = self._typesafe(destination) return self._client.sinterstore(destination, self.key_prefix, *others)" 3171,"def difference(self, *others): """""" Calculates the difference between this set and @others @others: one or several #str keynames or :class:RedisSet objects -> set resulting from the difference between the first set and all @others. 
"""""" others = self._typesafe_others(others) return set(map( self._loads, self._client.sdiff(self.key_prefix, *others)))" 3172,"def diffiter(self, *others): """""" The same as :meth:difference, but returns iterator instead of #set @others: one or several #str keynames or :class:RedisSet objects -> yields members resulting from the difference between the first set and all @others. """""" others = self._typesafe_others(others) for other in self._client.sdiff(self.key_prefix, *others): yield self._loads(other)" 3173,"def diffstore(self, destination, *others): """""" The same as :meth:difference, but stores the resulting set @destination @destination: #str keyname or :class:RedisSet @others: one or several #str keynames or :class:RedisSet objects -> #int number of members in resulting set """""" others = self._typesafe_others(others) destination = self._typesafe(destination) return self._client.sdiffstore(destination, self.key_prefix, *others)" 3174,"def move(self, member, destination): """""" Moves @member from this set to @destination atomically @member: a member of this set @destination: #str redis keyname or :class:RedisSet object -> #bool True if the member was moved """""" destination = self._typesafe(destination) return self._client.smove( self.key_prefix, destination, self._dumps(member))" 3175,"def rand(self, count=1): """""" Gets @count random members from the set @count: #int number of members to return -> @count set members """""" result = self._client.srandmember(self.key_prefix, count) return set(map(self._loads, result))" 3176,"def remove(self, *members): """""" Removes @members from the set -> #int the number of members that were removed from the set """""" if self.serialized: members = list(map(self._dumps, members)) return self._client.srem(self.key_prefix, *members)" 3177,"def members(self): """""" -> #set of all members in the set """""" if self.serialized: return set(map( self._loads, self._client.smembers(self.key_prefix))) else: return set(map( self._decode, self._client.smembers(self.key_prefix)))" 3178,"def scan(self, match=""*"", count=1000, cursor=0): """""" :see::RedisMap.scan """""" cursor, data = self._client.sscan( self.key_prefix, cursor=cursor, match=match, count=count) return (cursor, set(map(self._loads, data)))" 3179,"def iter(self, match=""*"", count=1000): """""" Iterates the set members in :prop:key_prefix of :prop:_client @match: #str pattern to match items by @count: the user specified the amount of work that should be done at every call in order to retrieve elements from the collection -> yields members of the set """""" _loads = self._loads for m in self._client.sscan_iter( self.key_prefix, match=""*"", count=count): yield _loads(m)" 3180,"def incr(self, member, by=1): """""" Increments @member by @by within the sorted set """""" return self._client.zincrby(self.key_prefix, self._dumps(member), by)" 3181,"def decr(self, member, by=1): """""" Decrements @member by @by within the sorted set """""" return self._client.zincrby( self.key_prefix, self._dumps(member), by * -1)" 3182,"def add(self, *args, **kwargs): """""" Adds member/value pairs to the sorted set in two ways: To add with @args: .. pairs = [4.0, 'member1', 5.0, 'member2'] sorted_set.add(*pairs) # sorted_set.add(4.0, 'member1', 5.0, 'member2') .. To add with @kwargs: .. pairs = {""member1"": 4.0, ""member2"": 5.0} sorted_set.add(**pairs) # sorted_set.add(member1=4.0, member2=5.0) .. 
"""""" if args or kwargs: _dumps = self._dumps zargs = list(args) if args and self.serialized: # args format: value, key, value, key... zargs = [ _dumps(x) if (i % 2 == 1 and self.serialized) else x for i, x in enumerate(args)] if kwargs: # kwargs format: key=value, key=value zargs += [ _dumps(x) if (i % 2 == 1 and self.serialized) else x for y in kwargs.items() for i, x in enumerate(reversed(y))] return self._client.zadd(self.key_prefix, *zargs)" 3183,"def update(self, data): """""" Adds @data to the sorted set @data: #dict or dict-like object """""" if data: _dumps = self._dumps zargs = [ _dumps(x) if (i % 2 == 1) else x for y in data.items() for i, x in enumerate(reversed(y)) ] return self._client.zadd(self.key_prefix, *zargs)" 3184,"def remove(self, *members): """""" Removes @members from the sorted set """""" members = list(map(self._dumps, members)) self._client.zrem(self.key_prefix, *members)" 3185,"def rank(self, member): """""" Gets the ASC rank of @member from the sorted set, that is, lower scores have lower ranks """""" if self.reversed: return self._client.zrevrank(self.key_prefix, self._dumps(member)) return self._client.zrank(self.key_prefix, self._dumps(member))" 3186,"def count(self, min, max): """""" -> #int number of elements in the sorted set with a score between @min and @max. """""" return self._client.zcount(self.key_prefix, min, max)" 3187,"def iter(self, start=0, stop=-1, withscores=False, reverse=None): """""" Return a range of values from sorted set name between @start and @end sorted in ascending order unless @reverse or :prop:reversed. @start and @end: #int, can be negative, indicating the end of the range. @withscores: #bool indicates to return the scores along with the members, as a list of |(member, score)| pairs @reverse: #bool indicating whether to sort the results descendingly -> yields members or |(member, score)| #tuple pairs """""" reverse = reverse if reverse is not None else self.reversed _loads = self._loads for member in self._client.zrange( self.key_prefix, start=start, end=stop, withscores=withscores, desc=reverse, score_cast_func=self.cast): if withscores: yield (_loads(member[0]), self.cast(member[1])) else: yield _loads(member)" 3188,"def iterbyscore(self, min='-inf', max='+inf', start=None, num=None, withscores=False, reverse=None): """""" Return a range of values from the sorted set name with scores between @min and @max. If @start and @num are specified, then return a slice of the range. @min: #int minimum score, or #str '-inf' @max: #int minimum score, or #str '+inf' @start: #int starting range position @num: #int number of members to fetch @withscores: #bool indicates to return the scores along with the members, as a list of |(member, score)| pairs @reverse: #bool indicating whether to sort the results descendingly -> yields members or |(member, score)| #tuple pairs """""" reverse = reverse if reverse is not None else self.reversed zfunc = self._client.zrangebyscore if not reverse \ else self._client.zrevrangebyscore _loads = self._loads for member in zfunc( self.key_prefix, min=min, max=max, start=start, num=num, withscores=withscores, score_cast_func=self.cast): if withscores: yield (_loads(member[0]), self.cast(member[1])) else: yield _loads(member)" 3189,"def itemsbyscore(self, min='-inf', max='+inf', start=None, num=None, reverse=None): """""" Return a range of |(member, score)| pairs from the sorted set name with scores between @min and @max. If @start and @num are specified, then return a slice of the range. 
@min: #int minimum score, or #str '-inf' @max: #int maximum score, or #str '+inf' @start: #int starting range position @num: #int number of members to fetch @reverse: #bool indicating whether to sort the results in descending order -> yields |(member, score)| #tuple pairs """""" reverse = reverse if reverse is not None else self.reversed for member in self.iterbyscore( min, max, start, num, withscores=True, reverse=reverse): yield member" 3190,"def iterscan(self, match=""*"", count=1000): """""" Much slower than iter(), but much more memory efficient if k/v's retrieved are one-offs @match: matches member names in the sorted set @count: the user-specified amount of work that should be done at every call in order to retrieve elements from the collection -> iterator of |(member, score)| pairs """""" if self.serialized: return map( lambda x: (self._loads(x[0]), self.cast(x[1])), self._client.zscan_iter( self.key_prefix, match=match, count=count)) else: return map( lambda x: (self._decode(x[0]), self.cast(x[1])), self._client.zscan_iter( self.key_prefix, match=match, count=count))" 3191,"def scan(self, match=""*"", count=1000, cursor=0): """""" :see::meth:RedisMap.scan """""" if self.serialized: cursor, data = self._client.zscan( self.key_prefix, cursor=cursor, match=match, count=count) return (cursor, list(map( lambda x: (self._loads(x[0]), self.cast(x[1])), data))) else: cursor, data = self._client.zscan( self.key_prefix, cursor=cursor, match=match, count=count) return (cursor, list(map( lambda x: (self._decode(x[0]), self.cast(x[1])), data)))" 3192,"def recv_blocking(conn, msglen): """"""Receive data until msglen bytes have been received."""""" msg = b'' while len(msg) < msglen: maxlen = msglen-len(msg) if maxlen > 4096: maxlen = 4096 tmpmsg = conn.recv(maxlen) if not tmpmsg: raise RuntimeError(""socket connection broken"") msg += tmpmsg logging.debug(""Msglen: %d of %d"", len(msg), msglen) logging.debug(""Message: %s"", msg) return msg" 3193,"def compare_password(expected, actual): """"""Compare two 64-byte encoded passwords."""""" if expected == actual: return True, ""OK"" msg = [] ver_exp = expected[-8:].rstrip() ver_act = actual[-8:].rstrip() if expected[:-8] != actual[:-8]: msg.append(""Password mismatch"") if ver_exp != ver_act: msg.append(""asterisk_mbox version mismatch. Client: '"" + ver_act + ""', Server: '"" + ver_exp + ""'"") return False, "". "".join(msg)" 3194,"def encode_to_sha(msg): """"""Coerce a numeric list into an SHA-looking bytearray"""""" if isinstance(msg, str): msg = msg.encode('utf-8') return (codecs.encode(msg, ""hex_codec"") + (b'00' * 32))[:64]" 3195,"def decode_from_sha(sha): """"""Convert a coerced SHA back into a numeric list"""""" if isinstance(sha, str): sha = sha.encode('utf-8') return codecs.decode(re.sub(rb'(00)*$', b'', sha), ""hex_codec"")" 3196,"def put(self, item, block=True, timeout=None): """"""Put @item on the queue and signal via the put socket."""""" super().put(item, block, timeout) self._putsocket.send(b'x')" 3197,"def get(self, block=True, timeout=None): """"""Get an item from the queue and consume the socket signal."""""" try: item = super().get(block, timeout) self._getsocket.recv(1) return item except queue.Empty: raise queue.Empty" 3198,"def _api_path(self, item): """"""Get the API path for the current cursor position."""""" if self.base_url is None: raise NotImplementedError(""base_url not set"") path = ""/"".join([x.blob[""id""] for x in item.path]) return ""/"".join([self.base_url, path])" 3199,"def run(self): """""" Run at parse time. 
When the documents are initially being scanned, this method runs and does two things: (a) creates an instance that is added to the site's widgets, and (b) leaves behind a placeholder docutils node that can later be processed after the docs are resolved. The latter needs enough information to retrieve the former. """""" this_widget = self.get_widget(self.docname) self.widgets[repr(this_widget)] = this_widget # Now add the node to the doctree widget_node = widget() ids = [repr(this_widget)] names = [self.name] attrs = dict(ids=ids, names=names) widget_node.update_basic_atts(attrs) return [widget_node]" 3200,"def register_references(kb_app: kb, sphinx_app: Sphinx, sphinx_env: BuildEnvironment, docnames: List[str]): """""" Walk the registry and add sphinx directives """""" references: ReferencesContainer = sphinx_app.env.references for name, klass in kb_app.config.resources.items(): # Name is the value in the decorator and directive, e.g. # @kb.resource('category') means name=category if getattr(klass, 'is_reference', False): references[name] = dict()" 3201,"def register_handlers(self, handler_classes): """""" Create handlers from discovered handler classes :param handler_classes: List of :class:`~responsebot.handlers.base.BaseTweetHandler`'s derived classes """""" for handler_class in handler_classes: self.handlers.append(handler_class(client=self.client)) logging.info('Successfully registered {handler_class}'.format( handler_class=getattr(handler_class, '__name__', str(handler_class))) )" 3202,"def on_tweet(self, tweet): """""" Callback to receive tweet from :class:`~responsebot.responsebot_stream.ResponseBotStream`. Tries to forward the received tweet to registered handlers. :param tweet: An object containing a tweet's text and metadata :type tweet: :class:`~responsebot.models.Tweet` """""" logging.info(u'Received tweet: `{message}`'.format(message=tweet.text)) for handler in self.handlers: if not handler.catch_self_tweets and self.is_self_tweet(tweet): continue if not handler.filter.match_tweet(tweet=tweet, user_stream=self.client.config.get('user_stream')): continue handler.on_tweet(tweet)" 3203,"def on_event(self, event): """""" Callback to receive events from :class:`~responsebot.responsebot_stream.ResponseBotStream`. Tries to forward the received event to registered handlers. :param event: The received event :type event: :class:`~responsebot.models.Event` error from a custom handler """""" if event.event not in TWITTER_NON_TWEET_EVENTS: logging.warning(u'Received unknown twitter event {event}'.format(event=event.event)) return logging.info(u'Received event {event}'.format(event=event.event)) for handler in self.handlers: handler.on_event(event)" 3204,"def get_merged_filter(self): """""" Return merged filter from list of handlers :return: merged filter :rtype: :class:`~responsebot.models.TweetFilter` """""" track = set() follow = set() for handler in self.handlers: track.update(handler.filter.track) follow.update(handler.filter.follow) return TweetFilter(track=list(track), follow=list(follow))" 3205,"def get_domain(url): """""" Get domain part of an url. For example: https://www.python.org/doc/ -> https://www.python.org """""" parse_result = urlparse(url) domain = ""{schema}://{netloc}"".format( schema=parse_result.scheme, netloc=parse_result.netloc) return domain" 3206,"def join_all(domain, *parts): """""" Join all url components. 
Example:: >>> join_all(""https://www.apple.com"", ""iphone"") https://www.apple.com/iphone :param domain: Domain parts, example: https://www.python.org :param parts: Other parts, example: ""/doc"", ""/py27"" :return: url """""" l = list() if domain.endswith(""/""): domain = domain[:-1] l.append(domain) for part in parts: for i in part.split(""/""): if i.strip(): l.append(i) url = ""/"".join(l) return url" 3207,"def add_params(endpoint, params): """""" Combine query endpoint and params. Example:: >>> add_params(""https://www.google.com/search"", {""q"": ""iphone""}) https://www.google.com/search?q=iphone """""" p = PreparedRequest() p.prepare(url=endpoint, params=params) if PY2: # pragma: no cover return unicode(p.url) else: # pragma: no cover return p.url" 3208,"def neval(expression, globals=None, locals=None, **kwargs): """"""Evaluate *expression* using *globals* and *locals* dictionaries as *global* and *local* namespace. *expression* is transformed using :class:`.NapiTransformer`."""""" try: import __builtin__ as builtins except ImportError: import builtins from ast import parse from ast import fix_missing_locations as fml try: transformer = kwargs['transformer'] except KeyError: from napi.transformers import NapiTransformer as transformer #try: node = parse(expression, '<string>', 'eval') #except ImportError: # builtins.eval(expression) #else: if globals is None: globals = builtins.globals() if locals is None: locals = {} trans = transformer(globals=globals, locals=locals, **kwargs) trans.visit(node) code = compile(fml(node), '<string>', 'eval') return builtins.eval(code, globals, locals)" 3209,"def nexec(statement, globals=None, locals=None, **kwargs): """"""Execute *statement* using *globals* and *locals* dictionaries as *global* and *local* namespace. 
*statement* is transformed using :class:`.NapiTransformer`."""""" try: import __builtin__ as builtins except ImportError: import builtins from ast import parse from napi.transformers import NapiTransformer from ast import fix_missing_locations as fml try: node = parse(statement, '<string>', 'exec') except ImportError:#KeyError: exec(statement) else: if globals is None: globals = builtins.globals() if locals is None: locals = {} trans = NapiTransformer(globals=globals, locals=locals, **kwargs) trans.visit(node) code = compile(fml(node), '<string>', 'exec') return builtins.eval(code, globals, locals)" 3210,"def cli(ctx, oldversion): """"""Upgrade the current bubble, should mimic init as much as possible(experimental)"""""" # print ctx.bubble path = ctx.home bubble_file_name = path + '/.bubble' config_file = path + '/config/config.yaml' if file_exists(bubble_file_name): pass else: with open(bubble_file_name, 'w') as dot_bubble: dot_bubble.write('bubble=' + metadata.version) dot_bubble.write('\nconfig=' + config_file) ctx.say_green('Initialised a new bubble in [%s]' % click.format_filename(bubble_file_name)) create_dir(ctx, path + '/config/') create_dir(ctx, path + '/logs/') create_dir(ctx, path + '/export/') create_dir(ctx, path + '/import/') create_dir(ctx, path + '/remember/') create_dir(ctx, path + '/remember/archive') rules_file = path + '/config/rules.bubble' if file_exists(bubble_file_name): pass else: with open(rules_file, 'w') as rules: rules.write(get_example_rules_bubble()) ctx.say_green('Created an example rules in [%s]' % click.format_filename(rules_file)) rule_functions_file = path + '/custom_rule_functions.py' if file_exists(rule_functions_file): pass else: with open(rule_functions_file, 'w') as rule_functions: rule_functions.write(get_example_rule_functions()) ctx.say_green('Created an example rule_functions in [%s]' % click.format_filename(rule_functions_file)) ctx.say_green('Bubble upgraded')" 3211,"def _list_remote(store, maildir, verbose=False): """"""List the a maildir. store is an abstract representation of the source maildir. maildir is the local maildir to which mail will be pulled. This is a generator for a reason. Because of the way ssh multi-mastering works a single open TCP connection allows multiple virtual ssh connections. So the encryption and tcp only has to be done once. If this command returned a list then the ssh list command would have finished and the ssh connection for each message would have to be made again. """""" # This command produces a list of all files in the maildir like: # base-filename timestamp container-directory command = """"""echo {maildir}/{{cur,new}} | tr ' ' '\\n' | while read path ; do ls -1Ugo --time-style=+%s $path | sed -rne ""s|[a-zA-Z-]+[ \t]+[0-9]+[ \t]+[0-9]+[ \t]+([0-9]+)[ \t]+([0-9]+\\.[A-Za-z0-9]+)(\\.([.A-Za-z0-9-]+))*(:[2],([PRSTDF]*))*|\\2 \\1 $path|p"";done"""""" stdout = store.cmd(command, verbose) lines = stdout.split(""\n"") for line in lines: parts = line.split("" "") if len(parts) >= 3: yield parts[0:3]" 3212,"def sshpull(host, maildir, localmaildir, noop=False, verbose=False, filterfile=None): """"""Pull a remote maildir to the local one. """""" store = _SSHStore(host, maildir) _pull(store, localmaildir, noop, verbose, filterfile)" 3213,"def filepull(maildir, localmaildir, noop=False, verbose=False, filterfile=None): """"""Pull one local maildir into another. The source need not be an md folder (it need not have a store). In this case filepull is kind of an import. 
"""""" store = _Store(maildir) _pull(store, localmaildir, noop, verbose, filterfile)" 3214,"def _filter(msgdata, mailparser, mdfolder, mailfilters): """"""Filter msgdata by mailfilters"""""" if mailfilters: for f in mailfilters: msg = mailparser.parse(StringIO(msgdata)) rule = f(msg, folder=mdfolder) if rule: yield rule return" 3215,"def cmd(self, cmd, verbose=False): """"""Executes the specified command on the remote host. The cmd must be format safe, this means { and } must be doubled, thusly: echo /var/local/maildir/{{cur,new}} the cmd can include the format word 'maildir' to be replaced by self.directory. eg: echo {maildir}/{{cur,new}} """""" command = cmd.format(maildir=self.directory) if verbose: print(command) p = Popen([ ""ssh"", ""-T"", self.host, command ], stdin=PIPE, stdout=PIPE, stderr=PIPE) stdout,stderr = p.communicate() return stdout" 3216,"def fetch_result(self): """"""Return a list of urls for each search result."""""" results = self.soup.find_all('div', {'class': 'container container-small'}) href = None is_match = False i = 0 while i < len(results) and not is_match: result = results[i] anchor = result.find('a', {'rel': 'bookmark'}) is_match = self._filter_results(result, anchor) href = anchor['href'] i += 1 try: page = get_soup(href) except (Exception): page = None # Return page if search is successful if href and page: return page else: raise PageNotFoundError(PAGE_ERROR)" 3217,"def _filter_results(self, result, anchor): """"""Filter search results by checking category titles and dates"""""" valid = True try: cat_tag = result.find('a', {'rel': 'category tag'}).string title = anchor.string.lower() date_tag = result.find('time').string except (AttributeError, TypeError): return False if cat_tag != ""Daily Ratings"": valid = False if not date_in_range(self.date, date_tag, 5): valid = False if self.category == 'cable' and 'cable' not in title: valid = False elif self.category != 'cable' and 'cable' in title: valid = False return valid" 3218,"def _build_url(self): """"""Build url based on searching by date or by show."""""" url_params = [ BASE_URL, self.category + ' ratings', self.day, self.year, self.month ] return SEARCH_URL.format(*url_params)" 3219,"def _assert_category(self, category): """"""Validate category argument"""""" category = category.lower() valid_categories = ['cable', 'broadcast', 'final', 'tv'] assert_msg = ""%s is not a valid category."" % (category) assert (category in valid_categories), assert_msg" 3220,"def _get_response(self, url, **params): """""" Giving a service path and optional specific arguments, returns the response string. """""" data = urlencode(params) url = ""%s?%s"" % (url, data) headers = {'User-Agent': self.get_random_agent()} request = Request(url, headers=headers, method='GET') def open_request(request, attempts, err=None): if attempts > self.request_attempts: raise attempts += 1 try: with urlopen(request, timeout=self.timeout) as response: return response.read() except HTTPError as err: if err.getcode() < 500: raise print(""HTTPError occurred while trying to request the url "" ""%s. %s. Trying again in %s seconds..."" % (url, err, self.seconds_between_attempts)) time.sleep(self.seconds_between_attempts) return open_request(request, attempts, err) attempts = 0 self.last_response = open_request(request, attempts) return self.last_response" 3221,"def get_response(self, path, **params): """""" Giving a service path and optional specific arguments, returns the response string. 
"""""" url = ""%s%s"" % (self.base_url, path) return self._get_response(url, **params)" 3222,"def get_data(self, path, **params): """""" Giving a service path and optional specific arguments, returns the XML data from the API parsed as a dict structure. """""" xml = self.get_response(path, **params) try: return parse(xml) except Exception as err: print(path) print(params) print(err) raise" 3223,"def run(self, port): # pragma: no coverage """""" Run on given port. Parse standard options and start the http server. """""" tornado.options.parse_command_line() http_server = tornado.httpserver.HTTPServer(self) http_server.listen(port) tornado.ioloop.IOLoop.instance().start()" 3224,"def log_request(self, handler): """""" Override base method to log requests to JSON UDP collector and emit a metric. """""" packet = {'method': handler.request.method, 'uri': handler.request.uri, 'remote_ip': handler.request.remote_ip, 'status': handler.get_status(), 'request_time_ms': handler.request.request_time() * 1000.0, 'service_id': self.service_id, 'request_id': handler.request.headers.get(REQUEST_ID_HEADER, 'undefined') } # handler can optionally define additional data to log if hasattr(handler, 'logvalues'): for key, value in handler.logvalues.iteritems(): packet[key] = value servicelog.log(packet) metric = ""requests."" + str(handler.get_status()) metrics.timing(metric, handler.request.request_time() * 1000.0) super(LoggingApplication, self).log_request(handler)" 3225,"def logvalue(self, key, value): """"""Add log entry to request log info"""""" if not hasattr(self, 'logvalues'): self.logvalues = {} self.logvalues[key] = value" 3226,"def write_error(self, status_code, **kwargs): """"""Log halt_reason in service log and output error page"""""" message = default_message = httplib.responses.get(status_code, '') # HTTPError exceptions may have a log_message attribute if 'exc_info' in kwargs: (_, exc, _) = kwargs['exc_info'] if hasattr(exc, 'log_message'): message = str(exc.log_message) or default_message self.logvalue('halt_reason', message) title = ""{}: {}"".format(status_code, default_message) body = ""{}: {}"".format(status_code, message) self.finish(""<html><title>"" + title + """" """" + body + """")" 3227,"def timeit(self, metric, func, *args, **kwargs): """"""Time execution of callable and emit metric then return result."""""" return metrics.timeit(metric, func, *args, **kwargs)" 3228,"def require_content_type(self, content_type): """"""Raises a 400 if request content type is not as specified."""""" if self.request.headers.get('content-type', '') != content_type: self.halt(400, 'Content type must be ' + content_type)" 3229,"def set_headers(self, headers): """"""Set headers"""""" for (header, value) in headers.iteritems(): self.set_header(header, value)" 3230,"def _ensure_request_id_header(self): ""Ensure request headers have a request ID. 
Set one if needed."" if REQUEST_ID_HEADER not in self.request.headers: self.request.headers.add(REQUEST_ID_HEADER, uuid.uuid1().hex)" 3231,"def load_parameters(self, source): """""" For YML, the source is the file path"""""" with open(source) as parameters_source: loaded = yaml.safe_load(parameters_source.read()) for k, v in loaded.items(): if isinstance(v, str): loaded[k] = ""'""+v+""'"" return loaded" 3232,"def load_config(self, config_source, parameters_source): """""" For YML, the source is the file path"""""" with open(config_source) as config_source: config_raw = config_source.read() parameters = {} """"""Parameters from file"""""" if os.path.isfile(parameters_source): params = self.load_parameters(parameters_source) if params is not None: parameters.update(params) """"""Overwrite parameters with the environment variables"""""" env_params = {} env_params.update(os.environ) for k, v in env_params.items(): if is_string(v): env_params[k] = ""'"" + v + ""'"" parameters.update(env_params) """"""Replace the parameters"""""" final_configuration = config_raw.format(**parameters) final_configuration = yaml.safe_load(final_configuration) return final_configuration if final_configuration is not None else {}" 3233,"def load_parameters(self, source): """""" For JSON, the source is the file path"""""" with open(source) as parameters_source: return json.loads(parameters_source.read())" 3234,"def load_config(self, config_source, parameters_source): """""" For JSON, the source is the file path"""""" with open(config_source) as config_source: config_raw = config_source.read() """"""Replace the parameters"""""" pattern = ""(%[a-zA-Z_0-9]*%)"" self.parameters = {} """"""Parameters from file"""""" if os.path.isfile(parameters_source): self.parameters.update(self.load_parameters(parameters_source)) """"""Overwrite parameters with the environment variables"""""" self.parameters.update(os.environ) replaced_config = re.sub(pattern=pattern, repl=self._replace_function, string=config_raw) return json.loads(replaced_config)" 3235,"def main(): """"""Testing function for the DFA State Removal operation"""""" if len(argv) < 2: targetfile = 'target.y' else: targetfile = argv[1] print 'Parsing ruleset: ' + targetfile, flex_a = Flexparser() mma = flex_a.yyparse(targetfile) print 'OK' print 'Perform minimization on initial automaton:', mma.minimize() print 'OK' print 'Perform StateRemoval on minimal automaton:', state_removal = StateRemoval(mma) mma_regex = state_removal.get_regex() print mma_regex" 3236,"def _state_removal_init(self): """"""State Removal Operation Initialization"""""" # First, we remove all multi-edges: for state_i in self.mma.states: for state_j in self.mma.states: if state_i.stateid == state_j.stateid: self.l_transitions[ state_i.stateid, state_j.stateid] = self.epsilon else: self.l_transitions[ state_i.stateid, state_j.stateid] = self.empty for arc in state_i.arcs: if arc.nextstate == state_j.stateid: if self.l_transitions[state_i.stateid, state_j.stateid] != self.empty: self.l_transitions[state_i.stateid, state_j.stateid] \ += self.mma.isyms.find(arc.ilabel) else: self.l_transitions[state_i.stateid, state_j.stateid] = \ self.mma.isyms.find(arc.ilabel)" 3237,"def _state_removal_remove(self, k): """""" State Removal Remove operation l_transitions[i,i] += l_transitions[i,k] . star(l_transitions[k,k]) . l_transitions[k,i] l_transitions[j,j] += l_transitions[j,k] . star(l_transitions[k,k]) . l_transitions[k,j] l_transitions[i,j] += l_transitions[i,k] . star(l_transitions[k,k]) . 
l_transitions[k,j] l_transitions[j,i] += l_transitions[j,k] . star(l_transitions[k,k]) . l_transitions[k,i] Args: k (int): The node that will be removed Returns: None """""" for state_i in self.mma.states: for state_j in self.mma.states: if self.l_transitions[state_i.stateid, k] != self.empty: l_ik = self.l_transitions[state_i.stateid, k] else: l_ik = """" if self.l_transitions[state_j.stateid, k] != self.empty: l_jk = self.l_transitions[state_j.stateid, k] else: l_jk = """" if self.l_transitions[k, state_i.stateid] != self.empty: l_ki = self.l_transitions[k, state_i.stateid] else: l_ki = """" if self.l_transitions[k, state_j.stateid] != self.empty: l_kj = self.l_transitions[k, state_j.stateid] else: l_kj = """" if self.l_transitions[state_i.stateid, state_i.stateid] != self.empty: self.l_transitions[state_i.stateid, state_i.stateid] += l_ik + \ self.star(self.l_transitions[k, k]) + l_ki else: self.l_transitions[state_i.stateid, state_i.stateid] = l_ik + \ self.star(self.l_transitions[k, k]) + l_ki if self.l_transitions[state_j.stateid, state_j.stateid] != self.empty: self.l_transitions[state_j.stateid, state_j.stateid] += l_jk + \ self.star(self.l_transitions[k, k]) + l_kj else: self.l_transitions[state_j.stateid, state_j.stateid] = l_jk + \ self.star(self.l_transitions[k, k]) + l_kj if self.l_transitions[state_i.stateid, state_j.stateid] != self.empty: self.l_transitions[state_i.stateid, state_j.stateid] += l_ik + \ self.star(self.l_transitions[k, k]) + l_kj else: self.l_transitions[state_i.stateid, state_j.stateid] = l_ik + \ self.star(self.l_transitions[k, k]) + l_kj if self.l_transitions[state_j.stateid, state_i.stateid] != self.empty: self.l_transitions[state_j.stateid, state_i.stateid] += l_jk + \ self.star(self.l_transitions[k, k]) + l_ki else: self.l_transitions[state_j.stateid, state_i.stateid] = l_jk + \ self.star(self.l_transitions[k, k]) + l_ki" 3238,"def _state_removal_solve(self): """"""The State Removal Operation"""""" initial = sorted( self.mma.states, key=attrgetter('initial'), reverse=True)[0].stateid for state_k in self.mma.states: if state_k.final: continue if state_k.stateid == initial: continue self._state_removal_remove(state_k.stateid) print self.l_transitions return self.l_transitions" 3239,"def cli(ctx, amount, index, stage, stepresult, formattype, select, where, order, outputfile, showkeys, showvalues, showalways, position): """"""Export from memory to format supported by tablib"""""" if not ctx.bubble: msg = 'There is no bubble present, will not export' ctx.say_yellow(msg) raise click.Abort() path = ctx.home + '/' if stage not in STAGES: ctx.say_yellow('There is no known stage:' + stage) raise click.Abort() if stepresult not in exportables: ctx.say_yellow('stepresult not one of: ' + ', '.join(exportables)) raise click.Abort() data_gen = bubble_lod_load(ctx, stepresult, stage) ctx.gbc.say('data_gen:', stuff=data_gen, verbosity=20) part = get_gen_slice(ctx.gbc, data_gen, amount, index) ctx.gbc.say('selected part:', stuff=part, verbosity=20) aliases = get_pairs(ctx.gbc, select, missing_colon=True) if position or len(aliases) == 0: ctx.gbc.say('adding position to selection of columns:', stuff=aliases, verbosity=20) aliases.insert(0, {'key': buts('index'), 'val': 'BUBBLE_IDX'}) ctx.gbc.say('added position to selection of columns:', stuff=aliases, verbosity=20) wheres = get_pairs(ctx.gbc, where) # TODO: use aliases as lookup for wheres data = tablib.Dataset() data.headers = [sel['val'] for sel in aliases] ctx.gbc.say('select wheres:' + str(wheres), verbosity=20) 
ctx.gbc.say('select aliases:' + str(aliases), verbosity=20) ctx.gbc.say('select data.headers:' + str(data.headers), verbosity=20) not_shown = True try: for ditem in part: row = [] ctx.gbc.say('curr dict', stuff=ditem, verbosity=101) flitem = flat(ctx, ditem) ctx.gbc.say('curr flat dict', stuff=flitem, verbosity=101) row_ok = True for wp in wheres: # TODO: negative selects: k:None, k:False,k:Zero,k:Null,k:0,k:-1,k:'',k:"""", # TODO: negative selects: # k:BUBBLE_NO_KEY,k:BUBBLE_NO_VAL,k:BUBBLE_NO_KEY_OR_NO_VAL wcheck_key=True if wp['key'] not in flitem: row_ok = False wcheck_key=False if wcheck_key and wp['val'] not in str(flitem[wp['key']]): row_ok = False if not row_ok: continue for sel in aliases: if sel['key'] in flitem: row.append(flitem[sel['key']]) else: # temporary to check, not use case for buts() bnp = '____BTS_NO_PATH_' tempv = get_flat_path(ctx, flitem, sel['key'] + '.*', bnp) if tempv != bnp: row.append(tempv) else: row.append('None') # TODO maybe 'NONE', or just '' or something like: # magic.export_format_none data.append(row) # todo: count keys, and show all keys in selection: i,a if not_shown and showkeys: if not showalways: not_shown = False ks = list(flitem.keys()) ks.sort() ctx.say( 'available dict path keys from first selected dict:', verbosity=0) for k in ks: ctx.say('keypath: ' + k, verbosity=0) if showvalues: ctx.say('value: ' + str(flitem[k]) + '\n', verbosity=0) except Exception as excpt: ctx.say_red('Cannot export data', stuff=excpt) raise click.Abort() if not outputfile: outputfile = path + 'export/export_' + \ stepresult + '_' + stage + '.' + formattype # todo: order key must be present in selection # add to selection before # and remove from result before output to format. if order: olast2 = order[-2:] ctx.gbc.say('order:' + order + ' last2:' + olast2, verbosity=100) if olast2 not in [':+', ':-']: data = data.sort(order, False) else: if olast2 == ':+': data = data.sort(order[:-2], False) if olast2 == ':-': data = data.sort(order[:-2], True) # Write `spreadsheet` to disk formatted = None if formattype == 'yaml': formatted = data.yaml if formattype == 'json': formatted = data.json if formattype == 'csv': formatted = data.csv # TODO: # if formattype == 'ldif': # formatted = data.ldif if formattype == 'tab': # standard, output, whatever tablib makes of it, ascii table print(data) if formatted: enc_formatted = formatted.encode('utf-8') of_path = opath.Path(outputfile) of_dir = of_path.dirname() if not of_dir.exists(): of_dir.makedirs_p() with open(outputfile, 'wb') as f: f.write(enc_formatted) ctx.say_green('exported: ' + outputfile)" 3240,"def save(self, *args, **kwargs): """""" **uid**: :code:`{jurisdiction.uid}_body:{slug}` """""" stripped_name = ' '.join( w for w in self.organization.name.split() if w not in STOPWORDS ) if not self.slug: self.slug = uuslug( stripped_name, instance=self, max_length=100, separator='-', start_no=2 ) self.uid = '{}_body:{}'.format( self.jurisdiction.uid, slugify(stripped_name)) super(Body, self).save(*args, **kwargs)" 3241,"def _set_elangles(self): """"""Sets the values of instance variable elangles. Method creates a dictionary containing the elangles of the pvol file. 
Elangles are ordered in acending order using uppercase letters as keys Examples -------- >>> pvol = OdimPVOL('pvol.h5') >>> print(pvol.elangles) {'A': 0.5, 'C': 1.5, 'B': 0.69999999999999996, 'E': 5.0, 'D': 3.0} """""" elang_list = list(self.attr_gen('elangle')) try: elevation_angles = sorted(zip(*elang_list)[1]) n_elangles = len(elevation_angles) self.elangles = dict(zip(list(string.ascii_uppercase[:n_elangles]), elevation_angles)) except IndexError: self.elangles = {}" 3242,"def select_dataset(self, elangle, quantity): """""" Selects the matching dataset and returns its path. Parameters ---------- elangle : str Upper case ascii letter defining the elevation angle quantity : str Name of the quantity e.g. DBZH, VRAD, RHOHV... Returns ------- dataset : str Path of the matching dataset or None if no dataset is found. Examples -------- Get the hdf5 path of the DBZH dataset at lowest elevation angle >>> pvol = odimPVOL('pvol.h5') >>> dataset = pvol.select_dataset('A', 'DBZH') >>> print(dataset) '/dataset1/data1/data' """""" elangle_path = None try: search_results = self.search('elangle', self.elangles[elangle]) except KeyError: return None if search_results == []: print('Elevation angle {} is not found from file'.format(elangle)) print('File contains elevation angles:{}'.format(self.elangles)) else: elangle_path = search_results[0] if elangle_path is not None: dataset_root = re.search( '^/dataset[0-9]+/', elangle_path).group(0) quantity_path = None search_results = self.search('quantity', quantity) for path in search_results: if dataset_root in path: quantity_path = path break if quantity_path is not None: dataset_path = re.search('^/dataset[0-9]+/data[0-9]/', quantity_path).group(0) dataset_path = os.path.join(dataset_path, 'data') if isinstance(self[dataset_path], h5py.Dataset): self.dataset = self[dataset_path].ref return dataset_path" 3243,"def sector(self, start_ray, end_ray, start_distance=None, end_distance=None, units='b'): """"""Slices a sector from the selected dataset. Slice contains the start and end rays. If start and end rays are equal one ray is returned. If the start_ray is greater than the end_ray slicing continues over the 359-0 border. Parameters ---------- start_ray : int Starting ray of of the slice first ray is 0 end_ray : int End ray of the slice, last ray is 359 Keywords -------- start_distance : int Starting distance of the slice, if not defined sector starts form zero end_distance : int Ending distance of the slice, if not defined sector continues to the end last ray of the dataset units : str Units used in distance slicing. Option 'b' means that bin number is used as index. Option 'm' means that meters are used and the slicing index is calculated using bin width. Returns ------- sector : ndarray Numpy array containing the sector values Examples -------- Get one ray from the selected dataset >>> pvol = odimPVOL('pvol.h5') >>> pvol.select_dataset('A', 'DBZH') >>> ray = pvol.sector(10, 10) Get sector from selected dataset, rays from 100 to 200 at distances from 5 km to 10 km. 
>>> pvol = odimPVOL('pvol.h5') >>> pvol.select_dataset('A', 'DBZH') >>> sector = pvol.sector(100, 200, 5000, 10000) """""" if self.dataset is None: raise ValueError('Dataset is not selected') # Validate parameter values ray_max, distance_max = self.dataset.shape if start_ray > ray_max: raise ValueError('Value of start_ray is bigger than the number of rays') if start_ray < 0: raise ValueError('start_ray must be non negative') if start_distance is None: start_distance_index = 0 else: if units == 'b': start_distance_index = start_distance elif units == 'm': try: rscale = next(self.attr_gen('rscale')).value except: raise MissingMetadataError start_distance_index = int(start_distance / rscale) if end_distance is None: end_distance_index = self.dataset.shape[1] else: if units == 'b': end_distance_index = end_distance elif units == 'm': end_distance_index = int(end_distance / rscale) if end_ray is None: sector = self.dataset[start_ray, start_distance_index:end_distance_index] else: if start_ray <= end_ray: sector = self.dataset[start_ray:end_ray+1, start_distance_index:end_distance_index] else: sector1 = self.dataset[start_ray:, start_distance_index:end_distance_index] sector2 = self.dataset[:end_ray+1, start_distance_index:end_distance_index] sector = np.concatenate((sector1, sector2), axis=0) return sector" 3244,"def select_dataset(self, quantity): """""" Selects the matching dataset and returns its path. After the dataset has been selected, its values can be accessed trough dataset member variable. Parameters ---------- quantity : str name of the quantity Examples -------- Select DBZH composite >>> comp = OdimCOMP('comp.h5') >>> dataset_path = comp.select_dataset('DBZH') >>> print(dataset_path) >>> '/dataset1/data1/data' >>> print(comp.dataset) [[255 255 255 ..., 255 255 255] [255 255 255 ..., 255 255 255] [255 255 255 ..., 255 255 255] ..., [255 255 255 ..., 255 255 255] [255 255 255 ..., 255 255 255] [255 255 255 ..., 255 255 255]] """""" # Files with a following dataset structure. # Location of 'quantity' attribute: /dataset1/data1/what # Dataset path structure: /dataset1/data1/data search_results = self.search('quantity', quantity) try: quantity_path = search_results[0] except IndexError: print('Attribute quantity=\'{}\' was not found from file'.format(quantity)) return None full_dataset_path = quantity_path.replace('/what', '/data') try: if isinstance(self[full_dataset_path], h5py.Dataset): self.dataset = self[full_dataset_path].ref return full_dataset_path else: self.dataset = None return None except KeyError: # Files with following dataset structure # Location of 'quantity' attribute: /dataset1/what # Dataset path structure: /dataset1/data1/data dataset_root_path = re.search( '^/dataset[0-9]+/', quantity_path).group(0) dataset_paths = self.datasets() for ds_path in dataset_paths: try: full_dataset_path = re.search( '^{}data[0-9]+/data'.format(dataset_root_path), ds_path).group(0) break except: pass if isinstance(self[full_dataset_path], h5py.Dataset): self.dataset = self[full_dataset_path].ref return full_dataset_path else: self.dataset = None return None" 3245,"def request(self, url, method, body="""", headers={}, retry=True): """"""Execute an HTTP request and return a dict containing the response and the response status code. Keyword arguments: url -- The path to execute the result against, not including the API version or project ID, with no leading /. Required. method -- The HTTP method to use. Required. body -- A string or file object to send as the body of the request. 
Defaults to an empty string. headers -- HTTP Headers to send with the request. Can overwrite the defaults. Defaults to {}. retry -- Whether exponential backoff should be employed. Defaults to True. """""" if headers: headers = dict(list(headers.items()) + list(self.headers.items())) else: headers = self.headers if not sys.version_info >= (3,) and headers: headers = dict((k.encode('ascii') if isinstance(k, unicode) else k, v.encode('ascii') if isinstance(v, unicode) else v) for k, v in headers.items()) url = self.base_url + url if not sys.version_info >= (3,): if isinstance(url, unicode): url = url.encode('ascii') r = self._doRequest(url, method, body, headers) retry_http_codes = [503, 504] if r.status_code in retry_http_codes and retry: tries = 5 delay = .5 backoff = 2 while r.status_code in retry_http_codes and tries > 0: tries -= 1 time.sleep(delay) delay *= backoff r = self._doRequest(url, method, body, headers) r.raise_for_status() result = {} contentType = r.headers[""Content-Type""] if contentType is None: contentType = ""text/plain"" else: contentType = contentType.split("";"")[0] if contentType.lower() == ""application/json"": try: result[""body""] = json.loads(r.text) except: result[""body""] = r.text else: result[""body""] = r.text result[""status""] = r.status_code result[""resp""] = r result[""content-type""] = contentType return result" 3246,"def get(self, url, headers={}, retry=True): """"""Execute an HTTP GET request and return a dict containing the response and the response status code. Keyword arguments: url -- The path to execute the result against, not including the API version or project ID, with no leading /. Required. headers -- HTTP Headers to send with the request. Can overwrite the defaults. Defaults to {}. retry -- Whether exponential backoff should be employed. Defaults to True. """""" return self.request(url=url, method=""GET"", headers=headers, retry=retry)" 3247,"def post(self, url, body="""", headers={}, retry=True): """"""Execute an HTTP POST request and return a dict containing the response and the response status code. Keyword arguments: url -- The path to execute the result against, not including the API version or project ID, with no leading /. Required. body -- A string or file object to send as the body of the request. Defaults to an empty string. headers -- HTTP Headers to send with the request. Can overwrite the defaults. Defaults to {}. retry -- Whether exponential backoff should be employed. Defaults to True. """""" headers[""Content-Length""] = str(len(body)) return self.request(url=url, method=""POST"", body=body, headers=headers, retry=retry)" 3248,"def patch(self, url, body="""", headers={}, retry=True): """"""Execute an HTTP PATCH request and return a dict containing the response and the response status code. Keyword arguments: url -- The path to execute the result against, not including the API version or project ID, with no leading /. Required. body -- A string or file object to send as the body of the request. Defaults to an empty string. headers -- HTTP Headers to send with the request. Can overwrite the defaults. Defaults to {}. retry -- Whether exponential backoff should be employed. Defaults to True. 
"""""" return self.request(url=url, method=""PATCH"", body=body, headers=headers, retry=retry)" 3249,"def clone(cls, srcpath, destpath): """"""Copy a main repository to a new location."""""" try: os.makedirs(destpath) except OSError as e: if not e.errno == errno.EEXIST: raise cmd = [SVNADMIN, 'dump', '--quiet', '.'] dump = subprocess.Popen( cmd, cwd=srcpath, stdout=subprocess.PIPE, stderr=subprocess.PIPE, ) repo = cls.create(destpath) repo.load(dump.stdout) stderr = dump.stderr.read() dump.stdout.close() dump.stderr.close() dump.wait() if dump.returncode != 0: raise subprocess.CalledProcessError(dump.returncode, cmd, stderr) return repo" 3250,"def create(cls, path): """"""Create a new repository"""""" try: os.makedirs(path) except OSError as e: if not e.errno == errno.EEXIST: raise cmd = [SVNADMIN, 'create', path] subprocess.check_call(cmd) return cls(path)" 3251,"def proplist(self, rev, path=None): """"""List Subversion properties of the path"""""" rev, prefix = self._maprev(rev) if path is None: return self._proplist(str(rev), None) else: path = type(self).cleanPath(_join(prefix, path)) return self._proplist(str(rev), path)" 3252,"def propget(self, prop, rev, path=None): """"""Get Subversion property value of the path"""""" rev, prefix = self._maprev(rev) if path is None: return self._propget(prop, str(rev), None) else: path = type(self).cleanPath(_join(prefix, path)) return self._propget(prop, str(rev), path)" 3253,"def dump( self, stream, progress=None, lower=None, upper=None, incremental=False, deltas=False ): """"""Dump the repository to a dumpfile stream. :param stream: A file stream to which the dumpfile is written :param progress: A file stream to which progress is written :param lower: Must be a numeric version number :param upper: Must be a numeric version number See ``svnadmin help dump`` for details on the other arguments. """""" cmd = [SVNADMIN, 'dump', '.'] if progress is None: cmd.append('-q') if lower is not None: cmd.append('-r') if upper is None: cmd.append(str(int(lower))) else: cmd.append('%d:%d' % (int(lower), int(upper))) if incremental: cmd.append('--incremental') if deltas: cmd.append('--deltas') p = subprocess.Popen(cmd, cwd=self.path, stdout=stream, stderr=progress) p.wait() if p.returncode != 0: raise subprocess.CalledProcessError(p.returncode, cmd)" 3254,"def load( self, stream, progress=None, ignore_uuid=False, force_uuid=False, use_pre_commit_hook=False, use_post_commit_hook=False, parent_dir=None ): """"""Load a dumpfile stream into the repository. :param stream: A file stream from which the dumpfile is read :param progress: A file stream to which progress is written See ``svnadmin help load`` for details on the other arguments. """""" cmd = [SVNADMIN, 'load', '.'] if progress is None: cmd.append('-q') if ignore_uuid: cmd.append('--ignore-uuid') if force_uuid: cmd.append('--force-uuid') if use_pre_commit_hook: cmd.append('--use-pre-commit-hook') if use_post_commit_hook: cmd.append('--use-post-commit-hook') if parent_dir: cmd.extend(['--parent-dir', parent_dir]) p = subprocess.Popen( cmd, cwd=self.path, stdin=stream, stdout=progress, stderr=subprocess.PIPE ) stderr = p.stderr.read() p.stderr.close() p.wait() if p.returncode != 0: raise subprocess.CalledProcessError(p.returncode, cmd, stderr)" 3255,"def temp_file( content=None, suffix='', prefix='tmp', parent_dir=None): """""" Create a temporary file and optionally populate it with content. The file is deleted when the context exits. 
The temporary file is created when entering the context manager and deleted when exiting it. >>> import temporary >>> with temporary.temp_file() as temp_file: ... assert temp_file.exists() >>> assert not temp_file.exists() The user may also supply some content for the file to be populated with: >>> with temporary.temp_file('hello!') as temp_file: ... with temp_file.open() as f: ... assert f.read() == 'hello!' The temporary file can be placed in a custom directory: >>> with temporary.temp_dir() as temp_dir: ... with temporary.temp_file(parent_dir=temp_dir) as temp_file: ... assert temp_file.parent == temp_dir If, for some reason, the user wants to delete the temporary file before exiting the context, that's okay too: >>> with temporary.temp_file() as temp_file: ... temp_file.unlink() """""" binary = isinstance(content, (bytes, bytearray)) parent_dir = parent_dir if parent_dir is None else str(parent_dir) fd, abs_path = tempfile.mkstemp(suffix, prefix, parent_dir, text=False) path = pathlib.Path(abs_path) try: try: if content: os.write(fd, content if binary else content.encode()) finally: os.close(fd) yield path.resolve() finally: with temporary.util.allow_missing_file(): path.unlink()" 3256,"def _emit_no_set_found(environment_name, product_name): """""" writes to std out and logs if no connection string is found for deployment :param environment_name: :param product_name: :return: """""" sys.stdout.write(colorama.Fore.YELLOW + 'No connections found in global config file ' 'in environment: {0} for product: {1}' .format(environment_name, product_name) + colorama.Fore.RESET) sys.stdout.write('\n') logger.warning('No connections found in environment: {0} for product: {1}' .format(environment_name, product_name))" 3257,"def load_content(self): """""" Load the book content """""" # get the toc file from the root file rel_path = self.root_file_url.replace(os.path.basename(self.root_file_url), '') self.toc_file_url = rel_path + self.root_file.find(id=""ncx"")['href'] self.toc_file_soup = bs(self.book_file.read(self.toc_file_url), 'xml') # get the book content from the toc file for n, c in cross(self.toc_file_soup.find_all('navLabel'), self.toc_file_soup.find_all('content')): content_soup = bs(self.book_file.read(rel_path + c.get('src'))) self.content.append({'part_name': c.text, 'source_url': c.get('src'), 'content_source': content_soup, 'content_source_body': content_soup.body, 'content_source_text': content_soup.body.text})" 3258,"def UninstallTrump(RemoveDataTables=True, RemoveOverrides=True, RemoveFailsafes=True): """""" This script removes all tables associated with Trump. It's written for PostgreSQL, but should be very easy to adapt to other databases. 
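For illustration, the table names collected below are concatenated into one batched SQL string and executed in a single call; a simplified sketch of that pattern (the real statements additionally wrap each identifier in double quotes):

    tables = ['_symbols', '_feeds']
    drops = ''.join('DROP TABLE IF EXISTS {} CASCADE;'.format(t) for t in tables)
    # drops == 'DROP TABLE IF EXISTS _symbols CASCADE;DROP TABLE IF EXISTS _feeds CASCADE;'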
"""""" ts = ['_symbols', '_symbol_validity', '_symbol_tags', '_symbol_aliases', '_feeds', '_feed_munging', '_feed_munging_args', '_feed_sourcing', '_feed_validity', '_feed_meta', '_feed_tags', '_feed_handle', '_index_kwargs', '_indicies', '_symbol_handle', '_symboldatadef'] if RemoveOverrides: ts.append('_overrides') if RemoveFailsafes: ts.append('_failsafes') engine = create_engine(ENGINE_STR) if RemoveDataTables: results = engine.execute(""SELECT name FROM _symbols;"") datatables = [row['name'] for row in results] ts = ts + datatables drops = """".join(['DROP TABLE IF EXISTS ""{}"" CASCADE;'.format(t) for t in ts]) engine.execute(drops)" 3259,"def digestInSilico(proteinSequence, cleavageRule='[KR]', missedCleavage=0, removeNtermM=True, minLength=5, maxLength=55): """"""Returns a list of peptide sequences and cleavage information derived from an in silico digestion of a polypeptide. :param proteinSequence: amino acid sequence of the poly peptide to be digested :param cleavageRule: cleavage rule expressed in a regular expression, see :attr:`maspy.constants.expasy_rules` :param missedCleavage: number of allowed missed cleavage sites :param removeNtermM: booo, True to consider also peptides with the N-terminal methionine of the protein removed :param minLength: int, only yield peptides with length >= minLength :param maxLength: int, only yield peptides with length <= maxLength :returns: a list of resulting peptide enries. Protein positions start with ``1`` and end with ``len(proteinSequence``. :: [(peptide amino acid sequence, {'startPos': int, 'endPos': int, 'missedCleavage': int} ), ... ] .. note:: This is a regex example for specifying N-terminal cleavage at lysine sites ``\\w(?=[K])`` """""" passFilter = lambda startPos, endPos: (endPos - startPos >= minLength and endPos - startPos <= maxLength ) _regexCleave = re.finditer(cleavageRule, proteinSequence) cleavagePosList = set(itertools.chain(map(lambda x: x.end(), _regexCleave))) cleavagePosList.add(len(proteinSequence)) cleavagePosList = sorted(list(cleavagePosList)) #Add end of protein as cleavage site if protein doesn't end with specififed #cleavage positions numCleavageSites = len(cleavagePosList) if missedCleavage >= numCleavageSites: missedCleavage = numCleavageSites -1 digestionresults = list() #Generate protein n-terminal peptides after methionine removal if removeNtermM and proteinSequence[0] == 'M': for cleavagePos in range(0, missedCleavage+1): startPos = 1 endPos = cleavagePosList[cleavagePos] if passFilter(startPos, endPos): sequence = proteinSequence[startPos:endPos] info = dict() info['startPos'] = startPos+1 info['endPos'] = endPos info['missedCleavage'] = cleavagePos digestionresults.append((sequence, info)) #Generate protein n-terminal peptides if cleavagePosList[0] != 0: for cleavagePos in range(0, missedCleavage+1): startPos = 0 endPos = cleavagePosList[cleavagePos] if passFilter(startPos, endPos): sequence = proteinSequence[startPos:endPos] info = dict() info['startPos'] = startPos+1 info['endPos'] = endPos info['missedCleavage'] = cleavagePos digestionresults.append((sequence, info)) #Generate all remaining peptides, including the c-terminal peptides lastCleavagePos = 0 while lastCleavagePos < numCleavageSites: for missedCleavage in range(0, missedCleavage+1): nextCleavagePos = lastCleavagePos + missedCleavage + 1 if nextCleavagePos < numCleavageSites: startPos = cleavagePosList[lastCleavagePos] endPos = cleavagePosList[nextCleavagePos] if passFilter(startPos, endPos): sequence = proteinSequence[startPos:endPos] info 
= dict() info['startPos'] = startPos+1 info['endPos'] = endPos info['missedCleavage'] = missedCleavage digestionresults.append((sequence, info)) lastCleavagePos += 1 return digestionresults" 3260,"def calcPeptideMass(peptide, **kwargs): """"""Calculate the mass of a peptide. :param aaMass: A dictionary with the monoisotopic masses of amino acid residues, by default :attr:`maspy.constants.aaMass` :param aaModMass: A dictionary with the monoisotopic mass changes of modications, by default :attr:`maspy.constants.aaModMass` :param elementMass: A dictionary with the masses of chemical elements, by default ``pyteomics.mass.nist_mass`` :param peptide: peptide sequence, modifications have to be written in the format ""[modificationId]"" and ""modificationId"" has to be present in :attr:`maspy.constants.aaModMass` #TODO: change to a more efficient way of calculating the modified mass, by first extracting all present modifications and then looking up their masses. """""" aaMass = kwargs.get('aaMass', maspy.constants.aaMass) aaModMass = kwargs.get('aaModMass', maspy.constants.aaModMass) elementMass = kwargs.get('elementMass', pyteomics.mass.nist_mass) addModMass = float() unmodPeptide = peptide for modId, modMass in viewitems(aaModMass): modSymbol = '[' + modId + ']' numMod = peptide.count(modSymbol) if numMod > 0: unmodPeptide = unmodPeptide.replace(modSymbol, '') addModMass += modMass * numMod if unmodPeptide.find('[') != -1: print(unmodPeptide) raise Exception('The peptide contains modification, ' + 'not present in maspy.constants.aaModMass' ) unmodPeptideMass = sum(aaMass[i] for i in unmodPeptide) unmodPeptideMass += elementMass['H'][0][0]*2 + elementMass['O'][0][0] modPeptideMass = unmodPeptideMass + addModMass return modPeptideMass" 3261,"def removeModifications(peptide): """"""Removes all modifications from a peptide string and return the plain amino acid sequence. :param peptide: peptide sequence, modifications have to be written in the format ""[modificationName]"" :param peptide: str :returns: amino acid sequence of ``peptide`` without any modifications """""" while peptide.find('[') != -1: peptide = peptide.split('[', 1)[0] + peptide.split(']', 1)[1] return peptide" 3262,"def returnModPositions(peptide, indexStart=1, removeModString='UNIMOD:'): """"""Determines the amino acid positions of all present modifications. :param peptide: peptide sequence, modifications have to be written in the format ""[modificationName]"" :param indexStart: returned amino acids positions of the peptide start with this number (first amino acid position = indexStart) :param removeModString: string to remove from the returned modification name :return: {modificationName:[position1, position2, ...], ...} #TODO: adapt removeModString to the new unimod ids in #maspy.constants.aaModComp (""UNIMOD:X"" -> ""u:X"") -> also change unit tests. """""" unidmodPositionDict = dict() while peptide.find('[') != -1: currModification = peptide.split('[')[1].split(']')[0] currPosition = peptide.find('[') - 1 if currPosition == -1: # move n-terminal modifications to first position currPosition = 0 currPosition += indexStart peptide = peptide.replace('['+currModification+']', '', 1) if removeModString: currModification = currModification.replace(removeModString, '') unidmodPositionDict.setdefault(currModification,list()) unidmodPositionDict[currModification].append(currPosition) return unidmodPositionDict" 3263,"def calcMhFromMz(mz, charge): """"""Calculate the MH+ value from mz and charge. 
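The conversion is mh = mz * charge - protonMass * (charge - 1). A short worked example, using an approximate proton mass of 1.00728 Da purely for illustration (the code itself uses maspy.constants.atomicMassProton):

    mz, charge = 500.5, 2
    mh = (mz * charge) - (1.00728 * (charge - 1))   # about 999.993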
:param mz: float, mass to charge ratio (Dalton / charge) :param charge: int, charge state :returns: mass to charge ratio of the mono protonated ion (charge = 1) """""" mh = (mz * charge) - (maspy.constants.atomicMassProton * (charge-1) ) return mh" 3264,"def calcMzFromMh(mh, charge): """"""Calculate the mz value from MH+ and charge. :param mh: float, mass to charge ratio (Dalton / charge) of the mono protonated ion :param charge: int, charge state :returns: mass to charge ratio of the specified charge state """""" mz = (mh + (maspy.constants.atomicMassProton * (charge-1))) / charge return mz" 3265,"def calcMzFromMass(mass, charge): """"""Calculate the mz value of a peptide from its mass and charge. :param mass: float, exact non protonated mass :param charge: int, charge state :returns: mass to charge ratio of the specified charge state """""" mz = (mass + (maspy.constants.atomicMassProton * charge)) / charge return mz" 3266,"def calcMassFromMz(mz, charge): """"""Calculate the mass of a peptide from its mz and charge. :param mz: float, mass to charge ratio (Dalton / charge) :param charge: int, charge state :returns: non protonated mass (charge = 0) """""" mass = (mz - maspy.constants.atomicMassProton) * charge return mass" 3267,"def execute(self, processProtocol, command, env={}, path=None, uid=None, gid=None, usePTY=0, childFDs=None): """"""Execute a process on the remote machine using SSH @param processProtocol: the ProcessProtocol instance to connect @param executable: the executable program to run @param args: the arguments to pass to the process @param env: environment variables to request the remote ssh server to set @param path: the remote path to start the remote process on @param uid: user id or username to connect to the ssh server with @param gid: this is not used for remote ssh processes @param usePTY: wither to request a pty for the process @param childFDs: file descriptors to use for stdin, stdout and stderr """""" sshCommand = (command if isinstance(command, SSHCommand) else SSHCommand(command, self.precursor, path)) commandLine = sshCommand.getCommandLine() # Get connection to ssh server connectionDeferred = self.getConnection(uid) # spawn the remote process connectionDeferred.addCallback(connectProcess, processProtocol, commandLine, env, usePTY, childFDs) return connectionDeferred" 3268,"def _getUserAuthObject(self, user, connection): """"""Get a SSHUserAuthClient object to use for authentication @param user: The username to authenticate for @param connection: The connection service to start after authentication """""" credentials = self._getCredentials(user) userAuthObject = AutomaticUserAuthClient(user, connection, **credentials) return userAuthObject" 3269,"def _verifyHostKey(self, hostKey, fingerprint): """"""Called when ssh transport requests us to verify a given host key. Return a deferred that callback if we accept the key or errback if we decide to reject it. """""" if fingerprint in self.knownHosts: return defer.succeed(True) return defer.fail(UnknownHostKey(hostKey, fingerprint))" 3270,"def yield_once(iterator): """""" Decorator to make an iterator returned by a method yield each result only once. >>> @yield_once ... def generate_list(foo): ... return foo >>> list(generate_list([1, 2, 1])) [1, 2] :param iterator: Any method that returns an iterator :return: An method returning an iterator that yields every result only once at most. 
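Because previously seen results are tracked in a set, the values produced by the wrapped iterator must be hashable; unhashable items raise a TypeError on the membership check, e.g.:

    >>> list(generate_list([[1], [2]]))
    Traceback (most recent call last):
        ...
    TypeError: unhashable type: 'list'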
"""""" @wraps(iterator) def yield_once_generator(*args, **kwargs): yielded = set() for item in iterator(*args, **kwargs): if item not in yielded: yielded.add(item) yield item return yield_once_generator" 3271,"def _to_list(var): """""" Make variable to list. >>> _to_list(None) [] >>> _to_list('whee') ['whee'] >>> _to_list([None]) [None] >>> _to_list((1, 2, 3)) [1, 2, 3] :param var: variable of any type :return: list """""" if isinstance(var, list): return var elif var is None: return [] elif isinstance(var, str) or isinstance(var, dict): # We dont want to make a list out of those via the default constructor return [var] else: try: return list(var) except TypeError: return [var]" 3272,"def arguments_to_lists(function): """""" Decorator for a function that converts all arguments to lists. :param function: target function :return: target function with only lists as parameters """""" def l_function(*args, **kwargs): l_args = [_to_list(arg) for arg in args] l_kwargs = {} for key, value in kwargs.items(): l_kwargs[key] = _to_list(value) return function(*l_args, **l_kwargs) return l_function" 3273,"def get_public_members(obj): """""" Retrieves a list of member-like objects (members or properties) that are publically exposed. :param obj: The object to probe. :return: A list of strings. """""" return {attr: getattr(obj, attr) for attr in dir(obj) if not attr.startswith(""_"") and not hasattr(getattr(obj, attr), '__call__')}" 3274,"def generate_repr(*members): """""" Decorator that binds an auto-generated ``__repr__()`` function to a class. The generated ``__repr__()`` function prints in following format: Note that this decorator modifies the given class in place! :param members: An iterable of member names to include into the representation-string. Providing no members yields to inclusion of all member variables and properties in alphabetical order (except if they start with an underscore). To control the representation of each member, you can also pass a tuple where the first element contains the member to print and the second one the representation function (which defaults to the built-in ``repr()``). Using None as representation function is the same as using ``repr()``. Supported members are fields/variables, properties and getter-like functions (functions that accept no arguments). :raises ValueError: Raised when the passed (member, repr-function)-tuples have not a length of 2. :raises AttributeError: Raised when a given member/attribute was not found in class. :raises TypeError: Raised when a provided member is a bound method that is not a getter-like function (means it must accept no parameters). :return: The class armed with an auto-generated __repr__ function. """""" def decorator(cls): cls.__repr__ = __repr__ return cls if members: # Prepare members list. members_to_print = list(members) for i, member in enumerate(members_to_print): if isinstance(member, tuple): # Check tuple dimensions. length = len(member) if length == 2: members_to_print[i] = (member[0], member[1] if member[1] else repr) else: raise ValueError(""Passed tuple "" + repr(member) + "" needs to be 2-dimensional, but has "" + str(length) + "" dimensions."") else: members_to_print[i] = (member, repr) def __repr__(self): return _construct_repr_string(self, members_to_print) else: def __repr__(self): # Need to fetch member variables every time since they are unknown # until class instantation. 
members_to_print = get_public_members(self) member_repr_list = ((member, repr) for member in sorted(members_to_print, key=str.lower)) return _construct_repr_string(self, member_repr_list) return decorator" 3275,"def generate_eq(*members): """""" Decorator that generates equality and inequality operators for the decorated class. The given members as well as the type of self and other will be taken into account. Note that this decorator modifies the given class in place! :param members: A list of members to compare for equality. """""" def decorator(cls): def eq(self, other): if not isinstance(other, cls): return False return all(getattr(self, member) == getattr(other, member) for member in members) def ne(self, other): return not eq(self, other) cls.__eq__ = eq cls.__ne__ = ne return cls return decorator" 3276,"def generate_ordering(*members): """""" Decorator that generates ordering operators for the decorated class based on the given member names. All ordering except equality functions will raise a TypeError when a comparison with an unrelated class is attempted. (Comparisons with child classes will thus work fine with the capabilities of the base class as python will choose the base classes comparison operator in that case.) Note that this decorator modifies the given class in place! :param members: A list of members to compare, ordered from high priority to low. I.e. if the first member is equal the second will be taken for comparison and so on. If a member is None it is considered smaller than any other value except None. """""" def decorator(cls): def lt(self, other): if not isinstance(other, cls): raise TypeError(""Comparison with unrelated classes is "" ""unsupported."") for member in members: if getattr(self, member) == getattr(other, member): continue if ( getattr(self, member) is None or getattr(other, member) is None): return getattr(self, member) is None return getattr(self, member) < getattr(other, member) return False cls.__lt__ = lt return total_ordering(generate_eq(*members)(cls)) return decorator" 3277,"def enforce_signature(function): """""" Enforces the signature of the function by throwing TypeError's if invalid arguments are provided. The return value is not checked. You can annotate any parameter of your function with the desired type or a tuple of allowed types. If you annotate the function with a value, this value only will be allowed (useful especially for None). Example: >>> @enforce_signature ... def test(arg: bool, another: (int, None)): ... pass ... >>> test(True, 5) >>> test(True, None) Any string value for any parameter e.g. would then trigger a TypeError. :param function: The function to check. """""" argspec = inspect.getfullargspec(function) annotations = argspec.annotations argnames = argspec.args unnamed_annotations = {} for i, arg in enumerate(argnames): if arg in annotations: unnamed_annotations[i] = (annotations[arg], arg) def decorated(*args, **kwargs): for i, annotation in unnamed_annotations.items(): if i < len(args): assert_right_type(args[i], annotation[0], annotation[1]) for argname, argval in kwargs.items(): if argname in annotations: assert_right_type(argval, annotations[argname], argname) return function(*args, **kwargs) return decorated" 3278,"def as_string(self): """"""Get the underlying message object as a string"""""" if self.headers_only: self.msgobj = self._get_content() # We could just use msgobj.as_string() but this is more flexible... we might need it. 
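# Flattening through Generator with maxheaderlen=60 re-folds long header lines
# at roughly 60 characters, which is presumably the extra flexibility meant
# above; a bare msgobj.as_string() would leave header wrapping at its default.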
from email.generator import Generator fp = StringIO() g = Generator(fp, maxheaderlen=60) g.flatten(self.msgobj) text = fp.getvalue() return text" 3279,"def iteritems(self): """"""Present the email headers"""""" for n,v in self.msgobj.__dict__[""_headers""]: yield n.lower(), v return" 3280,"def _set_flag(self, flag): """"""Turns the specified flag on"""""" self.folder._invalidate_cache() # TODO::: turn the flag off when it's already on def replacer(m): return ""%s/%s.%s%s"" % ( joinpath(self.folder.base, self.folder.folder, ""cur""), m.group(""key""), m.group(""hostname""), "":2,%s"" % ( ""%s%s"" % (m.group(""flags""), flag) if m.group(""flags"") \ else flag ) ) newfilename = self.msgpathre.sub(replacer, self.filename) self.filesystem.rename(self.filename, newfilename) self.filename = newfilename" 3281,"def _get_message(self, key, since=None): """"""Return the MdMessage object for the key. The object is either returned from the cache in the store or made, cached and then returned. If 'since' is passed in the modification time of the file is checked and the message is only returned if the mtime is since the specified time. If the 'since' check fails, None is returned. 'since' must be seconds since epoch. """""" stored = self.store[key] if isinstance(stored, dict): filename = stored[""path""] folder = stored[""folder""] if since and since > 0.0: st = stat(filename) if st.st_mtime < since: return None stored = MdMessage( key, filename = filename, folder = folder, filesystem = folder.filesystem ) self.store[key] = stored else: if since and since > 0.0: st = stat(stored.filename) if st.st_mtime < since: return None return stored" 3282,"def _foldername(self, additionalpath=""""): """"""Dot decorate a folder name."""""" if not self._foldername_cache.get(additionalpath): fn = joinpath(self.base, self.folder, additionalpath) \ if not self.is_subfolder \ else joinpath(self.base, "".%s"" % self.folder, additionalpath) self._foldername_cache[additionalpath] = fn return self._foldername_cache[additionalpath]" 3283,"def folders(self): """"""Return a map of the subfolder objects for this folder. This is a snapshot of the folder list at the time the call was made. It does not update over time. The map contains MdFolder objects: maildir.folders()[""Sent""] might retrieve the folder .Sent from the maildir. """""" entrys = self.filesystem.listdir(abspath(self._foldername())) regex = re.compile(""\\..*"") just_dirs = dict([(d,d) for d in entrys if regex.match(d)]) folder = self._foldername() filesystem = self.filesystem class FolderList(object): def __iter__(self): dirs = list(just_dirs.keys()) dirs.sort() dirs.reverse() for dn in dirs: yield MdFolder( dn[1:], base=folder, subfolder=True, filesystem=filesystem ) return def __list__(self): return [dn[1:] for dn in just_dirs] def __contains__(self, name): return just_dirs.__contains__("".%s"" % name) def __getitem__(self, name): return MdFolder( just_dirs["".%s"" % name][1:], base=folder, subfolder=True, filesystem=filesystem ) f = FolderList() return f" 3284,"def move(self, key, folder): """"""Move the specified key to folder. folder must be an MdFolder instance. MdFolders can be obtained through the 'folders' method call. 
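A small usage sketch (the folder name and message key are placeholders):

    sent = maildir.folders()['Sent']
    maildir.move(key, sent)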
"""""" # Basically this is a sophisticated __delitem__ # We need the path so we can make it in the new folder path, host, flags = self._exists(key) self._invalidate_cache() # Now, move the message file to the new folder newpath = joinpath( folder.base, folder.get_name(), ""cur"", # we should probably move it to new if it's in new basename(path) ) self.filesystem.rename(path, newpath) # And update the caches in the new folder folder._invalidate_cache()" 3285,"def _muaprocessnew(self): """"""Moves all 'new' files into cur, correctly flagging"""""" foldername = self._foldername(""new"") files = self.filesystem.listdir(foldername) for filename in files: if filename == """": continue curfilename = self._foldername(joinpath(""new"", filename)) newfilename = joinpath( self._cur, ""%s:2,%s"" % (filename, """") ) self.filesystem.rename(curfilename, newfilename)" 3286,"def _exists(self, key): """"""Find a key in a particular section Searches through all the files and looks for matches with a regex. """""" filecache, keycache = self._fileslist() msg = keycache.get(key, None) if msg: path = msg.filename meta = filecache[path] return path, meta[""hostname""], meta.get(""flags"", """") raise KeyError(""not found %s"" % key)" 3287,"def show_slices(data3d, contour=None, seeds=None, axis=0, slice_step=None, shape=None, show=True, flipH=False, flipV=False, first_slice_offset=0, first_slice_offset_to_see_seed_with_label=None, slice_number=None ): """""" Show slices as tiled image :param data3d: Input data :param contour: Data for contouring :param seeds: Seed data :param axis: Axis for sliceing :param slice_step: Show each ""slice_step""-th slice, can be float :param shape: tuple(vertical_tiles_number, horisontal_tiles_number), set shape of output tiled image. slice_step is estimated if it is not set explicitly :param first_slice_offset: set offset of first slice :param first_slice_offset_to_see_seed_with_label: find offset to see slice with seed with defined label :param slice_number: int, Number of showed slices. Overwrites shape and slice_step. """""" if slice_number is not None: slice_step = data3d.shape[axis] / slice_number # odhad slice_step, neni li zadan # slice_step estimation # TODO make precise estimation (use np.linspace to indexing?) 
if slice_step is None: if shape is None: slice_step = 1 else: slice_step = ((data3d.shape[axis] - first_slice_offset ) / float(np.prod(shape))) if first_slice_offset_to_see_seed_with_label is not None: if seeds is not None: inds = np.nonzero(seeds==first_slice_offset_to_see_seed_with_label) # print(inds) # take first one with defined seed # ind = inds[axis][0] # take most used index ind = np.median(inds[axis]) first_slice_offset = ind % slice_step data3d = _import_data(data3d, axis=axis, slice_step=slice_step, first_slice_offset=first_slice_offset) contour = _import_data(contour, axis=axis, slice_step=slice_step, first_slice_offset=first_slice_offset) seeds = _import_data(seeds, axis=axis, slice_step=slice_step, first_slice_offset=first_slice_offset) number_of_slices = data3d.shape[axis] # square image # nn = int(math.ceil(number_of_slices ** 0.5)) # sh = [nn, nn] # 4:3 image meta_shape = shape if meta_shape is None: na = int(math.ceil(number_of_slices * 16.0 / 9.0) ** 0.5) nb = int(math.ceil(float(number_of_slices) / na)) meta_shape = [nb, na] dsh = __get_slice(data3d, 0, axis).shape slimsh = [int(dsh[0] * meta_shape[0]), int(dsh[1] * meta_shape[1])] slim = np.zeros(slimsh, dtype=data3d.dtype) slco = None slse = None if seeds is not None: slse = np.zeros(slimsh, dtype=seeds.dtype) if contour is not None: slco = np.zeros(slimsh, dtype=contour.dtype) # slse = # f, axarr = plt.subplots(sh[0], sh[1]) for i in range(0, number_of_slices): cont = None seeds2d = None im2d = __get_slice(data3d, i, axis, flipH=flipH, flipV=flipV) if contour is not None: cont = __get_slice(contour, i, axis, flipH=flipH, flipV=flipV) slco = __put_slice_in_slim(slco, cont, meta_shape, i) if seeds is not None: seeds2d = __get_slice(seeds, i, axis, flipH=flipH, flipV=flipV) slse = __put_slice_in_slim(slse, seeds2d, meta_shape, i) # plt.axis('off') # plt.subplot(sh[0], sh[1], i+1) # plt.subplots_adjust(wspace=0, hspace=0) slim = __put_slice_in_slim(slim, im2d, meta_shape, i) # show_slice(im2d, cont, seeds2d) show_slice(slim, slco, slse) if show: plt.show()" 3288,"def __get_slice(data, slice_number, axis=0, flipH=False, flipV=False): """""" :param data: :param slice_number: :param axis: :param flipV: vertical flip :param flipH: horizontal flip :return: """""" if axis == 0: data2d = data[slice_number, :, :] elif axis == 1: data2d = data[:, slice_number, :] elif axis == 2: data2d = data[:, :, slice_number] else: logger.error(""axis number error"") print(""axis number error"") return None if flipV: if data2d is not None: data2d = data2d[-1:0:-1,:] if flipH: if data2d is not None: data2d = data2d[:, -1:0:-1] return data2d" 3289,"def __put_slice_in_slim(slim, dataim, sh, i): """""" put one small slice as a tile in a big image """""" a, b = np.unravel_index(int(i), sh) st0 = int(dataim.shape[0] * a) st1 = int(dataim.shape[1] * b) sp0 = int(st0 + dataim.shape[0]) sp1 = int(st1 + dataim.shape[1]) slim[ st0:sp0, st1:sp1 ] = dataim return slim" 3290,"def show_slice(data2d, contour2d=None, seeds2d=None): """""" :param data2d: :param contour2d: :param seeds2d: :return: """""" import copy as cp # Show results colormap = cp.copy(plt.cm.get_cmap('brg')) colormap._init() colormap._lut[:1:, 3] = 0 plt.imshow(data2d, cmap='gray', interpolation='none') if contour2d is not None: plt.contour(contour2d, levels=[0.5, 1.5, 2.5]) if seeds2d is not None: # Show results colormap = copy.copy(plt.cm.get_cmap('Paired')) # colormap = copy.copy(plt.cm.get_cmap('gist_rainbow')) colormap._init() colormap._lut[0, 3] = 0 tmp0 = copy.copy(colormap._lut[:,0]) 
tmp1 = copy.copy(colormap._lut[:,1]) tmp2 = copy.copy(colormap._lut[:,2]) colormap._lut[:, 0] = sigmoid(tmp0, 0.5, 5) colormap._lut[:, 1] = sigmoid(tmp1, 0.5, 5) colormap._lut[:, 2] = 0# sigmoid(tmp2, 0.5, 5) # seed 4 colormap._lut[140:220:, 1] = 0.7# sigmoid(tmp2, 0.5, 5) colormap._lut[140:220:, 0] = 0.2# sigmoid(tmp2, 0.5, 5) # seed 2 colormap._lut[40:120:, 1] = 1.# sigmoid(tmp2, 0.5, 5) colormap._lut[40:120:, 0] = 0.1# sigmoid(tmp2, 0.5, 5) # seed 2 colormap._lut[120:150:, 0] = 1.# sigmoid(tmp2, 0.5, 5) colormap._lut[120:150:, 1] = 0.1# sigmoid(tmp2, 0.5, 5) # my colors # colormap._lut[1,:] = [.0,.1,.0,1] # colormap._lut[2,:] = [.1,.1,.0,1] # colormap._lut[3,:] = [.1,.1,.1,1] # colormap._lut[4,:] = [.3,.3,.3,1] plt.imshow(seeds2d, cmap=colormap, interpolation='none')" 3291,"def _import_data(data, axis, slice_step, first_slice_offset=0): """""" import ndarray or SimpleITK data """""" try: import SimpleITK as sitk if type(data) is sitk.SimpleITK.Image: data = sitk.GetArrayFromImage(data) except: pass data = __select_slices(data, axis, slice_step, first_slice_offset=first_slice_offset) return data" 3292,"def generate_data(shp=[16, 20, 24]): """""" Generating data """""" x = np.ones(shp) # inserting box x[4:-4, 6:-2, 1:-6] = -1 x_noisy = x + np.random.normal(0, 0.6, size=x.shape) return x_noisy" 3293,"def index_to_coords(index, shape): '''convert index to coordinates given the shape''' coords = [] for i in xrange(1, len(shape)): divisor = int(np.product(shape[i:])) value = index // divisor coords.append(value) index -= value * divisor coords.append(index) return tuple(coords)" 3294,"def slices(img, shape=[3, 4]): """""" create tiled image with multiple slices :param img: :param shape: :return: """""" sh = np.asarray(shape) i_max = np.prod(sh) allimg = np.zeros(img.shape[-2:] * sh) for i in range(0, i_max): # i = 0 islice = round((img.shape[0] / float(i_max)) * i) # print(islice) imgi = img[islice, :, :] coords = index_to_coords(i, sh) aic = np.asarray(img.shape[-2:]) * coords allimg[aic[0]:aic[0] + imgi.shape[-2], aic[1]:aic[1] + imgi.shape[-1]] = imgi # plt.imshow(imgi) # print(imgi.shape) # print(img.shape) return allimg" 3295,"def sed2(img, contour=None, shape=[3, 4]): """""" plot tiled image of multiple slices :param img: :param contour: :param shape: :return: """""" """""" :param img: :param contour: :param shape: :return: """""" plt.imshow(slices(img, shape), cmap='gray') if contour is not None: plt.contour(slices(contour, shape))" 3296,"def set_window(self, windowC, windowW): """""" Sets visualization window :param windowC: window center :param windowW: window width :return: """""" if not (windowW and windowC): windowW = np.max(self.img) - np.min(self.img) windowC = (np.max(self.img) + np.min(self.img)) / 2.0 self.imgmax = windowC + (windowW / 2) self.imgmin = windowC - (windowW / 2) self.windowC = windowC self.windowW = windowW" 3297,"def rotate_to_zaxis(self, new_zaxis): """""" rotate image to selected axis :param new_zaxis: :return: """""" img = self._rotate_end(self.img, self.zaxis) seeds = self._rotate_end(self.seeds, self.zaxis) contour = self._rotate_end(self.contour, self.zaxis) # Rotate data in depndecy on zaxispyplot self.img = self._rotate_start(img, new_zaxis) self.seeds = self._rotate_start(seeds, new_zaxis) self.contour = self._rotate_start(contour, new_zaxis) self.zaxis = new_zaxis # import ipdb # ipdb.set_trace() # self.actual_slice_slider.valmax = self.img.shape[2] - 1 self.actual_slice = 0 self.rotated_back = False # update slicer 
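# The slice slider is rebuilt from scratch below, presumably because just
# assigning a new valmax (see the commented-out line above) would not redraw
# the widget; the old axes is removed and a fresh Slider with the new maximum
# is created and re-wired to sliceslider_update.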
self.fig.delaxes(self.ax_actual_slice) self.ax_actual_slice.cla() del(self.actual_slice_slider) self.fig.add_axes(self.ax_actual_slice) self.actual_slice_slider = Slider(self.ax_actual_slice, 'Slice', 0, self.img.shape[2] - 1, valinit=0) self.actual_slice_slider.on_changed(self.sliceslider_update) self.update_slice()" 3298,"def __flip(self, sliceimg): """""" Flip if asked in self.flipV or self.flipH :param sliceimg: one image slice :return: flipp """""" if self.flipH: sliceimg = sliceimg[:, -1:0:-1] if self.flipV: sliceimg = sliceimg [-1:0:-1,:] return sliceimg" 3299,"def on_scroll(self, event): ''' mouse wheel is used for setting slider value''' if event.button == 'up': self.next_slice() if event.button == 'down': self.prev_slice() self.actual_slice_slider.set_val(self.actual_slice)" 3300,"def on_press(self, event): 'on but-ton press we will see if the mouse is over us and store data' if event.inaxes != self.ax: return # contains, attrd = self.rect.contains(event) # if not contains: return # print('event contains', self.rect.xy) # x0, y0 = self.rect.xy self.press = [event.xdata], [event.ydata], event.button" 3301,"def on_motion(self, event): 'on motion we will move the rect if the mouse is over us' if self.press is None: return if event.inaxes != self.ax: return # print(event.inaxes) x0, y0, btn = self.press x0.append(event.xdata) y0.append(event.ydata)" 3302,"def on_release(self, event): 'on release we reset the press data' if self.press is None: return # print(self.press) x0, y0, btn = self.press if btn == 1: color = 'r' elif btn == 2: color = 'b' # noqa # plt.axes(self.ax) # plt.plot(x0, y0) # button Mapping btn = self.button_map[btn] self.set_seeds(y0, x0, self.actual_slice, btn) # self.fig.canvas.draw() # pdb.set_trace(); self.press = None self.update_slice()" 3303,"def get_seed_sub(self, label): """""" Return list of all seeds with specific label """""" sx, sy, sz = np.nonzero(self.seeds == label) return sx, sy, sz" 3304,"def find(self, instance_id): """""" find an instance Create a new instance and populate it with data stored if it exists. Args: instance_id (str): UUID of the instance Returns: AtlasServiceInstance.Instance: An instance """""" instance = AtlasServiceInstance.Instance(instance_id, self.backend) self.backend.storage.populate(instance) return instance" 3305,"def create(self, instance, parameters, existing): """""" Create the instance Args: instance (AtlasServiceInstance.Instance): Existing or New instance parameters (dict): Parameters for the instance existing (bool): Create an instance on an existing Atlas cluster Returns: ProvisionedServiceSpec: Status Raises: ErrInstanceAlreadyExists: If instance exists but with different parameters ErrClusterNotFound: Cluster does not exist """""" if not instance.isProvisioned(): # Set parameters instance.parameters = parameters # Existing cluster if existing and not self.backend.atlas.Clusters.is_existing_cluster(instance.parameters[self.backend.config.PARAMETER_CLUSTER]): # We need to use an existing cluster that is not available ! raise ErrClusterNotFound(instance.parameters[self.backend.config.PARAMETER_CLUSTER]) elif not existing: # We need to create a new cluster # We should not reach this code because the AtlasBroker.provision should # raise an ErrPlanUnsupported before. 
raise NotImplementedError() result = self.backend.storage.store(instance) # Provision done return ProvisionedServiceSpec(ProvisionState.SUCCESSFUL_CREATED, """", str(result)) elif instance.parameters == parameters: # Identical so nothing to do return ProvisionedServiceSpec(ProvisionState.IDENTICAL_ALREADY_EXISTS, """", ""duplicate"") else: # Different parameters ... raise ErrInstanceAlreadyExists()" 3306,"def delete(self, instance): """"""Delete the instance Args: instance (AtlasServiceInstance.Instance): an existing instance Returns: DeprovisionServiceSpec: Status """""" #TODO: Really drop the database based on a policy set in `instance.parameters`. # # We need : # - Set a policy in parameters of the instance (eg: policy-on-delete : retain|drop => default to retain) # - to check that the database name `instance.get_dbname()` is not in use by another instance (shared database) # - credential on the Atlas cluster `instance.get_cluster()` to drop the database # self.backend.storage.remove(instance) return DeprovisionServiceSpec(False, ""done"")" 3307,"def push(self, item): '''Push the value item onto the heap, maintaining the heap invariant. If the item is not hashable, a TypeError is raised. ''' hash(item) heapq.heappush(self._items, item)" 3308,"def format_field_by_match(self, value, match): """"""Formats a field by a Regex match of the format spec pattern."""""" groups = match.groups() fill, align, sign, sharp, zero, width, comma, prec, type_ = groups if not comma and not prec and type_ not in list('fF%'): return None if math.isnan(value) or math.isinf(value): return None locale = self.numeric_locale # Format number value. prefix = get_prefix(sign) if type_ == 'd': if prec is not None: raise ValueError('precision not allowed in ' 'integer format specifier') string = format_number(value, 0, prefix, locale) elif type_ in 'fF%': format_ = format_percent if type_ == '%' else format_number string = format_(value, int(prec or DEFAULT_PREC), prefix, locale) else: # Don't handle otherwise. return None if not comma: # Formatted number always contains group symbols. # Remove the symbols if not required. string = remove_group_symbols(string, locale) if not (fill or align or zero or width): return string # Fix a layout. 
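# Only the layout parts of the original spec (fill, align, zero flag, width)
# are reapplied to the already-localized string; for example
# format('1,234.57', '*>12') gives '****1,234.57'.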
spec = ''.join([fill or u'', align or u'>', zero or u'', width or u'']) return format(string, spec)" 3309,"def stdout_encode(u, default='utf-8'): """""" Encodes a given string with the proper standard out encoding If sys.stdout.encoding isn't specified, it this defaults to @default @default: default encoding -> #str with standard out encoding """""" # from http://stackoverflow.com/questions/3627793/best-output-type-and- # encoding-practices-for-repr-functions encoding = sys.stdout.encoding or default return u.encode(encoding, ""replace"").decode(encoding, ""replace"")" 3310,"def get_terminal_width(): """""" -> #int width of the terminal window """""" # http://www.brandonrubin.me/2014/03/18/python-snippet-get-terminal-width/ command = ['tput', 'cols'] try: width = int(subprocess.check_output(command)) except OSError as e: print( ""Invalid Command '{0}': exit status ({1})"".format( command[0], e.errno)) except subprocess.CalledProcessError as e: print( ""'{0}' returned non-zero exit status: ({1})"".format( command, e.returncode)) else: return width" 3311,"def gen_rand_str(*size, use=None, keyspace=None): """""" Generates a random string using random module specified in @use within the @keyspace @*size: #int size range for the length of the string @use: the random module to use @keyspace: #str chars allowed in the random string .. from redis_structures.debug import gen_rand_str gen_rand_str() # -> 'PRCpAq' gen_rand_str(1, 2) # -> 'Y' gen_rand_str(12, keyspace=""abcdefg"") # -> 'gaaacffbedf' .. """""" keyspace = keyspace or (string.ascii_letters + string.digits) keyspace = [char for char in keyspace] use = use or np.random if size: size = size if len(size) == 2 else (size[0], size[0] + 1) else: size = (6, 7) return ''.join( use.choice(keyspace) for _ in range(use.randint(*size)))" 3312,"def rand_readable(*size, use=None, density=6): """""" Generates a random string with readable characters using random module specified in @use @*size: #int size range for the length of the string @use: the random module to use @density: how often to include a vowel, you can expect a vowel about once every (density) nth character .. from redis_structures.debug import rand_readable rand_readable() # -> 'hyiaqk' rand_readable(15, 20) # -> 'oqspyywvhifsaikiaoi' rand_readable(15, 20, density=1) # -> 'oeuiueioieeioeeeue' rand_readable(15, 20, density=15) # -> 'ktgjabwdqhgeanh' .. """""" use = use or np.random keyspace = [c for c in string.ascii_lowercase if c != ""l""] vowels = (""a"", ""e"", ""i"", ""o"", ""u"") def use_vowel(density): not use.randint(0, density) if size: size = size if len(size) == 2 else (size[0]-1, size[0]) else: size = (6, 7) return ''.join( use.choice(vowels if use_vowel(density) else keyspace) for _ in range(use.randint(*size)))" 3313,"def get_parent_obj(obj): """""" Gets the name of the object containing @obj and returns as a string @obj: any python object -> #str parent object name or None .. from redis_structures.debug import get_parent_obj get_parent_obj(get_parent_obj) # -> .. 
"""""" try: cls = get_class_that_defined_method(obj) if cls and cls != obj: return cls except AttributeError: pass if hasattr(obj, '__module__') and obj.__module__: try: module = importlib.import_module(obj.__module__) objname = get_obj_name(obj).split(""."") owner = getattr(module, objname[-2]) return getattr(owner, objname[-1]) except Exception: try: return module except Exception: pass try: assert hasattr(obj, '__qualname__') or hasattr(obj, '__name__') objname = obj.__qualname__ if hasattr(obj, '__qualname__') \ else obj.__name__ objname = objname.split(""."") assert len(objname) > 1 return locate(""."".join(objname[:-1])) except Exception: try: module = importlib.import_module(""."".join(objname[:-1])) return module except Exception: pass return None" 3314,"def get_obj_name(obj, full=True): """""" Gets the #str name of @obj @obj: any python object @full: #bool returns with parent name as well if True -> #str object name .. from redis_structures.debug import get_parent_obj get_obj_name(get_obj_name) # -> 'get_obj_name' get_obj_name(redis_structures.debug.Timer) # -> 'Timer' .. """""" has_name_attr = hasattr(obj, '__name__') if has_name_attr and obj.__name__ == """": try: src = whitespace_sub("""", inspect.getsource(obj))\ .replace(""\n"", ""; "").strip("" <>"") except OSError: src = obj.__name__ return lambda_sub("""", src) if hasattr(obj, '__qualname__') and obj.__qualname__: return obj.__qualname__.split(""."")[-1] elif has_name_attr and obj.__name__: return obj.__name__.split(""."")[-1] elif hasattr(obj, '__class__'): return str(obj.__class__.__name__).strip(""<>"") else: return str(obj.__repr__())" 3315,"def format(self): """""" Formats the __repr__ string -> #str containing __repr__ output """""" _bold = bold if not self.pretty: _bold = lambda x: x # Attach memory address and return _attrs = self._format_attrs() self.data = ""<{}.{}({}){}>{}"".format( self.obj.__module__ if hasattr(self.obj, ""__module__"") \ else ""__main__"", _bold(self.obj.__class__.__name__), _attrs, "":{}"".format(hex(id(self.obj))) if self.address else """", _break+self.supplemental if self.supplemental else """") return stdout_encode(self.data)" 3316,"def randstr(self): """""" -> #str result of :func:gen_rand_str """""" return gen_rand_str( 2, 10, use=self.random, keyspace=list(self.keyspace))" 3317,"def set(self, size=1000): """""" Creates a random #set @size: #int number of random values to include in the set -> random #set """""" get_val = lambda: self._map_type() return set(get_val() for x in range(size))" 3318,"def list(self, size=1000, tree_depth=1): """""" Creates a random #list @size: #int number of random values to include in each @tree_depth @tree_depth: #int dict tree dimensions size, i.e. 
1=|[value1, value2]| 2=|[[value1, value2], [value1, value2]]| -> random #list """""" if not tree_depth: return self._map_type() return list(self.deque(size, tree_depth-1) for x in range(size))" 3319,"def pretty_print(self, obj=None): """""" Formats and prints @obj or :prop:obj @obj: the object you'd like to prettify """""" print(self.pretty(obj if obj is not None else self.obj))" 3320,"def array(self): """""" Returns :prop:intervals as a numpy array, caches -> :class:numpy.array """""" if self._intervals_len: if self._array_len != self._intervals_len: if not self._array_len: self._array = np.array(self.intervals) \ if hasattr(np, 'array') else self.intervals else: self._array = np.concatenate(( self._array, self.intervals), axis=0) \ if hasattr(np, 'concatenate') else \ (self._array + self.intervals) self._array_len += len(self.intervals) self.intervals = [] return self._array return []" 3321,"def reset(self): """""" Resets the time intervals """""" self._start = 0 self._first_start = 0 self._stop = time.perf_counter() self._array = None self._array_len = 0 self.intervals = [] self._intervals_len = 0" 3322,"def time(self, intervals=1, *args, _show_progress=True, _print=True, _collect_garbage=False, **kwargs): """""" Measures the execution time of :prop:_callables for @intervals @intervals: #int number of intervals to measure the execution time of the function for @*args: arguments to pass to the callable being timed @**kwargs: arguments to pass to the callable being timed @_show_progress: #bool whether or not to print a progress bar @_print: #bool whether or not to print the results of the timing @_collect_garbage: #bool whether or not to garbage collect while timing @_quiet: #bool whether or not to disable the print() function's ability to output to terminal during the timing -> #tuple of :class:Timer :prop:results of timing """""" self.reset() self.num_intervals = intervals for func in self.progress(self._callables): try: #: Don't ruin all timings if just one doesn't work t = Timer( func, _precision=self.precision, _parent_progressbar=self.progress) t.time( intervals, *args, _print=False, _show_progress=_show_progress, _collect_garbage=_collect_garbage, **kwargs) except Exception as e: print(RuntimeWarning( ""{} with {}"".format(colorize( ""{} failed"".format(Look.pretty_objname( func, color=""yellow"")), ""yellow""), repr(e)))) self._callable_results.append(t) self.progress.update() self.info(_print=_print) return self.results" 3323,"def read(filename): """""" Read a file relative to setup.py location. """""" import os here = os.path.dirname(os.path.abspath(__file__)) with open(os.path.join(here, filename)) as fd: return fd.read()" 3324,"def find_version(filename): """""" Find package version in file. """""" import re content = read(filename) version_match = re.search( r""^__version__ = ['\""]([^'\""]*)['\""]"", content, re.M ) if version_match: return version_match.group(1) raise RuntimeError('Unable to find version string.')" 3325,"def find_requirements(filename): """""" Find requirements in file. 
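A line is kept only when its first character is an ASCII letter, so comments, blank lines and pip options such as '-r base.txt' or '-e .' are skipped. An illustrative sketch of the same filter over hypothetical file contents:

    import string
    content = '# pinned\nrequests>=2.0\n\n-r base.txt\nsix\n'
    kept = [line.strip() for line in content.splitlines()
            if line.strip() and line.strip()[:1] in string.ascii_letters]
    # kept == ['requests>=2.0', 'six']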
"""""" import string content = read(filename) requirements = [] for line in content.splitlines(): line = line.strip() if line and line[:1] in string.ascii_letters: requirements.append(line) return requirements" 3326,"def generate_uuid(basedata=None): """""" Provides a _random_ UUID with no input, or a UUID4-format MD5 checksum of any input data provided """""" if basedata is None: return str(uuid.uuid4()) elif isinstance(basedata, str): checksum = hashlib.md5(basedata).hexdigest() return '%8s-%4s-%4s-%4s-%12s' % ( checksum[0:8], checksum[8:12], checksum[12:16], checksum[16:20], checksum[20:32])" 3327,"def from_unix(cls, seconds, milliseconds=0): """""" Produce a full |datetime.datetime| object from a Unix timestamp """""" base = list(time.gmtime(seconds))[0:6] base.append(milliseconds * 1000) # microseconds return cls(*base)" 3328,"def to_unix(cls, timestamp): """""" Wrapper over time module to produce Unix epoch time as a float """""" if not isinstance(timestamp, datetime.datetime): raise TypeError('Time.milliseconds expects a datetime object') base = time.mktime(timestamp.timetuple()) return base" 3329,"def milliseconds_offset(cls, timestamp, now=None): """""" Offset time (in milliseconds) from a |datetime.datetime| object to now """""" if isinstance(timestamp, (int, float)): base = timestamp else: base = cls.to_unix(timestamp) base += (timestamp.microsecond / 1000000) if now is None: now = time.time() return (now - base) * 1000" 3330,"def fixUTF8(cls, data): # Ensure proper encoding for UA's servers... """""" Convert all strings to UTF-8 """""" for key in data: if isinstance(data[key], str): data[key] = data[key].encode('utf-8') return data" 3331,"def alias(cls, typemap, base, *names): """""" Declare an alternate (humane) name for a measurement protocol parameter """""" cls.parameter_alias[base] = (typemap, base) for i in names: cls.parameter_alias[i] = (typemap, base)" 3332,"def consume_options(cls, data, hittype, args): """""" Interpret sequential arguments related to known hittypes based on declared structures """""" opt_position = 0 data['t'] = hittype # integrate hit type parameter if hittype in cls.option_sequence: for expected_type, optname in cls.option_sequence[hittype]: if opt_position < len(args) and isinstance(args[opt_position], expected_type): data[optname] = args[opt_position] opt_position += 1" 3333,"def hittime(cls, timestamp=None, age=None, milliseconds=None): """""" Returns an integer represeting the milliseconds offset for a given hit (relative to now) """""" if isinstance(timestamp, (int, float)): return int(Time.milliseconds_offset(Time.from_unix(timestamp, milliseconds=milliseconds))) if isinstance(timestamp, datetime.datetime): return int(Time.milliseconds_offset(timestamp)) if isinstance(age, (int, float)): return int(age * 1000) + (milliseconds or 0)" 3334,"def set_timestamp(self, data): """""" Interpret time-related options, apply queue-time parameter as needed """""" if 'hittime' in data: # an absolute timestamp data['qt'] = self.hittime(timestamp=data.pop('hittime', None)) if 'hitage' in data: # a relative age (in seconds) data['qt'] = self.hittime(age=data.pop('hitage', None))" 3335,"async def send(self, hittype, *args, **data): """""" Transmit HTTP requests to Google Analytics using the measurement protocol """""" if hittype not in self.valid_hittypes: raise KeyError('Unsupported Universal Analytics Hit Type: {0}'.format(repr(hittype))) self.set_timestamp(data) self.consume_options(data, hittype, args) for item in args: # process dictionary-object 
arguments of transcient data if isinstance(item, dict): for key, val in self.payload(item): data[key] = val for k, v in self.params.items(): # update only absent parameters if k not in data: data[k] = v data = dict(self.payload(data)) if self.hash_client_id: data['cid'] = generate_uuid(data['cid']) # Transmit the hit to Google... await self.http.send(data)" 3336,"def p_null_assignment(self, t): '''null_assignment : IDENT EQ NULL''' self.accu.add(Term('obs_vlabel', [self.name,""gen(\""""+t[1]+""\"")"",""0""]))" 3337,"def p_plus_assignment(self, t): '''plus_assignment : IDENT EQ PLUS''' self.accu.add(Term('obs_vlabel', [self.name,""gen(\""""+t[1]+""\"")"",""1""]))" 3338,"def p_minus_assignment(self, t): '''minus_assignment : IDENT EQ MINUS''' self.accu.add(Term('obs_vlabel', [self.name,""gen(\""""+t[1]+""\"")"",""-1""]))" 3339,"def p_notplus_assignment(self, t): '''notplus_assignment : IDENT EQ NOTPLUS''' self.accu.add(Term('obs_vlabel', [self.name,""gen(\""""+t[1]+""\"")"",""notPlus""]))" 3340,"def p_notminus_assignment(self, t): '''notminus_assignment : IDENT EQ NOTMINUS''' self.accu.add(Term('obs_vlabel', [self.name,""gen(\""""+t[1]+""\"")"",""notMinus""]))" 3341,"def p_input_assignment(self, t): '''input_assignment : IDENT EQ INPUT''' self.accu.add(Term('input', [self.name,""gen(\""""+t[1]+""\"")""]))" 3342,"def p_min_assignment(self, t): '''min_assignment : IDENT EQ MIN''' self.accu.add(Term('ismin', [self.name,""gen(\""""+t[1]+""\"")""]))" 3343,"def p_max_assignment(self, t): '''max_assignment : IDENT EQ MAX''' self.accu.add(Term('ismax', [self.name,""gen(\""""+t[1]+""\"")""]))" 3344,"def get_service_version(self, service_id, mode='production', version='default'): ''' get_service_version(self, service_id, mode='production', version='default') | Get a specific version details of a given service. Opereto will try to fetch the requested service version. If not found, it will return the default production version. The ""actual_version"" field of the returned JSON indicates what version of the service is returned. If the actual version is null, it means that this service does not have any version at all. To make it operational, you will have to import or upload a default version. :Parameters: * *service_id* (`string`) -- Identifier of an existing service * *mode* (`string`) -- development/production. Default is production * *version* (`string`) -- version of the service (""default"" is the default. :return: json service version details :Example: .. code-block:: python service_version = opereto_client.get_service_version(serviceId, mode='development', version='111') ''' return self._call_rest_api('get', '/services/'+service_id+'/'+mode+'/'+version, error='Failed to fetch service information')" 3345,"def verify_service(self, service_id, specification=None, description=None, agent_mapping=None): ''' verify_service(self, service_id, specification=None, description=None, agent_mapping=None) | Verifies validity of service yaml :Parameters: * *service_id* (`string`) -- Identifier of an existing service * *specification* (`string`) -- service specification yaml * *description* (`string`) -- service description written in text or markdown style * *agent_mapping* (`string`) -- agents mapping specification :return: json service version details :Example: .. 
code-block:: python spec = { ""type"": ""action"", ""cmd"": ""python -u run.py"", ""timeout"": 600, ""item_properties"": [ {""key"": ""key1"", ""type"": ""text"", ""value"": ""value1"", ""direction"": ""input""}, {""key"": ""key2"", ""type"": ""boolean"", ""value"": True, ""direction"": ""input""} ] } if opereto_client.verify_service ('hello_world', specification=spec)['errors'] == []: result = True ''' request_data = {'id': service_id} if specification: request_data['spec']=specification if description: request_data['description']=description if agent_mapping: request_data['agents']=agent_mapping return self._call_rest_api('post', '/services/verify', data=request_data, error='Service [%s] verification failed'%service_id)" 3346,"def modify_service(self, service_id, type): ''' modify_service(self, service_id, type) | Modifies a service type (action, container, etc.) :Parameters: * *service_id* (`string`) -- Identifier of an existing service * *type* (`string`) -- service type :return: Service modification metadata (service id, type, modified date, versions :Example: .. code-block:: python service_modification_metadata = opereto_client.modify_service ('myService', 'container') if service_modification_metadata['type'] == 'container' print 'service type of {} changed to container'.format('myService') ''' request_data = {'id': service_id, 'type': type} return self._call_rest_api('post', '/services', data=request_data, error='Failed to modify service [%s]'%service_id)" 3347,"def upload_service_version(self, service_zip_file, mode='production', service_version='default', service_id=None, **kwargs): ''' upload_service_version(self, service_zip_file, mode='production', service_version='default', service_id=None, **kwargs) Upload a service version to Opereto :Parameters: * *service_zip_file* (`string`) -- zip file location containing service and service specification * *mode* (`string`) -- production/development (default is production) * *service_version* (`string`) -- Service version * *service_id* (`string`) -- Service Identifier :Keywords args: * *comment* (`string`) -- comment :Example: .. code-block:: python opereto_client.upload_service_version(service_zip_file=zip_action_file+'.zip', mode='production', service_version='111') ''' files = {'service_file': open(service_zip_file,'rb')} url_suffix = '/services/upload/%s'%mode if mode=='production': url_suffix+='/'+service_version if service_id: url_suffix+='/'+service_id if kwargs: url_suffix=url_suffix+'?'+urlencode(kwargs) return self._call_rest_api('post', url_suffix, files=files, error='Failed to upload service version')" 3348,"def import_service_version(self, repository_json, mode='production', service_version='default', service_id=None, **kwargs): ''' import_service_version(self, repository_json, mode='production', service_version='default', service_id=None, **kwargs) Imports a service version into Opereto from a remote repository (GIT, SVN, AWS S3, any HTTPS repository) :Parameters: * *repository_json* (`object`) -- repository_json :Example of repository JSON: .. 
code-block:: json #GIT source control { ""repo_type"": ""git"", ""url"": ""git@bitbucket.org:my_account_name/my_project.git"", ""branch"": ""master"", ""ot_dir"": ""mydir"" } #SVN { ""repo_type"": ""svn"", ""url"": ""svn://myhost/myrepo"", ""username"": ""OPTIONAL_USERNAME"", ""password"": ""OPTIONAL_PASSWORD"", ""ot_dir"": ""my_service_dir"" } # Any HTTP based remote storage { ""repo_type"": ""http"", ""url"": ""https://www.dropbox.com/s/1234567890/MyFile.zip?dl=0"", ""username"": ""OPTIONAL_PASSWORD"", ""ot_dir"": ""my_service_dir"" } # AWS S3 Storage { ""repo_type"": ""s3"", ""bucket"": ""my_bucket/my_service.zip"", ""access_key"": ""MY_ACCESS_KEY"", ""secret_key"": ""MY_SECRET_KEY"", ""ot_dir"": ""my_service_dir"" } * *mode* (`string`) -- production/development (default is production) * *service_version* (`string`) -- Service version * *service_id* (`string`) -- Service version :return: status - success/failure :Example: .. code-block:: python # for GIT repository_json = { ""branch"": ""master"", ""ot_dir"": ""microservices/hello_world"", ""repo_type"": ""git"", ""url"": ""https://github.com/myCompany/my_services.git"" } opereto_client.import_service_version(repository_json, mode='production', service_version='default', service_id=self.my_service2) ''' request_data = {'repository': repository_json, 'mode': mode, 'service_version': service_version, 'id': service_id} url_suffix = '/services' if kwargs: url_suffix=url_suffix+'?'+urlencode(kwargs) return self._call_rest_api('post', url_suffix, data=request_data, error='Failed to import service')" 3349,"def delete_service_version(self, service_id , service_version='default', mode='production'): ''' delete_service(self, service_id, service_version='default', mode='production') Deletes a Service version from Opereto :Parameters: * *service_id* (`string`) -- Service identifier * *service_version* (`string`) -- Service version. Default is 'default' * *mode* (`string`) -- development/production. Default is production :return: success/failure :Example: .. code-block:: python opereto_client.delete_service('my_service_id') ''' return self._call_rest_api('delete', '/services/'+service_id+'/'+mode+'/'+service_version, error='Failed to delete service')" 3350,"def verify_environment_scheme(self, environment_type, environment_topology): ''' verify_environment_scheme(self, environment_type, environment_topology) Verifies json scheme of an environment :Parameters: * *environment_type* (`string`) -- Topology identifier * *environment_topology* (`object`) -- Environment json to validate :return: Success or errors in case the verification failed :Return Example: .. code-block:: json # verification failure {'errors': ['Topology key cluster_name is missing in environment specification'], 'agents': {}, 'success': False, 'warnings': []} # verification success {'errors': [], 'agents': {}, 'success': True, 'warnings': []} :Example: .. 
code-block:: python environment_topology = { ""cluster_name"": ""k8s-clusbbe9"", ""config_file"": { ""contexts"": [ { ""name"": ""my-context"" } ], ""clusters"": [ { ""name"": ""k8s-clusbbe9"" } ] } } environment = opereto_client.verify_environment_scheme(environment_type = 'myTopology', environment_topology = environment_topology) ''' request_data = {'topology_name': environment_type, 'topology': environment_topology} return self._call_rest_api('post', '/environments/verify', data=request_data, error='Failed to verify environment.')" 3351,"def verify_environment(self, environment_id): ''' verify_environment(self, environment_id) Verifies validity of an existing environment :Parameters: * *environment_id* (`string`) -- Environment identifier :return: Success or errors in case the verification failed :Return Example: .. code-block:: json # verification failure {'errors': ['Topology key cluster_name is missing in environment specification'], 'agents': {}, 'success': False, 'warnings': []} # verification success {'errors': [], 'agents': {}, 'success': True, 'warnings': []} ''' request_data = {'id': environment_id} return self._call_rest_api('post', '/environments/verify', data=request_data, error='Failed to verify environment.')" 3352,"def create_environment(self, topology_name, topology={}, id=None, **kwargs): ''' create_environment(self, topology_name, topology={}, id=None, **kwargs) Create a new environment :Parameters: * *topology_name* (`string`) -- The topology identifier. Must be provided to create an environment. * *topology* (`object`) -- Topology data (must match the topology json schema) * *id* (`object`) -- The environment identifier. If none provided when creating environment, Opereto will automatically assign a unique identifier. :return: id of the created environment ''' request_data = {'topology_name': topology_name,'id': id, 'topology':topology, 'add_only':True} request_data.update(**kwargs) return self._call_rest_api('post', '/environments', data=request_data, error='Failed to create environment')" 3353,"def modify_environment(self, environment_id, **kwargs): ''' modify_environment(self, environment_id, **kwargs) Modifies an existing environment :Parameters: * *environment_id* (`string`) -- The environment identifier Keywords args: The variables to change in the environment :return: id of the created environment ''' request_data = {'id': environment_id} request_data.update(**kwargs) return self._call_rest_api('post', '/environments', data=request_data, error='Failed to modify environment')" 3354,"def search_agents(self, start=0, limit=100, filter={}, **kwargs): ''' search_agents(self, start=0, limit=100, filter={}, **kwargs) Search agents :Parameters: * *start* (`int`) -- start index to retrieve from. Default is 0 * *limit* (`int`) -- maximum number of entities to retrieve. Default is 100 * *filter* (`object`) -- free text search pattern (checks in agent data and properties) :return: List of search results or empty list :Example: .. code-block:: python filter = {'generic': 'my Agent'} search_result = opereto_client.search_agents(filter=filter) ''' request_data = {'start': start, 'limit': limit, 'filter': filter} request_data.update(kwargs) return self._call_rest_api('post', '/search/agents', data=request_data, error='Failed to search agents')" 3355,"def modify_agent_property(self, agent_id, key, value): ''' modify_agent_property(self, agent_id, key, value) Modifies a single single property of an agent. If the property does not exists then it is created as a custom property. 
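The environment calls above (verify_environment_scheme, verify_environment, create_environment, modify_environment) are documented without a combined example, so the sketch below chains them together. It is illustrative only: the import path, the 'myTopology' topology name, and the environment id are assumptions, and the topology JSON is reused from the verify_environment_scheme example.

.. code-block:: python

    from pyopereto.client import OperetoClient   # assumed import path for the client

    opereto_client = OperetoClient()

    # Topology data reused from the verify_environment_scheme example above
    environment_topology = {
        'cluster_name': 'k8s-clusbbe9',
        'config_file': {
            'contexts': [{'name': 'my-context'}],
            'clusters': [{'name': 'k8s-clusbbe9'}]
        }
    }

    # Verify the scheme first and only create the environment if it passed
    verification = opereto_client.verify_environment_scheme(
        environment_type='myTopology',
        environment_topology=environment_topology)

    if verification['success']:
        env_id = opereto_client.create_environment(
            topology_name='myTopology',
            topology=environment_topology,
            id='my-k8s-environment')   # optional; omit to let Opereto assign an id
        # Individual fields can later be updated in place; keyword names here are illustrative
        opereto_client.modify_environment(env_id, topology=environment_topology)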
:Parameters: * *agent_id* (`string`) -- Identifier of an existing agent * *key* (`string`) -- Key of a property to change * *value* (`string`) -- New Value of the property to change :Example: .. code-block:: python opereto_client.modify_agent_property('my_agent_id', 'agent_new_property', 'agent value') ''' return self._call_rest_api('post', '/agents/'+agent_id+'/properties', data={key: value}, error='Failed to modify agent [%s] property [%s]'%(agent_id,key))" 3356,"def modify_agent_properties(self, agent_id, key_value_map={}): ''' modify_agent_properties(self, agent_id, key_value_map={}) Modify properties of an agent. If properties do not exists, they will be created :Parameters: * *agent_id* (`string`) -- Identifier of an existing agent * *key_value_map* (`object`) -- Key value map of properties to change * *value* (`string`) -- New Value of the property to change :Example: .. code-block:: python opereto_client.modify_agent_properties('my_agent_id', {""mykey"": ""myvalue"", ""mykey2"": ""myvalue2""}) ''' return self._call_rest_api('post', '/agents/'+agent_id+'/properties', data=key_value_map, error='Failed to modify agent [%s] properties'%agent_id)" 3357,"def create_agent(self, agent_id=None, **kwargs): ''' create_agent(self, agent_id=None, **kwargs) | Creates an agent based on the identifier provided. \ | The agent will become online when a real agent will connect using this identifier. \ | However, in most cases, the agent entity is created automatically when a new agent connects to opereto. \ :Parameters: * *agent_id* (`string`) -- Identifier of an existing agent :Keywords args: * *name* (`string`) -- Display name to show in the UI * *description* (`string`) -- A textual description of the agent * *permissions* (`object`) -- Permissions on the agent * *owners* (`array`) -- List of Opereto usernames that may modify and delete the agent * *owners* (`array`) -- List of Opereto usernames that may run services on the agent :return: id of the generated agent :Example: .. code-block:: python opereto_client = OperetoClient() opereto_client.create_agent(agent_id='xAgent', name='My new agent', description='A new created agent to be called from X machines') ''' request_data = {'id': agent_id, 'add_only':True} request_data.update(**kwargs) return self._call_rest_api('post', '/agents'+'', data=request_data, error='Failed to create agent')" 3358,"def modify_agent(self, agent_id, **kwargs): ''' modify_agent(self, agent_id, **kwargs) | Modifies agent information (like name) :Parameters: * *agent_id* (`string`) -- Identifier of an existing agent :Example: .. code-block:: python opereto_client = OperetoClient() opereto_client.modify_agent('agentId', name='my new name') ''' request_data = {'id': agent_id} request_data.update(**kwargs) return self._call_rest_api('post', '/agents'+'', data=request_data, error='Failed to modify agent [%s]'%agent_id)" 3359,"def create_process(self, service, agent=None, title=None, mode=None, service_version=None, **kwargs): ''' create_process(self, service, agent=None, title=None, mode=None, service_version=None, **kwargs) Registers a new process or processes :Parameters: * *service* (`string`) -- Service which process will be started * *agent* (`string`) -- The service identifier (e.g shell_command) * *title* (`string`) -- Title for the process * *mode* (`string`) -- production/development * *service_version* (`string`) -- Version of the service to execute :Keywords args: Json value map containing the process input properties :return: process id :Example: .. 
code-block:: python process_properties = {""my_input_param"" : ""1""} pid = opereto_client.create_process(service='simple_shell_command', title='Test simple shell command service', agent=opereto_client.input['opereto_agent'], **process_properties) ''' if not agent: agent = self.input.get('opereto_agent') if not mode: mode=self.input.get('opereto_execution_mode') or 'production' if not service_version: service_version=self.input.get('opereto_service_version') request_data = {'service_id': service, 'agents': agent, 'mode': mode, 's_version':service_version} if title: request_data['name']=title if self.input.get('pid'): request_data['pflow_id']=self.input.get('pid') request_data.update(**kwargs) ret_data= self._call_rest_api('post', '/processes', data=request_data, error='Failed to create a new process') if not isinstance(ret_data, list): raise OperetoClientError(str(ret_data)) pid = ret_data[0] message = 'New process created for service [%s] [pid = %s] '%(service, pid) if agent: message += ' [agent = %s]'%agent else: message += ' [agent = any ]' self.logger.info(message) return str(pid)" 3360,"def rerun_process(self, pid, title=None, agent=None): ''' rerun_process(self, pid, title=None, agent=None) Reruns a process :Parameters: * *pid* (`string`) -- Process id to rerun * *title* (`string`) -- Title for the process * *agent* (`string`) -- a valid value may be one of the following: agent identifier, agent identifiers (list) : [""agent_1"", ""agent_2""..], ""all"", ""any"" :return: process id ''' request_data = {} if title: request_data['name']=title if agent: request_data['agents']=agent if self.input.get('pid'): request_data['pflow_id']=self.input.get('pid') ret_data= self._call_rest_api('post', '/processes/'+pid+'/rerun', data=request_data, error='Failed to create a new process') if not isinstance(ret_data, list): raise OperetoClientError(str(ret_data)) new_pid = ret_data[0] message = 'Re-executing process [%s] [new process pid = %s] '%(pid, new_pid) self.logger.info(message) return str(new_pid)" 3361,"def modify_process_properties(self, key_value_map={}, pid=None): ''' modify_process_properties(self, key_value_map={}, pid=None) Modify process output properties. Please note that process property key provided must be declared as an output property in the relevant service specification. :Parameters: * *key_value_map* (`object`) -- key value map with process properties to modify * *pid* (`string`) -- Identifier of an existing process :Example: .. code-block:: python process_output_properties = {""my_output_param"" : ""1""} pid = opereto_client.create_process(service='simple_shell_command', title='Test simple shell command service') opereto_client.modify_process_properties(process_output_properties, pid) ''' pid = self._get_pid(pid) request_data={""properties"": key_value_map} return self._call_rest_api('post', '/processes/'+pid+'/output', data=request_data, error='Failed to output properties')" 3362,"def modify_process_property(self, key, value, pid=None): ''' modify_process_property(self, key, value, pid=None) Modify process output property. Please note that the process property key provided must be declared as an output property in the relevant service specification. :Parameters: * *key* (`String`) -- key of property to modify * *key* (`value`) -- value of property to modify * *pid* (`string`) -- Identifier of an existing process :Example: .. 
code-block:: python pid = opereto_client.create_process(service='simple_shell_command', title='Test simple shell command service') opereto_client.modify_process_property(""my_output_param"", ""1"" , pid) ''' pid = self._get_pid(pid) request_data={""key"" : key, ""value"": value} return self._call_rest_api('post', '/processes/'+pid+'/output', data=request_data, error='Failed to modify output property [%s]'%key)" 3363,"def modify_process_summary(self, pid=None, text='', append=False): ''' modify_process_summary(self, pid=None, text='') Modifies the summary text of the process execution :Parameters: * *key* (`pid`) -- Identifier of an existing process * *key* (`text`) -- summary text * *append* (`boolean`) -- True to append to summary. False to override it. ''' pid = self._get_pid(pid) if append: current_summary = self.get_process_info(pid).get('summary') or '' modified_text = current_summary + '\n' + text text = modified_text request_data = {""id"": pid, ""data"": str(text)} return self._call_rest_api('post', '/processes/'+pid+'/summary', data=request_data, error='Failed to update process summary')" 3364,"def stop_process(self, pids, status='success'): ''' stop_process(self, pids, status='success') Stops a running process :Parameters: * *pid* (`string`) -- Identifier of an existing process * *result* (`string`) -- the value the process will be terminated with. Any of the following possible values: success , failure , error , warning , terminated ''' if status not in process_result_statuses: raise OperetoClientError('Invalid process result [%s]'%status) pids = self._get_pids(pids) for pid in pids: self._call_rest_api('post', '/processes/'+pid+'/terminate/'+status, error='Failed to stop process')" 3365,"def get_process_status(self, pid=None): ''' get_process_status(self, pid=None) Get current status of a process :Parameters: * *pid* (`string`) -- Identifier of an existing process ''' pid = self._get_pid(pid) return self._call_rest_api('get', '/processes/'+pid+'/status', error='Failed to fetch process status')" 3366,"def get_process_flow(self, pid=None): ''' get_process_flow(self, pid=None) Get process in flow context. The response returns a sub-tree of the whole flow containing the requested process, its direct children processes, and all ancestors. You can navigate within the flow backword and forward by running this call on the children or ancestors of a given process. :Parameters: * *pid* (`string`) -- Identifier of an existing process ''' pid = self._get_pid(pid) return self._call_rest_api('get', '/processes/'+pid+'/flow', error='Failed to fetch process information')" 3367,"def get_process_rca(self, pid=None): ''' get_process_rca(self, pid=None) Get the RCA tree of a given failed process. The RCA tree contains all failed child processes that caused the failure of the given process. :Parameters: * *pid* (`string`) -- Identifier of an existing process ''' pid = self._get_pid(pid) return self._call_rest_api('get', '/processes/'+pid+'/rca', error='Failed to fetch process information')" 3368,"def get_process_info(self, pid=None): ''' get_process_info(self, pid=None) Get process general information. 
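The process calls documented above (create_process, get_process_status, modify_process_summary, stop_process) can be combined into a simple run-and-report loop. The sketch below is illustrative only: the import path and the 'simple_shell_command' service come from the earlier examples, and the terminal status set is copied from the stop_process description rather than from the process_result_statuses constant, which is defined elsewhere.

.. code-block:: python

    import time

    from pyopereto.client import OperetoClient   # assumed import path for the client

    opereto_client = OperetoClient()

    # Start a process for a service (service name taken from the examples above)
    pid = opereto_client.create_process(service='simple_shell_command',
                                        title='Demo shell command run')

    # Poll until the process reaches a terminal status
    # (statuses listed in the stop_process documentation above)
    TERMINAL_STATUSES = {'success', 'failure', 'error', 'warning', 'terminated'}
    while opereto_client.get_process_status(pid) not in TERMINAL_STATUSES:
        time.sleep(5)

    # Append a note to the process summary without overwriting existing text
    opereto_client.modify_process_summary(pid=pid,
                                          text='Post-run check completed.',
                                          append=True)

    # A still-running process could instead be terminated explicitly:
    # opereto_client.stop_process(pid, status='terminated')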
:Parameters: * *pid* (`string`) -- Identifier of an existing process ''' pid = self._get_pid(pid) return self._call_rest_api('get', '/processes/'+pid, error='Failed to fetch process information')" 3369,"def get_process_log(self, pid=None, start=0, limit=1000): ''' get_process_log(self, pid=None, start=0, limit=1000 Get process logs :Parameters: * *pid* (`string`) -- Identifier of an existing process * *pid* (`string`) -- start index to retrieve logs from * *pid* (`string`) -- maximum number of entities to retrieve :return: Process log entries ''' pid = self._get_pid(pid) data = self._call_rest_api('get', '/processes/'+pid+'/log?start={}&limit={}'.format(start,limit), error='Failed to fetch process log') return data['list']" 3370,"def search_process_log(self, pid, filter={}, start=0, limit=1000): ''' search_process_log(self, pid, filter={}, start=0, limit=1000) Search in process logs :Parameters: * *pid* (`string`) -- Identifier of an existing process * *start* (`int`) -- start index to retrieve from. Default is 0 * *limit* (`int`) -- maximum number of entities to retrieve. Default is 100 * *filter* (`object`) -- free text search pattern (checks in process log data) :return: Count of records found and list of search results or empty list :Example: .. code-block:: python filter = {'generic': 'my product param'} search_result = opereto_client.search_globals(filter=filter) if search_result['total'] > 0 print(search_result['list']) ''' pid = self._get_pid(pid) request_data = {'start': start, 'limit': limit, 'filter': filter} return self._call_rest_api('post', '/processes/' + pid + '/log/search', data=request_data, error='Failed to search in process log')" 3371,"def get_process_properties(self, pid=None, name=None): ''' get_process_properties(self, pid=None, name=None) Get process properties (both input and output properties) :Parameters: * *pid* (`string`) -- Identifier of an existing process * *name* (`string`) -- optional - Property name ''' pid = self._get_pid(pid) res = self._call_rest_api('get', '/processes/'+pid+'/properties', error='Failed to fetch process properties') if name: try: return res[name] except KeyError as e: raise OperetoClientError(message='Invalid property [%s]'%name, code=404) else: return res" 3372,"def wait_for(self, pids=[], status_list=process_result_statuses): ''' wait_for(self, pids=[], status_list=process_result_statuses) Waits for a process to finish :Parameters: * *pids* (`list`) -- list of processes waiting to be finished * *status_list* (`list`) -- optional - List of statuses to wait for processes to finish with :Example: .. 
code-block:: python pid = opereto_client.create_process(service='simple_shell_command', title='Test simple shell command service') opereto_client.wait_for([pid], ['failure', 'error']) opereto_client.rerun_process(pid) ''' results={} pids = self._get_pids(pids) for pid in pids: while(True): try: stat = self._call_rest_api('get', '/processes/'+pid+'/status', error='Failed to fetch process [%s] status'%pid) if stat in status_list: results[pid]=stat break time.sleep(5) except requests.exceptions.RequestException as e: # reinitialize session using api call decorator self.session=None raise e return results" 3373,"def wait_to_start(self, pids=[]): ''' wait_to_start(self, pids=[]) Wait for processes to start :Parameters: * *pids* (`list`) -- list of processes to wait to start ''' actual_pids = self._get_pids(pids) return self.wait_for(pids=actual_pids, status_list=process_result_statuses+['in_process'])" 3374,"def wait_to_end(self, pids=[]): ''' wait_to_end(self, pids=[]) Wait for processes to finish :Parameters: * *pids* (`list`) -- list of processes to wait to finish ''' actual_pids = self._get_pids(pids) return self.wait_for(pids=actual_pids, status_list=process_result_statuses)" 3375,"def get_process_runtime_cache(self, key, pid=None): ''' get_process_runtime_cache(self, key, pid=None) Get a pre-defined run time parameter value :Parameters: * *key* (`string`) -- Identifier of the runtime cache * *pid* (`string`) -- Identifier of an existing process ''' value = None pid = self._get_pid(pid) value = self._call_rest_api('get', '/processes/'+pid+'/cache?key=%s'%key, error='Failed to fetch process runtime cache') return value" 3376,"def set_process_runtime_cache(self, key, value, pid=None): ''' set_process_runtime_cache(self, key, value, pid=None) Set a process run time parameter :Parameters: * *key* (`string`) -- parameter key * *key* (`value`) -- parameter value * *key* (`pid`) -- optional - Identifier of an existing process ''' pid = self._get_pid(pid) self._call_rest_api('post', '/processes/'+pid+'/cache', data={'key': key, 'value': value}, error='Failed to modify process runtime cache')" 3377,"def create_product(self, product, version, build, name=None, description=None, attributes={}): ''' create_product(self, product, version, build, name=None, description=None, attributes={}) Create product :Parameters: * *product* (`string`) -- product * *version* (`string`) -- version * *build* (`string`) -- build * *name* (`string`) -- name * *description* (`string`) -- description * *attributes* (`object`) -- product attributes ''' request_data = {'product': product, 'version': version, 'build': build} if name: request_data['name']=name if description: request_data['description']=description if attributes: request_data['attributes']=attributes ret_data= self._call_rest_api('post', '/products', data=request_data, error='Failed to create a new product') pid = ret_data message = 'New product created [pid = %s] '%pid self.logger.info(message) return str(pid)" 3378,"def modify_product(self, product_id, name=None, description=None, attributes={}): ''' modify_product(self, product_id, name=None, description=None, attributes={}) Modify an existing product :Parameters: * *product_id* (`string`) -- identifier of an existing product * *name* (`string`) -- name of the product * *description* (`string`) -- product description * *attributes* (`object`) -- product attributes to modify ''' request_data = {'id': product_id} if name: request_data['name']=name if description: request_data['description']=description if 
attributes: request_data['attributes']=attributes return self._call_rest_api('post', '/products', data=request_data, error='Failed to modify a new product')" 3379,"def modify_kpi(self, kpi_id, product_id, measures=[], append=False, **kwargs): ''' modify_kpi(self, kpi_id, product_id, measures=[], append=False, **kwargs) Creates a new kpi or modifies existing one. :Parameters: * *kpi_id* (`string`) -- The KPI identifier (unique per product) * *product_id* (`string`) -- The product (release candidate) identifier * *measures* (`list`) -- List of numeric (integers or floats) measures * *append* (`boolean`) -- True to append new measures to existing ones for this API. False to override previous measures ''' if not isinstance(measures, list): measures = [measures] request_data = {'kpi_id': kpi_id, 'product_id': product_id, 'measures': measures, 'append': append} request_data.update(kwargs) return self._call_rest_api('post', '/kpi', data=request_data, error='Failed to modify a kpi entry')" 3380,"def create_qc(self, product_id=None, expected_result='', actual_result='', weight=100, status='success', **kwargs): ''' create_qc(self, product_id=None, expected_result='', actual_result='', weight=100, status='success', **kwargs) Create Quality Criteria :Parameters: * *product_id* (`string`) -- The product (release candidate) identifier * *expected_result* (`string`) -- Text describing the expected result of this criteria * *actual_result* (`string`) -- Text describing the actual result of this criteria * *weight* (`integer`) -- Overall weight of this criteria (integer between 0-100) * *status* (`string`) -- pass/fail/norun ''' request_data = {'product_id': product_id, 'expected': expected_result, 'actual': actual_result,'weight': weight, 'exec_status': status} request_data.update(**kwargs) return self._call_rest_api('post', '/qc', data=request_data, error='Failed to create criteria')" 3381,"def modify_qc(self, qc_id=None, **kwargs): ''' modify_qc(self, qc_id=None, **kwargs) Modify a Quality Criteria :Parameters: * *qc_id* (`string`) -- The Quality criteria identifier ''' if qc_id: request_data = {'id': qc_id} request_data.update(**kwargs) return self._call_rest_api('post', '/qc', data=request_data, error='Failed to modify criteria') else: return self.create_qc(**kwargs)" 3382,"def write(self): """"""Pull features from the instream and write them to the output."""""" for entry in self._instream: if isinstance(entry, Feature): for feature in entry: if feature.num_children > 0 or feature.is_multi: if feature.is_multi and feature != feature.multi_rep: continue self.feature_counts[feature.type] += 1 fid = '{}{}'.format(feature.type, self.feature_counts[feature.type]) feature.add_attribute('ID', fid) else: feature.drop_attribute('ID') if isinstance(entry, Sequence) and not self._seq_written: print('##FASTA', file=self.outfile) self._seq_written = True print(repr(entry), file=self.outfile)" 3383,"def datetime2yeardoy(time: Union[str, datetime.datetime]) -> Tuple[int, float]: """""" Inputs: T: Numpy 1-D array of datetime.datetime OR string for dateutil.parser.parse Outputs: yd: yyyyddd four digit year, 3 digit day of year (INTEGER) utsec: seconds from midnight utc """""" T = np.atleast_1d(time) utsec = np.empty_like(T, float) yd = np.empty_like(T, int) for i, t in enumerate(T): if isinstance(t, np.datetime64): t = t.astype(datetime.datetime) elif isinstance(t, str): t = parse(t) utsec[i] = datetime2utsec(t) yd[i] = t.year*1000 + int(t.strftime('%j')) return yd.squeeze()[()], utsec.squeeze()[()]" 3384,"def 
yeardoy2datetime(yeardate: int, utsec: Union[float, int] = None) -> datetime.datetime: """""" Inputs: yd: yyyyddd four digit year, 3 digit day of year (INTEGER 7 digits) outputs: t: datetime http://stackoverflow.com/questions/2427555/python-question-year-and-day-of-year-to-date """""" if isinstance(yeardate, (tuple, list, np.ndarray)): if utsec is None: return np.asarray([yeardoy2datetime(y) for y in yeardate]) elif isinstance(utsec, (tuple, list, np.ndarray)): return np.asarray([yeardoy2datetime(y, s) for y, s in zip(yeardate, utsec)]) yeardate = int(yeardate) yd = str(yeardate) if len(yd) != 7: raise ValueError('yyyyddd expected') year = int(yd[:4]) assert 0 < year < 3000, 'year not in expected format' dt = datetime.datetime(year, 1, 1) + datetime.timedelta(days=int(yd[4:]) - 1) assert isinstance(dt, datetime.datetime) if utsec is not None: dt += datetime.timedelta(seconds=utsec) return dt" 3385,"def date2doy(time: Union[str, datetime.datetime]) -> Tuple[int, int]: """""" < 366 for leap year too. normal year 0..364. Leap 0..365. """""" T = np.atleast_1d(time) year = np.empty(T.size, dtype=int) doy = np.empty_like(year) for i, t in enumerate(T): yd = str(datetime2yeardoy(t)[0]) year[i] = int(yd[:4]) doy[i] = int(yd[4:]) assert ((0 < doy) & (doy < 366)).all(), 'day of year must be 0 < doy < 366' return doy, year" 3386,"def datetime2gtd(time: Union[str, datetime.datetime, np.datetime64], glon: Union[float, List[float], np.ndarray] = np.nan) -> Tuple[int, float, float]: """""" Inputs: time: Numpy 1-D array of datetime.datetime OR string for dateutil.parser.parse glon: Numpy 2-D array of geodetic longitudes (degrees) Outputs: iyd: day of year utsec: seconds from midnight utc stl: local solar time """""" # %% T = np.atleast_1d(time) glon = np.asarray(glon) doy = np.empty_like(T, int) utsec = np.empty_like(T, float) stl = np.empty((T.size, *glon.shape)) for i, t in enumerate(T): if isinstance(t, str): t = parse(t) elif isinstance(t, np.datetime64): t = t.astype(datetime.datetime) elif isinstance(t, (datetime.datetime, datetime.date)): pass else: raise TypeError('unknown time datatype {}'.format(type(t))) # %% Day of year doy[i] = int(t.strftime('%j')) # %% seconds since utc midnight utsec[i] = datetime2utsec(t) stl[i, ...] = utsec[i] / 3600. + glon / 15. return doy, utsec, stl" 3387,"def datetime2utsec(t: Union[str, datetime.date, datetime.datetime, np.datetime64]) -> float: """""" input: datetime output: float utc seconds since THIS DAY'S MIDNIGHT """""" if isinstance(t, (tuple, list, np.ndarray)): return np.asarray([datetime2utsec(T) for T in t]) elif isinstance(t, datetime.date) and not isinstance(t, datetime.datetime): return 0. elif isinstance(t, np.datetime64): t = t.astype(datetime.datetime) elif isinstance(t, str): t = parse(t) return datetime.timedelta.total_seconds(t - datetime.datetime.combine(t.date(), datetime.datetime.min.time()))" 3388,"def yeardec2datetime(atime: float) -> datetime.datetime: """""" Convert atime (a float) to DT.datetime This is the inverse of datetime2yeardec. assert dt2t(t2dt(atime)) == atime http://stackoverflow.com/questions/19305991/convert-fractional-years-to-a-real-date-in-python Authored by ""unutbu"" http://stackoverflow.com/users/190597/unutbu In Python, go from decimal year (YYYY.YYY) to datetime, and from datetime to decimal year. 
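A short round trip through datetime2yeardoy and yeardoy2datetime, which are shown in full above, makes the yyyyddd/utsec convention concrete. The snippet assumes both functions are importable; the module name in the commented import is a guess, not taken from this listing.

.. code-block:: python

    import datetime

    # from sciencedates import datetime2yeardoy, yeardoy2datetime   # assumed module name

    t = datetime.datetime(2019, 2, 1, 12, 30, 0)

    yd, utsec = datetime2yeardoy(t)
    # yd packs the year and day of year as yyyyddd; February 1st is day 32
    assert yd == 2019032
    # utsec is seconds since midnight UTC: 12 h 30 min
    assert utsec == 12 * 3600 + 30 * 60

    # yeardoy2datetime inverts the pair back into the original datetime
    assert yeardoy2datetime(yd, utsec) == t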
"""""" # %% if isinstance(atime, (float, int)): # typically a float year = int(atime) remainder = atime - year boy = datetime.datetime(year, 1, 1) eoy = datetime.datetime(year + 1, 1, 1) seconds = remainder * (eoy - boy).total_seconds() T = boy + datetime.timedelta(seconds=seconds) assert isinstance(T, datetime.datetime) elif isinstance(atime[0], float): return np.asarray([yeardec2datetime(t) for t in atime]) else: raise TypeError('expecting float, not {}'.format(type(atime))) return T" 3389,"def datetime2yeardec(time: Union[str, datetime.datetime, datetime.date]) -> float: """""" Convert a datetime into a float. The integer part of the float should represent the year. Order should be preserved. If adate datetime.date: """""" gives random date in year"""""" if calendar.isleap(year): doy = random.randrange(366) else: doy = random.randrange(365) return datetime.date(year, 1, 1) + datetime.timedelta(days=doy)" 3391,"def init_arg_names(obj): """""" Names of arguments to __init__ method of this object's class. """""" # doing something wildly hacky by pulling out the arguments to # __init__ or __new__ and hoping that they match fields defined on the # object try: init_code = obj.__init__.__func__.__code__ except AttributeError: try: init_code = obj.__new__.__func__.__code__ except AttributeError: # if object is a namedtuple then we can return its fields # as the required initial args if hasattr(obj, ""_fields""): return obj._fields else: raise ValueError(""Cannot determine args to %s.__init__"" % (obj,)) arg_names = init_code.co_varnames[:init_code.co_argcount] # drop self argument nonself_arg_names = arg_names[1:] return nonself_arg_names" 3392,"def function_to_serializable_representation(fn): """""" Converts a Python function into a serializable representation. Does not currently work for methods or functions with closure data. """""" if type(fn) not in (FunctionType, BuiltinFunctionType): raise ValueError( ""Can't serialize %s : %s, must be globally defined function"" % ( fn, type(fn),)) if hasattr(fn, ""__closure__"") and fn.__closure__ is not None: raise ValueError(""No serializable representation for closure %s"" % (fn,)) return {""__module__"": get_module_name(fn), ""__name__"": fn.__name__}" 3393,"def dict_to_serializable_repr(x): """""" Recursively convert values of dictionary to serializable representations. Convert non-string keys to JSON representations and replace them in the dictionary with indices of unique JSON strings (e.g. __1, __2, etc..). 
"""""" # list of JSON representations of hashable objects which were # used as keys in this dictionary serialized_key_list = [] serialized_keys_to_names = {} # use the class of x rather just dict since we might want to convert # derived classes such as OrderedDict result = type(x)() for (k, v) in x.items(): if not isinstance(k, string_types): # JSON does not support using complex types such as tuples # or user-defined objects with implementations of __hash__ as # keys in a dictionary so we must keep the serialized # representations of such values in a list and refer to indices # in that list serialized_key_repr = to_json(k) if serialized_key_repr in serialized_keys_to_names: k = serialized_keys_to_names[serialized_key_repr] else: k = index_to_serialized_key_name(len(serialized_key_list)) serialized_keys_to_names[serialized_key_repr] = k serialized_key_list.append(serialized_key_repr) result[k] = to_serializable_repr(v) if len(serialized_key_list) > 0: # only include this list of serialized keys if we had any non-string # keys result[SERIALIZED_DICTIONARY_KEYS_FIELD] = serialized_key_list return result" 3394,"def from_serializable_dict(x): """""" Reconstruct a dictionary by recursively reconstructing all its keys and values. This is the most hackish part since we rely on key names such as __name__, __class__, __module__ as metadata about how to reconstruct an object. TODO: It would be cleaner to always wrap each object in a layer of type metadata and then have an inner dictionary which represents the flattened result of to_dict() for user-defined objects. """""" if ""__name__"" in x: return _lookup_value(x.pop(""__module__""), x.pop(""__name__"")) non_string_key_objects = [ from_json(serialized_key) for serialized_key in x.pop(SERIALIZED_DICTIONARY_KEYS_FIELD, []) ] converted_dict = type(x)() for k, v in x.items(): serialized_key_index = parse_serialized_keys_index(k) if serialized_key_index is not None: k = non_string_key_objects[serialized_key_index] converted_dict[k] = from_serializable_repr(v) if ""__class__"" in converted_dict: class_object = converted_dict.pop(""__class__"") if ""__value__"" in converted_dict: return class_object(converted_dict[""__value__""]) elif hasattr(class_object, ""from_dict""): return class_object.from_dict(converted_dict) else: return class_object(**converted_dict) return converted_dict" 3395,"def to_dict(obj): """""" If value wasn't isn't a primitive scalar or collection then it needs to either implement to_dict (instances of Serializable) or has member data matching each required arg of __init__. """""" if isinstance(obj, dict): return obj elif hasattr(obj, ""to_dict""): return obj.to_dict() try: return simple_object_to_dict(obj) except: raise ValueError( ""Cannot convert %s : %s to dictionary"" % ( obj, type(obj)))" 3396,"def to_serializable_repr(x): """""" Convert an instance of Serializable or a primitive collection containing such instances into serializable types. 
"""""" t = type(x) if isinstance(x, list): return list_to_serializable_repr(x) elif t in (set, tuple): return { ""__class__"": class_to_serializable_representation(t), ""__value__"": list_to_serializable_repr(x) } elif isinstance(x, dict): return dict_to_serializable_repr(x) elif isinstance(x, (FunctionType, BuiltinFunctionType)): return function_to_serializable_representation(x) elif type(x) is type: return class_to_serializable_representation(x) else: state_dictionary = to_serializable_repr(to_dict(x)) state_dictionary[""__class__""] = class_to_serializable_representation( x.__class__) return state_dictionary" 3397,"def _convert_rules_bubble(self, srules=''): """"""srules, a string containing the rules in bubble format will be converted to the internal list of dictonary based rules. '>>>': seperator : a rule has only certain amount of seperators a rule is built like: >>>input>>>function>>>output>>> for example: >>>i>>>adder>>>o>>>> >>>42>>>is_it_the_answer>>>the_answer>>> is converted to: [{'in':'i','fun':'adder','out':'o'}, {'in':'42','fun':'is_it_the_answer','out':'the_answer'}] a rule withhout a name, but with a depency on rule_one >>>panic>>>is_there_an_answer>>>dont_panic>>>rule_one>>> a rule without depencies and a name >>>42>>>is_it_the_answer>>>the_answer>>>nodeps>>rule_one>>> """""" if not isinstance(srules, str): self.cry('convert_rules_bubble: cannot convert srules of type,' + 'list of rules ==> [] :' + str(type(srules)), stuff=srules, verbosity=10) return [] if not srules: self.say('convert_rules_bubble: cannot convert empty srules', verbosity=10) return [] # no rules lines = srules.splitlines() self.say('convert_rules_bubble:lines', stuff=lines, verbosity=10) line_number = 0 rules = [] for r in lines: line_number += 1 # todo: do we wan't this in a configuration, yes! add magic! # in util.escaped it's defined as an escape # but for rules it is best to define a magic value something like # BMGC.TRANSFORMER.RULES_SEPERATOR #seems better option for # or # BMGC.TRANSFORMER_RULES_SEPERATOR #seems simpler # BMGC should implement a sane default magic for undefined values. 
r = r.strip() if not r.endswith('>>>'): continue if not r.startswith('>>>'): continue parts = [p.strip() for p in r.split('>>>')] rule = None lp = len(parts) if lp == 3: rule = Rule(input=parts[1], src_nr=line_number) if lp == 4: rule = Rule(input=parts[1], fun=parts[2], src_nr=line_number) if lp == 5: rule = Rule(input=parts[1], fun=parts[2], output=parts[3], src_nr=line_number) if lp == 6: rule = Rule(input=parts[1], fun=parts[2], output=parts[3], depend=parts[4], src_nr=line_number) if lp == 7: rule = Rule(input=parts[1], fun=parts[2], output=parts[3], depend=parts[4], name=parts[5], src_nr=line_number) if rule: rules.append(rule) else: self.cry( 'parts not 3..7 rule with parts[' + str(lp) + '] from line:[' + str(line_number) + ']\n\'' + r + '\'', verbosity=10) for r in rules: r.set_parent(self) self._rules = rules self.say('convert_rules_bubble:res:rules', stuff=rules, verbosity=10) return rules" 3398,"def _combine_rest_push(self): """"""Combining Rest and Push States"""""" new = [] change = 0 # DEBUG # logging.debug('Combining Rest and Push') i = 0 examinetypes = self.quickresponse_types[3] for state in examinetypes: if state.type == 3: for nextstate_id in state.trans.keys(): found = 0 # if nextstate_id != state.id: if nextstate_id in self.quickresponse: examines = self.quickresponse[nextstate_id] for examine in examines: if examine.id == nextstate_id and examine.type == 1: temp = PDAState() temp.type = 1 temp.sym = examine.sym temp.id = state.id for nextnextstate_id in examine.trans: # if nextnextstate_id != examine.id : for x_char in state.trans[nextstate_id]: for z_char in examine.trans[ nextnextstate_id]: if nextnextstate_id not in temp.trans: temp.trans[ nextnextstate_id] = [] if x_char != 0 and z_char != 0: temp.trans[ nextnextstate_id].append(x_char + z_char) # DEBUGprint 'transition is now # '+x_char +' + '+ z_char elif x_char != 0 and z_char == 0: temp.trans[ nextnextstate_id].append(x_char) # DEBUGprint 'transition is now # '+x_char elif x_char == 0 and z_char != 0: temp.trans[ nextnextstate_id].append(z_char) # DEBUGprint 'transition is now # '+z_char elif x_char == 0 and z_char == 0: temp.trans[ nextnextstate_id].append(0) # DEBUGprint 'transition is now # empty' else: pass found = 1 new.append(temp) if found == 1: # print 'Lets combine one with id '+`state.id`+'(rest) # and one with id '+`nextstate_id`+'(push)' change = 1 # del(state.trans[nextstate_id]) i = i + 1 if change == 0: return [] else: return new" 3399,"def _check(self, accepted): """"""_check for string existence"""""" # logging.debug('A check is now happening...') # for key in self.statediag[1].trans: # logging.debug('transition to '+`key`+"" with ""+self.statediag[1].trans[key][0]) total = [] if 1 in self.quickresponse: total = total + self.quickresponse[1] if (1, 0) in self.quickresponse: total = total + self.quickresponse[(1, 0)] for key in total: if (key.id == 1 or key.id == (1, 0)) and key.type == 3: if accepted is None: if 2 in key.trans: # print 'Found' return key.trans[2] else: for state in accepted: if (2, state) in key.trans: # print 'Found' return key.trans[(2, state)] return -1" 3400,"def _stage(self, accepted, count=0): """"""This is a repeated state in the state removal algorithm"""""" new5 = self._combine_rest_push() new1 = self._combine_push_pop() new2 = self._combine_push_rest() new3 = self._combine_pop_rest() new4 = self._combine_rest_rest() new = new1 + new2 + new3 + new4 + new5 del new1 del new2 del new3 del new4 del new5 if len(new) == 0: # self.printer() # print 'PDA is empty' # 
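Stepping back to the bubble rule format described in _convert_rules_bubble above: the conversion amounts to splitting each '>>>'-delimited line and mapping the parts onto the rule fields. The standalone sketch below mirrors that idea without the Rule class or the bubble machinery; the helper name and the plain-dict output are illustrative only.

.. code-block:: python

    def parse_bubble_rule(line, line_number=0):
        """Parse one '>>>input>>>function>>>output>>>' line into a plain dict.

        Illustrative stand-in for _convert_rules_bubble, which builds Rule
        objects and handles the optional dependency and name fields the same way.
        """
        line = line.strip()
        if not (line.startswith('>>>') and line.endswith('>>>')):
            return None   # lines that are not rules are skipped
        parts = [p.strip() for p in line.split('>>>')]
        # parts[0] and parts[-1] are the empty strings around the outer separators
        fields = ('in', 'fun', 'out', 'depend', 'name')
        return dict(zip(fields, parts[1:-1]), src_nr=line_number)

    assert parse_bubble_rule('>>>42>>>is_it_the_answer>>>the_answer>>>') == {
        'in': '42', 'fun': 'is_it_the_answer', 'out': 'the_answer', 'src_nr': 0}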
logging.debug('PDA is empty') return None self.statediag = self.statediag + new del new # print 'cleaning...' # It is cheaper to create a new array than to use the old one and # delete a key newstates = [] for key in self.statediag: if len(key.trans) == 0 or key.trans == {}: # rint 'delete '+`key.id` # self.statediag.remove(key) pass else: newstates.append(key) del self.statediag self.statediag = newstates self.quickresponse = {} self.quickresponse_types = {} self.quickresponse_types[0] = [] self.quickresponse_types[1] = [] self.quickresponse_types[2] = [] self.quickresponse_types[3] = [] self.quickresponse_types[4] = [] for state in self.statediag: if state.id not in self.quickresponse: self.quickresponse[state.id] = [state] else: self.quickresponse[state.id].append(state) self.quickresponse_types[state.type].append(state) # else: # print `key.id`+' (type: '+`key.type`+' and sym:'+`key.sym`+')' # print key.trans # print 'checking...' exists = self._check(accepted) if exists == -1: # DEBUGself.printer() # raw_input('next step?') return self._stage(accepted, count + 1) else: # DEBUGself.printer() # print 'Found ' print exists # return self._stage(accepted, count+1) return exists" 3401,"def printer(self): """"""Visualizes the current state"""""" for key in self.statediag: if key.trans is not None and len(key.trans) > 0: print '****** ' + repr(key.id) + '(' + repr(key.type)\ + ' on sym ' + repr(key.sym) + ') ******' print key.trans" 3402,"def init(self, states, accepted): """"""Initialization of the indexing dictionaries"""""" self.statediag = [] for key in states: self.statediag.append(states[key]) self.quickresponse = {} self.quickresponse_types = {} self.quickresponse_types[0] = [] self.quickresponse_types[1] = [] self.quickresponse_types[2] = [] self.quickresponse_types[3] = [] self.quickresponse_types[4] = [] for state in self.statediag: if state.id not in self.quickresponse: self.quickresponse[state.id] = [state] else: self.quickresponse[state.id].append(state) self.quickresponse_types[state.type].append(state) # self.printer() # raw_input('next stepA?') return self._stage(accepted, 0)" 3403,"def execute(filelocation, outpath, executable, args=None, switchArgs=None): """"""Executes the dinosaur tool on Windows operating systems. :param filelocation: either a single mgf file path or a list of file paths. :param outpath: path of the output file, file must not exist :param executable: must specify the complete file path of the spectra-cluster-cli.jar file, supported version is 1.0.2 BETA. :param args: list of arguments containing a value, for details see the spectra-cluster-cli help. Arguments should be added as tuples or a list. For example: [('precursor_tolerance', '0.5'), ('rounds', '3')] :param switchArgs: list of arguments not containing a value, for details see the spectra-cluster-cli help. Arguments should be added as strings. 
For example: ['fast_mode', 'keep_binary_files'] """""" procArgs = ['java', '-jar', executable] procArgs.extend(['-output_path', outpath]) if args is not None: for arg in args: procArgs.extend(['-'+arg[0], arg[1]]) if switchArgs is not None: procArgs.extend(['-'+arg for arg in switchArgs]) procArgs.extend(aux.toList(filelocation)) ## run it ## proc = subprocess.Popen(procArgs, stderr=subprocess.PIPE) ## But do not wait till netstat finish, start displaying output immediately ## while True: out = proc.stderr.read(1) if out == '' and proc.poll() != None: break if out != '': sys.stdout.write(out) sys.stdout.flush()" 3404,"def generate_example(): """"""Generate a configuration file example. This utility will load some number of Python modules which are assumed to register options with confpy and generate an example configuration file based on those options. """""" cmd_args = sys.argv[1:] parser = argparse.ArgumentParser(description='Confpy example generator.') parser.add_argument( '--module', action='append', help='A python module which should be imported.', ) parser.add_argument( '--file', action='append', help='A python file which should be evaled.', ) parser.add_argument( '--format', default='JSON', choices=('JSON', 'INI'), help='The output format of the configuration file.', ) args = parser.parse_args(cmd_args) for module in args.module or (): __import__(module) for source_file in args.file or (): cfg = pyfile.PythonFile(path=source_file).config cfg = config.Configuration() print(example.generate_example(cfg, ext=args.format))" 3405,"def count(self, val=True): """"""Get the number of bits in the array with the specified value. Args: val: A boolean value to check against the array's value. Returns: An integer of the number of bits in the array equal to val. """""" return sum((elem.count(val) for elem in self._iter_components()))" 3406,"def prepare(self, *, primef, reqef): """"""Extract the composite array's data into a usable bitarray based on if NoCare bits should be rendered as True or False. This method does the heavy lifting of producing a bitarray that is more efficient for tdo bits when that optimization is available. KWArgs: primef: A contracts.Requirement capability of the associated primitive. reqef: A contracts.Requirement (generated from primitive compilation) describing the signal requirements of the data in this CompositeBitarray. Returns: A bitarray (CompositeBitarray, ConstantBitarray, etc) that is the combined result of all the composite bitarray's components. If this CompositeBitarray's backing linked list can be merged into a single node, that single node is returned. Otherwise, this CompositeBitarray is returned. """""" #TODO remove bitarray copies! if not primef.satisfies(reqef): raise Exception(""Compiler error. 
Requested effect can not be "" ""satisfied by primitive capabilities"") assertPreferFalse = reqef == ZERO or primef == ARBITRARY or\ (reqef == NOCARE and primef == ZERO) testBitarrayFalse = reqef==ZERO or\ (reqef==NOCARE and primef==ZERO) testBitarrayTrue = reqef==ONE or (reqef==NOCARE and primef==ONE) assert not (testBitarrayTrue and testBitarrayFalse) #print(""DATA"", self) #print(""ORIG"", [""%s(%s:%s)""% # (type(elem.value).__name__, # elem.value._val if isinstance(elem.value, # ConstantBitarray)\ # else ""_"", len(elem.value)) # for elem in self._llhead.iternexttill(self._lltail)]) if self._offset or self._tailoffset: if self._is_single_llnode: if isinstance(self._llhead.value, (ConstantBitarray, NoCareBitarray, PreferFalseBitarray)): oldnode = self._llhead if self._offset == 0: oldnode.prev = None if self._tailoffset == 0: oldnode.next = None self._llhead = _DLLNode( oldnode.value[self._offset:\ self._offset+self._tailbitsused]) self._lltail = self._llhead self._offset = 0 self._tailbitsused = self._taillen elif isinstance(self._llhead.value, bitarray): if testBitarrayFalse or testBitarrayTrue: oldnode = self._llhead newval = oldnode.value[self._offset: self._offset+self._tailbitsused] if testBitarrayFalse: if not newval.any(): newval = ConstantBitarray(False, len(newval)) else: raise Exception(""bitarray in data contains a 1"") if testBitarrayTrue: if newval.all(): newval = ConstantBitarray(True, len(newval)) else: raise Exception(""bitarray in data contains a 0"") self._llhead = _DLLNode(newval) self._lltail = self._llhead self._offset = 0 self._tailbitsused = self._taillen else: #IF HEAD IS NOT TAIL; OFFSET OR TAILOFFSET if self._offset: if isinstance(self._llhead.value, (ConstantBitarray, NoCareBitarray, PreferFalseBitarray)): oldhead = self._llhead self._llhead = _DLLNode( oldhead.value[self._offset:]) self._llhead.next = oldhead.next oldhead.next = None self._offset = 0 elif isinstance(self._llhead.value, bitarray): oldhead = self._llhead newval = oldhead.value[self._offset:] if testBitarrayFalse: if not newval.any(): newval = ConstantBitarray(False, len(newval)) else: raise Exception(""bitarray in data contains a 1"") if testBitarrayTrue: if newval.all(): newval = ConstantBitarray(True, len(newval)) else: raise Exception(""bitarray in data contains a 0"") self._llhead = _DLLNode(newval) self._llhead.next = oldhead.next oldhead.next = None self._offset = 0 if self._tailoffset:#IF HEAD IS NOT TAIL AND TAILOFFSET if isinstance(self._lltail.value, (ConstantBitarray, NoCareBitarray, PreferFalseBitarray)): oldtail = self._lltail self._lltail = _DLLNode( oldtail.value[:self._tailbitsused]) self._lltail.prev = oldtail.prev oldtail.prev = None self._tailbitsused = self._taillen elif isinstance(self._lltail.value, bitarray): oldtail = self._lltail newval = oldtail.value[:self._tailbitsused] if testBitarrayFalse: if not newval.any(): newval = ConstantBitarray(False, len(newval)) else: raise Exception(""bitarray in data contains a 1"") if testBitarrayTrue: if newval.all(): newval = ConstantBitarray(True, len(newval)) else: raise Exception(""bitarray in data contains a 0"") self._lltail = _DLLNode(newval) self._lltail.prev = oldtail.prev oldtail.prev = None self._tailbitsused = self._taillen for elem in self._llhead.iternexttill(self._lltail): if isinstance(elem.value, PreferFalseBitarray): if assertPreferFalse: elem._value = ConstantBitarray(False, len(elem.value)) else: elem._value = NoCareBitarray(len(elem.value)) if isinstance(elem.value, bitarray): if testBitarrayFalse: if not 
elem.value.any(): elem.value = ConstantBitarray(False, len(elem.value)) else: raise Exception(""bitarray in data contains a 1"") if testBitarrayTrue: if elem.value.all(): elem.value = ConstantBitarray(True, len(elem.value)) else: raise Exception(""bitarray in data contains a 0"") #print(""TRAN"", [""%s(%s:%s)""% # (type(elem.value).__name__, # elem.value._val if isinstance(elem.value, # ConstantBitarray)\ # else ""_"", len(elem.value)) # for elem in self._llhead.iternexttill(self._lltail)]) if not self._is_single_llnode and\ (self._lltail.next is not self._llhead or\ (self._offset == 0 and self._tailbitsused == self._taillen) ): self._do_merge(stoponfail=False) #print(""\033[1mPOST"", ""+ "".join([""%s%s(%s:%s)\033[0m""% # ('\033[91m' if isinstance(elem.value, bitarray) else # ('\033[94m' if isinstance(elem.value, # (NoCareBitarray, PreferFalseBitarray)) # else '\033[92m'),type(elem.value).__name__, # elem.value._val if isinstance(elem.value, # ConstantBitarray)\ # else (elem.value.to01() if isinstance(elem.value, # bitarray) # else ""_""), len(elem.value)) # for elem in self._llhead.iternexttill(self._lltail)])) if self._is_single_llnode and self._offset == 0 and\ self._tailbitsused == self._taillen: if isinstance(self._llhead.value, (NoCareBitarray, PreferFalseBitarray)): return ConstantBitarray(False, len(self._llhead.value)) return self._llhead.value return self" 3407,"def _api_group_for_type(cls): """""" Determine which Kubernetes API group a particular PClass is likely to belong with. This is basically nonsense. The question being asked is wrong. An abstraction has failed somewhere. Fixing that will get rid of the need for this. """""" _groups = { (u""v1beta1"", u""Deployment""): u""extensions"", (u""v1beta1"", u""DeploymentList""): u""extensions"", (u""v1beta1"", u""ReplicaSet""): u""extensions"", (u""v1beta1"", u""ReplicaSetList""): u""extensions"", } key = ( cls.apiVersion, cls.__name__.rsplit(u""."")[-1], ) group = _groups.get(key, None) return group" 3408,"def response(request, status, obj): """""" Generate a response. :param IRequest request: The request being responsed to. :param int status: The response status code to set. :param obj: Something JSON-dumpable to write into the response body. :return bytes: The response body to write out. eg, return this from a *render_* method. """""" request.setResponseCode(status) request.responseHeaders.setRawHeaders( u""content-type"", [u""application/json""], ) body = dumps_bytes(obj) return body" 3409,"def create(self, collection_name, obj): """""" Create a new object in the named collection. :param unicode collection_name: The name of the collection in which to create the object. :param IObject obj: A description of the object to create. :return _KubernetesState: A new state based on the current state but also containing ``obj``. """""" obj = self.agency.before_create(self, obj) new = self.agency.after_create(self, obj) updated = self.transform( [collection_name], lambda c: c.add(new), ) return updated" 3410,"def replace(self, collection_name, old, new): """""" Replace an existing object with a new version of it. :param unicode collection_name: The name of the collection in which to replace an object. :param IObject old: A description of the object being replaced. :param IObject new: A description of the object to take the place of ``old``. :return _KubernetesState: A new state based on the current state but also containing ``obj``. 
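Stepping back to the response() helper defined a few entries above: it is meant to be returned directly from a Twisted render_* method. The resource below is an illustrative sketch, not part of this code base; only the response(request, status, obj) call reflects the helper shown here, and it is assumed to be importable.

.. code-block:: python

    from twisted.web.resource import Resource

    class VersionResource(Resource):
        """Illustrative leaf resource that answers GET with a JSON body."""
        isLeaf = True

        def render_GET(self, request):
            # response() sets the status code and the application/json header on
            # the request, serialises the object, and returns the body bytes,
            # which is exactly what a render_* method is expected to return.
            return response(request, 200, {u"version": u"v1"})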
"""""" self.agency.before_replace(self, old, new) updated = self.transform( [collection_name], lambda c: c.replace(old, new), ) return updated" 3411,"def delete(self, collection_name, obj): """""" Delete an existing object. :param unicode collection_name: The name of the collection from which to delete the object. :param IObject obj: A description of the object to delete. :return _KubernetesState: A new state based on the current state but not containing ``obj``. """""" updated = self.transform( [collection_name], lambda c: obj.delete_from(c), ) return updated" 3412,"def get_list_connections(self, environment, product, unique_name_list=None, is_except=False): """""" Gets list of connections that satisfy the filter by environment, product and (optionally) unique DB names :param environment: Environment name :param product: Product name :param unique_name_list: list of unique db aliases :param is_except: take the connections with aliases provided or, the other wat around, take all the rest :return: list of dictionaries with connections """""" return_list = [] for item in self.connection_sets: if unique_name_list: if item['unique_name']: if is_except: if item['environment'] == environment and item['product'] == product and \ (item['unique_name'] not in unique_name_list): return_list.append(item) elif not is_except: if item['environment'] == environment and item['product'] == product and \ (item['unique_name'] in unique_name_list): return_list.append(item) else: if item['environment'] == environment and item['product'] == product: return_list.append(item) return return_list" 3413,"def set_address(): """"""Set the (host, port) to connect to from the environment. If the environment is updated, a call to this function will update the address this client connects to. This function will prefer to use the ``STATSD`` connection string environment variable, but will fall back to using the ``STATSD_HOST`` and ``STATSD_PORT``. """""" global STATSD_ADDR connection_string = os.getenv('STATSD') if connection_string: url = urlparse.urlparse(connection_string) STATSD_ADDR = (url.hostname, url.port) else: STATSD_ADDR = (os.getenv('STATSD_HOST', 'localhost'), int(os.getenv('STATSD_PORT', 8125)))" 3414,"def execution_timer(value): """"""The ``execution_timer`` decorator allows for easy instrumentation of the duration of function calls, using the method name in the key. The following example would add duration timing with the key ``my_function`` .. code: python @statsd.execution_timer def my_function(foo): pass You can also have include a string argument value passed to your method as part of the key. Pass the index offset of the arguments to specify the argument number to use. In the following example, the key would be ``my_function.baz``: .. 
code:python @statsd.execution_timer(2) def my_function(foo, bar, 'baz'): pass """""" def _invoke(method, key_arg_position, *args, **kwargs): start_time = time.time() result = method(*args, **kwargs) duration = time.time() - start_time key = [method.func_name] if key_arg_position is not None: key.append(args[key_arg_position]) add_timing('.'.join(key), value=duration) return result if type(value) is types.FunctionType: def wrapper(*args, **kwargs): return _invoke(value, None, *args, **kwargs) return wrapper else: def duration_decorator(func): def wrapper(*args, **kwargs): return _invoke(func, value, *args, **kwargs) return wrapper return duration_decorator" 3415,"def _send(key, value, metric_type): """"""Send the specified value to the statsd daemon via UDP without a direct socket connection. :param str value: The properly formatted statsd counter value """""" if STATSD_PREFIX: key = '.'.join([STATSD_PREFIX, key]) try: STATSD_SOCKET.sendto('{0}:{1}|{2}'.format(key, value, metric_type).encode(), STATSD_ADDR) except socket.error: LOGGER.exception(SOCKET_ERROR)" 3416,"def type_names(prefix, sizerange): """""" Helper for type name generation, like: bytes1 .. bytes32 """""" namelist = [] for i in sizerange: namelist.append(prefix + str(i)) return tuple(namelist)" 3417,"def type_names_mn(prefix, sizerangem, sizerangen): """""" Helper for type name generation, like: fixed0x8 .. fixed0x256 """""" lm = [] ln = [] namelist = [] # construct lists out of ranges for i in sizerangem: lm.append(i) for i in sizerangen: ln.append(i) # sizes (in bits) are valid if (%8 == 0) and (m+n <= 256) # first condition is covered by passing proper sizerange{m,n} validpairs = [tuple([m,n]) for m in lm for n in ln if m+n<=256] for i in validpairs: namelist.append(prefix + str(i[0]) + 'x' + str(i[1])) return tuple(namelist)" 3418,"def _get_lang(self, *args, **kwargs): """""" Let users select language """""" if ""lang"" in kwargs: if kwargs[""lang""] in self._available_languages: self.lang = kwargs[""lang""]" 3419,"def notify(self, msg, color='green', notify='true', message_format='text'): """"""Send notification to specified HipChat room"""""" self.message_dict = { 'message': msg, 'color': color, 'notify': notify, 'message_format': message_format, } if not self.debug: return requests.post( self.notification_url, json.dumps(self.message_dict), headers=self.headers ) else: print('HipChat message: <{}>'.format(msg)) return []" 3420,"def trial(path=TESTS_PATH, coverage=False): """"""Run tests using trial """""" args = ['trial'] if coverage: args.append('--coverage') args.append(path) print args local(' '.join(args))" 3421,"def process_result_value(self, value, dialect): """""" When SQLAlchemy gets the string representation from a ReprObjType column, it converts it to the python equivalent via exec. """""" if value is not None: cmd = ""value = {}"".format(value) exec(cmd) return value" 3422,"def make_regex(separator): """"""Utility function to create regexp for matching escaped separators in strings. """""" return re.compile(r'(?:' + re.escape(separator) + r')?((?:[^' + re.escape(separator) + r'\\]|\\.)+)')" 3423,"def strip_comments(text): """"""Comment stripper for JSON. 
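Looking back at the statsd helpers a few entries above, the datagram built by _send follows the plain statsd text protocol. The snippet below re-creates just the formatting step with made-up values so the wire format is visible; the prefix and metric names are not from this listing.

.. code-block:: python

    # Rebuild the payload the way _send does, with illustrative values
    STATSD_PREFIX = 'myapp'    # when empty or unset, no prefix is joined on

    key, value, metric_type = 'requests', 1, 'c'   # 'c' counter, 'ms' timer, 'g' gauge
    if STATSD_PREFIX:
        key = '.'.join([STATSD_PREFIX, key])

    payload = '{0}:{1}|{2}'.format(key, value, metric_type)
    # _send encodes this text and pushes it to (host, port) with socket.sendto()
    assert payload == 'myapp.requests:1|c'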
"""""" regex = r'\s*(#|\/{2}).*$' regex_inline = r'(:?(?:\s)*([A-Za-z\d\.{}]*)|((?<=\"").*\""),?)(?:\s)*(((#|(\/{2})).*)|)$' # noqa lines = text.split('\n') for index, line in enumerate(lines): if re.search(regex, line): if re.search(r'^' + regex, line, re.IGNORECASE): lines[index] = """" elif re.search(regex_inline, line): lines[index] = re.sub(regex_inline, r'\1', line) return '\n'.join(lines)" 3424,"def register(action): """"""Action registration is used to support generating lists of permitted actions from a permission set and an object pattern. Only registered actions will be returned by such queries. """""" if isinstance(action, str): Action.register(Action(action)) elif isinstance(action, Action): Action.registered.add(action) else: for a in action: Action.register(a)" 3425,"def add(self, effect=None, act=None, obj=None, policy=None, policies=None): """"""Insert an individual (effect, action, object) triple or all triples for a policy or list of policies. """""" if policies is not None: for p in policies: self.add(policy=p) elif policy is not None: for e, a, o in policy: self.add(e, a, o) else: objc = obj.components if obj is not None else [] self.tree[act.components + objc] = effect" 3426,"def allow(self, act, obj=None): """"""Determine where a given action on a given object is allowed. """""" objc = obj.components if obj is not None else [] try: return self.tree[act.components + objc] == 'allow' except KeyError: return False" 3427,"def permitted_actions(self, obj=None): """"""Determine permitted actions for a given object pattern. """""" return [a for a in Action.registered if self.allow(a, obj(str(a)) if obj is not None else None)]" 3428,"def subscribe(ws): """"""WebSocket endpoint, used for liveupdates"""""" while ws is not None: gevent.sleep(0.1) try: message = ws.receive() # expect function name to subscribe to if message: stream.register(ws, message) except WebSocketError: ws = None" 3429,"def could_scope_out(self): """""" could bubble up from current scope :return: """""" return not self.waiting_for or \ isinstance(self.waiting_for, callable.EndOfStory) or \ self.is_breaking_a_loop()" 3430,"def get_child_story(self): logger.debug('# get_child_story') """""" try child story that match message and get scope of it :return: """""" story_loop = self.compiled_story() if hasattr(story_loop, 'children_matcher') and not self.matched: return self.get_story_scope_child(story_loop) story_part = self.get_current_story_part() if not hasattr(story_part, 'get_child_by_validation_result'): logger.debug('# does not have get_child_by_validation_result') return None if isinstance(self.waiting_for, forking.SwitchOnValue): logger.debug('# switch on value') return story_part.get_child_by_validation_result(self.waiting_for.value) # for some base classes we could try validate result direct child_story = story_part.get_child_by_validation_result(self.waiting_for) if child_story: logger.debug('# child_story') logger.debug(child_story) return child_story stack_tail = self.stack_tail() if stack_tail['data'] is not None and not self.matched: validator = matchers.deserialize(stack_tail['data']) logger.debug('# validator') logger.debug(validator) logger.debug('# self.message') logger.debug(self.message) validation_result = validator.validate(self.message) logger.debug('# validation_result') logger.debug(validation_result) res = story_part.get_child_by_validation_result(validation_result) logger.debug('# res') logger.debug(res) # or we validate message # but can't find right child story # maybe we should 
use independent validators for each story here if res is None: return self.get_story_scope_child(story_part) else: return res return None" 3431,"def is_waiting_for_input(self): """""" could make one step further :return: """""" return self.waiting_for and \ not isinstance(self.waiting_for, forking.SwitchOnValue) and \ not is_base_type(self.waiting_for)" 3432,"def alias(self): """""" If the _alias cache is None, just build the alias from the item name. """""" if self._alias is None: if self.name in self.aliases_fix: self._alias = self.aliases_fix[self.name] else: self._alias = self.name.lower()\ .replace(' ', '-')\ .replace('(', '')\ .replace(')', '') return self._alias" 3433,"def load_configs(self, conf_file): """""" Assumes that the config file does not have any sections, so throw it all in global """""" with open(conf_file) as stream: lines = itertools.chain((""[global]"",), stream) self._config.read_file(lines) return self._config['global']" 3434,"def remove_quotes(self, configs): """""" Because some values are wraped in single quotes """""" for key in configs: value = configs[key] if value[0] == ""'"" and value[-1] == ""'"": configs[key] = value[1:-1] return configs" 3435,"def multikey_sort(items, columns): """"""Source: https://stackoverflow.com/questions/1143671/python-sorting-list-of-dictionaries-by-multiple-keys """""" comparers = [ ((itemgetter(col[1:].strip()), -1) if col.startswith('-') else (itemgetter(col.strip()), 1)) for col in columns ] def cmp(a, b): return (a > b) - (a < b) def comparer(left, right): comparer_iter = ( cmp(fn(left), fn(right)) * mult for fn, mult in comparers ) return next((result for result in comparer_iter if result), 0) return sorted(items, key=cmp_to_key(comparer))" 3436,"def sanitize(string): """""" Catch and replace invalid path chars [replace, with] """""" replace_chars = [ ['\\', '-'], [':', '-'], ['/', '-'], ['?', ''], ['<', ''], ['>', ''], ['`', '`'], ['|', '-'], ['*', '`'], ['""', '\''], ['.', ''], ['&', 'and'] ] for ch in replace_chars: string = string.replace(ch[0], ch[1]) return string" 3437,"def chunks_of(max_chunk_size, list_to_chunk): """""" Yields the list with a max size of max_chunk_size """""" for i in range(0, len(list_to_chunk), max_chunk_size): yield list_to_chunk[i:i + max_chunk_size]" 3438,"def split_into(max_num_chunks, list_to_chunk): """""" Yields the list with a max total size of max_num_chunks """""" max_chunk_size = math.ceil(len(list_to_chunk) / max_num_chunks) return chunks_of(max_chunk_size, list_to_chunk)" 3439,"def norm_path(path): """""" :return: Proper path for os with vars expanded out """""" # path = os.path.normcase(path) path = os.path.expanduser(path) path = os.path.expandvars(path) path = os.path.normpath(path) return path" 3440,"def create_hashed_path(base_path, name, depth=2): """""" Create a directory structure using the hashed filename :return: string of the path to save to not including filename/ext """""" if depth > 16: logger.warning(""depth cannot be greater then 16, setting to 16"") depth = 16 name_hash = hashlib.md5(str(name).encode('utf-8')).hexdigest() if base_path.endswith(os.path.sep): save_path = base_path else: save_path = base_path + os.path.sep for i in range(1, depth + 1): end = i * 2 start = end - 2 save_path += name_hash[start:end] + os.path.sep return {'path': save_path, 'hash': name_hash, }" 3441,"def create_path(path, is_dir=False): """""" Check if path exists, if not create it :param path: path or file to create directory for :param is_dir: pass True if we are passing in a directory, 
default = False :return: os safe path from `path` """""" path = norm_path(path) path_check = path if not is_dir: path_check = os.path.dirname(path) does_path_exists = os.path.exists(path_check) if does_path_exists: return path try: os.makedirs(path_check) except OSError: pass return path" 3442,"def rate_limited(num_calls=1, every=1.0): """""" Source: https://github.com/tomasbasham/ratelimit/tree/0ca5a616fa6d184fa180b9ad0b6fd0cf54c46936 Need to make a few changes that included having num_calls be a float Prevent a method from being called if it was previously called before a time widows has elapsed. Keyword Arguments: num_calls (float): Maximum method invocations within a period. Must be greater than 0. every (float): A dampening factor (in seconds). Can be any number greater than 0. Return: function: Decorated function that will forward method invocations if the time window has elapsed. """""" frequency = abs(every) / float(num_calls) def decorator(func): """""" Extend the behaviour of the following function, forwarding method invocations if the time window hes elapsed. Arguments: func (function): The function to decorate Returns: function: Decorated function """""" # To get around issues with function local scope # and reassigning variables, we wrap the time # within a list. When updating the value we're # not reassigning `last_called`, which would not # work, but instead reassigning the value at a # particular index. last_called = [0.0] # Add thread safety lock = threading.RLock() def wrapper(*args, **kargs): """"""Decorator wrapper function"""""" with lock: elapsed = time.time() - last_called[0] left_to_wait = frequency - elapsed if left_to_wait > 0: time.sleep(left_to_wait) last_called[0] = time.time() return func(*args, **kargs) return wrapper return decorator" 3443,"def rate_limited_old(max_per_second): """""" Source: https://gist.github.com/gregburek/1441055 """""" lock = threading.Lock() min_interval = 1.0 / max_per_second def decorate(func): last_time_called = time.perf_counter() @wraps(func) def rate_limited_function(*args, **kwargs): lock.acquire() nonlocal last_time_called try: elapsed = time.perf_counter() - last_time_called left_to_wait = min_interval - elapsed if left_to_wait > 0: time.sleep(left_to_wait) return func(*args, **kwargs) finally: last_time_called = time.perf_counter() lock.release() return rate_limited_function return decorate" 3444,"def timeit(stat_tracker_func, name): """""" Pass in a function and the name of the stat Will time the function that this is a decorator to and send the `name` as well as the value (in seconds) to `stat_tracker_func` `stat_tracker_func` can be used to either print out the data or save it """""" def _timeit(func): def wrapper(*args, **kw): start_time = time.time() result = func(*args, **kw) stop_time = time.time() stat_tracker_func(name, stop_time - start_time) return result return wrapper return _timeit" 3445,"def get_proxy_parts(proxy): """""" Take a proxy url and break it up to its parts """""" proxy_parts = {'schema': None, 'user': None, 'password': None, 'host': None, 'port': None, } # Find parts results = re.match(proxy_parts_pattern, proxy) if results: matched = results.groupdict() for key in proxy_parts: proxy_parts[key] = matched.get(key) else: logger.error(""Invalid proxy format `{proxy}`"".format(proxy=proxy)) if proxy_parts['port'] is None: proxy_parts['port'] = '80' return proxy_parts" 3446,"def remove_html_tag(input_str='', tag=None): """""" Returns a string with the html tag and all its contents from a string """""" 
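# Example (illustrative): the pattern built below is non-greedy, so each
# matching block is removed separately; with tag='script',
#   remove_html_tag('<p>hi</p><script>x</script>', tag='script')
# would return '<p>hi</p>'.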
result = input_str if tag is not None: pattern = re.compile('<{tag}[\s\S]+?/{tag}>'.format(tag=tag)) result = re.sub(pattern, '', str(input_str)) return result" 3447,"def asodict(self, handlepoints=True, reportpoints=True): """"""Returns an ordered dictionary of handle/report points"""""" out = odict() if handlepoints: for hp in self.handlepoints: out[hp.hpoint] = hp.trace if reportpoints: for rp in self.reportpoints: if not (rp.rpoint in out): out[rp.rpoint] = odict() out[rp.rpoint][self.attribute] = {'value' : rp.value, 'extended': rp.extended} return out" 3448,"def asodict(self, freports=True, handlepoints=True, reportpoints=True): """"""Returns an ordered dictionary of feed, and handle/report points"""""" out = odict() if freports: for fr in self.freports: out[fr.num] = {'tstamp' : fr.tstamp, 'report' : fr.asodict(handlepoints, reportpoints)} if handlepoints: for hp in self.handlepoints: out[hp.hpoint] = hp.trace if reportpoints: for rp in self.reportpoints: if not (rp.rpoint in out): out[rp.rpoint] = odict() out[rp.rpoint][self.attribute] = {'value' : rp.value, 'extended': rp.extended} return out" 3449,"def ip_between(ip, start, finish): """"""Checks to see if IP is between start and finish"""""" if is_IPv4Address(ip) and is_IPv4Address(start) and is_IPv4Address(finish): return IPAddress(ip) in IPRange(start, finish) else: return False" 3450,"def is_rfc1918(ip): """"""Checks to see if an IP address is used for local communications within a private network as specified by RFC 1918 """""" if ip_between(ip, ""10.0.0.0"", ""10.255.255.255""): return True elif ip_between(ip, ""172.16.0.0"", ""172.31.255.255""): return True elif ip_between(ip, ""192.168.0.0"", ""192.168.255.255""): return True else: return False" 3451,"def is_reserved(ip): """"""Checks to see if an IP address is reserved for special purposes. This includes all of the RFC 1918 addresses as well as other blocks that are reserved by IETF, and IANA for various reasons. 
https://en.wikipedia.org/wiki/Reserved_IP_addresses """""" if ip_between(ip, ""0.0.0.0"", ""0.255.255.255""): return True elif ip_between(ip, ""10.0.0.0"", ""10.255.255.255""): return True elif ip_between(ip, ""100.64.0.0"", ""100.127.255.255""): return True elif ip_between(ip, ""127.0.0.0"", ""127.255.255.255""): return True elif ip_between(ip, ""169.254.0.0"", ""169.254.255.255""): return True elif ip_between(ip, ""172.16.0.0"", ""172.31.255.255""): return True elif ip_between(ip, ""192.0.0.0"", ""192.0.0.255""): return True elif ip_between(ip, ""192.0.2.0"", ""192.0.2.255""): return True elif ip_between(ip, ""192.88.99.0"", ""192.88.99.255""): return True elif ip_between(ip, ""192.168.0.0"", ""192.168.255.255""): return True elif ip_between(ip, ""198.18.0.0"", ""198.19.255.255""): return True elif ip_between(ip, ""198.51.100.0"", ""198.51.100.255""): return True elif ip_between(ip, ""203.0.113.0"", ""203.0.113.255""): return True elif ip_between(ip, ""224.0.0.0"", ""255.255.255.255""): return True else: return False" 3452,"def is_hash(fhash): """"""Returns true for valid hashes, false for invalid."""""" # Intentionally doing if/else statement for ease of testing and reading if re.match(re_md5, fhash): return True elif re.match(re_sha1, fhash): return True elif re.match(re_sha256, fhash): return True elif re.match(re_sha512, fhash): return True elif re.match(re_ssdeep, fhash): return True else: return False" 3453,"def ip_to_geojson(ipaddress, name=""Point""): """"""Generate GeoJSON for given IP address"""""" geo = ip_to_geo(ipaddress) point = { ""type"": ""FeatureCollection"", ""features"": [ { ""type"": ""Feature"", ""properties"": { ""name"": name }, ""geometry"": { ""type"": ""Point"", ""coordinates"": [ geo[""longitude""], geo[""latitude""] ] } } ] } return point" 3454,"def ips_to_geojson(ipaddresses): """"""Generate GeoJSON for given IP address"""""" features = [] for ipaddress in ipaddresses: geo = gi.record_by_addr(ipaddress) features.append({ ""type"": ""Feature"", ""properties"": { ""name"": ipaddress }, ""geometry"": { ""type"": ""Point"", ""coordinates"": [ geo[""longitude""], geo[""latitude""] ] } }) points = { ""type"": ""FeatureCollection"", ""features"": features } return points" 3455,"def reverse_dns_sna(ipaddress): """"""Returns a list of the dns names that point to a given ipaddress using StatDNS API"""""" r = requests.get(""http://api.statdns.com/x/%s"" % ipaddress) if r.status_code == 200: names = [] for item in r.json()['answer']: name = str(item['rdata']).strip(""."") names.append(name) return names elif r.json()['code'] == 503: # NXDOMAIN - no PTR record return None" 3456,"def vt_ip_check(ip, vt_api): """"""Checks VirusTotal for occurrences of an IP address"""""" if not is_IPv4Address(ip): return None url = 'https://www.virustotal.com/vtapi/v2/ip-address/report' parameters = {'ip': ip, 'apikey': vt_api} response = requests.get(url, params=parameters) try: return response.json() except ValueError: return None" 3457,"def vt_name_check(domain, vt_api): """"""Checks VirusTotal for occurrences of a domain name"""""" if not is_fqdn(domain): return None url = 'https://www.virustotal.com/vtapi/v2/domain/report' parameters = {'domain': domain, 'apikey': vt_api} response = requests.get(url, params=parameters) try: return response.json() except ValueError: return None" 3458,"def vt_hash_check(fhash, vt_api): """"""Checks VirusTotal for occurrences of a file hash"""""" if not is_hash(fhash): return None url = 'https://www.virustotal.com/vtapi/v2/file/report' parameters = 
{'resource': fhash, 'apikey': vt_api} response = requests.get(url, params=parameters) try: return response.json() except ValueError: return None" 3459,"def ipinfo_ip_check(ip): """"""Checks ipinfo.io for basic WHOIS-type data on an IP address"""""" if not is_IPv4Address(ip): return None response = requests.get('http://ipinfo.io/%s/json' % ip) return response.json()" 3460,"def ipvoid_check(ip): """"""Checks IPVoid.com for info on an IP address"""""" if not is_IPv4Address(ip): return None return_dict = {} headers = {'User-Agent': useragent} url = 'http://ipvoid.com/scan/%s/' % ip response = requests.get(url, headers=headers) data = BeautifulSoup(response.text) if data.findAll('span', attrs={'class': 'label label-success'}): return None elif data.findAll('span', attrs={'class': 'label label-danger'}): for each in data.findAll('img', alt='Alert'): detect_site = each.parent.parent.td.text.lstrip() detect_url = each.parent.a['href'] return_dict[detect_site] = detect_url return return_dict" 3461,"def urlvoid_check(name, api_key): """"""Checks URLVoid.com for info on a domain"""""" if not is_fqdn(name): return None url = 'http://api.urlvoid.com/api1000/{key}/host/{name}'.format(key=api_key, name=name) response = requests.get(url) tree = ET.fromstring(response.text) if tree.find('./detections/engines'): return [e.text for e in tree.find('./detections/engines')] else: return None" 3462,"def urlvoid_ip_check(ip): """"""Checks URLVoid.com for info on an IP address"""""" if not is_IPv4Address(ip): return None return_dict = {} headers = {'User-Agent': useragent} url = 'http://urlvoid.com/ip/%s/' % ip response = requests.get(url, headers=headers) data = BeautifulSoup(response.text) h1 = data.findAll('h1')[0].text if h1 == 'Report not found': return None elif re.match('^IP', h1): return_dict['bad_names'] = [] return_dict['other_names'] = [] for each in data.findAll('img', alt='Alert'): return_dict['bad_names'].append(each.parent.text.strip()) for each in data.findAll('img', alt='Valid'): return_dict['other_names'].append(each.parent.text.strip()) return return_dict" 3463,"def dshield_ip_check(ip): """"""Checks dshield for info on an IP address"""""" if not is_IPv4Address(ip): return None headers = {'User-Agent': useragent} url = 'https://isc.sans.edu/api/ip/' response = requests.get('{0}{1}?json'.format(url, ip), headers=headers) return response.json()" 3464,"def cli(ctx, amount, index, stage): """"""Push data to Target Service Client"""""" if not ctx.bubble: ctx.say_yellow('There is no bubble present, will not push') raise click.Abort() TGT = None transformed = True STAGE = None if stage in STAGES and stage in ctx.cfg.CFG: STAGE = ctx.cfg.CFG[stage] if not STAGE: ctx.say_red('There is no STAGE in CFG:' + stage) ctx.say_yellow('please check configuration in ' + ctx.home + '/config/config.yaml') raise click.Abort() if 'TARGET' in STAGE: TGT = STAGE.TARGET if 'TRANSFORM' in STAGE: transformed = True else: transformed = False if not transformed: ctx.say_yellow(""""""There is no transform defined in the configuration, will not transform, using the results of step 'pulled' instead of 'push' """""") if not TGT: ctx.say_red('There is no TARGET in: ' + stage) ctx.say_yellow('please check configuration in ' + ctx.home + '/config/config.yaml') raise click.Abort() tgt_client = get_client(ctx.gbc, TGT.CLIENT, ctx.home) try: tclient = tgt_client.BubbleClient(cfg=TGT) tclient.set_parent(ctx.gbc) tclient.set_verbose(ctx.get_verbose()) except Exception as e: ctx.say_red('cannot create bubble client:' + TGT.CLIENT) 
ctx.say_red(str(e)) raise click.Abort('can not push') step_to_load = 'push' if not transformed: step_to_load = 'pulled' data_gen = bubble_lod_load(ctx, step_to_load, stage) full_data = False if amount == -1 and index == -1: full_data = True to_push = get_gen_slice(ctx.gbc, data_gen, amount, index) error_count = Counter() total_count = Counter() pushres = do_yielding_push(ctx=ctx, to_push=to_push, tclient=tclient, total_counter=total_count, error_counter=error_count) pfr = bubble_lod_dump(ctx=ctx, step='pushed', stage=stage, full_data=full_data, reset=True, data_gen=pushres) ctx.say('pushed [%d] objects' % pfr['total']) stats = {} stats['pushed_stat_error_count'] = error_count.get_total() stats['pushed_stat_total_count'] = total_count.get_total() update_stats(ctx, stage, stats) return True" 3465,"def validate(version, comparison): """""" Returns whether or not the version for this plugin satisfies the inputted expression. The expression will follow the dependency declaration rules associated with setuptools in Python. More information can be found at [https://pythonhosted.org/setuptools/setuptools.html#declaring-dependencies] :param version | expression | :return """""" # match any if not comparison: return True # loop through all available opts = comparison.split(',') expr = re.compile('(==|!=|<=|>=|<|>)(.*)') for opt in opts: try: test, value = expr.match(opt.strip()).groups() except StandardError: raise errors.InvalidVersionDefinition(opt) value = value.strip() # test for an exact match if test == '==': if value == version: return True # test for negative exact matches elif test == '!=': if value == version: return False # test for range conditions elif test == '<': if vercmp(version, value) != -1: return False elif test == '<=': if vercmp(version, value) not in (-1, 0): return False elif test == '>': if vercmp(value, version) != -1: return False elif test == '>=': if vercmp(value, version) not in (-1, 0): return False return True" 3466,"def _full_kind(details): """""" Determine the full kind (including a group if applicable) for some failure details. :see: ``v1.Status.details`` """""" kind = details[u""kind""] if details.get(u""group"") is not None: kind += u""."" + details[u""group""] return kind" 3467,"def RemoveEmptyDirectoryTree(path, silent = False, recursion = 0): """""" Delete tree of empty directories. Parameters ---------- path : string Path to root of directory tree. silent : boolean [optional: default = False] Turn off log output. recursion : int [optional: default = 0] Indicates level of recursion. """""" if not silent and recursion is 0: goodlogging.Log.Info(""UTIL"", ""Starting removal of empty directory tree at: {0}"".format(path)) try: os.rmdir(path) except OSError: if not silent: goodlogging.Log.Info(""UTIL"", ""Removal of empty directory tree terminated at: {0}"".format(path)) return else: if not silent: goodlogging.Log.Info(""UTIL"", ""Directory deleted: {0}"".format(path)) RemoveEmptyDirectoryTree(os.path.dirname(path), silent, recursion + 1)" 3468,"def CheckPathExists(path): """""" Check if path exists, if it does add number to path (incrementing until a unique path is found). Parameters ---------- path : string Path of directory to try. Returns ---------- string Path of unique directory. 
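Example ---------- Illustrative: if '/tmp/report.txt' already exists, CheckPathExists('/tmp/report.txt') returns '/tmp/report_1.txt' (then '_2', '_3', ... until an unused path is found).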
"""""" i = 0 root, ext = os.path.splitext(path) while os.path.exists(path): i = i + 1 goodlogging.Log.Info(""UTIL"", ""Path {0} already exists"".format(path)) path = ""{0}_{1}"".format(root, i) + ext return path" 3469,"def StripSpecialCharacters(string, stripAll = False): """""" Strips special characters, duplicate spaces and post/pre-ceeding spaces. Strip on single spaces, periods, hyphens and underscores is conditional on if stripAll is set Parameters ---------- string : string String to strip special characters from. stripAll : boolean [optional: default = False] If set will also strip single spaces, periods, hyphens and underscores. Returns ---------- string Resulting string with special characters removed. """""" goodlogging.Log.Info(""UTIL"", ""Stripping any special characters from {0}"".format(string), verbosity=goodlogging.Verbosity.MINIMAL) string = string.strip() string = re.sub('[&]', 'and', string) string = re.sub(r'[@#$%^&*{};:,/<>?\\|`~=+±§£]', '', string) string = re.sub('\s\s+', ' ', string) if stripAll: string = re.sub('[_.-]', '', string) string = re.sub('\s', '', string) goodlogging.Log.Info(""UTIL"", ""New string is: {0}"".format(string), verbosity=goodlogging.Verbosity.MINIMAL) return string" 3470,"def ValidUserResponse(response, validList): """""" Check if user response is in a list of valid entires. If an invalid response is given re-prompt user to enter one of the valid options. Do not proceed until a valid entry is given. Parameters ---------- response : string Response string to check. validList : list A list of valid responses. Returns ---------- string A valid response string. """""" if response in validList: return response else: prompt = ""Unknown response given - please reenter one of [{0}]: "".format('/'.join(validList)) response = goodlogging.Log.Input(""DM"", prompt) return ValidUserResponse(response, validList)" 3471,"def UserAcceptance( matchList, recursiveLookup = True, promptComment = None, promptOnly = False, xStrOverride = ""to skip this selection"" ): """""" Prompt user to select a entry from a given match list or to enter a new string to look up. If the match list is empty user must enter a new string or exit. Parameters ---------- matchList : list A list of entries which the user can select a valid match from. recursiveLookup : boolean [optional: default = True] Allow user to enter a new string to look up. promptComment : string [optional: default = None] Add an additional comment on the end of the prompt message. promptOnly : boolean [optional: default = False] Set to true if match list is expected to be empty. In which case the presence of an empty match list will not be mentioned and user will be expected to enter a new response to look up. xStrOverride : string [optional: default = ""to skip this selection""] Override the string for 'x' response. This can be used if the behaviour of the 'x' response is changed. Returns ---------- string or None Either a entry from matchList, another valid response or a new string to look up. If match list is empty and recursive lookup is disabled or if the user response is 'x' this will return None. 
"""""" matchString = ', '.join(matchList) if len(matchList) == 1: goodlogging.Log.Info(""UTIL"", ""Match found: {0}"".format(matchString)) prompt = ""Enter 'y' to accept this match or e"" elif len(matchList) > 1: goodlogging.Log.Info(""UTIL"", ""Multiple possible matches found: {0}"".format(matchString)) prompt = ""Enter correct match from list or e"" else: if promptOnly is False: goodlogging.Log.Info(""UTIL"", ""No match found"") prompt = ""E"" if not recursiveLookup: return None if recursiveLookup: prompt = prompt + ""nter a different string to look up or e"" prompt = prompt + ""nter 'x' {0} or enter 'exit' to quit this program"".format(xStrOverride) if promptComment is None: prompt = prompt + "": "" else: prompt = prompt + "" ({0}): "".format(promptComment) while(1): response = goodlogging.Log.Input('UTIL', prompt) if response.lower() == 'exit': goodlogging.Log.Fatal(""UTIL"", ""Program terminated by user 'exit'"") if response.lower() == 'x': return None elif response.lower() == 'y' and len(matchList) == 1: return matchList[0] elif len(matchList) > 1: for match in matchList: if response.lower() == match.lower(): return match if recursiveLookup: return response" 3472,"def GetBestMatch(target, matchList): """""" Finds the elements of matchList which best match the target string. Note that this searches substrings so ""abc"" will have a 100% match in both ""this is the abc"", ""abcde"" and ""abc"". The return from this function is a list of potention matches which shared the same highest match score. If any exact match is found (1.0 score and equal size string) this will be given alone. Parameters ---------- target : string Target string to match. matchList : list List of strings to match target against. Returns ---------- list A list of potention matches which share the same highest match score. If any exact match is found (1.0 score and equal size string) this will be given alone. """""" bestMatchList = [] if len(matchList) > 0: ratioMatch = [] for item in matchList: ratioMatch.append(GetBestStringMatchValue(target, item)) maxRatio = max(ratioMatch) if maxRatio > 0.8: matchIndexList = [i for i, j in enumerate(ratioMatch) if j == maxRatio] for index in matchIndexList: if maxRatio == 1 and len(matchList[index]) == len(target): return [matchList[index], ] else: bestMatchList.append(matchList[index]) return bestMatchList" 3473,"def GetBestStringMatchValue(string1, string2): """""" Return the value of the highest matching substrings between two strings. Parameters ---------- string1 : string First string. string2 : string Second string. Returns ---------- int Integer value representing the best match found between string1 and string2. 
"""""" # Ignore case string1 = string1.lower() string2 = string2.lower() # Ignore non-alphanumeric characters string1 = ''.join(i for i in string1 if i.isalnum()) string2 = ''.join(i for i in string2 if i.isalnum()) # Finding best match value between string1 and string2 if len(string1) == 0 or len(string2) == 0: bestRatio = 0 elif len(string1) == len(string2): match = difflib.SequenceMatcher(None, string1, string2) bestRatio = match.ratio() else: if len(string1) > len(string2): shortString = string2 longString = string1 else: shortString = string1 longString = string2 match = difflib.SequenceMatcher(None, shortString, longString) bestRatio = match.ratio() for block in match.get_matching_blocks(): subString = longString[block[1]:block[1]+block[2]] subMatch = difflib.SequenceMatcher(None, shortString, subString) if(subMatch.ratio() > bestRatio): bestRatio = subMatch.ratio() return(bestRatio)" 3474,"def WebLookup(url, urlQuery=None, utf8=True): """""" Look up webpage at given url with optional query string Parameters ---------- url : string Web url. urlQuery : dictionary [optional: default = None] Parameter to be passed to GET method of requests module utf8 : boolean [optional: default = True] Set response encoding Returns ---------- string GET response text """""" goodlogging.Log.Info(""UTIL"", ""Looking up info from URL:{0} with QUERY:{1})"".format(url, urlQuery), verbosity=goodlogging.Verbosity.MINIMAL) response = requests.get(url, params=urlQuery) goodlogging.Log.Info(""UTIL"", ""Full url: {0}"".format(response.url), verbosity=goodlogging.Verbosity.MINIMAL) if utf8 is True: response.encoding = 'utf-8' if(response.status_code == requests.codes.ok): return(response.text) else: response.raise_for_status()" 3475,"def ArchiveProcessedFile(filePath, archiveDir): """""" Move file from given file path to archive directory. Note the archive directory is relative to the file path directory. Parameters ---------- filePath : string File path archiveDir : string Name of archive directory """""" targetDir = os.path.join(os.path.dirname(filePath), archiveDir) goodlogging.Log.Info(""UTIL"", ""Moving file to archive directory:"") goodlogging.Log.IncreaseIndent() goodlogging.Log.Info(""UTIL"", ""FROM: {0}"".format(filePath)) goodlogging.Log.Info(""UTIL"", ""TO: {0}"".format(os.path.join(targetDir, os.path.basename(filePath)))) goodlogging.Log.DecreaseIndent() os.makedirs(targetDir, exist_ok=True) try: shutil.move(filePath, targetDir) except shutil.Error as ex4: err = ex4.args[0] goodlogging.Log.Info(""UTIL"", ""Move to archive directory failed - Shutil Error: {0}"".format(err))" 3476,"def send(self,text): """"""Send a string to the PiLite, can be simple text or a $$$ command"""""" #print text self.s.write(text) time.sleep(0.001*len(text))" 3477,"def send_wait(self,text): """"""Send a string to the PiLite, sleep until the message has been displayed (based on an estimate of the speed of the display. Due to the font not being monotype, this will wait too long in most cases"""""" self.send(text) time.sleep(len(text)*PiLite.COLS_PER_CHAR*self.speed/1000.0)" 3478,"def set_speed(self,speed): """"""Set the display speed. The parameters is the number of milliseconds between each column scrolling off the display"""""" self.speed=speed self.send_cmd(""SPEED""+str(speed))" 3479,"def set_fb_pic(self,pattern): """"""Set the ""frame buffer"". 
This allows ""nice"" string to be sent, because it first removes all whitespace, then transposes so that the X and Y axes are swapped, so what is seen in the file matches what will be seen on the screen. Also '.' and '*' can be used in place of 0 and 1. """""" pattern=''.join(pattern.split()) # Remove whitespace pattern=pattern.replace('*','1') pattern=pattern.replace('.','0') fb='' for x in range(14): for y in range(9): fb+=pattern[y*14+x] self.set_fb(fb)" 3480,"def set_fb_random(self): """"""Set the ""frame buffer"" to a random pattern"""""" pattern=''.join([random.choice(['0','1']) for i in xrange(14*9)]) self.set_fb(pattern)" 3481,"def set_pixel(self,x,y,state): """"""Set pixel at ""x,y"" to ""state"" where state can be one of ""ON"", ""OFF"" or ""TOGGLE"" """""" self.send_cmd(""P""+str(x+1)+"",""+str(y+1)+"",""+state)" 3482,"def display_char(self,x,y,char): """"""Display character ""char"" with its top left at ""x,y"" """""" self.send_cmd(""T""+str(x+1)+"",""+str(y+1)+"",""+char)" 3483,"def the_magic_mapping_function(peptides, fastaPath, importAttributes=None, ignoreUnmapped=True): """"""Returns a dictionary mapping peptides to protein group leading proteins. :param peptides: a set of peptide sequences :param fastaPath: FASTA file path :param importAttributes: dict, can be used to override default parameters passed to the function maspy.proteindb.importProteinDatabase(). Default attribtues are: {'cleavageRule': '[KR]', 'removeNtermM': True, 'ignoreIsoleucine': True, forceId': True, 'headerParser': PROTEINDB.fastaParserSpectraClusterPy} :param ignoreUnmapped: bool, if True ignores peptides that cannot be mapped to any protein present in the FASTA file :returns: dict, {peptide: set([groupLeaders1, ...])} where groupLeaders is a string joining all leading proteins of a group with a "";"", for example {'peptide': {""protein1;proetin2;protein3""}} """""" missedCleavage = max([p.count('K') + p.count('R') for p in peptides]) - 1 minLength = min([len(p) for p in peptides]) maxLength = max([len(p) for p in peptides]) defaultAttributes = { 'cleavageRule': '[KR]', 'minLength': minLength, 'maxLength': maxLength, 'removeNtermM': True, 'ignoreIsoleucine': True, 'missedCleavage': missedCleavage, 'forceId': True, 'headerParser': PROTEINDB.fastaParserSpectraClusterPy, } if importAttributes is not None: defaultAttributes.update(importAttributes) proteindb = PROTEINDB.importProteinDatabase(fastaPath, **defaultAttributes) #This could be automated by adding a function to the inference module proteinToPeptides = ddict(set) for peptide in peptides: #ignore any peptide that's not mapped if ""ignoreUnmapped"" is True try: peptideDbEntry = proteindb.peptides[peptide] except KeyError as exception: if ignoreUnmapped: continue else: exceptionText = 'No protein mappings for peptide ""'+peptide+'""' raise KeyError(exceptionText) for protein in peptideDbEntry.proteins: proteinToPeptides[protein].add(peptide) #Generate the ProteinInference instance inference = INFERENCE.mappingBasedGrouping(proteinToPeptides) peptideGroupMapping = dict() for peptide in peptides: groupLeaders = set() for proteinId in inference.pepToProts[peptide]: for proteinGroup in inference.getGroups(proteinId): groupLeaders.add(';'.join(sorted(proteinGroup.leading))) peptideGroupMapping[peptide] = groupLeaders return peptideGroupMapping" 3484,"def cli(ctx, amount, index, stage): """"""Pull, Transform, Push,streaming inside a pipe(experimental)."""""" ctx.obj.say_green('Starting Streaming Pipe') res_pull = ctx.invoke(pull, amount=amount, 
index=index, stage=stage) res_tra = False if res_pull: # amount to transform can be less (or more) res_tra = ctx.invoke( transform, amount=amount, index=index, stage=stage) if res_tra: # amount to push can be less (or more) res_push = ctx.invoke(push, amount=amount, index=index, stage=stage) if res_pull and res_tra and res_push: ctx.obj.say_green('Streaming Pipe finsished') return True return False" 3485,"def camelHump(text): """""" Converts the inputted text to camel humps by joining all capital letters toegether (The Quick, Brown, Fox.Tail -> TheQuickBrownFoxTail) :param: text text to be changed :return: :usage: |import projex.text |print projex.text.camelHump('The,Quick, Brown, Fox.Tail') """""" # make sure the first letter is upper case output = ''.join([word[0].upper() + word[1:] for word in words(text)]) if output: output = output[0].lower() + output[1:] return output" 3486,"def capitalize(text): """""" Capitalizes the word using the normal string capitalization method, however if the word contains only capital letters and numbers, then it will not be affected. :param text | :return """""" text = nativestring(text) if EXPR_CAPITALS.match(text): return text return text.capitalize()" 3487,"def classname(text): """""" Converts the inputted text to the standard classname format (camel humped with a capital letter to start. :return """""" if not text: return text text = camelHump(text) return text[0].upper() + text[1:]" 3488,"def encoded(text, encoding=DEFAULT_ENCODING): """""" Encodes the inputted unicode/string variable with the given encoding type. :param text | encoding | :return """""" # already a string item if type(text) == bytes_type: return text elif type(text) != unicode_type: # convert a QString value if type(text).__name__ == 'QString': if encoding == 'utf-8': return unicode_type(text.toUtf8(), 'utf-8') elif encoding == 'latin-1': return unicode_type(text.toLatin1(), 'latin-1') elif encoding == 'ascii': return unicode_type(text.toAscii(), 'ascii') else: return unicode_type(text, encoding) # convert a standard item else: try: return bytes_type(text) except StandardError: return '????' if encoding: try: return text.encode(encoding) except StandardError: return text.encode(encoding, errors='ignore') else: for enc in SUPPORTED_ENCODINGS: try: return text.encode(enc) except StandardError: pass return '????'" 3489,"def decoded(text, encoding=DEFAULT_ENCODING): """""" Attempts to decode the inputted unicode/string variable using the given encoding type. If no encoding is provided, then it will attempt to use one of the ones available from the default list. :param text | encoding | || None :return """""" # unicode has already been decoded if type(text) == unicode_type: return text elif type(text) != bytes_type: try: return unicode_type(text) except StandardError: try: text = bytes_type(text) except StandardError: msg = u'<< projex.text.decoded: unable to decode ({0})>>' return msg.format(repr(text)) if encoding: try: return text.decode(encoding) except StandardError: pass for enc in SUPPORTED_ENCODINGS: try: return text.decode(enc) except StandardError: pass return u'????'" 3490,"def nativestring(val, encodings=None): """""" Converts the inputted value to a native python string-type format. :param val | encodings | (, ..) 
|| None :sa decoded :return || """""" # if it is already a native python string, don't do anything if type(val) in (bytes_type, unicode_type): return val # otherwise, attempt to return a decoded value try: return unicode_type(val) except StandardError: pass try: return bytes_type(val) except StandardError: return decoded(val)" 3491,"def joinWords(text, separator=''): """""" Collects all the words from a text and joins them together with the inputted separator. :sa [[#words]] :param text :param separator :return :usage |import projex |print projex.joinWords('This::is.a testTest','-') """""" text = nativestring(text) output = separator.join(words(text.strip(separator))) # no need to check for bookended items when its an empty string if not separator: return output # look for beginning characters begin = re.match('^\%s+' % separator, text) if begin: output = begin.group() + output # make sure to not double up if begin.group() == text: return output # otherwise, look for the ending results end = re.search('\%s+$' % separator, text) if end: output += end.group() return output" 3492,"def pluralize(word, count=None, format=u'{word}'): """""" Converts the inputted word to the plural form of it. This method works best if you use the inflect module, as it will just pass along the request to inflect.plural If you do not have that module, then a simpler and less impressive pluralization technique will be used. :sa https://pypi.python.org/pypi/inflect :param word | :return """""" if count == 1: return word elif count is not None: return format.format(word=word, count=count) word = nativestring(word) if inflect_engine: return format.format(word=inflect_engine.plural(word)) all_upper = EXPR_UPPERCASE.match(word) is not None # go through the different plural expressions, searching for the # proper replacement for expr, plural in PLURAL_RULES: results = expr.match(word) if results: result_dict = results.groupdict() single = result_dict.get('single', '') # check if its capitalized if all_upper: return format.format(word=single + plural.upper()) else: return format.format(word=single + plural) # by default, just include 's' at the end if all_upper: return format.format(word=word + 'S') return format.format(word=word + 's')" 3493,"def render(text, options, processed=None): """""" Replaces the templates within the inputted text with the given options. Templates are defined as text within matching braces, and can include additional formatting options. Any key that is not found in the options will be replaced as it was found in the inputted text. :param text :param options { key: value, .. } :param processed [ key, .. 
] used internally :return formatted text :usage |import projex.text |options = { 'key': 10, 'name': 'eric' } |template = '[name::lower]_[key]_[date::%m-%d-%y].txt' |projex.text.render( template, options ) :built-ins date will render the current datetime :options The following are a list of formatting options text: lower | converts the value to lowercase upper | converts the value to uppercase camelHump | converts the value to camel-humped text underscore | converts the value to underscored text pretty | converts the value to pretty text capitalized | converts the value to capltalized words words | converts the value to space separated words upper_first | capitalizes just the first letter lower_first | lowercases just the first letter replace(x, y) | replaces the instances of x with y lstrip(x) | removes the beginning instance of x rstrip(x) | removes the ending instance of x slice(x, y) | same as doing string[x:y] """""" output = unicode_type(text) expr = re.compile('(\[+([^\[\]]+)\]\]?)') results = expr.findall(output) curr_date = datetime.datetime.now() options_re = re.compile('(\w+)\(?([^\)]+)?\)?') if processed is None: processed = [] for repl, key in results: # its possible to get multiple items for processing if repl in processed: continue # record the repl value as being processed processed.append(repl) # replace template templates if repl.startswith('[[') and repl.endswith(']]'): output = output.replace(repl, '[%s]' % key) continue # determine the main key and its options splt = key.split('::') key = splt[0] prefs = splt[1:] value = None # use the inputted options if key in options: # extract the value value = options[key] # format a float if type(value) in (float, int): if prefs: value = prefs[0] % value else: value = nativestring(value) # convert date time values elif type(value) in (datetime.datetime, datetime.date, datetime.time): if not prefs: date_format = '%m/%d/%y' else: date_format = prefs[0] prefs = prefs[1:] value = value.strftime(nativestring(date_format)) else: value = render(options[key], options, processed) # look for the built-in options elif key == 'date': value = curr_date if not prefs: date_format = '%m/%d/%y' else: date_format = prefs[0] prefs = prefs[1:] value = value.strftime(nativestring(date_format)) # otherwise, continue else: continue # apply the prefs to the value if value and prefs: for pref in prefs: result = options_re.match(pref) pref, opts = result.groups() if opts: opts = [opt.strip() for opt in opts.split(',')] else: opts = [] if 'lower' == pref: value = value.lower() elif 'upper' == pref: value = value.upper() elif 'upper_first' == pref: value = value[0].upper() + value[1:] elif 'lower_first' == pref: value = value[0].lower() + value[1:] elif 'camelHump' == pref: value = camelHump(value) elif 'underscore' == pref: value = underscore(value) elif 'capitalize' == pref: value = capitalize(value) elif pref in ('pluralize', 'plural'): value = pluralize(value) elif 'words' == pref: value = ' '.join(words(value)) elif 'pretty' == pref: value = pretty(value) elif 'replace' == pref: if len(opts) == 2: value = value.replace(opts[0], opts[1]) else: logger.warning('Invalid options for replace: %s', ', '.join(opts)) elif 'slice' == pref: if len(opts) == 2: value = value[int(opts[0]):int(opts[1])] else: logger.warning('Invalid options for slice: %s', ', '.join(opts)) elif 'lstrip' == pref: if not opts: value = value.lstrip() else: for k in opts: if value.startswith(k): value = value[len(k):] elif 'rstrip' == pref: if not opts: value = value.rstrip() else: for k 
in opts: if value.endswith(k): value = value[:-len(k)] output = output.replace(repl, value) return output" 3494,"def safe_eval(value): """""" Converts the inputted text value to a standard python value (if possible). :param value | || :return """""" if not isinstance(value, (str, unicode)): return value try: return CONSTANT_EVALS[value] except KeyError: try: return ast.literal_eval(value) except StandardError: return value" 3495,"def sectioned(text, sections=1): """""" Splits the inputted text up into sections. :param text | sections | :return """""" text = nativestring(text) if not text: return '' count = len(text) / max(1, sections) return ' '.join([text[i:i + count] for i in range(0, len(text), count)])" 3496,"def singularize(word): """""" Converts the inputted word to the single form of it. This method works best if you use the inflect module, as it will just pass along the request to inflect.singular_noun. If you do not have that module, then a simpler and less impressive singularization technique will be used. :sa https://pypi.python.org/pypi/inflect :param word :return """""" word = toUtf8(word) if inflect_engine: result = inflect_engine.singular_noun(word) if result is False: return word return result # go through the different plural expressions, searching for the # proper replacement if word.endswith('ies'): return word[:-3] + 'y' elif word.endswith('IES'): return word[:-3] + 'Y' elif word.endswith('s') or word.endswith('S'): return word[:-1] return word" 3497,"def stemmed(text): """""" Returns a list of simplified and stemmed down terms for the inputted text. This will remove common terms and words from the search and return only the important root terms. This is useful in searching algorithms. :param text | :return [, ..] """""" terms = re.split('\s*', toAscii(text)) output = [] for term in terms: # ignore apostrophe's if term.endswith(""'s""): stripped_term = term[:-2] else: stripped_term = term single_term = singularize(stripped_term) if term in COMMON_TERMS or stripped_term in COMMON_TERMS or single_term in COMMON_TERMS: continue output.append(single_term) return output" 3498,"def stripHtml(html, joiner=''): """""" Strips out the HTML tags from the inputted text, returning the basic text. This algorightm was found on [http://stackoverflow.com/questions/753052/strip-html-from-strings-in-python StackOverflow]. :param html | :return """""" stripper = HTMLStripper() stripper.feed(html.replace('
<br>', '\n').replace('<br/>
    ', '\n')) return stripper.text(joiner)" 3499,"def truncate(text, length=50, ellipsis='...'): """""" Returns a truncated version of the inputted text. :param text | length | ellipsis | :return """""" text = nativestring(text) return text[:length] + (text[length:] and ellipsis)" 3500,"def toBytes(text, encoding=DEFAULT_ENCODING): """""" Converts the inputted text to base string bytes array. :param text | :return || (python3) """""" if not text: return text if not isinstance(text, bytes_type): text = text.encode(encoding) return text" 3501,"def toUnicode(data, encoding=DEFAULT_ENCODING): """""" Converts the inputted data to unicode format. :param data | || || :return || """""" if isinstance(data, unicode_type): return data if isinstance(data, bytes_type): return unicode_type(data, encoding=encoding) if hasattr(data, '__iter__'): try: dict(data) except TypeError: pass except ValueError: return (toUnicode(i, encoding) for i in data) else: if hasattr(data, 'items'): data = data.items() return dict(((toUnicode(k, encoding), toUnicode(v, encoding)) for k, v in data)) return data" 3502,"def underscore(text, lower=True): """""" Splits all the words from the inputted text into being separated by underscores :sa [[#joinWords]] :param text :return :usage |import projex.text |print projex.text.underscore('TheQuick, Brown, Fox') """""" out = joinWords(text, '_') if lower: return out.lower() return out" 3503,"def xmlindent(elem, level=0, spacer=' '): """""" Indents the inputted XML element based on the given indent level. :param elem | """""" i = ""\n"" + level * spacer if len(elem): if not elem.text or not elem.text.strip(): elem.text = i + spacer if not elem.tail or not elem.tail.strip(): elem.tail = i for elem in elem: xmlindent(elem, level + 1) if not elem.tail or not elem.tail.strip(): elem.tail = i else: if level and (not elem.tail or not elem.tail.strip()): elem.tail = i" 3504,"def words(text): """""" Extracts a list of words from the inputted text, parsing out non-alphanumeric characters and splitting camel humps to build the list of words :param text :return :usage |import projex.text |print projex.text.words('TheQuick, TheBrown Fox.Tail') """""" stext = nativestring(text) if not stext: return [] # first, split all the alphanumeric characters up phrases = EXPR_PHRASE.findall(stext) # second, split all the camel humped words output = [] for phrase in phrases: output += EXPR_WORD.findall(phrase) return output" 3505,"def to_json(data): """"""Return data as a JSON string."""""" return json.dumps(data, default=lambda x: x.__dict__, sort_keys=True, indent=4)" 3506,"def convert_string(string, chars=None): """"""Remove certain characters from a string."""""" if chars is None: chars = [',', '.', '-', '/', ':', ' '] for ch in chars: if ch in string: string = string.replace(ch, ' ') return string" 3507,"def convert_time(time): """"""Convert a time string into 24-hour time."""""" split_time = time.split() try: # Get rid of period in a.m./p.m. am_pm = split_time[1].replace('.', '') time_str = '{0} {1}'.format(split_time[0], am_pm) except IndexError: return time try: time_obj = datetime.strptime(time_str, '%I:%M %p') except ValueError: time_obj = datetime.strptime(time_str, '%I %p') return time_obj.strftime('%H:%M %p')" 3508,"def convert_month(date, shorten=True, cable=True): """"""Replace month by shortening or lengthening it. :param shorten: Set to True to shorten month name. :param cable: Set to True if category is Cable. 
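Example (illustrative, assuming MONTHS and SHORT_MONTHS are the module-level lists of full and abbreviated lower-case month names): convert_month('june 2, 2017') returns 'jun 2, 2017' with the default shorten=True.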
"""""" month = date.split()[0].lower() if 'sept' in month: shorten = False if cable else True try: if shorten: month = SHORT_MONTHS[MONTHS.index(month)] else: month = MONTHS[SHORT_MONTHS.index(month)] except ValueError: month = month.title() return '{0} {1}'.format(month, ' '.join(date.split()[1:]))" 3509,"def convert_date(date): """"""Convert string to datetime object."""""" date = convert_month(date, shorten=False) clean_string = convert_string(date) return datetime.strptime(clean_string, DATE_FMT.replace('-',''))" 3510,"def date_in_range(date1, date2, range): """"""Check if two date objects are within a specific range"""""" date_obj1 = convert_date(date1) date_obj2 = convert_date(date2) return (date_obj2 - date_obj1).days <= range" 3511,"def inc_date(date_obj, num, date_fmt): """"""Increment the date by a certain number and return date object. as the specific string format. """""" return (date_obj + timedelta(days=num)).strftime(date_fmt)" 3512,"def get_soup(url): """"""Request the page and return the soup."""""" html = requests.get(url, stream=True, headers=HEADERS) if html.status_code != 404: return BeautifulSoup(html.content, 'html.parser') else: return None" 3513,"def match_list(query_list, string): """"""Return True if all words in a word list are in the string. :param query_list: list of words to match :param string: the word or words to be matched against """""" # Get rid of 'the' word to ease string matching match = False index = 0 string = ' '.join(filter_stopwords(string)) if not isinstance(query_list, list): query_list = [query_list] while index < len(query_list): query = query_list[index] words_query = filter_stopwords(query) match = all(word in string for word in words_query) if match: break index += 1 return match" 3514,"def filter_stopwords(phrase): """"""Filter out stop words and return as a list of words"""""" if not isinstance(phrase, list): phrase = phrase.split() stopwords = ['the', 'a', 'in', 'to'] return [word.lower() for word in phrase if word.lower() not in stopwords]" 3515,"def safe_unicode(string): """"""If Python 2, replace non-ascii characters and return encoded string."""""" if not PY3: uni = string.replace(u'\u2019', ""'"") return uni.encode('utf-8') return string" 3516,"def get_strings(soup, tag): """"""Get all the string children from an html tag."""""" tags = soup.find_all(tag) strings = [s.string for s in tags if s.string] return strings" 3517,"def cli(ctx, given_name, demo): """"""Initializes a bubble."""""" path = None if path is None: path = ctx.home bubble_file_name = path + '/.bubble' config_file = path + '/config/config.yaml' if os.path.exists(bubble_file_name) and os.path.isfile(bubble_file_name): ctx.say_yellow( 'There is already a bubble present, will not initialize bubble in:' + path) return else: given_name = '(((' + given_name + ')))' with open(bubble_file_name, 'w') as dot_bubble: dot_bubble.write('bubble=' + metadata.version + '\n') dot_bubble.write('name=' + given_name + '\n') dot_bubble.write('home=' + ctx.home + '\n') dot_bubble.write( 'local_init_timestamp=' + str(arrow.utcnow()) + '\n') # aka date_of_birth dot_bubble.write( 'local_creator_user=' + str(os.getenv('USER')) + '\n') dot_bubble.write( 'local_created_in_env=' + str(os.environ) + '\n') ctx.say_green('Initialised a new bubble in [%s]' % click.format_filename(bubble_file_name)) create_dir(ctx, path + '/config/') create_dir(ctx, path + '/logs/') create_dir(ctx, path + '/export/') create_dir(ctx, path + '/import/') create_dir(ctx, path + '/remember/') create_dir(ctx, path + 
'/remember/archive') with open(config_file, 'w') as cfg_file: cfg_file.write(get_example_configuration()) ctx.say_green('Created an example configuration in %s' % click.format_filename(config_file)) rules_file = path + '/config/rules.bubble' with open(rules_file, 'w') as rules: rules.write(get_example_rules_bubble()) ctx.say_green('Created an example rules in [%s]' % click.format_filename(rules_file)) rule_functions_file = path + '/custom_rule_functions.py' with open(rule_functions_file, 'w') as rule_functions: rule_functions.write(get_example_rule_functions()) ctx.say_green('Created an example rule_functions in [%s]' % click.format_filename(rule_functions_file)) src_client_file = path + '/mysrcclient.py' with open(src_client_file, 'w') as src_client: src_client.write(get_example_client_pull()) ctx.say_green('Created source example client with pull method [%s]' % click.format_filename(src_client_file)) tgt_client_file = path + '/mytgtclient.py' with open(tgt_client_file, 'w') as tgt_client: tgt_client.write(get_example_client_push()) ctx.say_green('Created an target example client with push method [%s]' % click.format_filename(src_client_file)) ctx.say_green( 'Bubble initialized, please adjust your configuration file')" 3518,"def _bld_op(self, op, num, **kwargs): """"""implements pandas an operator"""""" kwargs['other'] = num setattr(self, op, {'mtype': pab, 'kwargs': kwargs})" 3519,"def _bld_pab_generic(self, funcname, **kwargs): """""" implements a generic version of an attribute based pandas function """""" margs = {'mtype': pab, 'kwargs': kwargs} setattr(self, funcname, margs)" 3520,"def _bld_pnab_generic(self, funcname, **kwargs): """""" implement's a generic version of a non-attribute based pandas function """""" margs = {'mtype': pnab, 'kwargs': kwargs} setattr(self, funcname, margs)" 3521,"def get(self, request, *args, **kwargs): """""" List all products in the shopping cart """""" cart = ShoppingCartProxy(request) return JsonResponse(cart.get_products(onlypublic=request.GET.get('onlypublic', True)))" 3522,"def post(self, request, *args, **kwargs): """""" Adds new product to the current shopping cart """""" POST = json.loads(request.body.decode('utf-8')) if 'product_pk' in POST and 'quantity' in POST: cart = ShoppingCartProxy(request) cart.add( product_pk=int(POST['product_pk']), quantity=int(POST['quantity']) ) return JsonResponse(cart.products) return HttpResponseBadRequest()" 3523,"def dispatch(self, *args, **kwargs): self.__line_pk = kwargs.get('pk', None) """""" if SalesLineBasketOption.objects.filter(line_budget__pk=self.__line_pk).exists(): self.form_class = LineBasketFormPack self.__is_pack = True else: self.__is_pack = False """""" return super(LinesUpdateModalBasket, self).dispatch(*args, **kwargs)" 3524,"def get_form(self, form_class=None): # form_kwargs = super(LineBasketUpdateModal, self).get_form_kwargs(*args, **kwargs) form = super(LinesUpdateModalBasket, self).get_form(form_class) initial = form.initial initial['type_tax'] = self.object.product_final.product.tax.pk initial['tax'] = self.object.tax_basket initial['price'] = float(self.object.price_base_basket) * (1 + (self.object.tax_basket / 100)) """""" if self.__is_pack: options = [] lang = get_language_database() for option in SalesLineBasketOption.objects.filter(line_budget__pk=self.__line_pk): initial['packs[{}]'.format(option.product_option.pk)] = option.product_final.pk a = { 'id': option.product_option.pk, 'label': getattr(option.product_option, lang).name, 'products': 
list(option.product_option.products_pack.all().values('pk').annotate(name=F('{}__name'.format(lang)))), 'selected': option.product_final.pk, } options.append(a) # compatibility with GenForeignKey initial['packs'] = json.dumps({'__JSON_DATA__': options}) """""" return form" 3525,"def form_valid(self, form): # lb = SalesLines.objects.filter(pk=self.__line_pk).first() # product_old = lb.product_final product_pk = self.request.POST.get(""product_final"", None) quantity = self.request.POST.get(""quantity"", None) product_final = ProductFinal.objects.filter(pk=product_pk).first() """""" if product: is_pack = product.is_pack() else: is_pack = False """""" if product_final and quantity: reason = form.data['reason'] if reason: reason_obj = ReasonModification.objects.filter(pk=reason).first() if reason_obj: try: with transaction.atomic(): result = super(LinesUpdateModalBasket, self).form_valid(form) reason_basket = ReasonModificationLineBasket() reason_basket.basket = self.object.basket reason_basket.reason = reason_obj reason_basket.line = self.object reason_basket.user = get_current_user() reason_basket.quantity = self.object.quantity reason_basket.save() return result except ValidationError as e: errors = form._errors.setdefault(""product_final"", ErrorList()) errors.append(e) return super(LinesUpdateModalBasket, self).form_invalid(form) else: errors = form._errors.setdefault(""reason"", ErrorList()) errors.append(_(""Reason of modification invalid"")) return super(LinesUpdatelOrder, self).form_invalid(form) else: errors = form._errors.setdefault(""reason"", ErrorList()) errors.append(_(""Reason of modification invalid"")) return super(LinesUpdatelOrder, self).form_invalid(form) """""" if is_pack: options = product.productfinals_option.filter(active=True) options_pack = [] for option in options: field = 'packs[{}]'.format(option.pk) opt = self.request.POST.get(field, None) if opt: opt_product = ProductFinal.objects.filter(pk=opt).first() if opt_product: options_pack.append({ 'product_option': option, 'product_final': opt_product, 'quantity': quantity }) else: errors = form._errors.setdefault(field, ErrorList()) errors.append(_(""Product Option invalid"")) return super(LinesUpdateModalBasket, self).form_invalid(form) else: errors = form._errors.setdefault(field, ErrorList()) errors.append(_(""Option invalid"")) return super(LinesUpdateModalBasket, self).form_invalid(form) """""" else: errors = form._errors.setdefault(""product_final"", ErrorList()) errors.append((_(""Product invalid""), quantity, product_final)) return super(LinesUpdateModalBasket, self).form_invalid(form) """""" ret = super(LinesUpdateModalBasket, self).form_valid(form) if product_old != self.object.product: self.object.remove_options() if is_pack: self.object.set_options(options_pack) return ret """"""" 3526,"def register_signal(alias: str, signal: pyqtSignal): """""" Used to register signal at the dispatcher. Note that you can not use alias that already exists. :param alias: Alias of the signal. String. :param signal: Signal itself. Usually pyqtSignal instance. :return: """""" if SignalDispatcher.signal_alias_exists(alias): raise SignalDispatcherError('Alias ""' + alias + '"" for signal already exists!') SignalDispatcher.signals[alias] = signal" 3527,"def register_handler(alias: str, handler: callable): """""" Used to register handler at the dispatcher. :param alias: Signal alias to match handler to. :param handler: Handler. Some callable. 
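# Illustrative sketch: SignalDispatcher above keeps two class-level dicts keyed by
# alias (signals and handlers) and, on dispatch(), connects every handler to its
# signal. A minimal non-Qt analogue of that wiring (all names here are hypothetical):
class FakeSignal:
    def __init__(self):
        self._slots = []

    def connect(self, slot):
        self._slots.append(slot)

    def emit(self, *args):
        for slot in self._slots:
            slot(*args)


class TinyDispatcher:
    signals = {}    # alias -> signal-like object
    handlers = {}   # alias -> list of callables

    @staticmethod
    def register_signal(alias, signal):
        if alias in TinyDispatcher.signals:
            raise ValueError('Alias "%s" for signal already exists!' % alias)
        TinyDispatcher.signals[alias] = signal

    @staticmethod
    def register_handler(alias, handler):
        TinyDispatcher.handlers.setdefault(alias, []).append(handler)

    @staticmethod
    def dispatch():
        for alias, signal in TinyDispatcher.signals.items():
            for handler in TinyDispatcher.handlers.get(alias, []):
                signal.connect(handler)


sig = FakeSignal()
TinyDispatcher.register_signal('job_done', sig)
TinyDispatcher.register_handler('job_done', lambda msg: print('handled:', msg))
TinyDispatcher.dispatch()
sig.emit('ok')   # -> handled: ok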
:return: """""" if SignalDispatcher.handlers.get(alias) is None: SignalDispatcher.handlers[alias] = [handler] else: SignalDispatcher.handlers.get(alias).append(handler)" 3528,"def dispatch(): """""" This methods runs the wheel. It is used to connect signal with their handlers, based on the aliases. :return: """""" aliases = SignalDispatcher.signals.keys() for alias in aliases: handlers = SignalDispatcher.handlers.get(alias) signal = SignalDispatcher.signals.get(alias) if signal is None or handlers.__len__() == 0: continue for handler in handlers: signal.connect(handler)" 3529,"def signal_alias_exists(alias: str) -> bool: """""" Checks if signal alias exists. :param alias: Signal alias. :return: """""" if SignalDispatcher.signals.get(alias): return True return False" 3530,"def handler_alias_exists(alias: str) -> bool: """""" Checks if handler alisa exists. :param alias: Handler alias. :return: """""" if SignalDispatcher.handlers.get(alias): return True return False" 3531,"def get_function_data(minion, jid): """"""AJAX access for loading function/job details."""""" redis = Redis(connection_pool=redis_pool) data = redis.get('{0}:{1}'.format(minion, jid)) return Response(response=data, status=200, mimetype=""application/json"")" 3532,"def get_api_publisher(self, social_user): """""" and other https://vk.com/dev.php?method=wall.post """""" def _post(**kwargs): api = self.get_api(social_user) from pudb import set_trace; set_trace() # api.group.getInfo('uids'='your_group_id', 'fields'='members_count') #response = api.wall.post(**kwargs) return response return _post" 3533,"def _get_rev(self, fpath): """""" Get an SCM version number. Try svn and git. """""" rev = None try: cmd = [""git"", ""log"", ""-n1"", ""--pretty=format:\""%h\"""", fpath] rev = Popen(cmd, stdout=PIPE, stderr=PIPE).communicate()[0] except: pass if not rev: try: cmd = [""svn"", ""info"", fpath] svninfo = Popen(cmd, stdout=PIPE, stderr=PIPE).stdout.readlines() for info in svninfo: tokens = info.split("":"") if tokens[0].strip() == ""Last Changed Rev"": rev = tokens[1].strip() except: pass return rev" 3534,"def execute_migrations(self, show_traceback=True): """""" Executes all pending migrations across all capable databases """""" all_migrations = get_pending_migrations(self.path, self.databases) if not len(all_migrations): sys.stdout.write(""There are no migrations to apply.\n"") for db, migrations in all_migrations.iteritems(): connection = connections[db] # init connection cursor = connection.cursor() cursor.close() for migration in migrations: migration_path = self._get_migration_path(db, migration) with Transactional(): sys.stdout.write( ""Executing migration %r on %r...."" % (migration, db) ) created_models = self._execute_migration( db, migration_path, show_traceback=show_traceback ) emit_post_sync_signal( created_models=created_models, verbosity=self.verbosity, interactive=self.interactive, db=db, ) if self.load_initial_data: sys.stdout.write( ""Running loaddata for initial_data fixtures on %r.\n"" % db ) call_command( ""loaddata"", ""initial_data"", verbosity=self.verbosity, database=db, )" 3535,"def handle(self, *args, **options): """""" Upgrades the database. Executes SQL scripts that haven't already been applied to the database. 
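# Illustrative sketch: _get_rev above shells out to "git log -n1 --pretty=format:%h"
# (falling back to "svn info") to obtain a revision id for a path. The git half,
# rewritten with the modern subprocess API:
import subprocess

def git_short_rev(path='.'):
    try:
        out = subprocess.run(
            ['git', 'log', '-n1', '--pretty=format:%h', path],
            capture_output=True, text=True, check=True)
        return out.stdout.strip() or None
    except (OSError, subprocess.CalledProcessError):
        return None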
"""""" self.do_list = options.get(""do_list"") self.do_execute = options.get(""do_execute"") self.do_create = options.get(""do_create"") self.do_create_all = options.get(""do_create_all"") self.do_seed = options.get(""do_seed"") self.load_initial_data = options.get(""load_initial_data"", True) self.args = args if options.get(""path""): self.path = options.get(""path"") else: default_path = self._get_default_migration_path() self.path = getattr( settings, ""NASHVEGAS_MIGRATIONS_DIRECTORY"", default_path ) self.verbosity = int(options.get(""verbosity"", 1)) self.interactive = options.get(""interactive"") self.databases = options.get(""databases"") # We only use the default alias in creation scenarios (upgrades # default to all databases) if self.do_create and not self.databases: self.databases = [DEFAULT_DB_ALIAS] if self.do_create and self.do_create_all: raise CommandError(""You cannot combine --create and --create-all"") self.init_nashvegas() if self.do_create_all: self.create_all_migrations() elif self.do_create: assert len(self.databases) == 1 self.create_migrations(self.databases[0]) if self.do_execute: self.execute_migrations() if self.do_list: self.list_migrations() if self.do_seed: self.seed_migrations()" 3536,"def is_git_directory(path='.'): """""" Checks if given directory is a git repository :param path: path to check :return: True if it's a git repo and False otherwise """""" try: dulwich.repo.Repo.discover(path) except dulwich.errors.NotGitRepository: return False return True" 3537,"def get_git_remote_url(path='.', remote='origin'): """""" Get git remote url :param path: path to repo :param remote: :return: remote url or exception """""" return dulwich.repo.Repo.discover(path).get_config()\ .get((b'remote', remote.encode('utf-8')), b'url').decode('utf-8')" 3538,"def plantuml(desc): """"""Generate plantuml class diagram :param desc: result of sadisplay.describe function Return plantuml class diagram string """""" classes, relations, inherits = desc result = [ '@startuml', 'skinparam defaultFontName Courier', ] for cls in classes: # issue #11 - tabular output of class members (attrs) # http://stackoverflow.com/a/8356620/258194 # build table class_desc = [] # table columns class_desc += [(i[1], i[0]) for i in cls['cols']] # class properties class_desc += [('+', i) for i in cls['props']] # methods class_desc += [('%s()' % i, '') for i in cls['methods']] result.append( 'Class %(name)s {\n%(desc)s\n}' % { 'name': cls['name'], 'desc': '\n'.join(tabular_output(class_desc)), } ) for item in inherits: result.append(""%(parent)s <|-- %(child)s"" % item) for item in relations: result.append(""%(from)s <--o %(to)s: %(by)s"" % item) result += [ 'right footer generated by sadisplay v%s' % __version__, '@enduml', ] return '\n\n'.join(result)" 3539,"def dot(desc, color, title=""Trump's ORM""): """"""Generate dot file :param desc: result of sadisplay.describe function Return string """""" classes, relations, inherits = desc CLASS_TEMPLATE = """""" %(name)s [label=<

  %(cols)s%(props)s%(methods)s
    %(name)s
    >] """""".format(**color) COLUMN_TEMPLATE = """"""%(name)s%(type)s"""""".format(**color) PROPERTY_TEMPLATE = """"""%(name)sPROP"""""".format(**color) METHOD_TEMPLATE = """"""%(name)s()METH"""""".format(**color) EDGE_INHERIT = ""\tedge [\n\t\tarrowhead = empty\n\t]"" INHERIT_TEMPLATE = ""\t%(child)s -> %(parent)s \n"" EDGE_REL = ""\tedge [\n\t\tarrowhead = ediamond\n\t\tarrowtail = open\n\t]"" RELATION_TEMPLATE = ""\t\""%(from)s\"" -> \""%(to)s\"" [label = \""%(by)s\""]"" result = ["""""" digraph G { label = ""%s""; fontname = ""Bitstream Vera Sans"" fontsize = 12 node [ fontname = ""Bitstream Vera Sans"" fontsize = 8 shape = ""plaintext"" ] edge [ fontname = ""Bitstream Vera Sans"" fontsize = 8 ] """""" % title] for cls in classes: cols = ' '.join([ COLUMN_TEMPLATE % {'type': c[0], 'name': c[1]} for c in cls['cols'] ]) props = ' '.join([ PROPERTY_TEMPLATE % {'name': p} for p in cls['props'] ]) methods = ' '.join([ METHOD_TEMPLATE % {'name': m} for m in cls['methods'] ]) renderd = CLASS_TEMPLATE % { 'name': cls['name'], 'cols': cols, 'props': props, 'methods': methods, } result.append(renderd) result += [EDGE_INHERIT] for item in inherits: result.append(INHERIT_TEMPLATE % item) result += [EDGE_REL] for item in relations: result.append(RELATION_TEMPLATE % item) result += [ '}' ] return '\n'.join(result)" 3540,"def is_reference_target(resource, rtype, label): """""" Return true if the resource has this rtype with this label """""" prop = resource.props.references.get(rtype, False) if prop: return label in prop" 3541,"def get_sources(self, resources): """""" Filter resources based on which have this reference """""" rtype = self.rtype # E.g. category label = self.props.label # E.g. category1 result = [ resource for resource in resources.values() if is_reference_target(resource, rtype, label) ] return result" 3542,"def setup(app: Sphinx): """""" Initialize Kaybee as a Sphinx extension """""" # Scan for directives, first in the system, second in the docs project importscan.scan(plugins) dectate.commit(kb) app.add_config_value('kaybee_settings', KaybeeSettings(), 'html') bridge = 'kaybee.plugins.postrenderer.config.KaybeeBridge' app.config.template_bridge = bridge app.connect('env-updated', flush_everything) app.connect(SphinxEvent.BI.value, # pragma nocover lambda sphinx_app: EventAction.call_builder_init( kb, sphinx_app) ) app.connect(SphinxEvent.EPD.value, # pragma nocover lambda sphinx_app, sphinx_env, docname: EventAction.call_purge_doc( kb, sphinx_app, sphinx_env, docname) ) app.connect(SphinxEvent.EBRD.value, # pragma nocover lambda sphinx_app, sphinx_env, docnames: EventAction.call_env_before_read_docs( kb, sphinx_app, sphinx_env, docnames) ) app.connect(SphinxEvent.DREAD.value, # pragma nocover lambda sphinx_app, doctree: EventAction.call_env_doctree_read( kb, sphinx_app, doctree) ) app.connect(SphinxEvent.DRES.value, # pragma nocover lambda sphinx_app, doctree, fromdocname: EventAction.call_doctree_resolved( kb, sphinx_app, doctree, fromdocname) ) app.connect(SphinxEvent.EU.value, # pragma nocover lambda sphinx_app, sphinx_env: EventAction.call_env_updated( kb, sphinx_app, sphinx_env) ) app.connect(SphinxEvent.HCP.value, # pragma nocover lambda sphinx_app: EventAction.call_html_collect_pages( kb, sphinx_app) ) app.connect(SphinxEvent.ECC.value, # pragma nocover lambda sphinx_builder, sphinx_env: EventAction.call_env_check_consistency( kb, sphinx_builder, sphinx_env) ) app.connect(SphinxEvent.MR.value, # pragma nocover lambda sphinx_app, sphinx_env, node, contnode: 
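# Illustrative usage sketch for the plantuml()/dot() generators above: both consume the
# tuple returned by sadisplay.describe(). The module path "myapp.models" and the model
# names are placeholders:
import sadisplay
from myapp import models

desc = sadisplay.describe([models.User, models.Address])
with open('schema.plantuml', 'w') as fh:
    fh.write(sadisplay.plantuml(desc))
# Render afterwards with: plantuml schema.plantuml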
EventAction.call_missing_reference( kb, sphinx_app, sphinx_env, node, contnode) ) app.connect(SphinxEvent.HPC.value, # pragma nocover lambda sphinx_app, pagename, templatename, context, doctree: EventAction.call_html_page_context( kb, sphinx_app, pagename, templatename, context, doctree) ) return dict( version=__version__, parallel_read_safe=False )" 3543,"def icon(self): """""" Returns the icon filepath for this plugin. :return """""" path = self._icon if not path: return '' path = os.path.expandvars(os.path.expanduser(path)) if path.startswith('.'): base_path = os.path.dirname(self.filepath()) path = os.path.abspath(os.path.join(base_path, path)) return path" 3544,"def addPluginPath(cls, pluginpath): """""" Adds the plugin path for this class to the given path. The inputted pluginpath value can either be a list of strings, or a string containing paths separated by the OS specific path separator (':' on Mac & Linux, ';' on Windows) :param pluginpath | [, ..] || """""" prop_key = '_%s__pluginpath' % cls.__name__ curr_path = getattr(cls, prop_key, None) if not curr_path: curr_path = [] setattr(cls, prop_key, curr_path) if isinstance(pluginpath, basestring): pluginpath = pluginpath.split(os.path.pathsep) for path in pluginpath: if not path: continue path = os.path.expanduser(os.path.expandvars(path)) paths = path.split(os.path.pathsep) if len(paths) > 1: cls.addPluginPath(paths) else: curr_path.append(path)" 3545,"def pluginRegisterType(cls): """""" Returns the register type for this plugin class. :return """""" default = Plugin.Type.Module default |= Plugin.Type.Package default |= Plugin.Type.RegistryFile return getattr(cls, '_%s__pluginRegisterType', default)" 3546,"def loadPlugins(cls): """""" Initializes the plugins by loading modules from the inputted paths. 
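# Illustrative sketch: addPluginPath above accepts either a list of paths or one string
# joined with the OS path separator (':' on Mac & Linux, ';' on Windows) and expands
# '~' and environment variables. The normalisation step, isolated:
import os

def normalize_plugin_paths(pluginpath):
    if isinstance(pluginpath, str):
        pluginpath = pluginpath.split(os.path.pathsep)
    cleaned = []
    for path in pluginpath:
        if not path:
            continue
        cleaned.append(os.path.expanduser(os.path.expandvars(path)))
    return cleaned

# e.g. on Linux: normalize_plugin_paths('~/plugins:$HOME/more')
# -> ['/home/me/plugins', '/home/me/more']   (actual values depend on the environment)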
"""""" plugs = getattr(cls, '_%s__plugins' % cls.__name__, None) if plugs is not None: return plugs = {} setattr(cls, '_%s__plugins' % cls.__name__, plugs) typ = cls.pluginRegisterType() for path in cls.pluginPath(): base_package = projex.packageFromPath(path) base_path = os.path.normpath(projex.packageRootPath(path)) # make sure it is at the front of the path if base_path in sys.path: sys.path.remove(base_path) sys.path.insert(0, base_path) processed = ['__init__'] # load support for registries if typ & Plugin.Type.RegistryFile: files = glob.glob(os.path.join(path, '*/register.xml')) for file_ in files: name = os.path.normpath(file_).split(os.path.sep)[-2] processed.append(name) try: proxy = PluginProxy.fromFile(cls, file_) cls.register(proxy) except Exception, e: name = projex.text.pretty(name) err = Plugin(name) err.setError(e) err.setFilepath(file_) cls.register(err) # log the error msg = ""%s.plugin('%s') failed to load from %s."" logger.warning(msg % (cls.__name__, name, file_)) logger.error(e) # load support for packages if typ & Plugin.Type.Package: files = glob.glob(os.path.join(path, '*/__init__.py')) for file_ in files: name = os.path.normpath(file_).split(os.path.sep)[-2] if name in processed: continue processed.append(name) package = '.'.join([base_package, name]).strip('.') if not package: continue try: __import__(package) except Exception, e: name = projex.text.pretty(name) err = Plugin(name) err.setError(e) err.setFilepath(file_) cls.register(err) # log the error msg = ""%s.plugin('%s') failed to load from %s."" logger.warning(msg % (cls.__name__, name, file_)) logger.error(e) # load support for modules if typ & Plugin.Type.Module: files = glob.glob(os.path.join(path, '*.py')) for file_ in files: name = os.path.basename(file_).split('.')[0] if name in processed: continue processed.append(name) package = '.'.join([base_package, name]).strip('.') if not package: continue try: __import__(package) except Exception, e: name = projex.text.pretty(name) err = Plugin(name) err.setError(e) err.setFilepath(file_) cls.register(err) # log the error msg = ""%s.plugin('%s') failed to load from %s."" logger.warning(msg % (cls.__name__, name, file_)) logger.error(e)" 3547,"def plugin(cls, name): """""" Retrieves the plugin based on the inputted name. :param name | :return """""" cls.loadPlugins() plugs = getattr(cls, '_%s__plugins' % cls.__name__, {}) return plugs.get(nstr(name))" 3548,"def pluginNames(cls, enabled=True): """""" Returns the names of the plugins for a given class. :param enabled | || None :return [, ..] """""" return map(lambda x: x.name(), cls.plugins(enabled))" 3549,"def plugins(cls, enabled=True): """""" Returns the plugins for the given class. :param enabled | || None :return [, ..] """""" cls.loadPlugins() plugs = getattr(cls, '_%s__plugins' % cls.__name__, {}).values() if enabled is None: return plugs return filter(lambda x: x.isEnabled() == enabled, plugs)" 3550,"def register(cls, plugin): """""" Registers the given plugin instance to this system. If a plugin with the same name is already registered, then this plugin will not take effect. The first registered plugin is the one that is used. 
:param plugin | :return """""" plugs = getattr(cls, '_%s__plugins' % cls.__name__, None) if plugs is None: cls.loadPlugins() plugs = getattr(cls, '_%s__plugins' % cls.__name__, {}) if plugin.name() in plugs: inst = plugs[plugin.name()] # assign the plugin instance to the proxy if isinstance(inst, PluginProxy) and \ not isinstance(plugin, PluginProxy) and \ not inst._instance: inst._instance = plugin return True return False plugs[plugin.name()] = plugin setattr(cls, '_%s__plugins' % cls.__name__, plugs) return True" 3551,"def setPluginPath(cls, pluginpath): """""" Sets the plugin path for this class to the given path. The inputted pluginpath value can either be a list of strings, or a string containing paths separated by the OS specific path separator (':' on Mac & Linux, ';' on Windows) :param pluginpath | [, ..] || """""" setattr(cls, '_%s__pluginpath' % cls.__name__, None) cls.addPluginPath(pluginpath)" 3552,"def unregister(cls, plugin): """""" Unregisters the given plugin from the system based on its name. :param plugin | """""" plugs = getattr(cls, '_%s__plugins' % cls.__name__, {}) try: plugs.pop(plugin.name()) except AttributeError: pass except ValueError: pass" 3553,"def loadInstance(self): """""" Loads the plugin from the proxy information that was created from the registry file. """""" if self._loaded: return self._loaded = True module_path = self.modulePath() package = projex.packageFromPath(module_path) path = os.path.normpath(projex.packageRootPath(module_path)) if path in sys.path: sys.path.remove(path) sys.path.insert(0, path) try: __import__(package) except Exception, e: err = Plugin(self.name(), self.version()) err.setError(e) err.setFilepath(module_path) self._instance = err self.setError(e) msg = ""%s.plugin('%s') errored loading instance from %s"" opts = (self.proxyClass().__name__, self.name(), module_path) logger.warning(msg % opts) logger.error(e)" 3554,"def modulePath(self): """""" Returns the module path information for this proxy plugin. This path will represent the root module that will be imported when the instance is first created of this plugin. :return """""" base_path = os.path.dirname(self.filepath()) module_path = self.importPath() module_path = os.path.expanduser(os.path.expandvars(module_path)) if module_path.startswith('.'): module_path = os.path.abspath(os.path.join(base_path, module_path)) return module_path" 3555,"def fromFile(cls, filepath): """""" Creates a proxy instance from the inputted registry file. 
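# Illustrative sketch: modulePath() above resolves the proxy's import path relative to
# the directory of its registry file when the path starts with '.'. Standalone version:
import os

def resolve_module_path(registry_file, import_path='./__init__.py'):
    base_path = os.path.dirname(registry_file)
    module_path = os.path.expanduser(os.path.expandvars(import_path))
    if module_path.startswith('.'):
        module_path = os.path.abspath(os.path.join(base_path, module_path))
    return module_path

# resolve_module_path('/opt/plugins/foo/register.xml') -> '/opt/plugins/foo/__init__.py'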
:param filepath | :return || None """""" xdata = ElementTree.parse(nstr(filepath)) xroot = xdata.getroot() # collect variable information name = xroot.get('name') ver = float(xroot.get('version', '1.0')) if not name: name = os.path.basename(filepath).split('.') if name == '__init__': name = os.path.normpath(filepath).split(os.path.sep)[-2] name = projex.text.pretty(name) icon = xroot.get('icon', './icon.png') ximport = xroot.find('import') if ximport is not None: importpath = ximport.get('path', './__init__.py') else: importpath = './__init__.py' params = {'description': '', 'author': '', 'email': '', 'url': ''} for param, default in params.items(): xdata = xroot.find(param) if xdata is not None: params[param] = xdata.text # generate the proxy information proxy = PluginProxy(cls, name, ver) proxy.setImportPath(importpath) proxy.setDescription(params['description']) proxy.setAuthor(params['author']) proxy.setEmail(params['email']) proxy.setUrl(params['url']) proxy.setFilepath(filepath) return proxy" 3556,"def clean_resource_json(resource_json): """""" The catalog wants to be smaller, let's drop some stuff """""" for a in ('parent_docname', 'parent', 'template', 'repr', 'series'): if a in resource_json: del resource_json[a] props = resource_json['props'] for prop in ( 'acquireds', 'style', 'in_nav', 'nav_title', 'weight', 'auto_excerpt'): if prop in props: del props[prop] return resource_json" 3557,"def resources_to_json(resources): """""" Make a JSON/catalog representation of the resources db """""" return { docname: clean_resource_json(resource.__json__(resources)) for (docname, resource) in resources.items() }" 3558,"def references_to_json(resources, references): """""" Make a JSON/catalog representation of the references db, including the count for each """""" dump_references = {} for reftype, refvalue in references.items(): dump_references[reftype] = {} for label, reference_resource in refvalue.items(): target_count = len(reference_resource.get_sources(resources)) dump_references[reftype][label] = dict( count=target_count, docname=reference_resource.docname ) return dump_references" 3559,"def get(self, url, params=None, cache_cb=None, **kwargs): """""" Make http get request. :param url: :param params: :param cache_cb: (optional) a function that taking requests.Response as input, and returns a bool flag, indicate whether should update the cache. :param cache_expire: (optional). :param kwargs: optional arguments. """""" if self.use_random_user_agent: headers = kwargs.get(""headers"", dict()) headers.update({Headers.UserAgent.KEY: Headers.UserAgent.random()}) kwargs[""headers""] = headers url = add_params(url, params) cache_consumed, value = self.try_read_cache(url) if cache_consumed: response = requests.Response() response.url = url response._content = value else: response = self.ses.get(url, **kwargs) if self.should_we_update_cache(response, cache_cb, cache_consumed): self.cache.set( url, response.content, expire=kwargs.get(""cache_expire"", self.cache_expire), ) return response" 3560,"def get_html(self, url, params=None, cache_cb=None, decoder_encoding=None, decoder_errors=url_specified_decoder.ErrorsHandle.strict, **kwargs): """""" Get html of an url. 
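# Illustrative sketch: fromFile() above reads a registry file whose attributes and
# child elements look roughly like the document below. The parser never checks the
# root tag name, so "plugin" and all concrete values here are made up:
EXAMPLE_REGISTER_XML = """\
<plugin name="My Plugin" version="1.2" icon="./icon.png">
  <import path="./__init__.py"/>
  <description>Example plugin registry file</description>
  <author>Jane Doe</author>
  <email>jane@example.test</email>
  <url>https://example.test/myplugin</url>
</plugin>
"""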
"""""" response = self.get( url=url, params=params, cache_cb=cache_cb, **kwargs ) return url_specified_decoder.decode( binary=response.content, url=response.url, encoding=decoder_encoding, errors=decoder_errors, )" 3561,"def download(self, url, dst, params=None, cache_cb=None, overwrite=False, stream=False, minimal_size=-1, maximum_size=1024 ** 6, **kwargs): """""" Download binary content to destination. :param url: binary content url :param dst: path to the 'save_as' file :param cache_cb: (optional) a function that taking requests.Response as input, and returns a bool flag, indicate whether should update the cache. :param overwrite: bool, :param stream: bool, whether we load everything into memory at once, or read the data chunk by chunk :param minimal_size: default -1, if response content smaller than minimal_size, then delete what just download. :param maximum_size: default 1GB, if response content greater than maximum_size, then delete what just download. """""" response = self.get( url, params=params, cache_cb=cache_cb, stream=stream, **kwargs ) if not overwrite: # pragma: no cover if os.path.exists(dst): raise OSError(""'%s' exists!"" % dst) if stream: chunk_size = 1024 * 1024 downloaded_size = 0 with atomic_write(dst, mode=""wb"") as f: for chunk in response.iter_content(chunk_size): if not chunk: # pragma: no cover break f.write(chunk) downloaded_size += chunk_size if (downloaded_size < minimal_size) or (downloaded_size > maximum_size): self.raise_download_oversize_error( url, downloaded_size, minimal_size, maximum_size) else: content = response.content downloaded_size = sys.getsizeof(content) if (downloaded_size < minimal_size) or (downloaded_size > maximum_size): self.raise_download_oversize_error( url, downloaded_size, minimal_size, maximum_size) else: with atomic_write(dst, mode=""wb"") as f: f.write(content)" 3562,"def option(*args, **kwargs): """"""Decorator to add an option to the optparser argument of a Cmdln subcommand To add a toplevel option, apply the decorator on the class itself. (see p4.py for an example) Example: @cmdln.option(""-E"", dest=""environment_path"") class MyShell(cmdln.Cmdln): @cmdln.option(""-f"", ""--force"", help=""force removal"") def do_remove(self, subcmd, opts, *args): #... """""" def decorate_sub_command(method): """"""create and add sub-command options"""""" if not hasattr(method, ""optparser""): method.optparser = SubCmdOptionParser() method.optparser.add_option(*args, **kwargs) return method def decorate_class(klass): """"""store toplevel options"""""" assert _forgiving_issubclass(klass, Cmdln) _inherit_attr(klass, ""toplevel_optparser_options"", [], cp=lambda l: l[:]) klass.toplevel_optparser_options.append( (args, kwargs) ) return klass #XXX Is there a possible optimization for many options to not have a # large stack depth here? def decorate(obj): if _forgiving_issubclass(obj, Cmdln): return decorate_class(obj) else: return decorate_sub_command(obj) return decorate" 3563,"def _inherit_attr(klass, attr, default, cp): """"""Inherit the attribute from the base class Copy `attr` from base class (otherwise use `default`). Copying is done using the passed `cp` function. The motivation behind writing this function is to allow inheritance among Cmdln classes where base classes set 'common' options using the `@cmdln.option` decorator. To ensure this, we must not write to the base class's options when handling the derived class. 
"""""" if attr not in klass.__dict__: if hasattr(klass, attr): value = cp(getattr(klass, attr)) else: value = default setattr(klass, attr, value)" 3564,"def _forgiving_issubclass(derived_class, base_class): """"""Forgiving version of ``issubclass`` Does not throw any exception when arguments are not of class type """""" return (type(derived_class) is ClassType and \ type(base_class) is ClassType and \ issubclass(derived_class, base_class))" 3565,"def _dispatch_cmd(self, handler, argv): """"""Introspect sub-command handler signature to determine how to dispatch the command. The raw handler provided by the base 'RawCmdln' class is still supported: def do_foo(self, argv): # 'argv' is the vector of command line args, argv[0] is # the command name itself (i.e. ""foo"" or an alias) pass In addition, if the handler has more than 2 arguments option processing is automatically done (using optparse): @cmdln.option('-v', '--verbose', action='store_true') def do_bar(self, subcmd, opts, *args): # subcmd = <""bar"" or an alias> # opts = if opts.verbose: print ""lots of debugging output..."" # args = for arg in args: bar(arg) TODO: explain that ""*args"" can be other signatures as well. The `cmdln.option` decorator corresponds to an `add_option()` method call on an `optparse.OptionParser` instance. You can declare a specific number of arguments: @cmdln.option('-v', '--verbose', action='store_true') def do_bar2(self, subcmd, opts, bar_one, bar_two): #... and an appropriate error message will be raised/printed if the command is called with a different number of args. """""" co_argcount = handler.__func__.__code__.co_argcount if co_argcount == 2: # handler ::= do_foo(self, argv) return handler(argv) elif co_argcount >= 3: # handler ::= do_foo(self, subcmd, opts, ...) try: optparser = handler.optparser except AttributeError: optparser = handler.__func__.optparser = SubCmdOptionParser() assert isinstance(optparser, SubCmdOptionParser) # apply subcommand options' defaults from config files, if any. subcmd = handler.__name__.split('do_', 1)[1] optparser.set_defaults(**self.get_option_defaults(subcmd)) optparser.set_cmdln_info(self, argv[0]) try: opts, args = optparser.parse_args(argv[1:]) except StopOptionProcessing: #TODO: this doesn't really fly for a replacement of # optparse.py behaviour, does it? return 0 # Normal command termination try: return handler(argv[0], opts, *args) except TypeError: _, ex, _ = sys.exc_info() # Some TypeError's are user errors: # do_foo() takes at least 4 arguments (3 given) # do_foo() takes at most 5 arguments (6 given) # do_foo() takes exactly 5 arguments (6 given) # do_foo() takes exactly 5 positional arguments (6 given) # Raise CmdlnUserError for these with a suitably # massaged error message. tb = sys.exc_info()[2] # the traceback object if tb.tb_next is not None: # If the traceback is more than one level deep, then the # TypeError do *not* happen on the ""handler(...)"" call # above. In that we don't want to handle it specially # here: it would falsely mask deeper code errors. 
raise msg = ex.args[0] match = _INCORRECT_NUM_ARGS_RE.search(msg) if match: msg = list(match.groups()) msg[1] = int(msg[1]) - 3 if msg[1] == 1: msg[2] = msg[2].replace(""arguments"", ""argument"") msg[3] = int(msg[3]) - 3 msg = ''.join(map(str, msg)) raise CmdlnUserError(msg) else: raise else: raise CmdlnError(""incorrect argcount for %s(): takes %d, must "" ""take 2 for 'argv' signature or 3+ for 'opts' "" ""signature"" % (handler.__name__, co_argcount))" 3566,"def aquireMs1CalibrationData(msrunContainer, specfile, siiArrays=None, lockMass=None, **kwargs): """"""Aquire mass error data, observed vs expected m/z, for calibration of MS1 ion m/z values. Expected m/z values can be of ambient ions with known exact masses and from identified precursor masses of peptide spectrum matching results. :param msrunContainer: intance of :class:`maspy.core.MsrunContainer` :param specfile: filename of an ms-run file, used to extract mass error data :param siiArrays: optional, a dictionary of numpy.arrays Must provide the keys ""obsMz"" (observed precursor m/z), ""excMz"" (exact calculated m/z of the identified peptide) and ""precursorId"" (the scan id of the MS1 that preceeded the respective MSn scan). :param lockMass: None, True or a list of lock mass m/z values If ``None`` lock masses are not used to aquire calibration data. If ``True`` use the seven predefined lock mass values. Else use the specified lock mass m/z values. :param scanRange: a list of positive and negative int values When using MSn spectrum identifications for aquiring calibration data the m/z value of the peptide precursor is used to find the corresponding ion m/z value in the preceeding MS1 scan. By adding positive and negative integers to the ``scanRange`` parameter the subsequent and preceeding MS1 scans are also used to aquire calibration data from the same peptide precursor. By default ""[-1, 0, 1]"". :param massTolerance: float or int The maximal allowed deviation for matching two masses. By default ""10 * 1e-6"" which corresponds to 10ppm. 
:param toleranceMode: ""relative"" or ""absolute"" Specifies how the ``massTolerance`` value is applied, by default ""relative :param topIntMatches: bool, by default False :param useTopIntIons: bool, by default False :returns: {'siId': numpy.array([str, ...]), 'rt': numpy.array([float, ...]), 'obsMz': numpy.array([float, ...]), 'excMz': numpy.array([float, ...]), 'relDev': numpy.array([float, ...]), 'absDev': numpy.array([float, ...]), 'int': numpy.array([float, ...]) 'source': numpy.array(['psm' or 'lock']) } """""" scanRange = kwargs.get('scanRange', [-1, 0, 1]) massTolerance = kwargs.get('massTolerance', 10*1e-6) toleranceMode = kwargs.get('toleranceMode', 'relative') topIntMatches = kwargs.get('topIntMatches', False) useTopIntIons = kwargs.get('useTopIntIons', False) ms1Arrays = msrunContainer.getArrays(['rt'], specfiles=specfile, sort='rt', selector=lambda si: si.msLevel==1 ) ms1Arrays['calibrationMz'] = [list() for x in range(len(ms1Arrays['id']))] if lockMass is not None: if lockMass == True: #Use default lock mass values lockMass = [445.12002, 519.13882, 593.15761, 667.1764, 536.16536, 610.18416, 684.20295] lockMassTuples = [(lockMassMz, lockMassMz, 'lock') for lockMassMz in lockMass] for ms1ArrayPos in range(len(ms1Arrays['id'])): ms1Arrays['calibrationMz'][ms1ArrayPos].extend(lockMassTuples) if siiArrays is not None: precursorArrayLookup = {__: _ for _, __ in enumerate(ms1Arrays['id'])} for obsMz, excMz, precursorId in zip(siiArrays['obsMz'], siiArrays['excMz'], siiArrays['precursorId'] ): #find array position of the precursor scan precursorArrayPos = precursorArrayLookup[precursorId] #also look for the peptide precursor ions in earlier and later MS1 #scans by modifying the precursorArrayPos according to the numbers #in ""scanRange"" for posModifier in scanRange: _arrayPos = precursorArrayPos + posModifier try: ms1Arrays['calibrationMz'][_arrayPos].append((obsMz, excMz, 'psm')) except IndexError: #An IndexError can occur because of the ""scanRange"" #extension at the end and the beginning of the MS1 scans pass calibrationDataMs1 = {_: [] for _ in ['siId', 'rt', 'obsMz', 'excMz', 'iit', 'source', 'relDev', 'absDev', 'int' ] } for siId, rtMs1, calibrationMz in zip(ms1Arrays['id'], ms1Arrays['rt'], ms1Arrays['calibrationMz'] ): if len(calibrationMz) == 0: continue ionMzListMs1 = msrunContainer.saic[specfile][siId].arrays['mz'] ionIntListMs1 = msrunContainer.saic[specfile][siId].arrays['i'] if useTopIntIons: ionIntMask = ionIntListMs1.argsort()[::-1][:useTopIntIons] ionMzListMs1 = ionMzListMs1[ionIntMask] ionIntListMs1 = ionIntListMs1[ionIntMask] currCalibrationData = ddict(list) for obsMz, excMz, source in calibrationMz: limHigh = obsMz * (1+massTolerance) limLow = obsMz * (1-massTolerance) pL = bisect.bisect_left(ionMzListMs1, limLow) pH = bisect.bisect_right(ionMzListMs1, limHigh) if pH - pL <= 0: continue ionMatchMask = abs(ionMzListMs1[pL:pH] - obsMz).argmin() ms1Mz = ionMzListMs1[pL:pH][ionMatchMask] ms1Int = ionIntListMs1[pL:pH][ionMatchMask] #TODO: rel dev calculation changed, test dataFit functions!!! 
relDevObs = (1 - ms1Mz / obsMz) absDevObs = obsMz - ms1Mz relDevExc = (1 - ms1Mz / excMz) absDevExc = excMz - ms1Mz if abs(relDevObs) <= massTolerance: currCalibrationData['siId'].append(siId) currCalibrationData['rt'].append(rtMs1) currCalibrationData['obsMz'].append(ms1Mz) currCalibrationData['excMz'].append(excMz) currCalibrationData['relDev'].append(relDevExc) currCalibrationData['absDev'].append(absDevExc) currCalibrationData['int'].append(ms1Int) currCalibrationData['iit'].append(msrunContainer.sic[specfile][siId].iit) currCalibrationData['source'].append(source) if len(currCalibrationData['siId']) == 0: continue for key in currCalibrationData.keys(): calibrationDataMs1[key].extend(currCalibrationData[key]) # Convert calibration data into numpy arrays for key in calibrationDataMs1.keys(): calibrationDataMs1[key] = numpy.array(calibrationDataMs1[key]) return calibrationDataMs1" 3567,"def timecalMs1DataMedian(msrunContainer, specfile, calibrationData, minDataPoints=50, deviationKey='relDev'): """"""Generates a calibration value for each MS1 scan by calculating the median deviation :param msrunContainer: intance of :class:`maspy.core.MsrunContainer` :param specfile: filename of an ms-run file, used to generate an calibration value for each MS1 spectrum item. :param calibrationData: a dictionary of ``numpy.arrays`` containing calibration data, as returned by :func:`aquireMs1CalibrationData()` :param minDataPoints: The minimal number of data points necessary to calculate the calibration value, default value is ""50"". The calibration value for each scan is calulated as the median of all calibration data points present for this scan. However, if the number of data points is less then specified by ``minDataPoints` the data points of the preceeding and subsequent scans are added until the minimal number of data points is reached. :param deviationKey: the ``calibrationData`` key which contains the calibration data that should be used. :returns: a dictionary containing the calibration values for each MS1 ``Si``. ``{si.id: {'calibValue': float, 'n': int, 'data': list}`` """""" corrData = dict() _posDict = dict() pos = 0 for si in msrunContainer.getItems(specfiles=specfile, sort='rt', selector=lambda si: si.msLevel==1 ): corrData[si.id] = {'calibValue': float(), 'n': int(), 'data': list()} _posDict[pos] = si.id pos += 1 for siId, deviation in zip(calibrationData['siId'], calibrationData[deviationKey]): corrData[siId]['data'].append(deviation) corrData[siId]['n'] += 1 for pos in range(len(corrData)): entry = corrData[_posDict[pos]] _data = [entry['data']] _n = entry['n'] expansion = 0 while _n < minDataPoints: expansion += 1 try: expData = corrData[_posDict[pos+expansion]]['data'] _data.append(expData) _n += corrData[_posDict[pos+expansion]]['n'] except KeyError: pass try: expData = corrData[_posDict[pos-expansion]]['data'] _data.append(expData) _n += corrData[_posDict[pos-expansion]]['n'] except KeyError: pass if len(entry['data']) > 0: median = numpy.median(entry['data']) factor = 1 else: median = float() factor = 0 for expData in _data[1:]: if len(expData) > 0: median += numpy.median(expData) * 0.5 factor += 0.5 median = median / factor entry['calibValue'] = median return corrData" 3568,"def applyTimeCalMs1(msrunContainer, specfile, correctionData, **kwargs): """"""Applies correction values to the MS1 ion m/z arrays in order to correct for a time dependent m/z error. 
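# Illustrative sketch: the matching step in aquireMs1CalibrationData() above finds, for
# each expected m/z, the closest ion in the sorted MS1 m/z array inside a relative
# (ppm) window, using bisect to bound the search. That step, isolated:
import bisect
import numpy

def closest_ion_within_ppm(mz_array, int_array, target_mz, tol=10e-6):
    # mz_array must be sorted ascending; returns (mz, intensity) or None
    lim_low, lim_high = target_mz * (1 - tol), target_mz * (1 + tol)
    lo = bisect.bisect_left(mz_array, lim_low)
    hi = bisect.bisect_right(mz_array, lim_high)
    if hi - lo <= 0:
        return None
    pos = numpy.abs(mz_array[lo:hi] - target_mz).argmin()
    return mz_array[lo:hi][pos], int_array[lo:hi][pos]

mz = numpy.array([443.98, 445.11998, 445.12100, 519.1389])
ii = numpy.array([1e4, 5e5, 2e4, 3e5])
print(closest_ion_within_ppm(mz, ii, 445.12002))   # -> (445.11998, 500000.0)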
:param msrunContainer: intance of :class:`maspy.core.MsrunContainer`, containing the :class:`maspy.core.Sai` items of the ""specfile"". :param specfile: filename of an ms-run file to which the m/z calibration should be applied :param correctionData: a dictionary containing the calibration values for each MS1 ``Si``, as returned by :func:`timecalMs1DataMedian()`. ``{si.id: {'calibValue': float}`` :param toleranceMode: ""relative"" or ""absolute"" Specifies how the ``massTolerance`` value is applied, by default ""relative"". """""" toleranceMode = kwargs.get('toleranceMode', 'relative') if toleranceMode == 'relative': for siId in correctionData: calibValue = correctionData[siId]['calibValue'] msrunContainer.saic[specfile][siId].arrays['mz'] *= (1 + calibValue) elif toleranceMode == 'absolute': for siId in correctionData: calibValue = correctionData[siId]['calibValue'] msrunContainer.saic[specfile][siId].arrays['mz'] += calibValue else: raise Exception('#TODO: a proper exception text')" 3569,"def applyMassCalMs1(msrunContainer, specfile, dataFit, **kwargs): """"""Applies a correction function to the MS1 ion m/z arrays in order to correct for a m/z dependent m/z error. :param msrunContainer: intance of :class:`maspy.core.MsrunContainer`, containing the :class:`maspy.core.Sai` items of the ""specfile"". :param specfile: filename of an ms-run file to which the m/z calibration should be applied :param dataFit: a :class:`maspy.auxiliary.DataFit` object, containing processed calibration data. :param toleranceMode: ""relative"" or ""absolute"" Specifies how the ``massTolerance`` value is applied, by default ""relative"". """""" toleranceMode = kwargs.get('toleranceMode', 'relative') if toleranceMode == 'relative': for si in msrunContainer.getItems(specfile, selector=lambda si: si.msLevel==1): mzArr = msrunContainer.saic[specfile][si.id].arrays['mz'] corrArr = dataFit.corrArray(mzArr) mzArr *= (1 + corrArr) elif toleranceMode == 'absolute': for si in msrunContainer.getItems(specfile, selector=lambda si: si.msLevel==1): mzArr = msrunContainer.saic[specfile][si.id].arrays['mz'] corrArr = dataFit.corrArray(mzArr) mzArr += corrArr else: raise Exception('#TODO: a proper exception text')" 3570,"def _make(self, key, content): """"""clean"""""" pass self.say('make a new key>>>' + key + '>>>with>>>:' + str(content)) if key.isdigit(): i = int(key) # list index [p] self.say('extending parent list to contain index:' + key) # make a list with size return self._list(i, content) else: return self._dict(key, content)" 3571,"def set_path(self, data, path, value): """""" Sets the given key in the given dict object to the given value. If the given path is nested, child dicts are created as appropriate. Accepts either a dot-delimited path or an array of path elements as the `path` variable. 
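# Illustrative sketch: applyTimeCalMs1()/applyMassCalMs1() above apply a correction
# either relatively (mz *= 1 + corr) or absolutely (mz += corr). The arithmetic,
# isolated on made-up numbers:
import numpy

mz = numpy.array([400.0, 800.0, 1200.0])
rel_corr = 5e-6     # +5 ppm
abs_corr = 0.002    # +0.002 m/z units

print(mz * (1 + rel_corr))   # -> approx. [ 400.002  800.004 1200.006]
print(mz + abs_corr)         # -> approx. [ 400.002  800.002 1200.002]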
"""""" self.say('set_path:value:' + str(value) + ' at:' + str(path) + ' in:' + str(data)) if isinstance(path, str): path = path.split('.') if len(path) > 1: self.set_path(data.setdefault(path[0], {}), path[1:], value) else: data[path[0]] = value return data" 3572,"def get_genericpage(cls, kb_app): """""" Return the one class if configured, otherwise default """""" # Presumes the registry has been committed q = dectate.Query('genericpage') klasses = sorted(q(kb_app), key=lambda args: args[0].order) if not klasses: # The site doesn't configure a genericpage, return Genericpage else: return klasses[0][1]" 3573,"def buy_product(self, product_pk): """""" determina si el customer ha comprado un producto """""" if self.invoice_sales.filter(lines_sales__product_final__pk=product_pk).exists() \ or self.ticket_sales.filter(lines_sales__product_final__pk=product_pk).exists(): return True else: return False" 3574,"def create_ticket_from_albaran(pk, list_lines): MODEL_SOURCE = SalesAlbaran MODEL_FINAL = SalesTicket url_reverse = 'CDNX_invoicing_ticketsaless_list' # type_doc msg_error_relation = _(""Hay lineas asignadas a ticket"") msg_error_not_found = _('Sales albaran not found') msg_error_line_not_found = _('Todas las lineas ya se han pasado a ticket') return SalesLines.create_document_from_another(pk, list_lines, MODEL_SOURCE, MODEL_FINAL, url_reverse, msg_error_relation, msg_error_not_found, msg_error_line_not_found, False) """""" context = {} if list_lines: new_list_lines = SalesLines.objects.filter( pk__in=[int(x) for x in list_lines] ).exclude( invoice__isnull=True ).values_list('pk') if new_list_lines: new_pk = SalesLines.objects.values_list('order__pk').filter(pk__in=new_list_lines).first() if new_pk: context = SalesLines.create_ticket_from_order(new_pk, new_list_lines) return context else: error = _('Pedido no encontrado') else: error = _('Lineas no relacionadas con pedido') else: error = _('Lineas no seleccionadas') context['error'] = error return context """"""" 3575,"def create_invoice_from_albaran(pk, list_lines): MODEL_SOURCE = SalesAlbaran MODEL_FINAL = SalesInvoice url_reverse = 'CDNX_invoicing_invoicesaless_list' # type_doc msg_error_relation = _(""Hay lineas asignadas a facturas"") msg_error_not_found = _('Sales albaran not found') msg_error_line_not_found = _('Todas las lineas ya se han pasado a facturas') return SalesLines.create_document_from_another(pk, list_lines, MODEL_SOURCE, MODEL_FINAL, url_reverse, msg_error_relation, msg_error_not_found, msg_error_line_not_found, False) """""" context = {} if list_lines: new_list_lines = SalesLines.objects.filter( pk__in=[int(x) for x in list_lines] ).exclude( invoice__isnull=False ) if new_list_lines: new_pk = new_list_lines.first() if new_pk: context = SalesLines.create_invoice_from_order( new_pk.order.pk, [x['pk'] for x in new_list_lines.values('pk')]) return context else: error = _('Pedido no encontrado') else: error = _('Lineas no relacionadas con pedido') else: error = _('Lineas no seleccionadas') context['error'] = error return context """"""" 3576,"def create_invoice_from_ticket(pk, list_lines): MODEL_SOURCE = SalesTicket MODEL_FINAL = SalesInvoice url_reverse = 'CDNX_invoicing_invoicesaless_list' # type_doc msg_error_relation = _(""Hay lineas asignadas a facturas"") msg_error_not_found = _('Sales ticket not found') msg_error_line_not_found = _('Todas las lineas ya se han pasado a facturas') return SalesLines.create_document_from_another(pk, list_lines, MODEL_SOURCE, MODEL_FINAL, url_reverse, msg_error_relation, 
msg_error_not_found, msg_error_line_not_found, False) """""" context = {} if list_lines: new_list_lines = SalesLines.objects.filter( pk__in=[int(x) for x in list_lines] ).exclude( invoice__isnull=True ) if new_list_lines: new_pk = new_list_lines.first() if new_pk: context = SalesLines.create_invoice_from_order( new_pk.order.pk, [x['pk'] for x in new_list_lines.values('pk')]) return context else: error = _('Pedido no encontrado') else: error = _('Lineas no relacionadas con pedido') else: error = _('Lineas no seleccionadas') context['error'] = error return context """"""" 3577,"def cli(ctx): """"""Shows the man page packed inside the bubble tool this is mainly too overcome limitations on installing manual pages in a distribution agnostic and simple way and the way bubble has been developed, in virtual python environments, installing a man page into a system location makes no sense, the system manpage will not reflect the development version. and if your is system is really bare like : docker.io/python, you will not even have man installed """""" manfile = bubble_lib_dir+os.sep+'extras'+os.sep+'Bubble.1.gz' mancmd = [""/usr/bin/man"", manfile] try: return subprocess.call(mancmd) except Exception as e: print('cannot run man with bubble man page') print('you can always have a look at: '+manfile)" 3578,"def _fetch_dimensions(self, dataset): """""" We override this method just to set the correct datatype and dialect for regions. """""" for dimension in super(SCB, self)._fetch_dimensions(dataset): if dimension.id == ""Region"": yield Dimension(dimension.id, datatype=""region"", dialect=""skatteverket"", label=dimension.label) else: yield dimension" 3579,"def call(self, func, key, timeout=None): '''Wraps a function call with cache. Args: func (function): the function to call. key (str): the cache key for this call. timeout (int): the cache timeout for the key (the unit of this parameter depends on the cache class you use, for example, if you use the classes from werkzeug, then timeout is in seconds.) Returns: The return value of calling func ''' result = self.get(key) if result == NONE_RESULT: return None if result is None: result = func() self.set( key, result if result is not None else NONE_RESULT, timeout ) return result" 3580,"def map(self, key_pattern, func, all_args, timeout=None): '''Cache return value of multiple calls. Args: key_pattern (str): the key pattern to use for generating keys for caches of the decorated function. func (function): the function to call. all_args (list): a list of args to be used to make calls to the function. timeout (int): the cache timeout Returns: A list of the return values of the calls. Example:: def add(a, b): return a + b cache.map(key_pat, add, [(1, 2), (3, 4)]) == [3, 7] ''' results = [] keys = [ make_key(key_pattern, func, args, {}) for args in all_args ] cached = dict(zip(keys, self.get_many(keys))) cache_to_add = {} for key, args in zip(keys, all_args): val = cached[key] if val is None: val = func(*args) cache_to_add[key] = val if val is not None else NONE_RESULT if val == NONE_RESULT: val = None results.append(val) if cache_to_add: self.set_many(cache_to_add, timeout) return results" 3581,"def default_ssl_context() -> ssl.SSLContext: """"""Creates an SSL context suitable for use with HTTP/2. See https://tools.ietf.org/html/rfc7540#section-9.2 for what this entails. Specifically, we are interested in these points: § 9.2: Implementations of HTTP/2 MUST use TLS version 1.2 or higher. § 9.2.1: A deployment of HTTP/2 over TLS 1.2 MUST disable compression. 
The h2 project has its own ideas about how this context should be constructed but the resulting context doesn't work for us in the standard Python Docker images (though it does work under macOS). See https://python-hyper.org/projects/h2/en/stable/negotiating-http2.html#client-setup-example for more. """""" ctx = ssl.create_default_context(purpose=ssl.Purpose.SERVER_AUTH) # OP_NO_SSLv2, OP_NO_SSLv3, and OP_NO_COMPRESSION are already set by default # so we just need to disable the old versions of TLS. ctx.options |= (ssl.OP_NO_TLSv1 | ssl.OP_NO_TLSv1_1) # ALPN and NPN allow upgrades from HTTP/1.1, but these extensions are only # supported by recent versions of OpenSSL. Try to set them up, but don't cry # if they fail. try: ctx.set_alpn_protocols([""h2"", ""http/1.1""]) except NotImplementedError: pass try: ctx.set_npn_protocols([""h2"", ""http/1.1""]) except NotImplementedError: pass return ctx" 3582,"async def _window_open(self, stream_id: int): """"""Wait until the identified stream's flow control window is open. """""" stream = self._get_stream(stream_id) return await stream.window_open.wait()" 3583,"async def send_data( self, stream_id: int, data: bytes, end_stream: bool = False, ): """"""Send data, respecting the receiver's flow control instructions. If the provided data is larger than the connection's maximum outbound frame size, it will be broken into several frames as appropriate. """""" if self.closed: raise ConnectionClosedError stream = self._get_stream(stream_id) if stream.closed: raise StreamClosedError(stream_id) remaining = data while len(remaining) > 0: await asyncio.gather( self._writable.wait(), self._window_open(stream.id), ) remaining_size = len(remaining) window_size = self._h2.local_flow_control_window(stream.id) max_frame_size = self._h2.max_outbound_frame_size send_size = min(remaining_size, window_size, max_frame_size) if send_size == 0: continue logger.debug( f'[{stream.id}] Sending {send_size} of {remaining_size} ' f'bytes (window {window_size}, frame max {max_frame_size})' ) to_send = remaining[:send_size] remaining = remaining[send_size:] end = (end_stream is True and len(remaining) == 0) self._h2.send_data(stream.id, to_send, end_stream=end) self._flush() if self._h2.local_flow_control_window(stream.id) == 0: stream.window_open.clear()" 3584,"async def read_data(self, stream_id: int) -> bytes: """"""Read data from the specified stream until it is closed by the remote peer. If the stream is never ended, this never returns. """""" frames = [f async for f in self.stream_frames(stream_id)] return b''.join(frames)" 3585,"async def read_frame(self, stream_id: int) -> bytes: """"""Read a single frame of data from the specified stream, waiting until frames are available if none are present in the local buffer. If the stream is closed and all buffered frames have been consumed, raises a StreamConsumedError. """""" stream = self._get_stream(stream_id) frame = await stream.read_frame() if frame.flow_controlled_length > 0: self._acknowledge_data(frame.flow_controlled_length, stream_id) return frame.data" 3586,"async def get_pushed_stream_ids(self, parent_stream_id: int) -> List[int]: """"""Return a list of all streams pushed by the remote peer that are children of the specified stream. If no streams have been pushed when this method is called, waits until at least one stream has been pushed. 
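# Illustrative usage sketch for default_ssl_context() above: a client context with
# TLS >= 1.2 and ALPN offering "h2"/"http/1.1". Rebuilt inline so the snippet is
# self-contained; the host name is a placeholder:
import socket
import ssl

ctx = ssl.create_default_context(purpose=ssl.Purpose.SERVER_AUTH)
ctx.options |= (ssl.OP_NO_TLSv1 | ssl.OP_NO_TLSv1_1)
try:
    ctx.set_alpn_protocols(['h2', 'http/1.1'])
except NotImplementedError:
    pass

with socket.create_connection(('example.test', 443)) as sock:
    with ctx.wrap_socket(sock, server_hostname='example.test') as tls:
        print(tls.version())                   # e.g. 'TLSv1.3'
        print(tls.selected_alpn_protocol())    # 'h2' if the server negotiated HTTP/2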
"""""" if parent_stream_id not in self._streams: logger.error( f'Parent stream {parent_stream_id} unknown to this connection' ) raise NoSuchStreamError(parent_stream_id) parent = self._get_stream(parent_stream_id) await parent.pushed_streams_available.wait() pushed_streams_ids = self._pushed_stream_ids[parent.id] stream_ids: List[int] = [] if len(pushed_streams_ids) > 0: stream_ids.extend(pushed_streams_ids) pushed_streams_ids.clear() parent.pushed_streams_available.clear() return stream_ids" 3587,"def populate(self, obj): """""" Populate Query mongo to get information about the obj if it exists Args: obj (AtlasServiceBinding.Binding or AtlasServiceInstance.Instance): instance or binding Raises: ErrStorageTypeUnsupported: Type unsupported. ErrStorageMongoConnection: Error during MongoDB communication. """""" # query if type(obj) is AtlasServiceInstance.Instance: query = { ""instance_id"" : obj.instance_id, ""binding_id"" : { ""$exists"" : False } } elif type(obj) is AtlasServiceBinding.Binding: query = { ""binding_id"" : obj.binding_id, ""instance_id"" : obj.instance.instance_id } else: raise ErrStorageTypeUnsupported(type(obj)) # find try: result = self.broker.find_one(query) except: raise ErrStorageMongoConnection(""Populate Instance or Binding"") if result is not None: obj.parameters = result[""parameters""] # Flags the obj to provisioned obj.provisioned = True else: # New obj.provisioned = False" 3588,"def store(self, obj): """""" Store Store an object into the MongoDB storage for caching Args: obj (AtlasServiceBinding.Binding or AtlasServiceInstance.Instance): instance or binding Returns: ObjectId: MongoDB _id Raises: ErrStorageMongoConnection: Error during MongoDB communication. ErrStorageTypeUnsupported: Type unsupported. ErrStorageStore : Failed to store the binding or instance. """""" # query if type(obj) is AtlasServiceInstance.Instance: query = { ""instance_id"" : obj.instance_id, ""database"" : obj.get_dbname(), ""cluster"": obj.get_cluster(), ""parameters"" : obj.parameters } elif type(obj) is AtlasServiceBinding.Binding: query = { ""binding_id"" : obj.binding_id, ""parameters"" : obj.parameters, ""instance_id"": obj.instance.instance_id } else: raise ErrStorageTypeUnsupported(type(obj)) # insert try: result = self.broker.insert_one(query) except: raise ErrStorageMongoConnection(""Store Instance or Binding"") if result is not None: # Flags the obj to provisioned obj.provisioned = True return result.inserted_id raise ErrStorageStore()" 3589,"def remove(self, obj): """""" Remove Remove an object from the MongoDB storage for caching Args: obj (AtlasServiceBinding.Binding or AtlasServiceInstance.Instance): instance or binding Raises: ErrStorageTypeUnsupported: Type unsupported. """""" if type(obj) is AtlasServiceInstance.Instance: self.remove_instance(obj) elif type(obj) is AtlasServiceBinding.Binding: self.remove_binding(obj) else: raise ErrStorageTypeUnsupported(type(obj))" 3590,"def remove_instance(self, instance): """""" Remove an instance Remove an object from the MongoDB storage for caching Args: instance (AtlasServiceInstance.Instance): instance Raises: ErrStorageMongoConnection: Error during MongoDB communication. ErrStorageRemoveInstance: Failed to remove the instance. 
"""""" # query query = { ""instance_id"" : instance.instance_id, ""binding_id"" : { ""$exists"" : False } } # delete the instance try: result = self.broker.delete_one(query) except: raise ErrStorageMongoConnection(""Remove Instance"") # return the result if result is not None and result.deleted_count == 1: instance.provisioned = False else: raise ErrStorageRemoveInstance(instance.instance_id)" 3591,"def remove_binding(self, binding): """""" Remove a binding Remove an object from the MongoDB storage for caching Args: binding (AtlasServiceBinding.Binding): binding Raises: ErrStorageMongoConnection: Error during MongoDB communication. ErrStorageRemoveBinding: Failed to remove the binding """""" # query query = { ""binding_id"" : binding.binding_id, ""instance_id"" : binding.instance.instance_id } # delete the binding try: result = self.broker.delete_one(query) except: raise ErrStorageMongoConnection(""Remove Binding"") # return the result if result is not None and result.deleted_count == 1: binding.provisioned = False else: raise ErrStorageRemoveBinding(binding.binding_id)" 3592,"def handle(self, request, buffer_size): """""" Handle a message :param request: the request socket. :param buffer_size: the buffer size. :return: True if success, False otherwise """""" if self.component_type == StreamComponent.SOURCE: msg = self.handler_function() return self.__send(request, msg) logger = self.logger data = self.__receive(request, buffer_size) if data is None: return False else: logger.debug(data.split(self.TERMINATOR)) for message in data.split(self.TERMINATOR)[:-1]: logger.debug(message) result = self.handler_function(message) if self.component_type == StreamComponent.PROCESSOR: if not self.__send(request, result): return False return True" 3593,"def handle(self, request, buffer_size): """""" Handle a message :param request: the request socket. :param buffer_size: the buffer size. :return: True if success, False otherwise """""" logger = self.logger data = self.__receive(request, buffer_size) if data is None: return False else: arr = array('B',data) for message in split_array(arr,StxEtxHandler.ETX): if message[0] == StxEtxHandler.STX: message = message[1:] logger.debug(message) result = self.handler_function(bytearray(message)) if self.component_type == StreamComponent.PROCESSOR: if not self.__send(request, result): return False return True" 3594,"def handle(self, request, buffer_size): """""" Handle a message :param request: the request socket. :param buffer_size: the buffer size. :return: True if success, False otherwise """""" logger = self.logger msg = self.__receive(request, buffer_size) if msg is None: return False result = self.handler_function(msg) if self.component_type == StreamComponent.PROCESSOR: return self.__send(request, result) return True" 3595,"def importMzml(filepath, msrunContainer=None, siAttrFromSmi=None, specfilename=None): """"""Performs a complete import of a mzml file into a maspy MsrunContainer. 
:paramsiAttrFromSmi: allow here to specify a custom function that extracts params a from spectrumMetadataItem :param specfilename: by default the filename will be used as the specfilename in the MsrunContainer and all mzML item instances, specify here an alternative specfilename to override the default one """""" #TODO: docstring siAttrFromSmi = defaultFetchSiAttrFromSmi if siAttrFromSmi is None else siAttrFromSmi if msrunContainer is None: msrunContainer = maspy.core.MsrunContainer() basename = os.path.basename(filepath) dirname = os.path.dirname(filepath) filename, extension = os.path.splitext(basename) specfilename = filename if specfilename is None else specfilename #Check if the specified file is valid for an import if not os.path.isfile(filepath): raise IOError('File does not exist: %s' % filepath) elif extension.lower() != '.mzml': raise IOError('Filetype is not ""mzml"": %s' % filepath) elif specfilename in msrunContainer.info: print(specfilename, 'already present in the msrunContainer, aborting import.') return None mzmlReader = maspy.xml.MzmlReader(filepath) masterContainer = {'rm': str(), 'ci': {}, 'si': {}, 'sai': {}, 'smi': {}} #Dictionary recording which MS2 scans follow a MS1 scan ms1Record = ddict(list) for xmlSpectrum in mzmlReader.parseSpectra(): smi, binaryDataArrayList = smiFromXmlSpectrum(xmlSpectrum, specfilename) #Generate SpectrumItem si = maspy.core.Si(smi.id, smi.specfile) si.isValid = True siAttrFromSmi(smi, si) if si.msLevel > 1: si.precursorId = si.precursorId.split('scan=')[1] #TODO: change to use regex to extract from known vendor format ms1Record[si.precursorId].append(si.id) else: ms1Record[si.id] #Touch the ddict to add the MS1 id, if it is not already present #Generate SpectrumArrayItem sai = maspy.core.Sai(smi.id, smi.specfile) sai.arrays, sai.arrayInfo = maspy.xml.extractBinaries(binaryDataArrayList, smi.attributes['defaultArrayLength']) #Store all items in the appropriate containers masterContainer['smi'][smi.id] = smi masterContainer['si'][smi.id] = si masterContainer['sai'][smi.id] = sai for siId, msnIdList in viewitems(ms1Record): #Ignore KeyError if the spectrum is not present in the mzML file for whatever reason try: setattr(masterContainer['si'][siId], 'msnIdList', msnIdList) except KeyError: pass for xmlChromatogram in mzmlReader.chromatogramList: ci = ciFromXml(xmlChromatogram, specfilename) masterContainer['ci'][ci.id] = ci masterContainer['rm'] = mzmlReader.metadataNode msrunContainer._addSpecfile(specfilename, dirname) msrunContainer.rmc[specfilename] = masterContainer['rm'] msrunContainer.info[specfilename]['status']['rm'] = True msrunContainer.smic[specfilename] = masterContainer['smi'] msrunContainer.info[specfilename]['status']['smi'] = True msrunContainer.sic[specfilename] = masterContainer['si'] msrunContainer.info[specfilename]['status']['si'] = True msrunContainer.saic[specfilename] = masterContainer['sai'] msrunContainer.info[specfilename]['status']['sai'] = True msrunContainer.cic[specfilename] = masterContainer['ci'] msrunContainer.info[specfilename]['status']['ci'] = True return msrunContainer" 3596,"def defaultFetchSiAttrFromSmi(smi, si): """"""Default method to extract attributes from a spectrum metadata item (sai) and adding them to a spectrum item (si)."""""" for key, value in viewitems(fetchSpectrumInfo(smi)): setattr(si, key, value) for key, value in viewitems(fetchScanInfo(smi)): setattr(si, key, value) if si.msLevel > 1: for key, value in viewitems(fetchParentIon(smi)): setattr(si, key, value)" 3597,"def 
convertMzml(mzmlPath, outputDirectory=None): """"""Imports an mzml file and converts it to a MsrunContainer file :param mzmlPath: path of the mzml file :param outputDirectory: directory where the MsrunContainer file should be written if it is not specified, the output directory is set to the mzml files directory. """""" outputDirectory = outputDirectory if outputDirectory is not None else os.path.dirname(mzmlPath) msrunContainer = importMzml(mzmlPath) msrunContainer.setPath(outputDirectory) msrunContainer.save()" 3598,"def prepareSiiImport(siiContainer, specfile, path, qcAttr, qcLargerBetter, qcCutoff, rankAttr, rankLargerBetter): """"""Prepares the ``siiContainer`` for the import of peptide spectrum matching results. Adds entries to ``siiContainer.container`` and to ``siiContainer.info``. :param siiContainer: instance of :class:`maspy.core.SiiContainer` :param specfile: unambiguous identifier of a ms-run file. Is also used as a reference to other MasPy file containers. :param path: folder location used by the ``SiiContainer`` to save and load data to the hard disk. :param qcAttr: name of the parameter to define a ``Sii`` quality cut off. Typically this is some sort of a global false positive estimator, for example a 'false discovery rate' (FDR). :param qcLargerBetter: bool, True if a large value for the ``.qcAttr`` means a higher confidence. :param qcCutOff: float, the quality threshold for the specifed ``.qcAttr`` :param rankAttr: name of the parameter used for ranking ``Sii`` according to how well they match to a fragment ion spectrum, in the case when their are multiple ``Sii`` present for the same spectrum. :param rankLargerBetter: bool, True if a large value for the ``.rankAttr`` means a better match to the fragment ion spectrum. For details on ``Sii`` ranking see :func:`applySiiRanking()` For details on ``Sii`` quality validation see :func:`applySiiQcValidation()` """""" if specfile not in siiContainer.info: siiContainer.addSpecfile(specfile, path) else: raise Exception('...') siiContainer.info[specfile]['qcAttr'] = qcAttr siiContainer.info[specfile]['qcLargerBetter'] = qcLargerBetter siiContainer.info[specfile]['qcCutoff'] = qcCutoff siiContainer.info[specfile]['rankAttr'] = rankAttr siiContainer.info[specfile]['rankLargerBetter'] = rankLargerBetter" 3599,"def addSiiToContainer(siiContainer, specfile, siiList): """"""Adds the ``Sii`` elements contained in the siiList to the appropriate list in ``siiContainer.container[specfile]``. :param siiContainer: instance of :class:`maspy.core.SiiContainer` :param specfile: unambiguous identifier of a ms-run file. Is also used as a reference to other MasPy file containers. :param siiList: a list of ``Sii`` elements imported from any PSM search engine results """""" for sii in siiList: if sii.id not in siiContainer.container[specfile]: siiContainer.container[specfile][sii.id] = list() siiContainer.container[specfile][sii.id].append(sii)" 3600,"def applySiiRanking(siiContainer, specfile): """"""Iterates over all Sii entries of a specfile in siiContainer and sorts Sii elements of the same spectrum according to the score attribute specified in ``siiContainer.info[specfile]['rankAttr']``. Sorted Sii elements are then ranked according to their sorted position, if multiple Sii have the same score, all get the same rank and the next entries rank is its list position. :param siiContainer: instance of :class:`maspy.core.SiiContainer` :param specfile: unambiguous identifier of a ms-run file. Is also used as a reference to other MasPy file containers. 
"""""" attr = siiContainer.info[specfile]['rankAttr'] reverse = siiContainer.info[specfile]['rankLargerBetter'] for itemList in listvalues(siiContainer.container[specfile]): sortList = [(getattr(sii, attr), sii) for sii in itemList] itemList = [sii for score, sii in sorted(sortList, reverse=reverse)] #Rank Sii according to their position lastValue = None for itemPosition, item in enumerate(itemList, 1): if getattr(item, attr) != lastValue: rank = itemPosition item.rank = rank lastValue = getattr(item, attr)" 3601,"def applySiiQcValidation(siiContainer, specfile): """"""Iterates over all Sii entries of a specfile in siiContainer and validates if they surpass a user defined quality threshold. The parameters for validation are defined in ``siiContainer.info[specfile]``: - ``qcAttr``, ``qcCutoff`` and ``qcLargerBetter`` In addition to passing this validation a ``Sii`` has also to be at the first list position in the ``siiContainer.container``. If both criteria are met the attribute ``Sii.isValid`` is set to ``True``. :param siiContainer: instance of :class:`maspy.core.SiiContainer` :param specfile: unambiguous identifier of a ms-run file. Is also used as a reference to other MasPy file containers. """""" attr = siiContainer.info[specfile]['qcAttr'] cutOff = siiContainer.info[specfile]['qcCutoff'] if siiContainer.info[specfile]['qcLargerBetter']: evaluator = lambda sii: getattr(sii, attr) >= cutOff and sii.rank == 1 else: evaluator = lambda sii: getattr(sii, attr) <= cutOff and sii.rank == 1 for itemList in listvalues(siiContainer.container[specfile]): #Set the .isValid attribute of all Sii to False for sii in itemList: sii.isValid = False #Validate the first Sii sii = itemList[0] if evaluator(sii): sii.isValid = True" 3602,"def readPercolatorResults(filelocation, specfile, psmEngine): """"""Reads percolator PSM results from a txt file and returns a list of :class:`Sii ` elements. :param filelocation: file path of the percolator result file :param specfile: unambiguous identifier of a ms-run file. Is also used as a reference to other MasPy file containers. :param psmEngine: PSM PSM search engine used for peptide spectrum matching before percolator. This is important to specify, since the scanNr information is written in a different format by some engines. It might be necessary to adjust the settings for different versions of percolator or the PSM search engines used. Possible values are 'comet', 'xtandem', 'msgf'. :returns: [sii, sii, sii, ...] 
"""""" if psmEngine not in ['comet', 'msgf', 'xtandem']: raise Exception('PSM search engine not supported: ', psmEngine) itemList = list() #Note: regarding headerline, xtandem seperates proteins with ';', #msgf separates proteins with a tab with io.open(filelocation, 'r', encoding='utf-8') as openfile: lines = openfile.readlines() headerDict = dict([[y,x] for (x,y) in enumerate(lines[0].strip().split('\t')) ]) scanEntryList = list() for line in lines[1:]: if len(line.strip()) == 0: continue fields = line.strip().split('\t') if psmEngine in ['comet', 'msgf']: scanNr = fields[headerDict['PSMId']].split('_')[-3] elif psmEngine in ['xtandem']: scanNr = fields[headerDict['PSMId']].split('_')[-2] peptide = fields[headerDict['peptide']] if peptide.find('.') != -1: peptide = peptide.split('.')[1] #Change to the new unimod syntax peptide = peptide.replace('[UNIMOD:', '[u:') sequence = maspy.peptidemethods.removeModifications(peptide) qValue = fields[headerDict['q-value']] score = fields[headerDict['score']] pep = fields[headerDict['posterior_error_prob']] sii = maspy.core.Sii(scanNr, specfile) sii.peptide = peptide sii.sequence = sequence sii.qValue = float(qValue) sii.score = float(score) sii.pep = float(pep) sii.isValid = False itemList.append(sii) return itemList" 3603,"def importPercolatorResults(siiContainer, filelocation, specfile, psmEngine, qcAttr='qValue', qcLargerBetter=False, qcCutoff=0.01, rankAttr='score', rankLargerBetter=True): """"""Import peptide spectrum matches (PSMs) from a percolator result file, generate :class:`Sii ` elements and store them in the specified :class:`siiContainer `. Imported ``Sii`` are ranked according to a specified attribute and validated if they surpass a specified quality threshold. :param siiContainer: imported PSM results are added to this instance of :class:`siiContainer ` :param filelocation: file path of the percolator result file :param specfile: unambiguous identifier of a ms-run file. Is also used as a reference to other MasPy file containers. :param psmEngine: PSM search engine used for peptide spectrum matching before percolator. For details see :func:`readPercolatorResults()`. Possible values are 'comet', 'xtandem', 'msgf'. :param qcAttr: name of the parameter to define a quality cut off. Typically this is some sort of a global false positive estimator (eg FDR) :param qcLargerBetter: bool, True if a large value for the ``.qcAttr`` means a higher confidence. :param qcCutOff: float, the quality threshold for the specifed ``.qcAttr`` :param rankAttr: name of the parameter used for ranking ``Sii`` according to how well they match to a fragment ion spectrum, in the case when their are multiple ``Sii`` present for the same spectrum. :param rankLargerBetter: bool, True if a large value for the ``.rankAttr`` means a better match to the fragment ion spectrum For details on ``Sii`` ranking see :func:`applySiiRanking()` For details on ``Sii`` quality validation see :func:`applySiiQcValidation()` """""" path = os.path.dirname(filelocation) siiList = readPercolatorResults(filelocation, specfile, psmEngine) prepareSiiImport(siiContainer, specfile, path, qcAttr, qcLargerBetter, qcCutoff, rankAttr, rankLargerBetter) addSiiToContainer(siiContainer, specfile, siiList) applySiiRanking(siiContainer, specfile) applySiiQcValidation(siiContainer, specfile)" 3604,"def readMsgfMzidResults(filelocation, specfile=None): """"""Reads MS-GF+ PSM results from a mzIdentML file and returns a list of :class:`Sii ` elements. 
:param filelocation: file path of the percolator result file :param specfile: optional, unambiguous identifier of a ms-run file. Is also used as a reference to other MasPy file containers. If specified all the ``.specfile`` attribute of all ``Sii`` are set to this value, else it is read from the mzIdentML file. :returns: [sii, sii, sii, ...] """""" readSpecfile = True if specfile is None else False unimod = pyteomics.mass.mass.Unimod() _tempMods = dict() mzid_refs = pyteomics.mzid.read(filelocation, retrieve_refs=True, iterative=False) siiList = list() for mzidEntry in mzid_refs: mzidSii = mzidEntry['SpectrumIdentificationItem'][0] scanNumber = str(int(mzidEntry['scan number(s)'])) if readSpecfile: specfile = os.path.splitext(mzidEntry['name'])[0] sii = maspy.core.Sii(scanNumber, specfile) sii.isValid = mzidSii['passThreshold'] sii.rank = mzidSii['rank'] sii.eValue = mzidSii['MS-GF:EValue'] sii.charge = mzidSii['chargeState'] sii.sequence = mzidSii['PeptideSequence'] sii.specEValue = mzidSii['MS-GF:SpecEValue'] sii.score = numpy.log10(sii.eValue)*-1 if 'Modification' in mzidSii: modifications = list() for modEntry in mzidSii['Modification']: try: modSymbolMaspy = _tempMods[modEntry['name']] except KeyError: unimodEntry = unimod.by_title(modEntry['name']) if len(unimodEntry) != 0: modSymbol = 'u:'+str(unimodEntry['record_id']) else: modSymbol = modEntry['name'] modSymbolMaspy = '[' + modSymbol + ']' _tempMods[modEntry['name']] = modSymbolMaspy modifications.append((modEntry['location'], modSymbolMaspy)) modifications.sort(key=ITEMGETTER(0)) _lastPos = 0 _peptide = list() for pos, mod in modifications: _peptide.extend((sii.sequence[_lastPos:pos], mod)) _lastPos = pos _peptide.append(sii.sequence[_lastPos:]) sii.peptide = ''.join(_peptide) else: sii.peptide = sii.sequence siiList.append(sii) return siiList" 3605,"def importMsgfMzidResults(siiContainer, filelocation, specfile=None, qcAttr='eValue', qcLargerBetter=False, qcCutoff=0.01, rankAttr='score', rankLargerBetter=True): """"""Import peptide spectrum matches (PSMs) from a MS-GF+ mzIdentML file, generate :class:`Sii ` elements and store them in the specified :class:`siiContainer `. Imported ``Sii`` are ranked according to a specified attribute and validated if they surpass a specified quality threshold. :param siiContainer: imported PSM results are added to this instance of :class:`siiContainer ` :param filelocation: file path of the percolator result file :param specfile: optional, unambiguous identifier of a ms-run file. Is also used as a reference to other MasPy file containers. If specified the attribute ``.specfile`` of all ``Sii`` is set to this value, else it is read from the mzIdentML file. :param qcAttr: name of the parameter to define a quality cut off. Typically this is some sort of a global false positive estimator (eg FDR) :param qcLargerBetter: bool, True if a large value for the ``.qcAttr`` means a higher confidence. :param qcCutOff: float, the quality threshold for the specifed ``.qcAttr`` :param rankAttr: name of the parameter used for ranking ``Sii`` according to how well they match to a fragment ion spectrum, in the case when their are multiple ``Sii`` present for the same spectrum. 
:param rankLargerBetter: bool, True if a large value for the ``.rankAttr`` means a better match to the fragment ion spectrum For details on ``Sii`` ranking see :func:`applySiiRanking()` For details on ``Sii`` quality validation see :func:`applySiiQcValidation()` """""" path = os.path.dirname(filelocation) siiList = readMsgfMzidResults(filelocation, specfile) #If the mzIdentML file contains multiple specfiles, split the sii elements # up according to their specified ""specfile"" attribute. specfiles = ddict(list) if specfile is None: for sii in siiList: specfiles[sii.specfile].append(sii) else: specfiles[specfile] = siiList for specfile in specfiles: _siiList = specfiles[specfile] prepareSiiImport(siiContainer, specfile, path, qcAttr, qcLargerBetter, qcCutoff, rankAttr, rankLargerBetter) addSiiToContainer(siiContainer, specfile, _siiList) applySiiRanking(siiContainer, specfile) applySiiQcValidation(siiContainer, specfile)" 3606,"def importPeptideFeatures(fiContainer, filelocation, specfile): """""" Import peptide features from a featureXml file, as generated for example by the OpenMS node featureFinderCentroided, or a features.tsv file by the Dinosaur command line tool. :param fiContainer: imported features are added to this instance of :class:`FeatureContainer `. :param filelocation: Actual file path :param specfile: Keyword (filename) to represent file in the :class:`FeatureContainer`. Each filename can only occure once, therefore importing the same filename again is prevented. """""" if not os.path.isfile(filelocation): warnings.warn('The specified file does not exist %s' %(filelocation, )) return None elif (not filelocation.lower().endswith('.featurexml') and not filelocation.lower().endswith('.features.tsv') ): #TODO: this is depricated as importPeptideFeatues #is not longer be used solely for featurexml print('Wrong file extension, %s' %(filelocation, )) elif specfile in fiContainer.info: print('%s is already present in the SiContainer, import interrupted.' 
%(specfile, ) ) return None #Prepare the file container for the import fiContainer.addSpecfile(specfile, os.path.dirname(filelocation)) #import featurexml file if filelocation.lower().endswith('.featurexml'): featureDict = _importFeatureXml(filelocation) for featureId, featureEntryDict in viewitems(featureDict): rtArea = set() for convexHullEntry in featureEntryDict['convexHullDict']['0']: rtArea.update([convexHullEntry[0]]) fi = maspy.core.Fi(featureId, specfile) fi.rt = featureEntryDict['rt'] fi.rtArea = max(rtArea) - min(rtArea) fi.rtLow = min(rtArea) fi.rtHigh = max(rtArea) fi.charge = featureEntryDict['charge'] fi.mz = featureEntryDict['mz'] fi.mh = maspy.peptidemethods.calcMhFromMz(featureEntryDict['mz'], featureEntryDict['charge']) fi.intensity = featureEntryDict['intensity'] fi.quality = featureEntryDict['overallquality'] fi.isMatched = False fi.isAnnotated = False fi.isValid = True fiContainer.container[specfile][featureId] = fi #import dinosaur tsv file elif filelocation.lower().endswith('.features.tsv'): featureDict = _importDinosaurTsv(filelocation) for featureId, featureEntryDict in viewitems(featureDict): fi = maspy.core.Fi(featureId, specfile) fi.rt = featureEntryDict['rtApex'] fi.rtArea = featureEntryDict['rtEnd'] - featureEntryDict['rtStart'] fi.rtFwhm = featureEntryDict['fwhm'] fi.rtLow = featureEntryDict['rtStart'] fi.rtHigh = featureEntryDict['rtEnd'] fi.charge = featureEntryDict['charge'] fi.numScans = featureEntryDict['nScans'] fi.mz = featureEntryDict['mz'] fi.mh = maspy.peptidemethods.calcMhFromMz(featureEntryDict['mz'], featureEntryDict['charge']) fi.intensity = featureEntryDict['intensitySum'] fi.intensityApex = featureEntryDict['intensityApex'] #Note: not used keys: #mostAbundantMz nIsotopes nScans averagineCorr mass massCalib fi.isMatched = False fi.isAnnotated = False fi.isValid = True fiContainer.container[specfile][featureId] = fi" 3607,"def _importFeatureXml(filelocation): """"""Reads a featureXml file. 
:param filelocation: #TODO: docstring :returns: {featureKey1: {attribute1:value1, attribute2:value2, ...}, ...} See also :func:`importPeptideFeatures` """""" with io.open(filelocation, 'r', encoding='utf-8') as openFile: readingFeature = False readingHull = False featureDict = dict() for i, line in enumerate(openFile): line = line.strip() if readingFeature == True: if line.find('')[0] hullList = list() elif readingHull == True: if line.find('') != -1: featureDict[featureKey]['convexHullDict'][hullNr] = hullList readingHull = False elif line.find('') != -1: featureDict[featureKey]['dim0'] = float(line.split('')[1].split('')[0]) elif line.find('') != -1: featureDict[featureKey]['dim1'] = float(line.split('')[1].split('')[0]) elif line.find('') != -1: featureDict[featureKey]['intensity'] = float(line.split('')[1].split('')[0]) elif line.find('') != -1: featureDict[featureKey]['overallquality'] = float(line.split('')[1].split('')[0]) elif line.find('') != -1: featureDict[featureKey]['charge'] = int( line.split('')[1].split('')[0] ) elif line.find('')[0] elif line.find('name=\""score_fit\""') != -1: featureDict[featureKey]['score_fit'] = float(line.split('value=\""')[1].split('\""/>')[0]) elif line.find('name=\""score_correlation\""') != -1: featureDict[featureKey]['score_correlation'] = float(line.split('value=\""')[1].split('\""/>')[0]) elif line.find('name=\""FWHM\""') != -1: featureDict[featureKey]['FWHM'] = float(line.split('value=\""')[1].split('\""/>')[0]) elif line.find('name=\""spectrum_index\""') != -1: featureDict[featureKey]['spectrum_index'] = line.split('value=\""')[1].split('\""/>')[0] elif line.find('name=\""spectrum_native_id\""') != -1: featureDict[featureKey]['spectrum_native_id'] = line.split('value=\""')[1].split('\""/>')[0] elif line.find('') != -1: #mzList = list() #for retentionTime,mz in featureDict[featureKey]['convexHullDict']['0']: # mzList.append(mz) featureDict[featureKey]['rt'] = featureDict[featureKey]['dim0']#numpy.median(retentionTimeList) featureDict[featureKey]['mz'] = featureDict[featureKey]['dim1']#numpy.median(mzList) readingFeature == False if line.find('')[0] featureDict[featureKey] = dict() featureDict[featureKey]['convexHullDict'] = dict() #retentionTimeList = list() return featureDict" 3608,"def _importDinosaurTsv(filelocation): """"""Reads a Dinosaur tsv file. 
:returns: {featureKey1: {attribute1:value1, attribute2:value2, ...}, ...} See also :func:`importPeptideFeatures` """""" with io.open(filelocation, 'r', encoding='utf-8') as openFile: #NOTE: this is pretty similar to importing percolator results, maybe unify in a common function lines = openFile.readlines() headerDict = dict([[y,x] for (x,y) in enumerate(lines[0].strip().split('\t'))]) featureDict = dict() for linePos, line in enumerate(lines[1:]): featureId = str(linePos) fields = line.strip().split('\t') entryDict = dict() for headerName, headerPos in viewitems(headerDict): entryDict[headerName] = float(fields[headerPos]) if headerName in ['rtApex', 'rtEnd', 'rtStart', 'fwhm']: #Covnert to seconds entryDict[headerName] *= 60 elif headerName in ['charge', 'intensitySum', 'nIsotopes', 'nScans', 'intensityApex']: entryDict[headerName] = int(entryDict[headerName]) featureDict[featureId] = entryDict return featureDict" 3609,"def rst_to_html(input_string: str) -> str: """""" Given a string of RST, use docutils to generate html """""" overrides = dict(input_encoding='unicode', doctitle_xform=True, initial_header_level=1) parts = publish_parts( writer_name='html', source=input_string, settings_overrides=overrides ) return parts['html_body']" 3610,"def get_rst_title(rst_doc: Node) -> Optional[Any]: """""" Given some RST, extract what docutils thinks is the title """""" for title in rst_doc.traverse(nodes.title): return title.astext() return None" 3611,"def get_rst_excerpt(rst_doc: document, paragraphs: int = 1) -> str: """""" Given rst, parse and return a portion """""" texts = [] for count, p in enumerate(rst_doc.traverse(paragraph)): texts.append(p.astext()) if count + 1 == paragraphs: break return ' '.join(texts)" 3612,"def requires_password_auth(fn): """"""Decorator for HAPI methods that requires the instance to be authenticated with a password"""""" def wrapper(self, *args, **kwargs): self.auth_context = HAPI.auth_context_password return fn(self, *args, **kwargs) return wrapper" 3613,"def requires_api_auth(fn): """"""Decorator for HAPI methods that requires the instance to be authenticated with a HAPI token"""""" def wrapper(self, *args, **kwargs): self.auth_context = HAPI.auth_context_hapi return fn(self, *args, **kwargs) return wrapper" 3614,"def parse(response): """"""Parse a postdata-style response format from the API into usable data"""""" """"""Split a a=1b=2c=3 string into a dictionary of pairs"""""" tokens = {r[0]: r[1] for r in [r.split('=') for r in response.split(""&"")]} # The odd dummy parameter is of no use to us if 'dummy' in tokens: del tokens['dummy'] """""" If we have key names that end in digits, these indicate the result set contains multiple sets For example, planet0=Hoth&x=1&y=-10&planet1=Naboo&x=9&y=13 is actually data for two planets Elements that end in digits (like tag0, tag1 for planets) are formatted like (tag0_1, tag1_1), so we rstrip underscores afterwards. """""" if re.match('\D\d+$', tokens.keys()[0]): # Produce a list of dictionaries set_tokens = [] for key, value in tokens: key = re.match('^(.+\D)(\d+)$', key) # If the key isn't in the format (i.e. a failsafe), skip it if key is not None: if key.group(1) not in set_tokens: set_tokens[key.group(1)] = {} set_tokens[key.group(1)][key.group(0).rstrip('_')] = value tokens = set_tokens return tokens" 3615,"def init_chain(self): """"""Autodetect the devices attached to the Controller, and initialize a JTAGDevice for each. This is a required call before device specific Primitives can be used. 
"""""" if not self._hasinit: self._hasinit = True self._devices = [] self.jtag_enable() while True: # pylint: disable=no-member idcode = self.rw_dr(bitcount=32, read=True, lastbit=False)() if idcode in NULL_ID_CODES: break dev = self.initialize_device_from_id(self, idcode) if self._debug: print(dev) self._devices.append(dev) if len(self._devices) >= 128: raise JTAGTooManyDevicesError(""This is an arbitrary "" ""limit to deal with breaking infinite loops. If "" ""you have more devices, please open a bug"") self.jtag_disable() #The chain comes out last first. Reverse it to get order. self._devices.reverse()" 3616,"def get_fitted_lv1_prim(self, reqef, bitcount): """""" request r - A C 0 1 e -|? ! ! ! ! s A|? ✓ ✓ 0 1 Check this logic u C|? m ✓ 0 1 l 0|? M M 0 ! t 1|? M M ! 1 - = No Care A = arbitrary C = Constant 0 = ZERO 1 = ONE ! = ERROR ? = NO CARE RESULT ✓ = Pass data directly m = will require reconfiguring argument and using multiple of prim M = Requires using multiple of several prims to satisfy requirement """""" res = self._fitted_lv1_prim_cache.get(reqef) if res: return res prim = self.get_best_lv1_prim(reqef, bitcount) dispatcher = PrimitiveLv1Dispatcher(self, prim, reqef) self._fitted_lv1_prim_cache[reqef] = dispatcher return dispatcher" 3617,"def _UserUpdateConfigValue(self, configKey, strDescriptor, isDir = True, dbConfigValue = None): """""" Allow user to set or update config values in the database table. This is always called if no valid entry exists in the table already. Parameters ---------- configKey : string Name of config field. strDescriptor : string Description of config field. isDir : boolean [optional : default = True] Set to True if config value is expected to be a directory path. dbConfigValue : string [optional : default = None] The value of an existing entry for the given config field. Returns ---------- string New value for given config field in database. """""" newConfigValue = None if dbConfigValue is None: prompt = ""Enter new {0} or 'x' to exit: "".format(strDescriptor) else: prompt = ""Enter 'y' to use existing {0}, enter a new {0} or 'x' to exit: "".format(strDescriptor) while newConfigValue is None: response = goodlogging.Log.Input(""CLEAR"", prompt) if response.lower() == 'x': sys.exit(0) elif dbConfigValue is not None and response.lower() == 'y': newConfigValue = dbConfigValue elif not isDir: newConfigValue = response self._db.SetConfigValue(configKey, newConfigValue) else: if os.path.isdir(response): newConfigValue = os.path.abspath(response) self._db.SetConfigValue(configKey, newConfigValue) else: goodlogging.Log.Info(""CLEAR"", ""{0} is not recognised as a directory"".format(response)) return newConfigValue" 3618,"def _GetConfigValue(self, configKey, strDescriptor, isDir = True): """""" Get configuration value from database table. If no value found user will be prompted to enter one. Parameters ---------- configKey : string Name of config field. strDescriptor : string Description of config field. isDir : boolean [optional : default = True] Set to True if config value is expected to be a directory path. Returns ---------- string Value for given config field in database. 
"""""" goodlogging.Log.Info(""CLEAR"", ""Loading {0} from database:"".format(strDescriptor)) goodlogging.Log.IncreaseIndent() configValue = self._db.GetConfigValue(configKey) if configValue is None: goodlogging.Log.Info(""CLEAR"", ""No {0} exists in database"".format(strDescriptor)) configValue = self._UserUpdateConfigValue(configKey, strDescriptor, isDir) else: goodlogging.Log.Info(""CLEAR"", ""Got {0} {1} from database"".format(strDescriptor, configValue)) if not isDir or os.path.isdir(configValue): goodlogging.Log.Info(""CLEAR"", ""Using {0} {1}"".format(strDescriptor, configValue)) goodlogging.Log.DecreaseIndent() return configValue else: goodlogging.Log.Info(""CLEAR"", ""Exiting... {0} is not recognised as a directory"".format(configValue)) sys.exit(0)" 3619,"def _UserUpdateSupportedFormats(self, origFormatList = []): """""" Add supported formats to database table. Always called if the database table is empty. User can build a list of entries to add to the database table (one entry at a time). Once finished they select the finish option and all entries will be added to the table. They can reset the list at any time before finishing. Parameters ---------- origFormatList : list [optional : default = []] List of original formats from database table. Returns ---------- string List of updated formats from database table. """""" formatList = list(origFormatList) inputDone = None while inputDone is None: prompt = ""Enter new format (e.g. .mp4, .avi), "" \ ""'r' to reset format list, "" \ ""'f' to finish or "" \ ""'x' to exit: "" response = goodlogging.Log.Input(""CLEAR"", prompt) if response.lower() == 'x': sys.exit(0) elif response.lower() == 'f': inputDone = 1 elif response.lower() == 'r': formatList = [] else: if response is not None: if(response[0] != '.'): response = '.' + response formatList.append(response) formatList = set(formatList) origFormatList = set(origFormatList) if formatList != origFormatList: self._db.PurgeSupportedFormats() for fileFormat in formatList: self._db.AddSupportedFormat(fileFormat) return formatList" 3620,"def _GetSupportedFormats(self): """""" Get supported format values from database table. If no values found user will be prompted to enter values for this table. Returns ---------- string List of supported formats from database table. """""" goodlogging.Log.Info(""CLEAR"", ""Loading supported formats from database:"") goodlogging.Log.IncreaseIndent() formatList = self._db.GetSupportedFormats() if formatList is None: goodlogging.Log.Info(""CLEAR"", ""No supported formats exist in database"") formatList = self._UserUpdateSupportedFormats() else: goodlogging.Log.Info(""CLEAR"", ""Got supported formats from database: {0}"".format(formatList)) goodlogging.Log.Info(""CLEAR"", ""Using supported formats: {0}"".format(formatList)) goodlogging.Log.DecreaseIndent() return formatList" 3621,"def _UserUpdateIgnoredDirs(self, origIgnoredDirs = []): """""" Add ignored directories to database table. Always called if the database table is empty. User can build a list of entries to add to the database table (one entry at a time). Once finished they select the finish option and all entries will be added to the table. They can reset the list at any time before finishing. Parameters ---------- origIgnoredDirs : list [optional : default = []] List of original ignored directories from database table. Returns ---------- string List of updated ignored directories from database table. 
"""""" ignoredDirs = list(origIgnoredDirs) inputDone = None while inputDone is None: prompt = ""Enter new directory to ignore (e.g. DONE), "" \ ""'r' to reset directory list, "" \ ""'f' to finish or "" \ ""'x' to exit: "" response = goodlogging.Log.Input(""CLEAR"", prompt) if response.lower() == 'x': sys.exit(0) elif response.lower() == 'f': inputDone = 1 elif response.lower() == 'r': ignoredDirs = [] else: if response is not None: ignoredDirs.append(response) ignoredDirs = set(ignoredDirs) origIgnoredDirs = set(origIgnoredDirs) if ignoredDirs != origIgnoredDirs: self._db.PurgeIgnoredDirs() for ignoredDir in ignoredDirs: self._db.AddIgnoredDir(ignoredDir) return list(ignoredDirs)" 3622,"def _GetIgnoredDirs(self): """""" Get ignored directories values from database table. If no values found user will be prompted to enter values for this table. Returns ---------- string List of ignored directories from database table. """""" goodlogging.Log.Info(""CLEAR"", ""Loading ignored directories from database:"") goodlogging.Log.IncreaseIndent() ignoredDirs = self._db.GetIgnoredDirs() if ignoredDirs is None: goodlogging.Log.Info(""CLEAR"", ""No ignored directories exist in database"") ignoredDirs = self._UserUpdateIgnoredDirs() else: goodlogging.Log.Info(""CLEAR"", ""Got ignored directories from database: {0}"".format(ignoredDirs)) if self._archiveDir not in ignoredDirs: ignoredDirs.append(self._archiveDir) goodlogging.Log.Info(""CLEAR"", ""Using ignored directories: {0}"".format(ignoredDirs)) goodlogging.Log.DecreaseIndent() return ignoredDirs" 3623,"def _GetDatabaseConfig(self): """""" Get all configuration from database. This includes values from the Config table as well as populating lists for supported formats and ignored directories from their respective database tables. """""" goodlogging.Log.Seperator() goodlogging.Log.Info(""CLEAR"", ""Getting configuration variables..."") goodlogging.Log.IncreaseIndent() # SOURCE DIRECTORY if self._sourceDir is None: self._sourceDir = self._GetConfigValue('SourceDir', 'source directory') # TV DIRECTORY if self._inPlaceRename is False and self._tvDir is None: self._tvDir = self._GetConfigValue('TVDir', 'tv directory') # ARCHIVE DIRECTORY self._archiveDir = self._GetConfigValue('ArchiveDir', 'archive directory', isDir = False) # SUPPORTED FILE FORMATS self._supportedFormatsList = self._GetSupportedFormats() # IGNORED DIRECTORIES self._ignoredDirsList = self._GetIgnoredDirs() goodlogging.Log.NewLine() goodlogging.Log.Info(""CLEAR"", ""Configuation is:"") goodlogging.Log.IncreaseIndent() goodlogging.Log.Info(""CLEAR"", ""Source directory = {0}"".format(self._sourceDir)) goodlogging.Log.Info(""CLEAR"", ""TV directory = {0}"".format(self._tvDir)) goodlogging.Log.Info(""CLEAR"", ""Supported formats = {0}"".format(self._supportedFormatsList)) goodlogging.Log.Info(""CLEAR"", ""Ignored directory list = {0}"".format(self._ignoredDirsList)) goodlogging.Log.ResetIndent()" 3624,"def _GetArgs(self): """""" Parse plusargs. 
"""""" parser = argparse.ArgumentParser() parser.add_argument('-s', '--src', help='override database source directory') parser.add_argument('-d', '--dst', help='override database destination directory') parser.add_argument('-e', '--extract', help='enable extracting of rar files', action=""store_true"") parser.add_argument('-c', '--copy', help='enable copying between file systems', action=""store_true"") parser.add_argument('-i', '--inplace', help='rename files in place', action=""store_true"") parser.add_argument('-u', '--update_db', help='provides option to update existing database fields', action=""store_true"") parser.add_argument('-p', '--print_db', help='print contents of database', action=""store_true"") parser.add_argument('-n', '--no_input', help='automatically accept or skip all user input', action=""store_true"") parser.add_argument('-nr', '--no_input_rename', help='automatically accept or skip user input for guide lookup and rename', action=""store_true"") parser.add_argument('-ne', '--no_input_extract', help='automatically accept or skip user input for extraction', action=""store_true"") parser.add_argument('--debug', help='enable full logging', action=""store_true"") parser.add_argument('--tags', help='enable tags on log info', action=""store_true"") parser.add_argument('--test', help='run with test database', action=""store_true"") parser.add_argument('--reset', help='resets database', action=""store_true"") args = parser.parse_args() if args.test: self._databasePath = 'test.db' if args.no_input or args.no_input_rename: self._skipUserInputRename = True if args.no_input or args.no_input_extract: self._skipUserInputExtract = True if args.reset: goodlogging.Log.Info(""CLEAR"", ""*WARNING* YOU ARE ABOUT TO DELETE DATABASE {0}"".format(self._databasePath)) response = goodlogging.Log.Input(""CLEAR"", ""Are you sure you want to proceed [y/n]? "") if response.lower() == 'y': if(os.path.isfile(self._databasePath)): os.remove(self._databasePath) else: sys.exit(0) if args.inplace: self._inPlaceRename = True if args.copy: self._crossSystemCopyEnabled = True if args.tags: goodlogging.Log.tagsEnabled = 1 if args.debug: goodlogging.Log.verbosityThreshold = goodlogging.Verbosity.MINIMAL if args.update_db: self._dbUpdate = True if args.print_db: self._dbPrint = True if args.extract: self._enableExtract = True if args.src: if os.path.isdir(args.src): self._sourceDir = args.src else: goodlogging.Log.Fatal(""CLEAR"", 'Source directory argument is not recognised as a directory: {}'.format(args.src)) if args.dst: if os.path.isdir(args.dst): self._tvDir = args.dst else: goodlogging.Log.Fatal(""CLEAR"", 'Target directory argument is not recognised as a directory: {}'.format(args.dst))" 3625,"def _GetSupportedFilesInDir(self, fileDir, fileList, supportedFormatList, ignoreDirList): """""" Recursively get all supported files given a root search directory. Supported file extensions are given as a list, as are any directories which should be ignored. The result will be appended to the given file list argument. Parameters ---------- fileDir : string Path to root of directory tree to search. fileList : string List to add any found files to. supportedFormatList : list List of supported file extensions. ignoreDirList : list List of directories to ignore. 
"""""" goodlogging.Log.Info(""CLEAR"", ""Parsing file directory: {0}"".format(fileDir)) if os.path.isdir(fileDir) is True: for globPath in glob.glob(os.path.join(fileDir, '*')): if util.FileExtensionMatch(globPath, supportedFormatList): newFile = tvfile.TVFile(globPath) if newFile.GetShowDetails(): fileList.append(newFile) elif os.path.isdir(globPath): if(os.path.basename(globPath) in ignoreDirList): goodlogging.Log.Info(""CLEAR"", ""Skipping ignored directory: {0}"".format(globPath)) else: self._GetSupportedFilesInDir(globPath, fileList, supportedFormatList, ignoreDirList) else: goodlogging.Log.Info(""CLEAR"", ""Ignoring unsupported file or folder: {0}"".format(globPath)) else: goodlogging.Log.Info(""CLEAR"", ""Invalid non-directory path given to parse"")" 3626,"def Run(self): """""" Main entry point for ClearManager class. Does the following steps: - Parse script arguments. - Optionally print or update database tables. - Get all configuration settings from database. - Optionally parse directory for file extraction. - Recursively parse source directory for files matching supported format list. - Call renamer.TVRenamer with file list. """""" self._GetArgs() goodlogging.Log.Info(""CLEAR"", ""Using database: {0}"".format(self._databasePath)) self._db = database.RenamerDB(self._databasePath) if self._dbPrint or self._dbUpdate: goodlogging.Log.Seperator() self._db.PrintAllTables() if self._dbUpdate: goodlogging.Log.Seperator() self._db.ManualUpdateTables() self._GetDatabaseConfig() if self._enableExtract: goodlogging.Log.Seperator() extractFileList = [] goodlogging.Log.Info(""CLEAR"", ""Parsing source directory for compressed files"") goodlogging.Log.IncreaseIndent() extract.GetCompressedFilesInDir(self._sourceDir, extractFileList, self._ignoredDirsList) goodlogging.Log.DecreaseIndent() goodlogging.Log.Seperator() extract.Extract(extractFileList, self._supportedFormatsList, self._archiveDir, self._skipUserInputExtract) goodlogging.Log.Seperator() tvFileList = [] goodlogging.Log.Info(""CLEAR"", ""Parsing source directory for compatible files"") goodlogging.Log.IncreaseIndent() self._GetSupportedFilesInDir(self._sourceDir, tvFileList, self._supportedFormatsList, self._ignoredDirsList) goodlogging.Log.DecreaseIndent() tvRenamer = renamer.TVRenamer(self._db, tvFileList, self._archiveDir, guideName = 'EPGUIDES', tvDir = self._tvDir, inPlaceRename = self._inPlaceRename, forceCopy = self._crossSystemCopyEnabled, skipUserInput = self._skipUserInputRename) tvRenamer.Run()" 3627,"def cli(ctx, amount, index, stage): """"""Transform data"""""" if not ctx.bubble: ctx.say_yellow('There is no bubble present, will not transform') raise click.Abort() path = ctx.home + '/' STAGE = None RULES = None UNIQ_KEYS_PULL = None UNIQ_KEYS_PUSH = None CLEAN_MISSING_AFTER_SECONDS = None if stage in STAGES and stage in ctx.cfg.CFG: STAGE = ctx.cfg.CFG[stage] if not STAGE: ctx.say_red('There is no STAGE in CFG:' + stage) ctx.say_yellow('please check configuration in ' + ctx.home + '/config/config.yaml') raise click.Abort() if 'TRANSFORM' in STAGE: TRANSFORM = ctx.cfg.CFG[stage].TRANSFORM else: ctx.say_yellow(""""""There is no transform defined in the configuration, will not transform, when pushing the results of step 'pulled' will be read instead of 'push' """""") raise click.Abort() if 'RULES' in TRANSFORM: RULES = TRANSFORM.RULES if 'UNIQ_KEYS_PULL' in TRANSFORM: UNIQ_KEYS_PULL = TRANSFORM.UNIQ_KEYS_PULL if 'UNIQ_KEYS_PUSH' in TRANSFORM: UNIQ_KEYS_PUSH = TRANSFORM.UNIQ_KEYS_PUSH if 'CLEAN_MISSING_AFTER_SECONDS' in 
TRANSFORM: CLEAN_MISSING_AFTER_SECONDS = TRANSFORM.CLEAN_MISSING_AFTER_SECONDS if not RULES: ctx.say_red('There is no TRANSFORM.RULES in stage:' + stage) ctx.say_yellow('please check configuration in ' + ctx.home + '/config/config.yaml') raise click.Abort() full_data = False if amount == -1 and index == -1: full_data = True data_gen = bubble_lod_load(ctx, 'store', stage) stored_data = {} for stored_data_item in data_gen: stored_data = stored_data_item break # first_only ctx.gbc.say('stored:', stuff=stored_data, verbosity=150) cfgdict = {} cfgdict['CFG'] = ctx.cfg.CFG cfgdict['GENERAL_BUBBLE_CONTEXT'] = ctx.GLOBALS['gbc'] cfgdict['ARGS'] = {'stage': stage, 'path': path} if type(RULES) == str and RULES.endswith('.bubble'): rules = get_bubble(ctx.gbc, path + RULES) rule_type = 'bubble' transformer = Transformer(rules=rules, rule_type=rule_type, store=stored_data, config=cfgdict, bubble_path=path, verbose=ctx.get_verbose()) src_data = bubble_lod_load(ctx, 'pulled', stage) to_transform = get_gen_slice(ctx.gbc, src_data, amount, index) ctx.gbc.say('sliced to transform:', stuff=to_transform, verbosity=50) if UNIQ_KEYS_PULL: to_transform = make_uniq_for_step(ctx=ctx, ukeys=UNIQ_KEYS_PULL, step='uniq_pull', stage=stage, full_data=full_data, clean_missing_after_seconds=CLEAN_MISSING_AFTER_SECONDS, to_uniq=to_transform) ctx.gbc.say('transformer to transform', stuff=to_transform, verbosity=295) transformed_count = Counter() error_count = Counter() result = do_yielding_transform(ctx, transformer, to_transform, transformed_count, error_count) ########################################################################## pfr = bubble_lod_dump(ctx=ctx, step='push', stage=stage, full_data=full_data, reset=True, data_gen=result) ctx.say('transformed [%d] objects' % pfr['total']) # closing the store, to be sure, get store after yielding transform has # completed store = transformer.get_store() ctx.gbc.say('transformer persistant storage', stuff=store, verbosity=1000) pfr = bubble_lod_dump(ctx=ctx, step='store', stage=stage, full_data=full_data, reset=True, data_gen=[store]) ctx.say('pulled [%d] objects' % pfr['total']) ctx.gbc.say('transformer all done :transformed_count:%d,error_count:%d' % (transformed_count.get_total(), error_count.get_total()), verbosity=10) if UNIQ_KEYS_PUSH: make_uniq_for_step(ctx=ctx, ukeys=UNIQ_KEYS_PUSH, step='uniq_push', stage=stage, full_data=full_data, clean_missing_after_seconds=CLEAN_MISSING_AFTER_SECONDS, to_uniq=result) # TODO: check if to_uniq can be loaded inside make_uniq # the result of the transform is a generator and should be 'empty' already # by the previous dump of the results. stats = {} stats['transformed_stat_error_count'] = error_count.get_total() stats['transformed_stat_transformed_count'] = transformed_count.get_total() update_stats(ctx, stage, stats) return True" 3628,"def _merge_prims(prims, *, debug=False, stagenames=None, stages=None): """"""Helper method to greedily combine Frames (of Primitives) or Primitives based on the rules defined in the Primitive's class. Used by a CommandQueue during compilation and optimization of Primitives. Args: prims: A list or FrameSequence of Primitives or Frames (respectively) to try to merge together. debug: A boolean for if debug information should be generated. stages: A list to be edited by this method to store snapshots of the compilation state. Used if debug is True. stagenames: A list of strings describing each debug snapshot of the compiilation process. Used if debug is True. 
Returns: A list or FrameSequence (the same type as prims) of the compined Primitives or Frames. """""" if isinstance(prims, FrameSequence): merged_prims = FrameSequence(prims._chain) else: merged_prims = [] working_prim = prims[0] i = 1 logging_tmp = [] while i < len(prims): tmp = prims[i] res = working_prim.merge(tmp) if res is not None: working_prim = res if debug:#pragma: no cover logging_tmp.append( [p.snapshot() for p in merged_prims+[working_prim]]) else: merged_prims.append(working_prim) working_prim = tmp i += 1 merged_prims.append(working_prim) if debug:#pragma: no cover stages.append(logging_tmp) stagenames.append(""Merge intermediate states"") return merged_prims" 3629,"def _compile_device_specific_prims(self, debug=False, stages=None, stagenames=None): """"""Using the data stored in the CommandQueue, Extract and align compatible sequences of Primitives and compile/optimize the Primitives down into a stream of Level 2 device agnostic primitives. BACKGROUND: Device Specific primitives present a special opportunity for optimization. Many JTAG systems program one device on the chain at a time. But because all devices on a JTAG chain are sent information at once, NO-OP instructions are sent to these other devices. When programming multiple devices, Sending these NO-OPS is a missed opportunity for optimization. Instead of configuring one device at a time, it is more efficient to collect instructions for all deices, and align them so multiple devices can be configured at the same time. WAT THIS METHOD DOES: This method takes in a list of Primitives, groups the device specific primitives by target device, aligns the sequences of device instructions, and expands the aligned sequences into a flat list of device agnostic primitives. Args: debug: A boolean for if debug information should be generated. stages: A list to be edited by this method to store snapshots of the compilation state. Used if debug is True. stagenames: A list of strings describing each debug snapshot of the compiilation process. Used if debug is True. """""" ############### GROUPING BY EXEC BOUNDARIES!################ fences = [] fence = [self[0]] for p in self[1:]: if type(fence[0])._layer == type(p)._layer and\ isinstance(fence[0], DeviceTarget) == \ isinstance(p, DeviceTarget): fence.append(p) else: fences.append(fence) fence = [p] fences.append(fence) if debug: #pragma: no cover formatted_fences = [] for fence in fences: formatted_fence = [p.snapshot() for p in fence] formatted_fences.append(formatted_fence) formatted_fences.append([]) stages.append(formatted_fences[:-1]) #Ignore trailing [] stagenames.append(""Fencing off execution boundaries"") ############## SPLIT GROUPS BY DEVICE TARGET! 
############## split_fences = [] for fence in fences: tmp_chains = {} for p in fence: k = p._device_index \ if isinstance(p, DeviceTarget) else ""chain"" subchain = tmp_chains.setdefault(k, []).append(p) split_fences.append(list(tmp_chains.values())) if debug:#pragma: no cover formatted_split_fences = [] for fence in split_fences: for group in fence: formatted_split_fences.append([p.snapshot() for p in group]) formatted_split_fences.append([]) stages.append(formatted_split_fences[:-1]) stagenames.append(""Grouping prims of each boundary by "" ""target device"") ############## ALIGN SEQUENCES AND PAD FRAMES ############## #FIRST DEV REQUIRED LINE grouped_fences = [ FrameSequence(self._chain, *fence).finalize() for f_i, fence in enumerate(split_fences) ] if debug:#pragma: no cover formatted_grouped_fences = [] for fence in grouped_fences: formatted_grouped_fences += fence.snapshot() + [[]] stages.append(formatted_grouped_fences[:-1]) stagenames.append(""Aligning and combining each group dev "" ""prim stream"") ################## RECOMBINE FRAME GROUPS ################## ingested_chain = grouped_fences[0] for fence in grouped_fences[1:]: ingested_chain += fence if debug:#pragma: no cover stages.append(ingested_chain.snapshot()) stagenames.append(""Recombining sanitized exec boundaries"") ###################### POST INGESTION ###################### ################ Flatten out LV3 Primitives ################ while(any((f._layer == 3 for f in ingested_chain))): ################# COMBINE COMPATIBLE PRIMS ################# ingested_chain = _merge_prims(ingested_chain) if debug:#pragma: no cover stages.append(ingested_chain.snapshot()) stagenames.append(""Combining compatible lv3 prims."") ################ TRANSLATION TO LOWER LAYER ################ sm = JTAGStateMachine(self._chain._sm.state) expanded_prims = FrameSequence(self._chain) for f in ingested_chain: if f._layer == 3: expanded_prims += f.expand_macro(sm) else: expanded_prims.append(f) expanded_prims.finalize() ingested_chain = expanded_prims if self._fsm is None: self._fsm = sm assert self._fsm == sm, ""Target %s != Actual %s""%\ (self._fsm.state, sm.state) if debug:#pragma: no cover stages.append(ingested_chain.snapshot()) stagenames.append(""Expanding lv3 prims"") ############## Flatten out Dev LV2 Primitives ############## while(any((isinstance(f._valid_prim, DeviceTarget) for f in ingested_chain))): ################# COMBINE COMPATIBLE PRIMS ################# ingested_chain = _merge_prims(ingested_chain) if debug:#pragma: no cover stages.append(ingested_chain.snapshot()) stagenames.append(""Merging Device Specific Prims"") ################ TRANSLATION TO LOWER LAYER ################ sm = JTAGStateMachine(self._chain._sm.state) expanded_prims = FrameSequence(self._chain) for f in ingested_chain: if issubclass(f._prim_type, DeviceTarget): expanded_prims += f.expand_macro(sm) else: f[0].apply_tap_effect(sm) expanded_prims.append(f) expanded_prims.finalize() ingested_chain = expanded_prims if self._fsm is None: self._fsm = sm assert self._fsm == sm, ""Target %s != Actual %s""%\ (self._fsm.state, sm.state) if debug:#pragma: no cover stages.append(ingested_chain.snapshot()) stagenames.append(""Expanding Device Specific Prims"") ############ Convert FrameSequence to flat array ########### flattened_prims = [f._valid_prim for f in ingested_chain] if debug:#pragma: no cover stages.append([[p.snapshot() for p in flattened_prims]]) stagenames.append(""Converting format to single stream."") return flattened_prims" 3630,"def flush(self): 
""""""Force the queue of Primitives to compile, execute on the Controller, and fulfill promises with the data returned."""""" self.stages = [] self.stagenames = [] if not self.queue: return if self.print_statistics:#pragma: no cover print(""LEN OF QUENE"", len(self)) t = time() if self._chain._collect_compiler_artifacts: self._compile(debug=True, stages=self.stages, stagenames=self.stagenames) else: self._compile() if self.debug: print(""ABOUT TO EXEC"", self.queue)#pragma: no cover if self.print_statistics:#pragma: no cover print(""COMPILE TIME"", time()-t) print(""TOTAL BITS OF ALL PRIMS"", sum( (p.count for p in self.queue if hasattr(p, 'count')))) t = time() self._chain._controller._execute_primitives(self.queue) if self.print_statistics: print(""EXECUTE TIME"", time()-t)#pragma: no cover self.queue = [] self._chain._sm.state = self._fsm.state" 3631,"def Ping(self, request, context): """""" Invoke the Server health endpoint :param request: Empty :param context: the request context :return: Status message 'alive' """""" status = processor_pb2.Status() status.message='alive' return status" 3632,"def Process(self, request, context): """""" Invoke the Grpc Processor, delegating to the handler_function. If the handler_function has a single argument, pass the Message payload. If two arguments, pass the payload and headers as positional arguments: handler_function(payload, headers). If the handler function return is not of type(Message), create a new Message using the original header values (new id and timestamp). :param request: the message :param context: the request context :return: response message """""" logger.debug(request) message = Message.__from_protobuf_message__(request) sig = getfullargspec(self.handler_function) if len(sig.args) == 2: result = self.handler_function(message.payload, message.headers) elif len(sig.args) == 1: result = self.handler_function(message.payload) else: context.set_code(grpc.StatusCode.INTERNAL) context.set_details('wrong number of arguments for handler function - must be 1 or 2') raise RuntimeError('wrong number of arguments for handler function - must be 1 or 2') if self.component_type == StreamComponent.PROCESSOR: if type(result) == Message: return result.__to_protobuf_message__() else: headers = MessageHeaders() headers.copy(message.headers) return Message(result, headers).__to_protobuf_message__()" 3633,"def step_impl(context): """"""Compares text as written to the log output"""""" expected_lines = context.text.split('\n') assert len(expected_lines) == len(context.output) for expected, actual in zip(expected_lines, context.output): print('--\n\texpected: {}\n\tactual: {}'.format(expected, actual)) assert expected == actual" 3634,"def _ParseShowList(self, checkOnly=False): """""" Read self._allShowList as csv file and make list of titles and IDs. Parameters ---------- checkOnly : boolean [optional : default = False] If checkOnly is True this will only check to ensure the column headers can be extracted correctly. 
"""""" showTitleList = [] showIDList = [] csvReader = csv.reader(self._allShowList.splitlines()) for rowCnt, row in enumerate(csvReader): if rowCnt == 0: # Get header column index for colCnt, column in enumerate(row): if column == 'title': titleIndex = colCnt if column == self.ID_LOOKUP_TAG: lookupIndex = colCnt else: try: showTitleList.append(row[titleIndex]) showIDList.append(row[lookupIndex]) except UnboundLocalError: goodlogging.Log.Fatal(""EPGUIDE"", ""Error detected in EPGUIDES allshows csv content"") else: if checkOnly and rowCnt > 1: return True self._showTitleList = showTitleList self._showIDList = showIDList return True" 3635,"def _GetAllShowList(self): """""" Populates self._allShowList with the epguides all show info. On the first lookup for a day the information will be loaded from the epguides url. This will be saved to local file _epguides_YYYYMMDD.csv and any old files will be removed. Subsequent accesses for the same day will read this file. """""" today = datetime.date.today().strftime(""%Y%m%d"") saveFile = '_epguides_' + today + '.csv' saveFilePath = os.path.join(self._saveDir, saveFile) if os.path.exists(saveFilePath): # Load data previous saved to file with open(saveFilePath, 'r') as allShowsFile: self._allShowList = allShowsFile.read() else: # Download new list from EPGUIDES and strip any leading or trailing whitespace self._allShowList = util.WebLookup(self.ALLSHOW_IDLIST_URL).strip() if self._ParseShowList(checkOnly=True): # Save to file to avoid multiple url requests in same day with open(saveFilePath, 'w') as allShowsFile: goodlogging.Log.Info(""EPGUIDE"", ""Adding new EPGUIDES file: {0}"".format(saveFilePath), verbosity=self.logVerbosity) allShowsFile.write(self._allShowList) # Delete old copies of this file globPattern = '_epguides_????????.csv' globFilePath = os.path.join(self._saveDir, globPattern) for filePath in glob.glob(globFilePath): if filePath != saveFilePath: goodlogging.Log.Info(""EPGUIDE"", ""Removing old EPGUIDES file: {0}"".format(filePath), verbosity=self.logVerbosity) os.remove(filePath)" 3636,"def _GetShowID(self, showName): """""" Get epguides show id for a given show name. Attempts to match the given show name against a show title in self._showTitleList and, if found, returns the corresponding index in self._showIDList. Parameters ---------- showName : string Show name to get show ID for. Returns ---------- int or None If a show id is found this will be returned, otherwise None is returned. """""" self._GetTitleList() self._GetIDList() for index, showTitle in enumerate(self._showTitleList): if showName == showTitle: return self._showIDList[index] return None" 3637,"def _ExtractDataFromShowHtml(self, html): """""" Extracts csv show data from epguides html source. Parameters ---------- html : string Block of html text Returns ---------- string Show data extracted from html text in csv format. """""" htmlLines = html.splitlines() for count, line in enumerate(htmlLines): if line.strip() == r'
<pre>': startLine = count+1 if line.strip() == r'</pre>
    ': endLine = count try: dataList = htmlLines[startLine:endLine] dataString = '\n'.join(dataList) return dataString.strip() except: raise Exception(""Show content not found - check EPGuides html formatting"")" 3638,"def _GetEpisodeName(self, showID, season, episode): """""" Get episode name from epguides show info. Parameters ---------- showID : string Identifier matching show in epguides. season : int Season number. epiosde : int Epiosde number. Returns ---------- int or None If an episode name is found this is returned, otherwise the return value is None. """""" # Load data for showID from dictionary showInfo = csv.reader(self._showInfoDict[showID].splitlines()) for rowCnt, row in enumerate(showInfo): if rowCnt == 0: # Get header column index for colCnt, column in enumerate(row): if column == 'season': seasonIndex = colCnt if column == 'episode': episodeIndex = colCnt if column == 'title': titleIndex = colCnt else: # Iterate rows until matching season and episode found try: int(row[seasonIndex]) int(row[episodeIndex]) except ValueError: # Skip rows which don't provide integer season or episode numbers pass else: if int(row[seasonIndex]) == int(season) and int(row[episodeIndex]) == int(episode): goodlogging.Log.Info(""EPGUIDE"", ""Episode name is {0}"".format(row[titleIndex]), verbosity=self.logVerbosity) return row[titleIndex] return None" 3639,"def ShowNameLookUp(self, string): """""" Attempts to find the best match for the given string in the list of epguides show titles. If this list has not previous been generated it will be generated first. Parameters ---------- string : string String to find show name match against. Returns ---------- string Show name which best matches input string. """""" goodlogging.Log.Info(""EPGUIDES"", ""Looking up show name match for string '{0}' in guide"".format(string), verbosity=self.logVerbosity) self._GetTitleList() showName = util.GetBestMatch(string, self._showTitleList) return(showName)" 3640,"def EpisodeNameLookUp(self, showName, season, episode): """""" Get the episode name correspondng to the given show name, season number and episode number. Parameters ---------- showName : string Name of TV show. This must match an entry in the epguides title list (this can be achieved by calling ShowNameLookUp first). season : int Season number. epiosde : int Epiosde number. Returns ---------- string or None If an episode name can be found it is returned, otherwise the return value is None. """""" goodlogging.Log.Info(""EPGUIDE"", ""Looking up episode name for {0} S{1}E{2}"".format(showName, season, episode), verbosity=self.logVerbosity) goodlogging.Log.IncreaseIndent() showID = self._GetShowID(showName) if showID is not None: try: self._showInfoDict[showID] except KeyError: goodlogging.Log.Info(""EPGUIDE"", ""Looking up info for new show: {0}(ID:{1})"".format(showName, showID), verbosity=self.logVerbosity) urlData = util.WebLookup(self.EPISODE_LOOKUP_URL, {self.EP_LOOKUP_TAG: showID}) self._showInfoDict[showID] = self._ExtractDataFromShowHtml(urlData) else: goodlogging.Log.Info(""EPGUIDE"", ""Reusing show info previous obtained for: {0}({1})"".format(showName, showID), verbosity=self.logVerbosity) finally: episodeName = self._GetEpisodeName(showID, season, episode) goodlogging.Log.DecreaseIndent() return episodeName goodlogging.Log.DecreaseIndent()" 3641,"def clone(cls, srcpath, destpath): """"""Clone an existing repository to a new bare repository."""""" # Mercurial will not create intermediate directories for clones. 
try: os.makedirs(destpath) except OSError as e: if not e.errno == errno.EEXIST: raise cmd = [HG, 'clone', '--quiet', '--noupdate', srcpath, destpath] subprocess.check_call(cmd) return cls(destpath)" 3642,"def create(cls, path): """"""Create a new repository"""""" cmd = [HG, 'init', path] subprocess.check_call(cmd) return cls(path)" 3643,"def private_path(self): """"""Get the path to a directory which can be used to store arbitrary data This directory should not conflict with any of the repository internals. The directory should be created if it does not already exist. """""" path = os.path.join(self.path, '.hg', '.private') try: os.mkdir(path) except OSError as e: if e.errno != errno.EEXIST: raise return path" 3644,"def bookmarks(self): """"""Get list of bookmarks"""""" cmd = [HG, 'bookmarks'] output = self._command(cmd).decode(self.encoding, 'replace') if output.startswith('no bookmarks set'): return [] results = [] for line in output.splitlines(): m = bookmarks_rx.match(line) assert m, 'unexpected output: ' + line results.append(m.group('name')) return results" 3645,"def content(self): """"""Get the file contents. This property is cached. The file is only read once. """""" if not self._content: self._content = self._read() return self._content" 3646,"def config(self): """"""Get a Configuration object from the file contents."""""" conf = config.Configuration() for namespace in self.namespaces: if not hasattr(conf, namespace): if not self._strict: continue raise exc.NamespaceNotRegistered( ""The namespace {0} is not registered."".format(namespace) ) name = getattr(conf, namespace) for item, value in compat.iteritems(self.items(namespace)): if not hasattr(name, item): if not self._strict: continue raise exc.OptionNotRegistered( ""The option {0} is not registered."".format(item) ) setattr(name, item, value) return conf" 3647,"def _read(self): """"""Open the file and return its contents."""""" with open(self.path, 'r') as file_handle: content = file_handle.read() # Py27 INI config parser chokes if the content provided is not unicode. # All other versions seems to work appropriately. Forcing the value to # unicode here in order to resolve this issue. 
return compat.unicode(content)" 3648,"async def ask(self, body, quick_replies=None, options=None, user=None): """""" simple ask with predefined quick replies :param body: :param quick_replies: (optional) in form of {'title': , 'payload': } :param options: :param user: :return: """""" await self.send_text_message_to_all_interfaces( recipient=user, text=body, quick_replies=quick_replies, options=options, ) return any.Any()" 3649,"async def say(self, body, user, options): """""" say something to user :param body: :param user: :return: """""" return await self.send_text_message_to_all_interfaces( recipient=user, text=body, options=options)" 3650,"async def send_audio(self, url, user, options=None): """""" send audio message :param url: link to the audio file :param user: target user :param options: :return: """""" tasks = [interface.send_audio(user, url, options) for _, interface in self.interfaces.items()] return [body for body in await asyncio.gather(*tasks)]" 3651,"async def send_text_message_to_all_interfaces(self, *args, **kwargs): """""" TODO: we should know from where user has come and use right interface as well right interface can be chosen :param args: :param kwargs: :return: """""" logger.debug('async_send_text_message_to_all_interfaces') tasks = [interface.send_text_message(*args, **kwargs) for _, interface in self.interfaces.items()] logger.debug(' tasks') logger.debug(tasks) res = [body for body in await asyncio.gather(*tasks)] logger.debug(' res') logger.debug(res) return res" 3652,"def connect(self, protocolFactory): """"""Starts a process and connect a protocol to it. """""" deferred = self._startProcess() deferred.addCallback(self._connectRelay, protocolFactory) deferred.addCallback(self._startRelay) return deferred" 3653,"def _startProcess(self): """"""Use the inductor to start the process we want to relay data from. """""" connectedDeferred = defer.Deferred() processProtocol = RelayProcessProtocol(connectedDeferred) self.inductor.execute(processProtocol, *self.inductorArgs) return connectedDeferred" 3654,"def _connectRelay(self, process, protocolFactory): """"""Set up and connect the protocol we want to relay to the process. This method is automatically called when the process is started, and we are ready to relay through it. """""" try: wf = _WrappingFactory(protocolFactory) connector = RelayConnector(process, wf, self.timeout, self.inductor.reactor) connector.connect() except: return defer.fail() # Return a deferred that is called back when the protocol is connected. return wf._onConnection" 3655,"def _startRelay(self, client): """"""Start relaying data between the process and the protocol. This method is called when the protocol is connected. """""" process = client.transport.connector.process # Relay any buffered data that was received from the process before # we got connected and started relaying. for _, data in process.data: client.dataReceived(data) process.protocol = client @process._endedDeferred.addBoth def stopRelay(reason): """"""Stop relaying data. Called when the process has ended. """""" relay = client.transport relay.loseConnection(reason) connector = relay.connector connector.connectionLost(reason) # Pass through the client protocol. return client" 3656,"def connectRelay(self): """"""Builds the target protocol and connects it to the relay transport. 
"""""" self.protocol = self.connector.buildProtocol(None) self.connected = True self.protocol.makeConnection(self)" 3657,"def childDataReceived(self, childFD, data): """"""Relay data received on any file descriptor to the process """""" protocol = getattr(self, 'protocol', None) if protocol: protocol.dataReceived(data) else: self.data.append((childFD, data))" 3658,"def publish(self, user, provider, obj, comment, **kwargs): ''' user - django User or UserSocialAuth instance provider - name of publisher provider obj - sharing object comment - string ''' social_user = self._get_social_user(user, provider) backend = self.get_backend(social_user, provider, context=kwargs) return backend.publish(obj, comment)" 3659,"def check(self, user, provider, permission, **kwargs): ''' user - django User or UserSocialAuth instance provider - name of publisher provider permission - if backend maintains check permissions vk - binary mask in int format facebook - scope string ''' try: social_user = self._get_social_user(user, provider) if not social_user: return False except SocialUserDoesNotExist: return False backend = self.get_backend(social_user, provider, context=kwargs) return backend.check(permission)" 3660,"def recognize_byte(self, image, timeout=10): """"""Process a byte image buffer."""""" result = [] alpr = subprocess.Popen( self._cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.DEVNULL ) # send image try: # pylint: disable=unused-variable stdout, stderr = alpr.communicate(input=image, timeout=10) stdout = io.StringIO(str(stdout, 'utf-8')) except subprocess.TimeoutExpired: _LOGGER.error(""Alpr process timeout!"") alpr.kill() return None tmp_res = {} while True: line = stdout.readline() if not line: if len(tmp_res) > 0: result.append(tmp_res) break new_plate = self.__re_plate.search(line) new_result = self.__re_result.search(line) # found a new plate if new_plate and len(tmp_res) > 0: result.append(tmp_res) tmp_res = {} continue # found plate result if new_result: try: tmp_res[new_result.group(1)] = float(new_result.group(2)) except ValueError: continue _LOGGER.debug(""Process alpr with result: %s"", result) return result" 3661,"def finished(finished_status, update_interval, table, status_column, edit_at_column): """""" Create text sql statement query for sqlalchemy that getting all finished task. :param finished_status: int, status code that greater or equal than this will be considered as finished. :param update_interval: int, the record will be updated every x seconds. :return: sqlalchemy text sql statement. **中文文档** 状态码大于某个值, 并且, 更新时间在最近一段时间以内. """""" sql = select([table]).where( and_(*[ status_column >= finished_status, edit_at_column >= x_seconds_before_now(update_interval) ]) ) return sql" 3662,"def unfinished(finished_status, update_interval, table, status_column, edit_at_column): """""" Create text sql statement query for sqlalchemy that getting all unfinished task. :param finished_status: int, status code that less than this will be considered as unfinished. :param update_interval: int, the record will be updated every x seconds. :return: sqlalchemy text sql statement. **中文文档** 状态码小于某个值, 或者, 现在距离更新时间已经超过一定阈值. 
"""""" sql = select([table]).where( or_(*[ status_column < finished_status, edit_at_column < x_seconds_before_now(update_interval) ]) ) return sql" 3663,"def find_nearest(x, x0) -> Tuple[int, Any]: """""" This find_nearest function does NOT assume sorted input inputs: x: array (float, int, datetime, h5py.Dataset) within which to search for x0 x0: singleton or array of values to search for in x outputs: idx: index of flattened x nearest to x0 (i.e. works with higher than 1-D arrays also) xidx: x[idx] Observe how bisect.bisect() gives the incorrect result! idea based on: http://stackoverflow.com/questions/2566412/find-nearest-value-in-numpy-array """""" x = np.asanyarray(x) # for indexing upon return x0 = np.atleast_1d(x0) # %% if x.size == 0 or x0.size == 0: raise ValueError('empty input(s)') if x0.ndim not in (0, 1): raise ValueError('2-D x0 not handled yet') # %% ind = np.empty_like(x0, dtype=int) # NOTE: not trapping IndexError (all-nan) becaues returning None can surprise with slice indexing for i, xi in enumerate(x0): if xi is not None and (isinstance(xi, (datetime.datetime, datetime.date, np.datetime64)) or np.isfinite(xi)): ind[i] = np.nanargmin(abs(x-xi)) else: raise ValueError('x0 must NOT be None or NaN to avoid surprising None return value') return ind.squeeze()[()], x[ind].squeeze()[()]" 3664,"def ensure_context_attribute_exists(context, name, default_value=None): """""" Ensure a behave resource exists as attribute in the behave context. If this is not the case, the attribute is created by using the default_value. """""" if not hasattr(context, name): setattr(context, name, default_value)" 3665,"def ensure_workdir_exists(context): """""" Ensures that the work directory exists. In addition, the location of the workdir is stored as attribute in the context object. """""" ensure_context_attribute_exists(context, ""workdir"", None) if not context.workdir: context.workdir = os.path.abspath(WORKDIR) pathutil.ensure_directory_exists(context.workdir)" 3666,"def sorted_feed_cols(df): """""" takes a dataframe's columns that would be of the form: ['feed003', 'failsafe_feed999', 'override_feed000', 'feed001', 'feed002'] and returns: ['override_feed000', 'feed001', 'feed002', 'feed003', 'failsafe_feed999'] """""" cols = df.columns ind = [int(c.split(""feed"")[1]) for c in cols] cols = zip(ind,cols) cols.sort() cols = [c[1] for c in cols] return cols" 3667,"def mean_fill(adf): """""" Looks at each row, and calculates the mean. Honours the Trump override/failsafe logic. """""" ordpt = adf.values[0] if not pd.isnull(ordpt): return ordpt fdmn = adf.iloc[1:-1].mean() if not pd.isnull(fdmn): return fdmn flspt = adf.values[-1] if not pd.isnull(flspt): return flspt return nan" 3668,"def median_fill(adf): """""" Looks at each row, and chooses the median. Honours the Trump override/failsafe logic. """""" ordpt = adf.values[0] if not pd.isnull(ordpt): return ordpt fdmn = adf.iloc[1:-1].median() if not pd.isnull(fdmn): return fdmn flspt = adf.values[-1] if not pd.isnull(flspt): return flspt return nan" 3669,"def most_populated(adf): """""" Looks at each column, using the one with the most values Honours the Trump override/failsafe logic. """""" # just look at the feeds, ignore overrides and failsafes: feeds_only = adf[adf.columns[1:-1]] # find the most populated feed cnt_df = feeds_only.count() cnt = cnt_df.max() selected_feeds = cnt_df[cnt_df == cnt] # if there aren't any feeds, the first feed will work... 
if len(selected_feeds) == 0: pre_final = adf['feed001'] # if they are all empty # they should all be # equally empty else: #if there's one or more, take the highest priority one pre_final = adf[selected_feeds.index[0]] # create the final, applying the override and failsafe logic... final_df = pd.concat([adf.override_feed000, pre_final, adf.failsafe_feed999], axis=1) final_df = final_df.apply(_row_wise_priority, axis=1) return final_df" 3670,"def most_recent(adf): """""" Looks at each column, and chooses the feed with the most recent data point. Honours the Trump override/failsafe logic. """""" # just look at the feeds, ignore overrides and failsafes: feeds_only = adf[adf.columns[1:-1]] # find the feeds with the most recent data... feeds_with_data = feeds_only.dropna(how='all') selected_feeds = feeds_with_data.T.dropna().index # if there aren't any feeds, the first feed will work... if len(selected_feeds) == 0: pre_final = adf['feed001'] # if there all empyty # they should all be # equally empty else: #if there's one or more, take the highest priority one pre_final = adf[selected_feeds[0]] # create the final, applying the override and failsafe logic... final_df = pd.concat([adf.override_feed000, pre_final, adf.failsafe_feed999], axis=1) final_df = final_df.apply(_row_wise_priority, axis=1) return final_df" 3671,"def build_tri(adf): """""" Looks at each column, and chooses the feed with the most recent data point. Honours the Trump override/failsafe logic. """""" # just look at the capital (price), in ""feed one"", and income (dividend), in ""feed two"" cap, inc = adf.columns[1:3] data = adf[[cap,inc]] # find the feeds with the most recent data... inc_pct = data[inc].div(data[cap].shift(1)) cap_pct = data[cap].pct_change(1) pre_final = inc_pct + cap_pct # create the final, applying the override and failsafe logic... final_df = pd.concat([adf.override_feed000, pre_final, adf.failsafe_feed999], axis=1) final_df = final_df.apply(_row_wise_priority, axis=1) return final_df" 3672,"async def send_audio(self, url, user, options=None): """""" send audio message :param url: link to the audio file :param user: target user :param options: :return: """""" return await self.chat.send_audio(url, user, options)" 3673,"def use(self, middleware): """""" attache middleware :param middleware: :return: """""" logger.debug('use') logger.debug(middleware) self.middlewares.append(middleware) di.injector.register(instance=middleware) di.bind(middleware, auto=True) # TODO: should use DI somehow if check_spec(['send_text_message'], middleware): self.chat.add_interface(middleware) return middleware" 3674,"def del_by_idx(tree, idxs): """""" Delete a key entry based on numerical indexes into subtree lists. """""" if len(idxs) == 0: tree['item'] = None tree['subtrees'] = [] else: hidx, tidxs = idxs[0], idxs[1:] del_by_idx(tree['subtrees'][hidx][1], tidxs) if len(tree['subtrees'][hidx][1]['subtrees']) == 0: del tree['subtrees'][hidx]" 3675,"def find_in_tree(tree, key, perfect=False): """""" Helper to perform find in dictionary tree. 
"""""" if len(key) == 0: if tree['item'] is not None: return tree['item'], () else: for i in range(len(tree['subtrees'])): if not perfect and tree['subtrees'][i][0] == '*': item, trace = find_in_tree(tree['subtrees'][i][1], (), perfect) return item, (i,) + trace raise KeyError(key) else: head, tail = key[0], key[1:] for i in range(len(tree['subtrees'])): if tree['subtrees'][i][0] == head or \ not perfect and tree['subtrees'][i][0] == '*': try: item, trace = find_in_tree(tree['subtrees'][i][1], tail, perfect) return item, (i,) + trace except KeyError: pass raise KeyError(key)" 3676,"def dominates(p, q): """""" Test for path domination. An individual path element *a* dominates another path element *b*, written as *a* >= *b* if either *a* == *b* or *a* is a wild card. A path *p* = *p1*, *p2*, ..., *pn* dominates another path *q* = *q1*, *q2*, ..., *qm* if *n* == *m* and, for all *i*, *pi* >= *qi*. """""" return (len(p) == len(q) and all(map(lambda es: es[0] == es[1] or es[0] == '*', zip(p, q))))" 3677,"def find(self, key, perfect=False): """""" Find a key path in the tree, matching wildcards. Return value for key, along with index path through subtree lists to the result. Throw ``KeyError`` if the key path doesn't exist in the tree. """""" return find_in_tree(self.root, key, perfect)" 3678,"def _purge_unreachable(self, key): """""" Purge unreachable dominated key paths before inserting a new key path. """""" dels = [] for p in self: if dominates(key, p): dels.append(p) for k in dels: _, idxs = find_in_tree(self.root, k, perfect=True) del_by_idx(self.root, idxs)" 3679,"def register(self, name, namespace): """"""Register a new namespace with the Configuration object. Args: name (str): The name of the section/namespace. namespace (namespace.Namespace): The Namespace object to store. Raises: TypeError: If the namespace is not a Namespace object. ValueError: If the namespace is already registered. """""" if name in self._NAMESPACES: raise ValueError(""Namespace {0} already exists."".format(name)) if not isinstance(namespace, ns.Namespace): raise TypeError(""Namespaces must be of type Namespace."") self._NAMESPACES[name] = namespace" 3680,"def fromSearch(text): """""" Generates a regular expression from 'simple' search terms. 
:param text | :usage |>>> import projex.regex |>>> projex.regex.fromSearch('*cool*') |'^.*cool.*$' |>>> projex.projex.fromSearch('*cool*,*test*') |'^.*cool.*$|^.*test.*$' :return """""" terms = [] for term in nstr(text).split(','): # assume if no *'s then the user wants to search anywhere as keyword if '*' not in term: term = '*%s*' % term term = term.replace('*', '.*') terms.append('^%s$' % term) return '|'.join(terms)" 3681,"def cli(ctx, monitor, full, stage): """"""Basic statistics"""""" if not ctx.bubble: msg = 'There is no bubble present, will not search stats' if monitor: ctx.say_yellow('Unknown - ' + msg, 0) raise SystemExit(3) else: ctx.say_yellow(msg) raise click.Abort() stats = {} stats_found = False flowing_full = False flowing_decrease = False flowing_increase = False errors = False loader = bubble_lod_load(ctx, 'stats', stage) lod_gen = _find_lod_gen(ctx, loader) if lod_gen: last_stats = {} ctx.say('lod_gen', stuff=lod_gen) for stats_data in lod_gen: last_stats = stats_data if last_stats: ctx.say('found last_stats:', stuff=last_stats, verbosity=10) try: ctx.say('trying:last stats:', stuff=last_stats, verbosity=10) if last_stats: l = last_stats stats['pull_err'] = k0(l, 'pulled_stat_error_count') stats['pull_total'] = k0(l, 'pulled_stat_total_count') stats['trans_err'] = k0(l, 'transformed_stat_error_count') stats['trans_total'] = k0(l, 'transformed_stat_total_count') stats['push_err'] = k0(l, 'pushed_stat_error_count') stats['push_total'] = k0(l, 'pushed_stat_total_count') stats_found = True else: stats_found = False ctx.say('stats:', stuff=stats, verbosity=10) if stats_found and stats['pull_err'] > 0 or \ stats['trans_err'] > 0 or \ stats['push_err'] > 0: errors = True if stats_found and stats['pull_total'] == stats['trans_total'] and \ stats['trans_total'] == stats['push_total']: flowing_full = True if stats_found and stats['pull_total'] >= stats['trans_total'] >= stats['push_total']: flowing_decrease = True if stats_found and stats['pull_total'] <= stats['trans_total'] <= stats['push_total']: flowing_increase = True except KeyError as stat_key_error: errors = True ctx.gbc.cry('cannot create status from last stats', stuff=stat_key_error) if full: ctx.say_yellow('Stats full') ctx.say_yellow('Full flow:' + str(flowing_full)) ctx.say_yellow('Flowing decrease:' + str(flowing_decrease)) ctx.say_yellow('Flowing increase:' + str(flowing_increase)) ctx.say_yellow('Errors:' + str(errors)) ctx.say_yellow('totals:') ctx.say_yellow(pf(stats, indent=8)) if monitor == 'nagios' or full: """""" for Numeric Value Service Status Status Description, please see: https://nagios-plugins.org/doc/guidelines.html#AEN78 0 OK The plugin was able to check the service and it appeared to be functioning properly 1 Warning The plugin was able to check the service, but it appeared to be above some ""warning"" threshold or did not appear to be working properly 2 Critical The plugin detected that either the service was not running or it was above some ""critical"" threshold 3 Unknown Invalid command line arguments were supplied to the plugin or low-level failures internal to the plugin (such as unable to fork, or open a tcp socket) that prevent it from performing the specified operation. Higher-level errors (such as name resolution errors, socket timeouts, etc) are outside of the control of plugins and should generally NOT be reported as UNKNOWN states. 
http://nagios.sourceforge.net/docs/3_0/perfdata.html http://nagios.sourceforge.net/docs/3_0/pluginapi.html """""" if stats_found: templ_nagios = 'pull: %d %d transform: %d %d push: %d %d ' res_nagios = templ_nagios % (stats['pull_total'], stats['pull_err'], stats['trans_total'], stats['trans_err'], stats['push_total'], stats['push_err'] ) else: res_nagios = 'Cannot find or read stats' if not stats_found: print('Unknown - ' + res_nagios) raise SystemExit(3) # return if not errors and flowing_full: print('Ok - ' + res_nagios) return # magister and databyte with amount if not errors and flowing_decrease: print('Ok - ' + res_nagios) return if not errors and not flowing_full: print('Warning - ' + res_nagios) raise SystemExit(1) # return if errors: print('Critical - ' + res_nagios) raise SystemExit(2) # return return False" 3682,"def ast_smart(val): """"""Return a suitable subclass of :class:`ast.AST` for storing numbers or strings. For other type of objects, return a node class that will indicate that the variable is contained in one of global or local namespaces."""""" if isinstance(val, Number): return Num(n=val) elif isinstance(val, basestring): return Str(s=val) else: return ast_name(str(val))" 3683,"def napi_compare(left, ops, comparators, **kwargs): """"""Make pairwise comparisons of comparators."""""" values = [] for op, right in zip(ops, comparators): value = COMPARE[op](left, right) values.append(value) left = right result = napi_and(values, **kwargs) if isinstance(result, ndarray): return result else: return bool(result)" 3684,"def napi_and(values, **kwargs): """"""Perform element-wise logical *and* operation on arrays. If *values* contains a non-array object with truth_ value **False**, the outcome will be an array of **False**\s with suitable shape without arrays being evaluated. Non-array objects with truth value **True** are omitted. If array shapes do not match (after squeezing when enabled by user), :exc:`ValueError` is raised. This function uses :obj:`numpy.logical_and` or :obj:`numpy.all`."""""" arrays = [] result = None shapes = set() for value in values: if isinstance(value, ndarray) and value.shape: arrays.append(value) shapes.add(value.shape) elif not value: result = value if len(shapes) > 1 and kwargs.get('sq', kwargs.get('squeeze', False)): shapes.clear() for i, a in enumerate(arrays): a = arrays[i] = a.squeeze() shapes.add(a.shape) if len(shapes) > 1: raise ValueError('array shape mismatch, even after squeezing') if len(shapes) > 1: raise ValueError('array shape mismatch') shape = shapes.pop() if shapes else None if result is not None: if shape: return numpy.zeros(shape, bool) else: return result elif arrays: sc = kwargs.get('sc', kwargs.get('shortcircuit', 0)) if sc and numpy.prod(shape) >= sc: return short_circuit_and(arrays, shape) elif len(arrays) == 2: return numpy.logical_and(*arrays) else: return numpy.all(arrays, 0) else: return value" 3685,"def napi_or(values, **kwargs): """"""Perform element-wise logical *or* operation on arrays. If *values* contains a non-array object with truth_ value **True**, the outcome will be an array of **True**\s with suitable shape without arrays being evaluated. Non-array objects with truth value **False** are omitted. If array shapes do not match (after squeezing when enabled by user), :exc:`ValueError` is raised. 
This function uses :obj:`numpy.logical_or` or :obj:`numpy.any`."""""" arrays = [] result = None shapes = set() for value in values: if isinstance(value, ndarray) and value.shape: arrays.append(value) shapes.add(value.shape) elif value: result = value if len(shapes) > 1 and kwargs.get('squeeze', kwargs.get('sq', False)): shapes.clear() for i, a in enumerate(arrays): a = arrays[i] = a.squeeze() shapes.add(a.shape) if len(shapes) > 1: raise ValueError('array shape mismatch, even after squeezing') if len(shapes) > 1: raise ValueError('array shape mismatch') shape = shapes.pop() if shapes else None if result is not None: if shape: return numpy.ones(shape, bool) else: return result elif arrays: sc = kwargs.get('sc', kwargs.get('shortcircuit', 0)) if sc and numpy.prod(shape) >= sc: return short_circuit_or(arrays, shape) elif len(arrays) == 2: return numpy.logical_or(*arrays) else: return numpy.any(arrays, 0) else: return value" 3686,"def visit_Compare(self, node): """"""Replace chained comparisons with calls to :func:`.napi_compare`."""""" if len(node.ops) > 1: func = Name(id=self._prefix + 'napi_compare', ctx=Load()) args = [node.left, List(elts=[Str(op.__class__.__name__) for op in node.ops], ctx=Load()), List(elts=node.comparators, ctx=Load())] node = Call(func=func, args=args, keywords=self._kwargs) fml(node) self.generic_visit(node) return node" 3687,"def visit_BoolOp(self, node): """"""Replace logical operations with calls to :func:`.napi_and` or :func:`.napi_or`."""""" if isinstance(node.op, And): func = Name(id=self._prefix + 'napi_and', ctx=Load()) else: func = Name(id=self._prefix + 'napi_or', ctx=Load()) args = [List(elts=node.values, ctx=Load())] node = Call(func=func, args=args, keywords=self._kwargs) fml(node) self.generic_visit(node) return node" 3688,"def visit_UnaryOp(self, node): """"""Interfere with ``not`` operation to :func:`numpy.logical_not`."""""" if isinstance(node.op, Not): self._debug('UnaryOp', node.op, incr=1) operand = self[node.operand] self._debug('|-', operand, incr=2) tn = self._tn() result = numpy.logical_not(operand) self._debug('|_', result, incr=2) self[tn] = result return ast_name(tn) else: return self.generic_visit(node)" 3689,"def visit_BoolOp(self, node): """"""Interfere with boolean operations and use :func:`numpy.all` and :func:`numpy.any` functions for ``and`` and ``or`` operations. *axis* argument to these functions is ``0``."""""" self._incr() self._debug('BoolOp', node.op) if isinstance(node.op, And): result = self._and(node) else: result = self._or(node) self._debug('|_', result, incr=1) self._decr() return self._return(result, node)" 3690,"def rec_load_all(self, zone): """""" Lists all DNS records for the given domain :param zone: the domain for which records are being retrieved :type zone: str :return: :rtype: generator """""" has_more = True current_count = 0 while has_more: records = self._request({ 'a': 'rec_load_all', 'o': current_count, 'z': zone }) try: has_more = records['response']['recs']['has_more'] current_count += records['response']['recs']['count'] for record in records['response']['recs']['objs']: yield record except KeyError: has_more = False" 3691,"def zone_ips(self, zone, hours=24, ip_class=None, geo=False): """""" Retrieve IP addresses of recent visitors :param zone: the target domain :type zone: str :param hours: Past number of hours to query. Defaults to 24, maximum is 48. :type hours: int :param ip_class: Optional. 
Restrict the result set to a given class as given by: ""r"" -- regular ""s"" -- crawler ""t"" -- threat :type ip_class: str :param geo: Optional. Set to True to add longitude and latitude information to response :type geo: bool :return: :rtype: dict """""" params = { 'a': 'zone_ips', 'z': zone, 'hours': hours, 'class': ip_class, } if geo: params['geo'] = geo return self._request(params)" 3692,"def rec_new(self, zone, record_type, name, content, ttl=1, priority=None, service=None, service_name=None, protocol=None, weight=None, port=None, target=None): """""" Create a DNS record for the given zone :param zone: domain name :type zone: str :param record_type: Type of DNS record. Valid values are [A/CNAME/MX/TXT/SPF/AAAA/NS/SRV/LOC] :type record_type: str :param name: name of the DNS record :type name: str :param content: content of the DNS record :type content: str :param ttl: TTL of the DNS record in seconds. 1 = Automatic, otherwise, value must in between 120 and 4,294,967,295 seconds. :type ttl: int :param priority: [applies to MX/SRV] MX record priority. :type priority: int :param service: Service for SRV record :type service: str :param service_name: Service Name for SRV record :type service_name: str :param protocol: Protocol for SRV record. Values are [_tcp/_udp/_tls]. :type protocol: str :param weight: Weight for SRV record. :type weight: int :param port: Port for SRV record :type port: int :param target: Target for SRV record :type target: str :return: :rtype: dict """""" params = { 'a': 'rec_new', 'z': zone, 'type': record_type, 'name': name, 'content': content, 'ttl': ttl } if priority is not None: params['prio'] = priority if service is not None: params['service'] = service if service_name is not None: params['srvname'] = service_name if protocol is not None: params['protocol'] = protocol if weight is not None: params['weight'] = weight if port is not None: params['port'] = port if target is not None: params['target'] = target return self._request(params)" 3693,"def rec_edit(self, zone, record_type, record_id, name, content, ttl=1, service_mode=None, priority=None, service=None, service_name=None, protocol=None, weight=None, port=None, target=None): """""" Edit a DNS record for the given zone. :param zone: domain name :type zone: str :param record_type: Type of DNS record. Valid values are [A/CNAME/MX/TXT/SPF/AAAA/NS/SRV/LOC] :type record_type: str :param record_id: DNS Record ID. Available by using the rec_load_all call. :type record_id: int :param name: Name of the DNS record :type name: str :param content: The content of the DNS record, will depend on the the type of record being added :type content: str :param ttl: TTL of record in seconds. 1 = Automatic, otherwise, value must in between 120 and 4,294,967,295 seconds. :type ttl: int :param service_mode: [applies to A/AAAA/CNAME] Status of CloudFlare Proxy, 1 = orange cloud, 0 = grey cloud. :type service_mode: int :param priority: [applies to MX/SRV] MX record priority. :type priority: int :param service: Service for SRV record :type service: str :param service_name: Service Name for SRV record :type service_name: str :param protocol: Protocol for SRV record. Values are [_tcp/_udp/_tls]. :type protocol: str :param weight: Weight for SRV record. 
:type weight: int :param port: Port for SRV record :type port: int :param target: Target for SRV record :type target: str :return: :rtype: dict """""" params = { 'a': 'rec_edit', 'z': zone, 'type': record_type, 'id': record_id, 'name': name, 'content': content, 'ttl': ttl } if service_mode is not None: params['service_mode'] = service_mode if priority is not None: params['prio'] = priority if service is not None: params['service'] = service if service_name is not None: params['srvname'] = service_name if protocol is not None: params['protocol'] = protocol if weight is not None: params['weight'] = weight if port is not None: params['port'] = port if target is not None: params['target'] = target return self._request(params)" 3694,"def calc_transition_to_state(self, newstate): """"""Given a target state, generate the sequence of transitions that would move this state machine instance to that target state. Args: newstate: A str state name to calculate the path to. Returns: A bitarray containing the bits that would transition this state machine to the target state. The bits read from right to left. For efficiency, this retulting bitarray is cached. Do not edit this bitarray, or it will cause undefined behavior. """""" cached_val = JTAGStateMachine._lookup_cache.\ get((self.state, newstate)) if cached_val: return cached_val if newstate not in self.states: raise ValueError(""%s is not a valid state for this state "" ""machine""%newstate) path = self._find_shortest_path(self._statestr, newstate) if not path: raise ValueError(""No path to the requested state."") res = self._get_steps_from_nodes_path(path) res.reverse() JTAGStateMachine._lookup_cache[(self.state, newstate)] = res return res" 3695,"def prompt(self, error=''): """""" Prompts the user to set the value for this item. :return | success """""" if self.hidden: return True cmd = [self.label] if self.default is not None: cmd.append('(default: {0})'.format(self.default)) elif not self.required: cmd.append('(default: )') if self.type == 'bool': cmd.append('(y/n)') if self.choices: print 'Choices:' for choice in self.choices: print choice if error: print error value = raw_input(' '.join(cmd) + ':') if value == '': value = self.default if self.type == 'bool': if value == 'y': value = True elif value == 'n': value = False else: value = self.default if value is None and self.required: return self.prompt('{0} is required.') if self.regex and not re.match(self.regex, value): error = '{0} must match {1}'.format(self.name, self.regex) return self.prompt(error) self.value = value return True" 3696,"def build(self, outpath, structure=None): """""" Builds this scaffold out to the given filepath with the chosen structure. 
:param outpath | structure | || None :return | success """""" if not os.path.exists(outpath): return False opts = {'scaffold': self} if structure is not None: xstruct = structure else: xstruct = self.structure() if zipfile.is_zipfile(self.source()): zfile = zipfile.ZipFile(self.source(), 'r') else: zfile = None base = os.path.dirname(self.source()) # build the structure information # noinspection PyShadowingNames def build_level(root, xlevel): # ignore the entry if xlevel.get('enabled', 'True') == 'False': return # create a folder if xlevel.tag == 'folder': name = makotext.render(xlevel.get('name'), opts) dirname = os.path.join(root, name) if not os.path.exists(dirname): os.mkdir(dirname) for xchild in xlevel: build_level(dirname, xchild) # create a file elif xlevel.tag == 'file': name = makotext.render(xlevel.get('name'), opts) fname = os.path.join(root, name) # create from a template templ = xlevel.get('templ') if templ: if zfile: templ_str = zfile.read('templ/{0}'.format(templ)) else: templ_path = os.path.join(base, 'templ', templ) templ_str = open(templ_path, 'r').read() rendered = makotext.render(templ_str, opts) rendered = rendered.replace('\r\n', '\r') f = open(fname, 'w') f.write(rendered) f.close() # create a blank file else: f = open(fname, 'w') f.close() for xlevel in xstruct: build_level(outpath, xlevel) if zfile: zfile.close() return True" 3697,"def render(self, template, fail='## :todo: add {template}'): """""" Returns the rendered value for the inputted template name. :param template | """""" try: return self._templates[template].render(scaffold=self) except KeyError: return fail.format(template=template)" 3698,"def run(self, path=None): """""" Runs the scaffold option generation for this scaffold in the given path. If no path is supplied, then the current path is used. :param path | || None """""" if path is None: path = '.' for prop in self._properties.values(): if not prop.prompt(): return False return self.build(path)" 3699,"def structure(self): """""" Returns the structure for this scaffold. :return || None """""" opts = {'scaffold': self} # build from a zip file if zipfile.is_zipfile(self.source()): zfile = zipfile.ZipFile(self.source(), 'r') try: contents = zfile.read('structure.xml') contents = makotext.render(contents, opts) zfile.close() return ElementTree.fromstring(contents) except StandardError: logger.exception('Failed to load structure.') zfile.close() return None else: try: filename = os.path.join(os.path.dirname(self.source()), 'structure.xml') xdata = open(filename, 'r').read() xdata = makotext.render(xdata, opts) return ElementTree.fromstring(xdata) except StandardError: logger.exception('Failed to load structure.') return None" 3700,"def template(self, key): """""" Returns the template associated with this scaffold. :param key | :return || None """""" try: return self._templates[key] except KeyError: return Template.Plugins[key]" 3701,"def uifile(self): """""" Returns the uifile for this scaffold. 
:return """""" output = '' # build from a zip file if zipfile.is_zipfile(self.source()): zfile = zipfile.ZipFile(self.source(), 'r') if 'properties.ui' in zfile.namelist(): tempdir = tempfile.gettempdir() output = os.path.join(tempdir, '{0}_properties.ui'.format(self.name())) f = open(output, 'w') f.write(zfile.read('properties.ui')) f.close() zfile.close() else: uifile = os.path.join(os.path.dirname(self.source()), 'properties.ui') if os.path.exists(uifile): output = uifile return output" 3702,"def load(filename): """""" Loads the scaffold from the given XML file. :param filename | :return || None """""" # parse a zipped file if zipfile.is_zipfile(filename): zfile = zipfile.ZipFile(filename, 'r') try: xml = ElementTree.fromstring(zfile.read('scaffold.xml')) except StandardError: logger.exception('Failed to load scaffold: {0}'.format(filename)) zfile.close() return None zfile.close() # parse a standard xml file else: try: xml = ElementTree.parse(filename).getroot() except StandardError: logger.exception('Failed to load scaffold: {0}'.format(filename)) return None # generate a scaffold scaffold = Scaffold() scaffold.setSource(filename) scaffold.setName(xml.get('name', 'Missing')) scaffold.setGroup(xml.get('group', 'Default')) scaffold.setLanguage(xml.get('lang', 'Python')) scaffold.setIcon(xml.get('icon', '')) # define properties xprops = xml.find('properties') if xprops is not None: for xprop in xprops: scaffold.addProperty(Property.fromXml(xprop)) return scaffold" 3703,"def displayhook(value): """""" Runs all of the registered display hook methods with the given value. Look at the sys.displayhook documentation for more information. :param value | """""" global _displayhooks new_hooks = [] for hook_ref in _displayhooks: hook = hook_ref() if hook: hook(value) new_hooks.append(hook_ref) _displayhooks = new_hooks sys.__displayhook__(value)" 3704,"def excepthook(cls, error, trace): """""" Runs all of the registered exception hook methods with the given value. Look at the sys.excepthook documentation for more information. :param cls | error | trace | """""" global _excepthooks new_hooks = [] for hook_ref in _excepthooks: hook = hook_ref() if hook: hook(cls, error, trace) new_hooks.append(hook_ref) _excepthook = new_hooks sys.__excepthook__(cls, error, trace)" 3705,"def formatExcept(cls, error, trace): """""" Formats the inputted class, error, and traceback information to the standard output commonly found in Python interpreters. :param cls | error | trace | :return """""" clsname = cls.__name__ if cls else 'UnknownError' tb = 'Traceback (most recent call last):\n' tb += ''.join(traceback.format_tb(trace)) tb += '{0}: {1}'.format(clsname, error) return tb" 3706,"def registerDisplay(func): """""" Registers a function to the display hook queue to be called on hook. Look at the sys.displayhook documentation for more information. :param func | """""" setup() ref = weakref.ref(func) if ref not in _displayhooks: _displayhooks.append(ref)" 3707,"def registerExcept(func): """""" Registers a function to the except hook queue to be called on hook. Look at the sys.displayhook documentation for more information. :param func | """""" setup() ref = weakref.ref(func) if ref not in _excepthooks: _excepthooks.append(ref)" 3708,"def registerStdErr(func): """""" Registers a function to the print hook queue to be called on hook. This method will also override the current sys.stdout variable with a new instance. 
This will preserve any current sys.stdout overrides while providing a hookable class for linking multiple methods to. :param func | """""" if not isinstance(sys.stderr, StreamHooks): sys.stderr = StreamHooks(sys.stderr) ref = weakref.ref(func) if ref not in sys.stderr.hooks: sys.stderr.hooks.append(ref)" 3709,"def registerStdOut(func): """""" Registers a function to the print hook queue to be called on hook. This method will also override the current sys.stdout variable with a new instance. This will preserve any current sys.stdout overrides while providing a hookable class for linking multiple methods to. :param func | """""" if not isinstance(sys.stdout, StreamHooks): sys.stdout = StreamHooks(sys.stdout) ref = weakref.ref(func) if ref not in sys.stdout.hooks: sys.stdout.hooks.append(ref)" 3710,"def setup(): """""" Initializes the hook queues for the sys module. This method will automatically be called on the first registration for a hook to the system by either the registerDisplay or registerExcept functions. """""" global _displayhooks, _excepthooks if _displayhooks is not None: return _displayhooks = [] _excepthooks = [] # store any current hooks if sys.displayhook != sys.__displayhook__: _displayhooks.append(weakref.ref(sys.displayhook)) if sys.excepthook != sys.__excepthook__: _excepthooks.append(weakref.ref(sys.excepthook)) # replace the current hooks sys.displayhook = displayhook sys.excepthook = excepthook" 3711,"def unregisterStdErr(func): """""" Un-registers a function from the print hook queue. Look at the sys.displayhook documentation for more information. :param func | """""" try: sys.stderr.hooks.remove(weakref.ref(func)) except (AttributeError, ValueError): pass" 3712,"def unregisterStdOut(func): """""" Un-registers a function from the print hook queue. Look at the sys.displayhook documentation for more information. :param func | """""" try: sys.stdout.hooks.remove(weakref.ref(func)) except (AttributeError, ValueError): pass" 3713,"def _parse_iso8601(text): """""" Maybe parse an ISO8601 datetime string into a datetime. :param text: Either a ``unicode`` string to parse or any other object (ideally a ``datetime`` instance) to pass through. :return: A ``datetime.datetime`` representing ``text``. Or ``text`` if it was anything but a ``unicode`` string. """""" if isinstance(text, unicode): try: return parse_iso8601(text) except ValueError: raise CheckedValueTypeError( None, (datetime,), unicode, text, ) # Let pyrsistent reject it down the line. return text" 3714,"def from_path(cls, spec_path): """""" Load a specification from a path. :param FilePath spec_path: The location of the specification to read. """""" with spec_path.open() as spec_file: return cls.from_document(load(spec_file))" 3715,"def add_behavior_for_pclass(self, definition, cls): """""" Define an additional base class for the Python class created for a particular definition. :param unicode definition: The definition the Python class for which the base class will be included. :param type cls: The additional base class. :raise ValueError: If a Python class for the given definition has already been created. Behavior cannot be retroactively added to a Python class. All behaviors must be registered before the first call to ``pclass_for_definition`` for a particular definition. 
:return: ``None`` """""" if definition in self._pclasses: raise AlreadyCreatedClass(definition) if definition not in self.definitions: raise NoSuchDefinition(definition) self._behaviors.setdefault(definition, []).append(cls)" 3716,"def to_document(self): """""" Serialize this specification to a JSON-compatible object representing a Swagger specification. """""" return dict( info=thaw(self.info), paths=thaw(self.paths), definitions=thaw(self.definitions), securityDefinitions=thaw(self.securityDefinitions), security=thaw(self.security), swagger=thaw(self.swagger), )" 3717,"def pclass_for_definition(self, name): """""" Get a ``pyrsistent.PClass`` subclass representing the Swagger definition in this specification which corresponds to the given name. :param unicode name: The name of the definition to use. :return: A Python class which can be used to represent the Swagger definition of the given name. """""" while True: try: cls = self._pclasses[name] except KeyError: try: original_definition = self.definitions[name] except KeyError: raise NoSuchDefinition(name) if ""$ref"" in original_definition: # Identify definitions that are merely a reference to # another and restart processing. There is some # duplication of logic between this and the $ref handling # in _ClassModel. It would be nice to eliminate this # duplication. name = original_definition[u""$ref""] assert name.startswith(u""#/definitions/"") name = name[len(u""#/definitions/""):] continue definition = self.transform_definition(name, original_definition) kind = self._identify_kind(definition) if kind is None: raise NotClassLike(name, definition) generator = getattr(self, ""_model_for_{}"".format(kind)) model = generator(name, definition) bases = tuple(self._behaviors.get(name, [])) cls = model.pclass(bases) self._pclasses[name] = cls return cls" 3718,"def _model_for_CLASS(self, name, definition): """""" Model a Swagger definition that is like a Python class. :param unicode name: The name of the definition from the specification. :param pyrsistent.PMap definition: A Swagger definition to categorize. This will be a value like the one found at ``spec[""definitions""][name]``. """""" return _ClassModel.from_swagger( self.pclass_for_definition, name, definition, )" 3719,"def pclass_field_for_attribute(self): """""" :return: A pyrsistent field reflecting this attribute and its type model. """""" return self.type_model.pclass_field_for_type( required=self.required, default=self.default, )" 3720,"def from_swagger(cls, pclass_for_definition, name, definition): """""" Create a new ``_ClassModel`` from a single Swagger definition. :param pclass_for_definition: A callable like ``Swagger.pclass_for_definition`` which can be used to resolve type references encountered in the definition. :param unicode name: The name of the definition. :param definition: The Swagger definition to model. This will be a value like the one found at ``spec[""definitions""][name]``. :return: A new model for the given definition. """""" return cls( name=name, doc=definition.get(u""description"", name), attributes=cls._attributes_for_definition( pclass_for_definition, definition, ), )" 3721,"def pclass(self, bases): """""" Create a ``pyrsistent.PClass`` subclass representing this class. :param tuple bases: Additional base classes to give the resulting class. These will appear to the left of ``PClass``. 
"""""" def discard_constant_fields(cls, **kwargs): def ctor(): return super(huh, cls).__new__(cls, **kwargs) try: return ctor() except AttributeError: if u""kind"" in kwargs or u""apiVersion"" in kwargs: kwargs.pop(""kind"", None) kwargs.pop(""apiVersion"", None) return ctor() raise def lt_pclass(self, other): if isinstance(other, self.__class__): return sorted(self.serialize().items()) < sorted(other.serialize().items()) return NotImplemented def eq_pclass(self, other): if isinstance(other, self.__class__): return sorted(self.serialize().items()) == sorted(other.serialize().items()) return NotImplemented content = { attr.name: attr.pclass_field_for_attribute() for attr in self.attributes } content[""__doc__""] = nativeString(self.doc) content[""serialize""] = _serialize_with_omit content[""__new__""] = discard_constant_fields content[""__lt__""] = lt_pclass content[""__eq__""] = eq_pclass content[""__hash__""] = PClass.__hash__ content = total_ordering(content) huh = type(nativeString(self.name), bases + (PClass,), content) return huh" 3722,"def add_behavior_for_pclass(self, cls): """""" Define an additional base class for the Python class created for a particular definition. :param type cls: The additional base class. Its name must exactly match the name of a definition with a version matching this object's version. :return: ``None`` """""" kind = cls.__name__ for version in sorted(self.versions): try: self.spec.add_behavior_for_pclass(self.full_name(version, kind), cls) except NoSuchDefinition: pass else: return None raise NoSuchDefinition(kind)" 3723,"def dumps_bytes(obj): """""" Serialize ``obj`` to JSON formatted ``bytes``. """""" b = dumps(obj) if isinstance(b, unicode): b = b.encode(""ascii"") return b" 3724,"def native_string_to_bytes(s, encoding=""ascii"", errors=""strict""): """""" Ensure that the native string ``s`` is converted to ``bytes``. """""" if not isinstance(s, str): raise TypeError(""{} must be type str, not {}"".format(s, type(s))) if str is bytes: # Python 2 return s else: # Python 3 return s.encode(encoding=encoding, errors=errors)" 3725,"def native_string_to_unicode(s, encoding=""ascii"", errors=""strict""): """""" Ensure that the native string ``s`` is converted to ``unicode``. """""" if not isinstance(s, str): raise TypeError(""{} must be type str, not {}"".format(s, type(s))) if str is unicode: # Python 3 return s else: # Python 2 return s.decode(encoding=encoding, errors=errors)" 3726,"def datetime_handler(x): """""" Allow serializing datetime objects to JSON """""" if isinstance(x, datetime.datetime) or isinstance(x, datetime.date): return x.isoformat() raise TypeError(""Unknown type"")" 3727,"def save(self, *args, **kwargs): """""" **uid**: :code:`party:{apcode}` """""" self.uid = 'party:{}'.format(slugify(self.ap_code)) if not self.slug: if self.organization: self.slug = slugify(self.organization.name) else: self.slug = slugify(self.label) super(Party, self).save(*args, **kwargs)" 3728,"def parsed(self): """"""Get the code object which represents the compiled Python file. This property is cached and only parses the content once. 
"""""" if not self._parsed: self._parsed = compile(self.content, self.path, 'exec') return self._parsed" 3729,"def get_version(): """""" parse __init__.py for version number instead of importing the file see http://stackoverflow.com/questions/458550/standard-way-to-embed-version-into-python-package """""" version_file = os.path.join(PKG, 'lib/version.py') ver_str_line = open(version_file, ""rt"").read() version_regex = r'^__version__ = [\'""]([^\'""]*)[\'""]' mo = re.search(version_regex, ver_str_line, re.M) if mo: return mo.group(1) else: raise RuntimeError('Unable to find version string in %s.' % (version_file,))" 3730,"def _chunk(iterable, size): """"""Split an iterable into chunks of a fixed size."""""" # We're going to use some star magic to chunk the iterable. We create a # copy of the iterator size times, then pull a value from each to form a # chunk. The last chunk may have some trailing Nones if the length of the # iterable isn't a multiple of size, so we filter them out. args = (iter(iterable),) * size return ( # pylint: disable=star-args itertools.takewhile(lambda x: x is not None, group) for group in itertools.zip_longest(*args) )" 3731,"def _matrix_add_column(matrix, column, default=0): """"""Given a matrix as a list of lists, add a column to the right, filling in with a default value if necessary. """""" height_difference = len(column) - len(matrix) # The width of the matrix is the length of its longest row. width = max(len(row) for row in matrix) if matrix else 0 # For now our offset is 0. We may need to shift our column down later. offset = 0 # If we need extra rows, add them to the top of the matrix. if height_difference > 0: for _ in range(height_difference): matrix.insert(0, [default] * width) # If the column is shorter, we'll need to shift it down. if height_difference < 0: offset = -height_difference #column = ([default] * offset) + column for index, value in enumerate(column): # The row index is the index in the column plus our offset. row_index = index + offset row = matrix[row_index] # If this row is short, pad it with default values. width_difference = width - len(row) row.extend([default] * width_difference) row.append(value)" 3732,"def vertical_graph(*args, sep='\n'): r""""""Consume an iterable of integers and produce a vertical bar graph using braille characters. The graph is vertical in that its dependent axis is the vertical axis. Thus each value is represented as a row running left to right, and values are listed top to bottom. If the iterable contains more than four integers, it will be chunked into groups of four, separated with newlines by default. >>> vertical_graph([1, 2, 3, 4]) '⣷⣄' >>> vertical_graph([1, 2, 3, 4, 5, 6]) '⣷⣄\n⠛⠛⠓' >>> print(vertical_graph([1, 2, 3, 4, 5, 6])) ⣷⣄ ⠛⠛⠓ Alternately, the arguments can be passed directly: >>> vertical_graph(1, 2, 3, 4) '⣷⣄' The optional sep parameter controls how groups are separated. If sep is not passed (or if it is None), they are put on their own lines. For example, to keep everything on one line, space could be used: >>> vertical_graph(3, 1, 4, 1, 5, 9, 2, 6, sep=' ') '⡯⠥ ⣿⣛⣓⠒⠂' """""" lines = [] # If the arguments were passed as a single iterable, pull it out. # Otherwise, just use them as-is. if len(args) == 1: bars = args[0] else: bars = args # Make sure we use the default when needed if sep is None: sep = '\n' # Break the bars into groups of four, one for each row in the braille # blocks. 
for bar_group in _chunk(bars, 4): line = [] for braille_row, bar_value in enumerate(bar_group): # The number of full braille blocks needed to draw this bar. Each # block is two dots wide. full_blocks_needed = bar_value // 2 # The number of braille blocks needed to draw this bar. The second # term accounts for a possible half row. blocks_needed = full_blocks_needed + (bar_value % 2) # The number of braille blocks we'll need to append to the current # line to accomodate this bar extra_blocks_needed = blocks_needed - len(line) # If we need extra blocks, add them. if extra_blocks_needed > 0: line.extend([_BRAILLE_EMPTY_BLOCK] * extra_blocks_needed) # Fill in the majority of the bar with full braille rows (two dots). for block_index in range(full_blocks_needed): line[block_index] += _BRAILLE_FULL_ROW[braille_row] # If the bar's value is odd, we'll need to add a single dot at the # end. if bar_value % 2: line[full_blocks_needed] += _BRAILLE_HALF_ROW[braille_row] # Wrap up this line by converting all the code points to characters # and concatenating them. lines.append(''.join(chr(code) for code in line)) # Join all the lines to make the final graph return sep.join(lines)" 3733,"def horizontal_graph(*args): r""""""Consume an iterable of integers and produce a horizontal bar graph using braille characters. The graph is horizontal in that its dependent axis is the horizontal axis. Thus each value is represented as a column running bottom to top, and values are listed left to right. The graph is anchored to the bottom, so columns fill in from the bottom of the current braille character and the next character is added on top when needed. For columns with no dots, the blank braille character is used, not a space character. >>> horizontal_graph([1, 2, 3, 4]) '⣠⣾' >>> horizontal_graph([1, 2, 3, 4, 5, 6]) '⠀⠀⣠\n⣠⣾⣿' >>> print(horizontal_graph([1, 2, 3, 4, 5, 6])) ⠀⠀⣠ ⣠⣾⣿ Alternately, the arguments can be passed directly: >>> horizontal_graph(1, 2, 3, 4) '⣠⣾' """""" lines = [] # If the arguments were passed as a single iterable, pull it out. # Otherwise, just use them as-is. if len(args) == 1: bars = args[0] else: bars = args # Break the bars into groups of two, one for each column in the braille # blocks. for bar_group in _chunk(bars, 2): column = [] for braille_col, bar_value in enumerate(bar_group): # The number of full braille blocks needed to draw this bar. Each # block is four dots tall. full_blocks_needed = bar_value // 4 # The number of braille blocks needed to draw this bar. This # accounts for a possible partial block. blocks_needed = full_blocks_needed + (1 if bar_value % 4 else 0) # The number of new lines we'll need to prepend to accomodate this # bar extra_blocks_needed = blocks_needed - len(column) # If we need extra blocks, add them. column = ([_BRAILLE_EMPTY_BLOCK] * extra_blocks_needed) + column # Fill in the majority of the column with full braille colums (four # dots). We negate the index to access from the end of the list. for block_index in range(-full_blocks_needed, 0, 1): column[block_index] += _BRAILLE_FULL_COL[braille_col] # If we need a partial column, fill it in. We negate the index to # access from the end of the list. if bar_value % 4: partial_index = (bar_value % 4) - 1 column[-blocks_needed] += ( _BRAILLE_PARTIAL_COL[braille_col][partial_index] ) # Add this column to the lines. _matrix_add_column(lines, column, default=_BRAILLE_EMPTY_BLOCK) # Convert all the code points into characters, concatenate them into lines, # then concatenate all the lines to make the final graph. 
return '\n'.join(''.join(chr(code) for code in line) for line in lines)" 3734,"def isValid(self): """""" Checks to see if the callback pointers are still valid or not. :return """""" if self._callback_func_ref is not None and self._callback_func_ref(): if self._callback_self_ref is None or self._callback_self_ref(): return True return False" 3735,"def clear(self, signal=None): """""" Clears either all the callbacks or the callbacks for a particular signal. :param signal | || None """""" if signal is not None: self._callbacks.pop(signal, None) else: self._callbacks.clear()" 3736,"def connect(self, signal, slot): """""" Creates a new connection between the inputted signal and slot. :param signal | slot | :return | new connection created """""" if self.isConnected(signal, slot): return False callback = Callback(slot) self._callbacks.setdefault(signal, []) self._callbacks[signal].append(callback) return True" 3737,"def disconnect(self, signal, slot): """""" Breaks the connection between the inputted signal and the given slot. :param signal | slot | :return | connection broken """""" sig_calls = self._callbacks.get(signal, []) for callback in sig_calls: if callback == slot: sig_calls.remove(callback) return True return False" 3738,"def isConnected(self, signal, slot): """""" Returns if the given signal is connected to the inputted slot. :param signal | slot | :return | is connected """""" sig_calls = self._callbacks.get(signal, []) for callback in sig_calls: if callback == slot: return True return False" 3739,"def emit(self, signal, *args): """""" Emits the given signal with the inputted args. This will go through its list of connected callback slots and call them. :param signal | *args | variables """""" callbacks = self._callbacks.get(signal, []) new_callbacks = [] for callback in callbacks: # clear out deleted pointers if not callback.isValid(): continue new_callbacks.append(callback) try: callback(*args) except StandardError: logger.exception('Error occurred during callback.') self._callbacks[signal] = new_callbacks" 3740,"def generate_example(config, ext='json'): """"""Generate an example file based on the given Configuration object. Args: config (confpy.core.configuration.Configuration): The configuration object on which to base the example. ext (str): The file extension to render. Choices: JSON and INI. Returns: str: The text of the example file. """""" template_name = 'example.{0}'.format(ext.lower()) template = ENV.get_template(template_name) return template.render(config=config)" 3741,"def run(self): """""" Run at parse time. When the documents are initially being scanned, this method runs and does two things: (a) creates an instance that is added to the site's widgets, and (b) leaves behind a placeholder docutils node that can later be processed after the docs are resolved. The latter needs enough information to retrieve the former. 
"""""" rtype = self.name resource_content = '\n'.join(self.content) resource_class = ResourceDirective.get_resource_class(rtype) this_resource = resource_class(self.docname, rtype, resource_content) # Add this resource to the site self.resources[this_resource.docname] = this_resource # Don't need to return a resource ""node"", the document is the node return []" 3742,"def importProteinDatabase(filePath, proteindb=None, headerParser=None, forceId=False, decoyTag='[decoy]', contaminationTag='[cont]', ignoreIsoleucine=False, cleavageRule='[KR]', minLength=5, maxLength=40, missedCleavage=0, removeNtermM=False): """"""Generates a :class:`ProteinDatabase` by in silico digestion of proteins from a fasta file. :param filePath: File path :param proteindb: optional an existing :class:`ProteinDatabase` can be specified, otherwise a new instance is generated and returned :param decoyTag: If a fasta file contains decoy protein entries, they should be specified with a sequence tag :param contaminationTag: If a fasta file contains contamination protein entries, they should be specified with a sequence tag :param headerParser: optional, allows specifying an individual headerParser :param forceId: bool, if True and no id can be extracted from the fasta header the whole header sequence is used as a protein id instead of raising an exception. :param cleavageRule: cleavage rule expressed in a regular expression, see :attr:`maspy.constants.expasy_rules` :param missedCleavage: number of allowed missed cleavage sites :param removeNtermM: bool, True to consider also peptides with the N-terminal Methionine of the protein removed :param minLength: int, only yield peptides with length >= minLength :param maxLength: int, only yield peptides with length <= maxLength :param ignoreIsoleucine: bool, if True treat Isoleucine and Leucine in peptide sequences as indistinguishable See also :func:`maspy.peptidemethods.digestInSilico` """""" if proteindb is None: proteindb = ProteinDatabase(ignoreIsoleucine=ignoreIsoleucine) # - Add protein entries to the protein database - # for fastaHeader, sequence in _readFastaFile(filePath): #TODO: function, make protein entry or something like this. 
header, isDecoy = _removeHeaderTag(fastaHeader, decoyTag) header, isContaminant = _removeHeaderTag(header, contaminationTag) headerInfo = _parseFastaHeader(header, headerParser, forceId) proteinId = _idFromHeaderInfo(headerInfo, isDecoy, decoyTag) proteinName = _nameFromHeaderInfo(headerInfo, isDecoy, decoyTag) if not isDecoy: isDecoy = _proteinTagPresent(header, decoyTag) if not isContaminant: isContaminant = _proteinTagPresent(header, contaminationTag) proteindb._addProtein( proteinId, proteinName, sequence, fastaHeader, headerInfo, isDecoy=isDecoy, isContaminant=isContaminant ) # - Perform an insilico digestion and add peptides to the proteindb - # for proteinId in proteindb.proteins: sequence = proteindb.proteins[proteinId].sequence digestInfo = maspy.peptidemethods.digestInSilico( sequence, cleavageRule, missedCleavage, removeNtermM, minLength, maxLength ) for sequence, info in digestInfo: proteindb._addPeptide(sequence, proteinId, info) #Define wheter a peptide is unique to one protein entry for peptide, peptideEntry in viewitems(proteindb.peptides): if len(peptideEntry.proteins) == 1: peptideEntry.isUnique = True else: peptideEntry.isUnique = False #Add peptide as shared or unique to its protein entries for peptide, peptideEntry in viewitems(proteindb.peptides): for proteinId in peptideEntry.proteins: if peptideEntry.isUnique: proteindb.proteins[proteinId].uniquePeptides.add(peptide) else: proteindb.proteins[proteinId].sharedPeptides.add(peptide) #Define unique proteins, i.e. have at least one unique peptide for proteinEntry in viewvalues(proteindb.proteins): if len(proteinEntry.uniquePeptides) > 0: proteinEntry.isUnique = True else: proteinEntry.isUnique = False return proteindb" 3743,"def fastaParserSpectraClusterPy(header): """"""Custom parser for fasta headers adapted from https://github.com/spectra-cluster/spectra-cluster-py :param header: str, protein entry header from a fasta file :returns: dict, parsed header """""" isUniprot = lambda h: h[0:3] in ['sp|', 'tr|', 'up|'] if isUniprot(header): start = 3 end = header.find('|', start) else: start = 0 breakPositions = [header.find(' '), header.find('|')] breakPositions = [i if i > 0 else len(header) for i in breakPositions] end = min(breakPositions) return {'id': header[start:end]}" 3744,"def _removeHeaderTag(header, tag): """"""Removes a tag from the beginning of a header string. :param header: str :param tag: str :returns: (str, bool), header without the tag and a bool that indicates wheter the tag was present. """""" if header.startswith(tag): tagPresent = True header = header[len(tag):] else: tagPresent = False return header, tagPresent" 3745,"def _parseFastaHeader(fastaHeader, parser=None, forceId=False): """"""Parses a fasta header and returns extracted information in a dictionary. Unless a custom parser is specified, a ``Pyteomics`` function is used, which provides parsers for the formats of UniProtKB, UniRef, UniParc and UniMES (UniProt Metagenomic and Environmental Sequences), described at `www.uniprot.org _`. :param fastaHeader: str, protein entry header from a fasta file :param parser: is a function that takes a fastaHeader string and returns a dictionary, containing at least the key ""id"". If None the parser function from pyteomics ``pyteomics.fasta.parse()`` is used. :param forceId: bool, if True and no id can be extracted from the fasta header the whole header sequence is used as a protein id instead of raising an exception. :returns: dict, describing a fasta header. Minimally contains an 'id' key. 
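As a minimal sketch (not part of the original module), a custom ``parser`` only needs to accept the raw header string and return a dict containing at least an ``'id'`` key:

    def simple_parser(fasta_header):
        # use the first whitespace-delimited token as the protein id
        return {'id': fasta_header.split()[0]}

    headerInfo = _parseFastaHeader('sp|P12345|EXAMPLE_HUMAN Example protein', parser=simple_parser)
    # headerInfo == {'id': 'sp|P12345|EXAMPLE_HUMAN'}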
"""""" if parser is None: try: headerInfo = pyteomics.fasta.parse(fastaHeader) except pyteomics.auxiliary.PyteomicsError as raisedPyteomicsError: #If forceId is set True, the whole header is used as id if forceId: headerInfo = {'id': fastaHeader} else: raise raisedPyteomicsError else: headerInfo = parser(fastaHeader) return headerInfo" 3746,"def _idFromHeaderInfo(headerInfo, isDecoy, decoyTag): """"""Generates a protein id from headerInfo. If ""isDecoy"" is True, the ""decoyTag"" is added to beginning of the generated protein id. :param headerInfo: dict, must contain a key ""id"" :param isDecoy: bool, determines if the ""decoyTag"" is added or not. :param decoyTag: str, a tag that identifies decoy / reverse protein entries. :returns: str, protein id """""" proteinId = headerInfo['id'] if isDecoy: proteinId = ''.join((decoyTag, proteinId)) return proteinId" 3747,"def _nameFromHeaderInfo(headerInfo, isDecoy, decoyTag): """"""Generates a protein name from headerInfo. If ""isDecoy"" is True, the ""decoyTag"" is added to beginning of the generated protein name. :param headerInfo: dict, must contain a key ""name"" or ""id"" :param isDecoy: bool, determines if the ""decoyTag"" is added or not. :param decoyTag: str, a tag that identifies decoy / reverse protein entries. :returns: str, protein name """""" if 'name' in headerInfo: proteinName = headerInfo['name'] else: proteinName = headerInfo['id'] if isDecoy: proteinName = ''.join((decoyTag, proteinName)) return proteinName" 3748,"def _addProtein(self, proteinId, proteinName, sequence, fastaHeader, headerInfo, isDecoy=False, isContaminant=False): """"""#TODO"""""" proteinEntry = ProteinEntry( proteinId, proteinName, sequence, fastaHeader, headerInfo, isDecoy=isDecoy, isContaminant=isContaminant ) self.proteins[proteinEntry.id] = proteinEntry" 3749,"def _addPeptide(self, sequence, proteinId, digestInfo): """"""Add a peptide to the protein database. :param sequence: str, amino acid sequence :param proteinId: str, proteinId :param digestInfo: dict, contains information about the in silico digest must contain the keys 'missedCleavage', 'startPos' and 'endPos' """""" stdSequence = self.getStdSequence(sequence) if stdSequence not in self.peptides: self.peptides[stdSequence] = PeptideEntry( stdSequence, mc=digestInfo['missedCleavage'] ) if sequence not in self.peptides: self.peptides[sequence] = self.peptides[stdSequence] if proteinId not in self.peptides[stdSequence].proteins: #FUTURE: peptide can appear at multiple positions per protein. #peptideEntry.addSource(proteinId, startPos, endPos) self.peptides[stdSequence].proteins.add(proteinId) self.peptides[stdSequence].proteinPositions[proteinId] = ( digestInfo['startPos'], digestInfo['endPos'] ) self.proteins[proteinId].peptides.add(sequence)" 3750,"def options(self, parser, env): """""" Add options to command line. 
"""""" super(LeakDetectorPlugin, self).options(parser, env) parser.add_option(""--leak-detector-level"", action=""store"", default=env.get('NOSE_LEAK_DETECTOR_LEVEL'), dest=""leak_detector_level"", help=""Level at which to detect leaks and report memory deltas "" ""(0=None, 1=Dir, 2=Module, 3=TestCaseClass, 4=Test)"") parser.add_option(""--leak-detector-report-delta"", action=""store_true"", default=env.get('NOSE_LEAK_DETECTOR_REPORT_DELTA'), dest=""leak_detector_report_delta"", help="""") parser.add_option(""--leak-detector-patch-mock"", action=""store_true"", default=env.get('NOSE_LEAK_DETECTOR_PATCH_MOCK', True), dest=""leak_detector_patch_mock"", help="""") parser.add_option(""--leak-detector-add-traceback"", action=""store_true"", default=env.get('NOSE_LEAK_DETECTOR_SAVE_TRACEBACK', False), dest=""leak_detector_save_traceback"", help="""") parser.add_option(""--leak-detector-ignore-pattern"", action=""append"", default=(list(filter(operator.truth, env.get('NOSE_LEAK_DETECTOR_IGNORE_PATTERNS', '').split(','))) or ['NOSE_LEAK_DETECTOR_IGNORE']), dest=""leak_detector_ignore_patterns"", help="""")" 3751,"def configure(self, options, conf): """""" Configure plugin. """""" super(LeakDetectorPlugin, self).configure(options, conf) if options.leak_detector_level: self.reporting_level = int(options.leak_detector_level) self.report_delta = options.leak_detector_report_delta self.patch_mock = options.leak_detector_patch_mock self.ignore_patterns = options.leak_detector_ignore_patterns self.save_traceback = options.leak_detector_save_traceback self.multiprocessing_enabled = bool(getattr(options, 'multiprocess_workers', False))" 3752,"def bind(self, instance, auto=False): """""" Bind deps to instance :param instance: :param auto: follow update of DI and refresh binds once we will get something new :return: """""" methods = [ (m, cls.__dict__[m]) for cls in inspect.getmro(type(instance)) for m in cls.__dict__ if inspect.isfunction(cls.__dict__[m]) ] try: deps_of_endpoints = [(method_ptr, self.entrypoint_deps(method_ptr)) for (method_name, method_ptr) in methods] for (method_ptr, method_deps) in deps_of_endpoints: if len(method_deps) > 0: method_ptr(instance, **method_deps) except KeyError: pass if auto and instance not in self.current_scope.get_auto_bind_list(): self.current_scope.auto_bind(instance) return instance" 3753,"def map_dict(self, dict_entity): """""" map dict_entity to current instance(self) """""" self.dict_entity = dict_entity Entity.map(self, self.dict_entity)" 3754,"def as_dict(self): """""" create a dict based on class attributes """""" odict = OrderedDict() for name in self._order: attr_value = getattr(self, name) if isinstance(attr_value, List): _list = [] for item in attr_value: _list.append((item.as_dict() if isinstance(item, Entity) else item)) odict[name] = _list elif isinstance(attr_value, Entity): odict[name] = attr_value.as_dict() else: odict[name] = getattr(self, name) return odict" 3755,"def map(cls, dict_entity): """""" staticmethod which will be used in recursive mode in order to map dict to instance """""" for key, value in dict_entity.items(): if hasattr(cls, key): if isinstance(value, list): _list = getattr(cls, key) if isinstance(_list.expected_type, list): for _dict in value: _list.append(cls.map(_list.typeof(), _dict)) elif isinstance(value, dict): attr = getattr(cls, key) instance = attr.expected_type() Entity.map(instance, value) setattr(cls, key, instance) else: setattr(cls, key, value) else: setattr(cls, key, value)" 3756,"def generateParams(rawfilepath, 
outputpath, isolationWindow, coElute): """"""Generates a string containing the parameters for a pParse parameter file but doesn't write any file yet. :param rawfilepath: location of the thermo "".raw"" file :param outputpath: path to the output directory of pParse :param isolationWindow: MSn isolation window that was used for the aquisition of the specified thermo raw file :param coElute: 0 or 1, see ""[Advanced Options]"" below :returns: string containing pParse parameters .. note: # pParse.para params template # For help: mail to tuhuijun@ict.ac.cn # Time: 2014.12.08 [Basic Options] datapath = C:\filedirectory\filename logfilepath = C:\filedirectory outputpath = C:\filedirectory [Advanced Options] co-elute = 1 # 0, output single precursor for single scan; # 1, output all co-eluted precursors. input_format = raw # raw / ms1 isolation_width = 1.6 # 2 / 2.5 / 3 / 4 mars_threshold = -0.5 ipv_file = .\IPV.txt trainingset = EmptyPath [Internal Switches] output_mars_y = 0 delete_msn = 0 output_mgf = 1 output_pf = 1 debug_mode = 0 check_activationcenter = 1 output_all_mars_y = 0 rewrite_files = 0 export_unchecked_mono = 0 cut_similiar_mono = 1 mars_model = 4 output_trainingdata = 0 """""" output = str() #Basic options output = '\n'.join([output, ' = '.join(['datapath', rawfilepath])]) output = '\n'.join([output, ' = '.join(['logfilepath', outputpath])]) output = '\n'.join([output, ' = '.join(['outputpath', outputpath])]) #Advanced options output = '\n'.join([output, ' = '.join(['co-elute', str(coElute)])]) output = '\n'.join([output, ' = '.join(['input_format', 'raw'])]) output = '\n'.join([output, ' = '.join(['isolation_width', str(isolationWindow)] )]) output = '\n'.join([output, ' = '.join(['mars_threshold', '-0.5'])]) output = '\n'.join([output, ' = '.join(['ipv_file', '.\IPV.txt'])]) output = '\n'.join([output, ' = '.join(['trainingset', 'EmptyPath'])]) #Internal Switches output = '\n'.join([output, ' = '.join(['output_mars_y', '0'])]) output = '\n'.join([output, ' = '.join(['delete_msn', '0'])]) output = '\n'.join([output, ' = '.join(['output_mgf', '1'])]) output = '\n'.join([output, ' = '.join(['output_pf', '0'])]) output = '\n'.join([output, ' = '.join(['debug_mode', '0'])]) output = '\n'.join([output, ' = '.join(['check_activationcenter', '1'])]) output = '\n'.join([output, ' = '.join(['output_all_mars_y', '0'])]) output = '\n'.join([output, ' = '.join(['rewrite_files', '0'])]) output = '\n'.join([output, ' = '.join(['export_unchecked_mono', '0'])]) output = '\n'.join([output, ' = '.join(['cut_similiar_mono', '1'])]) output = '\n'.join([output, ' = '.join(['mars_model', '4'])]) output = '\n'.join([output, ' = '.join(['output_trainingdata', '0'])]) return output" 3757,"def writeParams(rawfilepath, outputpath, isolationWindow, coElute=0): """"""Generate and write a pParse parameter file. :param rawfilepath: location of the thermo "".raw"" file :param outputpath: path to the output directory of pParse :param isolationWindow: MSn isolation window that was used for the aquisition of the specified thermo raw file :param coElute: :returns: file path of the pParse parameter file """""" paramText = generateParams(rawfilepath, outputpath, isolationWindow, coElute) filename, fileext = os.path.splitext(os.path.basename(rawfilepath)) paramPath = aux.joinpath(outputpath, filename+'.pparse.para') with open(paramPath, 'wb') as openfile: openfile.write(paramText) return paramPath" 3758,"def execute(paramPath, executable='pParse.exe'): """"""Execute pParse with the specified parameter file. 
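Illustrative workflow combining :func:`writeParams` and :func:`execute` (a sketch; the file paths and isolation window value below are placeholders, not values from the original documentation):

    paramPath = writeParams('C:/data/sample1.raw', 'C:/data/pparse_out', isolationWindow=1.6)
    returncode = execute(paramPath, executable='C:/pParse/pParse.exe')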
:param paramPath: location of the pParse parameter file :param executable: must specify the complete file path of the pParse.exe if its location is not in the ``PATH`` environment variable. :returns: :func:`subprocess.Popen` return code, 0 if pParse was executed successfully """""" procArgs = [executable, paramPath] ## run it ## proc = subprocess.Popen(procArgs, stderr=subprocess.PIPE) ## Do not wait until pParse finishes; start displaying its output immediately ## while True: out = proc.stderr.read(1) if out == '' and proc.poll() is not None: break if out != '': sys.stdout.write(out) sys.stdout.flush() return proc.returncode" 3759,"def cleanUpPparse(outputpath, rawfilename, mgf=False): """"""Delete temporary files generated by pparse, including the filetypes "".csv"", "".ms1"", "".ms2"", "".xtract"", the files ""pParsePlusLog.txt"" and ""pParse.para"" and optionally also the "".mgf"" file generated by pParse. .. warning: When the parameter ""mgf"" is set to ""True"" all files ending with "".mgf"" and containing the specified ""filename"" are deleted. This could potentially also affect MGF files not generated by pParse. :param outputpath: path to the output directory of pParse :param rawfilename: filename of the thermo "".raw"" file :param mgf: bool, if True the "".mgf"" file generated by pParse is also removed """""" extensions = ['csv', 'ms1', 'ms2', 'xtract'] filename, fileext = os.path.splitext(os.path.basename(rawfilename)) additionalFiles = [aux.joinpath(outputpath, 'pParsePlusLog.txt'), aux.joinpath(outputpath, filename+'.pparse.para'), ] for ext in extensions: filepath = aux.joinpath(outputpath, '.'.join([filename, ext])) if os.path.isfile(filepath): print('Removing file: ', filepath) os.remove(filepath) for filepath in additionalFiles: if os.path.isfile(filepath): print('Removing file: ', filepath) os.remove(filepath) if mgf: for _filename in os.listdir(outputpath): _basename, _fileext = os.path.splitext(_filename) if _fileext.lower() != '.mgf': continue if _basename.find(filename) != -1 and _basename != filename: filepath = aux.joinpath(outputpath, _filename) print('Removing file: ', filepath) os.remove(filepath)" 3760,"def launch_server(message_handler, options): """""" Launch a message server :param message_handler: The handler to execute for each message :param options: Application options for TCP, etc. """""" logger = logging.getLogger(__name__) if (options.debug): logger.setLevel(logging.DEBUG) if not options.monitor_port: logger.warning( ""Monitoring not enabled. 
No monitor-port option defined."") else: threading.Thread(target=launch_monitor_server, args=(options.host, options.monitor_port, logger)).start() # Create the server, binding to specified host on configured port logger.info( 'Starting server on host %s port %d Python version %s.%s.%s' % ((options.host, options.port) + sys.version_info[:3])) server = ThreadedTCPServer((options.host, options.port), StreamHandler.create_handler(message_handler, options.buffer_size, logger)) # Activate the server; this will keep running until you # interrupt the program with Ctrl-C try: server.serve_forever() except KeyboardInterrupt: logger.info(""Ctrl-C, exiting..."") os._exit(142)" 3761,"def launch_monitor_server(host, port, logger): """""" Launch a monitor server :param port: the monitor port :param logger: the logger """""" logger.info('Starting monitor server on host %s port %d' % (host, port)) server = ThreadedTCPServer((host, port), MonitorHandler) server.serve_forever()" 3762,"def create_handler(cls, message_handler, buffer_size, logger): """""" Class variables used here since the framework creates an instance for each connection :param message_handler: the MessageHandler used to process each message. :param buffer_size: the TCP buffer size. :param logger: the global logger. :return: this class. """""" cls.BUFFER_SIZE = buffer_size cls.message_handler = message_handler cls.logger = logger cls.message_handler.logger = logging.getLogger(message_handler.__class__.__name__) cls.message_handler.logger.setLevel(logger.level) return cls" 3763,"def handle(self): """""" The required handle method. """""" logger = StreamHandler.logger logger.debug(""handling requests with message handler %s "" % StreamHandler.message_handler.__class__.__name__) message_handler = StreamHandler.message_handler try: while True: logger.debug('waiting for more data') if not message_handler.handle(self.request, StreamHandler.BUFFER_SIZE): break logger.warning(""connection closed from %s"" % (self.client_address[0])) self.request.close() except: logger.exception(""connection closed from %s"" % (self.client_address[0])) finally: self.request.close()" 3764,"def receiveError(self, reasonCode, description): """""" Called when we receive a disconnect error message from the other side. """""" error = disconnectErrors.get(reasonCode, DisconnectError) self.connectionClosed(error(reasonCode, description)) SSHClientTransport.receiveError(self, reasonCode, description)" 3765,"def get_logger(name): """""" Special get_logger. Typically name is the name of the application using Balsa. :param name: name of the logger to get, which is usually the application name. Optionally it can be a python file name or path (e.g. __file__). :return: the logger for the logger name """""" # if name is a python file, or a path to a python file, extract the module name if name.endswith("".py""): name = name[:-3] if os.sep in name: name = name.split(os.sep)[-1] return logging.getLogger(name)" 3766,"def traceback_string(): """""" Helper function that formats most recent traceback. Useful when a program has an overall try/except and it wants to output the program trace to the log. 
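Illustrative top-level usage (a sketch; ``main`` and ``log`` are assumed to exist in the calling program):

    try:
        main()
    except Exception:
        # log the formatted traceback of the exception currently being handled
        log.error(traceback_string())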
:return: formatted traceback string (or None if no traceback available) """""" tb_string = None exc_type, exc_value, exc_traceback = traceback.sys.exc_info() if exc_type is not None: display_lines_list = [str(exc_value)] + traceback.format_tb(exc_traceback) tb_string = ""\n"".join(display_lines_list) return tb_string" 3767,"def lookup_path(bin_name): """"""Calls to external binaries can't depend on $PATH """""" paths = ('/usr/local/sbin/', '/usr/local/bin/', '/usr/sbin/', '/usr/bin/') for p in paths: fq_path = p + bin_name found = os.path.isfile(fq_path) and os.access(fq_path, os.X_OK) if found: return fq_path return False" 3768,"def get_message_handler(self, message_handlers): """""" Create a MessageHandler for the configured Encoder :param message_handlers: a dictionart of MessageHandler keyed by encoder :return: a MessageHandler """""" encoder = self.options.encoder try: return message_handlers[encoder] except KeyError: raise NotImplementedError('No RequestHandler defined for given encoder (%s).' % encoder)" 3769,"def handle(self, event): """""" Entry point to handle user events. :param event: Received event. See a full list `here `_. """""" callback = getattr(self, 'on_{event}'.format(event=event.event), None) callback(event)" 3770,"def form_valid(self, form): if self.__pk: obj = PurchasesAlbaran.objects.get(pk=self.__pk) self.request.albaran = obj form.instance.albaran = obj form.instance.validator_user = self.request.user raise Exception(""revisar StorageBatch"") """""" batch = StorageBatch.objects.filter(pk=form.data['batch']).first() if not batch: errors = form._errors.setdefault(""batch"", ErrorList()) errors.append(_(""Batch invalid"")) return super(LineAlbaranCreate, self).form_invalid(form) """""" # comprueba si el producto comprado requiere un valor de atributo especial product_final = ProductFinal.objects.filter(pk=form.data['product']).first() feature_special_value = None if not product_final: errors = form._errors.setdefault(""feature_special_value"", ErrorList()) errors.append(_(""Product not selected"")) return super(LineAlbaranCreate, self).form_invalid(form) elif product_final.product.feature_special: # es obligatorio la informacion de caracteristicas especiales if 'feature_special_value' not in form.data or not form.data['feature_special_value']: errors = form._errors.setdefault(""feature_special_value"", ErrorList()) errors.append(_(""Product needs information of feature special"")) return super(LineAlbaranCreate, self).form_invalid(form) else: feature_special_value = list(set(filter(None, form.data['feature_special_value'].split('\n')))) try: quantity = int(float(form.data['quantity'])) except ValueError: errors = form._errors.setdefault(""quantity"", ErrorList()) errors.append(_(""Quantity is not valid"")) return super(LineAlbaranCreate, self).form_invalid(form) if product_final.product.feature_special.unique: # mismo numero de caracteristicas que de cantidades # si el feature special esta marcado como 'unico' if len(feature_special_value) != quantity: errors = form._errors.setdefault(""feature_special_value"", ErrorList()) errors.append(_(""Quantity and values of feature special not equals"")) return super(LineAlbaranCreate, self).form_invalid(form) # no existen las caracteristicas especiales dadas de alta en el sistema elif ProductUnique.objects.filter(product_final=product_final, value__in=feature_special_value).exists(): errors = form._errors.setdefault(""feature_special_value"", ErrorList()) errors.append(_(""Some value of feature special exists"")) return 
super(LineAlbaranCreate, self).form_invalid(form) elif len(feature_special_value) != 1: errors = form._errors.setdefault(""feature_special_value"", ErrorList()) errors.append(_(""The special feature must be unique for all products"")) return super(LineAlbaranCreate, self).form_invalid(form) try: with transaction.atomic(): # save line albaran result = super(LineAlbaranCreate, self).form_valid(form) raise Exception(""Cambiar ProductStock por ProductUnique"") """""" if self.object.status != PURCHASE_ALBARAN_LINE_STATUS_REJECTED: # prepare stock ps = ProductStock() ps.product_final = product_final ps.line_albaran = self.object ps.batch = batch # save stock ps.quantity = self.object.quantity ps.save() if feature_special_value: # prepare product feature special if product_final.product.feature_special.unique: pfs = ProductUnique() pfs.product_final = product_final # save product featureSpecial and stock for fs in feature_special_value: pfs.pk = None pfs.value = fs pfs.save() else: pfs = ProductUnique.objects.filter( value=feature_special_value[0], product_final=product_final ).first() if pfs: pfs.stock_real += self.object.quantity else: pfs = ProductUnique() pfs.product_final = product_final pfs.value = feature_special_value[0] pfs.stock_real = self.object.quantity pfs.save() else: # product unique by default pfs = ProductUnique.objects.filter(product_final=product_final).first() if not pfs: pfs = ProductUnique() pfs.product_final = product_final pfs.stock_real = self.object.quantity else: pfs.stock_real += self.object.quantity pfs.save() """""" return result except IntegrityError as e: errors = form._errors.setdefault(""product"", ErrorList()) errors.append(_(""Integrity Error: {}"".format(e))) return super(LineAlbaranCreate, self).form_invalid(form)" 3771,"def get_form(self, form_class=None): form = super(LineAlbaranUpdate, self).get_form(form_class) raise Exception(""Cambiar ProductStock por ProductUnique"") """""" ps = ProductStock.objects.filter(line_albaran=self.object).first() if ps: # initial field form.fields['storage'].initial = ps.batch.zone.storage form.fields['zone'].initial = ps.batch.zone form.fields['batch'].initial = ps.batch """""" return form" 3772,"def _emplace_pmrna(mrnas, parent, strict=False): """"""Retrieve the primary mRNA and discard all others."""""" mrnas.sort(key=lambda m: (m.cdslen, m.get_attribute('ID'))) pmrna = mrnas.pop() if strict: parent.children = [pmrna] else: parent.children = [c for c in parent.children if c not in mrnas]" 3773,"def _emplace_transcript(transcripts, parent): """"""Retrieve the primary transcript and discard all others."""""" transcripts.sort(key=lambda t: (len(t), t.get_attribute('ID'))) pt = transcripts.pop() parent.children = [pt]" 3774,"def primary_mrna(entrystream, parenttype='gene'): """""" Select a single mRNA as a representative for each protein-coding gene. The primary mRNA is the one with the longest translation product. In cases where multiple isoforms have the same translated length, the feature ID is used for sorting. This function **does not** return only mRNA features, it returns all GFF3 entry types (pragmas, features, sequences, etc). The function **does** modify the gene features that pass through to ensure that they have at most a single mRNA feature. >>> reader = tag.GFF3Reader(tag.pkgdata('pdom-withseq.gff3')) >>> filter = tag.transcript.primary_mrna(reader) >>> for gene in tag.select.features(filter, type='gene'): ... 
assert gene.num_children == 1 """""" for entry in entrystream: if not isinstance(entry, tag.Feature): yield entry continue for parent in tag.select.features(entry, parenttype, traverse=True): mrnas = [f for f in parent.children if f.type == 'mRNA'] if len(mrnas) == 0: continue _emplace_pmrna(mrnas, parent) yield entry" 3775,"def _get_primary_type(ttypes, parent, logstream=stderr): """"""Check for multiple transcript types and, if possible, select one."""""" if len(ttypes) > 1: if logstream: # pragma: no branch message = '[tag::transcript::primary_transcript]' message += ' WARNING: feature {:s}'.format(parent.slug) message += ' has multiple associated transcript types' message += ' {}'.format(ttypes) print(message, file=logstream) if 'mRNA' not in ttypes: message = ( 'cannot resolve multiple transcript types if ""mRNA"" is' ' not one of those types {}'.format(ttypes) ) raise Exception(message) ttypes = ['mRNA'] return ttypes[0]" 3776,"def primary_transcript(entrystream, parenttype='gene', logstream=stderr): """""" Select a single transcript as a representative for each gene. This function is a generalization of the `primary_mrna` function that attempts, under certain conditions, to select a single transcript as a representative for each gene. If a gene encodes multiple transcript types, one of those types must be **mRNA** or the function will complain loudly and fail. For mRNAs, the primary transcript is selected according to translated length. For all other transcript types, the length of the transcript feature itself is used. I'd be eager to hear suggestions for alternative selection criteria. Like the `primary_mrna` function, this function **does not** return only transcript features. It **does** modify gene features to ensure that each has at most one transcript feature. >>> reader = tag.GFF3Reader(tag.pkgdata('psyllid-mixed-gene.gff3.gz')) >>> gene_filter = tag.select.features(reader, type='gene') >>> trans_filter = tag.transcript.primary_transcript(gene_filter) >>> for gene in trans_filter: ... assert gene.num_children == 1 In cases where the direct children of a gene feature have heterogenous types, the `primary_mrna` function will only discard mRNA features. This function, however, will discard all direct children of the gene that are not the primary transcript, including non-transcript children. This is a retty subtle distinction, and anecdotal experience suggests that cases in which the distinction actually matters are extremely rare. """""" for entry in entrystream: if not isinstance(entry, tag.Feature): yield entry continue for parent in tag.select.features(entry, parenttype, traverse=True): if parent.num_children == 0: continue transcripts = defaultdict(list) for child in parent.children: if child.type in type_terms: transcripts[child.type].append(child) if len(transcripts) == 0: continue ttypes = list(transcripts.keys()) ttype = _get_primary_type(ttypes, parent) transcript_list = transcripts[ttype] if ttype == 'mRNA': _emplace_pmrna(transcript_list, parent, strict=True) else: _emplace_transcript(transcript_list, parent) yield entry" 3777,"def parse_parent(docname): """""" Given a docname path, pick apart and return name of parent """""" lineage = docname.split('/') lineage_count = len(lineage) if docname == 'index': # This is the top of the Sphinx project parent = None elif lineage_count == 1: # This is a non-index doc in root, e.g. 
about parent = 'index' elif lineage_count == 2 and lineage[-1] == 'index': # This is blog/index, parent is the root parent = 'index' elif lineage_count == 2: # This is blog/about parent = lineage[0] + '/index' elif lineage[-1] == 'index': # This is blog/sub/index parent = '/'.join(lineage[:-2]) + '/index' else: # This should be blog/sub/about parent = '/'.join(lineage[:-1]) + '/index' return parent" 3778,"def parents(self, resources): """""" Split the path in name and get parents """""" if self.docname == 'index': # The root has no parents return [] parents = [] parent = resources.get(self.parent) while parent is not None: parents.append(parent) parent = resources.get(parent.parent) return parents" 3779,"def acquire(self, resources, prop_name): """""" Starting with self, walk until you find prop or None """""" # Instance custom_prop = getattr(self.props, prop_name, None) if custom_prop: return custom_prop # Parents...can't use acquire as have to keep going on acquireds for parent in self.parents(resources): acquireds = parent.props.acquireds if acquireds: # First try in the per-type acquireds rtype_acquireds = acquireds.get(self.rtype) if rtype_acquireds: prop_acquired = rtype_acquireds.get(prop_name) if prop_acquired: return prop_acquired # Next in the ""all"" section of acquireds all_acquireds = acquireds.get('all') if all_acquireds: prop_acquired = all_acquireds.get(prop_name) if prop_acquired: return prop_acquired return" 3780,"def template(self, resources): """""" Get the template from: YAML, hierarchy, or class """""" template_name = self.acquire(resources, 'template') if template_name: return template_name else: # We're putting an exception for ""resource"", the built-in # rtype/directive. We want it to work out-of-the-box without # requiring an _templates/resource.html in the docs project. # Instead, use the page.html the ships with Sphinx. if self.rtype == 'resource': return 'page' else: return self.rtype" 3781,"def find_prop_item(self, prop_name, prop_key, prop_value): """""" Look for a list prop with an item where key == value """""" # Image props are a sequence of dicts. We often need one of them. 
# Where one of the items has a dict key matching a value, and if # nothing matches, return None prop = getattr(self.props, prop_name, None) if prop: return next( (p for p in prop if getattr(p, prop_key) == prop_value), None ) return None" 3782,"def default() : """""" Get default shaman instance by ""data/trained.json"" """""" if Shaman._default_instance is not None : return Shaman._default_instance with open((os.path.dirname(__file__) or '.') + '/data/trained.json') as file : tset = json.loads(file.read()) Shaman._default_instance = Shaman(tset) return Shaman._default_instance" 3783,"def detect(self, code) : """""" Detect language with code """""" keywords = KeywordFetcher.fetch( code ) probabilities = {} for keyword in keywords : if keyword not in self.trained_set['keywords'] : continue data = self.trained_set['keywords'][keyword] p_avg = sum(data.values()) / len(data) # Average probability of all languages for language, probability in data.items() : # By Naïve Bayes Classification p = probability / p_avg probabilities[ language ] = probabilities.get(language, 0) + math.log(1 + p) for pattern, data in self.trained_set['patterns'].items() : matcher = PatternMatcher(pattern) p0 = matcher.getratio(code) for language, p_avg in data.items() : if language not in probabilities : continue p = 1 - abs(p_avg - p0) probabilities[ language ] *= p # Convert `log` operated probability to percentile sum_val = 0 for language, p in probabilities.items() : sum_val += math.pow(math.e / 2, p) for language, p in probabilities.items() : probabilities[language] = math.pow(math.e / 2, p) / sum_val * 100 return sorted(probabilities.items(), key=lambda a: a[1], reverse=True)" 3784,"def fetch(code) : """""" Fetch keywords by Code """""" ret = {} code = KeywordFetcher._remove_strings(code) result = KeywordFetcher.prog.findall(code) for keyword in result : if len(keyword) <= 1: continue # Ignore single-length word if keyword.isdigit(): continue # Ignore number if keyword[0] == '-' or keyword[0] == '*' : keyword = keyword[1:] # Remove first char if string is starting by '-' or '*' (Pointer or Negative numbers) if keyword[-1] == '-' or keyword[-1] == '*' : keyword = keyword[0:-1] # Remove last char if string is finished by '-' or '*' if len(keyword) <= 1: continue ret[ keyword ] = ret.get(keyword, 0) + 1 # `ret[ keyword ] += 1` with initial value return ret" 3785,"def _remove_strings(code) : """""" Remove strings in code """""" removed_string = """" is_string_now = None for i in range(0, len(code)-1) : append_this_turn = False if code[i] == ""'"" and (i == 0 or code[i-1] != '\\') : if is_string_now == ""'"" : is_string_now = None elif is_string_now == None : is_string_now = ""'"" append_this_turn = True elif code[i] == '""' and (i == 0 or code[i-1] != '\\') : if is_string_now == '""' : is_string_now = None elif is_string_now == None : is_string_now = '""' append_this_turn = True if is_string_now == None or append_this_turn == True : removed_string += code[i] return removed_string" 3786,"def getratio(self, code) : """""" Get ratio of code and pattern matched """""" if len(code) == 0 : return 0 code_replaced = self.prog.sub('', code) return (len(code) - len(code_replaced)) / len(code)" 3787,"def loadXmlProperty(self, xprop): """""" Loads an XML property that is a child of the root data being loaded. :param xprop | """""" if xprop.tag == 'property': value = self.dataInterface().fromXml(xprop[0]) self._xmlData[xprop.get('name', '')] = value" 3788,"def toXml(self, xparent=None): """""" Converts this object to XML. 
:param xparent | || None :return """""" if xparent is None: xml = ElementTree.Element('object') else: xml = ElementTree.SubElement(xparent, 'object') xml.set('class', self.__class__.__name__) for name, value in self._xmlData.items(): xprop = ElementTree.SubElement(xml, 'property') xprop.set('name', name) XmlDataIO.toXml(value, xprop) return xml" 3789,"def fromXml(cls, xml): """""" Restores an object from XML. :param xml | :return subclass of """""" clsname = xml.get('class') if clsname: subcls = XmlObject.byName(clsname) if subcls is None: inst = MissingXmlObject(clsname) else: inst = subcls() else: inst = cls() inst.loadXml(xml) return inst" 3790,"def fromXml(cls, elem): """""" Converts the inputted element to a Python object by looking through the IO addons for the element's tag. :param elem | :return """""" if elem is None: return None addon = cls.byName(elem.tag) if not addon: raise RuntimeError('{0} is not a supported XML tag'.format(elem.tag)) return addon.load(elem)" 3791,"def toXml(cls, data, xparent=None): """""" Converts the inputted element to a Python object by looking through the IO addons for the element's tag. :param data | xparent | || None :return """""" if data is None: return None # store XmlObjects separately from base types if isinstance(data, XmlObject): name = 'object' else: name = type(data).__name__ addon = cls.byName(name) if not addon: raise RuntimeError('{0} is not a supported XML tag'.format(name)) return addon.save(data, xparent)" 3792,"def save(self, data, xparent=None): """""" Parses the element from XML to Python. :param data | xparent | || None :return """""" if xparent is not None: elem = ElementTree.SubElement(xparent, 'bool') else: elem = ElementTree.Element('bool') elem.text = nstr(data) return elem" 3793,"def load(self, elem): """""" Converts the inputted dict tag to Python. :param elem | :return """""" self.testTag(elem, 'dict') out = {} for xitem in elem: key = xitem.get('key') try: value = XmlDataIO.fromXml(xitem[0]) except IndexError: value = None out[key] = value return out" 3794,"def save(self, data, xparent=None): """""" Parses the element from XML to Python. :param data | xparent | || None :return """""" if xparent is not None: elem = ElementTree.SubElement(xparent, 'dict') else: elem = ElementTree.Element('dict') for key, value in sorted(data.items()): xitem = ElementTree.SubElement(elem, 'item') xitem.set('key', nstr(key)) XmlDataIO.toXml(value, xitem) return elem" 3795,"def load(self, elem): """""" Converts the inputted list tag to Python. :param elem | :return """""" self.testTag(elem, 'list') out = [] for xitem in elem: out.append(XmlDataIO.fromXml(xitem)) return out" 3796,"def save(self, data, xparent=None): """""" Parses the element from XML to Python. :param data | xparent | || None :return """""" if xparent is not None: elem = ElementTree.SubElement(xparent, 'list') else: elem = ElementTree.Element('list') for item in data: XmlDataIO.toXml(item, elem) return elem" 3797,"def load(self, elem): """""" Converts the inputted set tag to Python. :param elem | :return """""" self.testTag(elem, 'set') out = set() for xitem in elem: out.add(XmlDataIO.fromXml(xitem)) return out" 3798,"def load(self, elem): """""" Converts the inputted string tag to Python. :param elem | :return """""" self.testTag(elem, 'str') return elem.text if elem.text is not None else ''" 3799,"def template_substitute(text, **kwargs): """""" Replace placeholders in text by using the data mapping. Other placeholders that is not represented by data is left untouched. 
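For example (illustrative values; only the ``{name}`` placeholder has data, so ``{missing}`` stays as-is):

    >>> template_substitute('Hello {name}, {missing} is untouched', name='World')
    'Hello World, {missing} is untouched'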
:param text: Text to search and replace placeholders. :param data: Data mapping/dict for placeholder key and values. :return: Potentially modified text with replaced placeholders. """""" for name, value in kwargs.items(): placeholder_pattern = ""{%s}"" % name if placeholder_pattern in text: text = text.replace(placeholder_pattern, value) return text" 3800,"def text_remove_empty_lines(text): """""" Whitespace normalization: - Strip empty lines - Strip trailing whitespace """""" lines = [ line.rstrip() for line in text.splitlines() if line.strip() ] return ""\n"".join(lines)" 3801,"def text_normalize(text): """""" Whitespace normalization: - Strip empty lines - Strip leading whitespace in a line - Strip trailing whitespace in a line - Normalize line endings """""" # if not isinstance(text, str): if isinstance(text, bytes): # -- MAYBE: command.ouput => bytes, encoded stream output. text = codecs.decode(text) lines = [ line.strip() for line in text.splitlines() if line.strip() ] return ""\n"".join(lines)" 3802,"def _wva(values, weights): """""" Calculates a weighted average """""" assert len(values) == len(weights) and len(weights) > 0 return sum([mul(*x) for x in zip(values, weights)]) / sum(weights)" 3803,"def mode_interactive(options): """"""Interactive Mode: terminal prompts repeatedly for a url to fetch"""""" articles = set() failures = set() url = input('Enter a URL: ') while url != '': article = _get_article(url=url, bodyLines=options.bodyLines, debug=options.debug) if (article): articles.add(article) else: failures.add(url) url = input('Enter a URL (press enter to end): ') _output(articles, options.outputFile, failures, options.failureFile)" 3804,"def mode_clipboard_watch(options): """"""Clipboard Watch Mode: watches for a new string on the clipboard, and tries to fetch that URL"""""" articles = set() failures = set() print('Hello, this is news-scraper. Copy a URL to start!') print('To quit, press CTRL+C in this window.\n') url = pyperclip.paste() while True: try: tmp_value = pyperclip.paste() if tmp_value != url: url = tmp_value print('Fetching article...') if options.debug: print(""Value changed: %s"" % str(url)[:100]) article = _get_article(url=url, bodyLines=options.bodyLines, debug=options.debug) if (article): articles.add(article) else: failures.add(url) time.sleep(0.2) except KeyboardInterrupt: break _output(articles, options.outputFile, failures, options.failureFile)" 3805,"def execute_one_to_many_job(parent_class=None, get_unfinished_kwargs=None, get_unfinished_limit=None, parser_func=None, parser_func_kwargs=None, build_url_func_kwargs=None, downloader_func=None, downloader_func_kwargs=None, post_process_response_func=None, post_process_response_func_kwargs=None, process_item_func_kwargs=None, logger=None, sleep_time=None): """""" A standard one-to-many crawling workflow. :param parent_class: :param get_unfinished_kwargs: :param get_unfinished_limit: :param parser_func: html parser function. :param parser_func_kwargs: other keyword arguments for ``parser_func`` :param build_url_func_kwargs: other keyword arguments for ``parent_class().build_url(**build_url_func_kwargs)`` :param downloader_func: a function that taking ``url`` as first arg, make http request and return response/html. :param downloader_func_kwargs: other keyword arguments for ``downloader_func`` :param post_process_response_func: a callback function taking response/html as first argument. You can put any logic in it. For example, you can make it sleep if you detect that you got banned. 
:param post_process_response_func_kwargs: other keyword arguments for ``post_process_response_func`` :param process_item_func_kwargs: other keyword arguments for ``ParseResult().process_item(**process_item_func_kwargs)`` :param logger: :param sleep_time: default 0, wait time before making each request. """""" # prepare arguments get_unfinished_kwargs = prepare_kwargs(get_unfinished_kwargs) parser_func_kwargs = prepare_kwargs(parser_func_kwargs) build_url_func_kwargs = prepare_kwargs(build_url_func_kwargs) downloader_func_kwargs = prepare_kwargs(downloader_func_kwargs) post_process_response_func_kwargs = prepare_kwargs( post_process_response_func_kwargs) process_item_func_kwargs = prepare_kwargs(process_item_func_kwargs) if post_process_response_func is None: def post_process_response_func(response, **kwargs): pass if not isinstance(logger, SpiderLogger): raise TypeError if sleep_time is None: sleep_time = 0 # do the real job query_set = parent_class.get_all_unfinished(**get_unfinished_kwargs) if get_unfinished_limit is not None: query_set = query_set.limit(get_unfinished_limit) todo = list(query_set) logger.log_todo_volumn(todo) for parent_instance in todo: url = parent_instance.build_url(**build_url_func_kwargs) logger.log_to_crawl_url(url) logger.log_sleeper(sleep_time) time.sleep(sleep_time) try: response_or_html = downloader_func(url, **downloader_func_kwargs) if isinstance(response_or_html, string_types): parser_func_kwargs[""html""] = response_or_html else: parser_func_kwargs[""response""] = response_or_html post_process_response_func( response_or_html, **post_process_response_func_kwargs) except Exception as e: logger.log_error(e) continue try: parse_result = parser_func( parent=parent_instance, **parser_func_kwargs ) parse_result.process_item(**process_item_func_kwargs) logger.log_status(parse_result) except Exception as e: logger.log_error(e) continue" 3806,"def parse_connection_string_psycopg2(connection_string): """""" parses psycopg2 consumable connection string :param connection_string: :return: return dictionary with connection string parts """""" conn_prepared = {} conn_parsed = urlparse(connection_string) if not conn_parsed.hostname: _re_dbstr = re.compile(r'\bhost=(?P[0-9a-zA-Z_.!@#$%^&*()~]+)|' r'dbname=(?P[0-9a-zA-Z_.!@#$%^&*()~]+)|' r'port=(?P[0-9a-zA-Z_.!@#$%^&*()~]+)|' r'user=(?P[0-9a-zA-Z_.!@#$%^&*()~]+)|' r'password=(?P[0-9a-zA-Z_.!@#$%^&*()~]+)\b', re.IGNORECASE) for match in _re_dbstr.finditer(connection_string): match_dict = match.groupdict() if match_dict['host']: conn_prepared['host'] = match_dict['host'] if match_dict['port']: conn_prepared['port'] = match_dict['port'] if match_dict['dbname']: conn_prepared['dbname'] = match_dict['dbname'] if match_dict['user']: conn_prepared['user'] = match_dict['user'] if match_dict['password']: conn_prepared['password'] = match_dict['password'] else: conn_prepared = { 'host': conn_parsed.hostname, 'port': conn_parsed.port, 'dbname': conn_parsed.path, 'user': conn_parsed.username, 'password': conn_parsed.password } return conn_prepared" 3807,"def get_pgpm_db_version(cls, cur, schema_name='_pgpm'): """""" returns current version of pgpm schema :return: tuple of major, minor and patch components of version """""" cls.set_search_path(cur, schema_name) cur.execute(""SELECT _find_schema('{0}', '{1}')"" .format(schema_name, 'x')) # TODO: make it work with the way it's written below. 
currently throws error as func returns record # without column list # cur.callproc('_find_schema', [schema_name, 'x']) pgpm_v_ext = tuple(cur.fetchone()[0][1:-1].split(',')) return pgpm_v_ext[2], pgpm_v_ext[3], pgpm_v_ext[4]" 3808,"def create_db_schema(cls, cur, schema_name): """""" Create Postgres schema script and execute it on cursor """""" create_schema_script = ""CREATE SCHEMA {0} ;\n"".format(schema_name) cur.execute(create_schema_script)" 3809,"def grant_usage_privileges(cls, cur, schema_name, roles): """""" Grants usage and execute privileges on the schema to the given roles """""" cur.execute('GRANT USAGE ON SCHEMA {0} TO {1};' 'GRANT EXECUTE ON ALL FUNCTIONS IN SCHEMA {0} TO {1};' .format(schema_name, roles))" 3810,"def grant_usage_install_privileges(cls, cur, schema_name, roles): """""" Grants read/write table, execute function and sequence usage privileges on the schema to the given roles """""" cur.execute('GRANT SELECT, INSERT, UPDATE, DELETE ON ALL TABLES IN SCHEMA {0} TO {1};' 'GRANT EXECUTE ON ALL FUNCTIONS IN SCHEMA {0} TO {1};' 'GRANT USAGE, SELECT ON ALL SEQUENCES IN SCHEMA {0} TO {1};' .format(schema_name, roles))" 3811,"def grant_default_usage_install_privileges(cls, cur, schema_name, roles): """""" Alters default privileges so the given roles keep table, function and sequence access on objects created later in the schema """""" cur.execute('ALTER DEFAULT PRIVILEGES IN SCHEMA {0} ' 'GRANT SELECT, INSERT, UPDATE, DELETE ON TABLES TO {1};' 'ALTER DEFAULT PRIVILEGES IN SCHEMA {0} GRANT EXECUTE ON FUNCTIONS TO {1};' 'ALTER DEFAULT PRIVILEGES IN SCHEMA {0} ' 'GRANT USAGE, SELECT ON SEQUENCES TO {1};' .format(schema_name, roles))" 3812,"def revoke_all(cls, cur, schema_name, roles): """""" Revoke all privileges from schema, tables, sequences and functions for a specific role """""" cur.execute('REVOKE ALL ON SCHEMA {0} FROM {1};' 'REVOKE ALL ON ALL TABLES IN SCHEMA {0} FROM {1};' 'REVOKE ALL ON ALL SEQUENCES IN SCHEMA {0} FROM {1};' 'REVOKE ALL ON ALL FUNCTIONS IN SCHEMA {0} FROM {1};'.format(schema_name, roles))" 3813,"def schema_exists(cls, cur, schema_name): """""" Check if schema exists """""" cur.execute(""SELECT EXISTS (SELECT schema_name FROM information_schema.schemata WHERE schema_name = '{0}');"" .format(schema_name)) return cur.fetchone()[0]" 3814,"def pandas(self): """"""Return a Pandas dataframe."""""" if self._pandas is None: self._pandas = pd.DataFrame().from_records(self.list_of_dicts) return self._pandas" 3815,"def translate(self, dialect): """"""Return a copy of this ResultSet in a different dialect."""""" new_resultset = copy(self) new_resultset.dialect = dialect for result in new_resultset: for dimensionvalue in result.dimensionvalues: dimensionvalue.value = dimensionvalue.translate(dialect) return new_resultset" 3816,"def append(self, val): """"""Connect any new results to the resultset. This is where all the heavy lifting is done for creating results: - We add a datatype here, so that each result can handle validation etc independently. This is so that scraper authors don't need to worry about creating and passing around datatype objects. - As the scraper author yields result objects, we append them to a resultset. - This is also where we normalize dialects. 
"""""" val.resultset = self val.dataset = self.dataset # Check result dimensions against available dimensions for this dataset if val.dataset: dataset_dimensions = self.dataset.dimensions for k, v in val.raw_dimensions.items(): if k not in dataset_dimensions: d = Dimension(k) else: d = dataset_dimensions[k] # Normalize if we have a datatype and a foreign dialect normalized_value = unicode(v) if d.dialect and d.datatype: if d.dialect in d.datatype.dialects: for av in d.allowed_values: # Not all allowed_value have all dialects if unicode(v) in av.dialects.get(d.dialect, []): normalized_value = av.value # Use first match # We do not support multiple matches # This is by design. break # Create DimensionValue object if isinstance(v, DimensionValue): dim = v v.value = normalized_value else: if k in dataset_dimensions: dim = DimensionValue(normalized_value, d) else: dim = DimensionValue(normalized_value, Dimension()) val.dimensionvalues.append(dim) # Add last list of dimension values to the ResultSet # They will usually be the same for each result self.dimensionvalues = val.dimensionvalues super(ResultSet, self).append(val)" 3817,"def tuple(self): """""" Tuple conversion to (value, dimensions), e.g.: (123, {dimension_1: ""foo"", dimension_2: ""bar""}) """""" return (self.value, {dv.id: dv.value for dv in self.dimensionvalues})" 3818,"def allowed_values(self): """"""Return a list of allowed values."""""" if self._allowed_values is None: self._allowed_values = ValueList() for val in self.scraper._fetch_allowed_values(self): if isinstance(val, DimensionValue): self._allowed_values.append(val) else: self._allowed_values.append(DimensionValue(val, Dimension())) return self._allowed_values" 3819,"def append(self, val): """"""Connect any new items to the scraper."""""" val.scraper = self.scraper val._collection_path = copy(self.collection._collection_path) val._collection_path.append(val) super(ItemList, self).append(val)" 3820,"def _move_here(self): """"""Move the cursor to this item."""""" cu = self.scraper.current_item # Already here? if self is cu: return # A child? if cu.items and self in cu.items: self.scraper.move_to(self) return # A parent? if self is cu.parent: self.scraper.move_up() # A sibling? if self.parent and self in self.parent.items: self.scraper.move_up() self.scraper.move_to(self) return # Last resort: Move to top and all the way down again self.scraper.move_to_top() for step in self.path: self.scraper.move_to(step)" 3821,"def items(self): """"""ItemList of children."""""" if self.scraper.current_item is not self: self._move_here() if self._items is None: self._items = ItemList() self._items.scraper = self.scraper self._items.collection = self for i in self.scraper._fetch_itemslist(self): i.parent = self if i.type == TYPE_DATASET and i.dialect is None: i.dialect = self.scraper.dialect self._items.append(i) return self._items" 3822,"def _hash(self): """"""Return a hash for the current query. This hash is _not_ a unique representation of the dataset! """""" dump = dumps(self.query, sort_keys=True) if isinstance(dump, str): dump = dump.encode('utf-8') return md5(dump).hexdigest()" 3823,"def fetch_next(self, query=None, **kwargs): """"""Generator to yield data one row at a time. Yields a Result, not the entire ResultSet. The containing ResultSet can be accessed through `Result.resultset`, but be careful not to manipulate the ResultSet until it is populated (when this generator is empty), or you may see unexpected results. 
"""""" if query: self.query = query hash_ = self._hash if hash_ in self._data: for result in self._data[hash_]: yield result if self.scraper.current_item is not self: self._move_here() self._data[hash_] = ResultSet() self._data[hash_].dialect = self.dialect self._data[hash_].dataset = self for result in self.scraper._fetch_data(self, query=self.query, **kwargs): self._data[hash_].append(result) yield result" 3824,"def dimensions(self): """"""Available dimensions, if defined."""""" # First of all: Select this dataset if self.scraper.current_item is not self: self._move_here() if self._dimensions is None: self._dimensions = DimensionList() for d in self.scraper._fetch_dimensions(self): d.dataset = self d.scraper = self.scraper self._dimensions.append(d) return self._dimensions" 3825,"def shape(self): """"""Compute the shape of the dataset as (rows, cols)."""""" if not self.data: return (0, 0) return (len(self.data), len(self.dimensions))" 3826,"def on(cls, hook): """"""Hook decorator."""""" def decorator(function_): cls._hooks[hook].append(function_) return function_ return decorator" 3827,"def move_to_top(self): """"""Move to root item."""""" self.current_item = self.root for f in self._hooks[""top""]: f(self) return self" 3828,"def move_up(self): """"""Move up one level in the hierarchy, unless already on top."""""" if self.current_item.parent is not None: self.current_item = self.current_item.parent for f in self._hooks[""up""]: f(self) if self.current_item is self.root: for f in self._hooks[""top""]: f(self) return self" 3829,"def move_to(self, id_): """"""Select a child item by id (str), reference or index."""""" if self.items: try: self.current_item = self.items[id_] except (StopIteration, IndexError, NoSuchItem): raise NoSuchItem for f in self._hooks[""select""]: f(self, id_) return self" 3830,"def descendants(self): """"""Recursively return every dataset below current item."""""" for i in self.current_item.items: self.move_to(i) if i.type == TYPE_COLLECTION: for c in self.children: yield c else: yield i self.move_up()" 3831,"def children(self): """"""Former, misleading name for descendants."""""" from warnings import warn warn(""Deprecated. Use Scraper.descendants."", DeprecationWarning) for descendant in self.descendants: yield descendant" 3832,"def make_python_name(s, default=None, number_prefix='N',encoding=""utf-8""): """"""Returns a unicode string that can be used as a legal python identifier. 
:Arguments: *s* string *default* use *default* if *s* is ``None`` *number_prefix* string to prepend if *s* starts with a number """""" if s in ('', None): s = default s = str(s) s = re.sub(""[^a-zA-Z0-9_]"", ""_"", s) if not re.match('\d', s) is None: s = number_prefix+s return unicode(s, encoding)" 3833,"def recarray(self): """"""Returns data as :class:`numpy.recarray`."""""" return numpy.rec.fromrecords(self.records, names=self.names)" 3834,"def main(arguments=None): """""" The main function used when ``yaml_to_database.py`` when installed as a cl tool """""" # setup the command-line util settings su = tools( arguments=arguments, docString=__doc__, logLevel=""WARNING"", options_first=False, projectName=False ) arguments, settings, log, dbConn = su.setup() # unpack remaining cl arguments using `exec` to setup the variable names # automatically for arg, val in arguments.iteritems(): if arg[0] == ""-"": varname = arg.replace(""-"", """") + ""Flag"" else: varname = arg.replace(""<"", """").replace("">"", """") if isinstance(val, str) or isinstance(val, unicode): exec(varname + "" = '%s'"" % (val,)) else: exec(varname + "" = %s"" % (val,)) if arg == ""--dbConn"": dbConn = val log.debug('%s = %s' % (varname, val,)) if os.path.isfile(pathToYaml): from fundamentals.mysql import yaml_to_database # PARSE YAML FILE CONTENTS AND ADD TO DATABASE yaml2db = yaml_to_database( log=log, settings=settings, dbConn=dbConn ) yaml2db.add_yaml_file_content_to_database( filepath=pathToYaml, deleteFile=deleteFlag ) basename = os.path.basename(pathToYaml) print ""Content of %(basename)s added to database"" % locals() else: from fundamentals.mysql import yaml_to_database yaml2db = yaml_to_database( log=log, settings=settings, dbConn=dbConn, pathToInputDir=pathToYaml, deleteFiles=deleteFlag ) yaml2db.ingest() print ""Content of %(pathToYaml)s directory added to database"" % locals() return" 3835,"def ingest(self): """""" *ingest the contents of the directory of yaml files into a database* **Return:** - None **Usage:** To import an entire directory of yaml files into a database, use the following: .. code-block:: python from fundamentals.mysql import yaml_to_database yaml2db = yaml_to_database( log=log, settings=settings, dbConn=dbConn, pathToInputDir=""/path/to/yaml/directory"", deleteFiles=False ) yaml2db.ingest() """""" self.log.debug('starting the ``ingest`` method') for d in os.listdir(self.pathToInputDir): if os.path.isfile(os.path.join(self.pathToInputDir, d)) and ""yaml"" in d.lower(): self.add_yaml_file_content_to_database( filepath=os.path.join(self.pathToInputDir, d), deleteFile=self.deleteFiles ) self.log.debug('completed the ``ingest`` method') return None" 3836,"def add_yaml_file_content_to_database( self, filepath, deleteFile=False ): """"""*given a file to a yaml file, add yaml file content to database* **Key Arguments:** - ``filepath`` -- the path to the yaml file - ``deleteFile`` -- delete the yaml file when its content has been added to the database. Default *False* **Return:** - None **Usage:** To parse and import the contents of a single yaml file into the database, use the following: .. 
code-block:: python from fundamentals.mysql import yaml_to_database # PARSE YAML FILE CONTENTS AND ADD TO DATABASE yaml2db = yaml_to_database( log=log, settings=settings, dbConn=dbConn ) yaml2db.add_yaml_file_content_to_database( filepath=${1:""/path/to/file.yaml""}, deleteFile=True ) """""" self.log.debug( 'completed the ````add_yaml_file_content_to_database`` method') import codecs try: self.log.debug(""attempting to open the file %s"" % (filepath,)) readFile = codecs.open(filepath, encoding='utf-8', mode='r') thisData = readFile.read() readFile.close() except IOError, e: message = 'could not open the file %s' % (filepath,) self.log.critical(message) raise IOError(message) readFile.close() matchObject = re.finditer( r'(^|\n)(?P[^\:]*)\:\s(?P.*?)(\n|$)', thisData, flags=re.M | re.S # re.S ) yamlContent = {} for match in matchObject: if match.group(""value"")[0] == '""' and match.group(""value"")[-1] == '""': v = match.group(""value"")[1:-1] elif match.group(""value"")[0] == ""'"" and match.group(""value"")[-1] == ""'"": v = match.group(""value"")[1:-1] else: v = match.group(""value"") yamlContent[match.group(""key"")] = v if ""table"" not in yamlContent: self.log.warning( 'A table value is need in the yaml content to indicate which database table to add the content to: %(filepath)s' % locals()) return None # NOTE THERE MAY BE MORE THAN ONE DATABASE TABLE dbTablesTmp = yamlContent[""table""].split("","") del yamlContent[""table""] dbTables = [] dbTables[:] = [d.strip() for d in dbTablesTmp] # UNSHORTEN URL try: r = requests.head(yamlContent[""url""], allow_redirects=True) yamlContent[""url""] = r.url except: pass yamlContent[""original_yaml_path""] = filepath if ""url"" in yamlContent: uniqueKeyList = [""url""] else: uniqueKeyList = [] for t in dbTables: convert_dictionary_to_mysql_table( dbConn=self.dbConn, log=self.log, dictionary=yamlContent, dbTableName=t, uniqueKeyList=uniqueKeyList, dateModified=True, returnInsertOnly=False, replace=True ) if deleteFile: os.remove(filepath) self.log.debug( 'completed the ``add_yaml_file_content_to_database`` method') return None" 3837,"def data_type(self, data_type): """"""Sets the data_type of this Option. :param data_type: The data_type of this Option. :type: str """""" allowed_values = [""string"", ""number"", ""date"", ""color""] if data_type is not None and data_type not in allowed_values: raise ValueError( ""Invalid value for `data_type` ({0}), must be one of {1}"" .format(data_type, allowed_values) ) self._data_type = data_type" 3838,"def create_option(cls, option, **kwargs): """"""Create Option Create a new Option This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.create_option(option, async=True) >>> result = thread.get() :param async bool :param Option option: Attributes of option to create (required) :return: Option If the method is called asynchronously, returns the request thread. """""" kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._create_option_with_http_info(option, **kwargs) else: (data) = cls._create_option_with_http_info(option, **kwargs) return data" 3839,"def delete_option_by_id(cls, option_id, **kwargs): """"""Delete Option Delete an instance of Option by its ID. This method makes a synchronous HTTP request by default. 
To make an asynchronous HTTP request, please pass async=True >>> thread = api.delete_option_by_id(option_id, async=True) >>> result = thread.get() :param async bool :param str option_id: ID of option to delete. (required) :return: None If the method is called asynchronously, returns the request thread. """""" kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._delete_option_by_id_with_http_info(option_id, **kwargs) else: (data) = cls._delete_option_by_id_with_http_info(option_id, **kwargs) return data" 3840,"def get_option_by_id(cls, option_id, **kwargs): """"""Find Option Return single instance of Option by its ID. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.get_option_by_id(option_id, async=True) >>> result = thread.get() :param async bool :param str option_id: ID of option to return (required) :return: Option If the method is called asynchronously, returns the request thread. """""" kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._get_option_by_id_with_http_info(option_id, **kwargs) else: (data) = cls._get_option_by_id_with_http_info(option_id, **kwargs) return data" 3841,"def list_all_options(cls, **kwargs): """"""List Options Return a list of Options This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.list_all_options(async=True) >>> result = thread.get() :param async bool :param int page: page number :param int size: page size :param str sort: page order :return: page[Option] If the method is called asynchronously, returns the request thread. """""" kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._list_all_options_with_http_info(**kwargs) else: (data) = cls._list_all_options_with_http_info(**kwargs) return data" 3842,"def replace_option_by_id(cls, option_id, option, **kwargs): """"""Replace Option Replace all attributes of Option This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.replace_option_by_id(option_id, option, async=True) >>> result = thread.get() :param async bool :param str option_id: ID of option to replace (required) :param Option option: Attributes of option to replace (required) :return: Option If the method is called asynchronously, returns the request thread. """""" kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._replace_option_by_id_with_http_info(option_id, option, **kwargs) else: (data) = cls._replace_option_by_id_with_http_info(option_id, option, **kwargs) return data" 3843,"def update_option_by_id(cls, option_id, option, **kwargs): """"""Update Option Update attributes of Option This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.update_option_by_id(option_id, option, async=True) >>> result = thread.get() :param async bool :param str option_id: ID of option to update. (required) :param Option option: Attributes of option to update. (required) :return: Option If the method is called asynchronously, returns the request thread. 
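When called synchronously (the default), the updated Option is returned directly: >>> option = api.update_option_by_id(option_id, option)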
"""""" kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._update_option_by_id_with_http_info(option_id, option, **kwargs) else: (data) = cls._update_option_by_id_with_http_info(option_id, option, **kwargs) return data" 3844,"def get_callable_signature_as_string(the_callable): """"""Return a string representing a callable. It is executed as if it would have been declared on the prompt. >>> def foo(arg1, arg2, arg3='val1', arg4='val2', *args, **argd): ... pass >>> get_callable_signature_as_string(foo) def foo(arg1, arg2, arg3='val1', arg4='val2', *args, **argd) :param the_callable: the callable to be analyzed. :type the_callable: function/callable. :return: the signature. """""" args, varargs, varkw, defaults = inspect.getargspec(the_callable) tmp_args = list(args) args_dict = {} if defaults: defaults = list(defaults) else: defaults = [] while defaults: args_dict[tmp_args.pop()] = defaults.pop() while tmp_args: args_dict[tmp_args.pop()] = None args_list = [] for arg in args: if args_dict[arg] is not None: args_list.append(""%s=%s"" % (arg, repr(args_dict[arg]))) else: args_list.append(arg) if varargs: args_list.append(""*%s"" % varargs) if varkw: args_list.append(""**%s"" % varkw) args_string = ', '.join(args_list) return ""def %s(%s)"" % (the_callable.__name__, args_string)" 3845,"def get_callable_documentation(the_callable): """"""Return a string with the callable signature and its docstring. :param the_callable: the callable to be analyzed. :type the_callable: function/callable. :return: the signature. """""" return wrap_text_in_a_box( title=get_callable_signature_as_string(the_callable), body=(getattr(the_callable, '__doc__') or 'No documentation').replace( '\n', '\n\n'), style='ascii_double')" 3846,"def register_extension_class(ext, base, *args, **kwargs): """"""Instantiate the given extension class and register as a public attribute of the given base. README: The expected protocol here is to instantiate the given extension and pass the base object as the first positional argument, then unpack args and kwargs as additional arguments to the extension's constructor. """""" ext_instance = ext.plugin(base, *args, **kwargs) setattr(base, ext.name.lstrip('_'), ext_instance)" 3847,"def register_extension_method(ext, base, *args, **kwargs): """"""Register the given extension method as a public attribute of the given base. README: The expected protocol here is that the given extension method is an unbound function. It will be bound to the specified base as a method, and then set as a public attribute of that base. """""" bound_method = create_bound_method(ext.plugin, base) setattr(base, ext.name.lstrip('_'), bound_method)" 3848,"def token_auto_auth(func): """"""Wrap class methods with automatic token re-authentication. This wrapper will detect authentication failures coming from its wrapped method. When one is caught, it will request a new token, and simply replay the original request. The one constraint that this wrapper has is that the wrapped method's class must have the :py:class:`objectrocket.client.Client` object embedded in it as the property ``_client``. Such is the design of all current client operations layers. """""" @functools.wraps(func) def wrapper(self, *args, **kwargs): try: response = func(self, *args, **kwargs) # If auth failure occurs, attempt to re-authenticate and replay once at most. except errors.AuthFailure: # Request to have authentication refreshed. self._client.auth._refresh() # Replay original request. 
response = func(self, *args, **kwargs) return response # TODO(TheDodd): match func call signature and docs. return wrapper" 3849,"def check_auth(args, role=None): """"""Check the user authentication."""""" users = boto3.resource(""dynamodb"").Table(os.environ['people']) if not (args.get('email', None) and args.get('api_key', None)): mesg = ""Invalid request: `email` and `api_key` are required"" return {'success': False, 'message': mesg} user = users.get_item(Key={'email': args.get('email')}) if 'Item' not in user: return {'success': False, 'message': 'User does not exist.'} user = user['Item'] if user['api_key'] != args['api_key']: return {'success': False, 'message': 'API key was invalid.'} if role: if user['role'] not in role: mesg = 'User is not authorized to make this change.' return {'success': False, 'message': mesg} return {'success': True, 'message': None, 'user': user}" 3850,"def lambda_handler(event, context): """"""Main handler."""""" auth = check_auth(event, role=[""admin""]) if not auth['success']: return auth table = boto3.resource(""dynamodb"").Table(os.environ['database']) results = table.scan() output = {'success': True, 'events': list(), 'eventsCount': 0} for item in results.get('Items', list()): output['events'].append(item) output['eventsCount'] = len(output['events']) return output" 3851,"def get_theme_dir(): """""" Returns path to directory containing this package's theme. This is designed to be used when setting the ``html_theme_path`` option within Sphinx's ``conf.py`` file. """""" return os.path.abspath(os.path.join(os.path.dirname(__file__), ""theme""))" 3852,"def create_discount_promotion(cls, discount_promotion, **kwargs): """"""Create DiscountPromotion Create a new DiscountPromotion This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.create_discount_promotion(discount_promotion, async=True) >>> result = thread.get() :param async bool :param DiscountPromotion discount_promotion: Attributes of discountPromotion to create (required) :return: DiscountPromotion If the method is called asynchronously, returns the request thread. """""" kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._create_discount_promotion_with_http_info(discount_promotion, **kwargs) else: (data) = cls._create_discount_promotion_with_http_info(discount_promotion, **kwargs) return data" 3853,"def delete_discount_promotion_by_id(cls, discount_promotion_id, **kwargs): """"""Delete DiscountPromotion Delete an instance of DiscountPromotion by its ID. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.delete_discount_promotion_by_id(discount_promotion_id, async=True) >>> result = thread.get() :param async bool :param str discount_promotion_id: ID of discountPromotion to delete. (required) :return: None If the method is called asynchronously, returns the request thread. """""" kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._delete_discount_promotion_by_id_with_http_info(discount_promotion_id, **kwargs) else: (data) = cls._delete_discount_promotion_by_id_with_http_info(discount_promotion_id, **kwargs) return data" 3854,"def get_discount_promotion_by_id(cls, discount_promotion_id, **kwargs): """"""Find DiscountPromotion Return single instance of DiscountPromotion by its ID. This method makes a synchronous HTTP request by default. 
To make an asynchronous HTTP request, please pass async=True >>> thread = api.get_discount_promotion_by_id(discount_promotion_id, async=True) >>> result = thread.get() :param async bool :param str discount_promotion_id: ID of discountPromotion to return (required) :return: DiscountPromotion If the method is called asynchronously, returns the request thread. """""" kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._get_discount_promotion_by_id_with_http_info(discount_promotion_id, **kwargs) else: (data) = cls._get_discount_promotion_by_id_with_http_info(discount_promotion_id, **kwargs) return data" 3855,"def list_all_discount_promotions(cls, **kwargs): """"""List DiscountPromotions Return a list of DiscountPromotions This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.list_all_discount_promotions(async=True) >>> result = thread.get() :param async bool :param int page: page number :param int size: page size :param str sort: page order :return: page[DiscountPromotion] If the method is called asynchronously, returns the request thread. """""" kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._list_all_discount_promotions_with_http_info(**kwargs) else: (data) = cls._list_all_discount_promotions_with_http_info(**kwargs) return data" 3856,"def replace_discount_promotion_by_id(cls, discount_promotion_id, discount_promotion, **kwargs): """"""Replace DiscountPromotion Replace all attributes of DiscountPromotion This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.replace_discount_promotion_by_id(discount_promotion_id, discount_promotion, async=True) >>> result = thread.get() :param async bool :param str discount_promotion_id: ID of discountPromotion to replace (required) :param DiscountPromotion discount_promotion: Attributes of discountPromotion to replace (required) :return: DiscountPromotion If the method is called asynchronously, returns the request thread. """""" kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._replace_discount_promotion_by_id_with_http_info(discount_promotion_id, discount_promotion, **kwargs) else: (data) = cls._replace_discount_promotion_by_id_with_http_info(discount_promotion_id, discount_promotion, **kwargs) return data" 3857,"def update_discount_promotion_by_id(cls, discount_promotion_id, discount_promotion, **kwargs): """"""Update DiscountPromotion Update attributes of DiscountPromotion This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.update_discount_promotion_by_id(discount_promotion_id, discount_promotion, async=True) >>> result = thread.get() :param async bool :param str discount_promotion_id: ID of discountPromotion to update. (required) :param DiscountPromotion discount_promotion: Attributes of discountPromotion to update. (required) :return: DiscountPromotion If the method is called asynchronously, returns the request thread. """""" kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._update_discount_promotion_by_id_with_http_info(discount_promotion_id, discount_promotion, **kwargs) else: (data) = cls._update_discount_promotion_by_id_with_http_info(discount_promotion_id, discount_promotion, **kwargs) return data" 3858,"def make_code_readable(s): """"""Add newlines at strategic places in code string for printing. 
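For example, make_code_readable(""{'a': 1, 'b': 2}"") returns ""{\n 'a': 1,\n 'b': 2\n}""; quotes are respected, so braces and commas inside string literals are left untouched.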
Args: s: str, piece of code. If not str, will attempt to convert to str. Returns: str """""" s = s if isinstance(s, str) else str(s) MAP = {"","": "",\n"", ""{"": ""{\n "", ""}"": ""\n}""} ll = [] state = ""open"" flag_single = False flag_double = False flag_backslash = False for ch in s: if flag_backslash: flag_backslash = False continue if ch == ""\\"": flag_backslash = True continue if flag_single: if ch == ""'"": flag_single = False elif not flag_double and ch == ""'"": flag_single = True if flag_double: if ch == '""': flag_double = False elif not flag_single and ch == '""': flag_double = True if flag_single or flag_double: ll.append(ch) else: ll.append(MAP.get(ch, ch)) return """".join(ll)" 3859,"def chunk_string(string, length): """""" Splits a string into fixed-length chunks. This function returns a generator, using a generator comprehension. The generator returns the string sliced, from 0 + a multiple of the length of the chunks, to the length of the chunks + a multiple of the length of the chunks. Reference: http://stackoverflow.com/questions/18854620 """""" return (string[0 + i:length + i] for i in range(0, len(string), length))" 3860,"def seconds2str(seconds): """"""Returns string such as 1h 05m 55s."""""" if seconds < 0: return ""{0:.3g}s"".format(seconds) elif math.isnan(seconds): return ""NaN"" elif math.isinf(seconds): return ""Inf"" m, s = divmod(seconds, 60) h, m = divmod(m, 60) if h >= 1: return ""{0:g}h {1:02g}m {2:.3g}s"".format(h, m, s) elif m >= 1: return ""{0:02g}m {1:.3g}s"".format(m, s) else: return ""{0:.3g}s"".format(s)" 3861,"def make_fits_keys_dict(keys): """""" Returns a dictionary to translate to unique FITS header keys up to 8 characters long This is similar to Windows making up 8-character names for filenames that are longer than this ""The keyword names may be up to 8 characters long and can only contain uppercase letters A to Z, the digits 0 to 9, the hyphen, and the underscore character."" [1] Arguments: keys -- list of strings Returns: dictionary whose keys are the elements in the ""keys"" argument, and whose values are made-up uppercase names References: [1] http://fits.gsfc.nasa.gov/fits_primer.html """""" key_dict = {} new_keys = [] for key in keys: # converts to valid FITS key according to reference [1] above fits_key = valid_fits_key(key) num_digits = 1 i = -1 i_max = 9 while fits_key in new_keys: i += 1 if i > i_max: i = 0 i_max = i_max * 10 + 9 num_digits += 1 fits_key = fits_key[:(8 - num_digits)] + ((""%0{0:d}d"".format(num_digits)) % i) key_dict[key] = fits_key new_keys.append(fits_key) return key_dict" 3862,"def valid_fits_key(key): """""" Makes valid key for a FITS header ""The keyword names may be up to 8 characters long and can only contain uppercase letters A to Z, the digits 0 to 9, the hyphen, and the underscore character."" (http://fits.gsfc.nasa.gov/fits_primer.html) """""" ret = re.sub(""[^A-Z0-9\-_]"", """", key.upper())[:8] if len(ret) == 0: raise RuntimeError(""key '{0!s}' has no valid characters to be a key in a FITS header"".format(key)) return ret" 3863,"def eval_fieldnames(string_, varname=""fieldnames""): """"""Evaluates string_, must evaluate to list of strings. 
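For example, eval_fieldnames(""['alpha', 'beta']"") returns ['ALPHA', 'BETA'].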
Also converts field names to uppercase"""""" ff = eval(string_) if not isinstance(ff, list): raise RuntimeError(""{0!s} must be a list"".format(varname)) if not all([isinstance(x, str) for x in ff]): raise RuntimeError(""{0!s} must be a list of strings"".format(varname)) ff = [x.upper() for x in ff] return ff" 3864,"def module_to_dict(module): """"""Creates a dictionary whose keys are module.__all__ Returns: {""(attribute name)"": attribute, ...} """""" lot = [(key, module.__getattribute__(key)) for key in module.__all__] ret = dict(lot) return ret" 3865,"def strip_accents(s): """""" Strip accents to prepare for slugification. """""" nfkd = unicodedata.normalize('NFKD', unicode(s)) return u''.join(ch for ch in nfkd if not unicodedata.combining(ch))" 3866,"def slugify(s): """""" Converts the given string to a URL slug. """""" s = strip_accents(s.replace(""'"", '').lower()) return re.sub('[^a-z0-9]+', ' ', s).strip().replace(' ', '-')" 3867,"def _legacy_status(stat): """"""Legacy status method from the 'qsmobile.js' library. Pass in the 'val' from &devices or the 'data' received after calling a specific ID. """""" # 2d0c00002a0000 if stat[:2] == '30' or stat[:2] == '47': # RX1 CT ooo = stat[4:5] # console.log(""legstat. "" + o); if ooo == '0': return 0 if ooo == '8': return 100 if stat == '7e': return 0 if stat == '7f': return 100 if len(stat) == 6: # old try: val = int(stat[4:], 16) except ValueError: val = 0 hwt = stat[:2] if hwt == '01': # old dim return round(((125 - val) / 125) * 100) if hwt == '02': # old rel return 100 if val == 127 else 0 if hwt == '28': # LED DIM if stat[2:4] == '01': if stat[4:] == '78': return 0 return round(((120 - val) / 120) * 100) # Additional decodes not part of qsmobile.js if stat.upper().find('ON') >= 0: # Relay return 100 if (not stat) or stat.upper().find('OFF') >= 0: return 0 if stat.endswith('%'): # New style dimmers if stat[:-1].isdigit: return int(stat[:-1]) _LOGGER.debug(""val='%s' used a -1 fallback in legacy_status"", stat) return -1" 3868,"def decode_qwikcord(packet, channel=1): """"""Extract the qwikcord current measurements from val (CTavg, CTsum)."""""" val = str(packet.get('val', '')) if len(val) != 16: return None if channel == 1: return int(val[6:12], 16) # CTavg return int(val[12:], 16)" 3869,"def decode_door(packet, channel=1): """"""Decode a door sensor."""""" val = str(packet.get(QSDATA, '')) if len(val) == 6 and val.startswith('46') and channel == 1: return val[-1] == '0' return None" 3870,"def decode_imod(packet, channel=1): """"""Decode an 4 channel imod. 
May support 6 channels."""""" val = str(packet.get(QSDATA, '')) if len(val) == 8 and val.startswith('4e'): try: _map = ((5, 1), (5, 2), (5, 4), (4, 1), (5, 1), (5, 2))[ channel - 1] return (int(val[_map[0]], 16) & _map[1]) == 0 except IndexError: return None return None" 3871,"def decode_pir(packet, channel=1): """"""Decode a PIR."""""" val = str(packet.get(QSDATA, '')) if len(val) == 8 and val.startswith('0f') and channel == 1: return int(val[-4:], 16) > 0 return None" 3872,"def decode_temperature(packet, channel=1): """"""Decode the temperature."""""" val = str(packet.get(QSDATA, '')) if len(val) == 12 and val.startswith('34') and channel == 1: temperature = int(val[-4:], 16) return round(float((-46.85 + (175.72 * (temperature / pow(2, 16)))))) return None" 3873,"def decode_humidity(packet, channel=1): """"""Decode the humidity."""""" val = str(packet.get(QSDATA, '')) if len(val) == 12 and val.startswith('34') and channel == 1: humidity = int(val[4:-4], 16) return round(float(-6 + (125 * (humidity / pow(2, 16))))) return None" 3874,"def set_value(self, qsid, new): # Set value & encode new to be passed to QSUSB """"""Set a value."""""" try: dev = self[qsid] except KeyError: raise KeyError(""Device {} not found"".format(qsid)) if new < 0: new = 0 if new == dev.value: return if dev.is_dimmer: new = _MAX if new > (_MAX * .9) else new else: # QSType.relay and any other new = _MAX if new > 0 else 0 def success(): """"""Success closure to update value."""""" self[qsid].value = new _LOGGER.debug(""set success %s=%s"", qsid, new) self._cb_value_changed(self, qsid, new) newqs = round(math.pow(round(new / _MAX * 100), 1 / self.dim_adj)) _LOGGER.debug(""%s hass=%s --> %s"", qsid, new, newqs) self._cb_set_qsvalue(qsid, newqs, success)" 3875,"def update_devices(self, devices): """"""Update values from response of URL_DEVICES, callback if changed."""""" for qspacket in devices: try: qsid = qspacket[QS_ID] except KeyError: _LOGGER.debug(""Device without ID: %s"", qspacket) continue if qsid not in self: self[qsid] = QSDev(data=qspacket) dev = self[qsid] dev.data = qspacket # Decode value from QSUSB newqs = _legacy_status(qspacket[QS_VALUE]) if dev.is_dimmer: # Adjust dimmer exponentially to get a smoother effect newqs = min(round(math.pow(newqs, self.dim_adj)), 100) newin = round(newqs * _MAX / 100) if abs(dev.value - newin) > 1: # Significant change _LOGGER.debug(""%s qs=%s --> %s"", qsid, newqs, newin) dev.value = newin self._cb_value_changed(self, qsid, newin)" 3876,"def geist_replay(wrapped, instance, args, kwargs): """"""Wraps a test of other function and injects a Geist GUI which will enable replay (set environment variable GEIST_REPLAY_MODE to 'record' to active record mode."""""" path_parts = [] file_parts = [] if hasattr(wrapped, '__module__'): module = wrapped.__module__ module_file = sys.modules[module].__file__ root, _file = os.path.split(module_file) path_parts.append(root) _file, _ = os.path.splitext(_file) file_parts.append(_file) if hasattr(wrapped, '__objclass__'): file_parts.append(wrapped.__objclass__.__name__) elif hasattr(wrapped, '__self__'): file_parts.append(wrapped.__self__.__class__.__name__) file_parts.append(wrapped.__name__ + '.log') path_parts.append('_'.join(file_parts)) filename = os.path.join(*path_parts) if is_in_record_mode(): platform_backend = get_platform_backend() backend = RecordingBackend( source_backend=platform_backend, recording_filename=filename ) else: backend = PlaybackBackend( recording_filename=filename ) gui = GUI(backend) return wrapped(gui, *args, 
**kwargs)" 3877,"def reverse_cyk_transforms(root): # type: (Nonterminal) -> Nonterminal """""" Reverse transformation made to grammar before CYK. Performs following steps: - transform from chomsky normal form - restore unit rules - restore epsilon rules :param root: Root node of the parsed tree. :return: Restored parsed tree. """""" root = InverseContextFree.transform_from_chomsky_normal_form(root) root = InverseContextFree.unit_rules_restore(root) root = InverseContextFree.epsilon_rules_restore(root) return root" 3878,"def _geom_solve_p_from_mu(mu, b): """""" For the geom_uptrunc, given mu and b, return p. Ref: Harte 2011, Oxford U Press. Eq. 7.50. """""" def p_eq(x, mu, b): x, mu, b = Decimal(x), Decimal(mu), Decimal(b) return ( (x / (1 - x)) - ((b + 1) / (x**-b - 1)) - mu ) # x here is the param raised to the k_agg power, or 1 - p return 1 - optim.brentq(p_eq, 1e-16, 100, args=(mu, b), disp=True)" 3879,"def _nbinom_ztrunc_p(mu, k_agg): """""" Calculates p parameter for truncated negative binomial Function given in Sampford 1955, equation 4 Note that omega = 1 / 1 + p in Sampford """""" p_eq = lambda p, mu, k_agg: (k_agg * p) / (1 - (1 + p)**-k_agg) - mu # The upper bound needs to be large. p will increase with increasing mu # and decreasing k_agg p = optim.brentq(p_eq, 1e-10, 1e10, args=(mu, k_agg)) return p" 3880,"def _ln_choose(n, k_agg): ''' log binomial coefficient with extended gamma factorials. n and k_agg may be int or array - if both array, must be the same length. ''' gammaln = special.gammaln return gammaln(n + 1) - (gammaln(k_agg + 1) + gammaln(n - k_agg + 1))" 3881,"def _solve_k_from_mu(data, k_array, nll, *args): """""" For given args, return k_agg from searching some k_range. Parameters ---------- data : array k_range : array nll : function args : Returns -------- :float Minimum k_agg """""" # TODO: See if a root finder like fminbound would work with Decimal used in # logpmf method (will this work with arrays?) nll_array = np.zeros(len(k_array)) for i in range(len(k_array)): nll_array[i] = nll(data, k_array[i], *args) min_nll_idx = np.argmin(nll_array) return k_array[min_nll_idx]" 3882,"def _trunc_logser_solver(bins, b): """""" Given bins (S) and b (N) solve for MLE of truncated logseries parameter p Parameters ----------- bins : float Number of bins. Considered S in an ecological context b : float Upper truncation of distribution. Considered N in an ecological context Returns ------- : float MLE estimate of p Notes ------ Adapted from Ethan White's macroecology_tools """""" if bins == b: p = 0 else: BOUNDS = [0, 1] DIST_FROM_BOUND = 10 ** -15 m = np.array(np.arange(1, np.int(b) + 1)) y = lambda x: np.sum(x ** m / b * bins) - np.sum((x ** m) / m) p = optim.bisect(y, BOUNDS[0] + DIST_FROM_BOUND, min((sys.float_info[0] / bins) ** (1 / b), 2), xtol=1.490116e-08, maxiter=1000) return p" 3883,"def _expon_solve_lam_from_mu(mu, b): """""" For the expon_uptrunc, given mu and b, return lam. Similar to geom_uptrunc """""" def lam_eq(lam, mu, b): # Small offset added to denominator to avoid 0/0 erors lam, mu, b = Decimal(lam), Decimal(mu), Decimal(b) return ( (1 - (lam*b + 1) * np.exp(-lam*b)) / (lam - lam * np.exp(-lam*b) + Decimal(1e-32)) - mu ) return optim.brentq(lam_eq, -100, 100, args=(mu, b), disp=True)" 3884,"def _make_rank(dist_obj, n, mu, sigma, crit=0.5, upper=10000, xtol=1): """""" Make rank distribution using both ppf and brute force. 
Setting crit = 1 is equivalent to just using the ppf Parameters ---------- {0} """""" qs = (np.arange(1, n + 1) - 0.5) / n rank = np.empty(len(qs)) brute_ppf = lambda val, prob: prob - dist_obj.cdf(val, mu, sigma) qs_less = qs <= crit ind = np.sum(qs_less) # Use ppf if qs are below crit rank[qs_less] = dist_obj.ppf(qs[qs_less], mu, sigma) # Use brute force if they are above for i, tq in enumerate(qs[~qs_less]): j = ind + i try: # TODO: Use an adaptable lower bound to increase speed rank[j] = np.abs(np.ceil(optim.brentq(brute_ppf, -1, upper, args=(tq,), xtol=xtol))) except ValueError: # If it is above the upper bound set all remaining values # to the previous value rank[j:] = np.repeat(rank[j - 1], len(rank[j:])) break return rank" 3885,"def _mean_var(vals, pmf): """""" Calculates the mean and variance from vals and pmf Parameters ---------- vals : ndarray Value range for a distribution pmf : ndarray pmf values corresponding with vals Returns ------- : tuple (mean, variance) """""" mean = np.sum(vals * pmf) var = np.sum(vals ** 2 * pmf) - mean ** 2 return mean, var" 3886,"def rank(self, n, *args): """"""{0}"""""" return self.ppf((np.arange(1, n+1) - 0.5) / n, *args)" 3887,"def rvs_alt(self, *args, **kwargs): """"""{0}"""""" l = kwargs.get('l', 1) b = kwargs.get('b', 1e5) size = kwargs.get('size', 1) model_cdf = self.cdf(np.arange(l, b + 1), *args) unif_rands = np.random.random(size) model_rands = np.array([np.where(tx <= model_cdf)[0][0] + l for tx in unif_rands]) return model_rands" 3888,"def fit_mle(self, data, b=None): """"""%(super)s In addition to data, requires ``b``, the upper limit of the distribution. """""" # Take mean of data as MLE of distribution mean, then calculate p mu = np.mean(data) if not b: b = np.sum(data) p = _geom_solve_p_from_mu_vect(mu, b) # Just return float, not len 1 array if len(np.atleast_1d(p)) == 1: return float(p), b else: return p, b" 3889,"def fit_mle(self, data, init_vals=(80, 80)): """"""%(super)s In addition to data, can take init_vals which allows the user to specify initial values for (alpha, theta) during the optimization. """""" if len(data) > 1: mu = np.mean(data) var = np.var(data) theta0 = var / mu alpha0 = mu / theta0 else: alpha0 = init_vals[0] theta0 = init_vals[1] def mle(params): return -np.sum(np.log(self.pmf(data, params[0], params[1]))) # Bounded fmin? alpha, theta = optim.fmin(mle, x0=[alpha0, theta0], disp=0) return alpha, theta" 3890,"def fit_mle(self, data, k_array=np.arange(0.1, 100, 0.1)): """"""%(super)s In addition to data, gives an optional keyword argument k_array containing the values to search for k_agg. A brute force search is then used to find the parameter k_agg. """""" # todo: check and mention in docstring biases of mle for k_agg data = np.array(data) mu = np.mean(data) return mu, _solve_k_from_mu(data, k_array, nbinom_nll, mu)" 3891,"def translate_args(self, mu, k_agg, return_p=False): """"""%(super)s The keyword argument return_p computes the p values used to define the the truncated negative binomial """""" if return_p: return nbinom_ztrunc_p(mu, k_agg), k_agg else: return mu, k_agg" 3892,"def fit_mle(self, data, k_agg0=0.5): """"""%(super)s In addition to data, gives an optional keyword argument k_agg0 that specifies the initial value of k_agg used in the optimization. """""" mu = np.mean(data) def mle(k): return -np.sum(np.log(self.pmf(data, mu, k))) k = optim.fmin(mle, x0=k_agg0, disp=0) return mu, k[0]" 3893,"def fit_mle(self, data, b=None): """"""%(super)s b : float The upper bound of the distribution. 
If None, fixed at sum(data) """""" data = np.array(data) length = len(data) if not b: b = np.sum(data) return _trunc_logser_solver(length, b), b" 3894,"def rank(self, n, mu, sigma, crit=.5, upper=10000, xtol=1): """"""%(super)s Additional Parameters ---------------------- {0} """""" return _make_rank(self, n, mu, sigma, crit=crit, upper=upper, xtol=xtol)" 3895,"def fit_mle(self, data, b=None): """"""%(super)s Additional Parameters ---------------------- b : float The upper limit of the distribution """""" # Take mean of data as MLE of distribution mean, then calculate p mu = np.mean(data) if not b: b = np.sum(data) lam = _expon_solve_lam_from_mu_vect(mu, b) # Just return float, not len 1 array if len(np.atleast_1d(lam)) == 1: return float(lam), b else: return lam, b" 3896,"def fit_mle(self, data, fix_mean=False): """"""%(super)s Additional Parameters ---------------------- fix_mean : bool Default False. If True, fixes mean before optimizing sigma """""" if not fix_mean: sigma, _, scale = stats.lognorm.fit(data, floc=0) return np.log(scale), sigma else: mean = np.mean(data) # MLE fxn to be optmimized mle = lambda sigma, x, mean: -1 *\ np.sum(self._pdf_w_mean(x, mean, sigma)) sigma = optim.fmin(mle, np.array([np.std(np.log(data), ddof=1)]), args=(data, mean), disp=0)[0] return self.translate_args(mean, sigma)" 3897,"def _pdf_w_mean(self, x, mean, sigma): """""" Calculates the pdf of a lognormal distribution with parameters mean and sigma Parameters ---------- mean : float or ndarray Mean of the lognormal distribution sigma : float or ndarray Sigma parameter of the lognormal distribution Returns ------- : float or ndarray pdf of x """""" # Lognorm pmf with mean for optimization mu, sigma = self.translate_args(mean, sigma) return self.logpdf(x, mu, sigma)" 3898,"def union_join(left, right, left_as='left', right_as='right'): """""" Join function truest to the SQL style join. Merges both objects together in a sum-type, saving references to each parent in ``left`` and ``right`` attributes. >>> Dog = namedtuple('Dog', ['name', 'woof', 'weight']) >>> dog = Dog('gatsby', 'Ruff!', 15) >>> Cat = namedtuple('Cat', ['name', 'meow', 'weight']) >>> cat = Cat('pleo', 'roooowwwr', 12) >>> catdog = union_join(cat, dog, 'cat', 'dog') >>> catdog.name pleo >>> catdog.woof Ruff! >>> catdog.dog.name gatsby :param left: left object to be joined with right :param right: right object to be joined with left :return: joined object with attrs/methods from both parents available """""" attrs = {} attrs.update(get_object_attrs(right)) attrs.update(get_object_attrs(left)) attrs[left_as] = left attrs[right_as] = right if isinstance(left, dict) and isinstance(right, dict): return attrs else: joined_class = type(left.__class__.__name__ + right.__class__.__name__, (Union,), {}) return joined_class(attrs)" 3899,"def setKeyButton( self, btnId, keyCallback, bounceTime = DEF_BOUNCE_TIME_NORMAL, pullUpDown = GPIO.PUD_UP, event = GPIO.BOTH ): """"""! \~english Set key button event @param btnId: Key button pin number in BCM @param keyCallback: A interrupt callback_function or None.
    If set to None, the key button works in query mode,
    then use RPiKeyButtons#readKeyButton to get the key button status @param bounceTime: Default set to DEF_BOUNCE_TIME_NORMAL @param pullUpDown: Default set to GPIO.PUD_UP @param event: Default set to GPIO.BOTH. It can be: { GPIO.RISING | GPIO.FALLING | GPIO.BOTH } \~chinese 设置按键事件 @param btnId: 按键IO号(in BCM mode) @param keyCallback: 按键中断回调函数 callback_function或None
    如果设置为None,则表示按键工作在查询模式
                    然后使用 RPiKeyButtons#readKeyButton 获取 keybutton 状态 @param bounceTime: 默认 DEF_BOUNCE_TIME_NORMAL @param pullUpDown: 默认 GPIO.PUD_UP @param event: 默认 GPIO.BOTH 它可以是: { GPIO.RISING | GPIO.FALLING | GPIO.BOTH } \~ \n @see DEF_BOUNCE_TIME_SHORT_MON (10ms) @see DEF_BOUNCE_TIME_SHORT (50ms) @see DEF_BOUNCE_TIME_NORMAL (100ms) @see DEF_BOUNCE_TIME_LONG (200ms) @note * setKeyButton(btnId = 12, keyCallback = None ) * setKeyButton(btnId = 14, keyCallback = aKeyCallbackFun )
                    \# a simple callback function
                    def aKeyCallbackFun(channel):
                        print(channel)
                        pass
                    
    """""" GPIO.setup( btnId, GPIO.IN, pull_up_down=pullUpDown) # The keyCallback is None means setting keybutton in query mode, # then uses readKeyButton for get keybutton status # event can be { RISING, FALLING, BOTH } if keyCallback != None: try: GPIO.add_event_detect( btnId, event, callback=keyCallback, bouncetime=bounceTime ) except: pass pass" 3900,"def removeKeyButtonEvent(self, buttons= [] ): """"""! \~english Remove key button event callbacks @param buttons: an array of button Ids. eg. [ 12,13,15, ...] \~chinese 移除按键事件回调 @param buttons: 按钮ID数组。 例如: [12,13,15,...] """""" for i in range( 0, len(buttons)-1 ): GPIO.remove_event_detect( buttons[i] )" 3901,"def configKeyButtons( self, enableButtons = [], bounceTime = DEF_BOUNCE_TIME_NORMAL, pullUpDown = GPIO.PUD_UP, event = GPIO.BOTH ): """"""! \~english Config multi key buttons IO and event on same time @param enableButtons: an array of key button configs. eg.
    [{ ""id"":BUTTON_ACT_A, ""callback"": aCallbackFun }, ... ] @param bounceTime: Default set to DEF_BOUNCE_TIME_NORMAL @param pullUpDown: Default set to GPIO.PUD_UP @param event: Default set to GPIO.BOTH. it can be: { GPIO.RISING | GPIO.FALLING | GPIO.BOTH } \~chinese 同时配置多个按键IO和事件 @param enableButtons: 组按键配置 例如:
    [{ ""id"":BUTTON_ACT_A, ""callback"": aCallbackFun }, ... ] @param bounceTime: 默认 DEF_BOUNCE_TIME_NORMAL @param pullUpDown: 默认 GPIO.PUD_UP @param event: 默认 GPIO.BOTH 它可以是: { GPIO.RISING | GPIO.FALLING | GPIO.BOTH } \~ \n @see DEF_BOUNCE_TIME_SHORT_MON (10ms) @see DEF_BOUNCE_TIME_SHORT (50ms) @see DEF_BOUNCE_TIME_NORMAL (100ms) @see DEF_BOUNCE_TIME_LONG (200ms) """""" for key in enableButtons: self.setKeyButton( key[""id""], key[""callback""], bounceTime, pullUpDown, event ) pass" 3902,"def matches_from_list(item,options,fuzzy=90,fname_match=True,fuzzy_fragment=None,guess=False): '''Returns the members of ``options`` that best matches ``item``. Will prioritize exact matches, then filename-style matching, then fuzzy matching. Returns a tuple of item, index, match type, and fuzziness (if applicable) :item: string to match :options: list of examples to test against :fuzzy: integer (out of 100) describing how close to match string :fname_match: use filename globbing to match files? :fuzzy_fragment: if not ``None``, will accept substring matches of at least ``fuzzy_fragment`` fuzziness :guess: if ``True``, shortcut for setting ``fuzzy`` and ``min_fragment`` to very lenient options ''' matches = [] if guess: fuzzy = min(fuzzy,80) fuzzy_fragment = min(fuzzy_fragment,70) option_not_in = lambda item,match_list: all([x[0]!=item for x in match_list]) # Exact matches if item in options: matches += [(options[i],i,'exact',None) for i in xrange(len(options)) if options[i].lower()==item.lower()] # If we have exact matches, don't bother with fuzzy matching return matches # Filename-style matches if fname_match: matches += [(x,options.index(x),'fname',None) for x in fnmatch.filter(options,item) if option_not_in(x,matches)] # Fuzzy matches if fuzzy: sub_matches = [] for i in xrange(len(options)): r = fuzz.ratio(item.lower(),options[i].lower()) if r>=fuzzy and option_not_in(options[i],matches): sub_matches.append((r,i)) matches += [(options[x[1]],x[1],'fuzzy',x[0]) for x in sorted(sub_matches)] # Fragment matches if fuzzy_fragment: sub_matches = [] for i in xrange(len(options)): r = fuzz.partial_ratio(item.lower(),options[i].lower()) if r>=fuzzy_fragment and option_not_in(options[i],matches): sub_matches.append((r,i)) matches += [(options[x[1]],x[1],'fuzzy_fragment',x[0]) for x in sorted(sub_matches)] return matches" 3903,"def best_match_from_list(item,options,fuzzy=90,fname_match=True,fuzzy_fragment=None,guess=False): '''Returns the best match from :meth:`matches_from_list` or ``None`` if no good matches''' matches = matches_from_list(item,options,fuzzy,fname_match,fuzzy_fragment,guess) if len(matches)>0: return matches[0] return None" 3904,"def best_item_from_list(item,options,fuzzy=90,fname_match=True,fuzzy_fragment=None,guess=False): '''Returns just the best item, or ``None``''' match = best_match_from_list(item,options,fuzzy,fname_match,fuzzy_fragment,guess) if match: return match[0] return None" 3905,"async def create_hlk_sw16_connection(port=None, host=None, disconnect_callback=None, reconnect_callback=None, loop=None, logger=None, timeout=None, reconnect_interval=None): """"""Create HLK-SW16 Client class."""""" client = SW16Client(host, port=port, disconnect_callback=disconnect_callback, reconnect_callback=reconnect_callback, loop=loop, logger=logger, timeout=timeout, reconnect_interval=reconnect_interval) await client.setup() return client" 3906,"def _reset_timeout(self): """"""Reset timeout for date keep alive."""""" if self._timeout: self._timeout.cancel() self._timeout = 
self.loop.call_later(self.client.timeout, self.transport.close)" 3907,"def reset_cmd_timeout(self): """"""Reset timeout for command execution."""""" if self._cmd_timeout: self._cmd_timeout.cancel() self._cmd_timeout = self.loop.call_later(self.client.timeout, self.transport.close)" 3908,"def _handle_lines(self): """"""Assemble incoming data into per-line packets."""""" while b'\xdd' in self._buffer: linebuf, self._buffer = self._buffer.rsplit(b'\xdd', 1) line = linebuf[-19:] self._buffer += linebuf[:-19] if self._valid_packet(line): self._handle_raw_packet(line) else: self.logger.warning('dropping invalid data: %s', binascii.hexlify(line))" 3909,"def _valid_packet(raw_packet): """"""Validate incoming packet."""""" if raw_packet[0:1] != b'\xcc': return False if len(raw_packet) != 19: return False checksum = 0 for i in range(1, 17): checksum += raw_packet[i] if checksum != raw_packet[18]: return False return True" 3910,"def _handle_raw_packet(self, raw_packet): """"""Parse incoming packet."""""" if raw_packet[1:2] == b'\x1f': self._reset_timeout() year = raw_packet[2] month = raw_packet[3] day = raw_packet[4] hour = raw_packet[5] minute = raw_packet[6] sec = raw_packet[7] week = raw_packet[8] self.logger.debug( 'received date: Year: %s, Month: %s, Day: %s, Hour: %s, ' 'Minute: %s, Sec: %s, Week %s', year, month, day, hour, minute, sec, week) elif raw_packet[1:2] == b'\x0c': states = {} changes = [] for switch in range(0, 16): if raw_packet[2+switch:3+switch] == b'\x01': states[format(switch, 'x')] = True if (self.client.states.get(format(switch, 'x'), None) is not True): changes.append(format(switch, 'x')) self.client.states[format(switch, 'x')] = True elif raw_packet[2+switch:3+switch] == b'\x02': states[format(switch, 'x')] = False if (self.client.states.get(format(switch, 'x'), None) is not False): changes.append(format(switch, 'x')) self.client.states[format(switch, 'x')] = False for switch in changes: for status_cb in self.client.status_callbacks.get(switch, []): status_cb(states[switch]) self.logger.debug(states) if self.client.in_transaction: self.client.in_transaction = False self.client.active_packet = False self.client.active_transaction.set_result(states) while self.client.status_waiters: waiter = self.client.status_waiters.popleft() waiter.set_result(states) if self.client.waiters: self.send_packet() else: self._cmd_timeout.cancel() elif self._cmd_timeout: self._cmd_timeout.cancel() else: self.logger.warning('received unknown packet: %s', binascii.hexlify(raw_packet))" 3911,"def send_packet(self): """"""Write next packet in send queue."""""" waiter, packet = self.client.waiters.popleft() self.logger.debug('sending packet: %s', binascii.hexlify(packet)) self.client.active_transaction = waiter self.client.in_transaction = True self.client.active_packet = packet self.reset_cmd_timeout() self.transport.write(packet)" 3912,"def format_packet(command): """"""Format packet to be sent."""""" frame_header = b""\xaa"" verify = b""\x0b"" send_delim = b""\xbb"" return frame_header + command.ljust(17, b""\x00"") + verify + send_delim" 3913,"def connection_lost(self, exc): """"""Log when connection is closed, if needed call callback."""""" if exc: self.logger.error('disconnected due to error') else: self.logger.info('disconnected because of close/abort.') if self.disconnect_callback: asyncio.ensure_future(self.disconnect_callback(), loop=self.loop)" 3914,"async def setup(self): """"""Set up the connection with automatic retry."""""" while True: fut = self.loop.create_connection( lambda: 
SW16Protocol( self, disconnect_callback=self.handle_disconnect_callback, loop=self.loop, logger=self.logger), host=self.host, port=self.port) try: self.transport, self.protocol = \ await asyncio.wait_for(fut, timeout=self.timeout) except asyncio.TimeoutError: self.logger.warning(""Could not connect due to timeout error."") except OSError as exc: self.logger.warning(""Could not connect due to error: %s"", str(exc)) else: self.is_connected = True if self.reconnect_callback: self.reconnect_callback() break await asyncio.sleep(self.reconnect_interval)" 3915,"def stop(self): """"""Shut down transport."""""" self.reconnect = False self.logger.debug(""Shutting down."") if self.transport: self.transport.close()" 3916,"async def handle_disconnect_callback(self): """"""Reconnect automatically unless stopping."""""" self.is_connected = False if self.disconnect_callback: self.disconnect_callback() if self.reconnect: self.logger.debug(""Protocol disconnected...reconnecting"") await self.setup() self.protocol.reset_cmd_timeout() if self.in_transaction: self.protocol.transport.write(self.active_packet) else: packet = self.protocol.format_packet(b""\x1e"") self.protocol.transport.write(packet)" 3917,"def register_status_callback(self, callback, switch): """"""Register a callback which will fire when state changes."""""" if self.status_callbacks.get(switch, None) is None: self.status_callbacks[switch] = [] self.status_callbacks[switch].append(callback)" 3918,"def _send(self, packet): """"""Add packet to send queue."""""" fut = self.loop.create_future() self.waiters.append((fut, packet)) if self.waiters and self.in_transaction is False: self.protocol.send_packet() return fut" 3919,"async def turn_on(self, switch=None): """"""Turn on relay."""""" if switch is not None: switch = codecs.decode(switch.rjust(2, '0'), 'hex') packet = self.protocol.format_packet(b""\x10"" + switch + b""\x01"") else: packet = self.protocol.format_packet(b""\x0a"") states = await self._send(packet) return states" 3920,"async def turn_off(self, switch=None): """"""Turn off relay."""""" if switch is not None: switch = codecs.decode(switch.rjust(2, '0'), 'hex') packet = self.protocol.format_packet(b""\x10"" + switch + b""\x02"") else: packet = self.protocol.format_packet(b""\x0b"") states = await self._send(packet) return states" 3921,"async def status(self, switch=None): """"""Get current relay status."""""" if switch is not None: if self.waiters or self.in_transaction: fut = self.loop.create_future() self.status_waiters.append(fut) states = await fut state = states[switch] else: packet = self.protocol.format_packet(b""\x1e"") states = await self._send(packet) state = states[switch] else: if self.waiters or self.in_transaction: fut = self.loop.create_future() self.status_waiters.append(fut) state = await fut else: packet = self.protocol.format_packet(b""\x1e"") state = await self._send(packet) return state" 3922,"def value(self, item): # type: (Any) -> Any """""" Return value stored in weakref. :param item: Object from which get the value. :return: Value stored in the weakref, otherwise original value. :raise TreeDeletedException: when weakref is already deleted. """""" if isinstance(item, weakref.ReferenceType): if item() is None: raise TreeDeletedException() return item() return item" 3923,"def remove_all(self, item): # type: (Any) -> None """""" Remove all occurrence of the parameter. :param item: Value to delete from the WeakList. 
"""""" item = self.ref(item) while list.__contains__(self, item): list.remove(self, item)" 3924,"def index(self, item, **kwargs): # type: (Any, dict) -> int """""" Get index of the parameter. :param item: Item for which get the index. :return: Index of the parameter in the WeakList. """""" return list.index(self, self.ref(item), **kwargs)" 3925,"def insert(self, index, item): # type: (int, Any) -> None """""" Insert item at the specific index. :param index: Index where to insert the item. :param item: Item to insert. """""" return list.insert(self, index, self.ref(item))" 3926,"def sort(self, *, key: Optional[Callable[[Any], Any]] = None, reverse: bool = False) -> None: """""" Sort _WeakList. :param key: Key by which to sort, default None. :param reverse: True if return reversed WeakList, false by default. """""" return list.sort(self, key=self._sort_key(key), reverse=reverse)" 3927,"def get_feed(self, datasource_id): """""" Получение настроек для фида :param datasource_id: идентификатор фида :return: FeedDataSource """""" info = self.__metadb.one( """""" SELECT to_json(ds) as datasource , to_json(fc) as connector , to_json(fct) as connector_type , to_json(ctp) as connector_type_preset , json_build_object('email', u.email, 'full_name', u.full_name) as author_user FROM meta.feed_datasource ds LEFT JOIN meta.feed_connector fc ON fc.id=ds.connector_id LEFT JOIN meta.feed_connector_type fct ON fct.id=fc.connector_type_id LEFT JOIN meta.feed_connector_type_preset ctp ON ctp.id=ds.connector_type_preset_id LEFT JOIN meta.user_list u ON u.id=ds.author_user_id WHERE ds.id = :datasource_id::uuid """""", {""datasource_id"": datasource_id} ) return FeedDataSource(**info)" 3928,"def get_data(self, datasource, callback): """""" Сохранение медиафайла :param task: :param media_metadata: :param file_suffix: :param callback: :return: """""" task = self.__app.worker.current_task media_metadata = datasource.connector_type_preset['preset_data']['media_metadata'] result_data = task['result_data'] tmp_file = NamedTemporaryFile(delete=False, suffix=SOURCE_FORMAT_EXTENSION.get(media_metadata['sourceFormat'])) self.__app.log.info(""Открываем файл"", {""filename"": tmp_file.name}) with open(tmp_file.name, 'wb') as f: callback(f) self.__app.log.info(""start media upload"") result_data['stage_id'] = ""persist_media_file"" self.__starter.update_task_result_data(task) result = self.__media.upload(open(tmp_file.name), { ""ttlInSec"": 60 * 60 * 24, # 24h ""entityId"": 2770, ""objectId"": task.get('data', {}).get(""ds_id""), ""info"": {""metadata"": media_metadata} }) result_data['stage_id'] = ""generate_media_finish"" result_data['media_id'] = result['id'] self.__starter.update_task_result_data(task) return result" 3929,"def datasource_process(self, datasource_id): """""" deprecated Запускает настроенные обработки в фиде :param datasource_id: uuid """""" # TODO Выпилить потом класс используется для другого # TODO без applicationId не выбираются поля сущностей. Подумать на сколько это НЕ нормально response = self.__app.native_api_call('feed', 'datasource/' + datasource_id + '/process?applicationId=1', {}, self.__options, False, None, False, http_method=""POST"") return json.loads(response.text)" 3930,"def _delta_dir(): """"""returns the relative path of the current directory to the git repository. This path will be added the 'filename' path to find the file. It current_dir is the git root, this function returns an empty string. 
Keyword Arguments: Returns: str -- relative path of the current dir to git root dir empty string if current dir is the git root dir """""" repo = Repo() current_dir = os.getcwd() repo_dir = repo.tree().abspath delta_dir = current_dir.replace(repo_dir, '') if delta_dir: return delta_dir + '/' else: return ''" 3931,"def commit(filename): """"""Commit (git) a specified file This method does the same than a :: $ git commit -a ""message"" Keyword Arguments: :filename: (str) -- name of the file to commit Returns: """""" try: repo = Repo() # gitcmd = repo.git # gitcmd.commit(filename) index = repo.index index.commit(""Updated file: {0}"".format(filename)) except Exception as e: print(""exception while commit: %s"" % e.message)" 3932,"def add_file_to_repo(filename): """"""Add a file to the git repo This method does the same than a :: $ git add filename Keyword Arguments: :filename: (str) -- name of the file to commit Returns: """""" try: repo = Repo() index = repo.index index.add([_delta_dir() + filename]) except Exception as e: print(""exception while gitadding file: %s"" % e.message)" 3933,"def reset_to_last_commit(): """"""reset a modified file to his last commit status This method does the same than a :: $ git reset --hard Keyword Arguments: Returns: """""" try: repo = Repo() gitcmd = repo.git gitcmd.reset(hard=True) except Exception: pass" 3934,"def commit_history(filename): """"""Retrieve the commit history for a given filename. Keyword Arguments: :filename: (str) -- full name of the file Returns: list of dicts -- list of commit if the file is not found, returns an empty list """""" result = [] repo = Repo() for commit in repo.head.commit.iter_parents(paths=_delta_dir() + filename): result.append({'date': datetime.fromtimestamp(commit.committed_date + commit.committer_tz_offset), 'hexsha': commit.hexsha}) return result" 3935,"def read_committed_file(gitref, filename): """"""Retrieve the content of a file in an old commit and returns it. Ketword Arguments: :gitref: (str) -- full reference of the git commit :filename: (str) -- name (full path) of the file Returns: str -- content of the file """""" repo = Repo() commitobj = repo.commit(gitref) blob = commitobj.tree[_delta_dir() + filename] return blob.data_stream.read()" 3936,"def get(self, key, _else=None): """"""The method to get an assets value """""" with self._lock: self.expired() # see if everything expired try: value = self._dict[key].get() return value except KeyError: return _else except ValueError: return _else" 3937,"def set(self, key, value, expires=None, future=None): """"""Set a value """""" # assert the values above with self._lock: try: self._dict[key].set(value, expires=expires, future=future) except KeyError: self._dict[key] = moment(value, expires=expires, future=future, lock=self._lock) return value" 3938,"def values(self): """"""Will only return the current values """""" self.expired() values = [] for key in self._dict.keys(): try: value = self._dict[key].get() values.append(value) except: continue return values" 3939,"def has_key(self, key): """"""Does the key exist? This method will check to see if it has expired too. """""" if key in self._dict: try: self[key] return True except ValueError: return False except KeyError: return False return False" 3940,"def dicom2db(file_path, file_type, is_copy, step_id, db_conn, sid_by_patient=False, pid_in_vid=False, visit_in_path=False, rep_in_path=False): """"""Extract some meta-data from a DICOM file and store in a DB. Arguments: :param file_path: File path. 
:param file_type: File type (should be 'DICOM'). :param is_copy: Indicate if this file is a copy. :param step_id: Step ID :param db_conn: Database connection. :param sid_by_patient: Rarely, a data set might use study IDs which are unique by patient (not for the whole study). E.g.: LREN data. In such a case, you have to enable this flag. This will use PatientID + StudyID as a session ID. :param pid_in_vid: Rarely, a data set might mix patient IDs and visit IDs. E.g. : LREN data. In such a case, you to enable this flag. This will try to split PatientID into VisitID and PatientID. :param visit_in_path: Enable this flag to get the visit ID from the folder hierarchy instead of DICOM meta-data (e.g. can be useful for PPMI). :param rep_in_path: Enable this flag to get the repetition ID from the folder hierarchy instead of DICOM meta-data (e.g. can be useful for PPMI). :return: A dictionary containing the following IDs : participant_id, visit_id, session_id, sequence_type_id, sequence_id, repetition_id, file_id. """""" global conn conn = db_conn tags = dict() logging.info(""Extracting DICOM headers from '%s'"" % file_path) try: dcm = dicom.read_file(file_path) dataset = db_conn.get_dataset(step_id) tags['participant_id'] = _extract_participant(dcm, dataset, pid_in_vid) if visit_in_path: tags['visit_id'] = _extract_visit_from_path( dcm, file_path, pid_in_vid, sid_by_patient, dataset, tags['participant_id']) else: tags['visit_id'] = _extract_visit(dcm, dataset, tags['participant_id'], sid_by_patient, pid_in_vid) tags['session_id'] = _extract_session(dcm, tags['visit_id']) tags['sequence_type_id'] = _extract_sequence_type(dcm) tags['sequence_id'] = _extract_sequence(tags['session_id'], tags['sequence_type_id']) if rep_in_path: tags['repetition_id'] = _extract_repetition_from_path(dcm, file_path, tags['sequence_id']) else: tags['repetition_id'] = _extract_repetition(dcm, tags['sequence_id']) tags['file_id'] = extract_dicom(file_path, file_type, is_copy, tags['repetition_id'], step_id) except InvalidDicomError: logging.warning(""%s is not a DICOM file !"" % step_id) except IntegrityError: # TODO: properly deal with concurrency problems logging.warning(""A problem occurred with the DB ! A rollback will be performed..."") conn.db_session.rollback() return tags" 3941,"def regularized_function(x,y,func,bins=None,range=None): """"""Compute func() over data aggregated in bins. (x,y) --> (x', func(Y')) with Y' = {y: y(x) where x in x' bin} First the data is collected in bins x' along x and then func is applied to all data points Y' that have been collected in the bin. 
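 For example, a minimal sketch of the behaviour (assuming numpy is imported and this function is in scope; the data values below are purely illustrative)::

    x = numpy.array([0.1, 0.4, 0.6, 0.9])
    y = numpy.array([1.0, 3.0, 5.0, 7.0])
    # two bins over (0, 1): [0, 0.5) collects y = 1, 3 and [0.5, 1] collects y = 5, 7
    F, edges = regularized_function(x, y, numpy.mean, bins=2, range=(0, 1))
    # F is array([2., 6.]) and edges is array([0., 0.5, 1.])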
:Arguments: x abscissa values (for binning) y ordinate values (func is applied) func a numpy ufunc that takes one argument, func(Y') bins number or array range limits (used with number of bins) :Returns: F,edges function and edges (midpoints = 0.5*(edges[:-1]+edges[1:])) """""" _x = numpy.asarray(x) _y = numpy.asarray(y) # setup of bins from numpy.histogram if (range is not None): mn, mx = range if (mn > mx): raise ValueError('max must be larger than min in range parameter.') if not numpy.iterable(bins): if range is None: range = (_x.min(), _x.max()) mn, mx = [float(mi) for mi in range] if mn == mx: mn -= 0.5 mx += 0.5 bins = numpy.linspace(mn, mx, bins+1, endpoint=True) else: bins = numpy.asarray(bins) if (numpy.diff(bins) < 0).any(): raise ValueError('bins must increase monotonically.') sorting_index = numpy.argsort(_x) sx = _x[sorting_index] sy = _y[sorting_index] # boundaries in SORTED data that demarcate bins; position in bin_index is the bin number bin_index = numpy.r_[sx.searchsorted(bins[:-1], 'left'), sx.searchsorted(bins[-1], 'right')] # naive implementation: apply operator to each chunk = sy[start:stop] separately # # It's not clear to me how one could effectively block this procedure (cf # block = 65536 in numpy.histogram) because there does not seem to be a # general way to combine the chunks for different blocks, just think of # func=median F = numpy.zeros(len(bins)-1) # final function F[:] = [func(sy[start:stop]) for start,stop in izip(bin_index[:-1],bin_index[1:])] return F,bins" 3942,"def unit_rules_restore(root): # type: (Nonterminal) -> Nonterminal """""" Transform parsed tree for grammar with removed unit rules. The unit rules will be returned back to the tree. :param root: Root of the parsed tree. :return: Modified tree. """""" items = Traversing.post_order(root) items = filter(lambda x: isinstance(x, ReducedUnitRule), items) for rule in items: parent_nonterm = rule.from_symbols[0] # type: Nonterminal # restore chain of unit rules for r in rule.by_rules: created_rule = r() # type: Rule parent_nonterm._set_to_rule(created_rule) created_rule._from_symbols.append(parent_nonterm) created_nonterm = r.toSymbol() # type: Nonterminal created_rule._to_symbols.append(created_nonterm) created_nonterm._set_from_rule(created_rule) parent_nonterm = created_nonterm # restore last rule last_rule = rule.end_rule() # type: Rule last_rule._from_symbols.append(parent_nonterm) parent_nonterm._set_to_rule(last_rule) for ch in rule.to_symbols: # type: Nonterminal ch._set_from_rule(last_rule) last_rule._to_symbols.append(ch) return root" 3943,"def remaining_bytes(self, meta=True): """""" Returns the remaining, unread bytes from the buffer. """""" pos, self._pos = self._pos, len(self.buffer) return self.buffer[pos:]" 3944,"def decode(self, bytes): """""" Decodes the packet off the byte string. """""" self.buffer = bytes self._pos = 0 Packet = identifier.get_packet_from_id(self._read_variunt()) # unknown packets will be None from the identifier if Packet is None: return None packet = Packet() packet.ParseFromString(self.remaining_bytes()) return packet" 3945,"def encode(self, packet): """""" Pushes a packet to the writer, encoding it on the internal buffer. """""" id = identifier.get_packet_id(packet) if id is None: raise EncoderException('unknown packet') self._write_variunt(id) self._write(packet.SerializeToString()) return bytes(self.buffer)" 3946,"def setup_logging(level): """""" Setup logger. 
"""""" logging.root.setLevel(level) logging.root.addHandler(STREAM_HANDLER)" 3947,"def create(self, data): """""" Create object from the given data. The given data may or may not have been validated prior to calling this function. This function will try its best in creating the object. If the resulting object cannot be produced, raises ``ValidationError``. The spec can affect how individual fields will be created by implementing ``clean()`` for the fields needing customization. :param data: the data as a dictionary. :return: instance of ``klass`` or dictionary. :raises: ``ValidationError`` if factory is unable to create object. """""" # todo: copy-paste code from representation.validate -> refactor if data is None: return None prototype = {} errors = {} # create and populate the prototype for field_name, field_spec in self.spec.fields.items(): try: value = self._create_value(data, field_name, self.spec) except ValidationError, e: if field_name not in self.default_create_values: if hasattr(e, 'message_dict'): # prefix error keys with top level field name errors.update(dict(zip( [field_name + '.' + key for key in e.message_dict.keys()], e.message_dict.values()))) else: errors[field_name] = e.messages else: key_name = self.property_name_map[field_name] prototype[key_name] = value # check extra fields if self.prevent_extra_fields: extras = set(data.keys()) - set(self.property_name_map.keys()) if extras: errors[', '.join(extras)] = ['field(s) not allowed'] # if errors, raise ValidationError if errors: raise ValidationError(errors) # return dict or object based on the prototype _data = deepcopy(self.default_create_values) _data.update(prototype) if self.klass: instance = self.klass() instance.__dict__.update(prototype) return instance else: return prototype" 3948,"def serialize(self, entity, request=None): """""" Serialize entity into dictionary. The spec can affect how individual fields will be serialized by implementing ``serialize()`` for the fields needing customization. :returns: dictionary """""" def should_we_insert(value, field_spec): return value not in self.missing or field_spec.required errors = {} ret = {} for field_name, field_spec in self.spec.fields.items(): value = self._get_value_for_serialization(entity, field_name, field_spec) func = self._get_serialize_func(field_name, self.spec) try: # perform serialization value = func(value, entity, request) if should_we_insert(value, field_spec): ret[field_name] = value except ValidationError, e: if hasattr(e, 'message_dict'): # prefix error keys with top level field name errors.update(dict(zip( [field_name + '.' + key for key in e.message_dict.keys()], e.message_dict.values()))) else: errors[field_name] = e.messages if errors: raise ValidationError(errors) return None if ret == {} else ret" 3949,"def _create_value(self, data, name, spec): """""" Create the value for a field. :param data: the whole data for the entity (all fields). :param name: name of the initialized field. :param spec: spec for the whole entity. """""" field = getattr(self, 'create_' + name, None) if field: # this factory has a special creator function for this field return field(data, name, spec) value = data.get(name) return spec.fields[name].clean(value)" 3950,"def _get_serialize_func(self, name, spec): """""" Return the function that is used for serialization. 
"""""" func = getattr(self, 'serialize_' + name, None) if func: # this factory has a special serializer function for this field return func func = getattr(spec.fields[name], 'serialize', None) if func: return func return lambda value, entity, request: value" 3951,"def _get_value_for_serialization(self, data, name, spec): """""" Return the value of the field in entity (or ``None``). """""" name = self.property_name_map[name] return getattr(data, name, None)" 3952,"def _create_mappings(self, spec): """""" Create property name map based on aliases. """""" ret = dict(zip(set(spec.fields), set(spec.fields))) ret.update(dict([(n, s.alias) for n, s in spec.fields.items() if s.alias])) return ret" 3953,"def all_substrings(s): ''' yields all substrings of a string ''' join = ''.join for i in range(1, len(s) + 1): for sub in window(s, i): yield join(sub)" 3954,"def equivalent_release_for_product(self, product): """""" Returns the release for a specified product with the same channel and major version with the highest minor version, or None if no such releases exist """""" releases = self._default_manager.filter( version__startswith=self.major_version() + '.', channel=self.channel, product=product).order_by('-version') if not getattr(settings, 'DEV', False): releases = releases.filter(is_public=True) if releases: return sorted( sorted(releases, reverse=True, key=lambda r: len(r.version.split('.'))), reverse=True, key=lambda r: r.version.split('.')[1])[0]" 3955,"def notes(self, public_only=False): """""" Retrieve a list of Note instances that should be shown for this release, grouped as either new features or known issues, and sorted first by sort_num highest to lowest and then by created date, which is applied to both groups, and then for new features we also sort by tag in the order specified by Note.TAGS, with untagged notes coming first, then finally moving any note with the fixed tag that starts with the release version to the top, for what we call ""dot fixes"". """""" tag_index = dict((tag, i) for i, tag in enumerate(Note.TAGS)) notes = self.note_set.order_by('-sort_num', 'created') if public_only: notes = notes.filter(is_public=True) known_issues = [n for n in notes if n.is_known_issue_for(self)] new_features = sorted( sorted( (n for n in notes if not n.is_known_issue_for(self)), key=lambda note: tag_index.get(note.tag, 0)), key=lambda n: n.tag == 'Fixed' and n.note.startswith(self.version), reverse=True) return new_features, known_issues" 3956,"def to_dict(self): """"""Return a dict all all data about the release"""""" data = model_to_dict(self, exclude=['id']) data['title'] = unicode(self) data['slug'] = self.slug data['release_date'] = self.release_date.date().isoformat() data['created'] = self.created.isoformat() data['modified'] = self.modified.isoformat() new_features, known_issues = self.notes(public_only=False) for note in known_issues: note.tag = 'Known' data['notes'] = [n.to_dict(self) for n in chain(new_features, known_issues)] return data" 3957,"def to_simple_dict(self): """"""Return a dict of only the basic data about the release"""""" return { 'version': self.version, 'product': self.product, 'channel': self.channel, 'is_public': self.is_public, 'slug': self.slug, 'title': unicode(self), }" 3958,"def playTone(self, freq, reps = 1, delay = 0.1, muteDelay = 0.0): """"""! \~english Play a tone \~chinese 播放音符 \~english @param freq @param reps @param delay >= 0(s) if 0 means do not delay. tone play will be Stop immediately
    @param muteDelay >= 0(s) If 0, there is no pause after playing and the next note plays immediately \~chinese @param freq: 频率 @param reps: 重复次数 @param delay >= 0(s) 如果是 0 意味着不延迟。 音符会立即停止播放
    @param muteDelay >= 0(s) 如果是 0 表示音符播放结束后没有停顿,立刻播放下一个音符 """""" if freq == 0: self.stopTone() self._delay(delay) #sleep(delay) return False if self._pwmPlayer == None: self._initPWMPlayer(freq) for r in range(0,reps): self._pwmPlayer.start(self.TONE_DUTY) self._pwmPlayer.ChangeFrequency( freq ) self._delay(delay) #sleep(delay) if muteDelay>0: self.stopTone() self._delay(muteDelay) #sleep(muteDelay) return True" 3959,"def playToneList(self, playList = None): """"""! \~english Play tones from a tone list @param playList an array of tones \~chinese 播放音调列表 @param playList: 音调数组 \~english @note playList format:\n \~chinese @note playList 格式:\n \~
                [
                  {""freq"": 440, ""reps"": 1, ""delay"": 0.08, ""muteDelay"": 0.15},
                  {""freq"": 567, ""reps"": 3, ""delay"": 0.08, ""muteDelay"": 0.15},
                  ...
                ]
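                 \~english A minimal usage sketch (assuming an instance of this class named buzzer; the notes themselves are arbitrary):
                 buzzer.playToneList([
                   {""freq"": 440, ""reps"": 1, ""delay"": 0.08, ""muteDelay"": 0.15},
                   {""freq"": 494, ""reps"": 1, ""delay"": 0.08, ""muteDelay"": 0.15}
                 ])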
               
     \n \~english \e delay: >= 0(s) if 0, do not delay; tone playback stops immediately
     \e muteDelay: >= 0(s) If 0, there is no pause after playing and the next note plays immediately \~chinese \e delay: >= 0(s)如果是 0 意味着不延迟。 音调会立即停止播放
    \e muteDelay: >= 0(s)如果是 0 表示播放音符结束后没有停顿,立刻播放下一个音符 """""" if playList == None: return False for t in playList: self.playTone(t[""freq""], t[""reps""], t[""delay""], t[""muteDelay""]) self.stopTone() return True" 3960,"def all(self, instance): """"""Get all ACLs associated with the instance specified by name. :param str instance: The name of the instance from which to fetch ACLs. :returns: A list of :py:class:`Acl` objects associated with the specified instance. :rtype: list """""" url = self._url.format(instance=instance) response = requests.get(url, **self._default_request_kwargs) data = self._get_response_data(response) return self._concrete_acl_list(data)" 3961,"def create(self, instance, cidr_mask, description, **kwargs): """"""Create an ACL entry for the specified instance. :param str instance: The name of the instance to associate the new ACL entry with. :param str cidr_mask: The IPv4 CIDR mask for the new ACL entry. :param str description: A short description for the new ACL entry. :param collector kwargs: (optional) Additional key=value pairs to be supplied to the creation payload. **Caution:** fields unrecognized by the API will cause this request to fail with a 400 from the API. """""" # Build up request data. url = self._url.format(instance=instance) request_data = { 'cidr_mask': cidr_mask, 'description': description } request_data.update(kwargs) # Call to create an instance. response = requests.post( url, data=json.dumps(request_data), **self._default_request_kwargs ) # Log outcome of instance creation request. if response.status_code == 200: logger.info('Successfully created a new ACL for instance {} with: {}.' .format(instance, request_data)) else: logger.info('Failed to create a new ACL for instance {} with: {}.' .format(instance, request_data)) data = self._get_response_data(response) return self._concrete_acl(data)" 3962,"def get(self, instance, acl): """"""Get an ACL by ID belonging to the instance specified by name. :param str instance: The name of the instance from which to fetch the ACL. :param str acl: The ID of the ACL to fetch. :returns: An :py:class:`Acl` object, or None if ACL does not exist. :rtype: :py:class:`Acl` """""" base_url = self._url.format(instance=instance) url = '{base}{aclid}/'.format(base=base_url, aclid=acl) response = requests.get(url, **self._default_request_kwargs) data = self._get_response_data(response) return self._concrete_acl(data)" 3963,"def delete(self, instance, acl): """"""Delete an ACL by ID belonging to the instance specified by name. :param str instance: The name of the instance on which the ACL exists. :param str acll: The ID of the ACL to delete. """""" base_url = self._url.format(instance=instance) url = '{base}{aclid}/'.format(base=base_url, aclid=acl) response = requests.delete(url, **self._default_request_kwargs) if response.status_code == 200: logger.info('Successfully deleted ACL {}'.format(acl)) else: logger.info('Failed to delete ACL {}'.format(acl)) logger.info('Response: [{0}] {1}'.format(response.status_code, response.content)) raise errors.ObjectRocketException('Failed to delete ACL.')" 3964,"def _concrete_acl(self, acl_doc): """"""Concretize an ACL document. :param dict acl_doc: A document describing an ACL entry. Should come from the API. :returns: An :py:class:`Acl`, or None. :rtype: :py:class:`bases.BaseInstance` """""" if not isinstance(acl_doc, dict): return None # Attempt to instantiate an Acl object with the given dict. 
try: return Acl(document=acl_doc, acls=self) # If construction fails, log the exception and return None. except Exception as ex: logger.exception(ex) logger.error('Could not instantiate ACL document. You probably need to upgrade to a ' 'recent version of the client. Document which caused this error: {}' .format(acl_doc)) return None" 3965,"def _concrete_acl_list(self, acl_docs): """"""Concretize a list of ACL documents. :param list acl_docs: A list of ACL documents. Should come from the API. :returns: A list of :py:class:`ACL` objects. :rtype: list """""" if not acl_docs: return [] return list(filter(None, [self._concrete_acl(acl_doc=doc) for doc in acl_docs]))" 3966,"def _default_request_kwargs(self): """"""The default request keyword arguments to be passed to the requests library."""""" defaults = copy.deepcopy(super(Acls, self)._default_request_kwargs) defaults.setdefault('headers', {}).update({ 'X-Auth-Token': self._client.auth._token }) return defaults" 3967,"def _url(self): """"""The URL of this ACL object."""""" base_url = self._client._url.rstrip('/') return '{}/instances/{}/acls/{}/'.format(base_url, self.instance_name, self.id)" 3968,"def first(pipe, items=1): ''' first is essentially the next() function except it's second argument determines how many of the first items you want. If items is more than 1 the output is an islice of the generator. If items is 1, the first item is returned ''' pipe = iter(pipe) return next(pipe) if items == 1 else islice(pipe, 0, items)" 3969,"def create_state(cls, state, **kwargs): """"""Create State Create a new State This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.create_state(state, async=True) >>> result = thread.get() :param async bool :param State state: Attributes of state to create (required) :return: State If the method is called asynchronously, returns the request thread. """""" kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._create_state_with_http_info(state, **kwargs) else: (data) = cls._create_state_with_http_info(state, **kwargs) return data" 3970,"def delete_state_by_id(cls, state_id, **kwargs): """"""Delete State Delete an instance of State by its ID. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.delete_state_by_id(state_id, async=True) >>> result = thread.get() :param async bool :param str state_id: ID of state to delete. (required) :return: None If the method is called asynchronously, returns the request thread. """""" kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._delete_state_by_id_with_http_info(state_id, **kwargs) else: (data) = cls._delete_state_by_id_with_http_info(state_id, **kwargs) return data" 3971,"def get_state_by_id(cls, state_id, **kwargs): """"""Find State Return single instance of State by its ID. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.get_state_by_id(state_id, async=True) >>> result = thread.get() :param async bool :param str state_id: ID of state to return (required) :return: State If the method is called asynchronously, returns the request thread. 
"""""" kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._get_state_by_id_with_http_info(state_id, **kwargs) else: (data) = cls._get_state_by_id_with_http_info(state_id, **kwargs) return data" 3972,"def list_all_states(cls, **kwargs): """"""List States Return a list of States This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.list_all_states(async=True) >>> result = thread.get() :param async bool :param int page: page number :param int size: page size :param str sort: page order :return: page[State] If the method is called asynchronously, returns the request thread. """""" kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._list_all_states_with_http_info(**kwargs) else: (data) = cls._list_all_states_with_http_info(**kwargs) return data" 3973,"def replace_state_by_id(cls, state_id, state, **kwargs): """"""Replace State Replace all attributes of State This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.replace_state_by_id(state_id, state, async=True) >>> result = thread.get() :param async bool :param str state_id: ID of state to replace (required) :param State state: Attributes of state to replace (required) :return: State If the method is called asynchronously, returns the request thread. """""" kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._replace_state_by_id_with_http_info(state_id, state, **kwargs) else: (data) = cls._replace_state_by_id_with_http_info(state_id, state, **kwargs) return data" 3974,"def update_state_by_id(cls, state_id, state, **kwargs): """"""Update State Update attributes of State This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.update_state_by_id(state_id, state, async=True) >>> result = thread.get() :param async bool :param str state_id: ID of state to update. (required) :param State state: Attributes of state to update. (required) :return: State If the method is called asynchronously, returns the request thread. """""" kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._update_state_by_id_with_http_info(state_id, state, **kwargs) else: (data) = cls._update_state_by_id_with_http_info(state_id, state, **kwargs) return data" 3975,"def insert_list_of_dictionaries_into_database_tables( dbConn, log, dictList, dbTableName, uniqueKeyList=[], dateModified=False, dateCreated=True, batchSize=2500, replace=False, dbSettings=False): """"""insert list of dictionaries into database tables **Key Arguments:** - ``dbConn`` -- mysql database connection - ``log`` -- logger - ``dictList`` -- list of python dictionaries to add to the database table - ``dbTableName`` -- name of the database table - ``uniqueKeyList`` -- a list of column names to append as a unique constraint on the database - ``dateModified`` -- add the modification date as a column in the database - ``dateCreated`` -- add the created date as a column in the database - ``batchSize`` -- batch the insert commands into *batchSize* batches - ``replace`` -- repalce row if a duplicate is found - ``dbSettings`` -- pass in the database settings so multiprocessing can establish one connection per process (might not be faster) **Return:** - None **Usage:** .. 
code-block:: python from fundamentals.mysql import insert_list_of_dictionaries_into_database_tables insert_list_of_dictionaries_into_database_tables( dbConn=dbConn, log=log, dictList=dictList, dbTableName=""test_insert_many"", uniqueKeyList=[""col1"", ""col3""], dateModified=False, batchSize=2500 ) """""" log.debug( 'completed the ````insert_list_of_dictionaries_into_database_tables`` function') global count global totalCount global globalDbConn global sharedList reDate = re.compile('^[0-9]{4}-[0-9]{2}-[0-9]{2}T') if dbSettings: globalDbConn = dbSettings else: globalDbConn = dbConn if len(dictList) == 0: log.warning( 'the dictionary to be added to the database is empty' % locals()) return None if len(dictList): convert_dictionary_to_mysql_table( dbConn=dbConn, log=log, dictionary=dictList[0], dbTableName=dbTableName, uniqueKeyList=uniqueKeyList, dateModified=dateModified, reDatetime=reDate, replace=replace, dateCreated=dateCreated) dictList = dictList[1:] dbConn.autocommit(False) if len(dictList): total = len(dictList) batches = int(total / batchSize) start = 0 end = 0 sharedList = [] for i in range(batches + 1): end = end + batchSize start = i * batchSize thisBatch = dictList[start:end] sharedList.append((thisBatch, end)) totalCount = total + 1 ltotalCount = totalCount print ""Starting to insert %(ltotalCount)s rows into %(dbTableName)s"" % locals() print dbSettings if dbSettings == False: fmultiprocess( log=log, function=_insert_single_batch_into_database, inputArray=range(len(sharedList)), dbTableName=dbTableName, uniqueKeyList=uniqueKeyList, dateModified=dateModified, replace=replace, batchSize=batchSize, reDatetime=reDate, dateCreated=dateCreated ) else: fmultiprocess(log=log, function=_add_dictlist_to_database_via_load_in_file, inputArray=range(len(sharedList)), dbTablename=dbTableName, dbSettings=dbSettings, dateModified=dateModified) sys.stdout.write(""\x1b[1A\x1b[2K"") print ""%(ltotalCount)s / %(ltotalCount)s rows inserted into %(dbTableName)s"" % locals() log.debug( 'completed the ``insert_list_of_dictionaries_into_database_tables`` function') return None" 3976,"def _insert_single_batch_into_database( batchIndex, log, dbTableName, uniqueKeyList, dateModified, replace, batchSize, reDatetime, dateCreated): """"""*summary of function* **Key Arguments:** - ``batchIndex`` -- the index of the batch to insert - ``dbConn`` -- mysql database connection - ``log`` -- logger **Return:** - None **Usage:** .. todo:: add usage info create a sublime snippet for usage .. 
code-block:: python usage code """""" log.debug('starting the ``_insert_single_batch_into_database`` function') global totalCount global globalDbConn global sharedList batch = sharedList[batchIndex] reDate = reDatetime if isinstance(globalDbConn, dict): # SETUP ALL DATABASE CONNECTIONS dbConn = database( log=log, dbSettings=globalDbConn, autocommit=False ).connect() else: dbConn = globalDbConn count = batch[1] if count > totalCount: count = totalCount ltotalCount = totalCount inserted = False while inserted == False: if not replace: insertVerb = ""INSERT IGNORE"" else: insertVerb = ""INSERT IGNORE"" uniKeys = set().union(*(d.keys() for d in batch[0])) tmp = [] tmp[:] = [m.replace("" "", ""_"").replace( ""-"", ""_"") for m in uniKeys] uniKeys = tmp myKeys = '`,`'.join(uniKeys) vals = [tuple([None if d[k] in [""None"", None] else str(d[k]) for k in uniKeys]) for d in batch[0]] valueString = (""%s, "" * len(vals[0]))[:-2] insertCommand = insertVerb + """""" INTO `"""""" + dbTableName + \ """"""` (`"""""" + myKeys + """"""`, dateCreated) VALUES ("""""" + \ valueString + """""", NOW())"""""" if not dateCreated: insertCommand = insertCommand.replace( "", dateCreated)"", "")"").replace("", NOW())"", "")"") dup = """" if replace: dup = "" ON DUPLICATE KEY UPDATE "" for k in uniKeys: dup = """"""%(dup)s %(k)s=values(%(k)s),"""""" % locals() dup = """"""%(dup)s updated=1, dateLastModified=NOW()"""""" % locals() insertCommand = insertCommand + dup insertCommand = insertCommand.replace('\\""""', '\\"" ""') insertCommand = insertCommand.replace('""""', ""null"") insertCommand = insertCommand.replace('""None""', 'null') message = """" # log.debug('adding new data to the %s table; query: %s' % # (dbTableName, addValue)) try: message = writequery( log=log, sqlQuery=insertCommand, dbConn=dbConn, Force=True, manyValueList=vals ) except: theseInserts = [] for aDict in batch[0]: insertCommand, valueTuple = convert_dictionary_to_mysql_table( dbConn=dbConn, log=log, dictionary=aDict, dbTableName=dbTableName, uniqueKeyList=uniqueKeyList, dateModified=dateModified, returnInsertOnly=True, replace=replace, reDatetime=reDate, skipChecks=True ) theseInserts.append(valueTuple) message = """" # log.debug('adding new data to the %s table; query: %s' % # (dbTableName, addValue)) message = writequery( log=log, sqlQuery=insertCommand, dbConn=dbConn, Force=True, manyValueList=theseInserts ) if message == ""unknown column"": for aDict in batch: convert_dictionary_to_mysql_table( dbConn=dbConn, log=log, dictionary=aDict, dbTableName=dbTableName, uniqueKeyList=uniqueKeyList, dateModified=dateModified, reDatetime=reDate, replace=replace ) else: inserted = True dbConn.commit() log.debug('completed the ``_insert_single_batch_into_database`` function') return ""None""" 3977,"def _add_dictlist_to_database_via_load_in_file( masterListIndex, log, dbTablename, dbSettings, dateModified=False): """"""*load a list of dictionaries into a database table with load data infile* **Key Arguments:** - ``masterListIndex`` -- the index of the sharedList of dictionary lists to process - ``dbTablename`` -- the name of the database table to add the list to - ``dbSettings`` -- the dictionary of database settings - ``log`` -- logger - ``dateModified`` -- add a dateModified stamp with an updated flag to rows? **Return:** - None **Usage:** .. todo:: add usage info create a sublime snippet for usage .. 
code-block:: python usage code """""" log.debug('starting the ``_add_dictlist_to_database_via_load_in_file`` function') global sharedList dictList = sharedList[masterListIndex][0] count = sharedList[masterListIndex][1] if count > totalCount: count = totalCount ltotalCount = totalCount # SETUP ALL DATABASE CONNECTIONS dbConn = database( log=log, dbSettings=dbSettings ).connect() now = datetime.now() tmpTable = now.strftime(""tmp_%Y%m%dt%H%M%S%f"") # CREATE A TEMPORY TABLE TO ADD DATA TO sqlQuery = """"""CREATE TEMPORARY TABLE %(tmpTable)s SELECT * FROM %(dbTablename)s WHERE 1=0;"""""" % locals() writequery( log=log, sqlQuery=sqlQuery, dbConn=dbConn ) csvColumns = [k for d in dictList for k in d.keys()] csvColumns = list(set(csvColumns)) csvColumnsString = (', ').join(csvColumns) df = pd.DataFrame(dictList) df.replace(['nan', 'None', '', 'NaN', np.nan], '\\N', inplace=True) df.to_csv('/tmp/%(tmpTable)s' % locals(), sep=""|"", index=False, escapechar=""\\"", quotechar='""', columns=csvColumns, encoding='utf-8') sqlQuery = """"""LOAD DATA LOCAL INFILE '/tmp/%(tmpTable)s' INTO TABLE %(tmpTable)s FIELDS TERMINATED BY '|' OPTIONALLY ENCLOSED BY '""' IGNORE 1 LINES (%(csvColumnsString)s);"""""" % locals() writequery( log=log, sqlQuery=sqlQuery, dbConn=dbConn ) updateStatement = """" for i in csvColumns: updateStatement += ""`%(i)s` = VALUES(`%(i)s`), "" % locals() if dateModified: updateStatement += ""dateLastModified = NOW(), updated = 1"" else: updateStatement = updateStatement[0:-2] sqlQuery = """""" INSERT IGNORE INTO %(dbTablename)s SELECT * FROM %(tmpTable)s ON DUPLICATE KEY UPDATE %(updateStatement)s;"""""" % locals() writequery( log=log, sqlQuery=sqlQuery, dbConn=dbConn ) sqlQuery = """"""DROP TEMPORARY TABLE %(tmpTable)s;"""""" % locals() writequery( log=log, sqlQuery=sqlQuery, dbConn=dbConn ) try: os.remove('/tmp/%(tmpTable)s' % locals()) except: pass log.debug( 'completed the ``_add_dictlist_to_database_via_load_in_file`` function') return None" 3978,"def make_directory(path): """""" Create directory if that not exists. """""" try: makedirs(path) logging.debug('Directory created: {0}'.format(path)) except OSError as e: if e.errno != errno.EEXIST: raise" 3979,"def copy_file(self, from_path, to_path): """""" Copy file. """""" if not op.exists(op.dirname(to_path)): self.make_directory(op.dirname(to_path)) shutil.copy(from_path, to_path) logging.debug('File copied: {0}'.format(to_path))" 3980,"def params(self): """""" Read self params from configuration. """""" parser = JinjaInterpolationNamespace() parser.read(self.configuration) return dict(parser['params'] or {})" 3981,"def scan(cls, path): """""" Scan directory for templates. """""" result = [] try: for _p in listdir(path): try: result.append(Template(_p, op.join(path, _p))) except ValueError: continue except OSError: pass return result" 3982,"def copy(self): """""" Prepare and paste self templates. """""" templates = self.prepare_templates() if self.params.interactive: keys = list(self.parser.default) for key in keys: if key.startswith('_'): continue prompt = ""{0} (default is \""{1}\"")? 
"".format( key, self.parser.default[key]) if _compat.PY2: value = raw_input(prompt.encode('utf-8')).decode('utf-8') else: value = input(prompt.encode('utf-8')) value = value.strip() if value: self.parser.default[key] = value self.parser.default['templates'] = tt = ','.join( t.name for t in templates) logging.warning(""Paste templates: {0}"".format(tt)) self.make_directory(self.params.TARGET) logging.debug(""\nDefault context:\n----------------"") logging.debug( ''.join('{0:<15} {1}\n'.format(*v) for v in self.parser.default.items()) ) return [t.paste( **dict(self.parser.default.items())) for t in templates]" 3983,"def iterate_templates(self): """""" Iterate self starter templates. :returns: A templates generator """""" return [t for dd in self.dirs for t in Template.scan(dd)]" 3984,"def _dump_files_to_local_drive(bodies, theseUrls, log): """""" *takes the files stored in memory and dumps them to the local drive* ****Key Arguments:**** - ``bodies`` -- array of file data (currently stored in memory) - ``theseUrls`` -- array of local files paths to dump the file data into - ``log`` -- the logger **Return:** - ``None`` """""" j = 0 log.debug(""attempting to write file data to local drive"") log.debug('%s URLS = %s' % (len(theseUrls), str(theseUrls),)) for body in bodies: try: if theseUrls[j]: with open(theseUrls[j], 'w') as f: f.write(body) f.close() j += 1 except Exception, e: log.error( ""could not write downloaded file to local drive - failed with this error %s: "" % (str(e),)) return -1 return" 3985,"def register_shortcuts(self): ''' .. versionchanged:: 0.14 Add keyboard shortcuts to set neighbouring electrode states based on directional input using ```` key plus the corresponding direction (e.g., ``Up``) ''' def control_protocol(command): if self.plugin is not None: self.plugin.execute_async('microdrop.gui.protocol_controller', command) def actuate_direction(direction): if self.plugin is not None: self.plugin.execute_async('microdrop.electrode_controller_plugin', 'set_electrode_direction_states', direction=direction) # Tie shortcuts to protocol controller commands (next, previous, etc.) shortcuts = {'r': lambda *args: control_protocol('run_protocol'), 'z': lambda *args: self.undo(), 'y': lambda *args: self.redo(), 'A': lambda *args: control_protocol('first_step'), 'S': lambda *args: control_protocol('prev_step'), 'D': lambda *args: control_protocol('next_step'), 'F': lambda *args: control_protocol('last_step'), 'Up': lambda *args: actuate_direction('up'), 'Down': lambda *args: actuate_direction('down'), 'Left': lambda *args: actuate_direction('left'), 'Right': lambda *args: actuate_direction('right')} register_shortcuts(self.widget.parent, shortcuts)" 3986,"def cleanup_video(self): ''' .. versionchanged:: 0.6.1 Log terminated video source process ID. ''' if self.video_source_process is not None: self.video_source_process.terminate() logger.info('Terminated video process: %s', self.video_source_process.pid) self.video_source_process = None" 3987,"def on_canvas_slave__electrode_selected(self, slave, data): ''' .. versionchanged:: 0.11 Clear any temporary routes (drawn while mouse is down) from routes list. .. versionchanged:: 0.11.3 Clear temporary routes by setting ``df_routes`` property of :attr:`canvas_slave`. ''' if self.plugin is None: return # XXX Negative `route_i` corresponds to temporary route being # drawn. Since electrode selection terminates route drawing, clear any # rows corresponding to negative `route_i` values from the routes # table. 
slave.df_routes = slave.df_routes.loc[slave.df_routes.route_i >= 0].copy() state = self.canvas_slave.electrode_states.get(data['electrode_id'], 0) self.plugin.execute_async('microdrop.electrode_controller_plugin', 'set_electrode_states', electrode_states=pd .Series([not state], index=[data['electrode_id']]))" 3988,"def on_canvas_slave__electrode_pair_selected(self, slave, data): ''' Process pair of selected electrodes. For now, this consists of finding the shortest path between the two electrodes and appending it to the list of droplet routes for the current step. Note that the droplet routes for a step are stored in a frame/table in the `DmfDeviceController` step options. .. versionchanged:: 0.11 Clear any temporary routes (drawn while mouse is down) from routes list. .. versionchanged:: 0.11.3 Clear temporary routes by setting ``df_routes`` property of :attr:`canvas_slave`. ''' import networkx as nx source_id = data['source_id'] target_id = data['target_id'] if self.canvas_slave.device is None or self.plugin is None: return # XXX Negative `route_i` corresponds to temporary route being # drawn. Since electrode pair selection terminates route drawing, # clear any rows corresponding to negative `route_i` values from the # routes table. slave.df_routes = slave.df_routes.loc[slave.df_routes.route_i >= 0].copy() try: shortest_path = self.canvas_slave.device.find_path(source_id, target_id) self.plugin.execute_async('droplet_planning_plugin', 'add_route', drop_route=shortest_path) except nx.NetworkXNoPath: logger.error('No path found between %s and %s.', source_id, target_id)" 3989,"def on_canvas_slave__route_electrode_added(self, slave, electrode_id): ''' .. versionchanged:: 0.11 Draw temporary route currently being formed. .. versionchanged:: 0.11.3 Update routes table by setting ``df_routes`` property of :attr:`canvas_slave`. ''' logger.debug('Route electrode added: %s', electrode_id) if slave._route.electrode_ids is None: return df_route = pd.DataFrame([[-1, e, i] for i, e in enumerate(slave._route.electrode_ids)], columns=['route_i', 'electrode_i', 'transition_i']) # XXX Negative `route_i` corresponds to temporary route being # drawn. Append row entries for temporary route to existing routes # table. df_routes = slave.df_routes.loc[slave.df_routes.route_i >= 0].copy() self.canvas_slave.df_routes = pd.concat([df_routes, df_route])" 3990,"def ping_hub(self): ''' Attempt to ping the ZeroMQ plugin hub to verify connection is alive. If ping is successful, record timestamp. If ping is unsuccessful, call `on_heartbeat_error` method. ''' if self.plugin is not None: try: self.plugin.execute(self.plugin.hub_name, 'ping', timeout_s=1, silent=True) except IOError: self.on_heartbeat_error() else: self.heartbeat_alive_timestamp = datetime.now() logger.debug('Hub connection alive as of %s', self.heartbeat_alive_timestamp) return True" 3991,"def on_electrode_states_updated(self, states): ''' .. versionchanged:: 0.12 Refactor to use :meth:`on_electrode_states_set`. ''' states['electrode_states'] = \ states['electrode_states'].combine_first(self.canvas_slave .electrode_states) self.on_electrode_states_set(states)" 3992,"def on_electrode_states_set(self, states): ''' Render and draw updated **static** electrode actuations layer on canvas. 
''' if (self.canvas_slave.electrode_states .equals(states['electrode_states'])): return self.canvas_slave.electrode_states = states['electrode_states'] surface = self.canvas_slave.render_static_electrode_state_shapes() self.canvas_slave.set_surface('static_electrode_state_shapes', surface) self.canvas_slave.cairo_surface = flatten_surfaces(self.canvas_slave .df_surfaces) gobject.idle_add(self.canvas_slave.draw)" 3993,"def on_dynamic_electrode_states_set(self, states): ''' Render and draw updated **dynamic** electrode actuations layer on canvas. .. versionadded:: 0.12 ''' self.canvas_slave._dynamic_electrodes = states surface = self.canvas_slave.render_dynamic_electrode_state_shapes() self.canvas_slave.set_surface('dynamic_electrode_state_shapes', surface) self.canvas_slave.cairo_surface = flatten_surfaces(self.canvas_slave .df_surfaces) gobject.idle_add(self.canvas_slave.draw)" 3994,"def set_video_config(self, video_config): ''' .. versionchanged:: 0.6.1 Log video source process ID. ''' self.video_config = video_config if video_config is None: self.disable_video() return py_exe = sys.executable port = self.canvas_slave.video_sink.socket_info['port'] transport = self.canvas_slave.video_sink.socket_info['transport'] host = (self.canvas_slave.video_sink.socket_info['host'] .replace('*', 'localhost')) # Terminate existing process (if running). self.cleanup_video() # Launch new video source process using JSON serialized video # configuration. command = [py_exe, '-m', 'pygst_utils.video_view.video_source', 'fromjson', '-p', str(port), transport, host, video_config.to_json()] logger.info(' '.join(command)) self.video_source_process = sp.Popen(command) logger.info('Launched video source process: %s', self.video_source_process.pid) self.canvas_slave.enable()" 3995,"def on_canvas_slave__routes_set(self, slave, df_routes): ''' .. versionadded:: 0.11.3 ''' self.canvas_slave.set_surface('routes', self.canvas_slave.render_routes()) self.canvas_slave.cairo_surface = flatten_surfaces(self.canvas_slave .df_surfaces) gtk.idle_add(self.canvas_slave.draw)" 3996,"def on_canvas_slave__global_command(self, slave, group, command, data): ''' .. versionadded:: 0.13 Execute global command (i.e., command not tied to a specific electrode or route). ''' def command_callback(reply): _L().debug('%s.%s()', group, command) # Decode content to raise error, if necessary. try: decode_content_data(reply) except Exception: _L().error('Global command error.', exc_info=True) self.plugin.execute_async(group, command, callback=command_callback)" 3997,"def get_string_version(name, default=DEFAULT_STRING_NOT_FOUND, allow_ambiguous=True): """""" Get string version from installed package information. It will return :attr:`default` value when the named package is not installed. Parameters ----------- name : string An application name used to install via setuptools. default : string A default returning value used when the named application is not installed yet allow_ambiguous : boolean ``True`` for allowing ambiguous version information. Turn this argument to ``False`` if ``get_string_version`` report wrong version. 
Returns -------- string A version string or not found message (:attr:`default`) Examples -------- >>> import re >>> v = get_string_version('app_version', allow_ambiguous=True) >>> re.match('^\d+.\d+\.\d+', v) is not None True >>> get_string_version('distribution_which_is_not_installed') 'Please install this application with setup.py' """""" # get filename of callar callar = inspect.getouterframes(inspect.currentframe())[1][1] if callar.startswith('>> v = get_tuple_version('app_version', allow_ambiguous=True) >>> len(v) >= 3 True >>> isinstance(v[0], int) True >>> isinstance(v[1], int) True >>> isinstance(v[2], int) True >>> get_tuple_version('distribution_which_is_not_installed') (0, 0, 0) """""" def _prefer_int(x): try: return int(x) except ValueError: return x version = get_string_version(name, default=default, allow_ambiguous=allow_ambiguous) # convert string version to tuple version # prefer integer for easy handling if isinstance(version, tuple): # not found return version return tuple(map(_prefer_int, version.split('.')))" 3999,"def get_versions(name, default_string=DEFAULT_STRING_NOT_FOUND, default_tuple=DEFAULT_TUPLE_NOT_FOUND, allow_ambiguous=True): """""" Get string and tuple versions from installed package information It will return :attr:`default_string` and :attr:`default_tuple` values when the named package is not installed. Parameters ----------- name : string An application name used to install via setuptools. default : string A default returning value used when the named application is not installed yet default_tuple : tuple A default returning value used when the named application is not installed yet allow_ambiguous : boolean ``True`` for allowing ambiguous version information. Returns -------- tuple A version string and version tuple Examples -------- >>> import re >>> v1, v2 = get_versions('app_version', allow_ambiguous=True) >>> isinstance(v1, str) True >>> isinstance(v2, tuple) True >>> get_versions('distribution_which_is_not_installed') ('Please install this application with setup.py', (0, 0, 0)) """""" version_string = get_string_version(name, default_string, allow_ambiguous) version_tuple = get_tuple_version(name, default_tuple, allow_ambiguous) return version_string, version_tuple" 4000,"def _get_toSymbol(cls): # type: (_MetaRule) -> object """""" Get symbol from which the rule is rewrote. :param cls: Rule for which return the symbol. :return: Symbol from which the rule is rewrote. :raise RuleNotDefinedException: If the rule is not defined. :raise CantCreateSingleRuleException: If the rule consists of more rules. :raise NotASingleSymbolException: If number of symbols on the left is more. """""" if cls._traverse: raise RuleNotDefinedException(cls) if len(cls.rules) > 1: raise CantCreateSingleRuleException(cls) right = cls.rules[0][1] if len(right) > 1: raise NotASingleSymbolException(right) return right[0]" 4001,"def _get_fromSymbol(cls): # type: (_MetaRule) -> object """""" Get symbol to which the rule is rewrote. :param cls: Rule for which return the symbol. :return: Symbol to which the rule is rewrote. :raise RuleNotDefinedException: If the rule is not defined. :raise CantCreateSingleRuleException: If the rule consists of more rules. :raise NotASingleSymbolException: If number of symbols on the right is more. 
"""""" if cls._traverse: raise RuleNotDefinedException(cls) if len(cls.rules) > 1: raise CantCreateSingleRuleException(cls) left = cls.rules[0][0] if len(left) > 1: raise NotASingleSymbolException(left) return left[0]" 4002,"def _get_right(cls): # type: (_MetaRule) -> List[object] """""" Get right part of the rule. :param cls: Rule for which return the right side. :return: Symbols on the right side of the array. :raise RuleNotDefinedException: If the rule is not defined. :raise CantCreateSingleRuleException: If the rule consists of more rules. :raise NotASingleSymbolException: If number of symbols on the left is more. """""" if cls._traverse: return [cls.toSymbol] if len(cls.rules) > 1: raise CantCreateSingleRuleException(cls) return cls.rules[0][1]" 4003,"def _get_left(cls): # type: (_MetaRule) -> List[object] """""" Get left part of the rule. :param cls: Rule for which return the left side. :return: Symbols on the left side of the array. :raise RuleNotDefinedException: If the rule is not defined. :raise CantCreateSingleRuleException: If the rule consists of more rules. :raise NotASingleSymbolException: If number of symbols on the left is more. """""" if cls._traverse: return [cls.fromSymbol] if len(cls.rules) > 1: raise CantCreateSingleRuleException(cls) return cls.rules[0][0]" 4004,"def _get_rule(cls): # type: (_MetaRule) -> (List[object], List[object]) """""" Get rule on the Rule class. :param cls: Rule for which return the rule. :return: Rule inside the Rule class. :raise RuleNotDefinedException: If the rule is not defined. :raise CantCreateSingleRuleException: If the rule consists of more rules. :raise NotASingleSymbolException: If number of symbols on the left is more. """""" if cls._traverse: return (cls.left, cls.right) if len(cls.rules) > 1: raise CantCreateSingleRuleException(cls) return cls.rules[0]" 4005,"def _get_rules(cls): # type: (_MetaRule) -> List[(List[object], List[object])] """""" Get rules on the Rule class. :param cls: Rule for which return the rules. :return: Rules inside the Rule class. :raise RuleNotDefinedException: If the rule is not defined. :raise CantCreateSingleRuleException: If the rule consists of more rules. :raise NotASingleSymbolException: If number of symbols on the left is more. """""" cls._traverse = True r = cls.rule cls._traverse = False return [r]" 4006,"def _controlSide(cls, side, grammar): # type: (_MetaRule, List[object], Grammar) -> None """""" Validate one side of the rule. :param side: Iterable side of the rule. :param grammar: Grammar on which to validate. :raise RuleSyntaxException: If invalid syntax is use. :raise UselessEpsilonException: If useless epsilon is used. :raise TerminalDoesNotExistsException: If terminal does not exists in the grammar. :raise NonterminalDoesNotExistsException: If nonterminal does not exists in the grammar. """""" if not isinstance(side, list): raise RuleSyntaxException(cls, 'One side of rule is not enclose by list', side) if len(side) == 0: raise RuleSyntaxException(cls, 'One side of rule is not define', side) if EPS in side and len(side) > 1: raise UselessEpsilonException(cls) for symb in side: if isclass(symb) and issubclass(symb, Nonterminal): if symb not in grammar.nonterminals: raise NonterminalDoesNotExistsException(cls, symb, grammar) elif symb is EPS: continue elif symb not in grammar.terminals: raise TerminalDoesNotExistsException(cls, symb, grammar)" 4007,"def validate(cls, grammar): # type: (_MetaRule, Grammar) -> None """""" Perform rules validation of the class. 
:param grammar: Grammar on which to validate. :raise RuleSyntaxException: If invalid syntax is used. :raise UselessEpsilonException: If epsilon used in rules in useless. :raise TerminalDoesNotExistsException: If terminal does not exists in the grammar. :raise NonterminalDoesNotExistsException: If nonterminal does not exists in the grammar. """""" # check if the rule is not defined multiple times defined = set(dir(cls)) if 'rules' in defined and len(defined & {'rule', 'left', 'right', 'toSymbol', 'fromSymbol'}) > 0 or \ 'rule' in defined and len(defined & {'left', 'right', 'toSymbol', 'fromSymbol'}) > 0 or \ 'left' in defined and 'fromSymbol' in defined or \ 'right' in defined and 'toSymbol' in defined: raise MultipleDefinitionException(cls, 'Rule is defined multiple times') # check if the rule is defined properly all = cls.rules if not isinstance(all, list): raise RuleSyntaxException(cls, 'Rules property is not enclose in list') for rule in all: if not isinstance(rule, tuple): raise RuleSyntaxException(cls, 'One of the rules is not enclose in tuple', rule) if len(rule) != 2: raise RuleSyntaxException(cls, 'One of the rules does not have define left and right part', rule) left = rule[0] right = rule[1] cls._controlSide(left, grammar) cls._controlSide(right, grammar) if left == [EPS] and right == [EPS]: raise UselessEpsilonException(cls)" 4008,"def no_empty_value(func): """"""Raises an exception if function argument is empty."""""" @wraps(func) def wrapper(value): if not value: raise Exception(""Empty value not allowed"") return func(value) return wrapper" 4009,"def to_bool(value): """"""Converts human boolean-like values to Python boolean. Falls back to :class:`bool` when ``value`` is not recognized. :param value: the value to convert :returns: ``True`` if value is truthy, ``False`` otherwise :rtype: bool """""" cases = { '0': False, 'false': False, 'no': False, '1': True, 'true': True, 'yes': True, } value = value.lower() if isinstance(value, basestring) else value return cases.get(value, bool(value))" 4010,"def etree_to_dict(t, trim=True, **kw): u""""""Converts an lxml.etree object to Python dict. >>> etree_to_dict(etree.Element('root')) {'root': None} :param etree.Element t: lxml tree to convert :returns d: a dict representing the lxml tree ``t`` :rtype: dict """""" d = {t.tag: {} if t.attrib else None} children = list(t) etree_to_dict_w_args = partial(etree_to_dict, trim=trim, **kw) if children: dd = defaultdict(list) d = {t.tag: {}} for dc in map(etree_to_dict_w_args, children): for k, v in dc.iteritems(): # do not add Comment instance to the key if k is not etree.Comment: dd[k].append(v) d[t.tag] = {k: v[0] if len(v) == 1 else v for k, v in dd.iteritems()} if t.attrib: d[t.tag].update(('@' + k, v) for k, v in t.attrib.iteritems()) if trim and t.text: t.text = t.text.strip() if t.text: if t.tag is etree.Comment and not kw.get('without_comments'): # adds a comments node d['#comments'] = t.text elif children or t.attrib: d[t.tag]['#text'] = t.text else: d[t.tag] = t.text return d" 4011,"def dict_to_etree(d, root): u""""""Converts a dict to lxml.etree object. 
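The truthy/falsy table used by to_bool above is easiest to see with a Python 3 restatement (the original checks basestring, so it targets Python 2); unrecognized values fall back to bool().

def to_bool(value):
    cases = {'0': False, 'false': False, 'no': False,
             '1': True, 'true': True, 'yes': True}
    key = value.lower() if isinstance(value, str) else value
    return cases.get(key, bool(value))

assert to_bool('No') is False
assert to_bool('YES') is True
assert to_bool(42) is True   # unrecognized values fall back to bool()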
>>> dict_to_etree({'root': {'#text': 'node_text', '@attr': 'val'}}, etree.Element('root')) # doctest: +ELLIPSIS :param dict d: dict representing the XML tree :param etree.Element root: XML node which will be assigned the resulting tree :returns: Textual representation of the XML tree :rtype: str """""" def _to_etree(d, node): if d is None or len(d) == 0: return elif isinstance(d, basestring): node.text = d elif isinstance(d, dict): for k, v in d.items(): assert isinstance(k, basestring) if k.startswith('#'): assert k == '#text' assert isinstance(v, basestring) node.text = v elif k.startswith('@'): assert isinstance(v, basestring) node.set(k[1:], v) elif isinstance(v, list): # No matter the child count, their parent will be the same. sub_elem = etree.SubElement(node, k) for child_num, e in enumerate(v): if e is None: if child_num == 0: # Found the first occurrence of an empty child, # skip creating of its XML repr, since it would be # the same as ``sub_element`` higher up. continue # A list with None element means an empty child node # in its parent, thus, recreating tags we have to go # up one level. # <=> {'node': 'child': [None, None]} _to_etree(node, k) else: # If this isn't first child and it's a complex # value (dict), we need to check if it's value # is equivalent to None. if child_num != 0 and not (isinstance(e, dict) and not all(e.values())): # At least one child was None, we have to create # a new parent-node, which will not be empty. sub_elem = etree.SubElement(node, k) _to_etree(e, sub_elem) else: _to_etree(v, etree.SubElement(node, k)) elif etree.iselement(d): # Supports the case, when we got an empty child and want to recreate it. etree.SubElement(d, node) else: raise AttributeError('Argument is neither dict nor basestring.') _to_etree(d, root) return root" 4012,"def objwalk(self, obj, path=(), memo=None): """"""Traverse a dictionary recursively and save path Taken from: http://code.activestate.com/recipes/577982-recursively-walk-python-objects/ """""" # dual python 2/3 compatability, inspired by the ""six"" library string_types = (str, unicode) if str is bytes else (str, bytes) iteritems = lambda mapping: getattr(mapping, 'iteritems', mapping.items)() if memo is None: memo = set() iterator = None if isinstance(obj, Mapping): iterator = iteritems elif isinstance(obj, (Sequence, Set)) and not isinstance(obj, string_types): iterator = enumerate if iterator: if id(obj) not in memo: memo.add(id(obj)) for path_component, value in iterator(obj): for result in self.objwalk(value, path + (path_component,), memo): yield result memo.remove(id(obj)) else: yield path, obj" 4013,"def set_cache_dir(directory): """"""Set the directory to cache JSON responses from most API endpoints. """""" global cache_dir if directory is None: cache_dir = None return if not os.path.exists(directory): os.makedirs(directory) if not os.path.isdir(directory): raise ValueError(""not a directory"") cache_dir = directory" 4014,"def create_element_tree(elem_or_name=None, text=None, **attribute_kwargs): """""" Creates an ElementTree from elem_or_name, updated it with text and attributes. If elem_or_name is None, a permanently empty ElementTree is returned. 
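A standalone sketch of the objwalk traversal idea above, assuming Python 3 and only dict/list containers (the original is a method and also handles sets and Python 2 strings); it yields (path, leaf) pairs.

def objwalk(obj, path=()):
    # Yield (path, value) pairs for every leaf in a nested dict/list structure
    if isinstance(obj, dict):
        items = obj.items()
    elif isinstance(obj, (list, tuple)):
        items = enumerate(obj)
    else:
        yield path, obj
        return
    for key, value in items:
        yield from objwalk(value, path + (key,))

data = {'a': [1, 2], 'b': {'c': 3}}
assert list(objwalk(data)) == [(('a', 0), 1), (('a', 1), 2), (('b', 'c'), 3)]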
:param elem_or_name: an Element or the name of the root element tag :param text: optional text with which to update the root element :param attribute_kwargs: optional attributes to add to the root element :return: a new ElementTree with the specified root and attributes """""" if elem_or_name is None: return ElementTree() is_elem = isinstance(elem_or_name, ElementType) element = elem_or_name if is_elem else Element(elem_or_name) if text is not None: element.text = text element.attrib.update(attribute_kwargs) return ElementTree(element)" 4015,"def clear_children(parent_to_parse, element_path=None): """""" Clears only children (not text or attributes) from the parsed parent or named element. """""" element = get_element(parent_to_parse, element_path) if element is None: return parent_to_parse else: elem_txt = element.text elem_atr = element.attrib element.clear() element.text = elem_txt element.attrib = elem_atr return element" 4016,"def clear_element(parent_to_parse, element_path=None): """""" Clears everything (text, attributes and children) from the parsed parent or named element. """""" element = get_element(parent_to_parse, element_path) if element is None: return parent_to_parse else: element.clear() return element" 4017,"def copy_element(from_element, to_element=None, path_to_copy=None): """""" Copies the element at path_to_copy in from_element and uses it to create or update the first element found at the same location (path_to_copy) in to_element. If path_to_copy is not provided, from_element is copied to the root of to_element. """""" from_element = get_element(from_element, path_to_copy) dest_element = get_element(to_element, path_to_copy) if from_element is None: return None if dest_element is None: if path_to_copy is None: dest_element = Element(from_element.tag) else: dest_element = insert_element(Element(from_element.tag), 0, path_to_copy) dest_element.tag = from_element.tag dest_element.text = from_element.text dest_element.tail = from_element.tail dest_element.attrib = from_element.attrib copied_children = [] for elem in from_element: copied_children.append(copy_element(elem)) for idx, child in enumerate(copied_children): dest_element.insert(idx, child) return dest_element" 4018,"def get_element_tree(parent_to_parse): """""" :return: an ElementTree initialized with the parsed element. :see: get_element(parent_to_parse, element_path) """""" if isinstance(parent_to_parse, ElementTree): return parent_to_parse element = get_element(parent_to_parse) return ElementTree() if element is None else ElementTree(element)" 4019,"def get_element(parent_to_parse, element_path=None): """""" :return: an element from the parent or parsed from a Dictionary, XML string or file. If element_path is not provided the root element is returned. 
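The difference between clear_children and clear_element above comes down to saving and restoring text and attributes around Element.clear(); a minimal demonstration with the standard library (the sample XML is illustrative):

from xml.etree.ElementTree import fromstring, tostring

elem = fromstring('<p id="1">keep me<child/></p>')

# clear_children keeps text and attributes but drops sub-elements
text, attrib = elem.text, dict(elem.attrib)
elem.clear()
elem.text, elem.attrib = text, attrib

assert tostring(elem) == b'<p id="1">keep me</p>'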
"""""" if parent_to_parse is None: return None elif isinstance(parent_to_parse, ElementTree): parent_to_parse = parent_to_parse.getroot() elif hasattr(parent_to_parse, 'read'): parent_to_parse = string_to_element(parent_to_parse.read()) elif isinstance(parent_to_parse, STRING_TYPES): parent_to_parse = string_to_element(parent_to_parse) elif isinstance(parent_to_parse, dict): parent_to_parse = dict_to_element(parent_to_parse) if parent_to_parse is None: return None elif not isinstance(parent_to_parse, ElementType): element_type = type(parent_to_parse).__name__ raise TypeError('Invalid element type: {0}'.format(element_type)) return parent_to_parse.find(element_path) if element_path else parent_to_parse" 4020,"def get_remote_element(url, element_path=None): """""" :return: an element initialized with the content at the specified file or URL :see: get_element(parent_to_parse, element_path) """""" content = None if url is None: return content elif _FILE_LOCATION_REGEX.match(url): with open(url, 'rb') as xml: content = xml.read() else: try: urllib = getattr(six_moves, 'urllib') remote = urllib.request.urlopen(url) content = remote.read() finally: # For Python 2 compliance: fails in `with` block (no `__exit__`) remote.close() return get_element(strip_namespaces(content), element_path)" 4021,"def elements_exist(elem_to_parse, element_paths=None, all_exist=False): """""" :return: true if any of the named elements exist in the parent by default, unless all_exist is true, in which case all the named elements must exist """""" element = get_element(elem_to_parse) if element is None: return False if not element_paths or isinstance(element_paths, string_types): return element_exists(element, element_paths) exists = False for element_path in element_paths: exists = element_exists(element, element_path) if all_exist and not exists: return False if exists and not all_exist: return True return exists" 4022,"def element_is_empty(elem_to_parse, element_path=None): """""" Returns true if the element is None, or has no text, tail, children or attributes. Whitespace in the element is stripped from text and tail before making the determination. """""" element = get_element(elem_to_parse, element_path) if element is None: return True is_empty = ( (element.text is None or not element.text.strip()) and (element.tail is None or not element.tail.strip()) and (element.attrib is None or not len(element.attrib)) and (not len(element.getchildren())) ) return is_empty" 4023,"def insert_element(elem_to_parse, elem_idx, elem_path, elem_txt=u'', **attrib_kwargs): """""" Creates an element named after elem_path, containing elem_txt, with kwargs as attributes, inserts it into elem_to_parse at elem_idx and returns it. If elem_path is an XPATH pointing to a non-existent element, elements not in the path are inserted and the text and index are applied to the last one. If elem_path is an XPATH pointing to an existing element, the new element is inserted as a sibling of the last one in the path at the index specified. 
"""""" element = get_element(elem_to_parse) if element is None or not elem_path: return None if not elem_idx: elem_idx = 0 if elem_path and XPATH_DELIM in elem_path: tags = elem_path.split(XPATH_DELIM) if element_exists(element, elem_path): # Get the next to last element in the XPATH parent = get_element(element, XPATH_DELIM.join(tags[:-1])) # Insert the new element as sibling to the last one return insert_element(parent, elem_idx, tags[-1], elem_txt, **attrib_kwargs) else: this_elem = element last_idx = len(tags) - 1 # Iterate over tags from root to leaf for idx, tag in enumerate(tags): next_elem = get_element(this_elem, tag) # Insert missing elements in the path or continue if next_elem is None: # Apply text and index to last element only if idx == last_idx: next_elem = insert_element(this_elem, elem_idx, tag, elem_txt, **attrib_kwargs) else: next_elem = insert_element(this_elem, 0, tag, u'', **attrib_kwargs) this_elem = next_elem return this_elem subelem = Element(elem_path, attrib_kwargs) subelem.text = elem_txt element.insert(elem_idx, subelem) return subelem" 4024,"def remove_element(parent_to_parse, element_path, clear_empty=False): """""" Searches for a sub-element named after element_name in the parsed element, and if it exists, removes them all and returns them as a list. If clear_empty is True, removes empty parents if all children are removed. :see: remove_empty_element(parent_to_parse, element_path, target_element=None) :see: get_element(parent_to_parse, element_path) """""" element = get_element(parent_to_parse) removed = [] if element is None or not element_path: return None if element_exists(element, element_path): if XPATH_DELIM not in element_path: for subelem in get_elements(element, element_path): removed.append(subelem) element.remove(subelem) else: xpath_segments = element_path.split(XPATH_DELIM) parent_segment = XPATH_DELIM.join(xpath_segments[:-1]) last_segment = xpath_segments[-1] for parent in get_elements(element, parent_segment): rem = remove_element(parent, last_segment) removed.extend(rem if isinstance(rem, list) else [rem]) if clear_empty: removed.extend(remove_empty_element(element, parent_segment)) return removed[0] if len(removed) == 1 else (removed or None)" 4025,"def remove_elements(parent_to_parse, element_paths, clear_empty=False): """""" Removes all elements named after each elements_or_paths. If clear_empty is True, for each XPATH, empty parents are removed if all their children are removed. :see: remove_element(parent_to_parse, element_path) """""" element = get_element(parent_to_parse) removed = [] if element is None or not element_paths: return removed if isinstance(element_paths, string_types): rem = remove_element(element, element_paths, clear_empty) removed.extend(rem if isinstance(rem, list) else [rem]) else: for xpath in element_paths: rem = remove_element(element, xpath, clear_empty) removed.extend(rem if isinstance(rem, list) else [rem]) return removed" 4026,"def remove_empty_element(parent_to_parse, element_path, target_element=None): """""" Searches for all empty sub-elements named after element_name in the parsed element, and if it exists, removes them all and returns them as a list. 
"""""" element = get_element(parent_to_parse) removed = [] if element is None or not element_path: return removed if target_element: # Always deal with just the element path if not element_path.endswith(target_element): element_path = XPATH_DELIM.join([element_path, target_element]) target_element = None if XPATH_DELIM not in element_path: # Loop over and remove empty sub-elements directly for subelem in get_elements(element, element_path): if element_is_empty(subelem): removed.append(subelem) element.remove(subelem) else: # Parse target element from last node in element path xpath_segments = element_path.split(XPATH_DELIM) element_path = XPATH_DELIM.join(xpath_segments[:-1]) target_element = xpath_segments[-1] # Loop over children and remove empty ones directly for parent in get_elements(element, element_path): for child in get_elements(parent, target_element): if element_is_empty(child): removed.append(child) parent.remove(child) # Parent may be empty now: recursively remove empty elements in XPATH if element_is_empty(parent): if len(xpath_segments) == 2: removed.extend(remove_empty_element(element, xpath_segments[0])) else: next_element_path = XPATH_DELIM.join(xpath_segments[:-2]) next_target_element = parent.tag removed.extend(remove_empty_element(element, next_element_path, next_target_element)) return removed" 4027,"def get_elements(parent_to_parse, element_path): """""" :return: all elements by name from the parsed parent element. :see: get_element(parent_to_parse, element_path) """""" element = get_element(parent_to_parse) if element is None or not element_path: return [] return element.findall(element_path)" 4028,"def get_element_attribute(elem_to_parse, attrib_name, default_value=u''): """""" :return: an attribute from the parsed element if it has the attribute, otherwise the default value """""" element = get_element(elem_to_parse) if element is None: return default_value return element.attrib.get(attrib_name, default_value)" 4029,"def get_element_attributes(parent_to_parse, element_path=None): """""" :return: all the attributes for the parsed element if it has any, or an empty dict """""" element = get_element(parent_to_parse, element_path) return {} if element is None else element.attrib" 4030,"def set_element_attributes(elem_to_parse, **attrib_kwargs): """""" Adds the specified key/value pairs to the element's attributes, and returns the updated set of attributes. If the element already contains any of the attributes specified in attrib_kwargs, they are updated accordingly. """""" element = get_element(elem_to_parse) if element is None: return element if len(attrib_kwargs): element.attrib.update(attrib_kwargs) return element.attrib" 4031,"def remove_element_attributes(elem_to_parse, *args): """""" Removes the specified keys from the element's attributes, and returns a dict containing the attributes that have been removed. """""" element = get_element(elem_to_parse) if element is None: return element if len(args): attribs = element.attrib return {key: attribs.pop(key) for key in args if key in attribs} return {}" 4032,"def get_element_tail(parent_to_parse, element_path=None, default_value=u''): """""" :return: text following the parsed parent element if it exists, otherwise the default value. 
:see: get_element(parent_to_parse, element_path) """""" parent_element = get_element(parent_to_parse, element_path) if parent_element is None: return default_value if parent_element.tail: return parent_element.tail.strip() or default_value return default_value" 4033,"def get_element_text(parent_to_parse, element_path=None, default_value=u''): """""" :return: text from the parsed parent element if it has a text value, otherwise the default value. :see: get_element(parent_to_parse, element_path) """""" parent_element = get_element(parent_to_parse, element_path) if parent_element is None: return default_value if parent_element.text: return parent_element.text.strip() or default_value return default_value" 4034,"def get_elements_attributes(parent_to_parse, element_path=None, attrib_name=None): """""" :return: list of text representing an attribute of parent or each element at element path, or a list of dicts representing all the attributes parsed from each element """""" attrs = _get_elements_property(parent_to_parse, element_path, 'attrib') if not attrib_name: return attrs return [attr[attrib_name] for attr in attrs if attrib_name in attr]" 4035,"def _get_elements_property(parent_to_parse, element_path, prop_name): """""" A helper to construct a list of values from """""" parent_element = get_element(parent_to_parse) if parent_element is None: return [] if element_path and not element_exists(parent_element, element_path): return [] if not element_path: texts = getattr(parent_element, prop_name) texts = texts.strip() if isinstance(texts, string_types) else texts texts = [texts] if texts else [] else: texts = [t for t in ( prop.strip() if isinstance(prop, string_types) else prop for prop in (getattr(node, prop_name) for node in parent_element.findall(element_path)) if prop ) if t] return texts" 4036,"def set_element_tail(parent_to_parse, element_path=None, element_tail=u''): """""" Assigns the text following the parsed parent element and then returns it. If element_path is provided and doesn't exist, it is inserted with element_tail. :see: get_element(parent_to_parse, element_path) """""" return _set_element_property(parent_to_parse, element_path, _ELEM_TAIL, element_tail)" 4037,"def set_element_text(parent_to_parse, element_path=None, element_text=u''): """""" Assigns a string value to the parsed parent element and then returns it. If element_path is provided and doesn't exist, it is inserted with element_text. :see: get_element(parent_to_parse, element_path) """""" return _set_element_property(parent_to_parse, element_path, _ELEM_TEXT, element_text)" 4038,"def _set_element_property(parent_to_parse, element_path, prop_name, value): """""" Assigns the value to the parsed parent element and then returns it """""" element = get_element(parent_to_parse) if element is None: return None if element_path and not element_exists(element, element_path): element = insert_element(element, 0, element_path) if not isinstance(value, string_types): value = u'' setattr(element, prop_name, value) return element" 4039,"def set_elements_tail(parent_to_parse, element_path=None, tail_values=None): """""" Assigns an array of tail values to each of the elements parsed from the parent. The tail values are assigned in the same order they are provided. If there are less values then elements, the remaining elements are skipped; but if there are more, new elements will be inserted for each with the remaining tail values. 
"""""" if tail_values is None: tail_values = [] return _set_elements_property(parent_to_parse, element_path, _ELEM_TAIL, tail_values)" 4040,"def set_elements_text(parent_to_parse, element_path=None, text_values=None): """""" Assigns an array of text values to each of the elements parsed from the parent. The text values are assigned in the same order they are provided. If there are less values then elements, the remaining elements are skipped; but if there are more, new elements will be inserted for each with the remaining text values. """""" if text_values is None: text_values = [] return _set_elements_property(parent_to_parse, element_path, _ELEM_TEXT, text_values)" 4041,"def _set_elements_property(parent_to_parse, element_path, prop_name, values): """""" Assigns an array of string values to each of the elements parsed from the parent. The values must be strings, and they are assigned in the same order they are provided. The operation stops when values run out; extra values will be inserted as new elements. :see: get_element(parent_to_parse, element_path) """""" element = get_element(parent_to_parse) if element is None or not values: return [] if isinstance(values, string_types): values = [values] if not element_path: return [_set_element_property(element, None, prop_name, values[0])] elements = get_elements(element, element_path) affected = [] for idx, val in enumerate(values): if idx < len(elements): next_elem = elements[idx] else: next_elem = insert_element(element, idx, element_path) affected.append( _set_element_property(next_elem, None, prop_name, val) ) return affected" 4042,"def dict_to_element(element_as_dict): """""" Converts a Dictionary object to an element. The Dictionary can include any of the following tags, only name is required: - name (required): the name of the element tag - text: the text contained by element - tail: text immediately following the element - attributes: a Dictionary containing element attributes - children: a List of converted child elements """""" if element_as_dict is None: return None elif isinstance(element_as_dict, ElementTree): return element_as_dict.getroot() elif isinstance(element_as_dict, ElementType): return element_as_dict elif not isinstance(element_as_dict, dict): raise TypeError('Invalid element dict: {0}'.format(element_as_dict)) if len(element_as_dict) == 0: return None try: converted = Element( element_as_dict[_ELEM_NAME], element_as_dict.get(_ELEM_ATTRIBS, {}) ) converted.tail = element_as_dict.get(_ELEM_TAIL, u'') converted.text = element_as_dict.get(_ELEM_TEXT, u'') for child in element_as_dict.get(_ELEM_CHILDREN, []): converted.append(dict_to_element(child)) except KeyError: raise SyntaxError('Invalid element dict: {0}'.format(element_as_dict)) return converted" 4043,"def element_to_dict(elem_to_parse, element_path=None, recurse=True): """""" :return: an element losslessly as a dictionary. If recurse is True, the element's children are included, otherwise they are omitted. 
The resulting Dictionary will have the following attributes: - name: the name of the element tag - text: the text contained by element - tail: text immediately following the element - attributes: a Dictionary containing element attributes - children: a List of converted child elements """""" element = get_element(elem_to_parse, element_path) if element is not None: converted = { _ELEM_NAME: element.tag, _ELEM_TEXT: element.text, _ELEM_TAIL: element.tail, _ELEM_ATTRIBS: element.attrib, _ELEM_CHILDREN: [] } if recurse is True: for child in element: converted[_ELEM_CHILDREN].append(element_to_dict(child, recurse=recurse)) return converted return {}" 4044,"def element_to_object(elem_to_parse, element_path=None): """""" :return: the root key, and a dict with all the XML data, but without preserving structure, for instance: nested textnested dict textnested dict tail {'elem': { 'val': [ u'nested text', {'prop': u'attr', 'value': [u'nested dict text', u'nested dict tail']}, u'attribute' ] }} """""" if isinstance(elem_to_parse, STRING_TYPES) or hasattr(elem_to_parse, 'read'): # Always strip namespaces if not already parsed elem_to_parse = strip_namespaces(elem_to_parse) if element_path is not None: elem_to_parse = get_element(elem_to_parse, element_path) element_tree = get_element_tree(elem_to_parse) element_root = element_tree.getroot() root_tag = u'' if element_root is None else element_root.tag return root_tag, {root_tag: _element_to_object(element_root)}" 4045,"def element_to_string(element, include_declaration=True, encoding=DEFAULT_ENCODING, method='xml'): """""" :return: the string value of the element or element tree """""" if isinstance(element, ElementTree): element = element.getroot() elif not isinstance(element, ElementType): element = get_element(element) if element is None: return u'' element_as_string = tostring(element, encoding, method).decode(encoding=encoding) if include_declaration: return element_as_string else: return strip_xml_declaration(element_as_string)" 4046,"def string_to_element(element_as_string, include_namespaces=False): """""" :return: an element parsed from a string value, or the element as is if already parsed """""" if element_as_string is None: return None elif isinstance(element_as_string, ElementTree): return element_as_string.getroot() elif isinstance(element_as_string, ElementType): return element_as_string else: element_as_string = _xml_content_to_string(element_as_string) if not isinstance(element_as_string, string_types): # Let cElementTree handle the error return fromstring(element_as_string) elif not strip_xml_declaration(element_as_string): # Same as ElementTree().getroot() return None elif include_namespaces: return fromstring(element_as_string) else: return fromstring(strip_namespaces(element_as_string))" 4047,"def iter_elements(element_function, parent_to_parse, **kwargs): """""" Applies element_function to each of the sub-elements in parent_to_parse. The passed in function must take at least one element, and an optional list of kwargs which are relevant to each of the elements in the list: def elem_func(each_elem, **kwargs) """""" parent = get_element(parent_to_parse) if not hasattr(element_function, '__call__'): return parent for child in ([] if parent is None else parent): element_function(child, **kwargs) return parent" 4048,"def iterparse_elements(element_function, file_or_path, **kwargs): """""" Applies element_function to each of the sub-elements in the XML file. 
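iter_elements above simply applies a callable to each direct child of the parsed parent; the same loop with a plain Element (the tag-renaming function is illustrative, not part of the module):

from xml.etree.ElementTree import fromstring, tostring

def uppercase_tag(elem, prefix=''):
    elem.tag = prefix + elem.tag.upper()

parent = fromstring('<row><a/><b/></row>')
for child in parent:                     # what iter_elements does internally
    uppercase_tag(child, prefix='COL_')
assert tostring(parent) == b'<row><COL_A /><COL_B /></row>'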
The passed in function must take at least one element, and an optional list of **kwarg which are relevant to each of the elements in the list: def elem_func(each_elem, **kwargs) Implements the recommended cElementTree iterparse pattern, which is efficient for reading in a file, making changes and writing it again. """""" if not hasattr(element_function, '__call__'): return file_path = getattr(file_or_path, 'name', file_or_path) context = iter(iterparse(file_path, events=('start', 'end'))) root = None # Capture root for Memory management # Start event loads child; by the End event it's ready for processing for event, child in context: if root is None: root = child if event == 'end': # Ensures the element has been fully read element_function(child, **kwargs) root.clear()" 4049,"def strip_namespaces(file_or_xml): """""" Removes all namespaces from the XML file or string passed in. If file_or_xml is not a file or string, it is returned as is. """""" xml_content = _xml_content_to_string(file_or_xml) if not isinstance(xml_content, string_types): return xml_content # This pattern can have overlapping matches, necessitating the loop while _NAMESPACES_FROM_DEC_REGEX.search(xml_content) is not None: xml_content = _NAMESPACES_FROM_DEC_REGEX.sub(r'\1', xml_content) # Remove namespaces at the tag level xml_content = _NAMESPACES_FROM_TAG_REGEX.sub(r'\1', xml_content) # Remove namespaces at the attribute level xml_content = _NAMESPACES_FROM_ATTR_REGEX.sub(r'\1\3', xml_content) return xml_content" 4050,"def strip_xml_declaration(file_or_xml): """""" Removes XML declaration line from file or string passed in. If file_or_xml is not a file or string, it is returned as is. """""" xml_content = _xml_content_to_string(file_or_xml) if not isinstance(xml_content, string_types): return xml_content # For Python 2 compliance: replacement string must not specify unicode u'' return _XML_DECLARATION_REGEX.sub(r'', xml_content, 1)" 4051,"def write_element(elem_to_parse, file_or_path, encoding=DEFAULT_ENCODING): """""" Writes the contents of the parsed element to file_or_path :see: get_element(parent_to_parse, element_path) """""" xml_header = ''.format(encoding) get_element_tree(elem_to_parse).write(file_or_path, encoding, xml_header)" 4052,"def floating_point_to_datetime(day, fp_time): """"""Convert a floating point time to a datetime."""""" result = datetime(year=day.year, month=day.month, day=day.day) result += timedelta(minutes=math.ceil(60 * fp_time)) return result" 4053,"def adhan(day, location, parameters, timezone_offset=0): """"""Calculate adhan times given the parameters. This function will compute the adhan times for a certain location on certain day. The method for calculating the prayers as well as the time for Asr can also be specified. The timezone offset naively adds the specified number of hours to each time that is returned. :param day: The datetime.date to calculate for :param location: 2-tuple of floating point coordiantes for latitude and longitude of location in degrees :param parameters: A dictionary-like object of parameters for computing adhan times. Commonly used calculation methods are available in the adhan.methods module :param timezone_offset: The number of hours to add to each prayer time to account for timezones. 
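For reference, floating_point_to_datetime above treats its input as fractional hours past midnight and rounds up to whole minutes; a quick check:

import math
from datetime import date, datetime, timedelta

def floating_point_to_datetime(day, fp_time):
    result = datetime(year=day.year, month=day.month, day=day.day)
    return result + timedelta(minutes=math.ceil(60 * fp_time))

# 5.525 hours past midnight rounds up to 05:32
assert floating_point_to_datetime(date(2024, 1, 1), 5.525) == datetime(2024, 1, 1, 5, 32)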
Can be floating point """""" latitude, longitude = location # # To reduce a little repetitiveness, using a partial function that has the # day and latitude already set # time_at_sun_angle = partial( compute_time_at_sun_angle, day=day, latitude=latitude ) zuhr_time = compute_zuhr_utc(day, longitude) shuruq_time = zuhr_time - time_at_sun_angle(angle=SUNRISE_ANGLE) maghrib_time = zuhr_time + time_at_sun_angle(angle=SUNSET_ANGLE) fajr_time = zuhr_time - time_at_sun_angle(angle=parameters['fajr_angle']) # # Most methods define Isha as a certain angle the sun has to be below # the horizon, but some methods define it as a certain number of minutes # after Maghrib # if parameters.get('isha_delay', None): isha_time = maghrib_time + parameters['isha_delay'] else: isha_time = ( zuhr_time + time_at_sun_angle(angle=parameters['isha_angle']) ) # # Default to standard Asr method if not specified # asr_multiplier = parameters.get('asr_multiplier', ASR_STANDARD) asr_time = zuhr_time + time_at_shadow_length( day=day, latitude=latitude, multiplier=asr_multiplier ) offset = timedelta(minutes=60 * timezone_offset) return { 'fajr': floating_point_to_datetime(day, fajr_time) + offset, 'zuhr': floating_point_to_datetime(day, zuhr_time) + offset, 'shuruq': floating_point_to_datetime(day, shuruq_time) + offset, 'asr': floating_point_to_datetime(day, asr_time) + offset, 'maghrib': floating_point_to_datetime(day, maghrib_time) + offset, 'isha': floating_point_to_datetime(day, isha_time) + offset, }" 4054,"def _make_fn_text(self): """"""Makes filename text"""""" if not self._f: text = ""(not loaded)"" elif self._f.filename: text = os.path.relpath(self._f.filename, ""."") else: text = ""(filename not set)"" return text" 4055,"def format_BLB(): """"""Sets some formatting options in Matplotlib."""""" rc(""figure"", facecolor=""white"") rc('font', family = 'serif', size=10) #, serif = 'cmr10') rc('xtick', labelsize=10) rc('ytick', labelsize=10) rc('axes', linewidth=1) rc('xtick.major', size=4, width=1) rc('xtick.minor', size=2, width=1) rc('ytick.major', size=4, width=1) rc('ytick.minor', size=2, width=1)" 4056,"def set_figure_size(fig, width, height): """"""Sets MatPlotLib figure width and height in pixels Reference: https://github.com/matplotlib/matplotlib/issues/2305/ """""" dpi = float(fig.get_dpi()) fig.set_size_inches(float(width) / dpi, float(height) / dpi)" 4057,"def create_zip_codes_geo_zone(cls, zip_codes_geo_zone, **kwargs): """"""Create ZipCodesGeoZone Create a new ZipCodesGeoZone This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.create_zip_codes_geo_zone(zip_codes_geo_zone, async=True) >>> result = thread.get() :param async bool :param ZipCodesGeoZone zip_codes_geo_zone: Attributes of zipCodesGeoZone to create (required) :return: ZipCodesGeoZone If the method is called asynchronously, returns the request thread. """""" kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._create_zip_codes_geo_zone_with_http_info(zip_codes_geo_zone, **kwargs) else: (data) = cls._create_zip_codes_geo_zone_with_http_info(zip_codes_geo_zone, **kwargs) return data" 4058,"def delete_zip_codes_geo_zone_by_id(cls, zip_codes_geo_zone_id, **kwargs): """"""Delete ZipCodesGeoZone Delete an instance of ZipCodesGeoZone by its ID. This method makes a synchronous HTTP request by default. 
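The Isha branching just described (a fixed delay after Maghrib versus a solar angle) shown in isolation; the times and the stand-in angle function below are illustrative only, not the module's real solar computation:

def isha_time(zuhr, maghrib, time_at_sun_angle, parameters):
    # Some methods fix Isha a set interval after Maghrib, others use a sun angle
    if parameters.get('isha_delay'):
        return maghrib + parameters['isha_delay']
    return zuhr + time_at_sun_angle(parameters['isha_angle'])

angle = lambda a: a / 10.0                                   # stand-in computation
print(isha_time(12.0, 18.0, angle, {'isha_angle': 17}))      # angle-based: 13.7
print(isha_time(12.0, 18.0, angle, {'isha_delay': 1.5}))     # 90 min after Maghrib: 19.5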
To make an asynchronous HTTP request, please pass async=True >>> thread = api.delete_zip_codes_geo_zone_by_id(zip_codes_geo_zone_id, async=True) >>> result = thread.get() :param async bool :param str zip_codes_geo_zone_id: ID of zipCodesGeoZone to delete. (required) :return: None If the method is called asynchronously, returns the request thread. """""" kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._delete_zip_codes_geo_zone_by_id_with_http_info(zip_codes_geo_zone_id, **kwargs) else: (data) = cls._delete_zip_codes_geo_zone_by_id_with_http_info(zip_codes_geo_zone_id, **kwargs) return data" 4059,"def get_zip_codes_geo_zone_by_id(cls, zip_codes_geo_zone_id, **kwargs): """"""Find ZipCodesGeoZone Return single instance of ZipCodesGeoZone by its ID. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.get_zip_codes_geo_zone_by_id(zip_codes_geo_zone_id, async=True) >>> result = thread.get() :param async bool :param str zip_codes_geo_zone_id: ID of zipCodesGeoZone to return (required) :return: ZipCodesGeoZone If the method is called asynchronously, returns the request thread. """""" kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._get_zip_codes_geo_zone_by_id_with_http_info(zip_codes_geo_zone_id, **kwargs) else: (data) = cls._get_zip_codes_geo_zone_by_id_with_http_info(zip_codes_geo_zone_id, **kwargs) return data" 4060,"def list_all_zip_codes_geo_zones(cls, **kwargs): """"""List ZipCodesGeoZones Return a list of ZipCodesGeoZones This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.list_all_zip_codes_geo_zones(async=True) >>> result = thread.get() :param async bool :param int page: page number :param int size: page size :param str sort: page order :return: page[ZipCodesGeoZone] If the method is called asynchronously, returns the request thread. """""" kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._list_all_zip_codes_geo_zones_with_http_info(**kwargs) else: (data) = cls._list_all_zip_codes_geo_zones_with_http_info(**kwargs) return data" 4061,"def replace_zip_codes_geo_zone_by_id(cls, zip_codes_geo_zone_id, zip_codes_geo_zone, **kwargs): """"""Replace ZipCodesGeoZone Replace all attributes of ZipCodesGeoZone This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.replace_zip_codes_geo_zone_by_id(zip_codes_geo_zone_id, zip_codes_geo_zone, async=True) >>> result = thread.get() :param async bool :param str zip_codes_geo_zone_id: ID of zipCodesGeoZone to replace (required) :param ZipCodesGeoZone zip_codes_geo_zone: Attributes of zipCodesGeoZone to replace (required) :return: ZipCodesGeoZone If the method is called asynchronously, returns the request thread. """""" kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._replace_zip_codes_geo_zone_by_id_with_http_info(zip_codes_geo_zone_id, zip_codes_geo_zone, **kwargs) else: (data) = cls._replace_zip_codes_geo_zone_by_id_with_http_info(zip_codes_geo_zone_id, zip_codes_geo_zone, **kwargs) return data" 4062,"def update_zip_codes_geo_zone_by_id(cls, zip_codes_geo_zone_id, zip_codes_geo_zone, **kwargs): """"""Update ZipCodesGeoZone Update attributes of ZipCodesGeoZone This method makes a synchronous HTTP request by default. 
To make an asynchronous HTTP request, please pass async=True >>> thread = api.update_zip_codes_geo_zone_by_id(zip_codes_geo_zone_id, zip_codes_geo_zone, async=True) >>> result = thread.get() :param async bool :param str zip_codes_geo_zone_id: ID of zipCodesGeoZone to update. (required) :param ZipCodesGeoZone zip_codes_geo_zone: Attributes of zipCodesGeoZone to update. (required) :return: ZipCodesGeoZone If the method is called asynchronously, returns the request thread. """""" kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._update_zip_codes_geo_zone_by_id_with_http_info(zip_codes_geo_zone_id, zip_codes_geo_zone, **kwargs) else: (data) = cls._update_zip_codes_geo_zone_by_id_with_http_info(zip_codes_geo_zone_id, zip_codes_geo_zone, **kwargs) return data" 4063,"def get_declared_fields(bases, attrs): """""" Find all fields and return them as a dictionary. note:: this function is copied and modified from django.forms.get_declared_fields """""" def is_field(prop): return isinstance(prop, forms.Field) or \ isinstance(prop, BaseRepresentation) fields = [(field_name, attrs.pop(field_name)) for field_name, obj in attrs.items() if is_field(obj)] # add fields from base classes: for base in bases[::-1]: if hasattr(base, 'base_fields'): fields = base.base_fields.items() + fields return dict(fields)" 4064,"def validate(self, data=None): """""" Validate the data Check also that no extra properties are present. :raises: ValidationError if the data is not valid. """""" errors = {} data = self._getData(data) # validate each field, one by one for name, field in self.fields.items(): try: field.clean(data.get(name)) except ValidationError, e: errors[name] = e.messages except AttributeError, e: raise ValidationError('data should be of type dict but is %s' % (type(data),)) # check for extra fields extras = set(data.keys()) - set(self.fields.keys()) if extras: errors[', '.join(extras)] = ['field(s) not allowed'] # if errors, raise ValidationError if errors: raise ValidationError(errors)" 4065,"def _getData(self, data): """""" Check that data is acceptable and return it. Default behavior is that the data has to be of type `dict`. In derived classes this method could for example allow `None` or empty strings and just return empty dictionary. :raises: ``ValidationError`` if data is missing or wrong type :return: the data to be validated """""" if not isinstance(data, dict): raise ValidationError( 'data is not a valid dictionary: %s' % (str(type(data)),)) return data" 4066,"def lambda_handler(event, context): """"""Main handler."""""" users = boto3.resource(""dynamodb"").Table(os.environ['people']) auth = check_auth(event, role=[""admin""]) if not auth['success']: return auth user_email = event.get('user_email', None) if not user_email: msg = ""Missing user_email parameter in your request."" return {'success': False, 'message': msg} user_role = event.get('user_role', None) if not user_role: msg = ""Missing user role: `admin`, `analyst`"" return {'success': False, 'message': msg} user_name = event.get('user_name', '') seed = random.randint(100000000, 999999999) hash_key = ""{}{}"".format(user_email, seed) api_key = hashlib.sha256(hash_key).hexdigest() if auth.get('init', False): user_role = 'admin' else: user_role = user_role obj = {'email': user_email, 'name': user_name, 'api_key': api_key, 'role': user_role} response = users.put_item(Item=obj) return obj" 4067,"def get_connection(self, internal=False): """"""Get a live connection to this instance. 
:param bool internal: Whether or not to use a DC internal network connection. :rtype: :py:class:`redis.client.StrictRedis` """""" # Determine the connection string to use. connect_string = self.connect_string if internal: connect_string = self.internal_connect_string # Stripe Redis protocol prefix coming from the API. connect_string = connect_string.strip('redis://') host, port = connect_string.split(':') # Build and return the redis client. return redis.StrictRedis(host=host, port=port, password=self._password)" 4068,"def get_cached(self, path, cache_name, **kwargs): """"""Request a resource form the API, first checking if there is a cached response available. Returns the parsed JSON data. """""" if gw2api.cache_dir and gw2api.cache_time and cache_name: cache_file = os.path.join(gw2api.cache_dir, cache_name) if mtime(cache_file) >= time.time() - gw2api.cache_time: with open(cache_file, ""r"") as fp: tmp = json.load(fp) return self.make_response(tmp[""data""], tmp[""meta""]) else: cache_file = None meta, data = self._get(path, **kwargs) if cache_file: with open(cache_file, ""w"") as fp: json.dump({""meta"": meta, ""data"": data}, fp, indent=2) return self.make_response(data, meta)" 4069,"def main(): """""" Loop over a list of input text strings. Parse each string using a list of parsers, one included in megaparsex and one defined in this script. If a confirmation is requested, seek confirmation, otherwise display any response text and engage any triggered functions. """""" for text in [ ""how are you"", ""ip address"", ""restart"", ""run command"", ""rain EGPF"", ""reverse SSH"" ]: print(""\nparse text: "" + text + ""\nWait 3 seconds, then parse."") time.sleep(3) response = megaparsex.multiparse( text = text, parsers = [ megaparsex.parse, parse_networking ], help_message = ""Does not compute. I can report my IP address and I "" ""can restart my script."" ) if type(response) is megaparsex.confirmation: while response.confirmed() is None: response.test( text = megaparsex.get_input( prompt = response.prompt() + "" "" ) ) if response.confirmed(): print(response.feedback()) response.run() else: print(response.feedback()) elif type(response) is megaparsex.command: output = response.engage_command( command = megaparsex.get_input( prompt = response.prompt() + "" "" ), background = False ) if output: print(""output:\n{output}"".format(output = output)) else: print(response)" 4070,"def get_packet_id(self, packet): """""" Returns the ID of a protocol buffer packet. Returns None if no ID was found. """""" for p in self._packets: if isinstance(packet, p['cls']): return p['id'] return None" 4071,"def main(*args): """""" Enter point. """""" args = args or sys.argv[1:] params = PARSER.parse_args(args) from .log import setup_logging setup_logging(params.level.upper()) from .core import Starter starter = Starter(params) if not starter.params.TEMPLATES or starter.params.list: setup_logging('WARN') for t in sorted(starter.iterate_templates()): logging.warn(""%s -- %s"", t.name, t.params.get( 'description', 'no description')) return True try: starter.copy() except Exception as e: # noqa logging.error(e) sys.exit(1)" 4072,"def summarize(df,preview_rows = 8, display_max_cols = None,display_width = None, output_path = None, output_safe = True,to_folder = False): """""" Prints information about the DataFrame to a file or to the prompt. 
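One caveat in get_connection above: str.strip('redis://') removes any of those characters from both ends of the string rather than the literal prefix, so a hostname beginning with r/e/d/i/s would be mangled. A prefix-safe alternative (the helper name is illustrative):

def strip_scheme(connect_string, scheme='redis://'):
    # str.strip('redis://') drops any of those characters from both ends,
    # so slice off the literal prefix instead
    if connect_string.startswith(scheme):
        connect_string = connect_string[len(scheme):]
    return connect_string

host, port = strip_scheme('redis://10.0.0.5:6379').split(':')
assert (host, port) == ('10.0.0.5', '6379')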
Parameters ---------- df - DataFrame The DataFrame to summarize preview_rows - int, default 5 Amount of rows to preview from the head and tail of the DataFrame display_max_cols - int, default None Maximum amount of columns to display. If set to None, all columns will be displayed. If set to 0, only as many as fit in the screen's width will be displayed display_width - int, default None Width of output. Can be width of file or width of console for printing. Set to None for pandas to detect it from console. output_path - path-like, default None If not None, this will be used as the path of the output file, and this function will print to a file instead of to the prompt output_safe - boolean, default True If True and output_file is not None, this function will not overwrite any existing files. output_csv: boolean, default False If True, will output to a directory with name of output_path with all data in csv format. WARNING: If set to true, this function will overwrite existing files in the directory with the following names: ['Preview.csv','Describe.csv','Info.csv','Percentile Details.csv', 'Missing Values Summary.csv','Potential Outliers.csv','Correlation Matrix.csv'] """""" assert type(df) is pd.DataFrame # Reformat displays initial_settings = pd_settings(display_max_cols, None, display_width) # --------Values of data----------- df_preview = _io.preview(df,preview_rows) df_desc_num, df_desc_cat = detailed_desc(df) percent_values = stats.percentiles(df) potential_outliers = stats.df_outliers(df).dropna(axis = 1,how = 'all') potential_outliers = potential_outliers if _utils.rows(potential_outliers) else None corr_values = regstats.corr_matrix(df) # ----------Build lists------------ title_list = \ ['Preview','Describe (Numerical)','Describe (Categorical)','Percentile Details', 'Potential Outliers','Correlation Matrix'] info_list = \ [df_preview,df_desc_num, df_desc_cat,percent_values, potential_outliers,corr_values] error_list = [None,'No numerical data.','All numerical data.','No numerical data.', 'No potential outliers.','No categorical, bool, or numerical data.'] # ----------Build output------------ output = '' for title, value,error_text in zip(title_list,info_list,error_list): if value is None: value = ""{} skipped: {}"".format(title,error_text) if str(value).endswith('\n'): value = value[:-1] output+='{}\n{}\n\n'.format(_io.title_line(title),value) # ----------Send to file/print to console------------ if output_path is None: # Potentially could change this to allow for output_safe to work with directories print(output) else: if not to_folder: print('Outputting to file...') _io.output_to_file(output,output_path,output_safe) else: print('Outputting to folder...') if not os.path.exists(output_path): os.mkdir(output_path) for title, value,error_text in zip(title_list,info_list,error_list): if value is None: print(""{} skipped: {}"".format(title,error_text)) else: file_dir = os.path.join(output_path,""{}.csv"".format(title)) if type(value) is pd.DataFrame: # Eventually add a check to see if file exists value.to_csv(file_dir) else: _io.output_to_file(value,file_dir,False) # Change to output_safe when directory output_safe is implemented print('Done!') # Reset display settings pd_settings(*initial_settings)" 4073,"def timed_pipe(generator, seconds=3): ''' This is a time limited pipeline. If you have a infinite pipeline and want it to stop yielding after a certain amount of time, use this! 
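A usage sketch of timed_pipe; ts is assumed to be a monotonic wall-clock timer bound elsewhere in the module, so the sketch passes time.monotonic explicitly.

import itertools
import time

def timed_pipe(generator, seconds=3, ts=time.monotonic):
    # Yield items from 'generator' until 'seconds' of wall-clock time have passed
    end = ts() + seconds
    for item in generator:
        if ts() < end:
            yield item
        else:
            break

drained = list(timed_pipe(itertools.count(), seconds=0.05))
print(len(drained))   # some large, machine-dependent number of items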
''' # grab the highest precision timer # when it started start = ts() # when it will stop end = start + seconds # iterate over the pipeline for i in generator: # if there is still time if ts() < end: # yield the next item yield i # otherwise else: # stop break" 4074,"def destruct(particles, index): """"""Fermion annihilation operator in matrix representation for a indexed particle in a bounded N-particles fermion fock space"""""" mat = np.zeros((2**particles, 2**particles)) flipper = 2**index for i in range(2**particles): ispin = btest(i, index) if ispin == 1: mat[i ^ flipper, i] = phase(i, index) return csr_matrix(mat)" 4075,"def on(self): """"""! \~english Open Audio output. set pin mode to ALT0 @return a boolean value. if True means open audio output is OK otherwise failed to open. \~chinese 打开音频输出。 将引脚模式设置为ALT0 @return 布尔值。 如果 True 表示打开音频输出成功,否则不成功。 """""" isOK = True try: if self.channelR!=None: sub.call([""gpio"", ""-g"", ""mode"", ""{}"".format(self.channelR), self.PIN_MODE_AUDIO ]) except: isOK = False print(""Open audio right channel failed."") try: if self.channelL!=None: sub.call([""gpio"",""-g"",""mode"", ""{}"".format(self.channelL), self.PIN_MODE_AUDIO ]) except: isOK = False print(""Open audio left channel failed."") return isOK" 4076,"def off(self): """"""! \~english Close Audio output. set pin mode to output @return a boolean value. if True means close audio output is OK otherwise failed to close. \~chinese 关闭音频输出。 将引脚模式设置为输出 @return 布尔值。 如果为 True 关闭音频输出成功,否则关闭不成功。 """""" isOK = True try: if self.channelR!=None: sub.call([""gpio"",""-g"",""mode"", ""{}"".format(self.channelR), self.PIN_MODE_OUTPUT ]) except: isOK = False print(""Close audio right channel failed."") try: if self.channelL!=None: sub.call([""gpio"",""-g"",""mode"", ""{}"".format(self.channelL), self.PIN_MODE_OUTPUT ]) except: isOK = False print(""Close audio left channel failed."") return isOK" 4077,"def parse_database_url(url): """"""Parses a database URL."""""" if url == 'sqlite://:memory:': # this is a special case, because if we pass this URL into # urlparse, urlparse will choke trying to interpret ""memory"" # as a port number return { 'ENGINE': DATABASE_SCHEMES['sqlite'], 'NAME': ':memory:' } # note: no other settings are required for sqlite # otherwise parse the url as normal config = {} url = urlparse.urlparse(url) # Remove query strings. path = url.path[1:] path = path.split('?', 2)[0] # if we are using sqlite and we have no path, then assume we # want an in-memory database (this is the behaviour of sqlalchemy) if url.scheme == 'sqlite' and path == '': path = ':memory:' # Update with environment configuration. 
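# Note (illustration only): with a URL such as 'postgres://user:pw@db.example.com:5432/mydb',
# the update below fills config with NAME='mydb', USER='user', PASSWORD='pw',
# HOST='db.example.com' and PORT=5432, and the scheme lookup that follows
# adds the matching ENGINE entry from DATABASE_SCHEMES.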
config.update({ 'NAME': path or '', 'USER': url.username or '', 'PASSWORD': url.password or '', 'HOST': url.hostname or '', 'PORT': url.port or '', }) if url.scheme in DATABASE_SCHEMES: config['ENGINE'] = DATABASE_SCHEMES[url.scheme] return config" 4078,"def config(name='DATABASE_URL', default='sqlite://:memory:'): """"""Returns configured DATABASE dictionary from DATABASE_URL."""""" config = {} s = env(name, default) if s: config = parse_database_url(s) return config" 4079,"def json_unicode_to_utf8(data): """"""Change all strings in a JSON structure to UTF-8."""""" if isinstance(data, unicode): return data.encode('utf-8') elif isinstance(data, dict): newdict = {} for key in data: newdict[json_unicode_to_utf8( key)] = json_unicode_to_utf8(data[key]) return newdict elif isinstance(data, list): return [json_unicode_to_utf8(elem) for elem in data] else: return data" 4080,"def json_decode_file(filename): """""" Parses a textfile using json to build a python object representation """""" seq = open(filename).read() # The JSON standard has no comments syntax. We have to remove them # before feeding python's JSON parser seq = json_remove_comments(seq) # Parse all the unicode stuff to utf-8 return json_unicode_to_utf8(json.loads(seq))" 4081,"def wash_for_js(text): """""" DEPRECATED: use htmlutils.escape_javascript_string() instead, and take note that returned value is no longer enclosed into quotes. """""" from invenio_utils.html import escape_javascript_string if isinstance(text, six.string_types): return '""%s""' % escape_javascript_string( text, escape_for_html=False, escape_CDATA=False, escape_script_tag_with_quote=None) else: return text" 4082,"def _post_init(self): """"""A post init trigger"""""" try: return self.postinit() except Exception as exc: return self._onerror(Result.from_exception(exc, uuid=self.uuid))" 4083,"def check_required_params(self): """""" Check if all required parameters are set"""""" for param in self.REQUIRED_FIELDS: if param not in self.params: raise ValidationError(""Missing parameter: {}"".format(param))" 4084,"def _onsuccess(self, result): """""" To execute on execution success :param kser.result.Result result: Execution result :return: Execution result :rtype: kser.result.Result """""" if KSER_METRICS_ENABLED == ""yes"": KSER_TASKS_STATUS.labels( __hostname__, self.__class__.path, 'SUCCESS' ).inc() if result: result = self.result + result else: result = self.result logger.info( ""{}.Success: {}[{}]: {}"".format( self.__class__.__name__, self.__class__.path, self.uuid, result ), extra=dict( kmsg=Message( self.uuid, entrypoint=self.__class__.path, params=self.params, metadata=self.metadata ).dump(), kresult=ResultSchema().dump(result) if result else dict() ) ) return self.onsuccess(result)" 4085,"def _onerror(self, result): """""" To execute on execution failure :param kser.result.Result result: Execution result :return: Execution result :rtype: kser.result.Result """""" if KSER_METRICS_ENABLED == ""yes"": KSER_TASKS_STATUS.labels( __hostname__, self.__class__.path, 'FAILED' ).inc() if result: result = self.result + result else: result = self.result logger.error( ""{}.Failed: {}[{}]: {}"".format( self.__class__.__name__, self.__class__.path, self.uuid, result ), extra=dict( kmsg=Message( self.uuid, entrypoint=self.__class__.path, params=self.params, metadata=self.metadata ).dump(), kresult=ResultSchema().dump(result) if result else dict() ) ) return self.onerror(result)" 4086,"def _postrun(self, result): """""" To execute after exection :param kser.result.Result 
result: Execution result :return: Execution result :rtype: kser.result.Result """""" logger.debug( ""{}.PostRun: {}[{}]"".format( self.__class__.__name__, self.__class__.path, self.uuid ), extra=dict( kmsg=Message( self.uuid, entrypoint=self.__class__.path, params=self.params, metadata=self.metadata ).dump() ) ) return self.postrun(result)" 4087,"def _run(self): """""" Execution body :return: Execution result :rtype: kser.result.Result """""" if KSER_METRICS_ENABLED == ""yes"": KSER_TASK_COUNT.inc() logger.debug( ""{}.Run: {}[{}]"".format( self.__class__.__name__, self.__class__.path, self.uuid ), extra=dict( kmsg=Message( self.uuid, entrypoint=self.__class__.path, params=self.params, metadata=self.metadata ).dump() ) ) return self.run()" 4088,"def unsafe_execute(self, result=None): """""" un-wrapped execution, can raise excepetion :return: Execution result :rtype: kser.result.Result """""" if result: self.result += result self._prerun() return self._onsuccess(self._postrun(self._run()))" 4089,"def execute(self, result=None): """""" Execution 'wrapper' to make sure that it return a result :return: Execution result :rtype: kser.result.Result """""" try: return self.unsafe_execute(result=result) except Exception as exc: return self._onerror(Result.from_exception(exc, uuid=self.uuid))" 4090,"def to_Message(self, result=None): """""" Entrypoint -> Message :param kser.result.Result result: Execution result :return: Kafka message :rtype kser.schemas.Message """""" return Message( uuid=self.uuid, entrypoint=self.__class__.path, params=self.params, result=result if result else self.result, metadata=self.metadata )" 4091,"def from_Message(cls, kmsg): """""" Message -> Entrypoint :param kser.schemas.Message kmsg: Kafka message :return: a entrypoint :rtype kser.entry.Entrypoint """""" return cls( uuid=kmsg.uuid, params=kmsg.params, result=kmsg.result, metadata=kmsg.metadata )" 4092,"def save_as(self, filename=None): """""" Dumps object contents into file on disk. Args: filename (optional): defaults to self.filename. If passed, self.filename will be updated to filename. """""" if filename is None: filename = self.filename if filename is None: filename = self.default_filename if filename is None: raise RuntimeError(""Class '{}' has no default filename"".format(self.__class__.__name__)) self._do_save_as(filename) self.filename = filename" 4093,"def load(self, filename=None): """"""Loads file and registers filename as attribute."""""" assert not self.__flag_loaded, ""File can be loaded only once"" if filename is None: filename = self.default_filename assert filename is not None, \ ""{0!s} class has no default filename"".format(self.__class__.__name__) # Convention: trying to open empty file is an error, # because it could be of (almost) any type. size = os.path.getsize(filename) if size == 0: raise RuntimeError(""Empty file: '{0!s}'"".format(filename)) self._test_magic(filename) self._do_load(filename) self.filename = filename self.__flag_loaded = True" 4094,"def init_default(self): """""" Initializes object with its default values Tries to load self.default_filename from default data directory. For safety, filename is reset to None so that it doesn't point to the original file. 
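The execute/unsafe_execute split above is the useful part of this pattern: run the raw body and let the wrapper turn any exception into a result object. A generic sketch; the Result class below is a stand-in, not kser's:

class Result:
    def __init__(self, ok, detail):
        self.ok, self.detail = ok, detail

    @classmethod
    def from_exception(cls, exc):
        return cls(False, repr(exc))

def execute(unsafe_callable):
    # 'wrapper' that always returns a Result, even when the body raises
    try:
        return Result(True, unsafe_callable())
    except Exception as exc:
        return Result.from_exception(exc)

print(execute(lambda: 41 + 1).detail)   # 42
print(execute(lambda: 1 / 0).detail)    # ZeroDivisionError('division by zero')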
"""""" import f311 if self.default_filename is None: raise RuntimeError(""Class '{}' has no default filename"".format(self.__class__.__name__)) fullpath = f311.get_default_data_path(self.default_filename, class_=self.__class__) self.load(fullpath) self.filename = None" 4095,"def availability(self, availability): """"""Sets the availability of this Product. :param availability: The availability of this Product. :type: str """""" allowed_values = [""available"", ""comingSoon"", ""retired""] if availability is not None and availability not in allowed_values: raise ValueError( ""Invalid value for `availability` ({0}), must be one of {1}"" .format(availability, allowed_values) ) self._availability = availability" 4096,"def stock_status(self, stock_status): """"""Sets the stock_status of this Product. :param stock_status: The stock_status of this Product. :type: str """""" allowed_values = [""available"", ""alert"", ""unavailable""] if stock_status is not None and stock_status not in allowed_values: raise ValueError( ""Invalid value for `stock_status` ({0}), must be one of {1}"" .format(stock_status, allowed_values) ) self._stock_status = stock_status" 4097,"def create_product(cls, product, **kwargs): """"""Create Product Create a new Product This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.create_product(product, async=True) >>> result = thread.get() :param async bool :param Product product: Attributes of product to create (required) :return: Product If the method is called asynchronously, returns the request thread. """""" kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._create_product_with_http_info(product, **kwargs) else: (data) = cls._create_product_with_http_info(product, **kwargs) return data" 4098,"def delete_product_by_id(cls, product_id, **kwargs): """"""Delete Product Delete an instance of Product by its ID. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.delete_product_by_id(product_id, async=True) >>> result = thread.get() :param async bool :param str product_id: ID of product to delete. (required) :return: None If the method is called asynchronously, returns the request thread. """""" kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._delete_product_by_id_with_http_info(product_id, **kwargs) else: (data) = cls._delete_product_by_id_with_http_info(product_id, **kwargs) return data" 4099,"def get_product_by_id(cls, product_id, **kwargs): """"""Find Product Return single instance of Product by its ID. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.get_product_by_id(product_id, async=True) >>> result = thread.get() :param async bool :param str product_id: ID of product to return (required) :return: Product If the method is called asynchronously, returns the request thread. """""" kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._get_product_by_id_with_http_info(product_id, **kwargs) else: (data) = cls._get_product_by_id_with_http_info(product_id, **kwargs) return data" 4100,"def list_all_products(cls, **kwargs): """"""List Products Return a list of Products This method makes a synchronous HTTP request by default. 
To make an asynchronous HTTP request, please pass async=True >>> thread = api.list_all_products(async=True) >>> result = thread.get() :param async bool :param int page: page number :param int size: page size :param str sort: page order :return: page[Product] If the method is called asynchronously, returns the request thread. """""" kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._list_all_products_with_http_info(**kwargs) else: (data) = cls._list_all_products_with_http_info(**kwargs) return data" 4101,"def replace_product_by_id(cls, product_id, product, **kwargs): """"""Replace Product Replace all attributes of Product This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.replace_product_by_id(product_id, product, async=True) >>> result = thread.get() :param async bool :param str product_id: ID of product to replace (required) :param Product product: Attributes of product to replace (required) :return: Product If the method is called asynchronously, returns the request thread. """""" kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._replace_product_by_id_with_http_info(product_id, product, **kwargs) else: (data) = cls._replace_product_by_id_with_http_info(product_id, product, **kwargs) return data" 4102,"def update_product_by_id(cls, product_id, product, **kwargs): """"""Update Product Update attributes of Product This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.update_product_by_id(product_id, product, async=True) >>> result = thread.get() :param async bool :param str product_id: ID of product to update. (required) :param Product product: Attributes of product to update. (required) :return: Product If the method is called asynchronously, returns the request thread. """""" kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._update_product_by_id_with_http_info(product_id, product, **kwargs) else: (data) = cls._update_product_by_id_with_http_info(product_id, product, **kwargs) return data" 4103,"def asserts(input_value, rule, message=''): """""" this function allows you to write asserts in generators since there are moments where you actually want the program to halt when certain values are seen. """""" assert callable(rule) or type(rule)==bool, 'asserts needs rule to be a callable function or a test boolean' assert isinstance(message, str), 'asserts needs message to be a string' # if the message is empty and rule is callable, fill message with rule's source code if len(message)==0 and callable(rule): try: s = getsource(rule).splitlines()[0].strip() except: s = repr(rule).strip() message = 'illegal input of {} breaks - {}'.format(input_value, s) if callable(rule): # if rule is a function, run the function and assign it to rule rule = rule(input_value) # now, assert the rule and return the input value assert rule, message return input_value" 4104,"def print(*a): """""" print just one that returns what you give it instead of None """""" try: _print(*a) return a[0] if len(a) == 1 else a except: _print(*a)" 4105,"def pattern2re(pattern): """"""Makes a unicode regular expression from a pattern. Returns ``(start, full_re, int_re)`` where: * `start` is either empty or the subdirectory in which to start searching, * `full_re` is a regular expression object that matches the requested files, i.e. 
a translation of the pattern * `int_re` is either None of a regular expression object that matches the requested paths or their ancestors (i.e. if a path doesn't match `int_re`, no path under it will match `full_re`) This uses extended patterns, where: * a slash '/' always represents the path separator * a backslash '\' escapes other special characters * an initial slash '/' anchors the match at the beginning of the (relative) path * a trailing '/' suffix is removed * an asterisk '*' matches a sequence of any length (including 0) of any characters (except the path separator) * a '?' matches exactly one character (except the path separator) * '[abc]' matches characters 'a', 'b' or 'c' * two asterisks '**' matches one or more path components (might match '/' characters) """""" pattern_segs = filter(None, pattern.split('/')) # This anchors the first component either at the start of the string or at # the start of a path component if not pattern: return '', re.compile(''), None elif '/' in pattern: full_regex = '^' # Start at beginning of path int_regex = [] int_regex_done = False start_dir = [] start_dir_done = False else: full_regex = '(?:^|/)' # Skip any number of full components int_regex = None int_regex_done = True start_dir = [] start_dir_done = True # Handles each component for pnum, pat in enumerate(pattern_segs): comp = patterncomp2re(pat) # The first component is already anchored if pnum > 0: full_regex += '/' full_regex += comp if not int_regex_done: if pat == '**': int_regex_done = True else: int_regex.append(comp) if not start_dir_done and no_special_chars.match(pat): start_dir.append(pat) else: start_dir_done = True full_regex = re.compile(full_regex.rstrip('/') + '$') if int_regex is not None: n = len(int_regex) int_regex_s = '' for i, c in enumerate(reversed(int_regex)): if i == n - 1: # Last iteration (first component) int_regex_s = '^(?:%s%s)?' % (c, int_regex_s) elif int_regex_s: int_regex_s = '(?:/%s%s)?' % (c, int_regex_s) else: # First iteration (last component) int_regex_s = '(?:/%s)?' % c int_regex = re.compile(int_regex_s + '$') start_dir = '/'.join(start_dir) return start_dir, full_regex, int_regex" 4106,"def _to_backend(self, p): """"""Converts something to the correct path representation. If given a Path, this will simply unpack it, if it's the correct type. If given the correct backend, it will return that. If given bytes for unicode of unicode for bytes, it will encode/decode with a reasonable encoding. Note that these operations can raise UnicodeError! """""" if isinstance(p, self._cmp_base): return p.path elif isinstance(p, self._backend): return p elif self._backend is unicode and isinstance(p, bytes): return p.decode(self._encoding) elif self._backend is bytes and isinstance(p, unicode): return p.encode(self._encoding, 'surrogateescape' if PY3 else 'strict') else: raise TypeError(""Can't construct a %s from %r"" % ( self.__class__.__name__, type(p)))" 4107,"def parent(self): """"""The parent directory of this path. """""" p = self._lib.dirname(self.path) p = self.__class__(p) return p" 4108,"def unicodename(self): """"""The name of this path as unicode. """""" n = self._lib.basename(self.path) if self._backend is unicode: return n else: return n.decode(self._encoding, 'replace')" 4109,"def split_root(self): """"""Splits this path into a pair (drive, location). Note that, because all paths are normalized, a root of ``'.'`` will be returned for relative paths. 
"""""" if not PY3 and hasattr(self._lib, 'splitunc'): root, rest = self._lib.splitunc(self.path) if root: if rest.startswith(self._sep): root += self._sep rest = rest[1:] return self.__class__(root), self.__class__(rest) root, rest = self._lib.splitdrive(self.path) if root: if rest.startswith(self._sep): root += self._sep rest = rest[1:] return self.__class__(root), self.__class__(rest) if self.path.startswith(self._sep): return self.__class__(self._sep), self.__class__(rest[1:]) return self.__class__(''), self" 4110,"def rel_path_to(self, dest): """"""Builds a relative path leading from this one to the given `dest`. Note that these paths might be both relative, in which case they'll be assumed to start from the same directory. """""" dest = self.__class__(dest) orig_list = self.norm_case()._components() dest_list = dest._components() i = -1 for i, (orig_part, dest_part) in enumerate(zip(orig_list, dest_list)): if orig_part != self._normcase(dest_part): up = ['..'] * (len(orig_list) - i) return self.__class__(*(up + dest_list[i:])) if len(orig_list) <= len(dest_list): if len(dest_list) > i + 1: return self.__class__(*dest_list[i + 1:]) else: return self.__class__('') else: up = ['..'] * (len(orig_list) - i - 1) return self.__class__(*up)" 4111,"def lies_under(self, prefix): """"""Indicates if the `prefix` is a parent of this path. """""" orig_list = self.norm_case()._components() pref_list = self.__class__(prefix).norm_case()._components() return (len(orig_list) >= len(pref_list) and orig_list[:len(pref_list)] == pref_list)" 4112,"def tempfile(cls, suffix='', prefix=None, dir=None, text=False): """"""Returns a new temporary file. The return value is a pair (fd, path) where fd is the file descriptor returned by :func:`os.open`, and path is a :class:`~rpaths.Path` to it. :param suffix: If specified, the file name will end with that suffix, otherwise there will be no suffix. :param prefix: Is specified, the file name will begin with that prefix, otherwise a default prefix is used. :param dir: If specified, the file will be created in that directory, otherwise a default directory is used. :param text: If true, the file is opened in text mode. Else (the default) the file is opened in binary mode. On some operating systems, this makes no difference. The file is readable and writable only by the creating user ID. If the operating system uses permission bits to indicate whether a file is executable, the file is executable by no one. The file descriptor is not inherited by children of this process. The caller is responsible for deleting the file when done with it. """""" if prefix is None: prefix = tempfile.template if dir is not None: # Note that this is not safe on Python 2 # There is no work around, apart from not using the tempfile module dir = str(Path(dir)) fd, filename = tempfile.mkstemp(suffix, prefix, dir, text) return fd, cls(filename).absolute()" 4113,"def tempdir(cls, suffix='', prefix=None, dir=None): """"""Returns a new temporary directory. Arguments are as for :meth:`~rpaths.Path.tempfile`, except that the `text` argument is not accepted. The directory is readable, writable, and searchable only by the creating user. The caller is responsible for deleting the directory when done with it. 
"""""" if prefix is None: prefix = tempfile.template if dir is not None: # Note that this is not safe on Python 2 # There is no work around, apart from not using the tempfile module dir = str(Path(dir)) dirname = tempfile.mkdtemp(suffix, prefix, dir) return cls(dirname).absolute()" 4114,"def rel_path_to(self, dest): """"""Builds a relative path leading from this one to another. Note that these paths might be both relative, in which case they'll be assumed to be considered starting from the same directory. Contrary to :class:`~rpaths.AbstractPath`'s version, this will also work if one path is relative and the other absolute. """""" return super(Path, self.absolute()).rel_path_to(Path(dest).absolute())" 4115,"def listdir(self, pattern=None): """"""Returns a list of all the files in this directory. The special entries ``'.'`` and ``'..'`` will not be returned. :param pattern: A pattern to match directory entries against. :type pattern: NoneType | Callable | Pattern | unicode | bytes """""" files = [self / self.__class__(p) for p in os.listdir(self.path)] if pattern is None: pass elif callable(pattern): files = filter(pattern, files) else: if isinstance(pattern, backend_types): if isinstance(pattern, bytes): pattern = pattern.decode(self._encoding, 'replace') start, full_re, _int_re = pattern2re(pattern) elif isinstance(pattern, Pattern): start, full_re = pattern.start_dir, pattern.full_regex else: raise TypeError(""listdir() expects pattern to be a callable, "" ""a regular expression or a string pattern, "" ""got %r"" % type(pattern)) # If pattern contains slashes (other than first and last chars), # listdir() will never match anything if start: return [] files = [f for f in files if full_re.search(f.unicodename)] return files" 4116,"def recursedir(self, pattern=None, top_down=True, follow_links=False, handle_errors=None): """"""Recursively lists all files under this directory. :param pattern: An extended patterns, where: * a slash '/' always represents the path separator * a backslash '\' escapes other special characters * an initial slash '/' anchors the match at the beginning of the (relative) path * a trailing '/' suffix is removed * an asterisk '*' matches a sequence of any length (including 0) of any characters (except the path separator) * a '?' matches exactly one character (except the path separator) * '[abc]' matches characters 'a', 'b' or 'c' * two asterisks '**' matches one or more path components (might match '/' characters) :type pattern: NoneType | Callable | Pattern | unicode | bytes :param follow_links: If False, symbolic links will not be followed (the default). Else, they will be followed, but directories reached through different names will *not* be listed multiple times. :param handle_errors: Can be set to a callback that will be called when an error is encountered while accessing the filesystem (such as a permission issue). If set to None (the default), exceptions will be propagated. 
"""""" if not self.is_dir(): raise ValueError(""recursedir() called on non-directory %s"" % self) start = '' int_pattern = None if pattern is None: pattern = lambda p: True elif callable(pattern): pass else: if isinstance(pattern, backend_types): if isinstance(pattern, bytes): pattern = pattern.decode(self._encoding, 'replace') start, full_re, int_re = pattern2re(pattern) elif isinstance(pattern, Pattern): start, full_re, int_re = \ pattern.start_dir, pattern.full_regex, pattern.int_regex else: raise TypeError(""recursedir() expects pattern to be a "" ""callable, a regular expression or a string "" ""pattern, got %r"" % type(pattern)) if self._lib.sep != '/': pattern = lambda p: full_re.search( unicode(p).replace(self._lib.sep, '/')) if int_re is not None: int_pattern = lambda p: int_re.search( unicode(p).replace(self._lib.sep, '/')) else: pattern = lambda p: full_re.search(unicode(p)) if int_re is not None: int_pattern = lambda p: int_re.search(unicode(p)) if not start: path = self else: path = self / start if not path.exists(): return [] elif not path.is_dir(): return [path] return path._recursedir(pattern=pattern, int_pattern=int_pattern, top_down=top_down, seen=set(), path=self.__class__(start), follow_links=follow_links, handle_errors=handle_errors)" 4117,"def mkdir(self, name=None, parents=False, mode=0o777): """"""Creates that directory, or a directory under this one. ``path.mkdir(name)`` is a shortcut for ``(path/name).mkdir()``. :param name: Path component to append to this path before creating the directory. :param parents: If True, missing directories leading to the path will be created too, recursively. If False (the default), the parent of that path needs to exist already. :param mode: Permissions associated with the directory on creation, without race conditions. """""" if name is not None: return (self / name).mkdir(parents=parents, mode=mode) if self.exists(): return if parents: os.makedirs(self.path, mode) else: os.mkdir(self.path, mode) return self" 4118,"def rmdir(self, parents=False): """"""Removes this directory, provided it is empty. Use :func:`~rpaths.Path.rmtree` if it might still contain files. :param parents: If set to True, it will also destroy every empty directory above it until an error is encountered. """""" if parents: os.removedirs(self.path) else: os.rmdir(self.path)" 4119,"def rename(self, new, parents=False): """"""Renames this path to the given new location. :param new: New path where to move this one. :param parents: If set to True, it will create the parent directories of the target if they don't exist. """""" if parents: os.renames(self.path, self._to_backend(new)) else: os.rename(self.path, self._to_backend(new))" 4120,"def copyfile(self, target): """"""Copies this file to the given `target` location. """""" shutil.copyfile(self.path, self._to_backend(target))" 4121,"def copymode(self, target): """"""Copies the mode of this file on the `target` file. The owner is not copied. """""" shutil.copymode(self.path, self._to_backend(target))" 4122,"def copystat(self, target): """"""Copies the permissions, times and flags from this to the `target`. The owner is not copied. """""" shutil.copystat(self.path, self._to_backend(target))" 4123,"def copy(self, target): """"""Copies this file the `target`, which might be a directory. The permissions are copied. """""" shutil.copy(self.path, self._to_backend(target))" 4124,"def copytree(self, target, symlinks=False): """"""Recursively copies this directory to the `target` location. 
The permissions and times are copied (like :meth:`~rpaths.Path.copystat`). If the optional `symlinks` flag is true, symbolic links in the source tree result in symbolic links in the destination tree; if it is false, the contents of the files pointed to by symbolic links are copied. """""" shutil.copytree(self.path, self._to_backend(target), symlinks)" 4125,"def move(self, target): """"""Recursively moves a file or directory to the given target location. """""" shutil.move(self.path, self._to_backend(target))" 4126,"def open(self, mode='r', name=None, **kwargs): """"""Opens this file, or a file under this directory. ``path.open(mode, name)`` is a shortcut for ``(path/name).open(mode)``. Note that this uses :func:`io.open()` which behaves differently from :func:`open()` on Python 2; see the appropriate documentation. :param name: Path component to append to this path before opening the file. """""" if name is not None: return io.open((self / name).path, mode=mode, **kwargs) else: return io.open(self.path, mode=mode, **kwargs)" 4127,"def rewrite(self, mode='r', name=None, temp=None, tempext='~', **kwargs): r""""""Replaces this file with new content. This context manager gives you two file objects, (r, w), where r is readable and has the current content of the file, and w is writable and will replace the file at the end of the context (unless an exception is raised, in which case it is rolled back). Keyword arguments will be used for both files, unless they are prefixed with ``read_`` or ``write_``. For instance:: with Path('test.txt').rewrite(read_newline='\n', write_newline='\r\n') as (r, w): w.write(r.read()) :param name: Path component to append to this path before opening the file. :param temp: Temporary file name to write, and then move over this one. By default it's this filename with a ``~`` suffix. :param tempext: Extension to add to this file to get the temporary file to write then move over this one. Defaults to ``~``. """""" if name is not None: pathr = self / name else: pathr = self for m in 'war+': mode = mode.replace(m, '') # Build options common_kwargs = {} readable_kwargs = {} writable_kwargs = {} for key, value in kwargs.items(): if key.startswith('read_'): readable_kwargs[key[5:]] = value elif key.startswith('write_'): writable_kwargs[key[6:]] = value else: common_kwargs[key] = value readable_kwargs = dict_union(common_kwargs, readable_kwargs) writable_kwargs = dict_union(common_kwargs, writable_kwargs) with pathr.open('r' + mode, **readable_kwargs) as readable: if temp is not None: pathw = Path(temp) else: pathw = pathr + tempext try: pathw.remove() except OSError: pass writable = pathw.open('w' + mode, **writable_kwargs) try: yield readable, writable except Exception: # Problem, delete writable writable.close() pathw.remove() raise else: writable.close() # Alright, replace pathr.copymode(pathw) pathr.remove() pathw.rename(pathr)" 4128,"def matches(self, path): """"""Tests if the given path matches the pattern. Note that the unicode translation of the patch is matched, so replacement characters might have been added. """""" path = self._prepare_path(path) return self.full_regex.search(path) is not None" 4129,"def may_contain_matches(self, path): """"""Tests whether it's possible for paths under the given one to match. If this method returns None, no path under the given one will match the pattern. 
"""""" path = self._prepare_path(path) return self.int_regex.search(path) is not None" 4130,"def _fetch(url,): """""" *Retrieve an HTML document or file from the web at a given URL* **Key Arguments:** - ``url`` -- the URL of the document or file **Return:** - ``url`` -- the URL of the document or file, or None if an error occured - ``body`` -- the text content of the HTML document. """""" import logging as log import socket from eventlet import Timeout from eventlet.green import urllib2 import sys # TRY AND DOWNLOAD X TIMES BEFORE QUITING tries = 10 count = 1 downloaded = False while count < tries and downloaded == False: try: log.debug('downloading ' + url.get_full_url()) body = urllib2.urlopen(url).read() downloaded = True except socket.timeout, e: print ""timeout on URL, trying again"" count += 1 except Exception, e: if ""[Errno 60]"" in str(e): log.warning('timeout on URL, trying again' % locals()) count += 1 if ""Error 502"" in str(e): log.warning('proxy error on URL, trying again' % locals()) count += 1 else: log.warning( ""could not download "" + url.get_full_url() + "" : "" + str(e) + ""\n"") url = None body = None downloaded = True return url, body" 4131,"def tgcanrecruit(self, region=None): """"""Whether the nation will receive a recruitment telegram. Useful in conjunction with the Telegrams API. Parameters ---------- region : str Name of the region you are recruiting for. Returns ------- an :class:`ApiQuery` of bool """""" params = {'from': normalize(region)} if region is not None else {} @api_query('tgcanrecruit', **params) async def result(_, root): return bool(int(root.find('TGCANRECRUIT').text)) return result(self)" 4132,"async def freedom(self, root): """"""Nation's `Freedoms`: three basic indicators of the nation's Civil Rights, Economy, and Political Freedom, as expressive adjectives. Returns ------- an :class:`ApiQuery` of :class:`collections.OrderedDict` with \ keys and values of str Keys being, in order: ``Civil Rights``, ``Economy``, and ``Political Freedom``. """""" elem = root.find('FREEDOM') result = OrderedDict() result['Civil Rights'] = elem.find('CIVILRIGHTS').text result['Economy'] = elem.find('ECONOMY').text result['Political Freedom'] = elem.find('POLITICALFREEDOM').text return result" 4133,"async def freedomscores(self, root): """"""Nation's `Freedoms`: three basic indicators of the nation's Civil Rights, Economy, and Political Freedom, as percentages. Returns ------- an :class:`ApiQuery` of :class:`collections.OrderedDict` with \ keys of str and values of int Keys being, in order: ``Civil Rights``, ``Economy``, and ``Political Freedom``. """""" elem = root.find('FREEDOMSCORES') result = OrderedDict() result['Civil Rights'] = int(elem.find('CIVILRIGHTS').text) result['Economy'] = int(elem.find('ECONOMY').text) result['Political Freedom'] = int(elem.find('POLITICALFREEDOM').text) return result" 4134,"async def govt(self, root): """"""Nation's government expenditure, as percentages. Returns ------- an :class:`ApiQuery` of :class:`collections.OrderedDict` with \ keys of str and values of float Keys being, in order: ``Administration``, ``Defense``, ``Education``, ``Environment``, ``Healthcare``, ``Industry``, ``International Aid``, ``Law & Order``, ``Public Transport``, ``Social Policy``, ``Spirituality``, and ``Welfare``. 
"""""" elem = root.find('GOVT') result = OrderedDict() result['Administration'] = float(elem.find('ADMINISTRATION').text) result['Defense'] = float(elem.find('DEFENCE').text) # match the web UI result['Education'] = float(elem.find('EDUCATION').text) result['Environment'] = float(elem.find('ENVIRONMENT').text) result['Healthcare'] = float(elem.find('HEALTHCARE').text) result['Industry'] = float(elem.find('COMMERCE').text) # Don't ask result['International Aid'] = float(elem.find('INTERNATIONALAID').text) result['Law & Order'] = float(elem.find('LAWANDORDER').text) result['Public Transport'] = float(elem.find('PUBLICTRANSPORT').text) result['Social Policy'] = float(elem.find('SOCIALEQUALITY').text) # Shh result['Spirituality'] = float(elem.find('SPIRITUALITY').text) result['Welfare'] = float(elem.find('WELFARE').text) return result" 4135,"async def sectors(self, root): """"""Components of the nation's economy, as percentages. Returns ------- an :class:`ApiQuery` of :class:`collections.OrderedDict` with \ keys of str and values of float Keys being, in order: ``Black Market (estimated)``, ``Government``, ``Private Industry``, and ``State-Owned Industry``. """""" elem = root.find('SECTORS') result = OrderedDict() result['Black Market (estimated)'] = float(elem.find('BLACKMARKET').text) result['Government'] = float(elem.find('GOVERNMENT').text) result['Private Industry'] = float(elem.find('INDUSTRY').text) result['State-Owned Industry'] = float(elem.find('PUBLIC').text) return result" 4136,"async def deaths(self, root): """"""Causes of death in the nation, as percentages. Returns ------- an :class:`ApiQuery` of dict with keys of str and values of float """""" return { elem.get('type'): float(elem.text) for elem in root.find('DEATHS') }" 4137,"async def endorsements(self, root): """"""Regional neighbours endorsing the nation. Returns ------- an :class:`ApiQuery` of a list of :class:`Nation` """""" text = root.find('ENDORSEMENTS').text return [Nation(name) for name in text.split(',')] if text else []" 4138,"def verify(self, checksum, *, token=None): """"""Interface to the `NationStates Verification API `_. Parameters ---------- checksum : str The user-supplied verification code. Expires if the nation logs out, if it performs a significant in-game action such as moving regions or endorsing another nation, and after it is successfully verified. token : str A token specific to your service and the nation being verified. Returns ------- an :class:`ApiQuery` of bool """""" params = {'a': 'verify', 'checksum': checksum} if token: params['token'] = token # Needed so that we get output in xml, as opposed to # plain text. It doesn't actually matter what the # q param is, it's just important that it's not empty. @api_query('i_need_the_output_in_xml', **params) async def result(self, root): return bool(int(root.find('VERIFY').text)) return result(self)" 4139,"async def description(self): """"""Nation's full description, as seen on its in-game page. Returns ------- an awaitable of str """""" resp = await self._call_web(f'nation={self.id}') return html.unescape( re.search( '
    (.+?)
    ', resp.text, flags=re.DOTALL ) .group(1) .replace('\n', '') .replace('
    ', '') .replace('
    ', '\n\n') .strip() )" 4140,"def accept(self): """"""Accept the option. Returns ------- an awaitable of :class:`IssueResult` """""" return self._issue._nation._accept_issue(self._issue.id, self._id)" 4141,"def list_all_promotions(cls, **kwargs): """"""List Promotions Return a list of Promotions This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.list_all_promotions(async=True) >>> result = thread.get() :param async bool :param int page: page number :param int size: page size :param str sort: page order :return: page[Promotion] If the method is called asynchronously, returns the request thread. """""" kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._list_all_promotions_with_http_info(**kwargs) else: (data) = cls._list_all_promotions_with_http_info(**kwargs) return data" 4142,"def pip_upgrade_all_user(line): """"""Attempt to upgrade all packages installed with --user"""""" import pip for dist in pip.get_installed_distributions(user_only=True): do_pip([""install"", ""--upgrade"", ""--user"", dist.project_name])" 4143,"def pip_upgrade_all(line): """"""Attempt to upgrade all packages"""""" from pip import get_installed_distributions user = set(d.project_name for d in get_installed_distributions(user_only=True)) all = set(d.project_name for d in get_installed_distributions()) for dist in all - user: do_pip([""install"", ""--upgrade"", dist]) for dist in user: do_pip([""install"", ""--upgrade"", ""--user"", dist])" 4144,"def enc_name_descr(name, descr, color=a99.COLOR_DESCR): """"""Encodes html given name and description."""""" return enc_name(name, color)+""
    ""+descr" 4145,"def style_checkboxes(widget): """""" Iterates over widget children to change checkboxes stylesheet. The default rendering of checkboxes does not allow to tell a focused one from an unfocused one. """""" ww = widget.findChildren(QCheckBox) for w in ww: w.setStyleSheet(""QCheckBox:focus {border: 1px solid #000000;}"")" 4146,"def check_return_space(event, callable_): """"""Checks if event corresponds to Return/Space being pressed and calls callable_ if so."""""" if event.type() == QEvent.KeyPress: if event.key() in [Qt.Key_Return, Qt.Key_Space]: callable_() return True return False" 4147,"def are_you_sure(flag_changed, evt, parent=None, title=""File has been changed"", msg=""Are you sure you want to exit?""): """""" ""Are you sure you want to exit"" question dialog. If flag_changed, shows question dialog. If answer is not yes, calls evt.ignore() Arguments: flag_changed evt -- QCloseEvent instance parent=None -- parent form, used to centralize the question dialog at title -- title for question dialog msg -- text of question dialog Returns True or False. True means: ""yes, I want to exit"" """""" if flag_changed: r = QMessageBox.question(parent, title, msg, QMessageBox.Yes|QMessageBox.No, QMessageBox.Yes) if r != QMessageBox.Yes: evt.ignore()" 4148,"def reset_table_widget(t, rowCount, colCount): """"""Clears and resizes a table widget."""""" t.reset() t.horizontalHeader().reset() t.clear() t.sortItems(-1) t.setRowCount(rowCount) t.setColumnCount(colCount)" 4149,"def show_edit_form(obj, attrs=None, title="""", toolTips=None): """"""Shows parameters editor modal form. Arguments: obj: object to extract attribute values from, or a dict-like attrs: list of attribute names title: toolTips: """""" if attrs is None: if hasattr(obj, ""keys""): attrs = list(obj.keys()) else: raise RuntimeError(""attrs is None and cannot determine it from obj"") specs = [] for i, name in enumerate(attrs): # Tries as attribute, then as key try: value = obj.__getattribute__(name) except AttributeError: value = obj[name] if value is None: value = """" # None becomes str dict_ = {""value"": value} if toolTips is not None: dict_[""toolTip""] = toolTips[i] dict_[""tooltip""] = toolTips[i] specs.append((name, dict_)) form = XParametersEditor(specs=specs, title=title) r = form.exec_() return r, form" 4150,"def place_left_top(window, width=None, height=None): """"""Places window in top left corner of screen. Arguments: window -- a QWidget width=None -- window width, in case you want to change it (if not passed, not changed) height=None -- window height, in case you want to change it (if not passed, not changed) """""" if width is None: width = window.width() if height is None: height = window.height() window.setGeometry(_DESKTOP_OFFSET_LEFT, _DESKTOP_OFFSET_TOP, width, height)" 4151,"def place_center(window, width=None, height=None): """"""Places window in the center of the screen."""""" screenGeometry = QApplication.desktop().screenGeometry() w, h = window.width(), window.height() if width is not None or height is not None: w = width if width is not None else w h = height if height is not None else h window.setGeometry(0, 0, w, h) x = (screenGeometry.width() - w) / 2 y = (screenGeometry.height() - h) / 2 window.move(x, y)" 4152,"def snap_left(window, width=None): """"""Snaps window to left of desktop. 
Arguments: window -- a QWidget width=None -- window width, in case you want to change it (if not passed, not changed) """""" if not width: width = window.width() rect = QApplication.desktop().screenGeometry() window.setGeometry(_DESKTOP_OFFSET_LEFT, _DESKTOP_OFFSET_TOP, width, rect.height())" 4153,"def snap_right(window, width=None): """"""Snaps window to right of desktop. Arguments: window -- a QWidget width=None -- window width, in case you want to change it (if not passed, not changed) """""" if not width: width = window.width() rect = QApplication.desktop().screenGeometry() window.setGeometry(rect.width()-width, _DESKTOP_OFFSET_TOP, width, rect.height())" 4154,"def get_matplotlib_layout(widget, flag_toolbar=True): """""" Creates figure, toolbar, layout, sets widget layout Returns figure, canvas, layout Reference: http://stackoverflow.com/questions/12459811 """""" fig = plt.figure() canvas = FigureCanvas(fig) # self.canvas.mpl_connect('button_press_event', self.on_plot_click) layout = QVBoxLayout(widget) if flag_toolbar: toolbar = NavigationToolbar2QT(canvas, widget) layout.addWidget(toolbar) layout.addWidget(canvas) a99.set_margin(layout, 0) return fig, canvas, layout" 4155,"def get_icon(keyword): """""" Transforms a PNG file in a QIcon Looks for a file named .png in the ""icons"" directory If file does not exist, returns None """""" filename = a99.get_path( ""icons"", keyword + "".png"") if not os.path.isfile(filename): raise FileNotFoundError(""File '{}' does not exist"".format(filename)) return QIcon(filename)" 4156,"def get_QApplication(args=[]): """"""Returns the QApplication instance, creating it is does not yet exist."""""" global _qapp if _qapp is None: QCoreApplication.setAttribute(Qt.AA_X11InitThreads) _qapp = QApplication(args) return _qapp" 4157,"def table_info_to_parameters(table_info): """""" Converts a list of MyDBRow into a parameters.Parameters object This facilitates transfering data from SQLite table row to a XParameterEditor window See also: get_table_info() """""" # Example of item in table_info: # MyDBRow([('cid', 0), ('name', 'id'), ('type', 'integer'), ('notnull', 0), ('dflt_value', None), ('pk', 1)]) opbj = a99.Parameters() for field_info in table_info.values(): p = a99.Parameter() if field_info.type == ""integer"": p.type = int elif field_info.type == ""real"": p.type = float else: p.type = str p.name = field_info.name if field_info.dflt_value is not None: p.value = field_info.dflt_value opbj.params.append(p) return opbj" 4158,"def get_frame(): """"""Returns a QFrame formatted in a particular way"""""" ret = QFrame() ret.setLineWidth(1) ret.setMidLineWidth(0) ret.setFrameShadow(QFrame.Sunken) ret.setFrameShape(QFrame.Box) return ret" 4159,"def set_checkbox_value(w, value): """""" Sets a checkbox's ""checked"" property + signal blocking + value tolerance Args: w: QCheckBox instance value: something that can be converted to a bool """""" save = w.blockSignals(True) try: w.setChecked(bool(value)) finally: w.blockSignals(save)" 4160,"def add_signal(self, signal): """"""Adds ""input"" signal to connected signals. Internally connects the signal to a control slot."""""" self.__signals.append(signal) if self.__connected: # Connects signal if the current state is ""connected"" self.__connect_signal(signal)" 4161,"def connect_all(self): """"""[Re-]connects all signals and slots. If already in ""connected"" state, ignores the call. 
"""""" if self.__connected: return # assert not self.__connected, ""connect_all() already in \""connected\"" state"" with self.__lock: for signal in self.__signals: self.__connect_signal(signal) if self.__slot is not None: self.__sigDelayed.connect(self.__slot, Qt.QueuedConnection) self.__connected = True" 4162,"def disconnect_all(self): """"""Disconnects all signals and slots. If already in ""disconnected"" state, ignores the call. """""" if not self.__connected: return # assert self.__connected, ""disconnect_all() already in \""disconnected\"" state"" self.__disconnecting = True try: for signal in self.__signals: signal.disconnect(self.__signalReceived) if self.__slot is not None: self.__sigDelayed.disconnect(self.__slot) self.__connected = False finally: self.__disconnecting = False" 4163,"def __signalReceived(self, *args): """"""Received signal. Cancel previous timer and store args to be forwarded later."""""" if self.__disconnecting: return with self.__lock: self.__args = args if self.__rateLimit == 0: self.__timer.stop() self.__timer.start((self.__delay * 1000) + 1) else: now = time.time() if self.__lastFlushTime is None: leakTime = 0 else: lastFlush = self.__lastFlushTime leakTime = max(0, (lastFlush + (1.0 / self.__rateLimit)) - now) self.__timer.stop() # Note: original was min() below. timeout = (max(leakTime, self.__delay) * 1000) + 1 self.__timer.start(timeout)" 4164,"def __flush(self): """"""If there is a signal queued up, send it now."""""" if self.__args is None or self.__disconnecting: return False #self.emit(self.signal, *self.args) self.__sigDelayed.emit(self.__args) self.__args = None self.__timer.stop() self.__lastFlushTime = time.time() return True" 4165,"def clean_indicators(indicators): """"""Remove any extra details from indicators."""""" output = list() for indicator in indicators: strip = ['http://', 'https://'] for item in strip: indicator = indicator.replace(item, '') indicator = indicator.strip('.').strip() parts = indicator.split('/') if len(parts) > 0: indicator = parts.pop(0) output.append(indicator) output = list(set(output)) return output" 4166,"def hash_values(values, alg=""md5""): """"""Hash a list of values."""""" import hashlib if alg not in ['md5', 'sha1', 'sha256']: raise Exception(""Invalid hashing algorithm!"") hasher = getattr(hashlib, alg) if type(values) == str: output = hasher(values).hexdigest() elif type(values) == list: output = list() for item in values: output.append(hasher(item).hexdigest()) return output" 4167,"def check_whitelist(values): """"""Check the indicators against known whitelists."""""" import os import tldextract whitelisted = list() for name in ['alexa.txt', 'cisco.txt']: config_path = os.path.expanduser('~/.config/blockade') file_path = os.path.join(config_path, name) whitelisted += [x.strip() for x in open(file_path, 'r').readlines()] output = list() for item in values: ext = tldextract.extract(item) if ext.registered_domain in whitelisted: continue output.append(item) return output" 4168,"def cache_items(values): """"""Cache indicators that were successfully sent to avoid dups."""""" import os config_path = os.path.expanduser('~/.config/blockade') file_path = os.path.join(config_path, 'cache.txt') if not os.path.isfile(file_path): file(file_path, 'w').close() written = [x.strip() for x in open(file_path, 'r').readlines()] handle = open(file_path, 'a') for item in values: # Because of the option to submit in clear or hashed, we need to make # sure we're not re-hashing before adding. 
if is_hashed(item): hashed = item else: hashed = hash_values(item) if hashed in written: continue handle.write(hashed + ""\n"") handle.close() return True" 4169,"def prune_cached(values): """"""Remove the items that have already been cached."""""" import os config_path = os.path.expanduser('~/.config/blockade') file_path = os.path.join(config_path, 'cache.txt') if not os.path.isfile(file_path): return values cached = [x.strip() for x in open(file_path, 'r').readlines()] output = list() for item in values: hashed = hash_values(item) if hashed in cached: continue output.append(item) return output" 4170,"def get_logger(name): """"""Get a logging instance we can use."""""" import logging import sys logger = logging.getLogger(name) logger.setLevel(logging.DEBUG) shandler = logging.StreamHandler(sys.stdout) fmt = """" fmt += '\033[1;32m%(levelname)-5s %(module)s:%(funcName)s():' fmt += '%(lineno)d %(asctime)s\033[0m| %(message)s' fmtr = logging.Formatter(fmt) shandler.setFormatter(fmtr) logger.addHandler(shandler) return logger" 4171,"def process_whitelists(): """"""Download approved top 1M lists."""""" import csv import grequests import os import StringIO import zipfile mapping = { 'http://s3.amazonaws.com/alexa-static/top-1m.csv.zip': { 'name': 'alexa.txt' }, 'http://s3-us-west-1.amazonaws.com/umbrella-static/top-1m.csv.zip': { 'name': 'cisco.txt' } } rs = (grequests.get(u) for u in mapping.keys()) responses = grequests.map(rs) for r in responses: data = zipfile.ZipFile(StringIO.StringIO(r.content)).read('top-1m.csv') stream = StringIO.StringIO(data) reader = csv.reader(stream, delimiter=',', quoting=csv.QUOTE_MINIMAL) items = [row[1].strip() for row in reader] stream.close() config_path = os.path.expanduser('~/.config/blockade') file_path = os.path.join(config_path, mapping[r.url]['name']) handle = open(file_path, 'w') for item in items: if item.count('.') == 0: continue handle.write(item + ""\n"") handle.close() return True" 4172,"def mode(self, mode): """"""Sets the mode of this BraintreeGateway. :param mode: The mode of this BraintreeGateway. :type: str """""" allowed_values = [""test"", ""live""] if mode is not None and mode not in allowed_values: raise ValueError( ""Invalid value for `mode` ({0}), must be one of {1}"" .format(mode, allowed_values) ) self._mode = mode" 4173,"def create_braintree_gateway(cls, braintree_gateway, **kwargs): """"""Create BraintreeGateway Create a new BraintreeGateway This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.create_braintree_gateway(braintree_gateway, async=True) >>> result = thread.get() :param async bool :param BraintreeGateway braintree_gateway: Attributes of braintreeGateway to create (required) :return: BraintreeGateway If the method is called asynchronously, returns the request thread. """""" kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._create_braintree_gateway_with_http_info(braintree_gateway, **kwargs) else: (data) = cls._create_braintree_gateway_with_http_info(braintree_gateway, **kwargs) return data" 4174,"def delete_braintree_gateway_by_id(cls, braintree_gateway_id, **kwargs): """"""Delete BraintreeGateway Delete an instance of BraintreeGateway by its ID. This method makes a synchronous HTTP request by default. 
To make an asynchronous HTTP request, please pass async=True >>> thread = api.delete_braintree_gateway_by_id(braintree_gateway_id, async=True) >>> result = thread.get() :param async bool :param str braintree_gateway_id: ID of braintreeGateway to delete. (required) :return: None If the method is called asynchronously, returns the request thread. """""" kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._delete_braintree_gateway_by_id_with_http_info(braintree_gateway_id, **kwargs) else: (data) = cls._delete_braintree_gateway_by_id_with_http_info(braintree_gateway_id, **kwargs) return data" 4175,"def get_braintree_gateway_by_id(cls, braintree_gateway_id, **kwargs): """"""Find BraintreeGateway Return single instance of BraintreeGateway by its ID. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.get_braintree_gateway_by_id(braintree_gateway_id, async=True) >>> result = thread.get() :param async bool :param str braintree_gateway_id: ID of braintreeGateway to return (required) :return: BraintreeGateway If the method is called asynchronously, returns the request thread. """""" kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._get_braintree_gateway_by_id_with_http_info(braintree_gateway_id, **kwargs) else: (data) = cls._get_braintree_gateway_by_id_with_http_info(braintree_gateway_id, **kwargs) return data" 4176,"def list_all_braintree_gateways(cls, **kwargs): """"""List BraintreeGateways Return a list of BraintreeGateways This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.list_all_braintree_gateways(async=True) >>> result = thread.get() :param async bool :param int page: page number :param int size: page size :param str sort: page order :return: page[BraintreeGateway] If the method is called asynchronously, returns the request thread. """""" kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._list_all_braintree_gateways_with_http_info(**kwargs) else: (data) = cls._list_all_braintree_gateways_with_http_info(**kwargs) return data" 4177,"def replace_braintree_gateway_by_id(cls, braintree_gateway_id, braintree_gateway, **kwargs): """"""Replace BraintreeGateway Replace all attributes of BraintreeGateway This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.replace_braintree_gateway_by_id(braintree_gateway_id, braintree_gateway, async=True) >>> result = thread.get() :param async bool :param str braintree_gateway_id: ID of braintreeGateway to replace (required) :param BraintreeGateway braintree_gateway: Attributes of braintreeGateway to replace (required) :return: BraintreeGateway If the method is called asynchronously, returns the request thread. """""" kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._replace_braintree_gateway_by_id_with_http_info(braintree_gateway_id, braintree_gateway, **kwargs) else: (data) = cls._replace_braintree_gateway_by_id_with_http_info(braintree_gateway_id, braintree_gateway, **kwargs) return data" 4178,"def update_braintree_gateway_by_id(cls, braintree_gateway_id, braintree_gateway, **kwargs): """"""Update BraintreeGateway Update attributes of BraintreeGateway This method makes a synchronous HTTP request by default. 
To make an asynchronous HTTP request, please pass async=True >>> thread = api.update_braintree_gateway_by_id(braintree_gateway_id, braintree_gateway, async=True) >>> result = thread.get() :param async bool :param str braintree_gateway_id: ID of braintreeGateway to update. (required) :param BraintreeGateway braintree_gateway: Attributes of braintreeGateway to update. (required) :return: BraintreeGateway If the method is called asynchronously, returns the request thread. """""" kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._update_braintree_gateway_by_id_with_http_info(braintree_gateway_id, braintree_gateway, **kwargs) else: (data) = cls._update_braintree_gateway_by_id_with_http_info(braintree_gateway_id, braintree_gateway, **kwargs) return data" 4179,"def diagnose(df,preview_rows = 2, display_max_cols = 0,display_width = None): """""" Prints information about the DataFrame pertinent to data cleaning. Parameters ---------- df - DataFrame The DataFrame to summarize preview_rows - int, default 5 Amount of rows to preview from the head and tail of the DataFrame display_max_cols - int, default None Maximum amount of columns to display. If set to None, all columns will be displayed. If set to 0, only as many as fit in the screen's width will be displayed display_width - int, default None Width of output. Can be width of file or width of console for printing. Set to None for pandas to detect it from console. """""" assert type(df) is pd.DataFrame # Diagnose problems with the data formats that can be addressed in cleaning # Get initial display settings initial_max_cols = pd.get_option('display.max_columns') initial_max_rows = pd.get_option('display.max_rows') initial_width = pd.get_option('display.width') # Reformat displays pd.set_option('display.max_columns', display_max_cols) pd.set_option('display.max_rows',None) if display_width is not None: pd.set_option('display.width',display_width) # --------Values of data----------- df_preview = _io.preview(df,preview_rows) df_info = _io.get_info(df,verbose = True, max_cols = display_max_cols, memory_usage = 'deep',null_counts = True) dtypes = stats.dtypes_summary(df).apply(_io.format_row,args = [_utils.rows(df)],axis = 1) potential_outliers = stats.df_outliers(df).dropna(axis = 1,how = 'all') potential_outliers = potential_outliers if _utils.rows(potential_outliers) \ else None # ----------Build lists------------ title_list = \ ['Preview','Info', 'Data Types Summary','Potential Outliers'] info_list = \ [df_preview,df_info, dtypes,potential_outliers] error_list = [None,None, None,'No potential outliers.'] # ----------Build output------------ output = '' for title, value,error_text in zip(title_list,info_list,error_list): if value is None: value = ""{} skipped: {}"".format(title,error_text) if str(value).endswith('\n'): value = value[:-1] output+='{}\n{}\n\n'.format(_io.title_line(title),value) # ----------Send to file/print to console------------ # Potentially could change this to allow for output_safe to work with directories print(output) # Reset display settings pd.set_option('display.max_columns', initial_max_cols) pd.set_option('display.max_rows', initial_max_rows) pd.set_option('display.width', initial_width)" 4180,"def cut_spectrum(sp, l0, lf): """""" Cuts spectrum given a wavelength interval, leaving origina intact Args: sp: Spectrum instance l0: initial wavelength lf: final wavelength Returns: Spectrum: cut spectrum """""" if l0 >= lf: raise ValueError(""l0 must be lower than lf"") idx0 = np.argmin(np.abs(sp.x - l0)) 
idx1 = np.argmin(np.abs(sp.x - lf)) out = copy.deepcopy(sp) out.x = out.x[idx0:idx1] out.y = out.y[idx0:idx1] return out" 4181,"def remove_nongenerating_nonterminals(grammar, inplace=False): # type: (Grammar, bool) -> Grammar """""" Remove nongenerating symbols from the grammar. Nongenerating symbols are symbols, that don't generate sequence of terminals. For example never ending recursion. :param grammar: Grammar where to remove nongenerating symbols. :param inplace: True if transformation should be performed in place. False by default. :return: Grammar without nongenerating symbols. """""" # copy if required if inplace is False: grammar = copy(grammar) # create working sets generates = grammar.terminals.copy() generates.add(EPSILON) rules = grammar.rules.copy() # iterate until the set doesn't change while True: # create set for the next iteration additional = generates.copy() # iterate over unprocessed rules for rule in rules.copy(): rightPart = rule.right allIn = True # check if all symbols on the right part of rule are in generates set for symbol in rightPart: if symbol not in generates: allIn = False break # Symbol is missing so rule is not process if not allIn: continue # Rule is process - remove it from processing rules and make symbol as generating additional.add(rule.fromSymbol) rules.remove(rule) # end of rules iterations # ff current and previous iterations are same, than end iterations if additional == generates: break # swap sets from previous and current iterations generates = additional # remove nonterms that are not generating nongenerating = grammar.nonterminals.difference(generates) grammar.nonterminals.remove(*nongenerating) # return the grammar return grammar" 4182,"def skip_first(pipe, items=1): ''' this is an alias for skip to parallel the dedicated skip_last function to provide a little more readability to the code. the action of actually skipping does not occur until the first iteration is done ''' pipe = iter(pipe) for i in skip(pipe, items): yield i" 4183,"def find(self, query=None, **kwargs): """""" You can pass in the appropriate model object from the queries module, or a dictionary with the keys and values for the query, or a set of key=value parameters. """""" url = self.getUrl() if query is not None: if isinstance(query, queries.SlickQuery): url = url + ""?"" + urlencode(query.to_dict()) elif isinstance(query, dict): url = url + ""?"" + urlencode(query) elif len(kwargs) > 0: url = url + ""?"" + urlencode(kwargs) # hopefully when we discover what problems exist in slick to require this, we can take the loop out for retry in range(3): try: self.logger.debug(""Making request to slick at url %s"", url) r = requests.get(url) self.logger.debug(""Request returned status code %d"", r.status_code) if r.status_code is 200: retval = [] objects = r.json() for dct in objects: retval.append(self.model.from_dict(dct)) return retval else: self.logger.error(""Slick returned an error when trying to access %s: status code %s"" % (url, str(r.status_code))) self.logger.error(""Slick response: "", pprint.pformat(r)) except BaseException as error: self.logger.warn(""Received exception while connecting to slick at %s"", url, exc_info=sys.exc_info()) raise SlickCommunicationError( ""Tried 3 times to request data from slick at url %s without a successful status code."", url)" 4184,"def findOne(self, query=None, mode=FindOneMode.FIRST, **kwargs): """""" Perform a find, with the same options present, but only return a maximum of one result. 
If find returns an empty array, then None is returned. If there are multiple results from find, the one returned depends on the mode parameter. If mode is FindOneMode.FIRST, then the first result is returned. If the mode is FindOneMode.LAST, then the last is returned. If the mode is FindOneMode.ERROR, then a SlickCommunicationError is raised. """""" results = self.find(query, **kwargs) if len(results) is 0: return None elif len(results) is 1 or mode == FindOneMode.FIRST: return results[0] elif mode == FindOneMode.LAST: return results[-1]" 4185,"def get(self): """"""Get the specified object from slick. You specify which one you want by providing the id as a parameter to the parent object. Example: slick.projects(""4fd8cd95e4b0ee7ba54b9885"").get() """""" url = self.getUrl() # hopefully when we discover what problems exist in slick to require this, we can take the loop out for retry in range(3): try: self.logger.debug(""Making request to slick at url %s"", url) r = requests.get(url) self.logger.debug(""Request returned status code %d"", r.status_code) if r.status_code is 200: return self.model.from_dict(r.json()) else: self.logger.debug(""Body of what slick returned: %s"", r.text) except BaseException as error: self.logger.warn(""Received exception while connecting to slick at %s"", url, exc_info=sys.exc_info()) raise SlickCommunicationError( ""Tried 3 times to request data from slick at url %s without a successful status code."", url)" 4186,"def update(self): """"""Update the specified object from slick. You specify the object as a parameter, using the parent object as a function. Example: proj = slick.projects.findByName(""foo"") ... update proj here slick.projects(proj).update() """""" obj = self.data url = self.getUrl() # hopefully when we discover what problems exist in slick to require this, we can take the loop out last_stats_code = None last_body = None for retry in range(3): try: json_data = obj.to_json() self.logger.debug(""Making request to slick at url %s, with data: %s"", url, json_data) r = requests.put(url, data=json_data, headers=json_content) self.logger.debug(""Request returned status code %d"", r.status_code) if r.status_code is 200: return self.model.from_dict(r.json()) else: last_stats_code = r.status_code last_body = r.text self.logger.warn(""Slick status code: %d"", r.status_code) self.logger.warn(""Body of what slick returned: %s"", r.text) except BaseException as error: self.logger.warn(""Received exception while connecting to slick at %s"", url, exc_info=sys.exc_info()) traceback.print_exc() raise SlickCommunicationError( ""Tried 3 times to request data from slick at url %s without a successful status code. Last status code: %d, body: %s"", url, last_stats_code, last_body)" 4187,"def create(self): """"""Create the specified object (perform a POST to the api). You specify the object as a parameter, using the parent object as a function. Example: proj = Project() ... 
add project data here proj = slick.projects(proj).create() """""" obj = self.data self.data = None url = self.getUrl() # hopefully when we discover what problems exist in slick to require this, we can take the loop out for retry in range(3): try: json_data = obj.to_json() self.logger.debug(""Making request to slick at url %s, with data: %s"", url, json_data) r = requests.post(url, data=json_data, headers=json_content) self.logger.debug(""Request returned status code %d"", r.status_code) if r.status_code is 200: return self.model.from_dict(r.json()) else: self.logger.debug(""Body of what slick returned: %s"", r.text) except BaseException as error: self.logger.warn(""Received exception while connecting to slick at %s"", url, exc_info=sys.exc_info()) raise SlickCommunicationError( ""Tried 3 times to request data from slick at url %s without a successful status code."", url)" 4188,"def remove(self): """"""Remove or delete the specified object from slick. You specify which one you want by providing the id as a parameter to the parent object, using it as a function. Example: slick.projects(""4fd8cd95e4b0ee7ba54b9885"").remove() """""" url = self.getUrl() # hopefully when we discover what problems exist in slick to require this, we can take the loop out for retry in range(3): try: self.logger.debug(""Making DELETE request to slick at url %s"", url) r = requests.delete(url) self.logger.debug(""Request returned status code %d"", r.status_code) if r.status_code is 200: return None else: self.logger.debug(""Body of what slick returned: %s"", r.text) except BaseException as error: self.logger.warn(""Received exception while connecting to slick at %s"", url, exc_info=sys.exc_info()) raise SlickCommunicationError( ""Tried 3 times to request data from slick at url %s without a successful status code."", url)" 4189,"def upload_local_file(self, local_file_path, file_obj=None): """"""Create a Stored File and upload it's data. This is a one part do it all type method. Here is what it does: 1. ""Discover"" information about the file (mime-type, size) 2. Create the stored file object in slick 3. Upload (chunked) all the data in the local file 4. re-fetch the stored file object from slick, and return it """""" if file_obj is None and not os.path.exists(local_file_path): return storedfile = StoredFile() storedfile.mimetype = mimetypes.guess_type(local_file_path)[0] storedfile.filename = os.path.basename(local_file_path) if file_obj is None: storedfile.length = os.stat(local_file_path).st_size else: file_obj.seek(0,os.SEEK_END) storedfile.length = file_obj.tell() file_obj.seek(0) storedfile = self(storedfile).create() md5 = hashlib.md5() url = self(storedfile).getUrl() + ""/addchunk"" if file_obj is None: with open(local_file_path, 'rb') as filecontents: upload_chunks(url, storedfile, filecontents) else: upload_chunks(url, storedfile, file_obj) return self(storedfile).update()" 4190,"def lookup_cc_partner(nu_pid): """"""Lookup the charge current partner Takes as an input neutrino nu_pid is a PDG code, then returns the charged lepton partner. So 12 (nu_e) returns 11. 
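A doctest-style sketch (the values follow directly from the code below):

>>> lookup_cc_partner(12)
11
>>> lookup_cc_partner(-14)
-13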
Keeps sign """""" neutrino_type = math.fabs(nu_pid) assert neutrino_type in [12, 14, 16] cc_partner = neutrino_type - 1 # get e, mu, tau cc_partner = math.copysign( cc_partner, nu_pid) # make sure matter/antimatter cc_partner = int(cc_partner) # convert to int return cc_partner" 4191,"def _set_vector_value(self, var_name, value): """"""Private"""""" self.particle[var_name] = convert_3vector_to_dict(value) for coord in self.particle[var_name].keys(): new_value = Distribution(self.particle[var_name][coord]) self.particle[var_name][coord] = new_value" 4192,"def _get_next_events(self, material): """"""Get next events from Genie ROOT file Looks over the generator"""""" f = ROOT.TFile(self.filenames[material]) try: t = f.Get('gst') n = t.GetEntries() except: self.log.critical('Could not open the ROOT file with Genie events') raise for i in range(n): t.GetEntry(i) next_events = [] position = convert_3vector_to_dict([self.particle['position']['x'].get_cache(), self.particle['position']['y'].get_cache(), self.particle['position']['z'].get_cache()]) lepton_event = {} if t.El ** 2 - (t.pxl ** 2 + t.pyl ** 2 + t.pzl ** 2) < 1e-7: lepton_event['pid'] = self.particle[ 'pid'].get() # Either NC or ES else: lepton_event['pid'] = lookup_cc_partner( self.particle['pid'].get()) # units: GeV -> MeV momentum_vector = [1000 * x for x in [t.pxl, t.pyl, t.pzl]] lepton_event['momentum'] = convert_3vector_to_dict(momentum_vector) lepton_event['position'] = position next_events.append(lepton_event) for j in range(t.nf): # nf, number final hadronic states hadron_event = {} hadron_event['pid'] = t.pdgf[j] hadron_event['position'] = position # units: GeV -> MeV momentum_vector = [1000 * x for x in [t.pxf[j], t.pyf[j], t.pzf[j]]] hadron_event['momentum'] = convert_3vector_to_dict(momentum_vector) next_events.append(hadron_event) event_type = {} event_type['vertex'] = position to_save = {} # maps our names to Genie gst names to_save['incoming_neutrino'] = 'neu' to_save['neutrino_energy'] = 'Ev' to_save['target_material'] = 'tgt' for key, value in to_save.iteritems(): self.log.info('%s : %s' % (key, str(t.__getattr__(value)))) event_type[key] = t.__getattr__(value) self.log.debug('Event type:') for my_type in ['qel', 'res', 'dis', 'coh', 'dfr', 'imd', 'nuel', 'em']: if t.__getattr__(my_type) == 1: self.log.debug('\t%s', my_type) event_type[my_type] = t.__getattr__(my_type) self.log.debug('Propogator:') for prop in ['nc', 'cc']: if t.__getattr__(prop) == 1: self.log.debug('\t%s', prop) event_type[prop] = t.__getattr__(prop) yield next_events, event_type f.Close() os.remove(self.filenames[material])" 4193,"def block_comment(solver, start, end): '''embedable block comment''' text, pos = solver.parse_state length = len(text) startlen = len(start) endlen = len(end) if pos==length: return if not text[pos:].startswith(start): return level = 1 p = pos+1 while p`. If the generated key exists or memcache cannot store it, a :class:`KeyInsertError ` is raised (or a :class:`TokenInsertError ` if a token exists or cannot be stored). """""" key, token, formatted_key, formatted_token = self.next_formatted_pair() if self.has_key(key): raise KeyInsertError(key) if self.has_token(token): raise TokenInsertError(token) # Memcache is down or read-only if not self._mc.add(formatted_key, (val, token)): raise KeyInsertError(key, 'key could not be stored') if not self._mc.add(formatted_token, key): raise TokenInsertError(token, 'token could not be stored') return Pair(key, token)" 4196,"def pip_install(*args): ''' Run pip install ... 
Explicitly ignores user's config. ''' pip_cmd = os.path.join(os.path.dirname(sys.executable), 'pip') with set_env('PIP_CONFIG_FILE', os.devnull): cmd = [pip_cmd, 'install'] + list(args) print_command(cmd) subprocess.call(cmd, stdout=sys.stdout, stderr=sys.stderr)" 4197,"def indent_text(text, nb_tabs=0, tab_str="" "", linebreak_input=""\n"", linebreak_output=""\n"", wrap=False): r""""""Add tabs to each line of text. :param text: the text to indent :param nb_tabs: number of tabs to add :param tab_str: type of tab (could be, for example ""\t"", default: 2 spaces :param linebreak_input: linebreak on input :param linebreak_output: linebreak on output :param wrap: wethever to apply smart text wrapping. (by means of wrap_text_in_a_box) :return: indented text as string """""" if not wrap: lines = text.split(linebreak_input) tabs = nb_tabs * tab_str output = """" for line in lines: output += tabs + line + linebreak_output return output else: return wrap_text_in_a_box(body=text, style='no_border', tab_str=tab_str, tab_num=nb_tabs)" 4198,"def wrap_text_in_a_box(body='', title='', style='double_star', **args): r""""""Return a nicely formatted text box. e.g. ****************** ** title ** **--------------** ** body ** ****************** Indentation and newline are respected. :param body: the main text :param title: an optional title :param style: the name of one of the style in CFG_WRAP_STYLES. By default the double_star style is used. You can further tune the desired style by setting various optional parameters: :param horiz_sep: a string that is repeated in order to produce a separator row between the title and the body (if needed) or a tuple of three characters in the form (l, c, r) :param max_col: the maximum number of coulmns used by the box (including indentation) :param min_col: the symmetrical minimum number of columns :param tab_str: a string to represent indentation :param tab_num: the number of leveles of indentations :param border: a tuple of 8 element in the form (tl, t, tr, l, r, bl, b, br) of strings that represent the different corners and sides of the box :param prefix: a prefix string added before the box :param suffix: a suffix string added after the box :param break_long: wethever to break long words in order to respect max_col :param force_horiz: True in order to print the horizontal line even when there is no title e.g.: print wrap_text_in_a_box(title='prova', body=' 123 prova.\n Vediamo come si indenta', horiz_sep='-', style='no_border', max_col=20, tab_num=1) prova ---------------- 123 prova. 
Vediamo come si indenta """""" def _wrap_row(row, max_col, break_long): """"""Wrap a single row."""""" spaces = _RE_BEGINNING_SPACES.match(row).group() row = row[len(spaces):] spaces = spaces.expandtabs() return textwrap.wrap(row, initial_indent=spaces, subsequent_indent=spaces, width=max_col, break_long_words=break_long) def _clean_newlines(text): text = _RE_LONELY_NEWLINES.sub(' \n', text) return _RE_NEWLINES_CLEANER.sub(lambda x: x.group()[:-1], text) body = unicode(body, 'utf-8') title = unicode(title, 'utf-8') astyle = dict(CFG_WRAP_TEXT_IN_A_BOX_STYLES['__DEFAULT']) if style in CFG_WRAP_TEXT_IN_A_BOX_STYLES: astyle.update(CFG_WRAP_TEXT_IN_A_BOX_STYLES[style]) astyle.update(args) horiz_sep = astyle['horiz_sep'] border = astyle['border'] tab_str = astyle['tab_str'] * astyle['tab_num'] max_col = max(astyle['max_col'] - len(border[3]) - len(border[4]) - len(tab_str), 1) min_col = astyle['min_col'] prefix = astyle['prefix'] suffix = astyle['suffix'] force_horiz = astyle['force_horiz'] break_long = astyle['break_long'] body = _clean_newlines(body) tmp_rows = [_wrap_row(row, max_col, break_long) for row in body.split('\n')] body_rows = [] for rows in tmp_rows: if rows: body_rows += rows else: body_rows.append('') if not ''.join(body_rows).strip(): # Concrete empty body body_rows = [] title = _clean_newlines(title) tmp_rows = [_wrap_row(row, max_col, break_long) for row in title.split('\n')] title_rows = [] for rows in tmp_rows: if rows: title_rows += rows else: title_rows.append('') if not ''.join(title_rows).strip(): # Concrete empty title title_rows = [] max_col = max([len(row) for row in body_rows + title_rows] + [min_col]) mid_top_border_len = max_col + \ len(border[3]) + len(border[4]) - len(border[0]) - len(border[2]) mid_bottom_border_len = max_col + \ len(border[3]) + len(border[4]) - len(border[5]) - len(border[7]) top_border = border[0] + \ (border[1] * mid_top_border_len)[:mid_top_border_len] + border[2] bottom_border = border[5] + \ (border[6] * mid_bottom_border_len)[:mid_bottom_border_len] + \ border[7] if isinstance(horiz_sep, tuple) and len(horiz_sep) == 3: horiz_line = horiz_sep[0] + \ (horiz_sep[1] * (max_col + 2))[:(max_col + 2)] + horiz_sep[2] else: horiz_line = border[3] + (horiz_sep * max_col)[:max_col] + border[4] title_rows = [tab_str + border[3] + row + ' ' * (max_col - len(row)) + border[4] for row in title_rows] body_rows = [tab_str + border[3] + row + ' ' * (max_col - len(row)) + border[4] for row in body_rows] ret = [] if top_border: ret += [tab_str + top_border] ret += title_rows if title_rows or force_horiz: ret += [tab_str + horiz_line] ret += body_rows if bottom_border: ret += [tab_str + bottom_border] return (prefix + '\n'.join(ret) + suffix).encode('utf-8')" 4199,"def wait_for_user(msg=""""): """""" Print MSG and a confirmation prompt. Waiting for user's confirmation, unless silent '--yes-i-know' command line option was used, in which case the function returns immediately without printing anything. """""" if '--yes-i-know' in sys.argv: return print(msg) try: answer = raw_input(""Please confirm by typing 'Yes, I know!': "") except KeyboardInterrupt: print() answer = '' if answer != 'Yes, I know!': sys.stderr.write(""ERROR: Aborted.\n"") sys.exit(1) return" 4200,"def guess_minimum_encoding(text, charsets=('ascii', 'latin1', 'utf8')): """"""Try to guess the minimum charset that is able to represent. Try to guess the minimum charset that is able to represent the given text using the provided charsets. text is supposed to be encoded in utf8. 
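A doctest-style sketch for plain ASCII input (behaviour read from the code below, Python 2 semantics):

>>> guess_minimum_encoding('abc')
('abc', 'ascii')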
Returns (encoded_text, charset) where charset is the first charset in the sequence being able to encode text. Returns (text_in_utf8, 'utf8') in case no charset is able to encode text. @note: If the input text is not in strict UTF-8, then replace any non-UTF-8 chars inside it. """""" text_in_unicode = text.decode('utf8', 'replace') for charset in charsets: try: return (text_in_unicode.encode(charset), charset) except (UnicodeEncodeError, UnicodeDecodeError): pass return (text_in_unicode.encode('utf8'), 'utf8')" 4201,"def encode_for_xml(text, wash=False, xml_version='1.0', quote=False): """"""Encode special characters in a text so that it would be XML-compliant. :param text: text to encode :return: an encoded text """""" text = text.replace('&', '&') text = text.replace('<', '<') if quote: text = text.replace('""', '"') if wash: text = wash_for_xml(text, xml_version=xml_version) return text" 4202,"def wash_for_xml(text, xml_version='1.0'): """"""Remove any character which isn't a allowed characters for XML. The allowed characters depends on the version of XML. - XML 1.0: - XML 1.1: :param text: input string to wash. :param xml_version: version of the XML for which we wash the input. Value for this parameter can be '1.0' or '1.1' """""" if xml_version == '1.0': return RE_ALLOWED_XML_1_0_CHARS.sub( '', unicode(text, 'utf-8')).encode('utf-8') else: return RE_ALLOWED_XML_1_1_CHARS.sub( '', unicode(text, 'utf-8')).encode('utf-8')" 4203,"def wash_for_utf8(text, correct=True): """"""Return UTF-8 encoded binary string with incorrect characters washed away. :param text: input string to wash (can be either a binary or Unicode string) :param correct: whether to correct bad characters or throw exception """""" if isinstance(text, unicode): return text.encode('utf-8') errors = ""ignore"" if correct else ""strict"" return text.decode(""utf-8"", errors).encode(""utf-8"", errors)" 4204,"def nice_number(number, thousands_separator=',', max_ndigits_after_dot=None): """"""Return nicely printed number NUMBER in language LN. Return nicely printed number NUMBER in language LN using given THOUSANDS_SEPARATOR character. If max_ndigits_after_dot is specified and the number is float, the number is rounded by taking in consideration up to max_ndigits_after_dot digit after the dot. This version does not pay attention to locale. See tmpl_nice_number_via_locale(). """""" if isinstance(number, float): if max_ndigits_after_dot is not None: number = round(number, max_ndigits_after_dot) int_part, frac_part = str(number).split('.') return '%s.%s' % (nice_number(int(int_part), thousands_separator), frac_part) else: chars_in = list(str(number)) number = len(chars_in) chars_out = [] for i in range(0, number): if i % 3 == 0 and i != 0: chars_out.append(thousands_separator) chars_out.append(chars_in[number - i - 1]) chars_out.reverse() return ''.join(chars_out)" 4205,"def nice_size(size): """"""Nice size. :param size: the size. :type size: int :return: a nicely printed size. :rtype: string """""" unit = 'B' if size > 1024: size /= 1024.0 unit = 'KB' if size > 1024: size /= 1024.0 unit = 'MB' if size > 1024: size /= 1024.0 unit = 'GB' return '%s %s' % (nice_number(size, max_ndigits_after_dot=2), unit)" 4206,"def remove_line_breaks(text): """"""Remove line breaks from input. Including unicode 'line separator', 'paragraph separator', and 'next line' characters. 
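Example (a doctest-style sketch; plain ASCII input assumed):

>>> remove_line_breaks('foo\nbar')
'foobar'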
"""""" return unicode(text, 'utf-8').replace('\f', '').replace('\n', '') \ .replace('\r', '').replace(u'\xe2\x80\xa8', '') \ .replace(u'\xe2\x80\xa9', '').replace(u'\xc2\x85', '') \ .encode('utf-8')" 4207,"def decode_to_unicode(text, default_encoding='utf-8'): """"""Decode input text into Unicode representation. Decode input text into Unicode representation by first using the default encoding utf-8. If the operation fails, it detects the type of encoding used in the given text. For optimal result, it is recommended that the 'chardet' module is installed. NOTE: Beware that this might be slow for *very* large strings. If chardet detection fails, it will try to decode the string using the basic detection function guess_minimum_encoding(). Also, bear in mind that it is impossible to detect the correct encoding at all times, other then taking educated guesses. With that said, this function will always return some decoded Unicode string, however the data returned may not be the same as original data in some cases. :param text: the text to decode :type text: string :param default_encoding: the character encoding to use. Optional. :type default_encoding: string :return: input text as Unicode :rtype: string """""" if not text: return """" try: return text.decode(default_encoding) except (UnicodeError, LookupError): pass detected_encoding = None if CHARDET_AVAILABLE: # We can use chardet to perform detection res = chardet.detect(text) if res['confidence'] >= 0.8: detected_encoding = res['encoding'] if detected_encoding is None: # No chardet detection, try to make a basic guess dummy, detected_encoding = guess_minimum_encoding(text) return text.decode(detected_encoding)" 4208,"def to_unicode(text): """"""Convert to unicode."""""" if isinstance(text, unicode): return text if isinstance(text, six.string_types): return decode_to_unicode(text) return unicode(text)" 4209,"def translate_latex2unicode(text, kb_file=None): """"""Translate latex text to unicode. This function will take given text, presumably containing LaTeX symbols, and attempts to translate it to Unicode using the given or default KB translation table located under CFG_ETCDIR/bibconvert/KB/latex-to-unicode.kb. The translated Unicode string will then be returned. If the translation table and compiled regular expression object is not previously generated in the current session, they will be. :param text: a text presumably containing LaTeX symbols. :type text: string :param kb_file: full path to file containing latex2unicode translations. Defaults to CFG_ETCDIR/bibconvert/KB/latex-to-unicode.kb :type kb_file: string :return: Unicode representation of translated text :rtype: unicode """""" if kb_file is None: kb_file = get_kb_filename() # First decode input text to Unicode try: text = decode_to_unicode(text) except UnicodeDecodeError: text = unicode(wash_for_utf8(text)) # Load translation table, if required if CFG_LATEX_UNICODE_TRANSLATION_CONST == {}: _load_latex2unicode_constants(kb_file) # Find all matches and replace text for match in CFG_LATEX_UNICODE_TRANSLATION_CONST['regexp_obj'] \ .finditer(text): # If LaTeX style markers {, } and $ are before or after the # matching text, it will replace those as well text = re.sub(""[\{\$]?%s[\}\$]?"" % (re.escape(match.group()),), CFG_LATEX_UNICODE_TRANSLATION_CONST[ 'table'][match.group()], text) # Return Unicode representation of translated text return text" 4210,"def _load_latex2unicode_constants(kb_file=None): """"""Load LaTeX2Unicode translation table dictionary. 
Load LaTeX2Unicode translation table dictionary and regular expression object from KB to a global dictionary. :param kb_file: full path to file containing latex2unicode translations. Defaults to CFG_ETCDIR/bibconvert/KB/latex-to-unicode.kb :type kb_file: string :return: dict of type: {'regexp_obj': regexp match object, 'table': dict of LaTeX -> Unicode mappings} :rtype: dict """""" if kb_file is None: kb_file = get_kb_filename() try: data = open(kb_file) except IOError: # File not found or similar sys.stderr.write( ""\nCould not open LaTeX to Unicode KB file. "" ""Aborting translation.\n"") return CFG_LATEX_UNICODE_TRANSLATION_CONST latex_symbols = [] translation_table = {} for line in data: # The file has form of latex|--|utf-8. First decode to Unicode. line = line.decode('utf-8') mapping = line.split('|--|') translation_table[mapping[0].rstrip('\n')] = mapping[1].rstrip('\n') latex_symbols.append(re.escape(mapping[0].rstrip('\n'))) data.close() CFG_LATEX_UNICODE_TRANSLATION_CONST[ 'regexp_obj'] = re.compile(""|"".join(latex_symbols)) CFG_LATEX_UNICODE_TRANSLATION_CONST['table'] = translation_table" 4211,"def translate_to_ascii(values): r""""""Transliterate the string into ascii representation. Transliterate the string contents of the given sequence into ascii representation. Returns a sequence with the modified values if the module 'unidecode' is available. Otherwise it will fall back to the inferior strip_accents function. For example: H\xc3\xb6hne becomes Hohne. Note: Passed strings are returned as a list. :param values: sequence of strings to transform :type values: sequence :return: sequence with values transformed to ascii :rtype: sequence """""" if not values and not isinstance(values, str): return values if isinstance(values, str): values = [values] for index, value in enumerate(values): if not value: continue unicode_text = decode_to_unicode(value) if u""[?]"" in unicode_text: decoded_text = [] for unicode_char in unicode_text: decoded_char = unidecode(unicode_char) # Skip unrecognized characters if decoded_char != ""[?]"": decoded_text.append(decoded_char) ascii_text = ''.join(decoded_text).encode('ascii') else: ascii_text = unidecode(unicode_text).replace( u""[?]"", u"""").encode('ascii') values[index] = ascii_text return values" 4212,"def xml_entities_to_utf8(text, skip=('lt', 'gt', 'amp')): """"""Translate HTML or XML character references to UTF-8. Removes HTML or XML character references and entities from a text string and replaces them with their UTF-8 representation, if possible. :param text: The HTML (or XML) source text. :type text: string :param skip: list of entity names to skip when transforming. :type skip: iterable :return: The plain text, as a Unicode string, if necessary. @author: Based on http://effbot.org/zone/re-sub.htm#unescape-html """""" def fixup(m): text = m.group(0) if text[:2] == ""&#"": # character reference try: if text[:3] == ""&#x"": return unichr(int(text[3:-1], 16)).encode(""utf-8"") else: return unichr(int(text[2:-1])).encode(""utf-8"") except ValueError: pass else: # named entity if text[1:-1] not in skip: try: text = unichr( html_entities.name2codepoint[text[1:-1]]) \ .encode(""utf-8"") except KeyError: pass return text # leave as is return re.sub(""&#?\w+;"", fixup, text)" 4213,"def strip_accents(x): u""""""Strip accents in the input phrase X. Strip accents in the input phrase X (assumed in UTF-8) by replacing accented characters with their unaccented cousins (e.g. é by e). :param x: the input phrase to strip. 
:type x: string :return: Return such a stripped X. """""" x = re_latex_lowercase_a.sub(""a"", x) x = re_latex_lowercase_ae.sub(""ae"", x) x = re_latex_lowercase_oe.sub(""oe"", x) x = re_latex_lowercase_e.sub(""e"", x) x = re_latex_lowercase_i.sub(""i"", x) x = re_latex_lowercase_o.sub(""o"", x) x = re_latex_lowercase_u.sub(""u"", x) x = re_latex_lowercase_y.sub(""x"", x) x = re_latex_lowercase_c.sub(""c"", x) x = re_latex_lowercase_n.sub(""n"", x) x = re_latex_uppercase_a.sub(""A"", x) x = re_latex_uppercase_ae.sub(""AE"", x) x = re_latex_uppercase_oe.sub(""OE"", x) x = re_latex_uppercase_e.sub(""E"", x) x = re_latex_uppercase_i.sub(""I"", x) x = re_latex_uppercase_o.sub(""O"", x) x = re_latex_uppercase_u.sub(""U"", x) x = re_latex_uppercase_y.sub(""Y"", x) x = re_latex_uppercase_c.sub(""C"", x) x = re_latex_uppercase_n.sub(""N"", x) # convert input into Unicode string: try: y = unicode(x, ""utf-8"") except Exception: return x # something went wrong, probably the input wasn't UTF-8 # asciify Latin-1 lowercase characters: y = re_unicode_lowercase_a.sub(""a"", y) y = re_unicode_lowercase_ae.sub(""ae"", y) y = re_unicode_lowercase_oe.sub(""oe"", y) y = re_unicode_lowercase_e.sub(""e"", y) y = re_unicode_lowercase_i.sub(""i"", y) y = re_unicode_lowercase_o.sub(""o"", y) y = re_unicode_lowercase_u.sub(""u"", y) y = re_unicode_lowercase_y.sub(""y"", y) y = re_unicode_lowercase_c.sub(""c"", y) y = re_unicode_lowercase_n.sub(""n"", y) y = re_unicode_lowercase_ss.sub(""ss"", y) # asciify Latin-1 uppercase characters: y = re_unicode_uppercase_a.sub(""A"", y) y = re_unicode_uppercase_ae.sub(""AE"", y) y = re_unicode_uppercase_oe.sub(""OE"", y) y = re_unicode_uppercase_e.sub(""E"", y) y = re_unicode_uppercase_i.sub(""I"", y) y = re_unicode_uppercase_o.sub(""O"", y) y = re_unicode_uppercase_u.sub(""U"", y) y = re_unicode_uppercase_y.sub(""Y"", y) y = re_unicode_uppercase_c.sub(""C"", y) y = re_unicode_uppercase_n.sub(""N"", y) # return UTF-8 representation of the Unicode string: return y.encode(""utf-8"")" 4214,"def slugify(text, delim=u'-'): """"""Generate an ASCII-only slug."""""" result = [] for word in _punct_re.split(text.lower()): result.extend(unidecode(word).split()) return unicode(delim.join(result))" 4215,"def show_diff(original, modified, prefix='', suffix='', prefix_unchanged=' ', suffix_unchanged='', prefix_removed='-', suffix_removed='', prefix_added='+', suffix_added=''): """"""Return the diff view between original and modified strings. 
Function checks both arguments line by line and returns a string with a: - prefix_unchanged when line is common to both sequences - prefix_removed when line is unique to sequence 1 - prefix_added when line is unique to sequence 2 and a corresponding suffix in each line :param original: base string :param modified: changed string :param prefix: prefix of the output string :param suffix: suffix of the output string :param prefix_unchanged: prefix of the unchanged line :param suffix_unchanged: suffix of the unchanged line :param prefix_removed: prefix of the removed line :param suffix_removed: suffix of the removed line :param prefix_added: prefix of the added line :param suffix_added: suffix of the added line :return: string with the comparison of the records :rtype: string """""" import difflib differ = difflib.Differ() result = [prefix] for line in differ.compare(modified.splitlines(), original.splitlines()): if line[0] == ' ': # Mark as unchanged result.append( prefix_unchanged + line[2:].strip() + suffix_unchanged) elif line[0] == '-': # Mark as removed result.append(prefix_removed + line[2:].strip() + suffix_removed) elif line[0] == '+': # Mark as added/modified result.append(prefix_added + line[2:].strip() + suffix_added) result.append(suffix) return '\n'.join(result)" 4216,"def escape_latex(text): r""""""Escape characters of given text. This function takes the given text and escapes characters that have a special meaning in LaTeX: # $ % ^ & _ { } ~ \ """""" text = unicode(text.decode('utf-8')) CHARS = { '&': r'\&', '%': r'\%', '$': r'\$', '#': r'\#', '_': r'\_', '{': r'\{', '}': r'\}', '~': r'\~{}', '^': r'\^{}', '\\': r'\textbackslash{}', } escaped = """".join([CHARS.get(char, char) for char in text]) return escaped.encode('utf-8')" 4217,"def _copy_attr(self, module, varname, cls, attrname=None): """""" Copies attribute from module object to self. Raises if object not of expected class Args: module: module object varname: variable name cls: expected class of variable attrname: attribute name of self. Falls back to varname """""" if not hasattr(module, varname): raise RuntimeError(""Variable '{}' not found"".format(varname)) obj = getattr(module, varname) if not isinstance(obj, cls): raise RuntimeError( ""Expecting fobj to be a {}, not a '{}'"".format(cls.__name__, obj.__class__.__name__)) if attrname is None: attrname = varname setattr(self, attrname, obj)" 4218,"def __check_to_permit(self, entry_type, entry_filename): """"""Applying the filter rules."""""" rules = self.__filter_rules[entry_type] # Should explicitly include? for pattern in rules[fss.constants.FILTER_INCLUDE]: if fnmatch.fnmatch(entry_filename, pattern): _LOGGER_FILTER.debug(""Entry explicitly INCLUDED: [%s] [%s] "" ""[%s]"", entry_type, pattern, entry_filename) return True # Should explicitly exclude? for pattern in rules[fss.constants.FILTER_EXCLUDE]: if fnmatch.fnmatch(entry_filename, pattern): _LOGGER_FILTER.debug(""Entry explicitly EXCLUDED: [%s] [%s] "" ""[%s]"", entry_type, pattern, entry_filename) return False # Implicitly include. _LOGGER_FILTER.debug(""Entry IMPLICITLY included: [%s] [%s]"", entry_type, entry_filename) return True" 4219,"def get_next_item(self): """"""Override the default functionality to not only try to pull things off the external input-queue, but to first try to pull things from a local input-queue that we'll primarily depend on. We'll only use the external input-queue to get the initial root-path (we could reuse it to do the recursion, but it's more costly and prone to delay). 
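Note that ``queue.Empty`` raised by the external input-queue propagates to the caller; a rough usage sketch (the ``worker`` name is hypothetical):

    try:
        item = worker.get_next_item()
    except queue.Empty:
        item = None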
"""""" # Try to pop something off the local input-queue. try: return self.__local_input_q.get(block=False) except queue.Empty: pass # Try to pop something off the external input-queue. return self.input_q.get(block=False)" 4220,"def index_nearest(array, value): """""" Finds index of nearest value in array. Args: array: numpy array value: Returns: int http://stackoverflow.com/questions/2566412/find-nearest-value-in-numpy-array """""" idx = (np.abs(array-value)).argmin() return idx" 4221,"def BSearch(a, x, lo=0, hi=None): """"""Returns index of x in a, or -1 if x not in a. Arguments: a -- ordered numeric sequence x -- element to search within a lo -- lowest index to consider in search* hi -- highest index to consider in search* *bisect.bisect_left capability that we don't need to loose."""""" if len(a) == 0: return -1 hi = hi if hi is not None else len(a) pos = bisect_left(a, x, lo, hi) return pos if pos != hi and a[pos] == x else -1" 4222,"def BSearchRound(a, x, lo=0, hi=None): """"""Returns index of a that is closest to x. Arguments: a -- ordered numeric sequence x -- element to search within a lo -- lowest index to consider in search* hi -- highest index to consider in search* *bisect.bisect_left capability that we don't need to loose."""""" if len(a) == 0: return -1 hi = hi if hi is not None else len(a) pos = bisect_left(a, x, lo, hi) if pos >= hi: return hi - 1 elif a[pos] == x or pos == lo: return pos else: return pos - 1 if x - a[pos - 1] <= a[pos] - x else pos" 4223,"def BSearchCeil(a, x, lo=0, hi=None): """"""Returns lowest i such as a[i] >= x, or -1 if x > all elements in a So, if x is in between two elements in a, this function will return the index of the higher element, hence ""Ceil"". Arguments: a -- ordered numeric sequence x -- element to search within a lo -- lowest index to consider in search hi -- highest index to consider in search"""""" if len(a) == 0: return -1 hi = hi if hi is not None else len(a) pos = bisect_left(a, x, lo, hi) return pos if pos < hi else -1" 4224,"def BSearchFloor(a, x, lo=0, hi=None): """"""Returns highest i such as a[i] <= x, or -1 if x < all elements in a So, if x is in between two elements in a, this function will return the index of the lower element, hence ""Floor"". Arguments: a -- ordered numeric sequence x -- element to search within a lo -- lowest index to consider in search hi -- highest index to consider in search"""""" if len(a) == 0: return -1 hi = hi if hi is not None else len(a) pos = bisect_left(a, x, lo, hi) return pos - 1 if pos >= hi \ else (pos if x == a[pos] else (pos - 1 if pos > lo else -1))" 4225,"def FindNotNaNBackwards(x, i): """"""Returns last position (starting at i backwards) which is not NaN, or -1."""""" while i >= 0: if not np.isnan(x[i]): return i i -= 1 return -1" 4226,"def census(self, *scales): """"""Current World Census data. By default returns data on today's featured World Census scale, use arguments to get results on specific scales. In order to request data on all scales at once you can do ``x.census(*range(81))``. Parameters ---------- scales : int World Census scales, integers between 0 and 85 inclusive. 
Returns ------- an :class:`ApiQuery` of a list of :class:`CensusScaleCurrent` """""" params = {'mode': 'score+rank+rrank+prank+prrank'} if scales: params['scale'] = '+'.join(str(x) for x in scales) @api_query('census', **params) async def result(_, root): return [ CensusScaleCurrent(scale_elem) for scale_elem in root.find('CENSUS') ] return result(self)" 4227,"def censushistory(self, *scales): """"""Historical World Census data. Was split into its own method for the sake of simplicity. By default returns data on today's featured World Census scale, use arguments to get results on specific scales. In order to request data on all scales at once you can do ``x.censushistory(*range(81))``. Returns data for the entire length of history NationStates stores. There is no way to override that. Parameters ---------- scales : int World Census scales, integers between 0 and 85 inclusive. Returns ------- an :class:`ApiQuery` of a list of :class:`CensusScaleHistory` """""" params = {'mode': 'history'} if scales: params['scale'] = '+'.join(str(x) for x in scales) @api_query('census', **params) async def result(_, root): return [ CensusScaleHistory(scale_elem) for scale_elem in root.find('CENSUS') ] return result(self)" 4228,"async def censusranks(self, scale): """"""Iterate through nations ranked on the World Census scale. If the ranks change while you interate over them, they may be inconsistent. Parameters ---------- scale : int A World Census scale, an integer between 0 and 85 inclusive. Returns ------- asynchronous iterator of :class:`CensusRank` """""" order = count(1) for offset in count(1, 20): census_ranks = await self._get_censusranks( scale=scale, start=offset) for census_rank in census_ranks: assert census_rank.rank == next(order) yield census_rank if len(census_ranks) < 20: break" 4229,"def loads(astring): """"""Decompress and deserialize string into Python object via marshal."""""" try: return marshal.loads(zlib.decompress(astring)) except zlib.error as e: raise SerializerError( 'Cannot decompress object (""{}"")'.format(str(e)) ) except Exception as e: # marshal module does not provide a proper Exception model raise SerializerError( 'Cannot restore object (""{}"")'.format(str(e)) )" 4230,"def loads(astring): """"""Decompress and deserialize string into Python object via pickle."""""" try: return pickle.loads(zlib.decompress(astring)) except zlib.error as e: raise SerializerError( 'Cannot decompress object (""{}"")'.format(str(e)) ) except pickle.UnpicklingError as e: raise SerializerError( 'Cannot restore object (""{}"")'.format(str(e)) )" 4231,"def loads(astring): """"""Decompress and deserialize string into a Python object via pickle."""""" try: return pickle.loads(lzma.decompress(astring)) except lzma.LZMAError as e: raise SerializerError( 'Cannot decompress object (""{}"")'.format(str(e)) ) except pickle.UnpicklingError as e: raise SerializerError( 'Cannot restore object (""{}"")'.format(str(e)) )" 4232,"def search(self, path_expression, mode=UXP, values=None, ifunc=lambda x: x): """""" find matches for the given path expression in the data :param path_expression: path tuple or string :return: """""" # keys = path_expression if isinstance(path_expression, six.string_types) else path_expression[-1] path_and_value_list = iterutils.search( self.data, path_expression=path_expression, required_values=values, exact=(mode[1] == ""x"")) return self.__return_value(path_and_value_list, mode, ifunc)" 4233,"def __visit_index_path(self, src, p, k, v): """""" Called during processing of source data 
"""""" cp = p + (k,) self.path_index[cp] = self.indexed_obj_factory(p, k, v, self.path_index.get(cp)) if cp in self.path_index: # if self.path_index[cp].assert_val_equals(v): # raise ValueError('unexpected value change at path_index[{}]'.format(cp)) self.path_index[cp].add_src(src) else: self.path_index[cp] = Flobject(val=v, path=cp, srcs=set([src]))" 4234,"def get_default_data_path(*args, module=None, class_=None, flag_raise=True): """""" Returns path to default data directory Arguments 'module' and 'class' give the chance to return path relative to package other than f311.filetypes Args: module: Python module object. It is expected that this module has a sub-subdirectory named 'data/default' class_: Python class object to extract path information from. If this argument is used, it will be expected that the class ""root"" package will have a sub-subdirectory named 'data/default'. Argument 'class_' **has precedence over argument 'module'** flag_raise: raises error if file is not found. This can be turned off for whichever purpose """""" if module is None: module = __get_filetypes_module() if class_ is not None: pkgname = class_.__module__ mseq = pkgname.split(""."") if len(mseq) < 2 or mseq[1] != ""filetypes"": raise ValueError(""Invalid module name for class '{}': '{}' "" ""(must be '(...).filetypes[.(...)]')"".format(class_.__name__, pkgname)) # gets ""root"" module object # For example, if pkgname is ""pyfant.filetypes.filemain"", module below will be # the ""pyfant"" module object module = sys.modules[mseq[0]] module_path = os.path.split(module.__file__)[0] p = os.path.abspath(os.path.join(module_path, ""data"", ""default"", *args)) if flag_raise: if not os.path.isfile(p): raise RuntimeError(""Path not found '{}'"".format(p)) return p" 4235,"def copy_default_data_file(filename, module=None): """"""Copies file from default data directory to local directory."""""" if module is None: module = __get_filetypes_module() fullpath = get_default_data_path(filename, module=module) shutil.copy(fullpath, ""."")" 4236,"def _find_display(self): """""" Find a usable display, which doesn't have an existing Xvfb file """""" self.display_num = 2 while os.path.isdir(XVFB_PATH % (self.display_num,)): self.display_num += 1" 4237,"def comments(recid): """"""Display comments."""""" from invenio_access.local_config import VIEWRESTRCOLL from invenio_access.mailcookie import \ mail_cookie_create_authorize_action from .api import check_user_can_view_comments auth_code, auth_msg = check_user_can_view_comments(current_user, recid) if auth_code and current_user.is_guest: cookie = mail_cookie_create_authorize_action(VIEWRESTRCOLL, { 'collection': g.collection}) url_args = {'action': cookie, 'ln': g.ln, 'referer': request.referrer} flash(_(""Authorization failure""), 'error') return redirect(url_for('webaccount.login', **url_args)) elif auth_code: flash(auth_msg, 'error') abort(401) # FIXME check restricted discussion comments = CmtRECORDCOMMENT.query.filter(db.and_( CmtRECORDCOMMENT.id_bibrec == recid, CmtRECORDCOMMENT.in_reply_to_id_cmtRECORDCOMMENT == 0, CmtRECORDCOMMENT.star_score == 0 )).order_by(CmtRECORDCOMMENT.date_creation).all() return render_template('comments/comments.html', comments=comments, option='comments')" 4238,"def getattr(self, key, default=None, callback=None): u""""""Getting the attribute of an element. 
>>> xml = etree.Element('root') >>> xml.text = 'text' >>> Node(xml).getattr('text') 'text' >>> Node(xml).getattr('text', callback=str.upper) 'TEXT' >>> Node(xml).getattr('wrong_attr', default='default') 'default' """""" value = self._xml.text if key == 'text' else self._xml.get(key, default) return callback(value) if callback else value" 4239,"def setattr(self, key, value): u""""""Sets an attribute on a node. >>> xml = etree.Element('root') >>> Node(xml).setattr('text', 'text2') >>> Node(xml).getattr('text') 'text2' >>> Node(xml).setattr('attr', 'val') >>> Node(xml).getattr('attr') 'val' """""" if key == 'text': self._xml.text = str(value) else: self._xml.set(key, str(value))" 4240,"def get(self, default=None, callback=None): u""""""Returns leaf's value."""""" value = self._xml.text if self._xml.text else default return callback(value) if callback else value" 4241,"def to_str(self, pretty_print=False, encoding=None, **kw): u""""""Converts a node with all of it's children to a string. Remaining arguments are passed to etree.tostring as is. kwarg without_comments: bool because it works only in C14N flags: 'pretty print' and 'encoding' are ignored. :param bool pretty_print: whether to format the output :param str encoding: which encoding to use (ASCII by default) :rtype: str :returns: node's representation as a string """""" if kw.get('without_comments') and not kw.get('method'): kw.pop('without_comments') kw['method'] = 'c14n' kw['with_comments'] = False return etree.tostring( self._xml, pretty_print=pretty_print, encoding=encoding, **kw )" 4242,"def iter_children(self, key=None): u""""""Iterates over children. :param key: A key for filtering children by tagname. """""" tag = None if key: tag = self._get_aliases().get(key) if not tag: raise KeyError(key) for child in self._xml.iterchildren(tag=tag): if len(child): yield self.__class__(child) else: yield Literal(child)" 4243,"def update(self, **kwargs): u""""""Updating or creation of new simple nodes. Each dict key is used as a tagname and value as text. """""" for key, value in kwargs.items(): helper = helpers.CAST_DICT.get(type(value), str) tag = self._get_aliases().get(key, key) elements = list(self._xml.iterchildren(tag=tag)) if elements: for element in elements: element.text = helper(value) else: element = etree.Element(key) element.text = helper(value) self._xml.append(element) self._aliases = None" 4244,"def sget(self, path, default=NONE_NODE): u""""""Enables access to nodes if one or more of them don't exist. Example: >>> m = Mappet('text value') >>> m.sget('tag') text value >>> m.sget('tag.@attr1') 'attr text' >>> m.sget('tag.#text') 'text value' >>> m.sget('reply.vms_model_cars.car.0.params.doors') NONE_NODE Accessing nonexistent path returns None-like object with mocked converting functions which returns None: >>> m.sget('reply.fake_node').to_dict() is None True """""" attrs = str(path).split(""."") text_or_attr = None last_attr = attrs[-1] # Case of getting text or attribute if last_attr == '#text' or last_attr.startswith('@'): # #text => text, @attr => attr text_or_attr = last_attr[1:] attrs = attrs[:-1] # When getting #text and @attr we want default value to be None. 
if default is NONE_NODE: default = None my_object = self for attr in attrs: try: if isinstance(my_object, (list, tuple)) and re.match('^\-?\d+$', attr): my_object_next = my_object[int(attr)] else: my_object_next = getattr(my_object, attr) my_object = my_object_next except (AttributeError, KeyError, IndexError): return default # Return #text or @attr if text_or_attr: try: return my_object.getattr(text_or_attr) except AttributeError: # myObject can be a list. return None else: return my_object" 4245,"def create(self, tag, value): u""""""Creates a node, if it doesn't exist yet. Unlike attribute access, this allows to pass a node's name with hyphens. Those hyphens will be normalized automatically. In case the required element already exists, raises an exception. Updating/overwriting should be done using `update``. """""" child_tags = {child.tag for child in self._xml} if tag in child_tags: raise KeyError('Node {} already exists in XML tree.'.format(tag)) self.set(tag, value)" 4246,"def set(self, name, value): u""""""Assigns a new XML structure to the node. A literal value, dict or list can be passed in. Works for all nested levels. Dictionary: >>> m = Mappet('') >>> m.head = {'a': 'A', 'b': {'#text': 'B', '@attr': 'val'}} >>> m.head.to_str() 'AB' List: >>> m.head = [{'a': i} for i in 'ABC'] >>> m.head.to_str() 'ABC' Literals: >>> m.head.leaf = 'A' >>> m.head.leaf.get() 'A' """""" try: # Searches for a node to assign to. element = next(self._xml.iterchildren(tag=name)) except StopIteration: # There is no such node in the XML tree. We create a new one # with current root as parent (self._xml). element = etree.SubElement(self._xml, name) if isinstance(value, dict): self.assign_dict(element, value) elif isinstance(value, (list, tuple, set)): self.assign_sequence_or_set(element, value) else: # Literal value. self.assign_literal(element, value) # Clear the aliases. self._aliases = None" 4247,"def assign_dict(self, node, xml_dict): """"""Assigns a Python dict to a ``lxml`` node. :param node: A node to assign the dict to. :param xml_dict: The dict with attributes/children to use. """""" new_node = etree.Element(node.tag) # Replaces the previous node with the new one self._xml.replace(node, new_node) # Copies #text and @attrs from the xml_dict helpers.dict_to_etree(xml_dict, new_node)" 4248,"def assign_literal(element, value): u""""""Assigns a literal. If a given node doesn't exist, it will be created. :param etree.Element element: element to which we assign. :param value: the value to assign """""" # Searches for a conversion method specific to the type of value. helper = helpers.CAST_DICT.get(type(value), str) # Removes all children and attributes. element.clear() element.text = helper(value)" 4249,"def to_dict(self, **kw): u""""""Converts the lxml object to a dict. possible kwargs: without_comments: bool """""" _, value = helpers.etree_to_dict(self._xml, **kw).popitem() return value" 4250,"def _get_aliases(self): u""""""Creates a dict with aliases. The key is a normalized tagname, value the original tagname. """""" if self._aliases is None: self._aliases = {} if self._xml is not None: for child in self._xml.iterchildren(): self._aliases[helpers.normalize_tag(child.tag)] = child.tag return self._aliases" 4251,"def xpath( self, path, namespaces=None, regexp=False, smart_strings=True, single_use=False, ): u""""""Executes XPath query on the ``lxml`` object and returns a correct object. 
:param str path: XPath string e.g., 'cars'/'car' :param str/dict namespaces: e.g., 'exslt', 're' or ``{'re': ""http://exslt.org/regular-expressions""}`` :param bool regexp: if ``True`` and no namespaces is provided, it will use ``exslt`` namespace :param bool smart_strings: :param bool single_use: faster method for using only once. Does not create ``XPathEvaluator`` instance. >>> root = mappet.Mappet(""aBaBc"") >>> root.XPath( ""//*[re:test(., '^abc$', 'i')]"", namespaces='exslt', regexp=True, ) """""" if ( namespaces in ['exslt', 're'] or (regexp and not namespaces) ): namespaces = {'re': ""http://exslt.org/regular-expressions""} if single_use: node = self._xml.xpath(path) else: xpe = self.xpath_evaluator( namespaces=namespaces, regexp=regexp, smart_strings=smart_strings ) node = xpe(path) if len(node) == 1: node = node[0] if len(node): return self.__class__(node) else: return Literal(node) return node" 4252,"def xpath_evaluator(self, namespaces=None, regexp=False, smart_strings=True): u""""""Creates an XPathEvaluator instance for an ElementTree or an Element. :returns: ``XPathEvaluator`` instance """""" return etree.XPathEvaluator( self._xml, namespaces=namespaces, regexp=regexp, smart_strings=smart_strings )" 4253,"def get_last_modified_date(*args, **kwargs): """"""Returns the date of the last modified Note or Release. For use with Django's last_modified decorator. """""" try: latest_note = Note.objects.latest() latest_release = Release.objects.latest() except ObjectDoesNotExist: return None return max(latest_note.modified, latest_release.modified)" 4254,"def using_ios_stash(): ''' returns true if sys path hints the install is running on ios ''' print('detected install path:') print(os.path.dirname(__file__)) module_names = set(sys.modules.keys()) return 'stash' in module_names or 'stash.system' in module_names" 4255,"def pad_bin_image_to_shape(image, shape): """""" Padd image to size :shape: with zeros """""" h, w = shape ih, iw = image.shape assert ih <= h assert iw <= w if iw < w: result = numpy.hstack((image, numpy.zeros((ih, w - iw), bool))) else: result = image if ih < h: result = numpy.vstack((result, numpy.zeros((h - ih, w), bool))) return result" 4256,"def best_convolution(bin_template, bin_image, tollerance=0.5, overlap_table=OVERLAP_TABLE): """""" Selects and applies the best convolution method to find template in image. Returns a list of matches in (width, height, x offset, y offset) format (where the x and y offsets are from the top left corner). As the images are binary images, we can utilise the extra bit space in the float64's by cutting the image into tiles and stacking them into variable grayscale values. This allows converting a sparse binary image into a dense(r) grayscale one. """""" template_sum = numpy.count_nonzero(bin_template) th, tw = bin_template.shape ih, iw = bin_image.shape if template_sum == 0 or th == 0 or tw == 0: # If we don't have a template return [] if th > ih or tw > iw: # If the template is bigger than the image return [] # How many cells can we split the image into? 
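# Stacking rationale: each image cell gets its own greyscale level of
# (template_sum + 1) ** n, so the number of cells that can be stacked is
# capped by ACCURACY_LIMIT on float64 precision (see usable_factors below).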
max_vert_cells = ih // th max_hor_cells = iw // th # Try to work out how many times we can stack the image usable_factors = {n: factors for n, factors in overlap_table.iteritems() if ((template_sum + 1) ** (n)) < ACCURACY_LIMIT} overlap_options = [(factor, n // factor) for n, factors in usable_factors.iteritems() for factor in factors if (factor <= max_vert_cells and n // factor <= max_hor_cells)] if not overlap_options: # We can't stack the image return convolution(bin_template, bin_image, tollerance=tollerance) best_overlap = min(overlap_options, key=lambda x: ((ih // x[0] + th) * (iw // x[1] + tw))) return overlapped_convolution(bin_template, bin_image, tollerance=tollerance, splits=best_overlap)" 4257,"def overlapped_convolution(bin_template, bin_image, tollerance=0.5, splits=(4, 2)): """""" As each of these images are hold only binary values, and RFFT2 works on float64 greyscale values, we can make the convolution more efficient by breaking the image up into :splits: sectons. Each one of these sections then has its greyscale value adjusted and then stacked. We then apply the convolution to this 'stack' of images, and adjust the resultant position matches. """""" th, tw = bin_template.shape ih, iw = bin_image.shape hs, ws = splits h = ih // hs w = iw // ws count = numpy.count_nonzero(bin_template) assert count > 0 assert h >= th assert w >= tw yoffset = [(i * h, ((i + 1) * h) + (th - 1)) for i in range(hs)] xoffset = [(i * w, ((i + 1) * w) + (tw - 1)) for i in range(ws)] # image_stacks is Origin (x,y), array, z (height in stack) image_stacks = [((x1, y1), bin_image[y1:y2, x1:x2], float((count + 1) ** (num))) for num, (x1, x2, y1, y2) in enumerate((x1, x2, y1, y2) for (x1, x2) in xoffset for (y1, y2) in yoffset)] pad_h = max(i.shape[0] for _, i, _ in image_stacks) pad_w = max(i.shape[1] for _, i, _ in image_stacks) # rfft metrics must be an even size - why ... maths? pad_w += pad_w % 2 pad_h += pad_h % 2 overlapped_image = sum_2d_images(pad_bin_image_to_shape(i, (pad_h, pad_w)) * num for _, i, num in image_stacks) #print ""Overlap splits %r, Image Size (%d,%d), #Overlapped Size (%d,%d)"" % (splits,iw,ih,pad_w,pad_h) # Calculate the convolution of the FFT's of the overlapped image & template convolution_freqs = (rfft2(overlapped_image) * rfft2(bin_template[::-1, ::-1], overlapped_image.shape)) # Reverse the FFT to find the result overlapped image convolution_image = irfft2(convolution_freqs) # At this point, the maximum point in convolution_image should be the # bottom right (why?) of the area of greatest match results = set() for (x, y), _, num in image_stacks[::-1]: test = convolution_image / num filtered = ((test >= (count - tollerance)) & (test <= (count + tollerance))) match_points = numpy.transpose(numpy.nonzero(filtered)) # bottom right for (fy, fx) in match_points: if fx < (tw - 1) or fy < (th - 1): continue results.add((x + fx - (tw - 1), y + fy - (th - 1))) convolution_image %= num return list(results)" 4258,"def get_partition_scores(image, min_w=1, min_h=1): """"""Return list of best to worst binary splits along the x and y axis. 
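Each entry is a (score, position, axis) tuple, with axis 0 meaning a row split and axis 1 a column split; scores are negated so that ascending sort puts the best split first (as read from the code below). A rough usage sketch (the ``mask`` name is hypothetical):

    scores = get_partition_scores(mask)
    if scores:
        _, pos, axis = scores[0]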
"""""" h, w = image.shape[:2] if w == 0 or h == 0: return [] area = h * w cnz = numpy.count_nonzero total = cnz(image) if total == 0 or area == total: return [] if h < min_h * 2: y_c = [] else: y_c = [(-abs((count / ((h - y) * w)) - ((total - count) / (y * w))), y, 0) for count, y in ((cnz(image[y:]), y) for y in range(min_h, image.shape[0] - min_h))] if w < min_w * 2: x_c = [] else: x_c = [(-abs((count / (h * (w - x))) - ((total - count) / (h * x))), x, 1) for count, x in ((cnz(image[:, x:]), x) for x in range(min_w, image.shape[1] - min_w))] return sorted(x_c + y_c)" 4259,"def binary_partition_image(image, min_w=1, min_h=1, depth=0, max_depth=-1): """"""Return a bsp of [pos, axis, [before_node, after_node]] nodes where leaf nodes == None. If max_depth < 0 this function will continue until all leaf nodes have been found, if it is >= 0 leaf nodes will be created at that depth. min_w and min_h are the minimum width or height of a partition. """""" if max_depth >= 0 and depth >= max_depth: return None partition = get_best_partition(image, min_w=min_w, min_h=min_h) if partition is None: return None pos, axis = partition if axis == 0: p1 = binary_partition_image( image[pos:], min_w, min_h, depth + 1, max_depth) p2 = binary_partition_image( image[:pos], min_w, min_h, depth + 1, max_depth) elif axis == 1: p1 = binary_partition_image( image[:, pos:], min_w, min_h, depth + 1, max_depth) p2 = binary_partition_image( image[:, :pos], min_w, min_h, depth + 1, max_depth) return [pos, axis, [p1, p2]]" 4260,"def find_threshold_near_density(img, density, low=0, high=255): """"""Find a threshold where the fraction of pixels above the threshold is closest to density where density is (count of pixels above threshold / count of pixels). The highest threshold closest to the desired density will be returned. Use low and high to exclude undesirable thresholds. :param img: target image :type img: 2d :class:`numpy.ndarray` :param density: target density :type density: float between 0.0 and 1.0 :param low: min threshold to test :type low: ubyte :param migh: max threshold to test :type low: ubyte :rtype: ubyte """""" size = numpy.size(img) densities = [] last_t = None while True: t = ((high - low) // 2) + low if t == last_t: densities.sort(key=lambda x: (abs(x[0] - density), 256 - x[1])) return densities[0][1] else: last_t = t d = numpy.count_nonzero(img > t) / size densities.append((d, t)) if d < density: high = t elif d >= density: # search away from low low = t" 4261,"def filter_greys_using_image(image, target): """"""Filter out any values in target not in image :param image: image containing values to appear in filtered image :param target: the image to filter :rtype: 2d :class:`numpy.ndarray` containing only value in image and with the same dimensions as target """""" maskbase = numpy.array(range(256), dtype=numpy.uint8) mask = numpy.where(numpy.in1d(maskbase, numpy.unique(image)), maskbase, 0) return mask[target]" 4262,"def get_swagger_view(title=None, url=None, generator_class=SchemaGenerator): """""" Returns schema view which renders Swagger/OpenAPI. 
"""""" return schemas.get_schema_view( title=title, url=url, renderer_classes=[ CoreJSONRenderer, renderers.OpenAPIRenderer, renderers.SwaggerUIRenderer], generator_class=generator_class)" 4263,"def __init_defaults(self, config): """"""Initializes the default connection settings."""""" provider = self.__provider if provider == 'sqlite': config.setdefault('dbname', ':memory:') config.setdefault('create_db', True) elif provider == 'mysql': config.setdefault('port', 3306) config.setdefault('charset', 'utf8') elif provider == 'postgres': config.setdefault('port', 5432) elif provider == 'oracle': config.setdefault('port', 1521) else: raise ValueError('Unsupported provider ""{}""'.format(provider)) if provider != 'sqlite': config.setdefault('host', 'localhost') config.setdefault('user', None) config.setdefault('password', None) config.setdefault('dbname', None)" 4264,"def persist_one(self, file_base64_content, filename, extension, mime, is_private=True): """""" Загружает файл в облако :type origin: string Принимает значения ROBOT, USER """""" return self.__app.api_call(""MediaService"", ""persist_one"", locals(), {})" 4265,"def upload(self, file_descriptor, settings): """""" Загружает файл в облако :param file_descriptor: открытый дескриптор :param settings: настройки загрузки :rtype: requests.Response """""" multipart_form_data = { 'file': file_descriptor } params = {""settings"": json.dumps(settings)} dr = self.__app.native_api_call('media', 'upload', params, self.__options, True, multipart_form_data, False, http_path=""/api/meta/v1/"", http_method='POST', connect_timeout_sec=60 * 10) return json.loads(dr.text)" 4266,"def download(self, media_id, as_stream=False): """""" Скачивает указанный файл :param media_id: string :rtype: requests.Response """""" response = self.__app.native_api_call('media', 'd/' + media_id, {}, self.__options, False, None, as_stream, http_path=""/api/meta/v1/"", http_method='GET') return response" 4267,"def info(self, media_id): """""" Получить информацию по файлу :param media_id: :rtype: requests.Response """""" dr = self.__app.native_api_call('media', 'i/' + media_id, {}, self.__options, False, None, False, http_path=""/api/meta/v1/"", http_method='GET') return json.loads(dr.text)" 4268,"def create_order(cls, order, **kwargs): """"""Create Order Create a new Order This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.create_order(order, async=True) >>> result = thread.get() :param async bool :param Order order: Attributes of order to create (required) :return: Order If the method is called asynchronously, returns the request thread. """""" kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._create_order_with_http_info(order, **kwargs) else: (data) = cls._create_order_with_http_info(order, **kwargs) return data" 4269,"def delete_order_by_id(cls, order_id, **kwargs): """"""Delete Order Delete an instance of Order by its ID. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.delete_order_by_id(order_id, async=True) >>> result = thread.get() :param async bool :param str order_id: ID of order to delete. (required) :return: None If the method is called asynchronously, returns the request thread. 
"""""" kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._delete_order_by_id_with_http_info(order_id, **kwargs) else: (data) = cls._delete_order_by_id_with_http_info(order_id, **kwargs) return data" 4270,"def get_order_by_id(cls, order_id, **kwargs): """"""Find Order Return single instance of Order by its ID. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.get_order_by_id(order_id, async=True) >>> result = thread.get() :param async bool :param str order_id: ID of order to return (required) :return: Order If the method is called asynchronously, returns the request thread. """""" kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._get_order_by_id_with_http_info(order_id, **kwargs) else: (data) = cls._get_order_by_id_with_http_info(order_id, **kwargs) return data" 4271,"def list_all_orders(cls, **kwargs): """"""List Orders Return a list of Orders This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.list_all_orders(async=True) >>> result = thread.get() :param async bool :param int page: page number :param int size: page size :param str sort: page order :return: page[Order] If the method is called asynchronously, returns the request thread. """""" kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._list_all_orders_with_http_info(**kwargs) else: (data) = cls._list_all_orders_with_http_info(**kwargs) return data" 4272,"def replace_order_by_id(cls, order_id, order, **kwargs): """"""Replace Order Replace all attributes of Order This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.replace_order_by_id(order_id, order, async=True) >>> result = thread.get() :param async bool :param str order_id: ID of order to replace (required) :param Order order: Attributes of order to replace (required) :return: Order If the method is called asynchronously, returns the request thread. """""" kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._replace_order_by_id_with_http_info(order_id, order, **kwargs) else: (data) = cls._replace_order_by_id_with_http_info(order_id, order, **kwargs) return data" 4273,"def update_order_by_id(cls, order_id, order, **kwargs): """"""Update Order Update attributes of Order This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.update_order_by_id(order_id, order, async=True) >>> result = thread.get() :param async bool :param str order_id: ID of order to update. (required) :param Order order: Attributes of order to update. (required) :return: Order If the method is called asynchronously, returns the request thread. """""" kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._update_order_by_id_with_http_info(order_id, order, **kwargs) else: (data) = cls._update_order_by_id_with_http_info(order_id, order, **kwargs) return data" 4274,"async def newnations(self, root): """"""Most recently founded nations, from newest. Returns ------- an :class:`ApiQuery` of a list of :class:`Nation` """""" return [aionationstates.Nation(n) for n in root.find('NEWNATIONS').text.split(',')]" 4275,"async def regions(self, root): """"""List of all the regions, seemingly in order of creation. 
Returns ------- an :class:`ApiQuery` of a list of :class:`Region` """""" return [aionationstates.Region(r) for r in root.find('REGIONS').text.split(',')]" 4276,"def regionsbytag(self, *tags): """"""All regions with any of the named tags. Parameters ---------- *tags : str Regional tags. Can be preceded by a ``-`` to select regions without that tag. Returns ------- an :class:`ApiQuery` of a list of :class:`Region` """""" if len(tags) > 10: raise ValueError('You can specify up to 10 tags') if not tags: raise ValueError('No tags specified') # We don't check for invalid tags here because the behaviour is # fairly intuitive - quering for a non-existent tag returns no # regions, excluding it returns all of them. @api_query('regionsbytag', tags=','.join(tags)) async def result(_, root): text = root.find('REGIONS').text return ([aionationstates.Region(r) for r in text.split(',')] if text else []) return result(self)" 4277,"def dispatch(self, id): """"""Dispatch by id. Parameters ---------- id : int Dispatch id. Returns ------- an :class:`ApiQuery` of :class:`Dispatch` Raises ------ :class:`NotFound` If a dispatch with the requested id doesn't exist. """""" @api_query('dispatch', dispatchid=str(id)) async def result(_, root): elem = root.find('DISPATCH') if not elem: raise NotFound(f'No dispatch found with id {id}') return Dispatch(elem) return result(self)" 4278,"def dispatchlist(self, *, author=None, category=None, subcategory=None, sort='new'): """"""Find dispatches by certain criteria. Parameters ---------- author : str Name of the nation authoring the dispatch. category : str Dispatch's primary category. subcategory : str Dispatch's secondary category. sort : str Sort order, 'new' or 'best'. Returns ------- an :class:`ApiQuery` of a list of :class:`DispatchThumbnail` """""" params = {'sort': sort} if author: params['dispatchauthor'] = author # Here we do need to ensure that our categories are valid, cause # NS just ignores the categories it doesn't recognise and returns # whatever it feels like. if category and subcategory: if (category not in dispatch_categories or subcategory not in dispatch_categories[category]): raise ValueError('Invalid category/subcategory') params['dispatchcategory'] = f'{category}:{subcategory}' elif category: if category not in dispatch_categories: raise ValueError('Invalid category') params['dispatchcategory'] = category else: raise ValueError('Cannot request subcategory without category') @api_query('dispatchlist', **params) async def result(_, root): return [ DispatchThumbnail._from_elem(elem) for elem in root.find('DISPATCHLIST') ] return result(self)" 4279,"def poll(self, id): """"""Poll with a given id. Parameters ---------- id : int Poll id. Returns ------- an :class:`ApiQuery` of :class:`Poll` Raises ------ :class:`NotFound` If a poll with the requested id doesn't exist. """""" @api_query('poll', pollid=str(id)) async def result(_, root): elem = root.find('POLL') if not elem: raise NotFound(f'No poll found with id {id}') return Poll(elem) return result(self)" 4280,"def banner(self, *ids, _expand_macros=None): """"""Get data about banners by their ids. Macros in banners' names and descriptions are not expanded. Parameters ---------- *ids : str Banner ids. Returns ------- an :class:`ApiQuery` of a list of :class:`Banner` Raises ------ :class:`NotFound` If any of the provided ids is invalid. 
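``dispatchlist`` above validates categories locally because the API silently ignores unknown ones. A standalone sketch of that validation and of how the ``dispatchcategory`` parameter is assembled; the ``dispatch_categories`` mapping here is a two-entry stub standing in for the full table the real module defines.

.. code-block:: python

    dispatch_categories = {
        'Factbook': {'Overview', 'History'},
        'Bulletin': {'Policy', 'News'},
    }

    def build_dispatchlist_params(author=None, category=None,
                                  subcategory=None, sort='new'):
        params = {'sort': sort}
        if author:
            params['dispatchauthor'] = author
        if category and subcategory:
            if (category not in dispatch_categories
                    or subcategory not in dispatch_categories[category]):
                raise ValueError('Invalid category/subcategory')
            params['dispatchcategory'] = '{}:{}'.format(category, subcategory)
        elif category:
            if category not in dispatch_categories:
                raise ValueError('Invalid category')
            params['dispatchcategory'] = category
        else:
            raise ValueError('Cannot request subcategory without category')
        return params

    print(build_dispatchlist_params(category='Factbook', subcategory='History'))
    # {'sort': 'new', 'dispatchcategory': 'Factbook:History'}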
"""""" async def noop(s): return s _expand_macros = _expand_macros or noop @api_query('banner', banner=','.join(ids)) async def result(_, root): banners = [await Banner(elem, _expand_macros) for elem in root.find('BANNERS')] if not len(banners) == len(ids): raise NotFound('one of the banner ids provided is invalid') return banners return result(self)" 4281,"async def send_telegram(self, *, client_key, telegram_id, telegram_key, recepient): """"""A basic interface to the Telegrams API. Parameters ---------- client_key : str Telegrams API Client Key. telegram_id : int or str Telegram id. telegram_key : str Telegram key. recepient : str Name of the nation you want to telegram. Returns ------- an awaitable """""" params = { 'a': 'sendTG', 'client': client_key, 'tgid': str(telegram_id), 'key': telegram_key, 'to': recepient } return await self._call_api(params)" 4282,"async def happenings(self, *, nations=None, regions=None, filters=None, beforeid=None, beforetime=None): """"""Iterate through happenings from newest to oldest. Parameters ---------- nations : iterable of str Nations happenings of which will be requested. Cannot be specified at the same time with ``regions``. regions : iterable of str Regions happenings of which will be requested. Cannot be specified at the same time with ``nations``. filters : iterable of str Categories to request happenings by. Available filters are: ``law``, ``change``, ``dispatch``, ``rmb``, ``embassy``, ``eject``, ``admin``, ``move``, ``founding``, ``cte``, ``vote``, ``resolution``, ``member``, and ``endo``. beforeid : int Only request happenings before this id. beforetime : :class:`datetime.datetime` Only request happenings that were emitted before this moment. Returns ------- an asynchronous iterator yielding any of the classes from \ the :mod:`~aionationstates.happenings` module """""" while True: happening_bunch = await self._get_happenings( nations=nations, regions=regions, filters=filters, beforeid=beforeid, beforetime=beforetime ) for happening in happening_bunch: yield happening if len(happening_bunch) < 100: break beforeid = happening_bunch[-1].id" 4283,"async def new_happenings(self, poll_period=30, *, nations=None, regions=None, filters=None): """"""Iterate through new happenings as they arrive:: async for happening in \\ world.new_happenings(region='the north pacific'): # Your processing code here print(happening.text) # As an example Guarantees that: * Every happening is generated from the moment the generator is started; * No happening is generated more than once; * Happenings are generated in order from oldest to newest. Parameters ---------- poll_period : int How long to wait between requesting the next portion of happenings, in seconds. Note that this should only be tweaked for latency reasons, as the function gives a guarantee that all happenings will be generated. Also note that, regardless of the ``poll_period`` set, all of the code in your loop body still has to execute (likely several times) before a new portion of happenings can be requested. Consider wrapping your happening-processing code in a coroutine and launching it as a task from the loop body if you suspect this might become an issue. Requests made by this generator are, of course, subject to the API rate limit, and if the limiter has to temporarily block new requests the time spent waiting will be added on top of ``poll_period``. nations : iterable of str Nations happenings of which will be requested. Cannot be specified at the same time with ``regions``. 
regions : iterable of str Regions happenings of which will be requested. Cannot be specified at the same time with ``nations``. filters : iterable of str Categories to request happenings by. Available filters are: ``law``, ``change``, ``dispatch``, ``rmb``, ``embassy``, ``eject``, ``admin``, ``move``, ``founding``, ``cte``, ``vote``, ``resolution``, ``member``, and ``endo``. Returns ------- an asynchronous iterator yielding any of the classes from \ the :mod:`~aionationstates.happenings` module """""" try: # We only need the happenings from this point forwards last_id = (await self._get_happenings( nations=nations, regions=regions, filters=filters, limit=1))[0].id except IndexError: # Happenings before this point have all been deleted last_id = 0 while True: # Sleep before the loop body to avoid wasting the first request await sleep(poll_period) # I don't think there's a cleaner solution, sadly. happenings = [] async for happening in self.happenings( nations=nations, regions=regions, filters=filters): if happening.id <= last_id: break happenings.append(happening) with suppress(IndexError): last_id = happenings[0].id for happening in reversed(happenings): yield happening" 4284,"def find_potential_match_regions(template, transformed_array, method='correlation', raw_tolerance=0.666): """"""To prevent prohibitively slow calculation of normalisation coefficient at each point in image find potential match points, and normalise these only these. This function uses the definitions of the matching functions to calculate the expected match value and finds positions in the transformed array matching these- normalisation will then eliminate false positives """""" if method == 'correlation': match_value = np.sum(template**2) # this will be the value of the match in the elif method == 'squared difference': match_value = 0 elif method == 'correlation coefficient': temp_minus_mean = template - np.mean(template) match_value = np.sum(temp_minus_mean**2) else: raise ValueError('Matching method not implemented') condition = ((np.round(transformed_array, decimals=3)>=match_value*raw_tolerance) & (np.round(transformed_array, decimals=3)<=match_value*(1./raw_tolerance))) return np.transpose(condition.nonzero())" 4285,"def normalise_correlation(image_tile_dict, transformed_array, template, normed_tolerance=1): """"""Calculates the normalisation coefficients of potential match positions Then normalises the correlation at these positions, and returns them if they do indeed constitute a match """""" template_norm = np.linalg.norm(template) image_norms = {(x,y):np.linalg.norm(image_tile_dict[(x,y)])*template_norm for (x,y) in image_tile_dict.keys()} match_points = image_tile_dict.keys() # for correlation, then need to transofrm back to get correct value for division h, w = template.shape #points_from_transformed_array = [(match[0] + h - 1, match[1] + w - 1) for match in match_points] image_matches_normalised = {match_points[i]:transformed_array[match_points[i][0], match_points[i][1]]/image_norms[match_points[i]] for i in range(len(match_points))} result = {key:value for key, value in image_matches_normalised.items() if np.round(value, decimals=3) >= normed_tolerance} return result.keys()" 4286,"def normalise_correlation_coefficient(image_tile_dict, transformed_array, template, normed_tolerance=1): """"""As above, but for when the correlation coefficient matching method is used """""" template_mean = np.mean(template) template_minus_mean = template - template_mean template_norm = np.linalg.norm(template_minus_mean) 
image_norms = {(x,y):np.linalg.norm(image_tile_dict[(x,y)]- np.mean(image_tile_dict[(x,y)]))*template_norm for (x,y) in image_tile_dict.keys()} match_points = image_tile_dict.keys() # for correlation, then need to transofrm back to get correct value for division h, w = template.shape image_matches_normalised = {match_points[i]:transformed_array[match_points[i][0], match_points[i][1]]/image_norms[match_points[i]] for i in range(len(match_points))} normalised_matches = {key:value for key, value in image_matches_normalised.items() if np.round(value, decimals=3) >= normed_tolerance} return normalised_matches.keys()" 4287,"def calculate_squared_differences(image_tile_dict, transformed_array, template, sq_diff_tolerance=0.1): """"""As above, but for when the squared differences matching method is used """""" template_norm_squared = np.sum(template**2) image_norms_squared = {(x,y):np.sum(image_tile_dict[(x,y)]**2) for (x,y) in image_tile_dict.keys()} match_points = image_tile_dict.keys() # for correlation, then need to transofrm back to get correct value for division h, w = template.shape image_matches_normalised = {match_points[i]:-2*transformed_array[match_points[i][0], match_points[i][1]] + image_norms_squared[match_points[i]] + template_norm_squared for i in range(len(match_points))} #print image_matches_normalised cutoff = h*w*255**2*sq_diff_tolerance normalised_matches = {key:value for key, value in image_matches_normalised.items() if np.round(value, decimals=3) <= cutoff} return normalised_matches.keys()" 4288,"def __init_os_api(self): """""" Initialise client objects for talking to OpenStack API. This is in a separate function so to be called by ``__init__`` and ``__setstate__``. """""" loader = loading.get_plugin_loader('password') auth = loader.load_from_options(auth_url=self._os_auth_url, username=self._os_username, password=self._os_password, project_name=self._os_tenant_name) sess = session.Session(auth=auth) self.nova_client = nova_client.Client(self.nova_api_version, session=sess) self.neutron_client = neutron_client.Client(session=sess) self.glance_client = glance_client.Client('2', session=sess) self.cinder_client = cinder_client.Client('2', session=sess)" 4289,"def stop_instance(self, instance_id): """"""Stops the instance gracefully. :param str instance_id: instance identifier """""" instance = self._load_instance(instance_id) instance.delete() del self._instances[instance_id]" 4290,"def get_ips(self, instance_id): """"""Retrieves all IP addresses associated to a given instance. :return: tuple (IPs) """""" instance = self._load_instance(instance_id) IPs = sum(instance.networks.values(), []) return IPs" 4291,"def is_instance_running(self, instance_id): """"""Checks if the instance is up and running. :param str instance_id: instance identifier :return: bool - True if running, False otherwise """""" # Here, it's always better if we update the instance. instance = self._load_instance(instance_id, force_reload=True) return instance.status == 'ACTIVE'" 4292,"def _check_keypair(self, name, public_key_path, private_key_path): """"""First checks if the keypair is valid, then checks if the keypair is registered with on the cloud. If not the keypair is added to the users ssh keys. 
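The normalisation rules used by ``normalise_correlation`` and ``calculate_squared_differences`` above can be checked on synthetic data: the raw correlation at a candidate tile divided by the product of the tile and template norms is 1.0 at a perfect match, and the squared difference can be rebuilt from the correlation via the identity ``sum((I - T)**2) == sum(I**2) - 2*sum(I*T) + sum(T**2)``. The arrays below are synthetic.

.. code-block:: python

    import numpy as np

    image = np.random.randint(0, 255, size=(50, 50)).astype(float)
    template = image[10:18, 20:28].copy()      # template cut from the image
    tile = image[10:18, 20:28]                 # the exactly matching tile

    # Correlation normalisation: 1.0 at the true location.
    raw_correlation = np.sum(tile * template)
    normalised = raw_correlation / (np.linalg.norm(tile) * np.linalg.norm(template))
    print(round(normalised, 3))                # 1.0

    # Squared-difference score recovered from the correlation plus two norms.
    other_tile = image[0:8, 0:8]
    direct = np.sum((other_tile - template) ** 2)
    via_correlation = (np.sum(other_tile ** 2)
                       - 2 * np.sum(other_tile * template)
                       + np.sum(template ** 2))
    print(np.allclose(direct, via_correlation))   # True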
:param str name: name of the ssh key :param str public_key_path: path to the ssh public key file :param str private_key_path: path to the ssh private key file :raises: `KeypairError` if key is not a valid RSA or DSA key, the key could not be uploaded or the fingerprint does not match to the one uploaded to the cloud. """""" # Read key. We do it as first thing because we need it either # way, to check the fingerprint of the remote keypair if it # exists already, or to create a new keypair. pkey = None try: pkey = DSSKey.from_private_key_file(private_key_path) except PasswordRequiredException: warn(""Unable to check key file `{0}` because it is encrypted with a "" ""password. Please, ensure that you added it to the SSH agent "" ""with `ssh-add {1}`"" .format(private_key_path, private_key_path)) except SSHException: try: pkey = RSAKey.from_private_key_file(private_key_path) except PasswordRequiredException: warn(""Unable to check key file `{0}` because it is encrypted with a "" ""password. Please, ensure that you added it to the SSH agent "" ""with `ssh-add {1}`"" .format(private_key_path, private_key_path)) except SSHException: raise KeypairError('File `%s` is neither a valid DSA key ' 'or RSA key.' % private_key_path) try: # Check if a keypair `name` exists on the cloud. keypair = self.nova_client.keypairs.get(name) # Check if it has the correct keypair, but only if we can read the local key if pkey: fingerprint = str.join( ':', (i.encode('hex') for i in pkey.get_fingerprint())) if fingerprint != keypair.fingerprint: raise KeypairError( ""Keypair `%s` is present but has "" ""different fingerprint. Aborting!"" % name) else: warn(""Unable to check if the keypair is using the correct key."") except NotFound: log.warning( ""Keypair `%s` not found on resource `%s`, Creating a new one"", name, self._os_auth_url) # Create a new keypair with open(os.path.expanduser(public_key_path)) as f: key_material = f.read() try: self.nova_client.keypairs.create(name, key_material) except Exception as ex: log.error( ""Could not import key `%s` with name `%s` to `%s`"", name, public_key_path, self._os_auth_url) raise KeypairError( ""could not create keypair `%s`: %s"" % (name, ex))" 4293,"def _load_instance(self, instance_id, force_reload=True): """""" Return instance with the given id. For performance reasons, the instance ID is first searched for in the collection of VM instances started by ElastiCluster (`self._instances`), then in the list of all instances known to the cloud provider at the time of the last update (`self._cached_instances`), and finally the cloud provider is directly queried. :param str instance_id: instance identifier :param bool force_reload: if ``True``, skip searching caches and reload instance from server and immediately reload instance data from cloud provider :return: py:class:`novaclient.v1_1.servers.Server` - instance :raises: `InstanceError` is returned if the instance can't be found in the local cache or in the cloud. 
"""""" if force_reload: try: # Remove from cache and get from server again vm = self.nova_client.servers.get(instance_id) except NotFound: raise InstanceNotFoundError( ""Instance `{instance_id}` not found"" .format(instance_id=instance_id)) # update caches self._instances[instance_id] = vm self._cached_instances[instance_id] = vm # if instance is known, return it if instance_id in self._instances: return self._instances[instance_id] # else, check (cached) list from provider if instance_id not in self._cached_instances: # Refresh the cache, just in case self._cached_instances = dict( (vm.id, vm) for vm in self.nova_client.servers.list()) if instance_id in self._cached_instances: inst = self._cached_instances[instance_id] self._instances[instance_id] = inst return inst # If we reached this point, the instance was not found neither # in the caches nor on the website. raise InstanceNotFoundError( ""Instance `{instance_id}` not found"" .format(instance_id=instance_id))" 4294,"def _allocate_address(self, instance, network_ids): """""" Allocates a floating/public ip address to the given instance. :param instance: instance to assign address to :param list network_id: List of IDs (as strings) of networks where to request allocation the floating IP. :return: public ip address """""" with OpenStackCloudProvider.__node_start_lock: try: # Use the `novaclient` API (works with python-novaclient <8.0.0) free_ips = [ip for ip in self.nova_client.floating_ips.list() if not ip.fixed_ip] if not free_ips: free_ips.append(self.nova_client.floating_ips.create()) except AttributeError: # Use the `neutronclient` API # # for some obscure reason, using `fixed_ip_address=None` in the # call to `list_floatingips()` returns *no* results (not even, # in fact, those with `fixed_ip_address: None`) whereas # `fixed_ip_address=''` acts as a wildcard and lists *all* the # addresses... so filter them out with a list comprehension free_ips = [ip for ip in self.neutron_client.list_floatingips(fixed_ip_address='')['floatingips'] if ip['fixed_ip_address'] is None] if not free_ips: # FIXME: OpenStack Network API v2 requires that we specify # a network ID along with the request for a floating IP. # However, ElastiCluster configuration allows for multiple # networks to be connected to a VM, but does not give any # hint as to which one(s) should be used for such requests. # So we try them all, ignoring errors until one request # succeeds and hope that it's the OK. One can imagine # scenarios where this is *not* correct, but: (1) these # scenarios are unlikely, and (2) the old novaclient code # above has not even had the concept of multiple networks # for floating IPs and no-one has complained in 5 years... allocated_ip = None for network_id in network_ids: log.debug( ""Trying to allocate floating IP on network %s ..."", network_id) try: allocated_ip = self.neutron_client.create_floatingip({ 'floatingip': {'floating_network_id':network_id}}) except BadNeutronRequest as err: log.debug( ""Failed allocating floating IP on network %s: %s"", network_id, err) if allocated_ip: free_ips.append(allocated_ip) break else: continue # try next network if free_ips: ip = free_ips.pop() else: raise RuntimeError( ""Could not allocate floating IP for VM {0}"" .format(vm.id)) instance.add_floating_ip(ip) return ip.ip" 4295,"async def post(self): """"""Get the message lodged. 
Returns ------- an :class:`aionationstates.ApiQuery` of :class:`aionationstates.Post` """""" post = (await self.region._get_messages( fromid=self._post_id, limit=1))[0] assert post.id == self._post_id return post" 4296,"async def resolution(self): """"""Get the resolution voted on. Returns ------- awaitable of :class:`aionationstates.ResolutionAtVote` The resolution voted for. Raises ------ aionationstates.NotFound If the resolution has since been passed or defeated. """""" resolutions = await asyncio.gather( aionationstates.ga.resolution_at_vote, aionationstates.sc.resolution_at_vote, ) for resolution in resolutions: if (resolution is not None and resolution.name == self.resolution_name): return resolution raise aionationstates.NotFound" 4297,"async def proposal(self): """"""Get the proposal in question. Actually just the first proposal with the same name, but the chance of a collision is tiny. Returns ------- awaitable of :class:`aionationstates.Proposal` The proposal submitted. Raises ------ aionationstates.NotFound If the proposal has since been withdrawn or promoted. """""" proposals = await aionationstates.wa.proposals() for proposal in proposals: if (proposal.name == self.proposal_name): return proposal raise aionationstates.NotFound" 4298,"def create_free_shipping_promotion(cls, free_shipping_promotion, **kwargs): """"""Create FreeShippingPromotion Create a new FreeShippingPromotion This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.create_free_shipping_promotion(free_shipping_promotion, async=True) >>> result = thread.get() :param async bool :param FreeShippingPromotion free_shipping_promotion: Attributes of freeShippingPromotion to create (required) :return: FreeShippingPromotion If the method is called asynchronously, returns the request thread. """""" kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._create_free_shipping_promotion_with_http_info(free_shipping_promotion, **kwargs) else: (data) = cls._create_free_shipping_promotion_with_http_info(free_shipping_promotion, **kwargs) return data" 4299,"def delete_free_shipping_promotion_by_id(cls, free_shipping_promotion_id, **kwargs): """"""Delete FreeShippingPromotion Delete an instance of FreeShippingPromotion by its ID. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.delete_free_shipping_promotion_by_id(free_shipping_promotion_id, async=True) >>> result = thread.get() :param async bool :param str free_shipping_promotion_id: ID of freeShippingPromotion to delete. (required) :return: None If the method is called asynchronously, returns the request thread. """""" kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._delete_free_shipping_promotion_by_id_with_http_info(free_shipping_promotion_id, **kwargs) else: (data) = cls._delete_free_shipping_promotion_by_id_with_http_info(free_shipping_promotion_id, **kwargs) return data" 4300,"def get_free_shipping_promotion_by_id(cls, free_shipping_promotion_id, **kwargs): """"""Find FreeShippingPromotion Return single instance of FreeShippingPromotion by its ID. This method makes a synchronous HTTP request by default. 
To make an asynchronous HTTP request, please pass async=True >>> thread = api.get_free_shipping_promotion_by_id(free_shipping_promotion_id, async=True) >>> result = thread.get() :param async bool :param str free_shipping_promotion_id: ID of freeShippingPromotion to return (required) :return: FreeShippingPromotion If the method is called asynchronously, returns the request thread. """""" kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._get_free_shipping_promotion_by_id_with_http_info(free_shipping_promotion_id, **kwargs) else: (data) = cls._get_free_shipping_promotion_by_id_with_http_info(free_shipping_promotion_id, **kwargs) return data" 4301,"def list_all_free_shipping_promotions(cls, **kwargs): """"""List FreeShippingPromotions Return a list of FreeShippingPromotions This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.list_all_free_shipping_promotions(async=True) >>> result = thread.get() :param async bool :param int page: page number :param int size: page size :param str sort: page order :return: page[FreeShippingPromotion] If the method is called asynchronously, returns the request thread. """""" kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._list_all_free_shipping_promotions_with_http_info(**kwargs) else: (data) = cls._list_all_free_shipping_promotions_with_http_info(**kwargs) return data" 4302,"def replace_free_shipping_promotion_by_id(cls, free_shipping_promotion_id, free_shipping_promotion, **kwargs): """"""Replace FreeShippingPromotion Replace all attributes of FreeShippingPromotion This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.replace_free_shipping_promotion_by_id(free_shipping_promotion_id, free_shipping_promotion, async=True) >>> result = thread.get() :param async bool :param str free_shipping_promotion_id: ID of freeShippingPromotion to replace (required) :param FreeShippingPromotion free_shipping_promotion: Attributes of freeShippingPromotion to replace (required) :return: FreeShippingPromotion If the method is called asynchronously, returns the request thread. """""" kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._replace_free_shipping_promotion_by_id_with_http_info(free_shipping_promotion_id, free_shipping_promotion, **kwargs) else: (data) = cls._replace_free_shipping_promotion_by_id_with_http_info(free_shipping_promotion_id, free_shipping_promotion, **kwargs) return data" 4303,"def update_free_shipping_promotion_by_id(cls, free_shipping_promotion_id, free_shipping_promotion, **kwargs): """"""Update FreeShippingPromotion Update attributes of FreeShippingPromotion This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.update_free_shipping_promotion_by_id(free_shipping_promotion_id, free_shipping_promotion, async=True) >>> result = thread.get() :param async bool :param str free_shipping_promotion_id: ID of freeShippingPromotion to update. (required) :param FreeShippingPromotion free_shipping_promotion: Attributes of freeShippingPromotion to update. (required) :return: FreeShippingPromotion If the method is called asynchronously, returns the request thread. 
"""""" kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._update_free_shipping_promotion_by_id_with_http_info(free_shipping_promotion_id, free_shipping_promotion, **kwargs) else: (data) = cls._update_free_shipping_promotion_by_id_with_http_info(free_shipping_promotion_id, free_shipping_promotion, **kwargs) return data" 4304,"def append(self, electrode_id): ''' Append the specified electrode to the route. The route is not modified (i.e., electrode is not appended) if electrode is not connected to the last electrode in the existing route. Parameters ---------- electrode_id : str Electrode identifier. ''' do_append = False if not self.electrode_ids: do_append = True elif self.device.shape_indexes.shape[0] > 0: source = self.electrode_ids[-1] target = electrode_id if not (source == target): source_id, target_id = self.device.shape_indexes[[source, target]] try: if self.device.adjacency_matrix[source_id, target_id]: # Electrodes are connected, so append target to current # route. do_append = True except IndexError: logger.warning('Electrodes `%s` and `%s` are not ' 'connected.', source, target) if do_append: self.electrode_ids.append(electrode_id) return do_append" 4305,"def df_routes(self, value): ''' .. versionadded:: 0.11.3 ''' self._df_routes = value try: self.emit('routes-set', self._df_routes.copy()) except TypeError: pass" 4306,"def create_ui(self): ''' .. versionchanged:: 0.9 Update device registration in real-time while dragging video control point to new position. .. versionchanged:: 0.12 Add ``dynamic_electrode_state_shapes`` layer to show dynamic electrode actuations. ''' super(DmfDeviceCanvas, self).create_ui() self.video_sink = VideoSink(*[self.socket_info[k] for k in ['transport', 'host', 'port']]) # Initialize video sink socket. self.video_sink.reset() # Required to have key-press and key-release events trigger. self.widget.set_flags(gtk.CAN_FOCUS) self.widget.add_events(gtk.gdk.KEY_PRESS_MASK | gtk.gdk.KEY_RELEASE_MASK) # Create initial (empty) cairo surfaces. surface_names = ('background', 'shapes', 'connections', 'routes', 'channel_labels', 'static_electrode_state_shapes', 'dynamic_electrode_state_shapes', 'registration') self.df_surfaces = pd.DataFrame([[self.get_surface(), 1.] for i in xrange(len(surface_names))], columns=['surface', 'alpha'], index=pd.Index(surface_names, name='name')) def _update_registration(event): try: start_event = self.start_event.copy() self.start_event = event.copy() self.emit('point-pair-selected', {'start_event': start_event, 'end_event': event}) except AttributeError: # Mouse button was released, causing `self.start_event` to be # `None` before event was handled here. pass # Debounce calls to `_update_registration` function to prevent too many # calls being triggered from mouse movement events. update_registration = debounce.Debounce(_update_registration, wait=10) def _on_mouse_move(area, event): # XXX Need to make a copy of the event here since the original # event will be deallocated before the debounced # `update_registration` function is called. event = event.copy() if self.mode == 'register_video' and self.start_event is not None: update_registration(event.copy()) # Connect video registration update event to mouse movement event. self.widget.connect(""motion_notify_event"", _on_mouse_move)" 4307,"def insert_surface(self, position, name, surface, alpha=1.): ''' Insert Cairo surface as new layer. Args ---- position (int) : Index position to insert layer at. name (str) : Name of layer. 
surface (cairo.Context) : Surface to render. alpha (float) : Alpha/transparency level in the range `[0, 1]`. ''' if name in self.df_surfaces.index: raise NameError('Surface already exists with `name=""{}""`.' .format(name)) self.df_surfaces.loc[name] = surface, alpha # Reorder layers such that the new surface is placed at the specified # layer position (relative to the background surface). surfaces_order = self.df_surfaces.index.values.tolist() surfaces_order.remove(name) base_index = surfaces_order.index('background') + 1 if position < 0: position = len(surfaces_order) + position surfaces_order.insert(base_index + position, name) self.reorder_surfaces(surfaces_order)" 4308,"def append_surface(self, name, surface, alpha=1.): ''' Append Cairo surface as new layer on top of existing layers. Args ---- name (str) : Name of layer. surface (cairo.ImageSurface) : Surface to render. alpha (float) : Alpha/transparency level in the range `[0, 1]`. ''' self.insert_surface(position=self.df_surfaces.index.shape[0], name=name, surface=surface, alpha=alpha)" 4309,"def remove_surface(self, name): ''' Remove layer from rendering stack and flatten remaining layers. Args ---- name (str) : Name of layer. ''' self.df_surfaces.drop(name, axis=0, inplace=True) # Order of layers may have changed after removing a layer. Trigger # refresh of surfaces. self.reorder_surfaces(self.df_surfaces.index)" 4310,"def clone_surface(self, source_name, target_name, target_position=-1, alpha=1.): ''' Clone surface from existing layer to a new name, inserting new surface at specified position. By default, new surface is appended as the top surface layer. Args ---- source_name (str) : Name of layer to clone. target_name (str) : Name of new layer. ''' source_surface = self.df_surfaces.surface.ix[source_name] source_width = source_surface.get_width() source_height = source_surface.get_height() source_format = source_surface.get_format() target_surface = cairo.ImageSurface(source_format, source_width, source_height) target_cairo_context = cairo.Context(target_surface) target_cairo_context.set_source_surface(source_surface, 0, 0) target_cairo_context.paint() self.insert_surface(target_position, target_name, target_surface, alpha)" 4311,"def render_dynamic_electrode_state_shapes(self): ''' Render **dynamic** states reported by the electrode controller. **Dynamic** electrode states are only applied while a protocol is running -- _not_ while in real-time programming mode. See also :meth:`render_electrode_shapes()`. .. versionadded:: 0.12 ''' df_shapes = self.canvas.df_canvas_shapes.copy() # Only include shapes for electrodes reported as actuated. on_electrodes = self._dynamic_electrodes[self._dynamic_electrodes > 0] df_shapes = (df_shapes.set_index('id').loc[on_electrodes.index] .reset_index()) return self.render_electrode_shapes(df_shapes=df_shapes, shape_scale=0.75, # Lignt blue fill=(136 / 255., 189 / 255., 230 / 255.))" 4312,"def render_static_electrode_state_shapes(self): ''' Render **static** states reported by the electrode controller. **Static** electrode states are applied while a protocol is **running** _or_ while **real-time** control is activated. See also :meth:`render_electrode_shapes()`. .. 
versionadded:: 0.12 ''' df_shapes = self.canvas.df_canvas_shapes.copy() if self.electrode_states.shape[0]: df_shapes['state'] = self.electrode_states.ix[df_shapes.id].values else: df_shapes['state'] = 0 df_shapes = df_shapes.loc[df_shapes.state > 0].dropna(subset=['state']) return self.render_electrode_shapes(df_shapes=df_shapes)" 4313,"def render_electrode_shapes(self, df_shapes=None, shape_scale=0.8, fill=(1, 1, 1)): ''' Render electrode state shapes. By default, draw each electrode shape filled white. See also :meth:`render_shapes()`. Parameters ---------- df_shapes = : pandas.DataFrame .. versionadded:: 0.12 ''' surface = self.get_surface() if df_shapes is None: if hasattr(self.canvas, 'df_canvas_shapes'): df_shapes = self.canvas.df_canvas_shapes else: return surface if 'x_center' not in df_shapes or 'y_center' not in df_shapes: # No center points have been computed for shapes. return surface cairo_context = cairo.Context(surface) df_shapes = df_shapes.copy() # Scale shapes to leave shape edges uncovered. df_shapes[['x', 'y']] = (df_shapes[['x_center', 'y_center']] + df_shapes[['x_center_offset', 'y_center_offset']].values * shape_scale) for path_id, df_path_i in (df_shapes.groupby(self.canvas .shape_i_columns)[['x', 'y']]): # Use attribute lookup for `x` and `y`, since it is considerably # faster than `get`-based lookup using columns name strings. vertices_x = df_path_i.x.values vertices_y = df_path_i.y.values cairo_context.move_to(vertices_x[0], vertices_y[0]) for x, y in itertools.izip(vertices_x[1:], vertices_y[1:]): cairo_context.line_to(x, y) cairo_context.close_path() # Draw filled shape to indicate actuated electrode state. cairo_context.set_source_rgba(*fill) cairo_context.fill() return surface" 4314,"def render_shapes(self, df_shapes=None, clip=False): ''' Render static electrode shapes (independent of actuation state). If video is enabled, draw white outline for each electrode (no fill). If video is disabled, draw white outline for each electrode and fill blue. See also :meth:`render_electrode_state_shapes()`. ''' surface = self.get_surface() if df_shapes is None: if hasattr(self.canvas, 'df_canvas_shapes'): df_shapes = self.canvas.df_canvas_shapes else: return surface cairo_context = cairo.Context(surface) for path_id, df_path_i in (df_shapes .groupby(self.canvas .shape_i_columns)[['x', 'y']]): # Use attribute lookup for `x` and `y`, since it is considerably # faster than `get`-based lookup using columns name strings. vertices_x = df_path_i.x.values vertices_y = df_path_i.y.values cairo_context.move_to(vertices_x[0], vertices_y[0]) for x, y in itertools.izip(vertices_x[1:], vertices_y[1:]): cairo_context.line_to(x, y) cairo_context.close_path() if self.enabled: # Video is enabled. # Draw white border around electrode. line_width = 1 if path_id not in self.electrode_channels.index: # on off on off dashes = [10, 10] color = (1, 0, 1) line_width *= 2 else: dashes = [] color = (1, 1, 1) cairo_context.set_dash(dashes) cairo_context.set_line_width(line_width) cairo_context.set_source_rgb(*color) cairo_context.stroke() else: # Video is enabled. Fill electrode blue. color = ((0, 0, 1) if path_id in self.electrode_channels.index else (1, 0, 1)) cairo_context.set_source_rgb(*color) cairo_context.fill_preserve() # Draw white border around electrode. cairo_context.set_line_width(1) cairo_context.set_source_rgba(1, 1, 1) cairo_context.stroke() return surface" 4315,"def render_registration(self): ''' Render pinned points on video frame as red rectangle. 
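``render_electrode_shapes`` above pulls each vertex towards the shape centre by ``shape_scale`` so the filled state shape leaves the outline visible, and ``render_shapes`` strokes the outline with Cairo's ``move_to``/``line_to``/``close_path``. A minimal pycairo sketch of both steps on a synthetic square; the coordinates, colours and output filename are made up.

.. code-block:: python

    import cairo
    import numpy as np

    vertices = np.array([[10., 10.], [50., 10.], [50., 50.], [10., 50.]])
    center = vertices.mean(axis=0)
    shape_scale = 0.8
    scaled = center + (vertices - center) * shape_scale   # 40x40 -> 32x32 square

    surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, 60, 60)
    cr = cairo.Context(surface)

    # Filled, scaled-down "state" shape (light blue as in the dynamic layer).
    cr.move_to(*scaled[0])
    for x, y in scaled[1:]:
        cr.line_to(x, y)
    cr.close_path()
    cr.set_source_rgb(136 / 255., 189 / 255., 230 / 255.)
    cr.fill()

    # White outline at full size.
    cr.move_to(*vertices[0])
    for x, y in vertices[1:]:
        cr.line_to(x, y)
    cr.close_path()
    cr.set_line_width(1)
    cr.set_source_rgb(1, 1, 1)
    cr.stroke()

    surface.write_to_png('electrode_sketch.png')   # hypothetical output path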
''' surface = self.get_surface() if self.canvas is None or self.df_canvas_corners.shape[0] == 0: return surface corners = self.df_canvas_corners.copy() corners['w'] = 1 transform = self.canvas.shapes_to_canvas_transform canvas_corners = corners.values.dot(transform.T.values).T points_x = canvas_corners[0] points_y = canvas_corners[1] cairo_context = cairo.Context(surface) cairo_context.move_to(points_x[0], points_y[0]) for x, y in zip(points_x[1:], points_y[1:]): cairo_context.line_to(x, y) cairo_context.line_to(points_x[0], points_y[0]) cairo_context.set_source_rgb(1, 0, 0) cairo_context.stroke() return surface" 4316,"def render(self): ''' .. versionchanged:: 0.12 Add ``dynamic_electrode_state_shapes`` layer to show dynamic electrode actuations. ''' # Render each layer and update data frame with new content for each # surface. surface_names = ('background', 'shapes', 'connections', 'routes', 'channel_labels', 'static_electrode_state_shapes', 'dynamic_electrode_state_shapes', 'registration') for k in surface_names: self.set_surface(k, getattr(self, 'render_' + k)()) self.emit('surfaces-reset', self.df_surfaces) self.cairo_surface = flatten_surfaces(self.df_surfaces)" 4317,"def draw_route(self, df_route, cr, color=None, line_width=None): ''' Draw a line between electrodes listed in a route. Arguments --------- - `df_route`: * A `pandas.DataFrame` containing a column named `electrode_i`. * For each row, `electrode_i` corresponds to the integer index of the corresponding electrode. - `cr`: Cairo context. - `color`: Either a RGB or RGBA tuple, with each color channel in the range [0, 1]. If `color` is `None`, the electrode color is set to white. ''' df_route_centers = (self.canvas.df_shape_centers .ix[df_route.electrode_i][['x_center', 'y_center']]) df_endpoint_marker = (.6 * self.get_endpoint_marker(df_route_centers) + df_route_centers.iloc[-1].values) # Save cairo context to restore after drawing route. cr.save() if color is None: # Colors from [""Show me the numbers""][1]. # # [1]: http://blog.axc.net/its-the-colors-you-have/ # LiteOrange = rgb(251,178,88); # MedOrange = rgb(250,164,58); # LiteGreen = rgb(144,205,151); # MedGreen = rgb(96,189,104); color_rgb_255 = np.array([96,189,104, .8 * 255]) color = (color_rgb_255 / 255.).tolist() if len(color) < 4: color += [1.] * (4 - len(color)) cr.set_source_rgba(*color) cr.move_to(*df_route_centers.iloc[0]) for electrode_i, center_i in df_route_centers.iloc[1:].iterrows(): cr.line_to(*center_i) if line_width is None: line_width = np.sqrt((df_endpoint_marker.max().values - df_endpoint_marker.min().values).prod()) * .1 cr.set_line_width(4) cr.stroke() cr.move_to(*df_endpoint_marker.iloc[0]) for electrode_i, center_i in df_endpoint_marker.iloc[1:].iterrows(): cr.line_to(*center_i) cr.close_path() cr.set_source_rgba(*color) cr.fill() # Restore cairo context after drawing route. cr.restore()" 4318,"def on_widget__button_press_event(self, widget, event): ''' Called when any mouse button is pressed. .. versionchanged:: 0.11 Do not trigger `route-electrode-added` event if `ALT` key is pressed. ''' if self.mode == 'register_video' and event.button == 1: self.start_event = event.copy() return elif self.mode == 'control': shape = self.canvas.find_shape(event.x, event.y) if shape is None: return state = event.get_state() if event.button == 1: # Start a new route. self._route = Route(self.device) self._route.append(shape) self.last_pressed = shape if not (state & gtk.gdk.MOD1_MASK): # `` key is not held down. 
self.emit('route-electrode-added', shape)" 4319,"def on_widget__button_release_event(self, widget, event): ''' Called when any mouse button is released. .. versionchanged:: 0.11.3 Always reset pending route, regardless of whether a route was completed. This includes a) removing temporary routes from routes table, and b) resetting the state of the current route electrode queue. This fixes https://github.com/sci-bots/microdrop/issues/256. ''' event = event.copy() if self.mode == 'register_video' and (event.button == 1 and self.start_event is not None): self.emit('point-pair-selected', {'start_event': self.start_event, 'end_event': event.copy()}) self.start_event = None return elif self.mode == 'control': # XXX Negative `route_i` corresponds to temporary route being # drawn. Since release of mouse button terminates route drawing, # clear any rows corresponding to negative `route_i` values from # the routes table. self.df_routes = self.df_routes.loc[self.df_routes.route_i >= 0].copy() shape = self.canvas.find_shape(event.x, event.y) if shape is not None: electrode_data = {'electrode_id': shape, 'event': event.copy()} if event.button == 1: if gtk.gdk.BUTTON1_MASK == event.get_state(): if self._route.append(shape): self.emit('route-electrode-added', shape) if len(self._route.electrode_ids) == 1: # Single electrode, so select electrode. self.emit('electrode-selected', electrode_data) else: # Multiple electrodes, so select route. route = self._route self.emit('route-selected', route) elif (event.get_state() == (gtk.gdk.MOD1_MASK | gtk.gdk.BUTTON1_MASK) and self.last_pressed != shape): # `` key was held down. self.emit('electrode-pair-selected', {'source_id': self.last_pressed, 'target_id': shape, 'event': event.copy()}) self.last_pressed = None elif event.button == 3: # Create right-click pop-up menu. menu = self.create_context_menu(event, shape) # Display menu popup menu.popup(None, None, None, event.button, event.time) # Clear route. self._route = None" 4320,"def create_context_menu(self, event, shape): ''' Parameters ---------- event : gtk.gdk.Event GTK mouse click event. shape : str Electrode shape identifier (e.g., `""electrode028""`). Returns ------- gtk.Menu Context menu. .. versionchanged:: 0.13 - Deprecate hard-coded commands (e.g., clear electrodes, clear routes). - Add anonymous global commands section at head of menu (i.e., commands not specific to an electrode or route). - Add ""Electrode"" and ""Route(s)"" sub-menus. ''' routes = self.df_routes.loc[self.df_routes.electrode_i == shape, 'route_i'].astype(int).unique().tolist() def _connect_callback(menu_item, command_signal, group, command, data): callback_called = threading.Event() def _callback(signal, widget, *args): if callback_called.is_set(): return callback_called.set() _L().debug('`%s`: %s %s %s', signal, group, command, data) gtk.idle_add(self.emit, command_signal, group, command, data) menu_item.connect('activate', ft.partial(_callback, 'activate')) menu_item.connect('button-press-event', ft.partial(_callback, 'button-press-event')) if group is not None: menu_item.set_tooltip_text(group) menu = gtk.Menu() # Add menu items/groups for registered global commands. 
if self.global_commands: data = {'event': event.copy()} command_signal = 'global-command' for group, commands in self.global_commands.iteritems(): for command, title in commands.iteritems(): menu_item_j = gtk.MenuItem(title) menu.append(menu_item_j) _connect_callback(menu_item_j, command_signal, group, command, data) # Add menu items/groups for registered electrode commands. if self.electrode_commands: separator = gtk.SeparatorMenuItem() menu.append(separator) # Add electrode sub-menu. menu_e = gtk.Menu() menu_head_e = gtk.MenuItem('_Electrode') menu_head_e.set_submenu(menu_e) menu_head_e.set_use_underline(True) menu.append(menu_head_e) command_signal = 'electrode-command' data = {'electrode_id': shape, 'event': event.copy()} for group, commands in self.electrode_commands.iteritems(): for command, title in commands.iteritems(): menu_item_j = gtk.MenuItem(title) menu_e.append(menu_item_j) _connect_callback(menu_item_j, command_signal, group, command, data) # Add menu items/groups for registered route commands. if routes and self.route_commands: # TODO: Refactor electrode/route command menu code to reduce code # duplication (i.e., DRY). separator = gtk.SeparatorMenuItem() menu.append(separator) # Add route sub-menu. menu_r = gtk.Menu() menu_head_r = gtk.MenuItem('_Route(s)') menu_head_r.set_submenu(menu_r) menu_head_r.set_use_underline(True) menu.append(menu_head_r) command_signal = 'route-command' data = {'route_ids': routes, 'event': event.copy()} for group, commands in self.route_commands.iteritems(): for command, title in commands.iteritems(): menu_item_j = gtk.MenuItem(title) menu_r.append(menu_item_j) _connect_callback(menu_item_j, command_signal, group, command, data) menu.show_all() return menu" 4321,"def on_widget__motion_notify_event(self, widget, event): ''' Called when mouse pointer is moved within drawing area. .. versionchanged:: 0.11 Do not trigger `route-electrode-added` event if `ALT` key is pressed. ''' if self.canvas is None: # Canvas has not been initialized. Nothing to do. return elif event.is_hint: pointer = event.window.get_pointer() x, y, mod_type = pointer else: x = event.x y = event.y shape = self.canvas.find_shape(x, y) # Grab focus to [enable notification on key press/release events][1]. # # [1]: http://mailman.daa.com.au/cgi-bin/pipermail/pygtk/2003-August/005770.html self.widget.grab_focus() if shape != self.last_hovered: if self.last_hovered is not None: # Leaving shape self.emit('electrode-mouseout', {'electrode_id': self.last_hovered, 'event': event.copy()}) self.last_hovered = None elif shape is not None: # Entering shape self.last_hovered = shape if self._route is not None: if self._route.append(shape) and not (event.get_state() & gtk.gdk.MOD1_MASK): # `` key was not held down. self.emit('route-electrode-added', shape) self.emit('electrode-mouseover', {'electrode_id': self.last_hovered, 'event': event.copy()})" 4322,"def register_global_command(self, command, title=None, group=None): ''' .. versionadded:: 0.13 Register global command (i.e., not specific to electrode or route). Add global command to context menu. ''' commands = self.global_commands.setdefault(group, OrderedDict()) if title is None: title = (command[:1].upper() + command[1:]).replace('_', ' ') commands[command] = title" 4323,"def register_electrode_command(self, command, title=None, group=None): ''' Register electrode command. Add electrode plugin command to context menu. 
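``register_global_command`` above groups commands into per-group ``OrderedDict`` instances and derives a missing title from the command name. A standalone sketch of the same pattern; the group and command names used here are invented.

.. code-block:: python

    from collections import OrderedDict

    commands_by_group = {}

    def register_command(command, title=None, group=None):
        commands = commands_by_group.setdefault(group, OrderedDict())
        if title is None:
            # 'clear_routes' -> 'Clear routes'
            title = (command[:1].upper() + command[1:]).replace('_', ' ')
        commands[command] = title

    register_command('clear_routes', group='droplet_planning')
    register_command('toggle_video', title='Toggle video feed')
    print(commands_by_group)
    # {'droplet_planning': OrderedDict([('clear_routes', 'Clear routes')]),
    #  None: OrderedDict([('toggle_video', 'Toggle video feed')])}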
''' commands = self.electrode_commands.setdefault(group, OrderedDict()) if title is None: title = (command[:1].upper() + command[1:]).replace('_', ' ') commands[command] = title" 4324,"def register_route_command(self, command, title=None, group=None): ''' Register route command. Add route plugin command to context menu. ''' commands = self.route_commands.setdefault(group, OrderedDict()) if title is None: title = (command[:1].upper() + command[1:]).replace('_', ' ') commands[command] = title" 4325,"def list_all_gateways(cls, **kwargs): """"""List Gateways Return a list of Gateways This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.list_all_gateways(async=True) >>> result = thread.get() :param async bool :param int page: page number :param int size: page size :param str sort: page order :return: page[Gateway] If the method is called asynchronously, returns the request thread. """""" kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._list_all_gateways_with_http_info(**kwargs) else: (data) = cls._list_all_gateways_with_http_info(**kwargs) return data" 4326,"def list_of_dictionaries_to_mysql_inserts( log, datalist, tableName): """"""Convert a python list of dictionaries to pretty csv output **Key Arguments:** - ``log`` -- logger - ``datalist`` -- a list of dictionaries - ``tableName`` -- the name of the table to create the insert statements for **Return:** - ``output`` -- the mysql insert statements (as a string) **Usage:** .. code-block:: python from fundamentals.files import list_of_dictionaries_to_mysql_inserts mysqlInserts = list_of_dictionaries_to_mysql_inserts( log=log, datalist=dataList, tableName=""my_new_table"" ) print mysqlInserts this output the following: .. code-block:: plain INSERT INTO `testing_table` (a_newKey,and_another,dateCreated,uniqueKey2,uniquekey1) VALUES (""cool"" ,""super cool"" ,""2016-09-14T13:17:26"" ,""burgers"" ,""cheese"") ON DUPLICATE KEY UPDATE a_newKey=""cool"", and_another=""super cool"", dateCreated=""2016-09-14T13:17:26"", uniqueKey2=""burgers"", uniquekey1=""cheese"" ; ... ... """""" log.debug('starting the ``list_of_dictionaries_to_mysql_inserts`` function') if not len(datalist): return ""NO MATCH"" inserts = [] for d in datalist: insertCommand = convert_dictionary_to_mysql_table( log=log, dictionary=d, dbTableName=""testing_table"", uniqueKeyList=[], dateModified=False, returnInsertOnly=True, replace=True, batchInserts=False ) inserts.append(insertCommand) output = "";\n"".join(inserts) + "";"" log.debug('completed the ``list_of_dictionaries_to_mysql_inserts`` function') return output" 4327,"def after(self): """""" Return a deferred that will fire after the request is finished. Returns: Deferred: a new deferred that will fire appropriately """""" d = Deferred() self._after_deferreds.append(d) return d.chain" 4328,"def after_response(self, request, fn, *args, **kwargs): """""" Call the given callable after the given request has its response. Arguments: request: the request to piggyback fn (callable): a callable that takes at least two arguments, the request and the response (in that order), along with any additional positional and keyword arguments passed to this function which will be passed along. If the callable returns something other than ``None``, it will be used as the new response. 
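``after_response`` above piggybacks callbacks onto a request: they are stored per-request (keyed by ``id(request)``) and later invoked with the request and response, where a non-``None`` return value replaces the response. A standalone sketch of that behaviour without Twisted; the ``resolve`` helper is hypothetical and stands in for whatever fires the callbacks when the real response arrives.

.. code-block:: python

    from collections import defaultdict

    _requests = defaultdict(lambda: {'callbacks': []})

    def after_response(request, fn, *args, **kwargs):
        _requests[id(request)]['callbacks'].append((fn, args, kwargs))

    def resolve(request, response):
        for fn, args, kwargs in _requests.pop(id(request))['callbacks']:
            result = fn(request, response, *args, **kwargs)
            if result is not None:
                response = result          # callback replaced the response
        return response

    request = object()
    after_response(request, lambda req, resp, suffix: resp + suffix, '!')
    print(resolve(request, 'ok'))   # ok!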
"""""" self._requests[id(request)][""callbacks""].append((fn, args, kwargs))" 4329,"def plot_degbandshalffill(): """"""Plot of Quasiparticle weight for degenerate half-filled bands, showing the Mott transition"""""" ulim = [3.45, 5.15, 6.85, 8.55] bands = range(1, 5) for band, u_int in zip(bands, ulim): name = 'Z_half_'+str(band)+'band' dop = [0.5] data = ssplt.calc_z(band, dop, np.arange(0, u_int, 0.1),0., name) plt.plot(data['u_int'], data['zeta'][0, :, 0], label='$N={}$'.format(str(band))) ssplt.label_saves('Z_half_multiorb.png')" 4330,"def plot_dop(bands, int_max, dop, hund_cu, name): """"""Plot of Quasiparticle weight for N degenerate bands under selected doping shows transition only at half-fill the rest are metallic states"""""" data = ssplt.calc_z(bands, dop, np.arange(0, int_max, 0.1), hund_cu, name) ssplt.plot_curves_z(data, name)" 4331,"def plot_dop_phase(bands, int_max, hund_cu): """"""Phase plot of Quasiparticle weight for N degenerate bands under doping shows transition only at interger filling the rest are metallic states"""""" name = 'Z_dop_phase_'+str(bands)+'bands_U'+str(int_max)+'J'+str(hund_cu) dop = np.sort(np.hstack((np.linspace(0.01,0.99,50), np.arange(1./2./bands, 1, 1/2/bands)))) data = ssplt.calc_z(bands, dop, np.arange(0, int_max, 0.1), hund_cu, name) ssplt.imshow_z(data, name) ssplt.surf_z(data, name)" 4332,"def create_store_credit(cls, store_credit, **kwargs): """"""Create StoreCredit Create a new StoreCredit This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.create_store_credit(store_credit, async=True) >>> result = thread.get() :param async bool :param StoreCredit store_credit: Attributes of storeCredit to create (required) :return: StoreCredit If the method is called asynchronously, returns the request thread. """""" kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._create_store_credit_with_http_info(store_credit, **kwargs) else: (data) = cls._create_store_credit_with_http_info(store_credit, **kwargs) return data" 4333,"def delete_store_credit_by_id(cls, store_credit_id, **kwargs): """"""Delete StoreCredit Delete an instance of StoreCredit by its ID. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.delete_store_credit_by_id(store_credit_id, async=True) >>> result = thread.get() :param async bool :param str store_credit_id: ID of storeCredit to delete. (required) :return: None If the method is called asynchronously, returns the request thread. """""" kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._delete_store_credit_by_id_with_http_info(store_credit_id, **kwargs) else: (data) = cls._delete_store_credit_by_id_with_http_info(store_credit_id, **kwargs) return data" 4334,"def get_store_credit_by_id(cls, store_credit_id, **kwargs): """"""Find StoreCredit Return single instance of StoreCredit by its ID. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.get_store_credit_by_id(store_credit_id, async=True) >>> result = thread.get() :param async bool :param str store_credit_id: ID of storeCredit to return (required) :return: StoreCredit If the method is called asynchronously, returns the request thread. 
"""""" kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._get_store_credit_by_id_with_http_info(store_credit_id, **kwargs) else: (data) = cls._get_store_credit_by_id_with_http_info(store_credit_id, **kwargs) return data" 4335,"def list_all_store_credits(cls, **kwargs): """"""List StoreCredits Return a list of StoreCredits This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.list_all_store_credits(async=True) >>> result = thread.get() :param async bool :param int page: page number :param int size: page size :param str sort: page order :return: page[StoreCredit] If the method is called asynchronously, returns the request thread. """""" kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._list_all_store_credits_with_http_info(**kwargs) else: (data) = cls._list_all_store_credits_with_http_info(**kwargs) return data" 4336,"def replace_store_credit_by_id(cls, store_credit_id, store_credit, **kwargs): """"""Replace StoreCredit Replace all attributes of StoreCredit This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.replace_store_credit_by_id(store_credit_id, store_credit, async=True) >>> result = thread.get() :param async bool :param str store_credit_id: ID of storeCredit to replace (required) :param StoreCredit store_credit: Attributes of storeCredit to replace (required) :return: StoreCredit If the method is called asynchronously, returns the request thread. """""" kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._replace_store_credit_by_id_with_http_info(store_credit_id, store_credit, **kwargs) else: (data) = cls._replace_store_credit_by_id_with_http_info(store_credit_id, store_credit, **kwargs) return data" 4337,"def update_store_credit_by_id(cls, store_credit_id, store_credit, **kwargs): """"""Update StoreCredit Update attributes of StoreCredit This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.update_store_credit_by_id(store_credit_id, store_credit, async=True) >>> result = thread.get() :param async bool :param str store_credit_id: ID of storeCredit to update. (required) :param StoreCredit store_credit: Attributes of storeCredit to update. (required) :return: StoreCredit If the method is called asynchronously, returns the request thread. 
"""""" kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._update_store_credit_by_id_with_http_info(store_credit_id, store_credit, **kwargs) else: (data) = cls._update_store_credit_by_id_with_http_info(store_credit_id, store_credit, **kwargs) return data" 4338,"def on_mouse_motion(x, y, dx, dy): """""" 当鼠标没有按下时移动的时候触发 """""" mouse.x, mouse.y = x, y mouse.move() window.update_caption(mouse)" 4339,"def on_mouse_drag(x, y, dx, dy, buttons, modifiers): """""" 当鼠标按下并且移动的时候触发 """""" mouse.x, mouse.y = x, y mouse.move()" 4340,"def on_mouse_press(x, y, button, modifiers): """""" 按下鼠标时 """""" if button == MouseKeyCode.LEFT: mouse.press() elif button == MouseKeyCode.RIGHT: mouse.right_press() # 判断是否有图形的点击事件被触发了 shapes = list(all_shapes) while shapes: shape = shapes.pop() if(shape._press and shape_clicked(shape)): shape._press()" 4341,"def on_mouse_release(x, y, button, modifiers): """""" 松开鼠标时 """""" if button == MouseKeyCode.LEFT: mouse.release() elif button == MouseKeyCode.RIGHT: mouse.right_release()" 4342,"def _register_extensions(self, namespace): """"""Register any extensions under the given namespace."""""" # Register any extension classes for this class. extmanager = ExtensionManager( 'extensions.classes.{}'.format(namespace), propagate_map_exceptions=True ) if extmanager.extensions: extmanager.map(util.register_extension_class, base=self) # Register any extension methods for this class. extmanager = ExtensionManager( 'extensions.methods.{}'.format(namespace), propagate_map_exceptions=True ) if extmanager.extensions: extmanager.map(util.register_extension_method, base=self)" 4343,"def acls(self): """"""The instance bound ACLs operations layer."""""" if self._acls is None: self._acls = InstanceAcls(instance=self) return self._acls" 4344,"def all(self): """"""Get all ACLs for this instance."""""" return self._instance._client.acls.all(self._instance.name)" 4345,"def create(self, cidr_mask, description, **kwargs): """"""Create an ACL for this instance. See :py:meth:`Acls.create` for call signature. """""" return self._instance._client.acls.create( self._instance.name, cidr_mask, description, **kwargs )" 4346,"def get(self, acl): """"""Get the ACL specified by ID belonging to this instance. See :py:meth:`Acls.get` for call signature. """""" return self._instance._client.acls.get(self._instance.name, acl)" 4347,"def is_number(num, if_bool=False): """""" :return: True if num is either an actual number, or an object that converts to one """""" if isinstance(num, bool): return if_bool elif isinstance(num, int): return True try: number = float(num) return not (isnan(number) or isinf(number)) except (TypeError, ValueError): return False" 4348,"def _VarintDecoder(mask): """"""Return an encoder for a basic varint value (does not include tag). Decoded values will be bitwise-anded with the given mask before being returned, e.g. to limit them to 32 bits. The returned decoder does not take the usual ""end"" parameter -- the caller is expected to do bounds checking after the fact (often the caller can defer such checking until later). The decoder returns a (value, new_pos) pair. 
"""""" def DecodeVarint(buffer, pos): result = 0 shift = 0 while 1: if pos > len(buffer) -1: raise NotEnoughDataException( ""Not enough data to decode varint"" ) b = buffer[pos] result |= ((b & 0x7f) << shift) pos += 1 if not (b & 0x80): result &= mask return (result, pos) shift += 7 if shift >= 64: raise _DecodeError('Too many bytes when decoding varint.') return DecodeVarint" 4349,"def _SignedVarintDecoder(mask): """"""Like _VarintDecoder() but decodes signed values."""""" def DecodeVarint(buffer, pos): result = 0 shift = 0 while 1: if pos > len(buffer) -1: raise NotEnoughDataException( ""Not enough data to decode varint"" ) b = local_ord(buffer[pos]) result |= ((b & 0x7f) << shift) pos += 1 if not (b & 0x80): if result > 0x7fffffffffffffff: result -= (1 << 64) result |= ~mask else: result &= mask return (result, pos) shift += 7 if shift >= 64: raise _DecodeError('Too many bytes when decoding varint.') return DecodeVarint" 4350,"def varintSize(value): """"""Compute the size of a varint value."""""" if value <= 0x7f: return 1 if value <= 0x3fff: return 2 if value <= 0x1fffff: return 3 if value <= 0xfffffff: return 4 if value <= 0x7ffffffff: return 5 if value <= 0x3ffffffffff: return 6 if value <= 0x1ffffffffffff: return 7 if value <= 0xffffffffffffff: return 8 if value <= 0x7fffffffffffffff: return 9 return 10" 4351,"def signedVarintSize(value): """"""Compute the size of a signed varint value."""""" if value < 0: return 10 if value <= 0x7f: return 1 if value <= 0x3fff: return 2 if value <= 0x1fffff: return 3 if value <= 0xfffffff: return 4 if value <= 0x7ffffffff: return 5 if value <= 0x3ffffffffff: return 6 if value <= 0x1ffffffffffff: return 7 if value <= 0xffffffffffffff: return 8 if value <= 0x7fffffffffffffff: return 9 return 10" 4352,"def _VarintEncoder(): """"""Return an encoder for a basic varint value."""""" local_chr = chr def EncodeVarint(write, value): bits = value & 0x7f value >>= 7 while value: write(0x80|bits) bits = value & 0x7f value >>= 7 return write(bits) return EncodeVarint" 4353,"def _SignedVarintEncoder(): """"""Return an encoder for a basic signed varint value."""""" local_chr = chr def EncodeSignedVarint(write, value): if value < 0: value += (1 << 64) bits = value & 0x7f value >>= 7 while value: write(0x80|bits) bits = value & 0x7f value >>= 7 return write(bits) return EncodeSignedVarint" 4354,"def match(value, query): """""" Determine whether a value satisfies a query. 
"""""" if type(query) in [str, int, float, type(None)]: return value == query elif type(query) == dict and len(query.keys()) == 1: for op in query: if op == ""$eq"": return value == query[op] elif op == ""$lt"": return value < query[op] elif op == ""$lte"": return value <= query[op] elif op == ""$gt"": return value > query[op] elif op == ""$gte"": return value >= query[op] elif op == ""$ne"": return value != query[op] elif op == ""$in"": return value in query[op] elif op == ""$nin"": return value not in query[op] else: GeoQLError(""Not a valid query operator: "" + op) else: raise GeoQLError(""Not a valid query: "" + str(query))" 4355,"def features_properties_null_remove(obj): """""" Remove any properties of features in the collection that have entries mapping to a null (i.e., None) value """""" features = obj['features'] for i in tqdm(range(len(features))): if 'properties' in features[i]: properties = features[i]['properties'] features[i]['properties'] = {p:properties[p] for p in properties if properties[p] is not None} return obj" 4356,"def features_tags_parse_str_to_dict(obj): """""" Parse tag strings of all features in the collection into a Python dictionary, if possible. """""" features = obj['features'] for i in tqdm(range(len(features))): tags = features[i]['properties'].get('tags') if tags is not None: try: tags = json.loads(""{"" + tags.replace(""=>"", "":"") + ""}"") except: try: tags = eval(""{"" + tags.replace(""=>"", "":"") + ""}"") except: tags = None if type(tags) == dict: features[i]['properties']['tags'] = {k:tags[k] for k in tags} elif tags is None and 'tags' in features[i]['properties']: del features[i]['properties']['tags'] return obj" 4357,"def features_keep_by_property(obj, query): """""" Filter all features in a collection by retaining only those that satisfy the provided query. """""" features_keep = [] for feature in tqdm(obj['features']): if all([match(feature['properties'].get(prop), qry) for (prop, qry) in query.items()]): features_keep.append(feature) obj['features'] = features_keep return obj" 4358,"def features_keep_within_radius(obj, center, radius, units): """""" Filter all features in a collection by retaining only those that fall within the specified radius. """""" features_keep = [] for feature in tqdm(obj['features']): if all([getattr(geopy.distance.vincenty((lat,lon), center), units) < radius for (lon,lat) in geojson.utils.coords(feature)]): features_keep.append(feature) obj['features'] = features_keep return obj" 4359,"def features_keep_using_features(obj, bounds): """""" Filter all features in a collection by retaining only those that fall within the features in the second collection. """""" # Build an R-tree index of bound features and their shapes. 
bounds_shapes = [ (feature, shapely.geometry.shape(feature['geometry'])) for feature in tqdm(bounds['features']) if feature['geometry'] is not None ] index = rtree.index.Index() for i in tqdm(range(len(bounds_shapes))): (feature, shape) = bounds_shapes[i] index.insert(i, shape.bounds) features_keep = [] for feature in tqdm(obj['features']): if 'geometry' in feature and 'coordinates' in feature['geometry']: coordinates = feature['geometry']['coordinates'] if any([ shape.contains(shapely.geometry.Point(lon, lat)) for (lon, lat) in coordinates for (feature, shape) in [bounds_shapes[i] for i in index.nearest((lon,lat,lon,lat), 1)] ]): features_keep.append(feature) continue obj['features'] = features_keep return obj" 4360,"def features_node_edge_graph(obj): """""" Transform the features into a more graph-like structure by appropriately splitting LineString features into two-point ""edges"" that connect Point ""nodes"". """""" points = {} features = obj['features'] for feature in tqdm(obj['features']): for (lon, lat) in geojson.utils.coords(feature): points.setdefault((lon, lat), 0) points[(lon, lat)] += 1 points = [p for (p, c) in points.items() if c > 1] features = [geojson.Point(p) for p in points] # For each feature, split it into ""edge"" features # that occur between every point. for f in tqdm(obj['features']): seqs = [] seq = [] for point in geojson.utils.coords(f): if len(seq) > 0: seq.append(point) if point in points: seq.append(point) if len(seq) > 1 and seq[0] in points: seqs.append(seq) seq = [point] for seq in seqs: features.append(geojson.Feature(geometry={""coordinates"":seq, ""type"":f['geometry']['type']}, properties=f['properties'], type=f['type'])) obj['features'] = features return obj" 4361,"def get_conn(filename): """"""Returns new sqlite3.Connection object with _dict_factory() as row factory"""""" conn = sqlite3.connect(filename) conn.row_factory = _dict_factory return conn" 4362,"def conn_is_open(conn): """"""Tests sqlite3 connection, returns T/F"""""" if conn is None: return False try: get_table_names(conn) return True # # Idea taken from # # http: // stackoverflow.com / questions / 1981392 / how - to - tell - if -python - sqlite - database - connection - or -cursor - is -closed # conn.execute(""select id from molecule limit 1"") # return True except sqlite3.ProgrammingError as e: # print(e) return False" 4363,"def cursor_to_data_header(cursor): """"""Fetches all rows from query (""cursor"") and returns a pair (data, header) Returns: (data, header), where - data is a [num_rows]x[num_cols] sequence of sequences; - header is a [num_cols] list containing the field names """""" n = 0 data, header = [], {} for row in cursor: if n == 0: header = row.keys() data.append(row.values()) return data, list(header)" 4364,"def get_table_info(conn, tablename): """"""Returns TableInfo object"""""" r = conn.execute(""pragma table_info('{}')"".format(tablename)) ret = TableInfo(((row[""name""], row) for row in r)) return ret" 4365,"def find(self, **kwargs): """""" Finds row matching specific field value Args: **kwargs: (**only one argument accepted**) fielname=value, e.g., formula=""OH"" Returns: list element or None """""" if len(kwargs) != 1: raise ValueError(""One and only one keyword argument accepted"") key = list(kwargs.keys())[0] value = list(kwargs.values())[0] ret = None for row in self.values(): if row[key] == value: ret = row break return ret" 4366,"def early_warning(iterable, name='this generator'): ''' This function logs an early warning that the generator is empty. 
This is handy for times when you're manually playing with generators and would appreciate the console warning you ahead of time that your generator is now empty, instead of being surprised with a StopIteration or GeneratorExit exception when you're trying to test something. ''' nxt = None prev = next(iterable) while 1: try: nxt = next(iterable) except: warning(' {} is now empty'.format(name)) yield prev break else: yield prev prev = nxt" 4367,"def post(self, data, request, id): """""" Create a new resource using POST """""" if id: # can't post to individual user raise errors.MethodNotAllowed() user = self._dict_to_model(data) user.save() # according to REST, return 201 and Location header return Response(201, None, { 'Location': '%s%d' % (reverse('user'), user.pk)})" 4368,"def get(self, request, id): """""" Get one user or all users """""" if id: return self._get_one(id) else: return self._get_all()" 4369,"def put(self, data, request, id): """""" Update a single user. """""" if not id: # can't update the whole container raise errors.MethodNotAllowed() userdata = self._dict_to_model(data) userdata.pk = id try: userdata.save(force_update=True) except DatabaseError: # can't update non-existing user raise errors.NotFound()" 4370,"def delete(self, request, id): """""" Delete a single user. """""" if not id: # can't delete the whole container raise errors.MethodNotAllowed() try: models.User.objects.get(pk=id).delete() except models.User.DoesNotExist: # we never had it, so it's definitely deleted pass" 4371,"def _get_one(self, id): """""" Get one user from db and turn into dict """""" try: return self._to_dict(models.User.objects.get(pk=id)) except models.User.DoesNotExist: raise errors.NotFound()" 4372,"def _get_all(self): """""" Get all users from db and turn into list of dicts """""" return [self._to_dict(row) for row in models.User.objects.all()]" 4373,"def _dict_to_model(self, data): """""" Create new user model instance based on the received data. Note that the created user is not saved into the database.
"""""" try: # we can do this because we have same fields # in the representation and in the model: user = models.User(**data) except TypeError: # client sent bad data raise errors.BadRequest() else: return user" 4374,"def captures(self, uuid, withTitles=False): """"""Return the captures for a given uuid optional value withTitles=yes"""""" picker = lambda x: x.get('capture', []) return self._get((uuid,), picker, withTitles='yes' if withTitles else 'no')" 4375,"def uuid(self, type, val): """"""Return the item-uuid for a identifier"""""" picker = lambda x: x.get('uuid', x) return self._get((type, val), picker)" 4376,"def search(self, q, field=None, page=None, per_page=None): """"""Search across all (without field) or in specific field (valid fields at http://www.loc.gov/standards/mods/mods-outline.html)"""""" def picker(results): if type(results['result']) == list: return results['result'] else: return [results['result']] return self._get(('search',), picker, q=q, field=field, page=page, per_page=per_page)" 4377,"def mods(self, uuid): """"""Return a mods record for a given uuid"""""" picker = lambda x: x.get('mods', {}) return self._get(('mods', uuid), picker)" 4378,"def _get(self, components, picker, **params): """"""Generic get which handles call to api and setting of results Return: Results object"""""" url = '/'.join((self.base,) + components) headers = {""Authorization"": ""Token token="" + self._token} params['page'] = params.get('page') or self.page params['per_page'] = params.get('per_page') or self.per_page r = requests.get(""."".join([url, self.format]), params=params, headers=headers) _next = self._nextify(components, picker, params) return Result(r, picker, _next)" 4379,"def convert_datetext_to_dategui(datetext, ln=None, secs=False): """"""Convert: '2005-11-16 15:11:57' => '16 nov 2005, 15:11' Or optionally with seconds: '2005-11-16 15:11:57' => '16 nov 2005, 15:11:57' Month is internationalized """""" assert ln is None, 'setting language is not supported' try: datestruct = convert_datetext_to_datestruct(datetext) if datestruct == datestruct_default: raise ValueError if secs: output_format = ""d MMM Y, H:mm:ss"" else: output_format = ""d MMM Y, H:mm"" dt = datetime.fromtimestamp(time.mktime(datestruct)) return babel_format_datetime(dt, output_format) except ValueError: return _(""N/A"")" 4380,"def get_datetext(year, month, day): """"""year=2005, month=11, day=16 => '2005-11-16 00:00:00'"""""" input_format = ""%Y-%m-%d"" try: datestruct = time.strptime(""%i-%i-%i"" % (year, month, day), input_format) return strftime(datetext_format, datestruct) except: return datetext_default" 4381,"def get_i18n_day_name(day_nb, display='short', ln=None): """"""Get the string representation of a weekday, internationalized @param day_nb: number of weekday UNIX like. => 0=Sunday @param ln: language for output @return: the string representation of the day """""" ln = default_ln(ln) _ = gettext_set_language(ln) if display == 'short': days = {0: _(""Sun""), 1: _(""Mon""), 2: _(""Tue""), 3: _(""Wed""), 4: _(""Thu""), 5: _(""Fri""), 6: _(""Sat"")} else: days = {0: _(""Sunday""), 1: _(""Monday""), 2: _(""Tuesday""), 3: _(""Wednesday""), 4: _(""Thursday""), 5: _(""Friday""), 6: _(""Saturday"")} return days[day_nb]" 4382,"def get_i18n_month_name(month_nb, display='short', ln=None): """"""Get a non-numeric representation of a month, internationalized. @param month_nb: number of month, (1 based!) 
=>1=jan,..,12=dec @param ln: language for output @return: the string representation of month """""" ln = default_ln(ln) _ = gettext_set_language(ln) if display == 'short': months = {0: _(""Month""), 1: _(""Jan""), 2: _(""Feb""), 3: _(""Mar""), 4: _(""Apr""), 5: _(""May""), 6: _(""Jun""), 7: _(""Jul""), 8: _(""Aug""), 9: _(""Sep""), 10: _(""Oct""), 11: _(""Nov""), 12: _(""Dec"")} else: months = {0: _(""Month""), 1: _(""January""), 2: _(""February""), 3: _(""March""), 4: _(""April""), 5: _(""May ""), # trailing space distinguishes short/long form 6: _(""June""), 7: _(""July""), 8: _(""August""), 9: _(""September""), 10: _(""October""), 11: _(""November""), 12: _(""December"")} return months[month_nb].strip()" 4383,"def create_day_selectbox(name, selected_day=0, ln=None): """"""Creates an HTML menu for day selection. (0..31 values). @param name: name of the control (i.e. name of the var you'll get) @param selected_day: preselect a day. Use 0 for the label 'Day' @param ln: language of the menu @return: html a string """""" ln = default_ln(ln) _ = gettext_set_language(ln) out = ""\n"" return out" 4384,"def create_month_selectbox(name, selected_month=0, ln=None): """"""Creates an HTML menu for month selection. Value of selected field is numeric. @param name: name of the control, your form will be sent with name=value... @param selected_month: preselect a month. use 0 for the Label 'Month' @param ln: language of the menu @return: html as string """""" ln = default_ln(ln) out = ""\n"" return out" 4385,"def create_year_selectbox(name, from_year=-1, length=10, selected_year=0, ln=None): """"""Creates an HTML menu (dropdownbox) for year selection. @param name: name of control( i.e. name of the variable you'll get) @param from_year: year on which to begin. if <0 assume it is current year @param length: number of items in menu @param selected_year: initial selected year (if in range), else: label is selected @param ln: language @return: html as string """""" ln = default_ln(ln) _ = gettext_set_language(ln) if from_year < 0: from_year = time.localtime()[0] out = ""\n"" return out" 4386,"def parse_runtime_limit(value, now=None): """"""Parsing CLI option for runtime limit, supplied as VALUE. Value could be something like: Sunday 23:00-05:00, the format being [Wee[kday]] [hh[:mm][-hh[:mm]]]. The function will return two valid time ranges. The first could be in the past, containing the present or in the future. The second is always in the future. """""" def extract_time(value): value = _RE_RUNTIMELIMIT_HOUR.search(value).groupdict() return timedelta(hours=int(value['hours']), minutes=int(value['minutes'])) def extract_weekday(value): key = value[:3].lower() try: return { 'mon': 0, 'tue': 1, 'wed': 2, 'thu': 3, 'fri': 4, 'sat': 5, 'sun': 6, }[key] except KeyError: raise ValueError(""%s is not a good weekday name."" % value) if now is None: now = datetime.now() today = now.date() g = _RE_RUNTIMELIMIT_FULL.search(value) if not g: raise ValueError('""%s"" does not seem to be correct format for ' 'parse_runtime_limit() ' '[Wee[kday]] [hh[:mm][-hh[:mm]]]).' % value) pieces = g.groupdict() if pieces['weekday_begin'] is None: # No weekday specified. 
So either today or tomorrow first_occasion_day = timedelta(days=0) next_occasion_delta = timedelta(days=1) else: # If given 'Mon' then we transform it to 'Mon-Mon' if pieces['weekday_end'] is None: pieces['weekday_end'] = pieces['weekday_begin'] # Day range weekday_begin = extract_weekday(pieces['weekday_begin']) weekday_end = extract_weekday(pieces['weekday_end']) if weekday_begin <= today.weekday() <= weekday_end: first_occasion_day = timedelta(days=0) else: days = (weekday_begin - today.weekday()) % 7 first_occasion_day = timedelta(days=days) weekday = (now + first_occasion_day).weekday() if weekday < weekday_end: # Fits in the same week next_occasion_delta = timedelta(days=1) else: # The week after days = weekday_begin - weekday + 7 next_occasion_delta = timedelta(days=days) if pieces['hour_begin'] is None: pieces['hour_begin'] = '00:00' if pieces['hour_end'] is None: pieces['hour_end'] = '00:00' beginning_time = extract_time(pieces['hour_begin']) ending_time = extract_time(pieces['hour_end']) if not ending_time: ending_time = beginning_time + timedelta(days=1) elif beginning_time and ending_time and beginning_time > ending_time: ending_time += timedelta(days=1) start_time = real_datetime.combine(today, real_time(hour=0, minute=0)) current_range = ( start_time + first_occasion_day + beginning_time, start_time + first_occasion_day + ending_time ) if now > current_range[1]: current_range = tuple(t + next_occasion_delta for t in current_range) future_range = ( current_range[0] + next_occasion_delta, current_range[1] + next_occasion_delta ) return current_range, future_range" 4387,"def guess_datetime(datetime_string): """"""Try to guess the datetime contained in a string of unknow format. @param datetime_string: the datetime representation. @type datetime_string: string @return: the guessed time. @rtype: L{time.struct_time} @raises ValueError: in case it's not possible to guess the time. """""" if CFG_HAS_EGENIX_DATETIME: try: return Parser.DateTimeFromString(datetime_string).timetuple() except ValueError: pass else: for format in (None, '%x %X', '%X %x', '%Y-%M-%dT%h:%m:%sZ'): try: return time.strptime(datetime_string, format) except ValueError: pass raise ValueError(""It is not possible to guess the datetime format of %s"" % datetime_string)" 4388,"def get_time_estimator(total): """"""Given a total amount of items to compute, return a function that, if called every time an item is computed (or every step items are computed) will give a time estimation for how long it will take to compute the whole set of itmes. The function will return two values: the first is the number of seconds that are still needed to compute the whole set, the second value is the time in the future when the operation is expected to end. """""" t1 = time.time() count = [0] def estimate_needed_time(step=1): count[0] += step t2 = time.time() t3 = 1.0 * (t2 - t1) / count[0] * (total - count[0]) return t3, t3 + t1 return estimate_needed_time" 4389,"def pretty_date(ugly_time=False, ln=None): """"""Get a datetime object or a int() Epoch timestamp and return a pretty string like 'an hour ago', 'Yesterday', '3 months ago', 'just now', etc. 
"""""" ln = default_ln(ln) _ = gettext_set_language(ln) now = real_datetime.now() if isinstance(ugly_time, six.string_types): # try to convert it to epoch timestamp date_format = '%Y-%m-%d %H:%M:%S.%f' try: ugly_time = time.strptime(ugly_time, date_format) ugly_time = int(time.mktime(ugly_time)) except ValueError: # doesn't match format, let's try to guess try: ugly_time = int(guess_datetime(ugly_time)) except ValueError: return ugly_time ugly_time = int(time.mktime(ugly_time)) # Initialize the time period difference if isinstance(ugly_time, int): diff = now - real_datetime.fromtimestamp(ugly_time) elif isinstance(ugly_time, real_datetime): diff = now - ugly_time elif not ugly_time: diff = now - now second_diff = diff.seconds day_diff = diff.days if day_diff < 0: return '' if day_diff == 0: if second_diff < 10: return _(""just now"") if second_diff < 60: return str(second_diff) + _("" seconds ago"") if second_diff < 120: return _(""a minute ago"") if second_diff < 3600: return str(second_diff / 60) + _("" minutes ago"") if second_diff < 7200: return _(""an hour ago"") if second_diff < 86400: return str(second_diff / 3600) + _("" hours ago"") if day_diff == 1: return _(""Yesterday"") if day_diff < 7: return str(day_diff) + _("" days ago"") if day_diff < 31: if day_diff / 7 == 7: return _(""Last week"") else: return str(day_diff / 7) + _("" weeks ago"") if day_diff < 365: if day_diff / 30 == 1: return _(""Last month"") else: return str(day_diff / 30) + _("" months ago"") if day_diff / 365 == 1: return _(""Last year"") else: return str(day_diff / 365) + _("" years ago"")" 4390,"def get_dst(date_obj): """"""Determine if dst is locally enabled at this time"""""" dst = 0 if date_obj.year >= 1900: tmp_date = time.mktime(date_obj.timetuple()) # DST is 1 so reduce time with 1 hour. dst = time.localtime(tmp_date)[-1] return dst" 4391,"def utc_to_localtime( date_str, fmt=""%Y-%m-%d %H:%M:%S"", input_fmt=""%Y-%m-%dT%H:%M:%SZ""): """""" Convert UTC to localtime Reference: - (1) http://www.openarchives.org/OAI/openarchivesprotocol.html#Dates - (2) http://www.w3.org/TR/NOTE-datetime This function works only with dates complying with the ""Complete date plus hours, minutes and seconds"" profile of ISO 8601 defined by (2), and linked from (1). Eg: 1994-11-05T13:15:30Z """""" date_struct = datetime.strptime(date_str, input_fmt) date_struct += timedelta(hours=get_dst(date_struct)) date_struct -= timedelta(seconds=time.timezone) return strftime(fmt, date_struct)" 4392,"def njsd_all(network, ref, query, file, verbose=True): """"""Compute transcriptome-wide nJSD between reference and query expression profiles. Attribute: network (str): File path to a network file. ref (str): File path to a reference expression file. query (str): File path to a query expression file. 
"""""" graph, gene_set_total = util.parse_network(network) ref_gene_expression_dict = util.parse_gene_expression(ref, mean=True) query_gene_expression_dict = util.parse_gene_expression(query, mean=False) maximally_ambiguous_gene_experession_dict = util.get_maximally_ambiguous_network(query_gene_expression_dict) gene_set_present = set(query_gene_expression_dict.keys()) with open(file, 'w') as outFile: print('nJSD_NT', 'nJSD_TA', 'tITH', sep='\t', file=outFile) normal_to_tumor_njsd = entropy.njsd(network=graph, ref_gene_expression_dict=ref_gene_expression_dict, query_gene_expression_dict=query_gene_expression_dict, gene_set=gene_set_present) tumor_to_ambiguous_njsd = entropy.njsd(network=graph, ref_gene_expression_dict=maximally_ambiguous_gene_experession_dict, query_gene_expression_dict=query_gene_expression_dict, gene_set=gene_set_present) tITH = normal_to_tumor_njsd / (normal_to_tumor_njsd + tumor_to_ambiguous_njsd) with open(file, 'a') as outFile: print(normal_to_tumor_njsd, tumor_to_ambiguous_njsd, tITH, sep='\t', file=outFile) return normal_to_tumor_njsd / (normal_to_tumor_njsd + tumor_to_ambiguous_njsd)" 4393,"def njsd_geneset(network, ref, query, gene_set, file, verbose=True): """"""Compute gene set-specified nJSD between reference and query expression profiles. Attribute; network (str): File path to a network file. ref (str): File path to a reference expression file. query (str): File path to a query expression file. geneset (str): File path to a gene set file. """""" graph, gene_set_total = util.parse_network(network) ref_gene_expression_dict = util.parse_gene_expression(ref, mean=True) query_gene_expression_dict = util.parse_gene_expression(query, mean=False) group_gene_set_dict = util.parse_gene_set(gene_set) maximally_ambiguous_gene_experession_dict = util.get_maximally_ambiguous_network(query_gene_expression_dict) gene_set_present = set(query_gene_expression_dict.keys()) with open(file, 'w') as outFile: print('Gene_set_ID', 'nJSD_NT', 'nJSD_TA', 'tITH', sep='\t', file=outFile) for group, gene_set in group_gene_set_dict.items(): gene_set_to_be_analyzed = gene_set.intersection(gene_set_present) # If no genes are available for the group, just ignore it. if len(gene_set_to_be_analyzed) == 0: logger.warning('%s has no genes available for analysis. Ignoring the group.' % group) continue # If every gene has a single neighbor, just ignore it. if all([graph.degree(gene) == 1 for gene in gene_set_to_be_analyzed]): logger.warning('%s has no genes with enough neighbors. Ignoring the group.' % group) continue normal_to_tumor_njsd = entropy.njsd(network=graph, ref_gene_expression_dict=ref_gene_expression_dict, query_gene_expression_dict=query_gene_expression_dict, gene_set=gene_set) tumor_to_ambiguous_njsd = entropy.njsd(network=graph, ref_gene_expression_dict=maximally_ambiguous_gene_experession_dict, query_gene_expression_dict=query_gene_expression_dict, gene_set=gene_set) tITH = normal_to_tumor_njsd / (normal_to_tumor_njsd + tumor_to_ambiguous_njsd) with open(file, 'a') as outFile: print(group, normal_to_tumor_njsd, tumor_to_ambiguous_njsd, tITH, sep='\t', file=outFile)" 4394,"def create_collection(cls, collection, **kwargs): """"""Create Collection Create a new Collection This method makes a synchronous HTTP request by default. 
To make an asynchronous HTTP request, please pass async=True >>> thread = api.create_collection(collection, async=True) >>> result = thread.get() :param async bool :param Collection collection: Attributes of collection to create (required) :return: Collection If the method is called asynchronously, returns the request thread. """""" kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._create_collection_with_http_info(collection, **kwargs) else: (data) = cls._create_collection_with_http_info(collection, **kwargs) return data" 4395,"def delete_collection_by_id(cls, collection_id, **kwargs): """"""Delete Collection Delete an instance of Collection by its ID. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.delete_collection_by_id(collection_id, async=True) >>> result = thread.get() :param async bool :param str collection_id: ID of collection to delete. (required) :return: None If the method is called asynchronously, returns the request thread. """""" kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._delete_collection_by_id_with_http_info(collection_id, **kwargs) else: (data) = cls._delete_collection_by_id_with_http_info(collection_id, **kwargs) return data" 4396,"def get_collection_by_id(cls, collection_id, **kwargs): """"""Find Collection Return single instance of Collection by its ID. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.get_collection_by_id(collection_id, async=True) >>> result = thread.get() :param async bool :param str collection_id: ID of collection to return (required) :return: Collection If the method is called asynchronously, returns the request thread. """""" kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._get_collection_by_id_with_http_info(collection_id, **kwargs) else: (data) = cls._get_collection_by_id_with_http_info(collection_id, **kwargs) return data" 4397,"def list_all_collections(cls, **kwargs): """"""List Collections Return a list of Collections This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.list_all_collections(async=True) >>> result = thread.get() :param async bool :param int page: page number :param int size: page size :param str sort: page order :return: page[Collection] If the method is called asynchronously, returns the request thread. """""" kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._list_all_collections_with_http_info(**kwargs) else: (data) = cls._list_all_collections_with_http_info(**kwargs) return data" 4398,"def replace_collection_by_id(cls, collection_id, collection, **kwargs): """"""Replace Collection Replace all attributes of Collection This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.replace_collection_by_id(collection_id, collection, async=True) >>> result = thread.get() :param async bool :param str collection_id: ID of collection to replace (required) :param Collection collection: Attributes of collection to replace (required) :return: Collection If the method is called asynchronously, returns the request thread. 
"""""" kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._replace_collection_by_id_with_http_info(collection_id, collection, **kwargs) else: (data) = cls._replace_collection_by_id_with_http_info(collection_id, collection, **kwargs) return data" 4399,"def update_collection_by_id(cls, collection_id, collection, **kwargs): """"""Update Collection Update attributes of Collection This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.update_collection_by_id(collection_id, collection, async=True) >>> result = thread.get() :param async bool :param str collection_id: ID of collection to update. (required) :param Collection collection: Attributes of collection to update. (required) :return: Collection If the method is called asynchronously, returns the request thread. """""" kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._update_collection_by_id_with_http_info(collection_id, collection, **kwargs) else: (data) = cls._update_collection_by_id_with_http_info(collection_id, collection, **kwargs) return data" 4400,"def setupModule( self): """""" *The setupModule method* **Return:** - ``log`` -- a logger - ``dbConn`` -- a database connection to a test database (details from yaml settings file) - ``pathToInputDir`` -- path to modules own test input directory - ``pathToOutputDir`` -- path to modules own test output directory """""" import pymysql as ms ## VARIABLES ## logging.config.dictConfig(yaml.load(self.loggerConfig)) log = logging.getLogger(__name__) connDict = yaml.load(self.dbConfig) dbConn = ms.connect( host=connDict['host'], user=connDict['user'], passwd=connDict['password'], db=connDict['db'], use_unicode=True, charset='utf8', local_infile=1, client_flag=ms.constants.CLIENT.MULTI_STATEMENTS, connect_timeout=3600 ) dbConn.autocommit(True) return log, dbConn, self.pathToInputDir, self.pathToOutputDir" 4401,"def _handle_call(self, actual_call, stubbed_call): """"""Extends Stub call handling behavior to be callable by default."""""" self._actual_calls.append(actual_call) use_call = stubbed_call or actual_call return use_call.return_value" 4402,"def formatted_args(self): """"""Format call arguments as a string. This is used to make test failure messages more helpful by referring to calls using a string that matches how they were, or should have been called. >>> call = Call('arg1', 'arg2', kwarg='kwarg') >>> call.formatted_args ""('arg1', 'arg2', kwarg='kwarg')"" """""" arg_reprs = list(map(repr, self.args)) kwarg_reprs = ['%s=%s' % (k, repr(v)) for k, v in self.kwargs.items()] return '(%s)' % ', '.join(arg_reprs + kwarg_reprs)" 4403,"def passing(self, *args, **kwargs): """"""Assign expected call args/kwargs to this call. Returns `self` for the common case of chaining a call to `Call.returns` >>> Call().passing('foo', bar='baz') """""" self.args = args self.kwargs = kwargs return self" 4404,"def check(self): """""" Check if data and third party tools are available :raises: RuntimeError """""" #for path in self.path.values(): # if not os.path.exists(path): # raise RuntimeError(""File '{}' is missing"".format(path)) for tool in ('cd-hit', 'prank', 'hmmbuild', 'hmmpress', 'hmmscan', 'phmmer', 'mafft', 'meme'): if not self.pathfinder.exists(tool): raise RuntimeError(""Dependency {} is missing"".format(tool))" 4405,"def generate_non_rabs(self): """""" Shrink the non-Rab DB size by reducing sequence redundancy. 
"""""" logging.info('Building non-Rab DB') run_cmd([self.pathfinder['cd-hit'], '-i', self.path['non_rab_db'], '-o', self.output['non_rab_db'], '-d', '100', '-c', str(config['param']['non_rab_db_identity_threshold']), '-g', '1', '-T', self.cpu]) os.remove(self.output['non_rab_db'] + '.clstr')" 4406,"def parse_duration(duration): """"""Attepmts to parse an ISO8601 formatted ``duration``. Returns a ``datetime.timedelta`` object. """""" duration = str(duration).upper().strip() elements = ELEMENTS.copy() for pattern in (SIMPLE_DURATION, COMBINED_DURATION): if pattern.match(duration): found = pattern.match(duration).groupdict() del found['time'] elements.update(dict((k, int(v or 0)) for k, v in found.items())) return datetime.timedelta(days=(elements['days'] + _months_to_days(elements['months']) + _years_to_days(elements['years'])), hours=elements['hours'], minutes=elements['minutes'], seconds=elements['seconds']) return ParseError()" 4407,"def skin_details(skin_id, lang=""en""): """"""This resource returns details about a single skin. :param skin_id: The skin to query for. :param lang: The language to display the texts in. The response is an object with at least the following properties. Note that the availability of some properties depends on the type of item the skin applies to. skin_id (number): The skin id. name (string): The name of the skin. type (string): The type of item the skin applies to. One of ``Armor``, ``Back`` or ``Weapon``. flags (list): Skin flags. Currently known skin flags are ``ShowInWardrobe``, ``HideIfLocked`` and ``NoCost``. restrictions (list): Race restrictions: ``Asura``, ``Charr``, ``Human``, ``Norn`` and ``Sylvari``. icon_file_id (string): The icon file id to be used with the render service. icon_file_signature (string): The icon file signature to be used with the render service. """""" params = {""skin_id"": skin_id, ""lang"": lang} cache_name = ""skin_details.%(skin_id)s.%(lang)s.json"" % params return get_cached(""skin_details.json"", cache_name, params=params)" 4408,"def bubble_to_dot(bblfile:str, dotfile:str=None, render:bool=False, oriented:bool=False): """"""Write in dotfile a graph equivalent to those depicted in bubble file"""""" tree = BubbleTree.from_bubble_file(bblfile, oriented=bool(oriented)) return tree_to_dot(tree, dotfile, render=render)" 4409,"def bubble_to_gexf(bblfile:str, gexffile:str=None, oriented:bool=False): """"""Write in bblfile a graph equivalent to those depicted in bubble file"""""" tree = BubbleTree.from_bubble_file(bblfile, oriented=bool(oriented)) gexf_converter.tree_to_file(tree, gexffile) return gexffile" 4410,"def bubble_to_js(bblfile:str, jsdir:str=None, oriented:bool=False, **style): """"""Write in jsdir a graph equivalent to those depicted in bubble file"""""" js_converter.bubble_to_dir(bblfile, jsdir, oriented=bool(oriented), **style) return jsdir" 4411,"def tree_to_dot(tree:BubbleTree, dotfile:str=None, render:bool=False): """"""Write in dotfile a graph equivalent to those depicted in bubble file See http://graphviz.readthedocs.io/en/latest/examples.html#cluster-py for graphviz API """""" graph = tree_to_graph(tree) path = None if dotfile: # first save the dot file. path = graph.save(dotfile) if render: # secondly, show it. # As the dot file is known by the Graph object, # it will be placed around the dot file. graph.view() return path" 4412,"def tree_to_graph(bbltree:BubbleTree) -> Graph or Digraph: """"""Compute as a graphviz.Graph instance the given graph. 
If given BubbleTree instance is oriented, returned value is a graphviz.Digraph. See http://graphviz.readthedocs.io/en/latest/examples.html#cluster-py for graphviz API """""" GraphObject = Digraph if bbltree.oriented else Graph def create(name:str): """"""Return a graphviz graph figurating a powernode"""""" ret = GraphObject('cluster_' + name) # dirty hack to get links between clusters: add a blank node inside # so the subgraph don't take it's name directly, but the blank node do. # ret.body.append('label = ""{}""'.format(name)) # replaced by: ret.node(name, style='invis', shape='point') # ret.body.append('style=plaintext') ret.body.append('color=lightgrey') ret.body.append('label=""""') ret.body.append('shape=ellipse') ret.body.append('penwidth=2') ret.body.append('pencolor=black') return ret nodes = frozenset(bbltree.nodes()) subgraphs = {} # build for each powernode the associated subgraph, and add its successors for powernode in bbltree.powernodes(): if powernode not in subgraphs: subgraphs[powernode] = create(powernode) for succ in bbltree.inclusions[powernode]: if succ not in subgraphs: if succ not in nodes: subgraphs[succ] = create(succ) else: subgraphs[powernode].node(succ) # add to Graph instances the Graph of successors as subgraphs for powernode, succs in bbltree.inclusions.items(): for succ in succs: if succ not in nodes: subgraphs[powernode].subgraph(subgraphs[succ]) # build the final graph by adding to it subgraphs of roots graph = GraphObject('graph', graph_attr={'compound': 'true'}) for root in bbltree.roots: if root in subgraphs: graph.subgraph(subgraphs[root]) # add the edges to the final graph for source, targets in bbltree.edges.items(): for target in targets: if source <= target: attrs = {} if source not in nodes: attrs.update({'ltail': 'cluster_' + source}) if target not in nodes: attrs.update({'lhead': 'cluster_' + target}) graph.edge(source, target, **attrs) # print(graph) # debug line # graph.view() # debug line return graph" 4413,"def fill(self, term_dict, terms): # type: (Dict[int, Set[Type[Rule]]], Any) -> None """""" Fill first row of the structure witch nonterminal directly rewritable to terminal. :param term_dict: Dictionary of rules directly rewritable to terminal. Key is hash of terminal, value is set of rules with key terminal at the right side. :param terms: Input sequence of terminal. """""" for i in range(len(terms)): t = terms[i] self._field[0][i] += term_dict[hash(t)]" 4414,"def rules(self, x, y): # type: (int, int) -> List[Type[Rule]] """""" Get rules at specific position in the structure. :param x: X coordinate :param y: Y coordinate :return: List of rules """""" return [r for r in self._field[y][x]]" 4415,"def positions(self, x, y): # type: (int, int) -> List[(Point, Point)] """""" Get all positions, that can be combined to get word parsed at specified position. :param x: X coordinate. :param y: Y coordinate. :return: List of tuples with two Point instances. """""" return [(Point(x, v), Point(x + 1 + v, y - 1 - v)) for v in range(y)]" 4416,"def put(self, x, y, rules): # type: (int, int, List[PlaceItem]) -> None """""" Set possible rules at specific position. :param x: X coordinate. :param y: Y coordinate. :param rules: Value to set. """""" self._field[y][x] = rules" 4417,"def froze_it(cls): """""" Decorator to prevent from creating attributes in the object ouside __init__(). This decorator must be applied to the final class (doesn't work if a decorated class is inherited). 
Yoann's answer at http://stackoverflow.com/questions/3603502 """""" cls._frozen = False def frozensetattr(self, key, value): if self._frozen and not hasattr(self, key): raise AttributeError(""Attribute '{}' of class '{}' does not exist!"" .format(key, cls.__name__)) else: object.__setattr__(self, key, value) def init_decorator(func): @wraps(func) def wrapper(self, *args, **kwargs): func(self, *args, **kwargs) self._frozen = True return wrapper cls.__setattr__ = frozensetattr cls.__init__ = init_decorator(cls.__init__) return cls" 4418,"def one_liner_str(self): """"""Returns string (supposed to be) shorter than str() and not contain newline"""""" assert self.less_attrs is not None, ""Forgot to set attrs class variable"" s_format = ""{}={}"" s = ""; "".join([s_format.format(x, self.__getattribute__(x)) for x in self.less_attrs]) return s" 4419,"def to_dict(self): """"""Returns OrderedDict whose keys are self.attrs"""""" ret = OrderedDict() for attrname in self.attrs: ret[attrname] = self.__getattribute__(attrname) return ret" 4420,"def to_list(self): """"""Returns list containing values of attributes listed in self.attrs"""""" ret = OrderedDict() for attrname in self.attrs: ret[attrname] = self.__getattribute__(attrname) return ret" 4421,"def uniq(pipe): ''' this works like bash's uniq command where the generator only iterates if the next value is not the previous ''' pipe = iter(pipe) previous = next(pipe) yield previous for i in pipe: if i is not previous: previous = i yield i" 4422,"def chunks_generator(iterable, count_items_in_chunk): """""" Be careful! This generator cannot be traversed twice. :param iterable: :param count_items_in_chunk: :return: """""" iterator = iter(iterable) for first in iterator: # stops when iterator is depleted def chunk(): # construct generator for next chunk yield first # yield element from for loop for more in islice(iterator, count_items_in_chunk - 1): yield more # yield more elements from the iterator yield chunk()" 4423,"def chunks(list_, count_items_in_chunk): """""" Split a list (l) into chunks of n elements each. :param list_: :param count_items_in_chunk: :return: """""" for i in range(0, len(list_), count_items_in_chunk): yield list_[i:i + count_items_in_chunk]" 4424,"def pretty_json(obj): """""" Represent an object as a pretty-formatted JSON string. :param obj: :return: """""" return json.dumps(obj, sort_keys=True, indent=4, separators=(',', ': '), ensure_ascii=False)" 4425,"def decode_jwt(input_text, secure_key): """""" Decode a string using the secret key. :param input_text: source string :param secure_key: secret key :return: """""" if input_text is None: return None encoded = (input_text.split("":"")[1]).encode('utf-8') decoded = jwt.decode(encoded, secure_key) return decoded['sub']" 4426,"def send_request(url, method, data, args, params, headers, cookies, timeout, is_json, verify_cert): """""" Forge and send HTTP request. """""" ## Parse url args for p in args: url = url.replace(':' + p, str(args[p])) try: if data: if is_json: headers['Content-Type'] = 'application/json' data = json.dumps(data) request = requests.Request( method.upper(), url, data=data, params=params, headers=headers, cookies=cookies ) else: request = requests.Request( method.upper(), url, params=params, headers=headers, cookies=cookies ) ## Prepare and send HTTP request.
session = requests.Session() session.verify = verify_cert r = session.send(request.prepare(), timeout=timeout) session.close() except requests.exceptions.Timeout: return { 'data': {}, 'cookies': CookieJar(), 'content_type': '', 'status': 0, 'is_json': False, 'timeout': True } try: content_type = r.headers.get('Content-Type', 'application/json') response = r.json() isjson = True except json.decoder.JSONDecodeError: content_type = r.headers.get('Content-Type', 'text/html') response = r.text isjson = False return { 'data': response, 'cookies': r.cookies, 'content_type': content_type, 'status': r.status_code, 'is_json': isjson, 'timeout': False }" 4427,"def neighbors(self) -> List['Node']: """""" The list of neighbors of the node. """""" self._load_neighbors() return [edge.source if edge.source != self else edge.target for edge in self._neighbors.values()]" 4428,"def add_neighbor(self, edge: ""Edge"") -> None: """""" Adds a new neighbor to the node. Arguments: edge (Edge): The edge that would connect this node with its neighbor. """""" if edge is None or (edge.source != self and edge.target != self): return if edge.source == self: other: Node = edge.target elif edge.target == self: other: Node = edge.source else: raise ValueError(""Tried to add a neighbor with an invalid edge."") edge_key: Tuple(int, int) = edge.key # The graph is considered undirected, check neighbor existence accordingly. if self._neighbors.get(edge_key) or self._neighbors.get((edge_key[1], edge_key[0])): return # The neighbor is already added. self._neighbors[edge_key] = edge self.dispatch_event(NeighborAddedEvent(other))" 4429,"def _load_neighbors(self) -> None: """""" Loads all neighbors of the node from the local database and from the external data source if needed. """""" if not self.are_neighbors_cached: self._load_neighbors_from_external_source() db: GraphDatabaseInterface = self._graph.database db_node: DBNode = db.Node.find_by_name(self.name) db_node.are_neighbors_cached = True db.session.commit() self.are_neighbors_cached = True if not self._are_neighbors_loaded: self._load_neighbors_from_database()" 4430,"def _load_neighbors_from_database(self) -> None: """""" Loads the neighbors of the node from the local database. """""" self._are_neighbors_loaded = True graph: Graph = self._graph neighbors: List[DBNode] = graph.database.Node.find_by_name(self.name).neighbors nodes: NodeList = graph.nodes for db_node in neighbors: graph.add_node(db_node.name, db_node.external_id) neighbor: Node = nodes.get_node_by_name(db_node.name) graph.add_edge(self, neighbor, 1, False)" 4431,"def key(self) -> Tuple[int, int]: """""" The unique identifier of the edge consisting of the indexes of its source and target nodes. """""" return self._source.index, self._target.index" 4432,"def add_node_by_name(self, node_name: str, external_id: Optional[str] = None) -> None: """""" Adds a new node to the graph if it doesn't exist. Arguments: node_name (str): The name of the node to add. external_id (Optional[str]): The external ID of the node. """""" if node_name is None: return node_name = node_name.strip() if len(node_name) == 0: return node: Node = self.get_node_by_name(node_name, external_id=external_id) if node is None: self._internal_add_node(node_name=node_name, external_id=external_id, are_neighbors_cached=False, add_to_cache=True)" 4433,"def get_node(self, index: int) -> Optional[Node]: """""" Returns the node with the given index if such a node currently exists in the node list. Arguments: index (int): The index of the queried node. 
Returns: The node with the given index if such a node currently exists in the node list, `None` otherwise. """""" return self._nodes.get(index)" 4434,"def get_node_by_name(self, node_name: str, can_validate_and_load: bool = False, external_id: Optional[str] = None) -> Optional[Node]: """""" Returns the node with the given name if it exists either in the graph or in its database cache or `None` otherwise. Arguments: node_name (str): The name of the node to return. can_validate_and_load (bool): Whether `self._graph.get_authentic_node_name(node_name)` can be called to validate the node name and add the node to the graph if the node name is valid. external_id (Optional[str]): An optional external ID that is used only if there no node with the given name in the graph or in the cache and `can_validate_and_load` is `True`. Returns: The node with the given name if it exists either in the graph or in its database cache, `None` otherwise. """""" node: Node = self._node_name_map.get(node_name) if node is not None: return node db_node: DBNode = self._graph.database.Node.find_by_name(node_name) if db_node is None: if can_validate_and_load: node_name = self._graph.get_authentic_node_name(node_name) if node_name is not None: node = self._node_name_map.get(node_name) if node is not None: return node db_node = self._graph.database.Node.find_by_name(node_name) if db_node is None: self._internal_add_node(node_name=node_name, external_id=external_id, are_neighbors_cached=False, add_to_cache=True) else: self._internal_add_node(node_name=db_node.name, external_id=db_node.external_id, are_neighbors_cached=db_node.are_neighbors_cached, add_to_cache=False) else: return None else: self._internal_add_node(node_name=db_node.name, external_id=db_node.external_id, are_neighbors_cached=db_node.are_neighbors_cached, add_to_cache=False) node = self._node_name_map.get(node_name) # Trying to load the cached neighbors of the created node from the database could # cause a very-very-very deep recursion, so don't even think about doing it here. return node" 4435,"def _internal_add_node(self, node_name: str, external_id: Optional[str] = None, are_neighbors_cached: bool = False, add_to_cache: bool = False) -> None: """""" Adds a node with the given name to the graph without checking whether it already exists or not. Arguments: node_name (str): The name of the node to add. external_id (Optional[str]): The external ID of the node. are_neighbors_cached (bool): Whether the neighbors of the node have already been cached. add_to_cache (bool): Whether the node should also be created in the local cache. """""" index: int = len(self) node: Node = self._create_node(index, node_name, external_id) node.are_neighbors_cached = are_neighbors_cached self._nodes[index] = node self._node_name_map[node_name] = node if add_to_cache: db: GraphDatabaseInterface = self._graph.database db_node: DBNode = db.Node.find_by_name(node.name) if db_node is None: db_node = db.Node(node.name, node.external_id) db_node.are_neighbors_cached = False db.session.add(db_node) db.session.commit()" 4436,"def edge_list(self) -> List[Edge]: """""" The ordered list of edges in the container. """""" return [edge for edge in sorted(self._edges.values(), key=attrgetter(""key""))]" 4437,"def add_edge(self, source: Node, target: Node, weight: float = 1, save_to_cache: bool = True) -> None: """""" Adds an edge to the edge list that will connect the specified nodes. Arguments: source (Node): The source node of the edge. target (Node): The target node of the edge. 
weight (float): The weight of the created edge. save_to_cache (bool): Whether the edge should be saved to the local database. """""" if not isinstance(source, Node): raise TypeError(""Invalid source: expected Node instance, got {}."".format(source)) if not isinstance(target, Node): raise TypeError(""Invalid target: expected Node instance, got {}."".format(target)) if source.index == target.index or\ self.get_edge_by_index(source.index, target.index) is not None: return self._edges[(source.index, target.index)] = Edge(source, target, weight) if save_to_cache: should_commit: bool = False database: GraphDatabaseInterface = self._graph.database db_edge: DBEdge = database.Edge.find_by_name(source.name, target.name) if db_edge is None: database.session.add(database.Edge(source.name, target.name, weight)) should_commit = True elif db_edge.weight != weight: db_edge.weight = weight should_commit = True if should_commit: database.session.commit()" 4438,"def get_edge(self, source: Node, target: Node) -> Optional[Edge]: """""" Returns the edge connection the given nodes if such an edge exists. Arguments: source (Node): One of the endpoints of the queried edge. target (Node): The other endpoint of the queried edge. Returns: Returns the edge connection the given nodes or `None` if no such node exists. """""" return self.get_edge_by_index(source.index, target.index)" 4439,"def get_edge_by_index(self, source_index: int, target_index: int) -> Optional[Edge]: """""" Returns the edge connecting the nodes with the specified indices if such an edge exists. Arguments: source_index (int): The index of one of the endpoints of queried edge. target_index (int): The index of the other endpoint of the queried edge. Returns: The edge connecting the nodes with the specified indices or `None` if no such node exists. """""" edge = self._edges.get((source_index, target_index)) if edge is not None: return edge return self._edges.get((target_index, source_index))" 4440,"def get_edge_by_name(self, source_name: str, target_name: str) -> Optional[Edge]: """""" Returns the edge connecting the nodes with the specified names if such an edge exists. Arguments: source_name (str): The name of one of the endpoints of queried edge. target_name (str): The name of the other endpoint of the queried edge. Returns: The edge connecting the nodes with the specified names or `None` if no such node exists. """""" nodes: NodeList = self._graph.nodes source: Optional[Node] = nodes.get_node_by_name(source_name) if source is None: return None target: Optional[Node] = nodes.get_node_by_name(target_name) if target is None: return None return self.get_edge_by_index(source.index, target.index)" 4441,"def add_edge(self, source: Node, target: Node, weight: float = 1, save_to_cache: bool = True) -> None: """""" Adds an edge between the specified nodes of the graph. Arguments: source (Node): The source node of the edge to add. target (Node): The target node of the edge to add. weight (float): The weight of the edge. save_to_cache (bool): Whether the edge should be saved to the local database. This argument is necessary (and `False`) when we load edges from the local cache. """""" if self._edges.get_edge(source, target) is not None: return self._edges.add_edge( source=source, target=target, weight=weight, save_to_cache=save_to_cache )" 4442,"def add_edge_by_index(self, source_index: int, target_index: int, weight: float, save_to_cache: bool = True) -> None: """""" Adds an edge between the nodes with the specified indices to the graph. 
Arguments: source_index (int): The index of the source node of the edge to add. target_index (int): The index of the target node of the edge to add. weight (float): The weight of the edge. save_to_cache (bool): Whether the edge should be saved to the local database. This argument is necessary (and `False`) when we load edges from the local cache. """""" source: Node = self._nodes.get_node(source_index) target: Node = self._nodes.get_node(target_index) if source is None or target is None: return self.add_edge( source=source, target=target, weight=weight, save_to_cache=save_to_cache )" 4443,"def add_node(self, node_name: str, external_id: Optional[str] = None) -> None: """""" Adds the node with the given name to the graph. Arguments: node_name (str): The name of the node to add to the graph. external_id (Optional[str]): The external ID of the node. """""" self._nodes.add_node_by_name(node_name, external_id)" 4444,"def get_authentic_node_name(self, node_name: str) -> Optional[str]: """""" Returns the exact, authentic node name for the given node name if a node corresponding to the given name exists in the graph (maybe not locally yet) or `None` otherwise. By default, this method checks whether a node with the given name exists locally in the graph and return `node_name` if it does or `None` otherwise. In `Graph` extensions that are used by applications where the user can enter potentially incorrect node names, this method should be overridden to improve usability. Arguments: node_name (str): The node name to return the authentic node name for. Returns: The authentic name of the node corresponding to the given node name or `None` if no such node exists. """""" node: Node = self._nodes.get_node_by_name(node_name) return node.name if node is not None else None" 4445,"def beforeSummaryReport(self, event): '''Output profiling results''' self.prof.disable() stats = pstats.Stats(self.prof, stream=event.stream).sort_stats( self.sort) event.stream.writeln(nose2.util.ln('Profiling results')) stats.print_stats() if self.pfile: stats.dump_stats(self.pfile) if self.cachegrind: visualize(self.prof.getstats())" 4446,"def separate(text): '''Takes text and separates it into a list of words''' alphabet = 'abcdefghijklmnopqrstuvwxyz' words = text.split() standardwords = [] for word in words: newstr = '' for char in word: if char in alphabet or char in alphabet.upper(): newstr += char if newstr != '': standardwords.append(newstr) return map(lambda x: x.lower(),standardwords)" 4447,"def eliminate_repeats(text): '''Returns a list of words that occur in the text. 
Eliminates stopwords.''' bannedwords = read_file('stopwords.txt') alphabet = 'abcdefghijklmnopqrstuvwxyz' words = text.split() standardwords = [] for word in words: newstr = '' for char in word: if char in alphabet or char in alphabet.upper(): newstr += char if newstr not in standardwords and newstr != '' and newstr not in bannedwords: standardwords.append(newstr) return map(lambda x: x.lower(),standardwords)" 4448,"def wordcount(text): '''Returns the count of the words in a file.''' bannedwords = read_file('stopwords.txt') wordcount = {} separated = separate(text) for word in separated: if word not in bannedwords: if not wordcount.has_key(word): wordcount[word] = 1 else: wordcount[word] += 1 return wordcount" 4449,"def tuplecount(text): '''Changes a dictionary into a list of tuples.''' worddict = wordcount(text) countlist = [] for key in worddict.keys(): countlist.append((key,worddict[key])) countlist = list(reversed(sorted(countlist,key = lambda x: x[1]))) return countlist" 4450,"def add_log_error(self, x, flag_also_show=False, E=None): """"""Delegates to parent form"""""" self.parent_form.add_log_error(x, flag_also_show, E)" 4451,"def add_log(self, x, flag_also_show=False): """"""Delegates to parent form"""""" self.parent_form.add_log(x, flag_also_show)" 4452,"def get_file_md5(filename): """"""Get a file's MD5"""""" if os.path.exists(filename): blocksize = 65536 try: hasher = hashlib.md5() except BaseException: hasher = hashlib.new('md5', usedForSecurity=False) with open(filename, 'rb') as afile: buf = afile.read(blocksize) while len(buf) > 0: # pylint: disable=len-as-condition hasher.update(buf) buf = afile.read(blocksize) return hasher.hexdigest() return ''" 4453,"def get_md5(string): """"""Get a string's MD5"""""" try: hasher = hashlib.md5() except BaseException: hasher = hashlib.new('md5', usedForSecurity=False) hasher.update(string) return hasher.hexdigest()" 4454,"def deploy_signature(source, dest, user=None, group=None): """"""Deploy a signature fole"""""" move(source, dest) os.chmod(dest, 0644) if user and group: try: uid = pwd.getpwnam(user).pw_uid gid = grp.getgrnam(group).gr_gid os.chown(dest, uid, gid) except (KeyError, OSError): pass" 4455,"def get_local_version(sigdir, sig): """"""Get the local version of a signature"""""" version = None filename = os.path.join(sigdir, '%s.cvd' % sig) if os.path.exists(filename): cmd = ['sigtool', '-i', filename] sigtool = Popen(cmd, stdout=PIPE, stderr=PIPE) while True: line = sigtool.stdout.readline() if line and line.startswith('Version:'): version = line.split()[1] break if not line: break sigtool.wait() return version" 4456,"def verify_sigfile(sigdir, sig): """"""Verify a signature file"""""" cmd = ['sigtool', '-i', '%s/%s.cvd' % (sigdir, sig)] sigtool = Popen(cmd, stdout=PIPE, stderr=PIPE) ret_val = sigtool.wait() return ret_val == 0" 4457,"def check_download(obj, *args, **kwargs): """"""Verify a download"""""" version = args[0] workdir = args[1] signame = args[2] if version: local_version = get_local_version(workdir, signame) if not verify_sigfile(workdir, signame) or version != local_version: error(""[-] \033[91mFailed to verify signature: %s from: %s\033[0m"" % (signame, obj.url)) raise ValueError('Failed to verify signature: %s' % signame)" 4458,"def download_sig(opts, sig, version=None): """"""Download signature from hostname"""""" code = None downloaded = False useagent = 'ClamAV/0.101.1 (OS: linux-gnu, ARCH: x86_64, CPU: x86_64)' manager = PoolManager( headers=make_headers(user_agent=useagent), cert_reqs='CERT_REQUIRED', 
ca_certs=certifi.where(), timeout=Timeout(connect=10.0, read=60.0) ) if version: path = '/%s.cvd' % sig filename = os.path.join(opts.workdir, '%s.cvd' % sig) else: path = '/%s.cdiff' % sig filename = os.path.join(opts.workdir, '%s.cdiff' % sig) try: req = manager.request('GET', 'http://%s%s' % (opts.hostname, path)) except BaseException as msg: error(""Request error: %s"" % msg) data = req.data code = req.status if req.status == 200: with open(filename, 'w') as handle: handle.write(data) downloaded = os.path.exists(filename) return downloaded, code" 4459,"def get_record(opts): """"""Get record"""""" count = 1 for passno in range(1, 5): count = passno info(""[+] \033[92mQuerying TXT record:\033[0m %s pass: %s"" % (opts.txtrecord, passno)) record = get_txt_record(opts.txtrecord) if record: info(""=> Query returned: %s"" % record) break else: info(""=> Txt record query failed, sleeping 5 secs"") time.sleep(5) if not record: error(""=> Txt record query failed after %d tries"" % count) sys.exit(3) return record" 4460,"def copy_sig(sig, opts, isdiff): """"""Deploy a sig"""""" info(""[+] \033[92mDeploying signature:\033[0m %s"" % sig) if isdiff: sourcefile = os.path.join(opts.workdir, '%s.cdiff' % sig) destfile = os.path.join(opts.mirrordir, '%s.cdiff' % sig) else: sourcefile = os.path.join(opts.workdir, '%s.cvd' % sig) destfile = os.path.join(opts.mirrordir, '%s.cvd' % sig) deploy_signature(sourcefile, destfile, opts.user, opts.group) info(""=> Deployed signature: %s"" % sig)" 4461,"def update_sig(queue): """"""update signature"""""" while True: options, sign, vers = queue.get() info(""[+] \033[92mChecking signature version:\033[0m %s"" % sign) localver = get_local_version(options.mirrordir, sign) remotever = vers[sign] if localver is None or (localver and int(localver) < int(remotever)): info(""=> Update required local: %s => remote: %s"" % (localver, remotever)) info(""=> Downloading signature: %s"" % sign) status, code = download_sig(options, sign, remotever) if status: info(""=> Downloaded signature: %s"" % sign) copy_sig(sign, options, 0) else: if code == 404: error(""=> \033[91mSignature:\033[0m %s not found"" % sign) error(""=> \033[91mDownload failed:\033[0m %s code: %d"" % (sign, code)) else: info( ""=> No update required L: %s => R: %s"" % (localver, remotever)) queue.task_done()" 4462,"def update_diff(opts, sig): """"""Update diff"""""" for _ in range(1, 6): info(""[+] \033[92mDownloading cdiff:\033[0m %s"" % sig) status, code = download_sig(opts, sig) if status: info(""=> Downloaded cdiff: %s"" % sig) copy_sig(sig, opts, 1) else: if code == 404: error(""=> \033[91mSignature:\033[0m %s not found"" % sig) error(""=> \033[91mDownload failed:\033[0m %s code: %d"" % (sig, code))" 4463,"def create_dns_file(opts, record): """"""Create the DNS record file"""""" info(""[+] \033[92mUpdating dns.txt file\033[0m"") filename = os.path.join(opts.mirrordir, 'dns.txt') localmd5 = get_file_md5(filename) remotemd5 = get_md5(record) if localmd5 != remotemd5: create_file(filename, record) info(""=> dns.txt file updated"") else: info(""=> No update required L: %s => R: %s"" % (localmd5, remotemd5))" 4464,"def download_diffs(queue): """"""Download the cdiff files"""""" while True: options, signature_type, localver, remotever = queue.get() for num in range(int(localver), int(remotever) + 1): sig_diff = '%s-%d' % (signature_type, num) filename = os.path.join(options.mirrordir, '%s.cdiff' % sig_diff) if not os.path.exists(filename): update_diff(options, sig_diff) queue.task_done()" 4465,"def work(options): 
""""""The work functions"""""" # pylint: disable=too-many-locals record = get_record(options) _, mainv, dailyv, _, _, _, safebrowsingv, bytecodev = record.split(':') versions = {'main': mainv, 'daily': dailyv, 'safebrowsing': safebrowsingv, 'bytecode': bytecodev} dqueue = Queue(maxsize=0) dqueue_workers = 3 info(""[+] \033[92mStarting workers\033[0m"") for index in range(dqueue_workers): info(""=> Starting diff download worker: %d"" % (index + 1)) worker = Thread(target=download_diffs, args=(dqueue,)) worker.setDaemon(True) worker.start() mqueue = Queue(maxsize=0) mqueue_workers = 4 for index in range(mqueue_workers): info(""=> Starting signature download worker: %d"" % (index + 1)) worker = Thread(target=update_sig, args=(mqueue,)) worker.setDaemon(True) worker.start() for signature_type in ['main', 'daily', 'bytecode', 'safebrowsing']: if signature_type in ['daily', 'bytecode', 'safebrowsing']: # cdiff downloads localver = get_local_version(options.mirrordir, signature_type) remotever = versions[signature_type] if localver is not None: dqueue.put( ( options, signature_type, localver, remotever ) ) mqueue.put((options, signature_type, versions)) info(""=> Waiting on workers to complete tasks"") dqueue.join() mqueue.join() info(""=> Workers done processing queues"") create_dns_file(options, record) sys.exit(0)" 4466,"def main(): """"""Main entry point"""""" parser = OptionParser() parser.add_option('-a', '--hostname', help='ClamAV source server hostname', dest='hostname', type='str', default='db.de.clamav.net') parser.add_option('-r', '--text-record', help='ClamAV Updates TXT record', dest='txtrecord', type='str', default='current.cvd.clamav.net') parser.add_option('-w', '--work-directory', help='Working directory', dest='workdir', type='str', default='/var/spool/clamav-mirror') parser.add_option('-d', '--mirror-directory', help='The mirror directory', dest='mirrordir', type='str', default='/srv/www/clamav') parser.add_option('-u', '--user', help='Change file owner to this user', dest='user', type='str', default='nginx') parser.add_option('-g', '--group', help='Change file group to this group', dest='group', type='str', default='nginx') parser.add_option('-l', '--locks-directory', help='Lock files directory', dest='lockdir', type='str', default='/var/lock/subsys') parser.add_option('-v', '--verbose', help='Display verbose output', dest='verbose', action='store_true', default=False) options, _ = parser.parse_args() try: lockfile = os.path.join(options.lockdir, 'clamavmirror') with open(lockfile, 'w+') as lock: fcntl.lockf(lock, fcntl.LOCK_EX | fcntl.LOCK_NB) work(options) except IOError: info(""=> Another instance is already running"") sys.exit(254)" 4467,"def copy_resource(src, dest): """""" To copy package data to destination """""" package_name = ""yass"" dest = (dest + ""/"" + os.path.basename(src)).rstrip(""/"") if pkg_resources.resource_isdir(package_name, src): if not os.path.isdir(dest): os.makedirs(dest) for res in pkg_resources.resource_listdir(__name__, src): copy_resource(src + ""/"" + res, dest) else: if not os.path.isfile(dest) \ and os.path.splitext(src)[1] not in ["".pyc""]: with open(dest, ""wb"") as f: f.write(pkg_resources.resource_string(__name__, src)) else: print(""File exists: %s "" % dest)" 4468,"def publish(endpoint, purge_files, rebuild_manifest, skip_upload): """"""Publish the site"""""" print(""Publishing site to %s ..."" % endpoint.upper()) yass = Yass(CWD) target = endpoint.lower() sitename = yass.sitename if not sitename: raise ValueError(""Missing site 
name"") endpoint = yass.config.get(""hosting.%s"" % target) if not endpoint: raise ValueError(""%s endpoint is missing in the config"" % target.upper()) if target == ""s3"": p = publisher.S3Website(sitename=sitename, aws_access_key_id=endpoint.get(""aws_access_key_id""), aws_secret_access_key=endpoint.get(""aws_secret_access_key""), region=endpoint.get(""aws_region"")) if not p.website_exists: print("">>>"") print(""Setting S3 site..."") if p.create_website() is True: # Need to give it enough time to create it # Should be a one time thing time.sleep(10) p.create_www_website() print(""New bucket created: %s"" % p.sitename) if rebuild_manifest: print("">>>"") print(""Rebuilding site's manifest..."") p.create_manifest_from_s3_files() if purge_files is True or endpoint.get(""purge_files"") is True: print("">>>"") print(""Purging files..."") exclude_files = endpoint.get(""purge_exclude_files"", []) p.purge_files(exclude_files=exclude_files) if not skip_upload: print("">>>"") print(""Uploading your site..."") p.upload(yass.build_dir) else: print("">>>"") print(""WARNING: files upload was skipped because of the use of --skip-upload"") print("""") print(""Yass! Your site has been successfully published to: "") print(p.website_endpoint_url) footer()" 4469,"def setup_dns(endpoint): """"""Setup site domain to route to static site"""""" print(""Setting up DNS..."") yass = Yass(CWD) target = endpoint.lower() sitename = yass.sitename if not sitename: raise ValueError(""Missing site name"") endpoint = yass.config.get(""hosting.%s"" % target) if not endpoint: raise ValueError( ""%s endpoint is missing in the hosting config"" % target.upper()) if target == ""s3"": p = publisher.S3Website(sitename=sitename, aws_access_key_id=endpoint.get(""aws_access_key_id""), aws_secret_access_key=endpoint.get(""aws_secret_access_key""), region=endpoint.get(""aws_region"")) print(""Setting AWS Route53 for: %s ..."" % p.sitename) p.setup_dns() print("""") print(""Yass! 
Route53 setup successfully!"") print(""You can now visit the site at :"") print(p.sitename_endpoint) footer()" 4470,"def create_site(sitename): """"""Create a new site directory and init Yass"""""" sitepath = os.path.join(CWD, sitename) if os.path.isdir(sitepath): print(""Site directory '%s' exists already!"" % sitename) else: print(""Creating site: %s..."" % sitename) os.makedirs(sitepath) copy_resource(""skel/"", sitepath) stamp_yass_current_version(sitepath) print(""Site created successfully!"") print(""CD into '%s' and run 'yass serve' to view the site"" % sitename) footer()" 4471,"def init(): """"""Initialize Yass in the current directory """""" yass_conf = os.path.join(CWD, ""yass.yml"") if os.path.isfile(yass_conf): print(""::ALERT::"") print(""It seems like Yass is already initialized here."") print(""If it's a mistake, delete 'yass.yml' in this directory"") else: print(""Init Yass in %s ..."" % CWD) copy_resource(""skel/"", CWD) stamp_yass_current_version(CWD) print(""Yass init successfully!"") print(""Run 'yass serve' to view the site"") footer()" 4472,"def create_page(pagename): """""" Create a new page Omit the extension, it will create it as .jade file """""" page = pagename.lstrip(""/"").rstrip(""/"") _, _ext = os.path.splitext(pagename) # If the file doesn't have an extension, we'll just create one if not _ext or _ext == """": page += "".jade"" if not page.endswith(PAGE_FORMAT): error(""Can't create '%s'"" % page) print(""Invalid filename format"") print(""Filename must be in: '%s'"" % "" | "".join(PAGE_FORMAT)) else: engine = Yass(CWD) markup = ""jade"" if page.endswith("".md""): markup = ""md"" if page.endswith("".html""): markup = ""html"" dest_file = os.path.join(engine.pages_dir, page) dest_dir = os.path.dirname(dest_file) content = TPL_HEADER content += TPL_BODY[markup] if os.path.isfile(dest_file): error(""File exists already"") print(""Location: %s"" % dest_file) else: if not os.path.isdir(dest_dir): os.makedirs(dest_dir) with open(dest_file, ""w"") as f: f.write(content) print(""New page created: '%s'"" % page) print(""Location: %s"" % dest_file) footer()" 4473,"def serve(port, no_livereload, open_url): """"""Serve the site """""" engine = Yass(CWD) if not port: port = engine.config.get(""local_server.port"", 8000) if no_livereload is None: no_livereload = True if engine.config.get(""local_server.livereload"") is False else False if open_url is None: open_url = False if engine.config.get(""local_server.open_url"") is False else True print(""Serving at %s"" % port) print(""Livereload is %s"" % (""OFF"" if no_livereload else ""ON"")) def build_static(): engine.build_static() def build_pages(): engine.build_pages() engine.build() server = Server() if no_livereload is False: server.watch(engine.static_dir + ""/"", build_static) server.watch(engine.pages_dir + ""/"", build_pages) server.watch(engine.templates_dir + ""/"", build_pages) server.watch(engine.data_dir + ""/"", build_pages) server.serve(open_url_delay=open_url, port=port, root=engine.build_dir)" 4474,"def get_map_location(self): """"""Get the location of the player, converted to world coordinates. :return: a tuple (x, y, z). 
"""""" map_data = self.get_map() (bounds_e, bounds_n), (bounds_w, bounds_s) = map_data[""continent_rect""] (map_e, map_n), (map_w, map_s) = map_data[""map_rect""] assert bounds_w < bounds_e assert bounds_n < bounds_s assert map_w < map_e assert map_n < map_s meters_to_inches = 39.3701 x, y, z = self.fAvatarPosition map_x = bounds_w + ((x * meters_to_inches - map_w) / (map_e - map_w) * (bounds_e - bounds_w)) map_y = bounds_n + ((-z * meters_to_inches - map_n) / (map_s - map_n) * (bounds_s - bounds_n)) map_z = y * meters_to_inches return map_x, map_y, map_z" 4475,"def CreateVertices(self, points): """""" Returns a dictionary object with keys that are 2tuples represnting a point. """""" gr = digraph() for z, x, Q in points: node = (z, x, Q) gr.add_nodes([node]) return gr" 4476,"def CreateDirectedEdges(self, points, gr, layer_width): """""" Take each key (ie. point) in the graph and for that point create an edge to every point downstream of it where the weight of the edge is the tuple (distance, angle) """""" for z0, x0, Q0 in points: for z1, x1, Q1 in points: dz = z1 - z0 # no fabs because we check arrow direction if dz > 0.0: # make sure arrow in right direction if dz - layer_width < distance_threshold: # only adjacents dx = math.fabs(x1 - x0) if dx > 5 * bar_width: continue # Weights are negative to in order to use shortest path # algorithms on the graph. weight = -1 * math.hypot(dz, dx) edge = ((z0, x0, Q0), (z1, x1, Q1)) gr.add_edge(edge, wt=weight) # Ensure that it is already transitively reduced assert len(critical.transitive_edges(gr)) == 0 return gr" 4477,"def GetFarthestNode(self, gr, node): """"""node is start node"""""" # Remember: weights are negative distance = minmax.shortest_path_bellman_ford(gr, node)[1] # Find the farthest node, which is end of track min_key = None for key, value in distance.iteritems(): if min_key is None or value < distance[min_key]: min_key = key return min_key" 4478,"def on_success(self, fn, *args, **kwargs): """""" Call the given callback if or when the connected deferred succeeds. """""" self._callbacks.append((fn, args, kwargs)) result = self._resulted_in if result is not _NOTHING_YET: self._succeed(result=result)" 4479,"def _succeed(self, result): """""" Fire the success chain. """""" for fn, args, kwargs in self._callbacks: fn(result, *args, **kwargs) self._resulted_in = result" 4480,"def random_name(num_surnames=2): """""" Returns a random person name Arguments: num_surnames -- number of surnames """""" a = [] # Prefix if random.random() < _PROB_PREF: a.append(_prefixes[random.randint(0, len(_prefixes) - 1)]) # Forename a.append(_forenames[random.randint(0, len(_forenames) - 1)]) # Surnames for i in range(num_surnames): a.append(_surnames[random.randint(0, len(_surnames) - 1)]) # Suffix if random.random() < _PROB_SUFF: a.append(_suffixes[random.randint(0, len(_suffixes) - 1)]) return "" "".join(a)" 4481,"def create_free_shipping_coupon(cls, free_shipping_coupon, **kwargs): """"""Create FreeShippingCoupon Create a new FreeShippingCoupon This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.create_free_shipping_coupon(free_shipping_coupon, async=True) >>> result = thread.get() :param async bool :param FreeShippingCoupon free_shipping_coupon: Attributes of freeShippingCoupon to create (required) :return: FreeShippingCoupon If the method is called asynchronously, returns the request thread. 
"""""" kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._create_free_shipping_coupon_with_http_info(free_shipping_coupon, **kwargs) else: (data) = cls._create_free_shipping_coupon_with_http_info(free_shipping_coupon, **kwargs) return data" 4482,"def delete_free_shipping_coupon_by_id(cls, free_shipping_coupon_id, **kwargs): """"""Delete FreeShippingCoupon Delete an instance of FreeShippingCoupon by its ID. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.delete_free_shipping_coupon_by_id(free_shipping_coupon_id, async=True) >>> result = thread.get() :param async bool :param str free_shipping_coupon_id: ID of freeShippingCoupon to delete. (required) :return: None If the method is called asynchronously, returns the request thread. """""" kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._delete_free_shipping_coupon_by_id_with_http_info(free_shipping_coupon_id, **kwargs) else: (data) = cls._delete_free_shipping_coupon_by_id_with_http_info(free_shipping_coupon_id, **kwargs) return data" 4483,"def get_free_shipping_coupon_by_id(cls, free_shipping_coupon_id, **kwargs): """"""Find FreeShippingCoupon Return single instance of FreeShippingCoupon by its ID. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.get_free_shipping_coupon_by_id(free_shipping_coupon_id, async=True) >>> result = thread.get() :param async bool :param str free_shipping_coupon_id: ID of freeShippingCoupon to return (required) :return: FreeShippingCoupon If the method is called asynchronously, returns the request thread. """""" kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._get_free_shipping_coupon_by_id_with_http_info(free_shipping_coupon_id, **kwargs) else: (data) = cls._get_free_shipping_coupon_by_id_with_http_info(free_shipping_coupon_id, **kwargs) return data" 4484,"def list_all_free_shipping_coupons(cls, **kwargs): """"""List FreeShippingCoupons Return a list of FreeShippingCoupons This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.list_all_free_shipping_coupons(async=True) >>> result = thread.get() :param async bool :param int page: page number :param int size: page size :param str sort: page order :return: page[FreeShippingCoupon] If the method is called asynchronously, returns the request thread. """""" kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._list_all_free_shipping_coupons_with_http_info(**kwargs) else: (data) = cls._list_all_free_shipping_coupons_with_http_info(**kwargs) return data" 4485,"def replace_free_shipping_coupon_by_id(cls, free_shipping_coupon_id, free_shipping_coupon, **kwargs): """"""Replace FreeShippingCoupon Replace all attributes of FreeShippingCoupon This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.replace_free_shipping_coupon_by_id(free_shipping_coupon_id, free_shipping_coupon, async=True) >>> result = thread.get() :param async bool :param str free_shipping_coupon_id: ID of freeShippingCoupon to replace (required) :param FreeShippingCoupon free_shipping_coupon: Attributes of freeShippingCoupon to replace (required) :return: FreeShippingCoupon If the method is called asynchronously, returns the request thread. 
"""""" kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._replace_free_shipping_coupon_by_id_with_http_info(free_shipping_coupon_id, free_shipping_coupon, **kwargs) else: (data) = cls._replace_free_shipping_coupon_by_id_with_http_info(free_shipping_coupon_id, free_shipping_coupon, **kwargs) return data" 4486,"def update_free_shipping_coupon_by_id(cls, free_shipping_coupon_id, free_shipping_coupon, **kwargs): """"""Update FreeShippingCoupon Update attributes of FreeShippingCoupon This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.update_free_shipping_coupon_by_id(free_shipping_coupon_id, free_shipping_coupon, async=True) >>> result = thread.get() :param async bool :param str free_shipping_coupon_id: ID of freeShippingCoupon to update. (required) :param FreeShippingCoupon free_shipping_coupon: Attributes of freeShippingCoupon to update. (required) :return: FreeShippingCoupon If the method is called asynchronously, returns the request thread. """""" kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._update_free_shipping_coupon_by_id_with_http_info(free_shipping_coupon_id, free_shipping_coupon, **kwargs) else: (data) = cls._update_free_shipping_coupon_by_id_with_http_info(free_shipping_coupon_id, free_shipping_coupon, **kwargs) return data" 4487,"def fetch_config(filename): """"""Fetch the Configuration schema information Finds the schema file, loads the file and reads the JSON, then converts to a dictionary that is returned """""" # This trick gets the directory of *this* file Configuration.py thus # allowing to find the schema files relative to this file. dir_name = get_source_dir() # Append json filename = os.path.join('json', filename) fileobj = open(os.path.join(dir_name, filename), 'r') my_dict = json.loads(fileobj.read()) return my_dict" 4488,"def populate_args_level(schema, parser): """"""Use a schema to populate a command line argument parser"""""" for key, value in schema['properties'].iteritems(): if key == 'name': continue arg = '--%s' % key desc = value['description'] if 'type' in value: if value['type'] == 'string': if 'enum' in value: parser.add_argument(arg, help=desc, type=str, choices=value['enum']) else: parser.add_argument(arg, help=desc, type=str) elif value['type'] == 'number': parser.add_argument(arg, help=desc, type=float) elif value['type'] == 'integer': parser.add_argument(arg, help=desc, type=int) elif str(value['type']) == 'array': assert value['minItems'] == value['maxItems'] if value['items']['type'] != 'number': raise NotImplementedError(""Only float arrays work"") parser.add_argument(arg, help=desc, type=float, nargs=value['maxItems'], metavar='N') elif value['type'] == 'object': #group = parser.add_argument_group(key, value['description']) #populate_args_level(value, group) pass" 4489,"def set_json(self, config_json): """"""Permanently set the JSON configuration Unable to call twice."""""" if self.configuration_dict is not None: raise RuntimeError(""Can only set configuration once"", self.configuration_dict) schema = fetch_config('ConfigurationSchema.json') validictory.validate(config_json, schema) config_json['name'] = self.name config_json['run_number'] = self.run config_json['src_dir'] = get_source_dir() config_json['data_dir'] = get_data_dir() config_json['log_dir'] = get_log_dir() self.configuration_dict = config_json" 4490,"def bulk_send(self, topic, kmsgs, timeout=60): """""" Send a batch of messages :param str topic: a 
kafka topic :param ksr.transport.Message kmsg: Message to serialize :param int timeout: Timeout in seconds (not used in proto producer) :return: Execution result :rtype: kser.result.Result """""" result = Result(uuid=kmsg.uuid) try: self.client.produce( topic, self._onmessage(kmsg).dumps().encode(""UTF-8"") ) result.stdout = ""Message {}[{}] sent"".format( kmsg.entrypoint, kmsg.uuid ) self.client.flush() except Exception as exc: result = Result.from_exception(exc, kmsg.uuid) finally: if result.retcode < 300: return self._onsuccess(kmsg=kmsg, result=result) else: return self._onerror(kmsg=kmsg, result=result)" 4492,"def file_strip_ext( afile, skip_version=False, only_known_extensions=False, allow_subformat=True): """""" Strip the extension from a filename in the best way. >>> file_strip_ext(""foo.tar.gz"") 'foo' >>> file_strip_ext(""foo.buz.gz"") 'foo.buz' >>> file_strip_ext(""foo.buz"") 'foo' >>> file_strip_ext(""foo.buz"", only_known_extensions=True) 'foo.buz' >>> file_strip_ext(""foo.buz;1"", skip_version=False, ... only_known_extensions=True) 'foo.buz;1' >>> file_strip_ext(""foo.gif;icon"") 'foo' >>> file_strip_ext(""foo.gif;icon"", only_known_extensions=True, ... allow_subformat=False) 'foo.gif;icon' @param afile: the path/name of a file. @type afile: string @param skip_version: whether to skip a trailing "";version"". @type skip_version: bool @param only_known_extensions: whether to strip out only known extensions or to consider as extension anything that follows a dot. @type only_known_extensions: bool @param allow_subformat: whether to consider also subformats as part of the extension. @type allow_subformat: bool @return: the name/path without the extension (and version). @rtype: string """""" import os afile = afile.split(';') if len(afile) > 1 and allow_subformat and not afile[-1].isdigit(): afile = afile[0:-1] if len(afile) > 1 and skip_version and afile[-1].isdigit(): afile = afile[0:-1] afile = ';'.join(afile) nextfile = _extensions.sub('', afile) if nextfile == afile and not only_known_extensions: nextfile = os.path.splitext(afile)[0] while nextfile != afile: afile = nextfile nextfile = _extensions.sub('', afile) return nextfile" 4493,"def guess_extension(amimetype, normalize=False): """""" Tries to guess the extension for a mimetype. @param amimetype: name of a mimetype @type amimetype: string @return: the extension @rtype: string """""" ext = _mimes.guess_extension(amimetype) if ext and normalize: # Normalize some common magic mis-interpretations ext = {'.asc': '.txt', '.obj': '.bin'}.get(ext, ext) from invenio.legacy.bibdocfile.api_normalizer import normalize_format return normalize_format(ext) return ext" 4494,"def get_magic_guesses(fullpath): """""" Return all the possible guesses from the magic library about the content of the file. 
@param fullpath: location of the file @type fullpath: string @return: guesses about content of the file @rtype: tuple """""" if CFG_HAS_MAGIC == 1: magic_cookies = _get_magic_cookies() magic_result = [] for key in magic_cookies.keys(): magic_result.append(magic_cookies[key].file(fullpath)) return tuple(magic_result) elif CFG_HAS_MAGIC == 2: magic_result = [] for key in ({'mime': False, 'mime_encoding': False}, {'mime': True, 'mime_encoding': False}, {'mime': False, 'mime_encoding': True}): magic_result.append(_magic_wrapper(fullpath, **key)) return tuple(magic_result)" 4495,"def mimes(self): """""" Returns extended MimeTypes. """""" _mimes = MimeTypes(strict=False) _mimes.suffix_map.update({'.tbz2': '.tar.bz2'}) _mimes.encodings_map.update({'.bz2': 'bzip2'}) if cfg['CFG_BIBDOCFILE_ADDITIONAL_KNOWN_MIMETYPES']: for key, value in iteritems( cfg['CFG_BIBDOCFILE_ADDITIONAL_KNOWN_MIMETYPES']): _mimes.add_type(key, value) del key, value return _mimes" 4496,"def extensions(self): """""" Generate the regular expression to match all the known extensions. @return: the regular expression. @rtype: regular expression object """""" _tmp_extensions = self.mimes.encodings_map.keys() + \ self.mimes.suffix_map.keys() + \ self.mimes.types_map[1].keys() + \ cfg['CFG_BIBDOCFILE_ADDITIONAL_KNOWN_FILE_EXTENSIONS'] extensions = [] for ext in _tmp_extensions: if ext.startswith('.'): extensions.append(ext) else: extensions.append('.' + ext) extensions.sort() extensions.reverse() extensions = set([ext.lower() for ext in extensions]) extensions = '\\' + '$|\\'.join(extensions) + '$' extensions = extensions.replace('+', '\\+') return re.compile(extensions, re.I)" 4497,"def __deserialize(self, data, klass): """"""Deserializes dict, list, str into an object. :param data: dict, list or str. :param klass: class literal, or string of class name. :return: object. """""" if data is None: return None if type(klass) == str: from tradenity.resources.paging import Page if klass.startswith('page['): sub_kls = re.match('page\[(.*)\]', klass).group(1) return Page([self.__deserialize(sub_data, sub_kls) for sub_data in data[""items""]], self.__deserialize_page_info(data[""__meta""])) if klass.startswith('list['): sub_kls = re.match('list\[(.*)\]', klass).group(1) return [self.__deserialize(sub_data, sub_kls) for sub_data in data] if klass.startswith('dict('): sub_kls = re.match('dict\(([^,]*), (.*)\)', klass).group(2) return {k: self.__deserialize(v, sub_kls) for k, v in six.iteritems(data)} # convert str to class if klass in self.NATIVE_TYPES_MAPPING: klass = self.NATIVE_TYPES_MAPPING[klass] else: klass = getattr(tradenity.resources, klass) if klass in self.PRIMITIVE_TYPES: return self.__deserialize_primitive(data, klass) elif klass == object: return self.__deserialize_object(data) elif klass == datetime.date: return self.__deserialize_date(data) elif klass == datetime.datetime: return self.__deserialize_datatime(data) else: return self.__deserialize_model(data, klass)" 4498,"def update_params_for_auth(self, headers, querys, auth_settings): """"""Updates header and query params based on authentication setting. :param headers: Header parameters dict to be updated. :param querys: Query parameters tuple list to be updated. :param auth_settings: Authentication setting identifiers list. 
"""""" if self.auth_token_holder.token is not None: headers[Configuration.AUTH_TOKEN_HEADER_NAME] = self.auth_token_holder.token else: headers['Authorization'] = self.configuration.get_basic_auth_token()" 4499,"def start(self, service): """""" Start the service, catching and logging exceptions """""" try: map(self.start_class, service.depends) if service.is_running(): return if service in self.failed: log.warning(""%s previously failed to start"", service) return service.start() except Exception: log.exception(""Unable to start service %s"", service) self.failed.add(service)" 4500,"def start_class(self, class_): """""" Start all services of a given class. If this manager doesn't already have a service of that class, it constructs one and starts it. """""" matches = filter(lambda svc: isinstance(svc, class_), self) if not matches: svc = class_() self.register(svc) matches = [svc] map(self.start, matches) return matches" 4501,"def stop_class(self, class_): ""Stop all services of a given class"" matches = filter(lambda svc: isinstance(svc, class_), self) map(self.stop, matches)" 4502,"def log_root(self): """""" Find a directory suitable for writing log files. It uses sys.prefix to use a path relative to the root. If sys.prefix is /usr, it's the system Python, so use /var/log. """""" var_log = ( os.path.join(sys.prefix, 'var', 'log') .replace('/usr/var', '/var') ) if not os.path.isdir(var_log): os.makedirs(var_log) return var_log" 4503,"def _get_more_data(self, file, timeout): """""" Return data from the file, if available. If no data is received by the timeout, then raise RuntimeError. """""" timeout = datetime.timedelta(seconds=timeout) timer = Stopwatch() while timer.split() < timeout: data = file.read() if data: return data raise RuntimeError(""Timeout"")" 4504,"def _run_env(self): """""" Augment the current environment providing the PYTHONUSERBASE. """""" env = dict(os.environ) env.update( getattr(self, 'env', {}), PYTHONUSERBASE=self.env_path, PIP_USER=""1"", ) self._disable_venv(env) return env" 4505,"def _disable_venv(self, env): """""" Disable virtualenv and venv in the environment. """""" venv = env.pop('VIRTUAL_ENV', None) if venv: venv_path, sep, env['PATH'] = env['PATH'].partition(os.pathsep)" 4506,"def create_env(self): """""" Create a PEP-370 environment """""" root = path.Path(os.environ.get('SERVICES_ROOT', 'services')) self.env_path = (root / self.name).abspath() cmd = [ self.python, '-c', 'import site; print(site.getusersitepackages())', ] out = subprocess.check_output(cmd, env=self._run_env) site_packages = out.decode().strip() path.Path(site_packages).makedirs_p()" 4507,"def create_states_geo_zone(cls, states_geo_zone, **kwargs): """"""Create StatesGeoZone Create a new StatesGeoZone This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.create_states_geo_zone(states_geo_zone, async=True) >>> result = thread.get() :param async bool :param StatesGeoZone states_geo_zone: Attributes of statesGeoZone to create (required) :return: StatesGeoZone If the method is called asynchronously, returns the request thread. """""" kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._create_states_geo_zone_with_http_info(states_geo_zone, **kwargs) else: (data) = cls._create_states_geo_zone_with_http_info(states_geo_zone, **kwargs) return data" 4508,"def delete_states_geo_zone_by_id(cls, states_geo_zone_id, **kwargs): """"""Delete StatesGeoZone Delete an instance of StatesGeoZone by its ID. 
This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.delete_states_geo_zone_by_id(states_geo_zone_id, async=True) >>> result = thread.get() :param async bool :param str states_geo_zone_id: ID of statesGeoZone to delete. (required) :return: None If the method is called asynchronously, returns the request thread. """""" kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._delete_states_geo_zone_by_id_with_http_info(states_geo_zone_id, **kwargs) else: (data) = cls._delete_states_geo_zone_by_id_with_http_info(states_geo_zone_id, **kwargs) return data" 4509,"def get_states_geo_zone_by_id(cls, states_geo_zone_id, **kwargs): """"""Find StatesGeoZone Return single instance of StatesGeoZone by its ID. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.get_states_geo_zone_by_id(states_geo_zone_id, async=True) >>> result = thread.get() :param async bool :param str states_geo_zone_id: ID of statesGeoZone to return (required) :return: StatesGeoZone If the method is called asynchronously, returns the request thread. """""" kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._get_states_geo_zone_by_id_with_http_info(states_geo_zone_id, **kwargs) else: (data) = cls._get_states_geo_zone_by_id_with_http_info(states_geo_zone_id, **kwargs) return data" 4510,"def list_all_states_geo_zones(cls, **kwargs): """"""List StatesGeoZones Return a list of StatesGeoZones This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.list_all_states_geo_zones(async=True) >>> result = thread.get() :param async bool :param int page: page number :param int size: page size :param str sort: page order :return: page[StatesGeoZone] If the method is called asynchronously, returns the request thread. """""" kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._list_all_states_geo_zones_with_http_info(**kwargs) else: (data) = cls._list_all_states_geo_zones_with_http_info(**kwargs) return data" 4511,"def replace_states_geo_zone_by_id(cls, states_geo_zone_id, states_geo_zone, **kwargs): """"""Replace StatesGeoZone Replace all attributes of StatesGeoZone This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.replace_states_geo_zone_by_id(states_geo_zone_id, states_geo_zone, async=True) >>> result = thread.get() :param async bool :param str states_geo_zone_id: ID of statesGeoZone to replace (required) :param StatesGeoZone states_geo_zone: Attributes of statesGeoZone to replace (required) :return: StatesGeoZone If the method is called asynchronously, returns the request thread. """""" kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._replace_states_geo_zone_by_id_with_http_info(states_geo_zone_id, states_geo_zone, **kwargs) else: (data) = cls._replace_states_geo_zone_by_id_with_http_info(states_geo_zone_id, states_geo_zone, **kwargs) return data" 4512,"def update_states_geo_zone_by_id(cls, states_geo_zone_id, states_geo_zone, **kwargs): """"""Update StatesGeoZone Update attributes of StatesGeoZone This method makes a synchronous HTTP request by default. 
To make an asynchronous HTTP request, please pass async=True >>> thread = api.update_states_geo_zone_by_id(states_geo_zone_id, states_geo_zone, async=True) >>> result = thread.get() :param async bool :param str states_geo_zone_id: ID of statesGeoZone to update. (required) :param StatesGeoZone states_geo_zone: Attributes of statesGeoZone to update. (required) :return: StatesGeoZone If the method is called asynchronously, returns the request thread. """""" kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._update_states_geo_zone_by_id_with_http_info(states_geo_zone_id, states_geo_zone, **kwargs) else: (data) = cls._update_states_geo_zone_by_id_with_http_info(states_geo_zone_id, states_geo_zone, **kwargs) return data" 4513,"def compaction(self, request_compaction=False): """"""Retrieve a report on, or request compaction for this instance. :param bool request_compaction: A boolean indicating whether or not to request compaction. """""" url = self._service_url + 'compaction/' if request_compaction: response = requests.post(url, **self._instances._default_request_kwargs) else: response = requests.get(url, **self._instances._default_request_kwargs) return response.json()" 4514,"def get_authenticated_connection(self, user, passwd, db='admin', ssl=True): """"""Get an authenticated connection to this instance. :param str user: The username to use for authentication. :param str passwd: The password to use for authentication. :param str db: The name of the database to authenticate against. Defaults to ``'Admin'``. :param bool ssl: Use SSL/TLS if available for this instance. Defaults to ``True``. :raises: :py:class:`pymongo.errors.OperationFailure` if authentication fails. """""" # Attempt to establish an authenticated connection. try: connection = self.get_connection(ssl=ssl) connection[db].authenticate(user, passwd) return connection # Catch exception here for logging, then just re-raise. except pymongo.errors.OperationFailure as ex: logger.exception(ex) raise" 4515,"def shards(self, add_shard=False): """"""Get a list of shards belonging to this instance. :param bool add_shard: A boolean indicating whether to add a new shard to the specified instance. """""" url = self._service_url + 'shards/' if add_shard: response = requests.post(url, **self._instances._default_request_kwargs) else: response = requests.get(url, **self._instances._default_request_kwargs) return response.json()" 4516,"def new_relic_stats(self): """""" Get stats for this instance. 
"""""" if self._new_relic_stats is None: # if this is a sharded instance, fetch shard stats in parallel if self.type == 'mongodb_sharded': shards = [Shard(self.name, self._service_url + 'shards/', self._client, shard_doc) for shard_doc in self.shards().get('data')] fs = [] with futures.ThreadPoolExecutor(len(shards)) as executor: for shard in shards: fs.append(executor.submit(shard.get_shard_stats)) futures.wait(fs, timeout=None, return_when=futures.ALL_COMPLETED) stats_this_second = self._rollup_shard_stats_to_instance_stats( {shard.name: future.result() for (shard, future) in zip(shards, fs)}) # power nap time.sleep(1) # fetch again fs = [] with futures.ThreadPoolExecutor(len(shards)) as executor: for shard in shards: fs.append(executor.submit(shard.get_shard_stats)) futures.wait(fs, timeout=None, return_when=futures.ALL_COMPLETED) stats_next_second = self._rollup_shard_stats_to_instance_stats( {shard.name: future.result() for (shard, future) in zip(shards, fs)}) self._new_relic_stats = self._compile_new_relic_stats(stats_this_second, stats_next_second) else: # fetch stats like we did before (by hitting new_relic_stats API resource) response = requests.get('{}{}'.format(self._url, 'new-relic-stats'), **self._instances._default_request_kwargs) self._new_relic_stats = json.loads(response.content).get( 'data') if response.status_code == 200 else {} return self._new_relic_stats" 4517,"def _rollup_shard_stats_to_instance_stats(self, shard_stats): """""" roll up all shard stats to instance level stats :param shard_stats: dict of {shard_name: shard level stats} """""" instance_stats = {} opcounters_per_node = [] # aggregate replication_lag instance_stats['replication_lag'] = max(map(lambda s: s['replication_lag'], shard_stats.values())) aggregate_server_statistics = {} for shard_name, stats in shard_stats.items(): for statistic_key in stats.get('shard_stats'): if statistic_key != 'connections' and statistic_key in aggregate_server_statistics: aggregate_server_statistics[statistic_key] = util.sum_values(aggregate_server_statistics[statistic_key], stats.get('shard_stats')[statistic_key]) else: aggregate_server_statistics[statistic_key] = stats.get('shard_stats')[statistic_key] # aggregate per_node_stats into opcounters_per_node opcounters_per_node.append({shard_name: {member: node_stats['opcounters'] for member, node_stats in stats.get('per_node_stats').items()}}) instance_stats['opcounters_per_node'] = opcounters_per_node instance_stats['aggregate_server_statistics'] = aggregate_server_statistics return instance_stats" 4518,"def _compile_new_relic_stats(self, stats_this_second, stats_next_second): """""" from instance 'stats_this_second' and instance 'stats_next_second', compute some per second stats metrics and other aggregated metrics :param dict stats_this_second: :param dict stats_next_second: :return: compiled instance stats that has metrics {'opcounters_per_node_per_second': {...}, 'server_statistics_per_second': {...}, 'aggregate_server_statistics': {...}, 'replication_lag': 0.0, 'aggregate_database_statistics': {} } """""" server_statistics_per_second = {} opcounters_per_node_per_second = [] for subdoc in [""opcounters"", ""network""]: first_doc = stats_this_second['aggregate_server_statistics'][subdoc] second_doc = stats_next_second['aggregate_server_statistics'][subdoc] keys = set(first_doc.keys()) | set(second_doc.keys()) server_statistics_per_second[subdoc] = {key: int(second_doc[key]) - int(first_doc[key]) for key in keys if isinstance(first_doc[key], int)} for node1, node2 in 
zip(stats_this_second['opcounters_per_node'], stats_next_second['opcounters_per_node']): node_opcounters_per_second = {} for repl, members in node2.items(): node_opcounters_per_second[repl] = {} for member, ops in members.items(): node_opcounters_per_second[repl][member] = {} for op, count in ops.items(): node_opcounters_per_second[repl][member][op] = count - node1[repl][member][op] opcounters_per_node_per_second.append(node_opcounters_per_second) return {'opcounters_per_node_per_second': opcounters_per_node_per_second, 'server_statistics_per_second': server_statistics_per_second, 'aggregate_server_statistics': stats_next_second.get('aggregate_server_statistics'), 'replication_lag': stats_next_second.get('replication_lag'), 'aggregate_database_statistics': self.get_aggregate_database_stats()}" 4519,"def get_stepdown_window(self): """"""Get information on this instance's stepdown window."""""" url = self._service_url + 'stepdown/' response = requests.get(url, **self._instances._default_request_kwargs) return response.json()" 4520,"def set_stepdown_window(self, start, end, enabled=True, scheduled=True, weekly=True): """"""Set the stepdown window for this instance. Date times are assumed to be UTC, so use UTC date times. :param datetime.datetime start: The datetime which the stepdown window is to open. :param datetime.datetime end: The datetime which the stepdown window is to close. :param bool enabled: A boolean indicating whether or not stepdown is to be enabled. :param bool scheduled: A boolean indicating whether or not to schedule stepdown. :param bool weekly: A boolean indicating whether or not to schedule compaction weekly. """""" # Ensure a logical start and endtime is requested. if not start < end: raise TypeError('Parameter ""start"" must occur earlier in time than ""end"".') # Ensure specified window is less than a week in length. week_delta = datetime.timedelta(days=7) if not ((end - start) <= week_delta): raise TypeError('Stepdown windows can not be longer than 1 week in length.') url = self._service_url + 'stepdown/' data = { 'start': int(start.strftime('%s')), 'end': int(end.strftime('%s')), 'enabled': enabled, 'scheduled': scheduled, 'weekly': weekly, } response = requests.post( url, data=json.dumps(data), **self._instances._default_request_kwargs ) return response.json()" 4521,"def _get_connection(self, ssl): """"""Get a live connection to this instance."""""" # Use SSL/TLS if requested and available. connect_string = self.connect_string if ssl and self.ssl_connect_string: connect_string = self.ssl_connect_string return pymongo.MongoClient(connect_string)" 4522,"def get_shard_stats(self): """""" :return: get stats for this mongodb shard """""" return requests.get(self._stats_url, params={'include_stats': True}, headers={'X-Auth-Token': self._client.auth._token} ).json()['data']['stats']" 4523,"def brand(self, brand): """"""Sets the brand of this PaymentCard. :param brand: The brand of this PaymentCard. :type: str """""" allowed_values = [""visa"", ""mastercard"", ""americanExpress"", ""discover""] if brand is not None and brand not in allowed_values: raise ValueError( ""Invalid value for `brand` ({0}), must be one of {1}"" .format(brand, allowed_values) ) self._brand = brand" 4524,"def create_payment_card(cls, payment_card, **kwargs): """"""Create PaymentCard Create a new PaymentCard This method makes a synchronous HTTP request by default. 
To make an asynchronous HTTP request, please pass async=True >>> thread = api.create_payment_card(payment_card, async=True) >>> result = thread.get() :param async bool :param PaymentCard payment_card: Attributes of paymentCard to create (required) :return: PaymentCard If the method is called asynchronously, returns the request thread. """""" kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._create_payment_card_with_http_info(payment_card, **kwargs) else: (data) = cls._create_payment_card_with_http_info(payment_card, **kwargs) return data" 4525,"def delete_payment_card_by_id(cls, payment_card_id, **kwargs): """"""Delete PaymentCard Delete an instance of PaymentCard by its ID. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.delete_payment_card_by_id(payment_card_id, async=True) >>> result = thread.get() :param async bool :param str payment_card_id: ID of paymentCard to delete. (required) :return: None If the method is called asynchronously, returns the request thread. """""" kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._delete_payment_card_by_id_with_http_info(payment_card_id, **kwargs) else: (data) = cls._delete_payment_card_by_id_with_http_info(payment_card_id, **kwargs) return data" 4526,"def get_payment_card_by_id(cls, payment_card_id, **kwargs): """"""Find PaymentCard Return single instance of PaymentCard by its ID. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.get_payment_card_by_id(payment_card_id, async=True) >>> result = thread.get() :param async bool :param str payment_card_id: ID of paymentCard to return (required) :return: PaymentCard If the method is called asynchronously, returns the request thread. """""" kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._get_payment_card_by_id_with_http_info(payment_card_id, **kwargs) else: (data) = cls._get_payment_card_by_id_with_http_info(payment_card_id, **kwargs) return data" 4527,"def list_all_payment_cards(cls, **kwargs): """"""List PaymentCards Return a list of PaymentCards This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.list_all_payment_cards(async=True) >>> result = thread.get() :param async bool :param int page: page number :param int size: page size :param str sort: page order :return: page[PaymentCard] If the method is called asynchronously, returns the request thread. """""" kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._list_all_payment_cards_with_http_info(**kwargs) else: (data) = cls._list_all_payment_cards_with_http_info(**kwargs) return data" 4528,"def replace_payment_card_by_id(cls, payment_card_id, payment_card, **kwargs): """"""Replace PaymentCard Replace all attributes of PaymentCard This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.replace_payment_card_by_id(payment_card_id, payment_card, async=True) >>> result = thread.get() :param async bool :param str payment_card_id: ID of paymentCard to replace (required) :param PaymentCard payment_card: Attributes of paymentCard to replace (required) :return: PaymentCard If the method is called asynchronously, returns the request thread. 
"""""" kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._replace_payment_card_by_id_with_http_info(payment_card_id, payment_card, **kwargs) else: (data) = cls._replace_payment_card_by_id_with_http_info(payment_card_id, payment_card, **kwargs) return data" 4529,"def update_payment_card_by_id(cls, payment_card_id, payment_card, **kwargs): """"""Update PaymentCard Update attributes of PaymentCard This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.update_payment_card_by_id(payment_card_id, payment_card, async=True) >>> result = thread.get() :param async bool :param str payment_card_id: ID of paymentCard to update. (required) :param PaymentCard payment_card: Attributes of paymentCard to update. (required) :return: PaymentCard If the method is called asynchronously, returns the request thread. """""" kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._update_payment_card_by_id_with_http_info(payment_card_id, payment_card, **kwargs) else: (data) = cls._update_payment_card_by_id_with_http_info(payment_card_id, payment_card, **kwargs) return data" 4530,"def rec2csv(r, filename): """"""Export a recarray *r* to a CSV file *filename*"""""" names = r.dtype.names def translate(x): if x is None or str(x).lower == ""none"": x = """" return str(x) with open(filename, ""w"") as csv: csv.write("","".join([str(x) for x in names])+""\n"") for data in r: csv.write("","".join([translate(x) for x in data])+""\n"") #print ""Wrote CSV table %r"" % filename return filename" 4531,"def latex_quote(s): """"""Quote special characters for LaTeX. (Incomplete, currently only deals with underscores, dollar and hash.) """""" special = {'_':r'\_', '$':r'\$', '#':r'\#'} s = str(s) for char,repl in special.items(): new = s.replace(char, repl) s = new[:] return s" 4532,"def rec2latex(r, filename, empty=""""): """"""Export a recarray *r* to a LaTeX table in *filename*"""""" with open(filename, ""w"") as latex: latex.write(s_rec2latex(r, empty=empty)) return filename" 4533,"def s_rec2latex(r, empty=""""): """"""Export a recarray *r* to a LaTeX table in a string"""""" latex = """" names = r.dtype.names def translate(x): if x is None or str(x).lower == ""none"": x = empty return latex_quote(x) latex += r""\begin{tabular}{%s}"" % ("""".join([""c""]*len(names)),) + ""\n"" # simple c columns latex += r""\hline""+""\n"" latex += "" & "".join([latex_quote(x) for x in names])+r""\\""+""\n"" latex += r""\hline""+""\n"" for data in r: latex += "" & "".join([translate(x) for x in data])+r""\\""+""\n"" latex += r""\hline""+""\n"" latex += r""\end{tabular}""+""\n"" return latex" 4534,"def on(self, type): '''Decorator function''' def decorator(self, func): '''decorated functions should be written as class methods @on('join') def on_join(self, channel): print(""Joined channel %s"" % channel) ''' self._handlers[type].append(func) return func return decorator" 4535,"def tree_to_file(tree:'BubbleTree', outfile:str): """"""Compute the bubble representation of given power graph, and push it into given file."""""" with open(outfile, 'w') as fd: fd.write(tree_to_bubble(tree))" 4536,"def lines_from_tree(tree, nodes_and_set:bool=False) -> iter: """"""Yield lines of bubble describing given BubbleTree"""""" NODE = 'NODE\t{}' INCL = 'IN\t{}\t{}' EDGE = 'EDGE\t{}\t{}\t1.0' SET = 'SET\t{}' if nodes_and_set: for node in tree.nodes(): yield NODE.format(node) for node in tree.powernodes(): yield SET.format(node) for node, 
includeds in tree.inclusions.items(): for included in includeds: yield INCL.format(included, node) for node, succs in tree.edges.items(): for succ in succs: yield EDGE.format(node, succ)" 4537,"def to_python(self): """"""The string ``'True'`` (case insensitive) will be converted to ``True``, as will any positive integers. """""" if isinstance(self.data, str): return self.data.strip().lower() == 'true' if isinstance(self.data, int): return self.data > 0 return bool(self.data)" 4538,"def to_python(self): '''A :class:`datetime.datetime` object is returned.''' if self.data is None: return None # don't parse data that is already native if isinstance(self.data, datetime.datetime): return self.data elif self.use_int: return datetime.datetime.utcfromtimestamp(self.data / 1000) elif self.format is None: # parse as iso8601 return PySO8601.parse(self.data) else: return datetime.datetime.strptime(self.data, self.format)" 4539,"def get_api_call_headers(app): """""" Generates the headers for an API request. Authorization is attached here as well. :type app: metasdk.MetaApp """""" headers = { ""content-type"": ""application/json;charset=UTF-8"", ""User-Agent"": app.user_agent, } if not app.developer_settings: raise AuthError({""message"": ""Developer settings must be configured for the SDK to work correctly"", ""url"": ""https://apps.devision.io/page?a=63&p=3975""}) headers.update(app.developer_settings.get('api_headers')) return headers" 4540,"def extract_filename_from_url(log, url): """""" *get the filename from a URL.* *Will return 'untitled.html', if no filename is found.* **Key Arguments:** - ``url`` -- the url to extract filename from Returns: - ``filename`` -- the filename **Usage:** .. code-block:: python from fundamentals.download import extract_filename_from_url name = extract_filename_from_url( log=log, url=""https://en.wikipedia.org/wiki/Docstring"" ) print name # OUT: Docstring.html """""" ## > IMPORTS ## import re # EXTRACT THE FILENAME FROM THE URL try: log.debug(""extracting filename from url "" + url) reEoURL = re.compile('([\w\.]*)$') filename = reEoURL.findall(url)[0] # log.debug(filename) if(len(filename) == 0): filename = 'untitled.html' if not (re.search('\.', filename)): filename = filename + '.html' except Exception as e: filename = None # print url log.warning(""could not extract filename from url : "" + str(e) + ""\n"") return filename" 4541,"def build_from_developer_settings(api_name: str, api_version: str): """""" :param api_name: Example hello :param api_version: Example v1, v2alpha :return: ApiClient """""" developer_settings = read_developer_settings() api_host = ""http://"" + api_name + "".apis.devision.io"" return ApiClient( host=api_host, api_version=api_version, access_token=None, refresh_token=developer_settings['refreshToken'], client_id=developer_settings['clientId'], client_secret=developer_settings['clientSecret'], )" 4542,"def process_formdata(self, valuelist): """"""Join time string."""""" if valuelist: time_str = u' '.join(valuelist) try: timetuple = time.strptime(time_str, self.format) self.data = datetime.time(*timetuple[3:6]) except ValueError: self.data = None raise" 4543,"def validate_csrf_token(self, field): """"""Disable CSRF protection during testing."""""" if current_app.testing: return super(InvenioBaseForm, self).validate_csrf_token(field)" 4544,"def load_exchange_word_vectors( filename = ""database.db"", maximum_number_of_events = None ): """""" Load exchange data and return dataset.
"""""" log.info(""load word vectors of database {filename}"".format( filename = filename )) # Ensure that the database exists. if not os.path.isfile(filename): log.info(""database {filename} nonexistent"".format( filename = filename )) program.terminate() raise Exception # Access the database. database = access_database(filename = filename) # Access or create the exchanges table. table_exchanges = database[""exchanges""] # Access exchanges. table_name = ""exchanges"" # Create a datavision dataset. data = datavision.Dataset() # progress progress = shijian.Progress() progress.engage_quick_calculation_mode() number_of_entries = len(database[table_name]) index = 0 for index_entry, entry in enumerate(database[table_name].all()): if maximum_number_of_events is not None and\ index >= int(maximum_number_of_events): log.info( ""loaded maximum requested number of events "" + ""({maximum_number_of_events})\r"".format( maximum_number_of_events = maximum_number_of_events ) ) break #unique_identifier = str(entry[""id""]) utteranceWordVector = str(entry[""utteranceWordVector""]) responseWordVector = str(entry[""responseWordVector""]) if utteranceWordVector != ""None"" and responseWordVector != ""None"": index += 1 utteranceWordVector = eval(""np."" + utteranceWordVector.replace(""float32"", ""np.float32"")) responseWordVector = eval(""np."" + responseWordVector.replace(""float32"", ""np.float32"")) data.variable(index = index, name = ""utteranceWordVector"", value = utteranceWordVector) data.variable(index = index, name = ""responseWordVector"", value = responseWordVector ) #utteranceWordVector = list(eval(""np."" + utteranceWordVector.replace(""float32"", ""np.float32""))) #responseWordVector = list(eval(""np."" + responseWordVector.replace(""float32"", ""np.float32""))) #for index_component, component in enumerate(utteranceWordVector): # data.variable(index = index, name = ""uwv"" + str(index_component), value = component) #for index_component, component in enumerate(responseWordVector): # data.variable(index = index, name = ""rwv"" + str(index_component), value = component) print progress.add_datum(fraction = index_entry / number_of_entries), return data" 4545,"def access_SUSY_dataset_format_file(filename): """""" This function accesses a CSV file containing data of the form of the [SUSY dataset](https://archive.ics.uci.edu/ml/datasets/SUSY), i.e. with the first column being class labels and other columns being features. """""" # Load the CSV file to a list. with open(filename, ""rb"") as dataset_file: dataset_CSV = [row for row in csv.reader(dataset_file, delimiter = "","")] # Reorganise the data. return [ i for i in itertools.chain(*[list((element[1:], [int(float(element[0]))])) for element in dataset_CSV]) ]" 4546,"def load_HEP_data( ROOT_filename = ""output.root"", tree_name = ""nominal"", maximum_number_of_events = None ): """""" Load HEP data and return dataset. 
"""""" ROOT_file = open_ROOT_file(ROOT_filename) tree = ROOT_file.Get(tree_name) number_of_events = tree.GetEntries() data = datavision.Dataset() progress = shijian.Progress() progress.engage_quick_calculation_mode() # counters number_of_events_loaded = 0 log.info("""") index = 0 for event in tree: if maximum_number_of_events is not None and\ number_of_events_loaded >= int(maximum_number_of_events): log.info( ""loaded maximum requested number of events "" + ""({maximum_number_of_events})\r"".format( maximum_number_of_events = maximum_number_of_events ) ) break print progress.add_datum(fraction = (index + 2) / number_of_events), if select_event(event): index += 1 #event.GetReadEntry() #data.variable(index = index, name = ""eventNumber"", value = event.eventNumber) data.variable(index = index, name = ""el_1_pt"", value = event.el_pt[0]) #data.variable(index = index, name = ""el_1_eta"", value = event.el_eta[0]) #data.variable(index = index, name = ""el_1_phi"", value = event.el_phi[0]) ##data.variable(index = index, name = ""jet_1_pt"", value = event.jet_pt[0]) #data.variable(index = index, name = ""jet_1_eta"", value = event.jet_eta[0]) #data.variable(index = index, name = ""jet_1_phi"", value = event.jet_phi[0]) ##data.variable(index = index, name = ""jet_1_e"", value = event.jet_e[0]) ##data.variable(index = index, name = ""jet_2_pt"", value = event.jet_pt[1]) #data.variable(index = index, name = ""jet_2_eta"", value = event.jet_eta[1]) #data.variable(index = index, name = ""jet_2_phi"", value = event.jet_phi[1]) ##data.variable(index = index, name = ""jet_2_e"", value = event.jet_e[1]) #data.variable(index = index, name = ""nJets"", value = event.nJets) ##data.variable(index = index, name = ""nBTags"", value = event.nBTags) ##data.variable(index = index, name = ""nLjets"", value = event.nLjets) ##data.variable(index = index, name = ""ljet_1_m"", value = event.ljet_m[0]) #data.variable(index = index, name = ""met"", value = event.met_met) #data.variable(index = index, name = ""met_phi"", value = event.met_phi) #data.variable(index = index, name = ""Centrality_all"", value = event.Centrality_all) #data.variable(index = index, name = ""Mbb_MindR"", value = event.Mbb_MindR) #data.variable(index = index, name = ""ljet_tau21"", value = event.ljet_tau21), #data.variable(index = index, name = ""ljet_tau32"", value = event.ljet_tau32), #data.variable(index = index, name = ""Aplan_bjets"", value = event.Aplan_bjets), #data.variable(index = index, name = ""H4_all"", value = event.H4_all), #data.variable(index = index, name = ""NBFricoNN_6jin4bin"", value = event.NBFricoNN_6jin4bin), #data.variable(index = index, name = ""NBFricoNN_6jin3bex"", value = event.NBFricoNN_6jin3bex), #data.variable(index = index, name = ""NBFricoNN_5jex4bin"", value = event.NBFricoNN_5jex4bin), #data.variable(index = index, name = ""NBFricoNN_3jex3bex"", value = event.NBFricoNN_3jex3bex), #data.variable(index = index, name = ""NBFricoNN_4jin3bex"", value = event.NBFricoNN_4jin3bex), #data.variable(index = index, name = ""NBFricoNN_4jin4bin"", value = event.NBFricoNN_4jin4bin) number_of_events_loaded += 1 log.info("""") return data" 4547,"def select_event( event = None, selection = ""ejets"" ): """""" Select a HEP event. """""" if selection == ""ejets"": # Require single lepton. # Require >= 4 jets. 
if \ 0 < len(event.el_pt) < 2 and \ len(event.jet_pt) >= 4 and \ len(event.ljet_m) >= 1: return True else: return False" 4548,"def draw_neural_network( axes = None, left = None, right = None, bottom = None, top = None, layer_sizes = None ): """""" # abstract This function draws a neural network representation diagram using matplotilb. # arguments |*argument* |*description* | |-----------|--------------------------------------------------------------| |axes |matplotlib.axes.AxesSubplot: the axes on which to plot the | | |diagram (returned by matplotlib.pyplot.gca()) | |left |float: the position of the centers of the left nodes | |right |float: the position of the centers of the right nodes | |bottom |float: the position of the centers of the bottom nodes | |top |float: the position of the centers of the top nodes | |layer_sizes|list of integers: list of layer sizes, including input and | | |output dimensionality | # example ```Python figure = matplotlib.pyplot.figure(figsize = (12, 12)) abstraction.draw_neural_network( axes = figure.gca(), left = .1, right = .9, bottom = .1, top = .9, layer_sizes = [4, 7, 2] ) figure.savefig(""neural_network_diagram.png"") ``` """""" spacing_vertical = (top - bottom) / float(max(layer_sizes)) spacing_horizontal = (right - left) / float(len(layer_sizes) - 1) # nodes for n, layer_size in enumerate(layer_sizes): layer_top = spacing_vertical * (layer_size - 1)/2 + (top + bottom) / 2 for m in xrange(layer_size): circle = matplotlib.pyplot.Circle( ( n * spacing_horizontal + left, layer_top - m * spacing_vertical ), spacing_vertical / 4, color = ""w"", ec = ""k"", zorder = 4 ) axes.add_artist(circle) # edges for n, (layer_size_a, layer_size_b) in enumerate(zip( layer_sizes[:-1], layer_sizes[1:] )): layer_top_a =\ spacing_vertical * (layer_size_a - 1) / 2 + (top + bottom) / 2 layer_top_b =\ spacing_vertical * (layer_size_b - 1) / 2 + (top + bottom) / 2 for m in xrange(layer_size_a): for o in xrange(layer_size_b): line = matplotlib.pyplot.Line2D( [ n * spacing_horizontal + left, (n + 1) * spacing_horizontal + left ], [ layer_top_a - m * spacing_vertical, layer_top_b - o * spacing_vertical ], c = ""k"" ) axes.add_artist(line)" 4549,"def sentiment( text = None, confidence = False ): """""" This function accepts a string text input. It calculates the sentiment of the text, ""pos"" or ""neg"". By default, it returns this calculated sentiment. If selected, it returns a tuple of the calculated sentiment and the classificaton confidence. """""" try: words = text.split("" "") # Remove empty strings. words = [word for word in words if word] features = word_features(words) classification = classifier.classify(features) confidence_classification = classifier.prob_classify(features).prob(classification) except: classification = None confidence_classification = None if confidence: return ( classification, confidence_classification ) else: return classification" 4550,"def usernames( self ): """""" This function returns the list of unique usernames corresponding to the tweets stored in self. """""" try: return list(set([tweet.username for tweet in self])) except: log.error(""error -- possibly a problem with tweets stored"")" 4551,"def user_sentiments( self, username = None ): """""" This function returns a list of all sentiments of the tweets of a specified user. 
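# --- Illustrative sketch ---
# select_event() above keeps an event for the 'ejets' selection when it has a
# single electron, at least four jets and at least one large-R jet. A hedged,
# self-contained check of the same cuts on a mock event; the real function
# receives a ROOT tree entry carrying these branches.
from collections import namedtuple

MockEvent = namedtuple('MockEvent', 'el_pt jet_pt ljet_m')
event = MockEvent(el_pt=[42.0], jet_pt=[110.0, 75.0, 60.0, 41.0], ljet_m=[172.1])

passes_ejets = (0 < len(event.el_pt) < 2
                and len(event.jet_pt) >= 4
                and len(event.ljet_m) >= 1)
print(passes_ejets)   # True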
"""""" try: return [tweet.sentiment for tweet in self if tweet.username == username] except: log.error(""error -- possibly no username specified"") return None" 4552,"def user_sentiments_most_frequent( self, username = None, single_most_frequent = True ): """""" This function returns the most frequent calculated sentiments expressed in tweets of a specified user. By default, the single most frequent sentiment is returned. All sentiments with their corresponding frequencies can be returned also. """""" try: sentiment_frequencies = collections.Counter(self.user_sentiments( username = username )) if single_most_frequent: return sentiment_frequencies.most_common(1)[0][0] else: return dict(sentiment_frequencies) except: log.error(""error -- possibly no username specified"") return None" 4553,"def users_sentiments_single_most_frequent( self, usernames = None, ): """""" This function returns the single most frequent calculated sentiment expressed by all stored users or by a list of specified users as a dictionary. """""" users_sentiments_single_most_frequent = dict() if usernames is None: usernames = self.usernames() try: for username in usernames: sentiment = self.user_sentiments_most_frequent( username = username, single_most_frequent = True ) users_sentiments_single_most_frequent[username] = sentiment return users_sentiments_single_most_frequent except: log.error(""error -- possibly a problem with tweets stored"") return None" 4554,"def create_stripe_gateway(cls, stripe_gateway, **kwargs): """"""Create StripeGateway Create a new StripeGateway This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.create_stripe_gateway(stripe_gateway, async=True) >>> result = thread.get() :param async bool :param StripeGateway stripe_gateway: Attributes of stripeGateway to create (required) :return: StripeGateway If the method is called asynchronously, returns the request thread. """""" kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._create_stripe_gateway_with_http_info(stripe_gateway, **kwargs) else: (data) = cls._create_stripe_gateway_with_http_info(stripe_gateway, **kwargs) return data" 4555,"def delete_stripe_gateway_by_id(cls, stripe_gateway_id, **kwargs): """"""Delete StripeGateway Delete an instance of StripeGateway by its ID. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.delete_stripe_gateway_by_id(stripe_gateway_id, async=True) >>> result = thread.get() :param async bool :param str stripe_gateway_id: ID of stripeGateway to delete. (required) :return: None If the method is called asynchronously, returns the request thread. """""" kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._delete_stripe_gateway_by_id_with_http_info(stripe_gateway_id, **kwargs) else: (data) = cls._delete_stripe_gateway_by_id_with_http_info(stripe_gateway_id, **kwargs) return data" 4556,"def get_stripe_gateway_by_id(cls, stripe_gateway_id, **kwargs): """"""Find StripeGateway Return single instance of StripeGateway by its ID. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.get_stripe_gateway_by_id(stripe_gateway_id, async=True) >>> result = thread.get() :param async bool :param str stripe_gateway_id: ID of stripeGateway to return (required) :return: StripeGateway If the method is called asynchronously, returns the request thread. 
"""""" kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._get_stripe_gateway_by_id_with_http_info(stripe_gateway_id, **kwargs) else: (data) = cls._get_stripe_gateway_by_id_with_http_info(stripe_gateway_id, **kwargs) return data" 4557,"def list_all_stripe_gateways(cls, **kwargs): """"""List StripeGateways Return a list of StripeGateways This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.list_all_stripe_gateways(async=True) >>> result = thread.get() :param async bool :param int page: page number :param int size: page size :param str sort: page order :return: page[StripeGateway] If the method is called asynchronously, returns the request thread. """""" kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._list_all_stripe_gateways_with_http_info(**kwargs) else: (data) = cls._list_all_stripe_gateways_with_http_info(**kwargs) return data" 4558,"def replace_stripe_gateway_by_id(cls, stripe_gateway_id, stripe_gateway, **kwargs): """"""Replace StripeGateway Replace all attributes of StripeGateway This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.replace_stripe_gateway_by_id(stripe_gateway_id, stripe_gateway, async=True) >>> result = thread.get() :param async bool :param str stripe_gateway_id: ID of stripeGateway to replace (required) :param StripeGateway stripe_gateway: Attributes of stripeGateway to replace (required) :return: StripeGateway If the method is called asynchronously, returns the request thread. """""" kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._replace_stripe_gateway_by_id_with_http_info(stripe_gateway_id, stripe_gateway, **kwargs) else: (data) = cls._replace_stripe_gateway_by_id_with_http_info(stripe_gateway_id, stripe_gateway, **kwargs) return data" 4559,"def update_stripe_gateway_by_id(cls, stripe_gateway_id, stripe_gateway, **kwargs): """"""Update StripeGateway Update attributes of StripeGateway This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.update_stripe_gateway_by_id(stripe_gateway_id, stripe_gateway, async=True) >>> result = thread.get() :param async bool :param str stripe_gateway_id: ID of stripeGateway to update. (required) :param StripeGateway stripe_gateway: Attributes of stripeGateway to update. (required) :return: StripeGateway If the method is called asynchronously, returns the request thread. 
"""""" kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._update_stripe_gateway_by_id_with_http_info(stripe_gateway_id, stripe_gateway, **kwargs) else: (data) = cls._update_stripe_gateway_by_id_with_http_info(stripe_gateway_id, stripe_gateway, **kwargs) return data" 4560,"def format_underline(s, char=""="", indents=0): """""" Traces a dashed line below string Args: s: string char: indents: number of leading intenting spaces Returns: list >>> print(""\\n"".join(format_underline(""Life of João da Silva"", ""^"", 2))) Life of João da Silva ^^^^^^^^^^^^^^^^^^^^^ """""" n = len(s) ind = "" "" * indents return [""{}{}"".format(ind, s), ""{}{}"".format(ind, char*n)]" 4561,"def format_h1(s, format=""text"", indents=0): """""" Encloses string in format text Args: s: string format: string starting with ""text"", ""markdown"", or ""rest"" indents: number of leading intenting spaces Returns: list >>> print(""\\n"".join(format_h2(""Header 1"", indents=10))) Header 1 -------- >>> print(""\\n"".join(format_h2(""Header 1"", ""markdown"", 0))) ## Header 1 """""" _CHAR = ""="" if format.startswith(""text""): return format_underline(s, _CHAR, indents) elif format.startswith(""markdown""): return [""# {}"".format(s)] elif format.startswith(""rest""): return format_underline(s, _CHAR, 0)" 4562,"def format_h2(s, format=""text"", indents=0): """""" Encloses string in format text Args, Returns: see format_h1() >>> print(""\\n"".join(format_h2(""Header 2"", indents=2))) Header 2 -------- >>> print(""\\n"".join(format_h2(""Header 2"", ""markdown"", 2))) ## Header 2 """""" _CHAR = ""-"" if format.startswith(""text""): return format_underline(s, _CHAR, indents) elif format.startswith(""markdown""): return [""## {}"".format(s)] elif format.startswith(""rest""): return format_underline(s, _CHAR, 0)" 4563,"def format_h3(s, format=""text"", indents=0): """""" Encloses string in format text Args, Returns: see format_h1() """""" _CHAR = ""~"" if format.startswith(""text""): return format_underline(s, _CHAR, indents) elif format.startswith(""markdown""): return [""### {}"".format(s)] elif format.startswith(""rest""): return format_underline(s, _CHAR, 0)" 4564,"def format_h4(s, format=""text"", indents=0): """""" Encloses string in format text Args, Returns: see format_h1() """""" _CHAR = ""^"" if format.startswith(""text""): return format_underline(s, _CHAR, indents) elif format.startswith(""markdown""): return [""#### {}"".format(s)] elif format.startswith(""rest""): return format_underline(s, _CHAR, 0)" 4565,"def question(question, options, default=None): """"""Ask a question with case-insensitive options of answers Args: question: string **without** the question mark and without the options. Example: 'Commit changes' options_: string or sequence of strings. If string, options will be single-lettered. Examples: 'YNC', ['yes', 'no', 'cancel']. options are case-insensitive default: default option. If passed, default option will be shown in uppercase. Answers are case-insensitive, but options will be shown in lowercase, except for the default option. Returns: str: chosen option. Although the answer is case-insensitive, the result will be as informed in the 'options' argument. 
"""""" # Make sure options is a list options_ = [x for x in options] if default is not None and default not in options_: raise ValueError(""Default option '{}' is not in options {}."".format(default, options)) oto = ""/"".join([x.upper() if x == default else x.lower() for x in options_]) # to show ocomp = [x.lower() for x in options_] # to be used in comparison while True: ans = input(""{} ({})? "".format(question, oto)).lower() if ans == """" and default is not None: ret = default break elif ans in ocomp: ret = options_[ocomp.index(ans)] break return ret" 4566,"def yesno(question, default=None): """"""Asks a yes/no question Args: question: string **without** the question mark and without the options. Example: 'Create links' default: default option. Accepted values are 'Y', 'YES', 'N', 'NO' or lowercase versions of these valus (this argument is case-insensitive) Returns: bool: True if user answered Yes, False otherwise """""" if default is not None: if isinstance(default, bool): pass else: default_ = default.upper() if default_ not in ('Y', 'YES', 'N', 'NO'): raise RuntimeError(""Invalid default value: '{}'"".format(default)) default = default_ in ('Y', 'YES') while True: ans = input(""{} ({}/{})? "".format(question, ""Y"" if default == True else ""y"", ""N"" if default == False else ""n"")).upper() if ans == """" and default is not None: ret = default break elif ans in (""N"", ""NO""): ret = False break elif ans in (""Y"", ""YES""): ret = True break return ret" 4567,"def menu(title, options, cancel_label=""Cancel"", flag_allow_empty=False, flag_cancel=True, ch='.'): """"""Text menu. Arguments: title -- menu title, to appear at the top options -- sequence of strings cancel_label='Cancel' -- label to show at last ""zero"" option flag_allow_empty=0 -- Whether to allow empty option flag_cancel=True -- whether there is a ""0 - Cancel"" option ch=""."" -- character to use to draw frame around title Returns: option -- an integer: None; 0-Back/Cancel/etc; 1, 2, ... Adapted from irootlab menu.m"""""" num_options, flag_ok = len(options), 0 option = None # result min_allowed = 0 if flag_cancel else 1 # minimum option value allowed (if option not empty) while True: print("""") for line in format_box(title, ch): print("" ""+line) for i, s in enumerate(options): print(("" {0:d} - {1!s}"".format(i+1, s))) if flag_cancel: print(("" 0 - << (*{0!s}*)"".format(cancel_label))) try: s_option = input('? ') except KeyboardInterrupt: raise except: print("""") n_try = 0 while True: if n_try >= 10: print('You are messing up!') break if len(s_option) == 0 and flag_allow_empty: flag_ok = True break try: option = int(s_option) if min_allowed <= option <= num_options: flag_ok = True break except ValueError: print(""Invalid integer value!"") print((""Invalid option, range is [{0:d}, {1:d}]!"".format(0 if flag_cancel else 1, num_options))) n_try += 1 s_option = input(""? "") if flag_ok: break return option" 4568,"def format_box(title, ch=""*""): """""" Encloses title in a box. Result is a list >>> for line in format_box(""Today's TODO list""): ... 
print(line) ************************* *** Today's TODO list *** ************************* """""" lt = len(title) return [(ch * (lt + 8)), (ch * 3 + "" "" + title + "" "" + ch * 3), (ch * (lt + 8)) ]" 4569,"def format_progress(i, n): """"""Returns string containing a progress bar, a percentage, etc."""""" if n == 0: fraction = 0 else: fraction = float(i)/n LEN_BAR = 25 num_plus = int(round(fraction*LEN_BAR)) s_plus = '+'*num_plus s_point = '.'*(LEN_BAR-num_plus) return '[{0!s}{1!s}] {2:d}/{3:d} - {4:.1f}%'.format(s_plus, s_point, i, n, fraction*100)" 4570,"def _format_exe_info(py_len, exeinfo, format, indlevel): """"""Renders ExeInfo object in specified format"""""" ret = [] ind = "" "" * indlevel * NIND if format.startswith(""text"") else """" if format == ""markdown-list"": for si in exeinfo: ret.append("" - `{0!s}`: {1!s}"".format(si.filename, si.description)) if format == ""rest-list"": for si in exeinfo: ret.append(""* ``{0!s}``: {1!s}"".format(si.filename, si.description)) elif format == ""markdown-table"": mask = ""%-{0:d}s | %s"".format(py_len+2 ) ret.append(mask % (""Script name"", ""Purpose"")) ret.append(""-"" * (py_len + 3) + ""|"" + ""-"" * 10) for si in exeinfo: ret.append(mask % (""`{0!s}`"".format(si.filename), si.description)) elif format == ""text"": sbc = 1 # spaces between columns for si in exeinfo: ss = textwrap.wrap(si.description, 79 - py_len - sbc - indlevel*NIND) for i, s in enumerate(ss): if i == 0: filecolumn = si.filename + "" "" + (""."" * (py_len - len(si.filename))) else: filecolumn = "" "" * (py_len + 1) ret.append(""{}{}{}{}"".format(ind, filecolumn, "" ""*sbc, s)) ret.append("""") return ret" 4571,"def format_exe_info(exeinfo, format=""text"", indlevel=0): """""" Generates listing of all Python scripts available as command-line programs. Args: exeinfo -- list of ExeInfo objects format -- One of the options below: ""text"" -- generates plain text for printing at the console ""markdown-list"" -- generates MarkDown as list ""markdown-table"" -- generates MarkDown as tables ""rest-list"" -- generates reStructuredText as lists indents -- indentation level (""text"" format only) Returns: (list of strings, maximum filename size) list of strings -- can be joined with a ""\n"" maximum filename size """""" py_len = max([len(si.filename) for si in exeinfo]) sisi_gra = [si for si in exeinfo if si.flag_gui == True] sisi_cmd = [si for si in exeinfo if si.flag_gui == False] sisi_none = [si for si in exeinfo if si.flag_gui is None] def get_title(x): return format_h4(x, format, indlevel*NIND) + [""""] ret = [] if len(sisi_gra) > 0: ret.extend(get_title(""Graphical applications"")) ret.extend(_format_exe_info(py_len, sisi_gra, format, indlevel + 1)) if len(sisi_cmd) > 0: ret.extend(get_title(""Command-line tools"", )) ret.extend(_format_exe_info(py_len, sisi_cmd, format, indlevel + 1)) if len(sisi_none) > 0: ret.extend(_format_exe_info(py_len, sisi_none, format, indlevel + 1)) return ret, py_len" 4572,"def markdown_table(data, headers): """""" Creates MarkDown table. Returns list of strings Arguments: data -- [(cell00, cell01, ...), (cell10, cell11, ...), ...] headers -- sequence of strings: (header0, header1, ...) 
"""""" maxx = [max([len(x) for x in column]) for column in zip(*data)] maxx = [max(ll) for ll in zip(maxx, [len(x) for x in headers])] mask = "" | "".join([""%-{0:d}s"".format(n) for n in maxx]) ret = [mask % headers] ret.append("" | "".join([""-""*n for n in maxx])) for line in data: ret.append(mask % line) return ret" 4573,"def expand_multirow_data(data): """""" Converts multirow cells to a list of lists and informs the number of lines of each row. Returns: tuple: new_data, row_heights """""" num_cols = len(data[0]) # number of columns # calculates row heights row_heights = [] for mlrow in data: row_height = 0 for j, cell in enumerate(mlrow): row_height = max(row_height, 1 if not isinstance(cell, (list, tuple)) else len(cell)) row_heights.append(row_height) num_lines = sum(row_heights) # line != row (rows are multiline) # rebuilds table data new_data = [[""""]*num_cols for i in range(num_lines)] i0 = 0 for row_height, mlrow in zip(row_heights, data): for j, cell in enumerate(mlrow): if not isinstance(cell, (list, tuple)): cell = [cell] for incr, x in enumerate(cell): new_data[i0+incr][j] = x i0 += row_height return new_data, row_heights" 4574,"def rest_table(data, headers): """""" Creates reStructuredText table (grid format), allowing for multiline cells Arguments: data -- [((cell000, cell001, ...), (cell010, cell011, ...), ...), ...] headers -- sequence of strings: (header0, header1, ...) **Note** Tolerant to non-strings **Note** Cells may or may not be multiline >>> rest_table([[""Eric"", ""Idle""], [""Graham"", ""Chapman""], [""Terry"", ""Gilliam""]], [""Name"", ""Surname""]) """""" num_cols = len(headers) new_data, row_heights = expand_multirow_data(data) new_data = [[str(x) for x in row] for row in new_data] col_widths = [max([len(x) for x in col]) for col in zip(*new_data)] col_widths = [max(cw, len(s)) for cw, s in zip(col_widths, headers)] if any([x == 0 for x in col_widths]): raise RuntimeError(""Column widths ({}) has at least one zero"".format(col_widths)) num_lines = sum(row_heights) # line != row (rows are multiline) # horizontal lines hl0 = ""+""+""+"".join([""-""*(n+2) for n in col_widths])+""+"" hl1 = ""+""+""+"".join([""=""*(n+2) for n in col_widths])+""+"" frmtd = [""{0:{1}}"".format(x, width) for x, width in zip(headers, col_widths)] ret = [hl0, ""| ""+"" | "".join(frmtd)+"" |"", hl1] i0 = 0 for i, row_height in enumerate(row_heights): if i > 0: ret.append(hl0) for incr in range(row_height): frmtd = [""{0:{1}}"".format(x, width) for x, width in zip(new_data[i0+incr], col_widths)] ret.append(""| ""+"" | "".join(frmtd)+"" |"") i0 += row_height ret.append(hl0) return ret" 4575,"def concept_adapter(obj, request): ''' Adapter for rendering a :class:`skosprovider.skos.Concept` to json. :param skosprovider.skos.Concept obj: The concept to be rendered. 
:rtype: :class:`dict` ''' p = request.skos_registry.get_provider(obj.concept_scheme.uri) language = request.params.get('language', request.locale_name) label = obj.label(language) return { 'id': obj.id, 'type': 'concept', 'uri': obj.uri, 'label': label.label if label else None, 'concept_scheme': { 'uri': obj.concept_scheme.uri, 'labels': obj.concept_scheme.labels }, 'labels': obj.labels, 'notes': obj.notes, 'sources': obj.sources, 'narrower': _map_relations(obj.narrower, p, language), 'broader': _map_relations(obj.broader, p, language), 'related': _map_relations(obj.related, p, language), 'member_of': _map_relations(obj.member_of, p, language), 'subordinate_arrays': _map_relations(obj.subordinate_arrays, p, language), 'matches': obj.matches }" 4576,"def collection_adapter(obj, request): ''' Adapter for rendering a :class:`skosprovider.skos.Collection` to json. :param skosprovider.skos.Collection obj: The collection to be rendered. :rtype: :class:`dict` ''' p = request.skos_registry.get_provider(obj.concept_scheme.uri) language = request.params.get('language', request.locale_name) label = obj.label(language) return { 'id': obj.id, 'type': 'collection', 'uri': obj.uri, 'label': label.label if label else None, 'concept_scheme': { 'uri': obj.concept_scheme.uri, 'labels': obj.concept_scheme.labels }, 'labels': obj.labels, 'notes': obj.notes, 'sources': obj.sources, 'members': _map_relations(obj.members, p, language), 'member_of': _map_relations(obj.member_of, p, language), 'superordinates': _map_relations(obj.superordinates, p, language) }" 4577,"def _map_relations(relations, p, language='any'): ''' :param: :class:`list` relations: Relations to be mapped. These are concept or collection id's. :param: :class:`skosprovider.providers.VocabularyProvider` p: Provider to look up id's. :param string language: Language to render the relations' labels in :rtype: :class:`list` ''' ret = [] for r in relations: c = p.get_by_id(r) if c: ret.append(_map_relation(c, language)) else: log.warning( 'A relation references a concept or collection %d in provider %s that can not be found. Please check the integrity of your data.' % (r, p.get_vocabulary_id()) ) return ret" 4578,"def _map_relation(c, language='any'): """""" Map related concept or collection, leaving out the relations. :param c: the concept or collection to map :param string language: Language to render the relation's label in :rtype: :class:`dict` """""" label = c.label(language) return { 'id': c.id, 'type': c.type, 'uri': c.uri, 'label': label.label if label else None }" 4579,"def note_adapter(obj, request): ''' Adapter for rendering a :class:`skosprovider.skos.Note` to json. :param skosprovider.skos.Note obj: The note to be rendered. :rtype: :class:`dict` ''' return { 'note': obj.note, 'type': obj.type, 'language': obj.language, 'markup': obj.markup }" 4580,"def tag( log, filepath, tags=False, rating=False, wherefrom=False): """"""Add tags and ratings to your macOS files and folders **Key Arguments:** - ``log`` -- logger - ``filepath`` -- the path to the file needing tagged - ``tags`` -- comma or space-separated string, or list of tags. Use `False` to leave file tags as they are. Use """" or [] to remove tags. Default *False*. - ``rating`` -- a rating to add to the file. Use 0 to remove rating or `False` to leave file rating as it is. Default *False*. - ``wherefrom`` -- add a URL to indicate where the file come from. Use `False` to leave file location as it is. Use """" to remove location. Default *False*. 
**Return:** - None **Usage:** To add any combination of tags, rating and a source URL to a file on macOS, use the following: .. code-block:: python from fundamentals.files.tag import tag tag( log=log, filepath=""/path/to/my.file"", tags=""test,tags, fundamentals"", rating=3, wherefrom=""http://www.thespacedoctor.co.uk"" ) """""" log.debug('starting the ``tag`` function') if isinstance(tags, list): tags = ("" "").join(tags) if tags and len(tags): tags = tags.replace("","", "" "") tags = """" + tags.replace("" "", "" "").replace( "" "", "" "").replace("" "", """") + """" if tags != False: now = datetime.now() now = now.strftime(""%Y%m%dt%H%M%S%f"") tagPlist = ""/tmp/fund-%(now)s-tags.plist"" % locals() # GENERATE THE TAGS PLIST FILE try: writeFile = codecs.open( tagPlist, encoding='utf-8', mode='w') except IOError, e: message = 'could not open the file %s' % (tagPlist,) raise IOError(message) writeFile.write("""""" %(tags)s """""" % locals()) writeFile.close() # CONVERT PLIST TO BINARY cmd = """"""plutil -convert binary1 %(tagPlist)s"""""" % locals( ) p = Popen(cmd, stdout=PIPE, stderr=PIPE, shell=True) stdout, stderr = p.communicate() log.debug('output: %(stdout)s' % locals()) log.debug('output: %(stderr)s' % locals()) # ASSIGN TAGS TO FILE cmd = 'xattr -wx ""com.apple.metadata:_kMDItemUserTags"" ""`xxd -ps %(tagPlist)s`"" ""%(filepath)s""' % locals( ) p = Popen(cmd, stdout=PIPE, stderr=PIPE, shell=True) stdout, stderr = p.communicate() log.debug('output: %(stdout)s' % locals()) log.debug('output: %(stderr)s' % locals()) # DELETE PLIST os.remove(tagPlist) if rating != False: ratingsContainer = os.path.dirname(__file__) + ""/resources/ratings/"" ratingPlist = ""%(ratingsContainer)s%(rating)s.plist"" % locals( ) # ASSIGN RATING TO FILE cmd = 'xattr -wx ""com.apple.metadata:kMDItemStarRating"" ""`xxd -ps %(ratingPlist)s`"" ""%(filepath)s""' % locals( ) p = Popen(cmd, stdout=PIPE, stderr=PIPE, shell=True) stdout, stderr = p.communicate() log.debug('output: %(stdout)s' % locals()) log.debug('output: %(stderr)s' % locals()) cmd = 'xattr -wx ""org.openmetainfo:kMDItemStarRating"" ""`xxd -ps %(ratingPlist)s`"" ""%(filepath)s""' % locals( ) p = Popen(cmd, stdout=PIPE, stderr=PIPE, shell=True) stdout, stderr = p.communicate() log.debug('output: %(stdout)s' % locals()) log.debug('output: %(stderr)s' % locals()) if wherefrom != False: if len(wherefrom): wherefrom = ""%(wherefrom)s"" % locals() # DAYONE LINK now = datetime.now() now = now.strftime(""%Y%m%dt%H%M%S%f"") urlPlist = ""/tmp/fund-%(now)s-url.plist"" % locals() # GENERATE THE WHEREFROM PLIST FILE try: writeFile = codecs.open( urlPlist, encoding='utf-8', mode='w') except IOError, e: message = 'could not open the file %s' % (urlPlist,) raise IOError(message) writeFile.write("""""" %(wherefrom)s """""" % locals()) writeFile.close() # ASSIGN WHEREFORM TO FILE cmd = 'xattr -wx ""com.apple.metadata:kMDItemURL"" ""`xxd -ps %(urlPlist)s`"" ""%(filepath)s""' % locals( ) # cmd = 'xattr -wx ""com.apple.metadata:kMDItemURL"" ""`plutil -convert binary1 %(urlPlist)s -o - | xxd -p`"" ""%(filepath)s""' % locals() p = Popen(cmd, stdout=PIPE, stderr=PIPE, shell=True) stdout, stderr = p.communicate() log.debug('output URL: %(stdout)s' % locals()) log.debug('output URL: %(stderr)s' % locals()) now = datetime.now() now = now.strftime(""%Y%m%dt%H%M%S%f"") urlPlist = ""/tmp/fund-%(now)s-url.plist"" % locals() # GENERATE THE WHEREFROM PLIST FILE try: writeFile = codecs.open( urlPlist, encoding='utf-8', mode='w') except IOError, e: message = 'could not open the file 
%s' % (urlPlist,) raise IOError(message) writeFile.write("""""" %(wherefrom)s """""" % locals()) writeFile.close() # ASSIGN WHEREFORM TO FILE cmd = 'xattr -wx ""com.apple.metadata:kMDItemWhereFroms"" ""`xxd -ps %(urlPlist)s`"" ""%(filepath)s""' % locals( ) p = Popen(cmd, stdout=PIPE, stderr=PIPE, shell=True) stdout, stderr = p.communicate() log.debug('output URL: %(stdout)s' % locals()) log.debug('output URL: %(stderr)s' % locals()) # DELETE PLIST # os.remove(urlPlist) log.debug('completed the ``tag`` function') return None" 4581,"def add_indicators(self, indicators=list(), private=False, tags=list()): """"""Add indicators to the remote instance."""""" if len(indicators) == 0: raise Exception(""No indicators were identified."") self.logger.debug(""Checking {} indicators"".format(len(indicators))) cleaned = clean_indicators(indicators) self.logger.debug(""Cleaned {} indicators"".format(len(cleaned))) whitelisted = check_whitelist(cleaned) self.logger.debug(""Non-whitelisted {} indicators"".format(len(whitelisted))) indicators = prune_cached(whitelisted) hashed = hash_values(indicators) self.logger.debug(""Non-cached {} indicators"".format(len(indicators))) self.logger.debug(""Processing {} indicators"".format(len(indicators))) request_count = int(math.ceil(len(indicators)/100.0)) if request_count == 0: mesg = ""[!] No indicators were left to process after "" mesg += ""cleaning, whitelisting and checking the cache."" return {'message': mesg} stats = {'success': 0, 'failure': 0, 'requests': request_count, 'written': 0} mesg = ""{} indicators found, making {} requests"" self.logger.debug(mesg.format(len(indicators), request_count)) if private: indicators = hashed if type(tags) == str: tags = [t.strip().lower() for t in tags.split(',')] start, end = (0, 100) for i, idx in enumerate(range(0, request_count)): if idx > 0: time.sleep(3) # Ensure we never trip the limit self.logger.debug(""Waiting 3 seconds before next request."") to_send = {'indicators': indicators[start:end], 'tags': tags} r = self._send_data('POST', 'admin', 'add-indicators', to_send) start, end = (end, end + 100) if not r['success']: stats['failure'] += 1 continue stats['success'] += 1 stats['written'] += r['writeCount'] cache_items(to_send['indicators']) msg = """" msg += ""{written} indicators written using {requests} requests: "" msg += ""{success} success, {failure} failure"" stats['message'] = msg.format(**stats) return stats" 4582,"def get_indicators(self): """"""List indicators available on the remote instance."""""" response = self._get('', 'get-indicators') response['message'] = ""%i indicators:\n%s"" % ( len(response['indicators']), ""\n"".join(response['indicators']) ) return response" 4583,"def to_unicode(obj, encoding='utf-8'): """"""Convert obj to unicode (if it can be be converted) from http://farmdev.com/talks/unicode/"""""" if isinstance(obj, basestring): if not isinstance(obj, unicode): obj = unicode(obj, encoding) return obj" 4584,"def besttype(x, encoding=""utf-8"", percentify=True): """"""Convert string x to the most useful type, i.e. int, float or unicode string. If x is a quoted string (single or double quotes) then the quotes are stripped and the enclosed string returned. The string can contain any number of quotes, it is only important that it begins and ends with either single or double quotes. *percentify* = ``True`` turns ""34.4%"" into the float 0.344. .. Note:: Strings will be returned as Unicode strings (using :func:`unicode`), based on the *encoding* argument, which is utf-8 by default. 
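# --- Illustrative sketch ---
# add_indicators() above cleans, whitelists and dedupes its input, then submits
# it in slices of 100 per request, sleeping 3 seconds between requests so the
# rate limit is never tripped. A hedged, self-contained sketch of just the
# batching arithmetic:
import math

indicators = ['indicator-%03d' % i for i in range(235)]
request_count = int(math.ceil(len(indicators) / 100.0))   # 3 requests

start, end = 0, 100
for _ in range(request_count):
    batch = indicators[start:end]
    print(len(batch))          # 100, 100, 35
    start, end = end, end + 100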
"""""" def unicodify(x): return to_unicode(x, encoding) def percent(x): try: if x.endswith(""%""): x = float(x[:-1]) / 100. else: raise ValueError except (AttributeError, ValueError): raise ValueError return x x = unicodify(x) # make unicode as soon as possible try: x = x.strip() except AttributeError: pass m = re.match(r""""""(?P['""])(?P.*)(?P=quote)$"""""", x) # matches """" or '' where COULD contain "" or '! if m is None: # not a quoted string, try different types for converter in int, float, percent, unicodify: # try them in increasing order of lenience try: return converter(x) except ValueError: pass else: # quoted string x = unicodify(m.group('value')) return x" 4585,"def _onsuccess(cls, kmsg, result): """""" To execute on execution success :param kser.schemas.Message kmsg: Kafka message :param kser.result.Result result: Execution result :return: Execution result :rtype: kser.result.Result """""" logger.info( ""{}.Success: {}[{}]: {}"".format( cls.__name__, kmsg.entrypoint, kmsg.uuid, result ), extra=dict( kmsg=kmsg.dump(), kresult=ResultSchema().dump(result) if result else dict() ) ) return cls.onsuccess(kmsg, result)" 4586,"def _onerror(cls, kmsg, result): """""" To execute on execution failure :param kser.schemas.Message kmsg: Kafka message :param kser.result.Result result: Execution result :return: Execution result :rtype: kser.result.Result """""" logger.error( ""{}.Failed: {}[{}]: {}"".format( cls.__name__, kmsg.entrypoint, kmsg.uuid, result ), extra=dict( kmsg=kmsg.dump(), kresult=ResultSchema().dump(result) if result else dict() ) ) return cls.onerror(kmsg, result)" 4587,"def _onmessage(cls, kmsg): """""" Call on received message :param kser.schemas.Message kmsg: Kafka message :return: Kafka message :rtype: kser.schemas.Message """""" logger.debug( ""{}.ReceivedMessage {}[{}]"".format( cls.__name__, kmsg.entrypoint, kmsg.uuid ), extra=dict(kmsg=kmsg.dump()) ) return cls.onmessage(kmsg)" 4588,"def register(cls, name, entrypoint): """""" Register a new entrypoint :param str name: Key used by messages :param kser.entry.Entrypoint entrypoint: class to load :raises ValidationError: Invalid entry """""" if not issubclass(entrypoint, Entrypoint): raise ValidationError( ""Invalid type for entry '{}', MUST implement "" ""kser.entry.Entrypoint"".format(name), extra=dict(entrypoint=name) ) cls.ENTRYPOINTS[name] = entrypoint logger.debug(""{}.Registered: {}"".format(cls.__name__, name))" 4589,"def run(cls, raw_data): """"""description of run"""""" logger.debug(""{}.ReceivedFromKafka: {}"".format( cls.__name__, raw_data )) try: kmsg = cls._onmessage(cls.TRANSPORT.loads(raw_data)) except Exception as exc: logger.error( ""{}.ImportError: Failed to load data from kafka: {}"".format( cls.__name__, exc ), extra=dict(kafka_raw_data=raw_data) ) return Result.from_exception(exc) try: cls.start_processing(kmsg) if kmsg.entrypoint not in cls.ENTRYPOINTS: raise ValidationError( ""Entrypoint '{}' not registred"".format(kmsg.entrypoint), extra=dict( uuid=kmsg.uuid, entrypoint=kmsg.entrypoint, allowed=list(cls.ENTRYPOINTS.keys()) ) ) result = cls.ENTRYPOINTS[kmsg.entrypoint].from_Message( kmsg ).execute() except Exception as exc: result = Result.from_exception(exc, kmsg.uuid) finally: cls.stop_processing() # noinspection PyUnboundLocalVariable if result and result.retcode < 300: return cls._onsuccess(kmsg=kmsg, result=result) else: return cls._onerror(kmsg=kmsg, result=result)" 4590,"def interval_condition(value, inf, sup, dist): """"""Checks if value belongs to the interval [inf - dist, sup + dist]. 
"""""" return (value > inf - dist and value < sup + dist)" 4591,"def nearest_point(query, root_id, get_properties, dist_fun=euclidean_dist): """"""Find the point in the tree that minimizes the distance to the query. This method implements the nearest_point query for any structure implementing a kd-tree. The only requirement is a function capable to extract the relevant properties from a node representation of the particular implementation. Args: query (:obj:`tuple` of float or int): Stores the position of the node. root_id (:obj): The identifier of the root in the kd-tree implementation. get_properties (:obj:`function`): The function to extract the relevant properties from a node, namely its point, region, axis, left child identifier, right child identifier and if it is active. If the implementation does not uses the active attribute the function should return always True. dist_fun (:obj:`function`, optional): The distance function, euclidean distance by default. Returns: :obj:`tuple`: Tuple of length 2, where the first element is the identifier of the nearest node, the second is the distance to the query. """""" k = len(query) dist = math.inf nearest_node_id = None # stack_node: stack of identifiers to nodes within a region that # contains the query. # stack_look: stack of identifiers to nodes within a region that # does not contains the query. stack_node = deque([root_id]) stack_look = deque() while stack_node or stack_look: if stack_node: node_id = stack_node.pop() look_node = False else: node_id = stack_look.pop() look_node = True point, region, axis, active, left, right = get_properties(node_id) # Should consider this node? # As it is within a region that does not contains the query, maybe # there is no chance to find a closer node in this region if look_node: inside_region = True for i in range(k): inside_region &= interval_condition(query[i], region[i][0], region[i][1], dist) if not inside_region: continue # Update the distance only if the node is active. if active: node_distance = dist_fun(query, point) if nearest_node_id is None or dist > node_distance: nearest_node_id = node_id dist = node_distance if query[axis] < point[axis]: side_node = left side_look = right else: side_node = right side_look = left if side_node is not None: stack_node.append(side_node) if side_look is not None: stack_look.append(side_look) return nearest_node_id, dist" 4592,"def deactivate(self, node_id): """"""Deactivate the node identified by node_id. Deactivates the node corresponding to node_id, which means that it can never be the output of a nearest_point query. Note: The node is not removed from the tree, its data is steel available. Args: node_id (int): The node identifier (given to the user after its insertion). """""" node = self.node_list[node_id] self.node_list[node_id] = node._replace(active=False)" 4593,"def insert(self, point, data=None): """"""Insert a new node in the tree. Args: point (:obj:`tuple` of float or int): Stores the position of the node. data (:obj, optional): The information stored by the node. Returns: int: The identifier of the new node. 
Example: >>> tree = Tree(4, 800) >>> point = (3, 7) >>> data = {'name': Fresnel, 'label': blue, 'speed': 98.2} >>> node_id = tree.insert(point, data) """""" assert len(point) == self.k if self.size == 0: if self.region is None: self.region = [[-math.inf, math.inf]] * self.k axis = 0 return self.new_node(point, self.region, axis, data) # Iteratively descends to one leaf current_id = 0 while True: parent_node = self.node_list[current_id] axis = parent_node.axis if point[axis] < parent_node.point[axis]: next_id, left = parent_node.left, True else: next_id, left = parent_node.right, False if next_id is None: break current_id = next_id # Get the region delimited by the parent node region = parent_node.region[:] region[axis] = parent_node.region[axis][:] # Limit to the child's region limit = parent_node.point[axis] # Update reference to the new node if left: self.node_list[current_id] = parent_node._replace(left=self.size) region[axis][1] = limit else: self.node_list[current_id] = parent_node._replace(right=self.size) region[axis][0] = limit return self.new_node(point, region, (axis + 1) % self.k, data)" 4594,"def find_nearest_point(self, query, dist_fun=euclidean_dist): """"""Find the point in the tree that minimizes the distance to the query. Args: query (:obj:`tuple` of float or int): Stores the position of the node. dist_fun (:obj:`function`, optional): The distance function, euclidean distance by default. Returns: :obj:`tuple`: Tuple of length 2, where the first element is the identifier of the nearest node, the second is the distance to the query. Example: >>> tree = Tree(2, 3) >>> tree.insert((0, 0)) >>> tree.insert((3, 5)) >>> tree.insert((-1, 7)) >>> query = (-1, 8) >>> nearest_node_id, dist = tree.find_nearest_point(query) >>> dist 1 """""" def get_properties(node_id): return self.node_list[node_id][:6] return nearest_point(query, 0, get_properties, dist_fun)" 4595,"def set_to_public(self, request, queryset): """""" Set one or several releases to public """""" queryset.update(is_public=True, modified=now())" 4596,"def loads(cls, json_data): """"""description of load"""""" try: return cls(**cls.MARSHMALLOW_SCHEMA.loads(json_data)) except marshmallow.exceptions.ValidationError as exc: raise ValidationError(""Failed to load message"", extra=exc.args[0])" 4597,"def format(self, response): """""" Format the data. In derived classes, it is usually better idea to override ``_format_data()`` than this method. :param response: devil's ``Response`` object or the data itself. May also be ``None``. :return: django's ``HttpResponse`` todo: this shouldn't change the given response. only return the formatted response. """""" res = self._prepare_response(response) res.content = self._format_data(res.content, self.charset) return self._finalize_response(res)" 4598,"def parse(self, data, charset=None): """""" Parse the data. It is usually a better idea to override ``_parse_data()`` than this method in derived classes. :param charset: the charset of the data. Uses datamapper's default (``self.charset``) if not given. :returns: """""" charset = charset or self.charset return self._parse_data(data, charset)" 4599,"def _decode_data(self, data, charset): """""" Decode string data. 
:returns: unicode string """""" try: return smart_unicode(data, charset) except UnicodeDecodeError: raise errors.BadRequest('wrong charset')" 4600,"def _parse_data(self, data, charset): """""" Parse the data :param data: the data (may be None) """""" return self._decode_data(data, charset) if data else u''" 4601,"def _finalize_response(self, response): """""" Convert the ``Response`` object into django's ``HttpResponse`` :return: django's ``HttpResponse`` """""" res = HttpResponse(content=response.content, content_type=self._get_content_type()) # status_code is set separately to allow zero res.status_code = response.code return res" 4602,"def register_mapper(self, mapper, content_type, shortname=None): """""" Register new mapper. :param mapper: mapper object needs to implement ``parse()`` and ``format()`` functions. """""" self._check_mapper(mapper) cont_type_names = self._get_content_type_names(content_type, shortname) self._datamappers.update(dict([(name, mapper) for name in cont_type_names]))" 4603,"def select_formatter(self, request, resource): """""" Select appropriate formatter based on the request. :param request: the HTTP request :param resource: the invoked resource """""" # 1. get from resource if resource.mapper: return resource.mapper # 2. get from url mapper_name = self._get_name_from_url(request) if mapper_name: return self._get_mapper(mapper_name) # 3. get from accept header mapper_name = self._get_name_from_accept(request) if mapper_name: return self._get_mapper(mapper_name) # 4. use resource's default if resource.default_mapper: return resource.default_mapper # 5. use manager's default return self._get_default_mapper()" 4604,"def select_parser(self, request, resource): """""" Select appropriate parser based on the request. :param request: the HTTP request :param resource: the invoked resource """""" # 1. get from resource if resource.mapper: return resource.mapper # 2. get from content type mapper_name = self._get_name_from_content_type(request) if mapper_name: return self._get_mapper(mapper_name) # 3. get from url mapper_name = self._get_name_from_url(request) if mapper_name: return self._get_mapper(mapper_name) # 4. use resource's default if resource.default_mapper: return resource.default_mapper # 5. use manager's default return self._get_default_mapper()" 4605,"def get_mapper_by_content_type(self, content_type): """""" Returs mapper based on the content type. """""" content_type = util.strip_charset(content_type) return self._get_mapper(content_type)" 4606,"def _get_mapper(self, mapper_name): """""" Return the mapper based on the given name. :returns: the mapper based on the given ``mapper_name`` :raises: NotAcceptable if we don't support the requested format. """""" if mapper_name in self._datamappers: # mapper found return self._datamappers[mapper_name] else: # unsupported format return self._unknown_format(mapper_name)" 4607,"def _get_name_from_content_type(self, request): """""" Get name from Content-Type header """""" content_type = request.META.get('CONTENT_TYPE', None) if content_type: # remove the possible charset-encoding info return util.strip_charset(content_type) return None" 4608,"def _get_name_from_accept(self, request): """""" Process the Accept HTTP header. Find the most suitable mapper that the client wants and we support. :returns: the preferred mapper based on the accept header or ``None``. 
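# --- Illustrative sketch ---
# select_formatter()/select_parser() above walk a fixed priority list: a mapper
# pinned on the resource, then hints from the request (a ?format=/.json suffix,
# the Accept or Content-Type header), then the resource and manager defaults.
# A hedged, self-contained sketch of the header-based lookup step, with a plain
# dict standing in for the registered mapper table:
registered = {'application/json': 'JsonMapper', 'text/xml': 'XmlMapper'}

def from_accept(accept_header):
    # Take the first offered media type that we actually support
    # (the real manager parses and weights the header, then raises
    # NotAcceptable when nothing matches).
    for part in accept_header.split(','):
        media_type = part.split(';')[0].strip()   # drop ';q=...' parameters
        if media_type in registered:
            return registered[media_type]
    return None

print(from_accept('text/html;q=0.9, application/json'))   # JsonMapper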
"""""" accepts = util.parse_accept_header(request.META.get(""HTTP_ACCEPT"", """")) if not accepts: return None for accept in accepts: if accept[0] in self._datamappers: return accept[0] raise errors.NotAcceptable()" 4609,"def _get_name_from_url(self, request): """""" Determine short name for the mapper based on the URL. Short name can be either in query string (e.g. ?format=json) or as an extension to the URL (e.g. myresource.json). :returns: short name of the mapper or ``None`` if not found. """""" format = request.GET.get('format', None) if not format: match = self._format_query_pattern.match(request.path) if match and match.group('format'): format = match.group('format') return format" 4610,"def _check_mapper(self, mapper): """""" Check that the mapper has valid signature. """""" if not hasattr(mapper, 'parse') or not callable(mapper.parse): raise ValueError('mapper must implement parse()') if not hasattr(mapper, 'format') or not callable(mapper.format): raise ValueError('mapper must implement format()')" 4611,"def setup_cluster(self, cluster, extra_args=tuple()): """""" Configure the cluster by running an Ansible playbook. The ElastiCluster configuration attribute `_groups` determines, for each node kind, what Ansible groups nodes of that kind are assigned to. :param cluster: cluster to configure :type cluster: :py:class:`elasticluster.cluster.Cluster` :param list extra_args: List of additional command-line arguments that are appended to each invocation of the setup program. :return: ``True`` on success, ``False`` otherwise. Please note, if nothing has to be configured, then ``True`` is returned. :raises: `ConfigurationError` if the playbook can not be found or is corrupt. """""" inventory_path = self._build_inventory(cluster) if inventory_path is None: # No inventory file has been created, maybe an # invalid class has been specified in config file? Or none? # assume it is fine. elasticluster.log.info(""No setup required for this cluster."") return True assert os.path.exists(inventory_path), ( ""inventory file `{inventory_path}` does not exist"" .format(inventory_path=inventory_path)) # build list of directories to search for roles/include files ansible_roles_dirs = [ # include Ansible default first ... '/etc/ansible/roles', ] for root_path in [ # ... then ElastiCluster's built-in defaults resource_filename('elasticluster', 'share/playbooks'), # ... then wherever the playbook is os.path.dirname(self._playbook_path), ]: for path in [ root_path, os.path.join(root_path, 'roles'), ]: if path not in ansible_roles_dirs and os.path.exists(path): ansible_roles_dirs.append(path) # Use env vars to configure Ansible; # see all values in https://github.com/ansible/ansible/blob/devel/lib/ansible/constants.py # # Ansible does not merge keys in configuration files: rather # it uses the first configuration file found. However, # environment variables can be used to selectively override # parts of the config; according to [1]: ""they are mostly # considered to be a legacy system as compared to the config # file, but are equally valid."" # # [1]: http://docs.ansible.com/ansible/intro_configuration.html#environmental-configuration # # Provide default values for important configuration variables... 
ansible_env = { 'ANSIBLE_FORKS': '10', 'ANSIBLE_HOST_KEY_CHECKING': 'no', 'ANSIBLE_PRIVATE_KEY_FILE': cluster.user_key_private, 'ANSIBLE_ROLES_PATH': ':'.join(reversed(ansible_roles_dirs)), 'ANSIBLE_SSH_PIPELINING': 'yes', 'ANSIBLE_TIMEOUT': '120', } # ...override them with key/values set in the config file(s) for k, v in self.extra_conf.items(): if k.startswith('ansible_'): ansible_env[k.upper()] = str(v) # ...finally allow the environment have the final word ansible_env.update(os.environ) if __debug__: elasticluster.log.debug( ""Calling `ansible-playbook` with the following environment:"") for var, value in sorted(ansible_env.items()): elasticluster.log.debug(""- %s=%r"", var, value) elasticluster.log.debug(""Using playbook file %s."", self._playbook_path) # build `ansible-playbook` command-line cmd = shlex.split(self.extra_conf.get('ansible_command', 'ansible-playbook')) cmd += [ os.path.realpath(self._playbook_path), ('--inventory=' + inventory_path), ] + list(extra_args) if self._sudo: cmd.extend([ # force all plays to use `sudo` (even if not marked as such) '--become', # desired sudo-to user ('--become-user=' + self._sudo_user), ]) # determine Ansible verbosity as a function of ElastiCluster's # log level (we cannot read `ElastiCluster().params.verbose` # here, still we can access the log configuration since it's # global). verbosity = (logging.WARNING - elasticluster.log.getEffectiveLevel()) / 10 if verbosity > 0: cmd.append('-' + ('v' * verbosity)) # e.g., `-vv` # append any additional arguments provided by users ansible_extra_args = self.extra_conf.get('ansible_extra_args', None) if ansible_extra_args: cmd += shlex.split(ansible_extra_args) cmdline = ' '.join(cmd) elasticluster.log.debug(""Running Ansible command `%s` ..."", cmdline) rc = call(cmd, env=ansible_env, bufsize=1, close_fds=True) if rc == 0: elasticluster.log.info(""Cluster correctly configured."") return True else: elasticluster.log.error( ""Command `%s` failed with exit code %d."", cmdline, rc) elasticluster.log.error( ""Check the output lines above for additional information on this error."") elasticluster.log.error( ""The cluster has likely *not* been configured correctly."" "" You may need to re-run `elasticluster setup` or fix the playbooks."") return False" 4612,"def _build_inventory(self, cluster): """""" Builds the inventory for the given cluster and returns its path :param cluster: cluster to build inventory for :type cluster: :py:class:`elasticluster.cluster.Cluster` """""" inventory_data = defaultdict(list) for node in cluster.get_all_nodes(): if node.kind not in self.groups: # FIXME: should this raise a `ConfigurationError` instead? 
warn(""Node kind `{0}` not defined in cluster!"".format(node.kind)) continue extra_vars = ['ansible_user=%s' % node.image_user] # check for nonstandard port, either IPv4 or IPv6 if node.preferred_ip and ':' in node.preferred_ip: match = IPV6_RE.match(node.preferred_ip) if match: host_port = match.groups()[1] else: _, _, host_port = node.preferred_ip.partition(':') if host_port: extra_vars.append('ansible_port=%s' % host_port) if node.kind in self.environment: extra_vars.extend('%s=%s' % (k, v) for k, v in self.environment[node.kind].items()) for group in self.groups[node.kind]: connection_ip = node.preferred_ip if connection_ip: inventory_data[group].append( (node.name, connection_ip, str.join(' ', extra_vars))) if not inventory_data: elasticluster.log.info(""No inventory file was created."") return None # create a temporary file to pass to ansible, since the # api is not stable yet... if self._storage_path_tmp: if not self._storage_path: self._storage_path = tempfile.mkdtemp() elasticluster.log.warning( ""Writing inventory file to tmp dir `%s`"", self._storage_path) inventory_path = os.path.join( self._storage_path, (cluster.name + '.inventory')) log.debug(""Writing Ansible inventory to file `%s` ..."", inventory_path) with open(inventory_path, 'w+') as inventory_file: for section, hosts in inventory_data.items(): # Ansible throws an error ""argument of type 'NoneType' is not # iterable"" if a section is empty, so ensure we have something # to write in there if hosts: inventory_file.write(""\n["" + section + ""]\n"") for host in hosts: # don't want port, makes it look like ipv6 if ':' in host[1]: match = IPV6_RE.match(node.preferred_ip) if match: host = (host[0], match.groups()[0], host[2]) else: host = (host[0], host[1].partition(':')[0], host[2]) hostline = ""%s ansible_host=%s %s\n"" % host inventory_file.write(hostline) return inventory_path" 4613,"def cleanup(self, cluster): """"""Deletes the inventory file used last recently used. :param cluster: cluster to clear up inventory file for :type cluster: :py:class:`elasticluster.cluster.Cluster` """""" if self._storage_path and os.path.exists(self._storage_path): fname = '%s.%s' % (AnsibleSetupProvider.inventory_file_ending, cluster.name) inventory_path = os.path.join(self._storage_path, fname) if os.path.exists(inventory_path): try: os.unlink(inventory_path) if self._storage_path_tmp: if len(os.listdir(self._storage_path)) == 0: shutil.rmtree(self._storage_path) except OSError as ex: log.warning( ""AnsibileProvider: Ignoring error while deleting "" ""inventory file %s: %s"", inventory_path, ex)" 4614,"def await_task(self, task_id, service_id, callback_fn=None, sleep_sec=15): """""" Подождать выполнения задачи запускатора :param task_id: ID задачи, за которой нужно следить :param service_id: ID сервиса :param callback_fn: Функция обратного вызова, в нее будет передаваться task_info и is_finish как признак, что обработка завершена :param sleep_sec: задержка между проверкой по БД. 
Не рекомендуется делать меньше 10, так как это может очень сильно ударить по производительности БД :return: void """""" while True: import time time.sleep(sleep_sec) task_info = self.__metadb.one("""""" SELECT id, service_id, status, result_data FROM job.task WHERE id=:task_id::uuid AND service_id=:service_id::job.service_id LIMIT 1 """""", { ""task_id"": task_id, ""service_id"": service_id, }) self.log.info(""Ждем выполнения задачи"", { ""task_info"": task_info }) if task_info is None: break is_finish = task_info['status'] != 'NEW' and task_info['status'] != 'PROCESSING' if callback_fn: # Уведомляем вызывающего callback_fn(task_info, is_finish) if is_finish: break" 4615,"def submit(self, service_id: str, data: dict = None): """""" Отправить задачу в запускатор :param service_id: ID службы. Например ""meta.docs_generate"" :param data: Полезная нагрузка задачи :return: dict """""" if self.__app.starter_api_url == 'http://STUB_URL': self.log.info('STARTER DEV. Задача условно поставлена', { ""service_id"": service_id, ""data"": data, }) return task = {""serviceId"": service_id, ""data"": data} url = self.__app.starter_api_url + '/services/' + service_id + '/tasks' last_e = None for _idx in range(self.max_retries): try: resp = requests.post( url=url, data=json.dumps(task), headers=self.headers, timeout=15 ) try: return json.loads(resp.text) except Exception: raise IOError(""Starter response read error: "" + resp.text) except (requests.exceptions.ConnectionError, requests.exceptions.Timeout) as e: # При ошибках подключения пытаемся еще раз last_e = e sleep(3) raise last_e" 4616,"def parse_version_string(version_string): """""" Parse a version string into it's components. >>> parse_version_string(""0.1"") ([0, 1], 'jenkins', None) >>> parse_version_string(""0.3.2-jenkins-3447876"") ([0, 3, 2], 'jenkins', 3447876) """""" components = version_string.split('-') + [None, None] version = list(map(int, components[0].split('.'))) build_tag = components[1] if components[1] else BUILD_TAG build_number = int(components[2]) if components[2] else components[2] return (version, build_tag, build_number)" 4617,"def format_version(version, build_number=None, build_tag=BUILD_TAG): """""" Format a version string for use in packaging. >>> format_version([0,3,5]) '0.3.5' >>> format_version([8, 8, 9], 23676) '8.8.9-jenkins-23676' >>> format_version([8, 8, 9], 23676, 'koekjes') '8.8.9-koekjes-23676' """""" formatted_version = ""."".join(map(str, version)) if build_number is not None: return ""{formatted_version}-{build_tag}-{build_number}"".format(**locals()) return formatted_version" 4618,"def based_on(self, based_on): """"""Sets the based_on of this TaxRate. :param based_on: The based_on of this TaxRate. :type: str """""" allowed_values = [""shippingAddress"", ""billingAddress""] if based_on is not None and based_on not in allowed_values: raise ValueError( ""Invalid value for `based_on` ({0}), must be one of {1}"" .format(based_on, allowed_values) ) self._based_on = based_on" 4619,"def create_tax_rate(cls, tax_rate, **kwargs): """"""Create TaxRate Create a new TaxRate This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.create_tax_rate(tax_rate, async=True) >>> result = thread.get() :param async bool :param TaxRate tax_rate: Attributes of taxRate to create (required) :return: TaxRate If the method is called asynchronously, returns the request thread. 
"""""" kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._create_tax_rate_with_http_info(tax_rate, **kwargs) else: (data) = cls._create_tax_rate_with_http_info(tax_rate, **kwargs) return data" 4620,"def delete_tax_rate_by_id(cls, tax_rate_id, **kwargs): """"""Delete TaxRate Delete an instance of TaxRate by its ID. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.delete_tax_rate_by_id(tax_rate_id, async=True) >>> result = thread.get() :param async bool :param str tax_rate_id: ID of taxRate to delete. (required) :return: None If the method is called asynchronously, returns the request thread. """""" kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._delete_tax_rate_by_id_with_http_info(tax_rate_id, **kwargs) else: (data) = cls._delete_tax_rate_by_id_with_http_info(tax_rate_id, **kwargs) return data" 4621,"def get_tax_rate_by_id(cls, tax_rate_id, **kwargs): """"""Find TaxRate Return single instance of TaxRate by its ID. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.get_tax_rate_by_id(tax_rate_id, async=True) >>> result = thread.get() :param async bool :param str tax_rate_id: ID of taxRate to return (required) :return: TaxRate If the method is called asynchronously, returns the request thread. """""" kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._get_tax_rate_by_id_with_http_info(tax_rate_id, **kwargs) else: (data) = cls._get_tax_rate_by_id_with_http_info(tax_rate_id, **kwargs) return data" 4622,"def list_all_tax_rates(cls, **kwargs): """"""List TaxRates Return a list of TaxRates This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.list_all_tax_rates(async=True) >>> result = thread.get() :param async bool :param int page: page number :param int size: page size :param str sort: page order :return: page[TaxRate] If the method is called asynchronously, returns the request thread. """""" kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._list_all_tax_rates_with_http_info(**kwargs) else: (data) = cls._list_all_tax_rates_with_http_info(**kwargs) return data" 4623,"def replace_tax_rate_by_id(cls, tax_rate_id, tax_rate, **kwargs): """"""Replace TaxRate Replace all attributes of TaxRate This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.replace_tax_rate_by_id(tax_rate_id, tax_rate, async=True) >>> result = thread.get() :param async bool :param str tax_rate_id: ID of taxRate to replace (required) :param TaxRate tax_rate: Attributes of taxRate to replace (required) :return: TaxRate If the method is called asynchronously, returns the request thread. """""" kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._replace_tax_rate_by_id_with_http_info(tax_rate_id, tax_rate, **kwargs) else: (data) = cls._replace_tax_rate_by_id_with_http_info(tax_rate_id, tax_rate, **kwargs) return data" 4624,"def update_tax_rate_by_id(cls, tax_rate_id, tax_rate, **kwargs): """"""Update TaxRate Update attributes of TaxRate This method makes a synchronous HTTP request by default. 
To make an asynchronous HTTP request, please pass async=True >>> thread = api.update_tax_rate_by_id(tax_rate_id, tax_rate, async=True) >>> result = thread.get() :param async bool :param str tax_rate_id: ID of taxRate to update. (required) :param TaxRate tax_rate: Attributes of taxRate to update. (required) :return: TaxRate If the method is called asynchronously, returns the request thread. """""" kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._update_tax_rate_by_id_with_http_info(tax_rate_id, tax_rate, **kwargs) else: (data) = cls._update_tax_rate_by_id_with_http_info(tax_rate_id, tax_rate, **kwargs) return data" 4625,"def build_append_file_task(urllocation, filelocation): """"""Build a task to watch a specific remote url and append that data to the file. This method should be used when you would like to keep all of the information stored on the local machine, but also append the new information found at the url. For instance, if the local file is: ``` foo ``` And the remote file is: ``` bar ``` The resulting file will contain: ``` foo bar ``` """""" config = file_utils.get_celcius_config() basename = filelocation.split('/')[-1] tmp_filelocation = filelocation.replace(basename, 'tmp_'+basename) new_filelocation = filelocation.replace(basename, 'new_'+basename) if config['retrieve_command'] == 'curl': download_cmd = curl.build_download_file_command(urllocation, tmp_filelocation) elif config['retrieve_command'] == 'wget': download_cmd = wget.build_download_file_command(urllocation, tmp_filelocation) else: print(""Invalid retrieve command!"") sys.exit(1) diff_cmd = diff.build_append_file_command(filelocation, tmp_filelocation) compare_cmd = concat.build_and_concat_commands([download_cmd, diff_cmd]) redirect_cmd = redirect.redirect_output(compare_cmd, new_filelocation) full_cmd = concat.concat_commands([touch.touch(filelocation).build_command(), redirect_cmd, rm.build_force_rm_command(tmp_filelocation).build_command(), rm.build_force_rm_command(filelocation).build_command(), mv.mv(new_filelocation, filelocation).build_command()]) return full_cmd" 4626,"def get_authentic_node_name(self, node_name: str) -> Optional[str]: """""" Returns the exact, authentic node name for the given node name if a node corresponding to the given name exists in the graph (maybe not locally yet) or `None` otherwise. By default, this method checks whether a node with the given name exists locally in the graph and return `node_name` if it does or `None` otherwise. In `Graph` extensions that are used by applications where the user can enter potentially incorrect node names, this method should be overridden to improve usability. Arguments: node_name (str): The node name to return the authentic node name for. Returns: The authentic name of the node corresponding to the given node name or `None` if no such node exists. """""" # Is there a node with the given name? vertex: IGraphVertex = None try: vertex: IGraphVertex = self._wrapped_graph.vs.find(node_name) except ValueError: pass # Is node_name a node index? if vertex is None: try: vertex: IGraphVertex = self._wrapped_graph.vs[int(node_name)] except ValueError: return None except IndexError: return None try: return vertex[""name""] except KeyError: return str(vertex.index)" 4627,"def _create_memory_database_interface(self) -> GraphDatabaseInterface: """""" Creates and returns the in-memory database interface the graph will use. 
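The method below builds a throwaway in-memory SQLite database: an engine on `sqlite://`, a session factory bound to it, and a freshly created schema. A minimal sketch of that setup in plain SQLAlchemy, without the graph-specific wiring:

    import sqlalchemy
    from sqlalchemy.orm import sessionmaker
    from sqlalchemy.pool import StaticPool

    def make_memory_session():
        # StaticPool reuses a single connection, so the in-memory database
        # is shared by every session created from this engine
        engine = sqlalchemy.create_engine("sqlite://", poolclass=StaticPool)
        return engine, sessionmaker(bind=engine)()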
"""""" Base = declarative_base() engine = sqlalchemy.create_engine(""sqlite://"", poolclass=StaticPool) Session = sessionmaker(bind=engine) dbi: GraphDatabaseInterface = create_graph_database_interface( sqlalchemy, Session(), Base, sqlalchemy.orm.relationship ) Base.metadata.drop_all(engine) Base.metadata.create_all(engine) return dbi" 4628,"def _load_neighbors_from_external_source(self) -> None: """""" Loads the neighbors of the node from the igraph `Graph` instance that is wrapped by the graph that has this node. """""" graph: IGraphWrapper = self._graph ig_vertex: IGraphVertex = graph.wrapped_graph.vs[self._igraph_index] ig_neighbors: List[IGraphVertex] = ig_vertex.neighbors() for ig_neighbor in ig_neighbors: try: name: str = ig_neighbor[""name""] except KeyError: name: str = str(ig_neighbor.index) try: external_id: Optional[str] = ig_neighbor[""external_id""] except KeyError: external_id: Optional[str] = None neighbor: IGraphNode = graph.nodes.get_node_by_name(name, can_validate_and_load=True, external_id=external_id) graph.add_edge(self, neighbor)" 4629,"def _create_node(self, index: int, name: str, external_id: Optional[str] = None) -> IGraphNode: """""" Returns a new `IGraphNode` instance with the given index and name. Arguments: index (int): The index of the node to create. name (str): The name of the node to create. external_id (Optional[str]): The external ID of the node. """""" return IGraphNode(graph=self._graph, index=index, name=name, external_id=external_id)" 4630,"def parse(self): """"""Parse the table data string into records."""""" self.parse_fields() records = [] for line in self.t['data'].split('\n'): if EMPTY_ROW.match(line): continue row = [self.autoconvert(line[start_field:end_field+1]) for start_field, end_field in self.fields] records.append(tuple(row)) self.records = records" 4631,"def recarray(self): """"""Return a recarray from the (parsed) string."""""" if self.records is None: self.parse() try: # simple (should this also be subjected to convert.to_int64() ?) return numpy.rec.fromrecords(self.records, names=self.names) except ValueError: # complicated because fromrecords cannot deal with records of lists # Quick hack: use objects for lists etc (instead of building the proper # data types (see docs for numpy.dtype , eg dtype('coord', (float, 3)) ) D = numpy.empty(len(self.records[0]), dtype=object) # number of fields from first record types = numpy.array([map(type, r) for r in self.records]) # types of all fields for icol, isSame in enumerate([numpy.all(col) for col in types.T]): if isSame: D[icol] = types[0][icol] else: D[icol] = object dtype = numpy.dtype(zip(self.names, D)) # from numpy.rec.records # TODO: this is not working properly yet; for instance, text fields # are reduced to length 0 (>> def foo(arg1, arg2, arg3='val1', arg4='val2', *args, **argd): ... pass >>> try: check_arguments_compatibility(foo, {'arg1': 'bla', 'arg2': 'blo'}) ... except ValueError as err: print 'failed' ... else: print 'ok' ok >>> try: check_arguments_compatibility(foo, {'arg1': 'bla'}) ... except ValueError as err: print 'failed' ... else: print 'ok' failed Basically this function is simulating the call: >>> the_callable(**argd) but it only checks for the correctness of the arguments, without actually calling the_callable. :param the_callable: the callable to be analyzed. :type the_callable: function/callable :param argd: the arguments to be passed. 
:type argd: dict :raise ValueError: in case of uncompatibility """""" if not argd: argd = {} args, dummy, varkw, defaults = inspect.getargspec(the_callable) tmp_args = list(args) optional_args = [] args_dict = {} if defaults: defaults = list(defaults) else: defaults = [] while defaults: arg = tmp_args.pop() optional_args.append(arg) args_dict[arg] = defaults.pop() while tmp_args: args_dict[tmp_args.pop()] = None for arg, dummy_value in iteritems(argd): if arg in args_dict: del args_dict[arg] elif not varkw: raise ValueError( 'Argument %s not expected when calling callable ' '""%s"" with arguments %s' % ( arg, get_callable_signature_as_string(the_callable), argd)) for arg in args_dict.keys(): if arg in optional_args: del args_dict[arg] if args_dict: raise ValueError( 'Arguments %s not specified when calling callable ' '""%s"" with arguments %s' % ( ', '.join(args_dict.keys()), get_callable_signature_as_string(the_callable), argd))" 4635,"def create_enhanced_plugin_builder(compulsory_objects=None, optional_objects=None, other_data=None): """""" Creates a plugin builder function suitable to extract some specific objects (either compulsory or optional) and other simpler data >>> def dummy_needed_funct1(foo, bar): ... pass >>> class dummy_needed_class1: ... def __init__(self, baz): ... pass >>> def dummy_optional_funct2(boo): ... pass >>> create_enhanced_plugin_builder( ... compulsory_objects={ ... 'needed_funct1' : dummy_needed_funct1, ... 'needed_class1' : dummy_needed_class1 ... }, ... optional_objects={ ... 'optional_funct2' : dummy_optional_funct2, ... }, ... other_data={ ... 'CFG_SOME_DATA' : (str, ''), ... 'CFG_SOME_INT' : (int, 0), ... }) :param compulsory_objects: map of name of an object to look for inside the `plugin` and a *signature* for a class or callable. Every name specified in this map **must exists** in the plugin, otherwise the plugin will fail to load. :type compulsory_objects: dict :param optional_objects: map of name of an object to look for inside the C{plugin} and a I{signature} for a class or callable. Every name specified in this map must B{can exists} in the plugin. :type optional_objects: dict :param other_data: map of other simple data that can be loaded from the plugin. The map has the same format of the C{content} parameter of L{invenio_ext.legacy.handler.wash_urlargd}. :type other_data: dict :return: a I{plugin_builder} function that can be used with the map function. Such function will build the plugin in the form of a map, where every key is one of the keys inside the three maps provided as parameters and the corresponding value is the expected class or callable or simple data. """""" from invenio_utils.washers import wash_urlargd def plugin_builder(the_plugin): """""" Enhanced plugin_builder created by L{create_enhanced_plugin_builder}. :param plugin: the code of the module as just read from package. :return: the plugin in the form of a map. 
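Stripped of the signature checks and the argument washing, the builder described above is essentially a `getattr` loop over the plugin module. A minimal sketch of that core idea with hypothetical names and no validation:

    def build_plugin(module, compulsory=(), optional=()):
        plugin = {}
        for name in compulsory:
            obj = getattr(module, name, None)
            if obj is None:
                raise RuntimeError('plugin %r misses compulsory object %r'
                                   % (module.__name__, name))
            plugin[name] = obj
        for name in optional:
            obj = getattr(module, name, None)
            if obj is not None:   # optional objects are simply skipped when absent
                plugin[name] = obj
        return plugin

    import math
    print(sorted(build_plugin(math, compulsory=('sqrt',), optional=('no_such_name',))))
    # prints: ['sqrt']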
"""""" plugin_name = the_plugin.__name__ plugin = {} if compulsory_objects: for object_name, object_signature in \ iteritems(compulsory_objects): the_object = getattr(the_plugin, object_name, None) if the_object is None: raise AutodiscoveryError( 'Plugin ""%s"" does not ' 'contain compulsory object ""%s""' % (plugin_name, object_name)) try: check_signature(object_name, the_object, object_signature) except AutodiscoveryError as err: raise AutodiscoveryError( 'Plugin ""%s"" contains ' 'object ""%s"" with a wrong signature: %s' % (plugin_name, object_name, err)) plugin[object_name] = the_object if optional_objects: for object_name, object_signature in iteritems(optional_objects): the_object = getattr(the_plugin, object_name, None) if the_object is not None: try: check_signature( object_name, the_object, object_signature) except AutodiscoveryError as err: raise AutodiscoveryError( 'Plugin ""%s"" ' 'contains object ""%s"" with a wrong signature: %s' % (plugin_name, object_name, err)) plugin[object_name] = the_object if other_data: the_other_data = {} for data_name, (dummy, data_default) in iteritems(other_data): the_other_data[data_name] = getattr(the_plugin, data_name, data_default) try: the_other_data = wash_urlargd(the_other_data, other_data) except Exception as err: raise AutodiscoveryError( 'Plugin ""%s"" contains other ' 'data with problems: %s' % (plugin_name, err)) plugin.update(the_other_data) return plugin return plugin_builder" 4636,"def email_quoted_txt2html(text, tabs_before=0, indent_txt='>>', linebreak_txt=""\n"", indent_html=('

<div class=""commentbox"">', ""</div>""), linebreak_html='<br/>', indent_block=True): """""" Takes a typical mail quoted text, e.g.:: hello, you told me: >> Your mother was a hamster and your father smelt of elderberries I must tell you that I'm not convinced. Then in this discussion: >>>> Is there someone else up there we could talk to? >> No. Now, go away, or I shall taunt you a second time-a! I think we're not going to be friends! and return an html formatted output, e.g.:: hello,<br/> you told me:<br/> <div class=""commentbox"">Your mother was a hamster and your father smelt of elderberries<br/></div> I must tell you that I'm not convinced. Then in this discussion:<br/> <div class=""commentbox""><div class=""commentbox"">Is there someone else up there we could talk to?<br/></div> No. Now, go away, or I shall taunt you a second time-a!<br/></div> I think we're not going to be friends!<br/> The behaviour is different when C{indent_block} is C{True} or C{False}. When C{True}, C{indent_html} is only added at each change of level of indentation, while it is added for each line when C{False}. For eg:: >> a >> b >>>> c would result in (if C{True}):: <div class=""commentbox"">a<br/> b<br/> <div class=""commentbox"">c<br/></div></div> or would be (if C{False}):: <div class=""commentbox"">a<br/></div> <div class=""commentbox"">b<br/></div> <div class=""commentbox""><div class=""commentbox"">c<br/></div></div> @param text: the text in quoted format @param tabs_before: number of tabulations before each line @param indent_txt: quote separator in email (default:'>>') @param linebreak_txt: line separator in email @param indent_html: tuple of (opening, closing) html tags. default: ('<div class=""commentbox"">', ""</div>"") @param linebreak_html: line separator in html (default: '<br/>
    ') @param indent_block: if indentation should be done per 'block' i.e. only at changes of indentation level (+1, -1) or at each line. @return: string containing html formatted output """""" washer = HTMLWasher() final_body = """" nb_indent = 0 text = text.strip('\n') lines = text.split(linebreak_txt) for line in lines: new_nb_indent = 0 while True: if line.startswith(indent_txt): new_nb_indent += 1 line = line[len(indent_txt):] else: break if indent_block: if (new_nb_indent > nb_indent): for dummy in range(nb_indent, new_nb_indent): final_body += tabs_before * ""\t"" + indent_html[0] + ""\n"" tabs_before += 1 elif (new_nb_indent < nb_indent): for dummy in range(new_nb_indent, nb_indent): tabs_before -= 1 final_body += (tabs_before) * ""\t"" + indent_html[1] + ""\n"" else: final_body += (tabs_before) * ""\t"" else: final_body += tabs_before * ""\t"" + new_nb_indent * indent_html[0] try: line = washer.wash(line) except HTMLParseError: # Line contained something like ""foo\n"" return final_body" 4637,"def email_quote_txt(text, indent_txt='>>', linebreak_input=""\n"", linebreak_output=""\n""): """""" Takes a text and returns it in a typical mail quoted format, e.g.:: C'est un lapin, lapin de bois. >>Quoi? Un cadeau. >>What? A present. >>Oh, un cadeau. will return:: >>C'est un lapin, lapin de bois. >>>>Quoi? >>Un cadeau. >>>>What? >>A present. >>>>Oh, un cadeau. @param text: the string to quote @param indent_txt: the string used for quoting (default: '>>') @param linebreak_input: in the text param, string used for linebreaks @param linebreak_output: linebreak used for output @return: the text as a quoted string """""" if (text == """"): return """" lines = text.split(linebreak_input) text = """" for line in lines: text += indent_txt + line + linebreak_output return text" 4638,"def escape_email_quoted_text(text, indent_txt='>>', linebreak_txt='\n'): """""" Escape text using an email-like indenting rule. As an example, this text:: >>Brave Sir Robin ran away... *No!* >>bravely ran away away... I didn't!* >>When danger reared its ugly head, he bravely turned his tail and fled.
    *I never did!* will be escaped like this:: >>Brave Sir Robin ran away... <img src=""malicious_script />*No!* >>bravely ran away away... I didn't!*<script>malicious code</script> >>When danger reared its ugly head, he bravely turned his tail and fled. <form onload=""malicious""></form>*I never did!* @param text: the string to escape @param indent_txt: the string used for quoting @param linebreak_txt: in the text param, string used for linebreaks """""" washer = HTMLWasher() lines = text.split(linebreak_txt) output = '' for line in lines: line = line.strip() nb_indent = 0 while True: if line.startswith(indent_txt): nb_indent += 1 line = line[len(indent_txt):] else: break output += (nb_indent * indent_txt) + washer.wash(line, render_unallowed_tags=True) + linebreak_txt nb_indent = 0 return output[:-1]" 4639,"def _print(self, text, color=None, **kwargs): """"""print text with given color to terminal """""" COLORS = { 'red': '\033[91m{}\033[00m', 'green': '\033[92m{}\033[00m', 'yellow': '\033[93m{}\033[00m', 'cyan': '\033[96m{}\033[00m' } _ = COLORS[color] six.print_(_.format(text), **kwargs)" 4640,"def _is_unique(self, name, path): """"""verify if there is a project with given name or path on the database """""" project = None try: project = Project.select().where( (Project.name == name) | (Project.path == path) )[0] except: pass return project is None" 4641,"def _path_is_valid(self, path): """"""validates if a given path is: - absolute, - exists on current machine - is a directory """""" VALIDATORS = [ (os.path.isabs, self._ERROR_PATH_NOT_ABSOLUTE), (os.path.exists, self._ERROR_PATH_DOESNT_EXISTS), (os.path.isdir, self._ERROR_PATH_NOT_A_DIR), ] for validator in VALIDATORS: func, str_err = validator if not func(path): self._print(str_err.format(path), 'red') return return True" 4642,"def add(self, name, path=None, **kwargs): """"""add new project with given name and path to database if the path is not given, current working directory will be taken ...as default """""" path = path or kwargs.pop('default_path', None) if not self._path_is_valid(path): return if not self._is_unique(name, path): p = Project.select().where( (Project.name == name) | (Project.path == path) )[0] self._print(self._ERROR_PROJECT_EXISTS.format(name, p.path), 'red') return Project.create(name=name, path=path) self._print(self._SUCCESS_PROJECT_ADDED.format(name), 'green')" 4643,"def list(self, **kwargs): """"""displays all projects on database """""" projects = Project.select().order_by(Project.name) if len(projects) == 0: self._print('No projects available', 'yellow') return for project in projects: project_repr = self._PROJECT_ITEM.format(project.name, project.path) row = '- {}'.format(self._PROJECT_ITEM.format(project.name, project.path)) six.print_(row)" 4644,"def parent_tags(self): """"""Provides tags of all parent HTML elements."""""" tags = set() for addr in self._addresses: if addr.attr == 'text': tags.add(addr.element.tag) tags.update(el.tag for el in addr.element.iterancestors()) tags.discard(HTMLFragment._root_tag) return frozenset(tags)" 4645,"def involved_tags(self): """"""Provides all HTML tags directly involved in this string."""""" if len(self._addresses) < 2: # there can't be a tag boundary if there's only 1 or 0 characters return frozenset() # creating 'parent_sets' mapping, where the first item in tuple # is the address of character and the second is set # of character's parent HTML elements parent_sets = [] # meanwhile we are creatingalso a set of common parents so we can # put them away later on 
(we're not interested in them as # they're only some global wrappers) common_parents = set() for addr in self._addresses: parents = set() if addr.attr == 'text': parents.add(addr.element) parents.update(addr.element.iterancestors()) parent_sets.append((addr, parents)) if not common_parents: common_parents = parents else: common_parents &= parents # constructing final set of involved tags involved_tags = set() prev_addr = None for addr, parents in parent_sets: parents = parents - common_parents involved_tags.update(p.tag for p in parents) # hidden tags - sometimes there are tags without text which # can hide between characters, but they actually break textflow is_tail_of_hidden = ( prev_addr and addr.attr == 'tail' and prev_addr.element != addr.element ) if is_tail_of_hidden: involved_tags.add(addr.element) prev_addr = addr return frozenset(involved_tags)" 4646,"def _parse(self, html): """"""Parse given string as HTML and return it's etree representation."""""" if self._has_body_re.search(html): tree = lxml.html.document_fromstring(html).find('.//body') self.has_body = True else: tree = lxml.html.fragment_fromstring(html, create_parent=self._root_tag) if tree.tag != self._root_tag: # ensure the root element exists even if not really needed, # so the tree has always the same structure root = lxml.html.HtmlElement() root.tag = self._root_tag root.append(tree) return root return tree" 4647,"def _iter_texts(self, tree): """"""Iterates over texts in given HTML tree."""""" skip = ( not isinstance(tree, lxml.html.HtmlElement) # comments, etc. or tree.tag in self.skipped_tags ) if not skip: if tree.text: yield Text(tree.text, tree, 'text') for child in tree: for text in self._iter_texts(child): yield text if tree.tail: yield Text(tree.tail, tree, 'tail')" 4648,"def _analyze_tree(self, tree): """"""Analyze given tree and create mapping of indexes to character addresses. """""" addresses = [] for text in self._iter_texts(tree): for i, char in enumerate(text.content): if char in whitespace: char = ' ' addresses.append(CharAddress(char, text.element, text.attr, i)) # remove leading and trailing whitespace while addresses and addresses[0].char == ' ': del addresses[0] while addresses and addresses[-1].char == ' ': del addresses[-1] return addresses" 4649,"def _validate_index(self, index): """"""Validates given index, eventually raises errors."""""" if isinstance(index, slice): if index.step and index.step != 1: raise IndexError('Step is not allowed.') indexes = (index.start, index.stop) else: indexes = (index,) for index in indexes: if index is not None and index < 0: raise IndexError('Negative indexes are not allowed.')" 4650,"def _find_pivot_addr(self, index): """"""Inserting by slicing can lead into situation where no addresses are selected. In that case a pivot address has to be chosen so we know where to add characters. """""" if not self.addresses or index.start == 0: return CharAddress('', self.tree, 'text', -1) # string beginning if index.start > len(self.addresses): return self.addresses[-1] return self.addresses[index.start]" 4651,"def apply_connectivity_changes(request, add_vlan_action, remove_vlan_action, logger=None): """""" Standard implementation for the apply_connectivity_changes operation This function will accept as an input the actions to perform for add/remove vlan. It implements the basic flow of decoding the JSON connectivity changes requests, and combining the results of the add/remove vlan functions into a result object. 
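The flow described above is a plain dispatch-and-collect loop: decode the request, route every action to the matching add/remove callback, and gather the per-action results into one response object. A generic sketch of that loop over hypothetical dict-based actions, not the actual CloudShell request objects:

    def apply_actions(actions, handlers):
        results = []
        for action in actions:
            handler = handlers.get(action['type'])
            if handler is None:
                continue   # unknown action types are skipped, as above
            results.append(handler(action))
        return results

    results = apply_actions(
        [{'type': 'setVlan', 'vlan': 100}, {'type': 'removeVlan', 'vlan': 100}],
        {'setVlan': lambda a: ('set ok', a['vlan']),
         'removeVlan': lambda a: ('remove ok', a['vlan'])})
    print(results)   # prints: [('set ok', 100), ('remove ok', 100)]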
:param str request: json string sent from the CloudShell server describing the connectivity changes to perform :param Function -> ConnectivityActionResult remove_vlan_action: This action will be called for VLAN remove operations :param Function -> ConnectivityActionResult add_vlan_action: This action will be called for VLAN add operations :param logger: logger to use for the operation, if you don't provide a logger, a default Python logger will be used :return Returns a driver action result object, this can be returned to CloudShell server by the command result :rtype: DriverResponseRoot """""" if not logger: logger = logging.getLogger(""apply_connectivity_changes"") if request is None or request == '': raise Exception('ConnectivityOperations', 'request is None or empty') holder = connectivity_request_from_json(request) driver_response = DriverResponse() results = [] driver_response_root = DriverResponseRoot() for action in holder.actions: logger.info('Action: ', action.__dict__) if action.type == ConnectivityActionRequest.SET_VLAN: action_result = add_vlan_action(action) elif action.type == ConnectivityActionRequest.REMOVE_VLAN: action_result = remove_vlan_action(action) else: continue results.append(action_result) driver_response.actionResults = results driver_response_root.driverResponse = driver_response return driver_response_root" 4652,"def check_api_key(email, api_key): """"""Check the API key of the user."""""" table = boto3.resource(""dynamodb"").Table(os.environ['people']) user = table.get_item(Key={'email': email}) if not user: return False user = user.get(""Item"") if api_key != user.get('api_key', None): return False return user" 4653,"def lambda_handler(event, context): """"""Main handler."""""" email = event.get('email', None) api_key = event.get('api_key', None) if not (api_key or email): msg = ""Missing authentication parameters in your request"" return {'success': False, 'message': msg} indicators = list(set(event.get('indicators', list()))) if len(indicators) == 0: return {'success': False, 'message': ""No indicators sent in""} user = check_api_key(email, api_key) if not user: return {'success': False, 'message': ""Email or API key was invalid.""} role = check_role(user) if not role: return {'success': False, 'message': ""Account not approved to contribute.""} current_time = datetime.datetime.now().strftime(""%Y-%m-%d %H:%M:%S"") table = boto3.resource(""dynamodb"").Table(os.environ['database']) with table.batch_writer(overwrite_by_pkeys=['indicator']) as batch: for item in indicators: if item == """": continue if len(item) != 32: item = hashlib.md5(item).hexdigest() try: batch.put_item(Item={'indicator': item, 'creator': user.get('email'), 'datetime': current_time}) except Exception as e: logger.error(str(e)) msg = ""Wrote {} indicators"".format(len(indicators)) return {'success': True, 'message': msg, 'writeCount': len(indicators)}" 4654,"def parse_email_url(url): """"""Parses an email URL."""""" conf = {} url = urlparse.urlparse(url) # Remove query strings path = url.path[1:] path = path.split('?', 2)[0] # Update with environment configuration conf.update({ 'EMAIL_FILE_PATH': path, 'EMAIL_HOST_USER': url.username, 'EMAIL_HOST_PASSWORD': url.password, 'EMAIL_HOST': url.hostname, 'EMAIL_PORT': url.port, }) if url.scheme in EMAIL_SCHEMES: conf['EMAIL_BACKEND'] = EMAIL_SCHEMES[url.scheme] if url.scheme == 'smtps': conf['EMAIL_USE_TLS'] = True else: conf['EMAIL_USE_TLS'] = False return conf" 4655,"def config(name='EMAIL_URL', default='console://'): """"""Returns a 
dictionary with EMAIL_* settings from EMAIL_URL."""""" conf = {} s = env(name, default) if s: conf = parse_email_url(s) return conf" 4656,"def replace(html, replacements=None): """"""Performs replacements on given HTML string."""""" if not replacements: return html # no replacements html = HTMLFragment(html) for r in replacements: r.replace(html) return unicode(html)" 4657,"def _is_replacement_allowed(self, s): """"""Tests whether replacement is allowed on given piece of HTML text."""""" if any(tag in s.parent_tags for tag in self.skipped_tags): return False if any(tag not in self.textflow_tags for tag in s.involved_tags): return False return True" 4658,"def replace(self, html): """"""Perform replacements on given HTML fragment."""""" self.html = html text = html.text() positions = [] def perform_replacement(match): offset = sum(positions) start, stop = match.start() + offset, match.end() + offset s = self.html[start:stop] if self._is_replacement_allowed(s): repl = match.expand(self.replacement) self.html[start:stop] = repl else: repl = match.group() # no replacement takes place positions.append(match.end()) return repl while True: if positions: text = text[positions[-1]:] text, n = self.pattern.subn(perform_replacement, text, count=1) if not n: # all is already replaced break" 4659,"def read_relative_file(filename, relative_to=None): """"""Returns contents of the given file, which path is supposed relative to this package."""""" if relative_to is None: relative_to = os.path.dirname(__file__) with open(os.path.join(os.path.dirname(relative_to), filename)) as f: return f.read()" 4660,"def get_events(self): """"""Get events from the cloud node."""""" to_send = {'limit': 50} response = self._send_data('POST', 'admin', 'get-events', to_send) output = {'message': """"} for event in response['events']: desc = ""Source IP: {ip}\n"" desc += ""Datetime: {time}\n"" desc += ""Indicator: {match}\n"" desc += ""Method: {method}\n"" desc += ""URL: {url}\n"" desc += ""Request Type: {type}\n"" desc += ""User-Agent: {userAgent}\n"" desc += ""Contact: {contact}\n"" desc += ""\n"" output['message'] += desc.format(**event) return output" 4661,"def flush_events(self): """"""Flush events from the cloud node."""""" response = self._send_data('DELETE', 'admin', 'flush-events', {}) if response['success']: msg = ""Events flushed"" else: msg = ""Flushing of events failed"" output = {'message': msg} return output" 4662,"def put(self): """"""Push the info represented by this ``Metric`` to CloudWatch."""""" try: self.cloudwatch.put_metric_data( Namespace=self.namespace, MetricData=[{ 'MetricName': self.name, 'Value': self.value, 'Timestamp': self.timestamp }] ) except Exception: logging.exception(""Error pushing {0} to CloudWatch."".format(str(self)))" 4663,"def log(self, message, level=logging.INFO, *args, **kwargs): """""" Send log entry :param str message: log message :param int level: `Logging level `_ :param list args: log record arguments :param dict kwargs: log record key argument """""" msg = ""{}.{}: {}[{}]: {}"".format( self.__class__.__name__, self.status, self.__class__.path, self.uuid, message ) extra = kwargs.pop(""extra"", dict()) extra.update(dict(kmsg=Message( self.uuid, entrypoint=self.__class__.path, params=self.params, metadata=self.metadata ).dump())) return logger.log( level=level, msg=msg, extra=extra, *args, **kwargs )" 4664,"def setFocus(self, reason=None): """"""Sets focus to first field. 
Note: reason is ignored."""""" self.formLayout.itemAt(0, QFormLayout.FieldRole).widget().setFocus()" 4665,"def _connect(self): """""" Connect to a RabbitMQ instance :returns: Boolean corresponding to success of connection :rtype: bool """""" logger.info(""Connecting to rabbit"") for url in self._urls: try: self._connection = pika.BlockingConnection(pika.URLParameters(url)) self._channel = self._connection.channel() self._declare() if self._confirm_delivery: self._channel.confirm_delivery() logger.info(""Enabled delivery confirmation"") logger.debug(""Connected to rabbit"") return True except pika.exceptions.AMQPConnectionError: logger.exception(""Unable to connect to rabbit"") continue except Exception: logger.exception(""Unexpected exception connecting to rabbit"") continue raise pika.exceptions.AMQPConnectionError" 4666,"def _disconnect(self): """""" Cleanly close a RabbitMQ connection. :returns: None """""" try: self._connection.close() logger.debug(""Disconnected from rabbit"") except Exception: logger.exception(""Unable to close connection"")" 4667,"def publish_message(self, message, content_type=None, headers=None, mandatory=False, immediate=False): """""" Publish a response message to a RabbitMQ instance. :param message: Response message :param content_type: Pika BasicProperties content_type value :param headers: Message header properties :param mandatory: The mandatory flag :param immediate: The immediate flag :returns: Boolean corresponding to the success of publishing :rtype: bool """""" logger.debug(""Publishing message"") try: self._connect() return self._do_publish(mandatory=mandatory, immediate=immediate, content_type=content_type, headers=headers, message=message) except pika.exceptions.AMQPConnectionError: logger.error(""AMQPConnectionError occurred. Message not published."") raise PublishMessageError except NackError: # raised when a message published in publisher-acknowledgments mode # is returned via `Basic.Return` followed by `Basic.Ack`. logger.error(""NackError occurred. Message not published."") raise PublishMessageError except UnroutableError: # raised when a message published in publisher-acknowledgments # mode is returned via `Basic.Return` followed by `Basic.Ack`. logger.error(""UnroutableError occurred. Message not published."") raise PublishMessageError except Exception: logger.exception(""Unknown exception occurred. Message not published."") raise PublishMessageError" 4668,"def visit(folder, provenance_id, step_name, previous_step_id=None, config=None, db_url=None, is_organised=True): """"""Record all files from a folder into the database. Note: If a file has been copied from a previous processing step without any transformation, it will be detected and marked in the DB. The type of file will be detected and stored in the DB (NIFTI, DICOM, ...). If a files (e.g. a DICOM file) contains some meta-data, those will be stored in the DB. Arguments: :param folder: folder path. :param provenance_id: provenance label. :param step_name: Name of the processing step that produced the folder to visit. :param previous_step_id: (optional) previous processing step ID. If not defined, we assume this is the first processing step. :param config: List of flags: - boost: (optional) When enabled, we consider that all the files from a same folder share the same meta-data. When enabled, the processing is (about 2 times) faster. This option is enabled by default. - session_id_by_patient: Rarely, a data set might use study IDs which are unique by patient (not for the whole study). 
E.g.: LREN data. In such a case, you have to enable this flag. This will use PatientID + StudyID as a session ID. - visit_id_in_patient_id: Rarely, a data set might mix patient IDs and visit IDs. E.g. : LREN data. In such a case, you have to enable this flag. This will try to split PatientID into VisitID and PatientID. - visit_id_from_path: Enable this flag to get the visit ID from the folder hierarchy instead of DICOM meta-data (e.g. can be useful for PPMI). - repetition_from_path: Enable this flag to get the repetition ID from the folder hierarchy instead of DICOM meta-data (e.g. can be useful for PPMI). :param db_url: (optional) Database URL. If not defined, it looks for an Airflow configuration file. :param is_organised: (optional) Disable this flag when scanning a folder that has not been organised yet (should only affect nifti files). :return: return processing step ID. """""" config = config if config else [] logging.info(""Visiting %s"", folder) logging.info(""-> is_organised=%s"", str(is_organised)) logging.info(""-> config=%s"", str(config)) logging.info(""Connecting to database..."") db_conn = connection.Connection(db_url) step_id = _create_step(db_conn, step_name, provenance_id, previous_step_id) previous_files_hash = _get_files_hash_from_step(db_conn, previous_step_id) checked = dict() def process_file(file_path): logging.debug(""Processing '%s'"" % file_path) file_type = _find_type(file_path) if ""DICOM"" == file_type: is_copy = _hash_file(file_path) in previous_files_hash leaf_folder = os.path.split(file_path)[0] if leaf_folder not in checked or 'boost' not in config: ret = dicom_import.dicom2db(file_path, file_type, is_copy, step_id, db_conn, 'session_id_by_patient' in config, 'visit_id_in_patient_id' in config, 'visit_id_in_patient_id' in config, 'repetition_from_path' in config) try: checked[leaf_folder] = ret['repetition_id'] except KeyError: # TODO: Remove it when dicom2db will be more stable logging.warning(""Cannot find repetition ID !"") else: dicom_import.extract_dicom( file_path, file_type, is_copy, checked[leaf_folder], step_id) elif ""NIFTI"" == file_type and is_organised: is_copy = _hash_file(file_path) in previous_files_hash nifti_import.nifti2db(file_path, file_type, is_copy, step_id, db_conn, 'session_id_by_patient' in config, 'visit_id_in_patient_id' in config) elif file_type: is_copy = _hash_file(file_path) in previous_files_hash others_import.others2db( file_path, file_type, is_copy, step_id, db_conn) if sys.version_info.major == 3 and sys.version_info.minor < 5: matches = [] for root, dirnames, filenames in os.walk(folder): for filename in fnmatch.filter(filenames, '*'): matches.append(os.path.join(root, filename)) for file_path in matches: process_file(file_path) else: for file_path in glob.iglob(os.path.join(folder, ""**/*""), recursive=True): process_file(file_path) logging.info(""Closing database connection..."") db_conn.close() return step_id" 4669,"def create_provenance(dataset, software_versions=None, db_url=None): """"""Create (or get if already exists) a provenance entity, store it in the database and get back a provenance ID. Arguments: :param dataset: Name of the data set. :param software_versions: (optional) Version of the software components used to get the data. It is a dictionary that accepts the following fields: - matlab_version - spm_version - spm_revision - fn_called - fn_version - others :param db_url: (optional) Database URL. If not defined, it looks for an Airflow configuration file. :return: Provenance ID. 
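The body that follows is a get-or-create: look the Provenance row up by its natural key, and only insert and commit a new row when the lookup comes back empty. A compact sketch of that pattern against a generic SQLAlchemy session and a hypothetical model class:

    def get_or_create(session, model, **fields):
        obj = session.query(model).filter_by(**fields).first()
        if obj is None:
            obj = model(**fields)   # not found: create and persist it
            session.add(obj)
            session.commit()
        return obj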
"""""" logging.info(""Connecting to database..."") db_conn = connection.Connection(db_url) try: matlab_version = software_versions['matlab_version'] except (KeyError, TypeError): matlab_version = None try: spm_version = software_versions['spm_version'] except (KeyError, TypeError): spm_version = None try: spm_revision = software_versions['spm_revision'] except (KeyError, TypeError): spm_revision = None try: fn_called = software_versions['fn_called'] except (KeyError, TypeError): fn_called = None try: fn_version = software_versions['fn_version'] except (KeyError, TypeError): fn_version = None try: others = software_versions['others'] except (KeyError, TypeError): others = None provenance = db_conn.db_session.query(db_conn.Provenance).filter_by( dataset=dataset, matlab_version=matlab_version, spm_version=spm_version, spm_revision=spm_revision, fn_called=fn_called, fn_version=fn_version, others=others ).first() if not provenance: provenance = db_conn.Provenance( dataset=dataset, matlab_version=matlab_version, spm_version=spm_version, spm_revision=spm_revision, fn_called=fn_called, fn_version=fn_version, others=others ) db_conn.db_session.merge(provenance) db_conn.db_session.commit() provenance = db_conn.db_session.query(db_conn.Provenance).filter_by( dataset=dataset, matlab_version=matlab_version, spm_version=spm_version, spm_revision=spm_revision, fn_called=fn_called, fn_version=fn_version, others=others ).first() provenance_id = provenance.id logging.info(""Closing database connection..."") db_conn.close() return provenance_id" 4670,"def check_sockets(self): ''' Check for new messages on sockets and respond accordingly. .. versionchanged:: 0.11.3 Update routes table by setting ``df_routes`` property of :attr:`parent.canvas_slave`. .. versionchanged:: 0.12 Update ``dynamic_electrode_state_shapes`` layer of :attr:`parent.canvas_slave` when dynamic electrode actuation states change. .. versionchanged:: 0.13 Update local global, electrode, and route command lists in response to ``microdrop.command_plugin`` messages. 
''' try: msg_frames = (self.command_socket .recv_multipart(zmq.NOBLOCK)) except zmq.Again: pass else: self.on_command_recv(msg_frames) try: msg_frames = (self.subscribe_socket .recv_multipart(zmq.NOBLOCK)) source, target, msg_type, msg_json = msg_frames if ((source == 'microdrop.device_info_plugin') and (msg_type == 'execute_reply')): msg = json.loads(msg_json) if msg['content']['command'] == 'get_device': data = decode_content_data(msg) if data is not None: self.parent.on_device_loaded(data) elif ((source == 'microdrop.electrode_controller_plugin') and (msg_type == 'execute_reply')): msg = json.loads(msg_json) if msg['content']['command'] in ('set_electrode_state', 'set_electrode_states'): data = decode_content_data(msg) if data is None: print msg else: #self.emit('electrode-states-updated', data) self.parent.on_electrode_states_updated(data) elif msg['content']['command'] == 'get_channel_states': data = decode_content_data(msg) if data is None: print msg else: #self.emit('electrode-states-set', data) self.parent.on_electrode_states_set(data) elif ((source == 'droplet_planning_plugin') and (msg_type == 'execute_reply')): msg = json.loads(msg_json) if msg['content']['command'] in ('add_route', ): self.execute_async('droplet_planning_plugin', 'get_routes') elif msg['content']['command'] in ('get_routes', ): data = decode_content_data(msg) self.parent.canvas_slave.df_routes = data elif ((source == 'microdrop.command_plugin') and (msg_type == 'execute_reply')): msg = json.loads(msg_json) if msg['content']['command'] in ('get_commands', 'unregister_command', 'register_command'): df_commands = decode_content_data(msg).set_index('namespace') for group_i, df_i in df_commands.groupby('namespace'): register = getattr(self.parent.canvas_slave, 'register_%s_command' % group_i, None) if register is None: continue else: for j, command_ij in df_i.iterrows(): register(command_ij.command_name, title=command_ij.title, group=command_ij.plugin_name) _L().debug('registered %s command: `%s`', group_i, command_ij) else: self.most_recent = msg_json except zmq.Again: pass except: logger.error('Error processing message from subscription ' 'socket.', exc_info=True) return True" 4671,"def on_execute__set_video_config(self, request): ''' .. versionchanged:: 0.12 Accept empty video configuration as either `None` or an empty `pandas.Series`. ''' data = decode_content_data(request) compare_fields = ['device_name', 'width', 'height', 'name', 'fourcc', 'framerate'] if data['video_config'] is None or not data['video_config'].shape[0]: i = None else: for i, row in self.parent.video_mode_slave.configs.iterrows(): if (row[compare_fields] == data['video_config'][compare_fields]).all(): break else: i = None if i is None: logger.error('Unsupported video config:\n%s', data['video_config']) logger.error('Video configs:\n%s', self.parent.video_mode_slave.configs) self.parent.video_mode_slave.config_combo.set_active(0) else: logger.error('Set video config (%d):\n%s', i + 1, data['video_config']) self.parent.video_mode_slave.config_combo.set_active(i + 1)" 4672,"def on_execute__set_surface_alphas(self, request): ''' .. versionchanged:: 0.12 Queue redraw after setting surface alphas. 
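The handler below applies the alpha values and then defers the actual canvas redraw to the GTK main loop instead of drawing inline. A minimal sketch of that defer-to-idle pattern using the PyGTK-era `gobject` API seen in this code; `canvas` is a hypothetical object exposing `set_surface_alpha()`, `render()` and `draw()`:

    import gobject

    def apply_and_redraw(canvas, alphas):
        for name, alpha in alphas.items():
            canvas.set_surface_alpha(name, alpha)
        canvas.render()
        # schedule the (potentially slow) draw for when the main loop is idle
        gobject.idle_add(canvas.draw)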
''' data = decode_content_data(request) logger.debug('[on_execute__set_surface_alphas] %s', data['surface_alphas']) for name, alpha in data['surface_alphas'].iteritems(): self.parent.canvas_slave.set_surface_alpha(name, alpha) self.parent.canvas_slave.render() gobject.idle_add(self.parent.canvas_slave.draw)" 4673,"def on_execute__set_dynamic_electrode_states(self, request): ''' .. versionadded:: 0.15 Set dynamic electrode states. ''' data = decode_content_data(request) self.parent.on_dynamic_electrode_states_set(data['electrode_states'])" 4674,"def on_connect_button__clicked(self, event): ''' Connect to Zero MQ plugin hub (`zmq_plugin.hub.Hub`) using the settings from the text entry fields (e.g., hub URI, plugin name). Emit `plugin-connected` signal with the new plugin instance after hub connection has been established. ''' hub_uri = self.plugin_uri.get_text() ui_plugin_name = self.ui_plugin_name.get_text() plugin = self.create_plugin(ui_plugin_name, hub_uri) self.init_plugin(plugin) self.connect_button.set_sensitive(False) self.emit('plugin-connected', plugin)" 4675,"def create_quali_api_instance(context, logger): """""" Get needed attributes from context and create instance of QualiApiHelper :param context: :param logger: :return: """""" if hasattr(context, 'reservation') and context.reservation: domain = context.reservation.domain elif hasattr(context, 'remote_reservation') and context.remote_reservation: domain = context.remote_reservation.domain else: domain = None address = context.connectivity.server_address token = context.connectivity.admin_auth_token if token: instance = QualiAPIHelper(address, logger, token=token, domain=domain) else: instance = QualiAPIHelper(address, logger, username='admin', password='admin', domain=domain) return instance" 4676,"def login(self): """""" Login :return: """""" uri = 'API/Auth/Login' if self._token: json_data = {'token': self._token, 'domain': self._domain} else: json_data = {'username': self._username, 'password': self._password, 'domain': self._domain} result = self.__rest_client.request_put(uri, json_data) self.__rest_client.session.headers.update(authorization=""Basic {0}"".format(result.replace('""', '')))" 4677,"def run_migrations_offline(): """"""Run migrations in 'offline' mode. This configures the context with just a URL and not an Engine, though an Engine is acceptable here as well. By skipping the Engine creation we don't even need a DBAPI to be available. Calls to context.execute() here emit the given string to the script output. """""" url = winchester_config['database']['url'] context.configure(url=url) with context.begin_transaction(): context.run_migrations()" 4678,"def run_migrations_online(): """"""Run migrations in 'online' mode. In this scenario we need to create an Engine and associate a connection with the context. 
"""""" engine = engine_from_config( winchester_config['database'], prefix='', poolclass=pool.NullPool) connection = engine.connect() context.configure( connection=connection, target_metadata=target_metadata ) try: with context.begin_transaction(): context.run_migrations() finally: connection.close()" 4679,"def follow_cf(save, Uspan, target_cf, nup, n_tot=5.0, slsp=None): """"""Calculates the quasiparticle weight in single site spin hamiltonian under with N degenerate half-filled orbitals """""" if slsp == None: slsp = Spinon(slaves=6, orbitals=3, avg_particles=n_tot, hopping=[0.5]*6, populations = np.asarray([n_tot]*6)/6) zet, lam, mu, mean_f = [], [], [], [] for co in Uspan: print('U=', co, 'del=', target_cf) res=root(targetpop, nup[-1],(co,target_cf,slsp, n_tot)) print(res.x) if res.x>nup[-1]: break nup.append(res.x) slsp.param['populations']=population_distri(nup[-1]) mean_f.append(slsp.mean_field()) zet.append(slsp.quasiparticle_weight()) lam.append(slsp.param['lambda']) mu.append(orbital_energies(slsp.param, zet[-1])) # plt.plot(np.asarray(zet)[:,0], label='d={}, zl'.format(str(target_cf))) # plt.plot(np.asarray(zet)[:,5], label='d={}, zh'.format(str(target_cf))) case = save.createGroup('cf={}'.format(target_cf)) varis = st.setgroup(case) st.storegroup(varis, Uspan[:len(zet)], zet, lam, mu, nup[1:],target_cf,mean_f)" 4680,"def targetpop(upper_density, coul, target_cf, slsp, n_tot): """"""restriction on finding the right populations that leave the crystal field same"""""" if upper_density < 0.503: return 0. trypops=population_distri(upper_density, n_tot) slsp.set_filling(trypops) slsp.selfconsistency(coul,0) efm_free = dos_bethe_find_crystalfield(trypops, slsp.param['hopping']) orb_ener = slsp.param['lambda']+ slsp.quasiparticle_weight()*efm_free obtained_cf = orb_ener[5] - orb_ener[0] return target_cf - obtained_cf" 4681,"def produce_pdf(rst_content=None, doctree_content=None, filename=None): """"""produce a pdf content based of a given rst content If filename is given, it will store the result using the given filename if no filename is given, it will generate a pdf in /tmp/ with a random name """""" if filename is None: filename = os.path.join( ""/tmp"", ''.join([random.choice(string.ascii_letters + string.digits) for n in range(15)]) + '.pdf') r2p = RstToPdf(stylesheets=['pdf.style'], style_path=[os.path.join(os.path.dirname(__file__), 'styles')], breaklevel=0, splittables=True, footer=""""""###Title### - ###Page###/###Total###"""""") r2p.createPdf(text=rst_content, doctree=doctree_content, output=filename) return filename" 4682,"def load(self, filename=None): """"""Method was overriden to set spectrum.filename as well"""""" DataFile.load(self, filename) self.spectrum.filename = filename" 4683,"def _do_save_as(self, filename): """"""Saves spectrum back to FITS file."""""" if len(self.spectrum.x) < 2: raise RuntimeError(""Spectrum must have at least two points"") if os.path.isfile(filename): os.unlink(filename) # PyFITS does not overwrite file hdu = self.spectrum.to_hdu() overwrite_fits(hdu, filename)" 4684,"def alternator(*pipes): ''' a lot like zip, just instead of: (a,b),(a,b),(a,b) it works more like: a,b,a,b,a,b,a until one of the pipes ends ''' try: for p in cycle(map(iter, pipes)): yield next(p) except StopIteration: pass" 4685,"def matches(): """"""This resource returns a list of the currently running WvW matches, with the participating worlds included in the result. Further details about a match can be requested using the ``match_details`` function. 
The response is a list of match objects, each of which contains the following properties: wvw_match_id (string): The WvW match id. red_world_id (number): The world id of the red world. blue_world_id (number): The world id of the blue world. green_world_id (number): The world id of the green world. start_time (datetime): A timestamp of when the match started. end_time (datetime): A timestamp of when the match ends. """""" wvw_matches = get_cached(""wvw/matches.json"", False).get(""wvw_matches"") for match in wvw_matches: match[""start_time""] = parse_datetime(match[""start_time""]) match[""end_time""] = parse_datetime(match[""end_time""]) return wvw_matches" 4686,"def objective_names(lang=""en""): """"""This resource returns a list of the localized WvW objective names for the specified language. :param lang: The language to query the names for. :return: A dictionary mapping the objective Ids to the names. *Note that these are not the names displayed in the game, but rather the abstract type.* """""" params = {""lang"": lang} cache_name = ""objective_names.%(lang)s.json"" % params data = get_cached(""wvw/objective_names.json"", cache_name, params=params) return dict([(objective[""id""], objective[""name""]) for objective in data])" 4687,"def _parse_data(self, data, charset): """""" Parse the xml data into dictionary. """""" builder = TreeBuilder(numbermode=self._numbermode) if isinstance(data,basestring): xml.sax.parseString(data, builder) else: xml.sax.parse(data, builder) return builder.root[self._root_element_name()]" 4688,"def _format_data(self, data, charset): """""" Format data into XML. """""" if data is None or data == '': return u'' stream = StringIO.StringIO() xml = SimplerXMLGenerator(stream, charset) xml.startDocument() xml.startElement(self._root_element_name(), {}) self._to_xml(xml, data) xml.endElement(self._root_element_name()) xml.endDocument() return stream.getvalue()" 4689,"def _to_xml(self, xml, data, key=None): """""" Recursively convert the data into xml. This function was originally copied from the `Piston project `_ It has been modified since. :param xml: the xml document :type xml: SimplerXMLGenerator :param data: data to be formatted :param key: name of the parent element (for root this is ``None``) """""" if isinstance(data, (list, tuple)): for item in data: elemname = self._list_item_element_name(key) xml.startElement(elemname, {}) self._to_xml(xml, item) xml.endElement(elemname) elif isinstance(data, dict): for key, value in data.iteritems(): xml.startElement(key, {}) self._to_xml(xml, value, key) xml.endElement(key) else: xml.characters(smart_unicode(data))" 4690,"def startElement(self, name, attrs): """""" Initialize new node and store current node into stack. """""" self.stack.append((self.current, self.chardata)) self.current = {} self.chardata = []" 4691,"def endElement(self, name): """""" End current xml element, parse and add to to parent node. """""" if self.current: # we have nested elements obj = self.current else: # text only node text = ''.join(self.chardata).strip() obj = self._parse_node_data(text) newcurrent, self.chardata = self.stack.pop() self.current = self._element_to_node(newcurrent, name, obj)" 4692,"def _parse_node_data(self, data): """""" Parse the value of a node. Override to provide your own parsing. 
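A minimal sketch of such an override, assuming these SAX handler methods live on the ``TreeBuilder`` used by ``_parse_data``; the subclass name and behaviour are purely illustrative: .. code-block:: python

    from decimal import Decimal, InvalidOperation

    class DecimalOnlyTreeBuilder(TreeBuilder):  # hypothetical subclass
        def _parse_node_data(self, data):
            # always try Decimal first, fall back to the raw text
            try:
                return Decimal(data)
            except (InvalidOperation, ValueError):
                return data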
"""""" data = data or '' if self.numbermode == 'basic': return self._try_parse_basic_number(data) elif self.numbermode == 'decimal': return self._try_parse_decimal(data) else: return data" 4693,"def _try_parse_basic_number(self, data): """""" Try to convert the data into ``int`` or ``float``. :returns: ``Decimal`` or ``data`` if conversion fails. """""" # try int first try: return int(data) except ValueError: pass # try float next try: return float(data) except ValueError: pass # no luck, return data as it is return data" 4694,"def _element_to_node(self, node, name, value): """""" Insert the parsed element (``name``, ``value`` pair) into the node. You should always use the returned node and forget the one that was given in parameter. :param node: the node where the is added to :returns: the node. Note that this may be a new node instance. """""" # is the target node a list? try: node.append(value) except AttributeError: pass else: return node # target node is a dict if name in node: # there's already an element with same name -> convert the node into list node = node.values() + [value] else: # just add the value into the node node[name] = value return node" 4695,"def _get_programs_dict(pkgname_only, flag_protected, flag_no_pfant=False): """"""Returns dictionary {(package description): [ExeInfo0, ...], ...}"""""" allinfo = f311.get_programs_dict(pkgname_only, flag_protected) if not flag_no_pfant and ""pyfant"" in allinfo: _add_PFANT(allinfo) return allinfo" 4696,"def apize_raw(url, method='GET'): """""" Convert data and params dict -> json. """""" def decorator(func): def wrapper(*args, **kwargs): elem = func(*args, **kwargs) if type(elem) is not dict: raise BadReturnVarType(func.__name__) response = send_request(url, method, elem.get('data', {}), elem.get('args', {}), elem.get('params', {}), elem.get('headers', {}), elem.get('cookies', {}), elem.get('timeout', 8), elem.get('is_json', False), elem.get('verify_cert', True) ) return response return wrapper return decorator" 4697,"def extract_version(path): """""" Reads the file at the specified path and returns the version contained in it. This is meant for reading the __init__.py file inside a package, and so it expects a version field like: __version__ = '1.0.0' :param path: path to the Python file :return: the version inside the file """""" # Regular expression for the version _version_re = re.compile(r'__version__\s+=\s+(.*)') with open(path + '__init__.py', 'r', encoding='utf-8') as f: version = f.read() if version: version = _version_re.search(version) if version: version = version.group(1) version = str(ast.literal_eval(version.rstrip())) extracted = version else: extracted = None else: extracted = None return extracted" 4698,"def _make_connect(module, args, kwargs): """""" Returns a function capable of making connections with a particular driver given the supplied credentials. """""" # pylint: disable-msg=W0142 return functools.partial(module.connect, *args, **kwargs)" 4699,"def connect(module, *args, **kwargs): """""" Connect to a database using the given DB-API driver module. Returns a database context representing that connection. Any arguments or keyword arguments are passed the module's :py:func:`connect` function. """""" mdr = SingleConnectionMediator( module, _make_connect(module, args, kwargs)) return Context(module, mdr)" 4700,"def create_pool(module, max_conns, *args, **kwargs): """""" Create a connection pool appropriate to the driver module's capabilities. 
"""""" if not hasattr(module, 'threadsafety'): raise NotSupported(""Cannot determine driver threadsafety."") if max_conns < 1: raise ValueError(""Minimum number of connections is 1."") if module.threadsafety >= 2: return Pool(module, max_conns, *args, **kwargs) if module.threadsafety >= 1: return DummyPool(module, *args, **kwargs) raise ValueError(""Bad threadsafety level: %d"" % module.threadsafety)" 4701,"def transactional(wrapped): """""" A decorator to denote that the content of the decorated function or method is to be ran in a transaction. The following code is equivalent to the example for :py:func:`dbkit.transaction`:: import sqlite3 import sys from dbkit import connect, transactional, query_value, execute # ...do some stuff... with connect(sqlite3, '/path/to/my.db') as ctx: try: change_ownership(page_id, new_owner_id) catch ctx.IntegrityError: print >> sys.stderr, ""Naughty!"" @transactional def change_ownership(page_id, new_owner_id): old_owner_id = query_value( ""SELECT owner_id FROM pages WHERE page_id = ?"", (page_id,)) execute( ""UPDATE users SET owned = owned - 1 WHERE id = ?"", (old_owner_id,)) execute( ""UPDATE users SET owned = owned + 1 WHERE id = ?"", (new_owner_id,)) execute( ""UPDATE pages SET owner_id = ? WHERE page_id = ?"", (new_owner_id, page_id)) """""" # pylint: disable-msg=C0111 def wrapper(*args, **kwargs): with Context.current().transaction(): return wrapped(*args, **kwargs) return functools.update_wrapper(wrapper, wrapped)" 4702,"def execute(stmt, args=()): """""" Execute an SQL statement. Returns the number of affected rows. """""" ctx = Context.current() with ctx.mdr: cursor = ctx.execute(stmt, args) row_count = cursor.rowcount _safe_close(cursor) return row_count" 4703,"def query(stmt, args=(), factory=None): """""" Execute a query. This returns an iterator of the result set. """""" ctx = Context.current() factory = ctx.default_factory if factory is None else factory with ctx.mdr: return factory(ctx.execute(stmt, args), ctx.mdr)" 4704,"def query_row(stmt, args=(), factory=None): """""" Execute a query. Returns the first row of the result set, or `None`. """""" for row in query(stmt, args, factory): return row return None" 4705,"def query_value(stmt, args=(), default=None): """""" Execute a query, returning the first value in the first row of the result set. If the query returns no result set, a default value is returned, which is `None` by default. """""" for row in query(stmt, args, TupleFactory): return row[0] return default" 4706,"def execute_proc(procname, args=()): """""" Execute a stored procedure. Returns the number of affected rows. """""" ctx = Context.current() with ctx.mdr: cursor = ctx.execute_proc(procname, args) row_count = cursor.rowcount _safe_close(cursor) return row_count" 4707,"def query_proc(procname, args=(), factory=None): """""" Execute a stored procedure. This returns an iterator of the result set. """""" ctx = Context.current() factory = ctx.default_factory if factory is None else factory with ctx.mdr: return factory(ctx.execute_proc(procname, args), ctx.mdr)" 4708,"def query_proc_row(procname, args=(), factory=None): """""" Execute a stored procedure. Returns the first row of the result set, or `None`. """""" for row in query_proc(procname, args, factory): return row return None" 4709,"def query_proc_value(procname, args=(), default=None): """""" Execute a stored procedure, returning the first value in the first row of the result set. If it returns no result set, a default value is returned, which is `None` by default. 
"""""" for row in query_proc(procname, args, TupleFactory): return row[0] return default" 4710,"def make_placeholders(seq, start=1): """""" Generate placeholders for the given sequence. """""" if len(seq) == 0: raise ValueError('Sequence must have at least one element.') param_style = Context.current().param_style placeholders = None if isinstance(seq, dict): if param_style in ('named', 'pyformat'): template = ':%s' if param_style == 'named' else '%%(%s)s' placeholders = (template % key for key in six.iterkeys(seq)) elif isinstance(seq, (list, tuple)): if param_style == 'numeric': placeholders = (':%d' % i for i in xrange(start, start + len(seq))) elif param_style in ('qmark', 'format', 'pyformat'): placeholders = itertools.repeat( '?' if param_style == 'qmark' else '%s', len(seq)) if placeholders is None: raise NotSupported( ""Param style '%s' does not support sequence type '%s'"" % ( param_style, seq.__class__.__name__)) return ', '.join(placeholders)" 4711,"def make_file_object_logger(fh): """""" Make a logger that logs to the given file object. """""" def logger_func(stmt, args, fh=fh): """""" A logger that logs everything sent to a file object. """""" now = datetime.datetime.now() six.print_(""Executing (%s):"" % now.isoformat(), file=fh) six.print_(textwrap.dedent(stmt), file=fh) six.print_(""Arguments:"", file=fh) pprint.pprint(args, fh) return logger_func" 4712,"def current(cls, with_exception=True): """""" Returns the current database context. """""" if with_exception and len(cls.stack) == 0: raise NoContext() return cls.stack.top()" 4713,"def transaction(self): """""" Sets up a context where all the statements within it are ran within a single database transaction. For internal use only. """""" # The idea here is to fake the nesting of transactions. Only when # we've gotten back to the topmost transaction context do we actually # commit or rollback. with self.mdr: try: self._depth += 1 yield self self._depth -= 1 except self.mdr.OperationalError: # We've lost the connection, so there's no sense in # attempting to roll back back the transaction. self._depth -= 1 raise except: self._depth -= 1 if self._depth == 0: self.mdr.rollback() raise if self._depth == 0: self.mdr.commit()" 4714,"def cursor(self): """""" Get a cursor for the current connection. For internal use only. """""" cursor = self.mdr.cursor() with self.transaction(): try: yield cursor if cursor.rowcount != -1: self.last_row_count = cursor.rowcount self.last_row_id = getattr(cursor, 'lastrowid', None) except: self.last_row_count = None self.last_row_id = None _safe_close(cursor) raise" 4715,"def execute(self, stmt, args): """""" Execute a statement, returning a cursor. For internal use only. """""" self.logger(stmt, args) with self.cursor() as cursor: cursor.execute(stmt, args) return cursor" 4716,"def execute_proc(self, procname, args): """""" Execute a stored procedure, returning a cursor. For internal use only. """""" self.logger(procname, args) with self.cursor() as cursor: cursor.callproc(procname, args) return cursor" 4717,"def close(self): """""" Close the connection this context wraps. """""" self.logger = None for exc in _EXCEPTIONS: setattr(self, exc, None) try: self.mdr.close() finally: self.mdr = None" 4718,"def connect(self): """""" Returns a context that uses this pool as a connection source. 
"""""" ctx = Context(self.module, self.create_mediator()) ctx.logger = self.logger ctx.default_factory = self.default_factory return ctx" 4719,"def close(self): """""" Release all resources associated with this factory. """""" if self.mdr is None: return exc = (None, None, None) try: self.cursor.close() except: exc = sys.exc_info() try: if self.mdr.__exit__(*exc): exc = (None, None, None) except: exc = sys.exc_info() self.mdr = None self.cursor = None if exc != (None, None, None): six.reraise(*exc)" 4720,"def add_item(cls, item, **kwargs): """"""Add item. Add new item to the shopping cart. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.add_item(item, async=True) >>> result = thread.get() :param async bool :param LineItem item: Line item to add to cart (required) :return: ShoppingCart If the method is called asynchronously, returns the request thread. """""" kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._add_item_with_http_info(item, **kwargs) else: (data) = cls._add_item_with_http_info(item, **kwargs) return data" 4721,"def checkout(cls, order, **kwargs): """"""Checkout cart. Checkout cart, Making an order. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.checkout(order, async=True) >>> result = thread.get() :param async bool :param Order order: Required order details. (required) :return: Order If the method is called asynchronously, returns the request thread. """""" kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._checkout_with_http_info(order, **kwargs) else: (data) = cls._checkout_with_http_info(order, **kwargs) return data" 4722,"def delete_item(cls, item_id, **kwargs): """"""Remove item. Remove item from shopping cart This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.delete_item(item_id, async=True) >>> result = thread.get() :param async bool :param str item_id: Item ID to delete. (required) :return: ShoppingCart If the method is called asynchronously, returns the request thread. """""" kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._delete_item_with_http_info(item_id, **kwargs) else: (data) = cls._delete_item_with_http_info(item_id, **kwargs) return data" 4723,"def empty(cls, **kwargs): """"""Empty cart. Empty the shopping cart. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.empty(async=True) >>> result = thread.get() :param async bool :return: ShoppingCart If the method is called asynchronously, returns the request thread. """""" kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._empty_with_http_info(**kwargs) else: (data) = cls._empty_with_http_info(**kwargs) return data" 4724,"def get(cls, **kwargs): """"""Get cart. Retrieve the shopping cart of the current session. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.get(async=True) >>> result = thread.get() :param async bool :return: ShoppingCart If the method is called asynchronously, returns the request thread. 
"""""" kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._get_with_http_info(**kwargs) else: (data) = cls._get_with_http_info(**kwargs) return data" 4725,"def update_item(cls, item_id, item, **kwargs): """"""Update cart. Update cart item. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.update_item(item_id, item, async=True) >>> result = thread.get() :param async bool :param str item_id: Item ID to update. (required) :param LineItem item: Line item to update. (required) :return: ShoppingCart If the method is called asynchronously, returns the request thread. """""" kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._update_item_with_http_info(item_id, item, **kwargs) else: (data) = cls._update_item_with_http_info(item_id, item, **kwargs) return data" 4726,"def get_perm_names(cls, resource): """""" Return all permissions supported by the resource. This is used for auto-generating missing permissions rows into database in syncdb. """""" return [cls.get_perm_name(resource, method) for method in cls.METHODS]" 4727,"def get_perm_name(cls, resource, method): """""" Compose permission name @param resource the resource @param method the request method (case doesn't matter). """""" return '%s_%s_%s' % ( cls.PREFIX, cls._get_resource_name(resource), method.lower())" 4728,"def check_perm(self, request, resource): """""" Check permission @param request the HTTP request @param resource the requested resource @raise Forbidden if the user doesn't have access to the resource """""" perm_name = self.get_perm_name(resource, request.method) if not self._has_perm(request.user, perm_name): raise errors.Forbidden()" 4729,"def _has_perm(self, user, permission): """""" Check whether the user has the given permission @return True if user is granted with access, False if not. """""" if user.is_superuser: return True if user.is_active: perms = [perm.split('.')[1] for perm in user.get_all_permissions()] return permission in perms return False" 4730,"def wash_url_argument(var, new_type): """""" Wash argument into 'new_type', that can be 'list', 'str', 'int', 'tuple' or 'dict'. If needed, the check 'type(var) is not None' should be done before calling this function. @param var: variable value @param new_type: variable type, 'list', 'str', 'int', 'tuple' or 'dict' @return: as much as possible, value var as type new_type If var is a list, will change first element into new_type. 
If int check unsuccessful, returns 0 """""" out = [] if new_type == 'list': # return lst if isinstance(var, list): out = var else: out = [var] elif new_type == 'str': # return str if isinstance(var, list): try: out = ""%s"" % var[0] except: out = """" elif isinstance(var, str): out = var else: out = ""%s"" % var elif new_type == 'int': # return int if isinstance(var, list): try: out = int(var[0]) except: out = 0 elif isinstance(var, (int, long)): out = var elif isinstance(var, str): try: out = int(var) except: out = 0 else: out = 0 elif new_type == 'tuple': # return tuple if isinstance(var, tuple): out = var else: out = (var, ) elif new_type == 'dict': # return dictionary if isinstance(var, dict): out = var else: out = {0: var} return out" 4731,"def is_local_url(target): """"""Determine if URL is a local."""""" ref_url = urlparse(cfg.get('CFG_SITE_SECURE_URL')) test_url = urlparse(urljoin(cfg.get('CFG_SITE_SECURE_URL'), target)) return test_url.scheme in ('http', 'https') and \ ref_url.netloc == test_url.netloc" 4732,"def get_safe_redirect_target(arg='next'): """"""Get URL to redirect to and ensure that it is local."""""" for target in request.args.get(arg), request.referrer: if not target: continue if is_local_url(target): return target return None" 4733,"def redirect_to_url(req, url, redirection_type=None, norobot=False): """""" Redirect current page to url. @param req: request as received from apache @param url: url to redirect to @param redirection_type: what kind of redirection is required: e.g.: apache.HTTP_MULTIPLE_CHOICES = 300 apache.HTTP_MOVED_PERMANENTLY = 301 apache.HTTP_MOVED_TEMPORARILY = 302 apache.HTTP_SEE_OTHER = 303 apache.HTTP_NOT_MODIFIED = 304 apache.HTTP_USE_PROXY = 305 apache.HTTP_TEMPORARY_REDIRECT = 307 The default is apache.HTTP_MOVED_TEMPORARILY @param norobot: wether to instruct crawlers and robots such as GoogleBot not to index past this point. @see: http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html#sec10.3 """""" url = url.strip() if redirection_type is None: redirection_type = apache.HTTP_MOVED_TEMPORARILY from flask import redirect r = redirect(url, code=redirection_type) raise apache.SERVER_RETURN(r) # FIXME enable code bellow del req.headers_out[""Cache-Control""] req.headers_out[""Cache-Control""] = ""no-cache, private, no-store, "" \ ""must-revalidate, post-check=0, pre-check=0, max-age=0"" req.headers_out[""Pragma""] = ""no-cache"" if norobot: req.headers_out[ ""X-Robots-Tag""] = ""noarchive, nosnippet, noindex, nocache"" user_agent = req.headers_in.get('User-Agent', '') if 'Microsoft Office Existence Discovery' in user_agent or 'ms-office' in user_agent: # HACK: this is to workaround Microsoft Office trying to be smart # when users click on URLs in Office documents that require # authentication. Office will check the validity of the URL # but will pass the browser the redirected URL rather than # the original one. This is incompatible with e.g. Shibboleth # based SSO since the referer would be lost. # See: http://support.microsoft.com/kb/899927 req.status = 200 req.content_type = 'text/html' if req.method != 'HEAD': req.write("""""" Intermediate page for URLs clicked on MS Office Documents

    <p>You are going to be redirected to the desired content within 5 seconds. If the redirection does not happen automatically please click on <a href=""%(url)s"">%(url_ok)s</a>.</p>

    """""" % { 'url': escape(req.unparsed_uri, True), 'url_ok': escape(req.unparsed_uri) }) raise apache.SERVER_RETURN(apache.DONE) req.headers_out[""Location""] = url if req.response_sent_p: raise IOError(""Cannot redirect after headers have already been sent."") req.status = redirection_type req.write('

    <p>Please go to <a href=""%s"">here</a></p>

    \n' % url) raise apache.SERVER_RETURN(apache.DONE)" 4734,"def rewrite_to_secure_url(url, secure_base=None): """""" Rewrite URL to a Secure URL @param url URL to be rewritten to a secure URL. @param secure_base: Base URL of secure site (defaults to CFG_SITE_SECURE_URL). """""" if secure_base is None: secure_base = cfg.get('CFG_SITE_SECURE_URL') url_parts = list(urlparse(url)) url_secure_parts = urlparse(secure_base) url_parts[0] = url_secure_parts[0] url_parts[1] = url_secure_parts[1] return urlunparse(url_parts)" 4735,"def get_referer(req, replace_ampersands=False): """""" Return the referring page of a request. Referer (wikipedia): Referer is a common misspelling of the word ""referrer""; so common, in fact, that it made it into the official specification of HTTP. When visiting a webpage, the referer or referring page is the URL of the previous webpage from which a link was followed. @param req: request @param replace_ampersands: if 1, replace & by & in url (correct HTML cannot contain & characters alone) """""" try: referer = req.headers_in['Referer'] if replace_ampersands == 1: return referer.replace('&', '&') return referer except KeyError: return ''" 4736,"def make_canonical_urlargd(urlargd, default_urlargd): """""" Build up the query part of an URL from the arguments passed in the 'urlargd' dictionary. 'default_urlargd' is a secondary dictionary which contains tuples of the form (type, default value) for the query arguments (this is the same dictionary as the one you can pass to webinterface_handler.wash_urlargd). When a query element has its default value, it is discarded, so that the simplest (canonical) url query is returned. The result contains the initial '?' if there are actual query items remaining. """""" canonical = drop_default_urlargd(urlargd, default_urlargd) if canonical: return '?' + urlencode(canonical, doseq=True) # FIXME double escaping of '&'? .replace('&', '&') return ''" 4737,"def create_html_link(urlbase, urlargd, link_label, linkattrd=None, escape_urlargd=True, escape_linkattrd=True, urlhash=None): """"""Creates a W3C compliant link. @param urlbase: base url (e.g. config.CFG_SITE_URL/search) @param urlargd: dictionary of parameters. (e.g. p={'recid':3, 'of'='hb'}) @param link_label: text displayed in a browser (has to be already escaped) @param linkattrd: dictionary of attributes (e.g. a={'class': 'img'}) @param escape_urlargd: boolean indicating if the function should escape arguments (e.g. < becomes < or "" becomes ") @param escape_linkattrd: boolean indicating if the function should escape attributes (e.g. < becomes < or "" becomes ") @param urlhash: hash string to add at the end of the link """""" attributes_separator = ' ' output = '' return output" 4738,"def create_html_mailto( email, subject=None, body=None, cc=None, bcc=None, link_label=""%(email)s"", linkattrd=None, escape_urlargd=True, escape_linkattrd=True, email_obfuscation_mode=None): """"""Creates a W3C compliant 'mailto' link. Encode/encrypt given email to reduce undesired automated email harvesting when embedded in a web page. NOTE: there is no ultimate solution to protect against email harvesting. All have drawbacks and can more or less be circumvented. There are other techniques to protect email addresses. We implement the less annoying one for users. 
@param email: the recipient of the email @param subject: a default subject for the email (must not contain line feeds) @param body: a default body for the email @param cc: the co-recipient(s) of the email @param bcc: the hidden co-recpient(s) of the email @param link_label: the label of this mailto link. String replacement is performed on key %(email)s with the email address if needed. @param linkattrd: dictionary of attributes (e.g. a={'class': 'img'}) @param escape_urlargd: boolean indicating if the function should escape arguments (e.g. < becomes < or "" becomes ") @param escape_linkattrd: boolean indicating if the function should escape attributes (e.g. < becomes < or "" becomes ") @param email_obfuscation_mode: the protection mode. See below: You can choose among several modes to protect emails. It is advised to keep the default CFG_MISCUTIL_EMAIL_HARVESTING_PROTECTION value, so that it is possible for an admin to change the policy globally. Available modes ([t] means ""transparent"" for the user): -1: hide all emails, excepted CFG_SITE_ADMIN_EMAIL and CFG_SITE_SUPPORT_EMAIL. [t] 0 : no protection, email returned as is. foo@example.com => foo@example.com 1 : basic email munging: replaces @ by [at] and . by [dot] foo@example.com => foo [at] example [dot] com [t] 2 : transparent name mangling: characters are replaced by equivalent HTML entities. foo@example.com => foo@example.com [t] 3 : javascript insertion. Requires Javascript enabled on client side. 4 : replaces @ and . characters by gif equivalents. foo@example.com => fooexamplecom """""" # TODO: implement other protection modes to encode/encript email: # # [t] 5 : form submission. User is redirected to a form that he can # fills in to send the email (??Use webmessage??). # Depending on WebAccess, ask to answer a question. ## # [t] 6 : if user can see (controlled by WebAccess), display. Else # ask to login to see email. If user cannot see, display # form submission. if email_obfuscation_mode is None: email_obfuscation_mode = cfg.get( 'CFG_WEBSTYLE_EMAIL_ADDRESSES_OBFUSCATION_MODE') if linkattrd is None: linkattrd = {} parameters = {} if subject: parameters[""subject""] = subject if body: parameters[""body""] = body.replace('\r\n', '\n').replace('\n', '\r\n') if cc: parameters[""cc""] = cc if bcc: parameters[""bcc""] = bcc # Preprocessing values for some modes if email_obfuscation_mode == 1: # Basic Munging email = email.replace(""@"", "" [at] "").replace(""."", "" [dot] "") elif email_obfuscation_mode == 2: # Transparent name mangling email = string_to_numeric_char_reference(email) if '%(email)s' in link_label: link_label = link_label % {'email': email} mailto_link = create_html_link('mailto:' + email, parameters, link_label, linkattrd, escape_urlargd, escape_linkattrd) if email_obfuscation_mode == 0: # Return ""as is"" return mailto_link elif email_obfuscation_mode == 1: # Basic Munging return mailto_link elif email_obfuscation_mode == 2: # Transparent name mangling return mailto_link elif email_obfuscation_mode == 3: # Javascript-based return '''''' % \ mailto_link[::-1].replace(""'"", ""\\'"") elif email_obfuscation_mode == 4: # GIFs-based email = email.replace( '.', '' % cfg.get('CFG_SITE_URL')) email = email.replace( '@', '' % cfg.get('CFG_SITE_URL')) return email # All other cases, including mode -1: return """"" 4739,"def string_to_numeric_char_reference(string): """""" Encode a string to HTML-compatible numeric character reference. 
Eg: encode_html_entities(""abc"") == 'abc' """""" out = """" for char in string: out += ""&#"" + str(ord(char)) + "";"" return out" 4740,"def get_canonical_and_alternates_urls( url, drop_ln=True, washed_argd=None, quote_path=False): """""" Given an Invenio URL returns a tuple with two elements. The first is the canonical URL, that is the original URL with CFG_SITE_URL prefix, and where the ln= argument stripped. The second element element is mapping, language code -> alternate URL @param quote_path: if True, the path section of the given C{url} is quoted according to RFC 2396 """""" dummy_scheme, dummy_netloc, path, dummy_params, query, fragment = urlparse( url) canonical_scheme, canonical_netloc = urlparse(cfg.get('CFG_SITE_URL'))[0:2] parsed_query = washed_argd or parse_qsl(query) no_ln_parsed_query = [(key, value) for (key, value) in parsed_query if key != 'ln'] if drop_ln: canonical_parsed_query = no_ln_parsed_query else: canonical_parsed_query = parsed_query if quote_path: path = urllib.quote(path) canonical_query = urlencode(canonical_parsed_query) canonical_url = urlunparse( (canonical_scheme, canonical_netloc, path, dummy_params, canonical_query, fragment)) alternate_urls = {} for ln in cfg.get('CFG_SITE_LANGS'): alternate_query = urlencode(no_ln_parsed_query + [('ln', ln)]) alternate_url = urlunparse( (canonical_scheme, canonical_netloc, path, dummy_params, alternate_query, fragment)) alternate_urls[ln] = alternate_url return canonical_url, alternate_urls" 4741,"def create_url(urlbase, urlargd, escape_urlargd=True, urlhash=None): """"""Creates a W3C compliant URL. Output will look like this: 'urlbase?param1=value1&param2=value2' @param urlbase: base url (e.g. config.CFG_SITE_URL/search) @param urlargd: dictionary of parameters. (e.g. p={'recid':3, 'of'='hb'} @param escape_urlargd: boolean indicating if the function should escape arguments (e.g. < becomes < or "" becomes ") @param urlhash: hash string to add at the end of the link """""" separator = '&' output = urlbase if urlargd: output += '?' if escape_urlargd: arguments = [escape(quote(str(key)), quote=True) + '=' + escape(quote(str(urlargd[key])), quote=True) for key in urlargd.keys()] else: arguments = [str(key) + '=' + str(urlargd[key]) for key in urlargd.keys()] output += separator.join(arguments) if urlhash: output += ""#"" + escape(quote(str(urlhash))) return output" 4742,"def same_urls_p(a, b): """""" Compare two URLs, ignoring reorganizing of query arguments """""" ua = list(urlparse(a)) ub = list(urlparse(b)) ua[4] = parse_qs(ua[4]) ub[4] = parse_qs(ub[4]) return ua == ub" 4743,"def urlargs_replace_text_in_arg(urlargs, regexp_argname, text_old, text_new): """"""Analyze `urlargs' (URL CGI GET query arguments in string form) and for each occurrence of argument matching `regexp_argname' replace every substring `text_old' by `text_new'. Return the resulting new URL. Used to be used for search engine's create_nearest_terms_box, now it is not used there anymore. It is left here in case it will become possibly useful later. 
"""""" out = """" # parse URL arguments into a dictionary: urlargsdict = parse_qs(urlargs) # construct new URL arguments: urlargsdictnew = {} for key in urlargsdict.keys(): if re.match(regexp_argname, key): # replace `arg' by new values urlargsdictnew[key] = [] for parg in urlargsdict[key]: urlargsdictnew[key].append(parg.replace(text_old, text_new)) else: # keep old values urlargsdictnew[key] = urlargsdict[key] # build new URL for this word: for key in urlargsdictnew.keys(): for val in urlargsdictnew[key]: out += ""&"" + key + ""="" + quote_plus(val, '') if out.startswith(""&""): out = out[5:] return out" 4744,"def make_user_agent_string(component=None): """""" Return a nice and uniform user-agent string to be used when Invenio act as a client in HTTP requests. """""" ret = ""Invenio-%s (+%s; \""%s\"")"" % (cfg.get('CFG_VERSION'), cfg.get('CFG_SITE_URL'), cfg.get('CFG_SITE_NAME')) if component: ret += "" %s"" % component return ret" 4745,"def make_invenio_opener(component=None): """""" Return an urllib2 opener with the useragent already set in the appropriate way. """""" opener = urllib2.build_opener() opener.addheaders = [('User-agent', make_user_agent_string(component))] return opener" 4746,"def create_AWS_request_url(base_url, argd, _amazon_secret_access_key, _timestamp=None): """""" Create a signed AWS (Amazon Web Service) request URL corresponding to the given parameters. Example: >> create_AWS_request_url(""http://ecs.amazon.com/onca/xml"", {'AWSAccessKeyID': '0000000000', 'Service': 'AWSECommerceService', 'Operation': 'ItemLookup', 'ItemID': '0679722769', 'ResponseGroup': 'ItemAttributes,Offers,Images,Review'}, ""1234567890"") @param base_url: Service URL of the Amazon store to query @param argd: dictionary of arguments defining the query @param _amazon_secret_access_key: your Amazon secret key @param _timestamp: for testing purpose only (default: current timestamp) @type base_url: string @type argd: dict @type _amazon_secret_access_key: string @type _timestamp: string @return signed URL of the request (string) """""" # First define a few util functions def get_AWS_signature(argd, _amazon_secret_access_key, method=""GET"", request_host=""webservices.amazon.com"", request_uri=""/onca/xml"", _timestamp=None): """""" Returns the signature of an Amazon request, based on the arguments of the request. @param argd: dictionary of arguments defining the query @param _amazon_secret_access_key: your Amazon secret key @param method: method of the request POST or GET @param request_host: host contacted for the query. To embed in the signature. @param request_uri: uri contacted at 'request_host'. To embed in the signature. @param _timestamp: for testing purpose only (default: current timestamp) @type argd: dict @type _amazon_secret_access_key: string @type method: string @type host_header: string @type http_request_uri: string @type _timestamp: string @return signature of the request (string) """""" # Add timestamp if not _timestamp: argd[""Timestamp""] = time.strftime(""%Y-%m-%dT%H:%M:%SZ"", time.gmtime()) else: argd[""Timestamp""] = _timestamp # Order parameter keys by byte value parameter_keys = sorted(argd.keys()) # Encode arguments, according to RFC 3986. 
Make sure we # generate a list which is ordered by byte value of the keys arguments = [quote(str(key), safe=""~/"") + ""="" + quote(str(argd[key]), safe=""~/"") for key in parameter_keys] # Join parameters_string = ""&"".join(arguments) # Prefix parameters_string = method.upper() + ""\n"" + \ request_host.lower() + ""\n"" + \ (request_uri or ""/"") + ""\n"" + \ parameters_string # Sign and return return calculate_RFC2104_HMAC(parameters_string, _amazon_secret_access_key) def calculate_RFC2104_HMAC(data, _amazon_secret_access_key): """""" Computes a RFC 2104 compliant HMAC Signature and then Base64 encodes it. Module hashlib must be installed if Python < 2.5 @param data: data to sign @param _amazon_secret_access_key: your Amazon secret key @type data: string @type _amazon_secret_access_key: string. Empty if hashlib module not installed """""" if not HASHLIB_IMPORTED: current_app.logger.warning( ""Module hashlib not installed. Please install it."" ) return """" else: if sys.version_info < (2, 5): # compatibility mode for Python < 2.5 and hashlib my_digest_algo = _MySHA256(sha256()) else: my_digest_algo = sha256 return base64.encodestring( hmac.new( _amazon_secret_access_key, data, my_digest_algo).digest()).strip() # End util functions parsed_url = urlparse(base_url) signature = get_AWS_signature(argd, _amazon_secret_access_key, request_host=parsed_url[1], request_uri=parsed_url[2], _timestamp=_timestamp) if signature: argd[""Signature""] = signature return base_url + ""?"" + urlencode(argd)" 4747,"def create_Indico_request_url( base_url, indico_what, indico_loc, indico_id, indico_type, indico_params, indico_key, indico_sig, _timestamp=None): """""" Create a signed Indico request URL to access Indico HTTP Export APIs. See U{http://indico.cern.ch/ihelp/html/ExportAPI/index.html} for more information. Example: >> create_Indico_request_url(""https://indico.cern.ch"", ""categ"", """", [1, 7], ""xml"", {'onlypublic': 'yes', 'order': 'title', 'from': 'today', 'to': 'tomorrow'}, '00000000-0000-0000-0000-000000000000', '00000000-0000-0000-0000-000000000000') @param base_url: Service base URL of the Indico instance to query @param indico_what: element to export @type indico_what: one of the strings: C{categ}, C{event}, C{room}, C{reservation} @param indico_loc: location of the element(s) specified by ID (only used for some elements) @param indico_id: ID of the element to be exported @type indico_id: a string or a list/tuple of strings @param indico_type: output format @type indico_type: one of the strings: C{json}, C{jsonp}, C{xml}, C{html}, C{ics}, C{atom} @param indico_params: parameters of the query. See U{http://indico.cern.ch/ihelp/html/ExportAPI/common.html} @param indico_key: API key provided for the given Indico instance @param indico_sig: API secret key (signature) provided for the given Indico instance @param _timestamp: for testing purpose only (default: current timestamp) @return signed URL of the request (string) """""" url = '/export/' + indico_what + '/' if indico_loc: url += indico_loc + '/' if type(indico_id) in (list, tuple): # dash separated list of values indico_id = '-'.join([str(x) for x in indico_id]) url += indico_id + '.' 
+ str(indico_type) if hasattr(indico_params, 'items'): items = indico_params.items() else: items = list(indico_params) if indico_key: items.append(('apikey', indico_key)) if indico_sig and HASHLIB_IMPORTED: if _timestamp: items.append(('timestamp', str(_timestamp))) else: items.append(('timestamp', str(int(time.time())))) items = sorted(items, key=lambda x: x[0].lower()) url_to_sign = '%s?%s' % (url, urlencode(items)) if sys.version_info < (2, 5): # compatibility mode for Python < 2.5 and hashlib my_digest_algo = _MySHA1(sha1()) else: my_digest_algo = sha1 signature = hmac.new(indico_sig, url_to_sign, my_digest_algo).hexdigest() items.append(('signature', signature)) elif not HASHLIB_IMPORTED: current_app.logger.warning( ""Module hashlib not installed. Please install it."" ) if not items: return url url = '%s%s?%s' % (base_url.strip('/'), url, urlencode(items)) return url" 4748,"def auto_version_url(file_path): """""" Appends modification time of the file to the request URL in order for the browser to refresh the cache when file changes @param file_path: path to the file, e.g js/foo.js @return: file_path with modification time appended to URL """""" file_md5 = """" try: file_md5 = md5(open(cfg.get('CFG_WEBDIR') + os.sep + file_path).read()).hexdigest() except IOError: pass return file_path + ""?%s"" % file_md5" 4749,"def get_relative_url(url): """""" Returns the relative URL from a URL. For example: 'http://web.net' -> '' 'http://web.net/' -> '' 'http://web.net/1222' -> '/1222' 'http://web.net/wsadas/asd' -> '/wsadas/asd' It will never return a trailing ""/"". @param url: A url to transform @type url: str @return: relative URL """""" # remove any protocol info before stripped_site_url = url.replace(""://"", """") baseurl = ""/"" + ""/"".join(stripped_site_url.split(""/"")[1:]) # remove any trailing slash (""/"") if baseurl[-1] == ""/"": return baseurl[:-1] else: return baseurl" 4750,"def function_arg_count(fn): """""" returns how many arguments a funciton has """""" assert callable(fn), 'function_arg_count needed a callable function, not {0}'.format(repr(fn)) if hasattr(fn, '__code__') and hasattr(fn.__code__, 'co_argcount'): return fn.__code__.co_argcount else: return 1" 4751,"def map(*args): """""" this map works just like the builtin.map, except, this one you can also: - give it multiple functions to map over an iterable - give it a single function with multiple arguments to run a window based map operation over an iterable """""" functions_to_apply = [i for i in args if callable(i)] iterables_to_run = [i for i in args if not callable(i)] #print('functions_to_apply:',functions_to_apply) #print('iterables_to_run:',iterables_to_run) assert len(functions_to_apply)>0, 'at least one function needs to be given to map' assert len(iterables_to_run)>0, 'no iterables were given to map' # check for native map usage if len(functions_to_apply) == 1 and len(iterables_to_run) >= 1 and function_arg_count(*functions_to_apply)==1: if hasattr(iter([]), '__next__'): # if python 3 return __builtins__.map(functions_to_apply[0], *iterables_to_run) else: return iter(__builtins__.map(functions_to_apply[0], *iterables_to_run)) # ---------------------------- new logic below ---------------------------- # logic for a single function elif len(functions_to_apply) == 1: fn = functions_to_apply[0] # if there is a single iterable, chop it up if len(iterables_to_run) == 1: return (fn(*i) for i in window(iterables_to_run[0], function_arg_count(functions_to_apply[0]))) # logic for more than 1 function elif 
len(functions_to_apply) > 1 and len(iterables_to_run) == 1: return multi_ops(*(iterables_to_run + functions_to_apply)) else: raise ValueError('invalid usage of map()')" 4752,"def merge(left, right, how='inner', key=None, left_key=None, right_key=None, left_as='left', right_as='right'): """""" Performs a join using the union join function. """""" return join(left, right, how, key, left_key, right_key, join_fn=make_union_join(left_as, right_as))" 4753,"def join(left, right, how='inner', key=None, left_key=None, right_key=None, join_fn=tuple_join): """""" :param left: left iterable to be joined :param right: right iterable to be joined :param str | function key: either an attr name, dict key, or function that produces hashable value :param how: 'inner', 'left', 'right', or 'outer' :param join_fn: function called on joined left and right iterable items to complete join :rtype: list """""" if key is None and (left_key is None or right_key is None): raise ValueError(""Must provide either key param or both left_key and right_key"") if key is not None: lkey = rkey = key if callable(key) else make_key_fn(key) else: lkey = left_key if callable(left_key) else make_key_fn(left_key) rkey = right_key if callable(right_key) else make_key_fn(right_key) try: join_impl = { ""left"": _left_join, ""right"": _right_join, ""inner"": _inner_join, ""outer"": _outer_join, }[how] except KeyError: raise ValueError(""Invalid value for how: {}, must be left, right, "" ""inner, or outer."".format(str(how))) else: return join_impl(left, right, lkey, rkey, join_fn)" 4754,"def _inner_join(left, right, left_key_fn, right_key_fn, join_fn=union_join): """""" Inner join using left and right key functions :param left: left iterable to be joined :param right: right iterable to be joined :param function left_key_fn: function that produces hashable value from left objects :param function right_key_fn: function that produces hashable value from right objects :param join_fn: function called on joined left and right iterable items to complete join :rtype: list """""" joiner = defaultdict(list) for ele in right: joiner[right_key_fn(ele)].append(ele) joined = [] for ele in left: for other in joiner[left_key_fn(ele)]: joined.append(join_fn(ele, other)) return joined" 4755,"def _right_join(left, right, left_key_fn, right_key_fn, join_fn=union_join): """""" :param left: left iterable to be joined :param right: right iterable to be joined :param function left_key_fn: function that produces hashable value from left objects :param function right_key_fn: function that produces hashable value from right objects :param join_fn: function called on joined left and right iterable items to complete join :rtype: list """""" def reversed_join_fn(left_ele, right_ele): return join_fn(right_ele, left_ele) return _left_join(right, left, right_key_fn, left_key_fn, reversed_join_fn)" 4756,"def _outer_join(left, right, left_key_fn, right_key_fn, join_fn=union_join): """""" :param left: left iterable to be joined :param right: right iterable to be joined :param function left_key_fn: function that produces hashable value from left objects :param function right_key_fn: function that produces hashable value from right objects :param join_fn: function called on joined left and right iterable items to complete join :rtype: list """""" left_joiner = defaultdict(list) for ele in left: left_joiner[left_key_fn(ele)].append(ele) right_joiner = defaultdict(list) for ele in right: right_joiner[right_key_fn(ele)].append(ele) keys = 
set(left_joiner.keys()).union(set(right_joiner.keys())) def iter_join(l, r, join_keys): for join_key in join_keys: for ele in l.get(join_key, [None]): for other in r.get(join_key, [None]): yield join_fn(ele, other) return list(iter_join(left_joiner, right_joiner, keys))" 4757,"def group(iterable, key=lambda ele: ele): """""" Groups an iterable by a specified attribute, or using a specified key access function. Returns tuples of grouped elements. >>> dogs = [Dog('gatsby', 'Rruff!', 15), Dog('william', 'roof', 12), Dog('edward', 'hi', 15)] >>> groupby(dogs, 'weight') [(Dog('gatsby', 'Rruff!', 15), Dog('edward', 'hi', 15)), (Dog('william', 'roof', 12), )] :param iterable: iterable to be grouped :param key: a key-access function or attr name to be used as a group key """""" if callable(key): return _group(iterable, key) else: return _group(iterable, make_key_fn(key))" 4758,"def trigger_keyphrases( text = None, # input text to parse keyphrases = None, # keyphrases for parsing input text response = None, # optional text response on trigger function = None, # optional function on trigger kwargs = None, # optional function keyword arguments confirm = False, # optional return of confirmation confirmation_prompt = ""Do you want to continue? (y/n)"", confirmation_feedback_confirm = ""confirm"", confirmation_feedback_deny = ""deny"" ): """""" Parse input text for keyphrases. If any keyphrases are found, respond with text or by seeking confirmation or by engaging a function with optional keyword arguments. Return text or True if triggered and return False if not triggered. If confirmation is required, a confirmation object is returned, encapsulating a function and its optional arguments. """""" if any(pattern in text for pattern in keyphrases): if confirm: return confirmation( prompt = confirmation_prompt, feedback_confirm = confirmation_feedback_confirm, feedback_deny = confirmation_feedback_deny, function = function, kwargs = kwargs ) if function and not kwargs: result = function() elif function and kwargs: result = function(**kwargs) else: result = None if response: return response elif not response and result: return str(result) else: return True else: return False" 4759,"def parse( text = None, humour = 75 ): """""" Parse input text using various triggers, some returning text and some for engaging functions. If triggered, a trigger returns text or True if and if not triggered, returns False. If no triggers are triggered, return False, if one trigger is triggered, return the value returned by that trigger, and if multiple triggers are triggered, return a list of the values returned by those triggers. Options such as humour engage or disengage various triggers. """""" triggers = [] # general if humour >= 75: triggers.extend([ trigger_keyphrases( text = text, keyphrases = [ ""image"" ], response = ""http://i.imgur.com/MiqrlTh.jpg"" ), trigger_keyphrases( text = text, keyphrases = [ ""sup"", ""hi"" ], response = ""sup home bean"" ), trigger_keyphrases( text = text, keyphrases = [ ""thanks"", ""thank you"" ], response = ""you're welcome, boo ;)"" ) ]) # information triggers.extend([ trigger_keyphrases( text = text, keyphrases = [ ""where are you"", ""IP"", ""I.P."", ""IP address"", ""I.P. 
address"", ""ip address"" ], function = report_IP ), trigger_keyphrases( text = text, keyphrases = [ ""how are you"", ""are you well"", ""status"" ], function = report_system_status, kwargs = {""humour"": humour} ), trigger_keyphrases( text = text, keyphrases = [ ""heartbeat"" ], function = heartbeat_message ), trigger_keyphrases( text = text, keyphrases = [ ""METAR"" ], function = report_METAR, kwargs = {""text"": text} ), trigger_keyphrases( text = text, keyphrases = [ ""TAF"" ], response = report_TAF, kwargs = {""text"": text} ), trigger_keyphrases( text = text, keyphrases = [ ""rain"" ], response = report_rain_times, kwargs = {""text"": text} ) ]) # actions triggers.extend([ trigger_keyphrases( text = text, keyphrases = [ ""command"", ""run command"", ""engage command"", ""execute command"" ], response = command() ), trigger_keyphrases( text = text, keyphrases = [ ""restart"" ], function = restart, confirm = True, confirmation_prompt = ""Do you want to restart this "" ""program? (y/n)"", confirmation_feedback_confirm = ""confirm restart"", confirmation_feedback_deny = ""deny restart"" ) ]) if any(triggers): responses = [response for response in triggers if response] if len(responses) > 1: return responses else: return responses[0] else: return False" 4760,"def parse_networking( text = None ): """""" Access address and port parameters via the builtins or __builtin__ module. Relish the nonsense. """""" try: address = _builtins.address port = _builtins.port except: address = None port = None triggers = [] if address and port: triggers.extend([ trigger_keyphrases( text = text, keyphrases = [ ""reverse SSH"", ""reverse ssh"" ], function = engage_command, kwargs = {""command"": ""ssh -R "" + str(port) + "":localhost:22 "" + address}, confirm = True, confirmation_prompt = ""Do you want to reverse SSH "" ""connect? (y/n)"", confirmation_feedback_confirm = ""confirm reverse SSH connect: "" ""ssh localhost -p "" + str(port), confirmation_feedback_deny = ""deny reverse SSH connect"" ) ]) if any(triggers): responses = [response for response in triggers if response] if len(responses) > 1: return responses else: return responses[0] else: return False" 4761,"def multiparse( text = None, parsers = [parse], help_message = None ): """""" Parse input text by looping over a list of multiple parsers. If one trigger is triggered, return the value returned by that trigger, if multiple triggers are triggered, return a list of the values returned by those triggers. If no triggers are triggered, return False or an optional help message. """""" responses = [] for _parser in parsers: response = _parser(text = text) if response is not False: responses.extend(response if response is list else [response]) if not any(responses): if help_message: return help_message else: return False else: if len(responses) > 1: return responses else: return responses[0]" 4762,"def run( self ): """""" Engage contained function with optional keyword arguments. """""" if self._function and not self._kwargs: return self._function() if self._function and self._kwargs: return self._function(**self._kwargs)" 4763,"def tax_class_based_on(self, tax_class_based_on): """"""Sets the tax_class_based_on of this TaxSettings. :param tax_class_based_on: The tax_class_based_on of this TaxSettings. 
:type: str """""" allowed_values = [""shippingAddress"", ""billingAddress""] # noqa: E501 if tax_class_based_on is not None and tax_class_based_on not in allowed_values: raise ValueError( ""Invalid value for `tax_class_based_on` ({0}), must be one of {1}"" # noqa: E501 .format(tax_class_based_on, allowed_values) ) self._tax_class_based_on = tax_class_based_on" 4764,"def writequery( log, sqlQuery, dbConn, Force=False, manyValueList=False ): """"""*Execute a MySQL write command given a sql query* **Key Arguments:** - ``sqlQuery`` -- the MySQL command to execute - ``dbConn`` -- the db connection - ``Force`` -- do not exit code if error occurs, move onto the next command - ``manyValueList`` -- a list of value tuples if executing more than one insert **Return:** - ``message`` -- error/warning message **Usage:** Here's an example of how to create a table using the database connection passed to the function: .. code-block:: python from fundamentals.mysql import writequery sqlQuery = ""CREATE TABLE `testing_table` (`id` INT NOT NULL, PRIMARY KEY (`id`))"" message = writequery( log=log, sqlQuery=sqlQuery, dbConn=dbConn, Force=False, manyValueList=False ) Here's a many value insert example: .. code-block:: python from fundamentals.mysql import writequery sqlQuery = ""INSERT INTO testing_table (id) values (%s)"" message = writequery( log=log, sqlQuery=sqlQuery, dbConn=dbConn, Force=False, manyValueList=[(1,), (2,), (3,), (4,), (5,), (6,), (7,), (8,), (9,), (10,), (11,), (12,), ] ) """""" log.debug('starting the ``writequery`` function') import pymysql import warnings warnings.filterwarnings('error', category=pymysql.Warning) message = """" try: cursor = dbConn.cursor(pymysql.cursors.DictCursor) except Exception as e: log.error('could not create the database cursor.') # EXECUTE THE SQL COMMAND try: if manyValueList == False: cursor.execute(sqlQuery) else: # cursor.executemany(sqlQuery, manyValueList) # INSET LARGE LISTS IN BATCHES TO STOP MYSQL SERVER BARFING batch = 100000 offset = 0 stop = 0 while stop == 0: thisList = manyValueList[offset:offset + batch] offset += batch a = len(thisList) cursor.executemany(sqlQuery, thisList) dbConn.commit() if len(thisList) < batch: stop = 1 except pymysql.Error as e: if e[0] == 1050 and 'already exists' in e[1]: log.info(str(e) + '\n') elif e[0] == 1062: # Duplicate Key error log.debug('Duplicate Key error: %s\n' % (str(e), )) message = ""duplicate key error"" elif e[0] == 1061: # Duplicate Key error log.debug('index already exists: %s\n' % (str(e), )) message = ""index already exists"" elif ""Duplicate entry"" in str(e): log.debug('Duplicate Key error: %s\n' % (str(e), )) message = ""duplicate key error"" elif ""Deadlock"" in str(e): i = 0 while i < 10: time.sleep(1) i += 1 try: if manyValueList == False: cursor.execute(sqlQuery) else: # cursor.executemany(sqlQuery, manyValueList) # INSET LARGE LISTS IN BATCHES TO STOP MYSQL SERVER # BARFING batch = 100000 offset = 0 stop = 0 while stop == 0: thisList = manyValueList[offset:offset + batch] offset += batch a = len(thisList) cursor.executemany(sqlQuery, thisList) dbConn.commit() if len(thisList) < batch: stop = 1 i = 20 except: pass if i == 10: log.error('Deadlock: %s\n' % (str(e), )) message = ""Deadlock error"" raise else: sqlQueryTrim = sqlQuery[:1000] message = 'MySQL write command not executed for this query: << %s >>\nThe error was: %s \n' % (sqlQuery, str(e)) if Force == False: log.error(message) raise else: log.info(message) return -1 except pymysql.Warning as e: log.info(str(e)) except Exception as e: if 
""truncated"" in str(e): log.error('%s\n Here is the sqlquery:\n%s\n' % (str(e), sqlQuery)) if manyValueList: log.error('... and the values:\n%s\n' % (thisList, )) elif ""Duplicate entry"" in str(e): log.warning('Duplicate Key error: %s\n' % (str(e), )) message = ""duplicate key error"" else: sqlQuery = sqlQuery[:2000] log.error( 'MySQL write command not executed for this query: << %s >>\nThe error was: %s \n' % (sqlQuery, str(e))) if Force == False: sys.exit(0) return -1 dbConn.commit() # CLOSE THE CURSOR cOpen = True count = 0 while cOpen: try: cursor.close() cOpen = False except Exception as e: time.sleep(1) count += 1 if count == 10: log.warning('could not close the db cursor ' + str(e) + '\n') raise e count = 0 log.debug('completed the ``writequery`` function') return message" 4765,"def create_fixed_rate_shipping(cls, fixed_rate_shipping, **kwargs): """"""Create FixedRateShipping Create a new FixedRateShipping This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.create_fixed_rate_shipping(fixed_rate_shipping, async=True) >>> result = thread.get() :param async bool :param FixedRateShipping fixed_rate_shipping: Attributes of fixedRateShipping to create (required) :return: FixedRateShipping If the method is called asynchronously, returns the request thread. """""" kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._create_fixed_rate_shipping_with_http_info(fixed_rate_shipping, **kwargs) else: (data) = cls._create_fixed_rate_shipping_with_http_info(fixed_rate_shipping, **kwargs) return data" 4766,"def delete_fixed_rate_shipping_by_id(cls, fixed_rate_shipping_id, **kwargs): """"""Delete FixedRateShipping Delete an instance of FixedRateShipping by its ID. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.delete_fixed_rate_shipping_by_id(fixed_rate_shipping_id, async=True) >>> result = thread.get() :param async bool :param str fixed_rate_shipping_id: ID of fixedRateShipping to delete. (required) :return: None If the method is called asynchronously, returns the request thread. """""" kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._delete_fixed_rate_shipping_by_id_with_http_info(fixed_rate_shipping_id, **kwargs) else: (data) = cls._delete_fixed_rate_shipping_by_id_with_http_info(fixed_rate_shipping_id, **kwargs) return data" 4767,"def get_fixed_rate_shipping_by_id(cls, fixed_rate_shipping_id, **kwargs): """"""Find FixedRateShipping Return single instance of FixedRateShipping by its ID. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.get_fixed_rate_shipping_by_id(fixed_rate_shipping_id, async=True) >>> result = thread.get() :param async bool :param str fixed_rate_shipping_id: ID of fixedRateShipping to return (required) :return: FixedRateShipping If the method is called asynchronously, returns the request thread. """""" kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._get_fixed_rate_shipping_by_id_with_http_info(fixed_rate_shipping_id, **kwargs) else: (data) = cls._get_fixed_rate_shipping_by_id_with_http_info(fixed_rate_shipping_id, **kwargs) return data" 4768,"def list_all_fixed_rate_shippings(cls, **kwargs): """"""List FixedRateShippings Return a list of FixedRateShippings This method makes a synchronous HTTP request by default. 
To make an asynchronous HTTP request, please pass async=True >>> thread = api.list_all_fixed_rate_shippings(async=True) >>> result = thread.get() :param async bool :param int page: page number :param int size: page size :param str sort: page order :return: page[FixedRateShipping] If the method is called asynchronously, returns the request thread. """""" kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._list_all_fixed_rate_shippings_with_http_info(**kwargs) else: (data) = cls._list_all_fixed_rate_shippings_with_http_info(**kwargs) return data" 4769,"def replace_fixed_rate_shipping_by_id(cls, fixed_rate_shipping_id, fixed_rate_shipping, **kwargs): """"""Replace FixedRateShipping Replace all attributes of FixedRateShipping This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.replace_fixed_rate_shipping_by_id(fixed_rate_shipping_id, fixed_rate_shipping, async=True) >>> result = thread.get() :param async bool :param str fixed_rate_shipping_id: ID of fixedRateShipping to replace (required) :param FixedRateShipping fixed_rate_shipping: Attributes of fixedRateShipping to replace (required) :return: FixedRateShipping If the method is called asynchronously, returns the request thread. """""" kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._replace_fixed_rate_shipping_by_id_with_http_info(fixed_rate_shipping_id, fixed_rate_shipping, **kwargs) else: (data) = cls._replace_fixed_rate_shipping_by_id_with_http_info(fixed_rate_shipping_id, fixed_rate_shipping, **kwargs) return data" 4770,"def update_fixed_rate_shipping_by_id(cls, fixed_rate_shipping_id, fixed_rate_shipping, **kwargs): """"""Update FixedRateShipping Update attributes of FixedRateShipping This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.update_fixed_rate_shipping_by_id(fixed_rate_shipping_id, fixed_rate_shipping, async=True) >>> result = thread.get() :param async bool :param str fixed_rate_shipping_id: ID of fixedRateShipping to update. (required) :param FixedRateShipping fixed_rate_shipping: Attributes of fixedRateShipping to update. (required) :return: FixedRateShipping If the method is called asynchronously, returns the request thread. """""" kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._update_fixed_rate_shipping_by_id_with_http_info(fixed_rate_shipping_id, fixed_rate_shipping, **kwargs) else: (data) = cls._update_fixed_rate_shipping_by_id_with_http_info(fixed_rate_shipping_id, fixed_rate_shipping, **kwargs) return data" 4771,"def create_wish_list(cls, wish_list, **kwargs): """"""Create WishList Create a new WishList This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.create_wish_list(wish_list, async=True) >>> result = thread.get() :param async bool :param WishList wish_list: Attributes of wishList to create (required) :return: WishList If the method is called asynchronously, returns the request thread. """""" kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._create_wish_list_with_http_info(wish_list, **kwargs) else: (data) = cls._create_wish_list_with_http_info(wish_list, **kwargs) return data" 4772,"def delete_wish_list_by_id(cls, wish_list_id, **kwargs): """"""Delete WishList Delete an instance of WishList by its ID. This method makes a synchronous HTTP request by default. 
To make an asynchronous HTTP request, please pass async=True >>> thread = api.delete_wish_list_by_id(wish_list_id, async=True) >>> result = thread.get() :param async bool :param str wish_list_id: ID of wishList to delete. (required) :return: None If the method is called asynchronously, returns the request thread. """""" kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._delete_wish_list_by_id_with_http_info(wish_list_id, **kwargs) else: (data) = cls._delete_wish_list_by_id_with_http_info(wish_list_id, **kwargs) return data" 4773,"def get_wish_list_by_id(cls, wish_list_id, **kwargs): """"""Find WishList Return single instance of WishList by its ID. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.get_wish_list_by_id(wish_list_id, async=True) >>> result = thread.get() :param async bool :param str wish_list_id: ID of wishList to return (required) :return: WishList If the method is called asynchronously, returns the request thread. """""" kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._get_wish_list_by_id_with_http_info(wish_list_id, **kwargs) else: (data) = cls._get_wish_list_by_id_with_http_info(wish_list_id, **kwargs) return data" 4774,"def list_all_wish_lists(cls, **kwargs): """"""List WishLists Return a list of WishLists This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.list_all_wish_lists(async=True) >>> result = thread.get() :param async bool :param int page: page number :param int size: page size :param str sort: page order :return: page[WishList] If the method is called asynchronously, returns the request thread. """""" kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._list_all_wish_lists_with_http_info(**kwargs) else: (data) = cls._list_all_wish_lists_with_http_info(**kwargs) return data" 4775,"def replace_wish_list_by_id(cls, wish_list_id, wish_list, **kwargs): """"""Replace WishList Replace all attributes of WishList This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.replace_wish_list_by_id(wish_list_id, wish_list, async=True) >>> result = thread.get() :param async bool :param str wish_list_id: ID of wishList to replace (required) :param WishList wish_list: Attributes of wishList to replace (required) :return: WishList If the method is called asynchronously, returns the request thread. """""" kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._replace_wish_list_by_id_with_http_info(wish_list_id, wish_list, **kwargs) else: (data) = cls._replace_wish_list_by_id_with_http_info(wish_list_id, wish_list, **kwargs) return data" 4776,"def update_wish_list_by_id(cls, wish_list_id, wish_list, **kwargs): """"""Update WishList Update attributes of WishList This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.update_wish_list_by_id(wish_list_id, wish_list, async=True) >>> result = thread.get() :param async bool :param str wish_list_id: ID of wishList to update. (required) :param WishList wish_list: Attributes of wishList to update. (required) :return: WishList If the method is called asynchronously, returns the request thread. 
"""""" kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._update_wish_list_by_id_with_http_info(wish_list_id, wish_list, **kwargs) else: (data) = cls._update_wish_list_by_id_with_http_info(wish_list_id, wish_list, **kwargs) return data" 4777,"def task(arg = None): """""" Task decorator """""" # make sure stdout is patched if not hasattr(sys.stdout, 'indent_level'): sys.stdout = IndentedFile(sys.stdout) def decorator(base): info = ': ' + arg if type(arg) is str else '' header = fore.green('** ' + fore.cyan(base.__name__) + info) def func(*args, **kwargs): sys.stdout.indent_level += 1 puts(header) base(*args, **kwargs) sys.stdout.indent_level -= 1 params = inspect.formatargspec(*inspect.getargspec(base))[1:-1] specformat = fore.cyan('%s') + ' ' + fore.white('%s') func._task = True func._spec = specformat % (base.__name__, params) func._desc = re.sub('\s+', ' ', inspect.getdoc(base) or '') return func if type(arg) == types.FunctionType: return decorator(arg) else: return decorator" 4778,"def recurse_up(directory, filename): """""" Recursive walk a directory up to root until it contains `filename` """""" directory = osp.abspath(directory) while True: searchfile = osp.join(directory, filename) if osp.isfile(searchfile): return directory if directory == '/': break else: directory = osp.dirname(directory) return False" 4779,"def etree_to_dict(tree): """"""Translate etree into dictionary. :param tree: etree dictionary object :type tree: """""" d = {tree.tag.split('}')[1]: map( etree_to_dict, tree.iterchildren() ) or tree.text} return d" 4780,"def csv( self, filepath=None ): """"""*Render the data in CSV format* **Key Arguments:** - ``filepath`` -- path to the file to write the csv content to. Default *None* **Return:** - ``renderedData`` -- the data rendered in csv format **Usage:** To render the data set as csv: .. code-block:: python print dataSet.csv() .. code-block:: text owner,pet,address daisy,dog,""belfast, uk"" john,snake,the moon susan,crocodile,larne and to save the csv rendering to file: .. code-block:: python dataSet.csv(""/path/to/myfile.csv"") """""" self.log.debug('starting the ``csv`` method') renderedData = self._list_of_dictionaries_to_csv(""machine"") if filepath and renderedData != ""NO MATCH"": # RECURSIVELY CREATE MISSING DIRECTORIES if not os.path.exists(os.path.dirname(filepath)): os.makedirs(os.path.dirname(filepath)) writeFile = codecs.open(filepath, encoding='utf-8', mode='w') writeFile.write(renderedData) writeFile.close() self.log.debug('completed the ``csv`` method') return renderedData" 4781,"def json( self, filepath=None ): """"""*Render the data in json format* **Key Arguments:** - ``filepath`` -- path to the file to write the json content to. Default *None* **Return:** - ``renderedData`` -- the data rendered as json **Usage:** To render the data set as json: .. code-block:: python print dataSet.json() .. code-block:: json [ { ""address"": ""belfast, uk"", ""owner"": ""daisy"", ""pet"": ""dog"" }, { ""address"": ""the moon"", ""owner"": ""john"", ""pet"": ""snake"" }, { ""address"": ""larne"", ""owner"": ""susan"", ""pet"": ""crocodile"" } ] and to save the json rendering to file: .. 
code-block:: python dataSet.json(""/path/to/myfile.json"") """""" self.log.debug('starting the ``json`` method') dataCopy = copy.deepcopy(self.listOfDictionaries) for d in dataCopy: for k, v in d.iteritems(): if isinstance(v, datetime): d[k] = v.strftime(""%Y%m%dt%H%M%S"") renderedData = json.dumps( dataCopy, separators=(',', ': '), sort_keys=True, indent=4 ) if filepath and len(self.listOfDictionaries): # RECURSIVELY CREATE MISSING DIRECTORIES if not os.path.exists(os.path.dirname(filepath)): os.makedirs(os.path.dirname(filepath)) writeFile = codecs.open(filepath, encoding='utf-8', mode='w') writeFile.write(renderedData) writeFile.close() self.log.debug('completed the ``json`` method') return renderedData" 4782,"def yaml( self, filepath=None ): """"""*Render the data in yaml format* **Key Arguments:** - ``filepath`` -- path to the file to write the yaml content to. Default *None* **Return:** - ``renderedData`` -- the data rendered as yaml **Usage:** To render the data set as yaml: .. code-block:: python print dataSet.yaml() .. code-block:: yaml - address: belfast, uk owner: daisy pet: dog - address: the moon owner: john pet: snake - address: larne owner: susan pet: crocodile and to save the yaml rendering to file: .. code-block:: python dataSet.json(""/path/to/myfile.yaml"") """""" self.log.debug('starting the ``yaml`` method') dataCopy = [] dataCopy[:] = [dict(l) for l in self.listOfDictionaries] renderedData = yaml.dump(dataCopy, default_flow_style=False) if filepath and len(self.listOfDictionaries): # RECURSIVELY CREATE MISSING DIRECTORIES if not os.path.exists(os.path.dirname(filepath)): os.makedirs(os.path.dirname(filepath)) stream = file(filepath, 'w') yaml.dump(dataCopy, stream, default_flow_style=False) stream.close() self.log.debug('completed the ``yaml`` method') return renderedData" 4783,"def mysql( self, tableName, filepath=None, createStatement=None ): """"""*Render the dataset as a series of mysql insert statements* **Key Arguments:** - ``tableName`` -- the name of the mysql db table to assign the insert statements to. - ``filepath`` -- path to the file to write the mysql inserts content to. Default *None* createStatement **Return:** - ``renderedData`` -- the data rendered mysql insert statements (string format) **Usage:** .. code-block:: python print dataSet.mysql(""testing_table"") this output the following: .. code-block:: plain INSERT INTO `testing_table` (address,dateCreated,owner,pet) VALUES (""belfast, uk"" ,""2016-09-14T16:21:36"" ,""daisy"" ,""dog"") ON DUPLICATE KEY UPDATE address=""belfast, uk"", dateCreated=""2016-09-14T16:21:36"", owner=""daisy"", pet=""dog"" ; INSERT INTO `testing_table` (address,dateCreated,owner,pet) VALUES (""the moon"" ,""2016-09-14T16:21:36"" ,""john"" ,""snake"") ON DUPLICATE KEY UPDATE address=""the moon"", dateCreated=""2016-09-14T16:21:36"", owner=""john"", pet=""snake"" ; INSERT INTO `testing_table` (address,dateCreated,owner,pet) VALUES (""larne"" ,""2016-09-14T16:21:36"" ,""susan"" ,""crocodile"") ON DUPLICATE KEY UPDATE address=""larne"", dateCreated=""2016-09-14T16:21:36"", owner=""susan"", pet=""crocodile"" ; To save this rendering to file use: .. 
code-block:: python dataSet.mysql(""testing_table"", ""/path/to/myfile.sql"") """""" self.log.debug('starting the ``csv`` method') import re if createStatement and ""create table if not exists"" not in createStatement.lower(): regex = re.compile(r'^\s*CREATE TABLE ', re.I | re.S) createStatement = regex.sub( ""CREATE TABLE IF NOT EXISTS "", createStatement) renderedData = self._list_of_dictionaries_to_mysql_inserts( tableName=tableName, createStatement=createStatement ) if filepath and len(self.listOfDictionaries): # RECURSIVELY CREATE MISSING DIRECTORIES if not os.path.exists(os.path.dirname(filepath)): os.makedirs(os.path.dirname(filepath)) writeFile = codecs.open(filepath, encoding='utf-8', mode='w') writeFile.write(renderedData) writeFile.close() self.log.debug('completed the ``csv`` method') return renderedData" 4784,"def _list_of_dictionaries_to_csv( self, csvType=""human""): """"""Convert a python list of dictionaries to pretty csv output **Key Arguments:** - ``csvType`` -- human, machine or reST **Return:** - ``output`` -- the contents of a CSV file """""" self.log.debug( 'starting the ``_list_of_dictionaries_to_csv`` function') if not len(self.listOfDictionaries): return ""NO MATCH"" dataCopy = copy.deepcopy(self.listOfDictionaries) tableColumnNames = dataCopy[0].keys() columnWidths = [] columnWidths[:] = [len(tableColumnNames[i]) for i in range(len(tableColumnNames))] output = io.BytesIO() # setup csv styles if csvType == ""machine"": delimiter = "","" elif csvType in [""human"", ""markdown""]: delimiter = ""|"" elif csvType in [""reST""]: delimiter = ""|"" if csvType in [""markdown""]: writer = csv.writer(output, delimiter=delimiter, quoting=csv.QUOTE_NONE, doublequote=False, quotechar='""', escapechar=""\\"", lineterminator=""\n"") else: writer = csv.writer(output, dialect='excel', delimiter=delimiter, quotechar='""', quoting=csv.QUOTE_MINIMAL, lineterminator=""\n"") if csvType in [""markdown""]: dividerWriter = csv.writer( output, delimiter=""|"", quoting=csv.QUOTE_NONE, doublequote=False, quotechar='""', escapechar=""\\"", lineterminator=""\n"") else: dividerWriter = csv.writer(output, dialect='excel', delimiter=""+"", quotechar='""', quoting=csv.QUOTE_MINIMAL, lineterminator=""\n"") # add column names to csv header = [] divider = [] rstDivider = [] allRows = [] # clean up data for row in dataCopy: for c in tableColumnNames: if isinstance(row[c], float) or isinstance(row[c], Decimal): row[c] = ""%0.9g"" % row[c] elif isinstance(row[c], datetime): thisDate = str(row[c])[:10] row[c] = ""%(thisDate)s"" % locals() # set the column widths for row in dataCopy: for i, c in enumerate(tableColumnNames): if len(unicode(row[c])) > columnWidths[i]: columnWidths[i] = len(unicode(row[c])) # table borders for human readable if csvType in [""human"", ""markdown"", ""reST""]: header.append("""") divider.append("""") rstDivider.append("""") for i, c in enumerate(tableColumnNames): if csvType == ""machine"": header.append(c) elif csvType in [""human"", ""markdown"", ""reST""]: header.append( c.ljust(columnWidths[i] + 2).rjust(columnWidths[i] + 3)) divider.append('-' * (columnWidths[i] + 3)) rstDivider.append('=' * (columnWidths[i] + 3)) # table border for human readable if csvType in [""human"", ""markdown"", ""reST""]: header.append("""") divider.append("""") rstDivider.append("""") # fill in the data for row in dataCopy: thisRow = [] # table border for human readable if csvType in [""human"", ""markdown"", ""reST""]: thisRow.append("""") for i, c in enumerate(tableColumnNames): if csvType in 
[""human"", ""markdown"", ""reST""]: if row[c] == None: row[c] = """" row[c] = unicode(unicode(row[c]).ljust(columnWidths[i] + 2) .rjust(columnWidths[i] + 3)) thisRow.append(row[c]) # table border for human readable if csvType in [""human"", ""markdown"", ""reST""]: thisRow.append("""") allRows.append(thisRow) if csvType in [""reST""]: allRows.append(divider) if csvType == ""machine"": writer.writerow(header) if csvType in [""reST""]: dividerWriter.writerow(divider) writer.writerow(header) dividerWriter.writerow(rstDivider) if csvType in [""human""]: dividerWriter.writerow(divider) writer.writerow(header) dividerWriter.writerow(divider) elif csvType in [""markdown""]: writer.writerow(header) dividerWriter.writerow(divider) # write out the data writer.writerows(allRows) # table border for human readable if csvType in [""human""]: dividerWriter.writerow(divider) output = output.getvalue() output = output.strip() if csvType in [""markdown""]: output = output.replace(""|--"", ""|:-"") if csvType in [""reST""]: output = output.replace(""|--"", ""+--"").replace(""--|"", ""--+"") self.log.debug( 'completed the ``_list_of_dictionaries_to_csv`` function') return output" 4785,"def _list_of_dictionaries_to_mysql_inserts( self, tableName, createStatement=None): """"""Convert a python list of dictionaries to pretty csv output **Key Arguments:** - ``tableName`` -- the name of the table to create the insert statements for - ``createStatement`` -- add this create statement to the top of the file. Will only be executed if no table of that name exists in database. Default *None* **Return:** - ``output`` -- the mysql insert statements (as a string) """""" self.log.debug( 'completed the ````_list_of_dictionaries_to_mysql_inserts`` function') if not len(self.listOfDictionaries): return ""NO MATCH"" dataCopy = copy.deepcopy(self.listOfDictionaries) if createStatement: output = createStatement + ""\n"" else: output = """" inserts = [] inserts = [] inserts[:] = [convert_dictionary_to_mysql_table(log=self.log, dictionary=d, dbTableName=tableName, uniqueKeyList=[ ], dateModified=False, returnInsertOnly=True, replace=True, batchInserts=False, reDatetime=self.reDatetime) for d in dataCopy] output += "";\n"".join(inserts) + "";"" self.log.debug( 'completed the ``_list_of_dictionaries_to_mysql_inserts`` function') return output" 4786,"def create_payment_token(cls, payment_token, **kwargs): """"""Create PaymentToken Create a new PaymentToken This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.create_payment_token(payment_token, async=True) >>> result = thread.get() :param async bool :param PaymentToken payment_token: Attributes of paymentToken to create (required) :return: PaymentToken If the method is called asynchronously, returns the request thread. """""" kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._create_payment_token_with_http_info(payment_token, **kwargs) else: (data) = cls._create_payment_token_with_http_info(payment_token, **kwargs) return data" 4787,"def delete_payment_token_by_id(cls, payment_token_id, **kwargs): """"""Delete PaymentToken Delete an instance of PaymentToken by its ID. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.delete_payment_token_by_id(payment_token_id, async=True) >>> result = thread.get() :param async bool :param str payment_token_id: ID of paymentToken to delete. 
(required) :return: None If the method is called asynchronously, returns the request thread. """""" kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._delete_payment_token_by_id_with_http_info(payment_token_id, **kwargs) else: (data) = cls._delete_payment_token_by_id_with_http_info(payment_token_id, **kwargs) return data" 4788,"def get_payment_token_by_id(cls, payment_token_id, **kwargs): """"""Find PaymentToken Return single instance of PaymentToken by its ID. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.get_payment_token_by_id(payment_token_id, async=True) >>> result = thread.get() :param async bool :param str payment_token_id: ID of paymentToken to return (required) :return: PaymentToken If the method is called asynchronously, returns the request thread. """""" kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._get_payment_token_by_id_with_http_info(payment_token_id, **kwargs) else: (data) = cls._get_payment_token_by_id_with_http_info(payment_token_id, **kwargs) return data" 4789,"def list_all_payment_tokens(cls, **kwargs): """"""List PaymentTokens Return a list of PaymentTokens This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.list_all_payment_tokens(async=True) >>> result = thread.get() :param async bool :param int page: page number :param int size: page size :param str sort: page order :return: page[PaymentToken] If the method is called asynchronously, returns the request thread. """""" kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._list_all_payment_tokens_with_http_info(**kwargs) else: (data) = cls._list_all_payment_tokens_with_http_info(**kwargs) return data" 4790,"def replace_payment_token_by_id(cls, payment_token_id, payment_token, **kwargs): """"""Replace PaymentToken Replace all attributes of PaymentToken This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.replace_payment_token_by_id(payment_token_id, payment_token, async=True) >>> result = thread.get() :param async bool :param str payment_token_id: ID of paymentToken to replace (required) :param PaymentToken payment_token: Attributes of paymentToken to replace (required) :return: PaymentToken If the method is called asynchronously, returns the request thread. """""" kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._replace_payment_token_by_id_with_http_info(payment_token_id, payment_token, **kwargs) else: (data) = cls._replace_payment_token_by_id_with_http_info(payment_token_id, payment_token, **kwargs) return data" 4791,"def update_payment_token_by_id(cls, payment_token_id, payment_token, **kwargs): """"""Update PaymentToken Update attributes of PaymentToken This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.update_payment_token_by_id(payment_token_id, payment_token, async=True) >>> result = thread.get() :param async bool :param str payment_token_id: ID of paymentToken to update. (required) :param PaymentToken payment_token: Attributes of paymentToken to update. (required) :return: PaymentToken If the method is called asynchronously, returns the request thread. 
"""""" kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._update_payment_token_by_id_with_http_info(payment_token_id, payment_token, **kwargs) else: (data) = cls._update_payment_token_by_id_with_http_info(payment_token_id, payment_token, **kwargs) return data" 4792,"def split_in_columns(filterform, fields_per_column=None): ''' Return iterator that yields a column (iterator too). By default, flat field list is divided in columns with fields_per_column elements in each (fields_per_column is a class attribute). ''' nfields = len(filterform.fields) if fields_per_column is None: fields_per_column = filterform.fields_per_column ncolumns, tail = divmod(nfields, fields_per_column) if tail > 0: ncolumns += 1 itr = iter(filterform) for _i in range(ncolumns): yield itertools.islice(itr, fields_per_column)" 4793,"def dispatch_event(self, event: ""Event"") -> None: """""" Dispatches the given event. It is the duty of this method to set the target of the dispatched event by calling `event.set_target(self)`. Args: event (Event): The event to dispatch. Must not be `None`. Raises: TypeError: If the event is `None` or its type is incorrect. """""" # Set the target of the event if it doesn't have one already. It could happen that # we are simply redispatching an event. if event.target is None: event.set_target(self) listeners: dict[types.MethodType, bool] = self._registered_listeners.get(event.type) if listeners is None: return for listener in listeners: listener(event)" 4794,"def remove_event_listener(self, event_type: str, event_handler: types.MethodType) -> None: """""" Removes the given event listener registered on the dispatcher for the given event type. Args: event_type (str): The type of the event to remove the event handler from. Must not be `None` or empty string. event_handler (types.MethodType): The event handler to remove from the given event type of the dispatcher. Must not be `None`. Raises: ValueError: If any of the parameters are invalid. """""" # TODO: we should also accept types.FunctionType, # don't forget the documentation here and in the interface. if not isinstance(event_type, str) or event_type == """" or\ not isinstance(event_handler, types.MethodType): raise ValueError(""Invalid arguments: {}, {}"".format(event_type, event_handler)) listeners: dict[types.MethodType, bool] = self._registered_listeners.get(event_type) listener: types.MethodType = None if listeners is None else listeners.get(event_handler) if listener is not None: del listeners[event_handler]" 4795,"def set_target(self, target: EventDispatcherBase) -> None: """""" This method should be called by the event dispatcher that dispatches this event to set its target property. Args: target (EventDispatcherBase): The event dispatcher that will dispatch this event. Raises: PermissionError: If the target property of the event has already been set. TypeError: If `target` is not an `EventDispatcherBase` instance. """""" if self._target is not None: raise PermissionError(""The target property already has a valid value."") if not isinstance(target, EventDispatcherBase): raise TypeError(""Invalid target type: {}"".format(target)) self._target = target" 4796,"def download_url(url, content_type=None, download_to_file=None, retry_count=10, timeout=10.0): """""" Will download a file from given URL (either local or external) to the desired path (or generate one if none is given). Local files are copied directly. 
The function will retry a number of times based on retry_count (default 10) parameter and sleeps a number of seconds based on given timeout (default 10.0 sec) after each failed request. Returns the path to the downloaded file if successful. Otherwise an exception is raised. Given a content_type and an external URL, the function will make sure that the desired content_type is equal to the content-type of returned file. @param url: where the file lives on the interwebs @type url: string @param content_type: desired content_type to check for in external URLs. (optional) @type content_type: string @param download_to_file: where the file should live after download. (optional) @type download_to_file: string @param retry_count: number of times to retry. Defaults to 10. (optional) @type retry_count: int @param timeout: number of seconds to sleep between attempts. Defaults to 10.0 seconds. (optional) @type timeout: float @return: the path of the downloaded/copied file @raise InvenioFileDownloadError: raised upon URL/HTTP errors, file errors or wrong format """""" if not download_to_file: download_to_file = safe_mkstemp(suffix="".tmp"", prefix=""filedownloadutils_"") try: if is_url_a_local_file(url): downloaded_file = download_local_file(url, download_to_file) else: downloaded_file = download_external_url(url, download_to_file, content_type=content_type, retry_count=retry_count, timeout=timeout) except InvenioFileDownloadError: raise return downloaded_file" 4797,"def download_external_url(url, download_to_file, content_type=None, retry_count=10, timeout=10.0, verbose=False): """""" Download a url (if it corresponds to a remote file) and return a local url to it. If format is specified, a check will be performed in order to make sure that the format of the downloaded file is equal to the expected format. @param url: the URL to download @type url: string @param download_to_file: the path to download the file to @type download_to_file: string @param content_type: the content_type of the file (optional) @type content_type: string @param retry_count: max number of retries for downloading the file @type retry_count: int @param timeout: time to sleep in between attemps @type timeout: int @return: the path to the download local file @rtype: string @raise StandardError: if the download failed """""" error_str = """" error_code = None retry_attempt = 0 while retry_attempt < retry_count: try: # Attempt to download the external file request = open_url(url) if request.code == 200 and ""Refresh"" in request.headers: # PDF is being generated, they ask us to wait for # n seconds. # New arxiv responses, we are not sure if the old ones are # deactivated try: retry_after = int(request.headers[""Refresh""]) # We make sure that we do not retry too often even if # they tell us to retry after 1s retry_after = max(retry_after, timeout) except ValueError: retry_after = timeout if verbose: msg = ""retrying after %ss"" % (retry_after,) print >> sys.stderr, msg time.sleep(retry_after) retry_attempt += 1 continue except urllib2.HTTPError as e: error_code = e.code error_str = str(e) retry_after = timeout # This handling is the same as OAI queries. 
# We are getting 503 errors when PDFs are being generated if e.code == 503 and ""Retry-After"" in e.headers: # PDF is being generated, they ask us to wait for n seconds try: retry_after = int(e.headers[""Retry-After""]) # We make sure that we do not retry too often even if # they tell us to retry after 1s retry_after = max(retry_after, timeout) except ValueError: pass if verbose: msg = ""retrying after %ss"" % (retry_after,) print >> sys.stderr, msg time.sleep(retry_after) retry_attempt += 1 except (urllib2.URLError, socket.timeout, socket.gaierror, socket.error) as e: if verbose: error_str = str(e) msg = ""socket error, retrying after %ss"" % (timeout,) print >> sys.stderr, msg time.sleep(timeout) retry_attempt += 1 else: # When we get here, it means that the download was a success. try: finalize_download(url, download_to_file, content_type, request) finally: request.close() return download_to_file # All the attempts were used, but no successfull download - so raise error msg = 'URL could not be opened: %s' % (error_str,) raise InvenioFileDownloadError(msg, code=error_code)" 4798,"def finalize_download(url, download_to_file, content_type, request): """""" Finalizes the download operation by doing various checks, such as format type, size check etc. """""" # If format is given, a format check is performed. if content_type and content_type not in request.headers['content-type']: msg = 'The downloaded file is not of the desired format' raise InvenioFileDownloadError(msg) # Save the downloaded file to desired or generated location. to_file = open(download_to_file, 'w') try: try: while True: block = request.read(CFG_FILEUTILS_BLOCK_SIZE) if not block: break to_file.write(block) except Exception as e: msg = ""Error when downloading %s into %s: %s"" % \ (url, download_to_file, e) raise InvenioFileDownloadError(msg) finally: to_file.close() # Check Size filesize = os.path.getsize(download_to_file) if filesize == 0: raise InvenioFileDownloadError(""%s seems to be empty"" % (url,)) # download successful, return the new path return download_to_file" 4799,"def download_local_file(filename, download_to_file): """""" Copies a local file to Invenio's temporary directory. @param filename: the name of the file to copy @type filename: string @param download_to_file: the path to save the file to @type download_to_file: string @return: the path of the temporary file created @rtype: string @raise StandardError: if something went wrong """""" # Try to copy. try: path = urllib2.urlparse.urlsplit(urllib.unquote(filename))[2] if os.path.abspath(path) != path: msg = ""%s is not a normalized path (would be %s)."" \ % (path, os.path.normpath(path)) raise InvenioFileCopyError(msg) allowed_path_list = current_app.config.get( 'CFG_BIBUPLOAD_FFT_ALLOWED_LOCAL_PATHS', [] ) allowed_path_list.append(current_app.config['CFG_TMPSHAREDDIR']) for allowed_path in allowed_path_list: if path.startswith(allowed_path): shutil.copy(path, download_to_file) if os.path.getsize(download_to_file) == 0: os.remove(download_to_file) msg = ""%s seems to be empty"" % (filename,) raise InvenioFileCopyError(msg) break else: msg = ""%s is not in one of the allowed paths."" % (path,) raise InvenioFileCopyError() except Exception as e: msg = ""Impossible to copy the local file '%s' to %s: %s"" % \ (filename, download_to_file, str(e)) raise InvenioFileCopyError(msg) return download_to_file" 4800,"def safe_mkstemp(suffix, prefix='filedownloadutils_'): """"""Create a temporary filename that don't have any '.' 
inside a part from the suffix."""""" tmpfd, tmppath = tempfile.mkstemp( suffix=suffix, prefix=prefix, dir=current_app.config['CFG_TMPSHAREDDIR'] ) # Close the file and leave the responsability to the client code to # correctly open/close it. os.close(tmpfd) if '.' not in suffix: # Just in case format is empty return tmppath while '.' in os.path.basename(tmppath)[:-len(suffix)]: os.remove(tmppath) tmpfd, tmppath = tempfile.mkstemp( suffix=suffix, prefix=prefix, dir=current_app.config['CFG_TMPSHAREDDIR'] ) os.close(tmpfd) return tmppath" 4801,"def open_url(url, headers=None): """""" Opens a URL. If headers are passed as argument, no check is performed and the URL will be opened. @param url: the URL to open @type url: string @param headers: the headers to use @type headers: dictionary @return: a file-like object as returned by urllib2.urlopen. """""" request = urllib2.Request(url) if headers: for key, value in headers.items(): request.add_header(key, value) return URL_OPENER.open(request)" 4802,"def bulk_log(self, log_message=u""Еще одна пачка обработана"", total=None, part_log_time_minutes=5): """""" Возвращает инстант логгера для обработки списокв данных :param log_message: То, что будет написано, когда время придет :param total: Общее кол-во объектов, если вы знаете его :param part_log_time_minutes: Раз в какое кол-во минут пытаться писать лог :return: BulkLogger """""" return BulkLogger(log=self.log, log_message=log_message, total=total, part_log_time_minutes=part_log_time_minutes)" 4803,"def db(self, db_alias, shard_key=None): """""" Получить экземпляр работы с БД :type db_alias: basestring Альяс БД из меты :type shard_key: Любой тип. Некоторый идентификатор, который поможет мете найти нужную шарду. Тип зависи от принимающей стороны :rtype: DbQueryService """""" if shard_key is None: shard_key = '' db_key = db_alias + '__' + str(shard_key) if db_key not in self.__db_list: self.__db_list[db_key] = DbQueryService(self, self.__default_headers, {""db_alias"": db_alias, ""dbAlias"": db_alias, ""shard_find_key"": shard_key, ""shardKey"": shard_key}) return self.__db_list[db_key]" 4804,"def __read_developer_settings(self): """""" Читает конфигурации разработчика с локальной машины или из переменных окружения При этом переменная окружения приоритетнее :return: """""" self.developer_settings = read_developer_settings() if not self.developer_settings: self.log.warning(""НЕ УСТАНОВЛЕНЫ настройки разработчика, это может приводить к проблемам в дальнейшей работе!"")" 4805,"def api_call(self, service, method, data, options): """""" :type app: metasdk.MetaApp """""" if 'self' in data: # может не быть, если вызывается напрямую из кода, # а не из прослоек типа DbQueryService data.pop(""self"") if options: data.update(options) _headers = dict(self.__default_headers) if self.auth_user_id: _headers['X-META-AuthUserID'] = str(self.auth_user_id) request = { ""url"": self.meta_url + ""/api/v1/adptools/"" + service + ""/"" + method, ""data"": json.dumps(data), ""headers"": _headers, ""timeout"": (60, 1800) } for _try_idx in range(20): try: resp = requests.post(**request) if resp.status_code == 200: decoded_resp = json.loads(resp.text) if 'data' in decoded_resp: return decoded_resp['data'][method] if 'error' in decoded_resp: if 'details' in decoded_resp['error']: eprint(decoded_resp['error']['details']) raise DbQueryError(decoded_resp['error']) raise UnexpectedError() else: process_meta_api_error_code(resp.status_code, request, resp.text) except (requests.exceptions.ConnectionError, ConnectionError, 
TimeoutError) as e: self.log.warning('META API Connection Error. Sleep...', {""e"": e}) time.sleep(15) except Exception as e: if 'Служба частично или полностью недоступна' in str(e): self.log.warning('META API Connection Error. Sleep...', {""e"": e}) time.sleep(15) else: raise e raise ServerError(request)" 4806,"def native_api_call(self, service, method, data, options, multipart_form=False, multipart_form_data=None, stream=False, http_path=""/api/meta/v1/"", http_method='POST', get_params=None, connect_timeout_sec=60): """""" :type app: metasdk.MetaApp :rtype: requests.Response """""" if get_params is None: get_params = {} if 'self' in data: # может не быть, если вызывается напрямую из кода, # а не из прослоек типа DbQueryService data.pop(""self"") if options: data.update(options) _headers = dict(self.__default_headers) if self.auth_user_id: _headers['X-META-AuthUserID'] = str(self.auth_user_id) request = { ""url"": self.meta_url + http_path + service + ""/"" + method, ""timeout"": (connect_timeout_sec, 1800), ""stream"": stream, ""params"": get_params, } if multipart_form: if multipart_form_data: request['files'] = multipart_form_data request['data'] = data _headers.pop('content-type', None) else: request['data'] = json.dumps(data) request['headers'] = _headers for _try_idx in range(20): try: resp = requests.request(http_method, **request) if resp.status_code == 200: return resp else: process_meta_api_error_code(resp.status_code, request, resp.text) except (requests.exceptions.ConnectionError, ConnectionError, TimeoutError) as e: self.log.warning('META API Connection Error. Sleep...', {""e"": e}) time.sleep(15) except Exception as e: if 'Служба частично или полностью недоступна' in str(e): self.log.warning('META API Service Temporarily Unavailable. 
Sleep...', {""e"": e}) time.sleep(15) else: raise e raise ServerError(request)" 4807,"async def get_json(self, url, timeout=30, astext=False, exceptions=False): """"""Get URL and parse JSON from text."""""" try: with async_timeout.timeout(timeout): res = await self._aio_session.get(url) if res.status != 200: _LOGGER.error(""QSUSB returned %s [%s]"", res.status, url) return None res_text = await res.text() except (aiohttp.client_exceptions.ClientError, asyncio.TimeoutError) as exc: if exceptions: raise exc return None if astext: return res_text try: return json.loads(res_text) except json.decoder.JSONDecodeError: if res_text.strip("" "") == """": return None _LOGGER.error(""Could not decode %s [%s]"", res_text, url)" 4808,"def stop(self): """"""Stop listening."""""" self._running = False if self._sleep_task: self._sleep_task.cancel() self._sleep_task = None" 4809,"def version(self): """"""Get the QS Mobile version."""""" return self.get_json(URL_VERSION.format(self._url), astext=True)" 4810,"def listen(self, callback=None): """"""Start the &listen long poll and return immediately."""""" self._running = True self.loop.create_task(self._async_listen(callback))" 4811,"async def _async_listen(self, callback=None): """"""Listen loop."""""" while True: if not self._running: return try: packet = await self.get_json( URL_LISTEN.format(self._url), timeout=30, exceptions=True) except asyncio.TimeoutError: continue except aiohttp.client_exceptions.ClientError as exc: _LOGGER.warning(""ClientError: %s"", exc) self._sleep_task = self.loop.create_task(asyncio.sleep(30)) try: await self._sleep_task except asyncio.CancelledError: pass self._sleep_task = None continue if isinstance(packet, dict) and QS_CMD in packet: _LOGGER.debug(""callback( %s )"", packet) try: callback(packet) except Exception as err: # pylint: disable=broad-except _LOGGER.error(""Exception in callback\nType: %s: %s"", type(err), err) else: _LOGGER.debug(""unknown packet? %s"", packet)" 4812,"def set_qs_value(self, qsid, val, success_cb): """"""Push state to QSUSB, retry with backoff."""""" self.loop.create_task(self.async_set_qs_value(qsid, val, success_cb))" 4813,"async def async_set_qs_value(self, qsid, val, success_cb=None): """"""Push state to QSUSB, retry with backoff."""""" set_url = URL_SET.format(self._url, qsid, val) for _repeat in range(1, 6): set_result = await self.get_json(set_url, 2) if set_result and set_result.get('data', 'NO REPLY') != 'NO REPLY': if success_cb: success_cb() return True await asyncio.sleep(0.01 * _repeat) _LOGGER.error(""Unable to set %s"", set_url) return False" 4814,"async def update_from_devices(self): """"""Retrieve a list of &devices and values."""""" res = await self.get_json(URL_DEVICES.format(self._url)) if res: self.devices.update_devices(res) return True return False" 4815,"def multi_ops(data_stream, *funcs): """""" fork a generator with multiple operations/functions data_stream - an iterable data structure (ie: list/generator/tuple) funcs - every function that will be applied to the data_stream """""" assert all(callable(func) for func in funcs), 'multi_ops can only apply functions to the first argument' assert len(funcs), 'multi_ops needs at least one function to apply to data_stream' for i in data_stream: if len(funcs) > 1: yield tuple(func(i) for func in funcs) elif len(funcs) == 1: yield funcs[0](i)" 4816,"def attowiki_distro_path(): """"""return the absolute complete path where attowiki is located .. todo:: use pkg_resources ? 
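    A rough sketch of the pkg_resources alternative hinted at in the todo above (an assumption, not part of the original implementation):

    .. code-block:: python

        import pkg_resources

        # an empty resource name resolves to the installed package directory
        # (note: the path is returned without a trailing slash)
        attowiki_path = pkg_resources.resource_filename('attowiki', '')
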
"""""" attowiki_path = os.path.abspath(__file__) if attowiki_path[-1] != '/': attowiki_path = attowiki_path[:attowiki_path.rfind('/')] else: attowiki_path = attowiki_path[:attowiki_path[:-1].rfind('/')] return attowiki_path" 4817,"def build_command(self): """"""Build out the crontab command"""""" return cron_utils.cronify(""crontab -l | {{ cat; echo \""{} {} {} {} {} CJOBID='{}' MAILTO='' {}\""; }} | crontab - > /dev/null"".format(self._minute, self._hour, self._day_of_month, self._month_of_year, self._day_of_week, self._jobid, self._command))" 4818,"def read_environment_file(envfile=None): """""" Read a .env file into os.environ. If not given a path to a envfile path, does filthy magic stack backtracking to find manage.py and then find the envfile. """""" if envfile is None: frame = sys._getframe() envfile = os.path.join(os.path.dirname(frame.f_back.f_code.co_filename), '.env') if not os.path.exists(envfile): warnings.warn(""not reading %s - it doesn't exist."" % envfile) return for k, v in parse_environment_file(envfile): os.environ.setdefault(k, v)" 4819,"def infer_format(filename:str) -> str: """"""Return extension identifying format of given filename"""""" _, ext = os.path.splitext(filename) return ext" 4820,"def reversed_graph(graph:dict) -> dict: """"""Return given graph reversed"""""" ret = defaultdict(set) for node, succs in graph.items(): for succ in succs: ret[succ].add(node) return dict(ret)" 4821,"def walk(start:list, graphs:iter) -> iter: """"""walk on given graphs, beginning on start. Yield all found nodes, including start. All graph are understood as a single one, with merged keys and values. """""" walked = set([start]) stack = [start] while len(stack) > 0: *stack, curr = stack yield curr succs = it.chain.from_iterable(graph.get(curr, ()) for graph in graphs) for succ in succs: if succ not in walked: walked.add(curr) stack.append(succ)" 4822,"def have_cycle(graph:dict) -> frozenset: """"""Perform a topologic sort to detect any cycle. Return the set of unsortable nodes. If at least one item, then there is cycle in given graph. """""" # topological sort walked = set() # walked nodes nodes = frozenset(it.chain(it.chain.from_iterable(graph.values()), graph.keys())) # all nodes of the graph preds = reversed_graph(graph) # succ: preds last_walked_len = -1 while last_walked_len != len(walked): last_walked_len = len(walked) for node in nodes - walked: if len(preds.get(node, set()) - walked) == 0: walked.add(node) return frozenset(nodes - walked)" 4823,"def file_lines(bblfile:str) -> iter: """"""Yield lines found in given file"""""" with open(bblfile) as fd: yield from (line.rstrip() for line in fd if line.rstrip())" 4824,"def line_type(line:str) -> str: """"""Give type of input line, as defined in LINE_TYPES >>> line_type('IN\\ta\\tb') 'IN' >>> line_type('') 'EMPTY' """""" for regex, ltype in LINE_TYPES.items(): if re.fullmatch(regex, line): return ltype raise ValueError(""Input line \""{}\"" is not bubble formatted"".format(line))" 4825,"def line_data(line:str) -> tuple: """"""Return groups found in given line >>> line_data('IN\\ta\\tb') ('IN', 'a', 'b') >>> line_data('') () """""" for regex, _ in LINE_TYPES.items(): match = re.fullmatch(regex, line) if match: return match.groups() raise ValueError(""Input line \""{}\"" is not bubble formatted"".format(line))" 4826,"def _catchCurrentViewContent(self): """"""! 
\~english Catch the current view content @return: a PIL Image @note Automatically converts the cache color mode and at the same time rotates the captured image data according to the screen angle \~chinese 从缓存中抓取当前视图大小的数据 @return: PIL Image 对象 @note 自动转换缓存色彩模式,同时根据屏幕角度设定旋转所抓取的图像数据 """""" viewContent = None if self._buffer_color_mode != self._display_color_mode: viewContent = self._buffer.crop( self.View.rectToArray() ) .convert( self._display_color_mode ) else: viewContent = self._buffer.crop( self.View.rectToArray() ) # Rotate for display direction if self._display_direction == 0: return viewContent else: return viewContent.rotate( angle = self._display_direction, expand=True )" 4827,"def _initBuffer(self, bufferColorMode, bufferSize): """"""! \~english Initialize the buffer object instance, use PIL Image as for buffer @param bufferColorMode: ""RGB"" or ""1"" @param bufferSize: (width, height) \~chinese 初始化缓冲区对象实例,使用PIL Image作为缓冲区 @param bufferColorMode: 色彩模式, 取值: ""RGB"" 或 ""1"" @param bufferSize: 缓存大小 (width, height),例如: (128, 64) """""" # super(SSScreenBase)._initBuffer(bufferColorMode, bufferSize) self._buffer_color_mode = bufferColorMode #create screen image buffer and canvas if bufferSize==None: self._buffer = Image.new( bufferColorMode , self._display_size ) else: self._buffer = Image.new( bufferColorMode , bufferSize ) self.Canvas = ImageDraw.Draw( self._buffer ) #creare screen view self.View = SSRect( 0, 0, self._display_size[0], self._display_size[1] )" 4828,"def clearCanvas(self, fillColor = 0 ): """"""! \~engliash Clear up canvas and fill color at same time @param fillColor: a color value @note The fillColor value range depends on the setting of _buffer_color_mode. * If it is SS_COLOR_MODE_MONO (""1"") monochrome mode, it can only select 0: black and 1: white * If it is SS_COLOR_MODE_RGB (""RGB"") color mode, RGB color values can be used \~chinese 清除画布并同时填充颜色 @param fillColor: 颜色值 @note fillColor 取值范围取决于 _buffer_color_mode 的设定。 * 如果是 SS_COLOR_MODE_MONO (""1"") 单色模式,只能选择 0:黑色 和 1:白色 * 如果是 SS_COLOR_MODE_RGB (""RGB"") 彩色模式,可以使用 RGB 色彩值 """""" self.Canvas.rectangle((0, 0, self._display_size[0], self._display_size[1]), outline=0, fill=fillColor)" 4829,"def clearView(self, fillColor = 0 ): """"""! \~english Clear up canvas with view size @param fillColor: a color value @note The fillColor value range depends on the setting of _buffer_color_mode. * If it is SS_COLOR_MODE_MONO (""1"") monochrome mode, it can only select 0: black and 1: white * If it is SS_COLOR_MODE_RGB (""RGB"") color mode, RGB color values can be used \~chinese 清除画布中当前视图大小的区域同时填充颜色 @param fillColor: 颜色值 @note fillColor 取值范围取决于 _buffer_color_mode 的设定。 * 如果是 SS_COLOR_MODE_MONO (""1"") 单色模式,只能选择 0:黑色 和 1:白色 * 如果是 SS_COLOR_MODE_RGB (""RGB"") 彩色模式,可以使用 RGB 色彩值 """""" self.Canvas.rectangle(self.View.rectToArray(), outline=0, fill=fillColor)" 4830,"def redefineBuffer(self, newBuffer ): """"""! \~english Redefine frame of Screen @param newFrame: a new fram data @note newFrame can be: * PIL Image * PIL ImageFile * Dictionary, eg. { ""size"":(width, height), ""color_mode"":""1"" } or { ""size"":(width, height), ""color_mode"":""RGB"" } \~chinese 重新定义缓存数据 @param newFrame: 新缓存数据 \n newFrame 可以为下面值: * PIL Image * PIL ImageFile * 字典, eg. 
{ ""size"":(width, height), ""color_mode"":""1"" } or { ""size"":(width, height), ""color_mode"":""RGB"" } """""" # Redefine Frame from an image object if type(self._buffer) == type(newBuffer): self._buffer = newBuffer self.Canvas = ImageDraw.Draw( self._buffer ) # self.View.resize(newBuffer.width, newBuffer.height) return True # Redefine Frame from an if type(newBuffer).__name__.find(PIL.ImageFile.ImageFile.__name__) != -1: self._buffer = self._buffer.resize((newBuffer.width, newBuffer.height)) self._buffer.paste( newBuffer, (0,0)) # self.View.resize(newBuffer.width, newBuffer.height) return True # Recreated a new frame from dict of frame if isinstance(newBuffer, dict): self._buffer = Image.new( newBuffer[""color_mode""] , newBuffer[""size""] ) self.Canvas = ImageDraw.Draw( self._buffer ) return True pass" 4831,"def write(self, text="""", xy=(0,0), align=""left"", font=None, fontName=None, fontSize = 10, fill = 1, spacing = 0, screenCenter = False): """"""! \~english Print one line text or multi-line text on the screen @param text: Text to be drawn. eg. ""Hello World!"" or ""Hello/nWorld!"" @param xy: Top left corner of the text. defaule: (0,0) @param align: ""left"", ""center"" or ""right"". defaule: ""left"" @param fontName: Name of font or font instance. defaule: None (use default font) @param fontSize: Font size. default: 10 @param fill: Color to use for the text. default: 1 (white) @param spacing: The number of pixels between lines. default: 0 @param screenCenter: Keep the text center of screen. default: False @note How to use screenCenter? 1. align=""left""; screenCenter=False
            +---------------------------------+
            |  Simple text line1              |
            |  Simple line2                   |
            |  Simple                         |
            |                                 |
            +---------------------------------+
            
    2. align=""left""; screenCenter=True
            +---------------------------------+
            |        Simple text line1        |
            |        Simple line2             |
            |        Simple                   |
            |                                 |
            +---------------------------------+
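    A minimal usage sketch (assumed, not from the original code: 'screen' stands for an instance of this screen class, and the TrueType font path is only an example):
            # two lines drawn with the default font, centered on the screen
            screen.write('Hello\nWorld!', xy=(0, 10), align='center', screenCenter=True)
            # one line drawn with a TrueType font at size 12
            screen.write('Temp: 23C', xy=(0, 40),
                         fontName='/usr/share/fonts/truetype/dejavu/DejaVuSans.ttf',
                         fontSize=12)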
            
    \~chinese 在屏幕上打印一行文字或多行文字 @param text: 要输出的文字,可以单行也可以多行。例如: ""Hello World!"" 或 ""Hello/nWorld!"" @param xy: 文字输出的坐标点。默认: (0,0) @param align: 多行文字对齐方式,可选: ""left"", ""center"" 或 ""right"". 默认: ""left"" @param fontName: 字体名或字体对象实例。默认:None(使用系统默认的字体) @param fontSize: 字体大小。默认:10 @param fill: 文字颜色。默认: 1 (白色) @param spacing: 行间距。默认:0 @param screenCenter: 让文本居中屏幕。 @note screenCenter 效果示例: 1. align=""left""; screenCenter=False
            +---------------------------------+
            |  Simple text line1              |
            |  Simple line2                   |
            |  Simple                         |
            |                                 |
            +---------------------------------+
            
    2. align=""left""; screenCenter=True
            +---------------------------------+
            |        Simple text line1        |
            |        Simple line2             |
            |        Simple                   |
            |                                 |
            +---------------------------------+
            
    """""" tx = xy[0] try: dwFont = font if font != None else DEF_SCR_FRONT if fontName==None else ImageFont.truetype(fontName, fontSize) except: dwFont = DEF_SCR_FRONT try: if screenCenter == True: (fw, fh) = self.Canvas.multiline_textsize( text, font ) tx = xy[0] + (self._display_size[0]-fw)/2 self.Canvas.multiline_text( (tx, xy[1]) , text, font = dwFont, align=align, fill=fill, spacing=spacing) except: print(""ERROR: canvas write error"")" 4832,"def resize(self, newWidth = 0, newHeight = 0): """"""! \~english Resize width and height of rectangles @param newWidth: new width value @param newHeight: new height value \~chinese 重新设定矩形高宽 @param newWidth: 新宽度 @param newHeight: 新高度 """""" self.height = newHeight self.width = newWidth" 4833,"def adjuestSize(self, offsetWidth = 0, offsetHeight = 0): """"""! \~english Adjuest width and height of rectangles @param offsetWidth: adjust the width. Negative numbers are smaller, Positive number is increased @param offsetHeight: adjust the height. Negative numbers are smaller, Positive number is increased @note The negative numbers are smaller, positive number is increased,0 remains unchanged. \~chinese 调整矩形高宽数据 @param offsetWidth: 调整宽度。 负数较小,正数增加 @param offsetHeight: 调整高度。 负数较小,正数增加 @note 负数较小,正数增加,0保持不变。 """""" self.height += offsetHeight self.width += offsetWidth" 4834,"def moveTo(self, newX=0, newY=0): """"""! \~english Move vertex of rectangles to new point (x,y) @param newX: Coordinated X value @param newY: Coordinated Y value \~chinese 移动矩形到新坐标点 (x,y) @param newX: 坐标 X @param newY: 坐标 Y """""" self.x = newX self.y = newY" 4835,"def moveOffset(self, offsetX=0, offsetY=0): """"""! \~english Offset vertex of rectangles to new point (x,y) @param offsetX: offset X value @param offsetY: offset Y value @note The negative numbers are left or up move , positive number is right or down move,0 remains unchanged. \~chinese 平移矩形指定的距离 (x,y) @param offsetX: 平移 X @param offsetY: 平移 Y @note 负数是左移( X )或上移( Y ),正数是右移( X )或下移( Y ),0 保持不变。 """""" self.x += offsetX self.y += offsetY" 4836,"def swapWH(self): """"""! \~english Swap width and height of rectangles \~chinese 交换矩形高宽边数据 """""" width = self.width self.width = self.height self.height = width" 4837,"def rectToArray(self, swapWH = False): """"""! \~english Rectangles converted to array of coordinates @return: an array of rect points. eg. (x1,y1,x2,y2) \~chinese 矩形数据转换为矩形坐标数组 @return: 矩形座标数组, 例如: ( x1,y1,x2,y2 ) """""" if swapWH == False: return [self.x, self.y, self.x + self.width, self.y + self.height] else: return [self.x, self.y, self.x + self.height, self.y + self.width]" 4838,"def _needSwapWH(self, oldDirection, newDirection ): """"""! \~english return screen direction status @return Boolean @note No need to rotate if the screen orientation is 0 degrees and 180 degrees \~chinese 返回屏幕方向状态 @return 布尔值 @note 如果屏幕方向是0度和180度就不需要旋转 """""" if abs(newDirection - oldDirection) == 0: return False if abs(newDirection - oldDirection) % 180 == 0: return False if abs(newDirection - oldDirection) % 90 == 0: return True return False" 4839,"def rotateDirection(self, displayDirection): """"""! \~english rotate screen direction @param displayDirection: Screen Direction. 
value can be chosen: 0, 90, 180, 270 \~chinese 旋转显示屏方向 @param displayDirection: 显示屏方向。可选值: 0, 90, 180, 270 \~ @note \~english after rotate the View resize to screen size \~chinese 改变方向后,默认的 View 大小会更新为当前 Screen 的大小 \~\n """""" if self._needSwapWH(self._display_direction, displayDirection): self._display_size = ( self._display_size[1], self._display_size[0] ) if self.redefineBuffer( { ""size"":self._display_size, ""color_mode"":self._buffer_color_mode } ): self.View.resize(self._display_size[0], self._display_size[1]) self._display_direction = displayDirection" 4840,"def create_tax_class(cls, tax_class, **kwargs): """"""Create TaxClass Create a new TaxClass This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.create_tax_class(tax_class, async=True) >>> result = thread.get() :param async bool :param TaxClass tax_class: Attributes of taxClass to create (required) :return: TaxClass If the method is called asynchronously, returns the request thread. """""" kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._create_tax_class_with_http_info(tax_class, **kwargs) else: (data) = cls._create_tax_class_with_http_info(tax_class, **kwargs) return data" 4841,"def delete_tax_class_by_id(cls, tax_class_id, **kwargs): """"""Delete TaxClass Delete an instance of TaxClass by its ID. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.delete_tax_class_by_id(tax_class_id, async=True) >>> result = thread.get() :param async bool :param str tax_class_id: ID of taxClass to delete. (required) :return: None If the method is called asynchronously, returns the request thread. """""" kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._delete_tax_class_by_id_with_http_info(tax_class_id, **kwargs) else: (data) = cls._delete_tax_class_by_id_with_http_info(tax_class_id, **kwargs) return data" 4842,"def get_tax_class_by_id(cls, tax_class_id, **kwargs): """"""Find TaxClass Return single instance of TaxClass by its ID. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.get_tax_class_by_id(tax_class_id, async=True) >>> result = thread.get() :param async bool :param str tax_class_id: ID of taxClass to return (required) :return: TaxClass If the method is called asynchronously, returns the request thread. """""" kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._get_tax_class_by_id_with_http_info(tax_class_id, **kwargs) else: (data) = cls._get_tax_class_by_id_with_http_info(tax_class_id, **kwargs) return data" 4843,"def list_all_tax_classes(cls, **kwargs): """"""List TaxClasses Return a list of TaxClasses This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.list_all_tax_classes(async=True) >>> result = thread.get() :param async bool :param int page: page number :param int size: page size :param str sort: page order :return: page[TaxClass] If the method is called asynchronously, returns the request thread. 
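 The paging parameters above can be combined in the synchronous call (the 'name' sort value is illustrative only): >>> tax_classes_page = api.list_all_tax_classes(page=1, size=20, sort='name')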
"""""" kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._list_all_tax_classes_with_http_info(**kwargs) else: (data) = cls._list_all_tax_classes_with_http_info(**kwargs) return data" 4844,"def replace_tax_class_by_id(cls, tax_class_id, tax_class, **kwargs): """"""Replace TaxClass Replace all attributes of TaxClass This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.replace_tax_class_by_id(tax_class_id, tax_class, async=True) >>> result = thread.get() :param async bool :param str tax_class_id: ID of taxClass to replace (required) :param TaxClass tax_class: Attributes of taxClass to replace (required) :return: TaxClass If the method is called asynchronously, returns the request thread. """""" kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._replace_tax_class_by_id_with_http_info(tax_class_id, tax_class, **kwargs) else: (data) = cls._replace_tax_class_by_id_with_http_info(tax_class_id, tax_class, **kwargs) return data" 4845,"def update_tax_class_by_id(cls, tax_class_id, tax_class, **kwargs): """"""Update TaxClass Update attributes of TaxClass This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.update_tax_class_by_id(tax_class_id, tax_class, async=True) >>> result = thread.get() :param async bool :param str tax_class_id: ID of taxClass to update. (required) :param TaxClass tax_class: Attributes of taxClass to update. (required) :return: TaxClass If the method is called asynchronously, returns the request thread. """""" kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._update_tax_class_by_id_with_http_info(tax_class_id, tax_class, **kwargs) else: (data) = cls._update_tax_class_by_id_with_http_info(tax_class_id, tax_class, **kwargs) return data" 4846,"def autodiscover_modules(packages, related_name_re='.+', ignore_exceptions=False): """"""Autodiscover function follows the pattern used by Celery. :param packages: List of package names to auto discover modules in. :type packages: list of str :param related_name_re: Regular expression used to match modules names. :type related_name_re: str :param ignore_exceptions: Ignore exception when importing modules. :type ignore_exceptions: bool """""" warnings.warn('autodiscover_modules has been deprecated. 
' 'Use Flask-Registry instead.', DeprecationWarning) global _RACE_PROTECTION if _RACE_PROTECTION: return [] _RACE_PROTECTION = True modules = [] try: tmp = [find_related_modules(pkg, related_name_re, ignore_exceptions) for pkg in packages] for l in tmp: for m in l: if m is not None: modules.append(m) # Workaround for finally-statement except: _RACE_PROTECTION = False raise _RACE_PROTECTION = False return modules" 4847,"def find_related_modules(package, related_name_re='.+', ignore_exceptions=False): """"""Find matching modules using a package and a module name pattern."""""" warnings.warn('find_related_modules has been deprecated.', DeprecationWarning) package_elements = package.rsplit(""."", 1) try: if len(package_elements) == 2: pkg = __import__(package_elements[0], globals(), locals(), [ package_elements[1]]) pkg = getattr(pkg, package_elements[1]) else: pkg = __import__(package_elements[0], globals(), locals(), []) pkg_path = pkg.__path__ except AttributeError: return [] # Find all modules named according to related_name p = re.compile(related_name_re) modules = [] for name in find_modules(package, include_packages=True): if p.match(name.split('.')[-1]): try: modules.append(import_string(name, silent=ignore_exceptions)) except Exception as e: if not ignore_exceptions: raise e return modules" 4848,"def import_related_module(package, pkg_path, related_name, ignore_exceptions=False): """"""Import module from given path."""""" try: imp.find_module(related_name, pkg_path) except ImportError: return try: return getattr( __import__('%s' % (package), globals(), locals(), [related_name]), related_name ) except Exception as e: if ignore_exceptions: current_app.logger.exception( 'Can not import ""{}"" package'.format(package) ) else: raise e" 4849,"def ansi(string, *args): """""" Convenience function to chain multiple ColorWrappers to a string """""" ansi = '' for arg in args: arg = str(arg) if not re.match(ANSI_PATTERN, arg): raise ValueError('Additional arguments must be ansi strings') ansi += arg return ansi + string + colorama.Style.RESET_ALL" 4850,"def puts(*args, **kwargs): """""" Full feature printing function featuring trimming and padding for both files and ttys """""" # parse kwargs trim = kwargs.pop('trim', False) padding = kwargs.pop('padding', None) stream = kwargs.pop('stream', sys.stdout) # HACK: check if stream is IndentedFile indent = getattr(stream, 'indent', 0) # stringify args args = [str(i) for i in args] # helpers def trimstr(ansi, width): string = ''; size = 0; i = 0 while i < len(ansi): mobj = re.match(ANSI_PATTERN, ansi[i:]) if mobj: # append ansi code string = string + mobj.group(0) i += len(mobj.group(0)) else: # loop for more ansi codes even at max width size += 1 if size > width: break # append normal char string = string + ansi[i] i += 1 return (string, size) # process strings if not stream.isatty(): # remove ansi codes and print for string in args: stream.write(re.sub(ANSI_PATTERN, '', string) + '\n') else: # get terminal width try: curses.setupterm() except: trim = False padding = None else: width = curses.tigetnum('cols') - indent for string in args: if trim or padding: trimmed, size = trimstr(string, width) # trim string if trim: if len(trimmed) < len(string): trimmed = trimstr(string, width - 3)[0] + colorama.Style.RESET_ALL + '...' 
string = trimmed # add padding if padding: string += padding * (width - size) # print final string stream.write(string + '\n')" 4851,"def _restore_tree_for(root, translate): # type: (Any, Dict[Type[Nonterminal], Type[Rule]]) -> Union[Nonterminal, Terminal] """""" Create part of AST that generate epsilon. :param root: Symbol in the original rule that results in epsilon. Can be Nonterminal or epsilon itself. :param translate: Dictionary where key is nonterminal and value is rule which is next to generate epsilon. :return: Nonterminal instance with part of AST generating epsilon. """""" # the symbol is epsilon directly, just return Terminal. if root is EPSILON: return Terminal(EPSILON) # create nonterminal created_nonterm = root() # type: Nonterminal created_rule = translate[root]() # type: Rule created_nonterm._set_to_rule(created_rule) created_rule._from_symbols.append(created_nonterm) # all symbols from the right are rewritable to epsilon, so we need to restore them as well for ch in created_rule.right: p = _restore_tree_for(ch, translate) # type: Nonterminal p._set_from_rule(created_rule) created_rule._to_symbols.append(p) return created_nonterm" 4852,"def epsilon_rules_restore(root): # type: (Nonterminal) -> Nonterminal """""" Transform parsed tree to contain epsilon rules originally removed from the grammar. :param root: Root of the parsed tree. :return: Modified tree including epsilon rules. """""" items = Traversing.post_order(root) items = filter(lambda x: isinstance(x, EpsilonRemovedRule), items) for rule in items: # create original rule created_rule = rule.from_rule() # type: Rule # attach parrents parents for s in rule.from_symbols: # type: Nonterminal s._set_to_rule(created_rule) created_rule._from_symbols.append(s) # attach children up to replace index (that will contain epsilon) for i in range(rule.replace_index): ch = rule.to_symbols[i] # type: Nonterminal ch._set_from_rule(created_rule) created_rule._to_symbols.append(ch) # add symbols originally rewrote to epsilon symb = _restore_tree_for(created_rule.right[rule.replace_index], rule.backtrack) # type: Nonterminal created_rule._to_symbols.append(symb) symb._set_from_rule(created_rule) # attach rest of children for i in range(rule.replace_index, len(rule.to_symbols)): ch = rule.to_symbols[i] # type: Nonterminal ch._set_from_rule(created_rule) created_rule._to_symbols.append(ch) return root" 4853,"def char_on_predicate(compiler, cont, test): '''return current char and step if @test succeed, where @test: a python function with one argument, which tests on one char and return True or False @test must be registered with register_function''' test = test.interlang() text = compiler.new_var(il.ConstLocalVar('text')) pos = compiler.new_var(il.ConstLocalVar('pos')) if not isinstance(test, il.PyFunction): raise DaoCompileTypeError(test) return il.Begin(( il.AssignFromList(text, pos, il.parse_state), il.If(il.Ge(pos,il.Len(text)), il.failcont(il.FALSE), il.If(il.Call(test, il.GetItem(text, pos)), il.begin( il.SetParseState(il.Tuple(text, il.add(pos, il.Integer(1)))), il.append_failcont(compiler, il.SetParseState(il.Tuple(text, pos))), cont(il.GetItem(text, pos))), il.failcont(il.FALSE)))))" 4854,"def char_between(lower, upper, func_name): '''return current char and step if char is between lower and upper, where @test: a python function with one argument, which tests on one char and return True or False @test must be registered with register_function''' function = register_function(func_name, lambda char: lower<=char<=upper) return 
char_on_predicate(function)" 4855,"def char_in(string, func_name): '''return current char and step if char is in string, where @test: a python function with one argument, which tests on one char and return True or False @test must be registered with register_function''' function = register_function(func_name, lambda char: char in string) return char_on_predicate(function)" 4856,"def string_on_predicate0(compiler, cont, test): '''return current char and step if @test succeed, where @test: a python function with one argument, which tests on one char and return True or False @test must be registered with register_function''' test = test.interlang() text = compiler.new_var(il.ConstLocalVar('text')) pos = compiler.new_var(il.ConstLocalVar('pos')) length = compiler.new_var(il.ConstLocalVar('length')) p = compiler.new_var(il.LocalVar('p')) if not isinstance(test, il.PyFunction): raise DaoCompileTypeError(test) return il.Begin(( il.AssignFromList(text, pos, il.parse_state), il.Assign(length, il.Len(text)), il.If(il.Ge(pos,il.Len(text)), cont(il.String('')), il.begin( il.Assign(p, pos), il.While(il.And(il.Lt(p, length), il.Call(test, il.GetItem(text, p))), il.AddAssign(p, il.Integer(1))), il.SetParseState(il.Tuple(text, p)), il.append_failcont(compiler, il.SetParseState(il.Tuple(text, pos))), cont(il.GetItem(text, il.Slice2(pos, p)))))))" 4857,"def word(compiler, cont, arg): 'word of letters' text = compiler.new_var(il.ConstLocalVar('text')) pos = compiler.new_var(il.ConstLocalVar('pos')) p = compiler.new_var(il.LocalVar('p')) length = compiler.new_var(il.ConstLocalVar('length')) if isinstance(arg, Var): arg = arg.interlang() x = compiler.new_var(il.ConstLocalVar('x')) return il.Begin(( il.AssignFromList(text, pos, il.parse_state), il.Assign(length, il.Len(text)), il.If(il.Ge(pos, length), il.failcont(il.FALSE), il.If(il.And(il.Not(il.Cle(il.String('a'), il.GetItem(text, pos), il.String('z'))), il.Not(il.Cle(il.String('A'), il.GetItem(text, pos), il.String('Z')))), il.failcont(il.FALSE), il.Begin(( il.Assign(p, il.add(pos, il.Integer(1))), il.while_(il.And(il.Lt(p, length), il.Or(il.Cle(il.String('a'), il.GetItem(text, p), il.String('z')), il.Cle(il.String('A'),il.GetItem(text, p),il.String('Z')))), il.AddAssign(p, il.Integer(1))), il.Assign(x, il.Deref(arg)), il.If(il.IsLogicVar(x), il.begin(il.SetParseState(il.Tuple(text, p)), il.SetBinding(x, il.GetItem(text, il.Slice2(pos, p))), il.append_failcont(compiler, il.SetParseState(il.Tuple(text, pos)), il.DelBinding(x)), cont(il.GetItem(text, pos))), il.If(il.Isinstance(x, il.String('str')), il.If(il.Eq(x, il.GetItem(text, il.Slice2(pos, p))), il.begin(il.append_failcont(compiler, il.SetParseState(il.Tuple(text, pos))), il.SetParseState(il.Tuple(text, p)), cont(il.GetItem(text, pos))), il.failcont(il.NONE)), il.RaiseTypeError(x))))))))) elif isinstance(arg, String): arg = arg.interlang() return il.Begin(( il.AssignFromList(text, pos, il.parse_state), il.Assign(length, il.Len(text)), il.If(il.Ge(pos, length), il.failcont(il.FALSE), il.If(il.And(il.Not(il.Cle(il.String('a'), il.GetItem(text, pos), il.String('z'))), il.Not(il.Cle(il.String('A'), il.GetItem(text, pos), il.String('Z')))), il.failcont(il.FALSE), il.Begin(( il.Assign(p, il.add(pos, il.Integer(1))), il.while_(il.And(il.Lt(p, length), il.Or(il.Cle(il.String('a'), il.GetItem(text, p), il.String('z')), il.Cle(il.String('A'),il.GetItem(text, p), il.String('Z')))), il.AddAssign(p, il.Integer(1))), il.If(il.Eq(arg, il.GetItem(text, il.Slice2(pos, p))), il.begin(il.append_failcont(compiler, 
il.SetParseState(il.Tuple(text, pos))), il.SetParseState(il.Tuple(text, p)), cont(arg)), il.failcont(il.NONE)))))))) else: raise CompileTypeError" 4858,"def literal(compiler, cont, arg): '''any given instance string''' text = compiler.new_var(il.ConstLocalVar('text')) pos = compiler.new_var(il.ConstLocalVar('pos')) p = compiler.new_var(il.LocalVar('p')) i = compiler.new_var(il.LocalVar('i')) x = compiler.new_var(il.ConstLocalVar('x')) length = compiler.new_var(il.ConstLocalVar('length')) length2 = compiler.new_var(il.ConstLocalVar('length2')) if isinstance(arg, Var): return il.Begin(( il.Assign(x, il.Deref(arg)), il.AssignFromList(text, pos, il.parse_state), il.Assign(length, il.Len(text)), il.Assign(length, il.Len(x)), il.Assign(i, il.Integer(0)), il.Assign(p, pos), il.while_(il.and_(il.Lt(i, length2), il.Lt(p, length), il.Eq(il.GetItem(text, p), il.GetItem(x, i))), il.AddAssign(p, il.Integer(1))), il.If(il.Lt(i, length2), il.failcont(il.NONE), il.begin(il.append_failcont(compiler, il.SetParseState(il.Tuple(text, pos))), il.SetParseState(il.Tuple(text, p)), cont(arg))))) elif isinstance(arg, String): arg = arg.interlang() return il.Begin(( il.AssignFromList(text, pos, il.parse_state), il.Assign(length, il.Len(text)), il.Assign(length2, il.Len(arg)), il.Assign(i, il.Integer(0)), il.Assign(p, pos), il.while_(il.and_(il.Lt(i, length2), il.Lt(p, length), il.Eq(il.GetItem(text, p), il.GetItem(arg, i))), il.AddAssign(p, il.Integer(1)), il.AddAssign(i, il.Integer(1)), ), il.If(il.Lt(i, length2), il.failcont(il.NONE), il.begin(il.append_failcont(compiler, il.SetParseState(il.Tuple(text, pos))), il.SetParseState(il.Tuple(text, p)), cont(arg))))) else: raise CompileTypeError" 4859,"def update_version(self, version, step=1): ""Compute an new version and write it as a tag"" # update the version based on the flags passed. if self.config.patch: version.patch += step if self.config.minor: version.minor += step if self.config.major: version.major += step if self.config.build: version.build_number += step if self.config.build_number: version.build_number = self.config.build_number # create a new tag in the repo with the new version. if self.config.dry_run: log.info('Not updating repo to version {0}, because of --dry-run'.format(version)) else: version = self.call_plugin_function('set_version', version) return version" 4860,"def row(self): """"""Returns current data row: MyDBRow object, or None"""""" ret = None i = self.tableWidget.currentRow() if i >= 0: ret = self._data[i] return ret" 4861,"def _find_id(self, id_): """"""Moves to row where formula is (if found, otherwise does nothing)"""""" for i, row in enumerate(self._data): if row[""id""] == id_: t = self.tableWidget # idx = t.itemFromIndex() t.setCurrentCell(i, 0) break" 4862,"def _wanna_emit_id_changed(self): """"""Filters intentions to emit the id_changed signal (only does if id really changed)"""""" if self._last_id != self._get_id(): self._last_id = self._get_id() self.id_changed.emit()" 4863,"def _get_id(self): """"""Getter because using the id property from within was not working"""""" ret = None row = self.row if row: ret = row[""id""] return ret" 4864,"def time_pipeline(iterable, *steps): ''' This times the steps in a pipeline. Give it an iterable to test against followed by the steps of the pipeline seperated in individual functions. 
Example Usage: ``` from random import choice, randint l = [randint(0,50) for i in range(100)] step1 = lambda iterable:(i for i in iterable if i%5==0) step2 = lambda iterable:(i for i in iterable if i%8==3) step3 = lambda iterable:sorted((1.0*i)/50 for i in iterable) step4 = lambda iterable:(float(float(float(float(i*3)))) for i in iterable) print('filter first') time_pipeline(l, step1, step2, step3, step4) print('process first') time_pipeline(l, step3, step4, step1, step2) print('filter, process, filter, process') time_pipeline(l, step1, step3, step2, step4) ``` Outputs: filter first step 1 | 2.0427s | step1 = lambda iterable:(i for i in iterable if i%5==0) step 2 | 2.0510s | step2 = lambda iterable:(i for i in iterable if i%8==3) step 3 | 2.4839s | step3 = lambda iterable:sorted((1.0*i)/50 for i in iterable) step 4 | 2.8446s | step4 = lambda iterable:(float(float(float(float(i*3)))) for i in iterable) process first step 1 | 7.5291s | step3 = lambda iterable:sorted((1.0*i)/50 for i in iterable) step 2 | 20.6732s | step4 = lambda iterable:(float(float(float(float(i*3)))) for i in iterable) step 3 | 16.8470s | step1 = lambda iterable:(i for i in iterable if i%5==0) step 4 | 16.8269s | step2 = lambda iterable:(i for i in iterable if i%8==3) filter, process, filter, process step 1 | 2.0528s | step1 = lambda iterable:(i for i in iterable if i%5==0) step 2 | 3.3039s | step3 = lambda iterable:sorted((1.0*i)/50 for i in iterable) step 3 | 3.1385s | step2 = lambda iterable:(i for i in iterable if i%8==3) step 4 | 3.1489s | step4 = lambda iterable:(float(float(float(float(i*3)))) for i in iterable) ''' if callable(iterable): try: iter(iterable()) callable_base = True except: raise TypeError('time_pipeline needs the first argument to be an iterable or a function that produces an iterable.') else: try: iter(iterable) callable_base = False except: raise TypeError('time_pipeline needs the first argument to be an iterable or a function that produces an iterable.') # if iterable is not a function, load the whole thing # into a list so it can be ran over multiple times if not callable_base: iterable = tuple(iterable) # these store timestamps for time calculations durations = [] results = [] for i,_ in enumerate(steps): current_tasks = steps[:i+1] #print('testing',current_tasks) duration = 0.0 # run this test x number of times for t in range(100000): # build the generator test_generator = iter(iterable()) if callable_base else iter(iterable) # time its execution start = ts() for task in current_tasks: test_generator = task(test_generator) for i in current_tasks[-1](test_generator): pass duration += ts() - start durations.append(duration) if len(durations) == 1: results.append(durations[0]) #print(durations[0],durations[0]) else: results.append(durations[-1]-durations[-2]) #print(durations[-1]-durations[-2],durations[-1]) #print(results) #print(durations) assert sum(results) > 0 resultsum = sum(results) ratios = [i/resultsum for i in results] #print(ratios) for i in range(len(ratios)): try: s = getsource(steps[i]).splitlines()[0].strip() except: s = repr(steps[i]).strip() print('step {} | {:2.4f}s | {}'.format(i+1, durations[i], s))" 4865,"def runs_per_second(generator, seconds=3): ''' use this function as a profiler for both functions and generators to see how many iterations or cycles they can run per second Example usage for timing simple operations/functions: ``` print(runs_per_second(lambda:1+2)) # 2074558 print(runs_per_second(lambda:1-2)) # 2048523 print(runs_per_second(lambda:1/2)) # 2075186 
print(runs_per_second(lambda:1*2)) # 2101722 print(runs_per_second(lambda:1**2)) # 2104572 ``` Example usage for timing iteration speed of generators: ``` def counter(): c = 0 while 1: yield c c+=1 print(runs_per_second(counter())) # 1697328 print(runs_per_second((i for i in range(2000)))) # 1591301 ``` ''' assert isinstance(seconds, int), 'runs_per_second needs seconds to be an int, not {}'.format(repr(seconds)) assert seconds>0, 'runs_per_second needs seconds to be positive, not {}'.format(repr(seconds)) # if generator is a function, turn it into a generator for testing if callable(generator) and not any(i in ('next', '__next__', '__iter__') for i in dir(generator)): try: # get the output of the function output = generator() except: # if the function crashes without any arguments raise Exception('runs_per_second needs a working function that accepts no arguments') else: # this usage of iter infinitely calls a function until the second argument is the output # so I set the second argument to something that isnt what output was. generator = iter(generator, (1 if output is None else None)) del output c=0 # run counter, keep this one short for performance reasons entire_test_time_used = False start = ts() end = start+seconds for _ in generator: if ts()>end: entire_test_time_used = True break else: c += 1 duration = (ts())-start # the ( ) around ts ensures that it will be the first thing calculated return int(c/(seconds if entire_test_time_used else duration))" 4866,"def remove_unreachable_symbols(grammar, inplace=False): # type: (Grammar, bool) -> Grammar """""" Remove unreachable symbols from the gramar :param grammar: Grammar where to symbols remove :param inplace: True if transformation should be performed in place. False by default. :return: Grammar without unreachable symbols. 
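Illustrative usage only (``g`` stands for an already constructed Grammar instance with its start symbol set; it is not defined here):
>>> pruned = remove_unreachable_symbols(g)      # returns a pruned copy of g
>>> remove_unreachable_symbols(g, inplace=True) # prunes g itself and returns it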
"""""" # copy if required if inplace is False: grammar = copy(grammar) # check if start symbol is set if grammar.start is None: raise StartSymbolNotSetException() # create process sets reachable = {grammar.start} rules = grammar.rules.copy() # begin iterations while True: # create sets for current iteration active = reachable.copy() # loop the working rules for rule in rules.copy(): # lf left part of rule already in reachable symbols if rule.fromSymbol in reachable: # set symbols on the right as reachable for symbol in rule.right: active.add(symbol) # remove rule from the next iteration rules.remove(rule) # end of rules loop # if current and previous iterations are same, we are done if active == reachable: break # otherwise swap the sets reachable = active # remove the symbols nonterminals_to_remove = grammar.nonterminals.difference(reachable) terminals_to_remove = grammar.terminals.difference(reachable) grammar.nonterminals.remove(*nonterminals_to_remove) grammar.terminals.remove(*terminals_to_remove) # return grammar return grammar" 4867,"def fermion_avg(efermi, norm_hopping, func): """"""calcules for every slave it's average over the desired observable"""""" if func == 'ekin': func = bethe_ekin_zeroT elif func == 'ocupation': func = bethe_filling_zeroT return np.asarray([func(ef, tz) for ef, tz in zip(efermi, norm_hopping)])" 4868,"def spinflipandhop(slaves): """"""Calculates the interaction term of a spin flip and pair hopping"""""" Sdw = [csr_matrix(spin_gen(slaves, i, 0)) for i in range(slaves)] Sup = [mat.T for mat in Sdw] sfh = np.zeros_like(Sup[0]) orbitals = slaves//2 for n in range(orbitals): for m in range(n+1, orbitals): sfh += Sup[2*n ] * Sdw[2*n + 1] * Sup[2*m + 1] * Sdw[2*m ] sfh += Sup[2*n+1] * Sdw[2*n ] * Sup[2*m ] * Sdw[2*m+1] sfh += Sup[2*n] * Sup[2*n + 1] * Sdw[2*m] * Sdw[2*m+1] sfh += Sup[2*m] * Sup[2*m + 1] * Sdw[2*n] * Sdw[2*n+1] return sfh" 4869,"def spin_z_op(param, oper): """"""Generates the required Sz operators, given the system parameter setup and the operator dictionary"""""" slaves = param['slaves'] oper['Sz'] = np.array([spin_z(slaves, spin) for spin in range(slaves)]) oper['Sz+1/2'] = oper['Sz'] + 0.5*np.eye(2**slaves) oper['sumSz2'] = oper['Sz'].sum(axis=0)**2 # because Sz is diagonal Sz_mat_shape = oper['Sz'].reshape(param['orbitals'], 2, 2**slaves, 2**slaves) oper['sumSz-sp2'] = (Sz_mat_shape.sum(axis=1)**2).sum(axis=0) oper['sumSz-or2'] = (Sz_mat_shape.sum(axis=0)**2).sum(axis=0)" 4870,"def spin_gen_op(oper, gauge): """"""Generates the generic spin matrices for the system"""""" slaves = len(gauge) oper['O'] = np.array([spin_gen(slaves, i, c) for i, c in enumerate(gauge)]) oper['O_d'] = np.transpose(oper['O'], (0, 2, 1)) oper['O_dO'] = np.einsum('...ij,...jk->...ik', oper['O_d'], oper['O']) oper['Sfliphop'] = spinflipandhop(slaves)" 4871,"def set_filling(self, populations): """"""Sets the orbital enenergies for on the reference of the free case. By setting the desired local populations on every orbital. 
Then generate the necesary operators to respect such configuraion"""""" populations = np.asarray(populations) # # self.param['orbital_e'] -= bethe_findfill_zeroT( \ # self.param['avg_particles'], # self.param['orbital_e'], # self.param['hopping']) efermi = - bethe_find_crystalfield( populations, self.param['hopping']) self.param['populations'] = populations # fermion_avg(efermi, self.param['hopping'], 'ocupation') self.param['ekin'] = fermion_avg(efermi, self.param['hopping'], 'ekin') spin_gen_op(self.oper, estimate_gauge(populations))" 4872,"def reset(self, populations, lag, mu, u_int, j_coup, mean_f): """"""Resets the system into the last known state as given by the input values"""""" self.set_filling(populations) self.param['lambda'] = lag self.param['orbital_e'] = mu self.selfconsistency(u_int, j_coup, mean_f)" 4873,"def update_H(self, mean_field, l): """"""Updates the spin hamiltonian and recalculates its eigenbasis"""""" self.H_s = self.spin_hamiltonian(mean_field, l) try: self.eig_energies, self.eig_states = diagonalize(self.H_s) except np.linalg.linalg.LinAlgError: np.savez('errorhamil', H=self.H_s, fiel=mean_field, lamb=l) raise except ValueError: np.savez('errorhamil', H=self.H_s, fiel=mean_field, lamb=l) print(mean_field, l) raise" 4874,"def spin_hamiltonian(self, h, l): """"""Constructs the single site spin Hamiltonian"""""" h_spin = np.einsum('i,ijk', h[1], self.oper['O']) h_spin += np.einsum('i,ijk', h[0], self.oper['O_d']) h_spin += np.einsum('i,ijk', l, self.oper['Sz+1/2']) h_spin += self.oper['Hint'] return h_spin" 4875,"def inter_spin_hamiltonian(self, u_int, J_coup): """"""Calculates the interaction Hamiltonian. The Hund coupling is a fraction of the coulom interaction"""""" J_coup *= u_int h_int = (u_int - 2*J_coup)/2.*self.oper['sumSz2'] h_int += J_coup*self.oper['sumSz-sp2'] h_int -= J_coup/2.*self.oper['sumSz-or2'] h_int -= J_coup*self.oper['Sfliphop'] return h_int" 4876,"def expected(self, observable, beta=1e5): """"""Wrapper to the expected_value function to fix the eigenbasis"""""" return expected_value(observable, self.eig_energies, self.eig_states, beta)" 4877,"def quasiparticle_weight(self): """"""Calculates quasiparticle weight"""""" return np.array([self.expected(op)**2 for op in self.oper['O']])" 4878,"def mean_field(self): """"""Calculates mean field"""""" mean_field = [] for sp_oper in [self.oper['O'], self.oper['O_d']]: avgO = np.array([self.expected(op) for op in sp_oper]) avgO[abs(avgO) < 1e-10] = 0. 
mean_field.append(avgO*self.param['ekin']) return np.array(mean_field)" 4879,"def selfconsistency(self, u_int, J_coup, mean_field_prev=None): """"""Iterates over the hamiltonian to get the stable selfcosistent one"""""" if mean_field_prev is None: mean_field_prev = np.array([self.param['ekin']]*2) hlog = [mean_field_prev] self.oper['Hint'] = self.inter_spin_hamiltonian(u_int, J_coup) converging = True half_fill = (self.param['populations'] == 0.5).all() while converging: if half_fill: self.update_H(hlog[-1], self.param['lambda']) else: res = root(self.restriction, self.param['lambda'], (hlog[-1]))#, method='lm') if not res.success: res.x = res.x * 0.5 + 0.5*self.param['lambda'] self.update_H(self.mean_field()*0.5 + 0.5*hlog[-1], res.x) print('fail', self.param['populations'][3:5]) if (self.quasiparticle_weight() < 0.001).all(): return hlog self.param['lambda'] = res.x hlog.append(self.mean_field()) converging = (abs(hlog[-1] - hlog[-2]) > self.param['tol']).all() \ or (abs(self.restriction(self.param['lambda'], hlog[-1])) > self.param['tol']).all() return hlog" 4880,"def restriction(self, lam, mean_field): """"""Lagrange multiplier in lattice slave spin"""""" self.update_H(mean_field, lam) restric = np.array([self.expected(op) - n for op, n in zip(self.oper['Sz+1/2'], self.param['populations'])]) return restric" 4881,"def others2db(file_path, file_type, is_copy, step_id, db_conn): """"""Extract some meta-data from files (actually mostly from their paths) and stores it in a DB. Arguments: :param file_path: File path. :param file_type: File type. :param is_copy: Indicate if this file is a copy. :param step_id: Step ID. :param db_conn: Database connection. :return: """""" logging.info(""Processing '%s'"" % file_path) df = db_conn.db_session.query(db_conn.DataFile).filter_by(path=file_path).one_or_none() if not df: df = db_conn.DataFile( path=file_path, type=file_type, is_copy=is_copy, processing_step_id=step_id ) db_conn.db_session.merge(df) db_conn.db_session.commit() else: if file_type not in [None, '', df.type]: df.type = file_type db_conn.db_session.commit() if is_copy not in [None, df.is_copy]: df.is_copy = is_copy db_conn.db_session.commit() if step_id not in [None, df.processing_step_id]: df.processing_step_id = step_id db_conn.db_session.commit()" 4882,"def create_currency(cls, currency, **kwargs): """"""Create Currency Create a new Currency This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.create_currency(currency, async=True) >>> result = thread.get() :param async bool :param Currency currency: Attributes of currency to create (required) :return: Currency If the method is called asynchronously, returns the request thread. """""" kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._create_currency_with_http_info(currency, **kwargs) else: (data) = cls._create_currency_with_http_info(currency, **kwargs) return data" 4883,"def delete_currency_by_id(cls, currency_id, **kwargs): """"""Delete Currency Delete an instance of Currency by its ID. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.delete_currency_by_id(currency_id, async=True) >>> result = thread.get() :param async bool :param str currency_id: ID of currency to delete. (required) :return: None If the method is called asynchronously, returns the request thread. 
"""""" kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._delete_currency_by_id_with_http_info(currency_id, **kwargs) else: (data) = cls._delete_currency_by_id_with_http_info(currency_id, **kwargs) return data" 4884,"def get_currency_by_id(cls, currency_id, **kwargs): """"""Find Currency Return single instance of Currency by its ID. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.get_currency_by_id(currency_id, async=True) >>> result = thread.get() :param async bool :param str currency_id: ID of currency to return (required) :return: Currency If the method is called asynchronously, returns the request thread. """""" kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._get_currency_by_id_with_http_info(currency_id, **kwargs) else: (data) = cls._get_currency_by_id_with_http_info(currency_id, **kwargs) return data" 4885,"def list_all_currencies(cls, **kwargs): """"""List Currencies Return a list of Currencies This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.list_all_currencies(async=True) >>> result = thread.get() :param async bool :param int page: page number :param int size: page size :param str sort: page order :return: page[Currency] If the method is called asynchronously, returns the request thread. """""" kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._list_all_currencies_with_http_info(**kwargs) else: (data) = cls._list_all_currencies_with_http_info(**kwargs) return data" 4886,"def replace_currency_by_id(cls, currency_id, currency, **kwargs): """"""Replace Currency Replace all attributes of Currency This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.replace_currency_by_id(currency_id, currency, async=True) >>> result = thread.get() :param async bool :param str currency_id: ID of currency to replace (required) :param Currency currency: Attributes of currency to replace (required) :return: Currency If the method is called asynchronously, returns the request thread. """""" kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._replace_currency_by_id_with_http_info(currency_id, currency, **kwargs) else: (data) = cls._replace_currency_by_id_with_http_info(currency_id, currency, **kwargs) return data" 4887,"def update_currency_by_id(cls, currency_id, currency, **kwargs): """"""Update Currency Update attributes of Currency This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.update_currency_by_id(currency_id, currency, async=True) >>> result = thread.get() :param async bool :param str currency_id: ID of currency to update. (required) :param Currency currency: Attributes of currency to update. (required) :return: Currency If the method is called asynchronously, returns the request thread. """""" kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._update_currency_by_id_with_http_info(currency_id, currency, **kwargs) else: (data) = cls._update_currency_by_id_with_http_info(currency_id, currency, **kwargs) return data" 4888,"def run_cmd(cmd, out=os.path.devnull, err=os.path.devnull): """"""Runs an external command :param list cmd: Command to run. 
:param str out: Output file :param str err: Error file :raises: RuntimeError """""" logger.debug(' '.join(cmd)) with open(out, 'w') as hout: proc = subprocess.Popen(cmd, stdout=hout, stderr=subprocess.PIPE) err_msg = proc.communicate()[1].decode() with open(err, 'w') as herr: herr.write(str(err_msg)) msg = '({}) {}'.format(' '.join(cmd), err_msg) if proc.returncode != 0: logger.error(msg) raise RuntimeError(msg)" 4889,"def run_cmd_if_file_missing(cmd, fname, out=os.path.devnull, err=os.path.devnull): """"""Runs an external command if file is absent. :param list cmd: Command to run. :param str fname: Path to the file, which existence is being checked. :param str out: Output file :param str err: Error file :return: True if cmd was executed, False otherwise :rtype: boolean """""" if fname is None or not os.path.exists(fname): run_cmd(cmd, out, err) return True else: return False" 4890,"def merge_files(sources, destination): """"""Copy content of multiple files into a single file. :param list(str) sources: source file names (paths) :param str destination: destination file name (path) :return: """""" with open(destination, 'w') as hout: for f in sources: if os.path.exists(f): with open(f) as hin: shutil.copyfileobj(hin, hout) else: logger.warning('File is missing: {}'.format(f))" 4891,"def add_path(self, path): """""" Adds a new path to the list of searchable paths :param path: new path """""" if os.path.exists(path): self.paths.add(path) return path else: #logger.debug('Path {} doesn\'t exist'.format(path)) return None" 4892,"def get(self, name): """""" Looks for a name in the path. :param name: file name :return: path to the file """""" for d in self.paths: if os.path.exists(d) and name in os.listdir(d): return os.path.join(d, name) logger.debug('File not found {}'.format(name)) return None" 4893,"def overwrite_fits(hdulist, filename): """""" Saves a FITS file. Combined file rename, save new, delete renamed for FITS files Why: HDUlist.writeto() does not overwrite existing files Why(2): It is also a standardized way to save FITS files """""" assert isinstance(hdulist, (fits.HDUList, fits.PrimaryHDU)) temp_name = None flag_delete_temp = False if os.path.isfile(filename): # PyFITS does not overwrite file temp_name = a99.rename_to_temp(filename) try: hdulist.writeto(filename, output_verify='warn') flag_delete_temp = temp_name is not None except: # Writing failed, reverts renaming os.rename(temp_name, filename) raise if flag_delete_temp: os.unlink(temp_name)" 4894,"def load_conf(yml_file, conf={}): """""" To load the config :param yml_file: the config file path :param conf: dict, to override global config :return: dict """""" with open(yml_file) as f: data = yaml.load(f) if conf: data.update(conf) return dictdot(data)" 4895,"def table_exists( dbConn, log, dbTableName): """"""*Probe a database to determine if a given table exists* **Key Arguments:** - ``dbConn`` -- mysql database connection - ``log`` -- logger - ``dbTableName`` -- the database tablename **Return:** - ``tableExists`` -- True or False **Usage:** To test if a table exists in a database: .. 
code-block:: python from fundamentals.mysql import table_exists exists = table_exists( dbConn=dbConn, log=log, dbTableName=""stupid_named_table"" ) print exists # OUTPUT: False """""" log.debug('starting the ``table_exists`` function') sqlQuery = u"""""" SELECT count(*) FROM information_schema.tables WHERE table_name = '%(dbTableName)s' """""" % locals() tableExists = readquery( log=log, sqlQuery=sqlQuery, dbConn=dbConn, quiet=False ) if tableExists[0][""count(*)""] == 0: tableExists = False else: tableExists = True log.debug('completed the ``table_exists`` function') return tableExists" 4896,"def __handle_request(self, request, *args, **kw): """""" Intercept the request and response. This function lets `HttpStatusCodeError`s fall through. They are caught and transformed into HTTP responses by the caller. :return: ``HttpResponse`` """""" self._authenticate(request) self._check_permission(request) method = self._get_method(request) data = self._get_input_data(request) data = self._clean_input_data(data, request) response = self._exec_method(method, request, data, *args, **kw) return self._process_response(response, request)" 4897,"def _exec_method(self, method, request, data, *args, **kw): """""" Execute appropriate request handler. """""" if self._is_data_method(request): return method(data, request, *args, **kw) else: return method(request, *args, **kw)" 4898,"def _process_response(self, response, request): """""" Process the response. If the response is ``HttpResponse``, does nothing. Otherwise, serializes, formats and validates the response. :param response: resource's response. This can be - ``None``, - django's ``HttpResponse`` - devil's ``Response`` - dictionary (or list of dictionaries) - object (or list of objects) that are first serialized into dict using ``self.factory``. - plaintext :returns: Django's ``HttpResponse`` """""" def coerce_response(): """""" Coerce the response object into devil structure. """""" if not isinstance(response, Response): return Response(0, response) return response if isinstance(response, HttpResponse): # we don't do anything if resource returns django's http response return response devil_res = coerce_response() if devil_res.content and devil_res.get_code_num() in (0, 200, 201): # serialize, format and validate serialized_res = devil_res.content = self._serialize_object(devil_res.content, request) formatted_res = self._format_response(request, devil_res) self._validate_output_data(response, serialized_res, formatted_res, request) else: # no data -> format only formatted_res = self._format_response(request, devil_res) return formatted_res" 4899,"def _format_response(self, request, response): """""" Format response using appropriate datamapper. Take the devil response and turn it into django response, ready to be returned to the client. """""" res = datamapper.format(request, response, self) # data is now formatted, let's check if the status_code is set if res.status_code is 0: res.status_code = 200 # apply headers self._add_resposne_headers(res, response) return res" 4900,"def _add_resposne_headers(self, django_response, devil_response): """""" Add response headers. Add HTTP headers from devil's response to django's response. """""" try: headers = devil_response.headers except AttributeError: # ok, there was no devil_response pass else: for k, v in headers.items(): django_response[k] = v return django_response" 4901,"def _get_input_data(self, request): """""" If there is data, parse it, otherwise return None. 
"""""" # only PUT and POST should provide data if not self._is_data_method(request): return None content = [row for row in request.read()] content = ''.join(content) if content else None return self._parse_input_data(content, request) if content else None" 4902,"def _clean_input_data(self, data, request): """""" Clean input data. """""" # sanity check if not self._is_data_method(request): # this is not PUT or POST -> return return data # do cleaning try: if self.representation: # representation defined -> perform validation self._validate_input_data(data, request) if self.factory: # factory defined -> create object return self._create_object(data, request) else: # no factory nor representation -> return the same data back return data except ValidationError, exc: return self._input_validation_failed(exc, data, request)" 4903,"def _get_input_validator(self, request): """""" Return appropriate input validator. For POST requests, ``self.post_representation`` is returned if it is present, ``self.representation`` otherwise. """""" method = request.method.upper() if method != 'POST': return self.representation elif self.post_representation: return self.post_representation else: return self.representation" 4904,"def _validate_input_data(self, data, request): """""" Validate input data. :param request: the HTTP request :param data: the parsed data :return: if validation is performed and succeeds the data is converted into whatever format the validation uses (by default Django's Forms) If not, the data is returned unchanged. :raises: HttpStatusCodeError if data is not valid """""" validator = self._get_input_validator(request) if isinstance(data, (list, tuple)): return map(validator.validate, data) else: return validator.validate(data)" 4905,"def _validate_output_data( self, original_res, serialized_res, formatted_res, request): """""" Validate the response data. :param response: ``HttpResponse`` :param data: payload data. This implementation assumes dict or list of dicts. :raises: `HttpStatusCodeError` if data is not valid """""" validator = self.representation # when not to validate... if not validator: return try: if isinstance(serialized_res, (list, tuple)): map(validator.validate, serialized_res) else: validator.validate(serialized_res) except ValidationError, exc: self._output_validation_failed(exc, serialized_res, request)" 4906,"def _create_object(self, data, request): """""" Create a python object from the given data. This will use ``self.factory`` object's ``create()`` function to create the data. If no factory is defined, this will simply return the same data that was given. """""" if request.method.upper() == 'POST' and self.post_factory: fac_func = self.post_factory.create else: fac_func = self.factory.create if isinstance(data, (list, tuple)): return map(fac_func, data) else: return fac_func(data)" 4907,"def _serialize_object(self, response_data, request): """""" Create a python datatype from the given python object. This will use ``self.factory`` object's ``serialize()`` function to convert the object into dictionary. If no factory is defined, this will simply return the same data that was given. 
:param response_data: data returned by the resource """""" if not self.factory: return response_data if isinstance(response_data, (list, tuple)): return map( lambda item: self.factory.serialize(item, request), response_data) else: return self.factory.serialize(response_data, request)" 4908,"def _get_unknown_error_response(self, request, exc): """""" Generate HttpResponse for unknown exceptions. todo: this should be more informative.. """""" logging.getLogger('devil').error( 'while doing %s on %s with [%s], devil caught: %s' % ( request.method, request.path_info, str(request.GET), str(exc)), exc_info=True) if settings.DEBUG: raise else: return HttpResponse(status=codes.INTERNAL_SERVER_ERROR[1])" 4909,"def _get_error_response(self, exc): """""" Generate HttpResponse based on the HttpStatusCodeError. """""" if exc.has_code(codes.UNAUTHORIZED): return self._get_auth_challenge(exc) else: if exc.has_code(codes.INTERNAL_SERVER_ERROR): logging.getLogger('devil').error('devil caught http error: ' + str(exc), exc_info=True) else: logging.getLogger('devil').error('devil caught http error: ' + str(exc)) content = exc.content or '' return HttpResponse(content=content, status=exc.get_code_num())" 4910,"def _get_auth_challenge(self, exc): """""" Returns HttpResponse for the client. """""" response = HttpResponse(content=exc.content, status=exc.get_code_num()) response['WWW-Authenticate'] = 'Basic realm=""%s""' % REALM return response" 4911,"def _get_method(self, request): """""" Figure out the requested method and return the callable. """""" methodname = request.method.lower() method = getattr(self, methodname, None) if not method or not callable(method): raise errors.MethodNotAllowed() return method" 4912,"def _authenticate(self, request): """""" Perform authentication. """""" def ensure_user_obj(): """""" Make sure that request object has user property. If `request.user` is not present or is `None`, it is created and initialized with `AnonymousUser`. """""" try: if request.user: return except AttributeError: pass request.user = AnonymousUser() def anonymous_access(exc_obj): """""" Determine what to do with unauthenticated requests. If the request has already been authenticated, does nothing. :param exc_obj: exception object to be thrown if anonymous access is not permitted. """""" if request.user and request.user.is_authenticated(): # request is already authenticated pass elif self.allow_anonymous: request.user = AnonymousUser() else: raise exc_obj # first, make sure that the request carries `user` attribute ensure_user_obj() if self.authentication: # authentication handler is configured try: self.authentication.authenticate(request) except errors.Unauthorized, exc: # http request doesn't carry any authentication information anonymous_access(exc) else: # no authentication configured anonymous_access(errors.Forbidden())" 4913,"def _check_permission(self, request): """""" Check user permissions. :raises: Forbidden, if user doesn't have access to the resource. 
"""""" if self.access_controller: self.access_controller.check_perm(request, self)" 4914,"def print_devices_change_callback(devices, key, new): """"""Print the reply from &devices() and highlight errors."""""" dev = devices[key] print('- ', new, ' ', dev) if dev['type'] == QSType.unknown: print("" ERR decoding"") elif dev['value'] == -1: dev("" ERR decoding: -1?"") qcord = pyqwikswitch.decode_qwikcord(dev['data'][pyqwikswitch.QS_VALUE]) if qcord is not None: print(' qwikcord (CTAVG, CTsum) = ' + str(qcord))" 4915,"def print_item_callback(item): """"""Print an item callback, used by &listen."""""" print('&listen [{}, {}={}]'.format( item.get('cmd', ''), item.get('id', ''), item.get('data', '')))" 4916,"def main(): """"""Quick test for QSUsb class."""""" import argparse parser = argparse.ArgumentParser() parser.add_argument('--url', help='QSUSB URL [http://127.0.0.1:2020]', default='http://127.0.0.1:2020') parser.add_argument('--file', help='a test file from /&devices') parser.add_argument('--test_ids', help='List of test IDs', default='@0c2700,@0ac2f0') args = parser.parse_args() if args.file: with open(args.file) as data_file: data = json.load(data_file) qsusb = pyqwikswitch.QSDevices( print_devices_change_callback, print_devices_change_callback) print_bad_data(data) qsusb.set_qs_values(data) return print('Execute a basic test on server: {}\n'.format(args.url)) def qs_to_value(key, new): print("" --> New value: {}={}"".format(key, new)) qsusb = QSUsb(args.url, 1, qs_to_value) print('Version: ' + qsusb.version()) qsusb.set_qs_values() qsusb.listen(print_item_callback, timeout=5) print(""Started listening"") try: # Do some test while listening if args.test_ids and len(args.test_ids) > 0: test_devices_set(qsusb.devices, args.test_ids.split(',')) print(""\n\nListening for 60 seconds (test buttons now)\n"") sleep(60) except KeyboardInterrupt: pass finally: qsusb.stop() # Close all threads print(""Stopped listening"")" 4917,"def get(self): """"""Called to get the asset values and if it is valid """""" with self._lock: now = datetime.now() active = [] for i, vef in enumerate(self.futures): # has expired if (vef[1] or datetime.max) <= now: self.futures.pop(i) continue # in future elif (vef[2] or datetime.min) >= now: continue else: active.append(i) if active: # this will evict values old values # because new ones are ""more recent"" via future value, _e, _f = self.futures[active[-1]] for i in active[:-1]: self.futures.pop(i) return value raise ValueError(""dicttime: no current value, however future has (%d) values"" % len(self.futures))" 4918,"def add_node(node, **kwds): """"""add_node from Sphinx """""" nodes._add_node_class_names([node.__name__]) for key, val in kwds.iteritems(): try: visit, depart = val except ValueError: raise ValueError('Value for key %r must be a ' '(visit, depart) function tuple' % key) if key == 'html': from docutils.writers.html4css1 import HTMLTranslator as translator elif key == 'latex': from docutils.writers.latex2e import LaTeXTranslator as translator else: # ignore invalid keys for compatibility continue setattr(translator, 'visit_'+node.__name__, visit) if depart: setattr(translator, 'depart_'+node.__name__, depart)" 4919,"def _lookup(self, bearer, target=None, permission=None): """"""Lookup the proper registry for this permission. 
Returns (, ) where registry is the proper lookup and key is the generated key to use for the permission."""""" if target is None: key = (bearer, permission) lookup = self.bearer elif permission is None: key = (bearer, target) lookup = self.target else: key = (bearer, target, permission) lookup = self return lookup, key" 4920,"def retrieve(self, *args, **kwargs): """"""Retrieve the permsission function for the provided things. """""" lookup, key = self._lookup(*args, **kwargs) return lookup[key]" 4921,"def __api_proxy_call(self, engine, payload, method, analyze_json_error_param, retry_request_substr_variants, stream=False): """""" :param engine: Система :param payload: Данные для запроса :param method: string Может содержать native_call | tsv | json_newline :param analyze_json_error_param: Нужно ли производить анализ параметра error в ответе прокси :param retry_request_substr_variants: Список подстрок, при наличии которых в ответе будет происходить перезапрос :param stream: :return: """""" log_ctx = { ""engine"": engine, ""method"": payload.get('method'), ""method_params"": payload.get('method_params') } self.__app.log.info(""Call api proxy"", log_ctx) body = { ""engine"": engine, ""payload"": payload } for _try_idx in range(20): try: # 1h таймаут, так как бывают большие долгие данные, а лимит хоть какой-то нужен body_str = json.dumps(body) headers = { ""User-Agent"": self.__app.user_agent, ""X-App"": ""META"", ""X-Worker"": self.__app.service_id, ""X-ObjectLocator"": LOGGER_ENTITY.get(""objectLocator"") } resp = requests.post(self.__app.api_proxy_url + ""/"" + method, body_str, timeout=3600, stream=stream, headers=headers) self.check_err(resp, analyze_json_error_param=analyze_json_error_param, retry_request_substr_variants=retry_request_substr_variants) return resp except (RetryHttpRequestError, RateLimitError, ConnectionError) as e: self.__app.log.warning(""Sleep retry query: "" + str(e), log_ctx) sleep_time = 20 if e.__class__.__name__ == ""RateLimitError"": sleep_time = e.waiting_time time.sleep(sleep_time) raise EndOfTriesError(""Api of api proxy tries request"")" 4922,"def call_proxy_with_paging(self, engine, payload, method, analyze_json_error_param, retry_request_substr_variants, max_pages=MAX_PAGES): """""" Постраничный запрос :param engine: Система :param payload: Данные для запроса :param method: string Может содержать native_call | tsv | json_newline :param analyze_json_error_param: Нужно ли производить анализ параметра error в ответе прокси :param retry_request_substr_variants: Список подстрок, при наличии которых в ответе будет происходить перезапрос :param max_pages: Максимальное количество страниц в запросе :return: объект генератор """""" copy_payload = copy.deepcopy(payload) idx = 0 for idx in range(max_pages): resp = self.__api_proxy_call(engine, copy_payload, method, analyze_json_error_param, retry_request_substr_variants) yield resp paging_resp = resp.json().get(""paging"") if not paging_resp: break copy_payload[""paging""] = paging_resp if idx >= max_pages: self.__app.log.warning(""Достигнут максимальный предел страниц"", {""max_pages"": max_pages})" 4923,"def call_proxy(self, engine, payload, method, analyze_json_error_param, retry_request_substr_variants, stream=False): """""" :param engine: Система :param payload: Данные для запроса :param method: string Может содержать native_call | tsv | json_newline :param analyze_json_error_param: Нужно ли производить анализ параметра error в ответе прокси :param retry_request_substr_variants: Список подстрок, при наличии 
которых в ответе будет происходить перезапрос :param stream: :return: """""" return self.__api_proxy_call(engine, payload, method, analyze_json_error_param, retry_request_substr_variants, stream)" 4924,"def check_err(resp, analyze_json_error_param=False, retry_request_substr_variants=None): """""" :type retry_request_substr_variants: list Список вхождений строк, при налиции которых в ошибке апи будет произведен повторный запрос к апи """""" if retry_request_substr_variants is None: retry_request_substr_variants = [] # РКН блокировки вызывают ошибку SSL retry_request_substr_variants.append(""TLSV1_ALERT_ACCESS_DENIED"") if resp.status_code in [502, 503, 504]: raise RetryHttpRequestError(resp.text) if resp.status_code >= 400: rtext = resp.text for v_ in retry_request_substr_variants: if v_ in rtext: raise RetryHttpRequestError(rtext) raise UnexpectedError(""HTTP request failed: {} {}"".format(resp.status_code, rtext)) if analyze_json_error_param: data_ = resp.json() if 'error' in data_ and data_.get('error'): error = data_.get('error') full_err_ = json.dumps(error) if error.get(""type"") == ""RateLimitError"": raise RateLimitError(error.get(""message""), waiting_time=error.get(""waiting_time"")) for v_ in retry_request_substr_variants: if v_ in full_err_: raise RetryHttpRequestError(full_err_) raise ApiProxyError(full_err_) return resp" 4925,"def has_rabf_motif(self): """"""Checks if the sequence has enough RabF motifs within the G domain If there exists more than one G domain in the sequence enough RabF motifs is required in at least one of those domains to classify the sequence as a Rab. """""" if self.rabf_motifs: for gdomain in self.gdomain_regions: beg, end = map(int, gdomain.split('-')) motifs = [x for x in self.rabf_motifs if x[1] >= beg and x[2] <= end] if motifs: matches = int(pairwise2.align.globalxx('12345', ''.join(str(x[0]) for x in motifs))[0][2]) if matches >= self.motif_number: return True return False" 4926,"def summarize(self): """""" G protein annotation summary in a text format :return: A string summary of the annotation :rtype: str """""" data = [ ['Sequence ID', self.seqrecord.id], ['G domain', ' '.join(self.gdomain_regions) if self.gdomain_regions else None], ['E-value vs rab db', self.evalue_bh_rabs], ['E-value vs non-rab db', self.evalue_bh_non_rabs], ['RabF motifs', ' '.join(map(str, self.rabf_motifs)) if self.rabf_motifs else None], ['Is Rab?', self.is_rab()] ] summary = '' for name, value in data: summary += '{:25s}{}\n'.format(name, value) if self.is_rab(): summary += '{:25s}{}\n'.format('Top 5 subfamilies', ', '.join('{:s} ({:.2g})'.format(name, score) for name, score in self.rab_subfamily_top5)) return summary" 4927,"def write(self): """"""Write sequences predicted to be Rabs as a fasta file. 
:return: Number of written sequences :rtype: int """""" rabs = [x.seqrecord for x in self.gproteins.values() if x.is_rab()] return SeqIO.write(rabs, self.tmpfname + '.phase2', 'fasta')" 4928,"def check(self): """""" Check if data and third party tools, necessary to run the classification, are available :raises: RuntimeError """""" pathfinder = Pathfinder(True) if pathfinder.add_path(pathfinder['superfamily']) is None: raise RuntimeError(""'superfamily' data directory is missing"") for tool in ('hmmscan', 'phmmer', 'mast', 'blastp', 'ass3.pl', 'hmmscan.pl'): if not pathfinder.exists(tool): raise RuntimeError('Dependency {} is missing'.format(tool))" 4929,"def create_brand(cls, brand, **kwargs): """"""Create Brand Create a new Brand This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.create_brand(brand, async=True) >>> result = thread.get() :param async bool :param Brand brand: Attributes of brand to create (required) :return: Brand If the method is called asynchronously, returns the request thread. """""" kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._create_brand_with_http_info(brand, **kwargs) else: (data) = cls._create_brand_with_http_info(brand, **kwargs) return data" 4930,"def delete_brand_by_id(cls, brand_id, **kwargs): """"""Delete Brand Delete an instance of Brand by its ID. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.delete_brand_by_id(brand_id, async=True) >>> result = thread.get() :param async bool :param str brand_id: ID of brand to delete. (required) :return: None If the method is called asynchronously, returns the request thread. """""" kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._delete_brand_by_id_with_http_info(brand_id, **kwargs) else: (data) = cls._delete_brand_by_id_with_http_info(brand_id, **kwargs) return data" 4931,"def get_brand_by_id(cls, brand_id, **kwargs): """"""Find Brand Return single instance of Brand by its ID. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.get_brand_by_id(brand_id, async=True) >>> result = thread.get() :param async bool :param str brand_id: ID of brand to return (required) :return: Brand If the method is called asynchronously, returns the request thread. """""" kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._get_brand_by_id_with_http_info(brand_id, **kwargs) else: (data) = cls._get_brand_by_id_with_http_info(brand_id, **kwargs) return data" 4932,"def list_all_brands(cls, **kwargs): """"""List Brands Return a list of Brands This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.list_all_brands(async=True) >>> result = thread.get() :param async bool :param int page: page number :param int size: page size :param str sort: page order :return: page[Brand] If the method is called asynchronously, returns the request thread. """""" kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._list_all_brands_with_http_info(**kwargs) else: (data) = cls._list_all_brands_with_http_info(**kwargs) return data" 4933,"def replace_brand_by_id(cls, brand_id, brand, **kwargs): """"""Replace Brand Replace all attributes of Brand This method makes a synchronous HTTP request by default. 
To make an asynchronous HTTP request, please pass async=True >>> thread = api.replace_brand_by_id(brand_id, brand, async=True) >>> result = thread.get() :param async bool :param str brand_id: ID of brand to replace (required) :param Brand brand: Attributes of brand to replace (required) :return: Brand If the method is called asynchronously, returns the request thread. """""" kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._replace_brand_by_id_with_http_info(brand_id, brand, **kwargs) else: (data) = cls._replace_brand_by_id_with_http_info(brand_id, brand, **kwargs) return data" 4934,"def update_brand_by_id(cls, brand_id, brand, **kwargs): """"""Update Brand Update attributes of Brand This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.update_brand_by_id(brand_id, brand, async=True) >>> result = thread.get() :param async bool :param str brand_id: ID of brand to update. (required) :param Brand brand: Attributes of brand to update. (required) :return: Brand If the method is called asynchronously, returns the request thread. """""" kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._update_brand_by_id_with_http_info(brand_id, brand, **kwargs) else: (data) = cls._update_brand_by_id_with_http_info(brand_id, brand, **kwargs) return data" 4935,"def filter_(*permissions, **kwargs): """""" Constructs a clause to filter all bearers or targets for a given bearer or target. """""" bearer = kwargs['bearer'] target = kwargs.get('target') bearer_cls = type_for(bearer) # We need a query object. There are many ways to get one. Either we can # be passed one, or we can make one from the session. We can either be # passed the session, or we can grab the session from the bearer passed. if 'query' in kwargs: query = kwargs['query'] elif 'session' in kwargs: query = kwargs['session'].query(target) else: query = object_session(bearer).query(target) getter = functools.partial( registry.retrieve, bearer=bearer_cls, target=target) try: # Generate a hash of {rulefn: permission} that we can use later # to collect all of the rules. if len(permissions): rules = {getter(permission=x): x for x in permissions} else: rules = {getter(): None} except KeyError: # No rules defined. Default to no permission. return query.filter(sql.false()) # Invoke all the rules and collect the results # Abusing reduce here to invoke each rule and send the return value (query) # from one rule to the next one. In this way the query becomes # increasingly decorated as it marches through the system. # q == query # r = (rulefn, permission) reducer = lambda q, r: r[0](permission=r[1], query=q, bearer=bearer) return reduce(reducer, six.iteritems(rules), query)" 4936,"def has(*permissions, **kwargs): """""" Checks if the passed bearer has the passed permissions (optionally on the passed target). """""" target = kwargs['target'] kwargs['target'] = type_for(target) # TODO: Predicate evaluation? return target in filter_(*permissions, **kwargs)" 4937,"def get_now_datetime_filestamp(longTime=False): """""" *A datetime stamp to be appended to the end of filenames: ``YYYYMMDDtHHMMSS``* **Key Arguments:** - ``longTime`` -- make time string longer (more chance of filenames being unique) **Return:** - ``now`` -- current time and date in filename format **Usage:** ..
code-block:: python from fundamentals.download import get_now_datetime_filestamp get_now_datetime_filestamp(longTime=False) #Out: '20160316t154635' get_now_datetime_filestamp(longTime=True) #Out: '20160316t154644133638' """""" ## > IMPORTS ## from datetime import datetime, date, time now = datetime.now() if longTime: now = now.strftime(""%Y%m%dt%H%M%S%f"") else: now = now.strftime(""%Y%m%dt%H%M%S"") return now" 4938,"def create_app(application, request_class=Request): """""" Create a WSGI application out of the given Minion app. Arguments: application (Application): a minion app request_class (callable): a class to use for constructing incoming requests out of the WSGI environment. It will be passed a single arg, the environ. By default, this is :class:`minion.request.WSGIRequest` if unprovided. """""" def wsgi(environ, start_response): response = application.serve( request=request_class(environ), path=environ.get(""PATH_INFO"", """"), ) start_response( response.status, [ (name, b"","".join(values)) for name, values in response.headers.canonicalized() ], ) return [response.content] return wsgi" 4939,"def get_store_profile_by_id(cls, store_profile_id, **kwargs): """"""Find StoreProfile Return single instance of StoreProfile by its ID. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.get_store_profile_by_id(store_profile_id, async=True) >>> result = thread.get() :param async bool :param str store_profile_id: ID of storeProfile to return (required) :return: StoreProfile If the method is called asynchronously, returns the request thread. """""" kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._get_store_profile_by_id_with_http_info(store_profile_id, **kwargs) else: (data) = cls._get_store_profile_by_id_with_http_info(store_profile_id, **kwargs) return data" 4940,"def replace_store_profile_by_id(cls, store_profile_id, store_profile, **kwargs): """"""Replace StoreProfile Replace all attributes of StoreProfile This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.replace_store_profile_by_id(store_profile_id, store_profile, async=True) >>> result = thread.get() :param async bool :param str store_profile_id: ID of storeProfile to replace (required) :param StoreProfile store_profile: Attributes of storeProfile to replace (required) :return: StoreProfile If the method is called asynchronously, returns the request thread. """""" kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._replace_store_profile_by_id_with_http_info(store_profile_id, store_profile, **kwargs) else: (data) = cls._replace_store_profile_by_id_with_http_info(store_profile_id, store_profile, **kwargs) return data" 4941,"def update_store_profile_by_id(cls, store_profile_id, store_profile, **kwargs): """"""Update StoreProfile Update attributes of StoreProfile This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.update_store_profile_by_id(store_profile_id, store_profile, async=True) >>> result = thread.get() :param async bool :param str store_profile_id: ID of storeProfile to update. (required) :param StoreProfile store_profile: Attributes of storeProfile to update. (required) :return: StoreProfile If the method is called asynchronously, returns the request thread. 
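For orientation, the create_app adapter above returns a plain WSGI callable, so it can be mounted on any WSGI server. A minimal sketch follows, assuming a Minion Application instance named app defined elsewhere and using the standard-library wsgiref server.

from wsgiref.simple_server import make_server

# app is assumed to be a minion Application with routes already registered.
wsgi_app = create_app(app)
server = make_server('localhost', 8000, wsgi_app)
server.serve_forever()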
"""""" kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._update_store_profile_by_id_with_http_info(store_profile_id, store_profile, **kwargs) else: (data) = cls._update_store_profile_by_id_with_http_info(store_profile_id, store_profile, **kwargs) return data" 4942,"def get_documentation(self, request, *args, **kw): """""" Generate the documentation. """""" ret = dict() ret['resource'] = self.name() ret['urls'] = self._get_url_doc() ret['description'] = self.__doc__ ret['representation'] = self._get_representation_doc() ret['methods'] = self._get_method_doc() return ret" 4943,"def _serialize_object(self, response_data, request): """""" Override to not serialize doc responses. """""" if self._is_doc_request(request): return response_data else: return super(DocumentedResource, self)._serialize_object( response_data, request)" 4944,"def _validate_output_data( self, original_res, serialized_res, formatted_res, request): """""" Override to not validate doc output. """""" if self._is_doc_request(request): return else: return super(DocumentedResource, self)._validate_output_data( original_res, serialized_res, formatted_res, request)" 4945,"def _get_method(self, request): """""" Override to check if this is a documentation request. """""" if self._is_doc_request(request): return self.get_documentation else: return super(DocumentedResource, self)._get_method(request)" 4946,"def _get_representation_doc(self): """""" Return documentation for the representation of the resource. """""" if not self.representation: return 'N/A' fields = {} for name, field in self.representation.fields.items(): fields[name] = self._get_field_doc(field) return fields" 4947,"def _get_field_doc(self, field): """""" Return documentation for a field in the representation. """""" fieldspec = dict() fieldspec['type'] = field.__class__.__name__ fieldspec['required'] = field.required fieldspec['validators'] = [{validator.__class__.__name__: validator.__dict__} for validator in field.validators] return fieldspec" 4948,"def _get_url_doc(self): """""" Return a list of URLs that map to this resource. """""" resolver = get_resolver(None) possibilities = resolver.reverse_dict.getlist(self) urls = [possibility[0] for possibility in possibilities] return urls" 4949,"def _get_method_doc(self): """""" Return method documentations. """""" ret = {} for method_name in self.methods: method = getattr(self, method_name, None) if method: ret[method_name] = method.__doc__ return ret" 4950,"def clean(df,error_rate = 0): """""" Superficially cleans data, i.e. changing simple things about formatting. Parameters: df - DataFrame DataFrame to clean error_rate - float {0 <= error_rate <= 1}, default 0 Maximum amount of errors/inconsistencies caused explicitly by cleaning, expressed as a percentage of total dataframe rows (0 = 0%, .5 = 50%, etc.) 
Ex: na values from coercing a column of data to numeric """""" df = df.copy() # Change colnames basics.clean_colnames(df) # Eventually use a more advanced function to clean colnames print('Changed colnames to {}'.format(df.columns)) # Remove extra whitespace obj_col_list = df.select_dtypes(include = 'object').columns for col_name in obj_col_list: df[col_name] = basics.col_strip(df,col_name) print(""Stripped extra whitespace from '{}'"".format(col_name)) # Coerce columns if possible for col_name in obj_col_list: new_dtype = coerce_col(df,col_name,error_rate) if new_dtype is not None: print(""Coerced '{}' to datatype '{}'"".format(col_name, new_dtype)) # Scrub columns obj_col_list = df.select_dtypes(include = 'object').columns for col_name in obj_col_list: scrubf, scrubb = smart_scrub(df,col_name,1-error_rate) if scrubf is not None or scrubb is not None: print(""Scrubbed '{}' from the front and '{}' from the back of column '{}'"" \ .format(scrubf,scrubb,col_name)) # Coerice columns if possible for col_name in obj_col_list: new_dtype = coerce_col(df,col_name,error_rate) if new_dtype is not None: print(""Coerced '{}' to datatype '{}'"".format(col_name, new_dtype)) return df" 4951,"def create_process(self, command, shell=True, stdout=None, stderr=None, env=None): """""" Execute a process using subprocess.Popen, setting the backend's DISPLAY """""" env = env if env is not None else dict(os.environ) env['DISPLAY'] = self.display return subprocess.Popen(command, shell=shell, stdout=stdout, stderr=stderr, env=env)" 4952,"def pause(self, instance_id, keep_provisioned=True): """"""shuts down the instance without destroying it. The AbstractCloudProvider class uses 'stop' to refer to destroying a VM, so use 'pause' to mean powering it down while leaving it allocated. :param str instance_id: instance identifier :return: None """""" try: if self._paused: log.debug(""node %s is already paused"", instance_id) return self._paused = True post_shutdown_action = 'Stopped' if keep_provisioned else \ 'StoppedDeallocated' result = self._subscription._sms.shutdown_role( service_name=self._cloud_service._name, deployment_name=self._cloud_service._name, role_name=self._qualified_name, post_shutdown_action=post_shutdown_action) self._subscription._wait_result(result) except Exception as exc: log.error(""error pausing instance %s: %s"", instance_id, exc) raise log.debug('paused instance(instance_id=%s)', instance_id)" 4953,"def restart(self, instance_id): """"""restarts a paused instance. :param str instance_id: instance identifier :return: None """""" try: if not self._paused: log.debug(""node %s is not paused, can't restart"", instance_id) return self._paused = False result = self._subscription._sms.start_role( service_name=self._cloud_service._name, deployment_name=self._cloud_service._name, role_name=instance_id) self._subscription._wait_result(result) except Exception as exc: log.error('error restarting instance %s: %s', instance_id, exc) raise log.debug('restarted instance(instance_id=%s)', instance_id)" 4954,"def start_instance( self, key_name, public_key_path, private_key_path, security_group, flavor, image, image_userdata, location=None, base_name=None, username=None, node_name=None, host_name=None, use_public_ips=None, wait_timeout=None, use_short_vm_names=None, n_cloud_services=None, n_storage_accounts=None, **kwargs): """"""Starts a new instance on the cloud using the given properties. Multiple instances might be started in different threads at the same time. 
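To make the clean() workflow above concrete, here is an illustrative call on a tiny pandas frame; the exact coercion and scrubbing results depend on the helper modules (basics, coerce_col, smart_scrub) that clean() delegates to, so treat this as a sketch rather than expected output.

import pandas as pd

raw = pd.DataFrame({
    'First Name': ['  alice', 'bob ', ' carol '],
    'Amount': ['10', '20', 'thirty'],
})
# Allow roughly one third of a column's values to be lost to coercion (NaN).
cleaned = clean(raw, error_rate=0.34)
print(cleaned.dtypes)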
The implementation should handle any problems regarding this itself. :return: str - instance id of the started instance """""" if self._start_failed: raise Exception('start_instance for node %s: failing due to' ' previous errors.' % node_name) index = None with self._resource_lock: # it'd be nice if elasticluster called something like # init_cluster() with all the args that will be the # same for every node created. But since it doesn't, handle that on # first start_instance call. if not self._cluster_prep_done: self._times['CLUSTER_START'] = time.time() self._config.setup( key_name, public_key_path, private_key_path, security_group, location, base_name=base_name, username=username, use_public_ips=use_public_ips, wait_timeout=wait_timeout, use_short_vm_names=use_short_vm_names, n_cloud_services=n_cloud_services, n_storage_accounts=n_storage_accounts, **kwargs) # we know we're starting the first node, so create global # requirements now self._create_global_reqs() if self._start_failed: return None # this will allow vms to be created self._times['SETUP_DONE'] = time.time() self._cluster_prep_done = True # absolute node index in cluster (0..n-1) determines what # subscription, cloud service, storage # account, etc. this VM will use. Create the vm and add it to # its cloud service, then try to start it. index = self._n_instances v_m = AzureVM( self._config, index, flavor=flavor, image=image, node_name=node_name, host_name=host_name, image_userdata=image_userdata) v_m._cloud_service._instances[v_m._qualified_name] = v_m try: v_m._cloud_service._start_vm(v_m) except Exception: log.error(traceback.format_exc()) log.error(""setting start_failed flag. Will not "" ""try to start further nodes."") self._start_failed = True return None log.debug('started instance %s', v_m._qualified_name) if index == self._config._n_vms_requested - 1: # all nodes started self._times['NODES_STARTED'] = time.time() self._times['SETUP_ELAPSED'] = self._times['SETUP_DONE'] - \ self._times['CLUSTER_START'] self._times['NODE_START_ELAPSED'] = self._times['NODES_STARTED']\ - self._times['SETUP_DONE'] self._times['CLUSTER_START_ELAPSED'] = \ self._times['SETUP_ELAPSED'] + \ self._times['NODE_START_ELAPSED'] log.debug(""setup time: %.1f sec"", self._times['SETUP_ELAPSED']) log.debug(""node start time: %.1f sec (%.1f sec per vm)"", self._times['NODE_START_ELAPSED'], self._times['NODE_START_ELAPSED'] / self._config._n_vms_requested) log.debug(""total cluster start time: %.1f sec (%.1f sec per vm)"", self._times['CLUSTER_START_ELAPSED'], self._times['CLUSTER_START_ELAPSED'] / self._config._n_vms_requested) # pause here to try to address the fact that Ansible setup fails # more often on the first try than subsequent tries time.sleep(_retry_sleep()) self._save_or_update() # store our state return v_m._qualified_name" 4955,"def stop_instance(self, instance_id): """"""Stops the instance gracefully. :param str instance_id: instance identifier :return: None """""" self._restore_from_storage(instance_id) if self._start_failed: raise Exception('stop_instance for node %s: failing due to' ' previous errors.' 
% instance_id) with self._resource_lock: try: v_m = self._qualified_name_to_vm(instance_id) if not v_m: err = ""stop_instance: can't find instance %s"" % instance_id log.error(err) raise Exception(err) v_m._cloud_service._stop_vm(v_m) # note: self._n_instances is a derived property, doesn't need # to be updated if self._n_instances == 0: log.debug('last instance deleted, destroying ' 'global resources') self._delete_global_reqs() self._delete_cloud_provider_storage() except Exception as exc: log.error(traceback.format_exc()) log.error(""error stopping instance %s: %s"", instance_id, exc) raise log.debug('stopped instance %s', instance_id)" 4956,"def get_ips(self, instance_id): """"""Retrieves the private and public ip addresses for a given instance. Note: Azure normally provides access to vms from a shared load balancer IP and mapping of ssh ports on the vms. So by default, the Azure provider returns strings of the form 'ip:port'. However, 'stock' elasticluster and ansible don't support this, so _use_public_ips uses Azure PublicIPs to expose each vm on the internet with its own IP and using the standard SSH port. :return: list (IPs) """""" self._restore_from_storage(instance_id) if self._start_failed: raise Exception('get_ips for node %s: failing due to' ' previous errors.' % instance_id) ret = list() v_m = self._qualified_name_to_vm(instance_id) if not v_m: raise Exception(""Can't find instance_id %s"" % instance_id) if self._config._use_public_ips: ret.append(v_m._public_ip) else: ret.append(""%s:%s"" % (v_m._public_ip, v_m._ssh_port)) log.debug('get_ips (instance %s) returning %s', instance_id, ', '.join(ret)) return ret" 4957,"def is_instance_running(self, instance_id): """"""Checks if the instance is up and running. :param str instance_id: instance identifier :return: bool - True if running, False otherwise """""" self._restore_from_storage(instance_id) if self._start_failed: raise Exception('is_instance_running for node %s: failing due to' ' previous errors.' % instance_id) try: v_m = self._qualified_name_to_vm(instance_id) if not v_m: raise Exception(""Can't find instance_id %s"" % instance_id) except Exception: log.error(traceback.format_exc()) raise return v_m._power_state == 'Started'" 4958,"def _save_or_update(self): """"""Save or update the private state needed by the cloud provider. """""" with self._resource_lock: if not self._config or not self._config._storage_path: raise Exception(""self._config._storage path is undefined"") if not self._config._base_name: raise Exception(""self._config._base_name is undefined"") if not os.path.exists(self._config._storage_path): os.makedirs(self._config._storage_path) path = self._get_cloud_provider_storage_path() with open(path, 'wb') as storage: pickle.dump(self._config, storage, pickle.HIGHEST_PROTOCOL) pickle.dump(self._subscriptions, storage, pickle.HIGHEST_PROTOCOL)" 4959,"def split_iter(src, sep=None, maxsplit=None): """"""Splits an iterable based on a separator, *sep*, a max of *maxsplit* times (no max by default). *sep* can be: * a single value * an iterable of separators * a single-argument callable that returns True when a separator is encountered ``split_iter()`` yields lists of non-separator values. A separator will never appear in the output. >>> list(split_iter(['hi', 'hello', None, None, 'sup', None, 'soap', None])) [['hi', 'hello'], ['sup'], ['soap']] Note that ``split_iter`` is based on :func:`str.split`, so if *sep* is ``None``, ``split()`` **groups** separators. 
If empty lists are desired between two contiguous ``None`` values, simply use ``sep=[None]``: >>> list(split_iter(['hi', 'hello', None, None, 'sup', None])) [['hi', 'hello'], ['sup']] >>> list(split_iter(['hi', 'hello', None, None, 'sup', None], sep=[None])) [['hi', 'hello'], [], ['sup'], []] Using a callable separator: >>> falsy_sep = lambda x: not x >>> list(split_iter(['hi', 'hello', None, '', 'sup', False], falsy_sep)) [['hi', 'hello'], [], ['sup'], []] See :func:`split` for a list-returning version. """""" if not is_iterable(src): raise TypeError('expected an iterable') if maxsplit is not None: maxsplit = int(maxsplit) if maxsplit == 0: yield [src] return if callable(sep): sep_func = sep elif not is_scalar(sep): sep = frozenset(sep) sep_func = lambda x: x in sep else: sep_func = lambda x: x == sep cur_group = [] split_count = 0 for s in src: if maxsplit is not None and split_count >= maxsplit: sep_func = lambda x: False if sep_func(s): if sep is None and not cur_group: # If sep is none, str.split() ""groups"" separators # check the str.split() docs for more info continue split_count += 1 yield cur_group cur_group = [] else: cur_group.append(s) if cur_group or sep is not None: yield cur_group return" 4960,"def chunked(src, size, count=None, **kw): """"""Returns a list of *count* chunks, each with *size* elements, generated from iterable *src*. If *src* is not evenly divisible by *size*, the final chunk will have fewer than *size* elements. Provide the *fill* keyword argument to provide a pad value and enable padding, otherwise no padding will take place. >>> chunked(range(10), 3) [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]] >>> chunked(range(10), 3, fill=None) [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, None, None]] >>> chunked(range(10), 3, count=2) [[0, 1, 2], [3, 4, 5]] See :func:`chunked_iter` for more info. """""" chunk_iter = chunked_iter(src, size, **kw) if count is None: return list(chunk_iter) else: return list(itertools.islice(chunk_iter, count))" 4961,"def chunked_iter(src, size, **kw): """"""Generates *size*-sized chunks from *src* iterable. Unless the optional *fill* keyword argument is provided, iterables not even divisible by *size* will have a final chunk that is smaller than *size*. >>> list(chunked_iter(range(10), 3)) [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]] >>> list(chunked_iter(range(10), 3, fill=None)) [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, None, None]] Note that ``fill=None`` in fact uses ``None`` as the fill value. """""" # TODO: add count kwarg? if not is_iterable(src): raise TypeError('expected an iterable') size = int(size) if size <= 0: raise ValueError('expected a positive integer chunk size') do_fill = True try: fill_val = kw.pop('fill') except KeyError: do_fill = False fill_val = None if kw: raise ValueError('got unexpected keyword arguments: %r' % kw.keys()) if not src: return postprocess = lambda chk: chk if isinstance(src, basestring): postprocess = lambda chk, _sep=type(src)(): _sep.join(chk) src_iter = iter(src) while True: cur_chunk = list(itertools.islice(src_iter, size)) if not cur_chunk: break lc = len(cur_chunk) if lc < size and do_fill: cur_chunk[lc:] = [fill_val] * (size - lc) yield postprocess(cur_chunk) return" 4962,"def windowed_iter(src, size): """"""Returns tuples with length *size* which represent a sliding window over iterable *src*. >>> list(windowed_iter(range(7), 3)) [(0, 1, 2), (1, 2, 3), (2, 3, 4), (3, 4, 5), (4, 5, 6)] If the iterable is too short to make a window of length *size*, then no window tuples are returned. 
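A common use of chunked_iter beyond the doctests above is batching a stream for bulk writes. A minimal sketch, where bulk_insert is a hypothetical name standing in for whatever bulk-write call an application uses:

def bulk_insert(rows, writer, batch_size=500):
    # Consume the row stream in fixed-size batches; the final batch may be smaller.
    for batch in chunked_iter(rows, batch_size):
        writer(batch)

batches = []
bulk_insert(({'id': i} for i in range(1200)), batches.append, batch_size=500)
# len(batches) == 3, holding 500 + 500 + 200 rows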
>>> list(windowed_iter(range(3), 5)) [] """""" # TODO: lists? (for consistency) tees = itertools.tee(src, size) try: for i, t in enumerate(tees): for _ in xrange(i): next(t) except StopIteration: return izip([]) return izip(*tees)" 4963,"def xfrange(stop, start=None, step=1.0): """"""Same as :func:`frange`, but generator-based instead of returning a list. >>> tuple(xfrange(1, 3, step=0.75)) (1.0, 1.75, 2.5) See :func:`frange` for more details. """""" if not step: raise ValueError('step must be non-zero') if start is None: start, stop = 0.0, stop * 1.0 else: # swap when all args are used stop, start = start * 1.0, stop * 1.0 cur = start while cur < stop: yield cur cur += step" 4964,"def frange(stop, start=None, step=1.0): """"""A :func:`range` clone for float-based ranges. >>> frange(5) [0.0, 1.0, 2.0, 3.0, 4.0] >>> frange(6, step=1.25) [0.0, 1.25, 2.5, 3.75, 5.0] >>> frange(100.5, 101.5, 0.25) [100.5, 100.75, 101.0, 101.25] >>> frange(5, 0) [] >>> frange(5, 0, step=-1.25) [5.0, 3.75, 2.5, 1.25] """""" if not step: raise ValueError('step must be non-zero') if start is None: start, stop = 0.0, stop * 1.0 else: # swap when all args are used stop, start = start * 1.0, stop * 1.0 count = int(math.ceil((stop - start) / step)) ret = [None] * count if not ret: return ret ret[0] = start for i in xrange(1, count): ret[i] = ret[i - 1] + step return ret" 4965,"def backoff(start, stop, count=None, factor=2.0, jitter=False): """"""Returns a list of geometrically-increasing floating-point numbers, suitable for usage with `exponential backoff`_. Exactly like :func:`backoff_iter`, but without the ``'repeat'`` option for *count*. See :func:`backoff_iter` for more details. .. _exponential backoff: https://en.wikipedia.org/wiki/Exponential_backoff >>> backoff(1, 10) [1.0, 2.0, 4.0, 8.0, 10.0] """""" if count == 'repeat': raise ValueError(""'repeat' supported in backoff_iter, not backoff"") return list(backoff_iter(start, stop, count=count, factor=factor, jitter=jitter))" 4966,"def backoff_iter(start, stop, count=None, factor=2.0, jitter=False): """"""Generates a sequence of geometrically-increasing floats, suitable for usage with `exponential backoff`_. Starts with *start*, increasing by *factor* until *stop* is reached, optionally stopping iteration once *count* numbers are yielded. *factor* defaults to 2. In general retrying with properly-configured backoff creates a better-behaved component for a larger service ecosystem. .. _exponential backoff: https://en.wikipedia.org/wiki/Exponential_backoff >>> list(backoff_iter(1.0, 10.0, count=5)) [1.0, 2.0, 4.0, 8.0, 10.0] >>> list(backoff_iter(1.0, 10.0, count=8)) [1.0, 2.0, 4.0, 8.0, 10.0, 10.0, 10.0, 10.0] >>> list(backoff_iter(0.25, 100.0, factor=10)) [0.25, 2.5, 25.0, 100.0] A simplified usage example: .. code-block:: python for timeout in backoff_iter(0.25, 5.0): try: res = network_call() break except Exception as e: log(e) time.sleep(timeout) An enhancement for large-scale systems would be to add variation, or *jitter*, to timeout values. This is done to avoid a thundering herd on the receiving end of the network call. Finally, for *count*, the special value ``'repeat'`` can be passed to continue yielding indefinitely. Args: start (float): Positive number for baseline. stop (float): Positive number for maximum. count (int): Number of steps before stopping iteration. Defaults to the number of steps between *start* and *stop*. Pass the string, `'repeat'`, to continue iteration indefinitely. factor (float): Rate of exponential increase. 
Defaults to `2.0`, e.g., `[1, 2, 4, 8, 16]`. jitter (float): A factor between `-1.0` and `1.0`, used to uniformly randomize and thus spread out timeouts in a distributed system, avoiding rhythm effects. Positive values use the base backoff curve as a maximum, negative values use the curve as a minimum. Set to 1.0 or `True` for a jitter approximating Ethernet's time-tested backoff solution. Defaults to `False`. """""" start = float(start) stop = float(stop) factor = float(factor) if start < 0.0: raise ValueError('expected start >= 0, not %r' % start) if factor < 1.0: raise ValueError('expected factor >= 1.0, not %r' % factor) if stop == 0.0: raise ValueError('expected stop >= 0') if stop < start: raise ValueError('expected stop >= start, not %r' % stop) if count is None: denom = start if start else 1 count = 1 + math.ceil(math.log(stop/denom, factor)) count = count if start else count + 1 if count != 'repeat' and count < 0: raise ValueError('count must be positive or ""repeat"", not %r' % count) if jitter: jitter = float(jitter) if not (-1.0 <= jitter <= 1.0): raise ValueError('expected jitter -1 <= j <= 1, not: %r' % jitter) cur, i = start, 0 while count == 'repeat' or i < count: if not jitter: cur_ret = cur elif jitter: cur_ret = cur - (cur * jitter * random.random()) yield cur_ret i += 1 if cur == 0: cur = 1 elif cur < stop: cur *= factor if cur > stop: cur = stop return" 4967,"def bucketize(src, key=None, value_transform=None, key_filter=None): """"""Group values in the *src* iterable by the value returned by *key*, which defaults to :class:`bool`, grouping values by truthiness. >>> bucketize(range(5)) {False: [0], True: [1, 2, 3, 4]} >>> is_odd = lambda x: x % 2 == 1 >>> bucketize(range(5), is_odd) {False: [0, 2, 4], True: [1, 3]} Value lists are not deduplicated: >>> bucketize([None, None, None, 'hello']) {False: [None, None, None], True: ['hello']} Bucketize into more than 3 groups >>> bucketize(range(10), lambda x: x % 3) {0: [0, 3, 6, 9], 1: [1, 4, 7], 2: [2, 5, 8]} ``bucketize`` has a couple of advanced options useful in certain cases. *value_transform* can be used to modify values as they are added to buckets, and *key_filter* will allow excluding certain buckets from being collected. >>> bucketize(range(5), value_transform=lambda x: x*x) {False: [0], True: [1, 4, 9, 16]} >>> bucketize(range(10), key=lambda x: x % 3, key_filter=lambda k: k % 3 != 1) {0: [0, 3, 6, 9], 2: [2, 5, 8]} Note in some of these examples there were at most two keys, ``True`` and ``False``, and each key present has a list with at least one item. See :func:`partition` for a version specialized for binary use cases. """""" if not is_iterable(src): raise TypeError('expected an iterable') if key is None: key = bool if not callable(key): raise TypeError('expected callable key function') if value_transform is None: value_transform = lambda x: x if not callable(value_transform): raise TypeError('expected callable value transform function') ret = {} for val in src: key_of_val = key(val) if key_filter is None or key_filter(key_of_val): ret.setdefault(key_of_val, []).append(value_transform(val)) return ret" 4968,"def partition(src, key=None): """"""No relation to :meth:`str.partition`, ``partition`` is like :func:`bucketize`, but for added convenience returns a tuple of ``(truthy_values, falsy_values)``. >>> nonempty, empty = partition(['', '', 'hi', '', 'bye']) >>> nonempty ['hi', 'bye'] *key* defaults to :class:`bool`, but can be carefully overridden to use any function that returns either ``True`` or ``False``. 
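Building on the bucketize helper defined above, one practical pattern is grouping file names by extension while filtering out unwanted buckets; this sketch is illustrative and the sample paths are made up.

import os

paths = ['a.py', 'b.txt', 'c.py', 'README', 'd.csv']
by_ext = bucketize(
    paths,
    key=lambda p: os.path.splitext(p)[1],
    key_filter=lambda ext: ext != '',   # drop files with no extension
)
# by_ext == {'.py': ['a.py', 'c.py'], '.txt': ['b.txt'], '.csv': ['d.csv']}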
>>> import string >>> is_digit = lambda x: x in string.digits >>> decimal_digits, hexletters = partition(string.hexdigits, is_digit) >>> ''.join(decimal_digits), ''.join(hexletters) ('0123456789', 'abcdefABCDEF') """""" bucketized = bucketize(src, key) return bucketized.get(True, []), bucketized.get(False, [])" 4969,"def unique_iter(src, key=None): """"""Yield unique elements from the iterable, *src*, based on *key*, in the order in which they first appeared in *src*. >>> repetitious = [1, 2, 3] * 10 >>> list(unique_iter(repetitious)) [1, 2, 3] By default, *key* is the object itself, but *key* can either be a callable or, for convenience, a string name of the attribute on which to uniqueify objects, falling back on identity when the attribute is not present. >>> pleasantries = ['hi', 'hello', 'ok', 'bye', 'yes'] >>> list(unique_iter(pleasantries, key=lambda x: len(x))) ['hi', 'hello', 'bye'] """""" if not is_iterable(src): raise TypeError('expected an iterable, not %r' % type(src)) if key is None: key_func = lambda x: x elif callable(key): key_func = key elif isinstance(key, basestring): key_func = lambda x: getattr(x, key, x) else: raise TypeError('""key"" expected a string or callable, not %r' % key) seen = set() for i in src: k = key_func(i) if k not in seen: seen.add(k) yield i return" 4970,"def one(src, default=None, key=None): """"""Along the same lines as builtins, :func:`all` and :func:`any`, and similar to :func:`first`, ``one()`` returns the single object in the given iterable *src* that evaluates to ``True``, as determined by callable *key*. If unset, *key* defaults to :class:`bool`. If no such objects are found, *default* is returned. If *default* is not passed, ``None`` is returned. If *src* has more than one object that evaluates to ``True``, or if there is no object that fulfills such condition, return *default*. It's like an `XOR`_ over an iterable. >>> one((True, False, False)) True >>> one((True, False, True)) >>> one((0, 0, 'a')) 'a' >>> one((0, False, None)) >>> one((True, True), default=False) False >>> bool(one(('', 1))) True >>> one((10, 20, 30, 42), key=lambda i: i > 40) 42 See `Martín Gaitán's original repo`_ for further use cases. .. _Martín Gaitán's original repo: https://github.com/mgaitan/one .. _XOR: https://en.wikipedia.org/wiki/Exclusive_or """""" ones = list(itertools.islice(filter(key, src), 2)) return ones[0] if len(ones) == 1 else default" 4971,"def same(iterable, ref=_UNSET): """"""``same()`` returns ``True`` when all values in *iterable* are equal to one another, or optionally a reference value, *ref*. Similar to :func:`all` and :func:`any` in that it evaluates an iterable and returns a :class:`bool`. ``same()`` returns ``True`` for empty iterables. >>> same([]) True >>> same([1]) True >>> same(['a', 'a', 'a']) True >>> same(range(20)) False >>> same([[], []]) True >>> same([[], []], ref='test') False """""" iterator = iter(iterable) if ref is _UNSET: ref = next(iterator, ref) return all(val == ref for val in iterator)" 4972,"def remap(root, visit=default_visit, enter=default_enter, exit=default_exit, **kwargs): """"""The remap (""recursive map"") function is used to traverse and transform nested structures. Lists, tuples, sets, and dictionaries are just a few of the data structures nested into heterogenous tree-like structures that are so common in programming. Unfortunately, Python's built-in ways to manipulate collections are almost all flat. 
List comprehensions may be fast and succinct, but they do not recurse, making it tedious to apply quick changes or complex transforms to real-world data. remap goes where list comprehensions cannot. Here's an example of removing all Nones from some data: >>> from pprint import pprint >>> reviews = {'Star Trek': {'TNG': 10, 'DS9': 8.5, 'ENT': None}, ... 'Babylon 5': 6, 'Dr. Who': None} >>> pprint(remap(reviews, lambda p, k, v: v is not None)) {'Babylon 5': 6, 'Star Trek': {'DS9': 8.5, 'TNG': 10}} Notice how both Nones have been removed despite the nesting in the dictionary. Not bad for a one-liner, and that's just the beginning. See `this remap cookbook`_ for more delicious recipes. .. _this remap cookbook: http://sedimental.org/remap.html remap takes four main arguments: the object to traverse and three optional callables which determine how the remapped object will be created. Args: root: The target object to traverse. By default, remap supports iterables like :class:`list`, :class:`tuple`, :class:`dict`, and :class:`set`, but any object traversable by *enter* will work. visit (callable): This function is called on every item in *root*. It must accept three positional arguments, *path*, *key*, and *value*. *path* is simply a tuple of parents' keys. *visit* should return the new key-value pair. It may also return ``True`` as shorthand to keep the old item unmodified, or ``False`` to drop the item from the new structure. *visit* is called after *enter*, on the new parent. The *visit* function is called for every item in root, including duplicate items. For traversable values, it is called on the new parent object, after all its children have been visited. The default visit behavior simply returns the key-value pair unmodified. enter (callable): This function controls which items in *root* are traversed. It accepts the same arguments as *visit*: the path, the key, and the value of the current item. It returns a pair of the blank new parent, and an iterator over the items which should be visited. If ``False`` is returned instead of an iterator, the value will not be traversed. The *enter* function is only called once per unique value. The default enter behavior support mappings, sequences, and sets. Strings and all other iterables will not be traversed. exit (callable): This function determines how to handle items once they have been visited. It gets the same three arguments as the other functions -- *path*, *key*, *value* -- plus two more: the blank new parent object returned from *enter*, and a list of the new items, as remapped by *visit*. Like *enter*, the *exit* function is only called once per unique value. The default exit behavior is to simply add all new items to the new parent, e.g., using :meth:`list.extend` and :meth:`dict.update` to add to the new parent. Immutable objects, such as a :class:`tuple` or :class:`namedtuple`, must be recreated from scratch, but use the same type as the new parent passed back from the *enter* function. reraise_visit (bool): A pragmatic convenience for the *visit* callable. When set to ``False``, remap ignores any errors raised by the *visit* callback. Items causing exceptions are kept. See examples for more details. remap is designed to cover the majority of cases with just the *visit* callable. While passing in multiple callables is very empowering, remap is designed so very few cases should require passing more than one function. When passing *enter* and *exit*, it's common and easiest to build on the default behavior. 
Simply add ``from boltons.iterutils import default_enter`` (or ``default_exit``), and have your enter/exit function call the default behavior before or after your custom logic. See `this example`_. Duplicate and self-referential objects (aka reference loops) are automatically handled internally, `as shown here`_. .. _this example: http://sedimental.org/remap.html#sort_all_lists .. _as shown here: http://sedimental.org/remap.html#corner_cases """""" # TODO: improve argument formatting in sphinx doc # TODO: enter() return (False, items) to continue traverse but cancel copy? if not callable(visit): raise TypeError('visit expected callable, not: %r' % visit) if not callable(enter): raise TypeError('enter expected callable, not: %r' % enter) if not callable(exit): raise TypeError('exit expected callable, not: %r' % exit) reraise_visit = kwargs.pop('reraise_visit', True) if kwargs: raise TypeError('unexpected keyword arguments: %r' % kwargs.keys()) path, registry, stack = (), {}, [(None, root)] new_items_stack = [] while stack: key, value = stack.pop() id_value = id(value) if key is _REMAP_EXIT: key, new_parent, old_parent = value id_value = id(old_parent) path, new_items = new_items_stack.pop() value = exit(path, key, old_parent, new_parent, new_items) registry[id_value] = value if not new_items_stack: continue elif id_value in registry: value = registry[id_value] else: res = enter(path, key, value) try: new_parent, new_items = res except TypeError: # TODO: handle False? raise TypeError('enter should return a tuple of (new_parent,' ' items_iterator), not: %r' % res) if new_items is not False: # traverse unless False is explicitly passed registry[id_value] = new_parent new_items_stack.append((path, [])) if value is not root: path += (key,) stack.append((_REMAP_EXIT, (key, new_parent, value))) if new_items: stack.extend(reversed(list(new_items))) continue if visit is _orig_default_visit: # avoid function call overhead by inlining identity operation visited_item = (key, value) else: try: visited_item = visit(path, key, value) except Exception: if reraise_visit: raise visited_item = True if visited_item is False: continue # drop elif visited_item is True: visited_item = (key, value) # TODO: typecheck? # raise TypeError('expected (key, value) from visit(),' # ' not: %r' % visited_item) try: new_items_stack[-1][1].append(visited_item) except IndexError: raise TypeError('expected remappable root, not: %r' % root) return value" 4973,"def get_path(root, path, default=_UNSET): """"""Retrieve a value from a nested object via a tuple representing the lookup path. >>> root = {'a': {'b': {'c': [[1], [2], [3]]}}} >>> get_path(root, ('a', 'b', 'c', 2, 0)) 3 The path format is intentionally consistent with that of :func:`remap`. One of get_path's chief aims is improved error messaging. EAFP is great, but the error messages are not. For instance, ``root['a']['b']['c'][2][1]`` gives back ``IndexError: list index out of range`` What went out of range where? get_path currently raises ``PathAccessError: could not access 2 from path ('a', 'b', 'c', 2, 1), got error: IndexError('list index out of range',)``, a subclass of IndexError and KeyError. You can also pass a default that covers the entire operation, should the lookup fail at any level. Args: root: The target nesting of dictionaries, lists, or other objects supporting ``__getitem__``. path (tuple): A list of strings and integers to be successively looked up within *root*. default: The value to be returned should any ``PathAccessError`` exceptions be raised. 
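The enter/exit extension point that the remap docstring describes can be seen in a small sketch: sort every list in a nested structure by post-processing each rebuilt parent with a custom exit that defers to default_exit first (default_exit is the same hook remap uses by default above).

def sort_lists_exit(path, key, old_parent, new_parent, new_items):
    # Let the default behavior rebuild the container, then sort it if it is a list.
    ret = default_exit(path, key, old_parent, new_parent, new_items)
    if isinstance(ret, list):
        ret.sort()
    return ret

data = {'b': [3, 1, 2], 'a': {'nested': [9, 7, 8]}}
sorted_data = remap(data, exit=sort_lists_exit)
# sorted_data == {'b': [1, 2, 3], 'a': {'nested': [7, 8, 9]}}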
"""""" if isinstance(path, basestring): path = path.split('.') cur = root try: for seg in path: try: cur = cur[seg] except (KeyError, IndexError) as exc: raise PathAccessError(exc, seg, path) except TypeError as exc: # either string index in a list, or a parent that # doesn't support indexing try: seg = int(seg) cur = cur[seg] except (ValueError, KeyError, IndexError, TypeError): if not is_iterable(cur): exc = TypeError('%r object is not indexable' % type(cur).__name__) raise PathAccessError(exc, seg, path) except PathAccessError: if default is _UNSET: raise return default return cur" 4974,"def research(root, query=lambda p, k, v: True, reraise=False): """"""The :func:`research` function uses :func:`remap` to recurse over any data nested in *root*, and find values which match a given criterion, specified by the *query* callable. Results are returned as a list of ``(path, value)`` pairs. The paths are tuples in the same format accepted by :func:`get_path`. This can be useful for comparing values nested in two or more different structures. Here's a simple example that finds all integers: >>> root = {'a': {'b': 1, 'c': (2, 'd', 3)}, 'e': None} >>> res = research(root, query=lambda p, k, v: isinstance(v, int)) >>> print(sorted(res)) [(('a', 'b'), 1), (('a', 'c', 0), 2), (('a', 'c', 2), 3)] Note how *query* follows the same, familiar ``path, key, value`` signature as the ``visit`` and ``enter`` functions on :func:`remap`, and returns a :class:`bool`. Args: root: The target object to search. Supports the same types of objects as :func:`remap`, including :class:`list`, :class:`tuple`, :class:`dict`, and :class:`set`. query (callable): The function called on every object to determine whether to include it in the search results. The callable must accept three arguments, *path*, *key*, and *value*, commonly abbreviated *p*, *k*, and *v*, same as *enter* and *visit* from :func:`remap`. reraise (bool): Whether to reraise exceptions raised by *query* or to simply drop the result that caused the error. With :func:`research` it's easy to inspect the details of a data structure, like finding values that are at a certain depth (using ``len(p)``) and much more. If more advanced functionality is needed, check out the code and make your own :func:`remap` wrapper, and consider `submitting a patch`_! .. _submitting a patch: https://github.com/mahmoud/boltons/pulls """""" ret = [] if not callable(query): raise TypeError('query expected callable, not: %r' % query) def enter(path, key, value): try: if query(path, key, value): ret.append((path + (key,), value)) except Exception: if reraise: raise return default_enter(path, key, value) remap(root, enter=enter) return ret" 4975,"def unflatten(data, separator='.', replace=True): ''' Expand all compound keys (at any depth) into nested dicts In [13]: d = {'test.test2': {'k1.k2': 'val'}} In [14]: flange.expand(d) Out[14]: {'test.test2': {'k1': {'k2': 'val'}}} :param data: input dict :param separator: separator in compound keys :param replace: if true, remove the compound key. 
Otherwise the value will exist under the compound and expanded key :return: copy of input dict with expanded keys ''' if not separator: return data return remap({'temp':data}, visit=lambda p, k, v: __expand_keys(k, v, separator, replace))['temp']" 4976,"def __query(p, k, v, accepted_keys=None, required_values=None, path=None, exact=True): """""" Query function given to visit method :param p: visited path in tuple form :param k: visited key :param v: visited value :param accepted_keys: list of keys where one must match k to satisfy query. :param required_values: list of values where one must match v to satisfy query :param path: exact path in tuple form that must match p to satisfy query :param exact: if True then key and value match uses contains function instead of == :return: True if all criteria are satisfied, otherwise False """""" # if not k: # print '__query p k:', p, k # print p, k, accepted_keys, required_values, path, exact def as_values_iterable(v): if isinstance(v, dict): return v.values() elif isinstance(v, six.string_types): return [v] else: # assume is already some iterable type return v if path and path != p: return False if accepted_keys: if isinstance(accepted_keys, six.string_types): accepted_keys = [accepted_keys] if len([akey for akey in accepted_keys if akey == k or (not exact and akey in k)]) == 0: return False if required_values: if isinstance(required_values, six.string_types): required_values = [required_values] # Find all terms in the vfilter that have a match somewhere in the values of the v dict. If the # list is shorter than vfilter then some terms did not match and this v fails the test. if len(required_values) > len([term for term in required_values for nv in as_values_iterable(v) if term == nv or (not exact and term in nv)]): return False return True" 4977,"def create_customer_group(cls, customer_group, **kwargs): """"""Create CustomerGroup Create a new CustomerGroup This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.create_customer_group(customer_group, async=True) >>> result = thread.get() :param async bool :param CustomerGroup customer_group: Attributes of customerGroup to create (required) :return: CustomerGroup If the method is called asynchronously, returns the request thread. """""" kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._create_customer_group_with_http_info(customer_group, **kwargs) else: (data) = cls._create_customer_group_with_http_info(customer_group, **kwargs) return data" 4978,"def delete_customer_group_by_id(cls, customer_group_id, **kwargs): """"""Delete CustomerGroup Delete an instance of CustomerGroup by its ID. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.delete_customer_group_by_id(customer_group_id, async=True) >>> result = thread.get() :param async bool :param str customer_group_id: ID of customerGroup to delete. (required) :return: None If the method is called asynchronously, returns the request thread. """""" kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._delete_customer_group_by_id_with_http_info(customer_group_id, **kwargs) else: (data) = cls._delete_customer_group_by_id_with_http_info(customer_group_id, **kwargs) return data" 4979,"def get_customer_group_by_id(cls, customer_group_id, **kwargs): """"""Find CustomerGroup Return single instance of CustomerGroup by its ID. 
This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.get_customer_group_by_id(customer_group_id, async=True) >>> result = thread.get() :param async bool :param str customer_group_id: ID of customerGroup to return (required) :return: CustomerGroup If the method is called asynchronously, returns the request thread. """""" kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._get_customer_group_by_id_with_http_info(customer_group_id, **kwargs) else: (data) = cls._get_customer_group_by_id_with_http_info(customer_group_id, **kwargs) return data" 4980,"def list_all_customer_groups(cls, **kwargs): """"""List CustomerGroups Return a list of CustomerGroups This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.list_all_customer_groups(async=True) >>> result = thread.get() :param async bool :param int page: page number :param int size: page size :param str sort: page order :return: page[CustomerGroup] If the method is called asynchronously, returns the request thread. """""" kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._list_all_customer_groups_with_http_info(**kwargs) else: (data) = cls._list_all_customer_groups_with_http_info(**kwargs) return data" 4981,"def replace_customer_group_by_id(cls, customer_group_id, customer_group, **kwargs): """"""Replace CustomerGroup Replace all attributes of CustomerGroup This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.replace_customer_group_by_id(customer_group_id, customer_group, async=True) >>> result = thread.get() :param async bool :param str customer_group_id: ID of customerGroup to replace (required) :param CustomerGroup customer_group: Attributes of customerGroup to replace (required) :return: CustomerGroup If the method is called asynchronously, returns the request thread. """""" kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._replace_customer_group_by_id_with_http_info(customer_group_id, customer_group, **kwargs) else: (data) = cls._replace_customer_group_by_id_with_http_info(customer_group_id, customer_group, **kwargs) return data" 4982,"def update_customer_group_by_id(cls, customer_group_id, customer_group, **kwargs): """"""Update CustomerGroup Update attributes of CustomerGroup This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.update_customer_group_by_id(customer_group_id, customer_group, async=True) >>> result = thread.get() :param async bool :param str customer_group_id: ID of customerGroup to update. (required) :param CustomerGroup customer_group: Attributes of customerGroup to update. (required) :return: CustomerGroup If the method is called asynchronously, returns the request thread. """""" kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._update_customer_group_by_id_with_http_info(customer_group_id, customer_group, **kwargs) else: (data) = cls._update_customer_group_by_id_with_http_info(customer_group_id, customer_group, **kwargs) return data" 4983,"def _connect(self): """"""Connects to the cloud web services. If this is the first authentication, a web browser will be started to authenticate against google and provide access to elasticluster. :return: A Resource object with methods for interacting with the service. 
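The CustomerGroup helpers above (like the Brand, StoreProfile and FreeShipping ones elsewhere in this section) all share one calling convention: synchronous by default, asynchronous when an async flag is passed, in which case a thread-like object is returned and .get() blocks for the result. A hedged sketch of that pattern follows; the class name CustomerGroup and its import path are assumptions, and async is passed via dict unpacking because it is a reserved word on newer Pythons.

# Synchronous call (default): returns the deserialized page of results.
groups = CustomerGroup.list_all_customer_groups(page=1, size=20)

# Asynchronous call: returns a thread-like object whose .get() yields the page.
thread = CustomerGroup.list_all_customer_groups(**{'async': True})
groups = thread.get()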
"""""" # check for existing connection with GoogleCloudProvider.__gce_lock: if self._gce: return self._gce flow = OAuth2WebServerFlow(self._client_id, self._client_secret, GCE_SCOPE) # The `Storage` object holds the credentials that your # application needs to authorize access to the user's # data. The name of the credentials file is provided. If the # file does not exist, it is created. This object can only # hold credentials for a single user. It stores the access # priviledges for the application, so a user only has to grant # access through the web interface once. storage_path = os.path.join(self._storage_path, self._client_id + '.oauth.dat') storage = Storage(storage_path) credentials = storage.get() if credentials is None or credentials.invalid: args = argparser.parse_args([]) args.noauth_local_webserver = self._noauth_local_webserver # try to start a browser to have the user authenticate with Google # TODO: what kind of exception is raised if the browser # cannot be started? try: credentials = run_flow(flow, storage, flags=args) except: import sys print ""Unexpected error:"", sys.exc_info()[0] raise http = httplib2.Http() self._auth_http = credentials.authorize(http) self._gce = build(GCE_API_NAME, GCE_API_VERSION, http=http) return self._gce" 4984,"def start_instance(self, # these are common to any # CloudProvider.start_instance() call key_name, public_key_path, private_key_path, security_group, flavor, image_id, image_userdata, username=None, # these params are specific to the # GoogleCloudProvider node_name=None, boot_disk_type='pd-standard', boot_disk_size=10, tags=None, scheduling=None, **kwargs): """"""Starts a new instance with the given properties and returns the instance id. :param str key_name: name of the ssh key to connect :param str public_key_path: path to ssh public key :param str private_key_path: path to ssh private key :param str security_group: firewall rule definition to apply on the instance :param str flavor: machine type to use for the instance :param str image_id: image type (os) to use for the instance :param str image_userdata: command to execute after startup :param str username: username for the given ssh key, default None :param str node_name: name of the instance :param str tags: comma-separated list of ""tags"" to label the instance :param str scheduling: scheduling option to use for the instance (""preemptible"") :param str|Sequence tags: ""Tags"" to label the instance. Can be either a single string (individual tags are comma-separated), or a sequence of strings (each string being a single tag). :return: str - instance id of the started instance """""" # construct URLs project_url = '%s%s' % (GCE_URL, self._project_id) machine_type_url = '%s/zones/%s/machineTypes/%s' \ % (project_url, self._zone, flavor) boot_disk_type_url = '%s/zones/%s/diskTypes/%s' \ % (project_url, self._zone, boot_disk_type) # FIXME: `conf.py` should ensure that `boot_disk_size` has the right # type, so there would be no need to convert here boot_disk_size_gb = int(boot_disk_size) network_url = '%s/global/networks/%s' % (project_url, self._network) if image_id.startswith('http://') or image_id.startswith('https://'): image_url = image_id else: # The image names and full resource URLs for several Google- # provided images (debian, centos, etc.) follow a consistent # pattern, and so elasticluster supports a short-hand of just # an image name, such as # ""debian-7-wheezy-v20150526"". # The cloud project in this case is then ""debian-cloud"". 
# # Several images do not follow this convention, and so are # special-cased here: # backports-debian -> debian-cloud # ubuntu -> ubuntu-os-cloud # containter-vm -> google-containers if image_id.startswith('container-vm-'): os_cloud = 'google-containers' elif image_id.startswith('backports-debian-'): os_cloud = 'debian-cloud' elif image_id.startswith('ubuntu-'): os_cloud = 'ubuntu-os-cloud' else: os = image_id.split(""-"")[0] os_cloud = ""%s-cloud"" % os image_url = '%s%s/global/images/%s' % ( GCE_URL, os_cloud, image_id) if scheduling is None: # use GCE's default scheduling_option = {} elif scheduling == 'preemptible': scheduling_option = { 'preemptible': True } else: raise InstanceError(""Unknown scheduling option: '%s'"" % scheduling) if isinstance(tags, types.StringTypes): tags = tags.split(',') elif isinstance(tags, collections.Sequence): # ok, nothing to do pass elif tags is not None: raise TypeError( ""The `tags` argument to `gce.start_instance`"" "" should be a string or a list, got {T} instead"" .format(T=type(tags))) # construct the request body if node_name: instance_id = node_name.lower().replace('_', '-') # GCE doesn't allow ""_"" else: instance_id = 'elasticluster-%s' % uuid.uuid4() with open(public_key_path, 'r') as f: public_key_content = f.read() instance = { 'name': instance_id, 'machineType': machine_type_url, 'tags': { 'items': tags, }, 'scheduling': scheduling_option, 'disks': [{ 'autoDelete': 'true', 'boot': 'true', 'type': 'PERSISTENT', 'initializeParams' : { 'diskName': ""%s-disk"" % instance_id, 'diskType': boot_disk_type_url, 'diskSizeGb': boot_disk_size_gb, 'sourceImage': image_url } }], 'networkInterfaces': [ {'accessConfigs': [ {'type': 'ONE_TO_ONE_NAT', 'name': 'External NAT' }], 'network': network_url }], 'serviceAccounts': [ {'email': self._email, 'scopes': GCE_DEFAULT_SCOPES }], ""metadata"": { ""kind"": ""compute#metadata"", ""items"": [ { ""key"": ""sshKeys"", ""value"": ""%s:%s"" % (username, public_key_content) } ] } } # create the instance gce = self._connect() request = gce.instances().insert( project=self._project_id, body=instance, zone=self._zone) try: response = self._execute_request(request) response = self._wait_until_done(response) self._check_response(response) return instance_id except (HttpError, CloudProviderError) as e: log.error(""Error creating instance `%s`"" % e) raise InstanceError(""Error creating instance `%s`"" % e)" 4985,"def _get_image_url(self, image_id): """"""Gets the url for the specified image. Unfortunatly this only works for images uploaded by the user. The images provided by google will not be found. :param str image_id: image identifier :return: str - api url of the image """""" gce = self._connect() filter = ""name eq %s"" % image_id request = gce.images().list(project=self._project_id, filter=filter) response = self._execute_request(request) response = self._wait_until_done(response) image_url = None if ""items"" in response: image_url = response[""items""][0][""selfLink""] if image_url: return image_url else: raise ImageError(""Could not find given image id `%s`"" % image_id)" 4986,"def select_event( event = None, selection = ""all"", required_variables = None, ensure_required_variables_present = False, verbose = True ): """""" Select a HEP event. 
"""""" if required_variables is None: required_variables = [ ""Aplan_bjets"", ""Aplan_jets"", ""Centrality_all"", ""ClassifBDTOutput_6jsplit"", ""ClassifBDTOutput_basic"", ""ClassifBDTOutput_withReco_6jsplit"", ""ClassifBDTOutput_withReco_basic"", ""ClassifHPLUS_Semilep_HF_BDT200_Output"", ""dEtajj_MaxdEta"", ""dRbb_avg"", ""dRbb_MaxM"", ""dRbb_MaxPt"", ""dRbb_min"", ""dRbj_Wmass"", ""dRHl_MaxdR"", ""dRHl_MindR"", ""dRjj_min"", ""dRlepbb_MindR"", ""dRlj_MindR"", ""dRuu_MindR"", ""H1_all"", ""H4_all"", ""HhadT_nJets"", ""HiggsbbM"", ""HiggsjjM"", ""HT_all"", ""HT_jets"", ""Mbb_MaxM"", ""Mbb_MaxPt"", ""Mbb_MindR"", ""Mbj_MaxPt"", ""Mbj_MindR"", ""Mbj_Wmass"", ""met_met"", ""met_phi"", ""MHiggs"", ""Mjj_HiggsMass"", ""Mjjj_MaxPt"", ""Mjj_MaxPt"", ""Mjj_MindR"", ""Mjj_MinM"", ""mu"", ""Muu_MindR"", ""NBFricoNN_dil"", ""nBTags"", ""nBTags30"", ""nBTags50"", ""nBTags60"", ""nBTags70"", ""nBTags77"", ""nBTags80"", ""nBTags85"", ""nBTags90"", ""nBTagsFlatBEff_30"", ""nBTagsFlatBEff_40"", ""nBTagsFlatBEff_50"", ""nBTagsFlatBEff_60"", ""nBTagsFlatBEff_70"", ""nBTagsFlatBEff_77"", ""nBTagsFlatBEff_85"", ""nElectrons"", ""nHFJets"", ""NHiggs_30"", ""Njet_pt40"", ""Njet_pt40"", ""nJets"", ""nMuons"", ""nPrimaryVtx"", ""pT_jet3"", ""pT_jet5"", ""pTuu_MindR"", ""semilepMVAreco_b1higgsbhadtop_dR"", ""semilepMVAreco_bbhiggs_dR"", ""semilepMVAreco_BDT_output"", ""semilepMVAreco_BDT_output_6jsplit"", ""semilepMVAreco_BDT_output_truthMatchPattern"", ""semilepMVAreco_BDT_withH_output"", ""semilepMVAreco_BDT_withH_output_6jsplit"", ""semilepMVAreco_BDT_withH_output_truthMatchPattern"", ""semilepMVAreco_hadWb1Higgs_mass"", ""semilepMVAreco_higgsbhadtop_withH_dR"", ""semilepMVAreco_higgsbleptop_mass"", ""semilepMVAreco_higgsbleptop_withH_dR"", ""semilepMVAreco_higgslep_dR"", ""semilepMVAreco_higgsleptop_dR"", ""semilepMVAreco_higgs_mass"", ""semilepMVAreco_higgsq1hadW_mass"", ""semilepMVAreco_higgsttbar_withH_dR"", ""semilepMVAreco_leptophadtop_dR"", ""semilepMVAreco_leptophadtop_withH_dR"", ""semilepMVAreco_Ncombinations"", ""semilepMVAreco_nuApprox_recoBDT"", ""semilepMVAreco_nuApprox_recoBDT_6jsplit"", ""semilepMVAreco_nuApprox_recoBDT_withH"", ""semilepMVAreco_nuApprox_recoBDT_withH_6jsplit"", ""semilepMVAreco_ttH_Ht_withH"", #""ttHF_mva_discriminant"", ""el_d0sig[0]"", ""el_delta_z0_sintheta[0]"", ""el_e[0]"", ""el_eta[0]"", ""el_phi[0]"", ""el_pt[0]"", ""el_topoetcone20[0]"", #""mu_d0sig[0]"", #""mu_delta_z0_sintheta[0]"", #""mu_e[0]"", #""mu_eta[0]"", #""mu_phi[0]"", #""mu_pt[0]"", ""mu_topoetcone20[0]"", ""jet_e[0]"", ""jet_eta[0]"", ""jet_jvt[0]"", ""jet_mv2c10[0]"", ""jet_mv2c20[0]"", ""jet_phi[0]"", ""jet_pt[0]"", ""jet_semilepMVAreco_recoBDT_cand[0]"", ""jet_semilepMVAreco_recoBDT_cand_6jsplit[0]"", ""jet_semilepMVAreco_recoBDT_withH_cand[0]"", ""jet_semilepMVAreco_recoBDT_withH_cand_6jsplit[0]"", ""jet_e[1]"", ""jet_eta[1]"", ""jet_jvt[1]"", ""jet_mv2c10[1]"", ""jet_mv2c20[1]"", ""jet_phi[1]"", ""jet_pt[1]"", ""jet_semilepMVAreco_recoBDT_cand[1]"", ""jet_semilepMVAreco_recoBDT_cand_6jsplit[1]"", ""jet_semilepMVAreco_recoBDT_withH_cand[1]"", ""jet_semilepMVAreco_recoBDT_withH_cand_6jsplit[1]"", ""jet_e[2]"", ""jet_eta[2]"", ""jet_jvt[2]"", ""jet_mv2c10[2]"", ""jet_mv2c20[2]"", ""jet_phi[2]"", ""jet_pt[2]"", ""jet_semilepMVAreco_recoBDT_cand[2]"", ""jet_semilepMVAreco_recoBDT_cand_6jsplit[2]"", ""jet_semilepMVAreco_recoBDT_withH_cand[2]"", ""jet_semilepMVAreco_recoBDT_withH_cand_6jsplit[2]"", ""jet_e[3]"", ""jet_eta[3]"", ""jet_jvt[3]"", ""jet_mv2c10[3]"", ""jet_mv2c20[3]"", ""jet_phi[3]"", 
""jet_pt[3]"", ""jet_semilepMVAreco_recoBDT_cand[3]"", ""jet_semilepMVAreco_recoBDT_cand_6jsplit[3]"", ""jet_semilepMVAreco_recoBDT_withH_cand[3]"", ""jet_semilepMVAreco_recoBDT_withH_cand_6jsplit[3]"" ] if ensure_required_variables_present and not all([hasattr(event, variable) for variable in required_variables]): return False #for variable in required_variables: # if not hasattr(event, variable): # print(""missing {variable}"".format(variable = variable)) # By default, do not pass. conditions = [False] if selection == ""ejets"": conditions = [ event.nElectrons == 1, # Require 1 electron. event.nJets >= 4, # Require >= 4 jets. #event.nLjets >= 1 # Require a single large-R jet. ] elif selection == ""mujets"": conditions = [ event.nMuons == 1, # Require 1 muon. event.nJets >= 4, # Require >= 4 jets. #event.nLjets >= 1 # Require a single large-R jet. ] if selection == ""ejets_5JE4BI"": conditions = [ event.nElectrons == 1, # Require 1 electron. event.nJets == 5, # Require 5 jets. event.nBTags >= 4 # Require >= 4 b tags. #event.nLjets >= 1 # Require a single large-R jet. ] if selection == ""ejets_6JI4BI"": conditions = [ event.nElectrons == 1, # Require 1 electron. event.nJets >= 6, # Require >=6 jets. event.nBTags >= 4 # Require >= 4 b tags. #event.nLjets >= 1 # Require a single large-R jet. ] elif selection == ""all"": conditions = [ event.nElectrons == 1 or event.nMuons == 1, # Require 1 electron or 1 muon. event.nJets >= 4, # Require >= 4 jets. #event.nLjets >= 1 # Require a single large-R jet. ] if all(conditions): if verbose: log.info(""event number {event_number} passed selection {selection}"".format( event_number = event.eventNumber, selection = selection )) return True else: return False" 4987,"def create_free_shipping(cls, free_shipping, **kwargs): """"""Create FreeShipping Create a new FreeShipping This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.create_free_shipping(free_shipping, async=True) >>> result = thread.get() :param async bool :param FreeShipping free_shipping: Attributes of freeShipping to create (required) :return: FreeShipping If the method is called asynchronously, returns the request thread. """""" kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._create_free_shipping_with_http_info(free_shipping, **kwargs) else: (data) = cls._create_free_shipping_with_http_info(free_shipping, **kwargs) return data" 4988,"def delete_free_shipping_by_id(cls, free_shipping_id, **kwargs): """"""Delete FreeShipping Delete an instance of FreeShipping by its ID. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.delete_free_shipping_by_id(free_shipping_id, async=True) >>> result = thread.get() :param async bool :param str free_shipping_id: ID of freeShipping to delete. (required) :return: None If the method is called asynchronously, returns the request thread. """""" kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._delete_free_shipping_by_id_with_http_info(free_shipping_id, **kwargs) else: (data) = cls._delete_free_shipping_by_id_with_http_info(free_shipping_id, **kwargs) return data" 4989,"def get_free_shipping_by_id(cls, free_shipping_id, **kwargs): """"""Find FreeShipping Return single instance of FreeShipping by its ID. This method makes a synchronous HTTP request by default. 
To make an asynchronous HTTP request, please pass async=True >>> thread = api.get_free_shipping_by_id(free_shipping_id, async=True) >>> result = thread.get() :param async bool :param str free_shipping_id: ID of freeShipping to return (required) :return: FreeShipping If the method is called asynchronously, returns the request thread. """""" kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._get_free_shipping_by_id_with_http_info(free_shipping_id, **kwargs) else: (data) = cls._get_free_shipping_by_id_with_http_info(free_shipping_id, **kwargs) return data" 4990,"def list_all_free_shippings(cls, **kwargs): """"""List FreeShippings Return a list of FreeShippings This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.list_all_free_shippings(async=True) >>> result = thread.get() :param async bool :param int page: page number :param int size: page size :param str sort: page order :return: page[FreeShipping] If the method is called asynchronously, returns the request thread. """""" kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._list_all_free_shippings_with_http_info(**kwargs) else: (data) = cls._list_all_free_shippings_with_http_info(**kwargs) return data" 4991,"def replace_free_shipping_by_id(cls, free_shipping_id, free_shipping, **kwargs): """"""Replace FreeShipping Replace all attributes of FreeShipping This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.replace_free_shipping_by_id(free_shipping_id, free_shipping, async=True) >>> result = thread.get() :param async bool :param str free_shipping_id: ID of freeShipping to replace (required) :param FreeShipping free_shipping: Attributes of freeShipping to replace (required) :return: FreeShipping If the method is called asynchronously, returns the request thread. """""" kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._replace_free_shipping_by_id_with_http_info(free_shipping_id, free_shipping, **kwargs) else: (data) = cls._replace_free_shipping_by_id_with_http_info(free_shipping_id, free_shipping, **kwargs) return data" 4992,"def update_free_shipping_by_id(cls, free_shipping_id, free_shipping, **kwargs): """"""Update FreeShipping Update attributes of FreeShipping This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.update_free_shipping_by_id(free_shipping_id, free_shipping, async=True) >>> result = thread.get() :param async bool :param str free_shipping_id: ID of freeShipping to update. (required) :param FreeShipping free_shipping: Attributes of freeShipping to update. (required) :return: FreeShipping If the method is called asynchronously, returns the request thread. """""" kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._update_free_shipping_by_id_with_http_info(free_shipping_id, free_shipping, **kwargs) else: (data) = cls._update_free_shipping_by_id_with_http_info(free_shipping_id, free_shipping, **kwargs) return data" 4993,"def publish(dataset_uri): """"""Return access URL to HTTP enabled (published) dataset. Exits with error code 1 if the dataset_uri is not a dataset. Exits with error code 2 if the dataset cannot be HTTP enabled. 
"""""" try: dataset = dtoolcore.DataSet.from_uri(dataset_uri) except dtoolcore.DtoolCoreTypeError: print(""Not a dataset: {}"".format(dataset_uri)) sys.exit(1) try: access_uri = dataset._storage_broker.http_enable() except AttributeError: print( ""Datasets of type '{}' cannot be published using HTTP"".format( dataset._storage_broker.key) ) sys.exit(2) return access_uri" 4994,"def cli(): """"""Command line utility to HTTP enable (publish) a dataset."""""" parser = argparse.ArgumentParser(description=__doc__) parser.add_argument( ""dataset_uri"", help=""Dtool dataset URI"" ) parser.add_argument( ""-q"", ""--quiet"", action=""store_true"", help=""Only return the http URI"" ) args = parser.parse_args() access_uri = publish(args.dataset_uri) if args.quiet: print(access_uri) else: print(""Dataset accessible at: {}"".format(access_uri))" 4995,"def execute(self): """""" Load the cluster and build a GC3Pie configuration snippet. """""" creator = make_creator(self.params.config, storage_path=self.params.storage) cluster_name = self.params.cluster try: cluster = creator.load_cluster(cluster_name) except (ClusterNotFound, ConfigurationError) as ex: log.error(""Listing nodes from cluster %s: %s\n"" % (cluster_name, ex)) return from elasticluster.gc3pie_config import create_gc3pie_config_snippet if self.params.append: path = os.path.expanduser(self.params.append) try: fd = open(path, 'a') fd.write(create_gc3pie_config_snippet(cluster)) fd.close() except IOError as ex: log.error(""Unable to write configuration to file %s: %s"", path, ex) else: print(create_gc3pie_config_snippet(cluster))" 4996,"def write_xml(self, outfile, encoding=""UTF-8""): """"""Write the Media RSS Feed's XML representation to the given file."""""" # we add the media namespace if we see any media items if any([key for item in self.items for key in vars(item) if key.startswith('media_') and getattr(item, key)]): self.rss_attrs[""xmlns:media""] = ""http://search.yahoo.com/mrss/"" self.generator = _generator_name super(MediaRSS2, self).write_xml(outfile, encoding)" 4997,"def _add_attribute(self, name, value, allowed_values=None): """"""Add an attribute to the MediaContent element."""""" if value and value != 'none': if isinstance(value, (int, bool)): value = str(value) if allowed_values and value not in allowed_values: raise TypeError( ""Attribute '"" + name + ""' must be one of "" + str( allowed_values) + "" but is '"" + str(value) + ""'"") self.element_attrs[name] = value" 4998,"def check_complicance(self): """"""Check compliance with Media RSS Specification, Version 1.5.1. see http://www.rssboard.org/media-rss Raises AttributeError on error. """""" # check Media RSS requirement: one of the following elements is # required: media_group | media_content | media_player | media_peerLink # | media_location. We do the check only if any media_... element is # set to allow non media feeds if(any([ma for ma in vars(self) if ma.startswith('media_') and getattr(self, ma)]) and not self.media_group and not self.media_content and not self.media_player and not self.media_peerLink and not self.media_location ): raise AttributeError( ""Using media elements requires the specification of at least "" ""one of the following elements: 'media_group', "" ""'media_content', 'media_player', 'media_peerLink' or "" ""'media_location'."") # check Media RSS requirement: if media:player is missing all # media_content elements need to have url attributes. 
if not self.media_player: if self.media_content: # check that every media_content element has a URL set if isinstance(self.media_content, list): if any(not mc.element_attrs.get('url') for mc in self.media_content): raise AttributeError( ""MediaRSSItems require a media_player attribute "" ""if a media_content has no url set."") else: if not self.media_content.element_attrs.get('url'): raise AttributeError( ""MediaRSSItems require a media_player attribute "" ""if a media_content has no url set."") elif self.media_group: # check media groups without player if its media_content # elements have a URL set raise NotImplementedError( ""MediaRSSItem: media_group check not implemented yet."")" 4999,"def publish_extensions(self, handler): """"""Publish the Media RSS Feed elements as XML."""""" if isinstance(self.media_content, list): for mc_element in self.media_content: PyRSS2Gen._opt_element(handler, ""media:content"", mc_element) else: PyRSS2Gen._opt_element(handler, ""media:content"", self.media_content) if hasattr(self, 'media_title'): PyRSS2Gen._opt_element(handler, ""media:title"", self.media_title) if hasattr(self, 'media_text'): PyRSS2Gen._opt_element(handler, ""media:text"", self.media_text)"
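The GCE start_instance() code above accepts either a full image resource URL or a bare image name and maps the name to the Google Cloud project that hosts it. The following is a stand-alone sketch of that mapping only; the GCE_URL value is an assumption about the compute API projects base URL (the real constant lives elsewhere in the provider module), and the helper name is illustrative.

# Illustrative stand-alone version of the image short-hand resolution done in start_instance().
# GCE_URL is assumed to be the compute API base URL ending in '/projects/'.
GCE_URL = 'https://www.googleapis.com/compute/v1/projects/'

def resolve_image_url(image_id):
    """Map an image name such as 'debian-7-wheezy-v20150526' to its full resource URL."""
    if image_id.startswith(('http://', 'https://')):
        return image_id                                     # already a full URL
    if image_id.startswith('container-vm-'):
        os_cloud = 'google-containers'                      # container images live in their own project
    elif image_id.startswith('backports-debian-'):
        os_cloud = 'debian-cloud'
    elif image_id.startswith('ubuntu-'):
        os_cloud = 'ubuntu-os-cloud'
    else:
        os_cloud = '%s-cloud' % image_id.split('-')[0]      # e.g. 'debian' -> 'debian-cloud'
    return '%s%s/global/images/%s' % (GCE_URL, os_cloud, image_id)

assert resolve_image_url('debian-7-wheezy-v20150526').endswith(
    'debian-cloud/global/images/debian-7-wheezy-v20150526')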
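The event-selection logic in select_event() keys off a handful of counters (nElectrons, nMuons, nJets, nBTags). A hypothetical usage sketch, assuming select_event is importable from the module above and substituting a stand-in object for a real ntuple event:

class FakeEvent(object):
    """Stand-in for a real ntuple event; only the counters used by the selections are set."""
    def __init__(self, **branches):
        self.__dict__.update(branches)

event = FakeEvent(nElectrons=1, nMuons=0, nJets=5, nBTags=4, eventNumber=42)
select_event(event=event, selection="ejets", verbose=False)          # True: 1 electron, >= 4 jets
select_event(event=event, selection="ejets_6JI4BI", verbose=False)   # False: needs >= 6 jets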
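The FreeShipping client methods above all follow the same dispatch pattern: when the caller passes the async flag, the *_with_http_info helper returns a thread-like handle, otherwise the unwrapped payload is returned. Below is a minimal, self-contained sketch of that pattern; the class and helper names are illustrative, not part of the real client, and since async is a reserved word in Python 3.7+ the flag is passed via a dict.

from multiprocessing.pool import ThreadPool

_pool = ThreadPool(processes=1)

class FakeApiClient(object):
    """Illustrative client mirroring the sync/async dispatch used by the FreeShipping methods."""

    @classmethod
    def _get_with_http_info(cls, resource_id, **kwargs):
        # A real client would issue the HTTP request here; with the 'async'
        # flag set it returns an AsyncResult handle instead of the payload.
        if kwargs.get('async'):
            return _pool.apply_async(cls._get_with_http_info, (resource_id,))
        return {'id': resource_id}

    @classmethod
    def get_by_id(cls, resource_id, **kwargs):
        kwargs['_return_http_data_only'] = True
        if kwargs.get('async'):
            return cls._get_with_http_info(resource_id, **kwargs)
        (data) = cls._get_with_http_info(resource_id, **kwargs)
        return data

print(FakeApiClient.get_by_id('fs-123'))                       # synchronous: returns the payload
thread = FakeApiClient.get_by_id('fs-123', **{'async': True})  # asynchronous: returns a handle
print(thread.get())                                            # block and fetch the payload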