Dataset schema (each record below lists its fields in this order: _id, title, partition, text, language, meta_information):

  _id               string, 2-7 characters
  title             string, 1-88 characters
  partition         string, 3 distinct values
  text              string, 31-13.1k characters
  language          string, 1 distinct value
  meta_information  dict
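A minimal sketch of reading records with this schema, assuming the split is exported as JSON Lines with one object per record; the file name and the main-guard are illustrative, not part of the dataset card:

import json

# Hypothetical export path; the card does not name a file.
PATH = "train.jsonl"

def iter_records(path=PATH):
    """Yield one dict per record with the fields listed above."""
    with open(path, encoding="utf-8") as f:
        for line in f:
            rec = json.loads(line)
            # Expected keys: _id, title, partition, text, language, meta_information
            yield rec

if __name__ == "__main__":
    for rec in iter_records():
        print(rec["_id"], rec["title"], rec["partition"], rec["language"])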
q200
_nth
train
def _nth(arr, n): """ Return the nth value of array If it is missing return NaN """ try:
python
{ "resource": "" }
q201
make_time
train
def make_time(h=0, m=0, s=0, ms=0, frames=None, fps=None): """ Convert time to milliseconds. See :func:`pysubs2.time.times_to_ms()`. When both frames and fps are specified, :func:`pysubs2.time.frames_to_ms()` is called instead. Raises: ValueError: Invalid fps, or one
python
{ "resource": "" }
q202
SSAEvent.shift
train
def shift(self, h=0, m=0, s=0, ms=0, frames=None, fps=None): """ Shift start and end times. See :meth:`SSAFile.shift()` for full description. """
python
{ "resource": "" }
q203
SSAEvent.equals
train
def equals(self, other): """Field-based equality for SSAEvents.""" if isinstance(other, SSAEvent): return self.as_dict() == other.as_dict()
python
{ "resource": "" }
q204
SSAFile.load
train
def load(cls, path, encoding="utf-8", format_=None, fps=None, **kwargs): """ Load subtitle file from given path. Arguments: path (str): Path to subtitle file. encoding (str): Character encoding of input file. Defaults to UTF-8, you may need to change this. format_ (str): Optional, forces use of specific parser (eg. `"srt"`, `"ass"`). Otherwise, format is detected automatically from file contents. This argument should be rarely needed. fps (float): Framerate for frame-based formats (MicroDVD), for other formats this argument is ignored. Framerate might be detected from the file, in which case you don't need to specify it here (when given, this argument overrides autodetection). kwargs: Extra options for the parser. Returns: SSAFile Raises: IOError
python
{ "resource": "" }
q205
SSAFile.from_string
train
def from_string(cls, string, format_=None, fps=None, **kwargs): """ Load subtitle file from string. See :meth:`SSAFile.load()` for full description. Arguments: string (str): Subtitle file in a string. Note that the string must be Unicode (in Python 2). Returns: SSAFile Example: >>> text = '''
python
{ "resource": "" }
q206
SSAFile.from_file
train
def from_file(cls, fp, format_=None, fps=None, **kwargs): """ Read subtitle file from file object. See :meth:`SSAFile.load()` for full description. Note: This is a low-level method. Usually, one of :meth:`SSAFile.load()` or :meth:`SSAFile.from_string()` is preferable. Arguments: fp (file object): A file object, ie. :class:`io.TextIOBase` instance. Note that the file must be opened in text mode (as opposed
python
{ "resource": "" }
q207
SSAFile.save
train
def save(self, path, encoding="utf-8", format_=None, fps=None, **kwargs): """ Save subtitle file to given path. Arguments: path (str): Path to subtitle file. encoding (str): Character encoding of output file. Defaults to UTF-8, which should be fine for most purposes. format_ (str): Optional, specifies desired subtitle format (eg. `"srt"`, `"ass"`). Otherwise, format is detected automatically from file extension. Thus, this argument is rarely needed. fps (float): Framerate for frame-based formats (MicroDVD), for other formats this argument is ignored. When omitted, :attr:`SSAFile.fps` value is used (ie. the framerate used for loading the file, if any). When the :class:`SSAFile` wasn't loaded from MicroDVD, or if you wish save it with different framerate, use this argument. See also :meth:`SSAFile.transform_framerate()` for fixing bad frame-based to time-based
python
{ "resource": "" }
q208
SSAFile.to_string
train
def to_string(self, format_, fps=None, **kwargs): """ Get subtitle file as a string. See :meth:`SSAFile.save()` for full description. Returns: str """
python
{ "resource": "" }
q209
SSAFile.to_file
train
def to_file(self, fp, format_, fps=None, **kwargs): """ Write subtitle file to file object. See :meth:`SSAFile.save()` for full description. Note: This is a low-level method. Usually, one of :meth:`SSAFile.save()` or :meth:`SSAFile.to_string()` is preferable. Arguments:
python
{ "resource": "" }
q210
SSAFile.rename_style
train
def rename_style(self, old_name, new_name): """ Rename a style, including references to it. Arguments: old_name (str): Style to be renamed. new_name (str): New name for the style (must be unused). Raises: KeyError: No style named old_name. ValueError: new_name is not a legal name (cannot use commas) or new_name is taken. """ if old_name not in self.styles: raise KeyError("Style %r not found" % old_name) if new_name in self.styles: raise ValueError("There is already
python
{ "resource": "" }
q211
SSAFile.import_styles
train
def import_styles(self, subs, overwrite=True): """ Merge in styles from other SSAFile. Arguments: subs (SSAFile): Subtitle file imported from. overwrite (bool): On name conflict, use style from the other file (default: True). """
python
{ "resource": "" }
q212
SSAFile.equals
train
def equals(self, other): """ Equality of two SSAFiles. Compares :attr:`SSAFile.info`, :attr:`SSAFile.styles` and :attr:`SSAFile.events`. Order of entries in OrderedDicts does not matter. "ScriptType" key in info is considered an implementation detail and thus ignored. Useful mostly in unit tests. Differences are logged at DEBUG level. """ if isinstance(other, SSAFile): for key in set(chain(self.info.keys(), other.info.keys())) - {"ScriptType"}: sv, ov = self.info.get(key), other.info.get(key) if sv is None: logging.debug("%r missing in self.info", key) return False elif ov is None: logging.debug("%r missing in other.info", key) return False elif sv != ov: logging.debug("info %r differs (self=%r, other=%r)", key, sv, ov) return False for key in set(chain(self.styles.keys(), other.styles.keys())): sv, ov = self.styles.get(key), other.styles.get(key) if sv is None: logging.debug("%r missing in self.styles", key) return False elif ov is None: logging.debug("%r missing in other.styles", key) return False elif sv != ov: for k in sv.FIELDS: if getattr(sv, k) != getattr(ov, k): logging.debug("difference in field %r", k)
python
{ "resource": "" }
q213
get_file_extension
train
def get_file_extension(format_): """Format identifier -> file extension""" if format_ not in FORMAT_IDENTIFIER_TO_FORMAT_CLASS: raise UnknownFormatIdentifierError(format_)
python
{ "resource": "" }
q214
autodetect_format
train
def autodetect_format(content): """Return format identifier for given fragment or raise FormatAutodetectionError.""" formats = set() for impl in FORMAT_IDENTIFIER_TO_FORMAT_CLASS.values(): guess = impl.guess_format(content) if guess is not None: formats.add(guess) if len(formats) ==
python
{ "resource": "" }
q215
src_reload
train
async def src_reload(app, path: str = None): """ prompt each connected browser to reload by sending websocket message. :param path: if supplied this must be a path relative to app['static_path'], eg. reload of a single file is only supported for static resources. :return: number of sources reloaded """ cli_count = len(app[WS]) if cli_count == 0: return 0 is_html = None if path: path = str(Path(app['static_url']) / Path(path).relative_to(app['static_path'])) is_html = mimetypes.guess_type(path)[0] == 'text/html' reloads = 0 aux_logger.debug('prompting source reload for %d clients', cli_count) for ws, url in app[WS]: if path and is_html and path not in {url, url + '.html', url.rstrip('/') + '/index.html'}: aux_logger.debug('skipping reload for client at %s', url) continue aux_logger.debug('reload client at %s', url) data = { 'command': 'reload', 'path': path or url, 'liveCSS': True,
python
{ "resource": "" }
q216
Settings.substitute_environ
train
def substitute_environ(self): """ Substitute environment variables into settings. """ for attr_name in dir(self): if attr_name.startswith('_') or attr_name.upper() != attr_name: continue orig_value = getattr(self, attr_name) is_required = isinstance(orig_value, Required) orig_type = orig_value.v_type if is_required else type(orig_value) env_var_name = self._ENV_PREFIX + attr_name env_var = os.getenv(env_var_name, None) if env_var is not None: if issubclass(orig_type, bool): env_var = env_var.upper() in ('1', 'TRUE') elif issubclass(orig_type, int): env_var = int(env_var) elif issubclass(orig_type, Path): env_var = Path(env_var) elif issubclass(orig_type, bytes):
python
{ "resource": "" }
q217
serve
train
def serve(path, livereload, port, verbose): """ Serve static files from a directory. """
python
{ "resource": "" }
q218
runserver
train
def runserver(**config): """ Run a development server for an aiohttp apps. Takes one argument "app-path" which should be a path to either a directory containing a recognized default file ("app.py" or "main.py") or to a specific file. Defaults to the environment variable "AIO_APP_PATH" or ".". The app path is run directly, see the "--app-factory" option for details on
python
{ "resource": "" }
q219
scenario
train
def scenario(weight=1, delay=0.0, name=None): """Decorator to register a function as a Molotov test. Options: - **weight** used by Molotov when the scenarii are randomly picked. The functions with the highest values are more likely to be picked. Integer, defaults to 1. This value is ignored when the *scenario_picker* decorator is used. - **delay** once the scenario is done, the worker will sleep *delay* seconds. Float, defaults to 0. The general --delay argument you can pass to Molotov will be summed with this delay. - **name** name of the scenario. If not provided, will use the function __name___ attribute. The decorated function receives an :class:`aiohttp.ClientSession` instance. """ def _scenario(func, *args, **kw):
python
{ "resource": "" }
q220
request
train
def request(endpoint, verb='GET', session_options=None, **options): """Performs a synchronous request. Uses a dedicated event loop and aiohttp.ClientSession object. Options: - endpoint: the endpoint to call - verb: the HTTP verb to use (defaults: GET) - session_options: a dict containing options to initialize the session (defaults: None) - options: extra options for the request (defaults: None) Returns a dict object
python
{ "resource": "" }
q221
get_var
train
def get_var(name, factory=None): """Gets a global variable given its name. If factory is not None and the variable is not set, factory
python
{ "resource": "" }
q222
Worker.step
train
async def step(self, step_id, session, scenario=None): """ single scenario call. When it returns 1, it works. -1 the script failed, 0 the test is stopping or needs to stop. """ if scenario is None: scenario = pick_scenario(self.wid, step_id) try: await self.send_event('scenario_start', scenario=scenario) await scenario['func'](session, *scenario['args'], **scenario['kw']) await self.send_event('scenario_success', scenario=scenario) if scenario['delay'] > 0.:
python
{ "resource": "" }
q223
main
train
def main(): """Moloslave clones a git repo and runs a molotov test """ parser = argparse.ArgumentParser(description='Github-based load test') parser.add_argument('--version', action='store_true', default=False, help='Displays version and exits.') parser.add_argument('--virtualenv', type=str, default='virtualenv', help='Virtualenv executable.') parser.add_argument('--python', type=str, default=sys.executable, help='Python executable.') parser.add_argument('--config', type=str, default='molotov.json', help='Path of the configuration file.') parser.add_argument('repo', help='Github repo', type=str, nargs="?") parser.add_argument('run', help='Test to run', nargs="?") args = parser.parse_args() if args.version: print(__version__) sys.exit(0) tempdir = tempfile.mkdtemp() curdir = os.getcwd() os.chdir(tempdir) print('Working directory is %s' % tempdir) try: clone_repo(args.repo) config_file = os.path.join(tempdir, args.config) with open(config_file) as f: config = json.loads(f.read()) # creating the virtualenv create_virtualenv(args.virtualenv, args.python) # install deps
python
{ "resource": "" }
q224
copy_files
train
def copy_files(source_files, target_directory, source_directory=None): """Copies a list of files to the specified directory. If source_directory is provided, it will be prepended to each source file.""" try: os.makedirs(target_directory) except: # TODO: specific exception? pass
python
{ "resource": "" }
q225
yes_or_no
train
def yes_or_no(message): """Gets user input and returns True for yes and False for no.""" while True: print message, '(yes/no)', line = raw_input() if line is None: return None line
python
{ "resource": "" }
q226
add_plugin
train
def add_plugin(plugin, directory=None): """Adds the specified plugin. This returns False if it was already added.""" repo = require_repo(directory)
python
{ "resource": "" }
q227
get_plugin_settings
train
def get_plugin_settings(plugin, directory=None): """Gets the settings for the specified plugin.""" repo = require_repo(directory)
python
{ "resource": "" }
q228
preview
train
def preview(directory=None, host=None, port=None, watch=True): """Runs a local server to preview the working directory of a repository.""" directory = directory or '.' host = host or '127.0.0.1' port = port or 5000 # TODO: admin interface # TODO: use cache_only to keep from modifying output directly out_directory = build(directory) # Serve generated site
python
{ "resource": "" }
q229
require_repo
train
def require_repo(directory=None): """Checks for a presentation repository and raises an exception if not found.""" if directory and not os.path.isdir(directory): raise ValueError('Directory not found: ' + repr(directory))
python
{ "resource": "" }
q230
init
train
def init(directory=None): """Initializes a Gitpress presentation repository at the specified directory.""" repo = repo_path(directory) if os.path.isdir(repo): raise RepositoryAlreadyExistsError(directory, repo)
python
{ "resource": "" }
q231
iterate_presentation_files
train
def iterate_presentation_files(path=None, excludes=None, includes=None): """Iterates the repository presentation files relative to 'path', not including themes. Note that 'includes' take priority.""" # Defaults if includes is None: includes = [] if excludes is None: excludes = [] # Transform glob patterns to regular expressions includes_pattern = r'|'.join([fnmatch.translate(x) for x in includes]) or r'$.' excludes_pattern = r'|'.join([fnmatch.translate(x) for x in excludes]) or r'$.' includes_re = re.compile(includes_pattern) excludes_re = re.compile(excludes_pattern) def included(root, name): """Returns True if the specified file is a presentation file.""" full_path = os.path.join(root, name) # Explicitly included files takes priority
python
{ "resource": "" }
q232
read_config_file
train
def read_config_file(path): """Returns the configuration from the specified file.""" try: with open(path, 'r') as f: return json.load(f, object_pairs_hook=OrderedDict)
python
{ "resource": "" }
q233
write_config
train
def write_config(repo_directory, config): """Writes the specified configuration to the presentation repository.""" return
python
{ "resource": "" }
q234
write_config_file
train
def write_config_file(path, config): """Writes the specified configuration to the specified file.""" contents = json.dumps(config, indent=4, separators=(',', ': ')) + '\n' try: with open(path, 'w') as f: f.write(contents)
python
{ "resource": "" }
q235
get_value
train
def get_value(repo_directory, key, expect_type=None): """Gets the value of the specified key in the config file.""" config = read_config(repo_directory) value = config.get(key) if expect_type and value is not None and not isinstance(value, expect_type):
python
{ "resource": "" }
q236
set_value
train
def set_value(repo_directory, key, value, strict=True): """Sets the value of a particular key in the config file. This has no effect when setting to the same value.""" if value is None: raise ValueError('Argument "value" must not be None.') # Read values and do nothing if not making any changes
python
{ "resource": "" }
q237
build
train
def build(content_directory=None, out_directory=None): """Builds the site from its content and presentation repository.""" content_directory = content_directory or '.' out_directory = os.path.abspath(out_directory or default_out_directory) repo = require_repo(content_directory) # Prevent user mistakes if out_directory == '.': raise ValueError('Output directory must be different than the source directory: ' + repr(out_directory)) if os.path.basename(os.path.relpath(out_directory, content_directory)) == '..': raise ValueError('Output directory must not contain the source
python
{ "resource": "" }
q238
gpp
train
def gpp(argv=None): """Shortcut function for running the previewing command.""" if argv is None: argv =
python
{ "resource": "" }
q239
use_theme
train
def use_theme(theme, directory=None): """Switches to the specified theme. This returns False if switching to the already active theme.""" repo = require_repo(directory) if theme not in list_themes(directory):
python
{ "resource": "" }
q240
data_type
train
def data_type(data, grouped=False, columns=None, key_on='idx', iter_idx=None): '''Data type check for automatic import''' if iter_idx: return Data.from_mult_iters(idx=iter_idx, **data) if pd: if isinstance(data, (pd.Series, pd.DataFrame)): return Data.from_pandas(data, grouped=grouped, columns=columns,
python
{ "resource": "" }
q241
Map.rebind
train
def rebind(self, column=None, brew='GnBu'): """Bind a new column to the data map Parameters ---------- column: str, default None Pandas DataFrame column name brew: str, default None Color brewer abbreviation. See colors.py """ self.data['table'] = Data.keypairs( self.raw_data, columns=[self.data_key,
python
{ "resource": "" }
q242
Visualization.axis_titles
train
def axis_titles(self, x=None, y=None): """Apply axis titles to the figure. This is a convenience method for manually modifying the "Axes" mark. Parameters ---------- x: string, default 'null' X-axis title y: string, default 'null' Y-axis title Example ------- >>>vis.axis_titles(y="Data 1", x="Data 2") """ keys = self.axes.get_keys() if keys:
python
{ "resource": "" }
q243
Visualization._set_axis_properties
train
def _set_axis_properties(self, axis): """Set AxisProperties and PropertySets""" if not getattr(axis, 'properties'): axis.properties = AxisProperties() for prop
python
{ "resource": "" }
q244
Visualization._set_all_axis_color
train
def _set_all_axis_color(self, axis, color): """Set axis ticks, title, labels to given color""" for prop in ['ticks', 'axis', 'major_ticks', 'minor_ticks', 'title', 'labels']: prop_set = getattr(axis.properties, prop) if color and prop in ['title', 'labels']:
python
{ "resource": "" }
q245
Visualization._axis_properties
train
def _axis_properties(self, axis, title_size, title_offset, label_angle, label_align, color): """Assign axis properties""" if self.axes: axis = [a for a in self.axes if a.scale == axis][0] self._set_axis_properties(axis) self._set_all_axis_color(axis, color) if title_size: axis.properties.title.font_size = ValueRef(value=title_size) if label_angle: axis.properties.labels.angle = ValueRef(value=label_angle)
python
{ "resource": "" }
q246
Visualization.common_axis_properties
train
def common_axis_properties(self, color=None, title_size=None): """Set common axis properties such as color Parameters ---------- color: str, default None Hex color str, etc """ if self.axes: for axis in self.axes: self._set_axis_properties(axis) self._set_all_axis_color(axis, color) if title_size:
python
{ "resource": "" }
q247
Visualization.x_axis_properties
train
def x_axis_properties(self, title_size=None, title_offset=None, label_angle=None, label_align=None, color=None): """Change x-axis title font size and label angle Parameters ---------- title_size: int, default None Title size, in px title_offset: int, default None Pixel offset from given axis label_angle: int, default None label angle in degrees label_align: str, default None
python
{ "resource": "" }
q248
Visualization.y_axis_properties
train
def y_axis_properties(self, title_size=None, title_offset=None, label_angle=None, label_align=None, color=None): """Change y-axis title font size and label angle Parameters ---------- title_size: int, default None Title size, in px title_offset: int, default None Pixel offset from given axis label_angle: int, default None label angle in degrees label_align: str, default None
python
{ "resource": "" }
q249
Visualization.legend
train
def legend(self, title=None, scale='color', text_color=None): """Convience method for adding a legend to the figure. Important: This defaults to the color scale that is generated with Line, Area, Stacked Line, etc charts. For bar charts, the scale ref is usually 'y'. Parameters ---------- title: string, default None Legend Title scale: string, default 'color' Scale reference for legend text_color: str, default None Title and label color
python
{ "resource": "" }
q250
Visualization.colors
train
def colors(self, brew=None, range_=None): """Convenience method for adding color brewer scales to charts with a color scale, such as stacked or grouped bars. See the colors here: http://colorbrewer2.org/ Or here: http://bl.ocks.org/mbostock/5577023 This assumes that a 'color' scale exists on your chart. Parameters ---------- brew: string, default None Color brewer scheme (BuGn, YlOrRd, etc) range: list, default None
python
{ "resource": "" }
q251
Visualization.validate
train
def validate(self, require_all=True, scale='colors'): """Validate the visualization contents. Parameters ---------- require_all : boolean, default True If True (default), then all fields ``data``, ``scales``, ``axes``, and ``marks`` must be defined. The user is allowed to disable this if the intent is to define the elements client-side. If the contents of the visualization are not valid Vega, then a
python
{ "resource": "" }
q252
Visualization.display
train
def display(self): """Display the visualization inline in the IPython notebook. This is deprecated, use the following instead:: from IPython.display import display display(viz)
python
{ "resource": "" }
q253
Data.validate
train
def validate(self, *args): """Validate contents of class """ super(self.__class__, self).validate(*args)
python
{ "resource": "" }
q254
Data.serialize
train
def serialize(obj): """Convert an object into a JSON-serializable value This is used by the ``from_pandas`` and ``from_numpy`` functions to convert data to JSON-serializable types when loading. """ if isinstance(obj, str_types): return obj elif hasattr(obj, 'timetuple'): return int(time.mktime(obj.timetuple())) * 1000 elif hasattr(obj, 'item'): return obj.item() elif hasattr(obj, '__float__'): if isinstance(obj, int):
python
{ "resource": "" }
q255
Data.from_pandas
train
def from_pandas(cls, data, columns=None, key_on='idx', name=None, series_key='data', grouped=False, records=False, **kwargs): """Load values from a pandas ``Series`` or ``DataFrame`` object Parameters ---------- data : pandas ``Series`` or ``DataFrame`` Pandas object to import data from. columns: list, default None DataFrame columns to convert to Data. Keys default to col names. If columns are given and on_index is False, x-axis data will default to the first column. key_on: string, default 'index' Value to key on for x-axis data. Defaults to index. name : string, default None Applies to the ``name`` attribute of the generated class. If ``None`` (default), then the ``name`` attribute of ``pd_obj`` is used if it exists, or ``'table'`` if it doesn't. series_key : string, default 'data' Applies only to ``Series``. If ``None`` (default), then defaults to data.name. For example, if ``series_key`` is ``'x'``, then the entries of the ``values`` list will be ``{'idx': ..., 'col': 'x', 'val': ...}``. grouped: boolean, default False Pass true for an extra grouping parameter records: boolean, defaule False Requires Pandas 0.12 or greater. Writes the Pandas DataFrame using the df.to_json(orient='records') formatting. **kwargs : dict Additional arguments passed to the :class:`Data` constructor. """ # Note: There's an experimental JSON encoder floating around in # pandas land that hasn't made it into the main branch. This # function should be revisited if it ever does. if not pd: raise LoadError('pandas could not be imported') if not hasattr(data, 'index'): raise ValueError('Please load a Pandas object.') if name: vega_data = cls(name=name, **kwargs) else: vega_data = cls(name='table', **kwargs) pd_obj = data.copy() if columns: pd_obj = data[columns] if key_on != 'idx': pd_obj.index = data[key_on]
python
{ "resource": "" }
q256
Data.from_numpy
train
def from_numpy(cls, np_obj, name, columns, index=None, index_key=None, **kwargs): """Load values from a numpy array Parameters ---------- np_obj : numpy.ndarray numpy array to load data from name : string ``name`` field for the data columns : iterable Sequence of column names, from left to right. Must have same length as the number of columns of ``np_obj``. index : iterable, default None Sequence of indices from top to bottom. If ``None`` (default), then the indices are integers starting at 0. Must have same length as the number of rows of ``np_obj``. index_key : string, default None Key to use for the index. If ``None`` (default), ``idx`` is used. **kwargs : dict Additional arguments passed to the :class:`Data` constructor Notes ----- The individual elements of ``np_obj``, ``columns``, and ``index`` must return valid values from :func:`Data.serialize`. """ if not np: raise LoadError('numpy could not be imported') _assert_is_type('numpy object', np_obj, np.ndarray) # Integer index if
python
{ "resource": "" }
q257
Data.from_mult_iters
train
def from_mult_iters(cls, name=None, idx=None, **kwargs): """Load values from multiple iters Parameters ---------- name : string, default None Name of the data set. If None (default), the name will be set to ``'table'``. idx: string, default None Iterable to use for the data index **kwargs : dict of iterables The ``values`` field will contain dictionaries with keys for each of the iterables provided. For example, d = Data.from_iters(idx='x', x=[0, 1, 5], y=(10, 20, 30)) would result in ``d`` having a ``values`` field with [{'idx': 0, 'col': 'y', 'val': 10}, {'idx': 1, 'col': 'y', 'val': 20} If the iterables are not the same length, then ValueError is raised. """ if not name: name = 'table' lengths = [len(v) for v in kwargs.values()]
python
{ "resource": "" }
q258
Data.from_iter
train
def from_iter(cls, data, name=None): """Convenience method for loading data from an iterable. Defaults to numerical indexing for x-axis. Parameters ---------- data: iterable An iterable of data (list, tuple, dict of key/val pairs) name: string, default None Name of the data set. If None (default), the name will be set to ``'table'``. """ if not name:
python
{ "resource": "" }
q259
Data._numpy_to_values
train
def _numpy_to_values(data): '''Convert a NumPy array to values attribute''' def to_list_no_index(xvals, yvals): return [{"x": x, "y": np.asscalar(y)} for x, y in zip(xvals, yvals)] if len(data.shape) == 1 or data.shape[1] == 1: xvals = range(data.shape[0] + 1) values = to_list_no_index(xvals, data) elif len(data.shape) == 2: if data.shape[1] == 2: # NumPy arrays and matrices have different iteration rules. if isinstance(data, np.matrix): xidx = (0, 0) yidx = (0, 1) else: xidx = 0
python
{ "resource": "" }
q260
Data.to_json
train
def to_json(self, validate=False, pretty_print=True, data_path=None): """Convert data to JSON Parameters ---------- data_path : string If not None, then data is written to a separate file at the specified path. Note that the ``url`` attribute if the data must be set independently for the data to load correctly. Returns ------- string Valid Vega JSON.
python
{ "resource": "" }
q261
_assert_is_type
train
def _assert_is_type(name, value, value_type): """Assert that a value must be a given type.""" if not isinstance(value, value_type): if type(value_type) is tuple: types = ', '.join(t.__name__ for t in value_type) raise ValueError('{0} must be
python
{ "resource": "" }
q262
grammar
train
def grammar(grammar_type=None, grammar_name=None): """Decorator to define properties that map to the ``grammar`` dict. This dict is the canonical representation of the Vega grammar within Vincent. This decorator is intended for classes that map to some pre-defined JSON structure, such as axes, data, marks, scales, etc. It is assumed that this decorates functions with an instance of ``self.grammar``. Parameters ---------- grammar_type : type or tuple of types, default None If the argument to the decorated function is not of the given types, then a ValueError is raised. No type checking is done if the type is None (default). grammar_name : string, default None An optional name to map to the internal ``grammar`` dict. If None (default), then the key for the dict is the name of the function being decorated. If not None, then it will be the name specified here. This is useful if the expected JSON field name is a Python keyword or has an un-Pythonic name. This should decorate a "validator" function that should return no value but raise an exception if the provided value is not valid Vega grammar. If the validator throws no exception, then the value is assigned to the ``grammar`` dict.
python
{ "resource": "" }
q263
GrammarClass.validate
train
def validate(self): """Validate the contents of the object. This calls ``setattr`` for each of the class's grammar properties. It will catch ``ValueError``s raised by the grammar property's setters and re-raise them as :class:`ValidationError`. """ for key, val in
python
{ "resource": "" }
q264
GrammarClass.to_json
train
def to_json(self, path=None, html_out=False, html_path='vega_template.html', validate=False, pretty_print=True): """Convert object to JSON Parameters ---------- path: string, default None Path to write JSON out. If there is no path provided, JSON will be returned as a string to the console. html_out: boolean, default False If True, vincent will output an simple HTML scaffold to visualize the vega json output. html_path: string, default 'vega_template.html' Path for the html file (if html_out=True) validate : boolean If True, call the object's `validate` method before serializing. Default is False. pretty_print : boolean If True (default), JSON is printed in more-readable form with indentation and spaces. Returns ------- string JSON serialization of the class's grammar properties. """ if validate: self.validate() if pretty_print:
python
{ "resource": "" }
q265
useful_mimetype
train
def useful_mimetype(text): """Check to see if the given mime type is a MIME type which is useful in terms of how
python
{ "resource": "" }
q266
normalize_extension
train
def normalize_extension(extension): """Normalise a file name extension.""" extension = decode_path(extension) if extension is None: return if extension.startswith('.'):
python
{ "resource": "" }
q267
fetch
train
def fetch(url: str, **kwargs) -> Selector: """ Send HTTP request and parse it as a DOM tree. Args: url (str): The url of the site. Returns: Selector: allows you to select parts of HTML text using CSS or XPath expressions. """ kwargs.setdefault('headers', DEFAULT_HEADERS) try: res = requests.get(url, **kwargs)
python
{ "resource": "" }
q268
async_fetch
train
async def async_fetch(url: str, **kwargs) -> Selector: """ Do the fetch in an async style. Args: url (str): The url of the site. Returns: Selector: allows you to select parts of HTML text using CSS or XPath expressions. """ kwargs.setdefault('headers',
python
{ "resource": "" }
q269
links
train
def links(res: requests.models.Response, search: str = None, pattern: str = None) -> list: """Get the links of the page. Args: res (requests.models.Response): The response of the page. search (str, optional): Defaults to None. Search the links you want. pattern (str, optional): Defaults to None. Search the links use a regex pattern. Returns:
python
{ "resource": "" }
q270
save_as_json
train
def save_as_json(total: list, name='data.json', sort_by: str = None, no_duplicate=False, order='asc'): """Save what you crawled as a json file. Args: total (list): Total of data you crawled. name (str, optional): Defaults to 'data.json'. The name of the file. sort_by (str, optional): Defaults to
python
{ "resource": "" }
q271
IlluminantMixin.set_observer
train
def set_observer(self, observer): """ Validates and sets the color's observer angle. .. note:: This only changes the observer angle value. It does no conversion of the color's coordinates. :param str observer: One of '2' or '10'.
python
{ "resource": "" }
q272
IlluminantMixin.set_illuminant
train
def set_illuminant(self, illuminant): """ Validates and sets the color's illuminant. .. note:: This only changes the illuminant. It does no conversion of the color's coordinates. For this, you'll want to refer to :py:meth:`XYZColor.apply_adaptation <colormath.color_objects.XYZColor.apply_adaptation>`. .. tip:: Call this after setting your observer. :param str illuminant: One of the various illuminants.
python
{ "resource": "" }
q273
SpectralColor.get_numpy_array
train
def get_numpy_array(self): """ Dump this color into NumPy array. """ # This holds the obect's spectral data, and will be passed to # numpy.array() to create a numpy array (matrix) for the matrix math # that will be done during the conversion to XYZ. values = [] # Use the required value list to build this dynamically. Default to # 0.0, since that ultimately won't affect the outcome due to the math # involved.
python
{ "resource": "" }
q274
XYZColor.apply_adaptation
train
def apply_adaptation(self, target_illuminant, adaptation='bradford'): """ This applies an adaptation matrix to change the XYZ color's illuminant. You'll most likely only need this during RGB conversions. """ logger.debug(" \- Original illuminant: %s", self.illuminant) logger.debug(" \- Target illuminant: %s", target_illuminant) # If the XYZ values were taken with a different reference white than the # native reference white of the target RGB space, a transformation matrix # must be applied. if self.illuminant != target_illuminant: logger.debug(" \* Applying transformation
python
{ "resource": "" }
q275
BaseRGBColor._clamp_rgb_coordinate
train
def _clamp_rgb_coordinate(self, coord): """ Clamps an RGB coordinate, taking into account whether or not the color is upscaled or not. :param float coord: The coordinate value. :rtype: float :returns: The clamped value.
python
{ "resource": "" }
q276
BaseRGBColor.get_upscaled_value_tuple
train
def get_upscaled_value_tuple(self): """ Scales an RGB color object from decimal 0.0-1.0 to int 0-255. """ # Scale up to 0-255 values. rgb_r = int(math.floor(0.5 + self.rgb_r * 255))
python
{ "resource": "" }
q277
auto_density
train
def auto_density(color): """ Given a SpectralColor, automatically choose the correct ANSI T filter. Returns a tuple with a string representation of the filter the calculated density. :param SpectralColor color: The SpectralColor object to calculate density for. :rtype: float :returns: The density value, with the filter selected automatically. """ blue_density = ansi_density(color, ANSI_STATUS_T_BLUE) green_density = ansi_density(color, ANSI_STATUS_T_GREEN) red_density = ansi_density(color, ANSI_STATUS_T_RED) densities = [blue_density, green_density, red_density] min_density = min(densities) max_density = max(densities)
python
{ "resource": "" }
q278
_get_lab_color1_vector
train
def _get_lab_color1_vector(color): """ Converts an LabColor into a NumPy vector. :param LabColor color: :rtype: numpy.ndarray """
python
{ "resource": "" }
q279
_get_adaptation_matrix
train
def _get_adaptation_matrix(wp_src, wp_dst, observer, adaptation): """ Calculate the correct transformation matrix based on origin and target illuminants. The observer angle must be the same between illuminants. See colormath.color_constants.ADAPTATION_MATRICES for a list of possible adaptations. Detailed conversion documentation is available at: http://brucelindbloom.com/Eqn_ChromAdapt.html """ # Get the appropriate transformation matrix, [MsubA]. m_sharp = color_constants.ADAPTATION_MATRICES[adaptation] # In case the white-points are still input as strings # Get white-points for illuminant if isinstance(wp_src, str): orig_illum = wp_src.lower() wp_src = color_constants.ILLUMINANTS[observer][orig_illum] elif hasattr(wp_src, '__iter__'): wp_src = wp_src if isinstance(wp_dst, str):
python
{ "resource": "" }
q280
apply_chromatic_adaptation
train
def apply_chromatic_adaptation(val_x, val_y, val_z, orig_illum, targ_illum, observer='2', adaptation='bradford'): """ Applies a chromatic adaptation matrix to convert XYZ values between illuminants. It is important to recognize that color transformation results in color errors, determined by how far the original illuminant is from the target illuminant. For example, D65 to A could result in very high maximum deviance. An informative article with estimate average Delta E values for each illuminant conversion may be found at: http://brucelindbloom.com/ChromAdaptEval.html """ # It's silly to have to do this, but some people may want to call this # function directly, so we'll protect them from messing up upper/lower case. adaptation = adaptation.lower() # Get white-points for illuminant if isinstance(orig_illum, str): orig_illum = orig_illum.lower() wp_src = color_constants.ILLUMINANTS[observer][orig_illum] elif hasattr(orig_illum, '__iter__'): wp_src = orig_illum
python
{ "resource": "" }
q281
apply_chromatic_adaptation_on_color
train
def apply_chromatic_adaptation_on_color(color, targ_illum, adaptation='bradford'): """ Convenience function to apply an adaptation directly to a Color object. """ xyz_x = color.xyz_x xyz_y = color.xyz_y xyz_z = color.xyz_z orig_illum = color.illuminant targ_illum = targ_illum.lower() observer = color.observer adaptation = adaptation.lower() # Return individual X, Y, and Z coordinates.
python
{ "resource": "" }
q282
example_lab_to_xyz
train
def example_lab_to_xyz(): """ This function shows a simple conversion of an Lab color to an XYZ color. """ print("=== Simple Example: Lab->XYZ ===") # Instantiate an Lab color object with the given values. lab = LabColor(0.903, 16.296, -2.22)
python
{ "resource": "" }
q283
example_lchab_to_lchuv
train
def example_lchab_to_lchuv(): """ This function shows very complex chain of conversions in action. LCHab to LCHuv involves four different calculations, making this the conversion requiring the most steps. """ print("=== Complex Example: LCHab->LCHuv ===") # Instantiate an LCHab color object with the given values. lchab = LCHabColor(0.903, 16.447, 352.252)
python
{ "resource": "" }
q284
example_lab_to_rgb
train
def example_lab_to_rgb(): """ Conversions to RGB are a little more complex mathematically. There are also several kinds of RGB color spaces. When converting from a device-independent color space to RGB, sRGB is assumed unless otherwise specified with the target_rgb keyword arg. """ print("=== RGB Example: Lab->RGB ===") # Instantiate an Lab color object with
python
{ "resource": "" }
q285
example_rgb_to_xyz
train
def example_rgb_to_xyz(): """ The reverse is similar. """ print("=== RGB Example: RGB->XYZ ===") # Instantiate an Lab color object with the given values.
python
{ "resource": "" }
q286
example_spectral_to_xyz
train
def example_spectral_to_xyz(): """ Instantiate an Lab color object with the given values. Note that the spectral range can run from 340nm to 830nm. Any omitted values assume a value of 0.0, which is more or less ignored. For the distribution below, we are providing an example reading from an X-Rite i1 Pro, which only measures between 380nm and 730nm. """ print("=== Example: Spectral->XYZ ===") spc = SpectralColor( observer='2', illuminant='d50', spec_380nm=0.0600, spec_390nm=0.0600, spec_400nm=0.0641,
python
{ "resource": "" }
q287
example_lab_to_ipt
train
def example_lab_to_ipt(): """ This function shows a simple conversion of an XYZ color to an IPT color. """ print("=== Simple Example: XYZ->IPT ===") # Instantiate an XYZ color object with the given values. xyz = XYZColor(0.5, 0.5, 0.5, illuminant='d65')
python
{ "resource": "" }
q288
apply_RGB_matrix
train
def apply_RGB_matrix(var1, var2, var3, rgb_type, convtype="xyz_to_rgb"): """ Applies an RGB working matrix to convert from XYZ to RGB. The arguments are tersely named var1, var2, and var3 to allow for the passing of XYZ _or_ RGB values. var1 is X for XYZ, and R for RGB. var2 and var3 follow suite. """ convtype = convtype.lower() # Retrieve the appropriate transformation matrix from the constants. rgb_matrix = rgb_type.conversion_matrices[convtype] logger.debug(" \* Applying RGB conversion matrix: %s->%s", rgb_type.__class__.__name__, convtype) # Stuff the RGB/XYZ values into a NumPy matrix for conversion. var_matrix = numpy.array(( var1, var2, var3
python
{ "resource": "" }
q289
color_conversion_function
train
def color_conversion_function(start_type, target_type): """ Decorator to indicate a function that performs a conversion from one color space to another. This decorator will return the original function unmodified, however it will be registered in the _conversion_manager so it can be used to perform color space transformations between color spaces that do not have direct conversion functions (e.g., Luv to CMYK). Note: For a conversion to/from RGB supply the BaseRGBColor class.
python
{ "resource": "" }
q290
Spectral_to_XYZ
train
def Spectral_to_XYZ(cobj, illuminant_override=None, *args, **kwargs): """ Converts spectral readings to XYZ. """ # If the user provides an illuminant_override numpy array, use it. if illuminant_override: reference_illum = illuminant_override else: # Otherwise, look up the illuminant from known standards based # on the value of 'illuminant' pulled from the SpectralColor object. try: reference_illum = spectral_constants.REF_ILLUM_TABLE[cobj.illuminant] except KeyError: raise InvalidIlluminantError(cobj.illuminant) # Get the spectral distribution of the selected standard observer. if cobj.observer == '10': std_obs_x = spectral_constants.STDOBSERV_X10 std_obs_y = spectral_constants.STDOBSERV_Y10 std_obs_z = spectral_constants.STDOBSERV_Z10 else:
python
{ "resource": "" }
q291
Lab_to_XYZ
train
def Lab_to_XYZ(cobj, *args, **kwargs): """ Convert from Lab to XYZ """ illum = cobj.get_illuminant_xyz() xyz_y = (cobj.lab_l + 16.0) / 116.0 xyz_x = cobj.lab_a / 500.0 + xyz_y xyz_z = xyz_y - cobj.lab_b / 200.0 if math.pow(xyz_y, 3) > color_constants.CIE_E: xyz_y = math.pow(xyz_y, 3) else: xyz_y = (xyz_y - 16.0 / 116.0) / 7.787 if math.pow(xyz_x, 3) > color_constants.CIE_E: xyz_x = math.pow(xyz_x, 3) else: xyz_x = (xyz_x - 16.0 / 116.0) / 7.787 if math.pow(xyz_z, 3) > color_constants.CIE_E: xyz_z = math.pow(xyz_z,
python
{ "resource": "" }
q292
Luv_to_XYZ
train
def Luv_to_XYZ(cobj, *args, **kwargs): """ Convert from Luv to XYZ. """ illum = cobj.get_illuminant_xyz() # Without Light, there is no color. Short-circuit this and avoid some # zero division errors in the var_a_frac calculation. if cobj.luv_l <= 0.0: xyz_x = 0.0 xyz_y = 0.0 xyz_z = 0.0 return XYZColor( xyz_x, xyz_y, xyz_z, observer=cobj.observer, illuminant=cobj.illuminant) # Various variables used throughout the conversion. cie_k_times_e = color_constants.CIE_K * color_constants.CIE_E u_sub_0 = (4.0 * illum["X"]) / (illum["X"] + 15.0 * illum["Y"] + 3.0 * illum["Z"]) v_sub_0 = (9.0 * illum["Y"]) / (illum["X"] + 15.0 * illum["Y"] + 3.0 * illum["Z"]) var_u = cobj.luv_u / (13.0 * cobj.luv_l) + u_sub_0 var_v = cobj.luv_v / (13.0 * cobj.luv_l) + v_sub_0 #
python
{ "resource": "" }
q293
xyY_to_XYZ
train
def xyY_to_XYZ(cobj, *args, **kwargs): """ Convert from xyY to XYZ. """ # avoid division by zero if cobj.xyy_y == 0.0: xyz_x = 0.0 xyz_y = 0.0 xyz_z = 0.0 else: xyz_x = (cobj.xyy_x * cobj.xyy_Y) / cobj.xyy_y xyz_y = cobj.xyy_Y
python
{ "resource": "" }
q294
XYZ_to_xyY
train
def XYZ_to_xyY(cobj, *args, **kwargs): """ Convert from XYZ to xyY. """ xyz_sum = cobj.xyz_x + cobj.xyz_y + cobj.xyz_z # avoid division by zero if xyz_sum == 0.0: xyy_x = 0.0 xyy_y = 0.0 else:
python
{ "resource": "" }
q295
XYZ_to_Luv
train
def XYZ_to_Luv(cobj, *args, **kwargs): """ Convert from XYZ to Luv """ temp_x = cobj.xyz_x temp_y = cobj.xyz_y temp_z = cobj.xyz_z denom = temp_x + (15.0 * temp_y) + (3.0 * temp_z) # avoid division by zero if denom == 0.0: luv_u = 0.0 luv_v = 0.0 else: luv_u = (4.0 * temp_x) / denom luv_v = (9.0 * temp_y) / denom illum = cobj.get_illuminant_xyz() temp_y = temp_y / illum["Y"] if temp_y > color_constants.CIE_E: temp_y = math.pow(temp_y, (1.0 / 3.0)) else: temp_y = (7.787 * temp_y) + (16.0 / 116.0) ref_U = (4.0 * illum["X"]) / (illum["X"] + (15.0 * illum["Y"]) + (3.0 * illum["Z"]))
python
{ "resource": "" }
q296
XYZ_to_Lab
train
def XYZ_to_Lab(cobj, *args, **kwargs): """ Converts XYZ to Lab. """ illum = cobj.get_illuminant_xyz() temp_x = cobj.xyz_x / illum["X"] temp_y = cobj.xyz_y / illum["Y"] temp_z = cobj.xyz_z / illum["Z"] if temp_x > color_constants.CIE_E: temp_x = math.pow(temp_x, (1.0 / 3.0)) else: temp_x = (7.787 * temp_x) + (16.0 / 116.0) if temp_y > color_constants.CIE_E: temp_y = math.pow(temp_y, (1.0 / 3.0)) else: temp_y = (7.787 * temp_y) + (16.0 / 116.0) if temp_z > color_constants.CIE_E: temp_z = math.pow(temp_z, (1.0 / 3.0))
python
{ "resource": "" }
q297
XYZ_to_RGB
train
def XYZ_to_RGB(cobj, target_rgb, *args, **kwargs): """ XYZ to RGB conversion. """ temp_X = cobj.xyz_x temp_Y = cobj.xyz_y temp_Z = cobj.xyz_z logger.debug(" \- Target RGB space: %s", target_rgb) target_illum = target_rgb.native_illuminant logger.debug(" \- Target native illuminant: %s", target_illum) logger.debug(" \- XYZ color's illuminant: %s", cobj.illuminant) # If the XYZ values were taken with a different reference white than the # native reference white of the target RGB space, a transformation matrix # must be applied. if cobj.illuminant != target_illum: logger.debug(" \* Applying transformation from %s to %s ", cobj.illuminant, target_illum) # Get the adjusted XYZ values, adapted for the target illuminant. temp_X, temp_Y, temp_Z = apply_chromatic_adaptation( temp_X, temp_Y, temp_Z, orig_illum=cobj.illuminant, targ_illum=target_illum) logger.debug(" \* New values: %.3f, %.3f, %.3f", temp_X, temp_Y, temp_Z) # Apply an RGB working space matrix to the XYZ values (matrix mul). rgb_r, rgb_g, rgb_b = apply_RGB_matrix( temp_X, temp_Y, temp_Z, rgb_type=target_rgb, convtype="xyz_to_rgb") # v linear_channels = dict(r=rgb_r, g=rgb_g, b=rgb_b) # V nonlinear_channels = {} if target_rgb == sRGBColor: for channel in ['r', 'g', 'b']:
python
{ "resource": "" }
q298
RGB_to_XYZ
train
def RGB_to_XYZ(cobj, target_illuminant=None, *args, **kwargs): """ RGB to XYZ conversion. Expects 0-255 RGB values. Based off of: http://www.brucelindbloom.com/index.html?Eqn_RGB_to_XYZ.html """ # Will contain linearized RGB channels (removed the gamma func). linear_channels = {} if isinstance(cobj, sRGBColor): for channel in ['r', 'g', 'b']: V = getattr(cobj, 'rgb_' + channel) if V <= 0.04045: linear_channels[channel] = V / 12.92 else: linear_channels[channel] = math.pow((V + 0.055) / 1.055, 2.4) elif isinstance(cobj, BT2020Color): if kwargs.get('is_12_bits_system'): a, b, c = 1.0993, 0.0181, 0.081697877417347 else: a, b, c = 1.099, 0.018, 0.08124794403514049 for channel in ['r', 'g', 'b']: V = getattr(cobj, 'rgb_' + channel) if V <= c: linear_channels[channel] = V / 4.5 else: linear_channels[channel] = math.pow((V + (a - 1)) / a, 1 / 0.45) else: # If it's not sRGB... gamma = cobj.rgb_gamma
python
{ "resource": "" }
q299
RGB_to_HSV
train
def RGB_to_HSV(cobj, *args, **kwargs): """ Converts from RGB to HSV. H values are in degrees and are 0 to 360. S values are a percentage, 0.0 to 1.0. V values are a percentage, 0.0 to 1.0. """ var_R = cobj.rgb_r var_G = cobj.rgb_g var_B = cobj.rgb_b var_max = max(var_R, var_G, var_B) var_min = min(var_R, var_G, var_B)
python
{ "resource": "" }