Dataset columns:

    query            string, lengths 9 to 3.4k
    document         string, lengths 9 to 87.4k
    metadata         dict
    negatives        sequence of code strings, 4 to 101 items
    negative_scores  sequence of scores, 4 to 101 items
    document_score   string, lengths 3 to 10
    document_rank    string, 102 distinct values
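The rows excerpted below repeat these columns in order: query, document, metadata, negatives, negative_scores, document_score, document_rank. For orientation, here is a minimal sketch of reading them, assuming the rows are published as a Hugging Face dataset (the dataset path is a placeholder, not the real identifier):

from datasets import load_dataset

# "user/code-retrieval-triplets" is a hypothetical path; substitute the
# actual dataset identifier.
ds = load_dataset("user/code-retrieval-triplets", split="train")

row = ds[0]
print(row["query"])             # natural-language description of the code
print(row["document"])          # the matching code snippet
print(len(row["negatives"]))    # between 4 and 101 hard negatives
print(row["document_score"], row["document_rank"])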
Get the scaling ratios required to upsample for the whole world. If resolution is None, then assume it will be upsampled to the native destination resolution. See Dataset.GetNativeResolution(). If places is not None, rounds the ratios to the number of decimal places specified.
def GetWorldScalingRatios(self, resolution=None, places=None):
    if resolution is None:
        resolution = self.GetNativeResolution()

    spatial_ref = self.GetSpatialReference()
    world = spatial_ref.GetWorldExtents().dimensions
    src_pixel_sizes = XY(x=world.x / self.RasterXSize,
                         y=world.y / self.RasterYSize)
    dst_pixel_sizes = spatial_ref.GetPixelDimensions(resolution=resolution)

    xscale = abs(src_pixel_sizes.x / dst_pixel_sizes.x)
    # Make sure that yscale fits within the whole world
    yscale = min(xscale, abs(src_pixel_sizes.y / dst_pixel_sizes.y))

    if places is not None:
        xscale = round(xscale, places)
        yscale = round(yscale, places)

    return XY(x=xscale, y=yscale)
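The XY pair used here (and by GetWorldTmsBorders further down) is never defined in this excerpt; a plausible minimal definition, offered purely as an assumption, is:

from collections import namedtuple

# Hypothetical definition; the excerpt never shows how XY is declared.
XY = namedtuple('XY', ['x', 'y'])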
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def GetScalingRatios(self, resolution=None, places=None):\n if resolution is None:\n resolution = self.GetNativeResolution(transform=None)\n\n # Get the pixel dimensions in map units. There is no custom transform,\n # because it makes no sense to compute a pixel ratio for a\n # reprojection.\n spatial_ref = self.GetSpatialReference()\n dst_pixel_width, dst_pixel_height = spatial_ref.GetPixelDimensions(\n resolution=resolution\n )\n src_pixel_width, src_pixel_height = self.GetPixelDimensions()\n\n xscale = abs(src_pixel_width / dst_pixel_width)\n yscale = abs(src_pixel_height / dst_pixel_height)\n\n if places is not None:\n xscale = round(xscale, places)\n yscale = round(yscale, places)\n\n return XY(x=xscale, y=yscale)", "def UResolution(self, *args):\n return _Adaptor3d.Adaptor3d_Surface_UResolution(self, *args)", "def zoom(self):\n res = np.max(self.metadata[\"resolution\"])\n\n if self.atlas_name == \"allen_human_500um\":\n logger.debug(\n \"ATLAS: setting zoom manually for human atlas, atlas needs fixing\"\n )\n return 350\n else:\n return 40 / res", "def resolution_range(self) -> Optional[float]:\n return self._get_property(RESOLUTION_RANGE_PROP, float)", "def scale(self):\n return self.scale_factor / CONSTANTS.AU", "def set_rscale(self, top, bottom=0, round_up=False):\n if self.shape == 'circle':\n r = top\n elif self.shape == 'polygon':\n angle_of_slice = 2 * np.pi / self.size\n r = top / np.cos(angle_of_slice / 2.)\n if round_up:\n r = np.ceil(r)\n else:\n # this should never happen since this is checked for in class\n # creation\n raise ValueError('unknown value for `frame`: %s' % self.shape)\n self.set_ylim(bottom, r)", "def _compute_output_resolution(input_spatial_resolution, kernel_size, stride,\n total_padding):\n if (input_spatial_resolution is None) or (kernel_size is None) or (\n stride is None) or (total_padding is None):\n return None\n return int(\n math.ceil((\n input_spatial_resolution + total_padding - kernel_size + 1) / stride))", "def _get_scaling(root):\n dpi = root.winfo_fpixels(\"1i\")\n scaling = dpi / 72.0\n logger.debug(\"dpi: %s, scaling: %s'\", dpi, scaling)\n return scaling", "def span_rbw_ratio(self):\r\n res = self._visa.query(f\"SENSE{self._screen()}:BANDWIDTH:RESOLUTION:RATIO?\")\r\n return 1 / float(res)", "def scaling(self):\n return self.__scaling", "def getResolution(self):\n return self.resolution", "def getResolution(self):\n return self._lowLevelGetDeviceResolution()", "def scaling(self):\n return self._scaling", "def scaling(self):\n return self._scaling", "def round_filters(filters, global_params):\n multiplier = global_params.width_coefficient\n if not multiplier:\n return filters\n divisor = global_params.depth_divisor\n min_depth = global_params.min_depth\n filters *= multiplier\n min_depth = min_depth or divisor\n new_filters = max(min_depth, int(filters + divisor / 2) // divisor * divisor)\n if new_filters < 0.9 * filters: # prevent rounding by more than 10%\n new_filters += divisor\n return int(new_filters)", "def round_filters(filters, global_params):\n multiplier = global_params.width_coefficient\n if not multiplier:\n return filters\n divisor = global_params.depth_divisor\n min_depth = global_params.min_depth\n filters *= multiplier\n min_depth = min_depth or divisor\n new_filters = max(min_depth, int(filters + divisor / 2) // divisor * divisor)\n if new_filters < 0.9 * filters: # prevent rounding by more than 10%\n new_filters += divisor\n return int(new_filters)", "def round_filters(filters, global_params):\n multiplier = 
global_params.width_coefficient\n if not multiplier:\n return filters\n divisor = global_params.depth_divisor\n min_depth = global_params.min_depth\n filters *= multiplier\n min_depth = min_depth or divisor\n new_filters = max(min_depth, int(filters + divisor / 2) // divisor * divisor)\n if new_filters < 0.9 * filters: # prevent rounding by more than 10%\n new_filters += divisor\n return int(new_filters)", "def GetResolution(vDataSet):\r\n xmin,xmax,ymin,ymax,zmin,zmax = GetExtent(vDataSet)\r\n nx,ny,nz = vDataSet.GetSizeX(),vDataSet.GetSizeY(),vDataSet.GetSizeZ()\r\n\r\n return (xmax-xmin)/nx, (ymax-ymin)/ny, (zmax-zmin)/nz", "def get_resolution(self, curvename):\n\n if curvename == 'flank':\n return self.points_flank\n elif curvename == 'fillet':\n return self.points_fillet\n elif curvename == 'tip':\n return self.points_tip\n elif curvename == 'root':\n return self.points_root\n elif curvename == 'shaft':\n return self.points_shaft\n elif curvename == 'width':\n return self.points_width", "def _get_uniform_ratios(self, context):\n min_ratio = 0.\n max_ratio = 1.\n target = (self._min_ratio + self._max_ratio) / 2\n flops = context.eval_graph.flops()\n model_size = context.eval_graph.numel_params()\n ratios = None\n while min_ratio < max_ratio:\n ratio = (max_ratio + min_ratio) / 2\n ratios = [ratio] * len(self._pruned_param_names)\n param_shape_backup = {}\n self._prune_parameters(\n context.eval_graph,\n context.scope,\n self._pruned_param_names,\n ratios,\n context.place,\n only_graph=True,\n param_shape_backup=param_shape_backup)\n\n pruned_flops = 1 - (float(context.eval_graph.flops()) / flops)\n pruned_size = 1 - (float(context.eval_graph.numel_params()) /\n model_size)\n for param in param_shape_backup.keys():\n context.eval_graph.var(param).set_shape(param_shape_backup[\n param])\n\n if abs(pruned_flops - target) < 1e-2:\n break\n if pruned_flops > target:\n max_ratio = ratio\n else:\n min_ratio = ratio\n _logger.info('Get ratios: {}'.format([round(r, 2) for r in ratios]))\n return ratios", "def get_time_ratio(self) -> Optional[float]:\n time_ratios: list[float] = []\n for lt in self.camera_placements.values():\n time_ratio = lt.get_time_ratio()\n if time_ratio is not None:\n time_ratios.append(time_ratio)\n\n if len(time_ratios) == 0:\n return None\n return fsum(time_ratios) / len(time_ratios)", "def standardizeRatios( self, ratios ):\n\t\tratios_standardized = ratios.copy()\n\t\tzscore = lambda x: ( x - x.mean() ) / x.std()\n\t\tfor row in ratios.iterrows():\n\t\t\tratios_standardized.loc[ row[0] ] = zscore( row[1] )\n\t\treturn ratios_standardized", "def calculateRatio(levelDims):\n highestReso = np.asarray(levelDims[0])\n lowestReso = np.asarray(levelDims[-1])\n Xratio, Yratio = highestReso/lowestReso\n return (Xratio, Yratio)", "def _round_filters(self, filters):\n filters *= self.width_coefficient\n new_filters = max(\n self.depth_divisor,\n int(filters + self.depth_divisor / 2)\n // self.depth_divisor\n * self.depth_divisor,\n )\n # Make sure that round down does not go down by more than 10%.\n if new_filters < 0.9 * filters:\n new_filters += self.depth_divisor\n return int(new_filters)", "def _get_precision(self) -> float:\n precicions = [PRECISION_WHOLE, PRECISION_HALVES, PRECISION_TENTHS]\n static_info = self._static_info\n if static_info.visual_current_temperature_step != 0:\n step = static_info.visual_current_temperature_step\n else:\n step = static_info.visual_target_temperature_step\n for prec in precicions:\n if step >= prec:\n return prec\n # Fall back to highest 
precision, tenths\n return PRECISION_TENTHS", "def pixel_size_ratio(self):\n return 2**(self.levels[-1] - self.levels[0])", "def upsample(self, method):\n from scipy.signal import resample\n from scipy.ndimage.interpolation import zoom\n #print \"mm: 100 x 100 x 131\"\n #print \"Dims:\", self.D.shape\n fact = np.array(self.info.shape).astype(\"float32\") / np.array(self.info.read_shape).astype(\"float32\")+0.00001 # hrmpf!!\n if method == \"zoom\":\n print \"Resampling...\"\n self.D = zoom(self.D, fact).astype(\"float32\")\n elif method == \"resample\":\n print \"Resampling...\"\n a = self.info.resample_ax\n s = self.info.shape[a]\n self.D = resample(self.D, s, axis=a, window=10).astype(\"float32\")\n elif method == None:\n pass\n else:\n raise NotImplementedError(\"Unknown upsampling method: %s\" % method)\n #print \"Dims:\", self.D.shape\n print \"done.\"", "def _infer_scale(\n print_h: Measurement, print_w: Measurement, viewbox_h: float, viewbox_w: float\n) -> float:\n if any(x < 0 for x in (print_h.value, print_w.value, viewbox_h, viewbox_w)):\n msg = \"Negative values are not allowed\"\n raise ValueError(msg)\n\n candidate_scales: set[float] = set()\n if print_w.value and viewbox_w:\n candidate_scales.add(print_w.value / viewbox_w)\n if print_h.value and viewbox_h:\n candidate_scales.add(print_h.value / viewbox_h)\n if candidate_scales:\n # size of picture is determined by print area\n return min(candidate_scales)\n if any([print_w.value, print_h.value]):\n msg = \"All potential scales would be infinite.\"\n raise ValueError(msg)\n # a print unit was given, but not a print size. Size of picture is determined\n # by interpreting viewbox dimensions as print_width or print_height units\n return print_w.native_unit.value[1]", "def getScale(self):\n return _libsbml.Unit_getScale(self)", "def to_inch(self):\r\n if self.units != 'inch':\r\n self.units = 'inch'\r\n for statement in self.statements:\r\n statement.to_inch()\r\n for tool in iter(self.tools.values()):\r\n tool.to_inch()\r\n for primitive in self.primitives:\r\n primitive.to_inch()\r\n for hit in self.hits:\r\n hit.to_inch()", "def _get_rupture_dimensions(src, mag, nodal_plane):\n area = src.magnitude_scaling_relationship.get_median_area(\n mag, nodal_plane.rake)\n rup_length = math.sqrt(area * src.rupture_aspect_ratio)\n rup_width = area / rup_length\n seismogenic_layer_width = (src.lower_seismogenic_depth\n - src.upper_seismogenic_depth)\n max_width = (seismogenic_layer_width\n / math.sin(math.radians(nodal_plane.dip)))\n if rup_width > max_width:\n rup_width = max_width\n rup_length = area / rup_width\n return rup_length, rup_width", "def change_resolution(img):\n scale_factor = np.random.choice(list(range(0, 6, 2)))\n if scale_factor == 0:\n return img\n downsample = nn.AvgPool2d(scale_factor)\n upsample = nn.UpsamplingNearest2d(scale_factor=scale_factor)\n new_res_img = upsample(downsample(img.unsqueeze(dim=1))).squeeze()\n return new_res_img", "def depolarization_ratio(self):\r\n if self._depol_ratio is not None:\r\n return round(self._depol_ratio,3)\r\n else:\r\n return self._depol_ratio", "def fraction_full_scale(self):\n return self._fraction_full_scale", "def scaling_factor(self):\n bin_scale = self.spabins * self.spebins\n return bin_scale * self.int_time", "def scale(self, factors):\n if isinstance(factors, numbers.Number):\n factors = np.ones(self.dim) * factors;\n self.raw_wires.scale(factors);", "def doppler_scale(self):\n return self._dopplerscale", "def getScale(self):\n return self.factor**self.turnOn", "def 
scale_up(self):\n self.load *= 10\n for pp in self.powerplants:\n if pp[\"type\"] != \"windturbine\":\n pp[\"pmin\"] *= 10\n pp[\"pmax\"] *= 10", "def compute_resolution(zoom, size_px):\n # Calibration data:\n dist_in_um = 10\n dist_in_px = np.array([21.13, 19.62, 8.93])\n zooms = np.array([1.5, 3, 4.5])\n image_max_sizes = np.array([330, 610, 410])\n \n return np.mean((dist_in_um/dist_in_px) * (zoom/zooms) * (image_max_sizes/size_px))", "def scale(self, points, inplace=True):\n points = np.array(points).astype(float)\n if inplace==False:\n points = points.copy()\n # if len(points.shape) == 1:\n # points = points[None,:]\n # if len(points.shape) != 2:\n # logger.error(\"cannot scale array of dimensions\".format(len(points.shape)))\n points -= self.origin\n points /= self.scale_factor\n return points", "def set_ui_scale():\n # TODO test on other OS and resolutions\n moniter_h = QtWidgets.QDesktopWidget().screenGeometry(-1).height()\n if sys.platform == 'win32':\n if moniter_h == 1080:\n scale = 1.0\n elif moniter_h == 1440:\n scale = 1.0\n else:\n scale = 1.0\n elif sys.platform == 'linux':\n if moniter_h == 1080:\n scale = 1.0\n elif moniter_h == 1440:\n scale = 1.23\n else:\n scale = 1.4\n elif sys.platform == 'darwin':\n if moniter_h == 1080:\n scale = 1.0\n elif moniter_h == 1440:\n scale = 1.25\n else:\n scale = 1.55\n return scale", "def upsample_nearest(input, size=None, scale_factor=None):\n return interpolate(input, size, scale_factor, 'nearest')", "def castSize(self, scale):\n return self.camera.sensorSize * scale", "def _set_resolution( self ):\r\n offset = 0\r\n # if current and skinned resolutions differ and skinned resolution is not\r\n # 1080i or 720p (they have no 4:3), calculate widescreen offset\r\n if ( ( not ( self.currentResolution == self.resolution ) ) and self.resolution > 1 ):\r\n # check if current resolution is 16x9\r\n if ( self.currentResolution == 0 or self.currentResolution % 2 ): iCur16x9 = 1\r\n else: iCur16x9 = 0\r\n # check if skinned resolution is 16x9\r\n if ( self.resolution % 2 ): i16x9 = 1\r\n else: i16x9 = 0\r\n # calculate widescreen offset\r\n offset = iCur16x9 - i16x9\r\n self.win.setCoordinateResolution( self.resolution + offset )", "def _update_suggested_precision(self) -> None:\n assert self.registry_entry\n\n device_class = self.device_class\n display_precision = self.suggested_display_precision\n default_unit_of_measurement = (\n self.suggested_unit_of_measurement or self.native_unit_of_measurement\n )\n unit_of_measurement = self.unit_of_measurement\n\n if (\n display_precision is not None\n and default_unit_of_measurement != unit_of_measurement\n and device_class in UNIT_CONVERTERS\n ):\n converter = UNIT_CONVERTERS[device_class]\n\n # Scale the precision when converting to a larger or smaller unit\n # For example 1.1 Wh should be rendered as 0.0011 kWh, not 0.0 kWh\n ratio_log = log10(\n converter.get_unit_ratio(\n default_unit_of_measurement, unit_of_measurement\n )\n )\n ratio_log = floor(ratio_log) if ratio_log > 0 else ceil(ratio_log)\n display_precision = max(0, display_precision + ratio_log)\n\n if display_precision is None and (\n DOMAIN not in self.registry_entry.options\n or \"suggested_display_precision\" not in self.registry_entry.options\n ):\n return\n sensor_options: Mapping[str, Any] = self.registry_entry.options.get(DOMAIN, {})\n if (\n \"suggested_display_precision\" in sensor_options\n and sensor_options[\"suggested_display_precision\"] == display_precision\n ):\n return\n\n registry = er.async_get(self.hass)\n 
sensor_options = dict(sensor_options)\n sensor_options.pop(\"suggested_display_precision\", None)\n if display_precision is not None:\n sensor_options[\"suggested_display_precision\"] = display_precision\n registry.async_update_entity_options(\n self.entity_id, DOMAIN, sensor_options or None\n )", "def set_assists_ratio(self):\n bx = self.get_standard_stats()\n tcInt = float(bx[\"t2p_int\"] + bx[\"t3p_int\"])\n denominador = tcInt + (0.44 * float(bx[\"tl_int\"])) + float(bx[\"assists\"]) +float(bx[\"turnovers\"])\n numerador = float(bx[\"assists\"])\n result = 0.00\n if denominador > 0:\n result = (numerador / denominador) * 100\n self.assists_ratio = \"%.2f\" % round(result, 2)", "def get_samp_rates(self):\n return _uhd_swig.usrp_source_get_samp_rates(self)", "def normalize(self):\n normalized = map(\n operator.methodcaller(\"normalize\"), self.gauges)\n\n # Convert the result to the list of normalized\n # float values.\n return list(map(float, normalized))", "def ratio(self):\n return float(self.max_width) / self.max_height", "def _pixel_scale(self, width=None, height=None, scale=None):\n if numpy.count_nonzero([width is not None, height is not None, scale is not None]) > 1:\n raise ValueError(\"Specify only one of width, height, or scale.\")\n if width is not None:\n scale = width / self._width\n elif height is not None:\n scale = height / self._height\n elif scale is None:\n scale = 1.0\n return scale", "def raw_resolution(resolution, splitter=False):\n width, height = resolution\n if splitter:\n fwidth = (width + 15) & ~15\n else:\n fwidth = (width + 31) & ~31\n fheight = (height + 15) & ~15\n return fwidth, fheight", "def find_suggested_tonemap_scale(session):\n avg_film_luminance = session.GetFilm().GetFilmY()\n return (1.25 / avg_film_luminance * (118 / 255))\n\n # TODO\n # measure this all the time, show a message to the user if\n # abs(old - new) > threshold\n # so the user can set the new value with one click\n\n # imagepipeline = scene.camera.data.luxcore.imagepipeline\n # imagepipeline.tonemapper.linear_scale = suggested_linear_scale\n # imagepipeline.tonemapper.use_autolinear = False", "def get_scaling(self):\n if self.constrain_navigation:\n self.activate_navigation_constrain()\n return self.sx, self.sy", "def normalise(self):\n s = self._sum()\n if s != 0:\n for element, value in self.focals.items():\n self.focals[element] /= s", "def _round_channels(channels, multiplier=1.0, divisor=8, channel_min=None):\n if not multiplier:\n return channels\n\n channels *= multiplier\n channel_min = channel_min or divisor\n new_channels = max(\n int(channels + divisor / 2) // divisor * divisor,\n channel_min)\n # Make sure that round down does not go down by more than 10%.\n if new_channels < 0.9 * channels:\n new_channels += divisor\n return new_channels", "def rounding_decimal_places(self, rounding_decimal_places):\n if self.local_vars_configuration.client_side_validation and rounding_decimal_places is None: # noqa: E501\n raise ValueError(\"Invalid value for `rounding_decimal_places`, must not be `None`\") # noqa: E501\n\n self._rounding_decimal_places = rounding_decimal_places", "def _round_sampling_rate(sampling_rate):\n # check if sampling rate is below 5 Hz in that case always round to one\n if sampling_rate < 5:\n\n # set sampling rate to 1\n rounded_sampling_rate = 1\n\n else:\n\n # round to the nearest 10 digit\n rounded_sampling_rate = round(sampling_rate/10) * 10\n\n return rounded_sampling_rate", "def aspectRatios(self):\n return np.array([f.aspectRatio() for f in self])", 
"def scale(self) -> Tuple[float, float]:\n return self._scale", "def ret_vol_ratio(self) -> float:\n return self.geo_ret / self.vol", "def normalize(self, redraw=True):\n if self.screen_rect is not None:\n self.relative_sizes = []\n\n height = self.screen_rect.height\n left, right = self._get_columns()\n\n if left.count > 0:\n self.relative_sizes += self._split_integer(height, left.count)\n if right.count > 0:\n self.relative_sizes += self._split_integer(height, right.count)\n\n if redraw:\n self.group.layout_all()\n self.do_normalize = False", "def scale(self):\n return self._scale", "def get_scale():\r\n\r\n \r\n return 0.5", "def adjust_ratio(cat1,cat2, sm_sentlen, pos_unigrs):\n cat1_count = pos_unigrs.get(cat1, 0.0)\n cat2_count = pos_unigrs.get(cat2, 0.0)\n if cat1_count == 0.0 or cat2_count == 0.0:\n return 0.0\n else:\n return smooth(cat1_count, sm_sentlen) / smooth(cat2_count, sm_sentlen)", "def get_samp_rates(self):\n return _uhd_swig.usrp_sink_get_samp_rates(self)", "def pe_ratio(self):\n if self._pe_ratio == None:\n return float('inf')\n return self._pe_ratio", "def upsample(\n input,\n size=None,\n scale_factor=None,\n mode='nearest',\n align_corners=False,\n):\n return interpolate(input, size, scale_factor, mode, align_corners)", "def _round_repeats(self, repeats):\n return int(math.ceil(self.depth_coefficient * repeats))", "def scale(self):\n return self._gev_bijector.scale", "def get_current_resolution(self):\n return self.display_info[\"width\"], self.display_info[\"height\"]", "def get_zoom(self) -> float:\n transform = self.transform()\n cur_scale = (transform.m11(), transform.m22())\n return float(f\"{cur_scale[0] - 1.0:0.2f}\")", "def Resolution(self, *args):\n return _Adaptor3d.Adaptor3d_Curve_Resolution(self, *args)", "def get_scale(self):\r\n try: return self.scale[0], self.scale[1], self.scale[2]\r\n except: return self.scale, self.scale, self.scale", "def _get_best_ratios(self, context):\n _logger.info('_get_best_ratios')\n pruned_params = []\n for param in context.eval_graph.all_parameters():\n if re.match(self.pruned_params, param.name()):\n pruned_params.append(param.name())\n\n min_ratio = 0.\n max_ratio = 1.\n\n flops = context.eval_graph.flops()\n model_size = context.eval_graph.numel_params()\n\n while min_ratio < max_ratio:\n ratio = (max_ratio + min_ratio) / 2\n _logger.debug(\n '-----------Try pruning ratio: {:.2f}-----------'.format(ratio))\n ratios = [ratio] * len(pruned_params)\n param_shape_backup = {}\n self._prune_parameters(\n context.eval_graph,\n context.scope,\n pruned_params,\n ratios,\n context.place,\n only_graph=True,\n param_shape_backup=param_shape_backup)\n\n pruned_flops = 1 - (float(context.eval_graph.flops()) / flops)\n pruned_size = 1 - (float(context.eval_graph.numel_params()) /\n model_size)\n _logger.debug('Pruned flops: {:.2f}'.format(pruned_flops))\n _logger.debug('Pruned model size: {:.2f}'.format(pruned_size))\n for param in param_shape_backup.keys():\n context.eval_graph.var(param).set_shape(param_shape_backup[\n param])\n\n if abs(pruned_flops - self.target_ratio) < 1e-2:\n break\n if pruned_flops > self.target_ratio:\n max_ratio = ratio\n else:\n min_ratio = ratio\n _logger.info('Get ratios: {}'.format([round(r, 2) for r in ratios]))\n return pruned_params, ratios", "def precision(self):\n self.overall_precision = precision_score(\n self.y_true, self.y_pred, average = self.average_type).round(self.digits_count_fp)\n self.classes_precision = precision_score(\n self.y_true, self.y_pred, average = 
None).round(self.digits_count_fp)", "def effective_resolution(self) -> Tuple[int, int]:\n import numpy as np\n\n assert self.info.resolution, 'No base resolution specified'\n rot = (self.info.rotate or 0) * math.pi / 180\n sin = math.sin(rot)\n cos = math.cos(rot)\n scale = np.array([[self.info.scale_x or 1.0, self.info.scale_y or 1.0]])\n resolution = np.array([[self.info.resolution[0], self.info.resolution[1]]])\n rot_matrix = np.array([[sin, cos], [cos, sin]])\n resolution = (scale * abs(np.cross(rot_matrix, resolution)))[0]\n return int(round(resolution[0])), int(round(resolution[1]))", "def prescaler(self) -> int:", "def unit_of_measurement(self) -> Any:\n return PERCENTAGE", "def guess_scaling(name, spectrum):\n spectra = '%s/disp/%s.1d.fits' % (name, zerocount(spectrum))\n skyname = '%s/sky.1d.fits' % name\n spectrafits = pyfits.open(spectra)\n skyfits = pyfits.open(skyname)\n scalings = []\n for line in LINES:\n spec_peak, spec_cont = get_peak_cont(spectrafits, line, 5)\n sky_peak, sky_cont = get_peak_cont(skyfits, line, 5)\n scale = ((spec_peak - spec_cont) / (sky_peak - sky_cont))\n scalings.append(scale)\n return avg(*scalings)", "def resolution(self) -> int:\n return self.options.resolution", "def FindScale(self):\n\n ## 6 and from the cv code the distance is 6 then we are good\n print(\"TODO: Very hard\")", "def _scale_param(self, resid_us):\n return((resid_us**2).sum().sum() / self.dof)", "def convert_volts(self,data,places):\n volts = (data * self.max) / float(255)\n volts = round(volts,places)\n return volts", "def add_rup_params(self, rupture):\n for param in self.REQUIRES_RUPTURE_PARAMETERS:\n if param == 'mag':\n value = rupture.mag\n elif param == 'strike':\n value = rupture.surface.get_strike()\n elif param == 'dip':\n value = rupture.surface.get_dip()\n elif param == 'rake':\n value = rupture.rake\n elif param == 'ztor':\n value = rupture.surface.get_top_edge_depth()\n elif param == 'hypo_lon':\n value = rupture.hypocenter.longitude\n elif param == 'hypo_lat':\n value = rupture.hypocenter.latitude\n elif param == 'hypo_depth':\n value = rupture.hypocenter.depth\n elif param == 'width':\n value = rupture.surface.get_width()\n else:\n raise ValueError('%s requires unknown rupture parameter %r' %\n (type(self).__name__, param))\n setattr(rupture, param, value)", "def rolloff_scale(self):\n return self._rolloffscale", "def handle_proportionality_factors(self, scaling_candidates):\n\n if not len(scaling_candidates):\n return\n\n scalingsForHierarchicalIndices = [\n self.optimization_parameter_name_to_index[x] for x in\n scaling_candidates]\n order = np.argsort(scalingsForHierarchicalIndices)\n scalingsForHierarchicalIndices = \\\n [scalingsForHierarchicalIndices[i] for i in order]\n scaling_candidates = [scaling_candidates[i] for i in order]\n\n\n self.f.require_dataset(\"/scalingParameterIndices\",\n shape=(len(scalingsForHierarchicalIndices),),\n dtype='<i4',\n data=scalingsForHierarchicalIndices)\n print(Fore.CYAN, \"Number of proportionality factors for \"\n \"hierarchical optimization: %d\"\n % len(scalingsForHierarchicalIndices))\n\n # find usages for the selected parameters\n use = self.get_analytical_parameter_table(scaling_candidates,\n 'observable')\n\n self.f.require_dataset(\"/scalingParametersMapToObservables\",\n shape=(len(use), 3),\n dtype='<i4', data=use)", "def parallel_scale(self):\n return self.camera.parallel_scale", "def precisions(self):\n raise NotImplementedError", "def normalise( self, rWantedMax = 100. 
):\n nWantedMax = int( self.getSampleMaxValue() * rWantedMax / 100)\n nCurrentMax = max( self.data.max(), -self.data.min() )\n rRatio = nWantedMax / float(nCurrentMax)\n if( nCurrentMax == nWantedMax ):\n return False\n logging.info( \"nCurrentMax: %s\" % nCurrentMax )\n logging.info( \"nWantedMax: %s\" % nWantedMax ) \n logging.info( \"applying a %f ratio to the whole sound\" % rRatio )\n self.data *= rRatio # another option is to make a np.round(self.data*rRatio), but it's perhaps less linear (on a linear elevation for example)\n return True", "def auto_scale_factor(self):\r\n return self.gref.auto_scale_factor", "def upround(x, base):\n return base * math.ceil(float(x) / base)", "def round_filters(filters, divisor=depth_divisor):\n filters *= width_coefficient\n new_filters = max(divisor, int(filters + divisor / 2) // divisor * divisor)\n # Make sure that round down does not go down by more than 10%.\n if new_filters < 0.9 * filters:\n new_filters += divisor\n return int(new_filters)", "def calculate_rf_size(rf_size, downsample):\n h = 61 # 24\" monitor\n d = 10 # 10cm from the right eye\n r = 1080 / downsample # Vertical resolution\n d_px = np.degrees(math.atan2(h / 2, d)) / (r / 2)\n return rf_size * d_px", "def computeRrup(self, lon, lat, depth):\n return self._computeRdist('Rrup', lon, lat, depth)", "def relative_range(self):\n self.calculate_relative_mags()\n string = '{:.0f}-{:.0f}Hz: {:.5f}'\n s_ind = self.get_bin(self.s_freq)\n e_ind = self.get_bin(self.e_freq)\n lst = self.rel_mags[s_ind:e_ind+1]\n return sum(lst)/len(lst)", "def scaling_adjustment(self):\n return self._scaling_adjustment", "def getResolution(s) -> int:\n unit = getDurationUnit(s)\n #number of ticks is 1 / unit (if that is an integer)\n ticksPerQuarter = unit.denominator / unit.numerator\n if ticksPerQuarter.is_integer():\n return int(unit.denominator / unit.numerator)\n else:\n print(s.filePath, ' non integer number of ticks per Quarter')\n return 0", "def set_resolution(self):\n file_name = os.path.basename(self.in_file)\n if '1KM' in file_name:\n self.resolution = 1000\n else:\n raise ValueError(\n 'Cant read this data, please check its resolution: {}'.format(self.in_file))", "def _round_filters(self, filters, width_coefficient, depth_divisor, min_depth):\n\n if not width_coefficient:\n return filters\n\n filters *= width_coefficient\n min_depth = min_depth or depth_divisor\n new_filters = max(\n min_depth,\n int(filters + depth_divisor / 2) // depth_divisor*depth_divisor\n )\n # Make sure that round down does not go down by more than 10%.\n if new_filters < 0.9 * filters:\n new_filters += depth_divisor\n\n return int(new_filters)" ]
[ "0.74602205", "0.533336", "0.5087092", "0.50720173", "0.50386196", "0.49780598", "0.49676868", "0.49499795", "0.48785496", "0.48708156", "0.4858904", "0.48400638", "0.48303235", "0.48303235", "0.47874883", "0.47874883", "0.47874883", "0.47721013", "0.4771545", "0.47600517", "0.4727151", "0.47226942", "0.47173667", "0.47161293", "0.4699207", "0.469877", "0.46976975", "0.46971178", "0.46791884", "0.46785682", "0.46746865", "0.4670468", "0.46703637", "0.4664602", "0.46335375", "0.46259484", "0.4603217", "0.45926762", "0.45784098", "0.45761183", "0.45645064", "0.4564204", "0.4562376", "0.4552822", "0.45473462", "0.4543624", "0.45318654", "0.4523843", "0.45091927", "0.45051372", "0.44830972", "0.44783482", "0.44695678", "0.44529727", "0.4449248", "0.44447887", "0.4440811", "0.44385418", "0.44378328", "0.44315165", "0.4428458", "0.4425822", "0.44228685", "0.4419239", "0.44187066", "0.44187036", "0.44154528", "0.44119978", "0.4411682", "0.44106328", "0.44041172", "0.44036937", "0.44032833", "0.44021884", "0.43954882", "0.4393259", "0.4393081", "0.43862754", "0.43851703", "0.43836525", "0.43827793", "0.43819797", "0.43816188", "0.43796414", "0.43778953", "0.4368417", "0.43666047", "0.43651927", "0.43594384", "0.43556908", "0.43467966", "0.43457237", "0.4344513", "0.4342436", "0.4336351", "0.43302658", "0.43277872", "0.4321242", "0.43192276", "0.43147463" ]
document_score: 0.72415364
document_rank: 1
Returns an iterable of TMS tiles that are outside this Dataset.
def GetWorldTmsBorders(self, resolution=None, transform=None):
    world_extents = self.GetWorldTmsExtents(resolution=resolution,
                                            transform=transform)
    data_extents = self.GetTmsExtents(resolution=resolution,
                                      transform=transform)
    return (XY(x, y)
            for x in range(world_extents.lower_left.x,
                           world_extents.upper_right.x)
            for y in range(world_extents.lower_left.y,
                           world_extents.upper_right.y)
            if XY(x, y) not in data_extents)
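A usage sketch (dataset stands in for an instance of the class these methods belong to; the resolution value is arbitrary):

# Hypothetical usage: enumerate the world tiles at zoom level 3 that this
# dataset does not cover, e.g. to fill them with a blank tile.
for tile in dataset.GetWorldTmsBorders(resolution=3):
    print('no data at x=%d, y=%d' % (tile.x, tile.y))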
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def open_tiles(self):\n return list(filter(None, self.empty))", "def find_tiles(self):\n lat1, lat2 = self.bbox.south, self.bbox.north\n lon1, lon2 = self.bbox.west, self.bbox.east\n # convert to geographic bounding box\n minlat, minlon = min(lat1, lat2), min(lon1, lon2)\n maxlat, maxlon = max(lat1, lat2), max(lon1, lon2)\n\n # convert to tile-space bounding box\n _, xmin, ymin = self.mercator(maxlat, minlon, self.zoom)\n _, xmax, ymax = self.mercator(minlat, maxlon, self.zoom)\n\n # generate a list of tiles\n xs, ys = range(xmin, xmax + 1), range(ymin, ymax + 1)\n tile_list = [(self.zoom, x, y) for (y, x) in product(ys, xs)]\n\n return tile_list", "def list_tiles_covering_land(self):\n\n land_tiles = Equi7Grid._static_data[self.core.tag][\"coverland\"][\n self.core.tiletype]\n return list(land_tiles)", "def get_tiles(self) -> list:\n n_rows = self.mosaic_dimensions[0]\n n_columns = self.mosaic_dimensions[1]\n return [\n self.get_tile(i_row, i_column)\n for i_row in range(n_rows)\n for i_column in range(n_columns)\n ]", "def _get_tiles(self, width: Numeric) -> List[Polygon]:\n min_x, min_y, max_x, max_y = self._get_rounded_bounding_box(self.geom, width)\n tiles = []\n\n for i in range(0, int((max_x - min_x) / width)):\n for j in range(0, int((max_y - min_y) / width)):\n tile = box(\n (i * width) + min_x,\n (j * width) + min_y,\n ((i + 1) * width) + min_x,\n ((j + 1) * width) + min_y,\n )\n\n if self.geom.intersects(tile):\n tiles.append(tile)\n\n return tiles", "def outside(self,region):\n fs = FeatureSet()\n for f in self:\n if(f.isNotContainedWithin(region)):\n fs.append(f)\n return fs", "def available_spots_for(self, tile):\n return [spot for spot in self.available_spots() if self.tile_fits(spot, tile)]", "def tiles(self, width: int, height: int) -> TileSet:\n y_count = len(self.tiling)\n for y_index, y_tile in enumerate(self.tiling):\n\n x_count = len(y_tile)\n for x_index, tile_strength in enumerate(y_tile):\n\n # Doing multiplication before devision here to make sure rounding is correct\n bounding_box = (\n # from (x1, y1)\n int(width * x_index / x_count),\n int(height * y_index / y_count),\n # to (x2, y2)\n int(width * (x_index + 1) / x_count),\n int(height * (y_index + 1) / y_count),\n )\n\n yield bounding_box, tile_strength", "def __init__tiles__(self):\n return [[Tiles(i, j, Tiles.closed) for j in range(self.cols)] for i in range(self.rows)]", "def find_open_tiles(self, arena, units):\r\n tiles = []\r\n for x, y in [(self.x+1, self.y), (self.x, self.y+1), (self.x-1, self.y), (self.x, self.y-1)]:\r\n if arena[x][y] == '.':\r\n tiles.append((x, y))\r\n return tiles", "def missing_tiles(mbtiles, required_tiles):\n for tile in required_tiles:\n if not mbtiles.tile_exists(tile.x, tile.y, tile.z):\n yield tile", "def copy_tiles(self):\n \n return self.tiles", "def get_empty_tiles(self) -> List[Point]:\n\t\tempty_tiles = []\n\t\tfor x in range(self.size):\n\t\t\tfor y in range(self.size):\n\t\t\t\tif self.tiles[x][y] == 0:\n\t\t\t\t\tempty_tiles.append(Point(x,y))\n\t\treturn empty_tiles", "def get_candidate_tiles(self) -> List[Point]:\n\t\tempty_tiles = set()\n\t\tfor x in range(self.size):\n\t\t\tfor y in range(self.size):\n\t\t\t\tif not self.tiles[x][y] == 0:\n\t\t\t\t\tfor d in [[0,1], [1,1], [1,0], [1,-1], [0,-1], [-1,-1], [-1,0], [-1,1]]:\n\t\t\t\t\t\tif x+d[0] >= 0 and y+d[1] >= 0 and x+d[0] < self.size and y+d[1] < self.size and self.tiles[x+d[0]][y+d[1]] == 0:\n\t\t\t\t\t\t\tempty_tiles.add(Point(x+d[0],y+d[1]))\n\t\treturn list(empty_tiles)", "def get_tiles(self):\n\n 
tiles = []\n for x in range(self.position[0],\n self.position[0] + CAR_LENGTH if self.is_horizontal else self.position[0] + CAR_WIDTH):\n for y in range(self.position[1],\n self.position[1] + CAR_WIDTH if self.is_horizontal else self.position[1] + CAR_LENGTH):\n tiles.append((x, y))\n\n return tiles", "def tile_data(self) -> List[TileData]:\n return self._tiles.values()", "def any_empty_tiles(self):\n for i in range(self.TILES_PER_ROW):\n for j in range(self.TILES_PER_ROW):\n if self.main_grid_values[i][j] == 0:\n return True\n\n return False", "def iter_grid_tiles(self):\n all_points = self.grid[0].union(self.grid[1], self.grid[2], {self.position})\n min_x = min(p.x for p in all_points)\n min_y = min(p.y for p in all_points)\n\n if min_x < 0:\n xoffset = -min_x\n elif min_x == 0:\n xoffset = 0\n elif min_x > 0:\n xoffset = min_x\n if min_y < 0:\n yoffset = -min_y\n elif min_y == 0:\n yoffset = 0\n elif min_y > 0:\n yoffset = min_y\n origin = Point(0 + xoffset, 0 + yoffset)\n position = Point(self.position.x + xoffset, self.position.y + yoffset)\n for tile_type in (0, 1, 2):\n for point in self.grid[tile_type]:\n newpoint = Point(point.x + xoffset, point.y + yoffset)\n if newpoint not in (origin, position):\n yield newpoint.x, newpoint.y, tile_type\n yield origin.x, origin.y , 4\n yield position.x, position.y, 3", "def get_tile_slice(self, x_min, x_max, y_min, y_max):\n result = self.tiles[x_min:x_max]\n for column in result:\n column = column[y_min:y_max]\n return result", "def __get_neutral_tiles(self) -> List[List[int]]:\n neutral_tiles = []\n for x in range(self.num_columns):\n for y in range(self.num_rows):\n if self.is_flippable_tile([y, x]):\n neutral_tiles.append([y, x])\n return neutral_tiles", "def test_tiled_iterator_nogen(self):\n tile_no_gen = TiledIterator(\n twod_image=self.test_file_1, overlap_log_2=0\n )\n tile = next(tile_no_gen)\n\n shape = tile.shape\n\n # defaults\n self.assertTrue(shape[0] == 32)\n self.assertTrue(shape[1] == 65)\n self.assertTrue(shape[2] == 65)\n self.assertTrue(shape[3] == 1)\n\n #\n img0 = self.test_data_1[0:65, 0:65]\n np.array_equal(tile, img0)\n\n # no overlap\n tile = next(tile_no_gen)\n img0 = self.test_data_1[65 : 2 * 65, 65 : 2 * 65]\n np.array_equal(tile, img0)\n\n # --- overlapping --- #\n tile_no_gen = TiledIterator(\n twod_image=self.test_file_1, overlap_log_2=2\n )\n\n tile = next(tile_no_gen)\n\n shape = tile.shape\n\n # defaults\n self.assertTrue(shape[0] == 32)\n self.assertTrue(shape[1] == 65)\n self.assertTrue(shape[2] == 65)\n self.assertTrue(shape[3] == 1)\n\n #\n img0 = self.test_data_1[0:65, 0:65]\n np.array_equal(tile, img0)\n\n # 64/(2**2) = 16\n tile = next(tile_no_gen)\n img0 = self.test_data_1[16 : 16 + 65, 16 : 16 + 65]\n np.array_equal(tile, img0)", "def blaze(self):\n visited = set()\n tile_exits = dict((tile, {}) for tile in self.tiles)\n\n def visit(tile):\n # Randomized depth-first search of self.tiles.\n visited.add(tile)\n adj = self.adjacencies(tile, self.tiles)\n self.rand.shuffle(adj)\n for d, t in adj:\n if t not in visited:\n tile_exits[tile][d] = t\n tile_exits[t][self._inverted_dirs[d]] = tile\n visit(t)\n\n visit(next(iter(self.tiles)))\n return tile_exits", "def find_excited_locations(self):\n return np.asarray(np.where(self._grid == 8)).T", "def get_flagged_tile_list ( self ) :\n tile_list = []\n stmt = \"select name from sdb_product where sys003 =\\'T\\'\"\n self.oracle_cursor.arraysize = 100000\n self.oracle_cursor.execute(stmt)\n resultset = self.oracle_cursor.fetchmany()\n if resultset :\n for row in 
resultset :\n tile_list.append(str(row[0]))\n return tile_list", "def tileslist(self, bbox, zoomlevels, tms_scheme=False):\n proj = GoogleProjection(self.tile_size, zoomlevels, tms_scheme)\n return proj.tileslist(bbox)", "def get_empty_slots(self):\n slots = np.reshape(range(0, self.size * self.size), (self.size, self.size))\n\n return slots[~self.tiles_taken]", "def remove_outliers(self, std_tol=1.5):\r\n from lsst.analysis import outlier\r\n for tnum in numpy.unique(self.data[\"tiles\"]):\r\n self.decimate(outlier.valid(self, self.data[\"tiles\"]==tnum, std_tol=std_tol))", "def test_tile_read_not_covering_the_whole_tile():\n bounds = (\n -11271098.442818949,\n 12210356.646387195,\n -10958012.374962866,\n 12523442.714243278,\n )\n tilesize = 16\n with pytest.raises(TileOutsideBounds):\n with rasterio.open(COG) as src_dst:\n reader.part(src_dst, bounds, tilesize, tilesize, minimum_overlap=0.6)", "def redundant_tiles(mbtiles, required_tiles):\n xyz_dict= lambda: defaultdict(xyz_dict)\n\n # Mark all tiles that are required\n marked_tiles = xyz_dict()\n for tile in required_tiles:\n marked_tiles[tile.z][tile.x][tile.y] = True\n\n\n for tile in mbtiles.all_tiles():\n required = marked_tiles[tile.z][tile.x][tile.y]\n if required != True:\n yield tile", "def test_room_has_tiles(self):\n self.assertGreaterEqual(self.room.tile_set.count(), 2)", "def get_tiles():\n\t\t\n\tcursor = get_cursor()\n\t\n\tcursor.execute(\"SELECT * FROM fitmeimages ORDER BY shade ASC, id ASC\")\n\treturn cursor.fetchall();", "def get_work_tiles(self):\n work_tiles = []\n for zoom in self.config[\"zoom_levels\"]:\n bbox = self.config[\"zoom_levels\"][zoom][\"process_area\"]\n work_tiles.extend(self.tile_pyramid.tiles_from_geom(bbox, zoom))\n return work_tiles", "def immed_nbrs(tile):\n return [nbr for nbr in [TILES_BY_IDX.get(xy) for xy in xy_nbrs(tile['xyidx'])]\n if nbr is not None]", "def test_tiles_to_bounds():\n tiles = [morecantile.Tile(x=150, y=182, z=9), morecantile.Tile(x=151, y=182, z=9)]\n assert len(utils.tiles_to_bounds(tiles)) == 4", "def detects_outside_grid(self):\r\n ii = self.rec_track['i']\r\n outside = sum(np.isnan(ii))\r\n\r\n return outside", "def discard_tile(self):\n raise NotImplemented()", "def filter_tile_neighbors(self, coord):\n coord = coord.int_tuple\n if coord[1] <= self.MAX_Y and coord[0] <= self.MAX_X and coord[1] >= \\\n 0 and coord[0] >=\\\n 0 and (self.currentmap.boxAt(coord[0], coord[1])\n == 0 or self.currentmap.boxAt(coord[0], coord[1]) == 2):\n return True\n return False", "def get_all_closed_cells(self):\r\n num_rows = len(self.mine_field)\r\n num_cols = len(self.mine_field[0])\r\n closed_cells = [(row, col)\r\n for row in range(num_rows)\r\n for col in range(num_cols)\r\n if self.mine_field[row][col] == '?']\r\n return closed_cells", "def get_borders(self):\r\n return (self.tiles[0][0], self.tiles[-1][-1])", "def filter_tile_neighbors_metalbox(self, coord):\n coord = coord.int_tuple\n if coord[1] <= self.MAX_Y and coord[0] <= self.MAX_X and coord[1] >=\\\n 0 and coord[0] >= 0 and (self.currentmap.boxAt(coord[0], coord[1])\n == 0 or self.currentmap.boxAt(coord[0],\n coord[1])\n == 2 or self.currentmap.boxAt(coord[0],\n coord[1])\n == 3):\n return True\n return False", "def get_floor_tiles():\n if client.LoggedIn:\n try:\n # Try to catch System.NUllReferenceException race condition\n if client.Map.GetTileWithPlayer(): #\n floor_tiles = list(client.Map.GetTilesOnSameFloor())\n return floor_tiles\n except:\n pass\n\n return None", "def enumerate_tiles(self):\n # Iterates through 
entire game board.\n for row in range(self.rows):\n for col in range(self.cols):\n\n # Doesn't count mines adjacent to mine tiles.\n if self.board[row][col].category == Tiles.mine:\n continue\n mines = 0\n\n # Calculates number of mines surrounding each tile.\n for i in [row-1, row, row+1]:\n for j in [col-1, col, col+1]:\n if (self.valid_tile(i, j) and self.board[i][j].category == Tiles.mine):\n mines += 1\n \n # Sets each game board tile's mine proximity number.\n self.board[row][col] = Tiles(row, col, str(mines))", "def tileslist(self, bbox, zoomlevels, tms_osm=False):\n mercator = GlobalMercator(tms_osm,self.tile_size,zoomlevels)\n return mercator.tileslist(bbox)", "def test_unbounded_tileset_image(self):\n\t\t# Create an 8x6 tileset image placeholder\n\t\tself.expected_tile_width = 8\n\t\tself.expected_tile_height = 6\n\t\tself.expected_rows = self.expected_tile_height\n\t\tself.expected_cols = self.expected_tile_width\n\n\t\tself.test_image = dummy_image(self.expected_width(), self.expected_height())\n\t\tself.test_image_grid = TextureGrid(ImageGrid(self.test_image, self.expected_rows, self.expected_cols))\n\n\t\t# Test creating a TilesetImage without specifying dimensions\n\t\tself.tileset_image = TilesetImage(self.test_image)\n\n\t\tself.assert_tileset_image('Rows and columns not specified.')", "def under_rect(self, rect):\n x_min = self.clampx((rect.left - self._origin.x) // self._cell_size[0])\n x_max = self.clampx((rect.right - self._origin.x) // self._cell_size[0])\n y_min = self.clampy((rect.top - self._origin.y) // self._cell_size[1])\n y_max = self.clampy((rect.bottom - self._origin.y) // self._cell_size[1])\n cells = []\n for ix in range(x_min, x_max + 1):\n for iy in range(y_min, y_max + 1):\n index = iy * self._cell_count[0] + ix\n cells.append(self._cells[index])\n return cells", "def check_end(self) -> Optional[List[Point]]:\n\t\tfor x in range(self.size):\n\t\t\tfor y in range(self.size):\n\t\t\t\tif not self.tiles[x][y] == 0:\n\t\t\t\t\tresult = self.check_around(x, y)\n\t\t\t\t\tif result is not None:\n\t\t\t\t\t\treturn result\n\t\tif len(self.get_empty_tiles()) == 0:\n\t\t\treturn []\n\t\treturn None", "def obstacle_iterator(self):\n for obstacle in self.tmx_data.get_layer_by_name(\"obstacles\"):\n yield obstacle", "def tiles_positions(self) -> Generator[TilePosition, None, None]:\r\n for i in range(self.width * self.height):\r\n yield TilePosition(i % self.width, i // self.width)", "def test_room_has_tiles(self):\n self.assertEqual(self.room.tile_set.count(), self.room.grid_size ** 2)", "def all_descendant_tiles(x, y, zoom, max_zoom):\n if zoom < max_zoom:\n for child_tile in mercantile.children(x, y, zoom):\n yield child_tile\n for desc_tile in all_descendant_tiles(child_tile.x, child_tile.y,\n child_tile.z, max_zoom):\n yield desc_tile", "def get_land_cells(self):\n return {loc: cell for loc, cell in self.landscape.items() if cell.is_mainland}", "def get_modis_tile_list(ds):\n from demcoreg import modis_grid\n modis_dict = {}\n for key in modis_grid.modis_dict:\n modis_dict[key] = ogr.CreateGeometryFromWkt(modis_grid.modis_dict[key])\n geom = geolib.ds_geom(ds)\n geom_dup = geolib.geom_dup(geom)\n ct = osr.CoordinateTransformation(geom_dup.GetSpatialReference(), geolib.wgs_srs)\n geom_dup.Transform(ct)\n tile_list = []\n for key, val in list(modis_dict.items()):\n if geom_dup.Intersects(val):\n tile_list.append(key)\n return tile_list", "def get_occupied_tiles(self):\r\n occupied = np.zeros(self.searchenv.conv.num_tiles)\r\n #Convert current state (positions of 
agents) to tile indices\r\n tiles = self.searchenv.conv.state_to_tile(self.searchstate.positions)\r\n valid_tiles = tiles[self.searchstate.actives == 1]\r\n occupied[valid_tiles] = 1\r\n return occupied", "def iter_waters(self):\n fpred = lambda f: f.is_water()\n return itertools.ifilter(fpred, self.iter_fragments())", "def test_tile_read_dataset_nodata():\n # non-boundless tile covering the nodata part 22-876431-1603670\n bounds = (\n -11663535.70066358,\n 4715027.644399633,\n -11663526.146035044,\n 4715037.199028169,\n )\n tilesize = 16\n with rasterio.open(S3_NODATA_PATH) as src_dst:\n arr, mask = reader.part(src_dst, bounds, tilesize, tilesize)\n assert arr.shape == (3, 16, 16)\n assert not mask.all()", "def tile_set():\n TILES = {\n \"ocean\":\"~\"\n ,\"rock\":\"R\"\n ,\"mountain\":\"M\"\n ,\"player\":\"X\"\n ,\"end\":\"⋆\"\n ,\"npc\":\"I\"\n ,\"cave\":\"C\"\n ,\"dirt\":\"+\"\n ,\"sign\":\"!\"\n }\n\n return TILES", "def iter_unsolved_cells(self) -> Iterable[Cell]:\n\t\treturn (\n\t\t\tcell\n\t\t\tfor cell in self\n\t\t\tif not cell.value()\n\t\t)", "def noisy_cells(self, hists, thresholds):\n return [[[x + 1, z + 1, i + 1] for x in range(h.GetNbinsX()) for z in range(h.GetNbinsY()) if h.GetBinContent(x + 1, z + 1) > threshold] for i, (h, threshold) in enumerate(zip(hists, thresholds))]", "def iter_raw(self, ftcols=['x', 'y']):\n self.makeTree()\n data = self.frametracks[ftcols].values\n coords = self.coords\n sd = selfdistance\n for i in self.loopindices:\n dists, inds = self.nntree.query(coords[i], self.nnmaxcount,\n distance_upper_bound=self.nncutoff)\n yield data[i], data[inds.compress((dists > sd) & ~np.isinf(dists))]", "def test_outside_grid(dataset):\n\n tf = Delft3D_Mudflats(dataset, dry_depth=-10000) # make sure nothing is dry\n\n points = ((3.5, 54.0), # off\n (7.5, 53.4), # off\n (6.0, 52.0), # off\n (5.3, 53.3), # on\n (5.2, 53.25), # on\n )\n\n time = datetime(2009, 1, 15, 0)\n\n result = tf.is_dry(points, time)\n\n print \"results\", result\n\n assert list(result) == [True, True, True, False, False]", "def bosonic_cells(self):\n cells = self.cells()\n fermionic_cells = self.fermionic_cells()\n coords = [x for x in cells if x not in fermionic_cells]\n return coords", "def test_tiled_data_generator(self):\n\n datagen = TiledDataGenerator(\n featurewise_center=True, featurewise_std_normalization=True\n )\n\n datagen.fit(self.test_file_2)\n\n mn = np.mean(self.test_data_2)\n std = np.std(self.test_data_2)\n\n self.assertAlmostEqual(mn / datagen.mean[0], 1, places=6)\n self.assertAlmostEqual(std / datagen.std[0], 1, places=6)\n\n tile_gen = TiledIterator(\n twod_image=self.test_file_1,\n overlap_log_2=1,\n image_data_generator=datagen,\n )\n\n next(tile_gen)\n next(tile_gen)\n tile = next(tile_gen)\n\n # 64//2**1 = 32\n ofst = 32 * 2\n img0 = self.test_data_1[ofst : ofst + 65, ofst : ofst + 65]\n\n np.allclose(tile, (img0 - mn) / std)", "def isTileBlank(tile):\n for b in tile:\n if b: return False\n return True", "def collision_test(rect, tiles):\r\n hit_list = []\r\n for tile in tiles:\r\n if rect.colliderect(tile):\r\n hit_list.append(tile)\r\n return hit_list", "def chunks(self) -> List[OctreeChunk]:\n return [tile_data.octree_chunk for tile_data in self._tiles.values()]", "def _iter_points_out_of_bounds(pc, bounds):\n for i, axis_coords in enumerate(pc.arr):\n for compare, bound in zip((np.less, np.greater_equal),\n (bounds[i], bounds[i+3])):\n if bound is not None:\n yield compare(axis_coords, bound)", "def split_into_tiles(self, x: torch.Tensor):\n tiles, self._coords, 
self._overlap = self._get_tiles_and_coords(x)\n self._num_tiles = tiles.shape[0]\n return tiles", "def calc_tiles(raster, tile_x, tile_y):\n \n #get coordinates of upper left corner\n x_upper_left = raster.transform[2]\n y_upper_left = raster.transform[5]\n #calculate width and height based on tile_x and tile_y\n x,y = x_upper_left + tile_x, y_upper_left - tile_y\n height, width = raster.index(x,y)\n \n #get cols and rows of raster band\n ncols, nrows = raster.meta['width'], raster.meta['height']\n #create offsets for window processing\n subsets = product(range(0, ncols, width), range(0, nrows, height))\n #create bounding_window to fill missing windows\n bounding_window = rio.windows.Window(col_off=0, row_off=0, width=ncols, height=nrows)\n \n #create windows\n for col_off, row_off in subsets:\n #yield windows with the given parameters\n window = rio.windows.Window(col_off=col_off, row_off=row_off, \n width=width, height=height).intersection(bounding_window)\n yield window", "def empty_spots(self):\n\t\tret = []\n\t\tfor i in range(0, self.size):\n\t\t\tfor j in range(0, self.size):\n\t\t\t\tif(self.grid[i][j] == self.terminal):\n\t\t\t\t\tret.append((i,j))\n\t\treturn ret", "def get_tiles(self, numTiles, gameBag):\r\n tiles_picked = gameBag.pick_tiles(numTiles)\r\n for givenTile in tiles_picked:\r\n self.rack.append(givenTile)", "def test_tile_read_nodata():\n # Partial Tile 7-42-24\n bounds = [\n -6887893.4928338025,\n 12210356.646387195,\n -6574807.424977721,\n 12523442.714243278,\n ]\n tilesize = 16\n with rasterio.open(COG) as src_dst:\n arr, mask = reader.part(src_dst, bounds, tilesize, tilesize, nodata=1)\n assert arr.shape == (1, 16, 16)\n assert mask.shape == (16, 16)\n assert not mask.all()", "def in_grid(self, tile):\n return 0 <= tile[0] < self.gs[0] and 0 <= tile[1] < self.gs[1]", "def possible_missile_targets(self, vg: Group) -> List[Point]:\n targets: List[Point] = []\n for cp in self.game.theater.controlpoints:\n if cp.captured != self.ground_object.control_point.captured:\n distance = cp.position.distance_to_point(vg.position)\n if distance < self.missile_site_range:\n targets.append(cp.position)\n return targets", "def test_tile_read_wrong_nodata():\n # non-boundless tile covering the nodata part\n with rasterio.open(S3_NODATA_PATH) as src_dst:\n arr, mask = reader.tile(\n src_dst, 438217, 801835, 21, tilesize=256, indexes=(1, 2, 3), nodata=1000\n )\n assert arr.shape == (3, 256, 256)\n assert mask.all()\n\n # Mask boundless values\n arr, mask = reader.tile(\n src_dst, 109554, 200458, 19, tilesize=256, indexes=(1, 2, 3), nodata=1000\n )\n assert arr.shape == (3, 256, 256)\n assert not mask.all()", "def slice_to_tiles(self, tile_raw_size=None, show_info=\"\"):\n if not tile_raw_size: tile_raw_size = self.tile_raw_size\n tile_raw_w,tile_raw_h = tile_raw_size\n tile_w,tile_h = round(tile_raw_w),round(tile_raw_h)\n\n if show_info:\n print(f\" ==Slicing {show_info} Tiles==\")\n print(f' Tile raw size: {tile_raw_size[0]} x {tile_raw_size[1]} px\\n')\n\n #process into list of image objects\n tiles = []\n true_x, true_y = (0,0)\n with Image.open(self.path) as img_obj:\n w,h = img_obj.size\n for row in range(0,h-tile_h,tile_h):\n tiles_row = []\n y = round(true_y)\n for col in range(0,w-tile_w,tile_w):\n x = round(true_x)\n im_crop = img_obj.crop((x,y,x+tile_w,y+tile_h))\n tiles_row.append(im_crop)\n true_x += tile_raw_w\n tiles.append(tiles_row)\n true_y += tile_raw_h\n true_x = 0\n\n return tiles", "def top_tiles(self):\n sorted_tiles = self.tiles_by_score()\n top_tiles = 
sorted_tiles[:NUM_TOP_TILES]\n return top_tiles", "def tile(self):\n raise RuntimeError('Not implemented')", "def _check_large_tilt(self):\n large_tilt = []\n xy, xz, yz = self.tilt_factors\n x,y,_ = self.cell_lengths\n\n large_tilt.append(-x/2<xy<x/2)\n large_tilt.append(-x/2<xz<y/2)\n large_tilt.append(-x/2<yz<y/2)\n return not all(large_tilt)", "def fixture_tile_list():\n return {\n \"version\": 1,\n \"revision\": 1,\n \"timestamp\": \"2018-06-19T23:04:32.442Z\",\n \"timestamp_ms\": 1529449472442,\n \"result_code\": 0,\n \"result\": [\n {\n \"tileType\": \"TILE\",\n \"user_uuid\": TILE_USER_UUID,\n \"tile_uuid\": TILE_TILE_UUID,\n \"other_user_uuid\": \"\",\n \"other_user_email\": TILE_EMAIL,\n \"mode\": \"OWNER\",\n \"last_modified_timestamp\": 1482711833985,\n }\n ],\n }", "def iter_unsolved_cells(self):\n\t\treturn (\n\t\t\tcell for cell in\n\t\t\tself._cells\n\t\t\tif not cell.value()\n\t\t)", "def with_coordinates(self):\n for tweet in self.having(coordinates=self.default_coordinates):\n yield tweet", "def check_tile_covers_land(self, tilename=None):\n land_tiles = self.list_tiles_covering_land()\n if self.check_tilename(tilename):\n tilename = self.tilename2short(tilename)\n return tilename in land_tiles", "def split_tiles(module_data):\n raise NotImplementedError", "def _build_list_of_excluded_pixels(self, exclude_zones):\n \n pixels = []\n for x, y, width, height in exclude_zones:\n for row in range(height):\n for col in range(width):\n pixels.append(Pixel(col + x, row + y))\n \n return pixels", "def tile_is_out_of_borders(index, shape):\n return index[0] < 0 or index[1] < 0 or index[0] >= shape[0] or index[1] >= shape[1]", "def make_tevcat_exclusion_mask():\n\n # TODO: make this a method ExclusionMask.from_catalog()?\n from gammapy.catalog import load_catalog_tevcat\n\n tevcat = load_catalog_tevcat()\n all_sky_exclusion = ExclusionMask.empty(nxpix=3600, nypix=1800, binsz=0.1,\n fill=1, dtype='int')\n val_lon, val_lat = all_sky_exclusion.coordinates()\n lons = Longitude(val_lon, 'deg')\n lats = Latitude(val_lat, 'deg')\n\n for source in tevcat:\n lon = Longitude(source['coord_gal_lon'], 'deg')\n lat = Latitude(source['coord_gal_lat'], 'deg')\n x = Angle(source['size_x'], 'deg')\n y = Angle(source['size_y'], 'deg')\n if np.isnan(x) and np.isnan(y):\n rad = Angle('0.3 deg')\n else:\n rad = x if x > y else y\n\n mask = lon_lat_circle_mask(lons, lats, lon, lat, rad)\n all_sky_exclusion.data[mask] = 0\n\n return all_sky_exclusion", "def unisolvent_nodes(self):\r\n return self.grid.unisolvent_nodes", "def get_traversable_tiles(room, x, y, length, width):\n traversables = []\n # Checking that we are not going out of bounds\n if x > length - 1 or y > width - 1 or x < 0 or y < 0:\n return\n # Checking above\n if not (x - 1 < 0):\n if not room.tiles[x - 1][y].border:\n traversables.append([x - 1, y])\n # Checking left\n if not (y - 1 < 0):\n if not room.tiles[x][y - 1].border:\n traversables.append([x, y - 1])\n # Checking right\n if not (y + 1 > width - 1):\n if not room.tiles[x][y + 1].border:\n traversables.append([x, y + 1])\n # Checking below\n if not (x + 1 > length - 1):\n if not room.tiles[x + 1][y].border:\n traversables.append([x + 1, y])\n\n return traversables", "def findImmediateNeighbours(self):\n immediateNeighbours = []\n\n if self.xCoordinate - 1 > 0:\n immediateNeighbours.append(PixelPosition(self.xCoordinate - 1, self.yCoordinate))\n\n if self.xCoordinate + 1 < 395:\n immediateNeighbours.append(PixelPosition(self.xCoordinate + 1, self.yCoordinate))\n\n if 
self.yCoordinate + 1 < 500:\n immediateNeighbours.append(PixelPosition(self.xCoordinate, self.yCoordinate + 1))\n\n if self.yCoordinate - 1 > 0:\n immediateNeighbours.append(PixelPosition(self.xCoordinate, self.yCoordinate - 1))\n\n return immediateNeighbours", "def gen_tasks(self):\n for zoom in range(MIN_ZOOM, MAX_ZOOM + 1):\n seen = set() # (x, y)\n M = 2 ** zoom - 1\n # Find all areas suitable for zoom\n for area in Area.objects.filter(is_active=True,\n min_zoom__lte=zoom,\n max_zoom__gte=zoom):\n # Get area tiles\n SW = ll_to_xy(zoom, area.SW)\n NE = ll_to_xy(zoom, area.NE)\n left = max(SW[0] - PAD_TILES, 0)\n right = min(NE[0] + PAD_TILES, M)\n top = max(NE[1] - PAD_TILES, 0)\n bottom = min(SW[1] + PAD_TILES, M)\n a_size = (right - left + 1) * (bottom - top + 1)\n self.log(\"Checking area '%s' at zoom level %d \"\\\n \" (%d x %d = %d tiles)\" % (area.name, zoom,\n right - left + 1,\n bottom - top + 1,\n a_size))\n seen |= set((tc.x, tc.y) for tc in TileCache.objects.filter(\n map=self.map.id, zoom=zoom).only(\"x\", \"y\"))\n for x in range(left, right + 1):\n for y in range(top, bottom + 1):\n c = (x, y)\n if c in seen:\n continue\n seen.add(c)\n if not self.force:\n # Check tile is ready\n tc = TileCache.objects.filter(map=self.map.id,\n zoom=zoom, x=x,\n y=y).first()\n if tc and tc.ready:\n continue\n yield (zoom, x, y)", "def island_feeding(self):\n for y in self.island_map:\n for cell in y:\n cell.feeding()", "def get_walls(world):\r\n return set(((x,y) for x in range(world.get_width()) for y in range(world.get_height()) if world.is_wall((x,y))))", "def tile(self, x: int, y: int):\n return self.awmap.tile(x, y)", "def stop_areas(self):\n seen_ids = set()\n for route in self.routes:\n for stop in route:\n st = stop.stoparea\n if st.id not in seen_ids:\n seen_ids.add(st.id)\n yield st", "def findNeighbours(self):\n neighbours = []\n\n for i in range(self.xCoordinate - 1, self.xCoordinate + 2):\n for j in range(self.yCoordinate - 1, self.yCoordinate + 2):\n if (not (i == self.xCoordinate and j == self.yCoordinate)) and (0 <= i <= 394 and 0 <= j <= 499):\n neighbours.append(PixelPosition(i, j))\n\n return neighbours", "def select_cells_in_trenches(props_all, trenchLocs = TrenchLocs.MIDDLE, below_trench_quantile = 90, above_trench_quantile = 100, mother_cell_y_offset=10,inversion_mult = 1):\n def indcs_in_trenches(centy,cell_indcs, invert):\n \n cy = centy[cell_indcs]\n above_trench_cut = np.percentile(invert*cy,100 - above_trench_quantile)\n below_trench_cut = np.percentile(invert*cy,below_trench_quantile)\n idx_above = (invert*centy) > above_trench_cut - mother_cell_y_offset\n idx_below = (invert*centy) < below_trench_cut\n idx_select = np.all(np.vstack((cell_indcs,idx_above,idx_below)),axis=0)\n return idx_select\n \n \n img_height = props_all.img_height\n centy = np.array(props_all.centy)\n props_all['trench_inversion_mult'] = 1*inversion_mult\n ### Note that the y-indx is flipped in image compared to matrix coords\n if trenchLocs == TrenchLocs.MIDDLE:\n idx_select = np.zeros(centy.shape,dtype=bool)\n for pos in np.unique(props_all.pos_num):\n idx_pos = np.array(props_all.pos_num == pos)\n idx_cell_pos = indcs_in_trenches(centy,idx_pos, 1*inversion_mult)\n idx_select = np.any(np.vstack((idx_select,idx_cell_pos)),axis=0)\n \n props_clean = props_all[idx_select]\n else:\n # top position in actual picture (smallest y value in matrix)\n idx_top = centy < (img_height/2) \n idx_bottom = centy > (img_height/2)\n props_all.loc[idx_bottom,'trench_inversion_mult'] = -1*inversion_mult\n \n 
idx_select_top = np.zeros(idx_top.shape,dtype=bool)\n idx_select_bottom = np.zeros(idx_bottom.shape,dtype=bool)\n\n for pos in np.unique(props_all.pos_num): \n idx_pos = (props_all.pos_num == pos)\n idx_top_pos = np.all(np.vstack((idx_top,idx_pos)),axis=0)\n idx_bottom_pos = np.all(np.vstack((idx_bottom,idx_pos)),axis=0)\n\n idx_select_top_pos = indcs_in_trenches(centy,idx_top_pos, 1*inversion_mult)\n idx_select_bottom_pos = indcs_in_trenches(centy,idx_bottom_pos, -1*inversion_mult)\n idx_select_top = np.any(np.vstack((idx_select_top,idx_select_top_pos)),axis=0)\n idx_select_bottom = np.any(np.vstack((idx_select_bottom,idx_select_bottom_pos)),axis=0)\n\n \n if trenchLocs == TrenchLocs.TOP:\n idx_reasonable_cells = idx_select_top\n elif trenchLocs == TrenchLocs.BOTTOM:\n idx_reasonable_cells = idx_select_bottom\n elif trenchLocs == TrenchLocs.TOP_AND_BOTTOM:\n idx_reasonable_cells = np.any(np.vstack((idx_select_top,idx_select_bottom)),axis=0) \n \n props_clean = props_all.loc[idx_reasonable_cells,:]\n\n return props_clean", "def select_all_active_tiles(self):\n self.ref_tiles = []\n number_grids = int(self.cfg['grids']['number_grids'])\n for grid in range(number_grids):\n for tile in self.gm.get_active_tiles(grid):\n self.ref_tiles.append(str(grid) + '.' + str(tile))", "def unsolved_cells(self) -> Set[Cell]:\n\t\treturn set(self.iter_unsolved_cells())", "def unsolved_cells(self) -> Set[Cell]:\n\t\treturn set(self.iter_unsolved_cells())", "def get_mines(self):\n mines = []\n for i in range(self.rows):\n for j in range(self.cols):\n if self.board[i][j].category == Tiles.mine:\n mines.append((i, j))\n return mines" ]
[ "0.664918", "0.65654457", "0.64280045", "0.61701643", "0.61044884", "0.6099539", "0.6057754", "0.60419196", "0.60385936", "0.60150456", "0.60135376", "0.5999694", "0.5931338", "0.59044945", "0.5892273", "0.585954", "0.57977283", "0.5784656", "0.5774202", "0.5733227", "0.57330537", "0.5706314", "0.56694484", "0.56594855", "0.5659008", "0.56496185", "0.5632151", "0.5627637", "0.5619144", "0.5589014", "0.5588704", "0.55750847", "0.55673915", "0.55671686", "0.5562582", "0.5551677", "0.55459636", "0.5527941", "0.55140686", "0.5490161", "0.54885507", "0.5480959", "0.5476525", "0.5460467", "0.5447397", "0.5439315", "0.54392755", "0.54245746", "0.54187304", "0.5395976", "0.53926563", "0.53738743", "0.53513193", "0.53467155", "0.5337099", "0.5325516", "0.53120995", "0.53052807", "0.5303041", "0.52933663", "0.5288878", "0.52888435", "0.5282029", "0.5271676", "0.52657986", "0.52647215", "0.52553916", "0.52539253", "0.52333266", "0.5232657", "0.5229115", "0.5221389", "0.52205276", "0.52189636", "0.5214058", "0.520708", "0.520705", "0.51908594", "0.51880074", "0.5184648", "0.51810646", "0.5173509", "0.51706576", "0.51703423", "0.51655287", "0.5148414", "0.51368815", "0.5136263", "0.51354504", "0.51351225", "0.51329774", "0.5128462", "0.5096021", "0.5088149", "0.5082576", "0.50770044", "0.50728977", "0.50675833", "0.50675833", "0.5063052" ]
0.5349394
53
Generate a GeoTIFF from a vrt string
def render(self, outputfile, cmd=GDALTRANSLATE, working_memory=1024,
           compress=None, tempdir=None):
    tmpfile = NamedTemporaryFile(
        suffix='.tif', prefix='gdalrender',
        dir=os.path.dirname(outputfile), delete=False
    )
    try:
        with self.get_tempfile(dir=tempdir) as inputfile:
            warp_cmd = [
                cmd,
                '-q',                           # Quiet - FIXME: Use logging
                '-of', 'GTiff',                 # Output to GeoTIFF
                '-co', 'BIGTIFF=IF_SAFER',      # Use BigTIFF if >2GB
                '-co', 'NUM_THREADS=ALL_CPUS',  # Multithreaded compression for GeoTIFF
                # gdal_translate does not support the following
                # '-multi',                     # Use multiple processes
                # '-overwrite',                 # Overwrite outputfile
            ]

            # Set the working memory so that gdal_translate doesn't stall
            # on disk I/O
            warp_cmd.extend([
                # gdal_translate does not support -wm
                # '-wm', working_memory,
                '--config', 'GDAL_CACHEMAX', working_memory
            ])

            # Use compression
            compress = str(compress).upper()
            if compress and compress != 'NONE':
                warp_cmd.extend(['-co', 'COMPRESS=%s' % compress])
                if compress in ('LZW', 'DEFLATE'):
                    warp_cmd.extend(['-co', 'PREDICTOR=2'])

            # Run gdal_translate and output to tmpfile.name
            warp_cmd.extend([inputfile.name, tmpfile.name])
            check_output_gdal([str(e) for e in warp_cmd])

            # If it succeeds, move the temporary file over the actual output
            os.rename(tmpfile.name, outputfile)
            return outputfile
    finally:
        rmfile(tmpfile.name, ignore_missing=True)
        rmfile(tmpfile.name + '.aux.xml', ignore_missing=True)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ascii_to_tiff(infile, outfile, refIm):", "def convert_vrt(fname, out_fname, dataset_name='dataset',\n compression=H5CompressionFilter.LZF, filter_opts=None,\n attrs=None):\n with h5py.File(out_fname) as fid:\n with rasterio.open(fname) as rds:\n # set default chunks and set dimensions\n if rds.count == 3:\n chunks = (3, 256, 256)\n dims = (3, rds.height, rds.width)\n else:\n chunks = (256, 256)\n dims = (rds.height, rds.width)\n\n # create empty or copy the user supplied filter options\n if not filter_opts:\n filter_opts = dict()\n filter_opts['chunks'] = chunks\n else:\n filter_opts = filter_opts.copy()\n\n\n if 'chunks' not in filter_opts:\n filter_opts['chunks'] = chunks\n\n # modify to have 3D chunks if we have a multiband vrt\n if rds.count == 3 and len(filter_opts['chunks']) != 3:\n # copy the users original 2D chunk and insert the third\n chunks = list(filter_opts['chunks'])\n chunks.insert(0, 3)\n filter_opts['chunks'] = chunks\n\n # dataset attributes\n if attrs:\n attrs = attrs.copy()\n else:\n attrs = {}\n\n attrs['geotransform'] = rds.transform.to_gdal()\n attrs['crs_wkt'] = rds.crs.wkt\n\n # dataset creation options\n kwargs = compression.config(**filter_opts).dataset_compression_kwargs()\n kwargs['shape'] = dims\n kwargs['dtype'] = rds.dtypes[0]\n\n dataset = fid.create_dataset(dataset_name, **kwargs)\n attach_image_attributes(dataset, attrs)\n\n # tiled processing (all cols by chunked rows)\n ytile = filter_opts['chunks'][1] if rds.count == 3 else filter_opts['chunks'][0]\n tiles = generate_tiles(rds.width, rds.height, rds.width, ytile)\n\n for tile in tiles:\n # numpy index\n if rds.count == 3:\n idx = (\n slice(None),\n slice(tile[0][0], tile[0][1]),\n slice(tile[1][0], tile[1][1])\n )\n else:\n idx = (\n slice(tile[0][0], tile[0][1]),\n slice(tile[1][0], tile[1][1])\n )\n\n # ensure single band rds is read as 2D not 3D\n data = rds.read(window=tile) if rds.count == 3 else rds.read(1, window=tile)\n\n # write\n dataset[idx] = data", "def vl2img(vl_json_in, fileformat):\n\n # TODO would prefer to do this properly with pipes\n # using | and shell=True is safe though given no arguments\n executables = {\"svg\": \"vg2svg\", \"png\": \"vg2png\", \"pdf\": \"vg2pdf\"}\n try:\n exe = executables[fileformat]\n except KeyError as e:\n print(e.output)\n try:\n return subprocess.check_output(\"vl2vg | %s\" % exe, shell=True, input=vl_json_in)\n except subprocess.CalledProcessError as e:\n print(e.output)", "def as_tiff(imagename):\n return change_extension(imagename,'tif')", "def http_raster():\n return \"https://ungarj.github.io/mapchete_testdata/tiled_data/raster/cleantopo/1/0/0.tif\"", "def convertCSVToGeoTIFF(geoproperties, datafile, outputdir, outfile):\n \n data = numpy.genfromtxt(datafile, delimiter=',')\n \n Ny, Nx = data.shape\n \n #print data.size\n #print data.shape\n #print data\n \n # slice out the last column of null values\n if str(data[Ny-1][Nx-1]) == 'nan':\n data = data[:,:-1]\n \n Ny, Nx = data.shape\n \n #print data.size\n #print data.shape\n #print data\n #print Ny, Nx\n \n startPos = [geoproperties['tl']['long'],geoproperties['tl']['lat']]\n d_lat = (geoproperties['br']['lat'] - geoproperties['tl']['lat']) / (Ny - 1)\n d_long = (geoproperties['br']['long'] - geoproperties['tl']['long']) / (Nx - 1)\n \n #print startPos, d_lat, d_long\n \n driver = gdal.GetDriverByName(\"GTiff\")\n ds = driver.Create(os.path.join(outputdir,outfile),Nx,Ny,1,gdal.GDT_Float32)\n #ds = driver.Create('output/output.tif',Nx,Ny,1,gdal.GDT_Byte)\n #ds.SetGeoTransform( [ -158.584, 
.008, 0, 21.108, 0, .008 ] )\n ds.SetGeoTransform( [ startPos[0], d_long, 0, startPos[1], 0, d_lat ] )\n srs = osr.SpatialReference()\n srs.ImportFromEPSG(4326)\n ds.SetProjection( srs.ExportToWkt() )\n ds.GetRasterBand(1).WriteArray(data)\n ds = None", "def build_vrt(vrt: str, files: List[str], resample_name: str) -> None:\n\n options = gdal.BuildVRTOptions(srcNodata=0)\n gdal.BuildVRT(destName=vrt, srcDSOrSrcDSTab=files, options=options)\n add_pixel_fn(vrt, resample_name)", "def make_vrt(self):\n for index, i in enumerate(self.months):\n month = str(index + 1)\n if len(month) < 2:\n month = '0' + month\n txt_file = i.joinpath('subnational/tiffs.txt')\n outfile = i.joinpath(f'{self.country}_{month}_normalised.vrt')\n if not outfile.exists():\n gdal_cmd = f'gdalbuildvrt -input_file_list {str(txt_file)} {str(outfile)}'\n subprocess.call(gdal_cmd, shell=True)", "def create_geotiff(pixels, gps_bounds, out_path, nodata=-99, asfloat=False, extractor_info=None, system_md=None, extra_metadata=None, compress=False):\n dimensions = numpy.shape(pixels)\n if len(dimensions) == 2:\n nrows, ncols = dimensions\n channels = 1\n else:\n nrows, ncols, channels = dimensions\n\n geotransform = (\n gps_bounds[2], # upper-left x\n (gps_bounds[3] - gps_bounds[2])/float(ncols), # W-E pixel resolution\n 0, # rotation (0 = North is up)\n gps_bounds[1], # upper-left y\n 0, # rotation (0 = North is up)\n -((gps_bounds[1] - gps_bounds[0])/float(nrows)) # N-S pixel resolution\n )\n\n # Create output GeoTIFF and set coordinates & projection\n if asfloat:\n dtype = gdal.GDT_Float32\n else:\n dtype = gdal.GDT_Byte\n\n if compress:\n output_raster = gdal.GetDriverByName('GTiff') \\\n .Create(out_path, ncols, nrows, channels, dtype, ['COMPRESS=LZW'])\n else:\n output_raster = gdal.GetDriverByName('GTiff') \\\n .Create(out_path, ncols, nrows, channels, dtype)\n\n output_raster.SetGeoTransform(geotransform)\n srs = osr.SpatialReference()\n srs.ImportFromEPSG(4326) # google mercator\n output_raster.SetProjection( srs.ExportToWkt() )\n\n if not extra_metadata:\n extra_metadata = prepare_metadata_for_geotiff(extractor_info, system_md)\n\n output_raster.SetMetadata(extra_metadata)\n\n if channels == 3:\n # typically 3 channels = RGB channels\n # TODO: Something wonky w/ uint8s --> ending up w/ lots of gaps in data (white pixels)\n output_raster.GetRasterBand(1).WriteArray(pixels[:,:,0].astype('uint8'))\n output_raster.GetRasterBand(1).SetColorInterpretation(gdal.GCI_RedBand)\n output_raster.GetRasterBand(1).FlushCache()\n if nodata:\n output_raster.GetRasterBand(1).SetNoDataValue(nodata)\n\n output_raster.GetRasterBand(2).WriteArray(pixels[:,:,1].astype('uint8'))\n output_raster.GetRasterBand(2).SetColorInterpretation(gdal.GCI_GreenBand)\n output_raster.GetRasterBand(2).FlushCache()\n if nodata:\n output_raster.GetRasterBand(2).SetNoDataValue(nodata)\n\n output_raster.GetRasterBand(3).WriteArray(pixels[:,:,2].astype('uint8'))\n output_raster.GetRasterBand(3).SetColorInterpretation(gdal.GCI_BlueBand)\n output_raster.GetRasterBand(3).FlushCache()\n if nodata:\n output_raster.GetRasterBand(3).SetNoDataValue(nodata)\n\n elif channels > 1:\n # TODO: Something wonky w/ uint8s --> ending up w/ lots of gaps in data (white pixels)\n for chan in range(channels):\n band = chan + 1\n output_raster.GetRasterBand(band).WriteArray(pixels[:,:,chan].astype('uint8'))\n output_raster.GetRasterBand(band).FlushCache()\n if nodata:\n output_raster.GetRasterBand(band).SetNoDataValue(nodata)\n else:\n # single channel image, e.g. 
temperature\n output_raster.GetRasterBand(1).WriteArray(pixels)\n output_raster.GetRasterBand(1).FlushCache()\n if nodata:\n output_raster.GetRasterBand(1).SetNoDataValue(nodata)\n\n output_raster = None", "def nc2gtiff(input_name, epsg_in=3413, epsg_out=3857):\n sub_name=[\"dX\",\"dY\"]\n \n for sub in sub_name:\n result = os.system(\"echo ''\")\n #netCDF to geotiff (EPSG code is not changed) \n out_name = \"./data/tiff_raw/{}_{}_tmp.tiff\".format(os.path.basename(input_name)[:-3],sub)\n #c is shell command\n #options\n #-a_srs:EPSGcode of input files\n #-of:output format\n #\n print(\"nc2gtiff\")\n c = \"gdal_translate -a_srs '+proj=stere +a=6378273 +b=6356889.44891 +lat_0=90 +lat_ts=70 +lon_0=-45' NETCDF:'{in_name}':{sub} \\\n -of 'Gtiff' \\\n '{out_name}'\".format(epsg=epsg_in,sub=sub, \\\n in_name=input_name,out_name=out_name) \n result = os.system(c) #run shell command\n #print(c) #if you want to see the filled c, remove '#' in the head of this line\n if result!=0: # if it raises error, return filename \n print(input_name, result, \"translate\")\n \n result = os.system(\"echo ''\")\n\n #geotiff EPSG_in to EPCG_out\n print(\"geotiff2geotiff\")\n target_name=\"./data/tiff_target/{}_{}.tiff\".format(os.path.basename(input_name)[:-3],sub)\n c = \"gdalwarp -overwrite -s_srs '+proj=stere +a=6378273 +b=6356889.44891 +lat_0=90 +lat_ts=70 +lon_0=-45' {out_name} -r cubic\\\n {target_name} -t_srs EPSG:{epsg_out} -of \\\n 'GTIFF'\".format(out_name=out_name, target_name=target_name, \n epsg_in=epsg_in, epsg_out=epsg_out)\n result = os.system(c) # run shell command\n if result!=0:\n print(input_name, result, \"warp\")", "def tag_parser(file_path: str):\n with open(file_path) as f:\n t = f.read()\n t = t.split(\"Points =\\n\")[1]\n t = t.replace(\" 0.1 1 1 \\\"Marker\\\"\", \"\")\n t = t.replace(\";\", \"\")\n t = t.replace(\" \\n\", \"\\n\")\n t = t[1:]\n t = StringIO(t)\n\n return np.genfromtxt(t, delimiter=' ')", "def load(filename, imageprops):\n with gzip.open(filename, 'rb') as f:\n file_content = f.read()\n return parse_svg.parse_svg_string(file_content, imageprops, \"en\")", "def _export_geotiff(self, filepath: str, z_positive_up: bool = True):\n\n nodatavalue = 1000000.0\n data, geo_transform, bandnames = self._gdal_preprocessing(nodatavalue=nodatavalue, z_positive_up=z_positive_up)\n gdal_raster_create(filepath, data, geo_transform, self.crs, nodatavalue=nodatavalue, bandnames=bandnames,\n driver='GTiff')", "def tiff(location):\n\n for src_gtiff in os.listdir(location):\n if \".gtiff\" in src_gtiff:\n dest_tif = src_gtiff[:-6] + '.tiff'\n command = \"gdal_translate -co profile=baseline \" + src_gtiff + ' ' \\\n + dest_tif\n os.system(command)", "def gtiff(location):\n\n for src_asc in os.listdir(location):\n if \".asc\" in src_asc:\n dest_gtif = src_asc[:-4] + '.gtiff'\n command = \"gdal_translate -of GTiff -ot Float32 \" + location +'\\\\' \\\n + src_asc + ' ' + location +'\\\\' + dest_gtif\n os.system(command)", "def convertdataTOimage(data):\n data = data.partition(\",\")[2]\n padding = len(data)%4\n data += \"=\"*padding\n image = Image.open(BytesIO(b64decode(data)))\n return image", "def copy_vrt(in_fname, out_fname=None, bbox=None, verbose=True):\n from gdal import Translate\n\n if out_fname is None:\n out_fname = in_fname + \".vrt\"\n\n # Using Translate... 
but would use Warp if every reprojecting\n if bbox:\n left, bottom, right, top = bbox\n projwin = (left, top, right, bottom) # unclear why Translate does UL LR\n else:\n projwin = None\n if verbose:\n logger.info(f\"Creating {out_fname}, subset bbox: {bbox}\")\n Translate(out_fname, in_fname, projWin=projwin)", "def outputGeoTiff(data, filename, geo_t, proj, output_dir = os.getcwd(), dtype = 6, nodata = None):\n \n from osgeo import osr, gdal\n \n # Get full output path\n output_path = '%s/%s.tif'%(os.path.abspath(os.path.expanduser(output_dir)), filename.rstrip('.tif'))\n \n # Save image with georeference info\n driver = gdal.GetDriverByName('GTiff')\n ds = driver.Create(output_path, data.shape[1], data.shape[0], 1, dtype, options = ['COMPRESS=LZW'])\n ds.SetGeoTransform(geo_t)\n ds.SetProjection(proj)\n \n # Set nodata\n if nodata != None:\n ds.GetRasterBand(1).SetNoDataValue(nodata)\n \n # Write data for masked and unmasked arrays\n if np.ma.isMaskedArray(data):\n ds.GetRasterBand(1).WriteArray(data.filled(nodata))\n else:\n ds.GetRasterBand(1).WriteArray(data)\n ds = None", "def loadGeoTransform(filepath):\n \n from osgeo import gdal\n \n ds = gdal.Open(filepath, 0)\n \n return ds.GetGeoTransform()", "def __init__(self, filepath: str):\n self.filetype: str = filepath[len(filepath) - 3:].upper()\n self.tags = None\n self.locations: [Location] = None\n self.intermediaryImage = None\n self.outlined = None\n if self.filetype == 'TIF':\n print('found tif')\n with TiffFile(filepath) as tif:\n # fileInfo(tif)\n self.tags = metadataGeoTags(tif)\n self.image = tif.asarray()\n elif self.filetype == 'PNG' or self.filetype == 'JPG':\n print('found png')\n self.image = cv2.imread(filepath, cv2.IMREAD_UNCHANGED)\n else:\n print('invalid file type:', self.filetype)", "def get_image_format_from_datatext(self, datatext):\n image_format = \"VIRT\"\n temp = re.search('VX_DF_IMAGE_(.+?)\\]', datatext) #Obs. 
Needed to ecape the [ ]'s\n if temp:\n image_format = temp.group(1)\n return image_format", "def get_geojson_feature(id, raw_bbox_string, properties_dict):\n coords = raw_bbox_string.split()\n \n # Tesseract uses ints, but allow floats\n for i, val in enumerate(coords):\n coords[i] = float(val)\n # bbox order = # x0 y0 x1 y1\n \n bbox_json_obj = geojson.Polygon([[\n (coords[0], coords[1]), \n (coords[0], coords[3]), \n (coords[2], coords[3]), \n (coords[2], coords[1]),\n (coords[0], coords[1])\n ]])\n return geojson.Feature(id, bbox_json_obj, properties=properties_dict)", "def _encodeTiledImageFromVips(self, vimg, iterInfo, image, **kwargs):\n convertParams = large_image.tilesource.base._vipsParameters(\n defaultCompression='lzw', **kwargs)\n convertParams.pop('pyramid', None)\n vimg = large_image.tilesource.base._vipsCast(\n vimg, convertParams['compression'] in {'webp', 'jpeg'})\n gdalParams = large_image.tilesource.base._gdalParameters(\n defaultCompression='lzw', **kwargs)\n for ch in range(image['channels']):\n gdalParams += [\n '-b' if ch not in (1, 3) or ch + 1 != image['channels'] else '-mask', str(ch + 1)]\n tl = self.pixelToProjection(\n iterInfo['region']['left'], iterInfo['region']['top'], iterInfo['level'])\n br = self.pixelToProjection(\n iterInfo['region']['right'], iterInfo['region']['bottom'], iterInfo['level'])\n gdalParams += [\n '-a_srs',\n iterInfo['metadata']['bounds']['srs'],\n '-a_ullr',\n str(tl[0]),\n str(tl[1]),\n str(br[0]),\n str(br[1]),\n ]\n fd, tempPath = tempfile.mkstemp('.tiff', 'tiledRegion_')\n os.close(fd)\n fd, outputPath = tempfile.mkstemp('.tiff', 'tiledGeoRegion_')\n os.close(fd)\n try:\n vimg.write_to_file(tempPath, **convertParams)\n ds = gdal.Open(tempPath, gdalconst.GA_ReadOnly)\n gdal.Translate(outputPath, ds, options=gdalParams)\n os.unlink(tempPath)\n except Exception as exc:\n try:\n os.unlink(tempPath)\n except Exception:\n pass\n try:\n os.unlink(outputPath)\n except Exception:\n pass\n raise exc\n return pathlib.Path(outputPath), TileOutputMimeTypes['TILED']", "def gdal_read_geotiff_file(sFilename_in):\n \n if os.path.exists(sFilename_in):\n pass\n else:\n print('The file does not exist!')\n return\n\n sDriverName='GTiff'\n pDriver = gdal.GetDriverByName(sDriverName) \n\n if pDriver is None:\n print (\"%s pDriver not available.\\n\" % sDriverName)\n else:\n print (\"%s pDriver IS available.\\n\" % sDriverName) \n\n pDataset = gdal.Open(sFilename_in, gdal.GA_ReadOnly)\n\n if pDataset is None:\n print(\"Couldn't open this file: \" + sFilename_in)\n sys.exit(\"Try again!\")\n else: \n pProjection = pDataset.GetProjection()\n\n pDataset.GetMetadata()\n \n ncolumn = pDataset.RasterXSize\n nrow = pDataset.RasterYSize\n nband = pDataset.RasterCount\n\n pGeotransform = pDataset.GetGeoTransform()\n dOriginX = pGeotransform[0]\n dOriginY = pGeotransform[3]\n dPixelWidth = pGeotransform[1]\n pPixelHeight = pGeotransform[5]\n\n pBand = pDataset.GetRasterBand(1)\n\n # Data type of the values\n gdal.GetDataTypeName(pBand.DataType)\n # Compute statistics if needed\n if pBand.GetMinimum() is None or pBand.GetMaximum() is None:\n pBand.ComputeStatistics(0)\n\n dMissing_value = pBand.GetNoDataValue()\n \n aData_out = pBand.ReadAsArray(0, 0, ncolumn, nrow)\n \n #we will use one of them to keep the consistency\n pSpatial_reference = osr.SpatialReference(wkt=pProjection)\n \n\n pDataset = None\n pBand = None \n pBand = None\n\n return aData_out, dPixelWidth, dOriginX, dOriginY, nrow, ncolumn, dMissing_value, pGeotransform, pProjection, pSpatial_reference", 
"def GetGeoTransform(raster_path):\n \n #open a GDAL object containig the raster\n gdal_img = gdal.Open(raster_path)\n \n #extract basic geospatial data\n ulx, xres, xskew, uly, yskew, yres = gdal_img.GetGeoTransform()\n \n #calculate lower right coordinates from upper left coordinates and raster size\n lrx = ulx + (gdal_img.RasterXSize * xres)\n lry = uly + (gdal_img.RasterYSize * yres)\n \n geoinfo = {'ulx': ulx,\n 'lrx': lrx,\n 'uly': uly,\n 'lry': lry,\n 'xres': xres,\n 'xskew': xskew,\n 'yres': yres,\n 'yskew': yskew\n }\n \n return geoinfo", "def single_to_rgb(R_file,G_file,B_file): \n R=gdal_array.LoadFile(R_file)\n G=gdal_array.LoadFile(G_file)\n B=gdal_array.LoadFile(B_file)\n \n \n basename=os.path.basename(R_file)\n basename=basename[:3]+basename[4:]\n basename=basename[:-4]+\"_rgb_.tif\" \n \n\n file_path=os.path.dirname(os.path.abspath(R_file))+\"/\"+basename\n\n \n driver=osgeo.gdal.GetDriverByName(\"GTiff\")\n options = ['PHOTOMETRIC=RGB', 'PROFILE=GeoTIFF']\n print(file_path)\n print(np.max(np.array([R.shape[1],B.shape[1],G.shape[1]])), np.max(np.array([R.shape[0],B.shape[0],G.shape[0]])))\n Xlen=np.max(np.array([R.shape[1],B.shape[1],G.shape[1]]))\n Ylen= np.max(np.array([R.shape[0],B.shape[0],G.shape[0]]))\n dataset=driver.Create(file_path, int(Xlen),int(Ylen), 3, osgeo.gdal.GDT_UInt16, options) \n \n dataset.GetRasterBand(1).WriteArray(R)\n dataset.GetRasterBand(2).WriteArray(G)\n dataset.GetRasterBand(3).WriteArray(B)\n \n return file_path", "def vtp(self, f_vtu, f_vtp):\r\n reader = vtk.vtkXMLUnstructuredGridReader()\r\n reader.SetFileName(f_vtu)\r\n reader.Update()\r\n ugrid = reader.GetOutput()\r\n geometryFilter = vtk.vtkGeometryFilter()\r\n geometryFilter.SetInputData(ugrid)\r\n geometryFilter.Update()\r\n polydata = geometryFilter.GetOutput()\r\n writer =vtk.vtkXMLPolyDataWriter()\r\n writer.SetFileName(f_vtp)\r\n writer.SetInputData(polydata)\r\n writer.Write()\r\n print(\"vtp file created.\")", "def load_image(fname):\n return load_tiff(fname)", "def gpvtg_convert(line):\r\n gps = line.strip().split(',')\r\n #check data\r\n if gps[1] == '0.00': \r\n return\r\n #jsondata = {'Horizontal speed': gps[7] + ' kmph or ' + gps[5] + 'knots'}\r\n return []", "def load_image(nom):\n print(\"load_image : [\", nom, \"]\")\n fic = gdal.Open(nom)\n print(fic)\n return fic.ReadAsArray(), fic.GetGeoTransform()", "def searchTiffInfo(string, search_string):\n\n return re.search('(?<=' + search_string + '\\s=\\s)[-+]?(\\d+(\\.\\d*)?|\\.\\d+)([eE][-+]?\\d+)?', string).group(0)", "def get_vector_markup(self, mask, geotransform, trg_crs='epsg:3857'):\n\n # plt.imsave(os.path.join(time_series_path, time_frame, '_'.join([dataset_element_name, mask_name, time_frame, self.get_timestamp()])+'.png'), raw)\n shapes = rasterio.features.shapes(mask, transform=geotransform)\n # the last shape contains all geometry\n shapes = list(shapes)[:-1]\n polygons = [geojson.Feature(geometry=geojson.Polygon(shape[0]['coordinates'])) for shape in shapes]\n crs = {\n \"type\": \"name\",\n \"properties\": {\n \"name\": trg_crs}}\n gs = geojson.FeatureCollection(polygons, crs=crs)\n return geojson.dumps(gs)", "def read_tif(filename):\n raster = gdal.Open(filename, GA_ReadOnly)\n imarray = np.array(raster.ReadAsArray())\n return imarray", "def make_raster(in_ds, fn, data, data_type, nodata=None):\n driver = gdal.GetDriverByName('GTiff')\n out_ds = driver.Create(\n fn, in_ds.RasterXSize, in_ds.RasterYSize, 1, data_type)\n out_ds.SetProjection(in_ds.GetProjection())\n 
out_ds.SetGeoTransform(in_ds.GetGeoTransform())\n out_band = out_ds.GetRasterBand(1)\n if nodata is not None:\n out_band.SetNoDataValue(nodata)\n out_band.WriteArray(data)\n out_band.FlushCache()\n out_band.ComputeStatistics(False)\n return out_ds", "def getTiffAsMatrix(kv):\n file_name = kv[0].split('/')[-1]\n fbinary = kv[1]\n tiffmat = getOrthoTif(fbinary)\n return (file_name,tiffmat)", "def save_geotiff(path, raster, xform):\n # Import rasterio here, so that we don't need it in the cloud.\n import rasterio\n import affine\n n_lats, n_lons, depth = raster.shape\n\n assert raster.max() <= 255\n assert raster.min() >= 0\n \n profile = {\n 'crs': 'EPSG:4326',\n 'nodata': 0,\n 'dtype': rasterio.uint8,\n 'height': n_lats,\n 'width': n_lons,\n 'count': depth,\n 'driver':\"GTiff\",\n 'transform': xform}\n\n with rasterio.open(path, 'w', **profile) as dst:\n for i in range(depth):\n dst.write(raster[:, :, i], indexes=i+1)", "def tiffread(f):\n if type(f) is str:\n # single image\n im = tf.imread(f)\n return im\n\n elif type(f) is list and len(f) == 3:\n # return rgb stack\n f.sort(reverse=True) # so r, g, b\n ims = [tf.imread(x) for x in f]\n return np.dstack(ims)\n else:\n raise ValueError(\"f must be a string or list of 3 strings\")", "def align_rasters(ref_raster, tar_raster, output_suffix):\n command = [\"gdalbuildvrt\", \"-te\"]\n hDataset = gdal.Open(ref_raster, gdal.GA_ReadOnly)\n if hDataset is None:\n return False\n adfGeoTransform = hDataset.GetGeoTransform(can_return_null=True)\n\n tif_file=tar_raster\n vrt_file = tif_file.replace('.tif', '.vrt')\n\n if adfGeoTransform is not None:\n dfGeoXUL = adfGeoTransform[0]\n dfGeoYUL = adfGeoTransform[3]\n dfGeoXLR = adfGeoTransform[0] + adfGeoTransform[1] * hDataset.RasterXSize + \\\n adfGeoTransform[2] * hDataset.RasterYSize\n dfGeoYLR = adfGeoTransform[3] + adfGeoTransform[4] * hDataset.RasterXSize + \\\n adfGeoTransform[5] * hDataset.RasterYSize\n xres = str(abs(adfGeoTransform[1]))\n yres = str(abs(adfGeoTransform[5]))\n\n subprocess.call(command + [str(dfGeoXUL), str(dfGeoYLR), str(dfGeoXLR),\n str(dfGeoYUL), \"-q\", \"-tr\", xres, yres,\n vrt_file, tif_file])\n\n output_file = tif_file.replace('.tif', output_suffix)\n\n print('gdal_translate -q {} {}'.format(vrt_file, output_file))\n\n cmd = 'gdal_translate -q {} {}'.format(vrt_file, output_file)\n\n #print(dfGeoXUL, dfGeoYLR, dfGeoXLR, dfGeoYUL, xres, yres)\n\n subprocess.call(cmd, shell=True)\n os.remove(vrt_file)\n\n return True\n\n else:\n\n return False", "def write_tiff (self, filename, prototype_filename = None, proj_string = None):\n if prototype_filename is None:\n if self.prototype_filename is None:\n use_prototype = False\n else:\n prototype_filename = self.prototype_filename # use the pre-defined one \n use_prototype = True\n \n if use_prototype:\n raster = gdal.Open (prototype_filename)\n geotransform = raster.GetGeoTransform()\n originX = geotransform[0]\n originY = geotransform[3]\n cell_Width = geotransform[1]\n cell_Height = geotransform[5]\n ncols = self.ras.shape[1]\n nrows = self.ras.shape[0]\n else:\n originX = self.originX\n originY = self.originY\n cell_Width = self.cell_Width\n cell_Height = self.cell_Height\n ncols = self.ncols\n nrows = self.nrows\n \n # create driver\n driver = gdal.GetDriverByName('GTiff')\n outRaster = driver.Create (filename, ncols, nrows, 1, gdal.GDT_Float32)\n outRaster.SetGeoTransform((originX, cell_Width, 0, originY, 0, cell_Height))\n outband = outRaster.GetRasterBand(1)\n \n # make a copy to write out\n x = self.ras.copy()\n \n # set 
the nodata flag properly\n if use_prototype:\n nodata_flag = raster.GetRasterBand(1).GetNoDataValue() # get the original value\n else:\n nodata_flag = -9999.0 # use hardcoded nan value\n x [np.isnan(x)] = nodata_flag # assign it to the array\n outband.SetNoDataValue (nodata_flag) # set it in the output band\n \n # write array and set projection (if projection supplied)\n outband.WriteArray (x)\n outRasterSRS = osr.SpatialReference ()\n if use_prototype:\n outRasterSRS.ImportFromWkt (raster.GetProjectionRef())\n outRaster.SetProjection (outRasterSRS.ExportToWkt())\n else:\n if not proj_string is None:\n outRasterSRS.ImportFromWkt (proj_string)\n outRaster.SetProjection (outRasterSRS.ExportToWkt())\n \n outband.FlushCache ()\n return", "def geotif_image(self, tile_bounds, image_bounds, imagepath,image_gdal):\n i_srid=3857\n s_srid=\"WGS 84 / Pseudo-Mercator\"\n # i_srid=3395\n # s_srid=\"WGS 84 / World Mercator\"\n # 4326 Wsg84\n # Upper Left ( -8.4375000, 77.1571625) ( 8d26'15.00\"W, 77d 9'25.79\"N)\n # Lower Left ( -8.4375000, 35.4606700) ( 8d26'15.00\"W, 35d27'38.41\"N)\n # Upper Right ( 80.1562500, 77.1571625) ( 80d 9'22.50\"E, 77d 9'25.79\"N)\n # Lower Right ( 80.1562500, 35.4606700) ( 80d 9'22.50\"E, 35d27'38.41\"N)\n # Center ( 35.8593750, 56.3089162) ( 35d51'33.75\"E, 56d18'32.10\"N)\n # 3857 'WGS 84 / Pseudo-Mercator'\n # Upper Left ( -939258.204,13932330.020) ( 8d26'15.00\"W, 77d 9'25.79\"N)\n # Lower Left ( -939258.204, 4226661.916) ( 8d26'15.00\"W, 35d27'38.41\"N)\n # Upper Right ( 8922952.934,13932330.020) ( 80d 9'22.50\"E, 77d 9'25.79\"N)\n # Lower Right ( 8922952.934, 4226661.916) ( 80d 9'22.50\"E, 35d27'38.41\"N)\n # Center ( 3991847.365, 9079495.968) ( 35d51'33.75\"E, 62d54'54.84\"N)\n # 3395 'WGS 84 / World Mercator'\n # Upper Left ( -939258.204,13932330.020) ( 8d26'15.00\"W, 77d14'24.81\"N)\n # Lower Left ( -939258.204, 4226661.916) ( 8d26'15.00\"W, 35d38'33.56\"N)\n # Upper Right ( 8922952.934,13932330.020) ( 80d 9'22.50\"E, 77d14'24.81\"N)\n # Lower Right ( 8922952.934, 4226661.916) ( 80d 9'22.50\"E, 35d38'33.56\"N)\n # Center ( 3991847.365, 9079495.968) ( 35d51'33.75\"E, 63d 4'14.87\"N)\n bounds_west,bounds_south,bounds_east,bounds_north=tile_bounds\n bounds_wsg84=\"bounds_wsg84: %f,%f,%f,%f\"% (bounds_west,bounds_south,bounds_east,bounds_north)\n mercator = GlobalMercator()\n tile_bounds=mercator.BoundsToMeters(tile_bounds)\n mbtiles_name=\"\";\n mbtiles_description=\"\"\n s_TIFFTAG_DOCUMENTNAME=\"\"\n s_TIFFTAG_IMAGEDESCRIPTION=\"\"\n s_TIFFTAG_SOFTWARE=\"\"\n s_TIFFTAG_DATETIME=\"\"\n s_TIFFTAG_ARTIST=\"\"\n s_TIFFTAG_HOSTCOMPUTER=\"\"\n s_TIFFTAG_COPYRIGHT=\"\"\n if self.metadata_input:\n metadata=dict(self.metadata_input)\n mbtiles_name=metadata.get('name','')\n mbtiles_description=metadata.get('description','')\n if self._metadata:\n for metadata_list in self._metadata:\n metadata=dict(metadata_list[0])\n mbtiles_name=metadata.get('name',mbtiles_name)\n mbtiles_description=metadata.get('description',mbtiles_description)\n s_TIFFTAG_DOCUMENTNAME=metadata.get('TIFFTAG_DOCUMENTNAME',mbtiles_name)\n s_TIFFTAG_IMAGEDESCRIPTION=metadata.get('TIFFTAG_IMAGEDESCRIPTION',mbtiles_description)\n s_TIFFTAG_SOFTWARE=metadata.get('TIFFTAG_SOFTWARE','')\n s_TIFFTAG_DATETIME=metadata.get('TIFFTAG_DATETIME','')\n s_TIFFTAG_ARTIST=metadata.get('TIFFTAG_ARTIST','')\n s_TIFFTAG_HOSTCOMPUTER=metadata.get('TIFFTAG_HOSTCOMPUTER','')\n s_TIFFTAG_COPYRIGHT=metadata.get('TIFFTAG_COPYRIGHT','')\n if s_TIFFTAG_DOCUMENTNAME == \"\":\n s_TIFFTAG_DOCUMENTNAME=mbtiles_name\n if 
s_TIFFTAG_IMAGEDESCRIPTION == \"\":\n s_TIFFTAG_IMAGEDESCRIPTION=mbtiles_description\n tiff_metadata=[]\n if s_TIFFTAG_DOCUMENTNAME != \"\":\n tiff_metadata.append(('TIFFTAG_DOCUMENTNAME',s_TIFFTAG_DOCUMENTNAME))\n if s_TIFFTAG_IMAGEDESCRIPTION != \"\":\n tiff_metadata.append(('TIFFTAG_IMAGEDESCRIPTION',s_TIFFTAG_IMAGEDESCRIPTION))\n if s_TIFFTAG_SOFTWARE != \"\":\n tiff_metadata.append(('TIFFTAG_SOFTWARE',s_TIFFTAG_SOFTWARE))\n else:\n tiff_metadata.append(('TIFFTAG_SOFTWARE',bounds_wsg84))\n if s_TIFFTAG_DATETIME != \"\":\n tiff_metadata.append(('TIFFTAG_DATETIME',s_TIFFTAG_DATETIME))\n if s_TIFFTAG_ARTIST != \"\":\n tiff_metadata.append(('TIFFTAG_ARTIST',s_TIFFTAG_ARTIST))\n if s_TIFFTAG_HOSTCOMPUTER != \"\":\n tiff_metadata.append(('TIFFTAG_HOSTCOMPUTER',s_TIFFTAG_HOSTCOMPUTER))\n if s_TIFFTAG_COPYRIGHT != \"\":\n tiff_metadata.append(('TIFFTAG_COPYRIGHT',s_TIFFTAG_COPYRIGHT))\n # this assumes the projection is Geographic lat/lon WGS 84\n xmin,ymin,xmax,ymax=tile_bounds\n image_width,image_height=image_bounds\n # Upper Left ( 20800.000, 22000.000)\n # Lower Right ( 24000.000, 19600.000)\n # Size is 15118, 11339\n # (24000-20800)/15118 = 3200 = 0,21166821 [xres]\n # (19600-22000)/11339 = 2400 = −0,211658876 [yres]\n # geo_transform = (20800.0, 0.2116682100807, 0.0, 22000.0, 0.0, -0.21165887644413)\n geo_transform = [xmin, (xmax-xmin)/image_width, 0, ymax, 0, (ymin-ymax)/image_height ]\n spatial_projection = osr.SpatialReference()\n spatial_projection.ImportFromEPSG(i_srid)\n logger.info(_(\"-I-> geotif_image: Saving as GeoTiff - image[%s] compression[%s]\") % (imagepath,self.tiff_compression))\n image_dataset = gdal.Open(image_gdal, gdal.GA_Update )\n image_dataset.SetProjection(spatial_projection.ExportToWkt())\n image_dataset.SetGeoTransform(geo_transform)\n driver = gdal.GetDriverByName(\"GTiff\")\n output_dataset = driver.CreateCopy(imagepath,image_dataset, 0, self.tiff_compression )\n if tiff_metadata:\n logger.info(_(\"-I-> geotif_image: tiff_metadata[%s]\") % tiff_metadata)\n output_dataset.SetMetadata(dict(tiff_metadata))\n # Once we're done, close properly the dataset\n output_dataset = None\n image_dataset = None\n os.remove(image_gdal)\n logger.info(_(\"-I-> geotif_image: Saved resulting image to '%s' as GeoTiff- bounds[%s]\") % (imagepath,tile_bounds))", "def convert(src: str, tag: str, size: int = 0, unzip=unzip):\n t1 = glob.glob(f'{src}/*GG/*/*t1.nii.gz')\n t2 = glob.glob(f'{src}/*GG/*/*t2.nii.gz')\n flair = glob.glob(f'{src}/*GG/*/*flair.nii.gz')\n t1ce = glob.glob(f'{src}/*GG/*/*t1ce.nii.gz')\n seg = glob.glob(f'{src}/*GG/*/*seg.nii.gz') # Ground Truth\n pat = re.compile('.*_(\\w*)\\.nii\\.gz')\n\n data_paths = [{\n pat.findall(item)[0]: item\n for item in items\n }\n for items in list(zip(t1, t2, t1ce, flair, seg))]\n\n if not size:\n size = len(data_paths)\n total = len(data_paths[:size])\n step = 25 / total\n\n for i, imgs in enumerate(data_paths[:size]):\n try:\n [unzip(imgs[m], tag) for m in ['t1', 't2', 't1ce', 'flair', 'seg']]\n print('\\r\\n' + f'Progress: '\n f\"[{'=' * int((i + 1) * step) + ' ' * (24 - int((i + 1) * step))}]\"\n f\"({math.ceil((i + 1) * 100 / (total))} %)\" + '\\r\\n',\n end=''\n )\n except Exception as e:\n print(f'Something went wrong with {imgs[\"t1\"]}, skipping...\\n Exception:\\n{str(e)}')\n continue", "def string_to_image(string, reference_images):\n # create string as array\n image = reference_images[string[0]]\n for i in string[1:]:\n image = np.hstack([image,reference_images[i]])\n return image", "def 
geotiff_read(ifile,metaData):\r\n\r\n file = gdal.Open(ifile, GA_ReadOnly)\r\n\r\n projection = file.GetProjection()\r\n src = osr.SpatialReference()\r\n src.ImportFromWkt(projection)\r\n proj = src.ExportToWkt()\r\n\r\n Nx = file.RasterXSize\r\n Ny = file.RasterYSize\r\n\r\n trans = file.GetGeoTransform()\r\n\r\n dx = trans[1]\r\n dy = trans[5]\r\n\r\n if metaData == \"A\":\r\n\r\n xp = np.arange(Nx)\r\n yp = np.arange(Ny)\r\n\r\n (Xp, Yp) = np.meshgrid(xp,yp)\r\n\r\n X = trans[0] + (Xp+0.5)*trans[1] + (Yp+0.5)*trans[2] #FIXME: bottleneck!\r\n Y = trans[3] + (Xp+0.5)*trans[4] + (Yp+0.5)*trans[5]\r\n\r\n if metaData == \"P\":\r\n\r\n xp = np.arange(Nx)\r\n yp = np.arange(Ny)\r\n\r\n (Xp, Yp) = np.meshgrid(xp,yp)\r\n\r\n X = trans[0] + Xp*trans[1] + Yp*trans[2] #FIXME: bottleneck!\r\n Y = trans[3] + Xp*trans[4] + Yp*trans[5]\r\n\r\n band = file.GetRasterBand(1)\r\n\r\n Z = band.ReadAsArray()\r\n\r\n dx = np.abs(dx)\r\n dy = np.abs(dy)\r\n\r\n return X, Y, Z, dx, dy, proj", "def geo_from_pix(pix, gt):\n if len(gt) != 6:\n raise ValueError(\"gt must be a 6 element tuple as defined by GDAL\")\n if pix.ndim == 1 and pix.shape[0] == 2:\n pix = pix.reshape(1, 2)\n if pix.shape[1] != 2:\n raise ValueError(\"pix must be an Nx2 length numpy.ndarray\")\n\n # Need a leading column of ones for the matrix multiply to work\n pix = np.hstack([np.ones(pix.shape[0]).reshape(pix.shape[0], 1), pix])\n gt_x = np.array(gt).reshape(2, 3).transpose()\n return np.dot(pix, gt_x)", "def getTiffInfo(path):\n # py 2/3 comp\n first_file = glob.glob(os.path.join(path, '*.tif'))[0]\n if ScanImageTiffReader is not None and ScanImageTiffReader(first_file).metadata() != '':\n string = ScanImageTiffReader(first_file).metadata()\n else:\n tfh = tifffile.TiffFile(first_file)\n # If software key is in dict tags --> SI2016\n if 'software' in tfh.pages[0].tags:\n string = tfh.pages[0].tags['software'].value.decode('utf-8')\n else:\n string = tfh.pages[0].tags['image_description'].value.decode('utf-8')\n string = \" \".join(string.split()).replace('\\\\', ' ')\n string = string.replace(')', '')\n string = string.replace('(', '')\n return string", "def calculate_tumor(filename, verbose = False):\n img = nibabel.load(filename)\n data = img.get_data()\n pixdim = img.header['pixdim']\n xyzt_units = img.header['xyzt_units']\n #pixdim[1],pixdim[2],pixdim[3] stores width, depth and height\n volume_per_pix = pixdim[1]*pixdim[2]*pixdim[3]\n\n volumes = {}\n volumes['total vasogenic edema volume'] = round(sum(data[data ==2 ])*volume_per_pix/1000, 3)\n volumes['enhancing portion'] = round(sum(data[data == 4]) * volume_per_pix/1000, 3)\n volumes['non enhancing portion'] = round(sum(data[data == 1]) * volume_per_pix/1000, 3)\n volumes['total tumor volume'] = round(volumes['enhancing portion'] + volumes['non enhancing portion'], 3)\n if xyzt_units == 1:\n volumes['unit'] = 'L'\n if xyzt_units == 2:\n volumes['unit'] = 'ML'\n if xyzt_units == 3:\n volumes['unit'] = 'UL'\n\n return volumes", "def annotate(path):\n if path.startswith('http') or path.startswith('gs:'):\n image = types. 
Image()\n image.source.image_uri = path\n else:\n with io.open(path, 'rb') as image_file:\n content = image_file.read()\n image = types.Image(content=content)\n web_detection = client.web_detection(image=image).web_detection\n return web_detection", "def convert_vtt_to_str(file):\n if \".vtt\" in file:\n vtt_to_srt(file)", "def make_raster_from_array(base_array, base_raster_path):\r\n srs = osr.SpatialReference()\r\n srs.ImportFromEPSG(26910) # UTM Zone 10N\r\n project_wkt = srs.ExportToWkt()\r\n\r\n pygeoprocessing.testing.create_raster_on_disk(\r\n [base_array],\r\n (1180000, 690000),\r\n project_wkt,\r\n -1,\r\n (1, -1), # Each pixel is 1x1 m\r\n filename=base_raster_path)", "def create_raster_datapackage(pk_type, path, file_flag, out_path):\n process_source(pk_type, path, file_flag, out_path)", "def array_to_raster(inTiff,array,outFile,dataType=gdal.GDT_Float32):\n \n inDataset = gdal.Open(inTiff, GA_ReadOnly)\n\n # You need to get those values like you did.\n x_pixels = inDataset.RasterXSize # number of pixels in x\n y_pixels = inDataset.RasterYSize # number of pixels in y\n PIXEL_SIZE = inDataset.GetGeoTransform()[1] # size of the pixel... \n x_min = inDataset.GetGeoTransform()[0] \n y_max = inDataset.GetGeoTransform()[3] # x_min & y_max are like the \"top left\" corner.\n wkt_projection = inDataset.GetProjectionRef()\n\n driver = gdal.GetDriverByName('GTiff')\n\n outDataset = driver.Create(\n outFile,\n x_pixels,\n y_pixels,\n 1,\n dataType, )\n\n outDataset.SetGeoTransform((\n x_min, # 0\n PIXEL_SIZE, # 1\n 0, # 2\n y_max, # 3\n 0, # 4\n -PIXEL_SIZE))\n\n outDataset.SetProjection(wkt_projection)\n outDataset.GetRasterBand(1).WriteArray(array)\n outDataset.FlushCache() # Write to disk.\n return outDataset, outDataset.GetRasterBand(1) #If you need to return, remenber to return also the dataset because the band don`t live without dataset.", "def _read_vtc(vtc_file):\r\n with open(vtc_file, 'rb') as f:\r\n filebytes = f.read()\r\n\r\n hdr = {}\r\n hdr['file_guid'] = hexlify(filebytes[:16])\r\n # not sure about the 4 Bytes inbetween\r\n\r\n i = 20\r\n mpg_file = []\r\n start_time = []\r\n end_time = []\r\n while i < len(filebytes):\r\n mpg_file.append(_make_str(unpack('c' * 261, filebytes[i:i + 261])))\r\n i += 261\r\n Location = filebytes[i:i + 16]\r\n correct = b'\\xff\\xfe\\xf8^\\xfc\\xdc\\xe5D\\x8f\\xae\\x19\\xf5\\xd6\"\\xb6\\xd4'\r\n assert Location == correct\r\n i += 16\r\n start_time.append(_filetime_to_dt(unpack('<q',\r\n filebytes[i:(i + 8)])[0]))\r\n i += 8\r\n end_time.append(_filetime_to_dt(unpack('<q',\r\n filebytes[i:(i + 8)])[0]))\r\n i += 8\r\n\r\n return mpg_file, start_time, end_time", "def create_dat_from_raster(raster):\n # TODO --- NOT DONE --- finish coding\n band = rio.open(raster)", "def _read_antti_location(location_file):\n # NOTE: genfromtxt() doesn't work with gzipped files as it should, so we\n # unzip the file ourself, and use io.BytesIO to fake out genfromtext()\n if location_file.split('.')[-1] == 'gz':\n ff = gzip.open(location_file, 'r')\n else:\n ff = open(location_file, 'r')\n\n sIO = io.BytesIO(ff.read().encode())\n ff.close()\n\n # read LatLon array (with optional labels...\n # either all have labels, or none, else genfromtxt() chokes)\n lll = list(zip(*np.atleast_1d(np.genfromtxt(\n sIO, comments=\"%\", dtype=None,\n names=['latReal','lonReal','radReal','labelString']\n ))))\n\n # handles older style(s) with no radius and/or labels\n if len(lll) > 3:\n lat, lon, rad = np.array(lll[0:3])\n label = np.array(lll[3])\n elif len(lll) > 2:\n lat, lon, 
rad = np.array(lll[0:3])\n if isinstance(rad[0], (str, bytes)):\n label = rad\n rad = np.ones(lat.shape)\n else:\n label = np.tile('', lat.shape)\n elif len(lll) == 2:\n lat, lon = np.array(lll[0:2])\n rad = np.ones(lat.shape)\n label = np.tile('', lat.shape)\n else:\n raise Exception('Requires (at least) latitude and longitude')\n\n return lat, lon, rad, label", "def _parse_wkt(s):\n if s.startswith('SRID'):\n s = s[s.index(';') + 1:]\n return shapely.wkt.loads(s)", "def array2tif(raster_file, new_raster_file, array):\n # Invoke the GDAL Geotiff driver\n raster = gdal.Open(raster_file)\n\n driver = gdal.GetDriverByName('GTiff')\n out_raster = driver.Create(new_raster_file,\n raster.RasterXSize,\n raster.RasterYSize,\n 1,\n gdal.GDT_Float32)\n out_raster.SetProjection(raster.GetProjection())\n # Set transformation - same logic as above.\n out_raster.SetGeoTransform(raster.GetGeoTransform())\n # Set up a new band.\n out_band = out_raster.GetRasterBand(1)\n # Set NoData Value\n out_band.SetNoDataValue(-1)\n # Write our Numpy array to the new band!\n out_band.WriteArray(array)", "def cellomics2tiff((file_in,dir_out)):\n \n file_out = cutils.getTifPath(file_in,dir_out)\n\n # don't repeat conversion if converted file exists\n # and is newer than the original data\n if os.path.isfile(file_out) \\\n and os.stat(file_out).st_mtime > os.stat(file_in).st_mtime:\n return\n\n if platform.system() == 'Linux':\n #cmd = ['bfconvert','-nogroup',file_in,file_out,'> /dev/null']\n #cmd = ['/opt/bftools/bfconvert','-nogroup',file_in,file_out,']\n #print \" \".join(cmd)\n #FNULL = open(os.devnull,'w')\n #subprocess.call(cmd, stdout=FNULL, shell=False)\n #FNULL.close()\n cmd = '/opt/bftools/bfconvert -overwrite -nogroup %s %s > /dev/null'%(file_in,file_out)\n #print cmd\n os.system(cmd)\n else:\n cmd = ['bfconvert','-nogroup',file_in,file_out]\n print \" \".join(cmd)\n subprocess.call(cmd, shell=True)", "def read_tif_to_np(tif_path: str):\n with rasterio.open(tif_path) as f:\n return f.read()", "def save_array_as_geotif(array, source_tif_path, out_path):\n if len(array.shape) > 2:\n height, width, depth = array.shape\n else:\n height, width = array.shape\n depth = 1\n source_tif = gdal.Open(source_tif_path)\n driver = gdal.GetDriverByName(\"GTiff\")\n dataset = driver.Create(out_path, width, height, depth, gdal.GDT_Float32)\n if depth != 1:\n for i in range(depth):\n dataset.GetRasterBand(i + 1).WriteArray(array[:, :, i])\n else:\n dataset.GetRasterBand(1).WriteArray(array)\n geotrans = source_tif.GetGeoTransform()\n proj = source_tif.GetProjection()\n dataset.SetGeoTransform(geotrans)\n dataset.SetProjection(proj)\n dataset.FlushCache()\n dataset = None", "def test_gini():\n sat = gini.GINIZFile(get_test_file(\"TIGH05\", fponly=True))\n assert sat.archive_filename() == \"GOES_HI_WV_201507161745.png\"\n assert str(sat) == \"TIGH05 KNES 161745 Line Size: 560 Num Lines: 520\"\n assert sat.awips_grid() == 208", "def convert_str_to_image(image_string):\n image = image_string.partition('base64,')[2]\n img_data = base64.b64decode(image)\n return img_data", "def loadtrkfile(T_filename, threshold_short_streamlines=10.0):\r\n print(\"Loading %s\" % T_filename)\r\n T, hdr = trackvis.read(T_filename, as_generator=False)\r\n T = np.array([s[0] for s in T], dtype=np.object)\r\n \r\n\r\n \r\n return T", "def test_convert_to_geotiff(self):\n fnames = modis.convert_to_geotiff(self.fnames[0], outdir=os.path.dirname(__file__))\n for f in fnames:\n ext = os.path.splitext(f)[1]\n suffix = os.path.splitext(f)[0].split('_')[1]\n 
self.assertTrue(os.path.exists(f))\n if ext != '.ovr':\n self.assertTrue(suffix in products['MCD43A4.006']['bandnames'])", "def retrieve_geotiff_metadata(sFilename_geotiff_in):\n pDriver = gdal.GetDriverByName('GTiff')\n \n pDataset = gdal.Open(sFilename_geotiff_in, gdal.GA_ReadOnly)\n\n if pDataset is None:\n print(\"Couldn't open this file: \" + sFilename_geotiff_in)\n sys.exit(\"Try again!\")\n else: \n pProjection = pDataset.GetProjection()\n pSpatial_reference = osr.SpatialReference(wkt=pProjection) \n ncolumn = pDataset.RasterXSize\n nrow = pDataset.RasterYSize \n pGeotransform = pDataset.GetGeoTransform()\n dOriginX = pGeotransform[0]\n dOriginY = pGeotransform[3]\n dPixelWidth = pGeotransform[1]\n pPixelHeight = pGeotransform[5] \n return dPixelWidth, dOriginX, dOriginY, nrow, ncolumn, pSpatial_reference, pProjection, pGeotransform", "def open_tile(filename):\n geoimg = gippy.GeoImage(filename, True)\n z, x, y = map(int, geoimg.basename().split('-')[0:4])\n tile = Tile.from_google(google_x=x, google_y=y, zoom=z)\n geoimg.set_srs('EPSG:3857')\n minpt = tile.bounds[0].meters\n maxpt = tile.bounds[1].meters\n affine = np.array(\n [\n minpt[0], (maxpt[0]-minpt[0])/geoimg.xsize(), 0.0,\n maxpt[1], 0.0, -(maxpt[1]-minpt[1])/geoimg.ysize()\n ])\n geoimg.set_affine(affine)\n geoimg.set_nodata(-1)\n return geoimg", "def convert(threshold, infile, tmpfile_1, tmpfile_2, outfile):\n args = [\n \"gdal_calc.py\",\n '-A', infile,\n '--outfile={}'.format(tmpfile_1),\n '--calc=logical_and(A>={}, A<999)'.format(threshold),\n '--type=Byte', '--NoDataValue=0',\n '--co=SPARSE_OK=YES',\n '--co=NBITS=1',\n '--quiet'\n # Could enable compression\n # --co=\"COMPRESS=LZW\"\n ]\n subprocess.run(args)\n\n subprocess.run([\n \"gdal_polygonize.py\",\n tmpfile_1,\n '-q',\n '-f', 'ESRI Shapefile',\n tmpfile_2\n ])\n\n subprocess.run([\n \"ogr2ogr\",\n '-a_srs', 'EPSG:4326',\n outfile,\n tmpfile_2\n ])\n\n subprocess.run([\"rm\", tmpfile_1])\n subprocess.run([\"rm\", tmpfile_2])\n subprocess.run([\"rm\", tmpfile_2.replace('shp', 'shx')])\n subprocess.run([\"rm\", tmpfile_2.replace('shp', 'dbf')])\n subprocess.run([\"rm\", tmpfile_2.replace('shp', 'prj')])", "def readTIFImage(filename): \n sType = 'uint16'\n with tff.TiffFile(filename) as tif:\n im_out = tif.asarray().astype(sType)\n\n return im_out", "def transfer_rast_to_vect(poly_cstr, lyrname, out_field, rast_cstr, srs, method, where=None, geom_field=\"geometry\",\n id_field=\"ogc_fid\", buffer_rad=0, restrict_to_tile=True):\n ds = gdal.Open(rast_cstr)\n georef = ds.GetGeoTransform()\n raster_array = ds.ReadAsArray()\n img_shape = (ds.RasterYSize, ds.RasterXSize)\n LOG.info(\"Done reading raster, shape is: %s\", img_shape)\n ctx = {\n 'lyrname': lyrname,\n 'out_field': out_field,\n 'where': where,\n 'geom_field': geom_field,\n 'id_field': id_field,\n \"srs\": srs\n }\n if buffer_rad:\n ctx['geom_field'] = 'st_buffer({}, {})'.format(geom_field, buffer_rad)\n layer_sql = \"\"\"select {geom_field}, {out_field}, {id_field} as the_id from {lyrname}\"\"\".format(**ctx)\n if restrict_to_tile:\n # Weird geoms could be skipped by this, so add as an optione\n layer_sql += \" where st_intersects({geom_field}, st_geomfromtext(WKT_EXT, {srs}))\".format(**ctx)\n\n if where:\n if restrict_to_tile:\n layer_sql += \" and \" + where\n else:\n layer_sql += \" where \" + where\n LOG.info(\"Layersql: %s\", layer_sql)\n extent = get_extent(georef, img_shape)\n LOG.info(\"Extent: %s\", extent)\n vec_ds, lyr = open(poly_cstr, layersql=layer_sql, extent=extent, open_for_update=True)\n 
mask = just_burn_layer(lyr, georef, img_shape, attr='the_id', dtype=np.int32, all_touched=False)\n LOG.info(\"Done burning - setting attr in %d features\", lyr.GetFeatureCount())\n LOG.debug(\"%s\", np.unique(mask))\n n_ok = 0\n for n, feat in enumerate(lyr):\n if n % 100 == 0:\n LOG.info(\"Done: %d, ok: %d\", n, n_ok)\n daid = feat['the_id']\n ctx['the_id'] = daid\n area = feat.GetGeometryRef().GetArea()\n I, J = np.where(mask == daid)\n # At least 30% covered if already set - todo: provide this as argument\n if I.size > 0 and (feat[out_field] is None or I.size * (georef[1] ** 2) > area * 0.3):\n is_ok, val = method(raster_array, I, J)\n if is_ok:\n n_ok += 1\n ctx['_value_'] = val\n updatesql = \"update {lyrname} set {out_field}={_value_} where {id_field}={the_id}\".format(**ctx)\n LOG.debug(\"Executing: %s\", updatesql)\n vec_ds.ExecuteSQL(updatesql)\n else:\n LOG.debug(\"Nothing found for %s - mask size: %s, valid: %s, area: %s\",\n daid, I.size, feat.GetGeometryRef().IsValid(), area)", "def _makeWGSurl(WGSline) :\n if not WGSline.startswith(\"WGS \") :\n raise Exception(\"Line does not start with \\\"WGS \\\"\")\n accession = WGSline.split(\" \")[-1]\n accRoot = accession.split(\"-\")[0][0:6]\n url = \"http://www.ncbi.nlm.nih.gov/Traces/wgs/?download=\" + accRoot + \".1.gbff.gz\"\n return url", "def handle_as_url(view: sublime.View, point: int, string: str, name: str):\n\n # Let's assume this url as input:\n # (https://upload.wikimedia.org/wikipedia/commons/8/84/Example.svg)\n\n # Download the image\n # FIXME: avoid nested try-except clauses\n try:\n try:\n f = urlopen(unquote(string)) # <==\n except Exception:\n try:\n url_path = quote(string).replace(\"%3A\", ':', 1)\n f = urlopen(url_path)\n except Exception:\n f = urlopen(string)\n # don't fill the console with stack-trace when there`s no connection !!\n except Exception as e:\n print(e)\n return\n\n # file needs conversion ?\n need_conversion = name.endswith(formats_to_convert) # => True\n basename, ext = osp.splitext(name) # => (\"Example\", \".svg\")\n # create a temporary file\n temp_img = osp.join(TEMP_DIR, \"tmp_image\" + ext) # => \"TEMP_DIR/tmp_image.svg\"\n\n # Save downloaded data in the temporary file\n content = f.read()\n with open(temp_img, \"wb\") as img:\n img.write(content)\n\n # if the file needs conversion, convert it then read data from the resulting png\n if need_conversion:\n ext = \".png\"\n # keep the image's temporary file and name for later use\n conv_file = temp_img # => \"TEMP_DIR/tmp_image.svg\"\n\n # => \"TEMP_DIR/tmp_image.png\"\n temp_png = osp.splitext(temp_img)[0] + \".png\"\n\n # use the magick command of Imagemagick to convert the image to png\n magick(temp_img, temp_png)\n\n # read data from the resulting png\n with open(temp_png, \"rb\") as png:\n content = png.read()\n\n # set temp_file and name to the png file\n temp_img = temp_png # => \"TEMP_DIR/tmp_image.png\"\n\n width, height, real_width, real_height, size = get_data(view, temp_img)\n encoded = str(base64.b64encode(content), \"utf-8\")\n\n def on_navigate(href):\n\n if href == \"save\":\n if need_conversion:\n save(conv_file, name, \"url\")\n else:\n save(temp_img, name, \"url\")\n elif href == \"save_as\":\n if need_conversion:\n convert(conv_file, \"url\", name)\n else:\n convert(temp_img, \"url\", name)\n else:\n sublime.active_window().open_file(temp_img)\n\n view.show_popup(\n TEMPLATE % (width, height, ext, encoded, real_width, real_height,\n str(size // 1024) + \"KB\" if size >= 1024 else str(size) + 'B'),\n 
sublime.HIDE_ON_MOUSE_MOVE_AWAY,\n point,\n *view.viewport_extent(),\n on_navigate=on_navigate\n )", "def gtiff_to_array(fname, get_global = False):\n tif = gdal.Open(fname)\n a = tif.ReadAsArray()\n gt = tif.GetGeoTransform()\n print(gt)\n if get_global:\n print(gdal.Info(tif))\n global lons, lats, loni, lati, xx, yy, xi, yi\n lons = np.array([round(gt[0]+gt[1]*i,5) for i in range(a.shape[1])])\n lats = np.array([round(gt[3]+gt[5]*i,5) for i in range(a.shape[0])])\n loni = np.array([i for i in range(a.shape[1])])\n lati = np.array([i for i in range(a.shape[0])])\n xx,yy = np.meshgrid(lons, lats)\n xi,yi = np.meshgrid(loni,lati)\n return", "def readtiff(datafile, sub_im, cr):\n\n with rasterio.open(datafile) as src:\n d = src.read(1, window=Window(cr[0]-sub_im, cr[1]-sub_im, sub_im*2, sub_im*2))\n\n #print(\"Number of elements and size of the array is\",d.size, d.shape)\n #d[d==0]= np.nan # convert zeros to nan\n return d", "def rasterToBinary(input_raster,output_dir,name_override=None):\n\tif name_override:\n\t\tout_path = os.path.join(output_dir,name_override)\n\telse:\n\t\tin_base,in_ext = os.path.splitext(os.path.basename(input_raster))\n\t\tout_path = os.path.join(output_dir,in_base+\"_BINARY\"+in_ext)\n\n\tds = gdal.Open(input_raster,0)\n\tband = ds.GetRasterBand(1)\n\tnoData = band.GetNoDataValue()\n\tsrs = ds.GetProjection()\n\tgt = ds.GetGeoTransform()\n\tarr = BandReadAsArray(band)\n\tds = band = None # close dataset and band\n\tarr[arr != noData] = 1\n\tarr[arr == noData] = noData\n\trasterYSize, rasterXSize = arr.shape\n\tdriver = gdal.GetDriverByName('GTiff')\n\tdataset = driver.Create(out_path,rasterXSize,rasterYSize,1,gdal.GDT_Byte,['COMPRESS=DEFLATE'])\n\tdataset.GetRasterBand(1).WriteArray(arr)\n\tdataset.GetRasterBand(1).SetNoDataValue(noData)\n\tdataset.SetGeoTransform(gt)\n\tdataset.SetProjection(srs)\n\tdataset.FlushCache() # Write to disk\n\tdel dataset\n\n\treturn out_path", "def from_path(fname):\n def parse_header(lines):\n metadata = {}\n for ln in lines:\n if ln.startswith('#') or len(ln) < 2:\n continue\n match = re.match('(\\w+)\\s+([\\w\\s\\.]+)', ln)\n if not match:\n warnings.warn(\"warning: can't understand line: %s\" % ln)\n continue\n key, value = match.group(1).lower(), match.group(2)\n if key == 'version':\n metadata[key] = value\n elif key in ('fields', 'type'):\n metadata[key] = value.split()\n elif key in ('size', 'count'):\n metadata[key] = map(int, value.split())\n elif key in ('width', 'height', 'points'):\n metadata[key] = int(value)\n elif key == 'viewpoint':\n metadata[key] = map(float, value.split())\n elif key == 'data':\n metadata[key] = value.strip().lower()\n # TO-DO apparently count is not required?\n # add some reasonable defaults\n if 'count' not in metadata:\n metadata['count'] = [1] * len(metadata['fields'])\n if 'viewpoint' not in metadata:\n metadata['viewpoint'] = [0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0]\n if 'version' not in metadata:\n metadata['version'] = '.7'\n return metadata\n\n def _build_dtype(metadata_):\n \"\"\" build numpy structured array dtype from pcl metadata.\n note that fields with count > 1 are 'flattened' by creating multiple\n single-count fields.\n TO-DO: allow 'proper' multi-count fields.\n \"\"\"\n fieldnames = []\n typenames = []\n numpy_pcd_type_mappings = [(np.dtype('float32'), ('F', 4)),\n (np.dtype('float64'), ('F', 8)),\n (np.dtype('uint8'), ('U', 1)),\n (np.dtype('uint16'), ('U', 2)),\n (np.dtype('uint32'), ('U', 4)),\n (np.dtype('uint64'), ('U', 8)),\n (np.dtype('int16'), ('I', 2)),\n (np.dtype('int32'), 
('I', 4)),\n (np.dtype('int64'), ('I', 8))]\n pcd_type_to_numpy_type = dict((q, p) for (p, q) in numpy_pcd_type_mappings)\n\n for f, c, t, s in zip(metadata_['fields'],\n metadata_['count'],\n metadata_['type'],\n metadata_['size']):\n np_type = pcd_type_to_numpy_type[(t, s)]\n if c == 1:\n fieldnames.append(f)\n typenames.append(np_type)\n else:\n fieldnames.extend(['%s_%04d' % (f, i) for i in xrange(c)])\n typenames.extend([np_type] * c)\n dtype = np.dtype(zip(fieldnames, typenames))\n return dtype\n\n def parse_binary_pc_data(f, dtype, metadata):\n rowstep = metadata['points'] * dtype.itemsize\n # for some reason pcl adds empty space at the end of files\n buf = f.read(rowstep)\n return np.fromstring(buf, dtype=dtype)\n\n def parse_binary_compressed_pc_data(f, dtype, metadata):\n # compressed size of data (uint32)\n # uncompressed size of data (uint32)\n # compressed data\n # junk\n fmt = 'II'\n compressed_size, uncompressed_size = struct.unpack(fmt, f.read(struct.calcsize(fmt)))\n compressed_data = f.read(compressed_size)\n # (compressed > uncompressed)\n # should we read buf as raw binary?\n buf = lzf.decompress(compressed_data, uncompressed_size)\n if len(buf) != uncompressed_size:\n raise Exception('Error decompressing data')\n # the data is stored field-by-field\n pcs_data = np.zeros(metadata['width'], dtype=dtype)\n ix = 0\n for dti in range(len(dtype)):\n dt = dtype[dti]\n bytess = dt.itemsize * metadata['width']\n column = np.fromstring(buf[ix:(ix + bytess)], dt)\n pcs_data[dtype.names[dti]] = column\n ix += bytess\n return pcs_data\n\n with open(fname, 'rb') as f:\n header = []\n while True:\n ln = f.readline().strip()\n header.append(ln)\n if ln.startswith('DATA'):\n metadata = parse_header(header)\n dtype = _build_dtype(metadata)\n break\n if metadata['data'] == 'ascii':\n pc_data = np.loadtxt(f, dtype=dtype, delimiter=' ')\n pc_data.dtype = np.float32\n pc_data = pc_data.reshape(-1, 4)\n elif metadata['data'] == 'binary':\n pc_data = parse_binary_pc_data(f, dtype, metadata)\n elif metadata['data'] == 'binary_compressed':\n pc_data = parse_binary_compressed_pc_data(f, dtype, metadata)\n else:\n print('File->py_pcd.py: DATA field is not \"ascii\",maybe \"binary\" or \"binary_compressed\", try to add method for both')\n return 'CODE: 0x123'\n pc = point_cloud(metadata, pc_data)\n return pc", "def shorten_with_is_gd(url):\n u = urllib2.urlopen(\"http://is.gd/api.php?longurl=\"+url)\n return u.read()", "def asc_to_gtif(i_dir):\n\n # Set search for all files with suffix in specified folder\n q = join(i_dir, \"*.asc\")\n # List of all TIF files\n asc_fps = glob.glob(q)\n\n # Loop over all files\n for item in asc_fps:\n # Open ASC file\n data = np.loadtxt(item, delimiter=\";\")\n\n # Determine the size of the output array\n x_size = np.count_nonzero(data[:, 0] == data[0, 0])\n y_size = np.count_nonzero(data[:, 1] == data[0, 1])\n\n # Transform columns to grid\n arr = np.reshape(data[:, 2], (1, x_size, y_size), order=\"F\")\n arr = np.flip(arr, axis=1)\n\n # Determine pixel resolution\n arr_x = np.reshape(data[:, 0], (x_size, y_size), order=\"F\")\n pix_x = arr_x[0, 1] - arr_x[0, 0]\n arr_y = np.reshape(data[:, 1], (x_size, y_size), order=\"F\")\n pix_y = arr_y[1, 0] - arr_y[0, 0]\n\n # Determine top-left coordinates\n left = data[:, 0].min()\n top = data[:, 1].max() + pix_y # Adjust for pixel size\n\n # Set meta data for GeoTIF\n transform = from_origin(left, top, pix_x, pix_y)\n si_crs = {'init': 'EPSG:3794'} # D96/TM\n\n _, name = split(item[:-4])\n save_file = join(i_dir, name + 
'.tif')\n\n # Save array as with metadata as GeoTIFF\n new_dataset = rasterio.open(save_file, \"w\", driver=\"GTiff\",\n height=arr.shape[1], width=arr.shape[2],\n count=1, dtype=str(arr.dtype),\n crs=si_crs,\n transform=transform, compress=\"lzw\")\n new_dataset.write(arr)\n new_dataset.close()\n\n # Remove ASC file\n # remove(item)\n\n # Output message:\n out_msg = 'Successfully converted ASC files to GeoTIFF!'\n\n return out_msg", "def handle_as_url(view: View, point: int, string: str, name: str):\n\n # Let's assume this url as input:\n # (https://upload.wikimedia.org/wikipedia/commons/8/84/Example.svg)\n\n # Download the image\n # FIXME: avoid nested try-except clauses\n try:\n try:\n f = urlopen(unquote(string)) # <==\n except:\n try:\n url_path = quote(string).replace(\"%3A\", ':', 1)\n f = urlopen(url_path)\n except:\n f = urlopen(string)\n # don't fill the console with stack-trace when there`s no connection !!\n except Exception as e:\n print(e)\n return\n\n # file needs conversion ?\n need_conversion = name.endswith(FORMAT_TO_CONVERT) # => True\n basename, ext = osp.splitext(name) # => (\"Example\", \".svg\")\n # create a temporary file\n tmp_file = osp.join(TEMP_DIR,\n \"tmp_image\" + (ext if need_conversion else \".png\")\n ) # => \"TEMP_DIR/tmp_image.svg\"\n\n # Save downloaded data in the temporary file\n content = f.read()\n with open(tmp_file, \"wb\") as dst:\n dst.write(content)\n\n # if the file needs conversion, convert it then read data from the resulting png\n if need_conversion:\n # keep the image's temporary file and name for later use\n conv_file = tmp_file # => \"TEMP_DIR/tmp_image.svg\"\n conv_name = name # => \"Example.svg\"\n\n # => \"TEMP_DIR/tmp_image.png\"\n png = osp.splitext(tmp_file)[0] + \".png\"\n\n # use the magick command of Imagemagick to convert the image to png\n magick(tmp_file, png)\n\n # set temp_file and name to the png file\n tmp_file = png # => \"TEMP_DIR/tmp_image.png\"\n name = basename + \".png\" # => \"Example.png\"\n\n # read data from the resulting png\n with open(tmp_file, \"rb\") as dst:\n content = dst.read()\n\n real_width, real_height, size = get_image_size(tmp_file)\n width, height = get_dimensions(view, tmp_file)\n encoded = str(base64.b64encode(content), \"utf-8\")\n size = str(size // 1024) + \"KB\" if size >= 1024 else str(size) + 'B'\n\n def on_navigate(href):\n\n if href == \"save\":\n if need_conversion:\n save(conv_file, conv_name, \"url\")\n else:\n save(tmp_file, name, \"url\")\n elif href == \"save_as\":\n if need_conversion:\n convert(conv_file, \"url\", conv_name)\n else:\n convert(tmp_file, \"url\", name)\n else:\n sublime.active_window().open_file(tmp_file)\n\n view.show_popup(\n TEMPLATE % (width, height, \"png\", encoded, real_width, real_height, size),\n sublime.HIDE_ON_MOUSE_MOVE_AWAY,\n point,\n *view.viewport_extent(),\n on_navigate=on_navigate\n )", "def gpgga_convert(line):\r\n gps = line.strip().split(',')\r\n #check data\r\n if gps[6] == '0' :\r\n return\r\n fix = ''\r\n if gps[6] == '1':\r\n fix = 'GPS fix'\r\n elif gps[6] == '2':\r\n fix = 'DGPS fix'\r\n elif gps[6] == '4':\r\n fix = 'RTK Fix coordinate (centimeter precision)'\r\n elif gps[6] == '5':\r\n fix = 'RTK Float (decimeter precision)'\r\n #utc = gps[1][0:2] + ':' + gps[1][2:4] + ':' + gps[1][4:6]\r\n lat = ddm_dd_convert(gps[2], gps[3])\r\n long = ddm_dd_convert(gps[4], gps[5]) \r\n return [lat, long, fix]", "def reader():\n return GeometryTestGen()", "def decode_tilename(self, tilename):\n tf = self.core.tile_ysize_m // 100000\n\n # allow 
short-form of tilename (e.g. \"E012N018T6\")\n if len(tilename) == 10:\n tile_size_m = int(tilename[-1]) * 100000\n if tile_size_m != self.core.tile_xsize_m:\n raise ValueError(self.msg1)\n llx = int(tilename[1:4])\n if llx % tf:\n raise ValueError(self.msg2)\n lly = int(tilename[5:8])\n if lly % tf:\n raise ValueError(self.msg2)\n tilecode = tilename[-2:]\n if tilecode != self.core.tiletype:\n raise ValueError(self.msg1)\n subgrid_id = self.core.tag\n sampling = self.core.sampling\n\n # allow long-form of tilename (e.g. \"EU500M_E012N018T6\")\n elif len(tilename) == 17:\n subgrid_id = tilename[0:2]\n if subgrid_id != self.core.tag:\n raise ValueError(self.msg1)\n sampling = Equi7Grid.decode_sampling(tilename[2:5])\n if sampling != self.core.sampling:\n raise ValueError(self.msg1)\n tile_size_m = int(tilename[-1]) * 100000\n if tile_size_m != self.core.tile_xsize_m:\n raise ValueError(self.msg1)\n llx = int(tilename[8:11])\n if llx % tf:\n raise ValueError(self.msg2)\n lly = int(tilename[12:15])\n if lly % tf:\n raise ValueError(self.msg2)\n tilecode = tilename[-2:]\n if tilecode != self.core.tiletype:\n raise ValueError(self.msg1)\n\n # wrong length\n else:\n raise ValueError(self.msg1)\n\n return subgrid_id, sampling, tile_size_m, llx * 100000, lly * 100000, tilecode", "def UpdateGT(out_file, data, src_file, epsg = 4326, drv = 'GTiff', datatype = gdal.GDT_Float32, NoData = -999):\n \n #assign which driver to use for the file format - default is .tif\n driver = gdal.GetDriverByName(drv)\n \n #source raster from which to use the geospatial metadata\n src_gt = gdal.Open(src_file)\n \n #data shape - detects whether to produce multiple bands or not\n if len(data.shape) == 2:\n [cols, rows] = data.shape\n n = 1\n else:\n [n, cols, rows] = data.shape\n\n #create the destination file\n dst_gt = driver.Create(out_file, rows, cols, n, datatype)\n \n #get the geotransform from the source file\n gt = src_gt.GetGeoTransform()\n \n #coordinate system in which to create the raster, in wkt form\n srs = osr.SpatialReference()\n srs.ImportFromEPSG(epsg)\n dest_wkt = srs.ExportToWkt()\n \n #set the projection in the desired reference system\n dst_gt.SetGeoTransform(gt)\n dst_gt.SetProjection(dest_wkt)\n \n #set the data\n if n==1:\n dst_gt.GetRasterBand(1).WriteArray(data)\n dst_gt.GetRasterBand(1).SetNoDataValue(NoData)\n #close and write to file\n dst_gt.FlushCache()\n #used if there are multiple bands\n else:\n for i in range(n):\n dst_gt.GetRasterBand(i+1).WriteArray(data[i])\n dst_gt.GetRasterBand(i+1).SetNoDataValue(NoData)\n dst_gt.FlushCache()", "def ocr_to_tsv(img):\n try:\n tsv_result = pytesseract.image_to_data(img, config='--psm 6 --oem 3 -l eng+spa tsv')\n except Exception as err:\n from utilities import logs\n logs.exception_report(f\"Exception encountered in pytesseract.image_to_data() function: {err}\")\n return None\n return tsv_result", "def image_to_template(filename):\n return _image2template(filename)", "def readogle(filename, **kw):\n \n # 2008-12-21 18:53 IJC: Created\n\n f = open(filename, 'r')\n raw = f.readlines()\n f.close()\n\n nstars = len(raw)\n\n raw2 = array([line.split() for line in raw])\n ra = raw2[:,1]\n dec = raw2[:,2]\n xref = raw2[:,3]\n yref = raw2[:,4]\n vmag = raw2[:,5]\n imag = raw2[:,7]\n \n xref = [map(float, [x]) for x in xref]\n yref = [map(float, [y]) for y in yref]\n vmag = [map(float, [v]) for v in vmag]\n imag = [map(float, [i]) for i in imag]\n\n return (ra, dec, xref, yref, vmag, imag)", "def loads(string):\n endianness = string[0:1]\n if endianness == 
BIG_ENDIAN:\n big_endian = True\n elif endianness == LITTLE_ENDIAN:\n big_endian = False\n else:\n raise ValueError(\"Invalid endian byte: '0x%s'. Expected 0x00 or 0x01\"\n % binascii.hexlify(endianness.encode()).decode())\n\n type_bytes = string[1:5]\n if not big_endian:\n # To identify the type, order the type bytes in big endian:\n type_bytes = type_bytes[::-1]\n\n geom_type = __BINARY_TO_GEOM_TYPE.get(type_bytes)\n data_bytes = string[5:] # FIXME: This won't work for GeometryCollections\n\n importer = __loads_registry.get(geom_type)\n\n if importer is None:\n __unsupported_geom_type(geom_type)\n return importer(big_endian, type_bytes, data_bytes)", "def create_tourism_raster(self):\n self.create_raster('flickr',\n pixeltype='32BF', noData=0,\n value_col='pictures')", "def unpack_landsat(inpath, outpath, bands=None, clouds=None):\r\n tfile = tarfile.open(inpath,'r:gz')\r\n if bands is None:\r\n #Extract all of the bands.\r\n tfile.extractall(outpath);\r\n else:\r\n #Extract just the bands specified.\r\n\r\n #Get the names of the members of the archive\r\n names = tfile.getnames()\r\n\r\n #Make sure the bands are given as a list\r\n bands = list(bands)\r\n\r\n #Find those elements of the archive that match the bands specified\r\n #\t(plus metadata file)\r\n members = tfile.getmembers()\r\n elements = []\r\n blist = ''.join(str(i) for i in bands)\r\n if clouds:\r\n patternStr = '.*_B[' + blist + ']\\.TIF|.*_MTL.txt|.*_band[' + blist + \\\r\n ']\\.tif|.*_cfmask.*\\.tif|.*\\.xml|.*cloud.*\\.tif'\r\n else:\r\n patternStr = '.*_B[' + blist + ']\\.TIF|.*_MTL.txt|.*_band[' + blist + \\\r\n ']\\.tif|.*_cfmask.*\\.tif|.*\\.xml'\r\n\r\n pattern = re.compile(patternStr)\r\n for i, name in enumerate(names):\r\n if pattern.match(name):\r\n elements.append(members[i])\r\n tfile.extractall(outpath,members = elements)\r\n\r\n '''\r\n Note: the below section was written with the intention of\r\n adding valuable metadata info to the .TIF files themselves.\r\n This theoretically allows easy retreival of key metadata info,\r\n but the method to set new metadata seems unreliable. Appears to\r\n be an open issue, but there may be a solution. 
For now, assume\r\n this does not work.\r\n\r\n #Now we loop through each output, adding important metadata.\r\n #Fetch metadata and make sure there is only one file.\r\n meta = glob.glob(outpath + '/*_MTL.txt')\r\n if len(meta) > 1:\r\n raise('MORE THAN ONE METADATA FILE FOUND!')\r\n\r\n metadata = read_metadata(meta[0])\r\n\r\n #Add the metadata.\r\n for tif in glob.glob(outpath + '*.TIF'):\r\n #Open the tif file\r\n ras = gdal.Open(tif)\r\n\r\n #Preserve any existing metadata & add important metadata from file\r\n meta = ras.GetMetadata()\r\n meta['SCENE_CENTER_TIME'] = metadata['SCENE_CENTER_TIME']\r\n meta['DATE_ACQUIRED'] = metadata['DATE_ACQUIRED']\r\n meta['LANDSAT_SCENE_ID'] = metadata['LANDSAT_SCENE_ID']\r\n\r\n #Now write the new metadata back to the tif.\r\n ras.SetMetadata(meta)\r\n ras.FlushCache()\r\n ras = None\r\n '''", "def loadtrkfile(T_filename, threshold_short_streamlines=10.0):\r\n print(\"Loading %s\" % T_filename)\r\n T, hdr = trackvis.read(T_filename, as_generator=False)\r\n T = np.array([s[0] for s in T], dtype=np.object)\r\n \r\n\r\n \r\n return T, hdr", "def bfr2generic(native_img):\n n_records = native_img.lat.shape[0]\n generic_data = get_template_ASCATL2_SMX(n_records)\n\n fields = [('jd', 'jd', None),\n ('sat_id', 'Satellite Identifier', None),\n ('abs_line_nr', None, None),\n ('abs_orbit_nr', 'Orbit Number', None),\n ('node_num', 'Cross-Track Cell Number', None),\n ('line_num', 'line_num', None),\n ('as_des_pass', 'as_des_pass', None),\n ('swath', 'swath_indicator', None),\n ('azif', 'f_Antenna Beam Azimuth', 1.7e+38),\n ('azim', 'm_Antenna Beam Azimuth', 1.7e+38),\n ('azia', 'a_Antenna Beam Azimuth', 1.7e+38),\n ('incf', 'f_Radar Incidence Angle', 1.7e+38),\n ('incm', 'm_Radar Incidence Angle', 1.7e+38),\n ('inca', 'a_Radar Incidence Angle', 1.7e+38),\n ('sigf', 'f_Backscatter', 1.7e+38),\n ('sigm', 'm_Backscatter', 1.7e+38),\n ('siga', 'a_Backscatter', 1.7e+38),\n ('sm', 'Surface Soil Moisture (Ms)', 1.7e+38),\n (\n 'sm_noise', 'Estimated Error In Surface Soil Moisture', 1.7e+38),\n ('sm_sensitivity', 'Soil Moisture Sensitivity', 1.7e+38),\n ('sig40', 'Backscatter', 1.7e+38),\n ('sig40_noise',\n 'Estimated Error In Sigma0 At 40 Deg Incidence Angle', 1.7e+38),\n ('slope40', 'Slope At 40 Deg Incidence Angle', 1.7e+38),\n ('slope40_noise',\n 'Estimated Error In Slope At 40 Deg Incidence Angle', 1.7e+38),\n ('dry_backscatter', 'Dry Backscatter', 1.7e+38),\n ('wet_backscatter', 'Wet Backscatter', 1.7e+38),\n ('mean_surf_sm', 'Mean Surface Soil Moisture', 1.7e+40),\n ('correction_flag', 'Soil Moisture Correction Flag', 1.7e+38),\n ('processing_flag', 'Soil Moisture Processing Flag', 1.7e+38),\n ('aggregated_quality_flag', None),\n ('snow_cover_probability', 'Snow Cover', 1.7e+38),\n ('frozen_soil_probability', 'Frozen Land Surface Fraction',\n 1.7e+38),\n ('innudation_or_wetland', 'Inundation And Wetland Fraction',\n 1.7e+38),\n ('topographical_complexity', 'Topographic Complexity', 1.7e+38)]\n\n for field in fields:\n if field[1] is None:\n continue\n\n if field[2] is not None:\n valid_mask = (native_img.data[field[1]] != field[2])\n generic_data[field[0]][valid_mask] = native_img.data[field[1]][\n valid_mask]\n else:\n generic_data[field[0]] = native_img.data[field[1]]\n\n # convert sat_id (spacecraft id) to the intern definition\n sat_id_lut = np.array([0, 0, 0, 4, 3, 5])\n generic_data['sat_id'] = sat_id_lut[generic_data['sat_id']]\n\n img = Image(native_img.lon, native_img.lat, generic_data,\n native_img.metadata, native_img.timestamp,\n timekey='jd')\n\n 
return img", "def array_to_raster(array, x, y):\n\n # Files info\n dst_filename = 'atiff.tiff'\n \n # Load matlab file\n front_dict = loadmat(infile,squeeze_me=True, struct_as_record=False)\n #print front_dict\n \n # You need to get those values like you did.\n x_pixels = len(x) # number of pixels in x\n y_pixels = len(y) # number of pixels in y\n PIXEL_SIZE = 1000 # size of the pixel...(in m?) \n x_min = np.min(x)\n y_max = np.min(y) # x_min & y_max are like the \"top left\" corner.\n wkt_projection = 'a projection in wkt that you got from other file'\n\n driver = gdal.GetDriverByName('GTiff')\n\n dataset = driver.Create(\n dst_filename,\n x_pixels,\n y_pixels,\n 1,\n gdal.GDT_Float32, )\n\n dataset.SetGeoTransform((\n x_min, # 0\n PIXEL_SIZE, # 1\n 0, # 2\n y_max, # 3\n 0, # 4\n -PIXEL_SIZE)) \n\n dataset.SetProjection(wkt_projection)\n dataset.GetRasterBand(1).WriteArray(array)\n dataset.FlushCache() # Write to disk.\n return dataset, dataset.GetRasterBand(1) #If you need to ret", "def _get_image(x):\n return b64encode(x).decode('ascii')", "def fileInfo(tif: TiffFile):\n print(tif.flags)\n print(tif.geotiff_metadata)\n for page in tif.pages:\n print(page.tags)\n print(page.geotiff_tags)\n print(page.shape)\n print(page.dtype)\n print(page.flags)", "def exif_to_datetimestr(exif_data_string):\n dateregex = re.compile(r\"\"\"\n (?P<year>\\d\\d\\d\\d): # match the year\n (?P<month>\\d\\d): # match the month\n (?P<day>\\d\\d) # match the day\n \\s\n (?P<hour>\\d\\d): # match the hour\n (?P<min>\\d\\d): # match the minute\n (?P<sec>\\d\\d) # match the second\n \"\"\", re.VERBOSE)\n\n match = re.match(dateregex, exif_data_string)\n if match:\n year = match.group(1)\n month = match.group(2)\n day = match.group(3)\n hour = match.group(4)\n mins = match.group(5)\n sec = match.group(6)\n return year + month + day + hour + mins + sec\n else:\n raise DateStrError", "def ocr_tsv_to_ocrdf(tsv_result):\n \n if tsv_result:\n try: \n df_result = pd.read_csv(io.StringIO(tsv_result), sep='\\t', quotechar='', quoting=3) # no quoting\n except Exception as err:\n from utilities import logs\n logs.exception_report(f\"Exception encountered in converting tsv_result from pytesseract: {err}\\n\"\n f\"pytesseract result:\\n {tsv_result}\")\n import pdb; pdb.set_trace()\n return None \n return df_result\n return None", "def cr2lonlat_for_geotif(path):\n old_cs, new_cs, gta, local_vars = _create_xform(path)\n transform = osr.CoordinateTransformation(old_cs, new_cs)\n\n def composite(c, r):\n \"\"\"xform from (c, r) to (lon, lat)\"\"\"\n x, y = gta * (c, r)\n lat, lon = transform.TransformPoint(x, y)[:2]\n if not -90 <= lat <= 90:\n raise ValueError('illegal lat value, did you switch coordinates')\n return lon, lat\n \n return composite", "def from_file(cls, filename, tel_id):\n filetype = get_file_type(filename)\n if filetype == 'simtel':\n return _load_camera_geometry_from_hessio_file(tel_id, filename)\n else:\n raise TypeError(\"File type {} not supported\".format(filetype))", "def tiff_to_ndarray(fn):\n return tifffile.imread(fn)", "def tiffwrite(filename, im):\n tf.imwrite(filename, im)", "def fgm2iaga(path,\n fgm_fname,\n ftype='v',\n output_template='{stn}{date:%Y%m%d}{ftype}{interval}.{interval}'):\n df = parse(fgm_fname)\n delta = (df.index[1] - df.index[0]).total_seconds()\n if delta == 1.0:\n interval = 'sec'\n elif delta == 60.0:\n interval = 'min'\n else:\n raise ValueError('unknown data interval found in {}'.format(fgm_fname))\n stn = df.siteid[:3].upper()\n out_fname = os.path.join(path,\n 
output_template.format(stn=stn.lower(),\n date=df.date,\n ftype=ftype,\n interval=interval))\n with open(out_fname, 'w') as fid:\n fid.write(HEADER_TEMPLATE.format(stn=stn.upper(),\n lat=df.lat,\n lon=df.lon,\n el=0))\n for row in df.itertuples():\n dt = row.Index\n if row.flag:\n X = Y = Z = F = 99999\n else:\n X = row.x\n Y = row.y\n Z = row.z\n F = np.linalg.norm([X, Y, Z])\n fid.write('{date:%Y-%m-%d %H:%M:%S.000} {date:%j}'\n ' {X:>9.2f} {Y:>9.2f} {Z:>9.2f} {F:>9.2f}\\n'.format(date=dt,\n X=X,\n Y=Y,\n Z=Z,\n F=F))\n return out_fname", "def decode_geometry(geom: str) -> BasePolygon:\n return shape(geobuf.decode(bytes.fromhex(geom))).buffer(0)", "def arr_to_tif(raster_f, segments, out_tif='s.tif', ndval=-10001):\n # get geospatial profile, will apply for output file\n with rio.open(raster_f) as src:\n meta = src.profile\n nodatavals = src.read_masks(1).astype('int16')\n\n # load numpy array if file is given\n if type(segments) == str:\n segments = np.load(segments)\n segments = segments.astype('int16')\n\n nodatavals[nodatavals == 0] = ndval\n segments[nodatavals == ndval] = nodatavals[nodatavals == ndval]\n\n out_meta = meta # modify profile based on numpy array\n out_meta['count'] = 1 # output is single band\n out_meta['dtype'] = 'int16' # data type is float64\n\n # write to a raster\n with rio.open(out_tif, 'w', **out_meta) as dst:\n dst.write(segments, 1)" ]
[ "0.5859697", "0.5527902", "0.54801387", "0.5408986", "0.5402602", "0.53733236", "0.535592", "0.52642715", "0.51979923", "0.5132323", "0.5090385", "0.5080128", "0.50617826", "0.50474924", "0.49894196", "0.4950945", "0.49262798", "0.4917392", "0.49108842", "0.49036774", "0.49000454", "0.48969093", "0.48832598", "0.48617065", "0.4856312", "0.4843212", "0.48326096", "0.48159057", "0.4812618", "0.4788052", "0.47842386", "0.4779904", "0.4768055", "0.47603354", "0.47587857", "0.4749086", "0.473164", "0.47287858", "0.47080827", "0.46978152", "0.46912023", "0.4678034", "0.4675693", "0.4660171", "0.46561804", "0.46540636", "0.4646049", "0.4632653", "0.46110588", "0.46031797", "0.45848837", "0.45797622", "0.4573663", "0.4563023", "0.45540804", "0.4547454", "0.45421442", "0.45418864", "0.45313394", "0.45253366", "0.45211822", "0.45196593", "0.45194024", "0.45112264", "0.45086965", "0.45025545", "0.45006496", "0.44914198", "0.44886622", "0.4488565", "0.44885522", "0.44849822", "0.44787174", "0.44706222", "0.4468943", "0.4457689", "0.44553718", "0.44537732", "0.4449093", "0.44411635", "0.4441007", "0.44354767", "0.4432837", "0.44318044", "0.44306743", "0.44261596", "0.44213873", "0.44183913", "0.44176644", "0.44157946", "0.4406674", "0.44049945", "0.44043347", "0.44022512", "0.44009516", "0.43986437", "0.43978688", "0.43943542", "0.43941396", "0.43941164", "0.43933806" ]
0.0
-1
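Several of the negative passages in the record above read or write GDAL geotransforms (`GetGeoTransform`, `SetGeoTransform`, rasterio's `from_origin`). Purely as an illustrative aside, not part of the dataset itself, here is a minimal sketch of recovering per-pixel map-unit sizes from a GeoTransform; the file path is hypothetical:

```python
from osgeo import gdal

ds = gdal.Open("example.tif")  # hypothetical input raster
gt = ds.GetGeoTransform()      # (origin_x, px_width, row_rot, origin_y, col_rot, px_height)
pixel_width = gt[1]            # map units per pixel in x
pixel_height = abs(gt[5])      # gt[5] is negative for north-up rasters
ds = None                      # release the dataset handle
```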
Create a FooterHtml object
def __init__(self, footer_html=None):
    self._footer_html = None

    if footer_html is not None:
        self.footer_html = footer_html
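Taken together with the `footer_html` getter recorded later in this file and the setter that appears among this record's negatives, the constructor above suggests a standard Python property pattern. A minimal sketch, assuming the class name from the query and conventional `@property` wiring (neither is spelled out in this record):

```python
class FooterHtml:
    def __init__(self, footer_html=None):
        self._footer_html = None
        if footer_html is not None:
            self.footer_html = footer_html  # routes through the setter below

    @property
    def footer_html(self):
        return self._footer_html

    @footer_html.setter
    def footer_html(self, html):
        self._footer_html = html  # assumed setter; mirrors the negatives in this record
```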
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_footer(self):\n self.footer = '</div>' \\\n '</div>' \\\n '</div>' \\\n '<div class=\"footer\">' \\\n '<div class=\"container\">' \\\n '<p class=\"text-muted\">Copyright Harm Brugge 2014.</p>' \\\n '</div>' \\\n '</div>' \\\n '</body>' \\\n '</html>'\n return self.footer", "def _get_footer(self, footer):\n if footer is None:\n html = self.footer()\n else:\n html = footer\n return html", "def gen_footer():\n return '</body></html>'", "def get_footer() -> html:\n footer = dbc.Container([\n html.Hr(),\n dbc.Row([\n dbc.Col([\n 'Made with ❤ in Frankfurt from ',\n dcc.Link(children='STATWORX',\n href='https://www.statworx.com/',\n style={\"color\": COLOR_STATWORX}),\n ]),\n dbc.Col(dcc.Link(\n children='Try Again!', href='/', style={\"color\": COLOR_STATWORX}),\n className=\"text-right\")\n ])\n ],\n className='mb-4')\n\n return footer", "def footer_html(self, html):\n self._footer_html = html", "def footer_html(self):\n return self._footer_html", "def footer_html():\n note_div = html.Div(\n [\n dcc.Markdown(\n \"This website uses natural language processing (NLP) to power search on a set of research papers related to COVID-19.\"\n \" It was created by the team behind [Matscholar](https://www.matscholar.com), a research effort led by the [HackingMaterials](https://hackingmaterials.lbl.gov), \"\n \" [Persson](https://perssongroup.lbl.gov), and [Ceder](https://ceder.berkeley.edu) research\"\n \" groups at Lawrence Berkeley National Lab.\"\n \" The virus icon in our logo was made by Freepik from www.flaticon.com\",\n className=\"column is-half is-size-6\"\n )\n ],\n className=\"columns is-centered\"\n\n )\n\n common_footer_style = \"has-text-weight-bold\"\n\n about_matscholar = html.A(\n \"About Matscholar\",\n href=\"https://github.com/materialsintelligence/matscholar-web\",\n target=\"_blank\",\n className=common_footer_style,\n )\n\n privacy_policy = html.A(\n \"Privacy Policy\",\n href=\"https://www.iubenda.com/privacy-policy/55585319\",\n target=\"_blank\",\n className=common_footer_style,\n )\n\n submit_feedback = html.A(\n \"Matscholar Forum\",\n href=\"https://discuss.matsci.org/c/matscholar\",\n target=\"_blank\",\n className=common_footer_style,\n )\n\n footer_link_tree = html.Div(\n [\n about_matscholar,\n html.Span(\" | \"),\n privacy_policy,\n html.Span(\" | \"),\n submit_feedback,\n ]\n )\n\n footer_copyright = html.Div(\n html.Span(\"Copyright © 2019 - Materials Intelligence\")\n )\n\n footer = html.Div(\n [note_div, footer_link_tree, footer_copyright],\n id=\"footer_container\",\n className=\"content has-text-centered\",\n )\n\n footer_container = html.Div(footer)\n return footer_container", "def WriteFooter(self):\n return", "def get(self):\n return self.footer_html", "def footer():\n\treturn \"\"\"<footer><table width=\"100%\"><th>Weather Icons by <a href=\"https://github.com/erikflowers/weather-icons\">Erik Flowers</a></th>\n\t<th><a href=\"http://forecast.io/\">Powered by Forecast</a></th></table></footer></div>\n\t</body></html>\"\"\"", "def writeFooter(self):\n pass", "def getFooter():\n return _FOOTER", "def common_html_footer(outfile: TextIO, indexpath: str = \"\") -> None:\n outfile.write(\"\\n\")\n outfile.write(\" <footer>\\n\")\n outfile.write(\" <figure id=\\\"footmap\\\"><script type=\\\"text/javascript\\\" \"\n \"src=\\\"//rf.revolvermaps.com/0/0/4.js?i=5f9t1sywiez&amp;m=0&amp;h=75&amp;c=ff0000&amp;r=30\\\" \"\n \"async=\\\"async\\\"></script><figcaption>Visitors</figcaption></figure>\\n\")\n outfile.write(\" <p id=\\\"citation\\\"><a href=\\\"\" + 
indexpath + init_data().cite_url +\n \"\\\">\" + fetch_fa_glyph(\"site cite\") + \"How to cite this site</a></p>\\n\")\n outfile.write(\" <p id=\\\"contact\\\">Questions or comments about the site? Contact \"\n \"<a href=\\\"mailto:\" + init_data().site_author_email + \"\\\">\" + fetch_fa_glyph(\"mail\") +\n \"Dr. Michael S. Rosenberg</a></p>\\n\")\n outfile.write(\" <p id=\\\"copyright\\\">Release: \" + init_data().version +\n \" &mdash; Copyright &copy; 2003&ndash;\" + str(init_data().current_year) +\n \" All Rights Reserved</p>\\n\")\n outfile.write(\" </footer>\\n\")\n outfile.write(\" </body>\\n\")\n outfile.write(\"</html>\\n\")", "def email_footer():\n footer = \"\"\n\n return footer", "def WriteFooter(self):\n self.WriteText('}')", "def footer():\n return u'</form></body></html>\\n'", "def getFooter(HTMLstring):\n footer = open(os.path.dirname(os.path.realpath(__file__))+\"/html/footer.html\", \"r\")\n HTMLstring += footer.read()\n footer.close()\n return HTMLstring", "def footer(self, **args):\n return self.pageConfig['footer'] % self.pageConfig", "def footer(self):\n pass", "def footer(self):\n pass", "def footer(node):\n\n current_time = datetime.datetime.now()\n return '''\n </div>\n <div id=\"edit\">\n Last edit: ''' + time.strftime(\"%m/%d/%Y %I:%M:%S %p\", node.page.last_edit) + '''\n </div>\n </div>\n <footer>\n &copy; ''' + str(current_time.year) + ' ' + AUTHOR + ''' | Generated with <a href=\"http://www.minimalblue.com/software/minimalsite.html\">minimalsite</a> \n </footer>\n </div>\n </body>\n</html>'''", "def format_report_footer(self):", "def test_format_emperor_html_footer_string(self):\n self.maxDiff = 5000\n\n # footer for a jackknifed pcoa plot without biplots\n out_string = format_emperor_html_footer_string(False, True)\n self.assertItemsEqual(out_string.split('\\n'),\n EXPECTED_FOOTER_A.split('\\n'))\n self.assertEqual(out_string, EXPECTED_FOOTER_A)\n\n # footer for biplots without jackknifing\n out_string = format_emperor_html_footer_string(True, False)\n self.assertItemsEqual(out_string.split('\\n'),\n EXPECTED_FOOTER_B.split('\\n'))\n self.assertEqual(out_string, EXPECTED_FOOTER_B)\n\n # no biplots nor jackknifing\n out_string = format_emperor_html_footer_string(False, False)\n self.assertItemsEqual(out_string.split('\\n'),\n EXPECTED_FOOTER_C.split('\\n'))\n self.assertEqual(out_string, EXPECTED_FOOTER_C)\n\n # no biplots no jackknifing but with vectors\n out_string = format_emperor_html_footer_string(False, False, True)\n self.assertItemsEqual(out_string.split('\\n'),\n EXPECTED_FOOTER_D.split('\\n'))\n self.assertEqual(out_string, EXPECTED_FOOTER_D)\n\n # comparison plot\n out_string = format_emperor_html_footer_string(False, False,\n False, True)\n self.assertItemsEqual(out_string.split('\\n'),\n EXPECTED_FOOTER_E.split('\\n'))\n self.assertEqual(out_string, EXPECTED_FOOTER_E)", "def footer(self):\n return self._footer", "def print_footer():\n print('</text>')", "def render_footer_log(self):\n\n # clean up the log file\n if self._selenium_log_file:\n\n # render the footer\n html = render_to_string(\n self.selenium_footer_template,\n {'id': self.id(), 'description': self.__doc__})\n\n # write it to the file\n self._selenium_log_file.write(html)", "def parse_footer(self):\n lines=self.lines\n bodyfinish=re.compile(r\"</body>\", re.IGNORECASE).search(lines).span()[0]\n self.footer=lines[bodyfinish:]", "def makeHTML(header, body, footer):\n f = open(\"crimenews.html\", \"w\")\n f.write(header+body+footer)", "def write_footer(out):\n\n out.write(\"\"\" 
</g>\n</svg>\n\"\"\")", "def html_close():\n return(\"\"\"\n\n </section>\n\n </div>\n\n</main>\n</body>\n</html>\"\"\")", "def _get_footer_text(self):\n return _(THEME_FOOTER_TEXT)", "def footer(self):\n file = open(\"imdb_output.html\", \"a\")\n file.write(\"\\t\\t</table>\\n\\t</body>\\n</html>\\n\")", "def set_footer(self, **kwargs):\n self.footer_text = kwargs.get('text')\n self.footer_icon = kwargs.get('icon_url')", "def get_footer_text(self):\n return self._get_footer_text()", "def close_body(self) -> str:\n self.html_doc = self.html_doc + \"\"\"</body>\\n\n \"\"\"\n return self.html_doc", "def footer(self):\n self.set_y(-15)\n self.set_font(self.police, 'I', 8)\n self.cell(w=0, h=10, txt=f\"Page {self.page_no()}\" + '/{nb}', border='B', ln=0, align='R')", "def format_emperor_html_footer_string(has_biplots=False, has_ellipses=False,\n has_vectors=False, has_edges=False):\n\n # we use python's built-in ternary operator to add or not a string\n # see _EMPEROR_FOOTER_HTML_STRING\n format_dict = {'biplot_spheres_color_selector':\n _BIPLOT_SPHERES_COLOR_SELECTOR if has_biplots else '',\n 'biplot_visibility_selector':\n _BIPLOT_VISIBILITY_SELECTOR if has_biplots else '',\n 'taxa_labels_selector':\n _TAXA_LABELS_SELECTOR if has_biplots else '',\n 'taxa_labels_color_selector':\n _TAXA_LABELS_COLOR_SELECTOR if has_biplots else '',\n 'edges_color_selector':\n _EDGES_COLOR_SELECTOR if has_edges else '',\n 'ellipse_opacity_slider':\n _ELLIPSE_OPACITY_SLIDER if has_ellipses else '',\n 'vectors_opacity_slider':\n _VECTORS_OPACITY_SLIDER if has_vectors else '',\n 'edges_visibility_selector':\n _EDGES_VISIBILITY_SELECTOR if has_edges else ''}\n\n return _EMPEROR_FOOTER_HTML_STRING.format(**format_dict)", "def footer(self, footer):\n\n self._footer = footer", "def footer(self):\n # close the svg tag\n yield '</svg>'\n # if this is a stand-alone document\n if self.standalone:\n # render a blank line\n yield ''\n # and the document footer\n yield from super().footer()\n # all done\n return", "def writeFooter( self ):\n total_branch_length = (max(self.mNodeWidthsEnd) - min(self.mNodeWidthsStart)) / self.mBranchScaleFactor\n \n self.mFooterX = self.getHeaderWidth()\n self.mFooterY = self.getHeaderHeight() + self.mDataHeight + self.mSeparatorHeight\n\n ruler_start = self.mFooterX\n ruler_end = self.mFooterX + int(total_branch_length * self.mBranchScaleFactor)\n\n if \"ruler\" in self.mRulerElements:\n ## full length ruler with tick marks and labels\n e = SVGdraw.line( ruler_start,\n self.mFooterY + self.mRulerTickSize + 1,\n ruler_end,\n self.mFooterY + self.mRulerTickSize + 1,\n stroke = \"rgb(%i,%i,%i)\" % BLACK,\n stroke_width = 1 )\n self.addElement( e )\n\n ## get reasonable intervalls\n\n increment = self.mRulerIncrement * self.mBranchScaleFactor\n\n ## adjust increment for extremely long trees\n if (ruler_end - ruler_start) / increment > 1000:\n increment = (ruler_end - ruler_start) / 1000.0\n self.mRulerIncrement = increment / self.mBranchScaleFactor\n \n if \"right-ticks\" in self.mRulerElements:\n\n x = ruler_end\n while x >= ruler_start:\n e = SVGdraw.line( x,\n self.mFooterY,\n x,\n self.mFooterY + 2 * self.mRulerTickSize + 1,\n stroke = \"rgb(%i,%i,%i)\" % BLACK,\n stroke_width = 1 )\n self.addElement( e )\n x -= self.mRulerIncrement * self.mBranchScaleFactor\n \n self.mFooterY += 2 * self.mRulerTickSize + 1 + self.mSeparatorHeight\n\n if \"left-ticks\" in self.mRulerElements:\n \n x = ruler_start\n while x <= ruler_end:\n e = SVGdraw.line( x,\n self.mFooterY,\n x,\n self.mFooterY + 2 * 
self.mRulerTickSize + 1,\n stroke = \"rgb(%i,%i,%i)\" % BLACK,\n stroke_width = 1 )\n self.addElement( e )\n x += increment\n \n self.mFooterY += 2 * self.mRulerTickSize + 1 + self.mSeparatorHeight\n\n if \"scale\" in self.mRulerElements:\n\n w = int(self.mRulerIncrement * self.mBranchScaleFactor)\n \n e = SVGdraw.line( ruler_end,\n self.mFooterY + self.mRulerTickSize + 1,\n ruler_end - w,\n self.mFooterY + self.mRulerTickSize + 1,\n stroke = \"rgb(%i,%i,%i)\" % BLACK,\n stroke_width = 1 )\n \n self.addElement( e )\n \n e = SVGdraw.line( ruler_end,\n self.mFooterY,\n ruler_end,\n self.mFooterY + 2 * self.mRulerTickSize + 1,\n stroke = \"rgb(%i,%i,%i)\" % BLACK,\n stroke_width = 1 )\n \n self.addElement( e )\n\n e = SVGdraw.line( ruler_end - w,\n self.mFooterY,\n ruler_end - w,\n self.mFooterY + 2 * self.mRulerTickSize + 1,\n stroke = \"rgb(%i,%i,%i)\" % BLACK,\n stroke_width = 1 )\n \n self.addElement( e )\n\n e = SVGdraw.text( ruler_end - w / 2,\n self.mFooterY + 2 * self.mRulerTickSize + 1 + self.mRulerFontSize ,\n self.mRulerFormat % self.mRulerIncrement,\n self.mRulerFontSize,\n self.mRulerFont,\n stroke = \"rgb(%i,%i,%i)\" % BLACK,\n text_anchor = \"middle\" )\n\n self.addElement(e)\n self.mFooterY += 2 * self.mRulerTickSize + 1 + self.mRulerFontSize \n \n e, self.mFooterX, self.mFooterY = self.mDecoratorExternalNodes.getLegend( self.mFooterX, self.mFooterY )\n self.addElements(e)\n e, self.mFooterX, self.mFooterY = self.mDecoratorInternalNodes.getLegend( self.mFooterX, self.mFooterY )\n self.addElements(e)", "def test_create_page_with_footer(self):\n\n footer_block = PageFooterBlock.objects.create(**_footer_block_data)\n link_1 = PageHeadDropMenu.objects.create\\\n (internal_link='home', **_head_link_data)\n link_2 = PageHeadDropMenu.objects.create(\n external_link=_external_link, **_head_link_data)\n link_3 = PageHeadDropMenu.objects.create(**_head_link_data)\n link_element_1 = PageHeadDropMenuElement.objects.create(\n internal_link='home', **_head_drop_link_data)\n link_element_2 = PageHeadDropMenuElement.objects.create(\n external_link=_external_link, **_head_drop_link_data)\n link_3.drop_links.add(link_element_1, link_element_2)\n footer_block.top_links.add(link_1, link_2, link_3)\n contact_address = Address.objects.create(**_address_block)\n footer_block.contact_address.add(contact_address)\n footer_block.a_link.add(link_element_1)\n\n Page.objects.create(footer_block=footer_block, **_page_data)\n response = self.client.get(reverse('home'))\n self.assertEqual(response.status_code, 200)\n\n self.assertIn('top_links', response.context)\n self.assertIn('contact', response.context)\n self.assertIn('top_contacts', response.context)\n self.assertIn('bot_copy_right', response.context)\n self.assertIn('bot_link', response.context)\n self.assertIn('bot_back_top', response.context)", "def close(self) -> str:\n self.html_doc = self.html_doc + \"\"\"</report_html>\\n\n \"\"\"\n return self.html_doc", "def html_wrapper(content):\n\n header = '''<!DOCTYPE html>\n<html lang=\"en\">\n<head>\n <meta charset=\"utf-8\">\n <title>''' + SITE_NAME + '''</title>\n</head>\n<body>\n'''\n\n footer = '''\n</body>\n</html>'''\n return header + content + footer", "def footer_links(request):\n data = _get_footer_data(request) \n response_data = json.dumps(data)\n\n response = HttpResponse(response_data, mimetype= \"application/json\")\n response['Cache-Control'] = 'public'\n\n return response", "def generate_html(self):\n html_text_1 = \"\"\"\n <div class=\"concept\">\n\n \t\t<div 
class=\"concept-title\">\n\n \t\t\t\t\"\"\" + self.title\n\n html_text_2 = \"\"\"\n \t\t</div>\n\n \t\t<div class=\"concept-description\">\n\n\t\t <p>\n\t\t\t\n \t\t \t\t\"\"\" + self.description + \"\"\" \n \n </p>\"\"\"\n\n html_text_3 = '''\n\n \t\t</div>\n\n </div>'''\n\n return html_text_1 + html_text_2 + html_text_3", "def close_body(self) -> str:\n self.html_table = self.html_table + \"\"\"</tbody> \\n\"\"\"\n return self.html_table", "def __footer(self):\n result = \"\"\"This API and the related documentation has been created with <a href=\"https://github.com/opencitations/ramose\" target=\"_blank\">RAMOSE</a>, the *Restful API Manager Over SPARQL Endpoints*, developed by <a href=\"http://orcid.org/0000-0003-0530-4305\" target=\"_blank\">Silvio Peroni</a> and <a href=\"https://marilenadaquino.github.io\">Marilena Daquino</a>.\"\"\"\n return markdown(result)", "def foot(cls):\n return ''", "def test_00_footer(self):\r\n url = '/'\r\n # As Anonymou user\r\n res = self.app.get(url, follow_redirects=True)\r\n dom = BeautifulSoup(res.data)\r\n err_msg = \"Footer links should not be shown to anonymous users\"\r\n assert dom.find(id='footer_links') is None, err_msg\r\n # As Authenticated user but NOT ADMIN\r\n self.signin()\r\n res = self.app.get(url, follow_redirects=True)\r\n dom = BeautifulSoup(res.data)\r\n err_msg = \"Footer links should not be shown to authenticated users\"\r\n assert dom.find(id='footer_links') is None, err_msg\r\n self.signout\r\n # As Authenticated user but ADMIN\r\n self.signin(email=self.root_addr, password=self.root_password)\r\n res = self.app.get(url, follow_redirects=True)\r\n dom = BeautifulSoup(res.data)\r\n err_msg = \"Footer links should not be shown to admin users\"\r\n assert dom.find(id='footer_links') is None, err_msg\r\n self.signout()", "def produce_header_footer(self):\n header = pylatex.PageStyle(\"header\", header_thickness=0.1)\n\n image_filename = self.get_image()\n with header.create(pylatex.Head(\"L\")) as logo:\n logo.append(pylatex.StandAloneGraphic(image_options=\"width=110px\", filename=image_filename))\n\n # Date\n with header.create(pylatex.Head(\"R\")):\n header.append(\"Date Report Issued: \" + datetime.today().strftime('%Y-%m-%d'))\n\n # Footer\n with header.create(pylatex.Foot(\"C\")):\n with header.create(pylatex.Tabular('lcr')) as table:\n table.add_row('', bold('Data interpretation guidelines can be found in RDIMS document ID: 10401305'),\n '')\n table.add_row('', bold('This report was generated with OLC AutoROGA v0.0.1'), '')\n return header", "def test_00_footer(self):\r\n url = '/'\r\n # As Anonymou user\r\n res = self.app.get(url, follow_redirects=True)\r\n dom = BeautifulSoup(res.data)\r\n err_msg = \"Footer links should be shown to anonymous users\"\r\n assert dom.find(id='footer_links') is not None, err_msg\r\n # As Authenticated user but NOT ADMIN\r\n self.signin()\r\n res = self.app.get(url, follow_redirects=True)\r\n dom = BeautifulSoup(res.data)\r\n err_msg = \"Footer links should be shown to authenticated users\"\r\n assert dom.find(id='footer_links') is not None, err_msg\r\n self.signout\r\n # As Authenticated user but ADMIN\r\n self.signin(email=self.root_addr, password=self.root_password)\r\n res = self.app.get(url, follow_redirects=True)\r\n dom = BeautifulSoup(res.data)\r\n err_msg = \"Footer links should be shown to admin users\"\r\n assert dom.find(id='footer_links') is not None, err_msg\r\n self.signout()", "def footer(node):\n\n return '''\n <img src=\"/images/colosseo.png\" title=\"disegno del colosseo\" 
alt=\"colosseo\" class=\"colosseo\"/>\n\t \t </section>\n </div>\n\t\t\t <div class=\"clear\"></div>\n </div>\n </div>\n\t\t<div class=\"container_12 clearfix\">\n\t <footer class=\"grid_12\">\n\t\t\t\t <p>&copy <a href=\"http://luca.postregna.name\">lucapost</a> ''' + str(current_time.year) + '''; <a rel=\"license\" href=\"http://creativecommons.org/licenses/by-nc/3.0/\">license</a>; <a href=\"/privacy.html\" title=\"normativa per la privacy\">privacy</a>; edit: ''' + time.strftime(\"%Y%m%d %I:%M:%S %p\", node.page.last_edit) + '''</p>\n <p>email: <a href=\"mailto:patdilenardo@gmail.com\" title=\"contatto email\">patdilenardo@gmail.com</a>; phone: +39 3389456208</p>\n\t\t </footer>\t\n\t\t\t<div class=\"clear\"></div>\n \t\t<script src=\"/js/hashgrid.js\"></script> \n\t\t<script src=\"/js/flux.min.js\" type=\"text/javascript\" charset=\"utf-8\"></script>\n\t\t<script type=\"text/javascript\" charset=\"utf-8\">\n\t\t\t$(function(){\n\t\t\t\tif(!flux.browser.supportsTransitions)\n\t\t\t\t\talert(\"Flux Slider requires a browser that supports CSS3 transitions\");\n\t\t\t\t\t\n\t\t\t\twindow.f = new flux.slider('#slider', {\n\t\t\t\t\tpagination: false,\n controls: true,\n captions: true,\n\t\t\t\t\ttransitions: [ 'dissolve' ],\n\t\t\t\t\tdelay: 5500\n\t\t\t\t});\n\t\t\t});\n\t\t</script> \n <script>\n (function(i,s,o,g,r,a,m){i['GoogleAnalyticsObject']=r;i[r]=i[r]||function(){\n (i[r].q=i[r].q||[]).push(arguments)},i[r].l=1*new Date();a=s.createElement(o),\n m=s.getElementsByTagName(o)[0];a.async=1;a.src=g;m.parentNode.insertBefore(a,m)\n })(window,document,'script','//www.google-analytics.com/analytics.js','ga');\n ga('create', 'UA-6164762-14', 'nicelyventilated.it');\n ga('send', 'pageview');\n </script>\n\n<!--[if lt IE 7]><p class=chromeframe>Your browser is <em>ancient!</em> <a href=\"http://browsehappy.com/\">Upgrade to a different browser</a> or <a href=\"http://www.google.com/chromeframe/?redirect=true\">install Google Chrome Frame</a> to experience this site.</p><![endif]-->\n</body>\n</html>'''", "def write_footer(self, fd):\n fd.write(f\"END {self.name}\")\n if len(self.data_items) > 0:\n one_based = self.data_items[0].structure.type == DatumType.integer\n if isinstance(self.data_items[0], mfdatascalar.MFScalar):\n entry = self.data_items[0].get_file_entry(\n values_only=True, one_based=one_based\n )\n else:\n entry = self.data_items[0].get_file_entry()\n fd.write(str(entry.rstrip()))\n fd.write(\"\\n\")", "def create_footer(categories):\n\n footer = '\\n'\n row_len = max([len(category.name) for category in categories])\n for idx in range(row_len):\n footer += ' '\n for category in categories:\n try:\n footer += f' {category.name[idx]} '\n except IndexError:\n footer += ' '\n footer += ' \\n'\n return footer", "def parse_footer(self): # -> tuple[list[Unknown], str]:\n ...", "def parse_footer(self): # -> tuple[list[Unknown], str]:\n ...", "def make_bottom_text( self ):\n return None", "async def set_ban_footer(self, ctx, *, footer_string):\n if not footer_string:\n local_embed = discord.Embed(\n title=f'No string detected, I need a string parameter to work',\n description=' ',\n color=0x651111\n )\n await ctx.send(embed=local_embed)\n return\n success = await self.bot.pg_utils.set_ban_footer(\n ctx.guild.id,\n footer_string,\n self.bot.logger\n )\n if success:\n desc = footer_string.replace(\n f'%user%', ctx.message.author.mention)\n local_embed = discord.Embed(\n title=f'Footer message set:',\n description=f'**Preview:**\\n{desc}',\n color=0x419400\n )\n else:\n local_embed = 
discord.Embed(\n title=f'Internal error occured, please contact @dashwav#7785',\n description=' ',\n color=0x651111\n )\n await ctx.send(embed=local_embed)\n return", "def footprint(dynamic_footprint_modifier=0.):", "def parse_footer(self): # -> tuple[list[Unknown], Literal['']]:\n ...", "def footer_nav(self):\r\n buttons = [NamedButton(\"help\", False, nocname=True),\r\n NamedButton(\"blog\", False, nocname=True),\r\n NamedButton(\"stats\", False, nocname=True),\r\n NamedButton(\"feedback\", False),\r\n NamedButton(\"bookmarklets\", False),\r\n NamedButton(\"socialite\", False),\r\n NamedButton(\"buttons\", True),\r\n NamedButton(\"widget\", True),\r\n NamedButton(\"code\", False, nocname=True),\r\n NamedButton(\"mobile\", False, nocname=True),\r\n NamedButton(\"store\", False, nocname=True),\r\n NamedButton(\"ad_inq\", False, nocname=True),\r\n ]\r\n\r\n return NavMenu(buttons, base_path = \"/\", type = \"flatlist\")", "def serialize_footer(signer):\n footer = b\"\"\n if signer is not None:\n signature = signer.finalize()\n footer = struct.pack(\">H{sig_len}s\".format(sig_len=len(signature)), len(signature), signature)\n return footer", "def close(self) -> str:\n self.html_table = self.html_table + \"\"\"</table>\\n\"\"\"\n return self.html_table", "def set_footer(embed: Embed, ctx: commands.Context) -> Embed:\n return embed.set_footer(icon_url=ctx.author.avatar_url, text=f\"{ctx.author.display_name} • {get_time()}\")", "def create_html(self):\n rows = self.check()\n htmlrows = \"\"\n for row in rows:\n data = self._format_row(row)\n htmlrows += data\n \n return self.TEMPLATE.format(content=htmlrows)", "def test_write_page_margins_footer(self):\n\n self.worksheet.set_footer(margin=0.5)\n self.worksheet._write_page_margins()\n\n exp = \"\"\"<pageMargins left=\"0.7\" right=\"0.7\" top=\"0.75\" bottom=\"0.75\" header=\"0.3\" footer=\"0.5\"/>\"\"\"\n got = self.fh.getvalue()\n\n self.assertEqual(got, exp)", "def write_db_footer(table,runnumber):\n \n table.write('}\\n')", "def testWriteFooter(self):\n file_writer = writers.VS2010ProjectFileWriter()\n\n file_writer._file = io.BytesIO()\n\n file_writer.WriteFooter()\n\n file_writer._file.seek(0, os.SEEK_SET)\n output_data = file_writer._file.read()\n\n self.assertTrue(output_data.endswith(b'</Project>'))", "def write_html_body(self, recipient: str) -> str:\n declaration = '<!DOCTYPE html>\\n\\n'\n header: str = attach_tag_head(\n attach_tag_title('reimburser')\n ) + '\\n'\n preamble: str = self._write_html_preamble(recipient)\n\n torso: str = self._write_html_torso(recipient)\n\n middle: str = attach_tag_div(\n attach_tag_p('The rest of the email gives an overview '\n + f'of all the costs from {self.trip_title}:'))\n\n costs_table: str = self._construct_html_table(self.table)\n\n matrices = list()\n for currency, matrix in self.reimbursement_matrices.items():\n matrices.append(self._construct_html_matrix(\n currency, matrix))\n matrix_tables = f'\\n{LEVEL_2}<br>\\n'.join(matrices)\n\n\n body: str = attach_tag_body(\n preamble + '\\n'\n + torso + '\\n'\n + middle + '\\n'\n + costs_table + f'\\n{LEVEL_2}<br>\\n'\n + matrix_tables)\n\n email_content: str = declaration \\\n + attach_tag_html(\n header\n + body)\n\n return email_content", "def renderFooter(self, plot, painter, rect):\r\n painter.setFont(plot.footerLabel().font())\r\n color = plot.footerLabel().palette().color(QPalette.Active, QPalette.Text)\r\n painter.setPen(color)\r\n plot.footerLabel().text().draw(painter, rect)", "async def set_kick_footer(self, ctx, *, footer_string):\n if not 
footer_string:\n local_embed = discord.Embed(\n title=f'No string detected, I need a string parameter to work',\n description=' ',\n color=0x651111\n )\n await ctx.send(embed=local_embed)\n return\n success = await self.bot.pg_utils.set_kick_footer(\n ctx.guild.id,\n footer_string,\n self.bot.logger\n )\n if success:\n desc = footer_string.replace(\n f'%user%', ctx.message.author.mention)\n local_embed = discord.Embed(\n title=f'Footer message set:',\n description=f'**Preview:**\\n{desc}',\n color=0x419400\n )\n else:\n local_embed = discord.Embed(\n title=f'Internal error occured, please contact @dashwav#7785',\n description=' ',\n color=0x651111\n )\n await ctx.send(embed=local_embed)\n return", "def get_footer_email(object, use_string_1=False, class_link='navLink'):\n if smart_unicode(str(type(object))) == \"<class 'dms.models.DmsItem'>\":\n item = object\n else:\n item = object.item\n if use_string_1:\n return encode_email(item.string_2, item.string_1, 'navLink')\n else:\n name = item.owner.get_full_name()\n if name == u'Unbekannte Person' and item.string_1 != '':\n return encode_email(item.string_2, item.string_1, class_link)\n else:\n return encode_email(item.owner.email, name, class_link)", "def latex_footer():\n print(\" \\end{tikzpicture}\")\n print(\" }\")\n print(\" \\end{frame}\")\n print(\"\\end{document}\")", "def setup_footer_panel(self):\r\n self.create_footer_push_button(\"F3 View\", \"F3\")\r\n self.create_footer_push_button(\"F4 Edit\", \"F4\")\r\n self.create_footer_push_button(\"F5 Copy\", \"F5\")\r\n self.create_footer_push_button(\"F6 move\", \"F6\")\r\n self.create_footer_push_button(\"F7 New Folder\", \"F7\")\r\n self.create_footer_push_button(\"F8 Delete\", \"F8\")\r\n self.create_footer_push_button(\"ALT+F4 Exit\")", "def close_head(self) -> str:\n self.html_doc = self.html_doc + \"\"\"</head>\"\"\"\n return self.html_doc", "def write_footer(self, stream):\n return\n ##################################################\n # You MUST implement this method in the subclass #\n # if the file format defines a file footer. 
#\n ##################################################", "def testWriteFooter(self):\n file_writer = writers.VS2008ProjectFileWriter()\n\n file_writer._file = io.BytesIO()\n\n file_writer.WriteFooter()\n\n file_writer._file.seek(0, os.SEEK_SET)\n output_data = file_writer._file.read()\n\n expected_output_data = b'</VisualStudioProject>\\r\\n'\n self.assertEqual(output_data, expected_output_data)", "def get_html_string(self, **kwargs):\n ...", "def endContent(self, newline=True):\n\n result = []\n result.append(self.anchordef('bottom'))\n result.append(self._close('div', newline=newline))\n if hasattr(self, 'page'):\n self.request.uid_generator.end()\n return ''.join(result)", "def html_output(self, order, view, mode):\n instance = self.instance\n data = {}\n data['order'] = order\n data['chartname'] = instance.table_name\n data['describe'] = view.table.describe\n data['x_name'] = view.fx.name\n data['y_name'] = view.fy.name\n data['chart'] = Chart.chart[view.chart]\n data['classify'] = [v[0] for v in view.table.classes]\n data['x_data'] = view.X\n data['y_data'] = view.Y\n data['title_top'] = 5\n\n [chart,filename] = self.html_handle(data)\n grid = Grid()\n grid.add(chart, grid_opts=opts.GridOpts(pos_bottom='20%', pos_top='20%'))\n if mode == 'single':\n self.page.add(grid) #the grid is added in the same page\n elif mode == 'multiple':\n grid.render('./html/' + self.table_name + '/' + filename) #the grid is added in a new file", "def to_html(self, data=None, **kwargs) -> str:\n html = self.create_container()\n return html", "def footnotes(self, text):\n html = '<div class=\"footnotes\">\\n%s<ol>%s</ol>\\n</div>\\n'\n return html % (self.hrule(), text)", "def finish(self):\r\n\r\n self.text += \"</html>\\n\"\r\n\r\n if self.filename != None:\r\n with open(self.filename, \"w\") as f:\r\n f.write(self.text)\r\n\r\n return self.text", "def _html(self, parent, group):\r\n return ItemFactory.create(\r\n parent_location=parent.location,\r\n category=\"html\",\r\n display_name=\"Group {} Sees This HTML\".format(group),\r\n data=\"Some HTML for group {}\".format(group),\r\n )", "def create_html(self):\n # Add html content to the self.doc\n self.doc.asis('<!DOCTYPE html>')\n with self.tag('html'):\n self.design_header()\n self.design_body()\n # Write html content from self.doc\n with codecs.open(self.filestream.name, 'w', 'utf-8') as f:\n html_content = indent(\n self.doc.getvalue(),\n indentation=' ',\n newline='\\r\\n'\n )\n f.write(html_content)", "def test__EmbedFooter__to_data():\n icon_url = 'attachment://orin.png'\n icon_proxy_url = 'https://www.astil.dev/project/hata/'\n text = 'orin'\n \n data = {\n 'icon_url': icon_url,\n 'proxy_icon_url': icon_proxy_url,\n 'text': text,\n }\n \n field = EmbedFooter.from_data(data)\n \n expected_output = data\n \n vampytest.assert_eq(\n field.to_data(defaults = True, include_internals = True),\n expected_output,\n )", "def create_html(self, filename=None):\n if isinstance(self.style, str):\n style = \"'{}'\".format(self.style)\n else:\n style = self.style\n options = dict(\n gl_js_version=GL_JS_VERSION,\n accessToken=self.access_token,\n div_id=self.div_id,\n style=style,\n center=list(self.center),\n zoom=self.zoom,\n opacity=self.opacity,\n minzoom=self.min_zoom,\n maxzoom=self.max_zoom,\n pitch=self.pitch,\n bearing=self.bearing,\n boxZoomOn=json.dumps(self.box_zoom_on),\n doubleClickZoomOn=json.dumps(self.double_click_zoom_on),\n scrollZoomOn=json.dumps(self.scroll_zoom_on),\n touchZoomOn=json.dumps(self.touch_zoom_on),\n legendFill=self.legend_fill,\n 
legendHeaderFill=self.legend_header_fill,\n legendTextColor=self.legend_text_color,\n legendTitleHaloColor=self.legend_title_halo_color,\n legendKeyBordersOn=json.dumps(self.legend_key_borders_on)\n )\n\n if self.label_property is None:\n options.update(labelProperty=None)\n else:\n options.update(labelProperty='{' + self.label_property + '}')\n\n html = []\n html.append(templates.format(self.template, **options))\n for layer in self.layers:\n html.append(layer.create_html())\n if filename:\n with codecs.open(filename, \"w\", \"utf-8-sig\") as f:\n f.write(\"\\n\".join(html))\n return None\n else:\n return \"\\n\".join(html)", "def rollup_header_footer(self, context):\n header = tags.html_string_to_element_tree(\n '<link href=\"%s/markdown.css\" rel=\"stylesheet\" '\n 'type=\"text/css\">' % RESOURCE_FOLDER)\n footer = tags.html_string_to_element_tree('')\n return (header, footer)", "def end_google_chart_header(outfile: TextIO) -> None:\n outfile.write(\" }\\n\")\n outfile.write(\" </script>\\n\")", "def parse_footer_block(lines):\n # najdeme pocatek paticky\n bottom_index = [idx for idx, line in enumerate(lines[-MAX_FOOTER_HEIGHT:]) \\\n if FOOTER_RE.match(line)]\n if not len(bottom_index):\n return None\n\n # vyparsujeme data z ni\n data = [line for line in lines[-(MAX_FOOTER_HEIGHT-bottom_index[0]):] if line.strip()]\n out = {'date':None}\n for item in data:\n # typ a poradove cislo zastupitelstva\n m = FOOTER_RE.match(item)\n if m:\n try:\n out['date'] = date(int(m.group(3).strip()), int(m.group(2).strip()), int(m.group(1).strip()))\n except ValueError:\n pass\n\n return out", "def end_page_division(outfile: TextIO) -> None:\n outfile.write(\" </div>\\n\")", "def closer(response):\n response.out.write('</div><div class=\"footer\">')\n response.out.write('<b>%4f seconds</b></div>' % (time.time() - response.starttime))\n response.out.write('</body></html>')", "def append_javascript_on_bottom_page(html_data, js_data):\n tmp = re.split(\"</body>\", html_data)\n html_data = tmp[0] + \"<script>\" + js_data + \"</script></body>\" + tmp[1]\n return html_data", "def get_html_body_str(self):\n svg_contents = self.__make_svg()\n if svg_contents is None:\n return ''\n else:\n head = '<b>Task graph</b><div id=\\'task-graph\\'>\\n'\n img = svg_contents\n tail = '</div><br>'\n return head + img + tail", "def footer_sql(self, footer_sql):\n if footer_sql is not None and len(footer_sql) > 5000:\n raise ValueError(\"Invalid value for `footer_sql`, length must be less than or equal to `5000`\") # noqa: E501\n\n self._footer_sql = footer_sql", "def print_copyright_page(outfile: TextIO, refdict: dict) -> None:\n outfile.write(\" <div id=\\\"copyright_page\\\">\\n\")\n outfile.write(\" <p>Copyright &copy; 2003&ndash;\" + str(init_data().current_year) +\n \" by \" + init_data().site_author + \". 
All Rights Reserved</p>\\n\")\n outfile.write(\" <p>Release: \" + init_data().version + \"</p>\\n\")\n outfile.write(\" <p><a href=\\\"\" + init_data().site_url() + \"\\\">\" + init_data().site_address + \"</a></p>\\n\")\n outfile.write(\" <p>\\n\")\n outfile.write(\" The data and code used to produce this document can be found on GitHub at\")\n outfile.write(\" </p>\\n\")\n outfile.write(\" <p>\\n\")\n outfile.write(\" <a href=\\\"https://github.com/msrosenberg/fiddlercrab.info\\\">\"\n \"https://github.com/msrosenberg/fiddlercrab.info</a>\\n\")\n outfile.write(\" </p>\\n\")\n outfile.write(\" <p>and</p>\\n\")\n outfile.write(\" <p>\\n\")\n outfile.write(\" <a href=\\\"https://github.com/msrosenberg/TaxonomyMonographBuilder\\\">\"\n \"https://github.com/msrosenberg/TaxonomyMonographBuilder</a>.\\n\")\n outfile.write(\" </p>\\n\")\n outfile.write(\" <p class=\\\"copy_cite\\\">\\n\")\n outfile.write(\" Please cite this document as:\\n\")\n outfile.write(\" </p>\\n\")\n outfile.write(\" <p>\\n\")\n outfile.write(\" Rosenberg, M.S. (\" + str(init_data().current_year) + \") www.fiddlercrab.info, v.\" +\n init_data().version + \".\\n\")\n outfile.write(\" </p>\\n\")\n\n outfile.write(\" <p>\\n\")\n outfile.write(\" Certain key elements of this work are described in:\\n\")\n key_ref = \"Rosenberg2014\" # citation describing the database\n ref = refdict[key_ref]\n outfile.write(\" <div class=\\\"reference_list\\\">\\n\")\n outfile.write(\" <ul>\\n\")\n outfile.write(\" <li><a href=\\\"\" + rel_link_prefix(True, \"references/\") + key_ref + \".html\\\">\" +\n ref.formatted_html + \"</a></li>\\n\")\n outfile.write(\" </ul>\\n\")\n outfile.write(\" </div>\\n\")\n outfile.write(\" </div>\\n\")\n outfile.write(\"\\n\")", "def __html__(self):\n return self.html", "def get_html(self):\r\n pass", "def close_cell(self) -> str:\n self.html_table = self.html_table + \"\"\"</td>\\n\"\"\"\n return self.html_table", "def create_footer_push_button(self, text, shortcut=None, connection=None):\r\n button = QtGui.QPushButton(self.commander_window.footer_container)\r\n button.setFont(self.font)\r\n button.setText(text)\r\n if (shortcut is not None):\r\n button.setShortcut(shortcut)\r\n if connection is not None:\r\n button.triggered.connect(connection)\r\n self.commander_window.footer_layout.addWidget(button)\r\n\r\n return button" ]
[ "0.76971847", "0.744824", "0.73707694", "0.72728646", "0.7272721", "0.72693896", "0.7252219", "0.7141228", "0.7088616", "0.70732653", "0.7065509", "0.6761187", "0.6724753", "0.67033243", "0.66451037", "0.6532951", "0.6486145", "0.6434139", "0.63833433", "0.63833433", "0.63632464", "0.63625926", "0.62865883", "0.627472", "0.6237597", "0.61557865", "0.6122214", "0.60678184", "0.6067368", "0.6035679", "0.6013438", "0.6004449", "0.5991936", "0.5933376", "0.5929293", "0.59240705", "0.58794653", "0.5870124", "0.57746655", "0.5771044", "0.56287134", "0.5621224", "0.5579838", "0.55502504", "0.5544188", "0.55423313", "0.5532389", "0.5513896", "0.5503877", "0.546109", "0.54315174", "0.5414734", "0.5411037", "0.53243756", "0.5309553", "0.5309553", "0.53046536", "0.5293615", "0.52890587", "0.5282927", "0.527304", "0.5244038", "0.52417785", "0.5229004", "0.5193615", "0.51807845", "0.51316774", "0.5127084", "0.51142853", "0.5110724", "0.5109595", "0.51041794", "0.5089332", "0.50617254", "0.50596625", "0.5046926", "0.5033809", "0.50314164", "0.5023161", "0.5020498", "0.5012785", "0.50117207", "0.49969727", "0.49954924", "0.49760085", "0.49687862", "0.49665013", "0.49617538", "0.49439508", "0.49091965", "0.48684373", "0.48607796", "0.48493293", "0.48450217", "0.48440373", "0.48396757", "0.48389843", "0.48228717", "0.4822318", "0.48145643" ]
0.6722909
13
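The negatives in the record above all build or emit footer markup in one way or another. As a side note in the same spirit (the function name and site text here are illustrative, not drawn from any record), a minimal string-building sketch:

```python
import datetime

def gen_footer() -> str:
    # Close out a page with a copyright line, similar to the snippets above.
    year = datetime.date.today().year
    return f"<footer><p>&copy; {year} Example Site</p></footer></body></html>"
```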
The HTML content of your footer.
def footer_html(self):
    return self._footer_html
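For illustration only, this is how the getter above would be exercised together with the constructor from the previous record; the instance name and markup are hypothetical:

```python
footer = FooterHtml(footer_html="<p>&copy; 2024 Example</p>")
print(footer.footer_html)  # -> "<p>&copy; 2024 Example</p>"
```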
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get(self):\n return self.footer_html", "def get_footer(self):\n self.footer = '</div>' \\\n '</div>' \\\n '</div>' \\\n '<div class=\"footer\">' \\\n '<div class=\"container\">' \\\n '<p class=\"text-muted\">Copyright Harm Brugge 2014.</p>' \\\n '</div>' \\\n '</div>' \\\n '</body>' \\\n '</html>'\n return self.footer", "def footer():\n\treturn \"\"\"<footer><table width=\"100%\"><th>Weather Icons by <a href=\"https://github.com/erikflowers/weather-icons\">Erik Flowers</a></th>\n\t<th><a href=\"http://forecast.io/\">Powered by Forecast</a></th></table></footer></div>\n\t</body></html>\"\"\"", "def _get_footer(self, footer):\n if footer is None:\n html = self.footer()\n else:\n html = footer\n return html", "def gen_footer():\n return '</body></html>'", "def footer(self):\n return self._footer", "def footer_html(self, html):\n self._footer_html = html", "def print_footer():\n print('</text>')", "def footer(self, **args):\n return self.pageConfig['footer'] % self.pageConfig", "def footer():\n return u'</form></body></html>\\n'", "def footer(self):\n pass", "def footer(self):\n pass", "def WriteFooter(self):\n return", "def email_footer():\n footer = \"\"\n\n return footer", "def get_footer_text(self):\n return self._get_footer_text()", "def footer(node):\n\n current_time = datetime.datetime.now()\n return '''\n </div>\n <div id=\"edit\">\n Last edit: ''' + time.strftime(\"%m/%d/%Y %I:%M:%S %p\", node.page.last_edit) + '''\n </div>\n </div>\n <footer>\n &copy; ''' + str(current_time.year) + ' ' + AUTHOR + ''' | Generated with <a href=\"http://www.minimalblue.com/software/minimalsite.html\">minimalsite</a> \n </footer>\n </div>\n </body>\n</html>'''", "def get_footer() -> html:\n footer = dbc.Container([\n html.Hr(),\n dbc.Row([\n dbc.Col([\n 'Made with ❤ in Frankfurt from ',\n dcc.Link(children='STATWORX',\n href='https://www.statworx.com/',\n style={\"color\": COLOR_STATWORX}),\n ]),\n dbc.Col(dcc.Link(\n children='Try Again!', href='/', style={\"color\": COLOR_STATWORX}),\n className=\"text-right\")\n ])\n ],\n className='mb-4')\n\n return footer", "def writeFooter(self):\n pass", "def getFooter():\n return _FOOTER", "def footer_html():\n note_div = html.Div(\n [\n dcc.Markdown(\n \"This website uses natural language processing (NLP) to power search on a set of research papers related to COVID-19.\"\n \" It was created by the team behind [Matscholar](https://www.matscholar.com), a research effort led by the [HackingMaterials](https://hackingmaterials.lbl.gov), \"\n \" [Persson](https://perssongroup.lbl.gov), and [Ceder](https://ceder.berkeley.edu) research\"\n \" groups at Lawrence Berkeley National Lab.\"\n \" The virus icon in our logo was made by Freepik from www.flaticon.com\",\n className=\"column is-half is-size-6\"\n )\n ],\n className=\"columns is-centered\"\n\n )\n\n common_footer_style = \"has-text-weight-bold\"\n\n about_matscholar = html.A(\n \"About Matscholar\",\n href=\"https://github.com/materialsintelligence/matscholar-web\",\n target=\"_blank\",\n className=common_footer_style,\n )\n\n privacy_policy = html.A(\n \"Privacy Policy\",\n href=\"https://www.iubenda.com/privacy-policy/55585319\",\n target=\"_blank\",\n className=common_footer_style,\n )\n\n submit_feedback = html.A(\n \"Matscholar Forum\",\n href=\"https://discuss.matsci.org/c/matscholar\",\n target=\"_blank\",\n className=common_footer_style,\n )\n\n footer_link_tree = html.Div(\n [\n about_matscholar,\n html.Span(\" | \"),\n privacy_policy,\n html.Span(\" | \"),\n submit_feedback,\n ]\n )\n\n 
footer_copyright = html.Div(\n html.Span(\"Copyright © 2019 - Materials Intelligence\")\n )\n\n footer = html.Div(\n [note_div, footer_link_tree, footer_copyright],\n id=\"footer_container\",\n className=\"content has-text-centered\",\n )\n\n footer_container = html.Div(footer)\n return footer_container", "def __footer(self):\n result = \"\"\"This API and the related documentation has been created with <a href=\"https://github.com/opencitations/ramose\" target=\"_blank\">RAMOSE</a>, the *Restful API Manager Over SPARQL Endpoints*, developed by <a href=\"http://orcid.org/0000-0003-0530-4305\" target=\"_blank\">Silvio Peroni</a> and <a href=\"https://marilenadaquino.github.io\">Marilena Daquino</a>.\"\"\"\n return markdown(result)", "def _get_footer_text(self):\n return _(THEME_FOOTER_TEXT)", "def WriteFooter(self):\n self.WriteText('}')", "def getFooter(HTMLstring):\n footer = open(os.path.dirname(os.path.realpath(__file__))+\"/html/footer.html\", \"r\")\n HTMLstring += footer.read()\n footer.close()\n return HTMLstring", "def format_report_footer(self):", "def render_footer_log(self):\n\n # clean up the log file\n if self._selenium_log_file:\n\n # render the footer\n html = render_to_string(\n self.selenium_footer_template,\n {'id': self.id(), 'description': self.__doc__})\n\n # write it to the file\n self._selenium_log_file.write(html)", "def parse_footer(self):\n lines=self.lines\n bodyfinish=re.compile(r\"</body>\", re.IGNORECASE).search(lines).span()[0]\n self.footer=lines[bodyfinish:]", "def footer(self):\n # close the svg tag\n yield '</svg>'\n # if this is a stand-alone document\n if self.standalone:\n # render a blank line\n yield ''\n # and the document footer\n yield from super().footer()\n # all done\n return", "def html_close():\n return(\"\"\"\n\n </section>\n\n </div>\n\n</main>\n</body>\n</html>\"\"\")", "def foot(cls):\n return ''", "def common_html_footer(outfile: TextIO, indexpath: str = \"\") -> None:\n outfile.write(\"\\n\")\n outfile.write(\" <footer>\\n\")\n outfile.write(\" <figure id=\\\"footmap\\\"><script type=\\\"text/javascript\\\" \"\n \"src=\\\"//rf.revolvermaps.com/0/0/4.js?i=5f9t1sywiez&amp;m=0&amp;h=75&amp;c=ff0000&amp;r=30\\\" \"\n \"async=\\\"async\\\"></script><figcaption>Visitors</figcaption></figure>\\n\")\n outfile.write(\" <p id=\\\"citation\\\"><a href=\\\"\" + indexpath + init_data().cite_url +\n \"\\\">\" + fetch_fa_glyph(\"site cite\") + \"How to cite this site</a></p>\\n\")\n outfile.write(\" <p id=\\\"contact\\\">Questions or comments about the site? Contact \"\n \"<a href=\\\"mailto:\" + init_data().site_author_email + \"\\\">\" + fetch_fa_glyph(\"mail\") +\n \"Dr. Michael S. 
Rosenberg</a></p>\\n\")\n outfile.write(\" <p id=\\\"copyright\\\">Release: \" + init_data().version +\n \" &mdash; Copyright &copy; 2003&ndash;\" + str(init_data().current_year) +\n \" All Rights Reserved</p>\\n\")\n outfile.write(\" </footer>\\n\")\n outfile.write(\" </body>\\n\")\n outfile.write(\"</html>\\n\")", "def footer(self, footer):\n\n self._footer = footer", "def footer(self):\n self.set_y(-15)\n self.set_font(self.police, 'I', 8)\n self.cell(w=0, h=10, txt=f\"Page {self.page_no()}\" + '/{nb}', border='B', ln=0, align='R')", "def footer(node):\n\n return '''\n <img src=\"/images/colosseo.png\" title=\"disegno del colosseo\" alt=\"colosseo\" class=\"colosseo\"/>\n\t \t </section>\n </div>\n\t\t\t <div class=\"clear\"></div>\n </div>\n </div>\n\t\t<div class=\"container_12 clearfix\">\n\t <footer class=\"grid_12\">\n\t\t\t\t <p>&copy <a href=\"http://luca.postregna.name\">lucapost</a> ''' + str(current_time.year) + '''; <a rel=\"license\" href=\"http://creativecommons.org/licenses/by-nc/3.0/\">license</a>; <a href=\"/privacy.html\" title=\"normativa per la privacy\">privacy</a>; edit: ''' + time.strftime(\"%Y%m%d %I:%M:%S %p\", node.page.last_edit) + '''</p>\n <p>email: <a href=\"mailto:patdilenardo@gmail.com\" title=\"contatto email\">patdilenardo@gmail.com</a>; phone: +39 3389456208</p>\n\t\t </footer>\t\n\t\t\t<div class=\"clear\"></div>\n \t\t<script src=\"/js/hashgrid.js\"></script> \n\t\t<script src=\"/js/flux.min.js\" type=\"text/javascript\" charset=\"utf-8\"></script>\n\t\t<script type=\"text/javascript\" charset=\"utf-8\">\n\t\t\t$(function(){\n\t\t\t\tif(!flux.browser.supportsTransitions)\n\t\t\t\t\talert(\"Flux Slider requires a browser that supports CSS3 transitions\");\n\t\t\t\t\t\n\t\t\t\twindow.f = new flux.slider('#slider', {\n\t\t\t\t\tpagination: false,\n controls: true,\n captions: true,\n\t\t\t\t\ttransitions: [ 'dissolve' ],\n\t\t\t\t\tdelay: 5500\n\t\t\t\t});\n\t\t\t});\n\t\t</script> \n <script>\n (function(i,s,o,g,r,a,m){i['GoogleAnalyticsObject']=r;i[r]=i[r]||function(){\n (i[r].q=i[r].q||[]).push(arguments)},i[r].l=1*new Date();a=s.createElement(o),\n m=s.getElementsByTagName(o)[0];a.async=1;a.src=g;m.parentNode.insertBefore(a,m)\n })(window,document,'script','//www.google-analytics.com/analytics.js','ga');\n ga('create', 'UA-6164762-14', 'nicelyventilated.it');\n ga('send', 'pageview');\n </script>\n\n<!--[if lt IE 7]><p class=chromeframe>Your browser is <em>ancient!</em> <a href=\"http://browsehappy.com/\">Upgrade to a different browser</a> or <a href=\"http://www.google.com/chromeframe/?redirect=true\">install Google Chrome Frame</a> to experience this site.</p><![endif]-->\n</body>\n</html>'''", "def footer(self):\n file = open(\"imdb_output.html\", \"a\")\n file.write(\"\\t\\t</table>\\n\\t</body>\\n</html>\\n\")", "def __init__(self, footer_html=None):\n self._footer_html = None\n\n if footer_html is not None:\n self.footer_html = footer_html", "def close_body(self) -> str:\n self.html_doc = self.html_doc + \"\"\"</body>\\n\n \"\"\"\n return self.html_doc", "def content(self):\n return self.template.render(weblogsnippet=self.weblogsnippet, pathto=pathto)", "def footer_links(request):\n data = _get_footer_data(request) \n response_data = json.dumps(data)\n\n response = HttpResponse(response_data, mimetype= \"application/json\")\n response['Cache-Control'] = 'public'\n\n return response", "def html_body(self):\n return self._html_body", "def get_inner_html(self):\n\n pass", "def set_footer(embed: Embed, ctx: commands.Context) -> Embed:\n return 
embed.set_footer(icon_url=ctx.author.avatar_url, text=f\"{ctx.author.display_name} • {get_time()}\")", "def content(self, **args):\n return self.pageConfig['content'] % self.pageConfig", "def test_format_emperor_html_footer_string(self):\n self.maxDiff = 5000\n\n # footer for a jackknifed pcoa plot without biplots\n out_string = format_emperor_html_footer_string(False, True)\n self.assertItemsEqual(out_string.split('\\n'),\n EXPECTED_FOOTER_A.split('\\n'))\n self.assertEqual(out_string, EXPECTED_FOOTER_A)\n\n # footer for biplots without jackknifing\n out_string = format_emperor_html_footer_string(True, False)\n self.assertItemsEqual(out_string.split('\\n'),\n EXPECTED_FOOTER_B.split('\\n'))\n self.assertEqual(out_string, EXPECTED_FOOTER_B)\n\n # no biplots nor jackknifing\n out_string = format_emperor_html_footer_string(False, False)\n self.assertItemsEqual(out_string.split('\\n'),\n EXPECTED_FOOTER_C.split('\\n'))\n self.assertEqual(out_string, EXPECTED_FOOTER_C)\n\n # no biplots no jackknifing but with vectors\n out_string = format_emperor_html_footer_string(False, False, True)\n self.assertItemsEqual(out_string.split('\\n'),\n EXPECTED_FOOTER_D.split('\\n'))\n self.assertEqual(out_string, EXPECTED_FOOTER_D)\n\n # comparison plot\n out_string = format_emperor_html_footer_string(False, False,\n False, True)\n self.assertItemsEqual(out_string.split('\\n'),\n EXPECTED_FOOTER_E.split('\\n'))\n self.assertEqual(out_string, EXPECTED_FOOTER_E)", "def write_footer(out):\n\n out.write(\"\"\" </g>\n</svg>\n\"\"\")", "def getHtml(self):\n return self.html", "def serialize_footer(signer):\n footer = b\"\"\n if signer is not None:\n signature = signer.finalize()\n footer = struct.pack(\">H{sig_len}s\".format(sig_len=len(signature)), len(signature), signature)\n return footer", "def parse_footer(self): # -> tuple[list[Unknown], str]:\n ...", "def parse_footer(self): # -> tuple[list[Unknown], str]:\n ...", "def html_wrapper(content):\n\n header = '''<!DOCTYPE html>\n<html lang=\"en\">\n<head>\n <meta charset=\"utf-8\">\n <title>''' + SITE_NAME + '''</title>\n</head>\n<body>\n'''\n\n footer = '''\n</body>\n</html>'''\n return header + content + footer", "def close(self) -> str:\n self.html_doc = self.html_doc + \"\"\"</report_html>\\n\n \"\"\"\n return self.html_doc", "def content(self):\n return unicode(self.main_frame.toHtml())", "def __html__(self):\n return self.html", "def get_html(self):\r\n pass", "def set_footer(self, **kwargs):\n self.footer_text = kwargs.get('text')\n self.footer_icon = kwargs.get('icon_url')", "def html(self):\n return self._html", "def parse_footer(self): # -> tuple[list[Unknown], Literal['']]:\n ...", "def content(self) -> str:\n return pulumi.get(self, \"content\")", "def content(self) -> str:\n return pulumi.get(self, \"content\")", "def content(self) -> str:\n return pulumi.get(self, \"content\")", "def finish(self):\r\n\r\n self.text += \"</html>\\n\"\r\n\r\n if self.filename != None:\r\n with open(self.filename, \"w\") as f:\r\n f.write(self.text)\r\n\r\n return self.text", "def endContent(self, newline=True):\n\n result = []\n result.append(self.anchordef('bottom'))\n result.append(self._close('div', newline=newline))\n if hasattr(self, 'page'):\n self.request.uid_generator.end()\n return ''.join(result)", "def html(self) -> str:\n return self._html", "def write_html(self, content):\n self.write(content)", "def get_html(self):\r\n return u'This is supposed to be test html.'", "def content(self):\n raise NotImplementedError()", "def usage_footer():\n print 
\"\"\"--\nSee more information about this project at:\n%(url)s\n\nReport bugs to authors at:\n%(source_url)s\n\"\"\" % {\n 'url': constants.App.URL,\n 'source_url': constants.App.SOURCE_URL,\n }", "def test_00_footer(self):\r\n url = '/'\r\n # As Anonymou user\r\n res = self.app.get(url, follow_redirects=True)\r\n dom = BeautifulSoup(res.data)\r\n err_msg = \"Footer links should not be shown to anonymous users\"\r\n assert dom.find(id='footer_links') is None, err_msg\r\n # As Authenticated user but NOT ADMIN\r\n self.signin()\r\n res = self.app.get(url, follow_redirects=True)\r\n dom = BeautifulSoup(res.data)\r\n err_msg = \"Footer links should not be shown to authenticated users\"\r\n assert dom.find(id='footer_links') is None, err_msg\r\n self.signout\r\n # As Authenticated user but ADMIN\r\n self.signin(email=self.root_addr, password=self.root_password)\r\n res = self.app.get(url, follow_redirects=True)\r\n dom = BeautifulSoup(res.data)\r\n err_msg = \"Footer links should not be shown to admin users\"\r\n assert dom.find(id='footer_links') is None, err_msg\r\n self.signout()", "def write_footer(self, fd):\n fd.write(f\"END {self.name}\")\n if len(self.data_items) > 0:\n one_based = self.data_items[0].structure.type == DatumType.integer\n if isinstance(self.data_items[0], mfdatascalar.MFScalar):\n entry = self.data_items[0].get_file_entry(\n values_only=True, one_based=one_based\n )\n else:\n entry = self.data_items[0].get_file_entry()\n fd.write(str(entry.rstrip()))\n fd.write(\"\\n\")", "def content(self) -> str:\r\n return self._content", "def html_body(self):\n return self.getattr('html_body')", "def get_html_body_str(self):\n svg_contents = self.__make_svg()\n if svg_contents is None:\n return ''\n else:\n head = '<b>Task graph</b><div id=\\'task-graph\\'>\\n'\n img = svg_contents\n tail = '</div><br>'\n return head + img + tail", "def test_00_footer(self):\r\n url = '/'\r\n # As Anonymou user\r\n res = self.app.get(url, follow_redirects=True)\r\n dom = BeautifulSoup(res.data)\r\n err_msg = \"Footer links should be shown to anonymous users\"\r\n assert dom.find(id='footer_links') is not None, err_msg\r\n # As Authenticated user but NOT ADMIN\r\n self.signin()\r\n res = self.app.get(url, follow_redirects=True)\r\n dom = BeautifulSoup(res.data)\r\n err_msg = \"Footer links should be shown to authenticated users\"\r\n assert dom.find(id='footer_links') is not None, err_msg\r\n self.signout\r\n # As Authenticated user but ADMIN\r\n self.signin(email=self.root_addr, password=self.root_password)\r\n res = self.app.get(url, follow_redirects=True)\r\n dom = BeautifulSoup(res.data)\r\n err_msg = \"Footer links should be shown to admin users\"\r\n assert dom.find(id='footer_links') is not None, err_msg\r\n self.signout()", "def content (self):\n return self.__content", "def message_body_html(self):\n ...", "def close_body(self) -> str:\n self.html_table = self.html_table + \"\"\"</tbody> \\n\"\"\"\n return self.html_table", "def footprint(dynamic_footprint_modifier=0.):", "def rawHTMLrendered(self):", "def contents(self) -> str:\n return(self._contents)", "def getFooter():\n return \"# job finished in %i seconds at %s -- %s -- %s\" %\\\n (time.time() - global_starting_time,\n time.asctime(time.localtime(time.time())),\n \" \".join(map(lambda x: \"%5.2f\" % x, os.times()[:4])),\n global_id)", "def show(self):\n\t\tself.html += '<head>\\n' + self.head + '</head>\\n<body>\\n' + self.body + '</body>\\n</html>'\n\n\t\treturn self.html", "def render_content(self):\n return 
mark_safe(markdown(self.content))", "def html_content(self):\n\t\thilite = CodeHiliteExtension(linenums=False, css_class='highlight')\n\t\textras = ExtraExtension()\n\t\tmarkdown_content = markdown(self.content, extensions=[hilite, extras])\n\t\toembed_content = parse_html(\n\t\t\tmarkdown_content,\n\t\t\toembed_providers,\n\t\t\turlize_all=True,\n\t\t\tmaxwidth=app.config['SITE_WIDTH'])\n\t\treturn Markup(oembed_content)", "def get_raw_data(self):\n return self.HTML", "def content(self):\n return \"\".join(self.lines)", "def content(self):\n return self._content", "def content(self):\n return self._content", "def content(self):\n return self._content", "def content(self):\n return self._content", "def contents(self) -> str:\n return pulumi.get(self, \"contents\")", "def render_html(self):\n return self.template.render(content=self.content, **self.styles)", "def latex_footer():\n print(\" \\end{tikzpicture}\")\n print(\" }\")\n print(\" \\end{frame}\")\n print(\"\\end{document}\")", "def html_content(self):\n hilite = CodeHiliteExtension(linenums=False, css_class='highlight')\n extras = ExtraExtension()\n markdown_content = markdown(self.content, extensions=[hilite, extras])\n oembed_content = parse_html(\n markdown_content,\n oembed_providers,\n urlize_all=True,\n maxwidth=app.config['SITE_WIDTH'])\n return Markup(oembed_content)", "def output_footer(stream=sys.stdout):\n print(_module_footer, file=stream)", "def open_body(self) -> str:\n self.html_doc = self.html_doc + \"\"\"<body>\\n\n \"\"\"\n return self.html_doc", "def html(self) -> SafeString:\n return format_html(self.__html__())", "def html(self) -> SafeString:\n return format_html(self.__html__())", "def code(self):\n return '{}\\n<script>{}</script>'.format(self.html, self.js)", "def rawHTML(self):\n #TODO : do checking for scripts and hacks here?\n return mark_safe(self.html)", "def get_footer_links(self):\n nav_data = self.get_navigation_data()\n return nav_data.get('footer_links', [])" ]
[ "0.84020877", "0.83206654", "0.77823853", "0.7600698", "0.7569941", "0.75131357", "0.74960965", "0.7441762", "0.7427407", "0.74167025", "0.7381049", "0.7381049", "0.73269355", "0.73111933", "0.73045343", "0.71055436", "0.7100233", "0.70798254", "0.7018429", "0.69782525", "0.6810585", "0.67525476", "0.6744736", "0.65345556", "0.646225", "0.6406132", "0.62832254", "0.626507", "0.6245877", "0.6215444", "0.62086", "0.6109538", "0.61076045", "0.60960835", "0.60601866", "0.6051465", "0.6027375", "0.6022173", "0.5903179", "0.5898711", "0.58838964", "0.5881808", "0.58796936", "0.58596295", "0.5835468", "0.58333", "0.58278954", "0.5825976", "0.5825976", "0.57999843", "0.5784243", "0.578235", "0.5781752", "0.57457966", "0.57196903", "0.57162195", "0.57124203", "0.5691115", "0.5691115", "0.5691115", "0.5689017", "0.56729984", "0.56585187", "0.56326455", "0.56158036", "0.55959135", "0.55846053", "0.5579947", "0.55711955", "0.5559254", "0.55424666", "0.5539908", "0.5529832", "0.5509776", "0.5504514", "0.549669", "0.5474271", "0.5453696", "0.54471993", "0.5429555", "0.54148316", "0.54120976", "0.5405157", "0.5373746", "0.5369421", "0.5363674", "0.5363674", "0.5363674", "0.5363674", "0.53622645", "0.533922", "0.53356767", "0.53257513", "0.53066456", "0.5306605", "0.5303149", "0.5303149", "0.52910703", "0.52902883", "0.5289154" ]
0.8446734
0
The HTML content of your footer.
def footer_html(self, html):
    self._footer_html = html
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def footer_html(self):\n return self._footer_html", "def get(self):\n return self.footer_html", "def get_footer(self):\n self.footer = '</div>' \\\n '</div>' \\\n '</div>' \\\n '<div class=\"footer\">' \\\n '<div class=\"container\">' \\\n '<p class=\"text-muted\">Copyright Harm Brugge 2014.</p>' \\\n '</div>' \\\n '</div>' \\\n '</body>' \\\n '</html>'\n return self.footer", "def footer():\n\treturn \"\"\"<footer><table width=\"100%\"><th>Weather Icons by <a href=\"https://github.com/erikflowers/weather-icons\">Erik Flowers</a></th>\n\t<th><a href=\"http://forecast.io/\">Powered by Forecast</a></th></table></footer></div>\n\t</body></html>\"\"\"", "def _get_footer(self, footer):\n if footer is None:\n html = self.footer()\n else:\n html = footer\n return html", "def gen_footer():\n return '</body></html>'", "def footer(self):\n return self._footer", "def print_footer():\n print('</text>')", "def footer(self, **args):\n return self.pageConfig['footer'] % self.pageConfig", "def footer():\n return u'</form></body></html>\\n'", "def footer(self):\n pass", "def footer(self):\n pass", "def WriteFooter(self):\n return", "def email_footer():\n footer = \"\"\n\n return footer", "def get_footer_text(self):\n return self._get_footer_text()", "def footer(node):\n\n current_time = datetime.datetime.now()\n return '''\n </div>\n <div id=\"edit\">\n Last edit: ''' + time.strftime(\"%m/%d/%Y %I:%M:%S %p\", node.page.last_edit) + '''\n </div>\n </div>\n <footer>\n &copy; ''' + str(current_time.year) + ' ' + AUTHOR + ''' | Generated with <a href=\"http://www.minimalblue.com/software/minimalsite.html\">minimalsite</a> \n </footer>\n </div>\n </body>\n</html>'''", "def get_footer() -> html:\n footer = dbc.Container([\n html.Hr(),\n dbc.Row([\n dbc.Col([\n 'Made with ❤ in Frankfurt from ',\n dcc.Link(children='STATWORX',\n href='https://www.statworx.com/',\n style={\"color\": COLOR_STATWORX}),\n ]),\n dbc.Col(dcc.Link(\n children='Try Again!', href='/', style={\"color\": COLOR_STATWORX}),\n className=\"text-right\")\n ])\n ],\n className='mb-4')\n\n return footer", "def writeFooter(self):\n pass", "def getFooter():\n return _FOOTER", "def footer_html():\n note_div = html.Div(\n [\n dcc.Markdown(\n \"This website uses natural language processing (NLP) to power search on a set of research papers related to COVID-19.\"\n \" It was created by the team behind [Matscholar](https://www.matscholar.com), a research effort led by the [HackingMaterials](https://hackingmaterials.lbl.gov), \"\n \" [Persson](https://perssongroup.lbl.gov), and [Ceder](https://ceder.berkeley.edu) research\"\n \" groups at Lawrence Berkeley National Lab.\"\n \" The virus icon in our logo was made by Freepik from www.flaticon.com\",\n className=\"column is-half is-size-6\"\n )\n ],\n className=\"columns is-centered\"\n\n )\n\n common_footer_style = \"has-text-weight-bold\"\n\n about_matscholar = html.A(\n \"About Matscholar\",\n href=\"https://github.com/materialsintelligence/matscholar-web\",\n target=\"_blank\",\n className=common_footer_style,\n )\n\n privacy_policy = html.A(\n \"Privacy Policy\",\n href=\"https://www.iubenda.com/privacy-policy/55585319\",\n target=\"_blank\",\n className=common_footer_style,\n )\n\n submit_feedback = html.A(\n \"Matscholar Forum\",\n href=\"https://discuss.matsci.org/c/matscholar\",\n target=\"_blank\",\n className=common_footer_style,\n )\n\n footer_link_tree = html.Div(\n [\n about_matscholar,\n html.Span(\" | \"),\n privacy_policy,\n html.Span(\" | \"),\n submit_feedback,\n ]\n )\n\n footer_copyright 
= html.Div(\n html.Span(\"Copyright © 2019 - Materials Intelligence\")\n )\n\n footer = html.Div(\n [note_div, footer_link_tree, footer_copyright],\n id=\"footer_container\",\n className=\"content has-text-centered\",\n )\n\n footer_container = html.Div(footer)\n return footer_container", "def __footer(self):\n result = \"\"\"This API and the related documentation has been created with <a href=\"https://github.com/opencitations/ramose\" target=\"_blank\">RAMOSE</a>, the *Restful API Manager Over SPARQL Endpoints*, developed by <a href=\"http://orcid.org/0000-0003-0530-4305\" target=\"_blank\">Silvio Peroni</a> and <a href=\"https://marilenadaquino.github.io\">Marilena Daquino</a>.\"\"\"\n return markdown(result)", "def _get_footer_text(self):\n return _(THEME_FOOTER_TEXT)", "def WriteFooter(self):\n self.WriteText('}')", "def getFooter(HTMLstring):\n footer = open(os.path.dirname(os.path.realpath(__file__))+\"/html/footer.html\", \"r\")\n HTMLstring += footer.read()\n footer.close()\n return HTMLstring", "def format_report_footer(self):", "def render_footer_log(self):\n\n # clean up the log file\n if self._selenium_log_file:\n\n # render the footer\n html = render_to_string(\n self.selenium_footer_template,\n {'id': self.id(), 'description': self.__doc__})\n\n # write it to the file\n self._selenium_log_file.write(html)", "def parse_footer(self):\n lines=self.lines\n bodyfinish=re.compile(r\"</body>\", re.IGNORECASE).search(lines).span()[0]\n self.footer=lines[bodyfinish:]", "def footer(self):\n # close the svg tag\n yield '</svg>'\n # if this is a stand-alone document\n if self.standalone:\n # render a blank line\n yield ''\n # and the document footer\n yield from super().footer()\n # all done\n return", "def html_close():\n return(\"\"\"\n\n </section>\n\n </div>\n\n</main>\n</body>\n</html>\"\"\")", "def foot(cls):\n return ''", "def common_html_footer(outfile: TextIO, indexpath: str = \"\") -> None:\n outfile.write(\"\\n\")\n outfile.write(\" <footer>\\n\")\n outfile.write(\" <figure id=\\\"footmap\\\"><script type=\\\"text/javascript\\\" \"\n \"src=\\\"//rf.revolvermaps.com/0/0/4.js?i=5f9t1sywiez&amp;m=0&amp;h=75&amp;c=ff0000&amp;r=30\\\" \"\n \"async=\\\"async\\\"></script><figcaption>Visitors</figcaption></figure>\\n\")\n outfile.write(\" <p id=\\\"citation\\\"><a href=\\\"\" + indexpath + init_data().cite_url +\n \"\\\">\" + fetch_fa_glyph(\"site cite\") + \"How to cite this site</a></p>\\n\")\n outfile.write(\" <p id=\\\"contact\\\">Questions or comments about the site? Contact \"\n \"<a href=\\\"mailto:\" + init_data().site_author_email + \"\\\">\" + fetch_fa_glyph(\"mail\") +\n \"Dr. Michael S. 
Rosenberg</a></p>\\n\")\n outfile.write(\" <p id=\\\"copyright\\\">Release: \" + init_data().version +\n \" &mdash; Copyright &copy; 2003&ndash;\" + str(init_data().current_year) +\n \" All Rights Reserved</p>\\n\")\n outfile.write(\" </footer>\\n\")\n outfile.write(\" </body>\\n\")\n outfile.write(\"</html>\\n\")", "def footer(self, footer):\n\n self._footer = footer", "def footer(self):\n self.set_y(-15)\n self.set_font(self.police, 'I', 8)\n self.cell(w=0, h=10, txt=f\"Page {self.page_no()}\" + '/{nb}', border='B', ln=0, align='R')", "def footer(node):\n\n return '''\n <img src=\"/images/colosseo.png\" title=\"disegno del colosseo\" alt=\"colosseo\" class=\"colosseo\"/>\n\t \t </section>\n </div>\n\t\t\t <div class=\"clear\"></div>\n </div>\n </div>\n\t\t<div class=\"container_12 clearfix\">\n\t <footer class=\"grid_12\">\n\t\t\t\t <p>&copy <a href=\"http://luca.postregna.name\">lucapost</a> ''' + str(current_time.year) + '''; <a rel=\"license\" href=\"http://creativecommons.org/licenses/by-nc/3.0/\">license</a>; <a href=\"/privacy.html\" title=\"normativa per la privacy\">privacy</a>; edit: ''' + time.strftime(\"%Y%m%d %I:%M:%S %p\", node.page.last_edit) + '''</p>\n <p>email: <a href=\"mailto:patdilenardo@gmail.com\" title=\"contatto email\">patdilenardo@gmail.com</a>; phone: +39 3389456208</p>\n\t\t </footer>\t\n\t\t\t<div class=\"clear\"></div>\n \t\t<script src=\"/js/hashgrid.js\"></script> \n\t\t<script src=\"/js/flux.min.js\" type=\"text/javascript\" charset=\"utf-8\"></script>\n\t\t<script type=\"text/javascript\" charset=\"utf-8\">\n\t\t\t$(function(){\n\t\t\t\tif(!flux.browser.supportsTransitions)\n\t\t\t\t\talert(\"Flux Slider requires a browser that supports CSS3 transitions\");\n\t\t\t\t\t\n\t\t\t\twindow.f = new flux.slider('#slider', {\n\t\t\t\t\tpagination: false,\n controls: true,\n captions: true,\n\t\t\t\t\ttransitions: [ 'dissolve' ],\n\t\t\t\t\tdelay: 5500\n\t\t\t\t});\n\t\t\t});\n\t\t</script> \n <script>\n (function(i,s,o,g,r,a,m){i['GoogleAnalyticsObject']=r;i[r]=i[r]||function(){\n (i[r].q=i[r].q||[]).push(arguments)},i[r].l=1*new Date();a=s.createElement(o),\n m=s.getElementsByTagName(o)[0];a.async=1;a.src=g;m.parentNode.insertBefore(a,m)\n })(window,document,'script','//www.google-analytics.com/analytics.js','ga');\n ga('create', 'UA-6164762-14', 'nicelyventilated.it');\n ga('send', 'pageview');\n </script>\n\n<!--[if lt IE 7]><p class=chromeframe>Your browser is <em>ancient!</em> <a href=\"http://browsehappy.com/\">Upgrade to a different browser</a> or <a href=\"http://www.google.com/chromeframe/?redirect=true\">install Google Chrome Frame</a> to experience this site.</p><![endif]-->\n</body>\n</html>'''", "def footer(self):\n file = open(\"imdb_output.html\", \"a\")\n file.write(\"\\t\\t</table>\\n\\t</body>\\n</html>\\n\")", "def __init__(self, footer_html=None):\n self._footer_html = None\n\n if footer_html is not None:\n self.footer_html = footer_html", "def close_body(self) -> str:\n self.html_doc = self.html_doc + \"\"\"</body>\\n\n \"\"\"\n return self.html_doc", "def content(self):\n return self.template.render(weblogsnippet=self.weblogsnippet, pathto=pathto)", "def footer_links(request):\n data = _get_footer_data(request) \n response_data = json.dumps(data)\n\n response = HttpResponse(response_data, mimetype= \"application/json\")\n response['Cache-Control'] = 'public'\n\n return response", "def html_body(self):\n return self._html_body", "def get_inner_html(self):\n\n pass", "def set_footer(embed: Embed, ctx: commands.Context) -> Embed:\n return 
embed.set_footer(icon_url=ctx.author.avatar_url, text=f\"{ctx.author.display_name} • {get_time()}\")", "def content(self, **args):\n return self.pageConfig['content'] % self.pageConfig", "def test_format_emperor_html_footer_string(self):\n self.maxDiff = 5000\n\n # footer for a jackknifed pcoa plot without biplots\n out_string = format_emperor_html_footer_string(False, True)\n self.assertItemsEqual(out_string.split('\\n'),\n EXPECTED_FOOTER_A.split('\\n'))\n self.assertEqual(out_string, EXPECTED_FOOTER_A)\n\n # footer for biplots without jackknifing\n out_string = format_emperor_html_footer_string(True, False)\n self.assertItemsEqual(out_string.split('\\n'),\n EXPECTED_FOOTER_B.split('\\n'))\n self.assertEqual(out_string, EXPECTED_FOOTER_B)\n\n # no biplots nor jackknifing\n out_string = format_emperor_html_footer_string(False, False)\n self.assertItemsEqual(out_string.split('\\n'),\n EXPECTED_FOOTER_C.split('\\n'))\n self.assertEqual(out_string, EXPECTED_FOOTER_C)\n\n # no biplots no jackknifing but with vectors\n out_string = format_emperor_html_footer_string(False, False, True)\n self.assertItemsEqual(out_string.split('\\n'),\n EXPECTED_FOOTER_D.split('\\n'))\n self.assertEqual(out_string, EXPECTED_FOOTER_D)\n\n # comparison plot\n out_string = format_emperor_html_footer_string(False, False,\n False, True)\n self.assertItemsEqual(out_string.split('\\n'),\n EXPECTED_FOOTER_E.split('\\n'))\n self.assertEqual(out_string, EXPECTED_FOOTER_E)", "def write_footer(out):\n\n out.write(\"\"\" </g>\n</svg>\n\"\"\")", "def getHtml(self):\n return self.html", "def serialize_footer(signer):\n footer = b\"\"\n if signer is not None:\n signature = signer.finalize()\n footer = struct.pack(\">H{sig_len}s\".format(sig_len=len(signature)), len(signature), signature)\n return footer", "def parse_footer(self): # -> tuple[list[Unknown], str]:\n ...", "def parse_footer(self): # -> tuple[list[Unknown], str]:\n ...", "def html_wrapper(content):\n\n header = '''<!DOCTYPE html>\n<html lang=\"en\">\n<head>\n <meta charset=\"utf-8\">\n <title>''' + SITE_NAME + '''</title>\n</head>\n<body>\n'''\n\n footer = '''\n</body>\n</html>'''\n return header + content + footer", "def close(self) -> str:\n self.html_doc = self.html_doc + \"\"\"</report_html>\\n\n \"\"\"\n return self.html_doc", "def content(self):\n return unicode(self.main_frame.toHtml())", "def __html__(self):\n return self.html", "def get_html(self):\r\n pass", "def set_footer(self, **kwargs):\n self.footer_text = kwargs.get('text')\n self.footer_icon = kwargs.get('icon_url')", "def html(self):\n return self._html", "def parse_footer(self): # -> tuple[list[Unknown], Literal['']]:\n ...", "def content(self) -> str:\n return pulumi.get(self, \"content\")", "def content(self) -> str:\n return pulumi.get(self, \"content\")", "def content(self) -> str:\n return pulumi.get(self, \"content\")", "def finish(self):\r\n\r\n self.text += \"</html>\\n\"\r\n\r\n if self.filename != None:\r\n with open(self.filename, \"w\") as f:\r\n f.write(self.text)\r\n\r\n return self.text", "def endContent(self, newline=True):\n\n result = []\n result.append(self.anchordef('bottom'))\n result.append(self._close('div', newline=newline))\n if hasattr(self, 'page'):\n self.request.uid_generator.end()\n return ''.join(result)", "def html(self) -> str:\n return self._html", "def write_html(self, content):\n self.write(content)", "def get_html(self):\r\n return u'This is supposed to be test html.'", "def content(self):\n raise NotImplementedError()", "def usage_footer():\n print 
\"\"\"--\nSee more information about this project at:\n%(url)s\n\nReport bugs to authors at:\n%(source_url)s\n\"\"\" % {\n 'url': constants.App.URL,\n 'source_url': constants.App.SOURCE_URL,\n }", "def test_00_footer(self):\r\n url = '/'\r\n # As Anonymou user\r\n res = self.app.get(url, follow_redirects=True)\r\n dom = BeautifulSoup(res.data)\r\n err_msg = \"Footer links should not be shown to anonymous users\"\r\n assert dom.find(id='footer_links') is None, err_msg\r\n # As Authenticated user but NOT ADMIN\r\n self.signin()\r\n res = self.app.get(url, follow_redirects=True)\r\n dom = BeautifulSoup(res.data)\r\n err_msg = \"Footer links should not be shown to authenticated users\"\r\n assert dom.find(id='footer_links') is None, err_msg\r\n self.signout\r\n # As Authenticated user but ADMIN\r\n self.signin(email=self.root_addr, password=self.root_password)\r\n res = self.app.get(url, follow_redirects=True)\r\n dom = BeautifulSoup(res.data)\r\n err_msg = \"Footer links should not be shown to admin users\"\r\n assert dom.find(id='footer_links') is None, err_msg\r\n self.signout()", "def write_footer(self, fd):\n fd.write(f\"END {self.name}\")\n if len(self.data_items) > 0:\n one_based = self.data_items[0].structure.type == DatumType.integer\n if isinstance(self.data_items[0], mfdatascalar.MFScalar):\n entry = self.data_items[0].get_file_entry(\n values_only=True, one_based=one_based\n )\n else:\n entry = self.data_items[0].get_file_entry()\n fd.write(str(entry.rstrip()))\n fd.write(\"\\n\")", "def content(self) -> str:\r\n return self._content", "def html_body(self):\n return self.getattr('html_body')", "def get_html_body_str(self):\n svg_contents = self.__make_svg()\n if svg_contents is None:\n return ''\n else:\n head = '<b>Task graph</b><div id=\\'task-graph\\'>\\n'\n img = svg_contents\n tail = '</div><br>'\n return head + img + tail", "def test_00_footer(self):\r\n url = '/'\r\n # As Anonymou user\r\n res = self.app.get(url, follow_redirects=True)\r\n dom = BeautifulSoup(res.data)\r\n err_msg = \"Footer links should be shown to anonymous users\"\r\n assert dom.find(id='footer_links') is not None, err_msg\r\n # As Authenticated user but NOT ADMIN\r\n self.signin()\r\n res = self.app.get(url, follow_redirects=True)\r\n dom = BeautifulSoup(res.data)\r\n err_msg = \"Footer links should be shown to authenticated users\"\r\n assert dom.find(id='footer_links') is not None, err_msg\r\n self.signout\r\n # As Authenticated user but ADMIN\r\n self.signin(email=self.root_addr, password=self.root_password)\r\n res = self.app.get(url, follow_redirects=True)\r\n dom = BeautifulSoup(res.data)\r\n err_msg = \"Footer links should be shown to admin users\"\r\n assert dom.find(id='footer_links') is not None, err_msg\r\n self.signout()", "def content (self):\n return self.__content", "def message_body_html(self):\n ...", "def close_body(self) -> str:\n self.html_table = self.html_table + \"\"\"</tbody> \\n\"\"\"\n return self.html_table", "def footprint(dynamic_footprint_modifier=0.):", "def rawHTMLrendered(self):", "def contents(self) -> str:\n return(self._contents)", "def getFooter():\n return \"# job finished in %i seconds at %s -- %s -- %s\" %\\\n (time.time() - global_starting_time,\n time.asctime(time.localtime(time.time())),\n \" \".join(map(lambda x: \"%5.2f\" % x, os.times()[:4])),\n global_id)", "def show(self):\n\t\tself.html += '<head>\\n' + self.head + '</head>\\n<body>\\n' + self.body + '</body>\\n</html>'\n\n\t\treturn self.html", "def render_content(self):\n return 
mark_safe(markdown(self.content))", "def html_content(self):\n\t\thilite = CodeHiliteExtension(linenums=False, css_class='highlight')\n\t\textras = ExtraExtension()\n\t\tmarkdown_content = markdown(self.content, extensions=[hilite, extras])\n\t\toembed_content = parse_html(\n\t\t\tmarkdown_content,\n\t\t\toembed_providers,\n\t\t\turlize_all=True,\n\t\t\tmaxwidth=app.config['SITE_WIDTH'])\n\t\treturn Markup(oembed_content)", "def get_raw_data(self):\n return self.HTML", "def content(self):\n return \"\".join(self.lines)", "def content(self):\n return self._content", "def content(self):\n return self._content", "def content(self):\n return self._content", "def content(self):\n return self._content", "def contents(self) -> str:\n return pulumi.get(self, \"contents\")", "def render_html(self):\n return self.template.render(content=self.content, **self.styles)", "def latex_footer():\n print(\" \\end{tikzpicture}\")\n print(\" }\")\n print(\" \\end{frame}\")\n print(\"\\end{document}\")", "def html_content(self):\n hilite = CodeHiliteExtension(linenums=False, css_class='highlight')\n extras = ExtraExtension()\n markdown_content = markdown(self.content, extensions=[hilite, extras])\n oembed_content = parse_html(\n markdown_content,\n oembed_providers,\n urlize_all=True,\n maxwidth=app.config['SITE_WIDTH'])\n return Markup(oembed_content)", "def output_footer(stream=sys.stdout):\n print(_module_footer, file=stream)", "def open_body(self) -> str:\n self.html_doc = self.html_doc + \"\"\"<body>\\n\n \"\"\"\n return self.html_doc", "def html(self) -> SafeString:\n return format_html(self.__html__())", "def html(self) -> SafeString:\n return format_html(self.__html__())", "def code(self):\n return '{}\\n<script>{}</script>'.format(self.html, self.js)", "def rawHTML(self):\n #TODO : do checking for scripts and hacks here?\n return mark_safe(self.html)", "def get_footer_links(self):\n nav_data = self.get_navigation_data()\n return nav_data.get('footer_links', [])" ]
[ "0.8446734", "0.84020877", "0.83206654", "0.77823853", "0.7600698", "0.7569941", "0.75131357", "0.7441762", "0.7427407", "0.74167025", "0.7381049", "0.7381049", "0.73269355", "0.73111933", "0.73045343", "0.71055436", "0.7100233", "0.70798254", "0.7018429", "0.69782525", "0.6810585", "0.67525476", "0.6744736", "0.65345556", "0.646225", "0.6406132", "0.62832254", "0.626507", "0.6245877", "0.6215444", "0.62086", "0.6109538", "0.61076045", "0.60960835", "0.60601866", "0.6051465", "0.6027375", "0.6022173", "0.5903179", "0.5898711", "0.58838964", "0.5881808", "0.58796936", "0.58596295", "0.5835468", "0.58333", "0.58278954", "0.5825976", "0.5825976", "0.57999843", "0.5784243", "0.578235", "0.5781752", "0.57457966", "0.57196903", "0.57162195", "0.57124203", "0.5691115", "0.5691115", "0.5691115", "0.5689017", "0.56729984", "0.56585187", "0.56326455", "0.56158036", "0.55959135", "0.55846053", "0.5579947", "0.55711955", "0.5559254", "0.55424666", "0.5539908", "0.5529832", "0.5509776", "0.5504514", "0.549669", "0.5474271", "0.5453696", "0.54471993", "0.5429555", "0.54148316", "0.54120976", "0.5405157", "0.5373746", "0.5369421", "0.5363674", "0.5363674", "0.5363674", "0.5363674", "0.53622645", "0.533922", "0.53356767", "0.53257513", "0.53066456", "0.5306605", "0.5303149", "0.5303149", "0.52910703", "0.52902883", "0.5289154" ]
0.74960965
7
Get a JSON-ready representation of this FooterHtml.
def get(self):
    return self.footer_html
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def footer_html(self):\n return self._footer_html", "def get_footer(self):\n self.footer = '</div>' \\\n '</div>' \\\n '</div>' \\\n '<div class=\"footer\">' \\\n '<div class=\"container\">' \\\n '<p class=\"text-muted\">Copyright Harm Brugge 2014.</p>' \\\n '</div>' \\\n '</div>' \\\n '</body>' \\\n '</html>'\n return self.footer", "def footer_links(request):\n data = _get_footer_data(request) \n response_data = json.dumps(data)\n\n response = HttpResponse(response_data, mimetype= \"application/json\")\n response['Cache-Control'] = 'public'\n\n return response", "def _get_footer(self, footer):\n if footer is None:\n html = self.footer()\n else:\n html = footer\n return html", "def footer(self):\n return self._footer", "def json(self):\n return {\n \"hwnd\": self.hwnd,\n \"text\": self.text,\n \"x\": self.x,\n \"y\": self.y,\n \"width\": self.width,\n \"height\": self.height,\n \"formatted\": str(self)\n }", "def json(self) -> PageJson:\n\n json: PageJson = {}\n json[\"id\"] = self.id\n json[\"cells\"] = [cell.json() for cell in self.cells]\n json[\"data\"] = self.data\n return json", "def json(self):\n return {\n 'author': self.author,\n 'email': self.email,\n 'display_email': self.display_email,\n 'title': self.title,\n 'trailer_path': self.trailer_path,\n 'date': self.date,\n 'link': self.link,\n '_id': self._id\n }", "def footer_html(self, html):\n self._footer_html = html", "def serialize(self):\r\n return {\r\n 'title': self.title,\r\n 'id': self.id,\r\n 'domain': self.domain,\r\n 'd_h1': self.d_h1,\r\n 'd_p': self.d_p,\r\n 'd_img': self.d_img,\r\n 'd_textcolor': self.d_textcolor,\r\n 'd_pagecolor': self.d_pagecolor,\r\n 'd_background': self.d_background,\r\n 'd_pwidth': self.d_pwidth,\r\n 'd_margin': self.d_margin,\r\n 'd_padding': self.d_padding,\r\n 'd_textalign': self.d_textalign,\r\n 'd_imgwidth': self.d_imgwidth,\r\n 'd_imgheight': self.d_imgheight,\r\n 'd_imgradius': self.d_imgradius,\r\n 'd_borderwidth': self.d_borderwidth,\r\n 'd_bordertype': self.d_bordertype,\r\n 'd_bordercolor': self.d_bordercolor,\r\n 'h1_image': self.h1_image,\r\n 'h1_background': self.h1_background,\r\n 'h1_bradius': self.h1_bradius,\r\n 'h1_fsize': self.h1_fsize,\r\n 'h1_customsize': self.h1_customsize,\r\n 'h1_width': self.h1_width,\r\n 'user_post': self.user_post,\r\n 'comment': self.comment,\r\n 'extra': self.extra,\r\n 'extra_int': self.extra_int \r\n }", "def getFooter():\n return _FOOTER", "def json(self):\r\n return {\"id\": self.id, \"code\": self.code, \"description\": self.description, \"xCoor\": self.x_coor, \"yCoor\": self.y_coor, \"latitude\": self.latitude,\r\n \"longitude\": self.longitude, \"waterschapId\": self.waterschap_id, \"watertypeId\": self.watertype_id, \"watertypeKrwId\": self.watertype_krw_id}", "def as_html(self):\n html = json.dumps(self.db_dict, sort_keys=True, indent=4)\n return make_safe_for_html(html)", "def json(self):\n return self.__json", "def tojson(self) -> ty.Text:\n return json.dumps(self.todict())", "def to_json(self):\n return {\n \"item_name\": self.item_name,\n \"summary\": self.summary,\n \"content\": self.content,\n \"date_published\": self.date_published,\n \"item_slug\": self.item_slug,\n \"category_name\": self.category_name,\n }", "def to_json_string(self) -> None:\n return json.dumps(dataclasses.asdict(self)) + \"\\n\"", "def GetJSON(self):\n return json.dumps(self.GetDict())", "def dict(self):\n\t\treturn self.json", "def to_json_string(self) -> None:\n return json.dumps(self.to_dict(), indent=2) + \"\\n\"", "def getHtml(self):\n return 
self.html", "def as_json(self):\n return json.dumps(self.as_dict())", "def as_json(self):\n return json.dumps(self.as_dict())", "def as_json(self):\n return json.dumps(self.as_dict())", "def self_json(self):\n return {\n 'id': self.id, \n 'description': self.description, \n 'price': str(self.price),\n 'quantity': self.quantity\n }", "def get_footer_text(self):\n return self._get_footer_text()", "def json(self):\n return {'id': self.id, 'name': self.name, 'description': self.description}", "def writeFooter(self):\n pass", "def to_json(self):\n return json.dumps(self.for_json())", "def build_configuration_json(self):\r\n # <root> added for interface compatibility with xmltodict.parse\r\n # class added for javascript's part purposes\r\n root = '<root class=\"{}\">{}</root>'.format(\r\n self.html_class,\r\n self.configuration)\r\n return json.dumps(xmltodict.parse(root))", "def json(self):\n return {\n \"qualified_name\": self.qualified_name,\n \"description\": self.description,\n \"data\": self.data,\n }", "def get_raw_data(self):\n return self.HTML", "def toJson(self):\n return json.dumps(self.toDict())", "def toJson(self):\n return json.dumps(self.toDict())", "def to_json_string(self):\n\t\treturn json.dumps(dataclasses.asdict(self), indent=2, sort_keys=True) + \"\\n\"", "def toJSON(self):\n\t\treturn json.dumps(self.root, default=lambda o: o.__dict__)", "def json(self):\n return self._json", "def to_json(self) -> str:\n return json.dumps(asdict(self))", "def get_html_ajax(self, data):\r\n return {'html': self.get_html()}", "def to_json(self) -> str:\n return json.dumps(self.to_dict())", "def to_json(self) -> str:\n return json.dumps(self.to_dict())", "def to_json(self) -> str:\n return json.dumps(self.to_dict())", "def to_json(self) -> str:\n return json.dumps(self.to_dict())", "def to_json(self) -> str:\n return json.dumps(self.to_dict())", "def to_json(self) -> str:\n return json.dumps(self.to_dict())", "def to_json(self) -> str:\n return json.dumps(self.to_dict())", "def to_json(self) -> str:\n return json.dumps(self.to_dict())", "def to_json(self) -> str:\n return json.dumps(self.to_dict())", "def json(self):\n return {\n 'id': self.id,\n 'id_bank_data': self.id_bank_data,\n 'national_id_document': self.national_id_document,\n 'country': self.country,\n 'name': self.name,\n 'surname': self.surname,\n 'mail': self.mail,\n 'google_token': self.google_token,\n 'role': self.role\n }", "def toJson(self):\r\n return self.__dict__", "def ToJson(self):\n output = json.dumps(self.ToDictionary(), sort_keys=True, indent=4)\n return output", "def to_json(self):\n payload = {\n \"btc_addr\": self.btc_addr,\n \"last_seen\": (datetime.utcnow() - self.last_seen).seconds,\n \"height\": self.height\n }\n return json.dumps(payload)", "def to_json(self):\n return json.dumps(\n {\n \"long_url\": self.long_url,\n \"special_code\": str(self.special_code),\n \"stub\": self.stub,\n }\n )", "def to_json(self):\n return json.dumps(self.to_dict())", "def to_json(self):\n return json.dumps(self.to_dict())", "def json(self):\n return json.dumps(self, default=lambda o: o.__dict__,\n sort_keys=True, indent=4)", "def __repr__(self):\n return json.dumps(self.__dict__)", "def to_json(self):\n return None", "def get_html_string_representation(self):\n return self.map.get_root().render()", "def to_json(self):\n return json.dumps(self.__dict__)", "def WriteFooter(self):\n return", "def json(self) -> CellJson:\n\n return {\"id\": self.id, \"content\": self.content, \"data\": self.data}", "def __init__(self, 
footer_html=None):\n self._footer_html = None\n\n if footer_html is not None:\n self.footer_html = footer_html", "def to_json(self):\n\n d = {\n \"title\": self.title,\n \"abstract\": self.abstract,\n \"intellectual_merit\": self.intellectual_merit,\n \"broader_impact\": self.broader_impact,\n \"use_of_fg\": self.use_of_fg,\n \"scale_of_use\": self.scale_of_use,\n \"categories\": self.categories,\n \"keywords\": self.keywords,\n \"primary_discipline\": self.primary_discipline,\n \"orientation\": self.orientation,\n \"contact\": self.contact,\n \"url\": self.url,\n \"active\": self.active,\n \"status\": self.status,\n \"lead\": self.lead,\n \"members\": self.members,\n \"resources_services\": self.resources_services,\n \"resources_software\": self.resources_software,\n \"resources_clusters\": self.resources_clusters,\n \"resources_provision\": self.resources_provision\n }\n return d", "def to_json_string(self):\n\t\treturn json.dumps(self.to_dict(), indent=2, sort_keys=True) + \"\\n\"", "def tojson(self):\n return json.dumps(self.jsonable())", "def to_json(self):\n return json.dumps(self.dict)", "def to_json_string(self):\n return json.dumps(dict(self), indent=2, sort_keys=True) + \"\\n\"", "def serialize(self):\n return json.dumps(self.as_dict())", "def to_json_string(self):\r\n return json.dumps(self.to_dict(), indent=2, sort_keys=True) + \"\\n\"", "def to_json_string(self):\r\n return json.dumps(self.to_dict(), indent=2, sort_keys=True) + \"\\n\"", "def to_json_string(self):\r\n return json.dumps(self.to_dict(), indent=2, sort_keys=True) + \"\\n\"", "def to_json_string(self):\r\n return json.dumps(self.to_dict(), indent=2, sort_keys=True) + \"\\n\"", "def to_json_string(self):\r\n return json.dumps(self.to_dict(), indent=2, sort_keys=True) + \"\\n\"", "def to_json_string(self):\r\n return json.dumps(self.to_dict(), indent=2, sort_keys=True) + \"\\n\"", "def to_json_string(self):\r\n return json.dumps(self.to_dict(), indent=2, sort_keys=True) + \"\\n\"", "def to_json_string(self):\r\n return json.dumps(self.to_dict(), indent=2, sort_keys=True) + \"\\n\"", "def to_json_string(self):\r\n return json.dumps(self.to_dict(), indent=2, sort_keys=True) + \"\\n\"", "def to_json_string(self):\r\n return json.dumps(self.to_dict(), indent=2, sort_keys=True) + \"\\n\"", "def getJSON(self):\n text = super().getJSON() + f', \"exchange\": \"{self.__exchange}\"'\n text += f', \"market pair\": \"{self.__market_pairs}\"'\n text += f', \"interval\": \"{self.__interval}\"}}'\n return text", "def get_footer() -> html:\n footer = dbc.Container([\n html.Hr(),\n dbc.Row([\n dbc.Col([\n 'Made with ❤ in Frankfurt from ',\n dcc.Link(children='STATWORX',\n href='https://www.statworx.com/',\n style={\"color\": COLOR_STATWORX}),\n ]),\n dbc.Col(dcc.Link(\n children='Try Again!', href='/', style={\"color\": COLOR_STATWORX}),\n className=\"text-right\")\n ])\n ],\n className='mb-4')\n\n return footer", "def json(self):\n if self.valid:\n return {\n 'articleID': self._id,\n 'ticker_symbol': self.ticker,\n 'published_date': self.pub_date,\n 'author_name': self.author,\n 'title': self.title,\n 'text': self.text,\n 'num_likes': 0,\n 'includes_symbols': self.includes\n }\n\n return {}", "def build_configuration_json(self):\n # <root> added for interface compatibility with xmltodict.parse\n # class added for javascript's part purposes\n return json.dumps(xmltodict.parse('<root class=\"' + self.html_class +\n '\">' + self.configuration + '</root>'))", "def footer_html():\n note_div = html.Div(\n [\n dcc.Markdown(\n \"This 
website uses natural language processing (NLP) to power search on a set of research papers related to COVID-19.\"\n \" It was created by the team behind [Matscholar](https://www.matscholar.com), a research effort led by the [HackingMaterials](https://hackingmaterials.lbl.gov), \"\n \" [Persson](https://perssongroup.lbl.gov), and [Ceder](https://ceder.berkeley.edu) research\"\n \" groups at Lawrence Berkeley National Lab.\"\n \" The virus icon in our logo was made by Freepik from www.flaticon.com\",\n className=\"column is-half is-size-6\"\n )\n ],\n className=\"columns is-centered\"\n\n )\n\n common_footer_style = \"has-text-weight-bold\"\n\n about_matscholar = html.A(\n \"About Matscholar\",\n href=\"https://github.com/materialsintelligence/matscholar-web\",\n target=\"_blank\",\n className=common_footer_style,\n )\n\n privacy_policy = html.A(\n \"Privacy Policy\",\n href=\"https://www.iubenda.com/privacy-policy/55585319\",\n target=\"_blank\",\n className=common_footer_style,\n )\n\n submit_feedback = html.A(\n \"Matscholar Forum\",\n href=\"https://discuss.matsci.org/c/matscholar\",\n target=\"_blank\",\n className=common_footer_style,\n )\n\n footer_link_tree = html.Div(\n [\n about_matscholar,\n html.Span(\" | \"),\n privacy_policy,\n html.Span(\" | \"),\n submit_feedback,\n ]\n )\n\n footer_copyright = html.Div(\n html.Span(\"Copyright © 2019 - Materials Intelligence\")\n )\n\n footer = html.Div(\n [note_div, footer_link_tree, footer_copyright],\n id=\"footer_container\",\n className=\"content has-text-centered\",\n )\n\n footer_container = html.Div(footer)\n return footer_container", "def html(self):\n return self._html", "def to_json(self):\n return {\n 'id': self.id,\n 'date_created': self.date_created,\n 'date_modified': self.date_modified,\n 'link': self.link,\n 'vulnerability_details_id': self.vulnerability_details_id\n }", "def to_json(self):\n return self.__dict__", "def to_json(self):\n return self.__dict__", "def to_json(self):\n return self.__dict__", "def to_json(self):\n return self.__dict__", "def as_json(self):", "def to_json(self):\n return json.dumps(sanitize_for_serialization(self.to_dict()))", "def to_json(self):\n return json.dumps(sanitize_for_serialization(self.to_dict()))", "def to_json(self):\n return json.dumps(sanitize_for_serialization(self.to_dict()))", "def to_json(self):\n return json.dumps(sanitize_for_serialization(self.to_dict()))", "def to_json(self):\n return json.dumps(sanitize_for_serialization(self.to_dict()))", "def to_json(self):\n return json.dumps(sanitize_for_serialization(self.to_dict()))", "def to_json(self):\n return json.dumps(sanitize_for_serialization(self.to_dict()))", "def to_json(self):\n return json.dumps(sanitize_for_serialization(self.to_dict()))", "def to_json(self):\n return json.dumps(sanitize_for_serialization(self.to_dict()))" ]
[ "0.73172146", "0.66936934", "0.6251055", "0.6243386", "0.6088809", "0.60859615", "0.6078452", "0.6061365", "0.6044317", "0.58748573", "0.5841819", "0.58229464", "0.58227277", "0.57736146", "0.5748268", "0.57245976", "0.5719315", "0.5718041", "0.57159793", "0.57119656", "0.56994605", "0.56936526", "0.56936526", "0.56936526", "0.5690896", "0.56812584", "0.567702", "0.5671774", "0.5669389", "0.5666012", "0.56655294", "0.5653788", "0.563135", "0.563135", "0.5626602", "0.56250757", "0.5615764", "0.5615278", "0.5613351", "0.56127536", "0.56127536", "0.56127536", "0.56127536", "0.56127536", "0.56127536", "0.56127536", "0.56127536", "0.56127536", "0.5605244", "0.56010616", "0.5599026", "0.5595635", "0.5593837", "0.558835", "0.558835", "0.5585934", "0.5581432", "0.5575108", "0.55719423", "0.556391", "0.55561733", "0.5555922", "0.5554396", "0.5553817", "0.554881", "0.5547523", "0.55326563", "0.55289596", "0.5528795", "0.5527579", "0.5527579", "0.5527579", "0.5527579", "0.5527579", "0.5527579", "0.5527579", "0.5527579", "0.5527579", "0.5527579", "0.5527282", "0.5525878", "0.55197024", "0.5515323", "0.55062765", "0.55035394", "0.5495503", "0.547269", "0.547269", "0.547269", "0.547269", "0.5470843", "0.54699016", "0.54699016", "0.54699016", "0.54699016", "0.54699016", "0.54699016", "0.54699016", "0.54699016", "0.54699016" ]
0.7223077
1
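The two footer rows above pair a setter (`footer_html`) and a JSON-serialization getter (`get`) that, judging by the `__init__` fragment recurring in their negatives, belong to the same small helper class. A minimal runnable sketch of how the pair composes, assuming a `FooterHtml` class with a private `_footer_html` attribute wrapped in a property (the class name, property decorators, and docstring placement are assumptions, not part of the rows):

class FooterHtml:
    """Sketch of the setter/getter pair from the two rows above (names assumed)."""

    def __init__(self, footer_html=None):
        # Mirrors the __init__ fragment seen among the rows' negatives.
        self._footer_html = None
        if footer_html is not None:
            self.footer_html = footer_html

    @property
    def footer_html(self):
        # The HTML content of your footer.
        return self._footer_html

    @footer_html.setter
    def footer_html(self, html):
        self._footer_html = html

    def get(self):
        # Get a JSON-ready representation of this FooterHtml.
        return self.footer_html

Used as `FooterHtml('<p>Thanks!</p>').get()`, this returns the raw HTML string that a serialized settings payload can embed directly.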
unauthorized access is forbidden
def test_artifactpriority_list_api_unauthorized(self):
    # get response
    response = self.client.get('/api/artifactpriority/')
    # compare
    self.assertEqual(response.status_code, 401)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def forbidden():\n return HttpError(403)", "def get_authenticated_denied(self):", "def forbidden(request):\n return Response(render_template('core/forbidden.html'),\n status=401, mimetype='text/html')", "def access_forbidden(e):\n return render_template(\"error/403.html\"), 403", "def test_user_not_authorized(self):\n response = self.client.post(self.url)\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def resource_forbidden(exc, request):\r\n request.response_status = \"403 Forbidden\"\r\n return {'message': str(exc)}", "def test_forbidden(self):\n self._error_test(fitbit_exceptions.HTTPForbidden)", "def denied_response(self, req):\n if req.remote_user:\n return HTTPForbidden(request=req)\n else:\n return HTTPUnauthorized(request=req)", "def forbidden_page(error):\n return render_template(\"access_forbidden.html\"), 403", "def test_unauthenticated_resource_allowed(self):\n raise NotImplementedError # FIXME", "def unauthorized():\n return HttpError(401)", "def __require_privilaged_access(self):\n if not self.getLoggedInUser():\n raise codechecker_api_shared.ttypes.RequestFailed(\n codechecker_api_shared.ttypes.ErrorCode.UNAUTHORIZED,\n \"The server must be start by using privilaged access to \"\n \"execute this action.\")", "def unauthorized():\n flash('You must be logged in to view that page')\n return redirect(url_for('catalog_bp.index'))", "def get_everyone_denied(self):", "def testGetAccessDenied(self):\n self.runGet(None, sequencer=self.hiseq2000.vendor_id)\n self.response_401()\n for user in (self.norole, self.unrelated_owner):\n self.runGet(user)\n self.response_403()", "def permission_denied(request):\n\treturn render(request, '403.html', None)", "def testGetAccessDenied(self):\n self.runGet(None, sequencer=self.hiseq2000.sodar_uuid)\n self.response_401()\n for user in (self.norole, self.unrelated_owner):\n self.runGet(user, sequencer=self.hiseq2000.sodar_uuid)\n self.response_403()", "def forbidden(self):\n self.flash(self._(\"You don't have the correct permissions to access this page.\"), category=\"error\")\n # TODO: maybe check barcamp and permissions for the barcamp homepage and redirect there instead\n # TODO: maybe create a remember decorator which remember the last page in the session which is safe to redirect to.\n # the forbidden handler should delete it though\n return redirect(self.url_for(\"index\"))", "def noaccess(self):\n self.assertEqual(self.client.get(self.ENDPOINT).status_code, 403)\n self.assertEqual(self.client.post(self.ENDPOINT).status_code, 403)\n self.assertEqual(self.client.delete(self.ENDPOINT).status_code, 403)", "def test_if_forbiden_for_authenticated_permissions(self):\r\n res = self.client_authenticated.get(reverse(LIST_USER_URL),data={})\r\n self.assertEqual(res.status_code, status.HTTP_403_FORBIDDEN)", "def testGetAccessDenied(self):\n self.runGet(None)\n self.response_401()\n for user in (self.norole, self.unrelated_owner):\n self.runGet(user)\n self.response_403()", "def test_news_index_no_perm(self):\n self.assertStatusCode(self.url, 403)", "def authorized(self):\n pass", "def deny_access():\n flash('You must login first.')\n return redirect(url_for('home'))", "def test_unauthorized(self):\n self._error_test(fitbit_exceptions.HTTPUnauthorized)", "def DeniedPermissions(self) -> _n_6_t_0:", "def forbidden(e):\n return render_template(\"errors/403.html\"), 403", "def test_not_logged_cannot_access(self):\n\n utils.test_not_logged_cannot_access(self, self.url)", "def authorization():\n pass", "def 
xhr_forbidden_view(request):\n return HTTPForbidden()", "def testUpdateAccessDenied(self):\n self.runPut(None, sequencer=self.hiseq2000.sodar_uuid, data=self.post_data)\n self.response_401()\n for user in (self.guest, self.norole, self.unrelated_owner):\n self.runPut(user, sequencer=self.hiseq2000.sodar_uuid, data=self.post_data)\n self.response_403()", "def test_no_permission(self):\n self.assertStatusCode(self.url, 403)", "def test_no_permission(self):\n self.assertStatusCode(self.url, 403)", "def test_no_permission(self):\n self.assertStatusCode(self.url, 403)", "def test_no_permission(self):\n self.assertStatusCode(self.url, 403)", "def protect_endpoint():\n pass", "def page_forbidden(e):\n return render_template(\"403.html\", page_title=403)", "def require_server_administrator():\n if not test_server_administrator():\n raise cherrypy.HTTPError(403)", "def __acl__(self):\n yield 'Allow', 'system.Everyone', 'none'\n yield security.DENY_ALL", "def before_request() -> None:\n if current_user.is_anonymous() or not current_user.is_allowed():\n abort(401)", "def test_auth_private_unowned(self):\n self.do_visible(False, 'pattieblack', False, tenant='froggy')", "def test_get(self):\n self.assertEqual(403, self.response.status_code)", "def test_unauthenticated_get(self):\n url = reverse('edit-list')\n\n response = self.client.get(url)\n self.assertEqual(403, response.status_code)\n self.assertEqual('Forbidden', response.status_text)\n self.assertTrue(\n 'credentials were not provided.' in response.data.get('detail'))", "def get(self, *args, **kwargs):\n self.write(\"Not allowed\")\n self.finish()", "def test_only_edit_perm(self):\n self.assertStatusCode(self.url, 403)", "def on_access_deny(self, handler):\n print \"User with {0} has been DENIED access.\".format(\n handler.client_address[0]\n )\n time.sleep(2) # lets annoy user if it is denied access", "def test_not_authenticated(self):\n pass # lint-amnesty, pylint: disable=unnecessary-pass", "def test_not_authenticated(self):\n pass # lint-amnesty, pylint: disable=unnecessary-pass", "def unauthorized():\n #flash('You must be logged in to view that page.')\n return redirect(url_for('login'))", "def test_not_logged_user_cannot_access(self):\n\n utils.test_not_logged_cannot_access(self, self.url)", "def test_not_logged_user_cannot_access(self):\n\n utils.test_not_logged_cannot_access(self, self.url)", "def test_not_logged_user_cannot_access(self):\n\n utils.test_not_logged_cannot_access(self, self.url)", "def test_not_logged_user_cannot_access(self):\n\n utils.test_not_logged_cannot_access(self, self.url)", "def forbidden(e):\n\n return render_template('errors/403.html'), 500", "def test_auth_public_unowned(self):\n self.do_visible(True, 'pattieblack', True, tenant='froggy')", "def test_read_unauthorized(self):\n\n url = reverse('file', kwargs={'file_id': str(self.file.id)})\n\n data = {}\n\n self.client.force_authenticate(user=self.test_user2_obj)\n response = self.client.get(url, data)\n\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)", "def unauthorized(e):\n media = session.query(Medium).all()\n return render_template('401.html', media=media), 401", "def unauthorized():\n return {'errors': ['Unauthorized']}, 401", "def test_not_logged_in(self):\n response = self.c.get(reverse(submit_point), {'lat': 34.0, 'lng': 45.3, 'zoom': 13})\n self.assertEqual(response.status_code, 403)", "def request_access(self):\n pass", "def test_only_add_perm(self):\n self.assertStatusCode(self.url, 403)", "def test_requester_is_no_admin(self) 
-> None:\n\n channel = self.make_request(\n \"GET\",\n self.url,\n access_token=self.other_user_tok,\n )\n\n self.assertEqual(403, channel.code, msg=channel.json_body)\n self.assertEqual(Codes.FORBIDDEN, channel.json_body[\"errcode\"])", "def test_requester_is_no_admin(self) -> None:\n\n channel = self.make_request(\n \"GET\",\n self.url,\n access_token=self.other_user_tok,\n )\n\n self.assertEqual(403, channel.code, msg=channel.json_body)\n self.assertEqual(Codes.FORBIDDEN, channel.json_body[\"errcode\"])", "def test_authorization_required(self, method):\n self.user.user_permissions.clear()\n\n response = getattr(self.client, method)(self.path, content_type=JSON_CONTENT_TYPE)\n assert response.status_code == 403", "def check_admin():\r\n if not current_user.is_admin:\r\n abort(403)", "def test_if_not_available_for_unauthorized(self):\r\n res = self.not_authenticated.get(reverse(LIST_USER_URL),data={})\r\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_public_status_page_remove_authorization_from_public_status_page(self):\n pass", "def test_not_authenticated(self):\n response = self.client.get(telemetry_url)\n self.assertEqual(403, response.status_code)", "def testPostAccessDenied(self):\n self.runPost(None, data=self.post_data)\n self.response_401()\n for user in (self.guest, self.norole, self.unrelated_owner):\n self.runPost(user, data=self.post_data)\n self.response_403()", "def forbidden(*args, **kwargs):\n return HttpResponseBehaviour(HttpResponseForbidden, *args, **kwargs)", "def unauthorized():\n flash('You must be logged in to view that page.', 'warning')\n return redirect(url_for('auth.login'))", "def check_unauthorized_response(response: HTTPResponse) -> bool:\n return response.status_code == 403", "def test_retrieve_unauthorized_user(self):\n\n response = self.client.get(URL_ME)\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)", "def unauthorized_handler(self):\n return flask.redirect(\"/login\")", "def unprotected_method():\n return {\"message\": \"Anyone access this function\"}", "def testNoPermission(self):\n self.login_user(self.user)\n response = self.client.get(self.url, self.args)\n self.assertEqual(response.status_code, 302)", "def protected():\n return jsonify(message=f'protected endpoint (allowed user {flask_praetorian.current_user().username})')", "def check_admin():\n\tif not current_user.is_admin:\n\t\tabort(403)", "def unauthorized():\n flash(\"You must be logged in to view that page.\")\n return redirect(url_for(\"auth.login_view\"))", "def test_retrieve_user_unauthorized(self):\r\n res = self.client.get(ME_URL)\r\n\r\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_detail_not_contributor_forbidden(self):\n self.login(self.user1)\n resp = self.client.get(self.get_url(self.c2.pk))\n self.assert403(resp)", "def check_admin():\n if not current_user.is_admin:\n abort(403)", "def check_admin():\n if not current_user.is_admin:\n abort(403)", "def _handle_view(self, name, **kwargs):\n if not self.is_accessible():\n if current_user.is_authenticated:\n # permission denied\n abort(403)\n else:\n # login\n return redirect(url_for('security.login', next=request.url))", "def _handle_view(self, name, **kwargs):\n if not self.is_accessible():\n if current_user.is_authenticated:\n # permission denied\n abort(403)\n else:\n # login\n return redirect(url_for('security.login', next=request.url))", "def _handle_view(self, name, **kwargs):\n if not self.is_accessible():\n if 
current_user.is_authenticated:\n # permission denied\n abort(403)\n else:\n # login\n return redirect(url_for('security.login', next=request.url))", "def test_authorization_is_enforced(self):\n new_client = APIClient()\n response = new_client.get('/posts/', kwargs={'pk': 3}, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def method_not_allowed() :\n raise cherrypy.HTTPError(405, \"Method Not Allowed\")", "def handle_no_permission(self):\n if self.is_ajax():\n return JsonResponse({'error': 'unauthorized'}, status=401)\n return super().handle_no_permission()", "def test_not_permitted(self):\r\n test_user_client, test_user = self.create_non_staff_authed_user_client()\r\n CourseEnrollment.enroll(test_user, self.course.id)\r\n response = test_user_client.get(self.orphan_url)\r\n self.assertEqual(response.status_code, 403)\r\n response = test_user_client.delete(self.orphan_url)\r\n self.assertEqual(response.status_code, 403)", "def testDenyAllowAccess(self):\n self.host.ContinueAuth()\n self.host.SignIn(self.account['username'], self.account['password'])\n self.host.DenyAccess()\n self.host.ContinueAuth()\n self.host.AllowAccess()", "def test_retrieve_user_unauthorized(self):\n res = self.client.get(ME_URL)\n\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_retrieve_user_unauthorized(self):\n res = self.client.get(ME_URL)\n\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)", "def permission_denied(request, exception):\n return defaults.permission_denied(request, exception, template_name=get_template_name(request, \"403.html\"))", "def test_tenant_secret_page_on_root_domain_not_be_accessible(self):\n response = self.client.get(\n self.secret_url, HTTP_HOST=self.tenant_root_domain)\n self.assertEqual(response.status_code, 403)", "def unauthorized(self, error):\n return jsonify({'error': \"NOT AUTHORIZED\"}), 401", "def test_unauthenticated(self):\n self.logout_user()\n\n response = self.client.get(self.api_link)\n self.assertContains(response, \"sign in to use private threads\", status_code=403)", "def unauthorized():\n flask.flash('You must be logged in to view that page.')\n return redirect(url_for('auth.sign_in'))", "def forbidden(error):\n return jsonify({\n 'success': False,\n 'error': STATUS_FORBIDDEN,\n 'message': ERROR_MESSAGES[STATUS_FORBIDDEN]\n }), STATUS_FORBIDDEN", "def test_unauthenticated_user_denial(self):\n\n self.response = self.client.get(\"/api/users/users_list/\")\n self.assertEqual(self.response.status_code, status.HTTP_403_FORBIDDEN)\n self.assertEqual(\n 'Authentication credentials were not provided.', self.response.data['detail'])", "def abort_unauthorized(description):\n raise Unauthorized(description=description)" ]
[ "0.8034033", "0.7552204", "0.737518", "0.73615354", "0.73118114", "0.7307068", "0.7258209", "0.7217089", "0.72039396", "0.7159985", "0.714443", "0.7120388", "0.71163434", "0.7105373", "0.71046525", "0.70585066", "0.7024013", "0.7016623", "0.7005272", "0.69919777", "0.69636977", "0.6920033", "0.6918109", "0.69017965", "0.6870412", "0.6868477", "0.6867308", "0.6820907", "0.67869484", "0.6773462", "0.6765973", "0.6759784", "0.6759784", "0.6759784", "0.6759784", "0.67438793", "0.67240983", "0.67223525", "0.6707933", "0.6700254", "0.6680997", "0.66720223", "0.6639217", "0.6631793", "0.6631242", "0.662136", "0.66158235", "0.66158235", "0.6613311", "0.6605004", "0.6605004", "0.6605004", "0.6605004", "0.6603566", "0.65961754", "0.65949106", "0.65940183", "0.6572278", "0.656859", "0.6567581", "0.65650916", "0.6564467", "0.6564467", "0.65418166", "0.6535078", "0.6531677", "0.65032357", "0.65005845", "0.6475662", "0.6470674", "0.646905", "0.64604145", "0.644479", "0.6435317", "0.64272255", "0.6423142", "0.64230007", "0.64225316", "0.6420489", "0.64190125", "0.6411388", "0.6407726", "0.6407726", "0.64066297", "0.64066297", "0.64066297", "0.6395139", "0.639439", "0.6392693", "0.6388089", "0.63809973", "0.6378072", "0.6378072", "0.63550186", "0.6347419", "0.634468", "0.6342554", "0.6336366", "0.63319486", "0.6325954", "0.63250804" ]
0.0
-1
test redirect with appending slash
def test_artifactpriority_list_api_redirect(self): # login testuser self.client.login( username='testuser_artifactpriority_api', password='IktrZIZLncwTbOBD9Bhw' ) # create url destination = urllib.parse.quote('/api/artifactpriority/', safe='/') # get response response = self.client.get('/api/artifactpriority', follow=True) # compare self.assertRedirects( response, destination, status_code=301, target_status_code=200 )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_append_slash_redirect(self):\n request = self.rf.get(\"/slash\")\n r = CommonMiddleware(get_response_empty).process_request(request)\n self.assertIsNone(r)\n response = HttpResponseNotFound()\n r = CommonMiddleware(get_response_empty).process_response(request, response)\n self.assertEqual(r.status_code, 301)\n self.assertEqual(r.url, \"/slash/\")", "def test_append_slash_redirect_querystring_have_slash(self):\n request = self.rf.get(\"/slash?test=slash/\")\n resp = CommonMiddleware(get_response_404)(request)\n self.assertIsInstance(resp, HttpResponsePermanentRedirect)\n self.assertEqual(resp.url, \"/slash/?test=slash/\")", "def test_append_slash_redirect_querystring(self):\n request = self.rf.get(\"/slash?test=1\")\n resp = CommonMiddleware(get_response_404)(request)\n self.assertEqual(resp.url, \"/slash/?test=1\")", "def test_redirection(self):\n self.assertRedirects(self.response, self.home_url)", "def redirect(url):", "def test_document_based_redirection(base_url):\n url = base_url + \"/en-US/docs/concat\"\n resp = request(\"get\", url)\n assert resp.status_code == 301\n assert (\n resp.headers[\"Location\"]\n == \"/en-US/docs/Web/JavaScript/Reference/Global_Objects/String/concat\"\n )", "def test_append_slash():\n assert normalize_url(\"http://example.com\") == \"http://example.com/\"", "def testRedirect(self):\n self.assertRaises(NotImplementedError, self.handler.redirect, '/')", "def test_append_slash_quoted(self):\n request = self.rf.get(quote(\"/needsquoting#\"))\n r = CommonMiddleware(get_response_404)(request)\n self.assertEqual(r.status_code, 301)\n self.assertEqual(r.url, \"/needsquoting%23/\")", "def test_redirect(self):\n self.app.app.preprocess_request()\n\n resp = self.r(\n ({}, # data\n 302, # status code\n None, # headers\n 'http://google.com/', # redirect_uri\n )\n )\n\n self.assertIsInstance(\n resp,\n werkzeug.wrappers.Response,\n )\n self.assertEqual(302, resp.status_code)\n self.assertEqual('http://google.com/', resp.location)", "def test_redirect_view(self):\n # TODO: Get test to work.\n client = Client()\n #response = client.get(reverse(testurl))\n #self.assertEqual(301, response.status_code)", "def test_append_slash_leading_slashes(self):\n # Use 4 slashes because of RequestFactory behavior.\n request = self.rf.get(\"////evil.com/security\")\n r = CommonMiddleware(get_response_404).process_request(request)\n self.assertIsNone(r)\n response = HttpResponseNotFound()\n r = CommonMiddleware(get_response_404).process_response(request, response)\n self.assertEqual(r.status_code, 301)\n self.assertEqual(r.url, \"/%2Fevil.com/security/\")\n r = CommonMiddleware(get_response_404)(request)\n self.assertEqual(r.status_code, 301)\n self.assertEqual(r.url, \"/%2Fevil.com/security/\")", "def test_redirect(self):\n resp = flask.make_response('')\n\n self.assertIsInstance(\n self.r(resp),\n werkzeug.wrappers.Response,\n )", "def test_redirect(self):\n resp = flask.make_response('')\n\n self.assertIsInstance(\n self.r(resp),\n werkzeug.wrappers.Response,\n )", "def test_index_redirect(self):\n response = self.app.get(\"/\")\n self.assertEqual(response.status_code, 302,\n \"/ did not redirect to login when user is not logged in\")\n self.assertTrue(\n response.location.endswith(\"/accounts/login/\"),\n \"Redirect location did not end with /accounts/login/\"\n )", "def test_redirect_status(self):\n # TODO: Get test to work.\n client = Client()\n #response = client.get(testurl)\n #self.assertEqual(301, response.status_code)", "def test_append_slash_have_slash(self):\n 
request = self.rf.get(\"/slash/\")\n self.assertIsNone(CommonMiddleware(get_response_404).process_request(request))\n self.assertEqual(CommonMiddleware(get_response_404)(request).status_code, 404)", "def test_homepage_redirect(self):\n with self.client as client:\n resp = client.get(\"/\", follow_redirects=True)\n html = resp.get_data(as_text=True)\n\n self.assertEqual(resp.status_code, 200)\n self.assertIn('Davis Test', html)", "def test_homepage_redirect_patient(self):\n\n result = self.client.get(\"/\", follow_redirects=True)\n\n self.assertIn(b\"Patient Dashboard\", result.data)", "def test_append_slash_disabled(self):\n request = self.rf.get(\"/slash\")\n self.assertEqual(CommonMiddleware(get_response_404)(request).status_code, 404)", "def test_append_slash_redirect_custom_urlconf(self):\n request = self.rf.get(\"/customurlconf/slash\")\n request.urlconf = \"middleware.extra_urls\"\n r = CommonMiddleware(get_response_404)(request)\n self.assertIsNotNone(\n r,\n \"CommonMiddleware failed to return APPEND_SLASH redirect using \"\n \"request.urlconf\",\n )\n self.assertEqual(r.status_code, 301)\n self.assertEqual(r.url, \"/customurlconf/slash/\")", "def starts_slash(url):\n return url if url.startswith(\"/\") else \"/\" + url", "def test_login_redirect(self):\n create_userRes = self.client.get(url_for('create_user'))\n endpoint = urlparse(create_userRes.location).path\n\n assert endpoint == url_for('login')", "def fix_slash(environ, wantslash):\n from colubrid.exceptions import HttpMoved\n #FIXME\n # argh. never did something that supid\n # find a better solution for that problem.\n url = quote(environ.get('SCRIPT_NAME', ''))\n url += quote(environ.get('PATH_INFO', ''))\n query = environ.get('QUERY_STRING', '')\n oldurl = query and ('%s?%s' % (url, query)) or url\n \n if oldurl and oldurl != '/':\n if url.endswith('/'):\n if not wantslash:\n url = url[:-1]\n else:\n if wantslash:\n url += '/'\n \n newurl = query and ('%s?%s' % (url, query)) or url\n if oldurl != newurl:\n raise HttpMoved(newurl)", "def test_redirects_shortlink(self):\n rv = self.post('https://www.seinfeld.com')\n assert '<a href=\"TheStakeOut\">TheStakeOut</a> is now short for <a href=\"https://www.seinfeld.com\">https://www.seinfeld.com</a>!' 
in rv.data\n rv = self.app.get('/TheStakeOut')\n assert rv.status_code == 302\n assert rv.location == 'https://www.seinfeld.com'", "def test_homepage_redirect_patient(self):\n\n result = self.client.get(\"/\", follow_redirects=True)\n\n self.assertIn(b\"Dietitian Dashboard\", result.data)", "def test_forward(self):\n short_url = ShortURL.objects.create(url='http://example.com')\n response = self.client.get('/%s'%(short_url.key))\n self.assertEqual(response.status_code, 301)", "def assertIsRedirect(self, response, path=None):\n self.assertIn(response.status_code, range(300, 400), str(response) + ' is not a redirect')\n if path:\n self.assertEqual(response['location'], path)", "def assertRedirects(self, response, url):\n server_name = self.app.config.get('SERVER_NAME') or 'localhost'\n redirect_url = response.headers.get('Location', None)\n target_url = urljoin('http://{}'.format(server_name), url)\n self.assertEqual(redirect_url, target_url)", "def test_remove_extra_slash():\n # TODO: Should we actually do this?\n # TODO: See https://webmasters.stackexchange.com/questions/8354/what-does-the-double-slash-mean-in-urls/8381#8381\n assert (normalize_url(\"http://www.example.com/foo//bar.html\") ==\n \"http://www.example.com/foo/bar.html\")\n assert(normalize_url(\"http://example.com///abc\") ==\n \"http://example.com/abc\")", "def should_redirect_without_slash(self, request):\n if getattr(settings, 'REMOVE_SLASH', False) and trailing_slash_regexp.search(request.get_full_path()):\n urlconf = getattr(request, 'urlconf', None)\n return (not urlresolvers.is_valid_path(request.path_info, urlconf) and urlresolvers.is_valid_path(\n request.path_info[:-1], urlconf))\n return False", "def test_redirect_register_home(self):\n with self.client as c:\n\n res = c.get(\"/\")\n self.assertEqual(res.status_code, 302)\n\n res = c.get(\"/users/tester1\")\n self.assertEqual(res.status_code, 302)\n\n res = c.get(\"/lists/111111\")\n self.assertEqual(res.status_code, 200)", "def test_redirects_shortlink_without_http_scheme(self):\n rv = self.post('www.seinfeld.com')\n assert '<a href=\"TheStakeOut\">TheStakeOut</a> is now short for <a href=\"www.seinfeld.com\">www.seinfeld.com</a>!' 
in rv.data\n rv = self.app.get('/TheStakeOut')\n assert rv.status_code == 302\n assert rv.location == 'http://www.seinfeld.com'", "def test_redirect_for_patient_home_route(self):\n\n result = self.client.get(\"/patient/1\", follow_redirects=True)\n self.assertEqual(result.status_code, 200)\n self.assertIn(b\"Account Details\", result.data)\n\n result = self.client.get(\"/patient/4\", follow_redirects=True)\n self.assertEqual(result.status_code, 200)\n self.assertIn(b\"not authorized\", result.data)", "def catch_all(path):\n return redirect('/', code=302)", "def test_basic(self):\n application_url = 'http://foo.com:1234'\n continue_url = ('http://foo.com:1234/my/album/of/pictures?'\n 'with=some&query=parameters')\n\n expected_location = (\n 'https://localhost:1443/login?continue='\n 'http%3A//foo.com%3A1234'\n '/my/album/of/pictures%3Fwith%3Dsome%26query%3Dparameters')\n\n def start_response(status, headers, exc_info=None):\n self.assertTrue(status.startswith('302'))\n headers = dict(headers)\n self.assertEqual({'Location': expected_location}, headers)\n self.assertEqual(None, exc_info)\n body = login.login_redirect(application_url, continue_url, start_response)\n\n self.assertEqual('', ''.join(body))", "def test_append_slash_slashless_resource(self):\n\n def get_response(req):\n return HttpResponse(\"Here's the text of the web page.\")\n\n request = self.rf.get(\"/noslash\")\n self.assertIsNone(CommonMiddleware(get_response).process_request(request))\n self.assertEqual(\n CommonMiddleware(get_response)(request).content,\n b\"Here's the text of the web page.\",\n )", "def test_append_slash_opt_out(self):\n request = self.rf.get(\"/sensitive_fbv\")\n self.assertEqual(CommonMiddleware(get_response_404)(request).status_code, 404)\n\n request = self.rf.get(\"/sensitive_cbv\")\n self.assertEqual(CommonMiddleware(get_response_404)(request).status_code, 404)", "def test_redirect_login_page(self):\n r = requests.get(self.url, allow_redirects=False)\n self.assertEqual(r.status_code, 302)\n self.assertRegexpMatches(r.headers['location'], '%s/login.*' % self.url)", "def assert_redirect_to_register_looks_correct(self, response):\r\n self.assertEqual(302, response.status_code)\r\n self.assertEqual('/' + pipeline.AUTH_ENTRY_REGISTER, response.get('Location'))", "def test_append_slash_no_redirect_on_POST_in_DEBUG_custom_urlconf(self):\n request = self.rf.get(\"/customurlconf/slash\")\n request.urlconf = \"middleware.extra_urls\"\n request.method = \"POST\"\n with self.assertRaisesMessage(RuntimeError, \"end in a slash\"):\n CommonMiddleware(get_response_404)(request)", "def redirected_to_url(url):\r\n def was_redirected(client, response, testcase):\r\n status(302)(client, response, testcase)\r\n testcase.assertEqual(\r\n response['Location'],\r\n url\r\n )\r\n return was_redirected", "def test_redirect(self):\r\n sess = FuturesSession()\r\n future = sess.get(httpbin('redirect-to?url=get'))\r\n self.assertIsInstance(future, Future)\r\n resp = future.result()\r\n self.assertIsInstance(resp, Response)\r\n self.assertEqual(200, resp.status_code)\r\n\r\n future = sess.get(httpbin('redirect-to?url=status/404'))\r\n resp = future.result()\r\n self.assertEqual(404, resp.status_code)", "def test_redirection(self):\n dashboard_url = reverse('dashboard')\n self.assertRedirects(self.response, dashboard_url)", "def assertRedirects(self, response, url):\n self.assert302(response)\n\n location = response.headers.get('Location')\n if url.startswith('http'):\n location = self.get_url(location)\n self.assertEqual(location, 
url)", "def redirection(l):\r\n l= str(l)\r\n if l.count('//')>1:\r\n return 1\r\n else:\r\n return 0", "def redirects_to(response, url):\n is_redirect = response.status_code == 302\n parsed_url = urlparse(response.get('Location'))\n is_url = parsed_url.path == url\n\n return is_redirect and is_url", "def test_append_slash_slashless_unknown(self):\n request = self.rf.get(\"/unknown\")\n response = CommonMiddleware(get_response_404)(request)\n self.assertEqual(response.status_code, 404)", "def test_parse_url_path() -> None:\n assert indieauth._parse_url(\"http://ex.com\").path == \"/\"", "def test_redirect(self):\n\n result = self.client.post(\"/login\", data={\"user_email\": \"bobbybob@gmail.com\", \"user_password\": \"1234\"},\n follow_redirects=True)\n self.assertIn(b\"I'd like to Select an Image to View Matches\", result.data)", "def test_redirect_postpage(self):\n with self.client:\n response=self.client.get('/post',follow_redirects=True)\n self.assertEqual(response.status_code, 200)", "def assert_redirect_to_login(path):\n app = HelperApp(server.message_app)\n\n # Assert that we get redirected\n response = app.get(path)\n assert response.status == \"302 Found\"\n\n # Make sure the redirect is going to the right place\n assert urlsplit(response.location).path == \"/login/\"", "def test_to_other_url(self):\n user = User(username='test', is_staff=True, is_superuser=True,\n is_active=True)\n user.set_password('test')\n user.full_clean()\n user.save()\n request = RequestFactory().get('/')\n response_302 = HttpResponseRedirect(redirect_to='/admin_mountpoint/')\n admin_instance = get_modeladmin(Iframe)\n new_response = admin_instance.maybe_fix_redirection(\n request=request, response=response_302, obj=user)\n self.assertEqual(new_response['X-Chunkadmin-Response'], 'not-chunkadmin') # noqa\n self.assertEqual(302, new_response.status_code)\n self.assertEqual('/admin_mountpoint/?_data_changed=1',\n new_response['Location'])", "def test_base_path(original_base_path, args):\n if args.skip_redirects:\n return original_base_path\n\n # WARNING: some redirects are hardcoded to production URLs.\n # Both staging and production will rate limit us.\n response = session.head(args.root_url + original_base_path, allow_redirects=True)\n\n if 200 <= response.status_code < 300:\n return response.url.replace('https://www.gov.uk', '').replace(args.root_url, '')\n elif response.status_code == 429:\n response.raise_for_status()\n else:\n if response.status_code not in (410,):\n sys.stderr.write(\"Unexpected response {} for {}\\n\".format(response.status_code, original_base_path))\n return None", "def redirector(status, start_response, exc_info=None):\n session['login.pre_uri'] = environ['PATH_INFO']\n session.save()\n start_response('302 Found',[(\"Location\",\"/login\"),(\"Content-type\",\"text\")])\n return []", "def test__safe_postlogin_redirect(self):\r\n HOST = 'testserver' # pylint: disable=C0103\r\n ONSITE1 = '/dashboard' # pylint: disable=C0103\r\n ONSITE2 = '/courses/org/num/name/courseware' # pylint: disable=C0103\r\n ONSITE3 = 'http://{}/my/custom/url'.format(HOST) # pylint: disable=C0103\r\n OFFSITE1 = 'http://www.attacker.com' # pylint: disable=C0103\r\n\r\n for redirect_to in [ONSITE1, ONSITE2, ONSITE3]:\r\n redir = _safe_postlogin_redirect(redirect_to, HOST)\r\n self.assertEqual(redir.status_code, 302)\r\n self.assertEqual(redir['location'], redirect_to)\r\n\r\n redir2 = _safe_postlogin_redirect(OFFSITE1, HOST)\r\n self.assertEqual(redir2.status_code, 302)\r\n self.assertEqual(\"/\", 
redir2['location'])", "def test_can_be_redirected(self):\n\n url = 'http://www.example.com'\n\n r = LiveRedirect(url=url,duration=HALF_DAY)\n r.save()\n\n TEST_URLS = [\n '%s/%s' % (self.live_server_url,r.slug),\n '%s/%s/' % (self.live_server_url,r.slug),\n ]\n\n for url in TEST_URLS:\n\n self.browser.get(url)\n\n body = self.browser.find_element_by_tag_name('body')\n\n # Check that it is not a 404 or 500\n self.assertNotIn('404',body.text)\n self.assertNotIn('500',body.text)\n\n # Slug page should always state what the url is\n self.assertIn(r.url, body.text, 'Link url not displayed on slug page!')\n\n # Slug page should always have a link to the correct page!\n links = self.browser.find_elements_by_tag_name('a')\n\n ok = False\n for link in links:\n if link.get_attribute('href').rstrip('/') == r.url.rstrip('/'):\n ok = True\n break\n\n self.failIf(not ok,'No link to target!')", "def test_shred_login():\n assert_redirect_to_login('/shred/')\n assert_not_redirect_to_login('/shred/')", "def contains_special_redirect(content, response_url):\n regs = [\n \"<meta\\s+http-equiv\\s?=\\s?[\\'\\\"]?refresh[\\'\\\"]?[\\s\\n]*content\\s?=\\s?[\\'\\\"]?[\\d];\\s?url\\s?=\\s?(.*?)\\\"?\\s?\\/??>\",\n \"This Cargo website is currently available here:\\s?<a href=[\\\"\\'](.*?)[\\\"\\']\"\n ]\n for reg in regs:\n p = re.compile(reg, re.IGNORECASE)\n match = re.search(p, content)\n if match != None:\n if len(match.group(1)) > 0:\n append_string = match.group(1).replace('\"', '').replace(\"'\", '')\n debug(append_string)\n if append_string.startswith(\"http\"):\n return append_string\n else:\n if not append_string.startswith(\"/\") and not response_url.endswith(\"/\"):\n append_string = \"/\" + append_string\n if append_string.startswith(\"/\") and response_url.endswith(\"/\"):\n append_string = append_string[1:]\n special_redirect = \"http://\" + response_url + append_string\n return special_redirect.strip()\n else:\n return False\n return False", "def ends_slash(url):\n return url if url.endswith(\"/\") else url + \"/\"", "def test_path(self):\n urlpath = url.URL.fromString(\"http://example.com/foo/bar?baz=quux#foobar\")\n self.assertEqual(urlpath.path, \"foo/bar\")\n urlpath = url.URL.fromString(\"http://example.com/foo%2Fbar?baz=quux#foobar\")\n self.assertEqual(urlpath.path, \"foo%2Fbar\")\n urlpath = url.URL.fromString(\"http://example.com/-_.!*'()?baz=quux#foo\")\n self.assertEqual(urlpath.path, \"-_.!*'()\")", "def validate(self, uri_path, fs_path):\n if not os.path.exists(fs_path):\n raise Response(404)\n elif os.path.isdir(fs_path) and not uri_path.endswith('/'):\n new_location = '%s/' % uri_path\n response = Response(301)\n response.headers['Location'] = new_location\n raise response\n return fs_path", "def test_append_slash_quoted_custom_urlconf(self):\n request = self.rf.get(quote(\"/customurlconf/needsquoting#\"))\n request.urlconf = \"middleware.extra_urls\"\n r = CommonMiddleware(get_response_404)(request)\n self.assertIsNotNone(\n r,\n \"CommonMiddleware failed to return APPEND_SLASH redirect using \"\n \"request.urlconf\",\n )\n self.assertEqual(r.status_code, 301)\n self.assertEqual(r.url, \"/customurlconf/needsquoting%23/\")", "def assert_redirect_to_login_looks_correct(self, response):\r\n self.assertEqual(302, response.status_code)\r\n self.assertEqual('/' + pipeline.AUTH_ENTRY_LOGIN, response.get('Location'))", "def is_redirect(response: aiohttp.ClientResponse) -> bool:\n return response.status in (300, 301, 302, 303, 307)", "def ensure_slash(text):\n if text.startswith('/'):\n 
return text\n return '/' + text", "def contains_redirect(content, _url):\n frame_url = contains_frame_redirect(content) if (len(content) < 10000000) else False\n if frame_url:\n debug(\"frame_url: {}\".format(frame_url))\n return frame_url\n\n meta_redir = contains_special_redirect(content, _url)\n if meta_redir:\n debug(\"metaredir: {}\".format(meta_redir))\n return meta_redir\n\n return False", "def redirect(uri):\n response = HttpResponse('', status=302)\n response['Location'] = uri\n return response", "def test_normalize_path(self):\n self.assertEqual(normalize_path(\"//////\"), \"/\")\n self.assertEqual(normalize_path(\"//\"), \"/\")\n self.assertEqual(normalize_path(\"//foo/bar//baz\"), \"/foo/bar/baz\")\n self.assertEqual(normalize_path(\"//foo/bar//baz/\"), \"/foo/bar/baz/\")\n self.assertEqual(normalize_path(\"//f%20oo/bar\"), \"/f oo/bar\")", "def test_redirect_if_not_logged_in(self):\n response = self.client.get(reverse('search:do_search'))\n \n self.assertRedirects(response, '/accounts/login/?next=/search/result/')", "def test_append_slash_have_slash_custom_urlconf(self):\n request = self.rf.get(\"/customurlconf/slash/\")\n request.urlconf = \"middleware.extra_urls\"\n self.assertIsNone(CommonMiddleware(get_response_404).process_request(request))\n self.assertEqual(CommonMiddleware(get_response_404)(request).status_code, 404)", "def test_non_ascii_query_string_does_not_crash(self):\n request = self.rf.get(\"/slash\")\n request.META[\"QUERY_STRING\"] = \"drink=café\"\n r = CommonMiddleware(get_response_empty).process_request(request)\n self.assertIsNone(r)\n response = HttpResponseNotFound()\n r = CommonMiddleware(get_response_empty).process_response(request, response)\n self.assertEqual(r.status_code, 301)", "def test_contact_landing_404(self):\n response = self.client.get(reverse(\n 'contact_landing', kwargs={'slug': 'sssss'}))\n self.assertEqual(response.status_code, 404)\n response = self.client.get(\n reverse('contact_landing', kwargs={'slug': 'sss--ss'}))\n self.assertEqual(response.status_code, 404)", "def test_if_home_is_successful(client):\n\n url = reverse(\"home\")\n response = client.get(url)\n assert response.status_code == 200", "def test_absolute_url(self):\n response = self.client.get(self.htsv.get_absolute_url())\n self.assertEqual(response.status_code, 200)", "def test_action_redirects(self):\n req = self.req(\"post\", \"/the/url\", data={\"action-doit\": \"3\"})\n\n res = self.view(req)\n\n self.assertEqual(res.status_code, 302)\n self.assertEqual(res[\"Location\"], \"/the/url\")", "def redirected(path, status=302, ssl=False):\r\n def was_redirected(client, response, testcase):\r\n if ssl:\r\n client.defaults['HTTPS'] = 'on'\r\n testcase.assertRedirects(response, path, status_code=status)\r\n return was_redirected", "def redirect(url, status=None):\n raise cherrypy.HTTPRedirect(url, status)", "def ensure_path(self, page):\n return page if page.startswith('/') else \"/{0}\".format(page)", "def test_wiki_redirect(self):\r\n self.login(self.student, self.password)\r\n\r\n self.enroll(self.toy)\r\n\r\n referer = reverse(\"progress\", kwargs={'course_id': self.toy.id.to_deprecated_string()})\r\n destination = reverse(\"wiki:get\", kwargs={'path': 'some/fake/wiki/page/'})\r\n\r\n redirected_to = referer.replace(\"progress\", \"wiki/some/fake/wiki/page/\")\r\n\r\n resp = self.client.get(destination, HTTP_REFERER=referer)\r\n self.assertEqual(resp.status_code, 302)\r\n\r\n self.assertEqual(resp['Location'], 'http://testserver' + redirected_to)\r\n\r\n # Now we test 
that the student will be redirected away from that page if the course doesn't exist\r\n # We do this in the same test because we want to make sure the redirected_to is constructed correctly\r\n # This is a location like /courses/*/wiki/* , but with an invalid course ID\r\n bad_course_wiki_page = redirected_to.replace(self.toy.location.course, \"bad_course\")\r\n\r\n resp = self.client.get(bad_course_wiki_page, HTTP_REFERER=referer)\r\n self.assertEqual(resp.status_code, 302)\r\n self.assertEqual(resp['Location'], 'http://testserver' + destination)", "def test_realpath(self):\n print real_upath(\"ref with space\")\n self.assertTrue(real_upath(\"ref with space\").endswith(\"ref\\ with\\ space\"))\n self.assertTrue(real_upath(\"ref\\ with\\ space\").endswith(\"ref\\ with\\ space\"))\n self.assertTrue(real_ppath(\"ref with space\").endswith(\"ref with space\"))\n self.assertTrue(real_ppath(\"ref\\ with\\ space\").endswith(\"ref with space\"))", "def detect_redirection(server, session, logger):\n response = session.get(server.rstrip('/') + '/data/projects')\n logger.debug('Response url: {}'.format(response.url))\n response_url = response.url[:-13]\n if response_url != server and response_url != server + '/':\n logger.warning('Detected a redirect from {0} to {1}, using {1} from now on'.format(server, response_url))\n return response_url", "def test_url_path(self):\n response = self.client.get('/planner/recipes/1/')\n self.assertEqual(response.status_code, 200)", "def test_api_redirects_to_docs(self):\n\n\t\twith self.client:\n\t\t\tget_doc = self.client.get('/')\n\t\t\tself.assertTrue(get_doc.status_code == 302)", "def test_login_openid_handle_redirection(self):\r\n response = self._send_bad_redirection_login()\r\n self.assertEquals(response.status_code, 302)", "def test_routing_policy_replace_path(api_client):\n response = api_client().get(\"/anything/anything\")\n assert response.status_code == 200\n\n echoed_request = EchoedRequest.create(response)\n assert echoed_request.path == \"/anything\"", "def test_non_ideal_inputs():\n assert normalize_url(\"example.com\") == \"http://example.com/\"\n assert normalize_url(\"example.com/abc\") == \"http://example.com/abc\"\n assert normalize_url(\"//example.com/abc\") == \"http://example.com/abc\"", "def test_absolute_url_without_request(self):\n context = {}\n\n abs_url = absolute_url(context, \"/ciao/\")\n self.assertEqual(abs_url, \"/ciao/\")\n self.assertNotEqual(abs_url, \"/\")", "def test_get_redirect(test_case, page):\n with test_case.app.test_client() as c:\n test_case.assertEqual(302, c.get('dashboard/{}'.format(page)).status_code)", "def test_url_path(self):\n url = create_url(url=\"http://www.example.com\", path=\"path/to/resource\")\n self.assertEqual(url, \"http://www.example.com/path/to/resource\")", "def test_redirect_with_empty_cart(self):\n response = self.client.get(\"/cart/\")\n self.assertEqual(response.status_code, 302)", "def test_append_slash_no_redirect_on_POST_in_DEBUG(self):\n msg = \"maintaining %s data. 
Change your form to point to testserver/slash/\"\n request = self.rf.get(\"/slash\")\n request.method = \"POST\"\n with self.assertRaisesMessage(RuntimeError, msg % request.method):\n CommonMiddleware(get_response_404)(request)\n request = self.rf.get(\"/slash\")\n request.method = \"PUT\"\n with self.assertRaisesMessage(RuntimeError, msg % request.method):\n CommonMiddleware(get_response_404)(request)\n request = self.rf.get(\"/slash\")\n request.method = \"PATCH\"\n with self.assertRaisesMessage(RuntimeError, msg % request.method):\n CommonMiddleware(get_response_404)(request)", "def test_normalized_urls():\n assert normalize_url(\"http://example.com/\") == \"http://example.com/\"", "def ensureOneSlash(s):\n\treturn s.rstrip(\"/\")+\"/\"", "def assert_redirect_to_provider_looks_correct(self, response):\r\n self.assertEqual(302, response.status_code)\r\n self.assertTrue(response.has_header('Location'))", "def test_index(self):\n r = self.client.get('/')\n self.assertEqual(r.status_code, 302)", "def test_edge_redirect_to_login(self):\r\n\r\n request = self.factory.get('/')\r\n request.user = AnonymousUser()\r\n\r\n # HTTP Host changed to edge.\r\n request.META[\"HTTP_HOST\"] = \"edge.edx.org\"\r\n response = index(request)\r\n\r\n # Response should be instance of HttpResponseRedirect.\r\n self.assertIsInstance(response, HttpResponseRedirect)\r\n # Location should be \"/login\".\r\n self.assertEqual(response._headers.get(\"location\")[1], \"/login\")", "def redirect_view(request):\n path = request.GET.get(\"to\") or \"/\"\n return redirect(path if path.startswith(\"/\") else f\"/{path}\", permanent=True)", "def test_redirect_if_not_logged_in(self):\n response = self.client.get(reverse('my-borrowed'))\n self.assertRedirects(\n response,\n '/accounts/login/?next=/catalog/mybooks/'\n )", "def test_mineral_list_redirect(self):\n resp = self.client.get(reverse('minerals:list'))\n self.assertEqual(resp.status_code, 302)", "def test_redirect(self):\n\n result = self.client.post(\"/registration\", data={\"first_name\": \"Bobby\", \"last_name\": \"Bob\", \"email\": \"bobbers@gmail.com\", \"password\": \"1234\",\n \"birthday_month\": \"January\", \"birthday_day\": 12, \"birthday_year\": 1991}, follow_redirects=True)\n self.assertIn(b\"Email address\", result.data)" ]
[ "0.7745988", "0.7601577", "0.749149", "0.69618195", "0.69307077", "0.68707025", "0.6825404", "0.66935575", "0.66655666", "0.6664395", "0.6626923", "0.6614033", "0.65820646", "0.65820646", "0.6524852", "0.65176207", "0.6499768", "0.64643717", "0.64202327", "0.6407755", "0.6398387", "0.6393049", "0.634507", "0.6342756", "0.63416094", "0.6339316", "0.6338891", "0.6335258", "0.63273966", "0.63058966", "0.6303642", "0.6297208", "0.627749", "0.626543", "0.62523186", "0.6232873", "0.6203312", "0.62002456", "0.61969066", "0.6165525", "0.6145896", "0.61346084", "0.6124077", "0.6110931", "0.611036", "0.6096171", "0.6076292", "0.60666883", "0.6041674", "0.6029842", "0.6017436", "0.6007792", "0.59686226", "0.5957761", "0.5950977", "0.5947412", "0.59381706", "0.59180665", "0.59063977", "0.5883906", "0.5866194", "0.58657175", "0.5854244", "0.5854066", "0.58473575", "0.5835963", "0.58034635", "0.5802274", "0.5800893", "0.5778162", "0.57748663", "0.5773612", "0.57734257", "0.5760888", "0.57602715", "0.5730362", "0.5728612", "0.57171965", "0.57051706", "0.5698938", "0.5689726", "0.5680875", "0.56783503", "0.5676821", "0.56738526", "0.5669843", "0.56569", "0.5656569", "0.565606", "0.5652365", "0.56281006", "0.56217253", "0.5620249", "0.5619179", "0.5610698", "0.5610058", "0.560483", "0.5592861", "0.5583396", "0.5579063", "0.55788535" ]
0.0
-1
unauthorized access is forbidden
def test_artifactpriority_detail_api_unauthorized(self): # get object artifactpriority_api_1 = Artifactpriority.objects.get( artifactpriority_name='artifactpriority_api_1' ) # get response response = self.client.get( '/api/artifactpriority/' + str(artifactpriority_api_1.artifactpriority_id) + '/' ) # compare self.assertEqual(response.status_code, 401)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def forbidden():\n return HttpError(403)", "def get_authenticated_denied(self):", "def forbidden(request):\n return Response(render_template('core/forbidden.html'),\n status=401, mimetype='text/html')", "def access_forbidden(e):\n return render_template(\"error/403.html\"), 403", "def test_user_not_authorized(self):\n response = self.client.post(self.url)\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def resource_forbidden(exc, request):\r\n request.response_status = \"403 Forbidden\"\r\n return {'message': str(exc)}", "def test_forbidden(self):\n self._error_test(fitbit_exceptions.HTTPForbidden)", "def denied_response(self, req):\n if req.remote_user:\n return HTTPForbidden(request=req)\n else:\n return HTTPUnauthorized(request=req)", "def forbidden_page(error):\n return render_template(\"access_forbidden.html\"), 403", "def test_unauthenticated_resource_allowed(self):\n raise NotImplementedError # FIXME", "def unauthorized():\n return HttpError(401)", "def __require_privilaged_access(self):\n if not self.getLoggedInUser():\n raise codechecker_api_shared.ttypes.RequestFailed(\n codechecker_api_shared.ttypes.ErrorCode.UNAUTHORIZED,\n \"The server must be start by using privilaged access to \"\n \"execute this action.\")", "def unauthorized():\n flash('You must be logged in to view that page')\n return redirect(url_for('catalog_bp.index'))", "def get_everyone_denied(self):", "def testGetAccessDenied(self):\n self.runGet(None, sequencer=self.hiseq2000.vendor_id)\n self.response_401()\n for user in (self.norole, self.unrelated_owner):\n self.runGet(user)\n self.response_403()", "def permission_denied(request):\n\treturn render(request, '403.html', None)", "def testGetAccessDenied(self):\n self.runGet(None, sequencer=self.hiseq2000.sodar_uuid)\n self.response_401()\n for user in (self.norole, self.unrelated_owner):\n self.runGet(user, sequencer=self.hiseq2000.sodar_uuid)\n self.response_403()", "def forbidden(self):\n self.flash(self._(\"You don't have the correct permissions to access this page.\"), category=\"error\")\n # TODO: maybe check barcamp and permissions for the barcamp homepage and redirect there instead\n # TODO: maybe create a remember decorator which remember the last page in the session which is safe to redirect to.\n # the forbidden handler should delete it though\n return redirect(self.url_for(\"index\"))", "def noaccess(self):\n self.assertEqual(self.client.get(self.ENDPOINT).status_code, 403)\n self.assertEqual(self.client.post(self.ENDPOINT).status_code, 403)\n self.assertEqual(self.client.delete(self.ENDPOINT).status_code, 403)", "def test_if_forbiden_for_authenticated_permissions(self):\r\n res = self.client_authenticated.get(reverse(LIST_USER_URL),data={})\r\n self.assertEqual(res.status_code, status.HTTP_403_FORBIDDEN)", "def testGetAccessDenied(self):\n self.runGet(None)\n self.response_401()\n for user in (self.norole, self.unrelated_owner):\n self.runGet(user)\n self.response_403()", "def test_news_index_no_perm(self):\n self.assertStatusCode(self.url, 403)", "def authorized(self):\n pass", "def deny_access():\n flash('You must login first.')\n return redirect(url_for('home'))", "def test_unauthorized(self):\n self._error_test(fitbit_exceptions.HTTPUnauthorized)", "def DeniedPermissions(self) -> _n_6_t_0:", "def forbidden(e):\n return render_template(\"errors/403.html\"), 403", "def test_not_logged_cannot_access(self):\n\n utils.test_not_logged_cannot_access(self, self.url)", "def authorization():\n pass", "def 
xhr_forbidden_view(request):\n return HTTPForbidden()", "def testUpdateAccessDenied(self):\n self.runPut(None, sequencer=self.hiseq2000.sodar_uuid, data=self.post_data)\n self.response_401()\n for user in (self.guest, self.norole, self.unrelated_owner):\n self.runPut(user, sequencer=self.hiseq2000.sodar_uuid, data=self.post_data)\n self.response_403()", "def test_no_permission(self):\n self.assertStatusCode(self.url, 403)", "def test_no_permission(self):\n self.assertStatusCode(self.url, 403)", "def test_no_permission(self):\n self.assertStatusCode(self.url, 403)", "def test_no_permission(self):\n self.assertStatusCode(self.url, 403)", "def protect_endpoint():\n pass", "def page_forbidden(e):\n return render_template(\"403.html\", page_title=403)", "def require_server_administrator():\n if not test_server_administrator():\n raise cherrypy.HTTPError(403)", "def __acl__(self):\n yield 'Allow', 'system.Everyone', 'none'\n yield security.DENY_ALL", "def before_request() -> None:\n if current_user.is_anonymous() or not current_user.is_allowed():\n abort(401)", "def test_auth_private_unowned(self):\n self.do_visible(False, 'pattieblack', False, tenant='froggy')", "def test_get(self):\n self.assertEqual(403, self.response.status_code)", "def test_unauthenticated_get(self):\n url = reverse('edit-list')\n\n response = self.client.get(url)\n self.assertEqual(403, response.status_code)\n self.assertEqual('Forbidden', response.status_text)\n self.assertTrue(\n 'credentials were not provided.' in response.data.get('detail'))", "def get(self, *args, **kwargs):\n self.write(\"Not allowed\")\n self.finish()", "def test_only_edit_perm(self):\n self.assertStatusCode(self.url, 403)", "def on_access_deny(self, handler):\n print \"User with {0} has been DENIED access.\".format(\n handler.client_address[0]\n )\n time.sleep(2) # lets annoy user if it is denied access", "def test_not_authenticated(self):\n pass # lint-amnesty, pylint: disable=unnecessary-pass", "def test_not_authenticated(self):\n pass # lint-amnesty, pylint: disable=unnecessary-pass", "def unauthorized():\n #flash('You must be logged in to view that page.')\n return redirect(url_for('login'))", "def test_not_logged_user_cannot_access(self):\n\n utils.test_not_logged_cannot_access(self, self.url)", "def test_not_logged_user_cannot_access(self):\n\n utils.test_not_logged_cannot_access(self, self.url)", "def test_not_logged_user_cannot_access(self):\n\n utils.test_not_logged_cannot_access(self, self.url)", "def test_not_logged_user_cannot_access(self):\n\n utils.test_not_logged_cannot_access(self, self.url)", "def forbidden(e):\n\n return render_template('errors/403.html'), 500", "def test_auth_public_unowned(self):\n self.do_visible(True, 'pattieblack', True, tenant='froggy')", "def test_read_unauthorized(self):\n\n url = reverse('file', kwargs={'file_id': str(self.file.id)})\n\n data = {}\n\n self.client.force_authenticate(user=self.test_user2_obj)\n response = self.client.get(url, data)\n\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)", "def unauthorized(e):\n media = session.query(Medium).all()\n return render_template('401.html', media=media), 401", "def unauthorized():\n return {'errors': ['Unauthorized']}, 401", "def test_not_logged_in(self):\n response = self.c.get(reverse(submit_point), {'lat': 34.0, 'lng': 45.3, 'zoom': 13})\n self.assertEqual(response.status_code, 403)", "def request_access(self):\n pass", "def test_only_add_perm(self):\n self.assertStatusCode(self.url, 403)", "def test_requester_is_no_admin(self) 
-> None:\n\n channel = self.make_request(\n \"GET\",\n self.url,\n access_token=self.other_user_tok,\n )\n\n self.assertEqual(403, channel.code, msg=channel.json_body)\n self.assertEqual(Codes.FORBIDDEN, channel.json_body[\"errcode\"])", "def test_requester_is_no_admin(self) -> None:\n\n channel = self.make_request(\n \"GET\",\n self.url,\n access_token=self.other_user_tok,\n )\n\n self.assertEqual(403, channel.code, msg=channel.json_body)\n self.assertEqual(Codes.FORBIDDEN, channel.json_body[\"errcode\"])", "def test_authorization_required(self, method):\n self.user.user_permissions.clear()\n\n response = getattr(self.client, method)(self.path, content_type=JSON_CONTENT_TYPE)\n assert response.status_code == 403", "def check_admin():\r\n if not current_user.is_admin:\r\n abort(403)", "def test_if_not_available_for_unauthorized(self):\r\n res = self.not_authenticated.get(reverse(LIST_USER_URL),data={})\r\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_public_status_page_remove_authorization_from_public_status_page(self):\n pass", "def test_not_authenticated(self):\n response = self.client.get(telemetry_url)\n self.assertEqual(403, response.status_code)", "def testPostAccessDenied(self):\n self.runPost(None, data=self.post_data)\n self.response_401()\n for user in (self.guest, self.norole, self.unrelated_owner):\n self.runPost(user, data=self.post_data)\n self.response_403()", "def forbidden(*args, **kwargs):\n return HttpResponseBehaviour(HttpResponseForbidden, *args, **kwargs)", "def unauthorized():\n flash('You must be logged in to view that page.', 'warning')\n return redirect(url_for('auth.login'))", "def check_unauthorized_response(response: HTTPResponse) -> bool:\n return response.status_code == 403", "def test_retrieve_unauthorized_user(self):\n\n response = self.client.get(URL_ME)\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)", "def unauthorized_handler(self):\n return flask.redirect(\"/login\")", "def unprotected_method():\n return {\"message\": \"Anyone access this function\"}", "def testNoPermission(self):\n self.login_user(self.user)\n response = self.client.get(self.url, self.args)\n self.assertEqual(response.status_code, 302)", "def protected():\n return jsonify(message=f'protected endpoint (allowed user {flask_praetorian.current_user().username})')", "def check_admin():\n\tif not current_user.is_admin:\n\t\tabort(403)", "def unauthorized():\n flash(\"You must be logged in to view that page.\")\n return redirect(url_for(\"auth.login_view\"))", "def test_retrieve_user_unauthorized(self):\r\n res = self.client.get(ME_URL)\r\n\r\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_detail_not_contributor_forbidden(self):\n self.login(self.user1)\n resp = self.client.get(self.get_url(self.c2.pk))\n self.assert403(resp)", "def check_admin():\n if not current_user.is_admin:\n abort(403)", "def check_admin():\n if not current_user.is_admin:\n abort(403)", "def _handle_view(self, name, **kwargs):\n if not self.is_accessible():\n if current_user.is_authenticated:\n # permission denied\n abort(403)\n else:\n # login\n return redirect(url_for('security.login', next=request.url))", "def _handle_view(self, name, **kwargs):\n if not self.is_accessible():\n if current_user.is_authenticated:\n # permission denied\n abort(403)\n else:\n # login\n return redirect(url_for('security.login', next=request.url))", "def _handle_view(self, name, **kwargs):\n if not self.is_accessible():\n if 
current_user.is_authenticated:\n # permission denied\n abort(403)\n else:\n # login\n return redirect(url_for('security.login', next=request.url))", "def test_authorization_is_enforced(self):\n new_client = APIClient()\n response = new_client.get('/posts/', kwargs={'pk': 3}, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def method_not_allowed() :\n raise cherrypy.HTTPError(405, \"Method Not Allowed\")", "def handle_no_permission(self):\n if self.is_ajax():\n return JsonResponse({'error': 'unauthorized'}, status=401)\n return super().handle_no_permission()", "def test_not_permitted(self):\r\n test_user_client, test_user = self.create_non_staff_authed_user_client()\r\n CourseEnrollment.enroll(test_user, self.course.id)\r\n response = test_user_client.get(self.orphan_url)\r\n self.assertEqual(response.status_code, 403)\r\n response = test_user_client.delete(self.orphan_url)\r\n self.assertEqual(response.status_code, 403)", "def testDenyAllowAccess(self):\n self.host.ContinueAuth()\n self.host.SignIn(self.account['username'], self.account['password'])\n self.host.DenyAccess()\n self.host.ContinueAuth()\n self.host.AllowAccess()", "def test_retrieve_user_unauthorized(self):\n res = self.client.get(ME_URL)\n\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_retrieve_user_unauthorized(self):\n res = self.client.get(ME_URL)\n\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)", "def permission_denied(request, exception):\n return defaults.permission_denied(request, exception, template_name=get_template_name(request, \"403.html\"))", "def test_tenant_secret_page_on_root_domain_not_be_accessible(self):\n response = self.client.get(\n self.secret_url, HTTP_HOST=self.tenant_root_domain)\n self.assertEqual(response.status_code, 403)", "def unauthorized(self, error):\n return jsonify({'error': \"NOT AUTHORIZED\"}), 401", "def test_unauthenticated(self):\n self.logout_user()\n\n response = self.client.get(self.api_link)\n self.assertContains(response, \"sign in to use private threads\", status_code=403)", "def unauthorized():\n flask.flash('You must be logged in to view that page.')\n return redirect(url_for('auth.sign_in'))", "def forbidden(error):\n return jsonify({\n 'success': False,\n 'error': STATUS_FORBIDDEN,\n 'message': ERROR_MESSAGES[STATUS_FORBIDDEN]\n }), STATUS_FORBIDDEN", "def test_unauthenticated_user_denial(self):\n\n self.response = self.client.get(\"/api/users/users_list/\")\n self.assertEqual(self.response.status_code, status.HTTP_403_FORBIDDEN)\n self.assertEqual(\n 'Authentication credentials were not provided.', self.response.data['detail'])", "def abort_unauthorized(description):\n raise Unauthorized(description=description)" ]
[ "0.8034033", "0.7552204", "0.737518", "0.73615354", "0.73118114", "0.7307068", "0.7258209", "0.7217089", "0.72039396", "0.7159985", "0.714443", "0.7120388", "0.71163434", "0.7105373", "0.71046525", "0.70585066", "0.7024013", "0.7016623", "0.7005272", "0.69919777", "0.69636977", "0.6920033", "0.6918109", "0.69017965", "0.6870412", "0.6868477", "0.6867308", "0.6820907", "0.67869484", "0.6773462", "0.6765973", "0.6759784", "0.6759784", "0.6759784", "0.6759784", "0.67438793", "0.67240983", "0.67223525", "0.6707933", "0.6700254", "0.6680997", "0.66720223", "0.6639217", "0.6631793", "0.6631242", "0.662136", "0.66158235", "0.66158235", "0.6613311", "0.6605004", "0.6605004", "0.6605004", "0.6605004", "0.6603566", "0.65961754", "0.65949106", "0.65940183", "0.6572278", "0.656859", "0.6567581", "0.65650916", "0.6564467", "0.6564467", "0.65418166", "0.6535078", "0.6531677", "0.65032357", "0.65005845", "0.6475662", "0.6470674", "0.646905", "0.64604145", "0.644479", "0.6435317", "0.64272255", "0.6423142", "0.64230007", "0.64225316", "0.6420489", "0.64190125", "0.6411388", "0.6407726", "0.6407726", "0.64066297", "0.64066297", "0.64066297", "0.6395139", "0.639439", "0.6392693", "0.6388089", "0.63809973", "0.6378072", "0.6378072", "0.63550186", "0.6347419", "0.634468", "0.6342554", "0.6336366", "0.63319486", "0.6325954", "0.63250804" ]
0.0
-1
test redirect with appending slash
def test_artifactpriority_detail_api_redirect(self): # get object artifactpriority_api_1 = Artifactpriority.objects.get( artifactpriority_name='artifactpriority_api_1' ) # login testuser self.client.login( username='testuser_artifactpriority_api', password='IktrZIZLncwTbOBD9Bhw' ) # create url destination = urllib.parse.quote( '/api/artifactpriority/' + str(artifactpriority_api_1.artifactpriority_id) + '/', safe='/', ) # get response response = self.client.get( '/api/artifactpriority/' + str(artifactpriority_api_1.artifactpriority_id), follow=True, ) # compare self.assertRedirects( response, destination, status_code=301, target_status_code=200 )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_append_slash_redirect(self):\n request = self.rf.get(\"/slash\")\n r = CommonMiddleware(get_response_empty).process_request(request)\n self.assertIsNone(r)\n response = HttpResponseNotFound()\n r = CommonMiddleware(get_response_empty).process_response(request, response)\n self.assertEqual(r.status_code, 301)\n self.assertEqual(r.url, \"/slash/\")", "def test_append_slash_redirect_querystring_have_slash(self):\n request = self.rf.get(\"/slash?test=slash/\")\n resp = CommonMiddleware(get_response_404)(request)\n self.assertIsInstance(resp, HttpResponsePermanentRedirect)\n self.assertEqual(resp.url, \"/slash/?test=slash/\")", "def test_append_slash_redirect_querystring(self):\n request = self.rf.get(\"/slash?test=1\")\n resp = CommonMiddleware(get_response_404)(request)\n self.assertEqual(resp.url, \"/slash/?test=1\")", "def test_redirection(self):\n self.assertRedirects(self.response, self.home_url)", "def redirect(url):", "def test_document_based_redirection(base_url):\n url = base_url + \"/en-US/docs/concat\"\n resp = request(\"get\", url)\n assert resp.status_code == 301\n assert (\n resp.headers[\"Location\"]\n == \"/en-US/docs/Web/JavaScript/Reference/Global_Objects/String/concat\"\n )", "def test_append_slash():\n assert normalize_url(\"http://example.com\") == \"http://example.com/\"", "def testRedirect(self):\n self.assertRaises(NotImplementedError, self.handler.redirect, '/')", "def test_append_slash_quoted(self):\n request = self.rf.get(quote(\"/needsquoting#\"))\n r = CommonMiddleware(get_response_404)(request)\n self.assertEqual(r.status_code, 301)\n self.assertEqual(r.url, \"/needsquoting%23/\")", "def test_redirect(self):\n self.app.app.preprocess_request()\n\n resp = self.r(\n ({}, # data\n 302, # status code\n None, # headers\n 'http://google.com/', # redirect_uri\n )\n )\n\n self.assertIsInstance(\n resp,\n werkzeug.wrappers.Response,\n )\n self.assertEqual(302, resp.status_code)\n self.assertEqual('http://google.com/', resp.location)", "def test_redirect_view(self):\n # TODO: Get test to work.\n client = Client()\n #response = client.get(reverse(testurl))\n #self.assertEqual(301, response.status_code)", "def test_append_slash_leading_slashes(self):\n # Use 4 slashes because of RequestFactory behavior.\n request = self.rf.get(\"////evil.com/security\")\n r = CommonMiddleware(get_response_404).process_request(request)\n self.assertIsNone(r)\n response = HttpResponseNotFound()\n r = CommonMiddleware(get_response_404).process_response(request, response)\n self.assertEqual(r.status_code, 301)\n self.assertEqual(r.url, \"/%2Fevil.com/security/\")\n r = CommonMiddleware(get_response_404)(request)\n self.assertEqual(r.status_code, 301)\n self.assertEqual(r.url, \"/%2Fevil.com/security/\")", "def test_redirect(self):\n resp = flask.make_response('')\n\n self.assertIsInstance(\n self.r(resp),\n werkzeug.wrappers.Response,\n )", "def test_redirect(self):\n resp = flask.make_response('')\n\n self.assertIsInstance(\n self.r(resp),\n werkzeug.wrappers.Response,\n )", "def test_index_redirect(self):\n response = self.app.get(\"/\")\n self.assertEqual(response.status_code, 302,\n \"/ did not redirect to login when user is not logged in\")\n self.assertTrue(\n response.location.endswith(\"/accounts/login/\"),\n \"Redirect location did not end with /accounts/login/\"\n )", "def test_redirect_status(self):\n # TODO: Get test to work.\n client = Client()\n #response = client.get(testurl)\n #self.assertEqual(301, response.status_code)", "def test_append_slash_have_slash(self):\n 
request = self.rf.get(\"/slash/\")\n self.assertIsNone(CommonMiddleware(get_response_404).process_request(request))\n self.assertEqual(CommonMiddleware(get_response_404)(request).status_code, 404)", "def test_homepage_redirect(self):\n with self.client as client:\n resp = client.get(\"/\", follow_redirects=True)\n html = resp.get_data(as_text=True)\n\n self.assertEqual(resp.status_code, 200)\n self.assertIn('Davis Test', html)", "def test_homepage_redirect_patient(self):\n\n result = self.client.get(\"/\", follow_redirects=True)\n\n self.assertIn(b\"Patient Dashboard\", result.data)", "def test_append_slash_disabled(self):\n request = self.rf.get(\"/slash\")\n self.assertEqual(CommonMiddleware(get_response_404)(request).status_code, 404)", "def test_append_slash_redirect_custom_urlconf(self):\n request = self.rf.get(\"/customurlconf/slash\")\n request.urlconf = \"middleware.extra_urls\"\n r = CommonMiddleware(get_response_404)(request)\n self.assertIsNotNone(\n r,\n \"CommonMiddleware failed to return APPEND_SLASH redirect using \"\n \"request.urlconf\",\n )\n self.assertEqual(r.status_code, 301)\n self.assertEqual(r.url, \"/customurlconf/slash/\")", "def starts_slash(url):\n return url if url.startswith(\"/\") else \"/\" + url", "def test_login_redirect(self):\n create_userRes = self.client.get(url_for('create_user'))\n endpoint = urlparse(create_userRes.location).path\n\n assert endpoint == url_for('login')", "def fix_slash(environ, wantslash):\n from colubrid.exceptions import HttpMoved\n #FIXME\n # argh. never did something that supid\n # find a better solution for that problem.\n url = quote(environ.get('SCRIPT_NAME', ''))\n url += quote(environ.get('PATH_INFO', ''))\n query = environ.get('QUERY_STRING', '')\n oldurl = query and ('%s?%s' % (url, query)) or url\n \n if oldurl and oldurl != '/':\n if url.endswith('/'):\n if not wantslash:\n url = url[:-1]\n else:\n if wantslash:\n url += '/'\n \n newurl = query and ('%s?%s' % (url, query)) or url\n if oldurl != newurl:\n raise HttpMoved(newurl)", "def test_redirects_shortlink(self):\n rv = self.post('https://www.seinfeld.com')\n assert '<a href=\"TheStakeOut\">TheStakeOut</a> is now short for <a href=\"https://www.seinfeld.com\">https://www.seinfeld.com</a>!' 
in rv.data\n rv = self.app.get('/TheStakeOut')\n assert rv.status_code == 302\n assert rv.location == 'https://www.seinfeld.com'", "def test_homepage_redirect_patient(self):\n\n result = self.client.get(\"/\", follow_redirects=True)\n\n self.assertIn(b\"Dietitian Dashboard\", result.data)", "def test_forward(self):\n short_url = ShortURL.objects.create(url='http://example.com')\n response = self.client.get('/%s'%(short_url.key))\n self.assertEqual(response.status_code, 301)", "def assertIsRedirect(self, response, path=None):\n self.assertIn(response.status_code, range(300, 400), str(response) + ' is not a redirect')\n if path:\n self.assertEqual(response['location'], path)", "def assertRedirects(self, response, url):\n server_name = self.app.config.get('SERVER_NAME') or 'localhost'\n redirect_url = response.headers.get('Location', None)\n target_url = urljoin('http://{}'.format(server_name), url)\n self.assertEqual(redirect_url, target_url)", "def test_remove_extra_slash():\n # TODO: Should we actually do this?\n # TODO: See https://webmasters.stackexchange.com/questions/8354/what-does-the-double-slash-mean-in-urls/8381#8381\n assert (normalize_url(\"http://www.example.com/foo//bar.html\") ==\n \"http://www.example.com/foo/bar.html\")\n assert(normalize_url(\"http://example.com///abc\") ==\n \"http://example.com/abc\")", "def should_redirect_without_slash(self, request):\n if getattr(settings, 'REMOVE_SLASH', False) and trailing_slash_regexp.search(request.get_full_path()):\n urlconf = getattr(request, 'urlconf', None)\n return (not urlresolvers.is_valid_path(request.path_info, urlconf) and urlresolvers.is_valid_path(\n request.path_info[:-1], urlconf))\n return False", "def test_redirect_register_home(self):\n with self.client as c:\n\n res = c.get(\"/\")\n self.assertEqual(res.status_code, 302)\n\n res = c.get(\"/users/tester1\")\n self.assertEqual(res.status_code, 302)\n\n res = c.get(\"/lists/111111\")\n self.assertEqual(res.status_code, 200)", "def test_redirects_shortlink_without_http_scheme(self):\n rv = self.post('www.seinfeld.com')\n assert '<a href=\"TheStakeOut\">TheStakeOut</a> is now short for <a href=\"www.seinfeld.com\">www.seinfeld.com</a>!' 
in rv.data\n rv = self.app.get('/TheStakeOut')\n assert rv.status_code == 302\n assert rv.location == 'http://www.seinfeld.com'", "def test_redirect_for_patient_home_route(self):\n\n result = self.client.get(\"/patient/1\", follow_redirects=True)\n self.assertEqual(result.status_code, 200)\n self.assertIn(b\"Account Details\", result.data)\n\n result = self.client.get(\"/patient/4\", follow_redirects=True)\n self.assertEqual(result.status_code, 200)\n self.assertIn(b\"not authorized\", result.data)", "def catch_all(path):\n return redirect('/', code=302)", "def test_basic(self):\n application_url = 'http://foo.com:1234'\n continue_url = ('http://foo.com:1234/my/album/of/pictures?'\n 'with=some&query=parameters')\n\n expected_location = (\n 'https://localhost:1443/login?continue='\n 'http%3A//foo.com%3A1234'\n '/my/album/of/pictures%3Fwith%3Dsome%26query%3Dparameters')\n\n def start_response(status, headers, exc_info=None):\n self.assertTrue(status.startswith('302'))\n headers = dict(headers)\n self.assertEqual({'Location': expected_location}, headers)\n self.assertEqual(None, exc_info)\n body = login.login_redirect(application_url, continue_url, start_response)\n\n self.assertEqual('', ''.join(body))", "def test_append_slash_slashless_resource(self):\n\n def get_response(req):\n return HttpResponse(\"Here's the text of the web page.\")\n\n request = self.rf.get(\"/noslash\")\n self.assertIsNone(CommonMiddleware(get_response).process_request(request))\n self.assertEqual(\n CommonMiddleware(get_response)(request).content,\n b\"Here's the text of the web page.\",\n )", "def test_append_slash_opt_out(self):\n request = self.rf.get(\"/sensitive_fbv\")\n self.assertEqual(CommonMiddleware(get_response_404)(request).status_code, 404)\n\n request = self.rf.get(\"/sensitive_cbv\")\n self.assertEqual(CommonMiddleware(get_response_404)(request).status_code, 404)", "def test_redirect_login_page(self):\n r = requests.get(self.url, allow_redirects=False)\n self.assertEqual(r.status_code, 302)\n self.assertRegexpMatches(r.headers['location'], '%s/login.*' % self.url)", "def assert_redirect_to_register_looks_correct(self, response):\r\n self.assertEqual(302, response.status_code)\r\n self.assertEqual('/' + pipeline.AUTH_ENTRY_REGISTER, response.get('Location'))", "def test_append_slash_no_redirect_on_POST_in_DEBUG_custom_urlconf(self):\n request = self.rf.get(\"/customurlconf/slash\")\n request.urlconf = \"middleware.extra_urls\"\n request.method = \"POST\"\n with self.assertRaisesMessage(RuntimeError, \"end in a slash\"):\n CommonMiddleware(get_response_404)(request)", "def redirected_to_url(url):\r\n def was_redirected(client, response, testcase):\r\n status(302)(client, response, testcase)\r\n testcase.assertEqual(\r\n response['Location'],\r\n url\r\n )\r\n return was_redirected", "def test_redirect(self):\r\n sess = FuturesSession()\r\n future = sess.get(httpbin('redirect-to?url=get'))\r\n self.assertIsInstance(future, Future)\r\n resp = future.result()\r\n self.assertIsInstance(resp, Response)\r\n self.assertEqual(200, resp.status_code)\r\n\r\n future = sess.get(httpbin('redirect-to?url=status/404'))\r\n resp = future.result()\r\n self.assertEqual(404, resp.status_code)", "def test_redirection(self):\n dashboard_url = reverse('dashboard')\n self.assertRedirects(self.response, dashboard_url)", "def assertRedirects(self, response, url):\n self.assert302(response)\n\n location = response.headers.get('Location')\n if url.startswith('http'):\n location = self.get_url(location)\n self.assertEqual(location, 
url)", "def redirection(l):\r\n l= str(l)\r\n if l.count('//')>1:\r\n return 1\r\n else:\r\n return 0", "def redirects_to(response, url):\n is_redirect = response.status_code == 302\n parsed_url = urlparse(response.get('Location'))\n is_url = parsed_url.path == url\n\n return is_redirect and is_url", "def test_append_slash_slashless_unknown(self):\n request = self.rf.get(\"/unknown\")\n response = CommonMiddleware(get_response_404)(request)\n self.assertEqual(response.status_code, 404)", "def test_parse_url_path() -> None:\n assert indieauth._parse_url(\"http://ex.com\").path == \"/\"", "def test_redirect(self):\n\n result = self.client.post(\"/login\", data={\"user_email\": \"bobbybob@gmail.com\", \"user_password\": \"1234\"},\n follow_redirects=True)\n self.assertIn(b\"I'd like to Select an Image to View Matches\", result.data)", "def test_redirect_postpage(self):\n with self.client:\n response=self.client.get('/post',follow_redirects=True)\n self.assertEqual(response.status_code, 200)", "def assert_redirect_to_login(path):\n app = HelperApp(server.message_app)\n\n # Assert that we get redirected\n response = app.get(path)\n assert response.status == \"302 Found\"\n\n # Make sure the redirect is going to the right place\n assert urlsplit(response.location).path == \"/login/\"", "def test_to_other_url(self):\n user = User(username='test', is_staff=True, is_superuser=True,\n is_active=True)\n user.set_password('test')\n user.full_clean()\n user.save()\n request = RequestFactory().get('/')\n response_302 = HttpResponseRedirect(redirect_to='/admin_mountpoint/')\n admin_instance = get_modeladmin(Iframe)\n new_response = admin_instance.maybe_fix_redirection(\n request=request, response=response_302, obj=user)\n self.assertEqual(new_response['X-Chunkadmin-Response'], 'not-chunkadmin') # noqa\n self.assertEqual(302, new_response.status_code)\n self.assertEqual('/admin_mountpoint/?_data_changed=1',\n new_response['Location'])", "def test_base_path(original_base_path, args):\n if args.skip_redirects:\n return original_base_path\n\n # WARNING: some redirects are hardcoded to production URLs.\n # Both staging and production will rate limit us.\n response = session.head(args.root_url + original_base_path, allow_redirects=True)\n\n if 200 <= response.status_code < 300:\n return response.url.replace('https://www.gov.uk', '').replace(args.root_url, '')\n elif response.status_code == 429:\n response.raise_for_status()\n else:\n if response.status_code not in (410,):\n sys.stderr.write(\"Unexpected response {} for {}\\n\".format(response.status_code, original_base_path))\n return None", "def redirector(status, start_response, exc_info=None):\n session['login.pre_uri'] = environ['PATH_INFO']\n session.save()\n start_response('302 Found',[(\"Location\",\"/login\"),(\"Content-type\",\"text\")])\n return []", "def test__safe_postlogin_redirect(self):\r\n HOST = 'testserver' # pylint: disable=C0103\r\n ONSITE1 = '/dashboard' # pylint: disable=C0103\r\n ONSITE2 = '/courses/org/num/name/courseware' # pylint: disable=C0103\r\n ONSITE3 = 'http://{}/my/custom/url'.format(HOST) # pylint: disable=C0103\r\n OFFSITE1 = 'http://www.attacker.com' # pylint: disable=C0103\r\n\r\n for redirect_to in [ONSITE1, ONSITE2, ONSITE3]:\r\n redir = _safe_postlogin_redirect(redirect_to, HOST)\r\n self.assertEqual(redir.status_code, 302)\r\n self.assertEqual(redir['location'], redirect_to)\r\n\r\n redir2 = _safe_postlogin_redirect(OFFSITE1, HOST)\r\n self.assertEqual(redir2.status_code, 302)\r\n self.assertEqual(\"/\", 
redir2['location'])", "def test_can_be_redirected(self):\n\n url = 'http://www.example.com'\n\n r = LiveRedirect(url=url,duration=HALF_DAY)\n r.save()\n\n TEST_URLS = [\n '%s/%s' % (self.live_server_url,r.slug),\n '%s/%s/' % (self.live_server_url,r.slug),\n ]\n\n for url in TEST_URLS:\n\n self.browser.get(url)\n\n body = self.browser.find_element_by_tag_name('body')\n\n # Check that it is not a 404 or 500\n self.assertNotIn('404',body.text)\n self.assertNotIn('500',body.text)\n\n # Slug page should always state what the url is\n self.assertIn(r.url, body.text, 'Link url not displayed on slug page!')\n\n # Slug page should always have a link to the correct page!\n links = self.browser.find_elements_by_tag_name('a')\n\n ok = False\n for link in links:\n if link.get_attribute('href').rstrip('/') == r.url.rstrip('/'):\n ok = True\n break\n\n self.failIf(not ok,'No link to target!')", "def test_shred_login():\n assert_redirect_to_login('/shred/')\n assert_not_redirect_to_login('/shred/')", "def contains_special_redirect(content, response_url):\n regs = [\n \"<meta\\s+http-equiv\\s?=\\s?[\\'\\\"]?refresh[\\'\\\"]?[\\s\\n]*content\\s?=\\s?[\\'\\\"]?[\\d];\\s?url\\s?=\\s?(.*?)\\\"?\\s?\\/??>\",\n \"This Cargo website is currently available here:\\s?<a href=[\\\"\\'](.*?)[\\\"\\']\"\n ]\n for reg in regs:\n p = re.compile(reg, re.IGNORECASE)\n match = re.search(p, content)\n if match != None:\n if len(match.group(1)) > 0:\n append_string = match.group(1).replace('\"', '').replace(\"'\", '')\n debug(append_string)\n if append_string.startswith(\"http\"):\n return append_string\n else:\n if not append_string.startswith(\"/\") and not response_url.endswith(\"/\"):\n append_string = \"/\" + append_string\n if append_string.startswith(\"/\") and response_url.endswith(\"/\"):\n append_string = append_string[1:]\n special_redirect = \"http://\" + response_url + append_string\n return special_redirect.strip()\n else:\n return False\n return False", "def ends_slash(url):\n return url if url.endswith(\"/\") else url + \"/\"", "def test_path(self):\n urlpath = url.URL.fromString(\"http://example.com/foo/bar?baz=quux#foobar\")\n self.assertEqual(urlpath.path, \"foo/bar\")\n urlpath = url.URL.fromString(\"http://example.com/foo%2Fbar?baz=quux#foobar\")\n self.assertEqual(urlpath.path, \"foo%2Fbar\")\n urlpath = url.URL.fromString(\"http://example.com/-_.!*'()?baz=quux#foo\")\n self.assertEqual(urlpath.path, \"-_.!*'()\")", "def validate(self, uri_path, fs_path):\n if not os.path.exists(fs_path):\n raise Response(404)\n elif os.path.isdir(fs_path) and not uri_path.endswith('/'):\n new_location = '%s/' % uri_path\n response = Response(301)\n response.headers['Location'] = new_location\n raise response\n return fs_path", "def test_append_slash_quoted_custom_urlconf(self):\n request = self.rf.get(quote(\"/customurlconf/needsquoting#\"))\n request.urlconf = \"middleware.extra_urls\"\n r = CommonMiddleware(get_response_404)(request)\n self.assertIsNotNone(\n r,\n \"CommonMiddleware failed to return APPEND_SLASH redirect using \"\n \"request.urlconf\",\n )\n self.assertEqual(r.status_code, 301)\n self.assertEqual(r.url, \"/customurlconf/needsquoting%23/\")", "def assert_redirect_to_login_looks_correct(self, response):\r\n self.assertEqual(302, response.status_code)\r\n self.assertEqual('/' + pipeline.AUTH_ENTRY_LOGIN, response.get('Location'))", "def is_redirect(response: aiohttp.ClientResponse) -> bool:\n return response.status in (300, 301, 302, 303, 307)", "def ensure_slash(text):\n if text.startswith('/'):\n 
return text\n return '/' + text", "def contains_redirect(content, _url):\n frame_url = contains_frame_redirect(content) if (len(content) < 10000000) else False\n if frame_url:\n debug(\"frame_url: {}\".format(frame_url))\n return frame_url\n\n meta_redir = contains_special_redirect(content, _url)\n if meta_redir:\n debug(\"metaredir: {}\".format(meta_redir))\n return meta_redir\n\n return False", "def redirect(uri):\n response = HttpResponse('', status=302)\n response['Location'] = uri\n return response", "def test_normalize_path(self):\n self.assertEqual(normalize_path(\"//////\"), \"/\")\n self.assertEqual(normalize_path(\"//\"), \"/\")\n self.assertEqual(normalize_path(\"//foo/bar//baz\"), \"/foo/bar/baz\")\n self.assertEqual(normalize_path(\"//foo/bar//baz/\"), \"/foo/bar/baz/\")\n self.assertEqual(normalize_path(\"//f%20oo/bar\"), \"/f oo/bar\")", "def test_redirect_if_not_logged_in(self):\n response = self.client.get(reverse('search:do_search'))\n \n self.assertRedirects(response, '/accounts/login/?next=/search/result/')", "def test_append_slash_have_slash_custom_urlconf(self):\n request = self.rf.get(\"/customurlconf/slash/\")\n request.urlconf = \"middleware.extra_urls\"\n self.assertIsNone(CommonMiddleware(get_response_404).process_request(request))\n self.assertEqual(CommonMiddleware(get_response_404)(request).status_code, 404)", "def test_non_ascii_query_string_does_not_crash(self):\n request = self.rf.get(\"/slash\")\n request.META[\"QUERY_STRING\"] = \"drink=café\"\n r = CommonMiddleware(get_response_empty).process_request(request)\n self.assertIsNone(r)\n response = HttpResponseNotFound()\n r = CommonMiddleware(get_response_empty).process_response(request, response)\n self.assertEqual(r.status_code, 301)", "def test_contact_landing_404(self):\n response = self.client.get(reverse(\n 'contact_landing', kwargs={'slug': 'sssss'}))\n self.assertEqual(response.status_code, 404)\n response = self.client.get(\n reverse('contact_landing', kwargs={'slug': 'sss--ss'}))\n self.assertEqual(response.status_code, 404)", "def test_if_home_is_successful(client):\n\n url = reverse(\"home\")\n response = client.get(url)\n assert response.status_code == 200", "def test_absolute_url(self):\n response = self.client.get(self.htsv.get_absolute_url())\n self.assertEqual(response.status_code, 200)", "def test_action_redirects(self):\n req = self.req(\"post\", \"/the/url\", data={\"action-doit\": \"3\"})\n\n res = self.view(req)\n\n self.assertEqual(res.status_code, 302)\n self.assertEqual(res[\"Location\"], \"/the/url\")", "def redirected(path, status=302, ssl=False):\r\n def was_redirected(client, response, testcase):\r\n if ssl:\r\n client.defaults['HTTPS'] = 'on'\r\n testcase.assertRedirects(response, path, status_code=status)\r\n return was_redirected", "def redirect(url, status=None):\n raise cherrypy.HTTPRedirect(url, status)", "def ensure_path(self, page):\n return page if page.startswith('/') else \"/{0}\".format(page)", "def test_wiki_redirect(self):\r\n self.login(self.student, self.password)\r\n\r\n self.enroll(self.toy)\r\n\r\n referer = reverse(\"progress\", kwargs={'course_id': self.toy.id.to_deprecated_string()})\r\n destination = reverse(\"wiki:get\", kwargs={'path': 'some/fake/wiki/page/'})\r\n\r\n redirected_to = referer.replace(\"progress\", \"wiki/some/fake/wiki/page/\")\r\n\r\n resp = self.client.get(destination, HTTP_REFERER=referer)\r\n self.assertEqual(resp.status_code, 302)\r\n\r\n self.assertEqual(resp['Location'], 'http://testserver' + redirected_to)\r\n\r\n # Now we test 
that the student will be redirected away from that page if the course doesn't exist\r\n # We do this in the same test because we want to make sure the redirected_to is constructed correctly\r\n # This is a location like /courses/*/wiki/* , but with an invalid course ID\r\n bad_course_wiki_page = redirected_to.replace(self.toy.location.course, \"bad_course\")\r\n\r\n resp = self.client.get(bad_course_wiki_page, HTTP_REFERER=referer)\r\n self.assertEqual(resp.status_code, 302)\r\n self.assertEqual(resp['Location'], 'http://testserver' + destination)", "def test_realpath(self):\n print real_upath(\"ref with space\")\n self.assertTrue(real_upath(\"ref with space\").endswith(\"ref\\ with\\ space\"))\n self.assertTrue(real_upath(\"ref\\ with\\ space\").endswith(\"ref\\ with\\ space\"))\n self.assertTrue(real_ppath(\"ref with space\").endswith(\"ref with space\"))\n self.assertTrue(real_ppath(\"ref\\ with\\ space\").endswith(\"ref with space\"))", "def detect_redirection(server, session, logger):\n response = session.get(server.rstrip('/') + '/data/projects')\n logger.debug('Response url: {}'.format(response.url))\n response_url = response.url[:-13]\n if response_url != server and response_url != server + '/':\n logger.warning('Detected a redirect from {0} to {1}, using {1} from now on'.format(server, response_url))\n return response_url", "def test_url_path(self):\n response = self.client.get('/planner/recipes/1/')\n self.assertEqual(response.status_code, 200)", "def test_api_redirects_to_docs(self):\n\n\t\twith self.client:\n\t\t\tget_doc = self.client.get('/')\n\t\t\tself.assertTrue(get_doc.status_code == 302)", "def test_login_openid_handle_redirection(self):\r\n response = self._send_bad_redirection_login()\r\n self.assertEquals(response.status_code, 302)", "def test_routing_policy_replace_path(api_client):\n response = api_client().get(\"/anything/anything\")\n assert response.status_code == 200\n\n echoed_request = EchoedRequest.create(response)\n assert echoed_request.path == \"/anything\"", "def test_non_ideal_inputs():\n assert normalize_url(\"example.com\") == \"http://example.com/\"\n assert normalize_url(\"example.com/abc\") == \"http://example.com/abc\"\n assert normalize_url(\"//example.com/abc\") == \"http://example.com/abc\"", "def test_absolute_url_without_request(self):\n context = {}\n\n abs_url = absolute_url(context, \"/ciao/\")\n self.assertEqual(abs_url, \"/ciao/\")\n self.assertNotEqual(abs_url, \"/\")", "def test_get_redirect(test_case, page):\n with test_case.app.test_client() as c:\n test_case.assertEqual(302, c.get('dashboard/{}'.format(page)).status_code)", "def test_url_path(self):\n url = create_url(url=\"http://www.example.com\", path=\"path/to/resource\")\n self.assertEqual(url, \"http://www.example.com/path/to/resource\")", "def test_redirect_with_empty_cart(self):\n response = self.client.get(\"/cart/\")\n self.assertEqual(response.status_code, 302)", "def test_append_slash_no_redirect_on_POST_in_DEBUG(self):\n msg = \"maintaining %s data. 
Change your form to point to testserver/slash/\"\n request = self.rf.get(\"/slash\")\n request.method = \"POST\"\n with self.assertRaisesMessage(RuntimeError, msg % request.method):\n CommonMiddleware(get_response_404)(request)\n request = self.rf.get(\"/slash\")\n request.method = \"PUT\"\n with self.assertRaisesMessage(RuntimeError, msg % request.method):\n CommonMiddleware(get_response_404)(request)\n request = self.rf.get(\"/slash\")\n request.method = \"PATCH\"\n with self.assertRaisesMessage(RuntimeError, msg % request.method):\n CommonMiddleware(get_response_404)(request)", "def test_normalized_urls():\n assert normalize_url(\"http://example.com/\") == \"http://example.com/\"", "def ensureOneSlash(s):\n\treturn s.rstrip(\"/\")+\"/\"", "def assert_redirect_to_provider_looks_correct(self, response):\r\n self.assertEqual(302, response.status_code)\r\n self.assertTrue(response.has_header('Location'))", "def test_index(self):\n r = self.client.get('/')\n self.assertEqual(r.status_code, 302)", "def test_edge_redirect_to_login(self):\r\n\r\n request = self.factory.get('/')\r\n request.user = AnonymousUser()\r\n\r\n # HTTP Host changed to edge.\r\n request.META[\"HTTP_HOST\"] = \"edge.edx.org\"\r\n response = index(request)\r\n\r\n # Response should be instance of HttpResponseRedirect.\r\n self.assertIsInstance(response, HttpResponseRedirect)\r\n # Location should be \"/login\".\r\n self.assertEqual(response._headers.get(\"location\")[1], \"/login\")", "def redirect_view(request):\n path = request.GET.get(\"to\") or \"/\"\n return redirect(path if path.startswith(\"/\") else f\"/{path}\", permanent=True)", "def test_redirect_if_not_logged_in(self):\n response = self.client.get(reverse('my-borrowed'))\n self.assertRedirects(\n response,\n '/accounts/login/?next=/catalog/mybooks/'\n )", "def test_mineral_list_redirect(self):\n resp = self.client.get(reverse('minerals:list'))\n self.assertEqual(resp.status_code, 302)", "def test_redirect(self):\n\n result = self.client.post(\"/registration\", data={\"first_name\": \"Bobby\", \"last_name\": \"Bob\", \"email\": \"bobbers@gmail.com\", \"password\": \"1234\",\n \"birthday_month\": \"January\", \"birthday_day\": 12, \"birthday_year\": 1991}, follow_redirects=True)\n self.assertIn(b\"Email address\", result.data)" ]
[ "0.7745988", "0.7601577", "0.749149", "0.69618195", "0.69307077", "0.68707025", "0.6825404", "0.66935575", "0.66655666", "0.6664395", "0.6626923", "0.6614033", "0.65820646", "0.65820646", "0.6524852", "0.65176207", "0.6499768", "0.64643717", "0.64202327", "0.6407755", "0.6398387", "0.6393049", "0.634507", "0.6342756", "0.63416094", "0.6339316", "0.6338891", "0.6335258", "0.63273966", "0.63058966", "0.6303642", "0.6297208", "0.627749", "0.626543", "0.62523186", "0.6232873", "0.6203312", "0.62002456", "0.61969066", "0.6165525", "0.6145896", "0.61346084", "0.6124077", "0.6110931", "0.611036", "0.6096171", "0.6076292", "0.60666883", "0.6041674", "0.6029842", "0.6017436", "0.6007792", "0.59686226", "0.5957761", "0.5950977", "0.5947412", "0.59381706", "0.59180665", "0.59063977", "0.5883906", "0.5866194", "0.58657175", "0.5854244", "0.5854066", "0.58473575", "0.5835963", "0.58034635", "0.5802274", "0.5800893", "0.5778162", "0.57748663", "0.5773612", "0.57734257", "0.5760888", "0.57602715", "0.5730362", "0.5728612", "0.57171965", "0.57051706", "0.5698938", "0.5689726", "0.5680875", "0.56783503", "0.5676821", "0.56738526", "0.5669843", "0.56569", "0.5656569", "0.565606", "0.5652365", "0.56281006", "0.56217253", "0.5620249", "0.5619179", "0.5610698", "0.5610058", "0.560483", "0.5592861", "0.5583396", "0.5579063", "0.55788535" ]
0.0
-1
Test initialization under Q.
def test_init_q(self):
    riskfree = .01
    lmbd = .01
    lmbd_s = .5
    lmbd_y = .5
    mean_v = .5
    kappa_s = 1.5
    kappa_y = .5
    eta_s = .1
    eta_y = .01
    rho = -.5
    param = CentTendParam(riskfree=riskfree, lmbd=lmbd,
                          lmbd_s=lmbd_s, lmbd_y=lmbd_y, mean_v=mean_v,
                          kappa_s=kappa_s, kappa_y=kappa_y,
                          eta_s=eta_s, eta_y=eta_y, rho=rho, measure='Q')
    kappa_sq = kappa_s - lmbd_s * eta_s
    kappa_yq = kappa_y - lmbd_y * eta_y
    scale = kappa_s / kappa_sq
    self.assertEqual(param.measure, 'Q')
    self.assertEqual(param.riskfree, riskfree)
    self.assertEqual(param.lmbd, 0)
    self.assertEqual(param.lmbd_s, lmbd_s)
    self.assertEqual(param.lmbd_y, lmbd_y)
    self.assertEqual(param.mean_v, mean_v * kappa_y / kappa_yq * scale)
    self.assertEqual(param.kappa_s, kappa_sq)
    self.assertEqual(param.kappa_y, kappa_yq)
    self.assertEqual(param.eta_s, eta_s)
    self.assertEqual(param.eta_y, eta_y * scale**.5)
    self.assertEqual(param.rho, rho)
    self.assertTrue(param.is_valid())
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        param.convert_to_q()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def init(q: qreg) -> control:\n\n return", "def test_init(self):\r\n sq = SeqQualBad('Q', None)\r\n self.assertEqual(sq.Name, 'Q')\r\n self.assertEqual(sq.F, None)\r\n self.assertEqual(sq.FailedIds, [])", "def __init__(self,Q=None):\n \n self.Q = Q", "def test_01_Init(self):\n pass", "def test_init(self):\n self.assertIsNotNone(DatabaseIntermediary(), self.ec.db)", "def test_initialise(self):\n # Make sure the variables are all updated\n assert isinstance(gcmc_system_sampler.context, Context)\n assert isinstance(gcmc_system_sampler.positions, Quantity)\n assert isinstance(gcmc_system_sampler.simulation_box, Quantity)\n\n return None", "def test_initialise(self):\n\n # Make sure the variables are all updated\n assert isinstance(gcmc_sphere_sampler.context, Context)\n assert isinstance(gcmc_sphere_sampler.positions, Quantity)\n assert isinstance(gcmc_sphere_sampler.sphere_centre, Quantity)\n\n return None", "def setUp(self):\n # Simple QP problem\n sp.random.seed(4)\n\n self.n = 30\n self.m = 30\n self.P = sparse.csc_matrix((self.n, self.n))\n self.q = np.zeros(self.n)\n self.A = sparse.random(self.m, self.n, density=1.0, format='csc')\n self.u = np.random.rand(self.m)\n self.l = self.u\n self.opts = {'verbose': False,\n 'eps_abs': 1e-06,\n 'eps_rel': 1e-06,\n 'scaling': True,\n 'alpha': 1.6,\n 'max_iter': 5000,\n 'polish': False,\n 'warm_start': True,\n 'polish_refine_iter': 4}\n self.model = osqp.OSQP()\n self.model.setup(P=self.P, q=self.q, A=self.A, l=self.l, u=self.u,\n **self.opts)", "def test_init(self):\n self.assertEqual(self.foo._base_cmd, 'sleep 10; hostname')\n self.assertEqual(self.foo._base_args, {})\n self.assertEqual(self.foo.InputArgs, {})\n self.assertEqual(self.foo.OracleJobName, 'job1')", "def setUp(self):\n self.number_of_tests = 20 # number of pseudo-random seeds\n self.max_nq = 2 # maximal number of qubits to check", "def test_init_default(self):\n self._test_init_default()", "def experiment_init(self):\n pass", "def initialize(self, **kwargs):", "def initialize(self):\n\t\tpass", "def init():", "def initialise(self):", "def init(self) -> None:", "def __init__(self, qobj):\n self._qobj = qobj\n self._configuration = None # IMPLEMENT for your backend", "def test_init(self):\n ex = Experiment(note=\"Test\")\n self.assertEqual(ex.note, \"Test\")", "def do_init(self):\n\n pass", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def initialize(self):\r\n pass", "def initialize(self):\r\n pass", "def initialize(self):\n pass", "def initialize(self):\n pass", "def initialize(self):\n pass", "def initialize(self):\n pass", "def initialize(self):\n pass", "def test_init():\n from parenthetics import Queue\n my_queue = Queue()\n assert isinstance(my_queue, Queue)", "def initialize(self):", "def initialize(self):", "def initialize(self):", "def initialize(self):", "def init(self) -> None:\n ...", "def initialize(self):\n pass # pragma: no cover", "def _initialise_run(self) -> None:", "def test_init(self):\n with self.assertRaises(IncompetentQiitaDeveloperError):\n BaseSample('SKM7.640188', SampleTemplate(1))", "def test_constructor(self):\n pass", "def __init__(self):\n raise NoInitiation", "def iaq_init(self) -> List[int]:\n # name, command, signals, delay\n self._run_profile((\"iaq_init\", [0x20, 0x03], 0, 0.01))", "def init():\n pass", "def test_AFQ_init():\n for n_sessions in [1, 2, 3]:\n n_subjects = 3\n bids_path = 
create_dummy_bids_path(n_subjects, n_sessions,\n (n_subjects != n_sessions))\n my_afq = api.AFQ(bids_path,\n dmriprep=\"synthetic\")\n\n for subject in range(n_subjects):\n sub = f\"0{subject+1}\"\n if n_subjects == n_sessions:\n npt.assert_equal(\n len(my_afq.wf_dict[sub][sub]),\n 40)\n else:\n for session in range(n_sessions):\n if n_sessions == 1:\n sess = \"None\"\n else:\n sess = f\"0{session+1}\"\n npt.assert_equal(\n len(my_afq.wf_dict[sub][sess]),\n 40)", "def test_init(self):\n try:\n VeilRestPaginator(name='name', ordering='ordering', limit=10, offset=5)\n except TypeError:\n raise AssertionError()\n else:\n assert True\n try:\n VeilRestPaginator(name=123, ordering='ordering', limit=10, offset=5)\n except TypeError:\n assert True\n else:\n raise AssertionError()", "def test_constructor(self):\n p = Priorityq()\n self.assertIsInstance(p, Priorityq)", "def test_init(self):\n sample = PrepSample(self.sample_id, self.prep_template)\n # Check that the internal id have been correctly set\n self.assertEqual(sample._id, '1.SKB8.640193')\n # Check that the internal template have been correctly set\n self.assertEqual(sample._md_template, self.prep_template)\n # Check that the internal dynamic table name have been correctly set\n self.assertEqual(sample._dynamic_table, \"prep_1\")", "def test_init(self):\n self.view.__init__()\n self.assertIsInstance(self.view.questionnaire, Questionnaire)\n self.assertEqual(self.view.questionnaire, self.questionnaire)", "def initialize(self, *args, **kwargs):", "def test_init__default(self):\n fact_query = FactQuery()\n self.assertIsNone(fact_query.parsed_query)", "def initialize(self):\n pass", "def experiment_init(self):\n raise NotImplementedError(\"this needs to be implemented!\")", "def _init(self):\n pass", "def test_init(self):\n db = database.Database()\n self.assertTrue(isinstance(db, database.Database))\n self.assertTrue(db._connection is self.mocked_connection)\n self.assertTrue(db._cursor is self.mocked_cursor)", "def test_init_with_fire_villan(self):\n pass", "def setUp(self):\n self.data = DatabaseIntermediary()", "def initialize(self): \r\n pass", "def test_init(self):\n self.assertEqual(self.new_quote.id,23)\n self.assertEqual(self.new_quote.author,'J.F.Kenedy')\n self.assertEqual(self.new_quote.quote,'never Give up')", "def initialize(self, *args, **kwargs):\n self.initialized = True", "def __init__(self, name, q_arg):\n super().__init__(name)\n self._q_arg = q_arg\n pass", "def __init__(self, name, q_arg):\n super().__init__(name)\n self._q_arg = q_arg\n pass", "def request_initialization(self) -> global___Snippet.SimpleRequestInitialization:", "def request_initialization(self) -> global___Snippet.SimpleRequestInitialization:", "def request_initialization(self) -> global___Snippet.SimpleRequestInitialization:", "def request_initialization(self) -> global___Snippet.SimpleRequestInitialization:", "def initialize(self):\n raise NotImplementedError", "def initialize(self):\n raise NotImplementedError", "def initialize(self):\n raise NotImplementedError", "def initTest(self, myargs):\n return", "def __init__(self, name, q_arg):\n super().__init__(name)\n self._q_arg = q_arg", "def test_initialization(self):\n self.assertEqual(self.widget.buffer, '')\n self.assertEqual(self.widget.cursor, 0)\n self.assertEqual(self.widget.killRing, [])\n self.assertEqual(self.widget.getInputHistory(), [])", "def _initialize(self):\n pass", "def _initialize(self):\n pass", "def _initialize(self):\n pass", "def _init_q(self, ttt, state):\n if state not in 
self.Q:\n initial = dict((action, 0.0) for action, _ in ttt.moves(self.name, state))\n\n self.Q[state] = initial\n self.visited[state] = dict(initial)", "def __init__(self):\n self.setup_called = False", "def __init__(self):\n self._initialized = False\n self.init()", "def __init__(self):\n self._initialized = False\n self.init()", "def __init__(self, *args):\n _snap.TFltQ_swiginit(self, _snap.new_TFltQ(*args))", "def init(self):", "def init(self):", "def _init(self):\n raise NotImplementedError", "def init(self):\n return True", "def init(self):\n raise NotImplementedError", "def init(self):\n raise NotImplementedError", "def _real_initialize(self):\n pass", "def _env_setup(self, initial_qpos):\n raise NotImplementedError()", "def initialise(self):\r\n return", "def initialise(self):\r\n return", "def setUp(self):\n self.a = backend.dbconnection.DBConnect()", "def setUp(self):\n self.p_state = p_state", "def init(self, args):\n return True", "def testInitializer(self):\n request = http.HttpRequest()\n\n data, check, mutator = initialize.MELANGE_INITIALIZER.initialize(\n request, [], {})\n self.assertEqual(request, data.request)\n self.assertEqual(data, check.data)\n self.assertEqual(data, mutator.data)", "def test_preferences_init(self):\n self.preferences.init(\n exchange_params_by_currency_id=self.exchange_params,\n utility_params_by_good_id=self.utility_params,\n tx_fee=self.tx_fee,\n )\n assert self.preferences.utility_params_by_good_id is not None\n assert self.preferences.exchange_params_by_currency_id is not None\n assert self.preferences.transaction_fees[\"seller_tx_fee\"] == 4\n assert self.preferences.transaction_fees[\"buyer_tx_fee\"] == 5\n assert self.preferences.is_initialized", "def initialize(self) -> None:\n pass" ]
[ "0.79838145", "0.74229264", "0.7260709", "0.7092228", "0.69702667", "0.69632655", "0.6950236", "0.68146306", "0.68144906", "0.67040694", "0.66863024", "0.6641684", "0.6620748", "0.6618044", "0.6603013", "0.6602111", "0.65866554", "0.65761757", "0.6568877", "0.6567648", "0.65524334", "0.65524334", "0.65524334", "0.65524334", "0.65524334", "0.65524334", "0.65524334", "0.65524334", "0.6546493", "0.6546493", "0.6515894", "0.6515894", "0.6515894", "0.6515894", "0.6515894", "0.6484275", "0.6482771", "0.6482771", "0.6482771", "0.6482771", "0.6475596", "0.64606553", "0.6460449", "0.6456697", "0.64526033", "0.64407814", "0.64391154", "0.6437065", "0.6436812", "0.64346886", "0.6434057", "0.6426033", "0.64210117", "0.6416675", "0.64160085", "0.6411455", "0.64049244", "0.63998216", "0.6393037", "0.6390242", "0.6389292", "0.6389277", "0.63850296", "0.6384081", "0.6383675", "0.6383675", "0.63824236", "0.63824236", "0.63824236", "0.63824236", "0.637694", "0.637694", "0.637694", "0.63708085", "0.6368879", "0.63654417", "0.6365084", "0.6365084", "0.6365084", "0.6362082", "0.6360258", "0.6359903", "0.6359903", "0.63555", "0.63503516", "0.63503516", "0.6346601", "0.63456553", "0.63411367", "0.63411367", "0.6331298", "0.6325714", "0.6321984", "0.6321984", "0.63217515", "0.6319931", "0.63144976", "0.6308679", "0.6305451", "0.6301449" ]
0.65442336
30
Test from theta under Q.
def test_from_theta_q(self):
    riskfree = .01
    lmbd = .01
    lmbd_s = .5
    lmbd_y = .5
    mean_v = .5
    kappa_s = 1.5
    kappa_y = .5
    eta_s = .1
    eta_y = .01
    rho = -.5
    theta = [riskfree, mean_v, kappa_s, kappa_y, eta_s, eta_y,
             rho, lmbd, lmbd_s, lmbd_y]
    param = CentTendParam.from_theta(theta, measure='Q')
    kappa_sq = kappa_s - lmbd_s * eta_s
    kappa_yq = kappa_y - lmbd_y * eta_y
    scale = kappa_s / kappa_sq
    self.assertEqual(param.measure, 'Q')
    self.assertEqual(param.riskfree, riskfree)
    self.assertEqual(param.lmbd, 0)
    self.assertEqual(param.lmbd_s, lmbd_s)
    self.assertEqual(param.lmbd_y, lmbd_y)
    self.assertEqual(param.mean_v, mean_v * kappa_y / kappa_yq * scale)
    self.assertEqual(param.kappa_s, kappa_sq)
    self.assertEqual(param.kappa_y, kappa_yq)
    self.assertEqual(param.eta_s, eta_s)
    self.assertEqual(param.eta_y, eta_y * scale**.5)
    self.assertEqual(param.rho, rho)
    self.assertTrue(param.is_valid())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def theta_given_s(theta, q):\n if q == 0:\n return .3333\n else:\n if theta == 0:\n return 0.25\n elif theta == 1:\n return 0.25\n else:\n return 0.5", "def test_q(self):\n assert np.allclose(self.stepper.q, self.ODE.exact(self.stepper.t), rtol=1e-3, atol=1e-5)", "def theta():\n pass", "def theta(flag, S, K, t, r, sigma, q): \n\n b = r-q\n\n return numerical_theta(flag, S, K, t, r, sigma, b, f)", "def confirm_Alex_fast(p, q, r):\n if p*q*r < 0:\n return False\n if q*r + p*r + p*q -1 == 0:\n return True\n else:\n return False", "def _Q(self, chi, eta, L):\n return self.r**2 * chi**2 * np.cos(eta)**2 + L**2 / np.tan(self.theta)**2", "def test_qing(self):\n fun = get_problem('qing', self.dimension, -500, 500)\n self.assertAlmostEqual(fun(self.array10), 584.0, delta=1e-4)", "def evaluate(data, theta, beta):\n pred = []\n for i, q in enumerate(data[\"question_id\"]):\n u = data[\"user_id\"][i]\n x = (theta[u] - beta[q]).sum()\n p_a = sigmoid(x)\n pred.append(p_a >= 0.5)\n return np.sum((data[\"is_correct\"] == np.array(pred))) \\\n / len(data[\"is_correct\"])", "def a_q(self, phi, ci, tl):\n\t return (self.j(phi, tl)*(ci - self.gamma(tl)))/(4.*(ci + 2.*self.gamma(tl)))", "def Qfun(Phieq,Phi,Phibar,taurad):\n #note Q is different from Perez-Becker and Showman by a factor of g (for consistency with Phi vs H)\n Q=(1/taurad)*(Phieq-(Phi+Phibar))\n\n return Q", "def resultant(P, Q):\n return np.linalg.det(P.sylvester(Q))", "def q_criterion(a):\n print(\"Detection method: Q criterion\")\n Q = np.zeros((a.u.shape[0], a.u.shape[1]))\n print(a.u.shape[0], a.u.shape[1])\n #print(Q.shape)\n for i in range(a.u.shape[0]):\n for j in range(a.u.shape[1]):\n Q[i, j] = -0.5*(a.derivative['dudx'][i, j]**2 + a.derivative['dvdy'][i, j]**2) \\\n - a.derivative['dudy'][i, j] * a.derivative['dvdx'][i, j]\n return Q", "def qtf(self, vw, th, gp, psi_l, lai, dt):\n\t\t#if the amount of water in tank is less than amount that will be absorbed by plant in timestep dt, then what's left will be absorbed \n\t qtt = th - self.qwf(vw, th, gp, psi_l, lai, dt)\n\t if self.tx*self.ZT*10**6 <= 0:\n\t return 0.\n\t elif self.tx*self.ZT*10**6 <= qtt*dt:\n\t return (self.tx*self.ZT*10**6/dt)\n\t else:\n\t return qtt", "def RXX(self, theta:Union[int, float], qubit_expr): \n self.apply_gate_operation(RXX(theta), qubit_expr)", "def RZZ(self, theta:Union[int, float], qubit_expr): \n self.apply_gate_operation(RZZ(theta), qubit_expr)", "def Q1_test():\n A, p1, p2 = [0,0], [2,4], [6,5]\n return (distance(A,p1) > 4.472135) and (distance(p1,p2) < 4.472136)", "def compute_Q_value(self, phi, act) -> float:\n q_vec = phi @ self.Wq\n return q_vec[act]", "def test_convert_to_q(self):\n\n riskfree = .01\n lmbd = .01\n lmbd_s = .5\n lmbd_y = .5\n mean_v = .5\n kappa_s = 1.5\n kappa_y = .5\n eta_s = .1\n eta_y = .01\n rho = -.5\n\n theta = [riskfree, mean_v, kappa_s, kappa_y, eta_s, eta_y,\n rho, lmbd, lmbd_s, lmbd_y]\n param = CentTendParam.from_theta(theta)\n param.convert_to_q()\n\n kappa_sq = kappa_s - lmbd_s * eta_s\n kappa_yq = kappa_y - lmbd_y * eta_y\n scale = kappa_s / kappa_sq\n\n self.assertEqual(param.measure, 'Q')\n self.assertEqual(param.riskfree, riskfree)\n self.assertEqual(param.lmbd, 0)\n self.assertEqual(param.lmbd_s, lmbd_s)\n self.assertEqual(param.lmbd_y, lmbd_y)\n self.assertEqual(param.mean_v, mean_v * kappa_y / kappa_yq * scale)\n self.assertEqual(param.kappa_s, kappa_sq)\n self.assertEqual(param.kappa_y, kappa_yq)\n self.assertEqual(param.eta_s, eta_s)\n self.assertEqual(param.eta_y, eta_y * scale**.5)\n 
self.assertEqual(param.rho, rho)\n self.assertTrue(param.is_valid())", "def _q_x(self):\n lambda_r = self.latt_par['lambda_r'].value \n return 2*np.pi*self.k/lambda_r", "def RXX(self, theta:Union[int, float], qubit_expr): \n self.apply_gate_operation(RYY(theta), qubit_expr)", "def Q(self, k, x):\n g = np.asarray(self.g(k, x))\n Q = g @ g.T\n return Q", "def testQDA(self):\n qdaObj = qda.QDA(self.data, self.classes)\n qdaObj.fit_model()\n correctAns = np.array([5.01, 3.42, 1.46, .24])\n npTest = np.testing.assert_array_almost_equal(qdaObj.fitted_model.means_[0], correctAns, decimal=2)\n self.assertEqual(npTest, None)", "def test_step(self):\n sampler = Sampler()\n estimator = Estimator()\n fidelity_primitive = ComputeUncompute(sampler)\n pvqd = PVQD(\n fidelity_primitive,\n self.ansatz,\n self.initial_parameters,\n estimator,\n optimizer=L_BFGS_B(maxiter=100),\n )\n\n # perform optimization for a timestep of 0, then the optimal parameters are the current\n # ones and the fidelity is 1\n theta_next, fidelity = pvqd.step(\n self.hamiltonian,\n self.ansatz,\n self.initial_parameters,\n dt=0.0,\n initial_guess=np.zeros_like(self.initial_parameters),\n )\n\n self.assertTrue(np.allclose(theta_next, self.initial_parameters))\n self.assertAlmostEqual(fidelity, 1)", "def advantage(self, state, Q: torch.Tensor = None):\n return Q - Q.max()\n # return Q - torch.matmul(self.π.pmf(state, action_values=Q), Q)", "def f(t,x,p,q):\n return p[1] + q[0]*x", "def f(self, x, theta):\n raise NotImplementedError(\n \"f has not been implemented for this Experiment\")", "def test_pauliz_expectation_analytic(self, device, tol):\n n_wires = 2\n dev = device(n_wires)\n dev_def = qml.device(\"default.qubit\", wires=n_wires)\n\n if dev.name == dev_def.name:\n pytest.skip(\"Device is default.qubit.\")\n\n supports_tensor = (\n \"supports_tensor_observables\" in dev.capabilities()\n and dev.capabilities()[\"supports_tensor_observables\"]\n )\n\n if not supports_tensor:\n pytest.skip(\"Device does not support tensor observables.\")\n\n if dev.shots is not None:\n pytest.skip(\"Device is in non-analytical mode.\")\n\n theta = 0.432\n phi = 0.123\n\n def circuit(theta, phi):\n qml.RX(theta, wires=[0])\n qml.RX(phi, wires=[1])\n qml.CNOT(wires=[0, 1])\n return qml.expval(qml.PauliZ(wires=0) @ qml.PauliZ(wires=1))\n\n qnode_def = qml.QNode(circuit, dev_def)\n qnode = qml.QNode(circuit, dev)\n\n grad_def = qml.grad(qnode_def, argnum=[0, 1])\n grad = qml.grad(qnode, argnum=[0, 1])\n\n assert np.allclose(qnode(theta, phi), qnode_def(theta, phi), atol=tol(dev.shots))\n assert np.allclose(grad(theta, phi), grad_def(theta, phi), atol=tol(dev.shots))", "def test_differentiable_expand(self, execute_kwargs, tol):\n\n class U3(qml.U3):\n def expand(self):\n theta, phi, lam = self.data\n wires = self.wires\n return [\n qml.Rot(lam, theta, -lam, wires=wires),\n qml.PhaseShift(phi + lam, wires=wires),\n ]\n\n def cost_fn(a, p, device):\n qscript = qml.tape.QuantumScript(\n [qml.RX(a, wires=0), U3(*p, wires=0)], [qml.expval(qml.PauliX(0))]\n )\n qscript = qscript.expand(stop_at=lambda obj: device.supports_operation(obj.name))\n return execute([qscript], device, **execute_kwargs)[0]\n\n a = jax.numpy.array(0.1)\n p = jax.numpy.array([0.1, 0.2, 0.3])\n\n dev = qml.device(\"default.qubit\", wires=1)\n res = jax.jit(cost_fn, static_argnums=2)(a, p, device=dev)\n expected = np.cos(a) * np.cos(p[1]) * np.sin(p[0]) + np.sin(a) * (\n np.cos(p[2]) * np.sin(p[1]) + np.cos(p[0]) * np.cos(p[1]) * np.sin(p[2])\n )\n assert np.allclose(res, expected, 
atol=tol, rtol=0)\n\n jac_fn = jax.jit(jax.grad(cost_fn, argnums=(1)), static_argnums=2)\n res = jac_fn(a, p, device=dev)\n expected = jax.numpy.array(\n [\n np.cos(p[1]) * (np.cos(a) * np.cos(p[0]) - np.sin(a) * np.sin(p[0]) * np.sin(p[2])),\n np.cos(p[1]) * np.cos(p[2]) * np.sin(a)\n - np.sin(p[1])\n * (np.cos(a) * np.sin(p[0]) + np.cos(p[0]) * np.sin(a) * np.sin(p[2])),\n np.sin(a)\n * (np.cos(p[0]) * np.cos(p[1]) * np.cos(p[2]) - np.sin(p[1]) * np.sin(p[2])),\n ]\n )\n assert np.allclose(res, expected, atol=tol, rtol=0)", "def Y(t, p, q):\n \n if t <= 0:\n return float('inf')\n \n if q == 1:\n return (t**(p+1) - 1) / (p * (p+1)) - np.log(t) / q + (p - 1) / p * (t-1)\n else:\n return (t**(p+1) - 1) / (p * (p+1)) + (t**(1-q) - 1) / (q*(q-1)) + (p - q) / (p * q) * (t-1)", "def theta(self):\n self.eigenvalues()", "def getQValue(self, state, action):\n \"*** YOUR CODE HERE ***\"\n qvalue = 0\n features = self.featExtractor.getFeatures(state, action)\n #Each feature is in the form of dictionary {((3, 3), 'east'): 1.0}. Each key is a combination of coordinate and direction. Each value represents the old qvalue.\n for feature in features.keys():\n qvalue += features[feature] * self.weights[feature]\n return qvalue", "def quartic_potential(x):\n k1=1\n k2=10\n return (k1*x**4)-(k2*x**2)", "def quasinewton(x, V, alpha, p, gprev, gnew):\r\n broken = False\r\n p = np.array(p)[np.newaxis]\r\n gnew = np.array(gnew)[np.newaxis]\r\n gprev = np.array(gprev)[np.newaxis]\r\n gk = gprev.T\r\n gk1 = gnew.T\r\n yk = gk1 - gk\r\n sk = alpha*p.T\r\n I = np.eye(len(x))\r\n if sk.T@yk == 0:\r\n broken = True\r\n p = p.flatten()\r\n return p, V, broken\r\n rhok = (1/(sk.T@yk)).flatten()\r\n v = I - (rhok*yk@sk.T)\r\n Vnew = (v.T@V@v) + (rhok*sk@sk.T)\r\n pnew = -Vnew@gk1\r\n pnew = pnew.flatten()\r\n return pnew, Vnew, broken", "def test_t(self):\n assert np.isclose(self.stepper.t, self.final_t)", "def getTheta (q_table, state_ind, old_theta_ind, last_event_type, entropy, actions):\n global eps, actions_list_size\n max_ind_array = np.where (q_table[state_ind] == np.amax (q_table[state_ind]))\n target_index = max_ind_array[0][0]\n #print (\"max ind array = {}\".format (max_ind_array))\n\n\n if len (max_ind_array[0]) == 1: \n if (random.uniform (0, 1) < eps):\n return target_index\n else:\n if old_theta_ind == 0:\n return random.randint (0, 2)\n elif old_theta_ind == len (q_table[state_ind]) - 1:\n return random.randint (old_theta_ind - 2, old_theta_ind)\n else:\n return random.randint (old_theta_ind - 1, old_theta_ind + 1)\n else:\n if (random.uniform (0, 1) < eps):\n # find min dist ind\n min_dist = actions_list_size\n min_dist_ind = old_theta_ind\n for ind in range (len (max_ind_array[0])):\n dist = math.fabs (old_theta_ind - max_ind_array[0][ind])\n if min_dist > dist:\n min_dist = dist\n min_dist_ind = max_ind_array[0][ind] \n # authenticated true attacks or benign traffic flows\n if last_event_type == 1 or last_event_type == 4:\n #print (\"min_dist_ind = {}\".format (min_dist_ind))\n #print (\"min_dist = {}\".format (min_dist))\n return min_dist_ind\n # false alarm\n elif last_event_type == 2:\n target_ind = getLowerBoundActionInd (entropy, actions)\n smaller_ind_array = []\n for ind in max_ind_array[0]:\n if ind == target_ind - 1 or ind == target_ind - 2 or ind == target_ind or ind == target_ind - 3:\n smaller_ind_array.append (ind)\n if len (smaller_ind_array) == 0:\n return min_dist_ind\n else:\n return smaller_ind_array[random.randint (0, len (smaller_ind_array) - 1)]\n # real attacks, but missed\n elif 
last_event_type == 3:\n target_ind = getUpperBoundActionInd (entropy, actions)\n #higher_ind = min_dist_ind\n #min_dist2 = actions_list_size\n #for ind in max_ind_array[0]:\n # dist = math.fabs (higher_ind - old_theta_ind)\n # if ind > old_theta_ind and dist <= min_dist2:\n # higher_ind = ind\n # min_dist2 = dist\n\n #if higher_ind == actions_list_size:\n # return min_dist_ind\n #else:\n # return higher_ind\n higher_ind_array = []\n for ind in max_ind_array[0]:\n if ind == target_ind + 1 or ind == target_ind + 2 or ind == target_ind or ind == target_ind + 3:\n higher_ind_array.append (ind)\n if len (higher_ind_array) == 0:\n return min_dist_ind\n else:\n return higher_ind_array[random.randint (0, len (higher_ind_array) - 1)]\n else:\n print (\"Fatal error: incorrect event type\")\n exit (-1)\n return target_index\n else:\n if old_theta_ind == 0:\n return random.randint (0, 2)\n elif old_theta_ind == len (q_table[state_ind]) - 1:\n return random.randint (old_theta_ind - 2, old_theta_ind)\n else:\n return random.randint (old_theta_ind - 1, old_theta_ind + 1)", "def __init__(self, *args, **kwargs):\n super(Theta, self).__init__(*args, **kwargs)\n\n # add additional assertions tab\n self.assertions = set()", "def check_constrained(self, theta):\n #initially no flips\n sign = np.ones_like(theta)\n oob = True #pretend we started out-of-bounds to force at least one check\n #print('theta_in ={0}'.format(theta))\n while oob:\n above = theta > self.upper\n theta[above] = 2*self.upper[above] - theta[above]\n sign[above] *= -1\n below = theta < self.lower\n theta[below] = 2*self.lower[below] - theta[below]\n sign[below] *= -1\n oob = np.any(below | above)\n #print('theta_out ={0}'.format(theta))\n return theta, sign, oob", "def test_hermitian_expectation(self, device, tol):\n n_wires = 2\n dev = device(n_wires)\n dev_def = qml.device(\"default.qubit\", wires=n_wires)\n\n if dev.shots is not None:\n pytest.skip(\"Device is in non-analytical mode.\")\n\n if \"Hermitian\" not in dev.observables:\n pytest.skip(\"Device does not support the Hermitian observable.\")\n\n if dev.name == dev_def.name:\n pytest.skip(\"Device is default.qubit.\")\n\n theta = 0.432\n phi = 0.123\n A_ = np.array(\n [\n [-6, 2 + 1j, -3, -5 + 2j],\n [2 - 1j, 0, 2 - 1j, -5 + 4j],\n [-3, 2 + 1j, 0, -4 + 3j],\n [-5 - 2j, -5 - 4j, -4 - 3j, -6],\n ]\n )\n A_.requires_grad = False\n\n def circuit(theta, phi):\n qml.RX(theta, wires=[0])\n qml.RX(phi, wires=[1])\n qml.CNOT(wires=[0, 1])\n return qml.expval(qml.Hermitian(A_, wires=[0, 1]))\n\n qnode_def = qml.QNode(circuit, dev_def)\n qnode = qml.QNode(circuit, dev)\n\n grad_def = qml.grad(qnode_def, argnum=[0, 1])\n grad = qml.grad(qnode, argnum=[0, 1])\n\n assert np.allclose(qnode(theta, phi), qnode_def(theta, phi), atol=tol(dev.shots))\n assert np.allclose(grad(theta, phi), grad_def(theta, phi), atol=tol(dev.shots))", "def test_single_quadrant(self):", "def learn(self):\n Qsa = self.evalQsa(self.features)[self.chosenA]\n print Qsa\n dQ = self.alpha*(self.reward + self.gamma * self.maxQsa(self.next_features) - Qsa)\n self.thetas += dQ*self.features[:,self.chosenA]\n print self.thetas\n # self.thetas /= np.sqrt(np.sum(np.power(self.thetas,2)))", "def check_constrained(self, theta):\n\n #initially no flips\n sign = np.ones_like(theta)\n oob = True #pretend we started out-of-bounds to force at least one check\n #print('theta_in ={0}'.format(theta))\n while oob:\n above = theta > self.upper\n theta[above] = 2*self.upper[above] - theta[above]\n sign[above] *= -1\n below = theta < self.lower\n 
theta[below] = 2*self.lower[below] - theta[below]\n sign[below] *= -1\n oob = np.any(below | above)\n #print('theta_out ={0}'.format(theta))\n return theta, sign, oob", "def psi(x, a, q):\n T = q.shape[1]\n covmat = calculate_variance(x + tile(a, [T, 1]).T)\n psi1 = covmat[0, 0] - covmat[1, 1]\n psi2 = covmat[0, 1]\n psi3 = x[0, -1]\n psi4 = x[1, -1]\n\n return (psi1, psi2, psi3, psi4)", "def test_QFT(self):\n op = qml.QFT(wires=range(3))\n res = op.matrix()\n exp = QFT\n assert np.allclose(res, exp)", "def f(self, (k,t), (J,q,dq), **params):\n f = 0.*q\n return f", "def B(q):\n # print('Value q')\n # print(q)\n if q > 0 and q != 0 and q != 1:\n result = -(q*math.log(q,2) + (1-q)*math.log(1-q,2))\n else:\n result = 0\n # print('Result of B')\n # print(result)\n return result", "def ispos(qob, tol=1e-15):\n try:\n np.linalg.cholesky(qob + tol * np.eye(qob.shape[0]))\n return True\n except np.linalg.LinAlgError:\n return False", "def test_kempton_taylor_q(self):\n c = array([2,3,3,3,3,3,4,4,4,6,6,7,7,9,9,11,14,15,15,20,29,33,34,\n 36,37,53,57,138,146,170])\n self.assertFloatEqual(kempton_taylor_q(c), 14/log(34/4))", "def _hypothesis(self, X):\n # * is element wise multiplication\n # numpy.dot(), or @ operator will work\n result = np.transpose(self.theta)@ X \n #emptyResult = np.zeros((1,X.shape[1]))\n return result", "def test_tf(self, approx_order, strategy, tol):\r\n tf = pytest.importorskip(\"tensorflow\")\r\n\r\n dev = qml.device(\"default.qubit.tf\", wires=2)\r\n params = tf.Variable([0.543, -0.654], dtype=tf.float64)\r\n\r\n with tf.GradientTape() as t:\r\n with qml.tape.JacobianTape() as tape:\r\n qml.RX(params[0], wires=[0])\r\n qml.RY(params[1], wires=[1])\r\n qml.CNOT(wires=[0, 1])\r\n qml.expval(qml.PauliZ(0) @ qml.PauliX(1))\r\n\r\n tape.trainable_params = {0, 1}\r\n tapes, fn = finite_diff(tape, n=1, approx_order=approx_order, strategy=strategy)\r\n jac = fn(dev.batch_execute(tapes))\r\n\r\n x, y = 1.0 * params\r\n\r\n expected = np.array([-np.sin(x) * np.sin(y), np.cos(x) * np.cos(y)])\r\n assert np.allclose(jac, expected, atol=tol, rtol=0)\r\n\r\n res = t.jacobian(jac, params)\r\n expected = np.array(\r\n [\r\n [-np.cos(x) * np.sin(y), -np.cos(y) * np.sin(x)],\r\n [-np.cos(y) * np.sin(x), -np.cos(x) * np.sin(y)],\r\n ]\r\n )\r\n assert np.allclose(res, expected, atol=tol, rtol=0)", "def quantum_theta(self):\n return quantum_theta(self.T_e, self.n_e)", "def test_quick_answer(self):\n pass", "def test_q_hat(self):\n # Set weights and pack data into PSpecData\n self.ds = pspecdata.PSpecData(dsets=self.d, wgts=self.w)\n Nfreq = self.ds.Nfreqs\n Ntime = self.ds.Ntimes\n Ndlys = Nfreq - 3\n self.ds.spw_Ndlys = Ndlys\n\n\n # Set baselines to use for tests\n key1 = (0, 24, 38)\n key2 = (1, 25, 38)\n key3 = [(0, 24, 38), (0, 24, 38)]\n key4 = [(1, 25, 38), (1, 25, 38)]\n\n for input_data_weight in ['identity', 'iC', 'dayenu']:\n self.ds.set_weighting(input_data_weight)\n if input_data_weight == 'dayenu':\n pytest.raises(ValueError,self.ds.R, key1)\n rpk = {'filter_centers':[0.],'filter_half_widths':[0.],'filter_factors':[0.]}\n self.ds.set_r_param(key1,rpk)\n self.ds.set_r_param(key2,rpk)\n # Loop over list of taper functions\n for taper in taper_selection:\n self.ds.set_taper(taper)\n\n # Calculate q_hat for a pair of baselines and test output shape\n q_hat_a = self.ds.q_hat(key1, key2)\n self.assertEqual(q_hat_a.shape, (Ndlys, Ntime))\n\n\n # Check that swapping x_1 <-> x_2 results in complex conj. 
only\n q_hat_b = self.ds.q_hat(key2, key1)\n q_hat_diff = np.conjugate(q_hat_a) - q_hat_b\n for i in range(Ndlys):\n for j in range(Ntime):\n self.assertAlmostEqual(q_hat_diff[i,j].real,\n q_hat_diff[i,j].real)\n self.assertAlmostEqual(q_hat_diff[i,j].imag,\n q_hat_diff[i,j].imag)\n\n # Check that lists of keys are handled properly\n q_hat_aa = self.ds.q_hat(key1, key4) # q_hat(x1, x2+x2)\n q_hat_bb = self.ds.q_hat(key4, key1) # q_hat(x2+x2, x1)\n q_hat_cc = self.ds.q_hat(key3, key4) # q_hat(x1+x1, x2+x2)\n\n # Effectively checks that q_hat(2*x1, 2*x2) = 4*q_hat(x1, x2)\n for i in range(Ndlys):\n\n for j in range(Ntime):\n self.assertAlmostEqual(q_hat_a[i,j].real,\n 0.25 * q_hat_cc[i,j].real)\n self.assertAlmostEqual(q_hat_a[i,j].imag,\n 0.25 * q_hat_cc[i,j].imag)\n\n\n self.ds.spw_Ndlys = Nfreq\n # Check that the slow method is the same as the FFT method\n for input_data_weight in ['identity', 'iC', 'dayenu']:\n self.ds.set_weighting(input_data_weight)\n # Loop over list of taper functions\n for taper in taper_selection:\n\n self.ds.set_taper(taper)\n q_hat_a_slow = self.ds.q_hat(key1, key2, allow_fft=False)\n q_hat_a = self.ds.q_hat(key1, key2, allow_fft=True)\n self.assertTrue(np.isclose(np.real(q_hat_a/q_hat_a_slow), 1).all())\n self.assertTrue(np.isclose(np.imag(q_hat_a/q_hat_a_slow), 0, atol=1e-6).all())\n\n #Test if error is raised when one tried FFT approach on exact_norm\n pytest.raises(NotImplementedError, self.ds.q_hat, key1, key2, exact_norm=True, allow_fft = True)", "def T(self, q = np.zeros(1) , dq = np.zeros(1) , ddq = np.zeros(1) , R = 1 ): \n \n F = self.F( q , dq , ddq )\n \n Tl = self.Tlosses( dq , ddq )\n \n T = np.dot( 1. / R , F ) + np.dot( R , Tl ) \n \n return T", "def censoring_fcn(self, q):\n return 1.0", "def test_gan():\n nbr_qubits = 5\n\n # Normal law\n # N = 5*10 ** 3\n #\n # Database = np.random.normal(0, 1, N)\n # test_gan_qiskit(nbr_qubits, Database)\n\n # beta\n arr_beta = beta_proba(nbr_qubits, 2, 5)\n\n general_gantest(arr_beta, nbr_qubits)\n\n # uniform not on [0, 32]\n if nbr_qubits == 5:\n arr_unif = [1 / 24] * 24 + 8 * [0]\n general_gantest(arr_unif, nbr_qubits)", "def __call__(self, q):\n completeness = np.zeros_like(q)\n for alpha, beta in zip(self.alpha_vals, self.beta_vals):\n completeness += self.sigmoid(q, alpha, beta)\n return completeness", "def closed_toda_3(x):\n q, p = extract_q_p(x)\n x = q[:,0,0,0]\n y = q[:,0,1,0]\n V = tf.exp(-2.*y) + tf.exp(y - tf.sqrt(3.)*x) + tf.exp(y + tf.sqrt(3.)*x)\n return tf.reduce_sum(tf.square(p), axis=2) + V", "def rochelobe(q):\n return 0.49*q**(2./3)/(0.6*q**(2./3) + log(1+q**(1./3)))", "def decision():\n return random.random() > 0.5", "def getQValue(self, state, action):\n \"*** YOUR CODE HERE ***\"\n Q_Value = 0 #initializing q value\n\n feat_Extractor = self.featExtractor\n\n weight = self.weights #To get the weight to control exploration and exploitation\n\n features = feat_Extractor.getFeatures(state,action) #to get all the features associated with (state,action) pair\n\n for each_feature in features:\n #refer to README_Reinforcement.txt for the formula at line 11\n temp_Qvalue = weight[each_feature] * features[each_feature] #Q(state,action) = w * featureVector where * is the dotProduct operator\n Q_Value = Q_Value + temp_Qvalue\n\n return Q_Value #Returns final qvalue\n #util.raiseNotDefined()", "def theta_v_time():\n pass", "def open_toda(x):\n q, p = extract_q_p(x)\n # q2, q3, ... , qN, q1\n qshift = tf.manip.roll(q, shift=-1, axis=2)\n # q1-q2, q2-q3, ... 
, q{N-1}-qN -> omit qN-q1, so qdiff shape (N,1,n-1,1)\n qdiff = q[:,:,:-1,:] - qshift[:,:,:-1,:]\n V = tf.reduce_sum(tf.exp(qdiff), axis=2)\n K = 0.5 * tf.reduce_sum(tf.square(p), axis=2)\n return K + V", "def single_qubit_ansatz(theta: float, phi: float) -> circuit.qc:\n\n qc = circuit.qc('single-qubit ansatz Y')\n qc.qubit(1.0)\n qc.rx(0, theta)\n qc.ry(0, phi)\n return qc", "def qp(self, eta_u, eta_v):\n z = 1\n v = np.array(eta_u)\n n_features = v.shape[0]\n u = np.sort(v)[::-1]\n cssv = np.cumsum(u) - z\n ind = np.arange(n_features) + 1\n cond = u - cssv / ind > 0\n rho = ind[cond][-1]\n theta = cssv[cond][-1] / float(rho)\n uu = np.maximum(v - theta, 0)\n vv = np.array(())\n return uu, vv", "def test_quaternion_hamilton():\n q_ij = pr.concatenate_quaternions(pr.q_i, pr.q_j)\n assert_array_equal(pr.q_k, q_ij)\n q_ijk = pr.concatenate_quaternions(q_ij, pr.q_k)\n assert_array_equal(-pr.q_id, q_ijk)", "def J(theta, x, y):\n m = len(y)\n z = theta.dot(x.T) #argument for hypothesis function\n return 1. / m * np.sum(-y * np.log(g(z)) - (1. - y) * np.log(1 - g(z)))", "def test_set_ph(self):\n s = State(substance=\"water\")\n s.ph = Q_(101325.0, \"Pa\"), Q_(1061602.391543017, \"J/kg\")\n # Pylance does not support NumPy ufuncs\n assert np.isclose(s.T, Q_(373.1242958476843, \"K\")) # type: ignore\n assert np.isclose(s.p, Q_(101325.0, \"Pa\")) # type: ignore\n assert np.isclose(s.ph[0], Q_(101325.0, \"Pa\")) # type: ignore\n assert np.isclose(s.ph[1], Q_(1061602.391543017, \"J/kg\")) # type: ignore\n assert np.isclose(s.u, Q_(1013250.0, \"J/kg\")) # type: ignore\n assert np.isclose(s.s, Q_(3028.9867985920914, \"J/(kg*K)\")) # type: ignore\n assert np.isclose(s.v, Q_(0.4772010021515822, \"m**3/kg\")) # type: ignore\n assert np.isclose(s.h, Q_(1061602.391543017, \"J/kg\")) # type: ignore\n assert np.isclose(s.x, Q_(0.28475636946248034, \"dimensionless\")) # type: ignore\n s.ph = Q_(101325.0, \"Pa\"), Q_(3336406.139862406, \"J/kg\")\n assert np.isclose(s.T, Q_(700.9882316847855, \"K\")) # type: ignore\n assert np.isclose(s.p, Q_(101325.0, \"Pa\")) # type: ignore\n assert np.isclose(s.ph[0], Q_(101325.0, \"Pa\")) # type: ignore\n assert np.isclose(s.ph[1], Q_(3336406.139862406, \"J/kg\")) # type: ignore\n assert np.isclose(s.u, Q_(3013250.0, \"J/kg\")) # type: ignore\n assert np.isclose(s.s, Q_(8623.283568815832, \"J/(kg*K)\")) # type: ignore\n assert np.isclose(s.v, Q_(3.189303132125469, \"m**3/kg\")) # type: ignore\n assert np.isclose(s.h, Q_(3336406.139862406, \"J/kg\")) # type: ignore\n assert s.x is None", "def __call__(self, q: Tensor, p: Tensor, *args, **kwargs) -> Tensor:\n return self.potential(q) + self.kinetic(p)", "def test_set_vp(self):\n s = State(substance=\"water\")\n s.vp = Q_(0.4772010021515822, \"m**3/kg\"), Q_(101325.0, \"Pa\")\n # Pylance does not support NumPy ufuncs\n assert np.isclose(s.T, Q_(373.1242958476843, \"K\")) # type: ignore\n assert np.isclose(s.p, Q_(101325.0, \"Pa\")) # type: ignore\n assert np.isclose(s.vp[0], Q_(0.4772010021515822, \"m**3/kg\")) # type: ignore\n assert np.isclose(s.vp[1], Q_(101325.0, \"Pa\")) # type: ignore\n assert np.isclose(s.u, Q_(1013250, \"J/kg\")) # type: ignore\n assert np.isclose(s.s, Q_(3028.9867985920914, \"J/(kg*K)\")) # type: ignore\n assert np.isclose(s.v, Q_(0.4772010021515822, \"m**3/kg\")) # type: ignore\n assert np.isclose(s.h, Q_(1061602.391543017, \"J/kg\")) # type: ignore\n assert np.isclose(s.x, Q_(0.28475636946248034, \"dimensionless\")) # type: ignore\n s.vp = Q_(3.189303132125469, \"m**3/kg\"), Q_(101325.0, \"Pa\")\n 
assert np.isclose(s.T, Q_(700.9882316847855, \"K\")) # type: ignore\n assert np.isclose(s.p, Q_(101325.0, \"Pa\")) # type: ignore\n assert np.isclose(s.vp[0], Q_(3.189303132125469, \"m**3/kg\")) # type: ignore\n assert np.isclose(s.vp[1], Q_(101325.0, \"Pa\")) # type: ignore\n assert np.isclose(s.u, Q_(3013250, \"J/kg\")) # type: ignore\n assert np.isclose(s.s, Q_(8623.283568815832, \"J/(kg*K)\")) # type: ignore\n assert np.isclose(s.v, Q_(3.189303132125469, \"m**3/kg\")) # type: ignore\n assert np.isclose(s.h, Q_(3336406.139862406, \"J/kg\")) # type: ignore\n assert s.x is None", "def get_q_total(fla_q, swe_q, theta_fea, wing_axis):\n q_swe_fla = swe_q.__mul__(fla_q)\n wa_rot = q_swe_fla.rotate(wing_axis)\n q_fea = pq.Quaternion(axis=wa_rot, angle=theta_fea)\n return q_fea.__mul__(q_swe_fla)", "def Q4_test():\n chemin = [3,2,1,0]\n tab_dist = [[0, 4.123105625617661, 4.242640687119285, 4.47213595499958], [4.123105625617661, 0, 4.123105625617661, 7.810249675906654], [4.242640687119285, 4.123105625617661, 0, 5.0990195135927845], [4.47213595499958, 7.810249675906654, 5.0990195135927845, 0]]\n d = longueur(chemin, tab_dist)\n return (d > 13.34523076) and (d < 13.34523077)", "def Yp(t, p, q):\n \n return (t**p - 1) / p + (1-t**(-q)) / q", "def test_inverse_a(self):\n for q in self.all:\n self.assertTrue(\n (q.inverse()*q).almost_equal(Quaternion(1, 0, 0, 0)))", "def qval(self, **kwargs):\n raise ValueError(\"This function is not available in lazy results evaluation as it would \"\n \"require all pairwise tests to be performed.\")", "def test_get_Q(self):\n vect_length = 50\n x_vect = np.random.normal(size=vect_length) \\\n + 1.j * np.random.normal(size=vect_length)\n y_vect = np.random.normal(size=vect_length) \\\n + 1.j * np.random.normal(size=vect_length)\n\n self.ds.spw_Nfreqs = vect_length\n #Test if there is a warning if user does not pass the beam\n key1 = (0, 24, 38)\n key2 = (1, 24, 38)\n uvd = copy.deepcopy(self.uvd)\n ds_t = pspecdata.PSpecData(dsets=[uvd, uvd])\n\n for i in range(vect_length):\n try:\n Q_matrix = self.ds.get_Q(i)\n # Test that if the number of delay bins hasn't been set\n # the code defaults to putting that equal to Nfreqs\n self.assertEqual(self.ds.spw_Ndlys, self.ds.spw_Nfreqs)\n except IndexError:\n Q_matrix = np.ones((vect_length, vect_length))\n\n xQy = np.dot(np.conjugate(x_vect), np.dot(Q_matrix, y_vect))\n yQx = np.dot(np.conjugate(y_vect), np.dot(Q_matrix, x_vect))\n xQx = np.dot(np.conjugate(x_vect), np.dot(Q_matrix, x_vect))\n\n # Test that Q matrix has the right shape\n self.assertEqual(Q_matrix.shape, (vect_length, vect_length))\n\n # Test that x^t Q y == conj(y^t Q x)\n self.assertAlmostEqual(xQy, np.conjugate(yQx))\n\n # x^t Q x should be real\n self.assertAlmostEqual(np.imag(xQx), 0.)\n\n x_vect = np.ones(vect_length)\n try:\n Q_matrix = self.ds.get_Q(vect_length/2)\n except IndexError:\n Q_matrix = np.ones((vect_length, vect_length))\n xQx = np.dot(np.conjugate(x_vect), np.dot(Q_matrix, x_vect))\n self.assertAlmostEqual(xQx, np.abs(vect_length**2.))\n\n # Now do all the same tests from above but for a different number\n # of delay channels\n self.ds.set_Ndlys(vect_length-3)\n for i in range(vect_length-3):\n try:\n Q_matrix = self.ds.get_Q(i)\n except IndexError:\n Q_matrix = np.ones((vect_length,vect_length))\n xQy = np.dot(np.conjugate(x_vect), np.dot(Q_matrix, y_vect))\n yQx = np.dot(np.conjugate(y_vect), np.dot(Q_matrix, x_vect))\n xQx = np.dot(np.conjugate(x_vect), np.dot(Q_matrix, x_vect))\n\n # Test that Q matrix has the right shape\n 
self.assertEqual(Q_matrix.shape, (vect_length, vect_length))\n\n # Test that x^t Q y == conj(y^t Q x)\n self.assertAlmostEqual(xQy, np.conjugate(yQx))\n\n # x^t Q x should be real\n self.assertAlmostEqual(np.imag(xQx), 0.)\n\n x_vect = np.ones(vect_length)\n try:\n Q_matrix = self.ds.get_Q((vect_length-2)/2-1)\n except IndexError:\n Q_matrix = np.ones((vect_length,vect_length))\n xQx = np.dot(np.conjugate(x_vect), np.dot(Q_matrix, x_vect))\n self.assertAlmostEqual(xQx, np.abs(vect_length**2.))\n\n # Make sure that error is raised when asking for a delay mode outside\n # of the range of delay bins\n pytest.raises(IndexError, self.ds.get_Q, vect_length-1)", "def getQValue(self, state, action):\n \"*** YOUR CODE HERE ***\"\n qvalue = 0.0\n for feature_name, value in self.featExtractor.getFeatures(state, action).iteritems():\n qvalue += value * self.weights[feature_name]\n return qvalue", "def getQValue(self, state, action):\n \"*** YOUR CODE HERE ***\"\n qvalue = 0.0\n for feature_name, value in self.featExtractor.getFeatures(state, action).iteritems():\n qvalue += value * self.weights[feature_name]\n return qvalue", "def at_angles(desired_thetas, q, q_dot, angle_precision=.2, angvel_precision=.01):\n angle_tuples = [(q[0], desired_thetas[0]),\n (q[1], desired_thetas[1]),\n (q[2], desired_thetas[2])]\n \n # check if angles are at the goal\n for angle, goal in angle_tuples:\n if goal != None:\n if abs(angle - goal) > angle_precision:\n return False\n\n # if angvel_precision is not None\n if angvel_precision:\n # check if velocity is zero\n for vel in [q_dot[0], q_dot[1], q_dot[2]]:\n if abs(vel) > angvel_precision:\n return False\n\n # if here, we have completed\n return True", "def positive_eval(self, input_tensor: torch.Tensor, theta: float):\n y = self(input_tensor)\n return y, torch.square(y).mean(dim=1) - theta", "def theta(lam, gam, p):\n #lam = lam - 1e-15\n return np.pi - np.arccos(np.divide(-1 + lam*np.cos(2*np.pi*p ), w(lam, gam, p) ) )", "def qi(self, tl, psi_l):\n\t try: \n\t ans = .622*esat(tl)/P_ATM*exp(psi_l*1000000.*VW/R/tl)\n\t except OverflowError:\n\t ans = 0.\n\t return ans", "def act(self, q_values, *args, **kwargs):\n if np.random.binomial(1, p=self.epsilon_updater.cur_value):\n action = np.array([np.random.choice(range(len(q_values)))])\n else:\n action = np.array([np.argmax(q_values)])\n self.epsilon_updater.update()\n return action", "def test_eq():\n x, y = fwd.Variable(), fwd.Variable()\n f = fwd.sin(x) + fwd.cos(y)\n g = fwd.sin(x) + fwd.cos(y)\n h = fwd.sin(y) + fwd.cos(x)\n assert f == g\n assert f != h", "def test_U3(self, tol):\n dev = qml.device(\"default.qubit\", wires=1)\n\n @qml.qnode(dev)\n def circuit(x, y, z):\n qml.QubitStateVector(1j * np.array([1, -1]) / np.sqrt(2), wires=[0])\n qml.U3(x, y, z, wires=[0])\n return qml.expval(qml.PauliX(0))\n\n theta = 0.543\n phi = -0.234\n lam = 0.654\n\n res = circuit(theta, phi, lam)\n expected = np.sin(lam) * np.sin(phi) - np.cos(theta) * np.cos(lam) * np.cos(phi)\n assert np.allclose(res, expected, atol=tol, rtol=0)\n\n grad_fn = autograd.grad(circuit, argnum=[0, 1, 2])\n res = grad_fn(theta, phi, lam)\n expected = np.array(\n [\n np.sin(theta) * np.cos(lam) * np.cos(phi),\n np.cos(theta) * np.cos(lam) * np.sin(phi) + np.sin(lam) * np.cos(phi),\n np.cos(theta) * np.sin(lam) * np.cos(phi) + np.cos(lam) * np.sin(phi),\n ]\n )\n assert np.allclose(res, expected, atol=tol, rtol=0)", "def _q_z(self):\n D = self.latt_par['D'].value\n lambda_r = self.latt_par['lambda_r'].value\n gamma = self.latt_par['gamma'].value\n 
return 2*np.pi*(self.h/D - self.k/lambda_r/np.tan(gamma))", "def closed_toda(x):\n q, p = extract_q_p(x)\n # q2, q3, ... , qN, q1\n qshift = tf.manip.roll(q, shift=-1, axis=2)\n # q1-q2, q2-q3, ... , q{N-1}-qN,qN-q1\n qdiff = q - qshift\n return tf.reduce_sum(0.5 * tf.square(p)+tf.exp(qdiff), axis=2)", "def q(self):\n return self._x", "def test_qnode_sample(self, execute_kwargs):\n dev = qml.device(\"default.qubit\", wires=2, shots=10)\n params = jax.numpy.array([0.1, 0.2, 0.3])\n\n grad_meth = (\n execute_kwargs[\"gradient_kwargs\"][\"method\"]\n if \"gradient_kwargs\" in execute_kwargs\n else \"\"\n )\n if \"adjoint\" in grad_meth or \"backprop\" in grad_meth:\n pytest.skip(\"Adjoint does not support probs\")\n\n def cost(a, cache):\n with qml.queuing.AnnotatedQueue() as q:\n qml.RY(a[0], wires=0)\n qml.RX(a[1], wires=0)\n qml.RY(a[2], wires=0)\n qml.sample(qml.PauliZ(0))\n\n tape = qml.tape.QuantumScript.from_queue(q)\n\n res = qml.interfaces.execute([tape], dev, cache=cache, **execute_kwargs)[0]\n return res\n\n res = jax.jit(cost, static_argnums=1)(params, cache=None)\n assert res.shape == (dev.shots,)", "def Tlosses(self, q = np.zeros(2) , dq = np.zeros(2) , ddq = np.zeros(2) ): \n \n J_a = self.jacobian_actuators( q )\n dJ_a = self.jacobian_actuators_diff( q , dq )\n \n T = np.dot( J_a , np.dot( self.Ia , ddq ) + np.dot( self.Da , dq ) ) + np.dot( dJ_a , np.dot( self.Ia , dq ) )\n \n return T", "def Tlosses(self, q = np.zeros(2) , dq = np.zeros(2) , ddq = np.zeros(2) ): \n \n J_a = self.jacobian_actuators( q )\n dJ_a = self.jacobian_actuators_diff( q , dq )\n \n T = np.dot( J_a , np.dot( self.Ia , ddq ) + np.dot( self.Da , dq ) ) + np.dot( dJ_a , np.dot( self.Ia , dq ) )\n \n return T", "def test_quintic(self):\n fun = get_problem('quintic', self.dimension, -10.0, 10.0)\n self.assertEqual(fun(self.array6), 0.0)", "def test_linear_in_tut(self):\n # reproducible arbitrariness\n np.random.seed(5000)\n\n tut_out = np.random.randn(self.Ns)\n alpha = 0.7\n\n self.conductor.out_step = np.random.randn(self.Nc)\n self.tutor.out_fct = lambda _: self.rule.theta + tut_out\n\n W0 = np.copy(self.syns.W)\n\n sim = simulation.Simulation(self.conductor, self.student, self.tutor,\n self.syns, self.rule, dt=self.dt)\n sim.run(self.dt)\n\n change1 = self.syns.W - W0\n\n self.syns.W = np.copy(W0)\n self.tutor.out_fct = lambda _: self.rule.theta + alpha*tut_out\n sim.run(self.dt)\n\n change2 = self.syns.W - W0\n\n self.assertTrue(np.allclose(change2, alpha*change1))", "def test_linear_in_tut(self):\n # reproducible arbitrariness\n np.random.seed(5000)\n\n tut_out = np.random.randn(self.Ns)\n alpha = 0.7\n\n self.conductor.out_step = np.random.randn(self.Nc)\n self.tutor.out_fct = lambda _: self.rule.theta + tut_out\n\n W0 = np.copy(self.syns.W)\n\n sim = simulation.Simulation(self.conductor, self.student, self.tutor,\n self.syns, self.rule, dt=self.dt)\n sim.run(self.dt)\n\n change1 = self.syns.W - W0\n\n self.syns.W = np.copy(W0)\n self.tutor.out_fct = lambda _: self.rule.theta + alpha*tut_out\n sim.run(self.dt)\n\n change2 = self.syns.W - W0\n\n self.assertTrue(np.allclose(change2, alpha*change1))", "def test_new():\n from qiskit import BasicAer\n from qiskit.aqua.algorithms import Grover\n from qiskit.aqua.components.oracles import LogicalExpressionOracle\n\n expr = \"your logical expression goes here\"\n algorithm = Grover(LogicalExpressionOracle(expr))\n backend = BasicAer.get_backend('qasm_simulator')\n result = algorithm.run(backend, seed=101110)\n print(result)", "def Wq(self):\n if not 
self.isVaild():\n pass\n return self.Lq()/self.lamda", "def calc_q_square(self):\n return self._q_x()**2 + self._q_z()**2", "def target_distribution(q):\n weight = q ** 2 / q.sum(0)\n return (weight.T / weight.sum(1)).T", "def AcceptOrReject(gamma):\n \n u = np.random.rand()\n if(u<gamma):\n return True\n else:\n return False", "def test_var_expectation_values(self, approx_order, strategy, tol):\r\n dev = qml.device(\"default.qubit\", wires=2)\r\n x = 0.543\r\n y = -0.654\r\n\r\n with qml.tape.JacobianTape() as tape:\r\n qml.RX(x, wires=[0])\r\n qml.RY(y, wires=[1])\r\n qml.CNOT(wires=[0, 1])\r\n qml.expval(qml.PauliZ(0))\r\n qml.var(qml.PauliX(1))\r\n\r\n tapes, fn = finite_diff(tape, approx_order=approx_order, strategy=strategy)\r\n res = fn(dev.batch_execute(tapes))\r\n assert res.shape == (2, 2)\r\n\r\n expected = np.array([[-np.sin(x), 0], [0, -2 * np.cos(y) * np.sin(y)]])\r\n assert np.allclose(res, expected, atol=tol, rtol=0)", "def test_purity():\n psi = qt.fock(3)\n rho_test = qt.ket2dm(psi)\n test_pure = purity(rho_test)\n assert_equal(test_pure,1)" ]
[ "0.6844436", "0.66966224", "0.61778736", "0.61521673", "0.612918", "0.61256754", "0.60950434", "0.6005836", "0.59359986", "0.59097683", "0.58760226", "0.5869871", "0.579352", "0.57821816", "0.577765", "0.57773805", "0.5768191", "0.5727558", "0.56556517", "0.56249905", "0.5595829", "0.559574", "0.55752057", "0.55570525", "0.55420774", "0.55187094", "0.55132186", "0.5512386", "0.5475502", "0.54709226", "0.54626834", "0.5455153", "0.54547393", "0.54503983", "0.54437166", "0.5440584", "0.54362005", "0.54316306", "0.5426348", "0.5420884", "0.5412525", "0.5411717", "0.5384449", "0.53681135", "0.5360291", "0.5352182", "0.5349068", "0.5344743", "0.533926", "0.5337319", "0.533485", "0.5328857", "0.5325031", "0.53185856", "0.5316725", "0.5306194", "0.5281874", "0.5276705", "0.5275875", "0.527067", "0.5266966", "0.52634776", "0.52601", "0.5257162", "0.5256779", "0.5246815", "0.52461255", "0.5241448", "0.5237852", "0.52377445", "0.522201", "0.52216876", "0.521579", "0.52152413", "0.5214953", "0.5212828", "0.5212828", "0.5208072", "0.52072537", "0.520307", "0.5201347", "0.5198427", "0.51983273", "0.5198104", "0.51973206", "0.5195805", "0.5192423", "0.5185202", "0.518213", "0.518213", "0.5182112", "0.5180675", "0.5180675", "0.5178643", "0.5176904", "0.5174418", "0.5172787", "0.5164294", "0.51615584", "0.51586616" ]
0.6628237
2
Test conversion to Q.
def test_convert_to_q(self):
    riskfree = .01
    lmbd = .01
    lmbd_s = .5
    lmbd_y = .5
    mean_v = .5
    kappa_s = 1.5
    kappa_y = .5
    eta_s = .1
    eta_y = .01
    rho = -.5
    theta = [riskfree, mean_v, kappa_s, kappa_y, eta_s, eta_y, rho,
             lmbd, lmbd_s, lmbd_y]
    param = CentTendParam.from_theta(theta)
    param.convert_to_q()
    kappa_sq = kappa_s - lmbd_s * eta_s
    kappa_yq = kappa_y - lmbd_y * eta_y
    scale = kappa_s / kappa_sq
    self.assertEqual(param.measure, 'Q')
    self.assertEqual(param.riskfree, riskfree)
    self.assertEqual(param.lmbd, 0)
    self.assertEqual(param.lmbd_s, lmbd_s)
    self.assertEqual(param.lmbd_y, lmbd_y)
    self.assertEqual(param.mean_v, mean_v * kappa_y / kappa_yq * scale)
    self.assertEqual(param.kappa_s, kappa_sq)
    self.assertEqual(param.kappa_y, kappa_yq)
    self.assertEqual(param.eta_s, eta_s)
    self.assertEqual(param.eta_y, eta_y * scale**.5)
    self.assertEqual(param.rho, rho)
    self.assertTrue(param.is_valid())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_Q(self):\n return isinstance(self,Q)", "def test_decode_qdc(self):\n self.assertEqual(td.qdc(), decoder.decode_qdc(BytesIO(td.qdc(True))))", "def test_qing(self):\n fun = get_problem('qing', self.dimension, -500, 500)\n self.assertAlmostEqual(fun(self.array10), 584.0, delta=1e-4)", "def convert_to_q(self):\n if self.measure == 'Q':\n warnings.warn('Parameters are already converted to Q!')\n else:\n kappa_sp = self.kappa_s\n kappa_yp = self.kappa_y\n self.kappa_s = self.kappa_s - self.lmbd_s * self.eta_s\n self.kappa_y = self.kappa_y - self.lmbd_y * self.eta_y\n self.scale = kappa_sp / self.kappa_s\n self.mean_v *= (kappa_yp / self.kappa_y * self.scale)\n self.lmbd = 0\n self.eta_y *= (self.scale**.5)\n self.measure = 'Q'\n self.update_ajd()", "def test_q(self):\n assert np.allclose(self.stepper.q, self.ODE.exact(self.stepper.t), rtol=1e-3, atol=1e-5)", "def test_str(self):\r\n f = lambda id_, seq, qual: len(seq) > 3\r\n s1 = 'aa'\r\n s2 = 'aaaa'\r\n sq = SeqQualBad('Q', f)\r\n self.assertEqual(sq('x', s1, [1, 2, 3]), False)\r\n self.assertEqual(str(sq), 'Q\\t0')\r\n self.assertEqual(sq('y', s2, [1, 2, 3]), True)\r\n self.assertEqual(str(sq), 'Q\\t1')", "def test_QFT(self):\n op = qml.QFT(wires=range(3))\n res = op.matrix()\n exp = QFT\n assert np.allclose(res, exp)", "def test_quintic(self):\n fun = get_problem('quintic', self.dimension, -10.0, 10.0)\n self.assertEqual(fun(self.array6), 0.0)", "def qs_to_q_function(func: FunctionType, q_1: Qs) -> Q:\n\n scalar = func(q_1)\n\n if scalar.qs_type != \"scalar_q\":\n raise Exception(f\"Oops, does not evaluate to a scalar: {scalar}\")\n\n return scalar.qs[0]", "def test_conversion_type(self):\r\n self.assertRaises(ValueError, convert_fastaqual_fastq,\r\n self.fasta_file_path, self.qual_file_path, conversion_type='soijdfl',\r\n output_directory=self.output_dir)", "def test_quick_answer(self):\n pass", "def test_domain_and_target_type(self):\n t = Quantize()\n assert t.domain_type == \"real\"\n assert t.target_type == \"integer\"", "def test_canConvert(string, cast, expected):\n assert canConvert(string, cast) == expected", "def test_cast(self, option, value, expected):\n actual = option.cast(value)\n assert actual == expected", "def scalar_q(q_1: Q) -> Q:\n\n end_q_type = f\"scalar_q({q_1.q_type})\"\n s = Q([q_1.t, 0, 0, 0], q_type=end_q_type, representation=q_1.representation)\n return s", "def xut2q( self, x , u , t ):\n \n # default is q = x\n \n return x", "def static_quant_test_qdq(\n self,\n model_fp32_path,\n data_reader,\n activation_type,\n weight_type,\n extra_options=None,\n ):\n\n activation_proto_qtype = TensorProto.UINT8 if activation_type == QuantType.QUInt8 else TensorProto.INT8\n activation_type_str = \"u8\" if (activation_type == QuantType.QUInt8) else \"s8\"\n weight_type_str = \"u8\" if (weight_type == QuantType.QUInt8) else \"s8\"\n model_int8_path = f\"conv_transpose_fp32.quant_dqd_{activation_type_str}{weight_type_str}.onnx\"\n\n data_reader.rewind()\n quantize_static(\n model_fp32_path,\n model_int8_path,\n data_reader,\n quant_format=QuantFormat.QDQ,\n activation_type=activation_type,\n weight_type=weight_type,\n extra_options=extra_options,\n )\n\n # Check node counts in quantized model.\n quant_node_counts = {\"ConvTranspose\": 1, \"QuantizeLinear\": 2, \"DequantizeLinear\": 4}\n check_op_type_count(self, model_int8_path, **quant_node_counts)\n\n # Check input/output types for QuantizeLinear nodes.\n qnode_io_qtypes = {\n \"QuantizeLinear\": [\n [\"i\", 2, activation_proto_qtype],\n [\"o\", 0, 
activation_proto_qtype],\n ]\n }\n check_qtype_by_node_type(self, model_int8_path, qnode_io_qtypes)\n\n # Check model correctness.\n data_reader.rewind()\n check_model_correctness(self, model_fp32_path, model_int8_path, data_reader.get_next())", "def test_constructor(self):\n p = Priorityq()\n self.assertIsInstance(p, Priorityq)", "def test_quintic2(self):\n fun = get_problem('quintic', self.dimension, -10.0, 10.0)\n self.assertEqual(fun(self.array7), 0.0)", "def test_domain_and_target_type(self):\n t = Reverse(Quantize())\n assert t.domain_type == \"integer\"\n assert t.target_type == \"real\"", "def parse_qpf(q):\n if q == \"M\":\n value = \"NULL\"\n trace = \"0\"\n elif q == \"T\":\n value = \"0.00\"\n trace = \"1\"\n else:\n value = q\n trace = \"0\"\n\n return quote(value), quote(trace)", "def test_convert_logical():", "def q1(n: float = 1.0, q_type: str = \"1\", representation: str = \"\") -> Q:\n\n return Q([n, 0, 0, 0], q_type=q_type, representation=representation)", "def call_q(self, _):\n return False", "def call_q(self, _):\n return False", "def call_q(self, _):\n return False", "def test_dq_rules(self,DQ):\r\n pass", "def from_QQ_python(K1, a, K0):\n return None", "def test_round_trip_conversion(self):\n\n for num_qubits in range(1, 5):\n # Since we know the implementations don't depend on well-formed input data,\n # just generate a random d^2 x d^2 matrix as test data for all conversions.\n dim = 2**(2 * num_qubits)\n mat = np.random.rand(dim, dim)\n assert_allclose(mat, vec2mat(mat2vec(mat)))\n assert_allclose(liou2choi(choi2liou(mat)), mat)\n assert_allclose(choi2liou(liou2choi(mat)), mat)", "def test_transform(self):\n t = Quantize()\n assert t.transform(8.6) == 9\n assert t.transform(8.4) == 8\n assert t.transform(5.3) == 5\n assert numpy.all(t.transform([8.6, 5.3]) == numpy.array([9, 5], dtype=int))", "def __init__(self,Q=None):\n \n self.Q = Q", "def get_quant(q):\n\n try:\n e_q = eval(q)\n except:\n return None\n\n if isinstance(e_q, (int,float,complex)):\n return e_q\n \n return None", "def validate(self, value):\r\n if isinstance(value, self.Quoter):\r\n value = value.value\r\n return bool(value)", "def test_call(self):\r\n f = lambda id_, seq, qual: len(seq) > 3\r\n s1 = 'aa'\r\n s2 = 'aaaa'\r\n sq = SeqQualBad('Q', f)\r\n self.assertEqual(sq('x', s1, [1, 2, 3]), False)\r\n self.assertEqual(sq('y', s2, [1, 2, 3]), True)\r\n self.assertEqual(sq.FailedIds, ['y'])", "def test_convert():", "def test_flux_conversion_vega(in_q, out_u, ans):\n result = units.convert_flux(_wave, in_q, out_u, vegaspec=_vspec)\n assert_quantity_allclose(result, ans, rtol=1e-2)\n\n # Scalar\n i = 0\n result = units.convert_flux(_wave[i], in_q[i], out_u, vegaspec=_vspec)\n assert_quantity_allclose(result, ans[i], rtol=1e-2)", "def convertToQuad(self,qtype,pts):\n return self.convertToQuadDict[qtype](pts)", "def test_equality_with_quat(self):\n quat = Quat(1, 2, 3, 4)\n self.assertEqual(quat, Quat(1, 2, 3, 4))", "def test_query_yes(self):\n self.assertEqual(query_yes_no(question=\"Is anyone wiser than Socrates?\"), True)", "def check_representations(self: Q, q_2: Q) -> bool:\n\n if self.representation == q_2.representation:\n return True\n\n else:\n raise Exception(f\"Oops, 2 have different representations: {self.representation} {q_2.representation}\")", "def q0(q_type: str = \"0\", representation: str = \"\") -> Q:\n\n return Q([0, 0, 0, 0], q_type=q_type, representation=representation)", "def qchannel_to_qiskit(representation):\n\n rep = representation.representation\n # Find what 
representation it is.\n # Then create the corresponding matrix and shape it like qiskit is expecting it.\n # Finally, create the qiskit representation from that matrix.\n if rep in (RepresentationType.PTM, RepresentationType.CHOI):\n matri = representation.matrix\n data_re = []\n data_im = []\n for i in range(matri.nRows):\n for j in range(matri.nCols):\n data_re.append(matri.data[i * matri.nRows + j].re + 0.j)\n data_im.append(matri.data[i * matri.nRows + j].im)\n data = np.array(data_re)\n data.imag = np.array(data_im)\n data = data.reshape((matri.nRows, matri.nCols))\n return PTM(data) if (rep == RepresentationType.PTM) else Choi(data)\n if rep in (RepresentationType.CHI, RepresentationType.SUPEROP):\n final_data = []\n for matri in representation.basis:\n data_re = []\n data_im = []\n for i in range(matri.nRows):\n for j in range(matri.nCols):\n data_re.append(matri.data[i * matri.nRows + j].re + 0.j)\n data_im.append(matri.data[i * matri.nRows + j].im)\n data = np.array(data_re)\n data.imag = np.array(data_im)\n data = data.reshape((matri.nRows, matri.nCols))\n final_data.append(data)\n if rep == RepresentationType.CHI:\n return Chi(final_data) if len(final_data) > 1 else Chi(final_data[0])\n return SuperOp(final_data) if len(final_data) > 1 else SuperOp(final_data[0])\n if rep == RepresentationType.KRAUS:\n final_data = []\n for matri in representation.kraus_ops:\n data_re = []\n data_im = []\n for i in range(matri.nRows):\n for j in range(matri.nCols):\n data_re.append(matri.data[i * matri.nRows + j].re + 0.j)\n data_im.append(matri.data[i * matri.nRows + j].im)\n data = np.array(data_re)\n data.imag = np.array(data_im)\n data = data.reshape((matri.nRows, matri.nCols))\n final_data.append(data)\n return Kraus(final_data)\n return None", "def Q(self):\n return self._Q", "def test_transform(self):\n t = Reverse(Quantize())\n assert t.transform(9) == 9.0\n assert t.transform(5) == 5.0\n assert numpy.all(t.transform([9, 5]) == numpy.array([9.0, 5.0], dtype=float))", "def qs_qs_to_q_function(func: FunctionType, q_1: Qs, q_2: Qs) -> Q:\n\n scalar = func(q_1, q_2)\n\n if scalar.qs_type != \"scalar_q\":\n raise Exception(f\"Oops, does not evaluate to a scalar: {scalar}\")\n\n return scalar.qs[0]", "def test_qtmultimedia():\n from qtpy import QtMultimedia\n\n assert QtMultimedia.QAbstractVideoBuffer is not None\n assert QtMultimedia.QAudio is not None\n assert QtMultimedia.QAudioDeviceInfo is not None\n assert QtMultimedia.QAudioInput is not None\n assert QtMultimedia.QSound is not None", "def serialize_to_python(cls, value):\n q = value\n num_children = len(q.children)\n\n result = []\n\n if value.negated:\n result.append('~')\n\n if num_children == 0:\n result.append('models.Q()')\n elif num_children == 1:\n child = value.children[0]\n\n result.append('models.Q(%s=%s)' % (child[0],\n serialize_to_python(child[1])))\n else:\n children = []\n\n for child in value.children:\n if isinstance(child, tuple):\n children.append(\n 'models.Q(%s=%s)' % (child[0],\n serialize_to_python(child[1])))\n elif isinstance(child, Q):\n children.append(serialize_to_python(child))\n else:\n raise TypeError('Unexpected type %s (value %r) in Q()'\n % (type(child), child))\n\n if len(children) == 1:\n result.append(children)\n elif len(children) > 1:\n result.append(\n '(%s)'\n % cls.child_separators[value.connector].join(children))\n\n return ''.join(result)", "def __int__(self):\n return int(self.q[0])", "def to_q(self, method: str = 'chiaverini', **kw) -> np.ndarray:\n return self.to_quaternion(method=method, 
**kw)", "def rotationInterpolation(*args, convert: Union[AnyStr, bool]=\"\", q=True, query=True,\n **kwargs)->Union[None, Any]:\n pass", "def _queryTest(self, filter):\n reactor = MemoryReactor()\n resolver = Resolver([], reactor=reactor)\n d = resolver._query(\n Query(b'foo.example.com', A, IN), [('1.1.2.3', 1053)], (30,),\n filter)\n\n # A UDP port should have been started.\n portNumber, transport = reactor.udpPorts.popitem()\n\n # And a DNS packet sent.\n [(packet, address)] = transport._sentPackets\n\n msg = Message()\n msg.fromStr(packet)\n\n # It should be a query with the parameters used above.\n self.assertEqual(msg.queries, [Query(b'foo.example.com', A, IN)])\n self.assertEqual(msg.answers, [])\n self.assertEqual(msg.authority, [])\n self.assertEqual(msg.additional, [])\n\n response = []\n d.addCallback(response.append)\n self.assertEqual(response, [])\n\n # Once a reply is received, the Deferred should fire.\n del msg.queries[:]\n msg.answer = 1\n msg.answers.append(RRHeader(b'foo.example.com', payload=Record_A('5.8.13.21')))\n transport._protocol.datagramReceived(msg.toStr(), ('1.1.2.3', 1053))\n return response[0]", "def abs_of_q(q_1: Q) -> Q:\n\n end_q_type = f\"|{q_1.q_type}|\"\n\n a = norm_squared(q_1)\n sqrt_t = a.t ** (1 / 2)\n a.t = sqrt_t\n a.q_type = end_q_type\n a.representation = q_1.representation\n\n return a", "def test_qubit_operator_consec_int_wires(self, obs, expected):\n dev = QeQiskitDevice(wires=3, shots=1000, backend=\"qasm_simulator\", analytic=False)\n op_str = dev.qubit_operator_string(obs)\n assert op_str == expected", "def test_eq(self):\n st_1 = State(substance=\"water\", T=Q_(400.0, \"K\"), p=Q_(101325.0, \"Pa\"))\n st_2 = State(substance=\"water\", T=Q_(400.0, \"K\"), p=Q_(101325.0, \"Pa\"))\n assert st_1 == st_2", "def from_q(self, q: np.ndarray) -> np.ndarray:\n return self.from_quaternion(self, q)", "def q(self, q: ComType):\n if isinstance(q, complex):\n self._pwr = q\n else:\n self._pwr = complex(0, q)", "def test_bool():\n assert Quantity(1, unit('m'))\n assert not Quantity(0, unit('m'))", "def q_v_q():\n p1_strategy = strategies.QStrategy('X')\n p2_strategy = strategies.QStrategy('O')\n p1 = player.Player('X', p1_strategy)\n p2 = player.Player('O', p2_strategy)\n board = tictactoe.Board()\n game = rl_game.Game(p1, p2, board)\n game.play_one()\n p1.strategy.save_q()\n p2.strategy.save_q()", "def _prepare(self, q):\n # store it in _lastr which is consulted in BasePDFGenerator.__call__\n self._lastr = q\n self._calc.qstep = q[1] - q[0]\n self._calc.qmin = q[0]\n self._calc.qmax = q[-1] + 0.5*self._calc.qstep\n return", "def qval(self, **kwargs):\n raise ValueError(\"This function is not available in lazy results evaluation as it would \"\n \"require all pairwise tests to be performed.\")", "def test_oef_serialization_query():\n query = Query([Constraint(\"foo\", ConstraintType(\"==\", \"bar\"))], model=None)\n msg = OefSearchMessage(\n performative=OefSearchMessage.Performative.SEARCH_SERVICES,\n dialogue_reference=(str(1), \"\"),\n query=query,\n )\n msg_bytes = OefSearchMessage.serializer.encode(msg)\n assert len(msg_bytes) > 0\n recovered_msg = OefSearchMessage.serializer.decode(msg_bytes)\n assert recovered_msg == msg", "def is_qword(self):\n return ida_bytes.is_qword(self.flags)", "def test_conversion(self):\n r = self.RNA(\"TCAtu\")\n self.assertEqual(str(r), \"UCAUU\")\n\n d = self.DNA(\"UCAtu\")\n self.assertEqual(str(d), \"TCATT\")", "def qst2(self):\n self.success = False", "def test_should_choice_convert_string():\n 
assert_conversion(forms.ChoiceField, Int)", "def test_str(self):\n\t\tself.filter.set_operator('.match')\n\t\tself.filter.set_limit(\"test\")\n\t\tself.assertTrue(str(self.filter), \"String conversion failed!\")", "def setQ(self,Q):\n self.Q = Q", "def validatePayload(q, request):\r\n\t\tassert type(q) is str\r\n\t\t# Queries with subtitutions must have the format {var.type} for input validation purposes.\r\n\t\tformatType = Validator.getFormatType(request)\r\n\t\tif formatType == 'str':\r\n\t\t\titems = request.args.items()\r\n\t\t\tpayload = {}\r\n\t\t\t[payload.update({item[0]: item[1]}) for item in items]\r\n\t\telif formatType == 'json':\r\n\t\t\tpayload = request.get_json()\r\n\t\t\tpayload = json.loads(payload) if payload else None\r\n\t\telif formatType == 'urlencoded':\r\n\t\t\tpayload = {item:request.form[item] for item in request.form}\r\n\t\t\tif payload is None:\r\n\t\t\t\titems = request.args.items()\r\n\t\t\t\tpayload = {}\r\n\t\t\t\t[payload.update({item[0]: item[1]}) for item in items]\r\n\t\tnewQuery, expectedParamsAndTypes = Validator.extractQueryParams(q)\r\n\t\tpayload = Validator.validateJsonRequestParams(payload, expectedParamsAndTypes, formatType)\r\n\t\treturn (payload, newQuery)", "def test_to_qcschema(self):\n # the molecule has no coordinates so this should fail\n ethanol = Molecule.from_smiles(\"CCO\")\n with pytest.raises(InvalidConformerError):\n qcschema = ethanol.to_qcschema()\n\n # now remake the molecule from the sdf\n ethanol = Molecule.from_file(get_data_file_path(\"molecules/ethanol.sdf\"))\n # make sure that requests to missing conformers are caught\n with pytest.raises(InvalidConformerError):\n qcschema = ethanol.to_qcschema(conformer=1)\n # now make a valid qcschema and check its properties\n qcschema = ethanol.to_qcschema(extras={\"test_tag\": \"test\"})\n # make sure the properties match\n charge = 0\n connectivity = [\n (0, 1, 1.0),\n (0, 4, 1.0),\n (0, 5, 1.0),\n (0, 6, 1.0),\n (1, 2, 1.0),\n (1, 7, 1.0),\n (1, 8, 1.0),\n (2, 3, 1.0),\n ]\n symbols = [\"C\", \"C\", \"O\", \"H\", \"H\", \"H\", \"H\", \"H\", \"H\"]\n\n def assert_check():\n assert charge == qcschema.molecular_charge\n assert connectivity == qcschema.connectivity\n assert symbols == qcschema.symbols.tolist()\n assert (\n qcschema.geometry.all()\n == ethanol.conformers[0].in_units_of(unit.bohr).all()\n )\n\n assert_check()\n assert qcschema.extras[\"test_tag\"] == \"test\"\n # now run again when no extras\n qcschema = ethanol.to_qcschema()\n assert_check()\n assert qcschema.extras is None", "def from_QQ_python(K1, a, K0=None):\n if a.denominator == 1:\n return K1.from_ZZ_python(a.numerator)", "def test_equivalent():\n # Positive test\n assert u.equivalent(np.arange(10)*q.um, q.cm)\n\n # Negative units test\n assert not u.equivalent(np.arange(10)*q.um, q.Jy)\n\n # Negative dtype test\n assert not u.equivalent(np.arange(10), q.um)", "def test_was_speech_valid(self):\n \n future_question = Question(pub_date=time)\n self.assertEqual(response.code)", "def q_to_qs_function(func, q_1):\n\n return Qs([func(q) for q in q_1.qs], qs_type=q_1.qs_type, rows=q_1.rows, columns=q_1.columns)", "def test_direct_qnode_integration():\r\n dev = qml.device(\"default.qubit\", wires=2)\r\n\r\n @qml.beta.qnode(dev)\r\n def qfunc(a, w):\r\n qml.Hadamard(0)\r\n qml.CRX(a, wires=[0, 1])\r\n qml.Rot(w[0], w[1], w[2], wires=[1])\r\n qml.CRX(-a, wires=[0, 1])\r\n\r\n return qml.expval(qml.PauliZ(0) @ qml.PauliZ(1))\r\n\r\n a, w = 2.3, [1.2, 3.2, 0.7]\r\n\r\n assert qml.draw(qfunc)(a, w) == (\r\n \" 0: 
──H──╭C────────────────────────────╭C─────────╭┤ ⟨Z ⊗ Z⟩ \\n\"\r\n + \" 1: ─────╰RX(2.3)──Rot(1.2, 3.2, 0.7)──╰RX(-2.3)──╰┤ ⟨Z ⊗ Z⟩ \\n\"\r\n )\r\n\r\n assert qml.draw(qfunc, charset=\"ascii\")(a, w) == (\r\n \" 0: --H--+C----------------------------+C---------+| <Z @ Z> \\n\"\r\n + \" 1: -----+RX(2.3)--Rot(1.2, 3.2, 0.7)--+RX(-2.3)--+| <Z @ Z> \\n\"\r\n )", "def test_torque_job(self):\r\n exp = 'echo \"abc; echo $? > xyz\" | qsub -k oe -N MOTU -q queue'\r\n obs = torque_job('abc', 'xyz', '123', 'queue')\r\n self.assertEqual(obs, exp)", "def convert(self, value):\n\n\t\tif self.converter is not None:\n\t\t\treturn self.converter(value)\n\t\telif self.units is not None:\n\t\t\tq = Quantity(value)\n\t\t\tq.assert_dimensions(self.units)\n\n\t\t\treturn q\n\t\telse:\n\t\t\treturn value", "def test_xyzp_qm_7a():\n subject = subject7\n\n with pytest.raises(qcelemental.MoleculeFormatError):\n final, intermed = qcelemental.molparse.from_string(subject, return_processed=True, dtype='psi4')", "def test_init_q(self):\n\n riskfree = .01\n lmbd = .01\n lmbd_s = .5\n lmbd_y = .5\n mean_v = .5\n kappa_s = 1.5\n kappa_y = .5\n eta_s = .1\n eta_y = .01\n rho = -.5\n\n param = CentTendParam(riskfree=riskfree,\n lmbd=lmbd, lmbd_s=lmbd_s, lmbd_y=lmbd_y,\n mean_v=mean_v, kappa_s=kappa_s, kappa_y=kappa_y,\n eta_s=eta_s, eta_y=eta_y, rho=rho, measure='Q')\n\n kappa_sq = kappa_s - lmbd_s * eta_s\n kappa_yq = kappa_y - lmbd_y * eta_y\n scale = kappa_s / kappa_sq\n\n self.assertEqual(param.measure, 'Q')\n self.assertEqual(param.riskfree, riskfree)\n self.assertEqual(param.lmbd, 0)\n self.assertEqual(param.lmbd_s, lmbd_s)\n self.assertEqual(param.lmbd_y, lmbd_y)\n self.assertEqual(param.mean_v, mean_v * kappa_y / kappa_yq * scale)\n self.assertEqual(param.kappa_s, kappa_sq)\n self.assertEqual(param.kappa_y, kappa_yq)\n self.assertEqual(param.eta_s, eta_s)\n self.assertEqual(param.eta_y, eta_y * scale**.5)\n self.assertEqual(param.rho, rho)\n self.assertTrue(param.is_valid())\n\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n param.convert_to_q()", "def test_1qubit(self):\n qc = QuantumCircuit(1)\n qc.measure_all()\n qc2 = QuantumCircuit(1)\n qc2.x(0)\n qc2.measure_all()\n\n sampler = Sampler()\n result = sampler.run([qc, qc2]).result()\n self.assertIsInstance(result, SamplerResult)\n self.assertEqual(len(result.quasi_dists), 2)\n self.assertDictAlmostEqual(result.quasi_dists[0], {0: 1})\n self.assertDictAlmostEqual(result.quasi_dists[1], {1: 1})", "def test_repr(self):\n fz = FourierZernikeBasis(L=6, M=3, N=0)\n s = str(fz)\n assert \"FourierZernikeBasis\" in s\n assert \"ansi\" in s\n assert \"L=6\" in s\n assert \"M=3\" in s\n assert \"N=0\" in s", "def test_check_quaternions():\n Q_list = [[1, 0, 0, 0]]\n Q = pr.check_quaternions(Q_list)\n assert_array_almost_equal(Q_list, Q)\n assert_equal(type(Q), np.ndarray)\n assert_equal(Q.dtype, np.float64)\n assert_equal(Q.ndim, 2)\n assert_array_equal(Q.shape, (1, 4))\n\n Q = np.array([\n [2, 0, 0, 0],\n [3, 0, 0, 0],\n [4, 0, 0, 0],\n [5, 0, 0, 0]\n ])\n Q = pr.check_quaternions(Q)\n for i in range(len(Q)):\n assert_almost_equal(np.linalg.norm(Q[i]), 1)\n\n assert_raises_regexp(ValueError, \"Expected quaternion array with shape\",\n pr.check_quaternions, np.zeros(4))\n assert_raises_regexp(ValueError, \"Expected quaternion array with shape\",\n pr.check_quaternions, np.zeros((3, 3)))\n\n Q = np.array([[0.0, 1.2, 0.0, 0.0]])\n Q2 = pr.check_quaternions(Q, unit=False)\n assert_array_almost_equal(Q, Q2)", "def test_query_value_conversion(self):\n\n 
class BlogPost(Document):\n author = ReferenceField(self.Person)\n\n BlogPost.drop_collection()\n\n person = self.Person(name=\"test\", age=30)\n person.save()\n\n post = BlogPost(author=person)\n post.save()\n\n # Test that query may be performed by providing a document as a value\n # while using a ReferenceField's name - the document should be\n # converted to an DBRef, which is legal, unlike a Document object\n post_obj = BlogPost.objects(author=person).first()\n assert post.id == post_obj.id\n\n # Test that lists of values work when using the 'in', 'nin' and 'all'\n post_obj = BlogPost.objects(author__in=[person]).first()\n assert post.id == post_obj.id\n\n BlogPost.drop_collection()", "def test_QSe_Run(self):\n fit_group, result = BayesQuasi(Program='QSe',\n SampleWorkspace=self._sample_ws,\n ResolutionWorkspace=self._res_ws,\n MinRange=-0.547607,\n MaxRange=0.543216,\n SampleBins=1,\n ResolutionBins=1,\n Elastic=False,\n Background='Sloping',\n FixedWidth=False,\n UseResNorm=False,\n WidthFile='',\n Loop=True,\n Save=False,\n Plot='None')\n self._validate_QSe_shape(result, fit_group)\n self._validate_QSe_value(result, fit_group)", "def test_output_type():\n assert type(song_decoder(\"WUWUBUBWUBUWUB\")) is str", "def test_convert_same_unit():\n assert pressure_util.convert(2, PRESSURE_PA, PRESSURE_PA) == 2\n assert pressure_util.convert(3, PRESSURE_HPA, PRESSURE_HPA) == 3\n assert pressure_util.convert(4, PRESSURE_MBAR, PRESSURE_MBAR) == 4\n assert pressure_util.convert(5, PRESSURE_INHG, PRESSURE_INHG) == 5", "def test_random_programming_quote(self):\n quote = Quote().print_programming_quote()\n self.assertTrue(type(quote) == str)", "def simple_q(self: Q) -> Q:\n\n self.t = sp.simplify(self.t)\n self.x = sp.simplify(self.x)\n self.y = sp.simplify(self.y)\n self.z = sp.simplify(self.z)\n return self", "def convert_type(atype, btype):\n # type + type => type\n if atype == btype:\n return atype\n\n # [Q]BOOLEAN + [Q]BOOLEAN => BOOLEAN\n if atype == 'BOOLEAN' and btype == 'QBOOLEAN':\n return 'BOOLEAN'\n if atype == 'QBOOLEAN' and btype == 'BOOLEAN':\n return 'BOOLEAN'\n\n # [Q]INTEGER + [Q]INTEGER => INTEGER\n if atype == 'QINTEGER' and btype == 'INTEGER':\n return 'INTEGER'\n if atype == 'INTEGER' and btype == 'QINTEGER':\n return 'INTEGER'\n\n # [Q]FLOAT + [Q]FLOAT => FLOAT\n if atype == 'QFLOAT' and btype == 'FLOAT':\n return 'FLOAT'\n if atype == 'FLOAT' and btype == 'QFLOAT':\n return 'FLOAT'\n\n # QINTEGER + QFLOAT => QFLOAT\n if atype == 'QINTEGER' and btype == 'QFLOAT':\n return 'QFLOAT'\n\n # QFLOAT + QINTEGER => QFLOAT\n if atype == 'QFLOAT' and btype == 'QINTEGER':\n return 'QFLOAT'\n\n # [Q]INTEGER + [Q]FLOAT => FLOAT (except QINTEGER + QFLOAT => QFLOAT)\n if atype == 'INTEGER' and btype == 'FLOAT':\n return 'FLOAT'\n if atype == 'INTEGER' and btype == 'QFLOAT':\n return 'FLOAT'\n if atype == 'QINTEGER' and btype == 'FLOAT':\n return 'FLOAT'\n\n # [Q]FLOAT + [Q]INTEGER => FLOAT (except # QFLOAT + QINTEGER => QFLOAT)\n if atype == 'FLOAT' and btype == 'INTEGER':\n return 'FLOAT'\n if atype == 'FLOAT' and btype == 'QINTEGER':\n return 'FLOAT'\n if atype == 'QFLOAT' and btype == 'INTEGER':\n return 'FLOAT'\n\n # All remaining combination of:\n # (DATE, TIME, TIMESTAMP, QBOOLEAN, QINTEGER, QFLOAT, STRING) +\n # (DATE, TIME, TIMESTAMP, QBOOLEAN, QINTEGER, QFLOAT, STRING) => STRING\n if is_string_type(atype) and is_string_type(btype):\n return 'STRING'\n\n return None", "def is_int(q):\n if isinstance(q, (Integer, int)):\n return True\n if isinstance(q, Rational):\n if 
q.denominator() == 1:\n return True\n if isinstance(q, tuple):\n return False\n try:\n if floor(q) == ceil(q):\n return True\n except TypeError:\n pass\n return False", "def test_check_quaternion():\n q_list = [1, 0, 0, 0]\n q = pr.check_quaternion(q_list)\n assert_array_almost_equal(q_list, q)\n assert_equal(type(q), np.ndarray)\n assert_equal(q.dtype, np.float64)\n\n random_state = np.random.RandomState(0)\n q = random_state.randn(4)\n q = pr.check_quaternion(q)\n assert_almost_equal(np.linalg.norm(q), 1.0)\n\n assert_raises_regexp(ValueError, \"Expected quaternion with shape\",\n pr.check_quaternion, np.zeros(3))\n assert_raises_regexp(ValueError, \"Expected quaternion with shape\",\n pr.check_quaternion, np.zeros((3, 3)))\n\n q = np.array([0.0, 1.2, 0.0, 0.0])\n q2 = pr.check_quaternion(q, unit=False)\n assert_array_almost_equal(q, q2)", "def __call__(self, q: str = \"\"):\n if q:\n return self.fixed_content in q\n return False", "def test_query_no(self):\n self.assertEqual(query_yes_no(question=\"Is anyone wiser than Socrates?\"), False)", "def test_init(self):\r\n sq = SeqQualBad('Q', None)\r\n self.assertEqual(sq.Name, 'Q')\r\n self.assertEqual(sq.F, None)\r\n self.assertEqual(sq.FailedIds, [])", "def from_QQ_gmpy(K1, a, K0=None):\n if a.denominator == 1:\n return K1.from_ZZ_gmpy(a.numerator)", "def sample_qubo(self, Q, **parameters):\n return self.child.sample_qubo(Q, **parameters)", "def test_save_as_qdp(self):\n arrays = [np.array([0, 1, 3]), np.array([1, 4, 5])]\n errors = [np.array([1, 1, 1]), np.array([[1, 0.5], [1, 0.5], [1, 1]])]\n hen.io.save_as_qdp(arrays, errors,\n filename=os.path.join(self.datadir,\n \"monol_test_qdp.txt\"))\n hen.io.save_as_qdp(arrays, errors,\n filename=os.path.join(self.datadir,\n \"monol_test_qdp.txt\"),\n mode='a')", "def play_question(self, q):\n\t\tq[\"question\"].play(True) #playing the question\n\t\tchoice = self.get_user_choice() #getting the user's choice\n\t\tnext_question = self.next_question(self.questions.index(q))\n\t\tif choice == \"LESSON MODE\":\n\t\t\tself.lesson.take_lesson()\n\t\telif choice == \"QUIZ MODE\":\n\t\t\tif next_question is not None:\n\t\t\t\treturn self.play_question(next_question)\n\t\telif choice == q[\"correct\"]: #checking if choice is correct\n\t\t\tif q.get('on_correct') is not None:\n\t\t\t\tq['on_correct'].play(True)\n\t\t\tif q.get('return_on_correct') is not None:\n\t\t\t\treturn self.play_question(q['return_on_correct'])\n\t\telif q.get('return_on_wrong') is not None: #in case choice is not correct\n\t\t\tif q.get('on_wrong') is not None:\n\t\t\t\tq['on_wrong'].play(True)\n\t\t\tif q.get('return_on_wrong') is not None:\n\t\t\t\treturn self.play_question(q['return_on_wrong'])\n\t\telif q.get('on_wrong') is not None:\n\t\t\tq['on_wrong'].play(True)\n\t\t\n\t\tif next_question is not None:\n\t\t\treturn self.play_question(next_question)", "def is_qword(self, value):\n if value:\n if not ida_bytes.create_data(self.ea, ida_bytes.FF_QWORD, 8, idc.BADADDR):\n raise RuntimeError(\"Unable to set type for {}\".format(self))\n else:\n del self.type", "def job_to_qiskit_circuit(qlm_job):\n # Check processing type\n assert_qpu(qlm_job.type == ProcessingType.SAMPLE,\n \"Only jobs having a SAMPLE processing type \"\n \"could be translated into Qiskit circuits\")\n\n # Convert\n return qlm_to_qiskit(qlm_job.circuit, qlm_job.qubits)", "def test_str(self):\n f66: Fraction = Fraction(6, 6)\n f24: Fraction = Fraction(2, 4)\n self.assertEqual(str(f66), \"6/6\")\n self.assertNotEqual(str(f24), \"8/6\")\n self.assertTrue(str(f24), 
\"2/4\")" ]
[ "0.67130417", "0.621103", "0.61342704", "0.5916669", "0.58767176", "0.57915777", "0.5747716", "0.5717606", "0.57136637", "0.5693663", "0.5692092", "0.5674936", "0.5654241", "0.56443626", "0.56386477", "0.5627663", "0.5570281", "0.55655336", "0.55531347", "0.5550286", "0.55312324", "0.5490929", "0.54852307", "0.5480135", "0.5480135", "0.5480135", "0.5466924", "0.5424252", "0.54182833", "0.5376017", "0.53657526", "0.5358548", "0.53490996", "0.5347019", "0.5345797", "0.53408384", "0.53400093", "0.53372264", "0.53314537", "0.5331407", "0.53083444", "0.5298219", "0.5294266", "0.5290165", "0.52812475", "0.5265221", "0.5259644", "0.52560574", "0.52552885", "0.52491844", "0.5238157", "0.52279377", "0.5215761", "0.51848906", "0.5182005", "0.51675737", "0.5164876", "0.51612866", "0.5156807", "0.51525337", "0.51514995", "0.5141311", "0.5131895", "0.5129938", "0.5121394", "0.5120203", "0.5114689", "0.5105556", "0.5101961", "0.5099984", "0.509442", "0.509148", "0.5091075", "0.508487", "0.5082553", "0.507958", "0.50679797", "0.5066549", "0.506415", "0.50602514", "0.5055348", "0.5054902", "0.50484586", "0.5045885", "0.50269794", "0.50237", "0.5023691", "0.5012036", "0.50054795", "0.5002457", "0.4992933", "0.4990837", "0.49822822", "0.49816206", "0.49710134", "0.49690112", "0.49666506", "0.49644783", "0.49636677", "0.495887" ]
0.61365926
2
Returns the names of all SKUs that are available in the database.
def get_all_skus():
    sku_ids = set()
    for sku_id in sku_database.find({}, {"_id": 0, "SKU_id": 1}):
        if sku_id.get("SKU_id"):
            sku_ids.add(sku_id["SKU_id"])
        else:
            continue
    return list(sku_ids)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sku_name(self) -> str:\n return pulumi.get(self, \"sku_name\")", "def sku_name(self) -> str:\n return pulumi.get(self, \"sku_name\")", "def sku_name(self) -> str:\n return pulumi.get(self, \"sku_name\")", "def getNames(self) -> List[unicode]:\n ...", "def sku_name(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"sku_name\")", "def get_usernames(self) -> list:\n db_list = list(self.cursor.execute('SELECT * FROM sqlite_master'))\n users = [db_list[i][1] for i in range(0, len(db_list), 2)]\n return users", "def get_player_names(self):\n names = [user['name'] for user in self.server.status().raw['players']['sample']]\n return names", "def get_all_supplier_names() -> List[str]:\n # Open a new connection\n db, cursor = db_connector.cursor()\n\n query = \"select name from supplier order by id\"\n cursor.execute(query)\n data = cursor.fetchall()\n r_list = [x[0] for x in data]\n db.disconnect()\n return r_list", "def user_names(self):\n results = []\n for user_detail in self.users:\n results.append(user_detail.user_name)\n results.sort()\n return results", "def sku_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"sku_name\")", "def sku_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"sku_name\")", "def keys(self):\n tuples = self._execute(\"SELECT name FROM users\")\n ret = [tup[0] for tup in tuples]\n return ret", "def get_names(self):\n return self.names", "def all_users():\n\treturn [unicode(name[:-4]).lower() for name in os.listdir(os.path.join(WORLD_DIR, 'players'))]", "def keys(self):\r\n if self.db == None:\r\n raise AssertionError(\"DB not open\")\r\n\r\n self.lock.acquire()\r\n try:\r\n usernames = self.db.keys()\r\n finally:\r\n self.lock.release()\r\n usernames = [u for u in usernames if not u.startswith(\"--Reserved--\")]\r\n return usernames", "def _getNames(self):\n return self._items.keys()", "def keys(self):\n if self.db == None:\n raise AssertionError(\"DB not open\")\n\n self.lock.acquire()\n try:\n usernames = list(self.db.keys())\n finally:\n self.lock.release()\n usernames = [u for u in usernames if not u.startswith(\"--Reserved--\")]\n return usernames", "def find_by_sku(cls, sku: int):\n cls.logger.info(\"Processing sku query for %s ...\", sku)\n return cls.query.filter(cls.sku == sku).order_by(cls.id).all()", "def get_list_of_names():\n conn = r.connect(host=HOST, port=PORT, db=DB)\n names = r.table(FINGERPRINT_TABLE)[\"name\"].run(conn)\n return list(names)", "def names(self):\n return [da.name for da in self]", "def user_list(self):\n self.cur.execute(\"SELECT username FROM users\")\n users = []\n for username in self.cur.fetchall():\n users.append(username[0])\n return users", "def name_get(self):\n result = []\n for product in self:\n if product.code and product.name:\n name = '[%s] %s'%(product.code, product.name)\n else: \n name = product.name\n \n result.append((product.id, name))\n \n return result", "def availableSquares(self):\n List2=[]\n for item in self.all:\n if item.retrieve()==\"\":\n List2.append(item.name())\n return List2", "def name_list(qbo_session):\n\n return qbo_session.name_list()", "def names(self) -> list[str]:", "def sku(self) -> 'outputs.SkuResponse':\n return pulumi.get(self, \"sku\")", "def sku(self) -> 'outputs.SkuResponse':\n return pulumi.get(self, \"sku\")", "def return_items(self):\n cur = self.cursor\n cur.execute(f\"SELECT * FROM {self.product_name}\")\n products = cur.fetchall()\n return products", "def namelist():\n\n\n session = Session(engine)\n\n results = 
session.query(lockdown.country).order_by(lockdown.country).all()\n\n #session.close()\n all_symbols = list(np.ravel(results))\n sym = all_symbols[1]\n\n return jsonify(all_symbols)", "def names(self) -> List:\n ...", "def names(self) -> Sequence[str]:\n return pulumi.get(self, \"names\")", "def names(self) -> Sequence[str]:\n return pulumi.get(self, \"names\")", "def _get_all_summoners(self):\n return [self.user_values['Summoner Name'][row].get().lower()\n for row, obj in enumerate(self.user_values['Summoner Name'])]", "def get_all_bank_names() -> List[str]:\n\n # Open a new connection\n db, cursor = db_connector.cursor()\n\n query = \"select name from bank\"\n cursor.execute(query)\n data = cursor.fetchall()\n r_list = [x[0] for x in data]\n db.disconnect()\n return r_list", "def getItemNames(self):\n\t\treturn self.items.keys()", "def get_store_item_list(spark) -> list:\n sqlDF = spark.sql(\"SELECT DISTINCT SKU, Store FROM dfView\")\n store_item_list = sqlDF.rdd.map(tuple).collect()\n return store_item_list", "def sku(self):\n return self._sku", "def return_names(self):\n return self.__name_list", "def get_names(self):\n\n return self.mod_suites.keys()", "def _list_of_availability_strings():\n names = [availability.name for availability in Availability]\n return names", "def pyranose_names(self):\n output = set()\n for item in self.pyranoses():\n if item in self.pyranose_fac:\n output.add(self.pyranose_fac[item][\"name\"])\n return list(output)", "def all_names():\n for x in RecordIOShard.all(keys_only=True).filter(\"index =\", True):\n yield RecordIOShard.get_name(x.name())", "def name(self) -> pulumi.Input['SkuName']:\n return pulumi.get(self, \"name\")", "def stl_names(self):\n return [stl.member.get_full_name() for stl in self.stls.all()]", "def get_status_skus(sku_list, status):\n values = []\n if not (sku_list, status):\n return values\n\n for sku_id in sku_list:\n status_query = list(sku_database.find({\"SKU_unit\": int(sku_id), \"Status\": status}, {'_id': 0, 'Status': 1}))\n if status_query:\n values.append(sku_id)\n return values", "def getNames(self):\n return self._Names", "def get_users():\n table_response = USER_FAVORITES_TABLE.scan()\n return table_response['Items']", "def list_available_strains(self):\n return [strain for strain in self.sample_dict]", "def get_currencies_names():\n names = [x for x in cur_dict]\n return names", "def get_all_names(self):\r\n return [person.name for person in self.__person_repository.elements]", "def sku(self) -> pulumi.Output['outputs.SkuResponse']:\n return pulumi.get(self, \"sku\")", "def sku(self) -> pulumi.Output[Optional['outputs.SkuDescriptionResponse']]:\n return pulumi.get(self, \"sku\")", "def sku(self) -> pulumi.Output[Optional['outputs.SkuDescriptionResponse']]:\n return pulumi.get(self, \"sku\")", "def get_all_song_names(self):\n try:\n # Auto-close\n with closing(self.connection) as con:\n # Auto-commit\n with con:\n # Auto-close\n with closing(con.cursor()) as cursor:\n cursor.execute(\"\"\"\n SELECT name\n FROM nodes\n WHERE type = \"song\";\n \"\"\")\n return [x[0] for x in cursor.fetchall()]\n except sqlite3.OperationalError as e:\n print(\"ERROR: Could not retrieve songs: {}\".format(str(e)))\n return []", "def drug_names():\n results = set()\n if 'qry' in request.args and len(request.args['qry']) >= 3:\n look_for = f\"{request.args['qry'].lower()}%\"\n drug_list = FTA.find_by_name(look_for, False )\n results = set([f\"{d.PROPRIETARY_NAME} - {d.NONPROPRIETARY_NAME}\" for d in drug_list if d.ACTIVE])\n\n results = 
sorted(list(results))\n return jsonify(results)", "def get_names(self):\n return self.__names", "def name_variants(self):\n out = []\n fields = 'indexed_name initials surname given_name doc_count'\n variant = namedtuple('Variant', fields)\n items = self._json['author-profile'].get('name-variant', [])\n if not isinstance(items, list):\n items = [items]\n for var in items:\n new = variant(indexed_name=var['indexed-name'],\n initials=var['initials'], surname=var['surname'],\n given_name=var.get('given-name'),\n doc_count=var.get('@doc-count'))\n out.append(new)\n return out", "def get_nice_names(self) -> List[str]:\n result = []\n for elements in self._get_results_list():\n result.append(elements[1])\n return result", "def names(self):\r\n return self.get_field(self.name_field)", "def getNames():\n imgs = Image.objects.raw({})\n ans = []\n for img in imgs:\n ans.append(img.name)\n ans.sort()\n return ans", "def get_all_genres(self):\n self.cursor.execute(\"select * from genres\")\n self.connection.commit()\n return self.cursor.fetchall()", "def sku(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"sku\")", "def sku(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"sku\")", "def getOqiNames( self ):\n\n if self.oqiNames:\n return self.oqiNames.keys()\n\n n = self.adb.get( \"nSrss\" )\n for indx in xrange( n ):\n name = self.adb.get( \"srsName\", indx )\n self.oqiNames[ name ] = indx\n\n return self.oqiNames.keys()", "def list_minerals():\n return _list_tindyb_unique_values(\"name\", dbpath=__dbpath__)", "def vm_sku_name(self) -> str:\n return pulumi.get(self, \"vm_sku_name\")", "def get_record_names(self, stripped=False):\n return _get_record_names(self._budget, stripped=stripped)", "def namelist(self):\n return self._handle.getnames()", "def namelist(self):\n return self._handle.getnames()", "def get_names(self):\n return [doc['name'] for doc in self.vocab]", "def get_coin_names() -> list:\n df = read_data_from_csv('data_aggregator.csv')\n return df['coin_name'].to_list()", "def stores(self):\n sql = u\"SELECT name FROM `sqlite_master` WHERE type='table'\"\n rows = self.conn.execute(sql)\n return [r['name'] for r in rows\n if r['name'] not in self.invalid_names]", "def names(self):\n return [x for x in self._dict.keys()]", "def get_all_names(cls, exclude_values: Iterator['CommonBucksType'] = None) -> Tuple[str]:\n name_list: Tuple[str] = tuple([value.name for value in cls.get_all(exclude_values=exclude_values)])\n return name_list", "def get_list ( self, object_class_id ) :\n name_list = []\n stmt = \"select name from \" + self.get_table_name ( object_class_id )\n self.oracle_cursor.arraysize = 100000\n self.oracle_cursor.execute(stmt)\n resultset = self.oracle_cursor.fetchmany()\n if resultset :\n for row in resultset :\n name_list.append(str(row[0]))\n return name_list", "def namelist(self):\n return []", "def get_students(self):\n return u', '.join([c.student.username for c in self.candidates.all()])", "def get_all_supplier_id_name() -> List[Tuple]:\n # Open a new connection\n db, cursor = db_connector.cursor()\n\n query = \"select id, name from supplier\"\n cursor.execute(query)\n data = cursor.fetchall()\n db.disconnect()\n return data", "def namelist(self):\n return set(self.names())", "def names(cls):\n return cls.__by_name.keys()", "def GetUserNamesList():\n\n # Create a list\n time.sleep(5)\n usernameslist = []\n\n html = Global.driver.page_source\n\n page = soup(html, \"lxml\")\n\n # Get all usernames\n table = page.find('div', 
class_=\"user-management-table-view\")\n\n tablebody = table.find('tbody')\n\n elements = tablebody.find_all('tr')\n\n for tr_tag in elements:\n usernameelement = tr_tag.find('span')\n\n username = usernameelement.text.strip('\\n')\n\n usernameslist.append(username)\n\n return usernameslist", "def names():\n\n SamplesFirstRow = session.query(Samples).first()\n results = SamplesFirstRow.__dict__ \n\n names = []\n for aName in results:\n namesDict = {}\n # namesDict[\"Name\"] = \"Sample ID\"\n namesDict[\"Value\"] = aName\n # names.append(namesDict)\n names.append(aName)\n\n return jsonify(names)", "def itemnames():\n g = ['KIS_NA_39', 'VII_57', 'MX_48', 'MX_56', 'KIS_NA_42', 'VII_54',\n 'MX_S_48', 'MX_S_52', 'MX_52', 'KIS_NA_45', 'KIS_NA_51', 'MIP_45',\n 'MIP_49', 'MIP_52', 'MIP_plus_48', 'MIP_plus_51', 'MX_42', 'MX_45',\n 'MIP_G_42', 'KIS_42', 'KIS_NA_48']\n return(g)", "def names(self):\n return self._names", "def names(self):\n return self._names", "def names(self):\n return self._names", "def tracker_list():\n trackers = db.execute(\"SELECT DISTINCT name FROM trackers\")\n names = [tup[0] for tup in trackers.fetchall()]\n return names", "def get_cryptomatte_names(self):\n return [self.cryptomattes[x][\"name\"] for x in self.cryptomattes]", "def alert_product_names(self) -> Sequence[str]:\n return pulumi.get(self, \"alert_product_names\")", "def get_names(self):\n\n # log.debug(str(inspect.stack()[1][3]) + \" --> OC.get_names()\")\n return [x.options['name'] for x in self.get_list()]", "def names(cls) -> List[str]:", "def getTableNames(self, lsstLevel, dbName):\n return self._doRequest(self.httpClient.getTableNames, lsstLevel, dbName)", "def products(self):\n return list(Product.select())", "def getAllName(table):\n\ttry:\n\t\tcon = sqlite3.connect('PampDb.db')\n\t\tcur = con.cursor()\n\t\tcur.execute(\"SELECT * FROM \" + table)\n\t\tnames = cur.fetchall()\n\t\tcon.commit()\n\t\tcon.close()\n\t\treturn names\n\texcept:\n\t\tprint('Could not run function getAllName from DbController')", "def product_db() -> List[Text]:\n\n return [\n \"credit\",\n \"forex\",\n \"debit\",\n \"atm\"\n ]", "def donor_names():\n return list(donor_db.keys())", "def get_users(self):\n fields = ['name', ]\n return self.get_data(\"myUsers\", fields)", "def furanose_names(self):\n output = set()\n for item in self.monomers():\n if item in self.furanose_fac:\n output.add(self.furanose_fac[item][\"name\"])\n return list(output)", "def get_names(self):\n return sorted(list(self.df[[Data.SDATA_NAME]].drop_duplicates().iloc[:, 0]))", "def get_driver_names():\n return drivers.keys()" ]
[ "0.62859315", "0.62859315", "0.62859315", "0.6216947", "0.6172846", "0.59829146", "0.59336996", "0.5914514", "0.5886475", "0.58093053", "0.58093053", "0.5785581", "0.57802486", "0.57603806", "0.5718735", "0.57042855", "0.5676707", "0.56748796", "0.5674839", "0.5670397", "0.5669379", "0.56296736", "0.56257635", "0.56183803", "0.5613643", "0.560023", "0.560023", "0.5587678", "0.55740035", "0.5549445", "0.5546504", "0.5546504", "0.5542122", "0.55411625", "0.5536329", "0.5531744", "0.5526461", "0.55155784", "0.55071694", "0.54807687", "0.54737103", "0.5465872", "0.5461997", "0.5458195", "0.5457844", "0.5455617", "0.54431754", "0.54397094", "0.54304504", "0.54267067", "0.54248667", "0.5417616", "0.5417616", "0.5410252", "0.5405585", "0.5401003", "0.5392949", "0.5392512", "0.5383438", "0.5379288", "0.5373058", "0.5370793", "0.5370793", "0.5365313", "0.5365049", "0.5360001", "0.5357306", "0.5349793", "0.5349793", "0.5341956", "0.53334224", "0.53333503", "0.5318683", "0.5316367", "0.5316305", "0.531384", "0.53121793", "0.5311117", "0.5307615", "0.5304255", "0.52916294", "0.52861726", "0.52808553", "0.5276253", "0.5276253", "0.5276253", "0.52742416", "0.52719736", "0.5265585", "0.52594995", "0.5256714", "0.5256269", "0.5254509", "0.52523047", "0.5245696", "0.5245183", "0.5241192", "0.52345216", "0.52295786", "0.52261204" ]
0.72144353
0
For a given sku_id, status and time span, it queries and returns the data.
def get_sku_id(sku_id, status, start_time, end_time):
    all_data = []
    # Bail out early if any argument is missing; a bare tuple of arguments
    # is always truthy, so each value must be tested individually with all().
    if not all((sku_id, status, start_time, end_time)):
        return all_data

    # Match on SKU id and status in the database, then filter by time span.
    for record in sku_database.find({"SKU_id": sku_id, "Status": status}, {"_id": 0}):
        if start_time < record["Time_stamp"] < end_time:
            all_data.append(record)
    return all_data
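A minimal usage sketch for the function above. It assumes sku_database is a pymongo collection on a local MongoDB instance and that Time_stamp values are stored as epoch seconds; the connection URI, database and collection names, and the sample arguments are all illustrative assumptions, not part of the record.

# Sketch only: backend and field encodings are assumptions, not part of the record.
from pymongo import MongoClient

client = MongoClient("mongodb://localhost:27017")
sku_database = client["inventory"]["sku_events"]  # hypothetical database/collection names

# Fetch all "sold" events for SKU 42 within a one-day window (epoch seconds).
rows = get_sku_id(sku_id=42, status="sold",
                  start_time=1600000000, end_time=1600086400)
for row in rows:
    print(row["SKU_id"], row["Status"], row["Time_stamp"])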
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def timespan(self, timespan=None, timezone=None):\r\n url = '{0}/{1}'.format(self.get_pull_url(), 'timespan')\r\n params = base.get_params(('timespan', 'timezone'), locals())\r\n\r\n return http.Request('GET', url, params), parsers.parse_json", "def query_tas_status(self):\n response = {}\n def _tas_status_callback(mqtt, userdata, msg):\n if 'STATUS' in msg.topic[-8:]:\n stat_num=re.sub(r'.*STATUS([0-9]*)$', r'\\1', msg.topic)\n msg = json.loads(msg.payload.decode('UTF-8'))\n for datum in tasmota_status_query[stat_num]:\n datumPath=tasmota_status_query[stat_num][datum]\n response[datum] = nested_get(msg, datumPath)\n response['status{num}'.format(num=stat_num)] = dt.datetime.now()\n s_topic = '{s_topic}/+'.format(**self)\n c_topic = '{c_topic}/status'.format(**self)\n self.mqtt.message_callback_add(s_topic, _tas_status_callback)\n self.mqtt.connect(self.mqtt_host)\n self.mqtt.subscribe(s_topic)\n\n #publish requests\n for status_number, ignored in tasmota_status_query.items():\n self.mqtt.publish(c_topic, status_number)\n\n # status numbers, converted to status2, etc\n def _status_words():\n return ['status{num}'.format(num=key) for key \\\n in tasmota_status_query.keys()]\n\n # while not all of the responses exist,\n # and we aren't too old since the start time\n startTime = dt.datetime.now()\n done = False\n while(not done and not too_old(startTime, waitForStatus)):\n done = True\n for status in _status_words():\n done = done and status in response\n if not done:\n self.mqtt.loop(timeout=loop_time)\n\n self.mqtt.unsubscribe(s_topic)\n self.mqtt.message_callback_remove(s_topic)\n self.mqtt.disconnect()\n\n self.reported = response\n return response", "def get_status_skus(sku_list, status):\n values = []\n if not (sku_list, status):\n return values\n\n for sku_id in sku_list:\n status_query = list(sku_database.find({\"SKU_unit\": int(sku_id), \"Status\": status}, {'_id': 0, 'Status': 1}))\n if status_query:\n values.append(sku_id)\n return values", "def get_status_of_id(sku_id):\n if not sku_id:\n return None\n\n status_query = list(sku_database.find({\"SKU_unit\": int(sku_id)}, {'_id': 0, 'Status': 1}))\n status = status_query[0][\"Status\"]\n return status", "def get_data_by_uuid_helper(uuid, hours):\n\tparser = HTMLParser()\n\tuuid = parser.unescape(uuid)\n\tquery_url = 'http://localhost:8079/api/query'\n\tquery = \"select data in (now -\"+hours+\"h, now) where uuid='\"+uuid+\"'\"\n\tr = requests.post(query_url, query)\n\treadings = json.loads(r.content)\n\ttry:\n\t\treadings = readings[0][\"Readings\"]\n\texcept:\n\t\treturn []\n\treturn readings", "def _get_unit_records(self, start_time):\r\n\r\n if self.optMTTF.get_active():\r\n _query = \"SELECT t2.fld_unit, t1.fld_incident_id, \\\r\n t1.fld_age_at_incident, t1.fld_failure, \\\r\n t1.fld_suspension, t1.fld_cnd_nff, \\\r\n t1.fld_occ_fault, t1.fld_initial_installation, \\\r\n t1.fld_interval_censored, t2.fld_request_date, \\\r\n t2.fld_hardware_id \\\r\n FROM rtk_incident_detail AS t1 \\\r\n INNER JOIN \\\r\n ( \\\r\n SELECT DISTINCT MIN(fld_unit, fld_request_date), \\\r\n fld_incident_id, fld_request_date, \\\r\n fld_unit, fld_hardware_id \\\r\n FROM rtk_incident \\\r\n GROUP BY fld_unit \\\r\n ) AS t2 \\\r\n ON t2.fld_incident_id=t1.fld_incident_id \\\r\n WHERE t1.fld_age_at_incident >= {0:f} \\\r\n ORDER BY t2.fld_unit ASC, \\\r\n t1.fld_age_at_incident ASC, \\\r\n t2.fld_request_date ASC\".format(start_time)\r\n\r\n elif self.optMTBBD.get_active():\r\n _query = \"SELECT t2.fld_unit, t1.fld_incident_id, \\\r\n 
t1.fld_age_at_incident, t1.fld_failure, \\\r\n t1.fld_suspension, t1.fld_cnd_nff, \\\r\n t1.fld_occ_fault, t1.fld_initial_installation, \\\r\n t1.fld_interval_censored, t2.fld_request_date, \\\r\n t2.fld_hardware_id \\\r\n FROM rtk_incident_detail AS t1 \\\r\n INNER JOIN \\\r\n ( \\\r\n SELECT fld_incident_id, fld_request_date, fld_unit, \\\r\n fld_hardware_id \\\r\n FROM rtk_incident \\\r\n GROUP BY fld_unit, fld_request_date \\\r\n ) AS t2 \\\r\n ON t2.fld_incident_id=t1.fld_incident_id \\\r\n WHERE t1.fld_age_at_incident >= {0:f} \\\r\n GROUP BY t2.fld_unit, t1.fld_age_at_incident \\\r\n ORDER BY t2.fld_unit ASC, \\\r\n t1.fld_age_at_incident ASC, \\\r\n t2.fld_request_date ASC\".format(start_time)\r\n\r\n elif self.optMTBF.get_active():\r\n _query = \"SELECT t2.fld_unit, t1.fld_incident_id, \\\r\n t1.fld_age_at_incident, t1.fld_failure, \\\r\n t1.fld_suspension, t1.fld_cnd_nff, \\\r\n t1.fld_occ_fault, t1.fld_initial_installation, \\\r\n t1.fld_interval_censored, t2.fld_request_date, \\\r\n t2.fld_hardware_id \\\r\n FROM rtk_incident_detail AS t1 \\\r\n INNER JOIN rtk_incident AS t2 \\\r\n ON t2.fld_incident_id=t1.fld_incident_id \\\r\n WHERE t1.fld_age_at_incident >= {0:f} \\\r\n ORDER BY t2.fld_unit ASC, \\\r\n t1.fld_age_at_incident ASC, \\\r\n t2.fld_request_date ASC\".format(start_time)\r\n\r\n (_results, _error_code, __) = self._dao.execute(_query, commit=False)\r\n\r\n return(_results, _error_code)", "def find_by_sku(cls, sku: int):\n cls.logger.info(\"Processing sku query for %s ...\", sku)\n return cls.query.filter(cls.sku == sku).order_by(cls.id).all()", "def QueryByUser(cls, client, callback, user_id, include_expired=False,\n include_history=False):\n history_results = []\n latest = {}\n def _VisitSub(sub, callback):\n if include_history:\n history_results.append(sub)\n else:\n if sub.expiration_ts < time.time() and not include_expired:\n callback()\n return\n # Only one transaction per subscription.\n if (sub.subscription_id in latest and\n latest[sub.subscription_id].timestamp > sub.timestamp):\n callback()\n return\n latest[sub.subscription_id] = sub\n callback()\n\n def _OnVisitDone():\n if include_history:\n assert not latest\n callback(history_results)\n else:\n assert not history_results\n callback(latest.values())\n\n Subscription.VisitRange(client, user_id, None, None, _VisitSub, _OnVisitDone)", "def fetchraw(self, pv, callback,\n cbArgs=(), cbKWs={},\n T0=None, Tend=None,\n count=None, chunkSize=None,\n archs=None, breakDown=None,\n enumAsInt=False, displayMeta=False, rawTimes=False):\n if breakDown is None:\n breakDown = yield self.search(exact=pv, archs=archs,\n breakDown=True, rawTime=True)\n\n breakDown = breakDown[pv]\n\n if len(breakDown)==0:\n _log.error(\"PV not archived\")\n defer.returnValue(0)\n\n if rawTimes:\n Tcur, Tend = T0, Tend\n else:\n Tcur, Tend = timeTuple(T0), timeTuple(Tend)\n\n _log.debug(\"Time range: %s -> %s\", Tcur, Tend)\n _log.debug(\"Planning with: %s\", map(lambda a,b,c:(a,b,self.__rarchs[c]), breakDown))\n\n plan = []\n \n # Plan queries\n # Find a set of non-overlapping regions\n for F, L, K in breakDown:\n # some mis-match of definitions\n # the search results give the times\n # of the first and last samples\n # inclusive.\n # time range [F, L]\n # However, values() query end time\n # is exclusive\n # time range [F, L)\n # We step the end time forward by 1 micro-second\n # to ensure that the last sample can be returned.\n # Note: it seems that Channel Archiver uses\n # micro-sec resolution times for comparisons...\n 
_log.debug(\"Before: %s\", L)\n LS, LN = L\n LN += 1000\n if LN>1000000000:\n LS += 1\n LN = 0\n L = LS, LN\n _log.debug(\"After: %s\", L)\n\n if L <= Tcur:\n continue # Too early, keep going\n elif F >= Tend:\n break # No more data in range\n\n # range to request from this archive\n Rstart = max(Tcur, F)\n Rend = min(Tend, L)\n\n plan.append((Rstart, Rend, K))\n \n Tcur = Rend\n\n if len(plan)==0 and len(breakDown)>0 and breakDown[-1][1] <= Tcur:\n # requested range is later than last recorded sample,\n # which is all we can return\n F, L, K = breakDown[-1]\n LS, LN = L\n plan.append(((LS+1,0),(LS+2,0),K))\n count=1\n _log.debug(\"Returning last sample. No data in or after requested time range.\")\n elif len(plan)==0:\n # requested range is earlier than first recorded sample.\n _log.warn(\"Query plan empty. No data in or before request time range.\")\n defer.returnValue(0)\n\n _log.debug(\"Using plan of %d queries %s\", len(plan), map(lambda a,b,c:(a,b,self.__rarchs[c]), plan))\n\n N = yield self._nextraw(0, pv=pv, plan=plan,\n Ctot=0, Climit=count,\n callback=callback, cbArgs=cbArgs,\n cbKWs=cbKWs, chunkSize=chunkSize,\n enumAsInt=enumAsInt, displayMeta=displayMeta)\n\n defer.returnValue(N)", "def request_device_readings_quartiles(device_uuid):\n\n # Set the db that we want and open the connection\n start = request.args.get('start')\n end = request.args.get('end')\n if app.config['TESTING']:\n conn = sqlite3.connect('test_database.db')\n else:\n conn = sqlite3.connect('database.db')\n conn.row_factory = sqlite3.Row\n cur = conn.cursor()\n #check for start\n if start != None and end != None:\n # Execute the query\n cur.execute('''\n select * from\n (\n SELECT AVG(value) FROM readings where value < (\n SELECT value FROM readings where device_uuid=\"{}\" ORDER BY value LIMIT 2 - (\n SELECT COUNT(*) FROM readings where device_uuid=\"{}\" and date_created>\"{}\" and date_created<\"{}\" \n ) % 2 OFFSET (\n SELECT (COUNT(*) - 1) / 2 FROM readings where device_uuid=\"{}\" and date_created>\"{}\" and date_created<\"{}\" \n )\n )\n ) as T1\n ,\n (\n SELECT AVG(value) FROM readings where value > (\n SELECT value FROM readings where device_uuid=\"{}\" ORDER BY value LIMIT 2 - (\n SELECT COUNT(*) FROM readings where device_uuid=\"{}\" and date_created>\"{}\" and date_created<\"{}\" \n ) % 2 OFFSET (\n SELECT (COUNT(*) - 1) / 2 FROM readings where device_uuid=\"{}\" and date_created>\"{}\" and date_created<\"{}\" \n )\n )\n ) as T2\n ,\n (\n SELECT value FROM readings where device_uuid=\"{}\" ORDER BY value LIMIT 2 - (\n SELECT COUNT(*) FROM readings where device_uuid=\"{}\" and date_created>\"{}\" and date_created<\"{}\" \n ) % 2 OFFSET (\n SELECT (COUNT(*) - 1) / 2 FROM readings where device_uuid=\"{}\" and date_created>\"{}\" and date_created<\"{}\" \n )\n ) as T3\n '''.format(\n device_uuid, \n device_uuid, \n start, end, \n device_uuid, \n start, end, \n device_uuid, \n device_uuid, \n start, end,\n device_uuid, \n start, end,\n device_uuid, \n device_uuid, \n start, end,\n device_uuid, \n start, end\n ))\n rows = cur.fetchall()\n eljson = jsonify([dict(zip(['quartile_1', 'quartile_3', 'median'], row)) for row in rows])\n # Return the JSON\n return eljson, 200\n if start != None and end == None:\n # Execute the query\n cur.execute('''\n select * from\n (\n SELECT AVG(value) FROM readings where value < (\n SELECT value FROM readings where device_uuid=\"{}\" ORDER BY value LIMIT 2 - (\n SELECT COUNT(*) FROM readings where device_uuid=\"{}\" and date_created>\"{}\" \n ) % 2 OFFSET (\n SELECT 
(COUNT(*) - 1) / 2 FROM readings where device_uuid=\"{}\" and date_created>\"{}\" \n )\n )\n ) as T1\n ,\n (\n SELECT AVG(value) FROM readings where value > (\n SELECT value FROM readings where device_uuid=\"{}\" ORDER BY value LIMIT 2 - (\n SELECT COUNT(*) FROM readings where device_uuid=\"{}\" and date_created>\"{}\" \n ) % 2 OFFSET (\n SELECT (COUNT(*) - 1) / 2 FROM readings where device_uuid=\"{}\" and date_created>\"{}\"\n )\n )\n ) as T2\n ,\n (\n SELECT value FROM readings where device_uuid=\"{}\" ORDER BY value LIMIT 2 - (\n SELECT COUNT(*) FROM readings where device_uuid=\"{}\" and date_created>\"{}\"\n ) % 2 OFFSET (\n SELECT (COUNT(*) - 1) / 2 FROM readings where device_uuid=\"{}\" and date_created>\"{}\"\n )\n ) as T3\n '''.format(\n device_uuid, \n device_uuid, \n start, \n device_uuid, \n start,\n device_uuid, \n device_uuid, \n start,\n device_uuid, \n start,\n device_uuid, \n device_uuid, \n start,\n device_uuid, \n start,\n ))\n rows = cur.fetchall()\n eljson = jsonify([dict(zip(['quartile_1', 'quartile_3', 'median'], row)) for row in rows])\n # Return the JSON\n return eljson, 200\n if start == None and end != None:\n # Execute the query\n cur.execute('''\n select * from\n (\n SELECT AVG(value) FROM readings where value < (\n SELECT value FROM readings where device_uuid=\"{}\" ORDER BY value LIMIT 2 - (\n SELECT COUNT(*) FROM readings where device_uuid=\"{}\" and date_created<\"{}\" \n ) % 2 OFFSET (\n SELECT (COUNT(*) - 1) / 2 FROM readings where device_uuid=\"{}\" and date_created<\"{}\" \n )\n )\n ) as T1\n ,\n (\n SELECT AVG(value) FROM readings where value > (\n SELECT value FROM readings where device_uuid=\"{}\" ORDER BY value LIMIT 2 - (\n SELECT COUNT(*) FROM readings where device_uuid=\"{}\" and date_created<\"{}\" \n ) % 2 OFFSET (\n SELECT (COUNT(*) - 1) / 2 FROM readings where device_uuid=\"{}\" and date_created<\"{}\" \n )\n )\n ) as T2\n ,\n (\n SELECT value FROM readings where device_uuid=\"{}\" ORDER BY value LIMIT 2 - (\n SELECT COUNT(*) FROM readings where device_uuid=\"{}\" and date_created<\"{}\" \n ) % 2 OFFSET (\n SELECT (COUNT(*) - 1) / 2 FROM readings where device_uuid=\"{}\" and date_created<\"{}\" \n )\n ) as T3\n '''.format(\n device_uuid, \n device_uuid, \n end, \n device_uuid, \n end, \n device_uuid, \n device_uuid, \n end,\n device_uuid, \n end,\n device_uuid, \n device_uuid, \n end,\n device_uuid, end\n ))\n rows = cur.fetchall()\n eljson = jsonify([dict(zip(['quartile_1', 'quartile_3', 'median'], row)) for row in rows])\n # Return the JSON\n return eljson, 200\n if start == None and end == None:\n cur.execute('''\n select * from\n (\n SELECT AVG(value) FROM readings where value < (\n SELECT value FROM readings where device_uuid=\"{}\" ORDER BY value LIMIT 2 - (\n SELECT COUNT(*) FROM readings where device_uuid=\"{}\"\n ) % 2 OFFSET (\n SELECT (COUNT(*) - 1) / 2 FROM readings where device_uuid=\"{}\"\n )\n )\n ) as T1\n ,\n (\n SELECT AVG(value) FROM readings where value > (\n SELECT value FROM readings where device_uuid=\"{}\" ORDER BY value LIMIT 2 - (\n SELECT COUNT(*) FROM readings where device_uuid=\"{}\"\n ) % 2 OFFSET (\n SELECT (COUNT(*) - 1) / 2 FROM readings where device_uuid=\"{}\"\n )\n )\n ) as T2\n ,\n (\n SELECT value FROM readings where device_uuid=\"{}\" ORDER BY value LIMIT 2 - (\n SELECT COUNT(*) FROM readings where device_uuid=\"{}\"\n ) % 2 OFFSET (\n SELECT (COUNT(*) - 1) / 2 FROM readings where device_uuid=\"{}\"\n )\n ) as T3\n '''.format(device_uuid, device_uuid, device_uuid, device_uuid, device_uuid, device_uuid, 
device_uuid, device_uuid, device_uuid))\n rows = cur.fetchall()\n eljson = jsonify([dict(zip(['quartile_1', 'quartile_3', 'median'], row)) for row in rows])\n # Return the JSON\n return eljson, 200", "def get_sondes(client, start, end):\n\n sonde_query_str = \"SELECT * FROM cfog.sharp_radiosonde \" + \\\n f\"WHERE LaunchTime BETWEEN '{start}' AND '{end}' \" + \\\n \"ORDER BY LaunchTime ASC\"\n\n print(f\"Executing bigquery query string: \")\n print(sonde_query_str + '\\n')\n\n sonde_data = {f\"{s['LaunchTime'].strftime('%m-%d_%H')}\":s for s in client.query(query=sonde_query_str)}\n\n print(\"Radiosondes obtained within the queried time bounds: \")\n print(list(sonde_data))\n\n sonde_data_out = {}\n for t in sonde_data:\n # ignored col: SoundingIdPk, RadioRxTimePk, PtuStatus\n sonde_data_out[t] = {}\n sonde_data_out[t]['df'] = pd.DataFrame({\n 'DataSrvTime' : sonde_data[t]['DataSrvTime'],\n 'Pressure' : sonde_data[t]['Pressure'],\n 'Temperature' : sonde_data[t]['Temperature'],\n 'Humidity' : sonde_data[t]['Humidity'],\n 'WindDir' : sonde_data[t]['WindDir'],\n 'WindSpeed' : sonde_data[t]['WindSpeed'],\n 'WindNorth' : sonde_data[t]['WindNorth'],\n 'WindEast' : sonde_data[t]['WindEast'],\n 'Height' : sonde_data[t]['Height'],\n 'WindInterpolated' : sonde_data[t]['WindInterpolated'],\n 'Latitude' : sonde_data[t]['Latitude'],\n 'Longitude' : sonde_data[t]['Longitude'],\n 'North' : sonde_data[t]['North'],\n 'East' : sonde_data[t]['East'],\n 'Up' : sonde_data[t]['Up'],\n 'Altitude' : sonde_data[t]['Altitude'],\n 'Dropping' : sonde_data[t]['Dropping']\n }\n )\n sonde_data_out[t]['LaunchTime'] = sonde_data[t]['LaunchTime']\n sonde_data_out[t]['LaunchLatitude'] = sonde_data[t]['LaunchLatitude']\n sonde_data_out[t]['LaunchLongitude'] = sonde_data[t]['LaunchLongitude']\n\n print(f\"Query complete. 
Total number of data entries: {len(sonde_data_out)}.\\n\\n\")\n\n del sonde_data\n return sonde_data_out", "def _run_query(self):\n self._search_query()\n payload_describe = {\n 'zone': self._zone,\n 'owner': self._owner,\n }\n if self._subnet_name:\n payload_describe.update({\"subnet_name\": self._subnet_name})\n _resp = SubnetService.describe_subnet(payload_describe)\n if _resp is None:\n self._record_total = self._record_filtered = 0\n return []\n _ret_list = _resp[\"ret_set\"]\n self._record_filtered = self._record_total = _resp.get(\"total_count\") or 100\n return _ret_list", "def query_single(self, records, streamid, debug=False):\n \"\"\"Works for both postgres and mysql\"\"\"\n conn, s = self.dbconn, self.dbstate\n debugout = []\n \n querystr = \"select * from grindertest where streamid=\"\n querystr += str(streamid)\n querystr += \" order by time desc limit \"\n querystr += str(records)\n starttime = time.time()\n temp = s.executeQuery(querystr)\n endtime = time.time()\n if debug:\n self.query_debugger(temp, debugout)\n return debugout\n\n completiontime = endtime - starttime\n return [starttime, endtime, completiontime]", "def get_data_by_uuid(request, uuid, hours):\n\tif request.method == 'GET':\n\t\treadings = get_data_by_uuid_helper(uuid, hours)\n\t\treturn Response(readings)", "def pull_from_kvstore(helper, name, start, stats):\n resp = helper.send_http_request(\n url=_uri(helper),\n headers=_headers(helper),\n method='GET',\n verify=False,\n parameters={'query': json.dumps({'splunk_source': name})})\n resp.raise_for_status()\n\n ans = {}\n for v in resp.json():\n ans[v['indicator']] = {\n '_key': v['_key'],\n 'is_present': False,\n 'splunk_last_seen': v.get('splunk_last_seen', 0.0)}\n\n return ans", "def _query(self, mapping, from_date=None, to_date=None, max_count=None,\n offset=None, ascendingly=True, describe=False):\n group, key = mapping.data_var.split(self._data_var_separator)\n\n # build params\n params = 'describe={describe}&keys={key}'.format(describe=str(describe).lower(), key=key)\n if self._api['token'] is not None:\n params += '&apitoken={}'.format(self._api['token'])\n if from_date is not None:\n params += '&from-date={}'.format(from_date.isoformat())\n if to_date is not None:\n params += '&to-date={}'.format(to_date.isoformat())\n\n # build url\n url = '{}{}?{}'.format(self._api['host'], self._api['url'], params).format(group=group)\n\n r = requests.get(url)\n if r.status_code == 200:\n data = json.loads(r.content.decode('utf-8'))\n # return query result\n if not describe:\n # sort\n data = sorted(\n data,\n key=lambda k: k.get(self._timestampkey),\n reverse=(not ascendingly))\n # apply constraints\n if offset is not None:\n data = data[offset:]\n if max_count is not None:\n data = data[:max_count]\n # process to query result\n res = QueryResult(mapping.obs_uri)\n for r in data:\n res.add_row(\n dateutil.parser.parse(r.get(self._timestampkey)),\n r.get(self._valuekey))\n # return\n return res\n # return query result description\n else:\n min = data.get('mindate', None)\n if min is not None:\n min = dateutil.parser.parse(min)\n max = data.get('maxdate', None)\n if max is not None:\n max = dateutil.parser.parse(max)\n return QueryResultDescription(mapping.obs_uri, min, max, data.get('count', 0))\n else:\n # empty/erronous response\n self.pyerr(\"Failed calling API: {}\".format(url))\n if not describe:\n return QueryResult(mapping.obs_uri)\n return QueryResultDescription(mapping.obs_uri, None, None, 0)", "def QueryTimespan(cls, client, group_key, start_time, 
end_time, callback, excl_start_key=None):\n # Query from start_time to end_time + 1 - because they contain a machine id, actual\n # sort keys will always follow a key comprised only of a timestamp.\n assert start_time is not None or end_time is not None, 'must specify at least one of start_time and end_time'\n operator = None\n start_rk = util.CreateSortKeyPrefix(start_time, randomness=False) if start_time is not None else None\n end_rk = util.CreateSortKeyPrefix(end_time + 1, randomness=False) if end_time is not None else None\n if start_time is None:\n operator = db_client.RangeOperator([end_rk], 'LE')\n elif end_time is None:\n operator = db_client.RangeOperator([start_rk], 'GE')\n else:\n operator = db_client.RangeOperator([start_rk, end_rk], 'BETWEEN')\n Metric.RangeQuery(client, group_key, operator, None, None,\n callback=callback, excl_start_key=excl_start_key)", "def get(self, region=None):\n base_url = self.base.base_url[region]\n url = '{}/lol/status/v3/shard-data'.format(base_url)\n r = requests.get(url, headers=self.base.headers)\n return r", "def test_fetch():\n service = WebService(TestFactory())\n query = service.parse(\n parse_qs(\n \"id=BOU&starttime=2016-06-06\"\n \"&endtime=2016-06-07&elements=H,E,Z,F&sampling_period=60\"\n \"&format=iaga2002&type=variation\"\n )\n )\n timeseries = service.fetch(query)\n assert_equal(isinstance(timeseries, Stream), True)", "def _get_consumption(self, url, start, end, aggregation):\n start = self._to_milliseconds(start)\n end = self._to_milliseconds(end)\n\n headers = {\"Authorization\": \"Bearer {}\".format(self.access_token)}\n params = {\n \"aggregation\": aggregation,\n \"from\": start,\n \"to\": end\n }\n r = requests.get(url, headers=headers, params=params)\n r.raise_for_status()\n return r.json()", "async def _stats_handler(request, params):\n # validation is performed in the rest dispatching method\n target = params['target']\n groupid = int(params['groupid'])\n rtypeid = int(params['rtypeid'])\n start_ts = int(params['start_ts'])\n end_ts = int(params['end_ts'])\n resp_body = dict()\n # call the appropriate db handler based on target\n try:\n if target == 'group':\n resp_body['stats'] = [doc async for doc in request.app['db'].stats_group(groupid, rtypeid, start_ts, end_ts)]\n elif target == 'sensor':\n sensorid = int(params['sensorid'])\n resp_body['stats'] = await request.app['db'].stats_sensor(sensorid, groupid, rtypeid, start_ts, end_ts)\n except Exception as e:\n if request.app['config'].debug:\n return generate_error(traceback_str(e), 403)\n else:\n return generate_error('ERROR: There was an issue understanding your request!', 403)\n # the standard return - if we got here, then everything went ok\n return aiohttp.web.Response(body=simplejson.dumps(resp_body))", "def get_gdax_historical_data():\n \n start = None\n while not start:\n start,end,tid = getStartAndEndHistoric()\n if not start:\n time.sleep(60)\n #Todo: change this to 1min\n firsttimestamp = start\n engine = sa.create_engine(sql_address)\n products = [\"LTC-USD\",\"LTC-BTC\",\"ETH-USD\",\"ETH-BTC\",\"BTC-USD\"]\n public_client = gdax.PublicClient()\n deltat = datetime.timedelta(seconds = 200)\n timewindows = []\n while end - start > datetime.timedelta(seconds=0):\n if start + deltat > end:\n endx = end\n else:\n endx = start + deltat\n timewindows.append([start,endx])\n start += deltat\n results = []\n total = len(timewindows)\n current_idx = 0\n timeold = time.time()\n numofqueries = 0\n engine = sa.create_engine(sql_address)\n Base.metadata.bind = engine\n 
DBSession = sa.orm.sessionmaker()\n DBSession.bind = engine\n session = DBSession()\n for startx,endx in timewindows:\n\n current_idx += 1\n for i in products:\n repeat = True\n while repeat:\n\n #delay if ratelimts are close\n if numofqueries < 3:\n while time.time() - timeold < 1:\n time.sleep(0.05)\n \n timeold = time.time()\n numofqueries = 0\n try:\n alist = public_client.get_product_historic_rates(i, start = startx, end = endx, granularity = 1)\n except:\n time.sleep(30)\n public_client = gdax.PublicClient()\n alist = public_client.get_product_historic_rates(i, start = startx, end = endx, granularity = 1)\n\n alist = public_client.get_product_historic_rates(i, start = startx, end = endx, granularity = 1)\n\n numofqueries += 1\n\n #rate limit exceeded has 'message' as dict.\n if not 'message' in alist:\n repeat = False\n for a in alist:\n a[0] = datetime.datetime.fromtimestamp(float(a[0]))\n tmp = i.split('-')\n d = dict(coin = tmp[0], currency = tmp[1], timestamp = a[0], low=a[1], high=a[2], open=a[3], close=a[4], volume=a[5])\n results.append(d)\n lasttimestamp = a[0]\n\n #upload with batch size of 10000\n if len(results) > 10000:\n engine.execute(\n GADXHistoricalDataOneSecondOHLC.__table__.insert(),\n results\n )\n results = []\n \n update = session.query(historicalDataProgramState).filter(sa.and_(historicalDataProgramState.transaction_id == tid,historicalDataProgramState.entry_type == 'update')).first()\n if update:\n update.end = lasttimestamp\n session.commit()\n else:\n new_update = historicalDataProgramState(entry_type = 'update',transaction_id = tid,start=firsttimestamp,end=lasttimestamp,platform='GDAX',status='incomplete')\n session.add(new_update)\n session.commit()\n if len(results) > 0:\n engine.execute(\n GADXHistoricalDataOneSecondOHLC.__table__.insert(),\n results\n )\n results = []\n \n update = session.query(historicalDataProgramState).filter(sa.and_(historicalDataProgramState.transaction_id == tid,historicalDataProgramState.entry_type == 'update')).first()\n if update:\n update.end = lasttimestamp\n session.commit()\n else:\n new_update = historicalDataProgramState(entry_type = 'update',transaction_id = tid,start=firsttimestamp,end=lasttimestamp,platform='GDAX',status='incomplete')\n session.add(new_update)\n session.commit()\n\n update = session.query(historicalDataProgramState).filter(sa.and_(historicalDataProgramState.transaction_id == tid,historicalDataProgramState.entry_type == 'update')).first()\n update.status='complete'\n order = session.query(historicalDataProgramState).filter(sa.and_(historicalDataProgramState.transaction_id == tid,historicalDataProgramState.entry_type == 'order')).first()\n order.status='complete'\n session.commit()", "def query(\n region: str,\n profile: str,\n query_params: ConfigType,\n quiet: bool = False,\n interval: float = 0.05,\n) -> QueryResultResponse:\n\n client = InsightsClient(region, profile)\n client.start_query(**query_params)\n\n counter = 0\n progress = Progress(\n processing_msg=\"Search for matching logs...\",\n end_msg=\"Search completed!\",\n quiet=quiet,\n )\n\n try:\n while True:\n progress.show(counter)\n\n if (results := client.fetch_result()) is not None:\n progress.done()\n return cast(QueryResultResponse, results)\n\n counter += 1\n sleep(interval)\n\n except (\n QueryNotYetStartError,\n NotFetchQueryResultError,\n QueryTimeoutError,\n QueryAlreadyCancelled,\n QueryUnknownError,\n ) as err:\n sys.exit(err)\n\n except KeyboardInterrupt:\n client.end_query()\n sys.exit(\"\\nAbort\")", "def GetStatus(handler, 
query):\n json_config = {}\n\n lock = None\n if 'Url' in query:\n url = query['Url'][0]\n status, lock = ToGo.get_status(url)\n\n if not lock:\n # no Url or no status found for url\n handler.send_json(json.dumps(json_config))\n return\n\n with lock:\n state = 'queued'\n if status['running']:\n state = 'running'\n elif status['finished']:\n if status['error'] == '':\n state = 'finished'\n else:\n state = 'error'\n json_config['error'] = status['error']\n\n json_config['state'] = state\n json_config['rate'] = status['rate']\n json_config['size'] = status['size']\n json_config['retry'] = status['retry']\n json_config['maxRetries'] = status['ts_max_retries']\n json_config['errorCount'] = status['ts_error_count']\n\n handler.send_json(json.dumps(json_config))", "def get(self):\n args = search_parser.parse_args()\n return_status = None\n deviceid = request.args['deviceid']\n start_time=request.args['start_time']\n end_time=request.args['end_time']\n log.debug(request.args)\n result = {}\n try:\n start_time=start_time.replace(\"T\", \" \")\n end_time=end_time.replace(\"T\", \" \")\n log.debug(\"deviceId searched for : \" + deviceid+ \" Start Time:\"+start_time+\" end_time:\"+end_time)\n #get the payload to influx DB\n url = \"http://localhost:8086/query\"\n \n querystring = {\"pretty\": \"true\", \"db\": \"IOT\",\n \"q\": \"SELECT * FROM \\\"ttd_devices\\\" WHERE deviceId=\\'%s\\' AND time >= '%s' AND time <= '%s' \"%(deviceid,start_time,end_time)}\n response = requests.request(\"GET\", url, params=querystring) \n D=json.loads(response.text)\n #log.debug('------------------------------------------')\n #log.debug(D)\n #log.debug('------------------------------------------')\n response_dict=[]\n for element in D['results'][0]['series'][0]['values']:\n temp_dict=dict(zip(D['results'][0]['series'][0]['columns'],element))\n processed_dict=dict()\n for key,value in temp_dict.items():\n if value is not None and value != np.nan:\n if key == 'tStamp':\n timestamp = datetime.fromtimestamp(eval(value))\n value=timestamp.strftime('%Y-%m-%d %H:%M:%S')\n elif key == 'ipAddress':\n value=eval(value)\n elif key == 'time':\n value=str(pd.to_datetime(value, format=\"%Y-%m-%dT%H:%M:%S.%fZ\"))\n processed_dict[key]=value \n response_dict.append(processed_dict)\n #log.debug('------------------------------------------')\n #log.debug(response_dict)\n #log.debug('------------------------------------------')\n result['status'] = 1\n result['message']=response_dict\n return_status = 200\n except ValueError as e:\n result = {}\n log.exception('Value Exception while processing the request for search')\n result['status'] = 0\n return_status = 400\n result['message'] = e.args[0]\n except :\n result = {}\n log.exception('Exception while doing search')\n return_status = 500\n result['status'] = 0\n result['message'] = 'Internal Error has occurred while processing the request for search'\n finally:\n resp = Response(json.dumps(result), status=return_status, mimetype=\"application/json\")\n return resp", "def test_get_small_and_light_enrollment_by_seller_sku(self):\n pass", "def fetch_stats() -> dict[Any, Any] | None:\n attempts = 4\n statistic: dict[str, dict[Any, Any]] = {}\n encoded_key = str(\n b64encode(bytes(str(wk_i.waka_key), 'utf-8')), 'utf-8'\n )\n logger.debug(\n f'Pulling WakaTime stats from {\" \".join(wk_i.time_range.split(\"_\"))}'\n )\n while attempts > 0:\n resp_message, fake_ua = '', cryptogenic.choice(\n [str(fake.user_agent()) for _ in range(5)]\n )\n # making a request\n if (resp := rq_get(\n 
url=f'{str(wk_i.api_base_url).rstrip(\"/\")}/v1/users/current/stats/{wk_i.time_range}',\n headers={\n 'Authorization': f'Basic {encoded_key}',\n 'User-Agent': fake_ua,\n },\n timeout=30 * (5 - attempts)\n )).status_code != 200:\n resp_message += f' • {conn_info}' if (\n conn_info := resp.json().get(\"message\")\n ) else ''\n logger.debug(\n f'API response #{5 - attempts}: {resp.status_code} • {resp.reason}{resp_message}'\n )\n if resp.status_code == 200 and (statistic := resp.json()):\n logger.debug('Fetched WakaTime statistics')\n break\n logger.debug(f'Retrying in {30 * (5 - attempts )}s ...')\n sleep(30 * (5 - attempts))\n attempts -= 1\n\n if err := (statistic.get('error') or statistic.get('errors')):\n logger.error(f'{err}\\n')\n sys.exit(1)\n\n print()\n return statistic.get('data')", "def test_query(self):\n\n size = 10\n\n start = (long(time.time()) / 60) * 60\n end = start + size\n times = range(start, end)\n values1 = [1] * size\n values2 = [2] * size\n\n curr = start\n items = []\n timeserie1 = 'test-serie-1'\n timeserie2 = 'test-serie-2'\n for index in range(size):\n items.append({\n 'timeserie': timeserie1,\n 'time': curr,\n 'value': values1[index]\n })\n items.append({\n 'timeserie': timeserie2,\n 'time': curr,\n 'value': values2[index]\n })\n curr += 1\n\n client = boto3.client('dynamodb', endpoint_url=self.DYNAMODB_ENDPOINT)\n for item in items:\n response = client.put_item(\n TableName=granularities.get_granularity_table_text(granularities.SECOND),\n Item=self.item_to_dynamo_db_item(item)\n )\n logging.debug(response)\n time.sleep(0.5)\n\n event = {\n 'timeseries': [timeserie1, timeserie2],\n 'start': start,\n 'end': end,\n 'granularity': granularities.SECOND\n }\n # Since this is executed outside the lambda env, we must configure dynamodb\n # manually\n\n lambda_database.configure_dynamodb()\n query_response = lambda_database.query(event, None)\n\n self.assertEqual(query_response.get(timeserie1), zip(times, values1))\n self.assertEqual(query_response.get(timeserie2), zip(times, values2))", "def track_request(params):\n\n if len(params) == 0:\n number_of_orders = \"10\"\n field = None\n from_date = None\n condition_1 = None\n to_date = None\n condition_2 = None\n status = None\n value = None\n condition_3 = None\n else:\n number_of_orders = params['number_of_orders'] if len(params['number_of_orders']) != 0 else \"20\"\n field = \"created_at\"\n from_date = params['from_date'] + \" 00:00:00\" if len(params['from_date']) != 0 else \"2000-01-01 00:00:00\" \n condition_1 = \"gteq\"\n to_date = params['to_date'] + \" 23:59:59\" if len(params['to_date']) != 0 else \"2100-12-31 23:59:59\"\n condition_2 = \"lteq\"\n status = None if params['status'] == \"None\" else \"status\"\n value = None if params['status'] == \"None\" else params['status']\n condition_3 = None if params['status'] == \"None\" else \"eq\"\n\n generate_request = oAuth_magento()\n\n payload = { \"searchCriteria[filter_groups][0][filters][0][field]\": field,\n \"searchCriteria[filter_groups][0][filters][0][value]\": from_date,\n \"searchCriteria[filter_groups][0][filters][0][condition_type]\": condition_1,\n\n \"searchCriteria[filter_groups][1][filters][0][field]\": field,\n \"searchCriteria[filter_groups][1][filters][0][value]\": to_date,\n \"searchCriteria[filter_groups][1][filters][0][condition_type]\": condition_2,\n\n \"searchCriteria[filter_groups][2][filters][0][field]\": status,\n \"searchCriteria[filter_groups][2][filters][0][value]\": value,\n 
\"searchCriteria[filter_groups][2][filters][0][condition_type]\": condition_3,\n \n \"searchCriteria[pageSize]\": number_of_orders,\n \"searchCriteria[sortOrders][0][field]\":\"created_at\",\n \"fields\": \"items[increment_id,base_currency_code,grand_total,created_at,status,billing_address[company,firstname,lastname]]\",\n }\n\n response = requests.request(\"GET\", url=generate_request[0], headers=generate_request[1], params=payload)\n # with open('temp_files/magento_orders_test.json','w') as f:\n # f.write(response.text)\n json_response = json.loads(response.text)\n for ele in json_response['items']:\n for key, val in ele.items():\n if key == 'billing_address':\n name_container = ele.pop(key)\n try:\n ele['purchasing_institute'] = name_container['company']\n except:\n ele['purchasing_institute'] = name_container['firstname'] + ' ' + name_container['lastname']\n else:\n pass\n col_headers = list((json_response['items'][0]).keys())\n return json_response['items'], col_headers", "def request_device_readings_quartiles(device_uuid):\n\n if request.data:\n post_data = json.loads(request.data)\n type = post_data.get('type', None)\n if not type or type not in ('temperature', 'humidity'):\n return 'error on the required type data', 400\n start = post_data.get('start', None)\n if not start:\n return 'error on the required start data', 400\n end = post_data.get('end', None)\n if not end:\n return 'error on the required end data', 400\n else:\n return 'missing data in the request parameters', 400\n\n # Set the db that we want and open the connection\n if app.config['TESTING']:\n conn = sqlite3.connect('test_database.db')\n else:\n conn = sqlite3.connect('database.db')\n conn.row_factory = sqlite3.Row\n cur = conn.cursor()\n\n sql = 'SELECT r.value from readings r WHERE r.type = ? AND r.device_uuid = ? AND r.date_created >= ? 
AND r.date_created <= ?'\n params = [type, device_uuid, start, end]\n\n sql += 'ORDER BY r.value'\n\n # Execute the query\n cur.execute(sql, params)\n rows = [row[0] for row in cur.fetchall()]\n\n mid = len(rows) // 2\n\n if (len(rows) % 2 == 0):\n # even\n lowerQ = median(rows[:mid])\n upperQ = median(rows[mid:])\n else:\n # odd\n lowerQ = median(rows[:mid]) # same as even\n upperQ = median(rows[mid + 1:])\n\n return str(lowerQ) + \",\" + str(upperQ), 200", "def poll_results(self, session_key, sort_type = None, sort_order = \"desc\", duration = None, include_carriers = None,\n\t\texclude_carriers = None, origin_airports = None, destination_airports = None, stops = \"0\",\n\t\toutbound_depart_time = None, outbound_depart_start_time = None, outbound_depart_end_time = None,\n\t\toutbound_arrive_start_time = None, outbound_arrive_end_time = None, inbound_depart_time = None,\n\t\tinbound_depart_start_time = None, inbound_depart_end_time = None, inbound_arrive_start_time = None,\n\t\tinbound_arrive_end_time = None, page_index = 0, page_size = 10):\n\n\t\turl = \"{}/pricing/uk2/v1.0/{}\".format(self.base_url, session_key)\n\t\tif sort_type != None:\n\t\t\turl += \"\\\\?sortType={}\".format(sort_type)\n\t\tif sort_order != None:\n\t\t url += \"\\\\?sortOrder={}\".format(sort_order)\n\t\tif duration != None:\n\t\t url += \"\\\\?duration={}\".format(duration)\n\t\tif include_carriers != None:\n\t\t url += \"\\\\?includeCarriers={}\".format(include_carriers)\n\t\tif exclude_carriers != None:\n\t\t url += \"\\\\?excludeCarriers={}\".format(exclude_carriers)\n\t\tif origin_airports != None:\n\t\t url += \"\\\\?originAirports={}\".format(origin_airports)\n\t\tif destination_airports != None:\n\t\t url += \"\\\\?destinationAirports={}\".format(destination_airports)\n\t\tif stops == \"0\":\n\t\t url += \"\\\\?stops={}\".format(stops)\n\t\tif outbound_depart_time != None:\n\t\t url += \"\\\\?outboundDepartTime={}\".format(outbound_depart_time)\n\t\tif outbound_depart_start_time != None:\n\t\t url += \"\\\\?outboundDepartStartTime={}\".format(outbound_depart_start_time)\n\t\tif outbound_depart_end_time != None:\n\t\t url += \"\\\\?outboundDepartEndTime={}\".format(outbound_depart_end_time)\n\t\tif outbound_arrive_start_time != None:\n\t\t url += \"\\\\?outboundArriveStartTime={}\".format(outbound_arrive_start_time)\n\t\tif outbound_arrive_end_time != None:\n\t\t url += \"\\\\?outboundArriveEndTime={}\".format(outbound_arrive_end_time)\n\t\tif inbound_depart_time != None:\n\t\t url += \"\\\\?inboundDepartTime={}\".format(inbound_depart_time)\n\t\tif inbound_depart_start_time != None:\n\t\t url += \"\\\\?inboundDepartStartTime={}\".format(inbound_depart_start_time)\n\t\tif inbound_depart_end_time != None:\n\t\t url += \"\\\\?inboundDepartEndTime={}\".format(inbound_depart_end_time)\n\t\tif inbound_arrive_start_time != None:\n\t\t url += \"\\\\?inboundArriveStartTime={}\".format(inbound_arrive_start_time)\n\t\tif inbound_arrive_end_time != None:\n\t\t url += \"\\\\?inboundArriveEndTime={}\".format(inbound_arrive_end_time)\n\t\tif page_index != 0:\n\t\t url += \"\\\\?pageIndex={}\".format(page_index)\n\t\tif page_size != 1:\n\t\t url += \"\\\\?pageSize={}\".format(page_size)\n\n\t\tresponse = unirest.get(url,\n\t\t\theaders = {\n\t\t\t\t\"X-RapidAPI-Key\": self.rapid_key\n\t\t\t}\n\t\t)\n\n\t\treturn response", "def query(self, url=False):\n if time.time() - self.lastUpdate > self.rateLimit or url:\n # Ignore rate limit if manual url\n self.status = False\n self.preQuery()\n try:\n response = requests.get(url 
or self.request_url, auth=self._auth)\n except Exception as e:\n logger.warning(e)\n return self.status\n self.lastUpdate += 5 if response.status_code == 202 else 0\n if response.status_code == 200:\n self._handle_response(response)\n self.postQuery()\n self.lastUpdate = time.time()\n self.status = True\n else:\n pass\n else:\n self.postQuery()\n return self.status", "def main(event, context):\n r = requests.get(yql_url)\n\n if r.status_code == 200:\n content = r.content\n t = datetime.now()\n filename = 'iran_' + t.strftime('%Y-%m-%dT%H-%M-%S') + '.json'\n put_data_in_s3(content, filename)\n\n data_dict = json.loads(content)\n data = data_dict.get('list')\n\n # start inserting data into timeseries database\n write_wind_data_to_csv(data)\n conn = psycopg2.connect(dbname='climateTSDB', host=DB_HOST_NAME,\n user=USERNAME, password=PASS)\n cur = conn.cursor()\n f = open('/tmp/wind_data.csv', 'r')\n cur.copy_from(f, 'windts', sep=',', columns=['speed', 'degree',\n 'utc_time', 'latitude', 'longitude'])\n conn.commit()\n try:\n os.remove('/tmp/wind_data.csv')\n except OSError:\n pass\n cur.close()\n\n write_temperature_data_to_csv(data)\n conn = psycopg2.connect(dbname='climateTSDB', host=DB_HOST_NAME,\n user=USERNAME, password=PASS)\n cur = conn.cursor()\n f = open('/tmp/temperature_data.csv', 'r')\n cur.copy_from(f, 'temperaturets', sep=',', columns=['temperature'\n ,'utc_time', 'latitude', 'longitude'])\n conn.commit()\n try:\n os.remove('/tmp/temperature_data.csv')\n except OSError:\n pass\n cur.close()\n\n write_pressure_data_to_csv(data)\n conn = psycopg2.connect(dbname='climateTSDB', host=DB_HOST_NAME,\n user=USERNAME, password=PASS)\n cur = conn.cursor()\n f = open('/tmp/pressure_data.csv', 'r')\n cur.copy_from(f, 'pressurets', sep=',', columns=['pressure'\n ,'utc_time', 'latitude', 'longitude'])\n conn.commit()\n try:\n os.remove('/tmp/pressure_data.csv')\n except OSError:\n pass\n cur.close()\n\n write_humidity_data_to_csv(data)\n conn = psycopg2.connect(dbname='climateTSDB', host=DB_HOST_NAME,\n user=USERNAME, password=PASS)\n cur = conn.cursor()\n f = open('/tmp/humidity_data.csv', 'r')\n cur.copy_from(f, 'humidityts', sep=',', columns=['humidity'\n ,'utc_time', 'latitude', 'longitude'])\n conn.commit()\n try:\n os.remove('/tmp/humidity_data.csv')\n except OSError:\n pass\n cur.close()\n\n write_condition_data_to_csv(data)\n conn = psycopg2.connect(dbname='climateTSDB', host=DB_HOST_NAME,\n user=USERNAME, password=PASS)\n cur = conn.cursor()\n f = open('/tmp/condition_data.csv', 'r')\n cur.copy_from(f, 'conditionts', sep=',', columns=['condition'\n ,'utc_time', 'latitude', 'longitude'])\n conn.commit()\n try:\n os.remove('/tmp/condition_data.csv')\n except OSError:\n pass\n cur.close()", "def pull_data(stop_event):\r\n logger = logging.getLogger(__name__)\r\n\r\n # List of current formats supported\r\n currency_list = ['https://www.bitstamp.net/api/v2/ticker_hour/btceur', 'https://www.bitstamp.net/api/v2/ticker_hour/btcusd',\r\n 'https://www.bitstamp.net/api/v2/ticker_hour/ethusd', 'https://www.bitstamp.net/api/v2/ticker_hour/etheur']\r\n\r\n # Loop until told otherwise!\r\n while not stop_event.is_set():\r\n for currency in currency_list:\r\n res = requests.get(currency)\r\n try:\r\n res.raise_for_status()\r\n except requests.exceptions.HTTPError as e:\r\n # Not 200\r\n logger.error(str(e))\r\n continue\r\n\r\n # Get the end characters to dertermine the type e.g. 
btceur, ethusd, etc...\r\n currency_type = (currency.rpartition('/')[-1])\r\n logger.info('The Curreny type: ' + currency_type)\r\n\r\n if currency_type == 'btceur':\r\n table = 'btceur'\r\n elif currency_type == 'btcusd':\r\n table = 'btcusd'\r\n elif currency_type == 'ethusd':\r\n table = 'ethusd'\r\n elif currency_type == 'etheur':\r\n table = 'etheur'\r\n else:\r\n table = None\r\n\r\n # Extract Data and Fields\r\n data = res.json()\r\n field_list = data.keys()\r\n logger.info(field_list)\r\n value_list = data.values()\r\n logger.info(value_list)\r\n\r\n # Write to DB\r\n db_commit(table, field_list, value_list)\r\n # Cannot make more than 600 requests per 10 minutes or they will ban your IP address.\r\n # Will in time get real time using their websocket API.\r\n time.sleep(5)", "def report_data(request):\n if request.method == \"GET\":\n params = request.query_params\n async_result = None\n all_providers = False\n provider_uuid = params.get(\"provider_uuid\")\n provider_type = params.get(\"provider_type\")\n schema_name = params.get(\"schema\")\n start_date = params.get(\"start_date\")\n end_date = params.get(\"end_date\")\n queue_name = params.get(\"queue\") or PRIORITY_QUEUE\n if provider_uuid is None and provider_type is None:\n errmsg = \"provider_uuid or provider_type must be supplied as a parameter.\"\n return Response({\"Error\": errmsg}, status=status.HTTP_400_BAD_REQUEST)\n if queue_name not in QUEUE_LIST:\n errmsg = f\"'queue' must be one of {QUEUE_LIST}.\"\n return Response({\"Error\": errmsg}, status=status.HTTP_400_BAD_REQUEST)\n\n if provider_uuid == \"*\":\n all_providers = True\n elif provider_uuid:\n with ProviderDBAccessor(provider_uuid) as provider_accessor:\n provider = provider_accessor.get_type()\n else:\n provider = provider_type\n\n if start_date is None:\n errmsg = \"start_date is a required parameter.\"\n return Response({\"Error\": errmsg}, status=status.HTTP_400_BAD_REQUEST)\n\n if not all_providers:\n if schema_name is None:\n errmsg = \"schema is a required parameter.\"\n return Response({\"Error\": errmsg}, status=status.HTTP_400_BAD_REQUEST)\n\n if provider is None:\n errmsg = \"Unable to determine provider type.\"\n return Response({\"Error\": errmsg}, status=status.HTTP_400_BAD_REQUEST)\n\n if provider_type and provider_type != provider:\n errmsg = \"provider_uuid and provider_type have mismatched provider types.\"\n return Response({\"Error\": errmsg}, status=status.HTTP_400_BAD_REQUEST)\n\n async_result = update_summary_tables.s(\n schema_name, provider, provider_uuid, start_date, end_date, queue_name=queue_name\n ).apply_async(queue=queue_name or PRIORITY_QUEUE)\n else:\n async_result = update_all_summary_tables.delay(start_date, end_date)\n return Response({REPORT_DATA_KEY: str(async_result)})\n\n if request.method == \"DELETE\":\n params = request.query_params\n\n schema_name = params.get(\"schema\")\n provider = params.get(\"provider\")\n provider_uuid = params.get(\"provider_uuid\")\n simulate = params.get(\"simulate\")\n\n if schema_name is None:\n errmsg = \"schema is a required parameter.\"\n return Response({\"Error\": errmsg}, status=status.HTTP_400_BAD_REQUEST)\n\n if provider is None:\n errmsg = \"provider is a required parameter.\"\n return Response({\"Error\": errmsg}, status=status.HTTP_400_BAD_REQUEST)\n\n if provider_uuid is None:\n errmsg = \"provider_uuid is a required parameter.\"\n return Response({\"Error\": errmsg}, status=status.HTTP_400_BAD_REQUEST)\n\n if simulate is not None and simulate.lower() not in (\"true\", 
\"false\"):\n errmsg = \"simulate must be a boolean.\"\n return Response({\"Error\": errmsg}, status=status.HTTP_400_BAD_REQUEST)\n\n if simulate is not None and simulate.lower() == \"true\":\n simulate = True\n else:\n simulate = False\n\n LOG.info(\"Calling remove_expired_data async task.\")\n\n async_result = remove_expired_data.delay(schema_name, provider, simulate, provider_uuid)\n\n return Response({\"Report Data Task ID\": str(async_result)})", "def prepareQuery(self, qid):\r\n \r\n connection = self.getConnection()\r\n cursor = connection.cursor()\r\n\r\n if self.granularity == 'day':\r\n extractTime = \"TO_CHAR(t.START_DATE, 'yyyy,mm,dd'), TO_CHAR(t.END_DATE, 'yyyy,mm,dd')\"\r\n elif self.granularity == 'year':\r\n extractTime = \"EXTRACT(YEAR FROM t.START_DATE), EXTRACT(YEAR FROM t.END_DATE)\"\r\n \r\n cursor.execute(\"SELECT t.TYPE, t.GEOMETRY.Get_WKT(), \" + extractTime + \",\" + \\\r\n\"t.DATE_TYPE, t.Z_MIN, t.Z_MAX FROM \" + self.queriesTable + \"\"\" t \r\nWHERE id = \"\"\" + qid + \"\"\" AND dataset = '\"\"\" + self.dataset.lower() + \"'\")\r\n\r\n self.qtype, self.wkt, self.start_date, self.end_date, self.timeType, self.ozmin, self.ozmax = cursor.fetchall()[0]\r\n\r\n if self.wkt is not None:\r\n self.wkt = str(self.wkt)\r\n connection.close()\r\n \r\n # Setting up the missing variables along with transformations to the time encoding. \r\n if self.granularity == 'day':\r\n if self.start_date is None and self.end_date is None:\r\n times = [[self.mint * self.scale, self.maxt * self.scale]]\r\n elif self.start_date is not None and self.end_date is not None:\r\n self.start_date = map(int, self.start_date.split(','))\r\n self.end_date = map(int, self.end_date.split(','))\r\n times = [[reader.daySinceEpoch(self.start_date[0], \r\n self.start_date[1], self.start_date[2]) * self.scale, \r\n reader.daySinceEpoch(self.end_date[0], \r\n self.end_date[1], self.end_date[2]) * self.scale]]\r\n elif self.end_date is None:\r\n self.start_date = map(int, self.start_date.split(','))\r\n times = [[reader.daySinceEpoch(self.start_date[0], self.start_date[1], self.start_date[2]) * self.scale, None]]\r\n else:\r\n if self.start_date is None and self.end_date is None:\r\n times = [[self.mint * self.scale, self.maxt * self.scale]]\r\n elif self.start_date is not None and self.end_date is not None:\r\n times = [[self.start_date * self.scale, self.end_date * self.scale]]\r\n elif self.end_date is None:\r\n times = [[self.start_date * self.scale, None]]\r\n\r\n if self.ozmin is None or self.ozmax is None: #no selectivity on z\r\n zmin = int(round((self.minz - self.offz)/self.scalez, 0))\r\n zmax = int(round((self.maxz - self.offz)/self.scalez, 0))\r\n else:\r\n zmin = int(round((self.ozmin - self.offz)/self.scalez, 0))\r\n zmax = int(round((self.ozmax - self.offz)/self.scalez, 0))\r\n\r\n # Preparing the different types of queries: Space and space - time\r\n continuous = True\r\n if self.wkt:\r\n if self.qtype.replace(' ', '').lower() != 'nn-search':\r\n ordinates = list(loads(self.wkt).exterior.coords)\r\n else:\r\n ordinates = list(loads(self.wkt).coords)\r\n \r\n if self.case == 1: #lxyt\r\n geometry = Polygon(self.list2ScaleOffset(ordinates)).wkt\r\n if self.qtype.lower() == 'space':\r\n coarser = self.params[0] #0, 0\r\n else:\r\n coarser = self.params[1] #4, 4\r\n \r\n elif self.case == 2: #lxyzt\r\n geometry = Polygon3D(Polygon(self.list2ScaleOffset(ordinates)), zmin, zmax)\r\n\r\n if self.qtype.lower() == 'space':\r\n coarser = self.params[2] #4, 4\r\n else:\r\n coarser = self.params[3] #3, 
3\r\n\r\n elif self.case == 3: #dxyt\r\n geom = Polygon(self.list2ScaleOffset(ordinates)) \r\n if times[0][1] is None:\r\n continuous = False\r\n times[0][1] = times[0][0]\r\n coarser = self.params[4] #1, 8\r\n elif self.qtype.lower() == 'space':\r\n if times[0][0] == times[0][1]:\r\n continuous = False\r\n coarser = self.params[5] #-2, 1\r\n else:\r\n coarser = self.params[5] - 7\r\n elif self.timeType == 'continuous':\r\n coarser = self.params[6] #0, 2\r\n elif self.timeType == 'discrete':\r\n coarser = self.params[7] #3, 8\r\n \r\n if self.timeType == 'discrete' and (self.start_date is not None) and (self.end_date is not None):\r\n geometry = [dynamicPolygon(geom, times[0][0], times[0][0]),\r\n dynamicPolygon(geom, times[0][1], times[0][1])]\r\n else:\r\n geometry = dynamicPolygon(geom, times[0][0], times[0][1]) \r\n \r\n elif self.case == 4: #dxyzt\r\n geom = Polygon(self.list2ScaleOffset(ordinates))\r\n if times[0][1] == None:\r\n continuous = False\r\n coarser = self.params[8] #4, 9\r\n times[0][1] = times[0][0]\r\n elif self.qtype.lower() == 'space':\r\n if times[0][0] == times[0][1]:\r\n coarser = self.params[9] #0, 2\r\n else:\r\n coarser = self.params[9] - 4\r\n elif self.timeType == 'continuous':\r\n coarser = self.params[10] #0, 2\r\n elif self.timeType == 'discrete':\r\n coarser = self.params[11] #4, 9\r\n \r\n if self.timeType == 'discrete' and self.start_date is not None and self.end_date is not None:\r\n geometry = [Polygon4D(geom, zmin, zmax, times[0][0], times[0][0]),\r\n Polygon4D(geom, zmin, zmax, times[0][1], times[0][1])]\r\n else:\r\n geometry = Polygon4D(geom, zmin, zmax, times[0][0], times[0][1])\r\n \r\n else: #time queries\r\n if self.case == 1:\r\n geometry = []\r\n \r\n elif self.case == 2:\r\n geometry = []\r\n \r\n elif self.case == 3:\r\n temp_geom = self.list2ScaleOffset([(self.minx, self.miny), (self.maxx, self.maxy)])\r\n geom = box(temp_geom[0][0], temp_geom[0][1], temp_geom[1][0], temp_geom[1][1])\r\n \r\n if times[0][1] is None:\r\n times[0][1] = times[0][0]\r\n coarser = self.params[12] #3, 7\r\n continuous = False\r\n elif self.timeType == 'continuous':\r\n coarser = self.params[13] #0, 3\r\n else:\r\n coarser = self.params[14] #3, 8\r\n \r\n if self.timeType == 'discrete' and self.start_date is not None and self.end_date is not None:\r\n geometry = [dynamicPolygon(geom, times[0][0], times[0][0]),\r\n dynamicPolygon(geom, times[0][1], times[0][1])]\r\n else:\r\n geometry = dynamicPolygon(geom, times[0][0], times[0][1])\r\n\r\n elif self.case == 4:\r\n temp_geom = self.list2ScaleOffset([(self.minx, self.miny),(self.maxx, self.maxy)])\r\n geom = box(temp_geom[0][0], temp_geom[0][1], temp_geom[1][0], temp_geom[1][1])\r\n if times[0][1] is None:\r\n times[0][1] = times[0][0]\r\n coarser = self.params[15] #4, 12\r\n continuous = False\r\n elif self.timeType == 'continuous':\r\n coarser = self.params[16] #1, 3\r\n elif self.timeType == 'discrete':\r\n coarser = self.params[17] #4, 11\r\n \r\n if self.timeType == 'discrete' and self.start_date is not None and self.end_date is not None:\r\n geometry = [Polygon4D(geom, zmin, zmax, times[0][0], times[0][0]),\r\n Polygon4D(geom, zmin, zmax, times[0][1], times[0][1])]\r\n else: \r\n geometry = Polygon4D(geom, zmin, zmax, times[0][0], times[0][1])\r\n \r\n\r\n \"\"\"The final lines have to do with the way of posing the query to the \r\n database. Two options are possible:\r\n (a) sql: A SQL query is posed to the database. 
The number of ranges is\r\n limited by a maximum number.\r\n (b) join: The table is joined explicitly with a table containing the \r\n ranges.\"\"\"\r\n if geometry == []:\r\n mortonWhere, self.mortonJoinWhere, ranges, rangeTab, morPrep, insert, Levels = ('', '', 0, None, 0, 0, 0)\r\n else:\r\n if self.method == 'join':\r\n rangeTab = (self.rangeTable + qid).upper()\r\n ranges, morPrep, insert, Levels = self.join(geometry, coarser, rangeTab, continuous)\r\n mortonWhere = self.mortonJoinWhere\r\n elif self.method == 'sql':\r\n rangeTab, insert = None, 0\r\n mortonWhere, ranges, morPrep, Levels = self.sql(geometry, coarser, continuous)\r\n \r\n # if deep the time is in the morton code\r\n if self.integration == 'deep' or (self.start_date is None and self.end_date is None and self.integration == 'loose'): \r\n timeWhere = ''\r\n elif self.integration == 'loose': \r\n timeWhere = whereClause.addTimeCondition(times, 'time', self.timeType)\r\n \r\n return whereClause.getWhereStatement([timeWhere, mortonWhere]), ranges, morPrep, insert, Levels, rangeTab", "def query_tas_status(self, attributes):\n def _tas_status_callback(mqtt, userdata, msg):\n for k, v in attributes['values'].items():\n setattr(self, k, nested_get(literal_eval(msg.payload.decode('UTF-8')), v))\n s_topic = '{s_topic}/{stat_topic}'.format(stat_topic=attributes['stat_topic'], **self)\n c_topic = '{c_topic}/status'.format(**self)\n self.mqtt.message_callback_add(s_topic, _tas_status_callback)\n self.mqtt.connect(self.mqtt_host)\n self.mqtt.subscribe(s_topic)\n starttime = datetime.datetime.now()\n self.mqtt.publish(c_topic, attributes['status_payload'])\n # check and see if the last attribute has been found yet\n while getattr(self, list(attributes['values'].keys())[-1]) == '' and (datetime.datetime.now() - starttime).total_seconds() < loop_time:\n self.mqtt.loop(timeout=loop_time)\n self.mqtt.unsubscribe(s_topic)\n self.mqtt.message_callback_remove(s_topic)\n self.mqtt.disconnect()", "def get(\n self, resource_uri, metric_name, timespan=None, interval=None, aggregation=None, sensitivities=None, result_type=None, custom_headers=None, raw=False, **operation_config):\n # Construct URL\n url = self.get.metadata['url']\n path_format_arguments = {\n 'resourceUri': self._serialize.url(\"resource_uri\", resource_uri, 'str', skip_quote=True),\n 'metricName': self._serialize.url(\"metric_name\", metric_name, 'str')\n }\n url = self._client.format_url(url, **path_format_arguments)\n\n # Construct parameters\n query_parameters = {}\n if timespan is not None:\n query_parameters['timespan'] = self._serialize.query(\"timespan\", timespan, 'str')\n if interval is not None:\n query_parameters['interval'] = self._serialize.query(\"interval\", interval, 'duration')\n if aggregation is not None:\n query_parameters['aggregation'] = self._serialize.query(\"aggregation\", aggregation, 'str')\n if sensitivities is not None:\n query_parameters['sensitivities'] = self._serialize.query(\"sensitivities\", sensitivities, 'str')\n if result_type is not None:\n query_parameters['resultType'] = self._serialize.query(\"result_type\", result_type, 'ResultType')\n query_parameters['api-version'] = self._serialize.query(\"self.api_version\", self.api_version, 'str')\n\n # Construct headers\n header_parameters = {}\n header_parameters['Content-Type'] = 'application/json; charset=utf-8'\n if self.config.generate_client_request_id:\n header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())\n if custom_headers:\n header_parameters.update(custom_headers)\n if 
self.config.accept_language is not None:\n header_parameters['accept-language'] = self._serialize.header(\"self.config.accept_language\", self.config.accept_language, 'str')\n\n # Construct and send request\n request = self._client.get(url, query_parameters)\n response = self._client.send(request, header_parameters, stream=False, **operation_config)\n\n if response.status_code not in [200]:\n raise models.ErrorResponseException(self._deserialize, response)\n\n deserialized = None\n\n if response.status_code == 200:\n deserialized = self._deserialize('BaselineResponse', response)\n\n if raw:\n client_raw_response = ClientRawResponse(deserialized, response)\n return client_raw_response\n\n return deserialized", "def getSFData(self, since, stream=\"all\"):\r\n brId=None;\r\n queryStr1 =\"Select c.Id, c.CaseNumber, c.ClosedDate, c.OwnerId, c.Status from Case c \"\r\n if stream != \"all\":\r\n queryStr1 += \" WHERE ClosedDate = LAST_N_DAYS:%s AND X1st_Stream__c = %s order by ClosedDate\"%(since, stream)\r\n else:\r\n queryStr1 += \" WHERE ClosedDate = LAST_N_DAYS:%s order by ClosedDate\"%(since)\r\n \r\n #queryStr1 =\"Select c.Id, c.CaseNumber, c.ClosedDate, c.OwnerId, c.Status from Case c WHERE ClosedDate = LAST_N_DAYS:%s order by ClosedDate\"%since\r\n queryList1=self.cxn.query(queryStr1)\r\n \r\n if queryList1 not in BAD_INFO_LIST:\r\n found = len(queryList1)\r\n print 'Found %s Cases closed in the last %s days'%(found,since)\r\n return queryList1\r\n #for qr1 in queryList1:\r\n # brId=qr1.get('Id')\r\n else:\r\n print \"No Cases found in the last '%s' days\" %since\r\n return []", "def needs_by_status(cls):\n\n db = current.db\n s3db = current.s3db\n\n table = s3db.need_line\n etable = s3db.event_event\n ltable = s3db.event_event_need\n\n # Extract the data\n status = table.status\n number = table.id.count()\n query = (etable.closed == False) & \\\n (etable.id == ltable.event_id) & \\\n (ltable.need_id == table.need_id) & \\\n (table.deleted == False)\n\n rows = db(query).select(status, number, groupby = status)\n\n # Build data structure for chart renderer\n rows = dict((row[status], row[number]) for row in rows)\n data = []\n for code, label, color in cls.REQ_STATUS[::-1]: # clockwise\n value = rows.get(code)\n data.append({\"label\": s3_str(label),\n \"value\": value if value else 0,\n \"color\": color,\n \"filterKey\": code,\n })\n\n return data", "def get_chartdata():\n callback = bottle.request.query.get('callback')\n y_axis = bottle.request.query.get('y_axis').strip()\n w_acts = [\"action='%s'\" % act for act in bottle.request.query.get('actions').strip().split(',')]\n w_acts = 'AND (%s)' % ' OR '.join(w_acts) if w_acts else ''\n f_value = 'AVG(latency)' if y_axis.startswith('avg') else 'COUNT(timestamp)'\n atomic = 1 if y_axis in ['aops', 'avgl'] else 0\n\n db_conn = tools.get_db_conn('%s.db' % bottle.request.query.test_run_id)\n sql = 'SELECT test_run_status, timestamp_started, timestamp_completed FROM info LIMIT 1'\n status, started, finished = tools.db_query(db_conn, sql)[1][0]\n progress = int(float(finished) - float(started)) if finished \\\n else int(tools.get_timestamp() - float(started))\n\n sql = 'SELECT substr(timestamp, 0, 11), code, %s FROM recs ' % f_value + \\\n 'WHERE atomic=%s %s GROUP BY code, substr(timestamp, 0, 11) ' % (atomic, w_acts) + \\\n 'ORDER BY id DESC LIMIT 3600' # last 1 hour activity\n\n result = tools.db_query(db_conn, sql)[1] if finished else tools.db_query(db_conn, sql)[1][:-1]\n result = list(reversed(result))\n results = {str(abs(int(item[0]) - 
int(float(started)))):\n {'failed': 0, 'passed': 0, 'incomplete': 0} for item in result}\n for item in result: # item[0] - timestamp, item[1] - code (None if incomplete), item[2] - value\n timestamp = str(int(item[0]) - int(float(started)))\n value = item[2] or 0\n results[timestamp]['failed'] += value if item[1] and item[1] != 200 else 0\n results[timestamp]['passed'] += value if item[1] == 200 else 0\n results[timestamp]['incomplete'] += value if item[1] == None else 0\n results = [{'timestamp': key, 'failed': value['failed'], 'passed': value['passed'],\n 'incomplete': value['incomplete']} for key, value in results.items()]\n result = {bottle.request.query.slave: results, 'status': status,\n 'started': started, 'finished': finished or '(not finished)', 'progress': progress}\n return '{0}({1})'.format(callback, result)", "def find_sale_qty_in_duration(self,from_date,to_date,warehouse,location,product_id):\n if warehouse:\n query=\"\"\"select sum(product_uom_qty) as total, product_uom from stock_move mv \n Inner join stock_location sl on sl.id = mv.location_dest_id and sl.usage='customer'\n where state='done' and mv.location_id in (%s) and product_id = %s and \n warehouse_id= %s and date between '%s 00:00:00' and '%s 23:59:59' group by product_uom\n \"\"\"%(','.join(str(x) for x in location),product_id.id,warehouse.id,from_date,to_date)\n return_query=\"\"\"select sum(product_uom_qty) as total, product_uom from stock_move mv \n Inner join stock_location sl on sl.id = mv.location_id and sl.usage='customer'\n where state='done' and mv.location_dest_id in (%s) and product_id = %s and \n warehouse_id= %s and date between '%s 00:00:00' and '%s 23:59:59' group by product_uom\n \"\"\"%(','.join(str(x) for x in location),product_id.id,warehouse.id,from_date,to_date)\n else:\n query=\"\"\"select sum(product_uom_qty) as total,product_uom from stock_move mv \n Inner join stock_location sl on sl.id = mv.location_dest_id and sl.usage='customer'\n where state='done' and mv.location_id in (%s) and product_id = %s and \n date between '%s 00:00:00' and '%s 23:59:59' group by product_uom\n \"\"\"%(','.join(str(x) for x in location),product_id.id,from_date,to_date)\n return_query=\"\"\"select sum(product_uom_qty) as total,product_uom from stock_move mv \n Inner join stock_location sl on sl.id = mv.location_id and sl.usage='customer'\n where state='done' and mv.location_dest_id in (%s) and product_id = %s and \n date between '%s 00:00:00' and '%s 23:59:59' group by product_uom\n \"\"\"%(','.join(str(x) for x in location),product_id.id,from_date,to_date)\n self._cr.execute(query)\n result = self._cr.fetchall()\n uom_rec = self.env['product.uom']\n sale_qty = 0\n for r in result:\n factor_inv = uom_rec.browse(r[1]).factor_inv\n sale_qty += r[0] * factor_inv\n # Return Qty\n self._cr.execute(return_query)\n return_result = self._cr.fetchall()\n sale_return_qty = 0\n for re in return_result:\n factor_inv = uom_rec.browse(re[1]).factor_inv\n sale_return_qty += re[0] * factor_inv\n sale_qty -= sale_return_qty\n return sale_qty", "def get_order_details(game_id: int, user_id: int, start_time: float = None, end_time: float = None):\n start_time, end_time = get_time_defaults(game_id, start_time, end_time)\n query = \"\"\"\n SELECT\n o.id as order_id,\n relevant_orders.status,\n relevant_orders.order_status_id,\n symbol,\n relevant_orders.timestamp,\n buy_or_sell,\n quantity,\n order_type,\n time_in_force,\n price,\n relevant_orders.clear_price\n FROM orders o\n INNER JOIN (\n SELECT os_full.id,\n os_full.timestamp,\n 
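The grouping pass in get_chartdata above is easier to see without the bottle and sqlite plumbing: rows of (timestamp, http_code, value) are bucketed by seconds since the run started and split into passed/failed/incomplete series. A pure-Python sketch of just that step; the row tuple shape is an assumption.

from collections import defaultdict

def bucket_rows(rows, started):
    """rows: iterable of (unix_ts, code_or_None, value)."""
    buckets = defaultdict(lambda: {"passed": 0, "failed": 0, "incomplete": 0})
    for ts, code, value in rows:
        offset = int(ts) - int(float(started))
        if code is None:
            buckets[offset]["incomplete"] += value or 0   # still in flight
        elif code == 200:
            buckets[offset]["passed"] += value or 0
        else:
            buckets[offset]["failed"] += value or 0
    return [{"timestamp": k, **v} for k, v in sorted(buckets.items())]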
os_full.order_id,\n os_full.clear_price,\n os_full.status,\n os_relevant.order_status_id\n FROM order_status os_full\n INNER JOIN (\n SELECT os.order_id, grouped_os.max_id as order_status_id\n FROM order_status os\n INNER JOIN\n (SELECT order_id, max(id) as max_id\n FROM order_status\n GROUP BY order_id) grouped_os\n ON\n os.id = grouped_os.max_id\n WHERE os.status NOT IN ('cancelled', 'expired')\n ) os_relevant\n ON os_relevant.order_id = os_full.order_id\n ) relevant_orders\n ON relevant_orders.order_id = o.id\n WHERE game_id = %s AND user_id = %s AND relevant_orders.timestamp >= %s AND relevant_orders.timestamp <= %s;\"\"\"\n\n with engine.connect() as conn:\n df = pd.read_sql(query, conn, params=[game_id, user_id, start_time, end_time])\n\n df = pivot_order_details(df)\n df[\"status\"] = \"fulfilled\"\n df.loc[df[\"timestamp_fulfilled\"].isna(), \"status\"] = \"pending\"\n return df", "def _get_from_datastore( uuid ):\n return db.Query(Stats).filter('uuid =', uuid).get()", "def process(self, request, **kwargs):\n from_timestamp = int(request.POST['from'])\n to_timestamp = int(request.POST['to'])\n\n return JsonResponse(data=self.get_statistic_per_day(from_timestamp, to_timestamp, kwargs['course_key']))", "def get_stock_level_history(cls, sku):\n records = cls._get_stock_level_history(sku)\n return [\n {\n \"timestamp\": record.timestamp,\n \"stock_level\": record.stock_level,\n \"text\": record.text,\n \"relative_change\": record.relative_change,\n }\n for record in records\n ]", "def get_smps(client, start, end):\n # load image or load from bigquery\n smps_query_str = \"SELECT * FROM cfog.sharp_smps \" +\\\n f\"WHERE timestamp BETWEEN '{start}' AND '{end}' \" +\\\n \"ORDER BY timestamp ASC\"\n print(f\"Executing bigquery query string: \")\n print(smps_query_str + '\\n')\n\n smps_query_job = client.query(smps_query_str)\n smps_query_job.result()\n smps_data = smps_query_job.to_dataframe()\n\n values = np.array(smps_data['values'].values.tolist()).T\n lowBouDia = np.array(smps_data['lowBouDia'].values.tolist()).T\n highBouDia = np.array(smps_data['highBouDia'].values.tolist()).T\n midDia = np.array(smps_data['midDia'].values.tolist()).T\n smps_data_df = smps_data.drop(columns=['values','lowBouDia','highBouDia','midDia']).set_index('timestamp')\n smps_data_out = dict(values=values,\n lowBouDia=lowBouDia,\n highBouDia=highBouDia,\n midDia=midDia,\n df=smps_data_df)\n\n print(f\"Query complete. 
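The nested join in get_order_details above is the standard "latest row per group" idiom: select max(id) per order_id, join back to recover the full row, then filter out terminal statuses. A self-contained sqlite3 illustration with a made-up order_status table.

import sqlite3

conn = sqlite3.connect(":memory:")
conn.executescript("""
    CREATE TABLE order_status (id INTEGER PRIMARY KEY, order_id INT, status TEXT);
    INSERT INTO order_status (order_id, status) VALUES
        (1, 'pending'), (1, 'fulfilled'), (2, 'pending'), (2, 'cancelled');
""")
rows = conn.execute("""
    SELECT os.order_id, os.status
    FROM order_status os
    JOIN (SELECT order_id, MAX(id) AS max_id
          FROM order_status GROUP BY order_id) latest
      ON os.id = latest.max_id
    WHERE os.status NOT IN ('cancelled', 'expired')
""").fetchall()
print(rows)  # [(1, 'fulfilled')]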
Total number of data entries: {smps_data_out['df'].shape[0]}.\\n\\n\")\n return smps_data_out", "def test_get_events_history_filtering_by_timestamp(rotkehlchen_api_server: 'APIServer'):\n tx_hex = deserialize_evm_tx_hash('0xb226ddb8cbb286a7a998a35263ad258110eed5f923488f03a8d890572cd4608e') # noqa: E501\n ethereum_inquirer = rotkehlchen_api_server.rest_api.rotkehlchen.chains_aggregator.ethereum.node_inquirer # noqa: E501\n database = rotkehlchen_api_server.rest_api.rotkehlchen.data.db\n get_decoded_events_of_transaction(\n evm_inquirer=ethereum_inquirer,\n database=database,\n tx_hash=tx_hex,\n )\n # Call time range\n from_timestamp = 1627401169\n to_timestamp = 1627401170\n async_query = random.choice([False, True])\n with patch(\n 'rotkehlchen.chain.ethereum.modules.sushiswap.sushiswap.Sushiswap.get_balances',\n side_effect=lambda _: {},\n ):\n response = requests.get(\n api_url_for(\n rotkehlchen_api_server,\n 'modulestatsresource',\n module='sushiswap',\n ),\n json={\n 'async_query': async_query,\n 'from_timestamp': from_timestamp,\n 'to_timestamp': to_timestamp,\n },\n )\n if async_query:\n task_id = assert_ok_async_response(response)\n outcome = wait_for_async_task(rotkehlchen_api_server, task_id, timeout=120)\n assert outcome['message'] == ''\n result = outcome['result']\n else:\n result = assert_proper_response_with_result(response)\n\n events_balances = result[TEST_EVENTS_ADDRESS_1]\n\n assert len(events_balances) == 1", "def _fetch(\n cls, url: str, headers: Mapping[str, str], params: Mapping[str, Any]\n ) -> Tuple[List[EventType], Optional[str]]:\n status_url = cls._post_query(url, headers, params)\n # Await a while before polling the results\n time.sleep(0.1)\n result_url = cls._poll_status(status_url, headers, params)\n data, headers = cls._get_results(result_url, headers, params)\n result = json.loads(data)\n return result, headers.get(\"x-next-token\")", "def get(self, request, *args, **kwargs):\n query = '''\n SELECT\n DATE(bs.\"CreatedAt\"),\n count(1)\n FROM\n blood_sample_bloodsample as bs\n WHERE now() - '36 hour'::interval > bs.\"CreatedAt\" AND \\\n bs.\"State\" in ('0','4')\n GROUP BY DATE(bs.\"CreatedAt\") order by DATE(bs.\"CreatedAt\")\n '''\n\n with connection.cursor() as cursor:\n cursor.execute(query)\n processed_not_ontime = [\n [row[0], row[1]]\n for row in cursor.fetchall() if row[1]\n ]\n\n return JsonResponse(\n {\n 'status': 200,\n 'processed_not_ontime': processed_not_ontime,\n 'processed_hours': settings.PROCESSING_HOURS,\n }\n )", "def fetch_data(swarming, start, end, state, tags):\n # Split the work in days. That's a lot of requests to do.\n queue = Queue.Queue()\n threads = []\n def run(start, cmd):\n data = json.loads(subprocess.check_output(cmd))\n queue.put((start, int(data['count'])))\n\n day = start\n while day != end:\n data = [\n ('start', int((day - _EPOCH).total_seconds())),\n ('end', int((day + datetime.timedelta(days=1)-_EPOCH).total_seconds())),\n ('state', state),\n ]\n for tag in tags:\n data.append(('tags', tag))\n cmd = [\n sys.executable, os.path.join(CLIENT_DIR, 'swarming.py'),\n 'query', '-S', swarming, 'tasks/count?' 
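_fetch above runs a three-step asynchronous-job protocol: POST the query, poll a status URL, then download from a result URL and read the x-next-token paging header. A condensed sketch using requests; the response field names (status_url, done, result_url) are assumptions standing in for the original's _post_query, _poll_status, and _get_results helpers.

import time
import requests

def fetch_async_job(url, params, poll_interval=0.5, timeout=60.0):
    """Submit a query, poll until done, then download the result page."""
    status_url = requests.post(url, json=params).json()["status_url"]
    deadline = time.monotonic() + timeout
    while time.monotonic() < deadline:
        status = requests.get(status_url).json()
        if status.get("done"):
            resp = requests.get(status["result_url"])
            return resp.json(), resp.headers.get("x-next-token")
        time.sleep(poll_interval)
    raise TimeoutError(f"job at {status_url} did not finish in {timeout}s")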
+ urllib.urlencode(data),\n ]\n thread = threading.Thread(target=run, args=(day.strftime('%Y-%m-%d'), cmd))\n thread.daemon = True\n thread.start()\n threads.append(thread)\n while len(threads) > 100:\n # Throttle a bit.\n for i, thread in enumerate(threads):\n if not thread.is_alive():\n thread.join()\n threads.pop(i)\n sys.stdout.write('.')\n sys.stdout.flush()\n break\n day = day + datetime.timedelta(days=1)\n\n while threads:\n # Throttle a bit.\n for i, thread in enumerate(threads):\n if not thread.is_alive():\n thread.join()\n threads.pop(i)\n sys.stdout.write('.')\n sys.stdout.flush()\n break\n print('')\n data = []\n while True:\n try:\n data.append(queue.get_nowait())\n except Queue.Empty:\n break\n return dict(data)", "def query(self,\n timestamp,\n window,\n filter,\n aligner='ALIGN_SUM',\n reducer='REDUCE_SUM',\n group_by=[]):\n measurement_window = SD.get_window(timestamp, window)\n aggregation = SD.get_aggregation(window,\n aligner=aligner,\n reducer=reducer,\n group_by=group_by)\n timeseries = self.client.list_time_series(\n self.parent, filter, measurement_window,\n monitoring_v3.enums.ListTimeSeriesRequest.TimeSeriesView.FULL,\n aggregation)\n LOGGER.debug(pprint.pformat(timeseries))\n return timeseries", "def _query_page(url: str, event: Literal['stats', 'withdrawals']) -> requests.Response:\n tries = 1\n max_tries = 3\n backoff = 60\n while True:\n log.debug(f'Querying beaconcha.in {event}: {url}')\n try:\n response = requests.get(url, timeout=CachedSettings().get_timeout_tuple())\n except requests.exceptions.RequestException as e:\n raise RemoteError(f'Beaconcha.in api request {url} failed due to {e!s}') from e\n\n if response.status_code == HTTPStatus.TOO_MANY_REQUESTS and tries <= max_tries:\n sleep_secs = backoff * tries / max_tries\n log.warning(\n f'Querying {url} returned 429. Backoff try {tries} / {max_tries}.'\n f' We are backing off for {sleep_secs}',\n )\n tries += 1\n gevent.sleep(sleep_secs)\n continue\n\n if response.status_code != 200:\n raise RemoteError(\n f'Beaconcha.in api request {url} failed with code: {response.status_code}'\n f' and response: {response.text}',\n )\n\n break # else all good - break from the loop\n\n return response", "def player_stats_query(week, player_list, session=s): \n #initialize lists\n pos_list = []\n team_list = []\n \n #cycle thru each player that is currently available\n for player in avail_player_key:\n #build the API url for the unique player key\n url_player = base_query_url+'league/'+leagueID+'/players;player_keys='+player+'/stats;type=week;week='+str(week)\n #convert API call to json\n raw = s.get(url_player, params={'format': 'json'}).json()\n #parse out the players details info (e.g. 
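_query_page above retries 429 responses with a pause that grows with the attempt number. The same policy distilled to a few lines, assuming plain requests against any JSON endpoint.

import time
import requests

def get_with_backoff(url, max_tries=3, backoff=60, timeout=30):
    """GET with rate-limit retries; the pause scales with the attempt number."""
    for attempt in range(1, max_tries + 1):
        resp = requests.get(url, timeout=timeout)
        if resp.status_code != 429 or attempt == max_tries:
            resp.raise_for_status()  # raises on the final 429 or any other error
            return resp
        time.sleep(backoff * attempt / max_tries)  # 20s, then 40s with the defaults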
position, owned, etc.)\n player_details = raw['fantasy_content']['league'][1]['players']['0']['player'][0]\n #parse out position from player details\n pos = player_details[9]['display_position'].upper()\n \n ## FILTER OUT NON-OFFENSE POSITIONS\n if pos not in ['QB', 'WR', 'RB', 'TE']:\n continue\n else:\n \n #parse out team from player_details\n team = player_details[6]['editorial_team_abbr'].upper()\n #append data to lists\n pos_list.append(pos)\n team_list.append(team)\n \n #initialize a stats list\n stats_list = []\n #parse out the player stats\n player_stats = raw['fantasy_content']['league'][1]['players']['0']['player'][1]['player_stats']['stats']\n #loop thru all of the various stats\n for stat in player_stats:\n stat_dict = stat['stat']\n stats_list.append(stat_dict)\n \n return stats_list", "def needs_by_status(cls):\n\n db = current.db\n\n # Extract the data\n table = current.s3db.req_need_line\n status = table.status\n number = table.id.count()\n query = (table.deleted == False)\n rows = db(query).select(status, number, groupby = status)\n\n # Build data structure for chart renderer\n rows = dict((row[status], row[number]) for row in rows)\n data = []\n for code, label, color in cls.REQ_STATUS:\n value = rows.get(code)\n data.append({\"label\": s3_str(label),\n \"value\": value if value else 0,\n \"color\": color,\n \"filterKey\": code,\n })\n\n return data", "def get_stravadata(weeks=0):\n # we need to convert the weeks into seconds from UNIX epoch for Strava\n aftersecs = 0\n if weeks > 0:\n afterdate = datetime.today() + timedelta(weeks=-weeks)\n aftersecs = afterdate.timestamp()\n payload = {'access_token': app.config['STRAVA_READ_KEY'],\n 'per_page': 200, 'after': aftersecs}\n strava_read = requests.get(\n 'https://www.strava.com/api/v3/athlete/activities', params=payload)\n records = strava_read.json()\n # masks the ISO8601 date so it's compatible with Airtable\n for record in records:\n record['start_date_local'] = record['start_date_local'][:10]\n return records", "def task_product_downshelf_update_productskusalestats(sku_id, sale_end_time):\n from shopback.items.models import ProductSku, SkuStock, \\\n ProductSkuSaleStats, gen_productsksalestats_unikey\n\n product = ProductSku.objects.get(id=sku_id).product\n stats_uni_key = gen_productsksalestats_unikey(sku_id)\n stats = ProductSkuSaleStats.objects.filter(uni_key=stats_uni_key, sku_id=sku_id)\n\n if stats.count() > 0:\n try:\n stat = stats[0]\n if not stat.sale_end_time:\n stat.sale_end_time = stat.product.offshelf_time\n stat.status = ProductSkuSaleStats.ST_FINISH\n stat.save(update_fields=[\"sale_end_time\", \"status\"])\n except IntegrityError as exc:\n logger.warn(\"IntegrityError - productskusalestat/init_waitassign_num | sku_id: %s, sale_end_time: %s\" % (\n sku_id, sale_end_time))\n raise task_product_downshelf_update_productskusalestats.retry(exc=exc)\n else:\n logger.warn(\"RepeatDownshelf- productskusalestat/init_waitassign_num | sku_id: %s, sale_end_time: %s\" % (\n sku_id, sale_end_time))", "def get_country_status_timebased(country, start_time, end_time):\n api = \"country/{}?from={}T00:00:00Z&to={}T00:00:00Z\".format(\n country, start_time, end_time)\n return _get_data(api)", "def fetch_testing(df, testing_start, curr_date_set, duration=1):\n dt_testing_start = dt.datetime.strptime(testing_start, '%Y-%m-%d')\n for _ in range(200):\n if testing_start in curr_date_set:\n dt_testing_end = dt_testing_start + dt.timedelta(days=duration)\n break\n else:\n dt_testing_start = next_weekday(dt_testing_start)\n try:\n 
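get_stravadata above hinges on one conversion: Strava's after filter wants seconds since the UNIX epoch, so "N weeks back" becomes a timestamp. The same two lines isolated as a helper, keeping the original's naive local time.

from datetime import datetime, timedelta

def weeks_ago_epoch(weeks: int) -> float:
    """Seconds since the UNIX epoch for a date `weeks` back from today."""
    return (datetime.today() - timedelta(weeks=weeks)).timestamp()

# usage: requests.get(url, params={'after': weeks_ago_epoch(4), 'per_page': 200})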
return df.loc[(df.date < dt_testing_end) & (df.date >= dt_testing_start)]\n\n except Exception:\n print('Error fetch_testing')\n return False", "def new_get_sells(self, cb_account_id):\n if cb_account_id == \"wallet_id_btc\":\n return MockAPIObject(\n data=[{\n \"created_at\": \"2018-01-25T11:24:52Z\",\n \"resource\": \"sell\",\n \"status\": \"completed\",\n \"amount\": {\n \"amount\": 0.06,\n \"currency\": \"BTC\"\n },\n \"total\": {\n \"amount\": 800,\n \"currency\": \"EUR\"\n },\n \"fees\": [{\n \"amount\": {\n \"amount\": 7,\n \"currency\": \"EUR\"\n }\n }]\n }])\n elif cb_account_id == \"wallet_id_ltc\":\n return MockAPIObject(\n data=[{\n \"created_at\": \"2018-01-23T07:23:54Z\",\n \"resource\": \"sell\",\n \"status\": \"completed\",\n \"amount\": {\n \"amount\": 0.3,\n \"currency\": \"LTC\"\n },\n \"total\": {\n \"amount\": 80,\n \"currency\": \"EUR\"\n },\n \"fees\": [{\n \"amount\": {\n \"amount\": 2,\n \"currency\": \"EUR\"\n }\n }]\n }])\n else:\n return MockAPIObject()", "def check_status():\n logger.debug(\"Starting the check_status() routine.\")\n\n url = \"https://www.toggl.com/api/v8/time_entries/current\"\n token = os.environ[\"TOGGL_API_TOKEN\"]\n auth_token = base64.b64encode(f\"{token}:api_token\".encode()).decode()\n resp = requests.get(url, headers={\"Authorization\": \"Basic \" + auth_token})\n\n cols = \"id\", \"duration\", \"description\"\n status = {k: v for k, v in (resp.json()[\"data\"] or {}).items() if k in cols}\n logger.debug(f\"{'Something' if 'id' in status else 'Nothing'} is being tracked.\")\n\n return status", "async def fetch_query(session, metric, provider, weight):\n provider = Provider(session=session, metrics_provider=provider)\n value = await provider.query(metric)\n return QueryResult(metric=metric, value=normalize(metric, value), weight=weight)", "def searchByKeywordPro(self, query, since=\"\", until=\"\", maxResults=None):\n\n tweetsList = []\n if(not maxResults):\n tweetList, next_token = self.api.search_30_day(\n environment_name=\"developer\", query=query, toDate=until, fromDate=since)\n tweetsList.append(tweetList)\n while(next_token):\n tweetList, next_token = self.api.search_30_day(\n environment_name=\"developer\", query=query, toDate=until, fromDate=since, next=next_token)\n tweetsList.append(tweetList)\n else:\n tweetList, next_token = self.api.search_30_day(\n environment_name=\"developer\", query=query, toDate=until, fromDate=since)\n tweetsList.append(tweetList)\n maxResults -= len(tweetList)\n while(next_token and maxResults > 0):\n tweetList, next_token = self.api.search_30_day(\n environment_name=\"developer\", query=query, toDate=until, fromDate=since, next=next_token)\n tweetsList.append(tweetList)\n maxResults -= len(tweetList)\n for status in tweetsList:\n createdDate = parser.parse(str(status._json[\"created_at\"]).strip())\n createdDate = createdDate.replace(\n tzinfo=pytz.utc) - createdDate.utcoffset()\n status_refined = {\n 'keyword': query,\n '_id': status._json[\"id\"],\n 'created_at': createdDate,\n 'tweetText': status._json[\"text\"],\n 'hashtags': status._json[\"entities\"][\"hashtags\"],\n 'userLoc': status._json[\"user\"][\"location\"],\n 'tweetGeo': status._json[\"geo\"],\n 'tweetCoordinates': status._json[\"coordinates\"],\n 'tweetPlace': status._json[\"place\"],\n 'retweet': {},\n 'quote': {},\n }\n if hasattr(status, \"quoted_status\"):\n if \"extended_tweet\" in status._json[\"quoted_status\"].keys():\n print(\"Taking the expanded tweet\")\n status_refined['tweetText'] = 
status._json[\"quoted_status\"][\"extended_tweet\"][\"full_text\"]\n else:\n status_refined['tweetText'] = status._json[\"quoted_status\"][\"text\"]\n status_refined['quote'] = {\n 'original_retweet_id': status._json[\"quoted_status\"][\"id\"],\n 'origUserLoc': status._json[\"quoted_status\"][\"user\"][\"location\"],\n 'origTweetLoc': status._json[\"quoted_status\"][\"geo\"],\n 'origTweetPlace': status._json[\"quoted_status\"][\"place\"],\n 'origTweetCoord': status._json[\"quoted_status\"][\"coordinates\"],\n 'origHashtags': status._json[\"quoted_status\"][\"entities\"][\"hashtags\"],\n 'retweet_count': status._json[\"quote_count\"],\n }\n elif hasattr(status, \"retweeted_status\"):\n print(status._json[\"retweeted_status\"])\n if \"extended_tweet\" in status._json[\"retweeted_status\"].keys():\n print(\"Taking the expanded tweet\")\n status_refined['tweetText'] = status._json[\"retweeted_status\"][\"extended_tweet\"][\"full_text\"]\n else:\n status_refined['tweetText'] = status._json[\"retweeted_status\"][\"text\"]\n status_refined['retweet'] = {\n 'original_retweet_id': status._json[\"retweeted_status\"][\"id\"],\n 'origUserLoc': status._json[\"retweeted_status\"][\"user\"][\"location\"],\n 'origTweetLoc': status._json[\"retweeted_status\"][\"geo\"],\n 'origTweetPlace': status._json[\"retweeted_status\"][\"place\"],\n 'origTweetCoord': status._json[\"retweeted_status\"][\"coordinates\"],\n 'origHashtags': status._json[\"retweeted_status\"][\"entities\"][\"hashtags\"],\n 'retweet_count': status._json[\"retweet_count\"],\n }\n elif hasattr(status, \"extended_tweet\"):\n if \"extended_tweet\" in status._json.keys():\n status_refined['tweetText'] = status._json[\"extended_tweet\"][\"full_text\"]\n self.tweets.append(status_refined)\n return self.tweets", "def fetch_production(\n zone_key: str = \"JP-KY\",\n session: Optional[Session] = None,\n target_datetime: Optional[datetime] = None,\n logger: Logger = getLogger(__name__),\n) -> Union[dict, list]:\n if target_datetime:\n raise NotImplementedError(\"This parser is not yet able to parse past dates\")\n data = {\n \"zoneKey\": zone_key,\n \"datetime\": None,\n \"production\": {\n \"biomass\": 0,\n \"coal\": 0,\n \"gas\": 0,\n \"hydro\": None,\n \"nuclear\": None,\n \"oil\": 0,\n \"solar\": None,\n \"wind\": None,\n \"geothermal\": None,\n \"unknown\": 0,\n },\n \"storage\": {},\n \"source\": \"www.kyuden.co.jp\",\n }\n # url for consumption and solar\n url = \"https://www.kyuden.co.jp/td_power_usages/pc.html\"\n r = get(url)\n r.encoding = \"utf-8\"\n html = r.text\n soup = BeautifulSoup(html, \"lxml\")\n # get hours, minutes\n ts = soup.find(\"p\", class_=\"puProgressNow__time\").get_text()\n hours = int(re.findall(r\"[\\d]+(?=時)\", ts)[0])\n minutes = int(re.findall(r\"(?<=時)[\\d]+(?=分)\", ts)[0])\n # get date\n ds = soup.find(\"div\", class_=\"puChangeGraph\")\n date = re.findall(r\"(?<=chart/chart)[\\d]+(?=.gif)\", str(ds))[0]\n # parse datetime\n dt = f\"{date[:4]}-{date[4:6]}-{date[6:]} {hours:02d}:{minutes:02d}\"\n dt = arrow.get(dt).replace(tzinfo=\"Asia/Tokyo\").datetime\n data[\"datetime\"] = dt\n # consumption\n cons = soup.find(\"p\", class_=\"puProgressNow__useAmount\").get_text()\n cons = re.findall(\n r\"(?<=使用量\\xa0)[-+]?[.]?[\\d]+(?:,\\d\\d\\d)*[\\.]?\\d*(?:[eE][-+]?\\d+)?(?=万kW/)\",\n cons,\n )\n cons = cons[0].replace(\",\", \"\")\n # convert from 万kW to MW\n cons = float(cons) * 10\n # solar\n solar = soup.find(\"td\", class_=\"puProgressSun__num\").get_text()\n # convert from 万kW to MW\n solar = float(solar) * 10\n\n # 
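searchByKeywordPro above repeats the first search_30_day call in both branches before looping on next_token; factoring the cursor walk into one loop removes that duplication. A sketch under the assumption that fetch_page is a bound stand-in for api.search_30_day, accepting None for the first page and returning a (page, next_token) pair.

def paginate(fetch_page, max_results=None):
    """Walk a next_token cursor; stop on exhaustion or a result budget."""
    pages, next_token = [], None
    while True:
        page, next_token = fetch_page(next_token)
        pages.append(page)
        if max_results is not None:
            max_results -= len(page)
            if max_results <= 0:
                break
        if not next_token:
            break
    return pages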
add nuclear power plants\n # Sendai and Genkai\n url_s = \"\".join(\n [\n \"http://www.kyuden.co.jp/php/nuclear/sendai/rename.php?\",\n \"A=s_power.fdat&B=ncp_state.fdat&_=1520532401043\",\n ]\n )\n url_g = \"\".join(\n [\n \"http://www.kyuden.co.jp/php/nuclear/genkai/rename.php?\",\n \"A=g_power.fdat&B=ncp_state.fdat&_=1520532904073\",\n ]\n )\n sendai = get(url_s).text\n sendai = re.findall(\n r\"(?<=gouki=)[-+]?[.]?[\\d]+(?:,\\d\\d\\d)*[\\.]?\\d*\" + r\"(?:[eE][-+]?\\d+)?(?=&)\",\n sendai,\n )\n genkai = get(url_g).text\n genkai = re.findall(\n r\"(?<=gouki=)[-+]?[.]?[\\d]+(?:,\\d\\d\\d)*[\\.]?\\d*\" + r\"(?:[eE][-+]?\\d+)?(?=&)\",\n genkai,\n )\n nuclear = 0\n for sendai_i in sendai:\n nuclear += float(sendai_i)\n for genkai_i in genkai:\n nuclear += float(genkai_i)\n # convert from 万kW to MW\n nuclear = nuclear * 10\n\n # add the exchange JP-CG->JP-KY\n exch_list = occtonet.fetch_exchange(\"JP-KY\", \"JP-CG\")\n # find the nearest exchanges in time to consumption timestamp\n nearest_exchanges = sorted(exch_list, key=lambda exch: abs(exch[\"datetime\"] - dt))\n # take the nearest exchange\n exch = nearest_exchanges[0]\n # check that consumption and exchange timestamps are within a 15 minute window\n if abs(dt - exch[\"datetime\"]).seconds <= 900:\n\n generation = cons - exch[\"netFlow\"]\n data[\"production\"][\"solar\"] = solar\n data[\"production\"][\"nuclear\"] = nuclear\n data[\"production\"][\"unknown\"] = generation - nuclear - solar\n\n return data\n else:\n return []", "async def get_detailed_stations(response: Response,\n toi: datetime = None,\n source: StationSourceEnum = StationSourceEnum.WILDFIRE_ONE,\n __=Depends(audit),\n _=Depends(authentication_required)):\n try:\n logger.info('/stations/details/')\n response.headers[\"Cache-Control\"] = no_cache\n if toi is None:\n # NOTE: Don't be tempted to move this into the function definition. 
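fetch_production above matches the consumption timestamp to the closest exchange reading and only trusts it inside a 15-minute window. The same check in isolation; note it uses timedelta.total_seconds() rather than the original's .seconds attribute, which silently wraps for gaps longer than a day.

def nearest_within(records, dt, max_seconds=900, key="datetime"):
    """Closest record in time to dt, or None if none lands in the window."""
    if not records:
        return None
    best = min(records, key=lambda rec: abs(rec[key] - dt))
    return best if abs(best[key] - dt).total_seconds() <= max_seconds else None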
It's not possible\n # to mock a function if it's part of the function definition, and will cause\n # tests to fail.\n toi = get_utc_now()\n else:\n toi = get_hour_20(toi)\n weather_stations = await fetch_detailed_stations_as_geojson(toi, source)\n return DetailedWeatherStationsResponse(features=weather_stations)\n\n except Exception as exception:\n logger.critical(exception, exc_info=True)\n raise", "def get(self, request, *args, **kwargs):\n query = '''\n SELECT\n DATE(bs.\"CreatedAt\"),\n SUM (CASE\n WHEN bs.\"State\"='0' THEN 1\n ELSE 0\n END\n ) AS \"State-0\",\n SUM (\n CASE\n WHEN bs.\"State\" = '1' THEN 1\n ELSE 0\n END\n ) AS \"State-1\",\n SUM (\n CASE\n WHEN bs.\"State\" = '2' THEN 1\n ELSE 0\n END\n ) AS \"State-2\",\n SUM (\n CASE\n WHEN bs.\"State\" = '3' THEN 1\n ELSE 0\n END\n ) AS \"State-3\",\n SUM (\n CASE\n WHEN bs.\"State\" = '4' THEN 1\n ELSE 0\n END\n ) AS \"State-4\"\n FROM\n blood_sample_bloodsample as bs\n GROUP BY DATE(bs.\"CreatedAt\") order by DATE(bs.\"CreatedAt\")\n '''\n\n with connection.cursor() as cursor:\n cursor.execute(query)\n columns = [col[0] for col in cursor.description]\n data = [\n dict(zip(columns, row))\n for row in cursor.fetchall()\n ]\n\n # Formating the data\n dates = [i['date'] for i in data]\n data = [{\n \"name\": 'ACTIVE',\n \"data\": [i['State-0'] for i in data]\n }, {\n \"name\": 'UNABLE_TO_DRAW',\n \"data\": [i['State-1'] for i in data]\n }, {\n \"name\": 'UNABLE_TO_PROCESS',\n \"data\": [i['State-2'] for i in data]\n }, {\n \"name\": 'PROCESSED_ON_TIME',\n \"data\": [i['State-3'] for i in data]\n }, {\n \"name\": 'PROCESSED_NOT_ON_TIME',\n \"data\": [i['State-4'] for i in data]\n }]\n\n return JsonResponse({'status': 200, 'dates': dates, 'data': data, })", "async def fetch_trades(self, symbol: str, since: Optional[int] = None, limit: Optional[int] = None, params={}):\n await self.load_markets()\n market = self.market(symbol)\n request = {\n 'symbol': market['id'],\n }\n if since is not None:\n request['startTime'] = self.iso8601(since)\n else:\n # by default reverse=false, i.e. 
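The dashboard query above pivots sample states into columns with conditional aggregation (SUM over CASE WHEN). A runnable sqlite3 rendition of the idiom, using a hypothetical samples table and a subset of the states.

import sqlite3

conn = sqlite3.connect(":memory:")
conn.executescript("""
    CREATE TABLE samples (created DATE, state TEXT);
    INSERT INTO samples VALUES
        ('2020-01-01', '0'), ('2020-01-01', '3'), ('2020-01-02', '4');
""")
rows = conn.execute("""
    SELECT created,
           SUM(CASE WHEN state = '0' THEN 1 ELSE 0 END) AS active,
           SUM(CASE WHEN state = '3' THEN 1 ELSE 0 END) AS on_time,
           SUM(CASE WHEN state = '4' THEN 1 ELSE 0 END) AS late
    FROM samples GROUP BY created ORDER BY created
""").fetchall()
print(rows)  # [('2020-01-01', 1, 1, 0), ('2020-01-02', 0, 0, 1)]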
trades are fetched since the time of market inception(year 2015 for XBTUSD)\n request['reverse'] = True\n if limit is not None:\n request['count'] = limit\n response = await self.publicGetTrade(self.extend(request, params))\n #\n # [\n # {\n # timestamp: '2018-08-28T00:00:02.735Z',\n # symbol: 'XBTUSD',\n # side: 'Buy',\n # size: 2000,\n # price: 6906.5,\n # tickDirection: 'PlusTick',\n # trdMatchID: 'b9a42432-0a46-6a2f-5ecc-c32e9ca4baf8',\n # grossValue: 28958000,\n # homeNotional: 0.28958,\n # foreignNotional: 2000\n # },\n # {\n # timestamp: '2018-08-28T00:00:03.778Z',\n # symbol: 'XBTUSD',\n # side: 'Sell',\n # size: 1000,\n # price: 6906,\n # tickDirection: 'MinusTick',\n # trdMatchID: '0d4f1682-5270-a800-569b-4a0eb92db97c',\n # grossValue: 14480000,\n # homeNotional: 0.1448,\n # foreignNotional: 1000\n # },\n # ]\n #\n return self.parse_trades(response, market, since, limit)", "def get_stats_summary(self):\n perf_table = spark.table(self.performance_table)\\\n .where(\"yyyy_mm_dd between '{start_date}' and '{end_date}'\"\n .format(start_date = self.start_date, end_date = self.end_date))\\\n .where(\"clicks > 0\")\\\n .where(\"commission_expected_euro <= {max_rpb}\".format(max_rpb = self.max_rpb))\n\n if self.pos == ['All']:\n perf_table = perf_table.groupBy(*self.agg_on)\\\n .agg(f.sum(\"nits_bookings\").alias(\"nits_bookings\")\n ,f.sum(\"commission_expected_euro\").alias(\"nits_commission\")\n ,f.sum(\"bookings\").alias(\"gross_bookings\")\n ,f.sum(\"commission_amount_euro\").alias(\"gross_commission\")\n ,f.sum(\"cost_euro\").alias(\"cost\")\n ,f.sum(\"clicks\").alias(\"clicks\")\n ,f.sum(\"roomnights\").alias(\"roomnights\"))\\\n .withColumn(\"nits_profit\",f.expr(\"nits_commission-cost\"))\\\n .withColumn(\"gross_profit\", f.expr(\"gross_commission-cost\"))\n else:\n filtered_pos = spark.createDataFrame(pd.DataFrame(data = self.pos,\n columns = [\"pos\"]))\n\n perf_table = perf_table.join(filtered_pos, on = \"pos\", how = \"inner\")\\\n .groupBy(*self.agg_on)\\\n .agg(f.sum(\"nits_bookings\").alias(\"nits_bookings\")\n ,f.sum(\"commission_expected_euro\").alias(\"nits_commission\")\n ,f.sum(\"bookings\").alias(\"gross_bookings\")\n ,f.sum(\"commission_amount_euro\").alias(\"gross_commission\")\n ,f.sum(\"cost_euro\").alias(\"cost\")\n ,f.sum(\"clicks\").alias(\"clicks\")\n ,f.sum(\"roomnights\").alias(\"roomnights\"))\\\n .withColumn(\"nits_profit\",f.expr(\"nits_commission-cost\"))\\\n .withColumn(\"gross_profit\", f.expr(\"gross_commission-cost\"))\n\n return (perf_table)", "def scraper_data(self):\n self.lock.acquire()\n for item in s.item:\n item_name = item.get(\"item\")\n item_url = item.get(\"url\")\n item_stock, item_cost = self.scraper.ChooseScraper(item_url)\n s.updateStatus(item_name, item_url, item_stock, item_cost)\n time.sleep(1)\n\n self.lock.release()", "def query(nsid, tunoip, begin_time=None, end_time=None, data_dir='./data'):\n if type(begin_time) is str: begin_time = datetime.datetime.fromisoformat(begin_time)\n if type(end_time) is str: end_time = datetime.datetime.fromisoformat(end_time)\n nowtime = datetime.datetime.now()\n pretime = nowtime - datetime.timedelta(days=30)\n\n if end_time is None: end_time = nowtime\n if begin_time is None: begin_time = pretime\n q_begin_time, q_end_time = begin_time, end_time\n if not os.path.exists(data_dir): os.mkdir(data_dir)\n nsid_dir = os.path.join(data_dir, nsid)\n if not os.path.exists(nsid_dir): os.mkdir(nsid_dir)\n file_prefix = tunoip.replace('>', '')\n # timestamp_path = os.path.join(nsid_dir, 
'%s.ts'%file_prefix)\n csv_path = os.path.join(nsid_dir, '%s.csv'%file_prefix)\n\n logging.info('Query: nsid={}, tunoip={}, timerange=[{}, {})'.format(nsid, tunoip, begin_time, end_time))\n if os.path.exists(csv_path):\n logging.info('Read data from %s'%csv_path)\n df = pd.read_csv(csv_path)\n logging.info('Time range: [{}, {}]'.format(df.iloc[0]['timestamp'], df.iloc[-1]['timestamp']))\n begin_time = datetime.datetime.fromisoformat(df.iloc[-1]['timestamp']) + datetime.timedelta(minutes=1)\n if begin_time < pretime:\n begin_time = pretime\n df.index = pd.to_datetime(df['timestamp'])\n p_num = len(df)\n if df.index[0] < pretime:\n df = df.loc[pretime:]\n logging.info('Forget %d records'%(p_num-len(df)))\n else:\n df = pd.DataFrame(columns=keys)\n begin_time = pretime\n # if nowtime - end_time < datetime.timedelta(hours=1):\n # tmp_end_time = end_time - datetime.timedelta(hours=1)\n # else:\n # tmp_end_time = end_time\n tmp_end_time = end_time\n\n if q_end_time < begin_time:\n df = df.loc[q_begin_time:q_end_time]\n logging.info('Query: {} items'.format(len(df)))\n logging.info('done.')\n return \n s = json_query.replace('$nsid', nsid).replace('$tunoip', tunoip)\n postdata = json.loads(s)\n timestamp_range = postdata['query']['bool']['filter'][2]['range']['timestamp']\n t_begin_time, t_end_time = int(begin_time.timestamp()), int(end_time.timestamp())\n t_tmp_end_time = int(tmp_end_time.timestamp())\n postdata['size'] = 10000\n delta_time = 3600*24*3 # 7 days\n sess = requests.Session()\n sess.headers.update(headers)\n def get_data(t, et):\n if t >= et: return []\n timestamp_range['gte'] = t*1000\n timestamp_range['lt'] = et*1000\n logging.info('Request data in [{}, {})'.format(datetime.datetime.fromtimestamp(t), datetime.datetime.fromtimestamp(et)))\n for _ in range(5):\n try:\n samples, tot = request_data(sess, postdata)\n logging.info('Hists: {} / {}'.format(len(samples), tot))\n return samples\n except Exception as e:\n logging.warning(e)\n logging.info('Retry')\n time.sleep(3)\n data = []\n for t in range(t_begin_time, t_tmp_end_time, delta_time):\n et = min(t+delta_time, t_tmp_end_time)\n data.extend(get_data(t, et))\n df = df.append(pd.DataFrame(data, columns=keys), ignore_index=True)\n # samples = get_data(t_tmp_end_time, t_end_time)\n logging.info('Total Hists: {}'.format(len(data)))\n if len(data)>0:\n logging.info('Save data to %s'%csv_path)\n df.to_csv(csv_path, index=False)\n # df = df.append(pd.DataFrame(samples, columns=keys), ignore_index=True)\n df.index = pd.to_datetime(df['timestamp'])\n df = df.loc[q_begin_time:q_end_time]\n logging.info('Query: {} items'.format(len(df)))\n logging.info('done.')\n df.fillna(0, inplace=True)\n return df", "def get_sample_state_helper(sample_ids):\n samples = []\n requests = []\n for sample_id in sample_ids:\n r = req('GET', SUB_API + 'samples/' + sample_id + '/state')\n samples.append({\n 'ID': sample_id,\n 'State': demisto.get(r.json(), 'data.state')\n })\n requests.append(r.json())\n return {'samples': samples, 'requests': requests}", "def process_response(self, request, response):\n if self.start_ts:\n query = {'ts': {'$gt': self.start_ts}}\n else:\n query = {}\n self.queries = self.db.system.profile.find(query, timeout=False)\n self.queries = [self._process_query(q) for q in self.queries]\n self.queries_count = len(self.queries)\n self.total_time = sum([q.get('millis') for q in self.queries])", "def new_get_buys(self, cb_account_id):\n if cb_account_id == \"wallet_id_btc\":\n return MockAPIObject(data=[\n {\n \"created_at\": 
\"2017-12-27T15:16:22Z\",\n \"resource\": \"buy\",\n \"status\": \"completed\",\n \"amount\": {\n \"amount\": 0.04,\n \"currency\": \"BTC\"\n },\n \"total\": {\n \"amount\": 300,\n \"currency\": \"EUR\"\n },\n \"fees\": [{\n \"amount\": {\n \"amount\": 4.4,\n \"currency\": \"EUR\"\n }\n }]\n },\n {\n \"created_at\": \"2017-12-27T15:16:22Z\",\n \"resource\": \"buy\",\n # should be skipped since it was canceled\n \"status\": \"canceled\"\n },\n {\n \"created_at\": \"2018-01-28T13:11:35Z\",\n \"resource\": \"buy\",\n \"status\": \"completed\",\n \"amount\": {\n \"amount\": 0.05,\n \"currency\": \"BTC\"\n },\n \"total\": {\n \"amount\": 350,\n \"currency\": \"EUR\"\n },\n \"fees\": [{\n \"amount\": {\n \"amount\": 4.50,\n \"currency\": \"EUR\"\n }\n }]\n },\n {\n \"created_at\": \"2018-01-28T13:11:35Z\",\n # should be skipped and not end up in the database (neither sell nor buy)\n # and it's status is canceled\n \"resource\": \"should be skipped\",\n \"status\": \"canceled\",\n \"amount\": {\n \"amount\": 0.05,\n \"currency\": \"BTC\"\n },\n \"total\": {\n \"amount\": 350,\n \"currency\": \"EUR\"\n },\n \"fees\": [{\n \"amount\": {\n \"amount\": 4.50,\n \"currency\": \"EUR\"\n }\n }]\n }\n ])\n elif cb_account_id == \"wallet_id_ltc\":\n return MockAPIObject(\n data=[{\n \"created_at\": \"2018-01-22T12:26:35Z\",\n \"resource\": \"buy\",\n \"status\": \"completed\",\n \"amount\": {\n \"amount\": 2.2,\n \"currency\": \"LTC\"\n },\n \"total\": {\n \"amount\": 260,\n \"currency\": \"EUR\"\n },\n \"fees\": [{\n \"amount\": {\n \"amount\": 5,\n \"currency\": \"EUR\"\n }\n }]\n }, {\n \"created_at\": \"2018-01-22T11:04:01Z\",\n \"resource\": \"buy\",\n \"status\": \"completed\",\n \"amount\": {\n \"amount\": 1.4,\n \"currency\": \"LTC\"\n },\n \"total\": {\n \"amount\": 100,\n \"currency\": \"EUR\"\n },\n \"fees\": [{\n \"amount\": {\n \"amount\": 3,\n \"currency\": \"EUR\"\n }\n }]\n }])\n else:\n return MockAPIObject()", "def query_request():\n query_data = request.get_json()\n print(query_data)\n example_response = []\n \n # First we need to check if the request is for table or time series data\n if query_data and query_data == 'table':\n # send back columns and rows\n pass\n elif query_data:\n # send back value/clock pairs for timeseries charts\n example_response = generate_fake_timeseries(query_data.get('range', {}).get('from'),\n query_data.get('range', {}).get('to'),\n interval=query_data.get('intervalMs', 60000),\n create=4)\n return make_response(jsonify(example_response))", "def getDaySummaryVectors(db_manager, sql_type, timespan, agg_list='max'):\n\n # Get our interpolation dictionary for the query\n interDict = {'start' : weeutil.weeutil.startOfDay(timespan.start),\n 'stop' : timespan.stop,\n 'table_name' : 'archive_day_%s' % sql_type}\n # Setup up a list of lists for our vectors\n _vec = [list() for x in range(len(agg_list))]\n # Initialise each list in the list of lists\n for agg in agg_list:\n _vec[agg_list.index(agg)] = list()\n # Setup up our time vector list\n _time_vec = list()\n # Initialise a dictionary for our results\n _return = {}\n # Get the unit system in use\n _row = db_manager.getSql(\"SELECT usUnits FROM %s LIMIT 1;\" % db_manager.table_name)\n std_unit_system = _row[0] if _row is not None else None\n # Get a cursor object for our query\n _cursor=db_manager.connection.cursor()\n try:\n # Put together our SQL query string\n sql_str = \"SELECT * FROM %(table_name)s WHERE dateTime >= %(start)s AND dateTime < %(stop)s\" % interDict\n # Loop through each record our query 
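query_request above dispatches on the payload: table requests get columns and rows, anything else gets value/timestamp pairs for charting. A framework-neutral sketch of that dispatch, shaped like a Grafana simple-JSON datasource response; the payload's type key and the demo values are assumptions.

def handle_query(payload):
    """Dispatch a Grafana-style JSON query: table panel vs. time series."""
    if not payload:
        return []
    if payload.get("type") == "table":
        return [{"type": "table",
                 "columns": [{"text": "value", "type": "number"}],
                 "rows": [[42]]}]
    # time-series panels expect [value, epoch_millis] pairs per target
    return [{"target": "demo",
             "datapoints": [[1.0, 1600000000000], [2.0, 1600060000000]]}]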
returns\n for _rec in _cursor.execute(sql_str):\n # Loop through each aggregate we have been asked for\n for agg in agg_list:\n # Calculate the aggregate\n if agg == 'min':\n _result = _rec[1]\n elif agg == 'max':\n _result = _rec[3]\n elif agg == 'sum':\n _result = _rec[5]\n elif agg == 'gustdir':\n _result = _rec[7]\n elif agg == 'mintime':\n _result = int(_rec[2]) if _rec[2] else None\n elif agg == 'maxtime':\n _result = int(_rec[4]) if _rec[4] else None\n elif agg == 'count':\n _result = int(_rec[6]) if _rec[6] else None\n elif agg == 'avg' :\n _result = _rec[5]/_rec[6] if (_rec[5] and _rec[6]) else None\n elif agg == 'rms' :\n _result = math.sqrt(_rec[10]/_rec[11]) if (_rec[10] and _rec[11]) else None\n elif agg == 'vecavg' :\n _result = math.sqrt((_rec[8]**2 + _rec[9]**2) / _rec[6]**2) if (_rec[6] and _rec[8] and _rec[9]) else None\n elif agg == 'vecdir' :\n if _rec[8] == 0.0 and _rec[9] == 0.0:\n _result = None\n elif _rec[8] and _rec[9]:\n deg = 90.0 - math.degrees(math.atan2(_rec[9], _rec[8]))\n _result = deg if deg >= 0.0 else deg + 360.0\n else:\n _result = None\n # If we have not found it then return None\n else:\n _result = None\n # Add the aggregate to our vector\n _vec[agg_list.index(agg)].append(_result)\n # Add the time to our time vector\n _time_vec.append(_rec[0])\n finally:\n # Close our cursor\n _cursor.close()\n # Get unit type and group for time\n (_time_type, _time_group) = weewx.units.getStandardUnitType(std_unit_system, 'dateTime')\n # Loop through each aggregate we were asked for getting unit and group and producing a ValueTuple\n # and adding to our result dictionary\n for agg in agg_list:\n (t,g) = weewx.units.getStandardUnitType(std_unit_system, sql_type, agg)\n _return[agg]=ValueTuple(_vec[agg_list.index(agg)], t, g)\n # Return our time vector and dictionary of aggregate vectors\n return (ValueTuple(_time_vec, _time_type, _time_group), _return)", "def get_data(\n begin_date, end_date, stationid, product, datum=None, bin_num=None,\n interval=None, units='metric', time_zone='gmt'):\n # Convert dates to datetime objects so deltas can be calculated\n begin_datetime = parse_known_date_formats(begin_date)\n end_datetime = parse_known_date_formats(end_date)\n delta = end_datetime - begin_datetime\n\n # If the length of our data request is less or equal to 31 days,\n # we can pull the data from API in one request\n if delta.days <= 31:\n data_url = build_query_url(\n begin_datetime.strftime(\"%Y%m%d %H:%M\"),\n end_datetime.strftime(\"%Y%m%d %H:%M\"),\n stationid, product, datum, bin_num, interval, units, time_zone)\n\n df = url2pandas(data_url, product, num_request_blocks=1)\n\n # If the length of the user specified data request is less than 365 days\n # AND the product is hourly_height or high_low, we can pull data directly\n # from the API in one request\n elif delta.days <= 365 and (\n product == 'hourly_height' or product == 'high_low'):\n data_url = build_query_url(\n begin_date, end_date, stationid, product, datum, bin_num, interval,\n units, time_zone)\n\n df = url2pandas(data_url, product, num_request_blocks=1)\n\n # If the length of the user specified data request is greater than 365 days\n # AND the product is hourly_height or high_low, we need to load data from\n # the API in365 day blocks.\n elif product == 'hourly_height' or product == 'high_low':\n # Find the number of 365 day blocks in our desired period,\n # constrain the upper limit of index in the for loop to follow\n num_365day_blocks = int(math.floor(delta.days / 365))\n\n df = pd.DataFrame([]) # 
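The vecdir and vecavg branches in getDaySummaryVectors above recover the mean wind vector from the summed x/y components stored in the day table. The arithmetic on its own, mirroring the original's conventions (compass bearing, undefined direction for a zero vector).

import math

def vector_direction(xsum, ysum):
    """Compass bearing (0-360) of the summed wind vector."""
    if xsum == 0.0 and ysum == 0.0:
        return None  # no net movement, direction undefined
    deg = 90.0 - math.degrees(math.atan2(ysum, xsum))
    return deg if deg >= 0.0 else deg + 360.0

def vector_average(xsum, ysum, count):
    """Magnitude of the mean wind vector over `count` samples."""
    return math.sqrt(xsum ** 2 + ysum ** 2) / count if count else None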
Empty dataframe for data from API requests\n\n # Loop through in 365 day blocks,\n # adjust the begin_datetime and end_datetime accordingly,\n # make a request to the NOAA CO-OPS API\n for i in range(num_365day_blocks + 1):\n begin_datetime_loop = begin_datetime + timedelta(days=(i * 365))\n end_datetime_loop = begin_datetime_loop + timedelta(days=365)\n\n # If end_datetime_loop of the current 365 day block is greater\n # than end_datetime specified by user, use end_datetime\n if end_datetime_loop > end_datetime:\n end_datetime_loop = end_datetime\n\n # Build url for each API request as we proceed through the loop\n data_url = build_query_url(\n begin_datetime_loop.strftime('%Y%m%d'),\n end_datetime_loop.strftime('%Y%m%d'),\n stationid, product, datum, bin_num, interval, units, time_zone)\n \n df_new = url2pandas(data_url, product, num_365day_blocks) # Get dataframe for block\n df = df.append(df_new) # Append to existing dataframe\n \n # If the length of the user specified data request is greater than 31 days\n # for any other products, we need to load data from the API in 31 day\n # blocks\n else:\n # Find the number of 31 day blocks in our desired period,\n # constrain the upper limit of index in the for loop to follow\n num_31day_blocks = int(math.floor(delta.days / 31))\n\n df = pd.DataFrame([]) # Empty dataframe for data from API requests\n\n # Loop through in 31 day blocks,\n # adjust the begin_datetime and end_datetime accordingly,\n # make a request to the NOAA CO-OPS API\n for i in range(num_31day_blocks + 1):\n begin_datetime_loop = begin_datetime + timedelta(days=(i * 31))\n end_datetime_loop = begin_datetime_loop + timedelta(days=31)\n\n # If end_datetime_loop of the current 31 day block is greater\n # than end_datetime specified by user, use end_datetime\n if end_datetime_loop > end_datetime:\n end_datetime_loop = end_datetime\n\n # Build URL for each API request as we proceed through the loop\n data_url = build_query_url(\n begin_datetime_loop.strftime('%Y%m%d'),\n end_datetime_loop.strftime('%Y%m%d'),\n stationid, product, datum, bin_num, interval, units, time_zone)\n \n df_new = url2pandas(data_url, product, num_31day_blocks) # Get dataframe for block\n df = df.append(df_new) # Append to existing dataframe\n \n # Rename output dataframe columns based on requested product\n # and convert to useable data types\n if product == 'water_level':\n # Rename columns for clarity\n df.rename(columns={'f': 'flags', 'q': 'QC', 's': 'sigma',\n 't': 'date_time', 'v': 'water_level'},\n inplace=True)\n\n # Convert columns to numeric values\n data_cols = df.columns.drop(['flags', 'QC', 'date_time'])\n df[data_cols] = df[data_cols].apply(\n pd.to_numeric, axis=1, errors='coerce')\n\n # Convert date & time strings to datetime objects\n df['date_time'] = pd.to_datetime(df['date_time'])\n\n elif product == 'hourly_height':\n # Rename columns for clarity\n df.rename(columns={'f': 'flags', 's': 'sigma',\n 't': 'date_time', 'v': 'water_level'},\n inplace=True)\n\n # Convert columns to numeric values\n data_cols = df.columns.drop(['flags', 'date_time'])\n df[data_cols] = df[data_cols].apply(\n pd.to_numeric, axis=1, errors='coerce')\n\n # Convert date & time strings to datetime objects\n df['date_time'] = pd.to_datetime(df['date_time'])\n\n elif product == 'high_low':\n # Rename columns for clarity\n df.rename(columns={'f': 'flags', 'ty': 'high_low',\n 't': 'date_time', 'v': 'water_level'},\n inplace=True)\n\n # Separate to high and low dataframes\n df_HH = df[df['high_low'] == \"HH\"].copy()\n 
df_HH.rename(columns={'date_time': 'date_time_HH',\n 'water_level': 'HH_water_level'},\n inplace=True)\n\n df_H = df[df['high_low'] == \"H \"].copy()\n df_H.rename(columns={'date_time': 'date_time_H',\n 'water_level': 'H_water_level'},\n inplace=True)\n\n df_L = df[df['high_low'].str.contains(\"L \")].copy()\n df_L.rename(columns={'date_time': 'date_time_L',\n 'water_level': 'L_water_level'},\n inplace=True)\n\n df_LL = df[df['high_low'].str.contains(\"LL\")].copy()\n df_LL.rename(columns={'date_time': 'date_time_LL',\n 'water_level': 'LL_water_level'},\n inplace=True)\n\n # Extract dates (without time) for each entry\n dates_HH = [x.date() for x in pd.to_datetime(df_HH['date_time_HH'])]\n dates_H = [x.date() for x in pd.to_datetime(df_H['date_time_H'])]\n dates_L = [x.date() for x in pd.to_datetime(df_L['date_time_L'])]\n dates_LL = [x.date() for x in pd.to_datetime(df_LL['date_time_LL'])]\n\n # Set indices to datetime\n df_HH['date_time'] = dates_HH\n df_HH.index = df_HH['date_time']\n df_H['date_time'] = dates_H\n df_H.index = df_H['date_time']\n df_L['date_time'] = dates_L\n df_L.index = df_L['date_time']\n df_LL['date_time'] = dates_LL\n df_LL.index = df_LL['date_time']\n\n # Remove flags and combine to single dataframe\n df_HH = df_HH.drop(\n columns=['flags', 'high_low'])\n df_H = df_H.drop(columns=['flags', 'high_low',\n 'date_time'])\n df_L = df_L.drop(columns=['flags', 'high_low',\n 'date_time'])\n df_LL = df_LL.drop(columns=['flags', 'high_low',\n 'date_time'])\n\n # Keep only one instance per date (based on max/min)\n maxes = df_HH.groupby(df_HH.index).HH_water_level.transform(max)\n df_HH = df_HH.loc[df_HH.HH_water_level == maxes]\n maxes = df_H.groupby(df_H.index).H_water_level.transform(max)\n df_H = df_H.loc[df_H.H_water_level == maxes]\n mins = df_L.groupby(df_L.index).L_water_level.transform(max)\n df_L = df_L.loc[df_L.L_water_level == mins]\n mins = df_LL.groupby(df_LL.index).LL_water_level.transform(max)\n df_LL = df_LL.loc[df_LL.LL_water_level == mins]\n\n df = df_HH.join(df_H, how='outer')\n df = df.join(df_L, how='outer')\n df = df.join(df_LL, how='outer')\n\n # Convert columns to numeric values\n data_cols = df.columns.drop(\n ['date_time', 'date_time_HH', 'date_time_H', 'date_time_L',\n 'date_time_LL'])\n df[data_cols] = df[data_cols].apply(pd.to_numeric, axis=1,\n errors='coerce')\n\n # Convert date & time strings to datetime objects\n df['date_time'] = pd.to_datetime(df.index)\n df['date_time_HH'] = pd.to_datetime(df['date_time_HH'])\n df['date_time_H'] = pd.to_datetime(df['date_time_H'])\n df['date_time_L'] = pd.to_datetime(df['date_time_L'])\n df['date_time_LL'] = pd.to_datetime(df['date_time_LL'])\n\n elif product == 'predictions':\n if interval == 'h':\n # Rename columns for clarity\n df.rename(columns={'t': 'date_time', 'v': 'predicted_wl'},\n inplace=True)\n\n # Convert columns to numeric values\n data_cols = df.columns.drop(['date_time'])\n\n elif interval == 'hilo':\n # Rename columns for clarity\n df.rename(columns={'t': 'date_time', 'v': 'predicted_wl',\n 'type': 'hi_lo'},\n inplace=True)\n\n # Convert columns to numeric values\n data_cols = df.columns.drop(['date_time', 'hi_lo'])\n\n # Convert date & time strings to datetime objects\n df['date_time'] = pd.to_datetime(df['date_time'])\n\n elif product == 'currents':\n # Rename columns for clarity\n df.rename(columns={'b': 'bin', 'd': 'direction',\n 's': 'speed', 't': 'date_time'},\n inplace=True)\n\n # Convert columns to numeric values\n data_cols = df.columns.drop(['date_time'])\n df[data_cols] = 
df[data_cols].apply(pd.to_numeric, axis=1,\n errors='coerce')\n\n # Convert date & time strings to datetime objects\n df['date_time'] = pd.to_datetime(df['date_time'])\n\n elif product == 'wind':\n # Rename columns for clarity\n df.rename(columns={'d': 'dir', 'dr': 'compass',\n 'f': 'flags', 'g': 'gust_spd',\n 's': 'spd', 't': 'date_time'},\n inplace=True)\n\n # Convert columns to numeric values\n data_cols = df.columns.drop(['date_time', 'flags', 'compass'])\n df[data_cols] = df[data_cols].apply(pd.to_numeric, axis=1,\n errors='coerce')\n\n # Convert date & time strings to datetime objects\n df['date_time'] = pd.to_datetime(df['date_time'])\n\n elif product == 'air_pressure':\n # Rename columns for clarity\n df.rename(columns={'f': 'flags', 't': 'date_time', 'v': 'air_press'},\n inplace=True)\n\n # Convert columns to numeric values\n data_cols = df.columns.drop(['date_time', 'flags'])\n df[data_cols] = df[data_cols].apply(pd.to_numeric, axis=1,\n errors='coerce')\n\n # Convert date & time strings to datetime objects\n df['date_time'] = pd.to_datetime(df['date_time'])\n\n elif product == 'air_temperature':\n # Rename columns for clarity\n df.rename(columns={'f': 'flags', 't': 'date_time', 'v': 'air_temp'},\n inplace=True)\n\n # Convert columns to numeric values\n data_cols = df.columns.drop(['date_time', 'flags'])\n df[data_cols] = df[data_cols].apply(pd.to_numeric, axis=1,\n errors='coerce')\n\n # Convert date & time strings to datetime objects\n df['date_time'] = pd.to_datetime(df['date_time'])\n\n elif product == 'water_temperature':\n # Rename columns for clarity\n df.rename(columns={'f': 'flags', 't': 'date_time', 'v': 'water_temp'},\n inplace=True)\n\n # Convert columns to numeric values\n data_cols = df.columns.drop(['date_time', 'flags'])\n df[data_cols] = df[data_cols].apply(pd.to_numeric, axis=1,\n errors='coerce')\n\n # Convert date & time strings to datetime objects\n df['date_time'] = pd.to_datetime(df['date_time'])\n\n # Set datetime to index (for use in resampling)\n df.index = df['date_time']\n df = df.drop(columns=['date_time'])\n\n # Handle hourly requests for water_level and currents data\n if (product == 'water_level') | (product == 'currents') & (\n interval == 'h'):\n df = df.resample('H').first() # Only return the hourly data\n\n return df", "def __weather_api_call(\n self, time: datetime, location: tuple, index: int,\n ) -> Weather:\n URL = (\n 'https://weather.visualcrossing.com/VisualCrossingWebServices'\n + '/rest/services/weatherdata/history?'\n )\n time_start = time.strftime('%Y-%m-%dT%H:%M:%S')\n # time_end = (time + timedelta(hours=1, seconds=0)\n # ).strftime('%Y-%m-%dT%H:%M:%S')\n location0_str = f'{location[0]:.5f}'\n location1_str = f'{location[1]:.5f}'\n\n PARAMS = {\n 'aggregateHours': 1,\n 'combinationMethod': 'aggregate',\n 'startDateTime': time_start,\n 'endDateTime': time_start,\n 'maxStations': -1,\n 'maxDistance': -1,\n 'contentType': 'json',\n 'unitGroup': self.unit_group,\n 'locationMode': 'single',\n 'key': self.vc_api_key,\n 'dataElements': 'all',\n 'locations': f'{location0_str}, {location1_str}',\n }\n # sending get request and saving the response as response object\n r = requests.get(url=URL, params=PARAMS)\n # extracting data in json format\n response_data = r.json()\n data_values = response_data['location']['values'][0]\n return Weather(\n temperature=data_values['temp'],\n maximum_temperature=data_values['maxt'],\n minimum_temperature=data_values['mint'],\n wind_chill=data_values['windchill'],\n heat_index=data_values['heatindex'],\n 
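The chunking loop that get_data above repeats for its 365-day and 31-day products reduces to one generator: step through the span in fixed blocks and clamp the last block to the requested end. A sketch; as in the original, consecutive blocks share a boundary instant, so inclusive APIs can return that sample twice.

from datetime import datetime, timedelta

def date_blocks(begin: datetime, end: datetime, days: int):
    """Yield (block_start, block_end) pairs stepping through [begin, end]."""
    n_blocks = (end - begin).days // days
    for i in range(n_blocks + 1):
        start = begin + timedelta(days=i * days)
        stop = min(start + timedelta(days=days), end)
        yield start, stop

# usage: for s, e in date_blocks(datetime(2020, 1, 1), datetime(2020, 6, 1), 31): ...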
precipitation=data_values['precip'],\n snow_depth=data_values['snowdepth'],\n wind_speed=data_values['wspd'],\n wind_direction=data_values['wdir'],\n sea_level_pressure=data_values['sealevelpressure'],\n visibility=data_values['visibility'],\n cloud_cover=data_values['cloudcover'],\n dew_point=data_values['dew'],\n solar_radiation=data_values['solarradiation'],\n relative_humidity=data_values['humidity'],\n weather_type=data_values['weathertype'],\n conditions=data_values['conditions'],\n date=time,\n location=location,\n index=index,\n )", "def _get_api_query(start_date='yesterday', end_date='yesterday', metrics='', dimensions='', sort=None, filters=None,\n output=None, webproperty_id=None, profile_id=None, oldest_prf=False, **kwargs):\n\n # get service\n if 'service' not in globals():\n global service\n service = get_service()\n\n # cleanup\n if kwargs.get('cleanup') or kwargs.get('refresh'):\n _cleanup()\n\n # debug\n debug = kwargs.get('debug')\n\n # unify metrics and dimensions to the form accepted by GA\n metrics = unify(metrics)\n dimensions = unify(dimensions)\n\n df = []\n headers = []\n\n if webproperty_id is not None:\n pids_wp = get_profiles(webproperty_id).profileId.astype(str).tolist()\n if profile_id is not None:\n if isinstance(profile_id, list):\n pids = [p for p in profile_id if p in pids_wp]\n elif isinstance(profile_id, str) or isinstance(profile_id, int):\n if profile_id in pids_wp:\n pids = [profile_id]\n else:\n pids = []\n else:\n pids = pids_wp\n else:\n if profile_id is not None:\n if isinstance(profile_id, list):\n pids = profile_id\n elif isinstance(profile_id, str):\n pids = [profile_id]\n else:\n raise TypeError('profile_id can only be str or list')\n else:\n pids = get_profiles(webproperty_id).profileId.astype(str).tolist()\n\n df_profile = get_profiles(webproperty_id).ix[:,\n ['accountId', 'webpropertyId', 'profileId', 'profileName', 'websiteUrl']]\n\n if oldest_prf:\n pids = _get_first_profile_ids(webproperty_id)\n\n # iterate through each profile_id\n for profileId in pids:\n counter = 0\n total_results = 0\n while True:\n if counter >= total_results and counter != 0:\n break\n try:\n results = service.data().ga().get(\n ids=\"ga:\" + str(profileId),\n start_date=start_date,\n end_date=end_date,\n metrics=metrics,\n dimensions=dimensions,\n sort=sort,\n filters=filters,\n start_index=str(counter + 1),\n max_results='10000',\n samplingLevel='HIGHER_PRECISION'\n ).execute()\n\n # get total results\n total_results = results.get('totalResults')\n\n # if not results for the query, break the loop\n if total_results == 0:\n break\n\n # get sample size\n contains_sampled_data = results.get('containsSampledData')\n if contains_sampled_data:\n sample_space = float(results.get('sampleSpace'))\n sample_size = float(results.get('sampleSize'))\n sample_ratio = sample_size / sample_space\n else:\n sample_ratio = 1.0\n\n # avoid quota limit error\n sleep(SLEEP_DURATION)\n\n # parsing data\n headers = [x['name'].replace('ga:', '') for x in results.get('columnHeaders')]\n rows = results.get('rows', [])\n if rows:\n for row in rows:\n row.append(profileId)\n _row = [str(x) for x in row]\n df.append(_row)\n counter += len(rows)\n\n # printing\n if debug:\n print 'processing profile %s (%s / %s results fetched, sample ratio: %.2f%%)' % (profileId,\n counter,\n total_results,\n sample_ratio * 100)\n\n except Exception, err:\n print(err)\n break\n\n # if no results, return empty DataFrame\n if len(df) == 0:\n return DataFrame([])\n else:\n df = DataFrame(df, columns=headers + 
['profileId'])\n\n # convert ga:date to datetime dtype\n if 'date' in headers:\n df['date'] = to_datetime(df['date'], format='%Y%m%d')\n\n # convert ga:dateHour to datetime dtype\n if 'dateHour' in headers:\n df['dateHour'] = to_datetime(df['dateHour'], format='%Y%m%d%H')\n\n # add profileId\n df['profileId'] = df.profileId.astype('int')\n\n # add profile info\n df = merge(left=df, right=df_profile, left_on='profileId', right_on='profileId', how='left')\n\n # make metrics dtypes as float\n for m in [x.replace('ga:', '') for x in metrics.split(',')]:\n try:\n df[m] = df[m].astype(float)\n except Exception, err:\n print err\n\n # output to csv if output param is set\n if output is not None:\n df.to_csv(output, index=False, encoding='utf-8')\n\n return df", "def find_purchase_qty_in_duration(self,from_date,to_date,location,product_id):\n # query=\"\"\"\n # select sum(product_uom_qty) from stock_move mv \n # Inner join stock_location sl on sl.id = mv.location_id and sl.usage='supplier'\n # and mv.location_dest_id in (%s) where state='done' and product_id = %s and date between '%s 00:00:00' and '%s 23:59:59'\n # \"\"\"\n query = \"\"\"select sum(product_uom_qty) as total,product_uom from stock_move mv \n Inner join stock_location sl on sl.id = mv.location_id and sl.usage='supplier' \n and mv.location_dest_id in (%s) where state='done' and product_id = %s and \n date between '%s 00:00:00' and '%s 23:59:59' group by product_uom\"\"\"%(\n ','.join(str(x) for x in location), product_id.id,from_date,to_date)\n self._cr.execute(query)\n result = self._cr.fetchall()\n uom_rec = self.env['product.uom']\n purchase_qty = 0\n for r in result:\n factor_inv = uom_rec.browse(r[1]).factor_inv\n purchase_qty += r[0] * factor_inv\n # Return Qty\n return_query = \"\"\"select sum(product_uom_qty) as total,product_uom \n from stock_move mv Inner join stock_location sl on sl.id = \n mv.location_dest_id and sl.usage='supplier' and mv.location_id in (\n %s) where state='done' and product_id = %s and date between '%s \n 00:00:00' and '%s 23:59:59' group by product_uom\"\"\" % (\n ','.join(str(x) for x in location), product_id.id, from_date,\n to_date)\n self._cr.execute(return_query)\n return_result = self._cr.fetchall()\n purchase_return_qty = 0\n for re in return_result:\n factor_inv = uom_rec.browse(re[1]).factor_inv\n purchase_return_qty += re[0] * factor_inv\n purchase_qty -= purchase_return_qty\n return purchase_qty", "async def run_collector(url: str, session: ClientSession):\n try:\n response = await get_records_from_api(url, session)\n event_data = json.dumps(response[0], ensure_ascii=False)\n log.info(f'Record to stream: {event_data}')\n return event_data\n except Exception as err:\n log.info('Unable to proceed: Error: ', err)\n raise err", "def task_product_upshelf_update_productskusalestats(sku_id):\n from shopback.items.models import ProductSku, SkuStock, \\\n ProductSkuSaleStats, gen_productsksalestats_unikey\n sku = ProductSku.objects.get(id=sku_id)\n product_id = sku.product_id\n sku_stats = SkuStock.get_by_sku(sku_id)\n wait_assign_num = sku_stats.wait_assign_num\n\n stats_uni_key = gen_productsksalestats_unikey(sku_id)\n stats = ProductSkuSaleStats.objects.filter(uni_key=stats_uni_key, sku_id=sku_id)\n\n if stats.count() == 0:\n try:\n stat = ProductSkuSaleStats(uni_key=stats_uni_key,\n sku_id=sku_id,\n product_id=product_id,\n init_waitassign_num=wait_assign_num,\n sale_start_time=sku.product.upshelf_time,\n sale_end_time=sku.product.offshelf_time)\n stat.save()\n except IntegrityError as exc:\n 
logger.warn(\n \"IntegrityError - productskusalestat/init_waitassign_num | sku_id: %s, init_waitassign_num: %s\" % (\n sku_id, wait_assign_num))\n raise task_product_upshelf_update_productskusalestats.retry(exc=exc)\n else:\n logger.warn(\"RepeatUpshelf- productskusalestat/init_waitassign_num | sku_id: %s, init_waitassign_num: %s\" % (\n sku_id, wait_assign_num))", "def get_standin_for(userid):", "async def async_api_handler(_origin_details, data_set_master, dest_per_query):\n t_init = time.time()\n async with aiohttp.ClientSession() as session:\n print('Opening AsyncIO HTTP session.')\n\n # Break up list of destinations into chunks of a particular size (the SBB API has a ceiling around 210)\n destination_chunks = [list(data_set_master)[i:i + dest_per_query] for i in range(0, len(data_set_master), dest_per_query)]\n get_requests = []\n for dest_list in destination_chunks:\n get_requests.append(asyncio.ensure_future(async_query_and_process(_origin_details, dest_list, session)))\n print(f'Awaiting response from {len(get_requests)} requests.')\n results_list = await asyncio.gather(*get_requests)\n\n results = {}\n for result in results_list:\n results.update(result)\n\n print(f'\\n{len(destination_chunks)} API queries took {time.time() - t_init} seconds to receive and process', end='\\n\\n')\n return results", "def query_api(location):\n #bearer_token = obtain_bearer_token(API_HOST, TOKEN_PATH)\n bearer_token ='SHdrjUqMJXqXBKUc7bGIplM8y6tnbwZbXXDbWPCd9wWMP8tX9PdJrC5MZHwJRhb7jMtLjXxT-hsWjNf2OkdiDWd30HsS84AVI5iRnrpxkak3HbWXAdUKvraQ_wgXWXYx'\n response = transaction_search(bearer_token,location)\n response = response.get('businesses')\n return response", "def query_country_timespan(\n df,\n country_timespan: Dict,\n to_nan: List,\n country_code: str = \"country_code\",\n publication_year: str = \"publication_year\",\n local_analysis: bool = False,\n):\n country_timespan_clause = (\n lambda cc, y: f\"\"\"({country_code}==\"{cc}\" and {y[0]}<={publication_year}<={y[1]})\"\"\"\n )\n tmp = df.query(\n \" or \".join(\n [country_timespan_clause(cc, y) for cc, y in country_timespan.items()]\n )\n ).copy()\n\n if to_nan:\n force_nan_de = [\n all(bools)\n for bools in zip(\n (tmp[\"country_code\"] == \"DE\").values,\n (1945 < tmp[publication_year]).values,\n (1950 > tmp[publication_year]).values,\n )\n ]\n tmp.loc[force_nan_de, to_nan] = np.nan\n if local_analysis:\n force_nan_fr = [\n all(bools)\n for bools in zip(\n (tmp[\"country_code\"] == \"FR\").values,\n (1970 <= tmp[publication_year]).values,\n (1980 > tmp[publication_year]).values,\n )\n ]\n tmp.loc[force_nan_fr, to_nan] = np.nan\n return tmp", "def fetch_consumption(zone_key='IN-GJ', session=None, target_datetime=None,\n logger=getLogger('IN-GJ')):\n session = session or requests.session()\n if target_datetime:\n raise NotImplementedError(\n 'This parser is not yet able to parse past dates')\n\n value_map = fetch_data(zone_key, session, logger=logger)\n\n data = {\n 'zoneKey': zone_key,\n 'datetime': value_map['date'].datetime,\n 'consumption': value_map['total consumption'],\n 'source': 'sldcguj.com'\n }\n\n return data", "def get_data_from_api():\n s_date = datetime.now()\n uid = uuid.uuid1()\n start_time = time.time()\n cfl.logging.info(f'new session num {uid} was created at {s_date} ')\n\n functions_dict = {'graph' : get_stock_graph_in_range, 'holder': get_stock_holder, 'balance': get_balance,\n 'cash_flow': get_cash_flow, 'earning': get_earning}\n\n answers = inquirer.prompt(questions)\n symbol = answers.get(\"symbol\")\n\n func = 
functions_dict[answers.get('user_option')]\n\n if answers.get(\"user_option\") == \"graph\" :\n answer_graph = inquirer.prompt(question_graph)\n start_date = answer_graph.get(\"start_date\")\n end_date = answer_graph.get(\"end_date\")\n period = answer_graph.get(\"period\")\n e_date = datetime.now()\n end_time = time.time()\n print(f\"{func(symbol, start_date, end_date, period)}\\n\")\n cfl.logging.info(f' session {uid} end at : {e_date} ')\n cfl.logging.info(f'the time it took to get data from api for session {uid} is: {end_time - start_time} ')\n return\n\n else:\n end_time = time.time()\n e_date = datetime.now()\n print(f\"{func(symbol)}\\n\\n\")\n cfl.logging.info(f' session {uid} end at : {e_date} ')\n cfl.logging.info(f'the time it took to get data from api for session {uid} is: {end_time - start_time} ')\n return", "def query(self, records, streams, debug=False): \n #might want to paginate this eventually\n #ref: the bounds on between in mysql (and postgres) are inclusive\n conn, s = self.dbconn, self.dbstate\n\n #pick a random number between the db starttime and the greatest time\n #value - records\n\n #NOT PART OF timing\n temp = s.executeQuery(\"select max(time) as time from grindertest\")\n temp.next()\n last = temp.getInt(\"time\")\n lastpossible = last - records + 1\n default_starttime = 946684800\n debugout = []\n\n if default_starttime >= lastpossible:\n print(\"WARNING: timerange starts before earliest, resorting to\" + \n \" forced lastpossible\")\n starttime = lastpossible\n else: \n starttime = random.randrange(default_starttime, lastpossible)\n\n endtime = starttime + records - 1\n #done random time window selection\n\n self.reset_conn_state()\n conn, s = self.dbconn, self.dbstate\n\n #build the query\n querystring = \"select * from grindertest where time between \"\n querystring += str(starttime) + \" and \" + str(endtime) + \" and \"\n querystring += \"streamid between 1 and \" + str(streams)\n\n #start timing\n starttime = time.time()\n temp = s.executeQuery(querystring)\n endtime = time.time()\n\n if debug:\n self.query_debugger(temp, debugout)\n return debugout\n\n\n completiontime = endtime - starttime\n return [starttime, endtime, completiontime]", "async def get_records_from_api(url: str, session: ClientSession):\n try:\n response = await session.request(method='GET', url=url)\n response.raise_for_status()\n log.info(f\"Response status ({url}): {response.status}\")\n return await response.json()\n except HttpProcessingError as http_err:\n log.info('An error occurred during the request. 
Error: ', http_err)\n raise http_err\n except Exception as err:\n log.info('Unable to proceed: Error: ', err)\n raise err", "def plant_detail(self, plant_id, timespan, date):\n assert timespan in Timespan\n if timespan == Timespan.day:\n date_str = date.strftime('%Y-%m-%d')\n elif timespan == Timespan.month:\n date_str = date.strftime('%Y-%m')\n\n response = self.session.get(self.get_url('PlantDetailAPI.do'), params={\n 'plantId': plant_id,\n 'type': timespan.value,\n 'date': date_str\n })\n data = json.loads(response.content.decode('utf-8'))\n return data['back']", "def test_get_batch_statistics_request(self):\n self.trans_details.get_batch_statistics(\n batch_id = 123456,\n )", "def _assemble_and_send_request(self):\r\n # Fire off the query.\r\n response = self.client.service.getRates(WebAuthenticationDetail=self.WebAuthenticationDetail,\r\n ClientDetail=self.ClientDetail,\r\n TransactionDetail=self.TransactionDetail,\r\n Version=self.VersionId,\r\n RequestedShipment=self.RequestedShipment,\r\n\t\t\t\t\tReturnTransitAndCommit=self.ReturnTransitAndCommit)\r\n return response", "def retrieve_time_series(api, series_ID):\r\n #Retrieve Data By Series ID \r\n series_search = api.data_by_series(series=series_ID)\r\n ##Create a pandas dataframe from the retrieved time series\r\n df = pd.DataFrame(series_search)\r\n return df", "def get_tweet_data(session, analytics_account, start_time, end_time, user_agent):\n\n export_url = \"https://analytics.twitter.com/user/\" + analytics_account + \"/tweets/export.json\"\n bundle_url = \"https://analytics.twitter.com/user/\" + analytics_account + \"/tweets/bundle\"\n\n export_data = {\n 'start_time' : end_time,\n 'end_time' : start_time,\n 'lang' : 'en'\n }\n querystring = '?' + urllib.parse.urlencode(export_data)\n print('Querying Twitter...')\n\n\n status = 'Pending'\n counter = 0\n while status == 'Pending':\n attempt = session.post(export_url + querystring, headers=user_agent)\n status_dict = json.loads(attempt.text)\n status = status_dict['status']\n counter += 1\n print('Attempt:', counter, ' Response:',status)\n time.sleep(5)\n\n csv_header = {'Content-Type': 'application/csv',\n 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',\n 'Accept-Encoding': 'gzip, deflate, sdch',\n 'Accept-Language': 'en-US,en;q=0.8',\n 'Upgrade-Insecure-Requests': '1',\n 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.157 Safari/537.36'}\n\n data_req = session.get(bundle_url + querystring, headers=csv_header)\n #print(\"data_req response: \", data_req.status_code)\n print(\"Data retrieved, appending dataset.\")\n return data_req.text", "def test_fetch_working(suvi_client):\n start = '2019/05/25 00:50'\n end = '2019/05/25 00:52'\n wave = 94 * u.Angstrom\n goes_sat = a.goes.SatelliteNumber.sixteen\n tr = a.Time(start, end)\n qr1 = suvi_client.search(tr, a.Instrument.suvi, a.Wavelength(wave), goes_sat, a.Level(2))\n\n # Mock QueryResponse object\n mock_qr = mock_query_object(suvi_client)\n\n # Compare if two objects have the same attribute\n\n mock_qr = mock_qr[0]\n qr = qr1[0]\n\n assert mock_qr['Source'] == qr['Source']\n assert mock_qr['Provider'] == qr['Provider']\n assert mock_qr['Physobs'] == qr['Physobs']\n assert mock_qr['Instrument'] == qr['Instrument']\n assert mock_qr['url'] == qr['url']\n\n assert qr1['Start Time'] == Time(\"2019-05-25T00:52:00.000\")\n assert qr1['End Time'] == Time(\"2019-05-25T00:56:00.000\")\n\n with tempfile.TemporaryDirectory() as 
tmpdirname:\n download_list = suvi_client.fetch(qr1, path=tmpdirname)\n assert len(download_list) == len(qr1)", "def run(pv, time_span, abs_z, with_no_data, output, verbose_off):\n avg, df = fetch_data(pv, time_span, abs_z, not with_no_data,\n not verbose_off)\n _s = f\"Average readings for each PV in the past {time_span} seconds:\"\n print(_s)\n print(\"-\" * len(_s))\n for i, (ipv, iavg) in enumerate(zip(pv, avg)):\n print(f\"[{i+1}] {ipv:<30s} : {iavg:>.6g}\")\n print(\"-\" * len(_s))\n if df is not None:\n if output is None:\n click.secho(\n \"Print out the data to the screen, define --output to write into a CSV file.\",\n fg=\"red\")\n try:\n print(df.to_string())\n sys.stdout.flush()\n except BrokenPipeError:\n devnull = os.open(os.devnull, os.O_WRONLY)\n os.dup2(devnull, sys.stdout.fileno())\n sys.exit(1)\n else:\n click.secho(f\"Write the data into {output}\", fg=\"blue\")\n df.to_csv(output)", "def get_courier_info(courier_id):\n db = get_db()\n cursor = db.cursor()\n try:\n courier = list(cursor.execute(\"SELECT * FROM couriers WHERE courier_id = ?\", (courier_id,)))[0]\n except IndexError or Error:\n return \"Not found\"\n json_response = {\n \"courier_id\": courier_id,\n \"courier_type\": courier['courier_type'],\n \"regions\": list(map(int, courier[\"regions\"].strip(\"#\").split('#'))),\n \"working_hours\": courier['working_hours'].strip(\"#\").split('#')\n }\n if courier['completed_orders'] == \"\":\n return json_response\n regions = courier['regions'].split(\"#\")\n times_region = defaultdict(lambda: 0, {})\n times_region_counter = defaultdict(lambda: 0, {})\n type_k = {\"foot\": 2, \"bike\": 5, \"car\": 9}\n earnings = 0\n for region in regions:\n for time, courier_type in cursor.execute(\n \"SELECT time, courier_type FROM orders WHERE region LIKE ? 
AND courier_id LIKE ?\"\n \" AND status LIKE 'completed'\",\n (region, courier_id,)):\n times_region[region] = times_region.get(region, 0) + int(float(time))\n times_region_counter[region] = times_region_counter.get(region, 0) + 1\n earnings += 500 * type_k[courier_type]\n t = float('inf')\n for region in regions:\n if times_region_counter[region] == 0:\n continue\n t = min(t, times_region[region] // times_region_counter[region])\n rating = (60 * 60 - min(t, 60 * 60)) / (60 * 60) * 5\n json_response[\"rating\"] = round(rating, 2)\n json_response[\"earnings\"] = earnings\n return json_response", "def _query_jql_items(self, data_type, from_date=None, to_date=None, event_selectors=None, user_selectors=None,\n output_properties=None, timezone_offset=0, format='json'):\n\n if data_type == 'events':\n jql_script = \"function main() {return Events({from_date: params.from_date,to_date: params.to_date,\" \\\n \"event_selectors: params.event_selectors}).map(function(event) {var result = {event: \" \\\n \"event.name,properties: {distinct_id: event.distinct_id,time: (event.time / 1000) - \" \\\n \"(params.timezone_offset * 3600)}};if ('output_properties' in params) {output_properties = \" \\\n \"params.output_properties;} else {output_properties = Object.keys(event.properties);}\" \\\n \"_.each(output_properties, prop => result.properties[prop] = event.properties[prop]);return \" \\\n \"result;});}\"\n\n date_format = '%Y-%m-%d'\n if isinstance(from_date, datetime.datetime):\n from_date = from_date.strftime(date_format)\n if isinstance(to_date, datetime.datetime):\n to_date = to_date.strftime(date_format)\n if event_selectors is None:\n event_selectors = []\n elif isinstance(event_selectors, dict):\n event_selectors = [event_selectors]\n elif isinstance(event_selectors, list):\n pass\n else:\n Mixpanel.LOGGER.warning(\n 'Invalid type for event_selectors, must be dict or list, found: ' + str(type(event_selectors)))\n\n params = {'from_date': from_date, 'to_date': to_date, 'event_selectors': event_selectors,\n 'timezone_offset': timezone_offset}\n elif data_type == 'people':\n jql_script = \"function main() {return People({user_selectors: params.user_selectors}).map(function(user)\" \\\n \" {var result = {$distinct_id: user.distinct_id,$properties: {}};if ('output_properties' in\" \\\n \" params) {output_properties = params.output_properties;} else {output_properties = \" \\\n \"Object.keys(user.properties);}_.each(output_properties, prop => result.$properties[prop]\" \\\n \" = user.properties[prop]);return result;});}\"\n\n if user_selectors is None:\n user_selectors = []\n elif isinstance(user_selectors, str):\n user_selectors = [{'selector': user_selectors}]\n elif isinstance(user_selectors, list):\n pass\n else:\n Mixpanel.LOGGER.warning(\n 'Invalid type for user_selectors, must be str or list, found: ' + str(type(user_selectors)))\n return\n\n params = {'user_selectors': user_selectors}\n else:\n Mixpanel.LOGGER.warning('Invalid data_type, must be \"events\" or \"people\", found: ' + data_type)\n return\n\n if output_properties is not None:\n params['output_properties'] = output_properties\n\n return self.query_jql(jql_script, params=params, format=format)", "def test_spending_over_time_subawards_failure(client):\n\n resp = client.post(\n \"/api/v2/search/spending_over_time\",\n content_type=\"application/json\",\n data=json.dumps({\"group\": \"quarter\", \"filters\": non_legacy_filters(), \"subawards\": \"string\"}),\n )\n assert resp.status_code == status.HTTP_400_BAD_REQUEST", "def 
query(account_id, base_url=\"http://interview.wpengine.io/v1/accounts\"):\n \n data_is_valid = True\n query_success = False\n returned_dict = {}\n message = \"\"\n attempt_num = 0\n \n while (attempt_num < 3) and not query_success:\n sleep(3**attempt_num-1) #backoff [0,2,8] seconds\n attempt_num += 1\n query_string = f\"{base_url}/{account_id}\"\n request = get(query_string)\n\n #successful query case\n if request.status_code == 200:\n returned_dict = request.json()\n data_is_valid = True\n query_success = True\n else:\n data_is_valid = False\n \n if request.status_code > 200:\n try: # check parseable response\n returned_dict = request.json()\n message = f\"Query - {query_string} - {returned_dict['detail']}\"\n query_success = True # but data not found\n except ValueError:\n message = f\"Response not json parseable for query {query_string}\"\n query_success = False # requery\n \n returned_dict[\"message\"] = message\n returned_dict[\"valid\"] = data_is_valid\n return returned_dict" ]
[ "0.5370444", "0.53211254", "0.53194", "0.5045779", "0.49320626", "0.48997423", "0.48471904", "0.4833045", "0.48287934", "0.48239887", "0.47896585", "0.47663978", "0.47641006", "0.47459877", "0.4742183", "0.47376695", "0.473765", "0.4669849", "0.46652684", "0.46615025", "0.46348634", "0.46296775", "0.46247196", "0.4609515", "0.46037695", "0.45855185", "0.45582452", "0.4555231", "0.45462525", "0.45143944", "0.45138538", "0.45007885", "0.44996274", "0.44931287", "0.44919294", "0.448228", "0.44744647", "0.4470617", "0.44560432", "0.44555315", "0.44511217", "0.4445698", "0.44452325", "0.44147682", "0.44124842", "0.4394863", "0.43884876", "0.43859774", "0.43859637", "0.43778098", "0.4376728", "0.4370338", "0.43684164", "0.4367845", "0.43545702", "0.4351768", "0.43501633", "0.4345041", "0.43388462", "0.4338782", "0.43311185", "0.4320658", "0.43170008", "0.4316005", "0.43111855", "0.43085185", "0.4305475", "0.43038815", "0.42983747", "0.42958376", "0.42914608", "0.42909324", "0.42891195", "0.42814654", "0.4274921", "0.42716736", "0.42681983", "0.42665255", "0.42637652", "0.4263378", "0.42587346", "0.42582062", "0.42539343", "0.4253583", "0.4249439", "0.4238266", "0.42367157", "0.4231211", "0.42274728", "0.42246398", "0.42227918", "0.4222733", "0.4222068", "0.42208165", "0.42206442", "0.4219344", "0.4218458", "0.42164227", "0.42141315", "0.421292" ]
0.6179882
0
Get the status of the given id
def get_status_of_id(sku_id):
    if not sku_id:
        return None

    # Fetch the Status field for the matching SKU; _id is excluded from the projection
    status_query = list(sku_database.find({"SKU_unit": int(sku_id)}, {'_id': 0, 'Status': 1}))
    if not status_query:
        # No record exists for this SKU id
        return None
    status = status_query[0]["Status"]
    return status
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_status_by_id(cls, request, id):\n return request.dbsession.query(cls).get(id).status", "def status(self, id):", "def get_status(id):\n task = run_ctx_request.AsyncResult(id)\n if task.state == states.PENDING:\n abort(404)\n if task.state == states.RECEIVED or task.state == states.STARTED:\n return '', 202, {'Location': url_for('api.get_status', id=id)}\n return task.info", "def get_by_id(self, status_id: int) -> Status:\n return self.__mapper.map(\n self.__repository.get_by_id(status_id),\n Status\n )", "def get_status(self, id: int) -> Optional[Users]:\n try:\n status = self.session.query(CandidatesStatus).get(id)\n\n return status\n except Exception as excpt:\n self.session.rollback()\n print(f'Could not get status: {excpt}')\n\n return None", "def id(self):\n return self.status.id", "def id_status(self):\n return self._id_status", "def check_status(self, id):\n raise NotImplementedError()", "def json_status_by_id(id):\n status = Status.query.filter(Status.id==id).first()\n if status is None:\n abort(404)\n return jsonify(status.get_public_dict())", "async def get_task_status(task_id: TaskId):", "def get_status(job_id):\n job = fetch_data.AsyncResult(job_id, app=app)\n return jsonify({'job_id': job_id, 'status': job.status})", "def update_status(self, id, status):\n\n records = self.db.get_table()\n index = -1\n\n for i in range(0, len(records)):\n if str(records[i][\"id\"]) == str(id):\n index = i\n\n if index == -1:\n return False\n\n records[index][\"status\"] = status\n self.db.update_cell(index, 'status', status)\n\n return records[index]", "def get_status(self, job_id):\n\n result = self.redis.get('job_status:' + str(job_id))\n return pickle.loads(result) if result else None", "async def get_status(self, sms_id: int) -> SmsStatus:\n raise NotImplementedError", "def request_status(job_id):\n status = _database_operations.get_status(job_id, Session())\n if status is None:\n flask.abort(404)\n else:\n return json.dumps({\n 'status': status.status,\n 'finished': status.finished\n })", "def get_card_status(status_id):\n statuses = persistence.get_statuses()\n return next((status['title'] for status in statuses if status['id'] == str(status_id)), 'Unknown')", "def get_card_status(status_id):\n statuses = persistence.get_statuses()\n return next((status['title'] for status in statuses if status['id'] == str(status_id)), 'Unknown')", "def get_card_status(status_id):\n statuses = persistence.get_statuses()\n return next((status['title'] for status in statuses if status['id'] == str(status_id)), 'Unknown')", "def get_observation_status(self, observation_id):\n pass", "def get_status(self, scenario_id):\n table = self.get_execute_table()\n try:\n return table.loc[int(scenario_id), \"status\"]\n except KeyError:\n raise Exception(f\"Scenario not found in execute list, id = {scenario_id}\")", "def get_task_status(id):\n # obtain the task and validate it\n global background_tasks\n rv = background_tasks.get(id)\n if rv is None:\n return not_found(None)\n\n # if the task object is a Thread object that means that the task is still\n # running. 
In this case return the 202 status message again.\n if isinstance(rv, Thread):\n return jsonify({}), 202, {'Location': url_for('get_task_status', id=id)}\n\n # If the task object is not a Thread then it is assumed to be the response\n # of the finished task, so that is the response that is returned.\n # If the application is configured to auto-delete task status resources once\n # the task is done then the deletion happens now, if not the client is\n # expected to send a delete request.\n if app.config['AUTO_DELETE_BG_TASKS']:\n del background_tasks[id]\n return rv", "def test_support_id_status_get(self):\n headers = [('accept_language', 'accept_language_example'),\n ('api', 'api_example'),\n ('api_version', 'api_version_example'),\n ('maas_id', 'maas_id_example'),\n ('addressed_to', 'addressed_to_example')]\n response = self.client.open(\n '/support/{id}/status'.format(id='id_example'),\n method='GET',\n headers=headers)\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))", "def get(self, id):\n\n resp = self.oauth_session.get(self.server + '/api/v2/check/get?id=%s' % id)\n return UnplagCheckResponse(resp)", "def export_status(self, file_id):\n response = self._client.get('workbenches/export/%(file_id)s/status',\n path_params={'file_id': file_id})\n return loads(response.text).get('status')", "def get_status(self, run_id):\n return self.client._perform_json(\n \"GET\", \"/projects/%s/runnables/%s/state/%s\" % (self.project_key, self.runnable_type, run_id))", "def retrieve_task(self, task_id):\n r = requests.get('/'.join([self.base_url, self.ENDPOINT_TASK_STATUS,\n str(task_id)]))\n return r.json()", "def id_status_conta(self):\n return self._id_status_conta", "def GetStatus(self):\r\n return self.status", "def get_task_status(task_id):\r\n mock_request = Mock()\r\n mock_request.REQUEST = {'task_id': task_id}\r\n response = instructor_task_status(mock_request)\r\n status = json.loads(response.content)\r\n return status", "def get_status(self, scenario_id):\n query = self.select_where(\"id\")\n self.cur.execute(query, (scenario_id,))\n result = self.cur.fetchmany()\n return to_data_frame(result)", "def get_status(self, scenario_id):\n query = self.select_where(\"id\")\n self.cur.execute(query, (scenario_id,))\n result = self.cur.fetchmany()\n return to_data_frame(result)", "def status_id(self) -> \"str\":\n return self._attrs.get(\"statusId\")", "def get_status(self, build_id):\n url = f\"{self.base_url}/build\"\n payload = {\"build_id\": build_id}\n response = requests.get(url, json=payload, headers=self.headers)\n\n try:\n status = json.loads(response.text)\n except:\n status = response.text\n\n return status", "def get_status(self, ids):\n return [self.tasks[id].status for id in ids]", "def _read_status(self):\n results = self.status_table.query_items({'api_version': self.api_version})\n if not results:\n return None\n else:\n return results[0]", "async def get_status():", "def get_status(self):\n return self.status", "def get_status(self):\n return self.status", "def get_status(self):\n return self.status", "def check_status(self, message_id):\n\n values = {'token': self._token, 'reference': message_id}\n return self._request(self.CHECK_STATUS_URL, values)", "def status(self, command_id):\n path = \"commands/status?commandId=%s&contextId=%s&clusterId=%s\" % (\n command_id,\n self.context.id,\n self.cluster_id,\n )\n return self.get(self.url, \"1.2\", path, token=self.token)", "async def get_device_status(self, device_id: str) -> dict:\r\n return await 
self.get(API_DEVICE_STATUS.format(device_id=device_id))", "def get_status(self):\n # TODO retrieve from db if not set\n return self.status", "def check_status(self, job_id, config_id=1):\n response = self.do_request(\n self.base_url +\n \"/oasis/statusAsync/\" +\n str(config_id) + \"/\" +\n str(job_id) + \"/\"\n )\n return response", "def get(self, job_id):\n\n if job_id:\n status = {\"state\": self.runner_service.status(job_id)}\n else:\n # TODO: Update the correct status for all jobs; the filtering in jobrunner doesn't work here.\n all_status = self.runner_service.status_all()\n status_dict = {}\n for k, v in all_status.iteritems():\n status_dict[k] = {\"state\": v}\n status = status_dict\n\n self.write_json(status)", "def get_status(self):\n r = requests.get(self.base_url + '/status')\n return r.json()", "def getStatus(self, gameID):\n\n if gameID in self.games:\n status = self.games[gameID].status\n TournamentSystem._logger.debug(\"Found status of game %d to be %s\",\n gameID, status)\n return (True, {\"status\": status})\n else:\n return (False, {\"error\": \"Invalid game ID\"})", "def job_status(self, job_id):\n url = self.base_url + \"/ml-service/phoenix-ml/job/status?id={0}\".format(job_id)\n # url = \"http://10.1.2.110:8199/phoenix-ml/job/status?id=12\"\n headers = {\"ApiKey\": self.api_key}\n response = requests.get(url=url, headers=headers)\n return response.text", "def get_project_job_status(id):\n user = current_user\n\n if user.get_id() is not None:\n _tasks = user.get_project_tasks_in_progress(id)\n running_task_dicts = get_running_task_dicts(_tasks)\n\n _tasks = user.get_finished_project_tasks(id)\n finished_task_dicts = get_finished_task_dicts(_tasks)\n\n response_object = {\n 'running_tasks': running_task_dicts,\n 'finished_tasks': finished_task_dicts\n }\n else:\n response_object = {'status': 'error'}\n # print(jsonify(response_object))\n return jsonify(response_object)", "def get_status(person_group_id):\n url = 'persongroups/{}/training'.format(person_group_id)\n\n return util.request('GET', url)", "def status_check(task_id):\n logger.info(f\"Checking task status for {task_id}\")\n task = Task.objects.get(kf_id=task_id)\n task.status_check()", "def get(cls, id):\n\n return cls.query.get(id)", "def get(cls, id):\n\n return cls.query.get(id)", "def _get_status(self):\n return self.__status", "def status(self):\n return self._get(path='status')", "def updatestatus(id, status):\n username = os.getlogin()\n res = requests.put('{}update/{}/'.format(base_url, id),\n data={\"keyword_fetching_status\": status, \"user_fetched\": username})\n res = res.json()\n return res", "def id_str(self):\n return self.status.id_str", "def _get_image_status(self, image_id):\n image_status = None\n image = self._get_nova_client().images.get(image_id)\n\n if image is not None:\n image_status = image.status\n\n return image_status", "def get(self, _id):\n endpoint = URL_MAPPING + \"/{}\".format(_id)\n response = self.client.get_json(endpoint)\n response.success = response.status_code == 200\n return response", "def get_run_status(self, run_id):\n postresult = requests.get(\n f\"{self.proto}://{self.host}/ga4gh/wes/v1/runs/{run_id}/status\",\n headers=self.auth,\n )\n return wes_reponse(postresult)", "def check_status():\n logger.debug(\"Starting the check_status() routine.\")\n\n url = \"https://www.toggl.com/api/v8/time_entries/current\"\n token = os.environ[\"TOGGL_API_TOKEN\"]\n auth_token = base64.b64encode(f\"{token}:api_token\".encode()).decode()\n resp = requests.get(url, 
headers={\"Authorization\": \"Basic \" + auth_token})\n\n cols = \"id\", \"duration\", \"description\"\n status = {k: v for k, v in (resp.json()[\"data\"] or {}).items() if k in cols}\n logger.debug(f\"{'Something' if 'id' in status else 'Something'} is being tracked.\")\n\n return status", "def get_status(self):\n return self._status", "def get_status_for_experiment(self, id):\n # open = 'open'\n running = 'running'\n finished = 'finished'\n waiting = 'waiting'\n\n experiment = Experiment.get(id)\n date_time_now = datetime.datetime.now()\n start_datetime = experiment.startDatetime\n end_datetime = experiment.endDatetime\n if start_datetime >= end_datetime:\n # validate this earlier\n return None\n if start_datetime <= date_time_now and date_time_now <= end_datetime:\n return running\n elif date_time_now > end_datetime:\n return finished\n elif date_time_now < start_datetime:\n return waiting\n return None", "def get_oozie_status(self, job_id):\n self.echo('Checking status...')\n status = self.call_return(\"oozie job -oozie \" + self.pylot_cfg.hdfs_oozie_interface + \" -info \" + job_id + \" | grep 'Status' | grep ':' | awk '{print $NF}'\")\n status = status.strip('\\n')\n return status", "def get_last_text_status(self):\n with self.connection.cursor() as cursor:\n sql = \"\"\"SELECT * FROM `ow_newsfeed_action` \n WHERE `id`= (SELECT MAX(`id`) FROM `ow_newsfeed_action` WHERE `entityType`=\"user-status\")\n AND `entityType`=\"user-status\"\n \"\"\"\n cursor.execute(sql)\n response = cursor.fetchone()\n data = json.loads(response[\"data\"])\n\n self.connection.commit()\n print(data[\"statusId\"])\n return Status(text=data[\"status\"],id=data[\"statusId\"])", "def status(self):\n return self.get(self._names[\"status\"])", "def get_state_by_id(state_id):\n for key, value in storage.all(\"State\").items():\n if state_id == value.id:\n return jsonify(value.to_dict())\n abort(404)", "def get_task_state(self, id):\n\n if not isinstance(id, six.string_types):\n msg = 'Param id must be a str|unicode.'\n logger.exception(msg)\n raise ValueError(msg)\n\n task_state = self.stub.get_task_state(opac_pb2.TaskId(id=id))\n\n return task_state.state", "def get(self, id):\n return {'id': id}", "def get_volume_status(self, volume_id):\n r = self.get_volume_details(volume_id)\n return r['status'], None", "def get_status(self, torrent_id, fields):\n status = {}\n for field in fields:\n try:\n status[field] = self.status_fields[field](torrent_id)\n except KeyError:\n pass\n return status", "def find(self, id):\n response = self._connection.session.get(self.url + \"/%s\" % id)\n return self._raise_or_return_json(response)", "def getstatus(self):\n return self.__status", "def GetJobStatus(self, job_id):\n return self._SendRequest(HTTP_GET,\n \"/%s/jobs/%s\" % (GANETI_RAPI_VERSION, job_id),\n None, None)", "def patch(self, id):\n try:\n task = update_status(get_db(), id, Status[api.payload[\"status\"]])\n if not task:\n api.abort(404, \"Invalid Task\")\n return task_to_dict(task)\n except ValueError:\n api.abort(422, \"Invalid Status\")", "def get(self, id):\n return self.__model__.query.get(id)", "def getStatus(self, key, time):\n return self.get(\"status\", key, time)", "def getNodeTaskStatusByUPID(self,node,upid):\n data = self.connect('get','nodes/%s/tasks/%s/status' % (node,upid),None)\n return data", "def get_check_in(id, tfid):\n goal = Goal.pull_by_id(id)\n if (not goal):\n raise NotFoundError()\n \n if goal.user != current_user.get_id():\n raise UnauthorizedError\n \n check_in = 
CheckIn.pull_by_goal_timeframe(id, tfid)\n if (not check_in):\n raise NotFoundError()\n\n return check_in.to_json(), 200", "def get(id=None):\n return requests.get(\"/{}\".format(id))", "def status(transaction_id):\n instance_status = api_get('status', {'transaction_id': transaction_id})\n if instance_status.get('status') == 200:\n print(json.dumps(instance_status, indent=4, sort_keys=True))\n else:\n print(Bcolors.FAIL + str(instance_status) + Bcolors.ENDC)", "def find_by_id(cls, id):\n\t\tif id:\n\t\t\treturn cls.query.filter_by(id=id).first()\n\t\treturn {\n\t\t\t'message': 'id field is required',\n\t\t\t'status': 'Failed'\n\t\t\t}, 400", "def get_object_status(obj):\n return get_object_parameter(obj, 'status')", "def status(self):\n return self._query_status()['status']", "def get_ldap_provider_status(self, id):\n try:\n self.logger.info('get_ldap_provider_status called.')\n\n # Validate required parameters\n self.logger.info(\n 'Validating required parameters for get_ldap_provider_status.')\n self.validate_parameters(id=id)\n\n # Prepare query URL\n self.logger.info(\n 'Preparing query URL for get_ldap_provider_status.')\n _url_path = '/public/ldapProvider/{id}/status'\n _url_path = APIHelper.append_url_with_template_parameters(\n _url_path, {'id': id})\n _query_builder = self.config.get_base_uri()\n _query_builder += _url_path\n _query_url = APIHelper.clean_url(_query_builder)\n\n # Prepare and execute request\n self.logger.info(\n 'Preparing and executing request for get_ldap_provider_status.'\n )\n _request = self.http_client.get(_query_url)\n AuthManager.apply(_request, self.config)\n _context = self.execute_request(_request,\n name='get_ldap_provider_status')\n\n # Endpoint and global error handling using HTTP status codes.\n self.logger.info(\n 'Validating response for get_ldap_provider_status.')\n if _context.response.status_code == 0:\n raise RequestErrorErrorException('Error', _context)\n self.validate_response(_context)\n\n except Exception as e:\n self.logger.error(e, exc_info=True)\n raise", "def get_action_status(self, action_name: str, status_id: str, query_params: Dict[str, object] = None) -> ActionResult:\n if query_params is None:\n query_params = {}\n\n path_params = {\n \"action_name\": action_name,\n \"status_id\": status_id,\n }\n\n path = Template(\"/action/v1beta2/actions/${action_name}/status/${status_id}\").substitute(path_params)\n url = self.base_client.build_url(path)\n response = self.base_client.get(url, params=query_params)\n return handle_response(response, ActionResult)", "def status(self):\n return STATUS[self.fields['status']]", "def get_status(item_id: str, job_id: str):\n url = \"%s/content/users/%s/items/%s/status/\" % (\n root_uri, username, item_id)\n data = {\n \"token\": token,\n \"jobType\": \"export\",\n \"jobId\": job_id,\n \"f\": \"json\"\n }\n status_request = requests.post(url, data=data)\n return status_request.json()", "def get(self, _id):", "def id_status_impressao(self):\n return self._id_status_impressao", "def find_id_medio_fisico(status):\n return 'do some magic!'", "def get_task_status(self, **kwargs):\n if kwargs is None or kwargs['parameters'] is None:\n message = \"For 'get_task_status' method parameters are not parsed.\"\n logger.critical(message)\n raise ValueError(message)\n\n if \"message_id\" not in kwargs['parameters']:\n message = \"Key 'message_id' not in kwargs.\"\n logger.critical(message)\n raise ValueError(message)\n\n message_id = kwargs['parameters']['message_id']\n\n return_data = {\"state\": \"Error\"}\n auth = 
self.authenticate()\n if auth == 200:\n task_completed = False\n state_message = \"Queued\"\n while not task_completed:\n sleep(WAIT_TIME_BETWEEN_REQUESTS)\n response = Utils.make_get_request(self.url(\"TaskInfo\" + \"/\" + str(message_id)),\n headers=self.request_header, verify=False)\n if 'StateMessage' in response.json():\n state_message = response.json()['StateMessage']\n if state_message == \"Success\" or state_message == \"Error\":\n task_completed = True\n return_data[\"state\"] = state_message\n if state_message == \"Success\":\n return_data[\"vm_id\"] = response.json()['Result']\n else:\n message = \"unable to authenticate to the PlatformA server,\" \\\n \" got the below response from server {}\".format(auth)\n logging.debug(message)\n raise Exception(message)\n\n return return_data", "def get_state_by_id(state_id):\n state = storage.get(State, state_id)\n if not state:\n abort(404)\n return jsonify(state.to_dict()), 200", "def mesos_status(self, submissionId):\n get_tasks = self.driver.getTasks()['get_tasks']\n task_state = None\n\n tasks = get_tasks['tasks'] + get_tasks.get('completed_tasks')\n tasks_list = list(filter(lambda x: x['task_id']['value'] == submissionId, tasks))\n if len(tasks_list) > 0:\n task = tasks_list[0]\n task_state = task['state']\n self._log.debug(\"Task state = \" + task_state)\n else:\n self._log.debug(\"Task not found\")\n\n return task_state", "def get_label(id):\n return if_found(dao.get_label(id))", "def get_status(self, state):\n raise NotImplementedError", "def get_by_id(cls, id):\n return cls.query().get(id)", "def find_by_id(self, id_):\n return self.by_id.get(id_)", "def status(self) -> Optional[int]:\n return pulumi.get(self, \"status\")", "def status(self):\n return self._data['status']" ]
[ "0.8681815", "0.80947495", "0.77845645", "0.7430623", "0.7410559", "0.7287429", "0.7247795", "0.7225844", "0.72096854", "0.71210665", "0.69675994", "0.69470584", "0.6917927", "0.69042987", "0.68739104", "0.6839439", "0.6839439", "0.6839439", "0.6763738", "0.6757584", "0.6706559", "0.6691357", "0.6648465", "0.6646734", "0.6625076", "0.66212875", "0.6519143", "0.6519055", "0.6514637", "0.6501512", "0.6501512", "0.6496173", "0.6456234", "0.64493537", "0.6393883", "0.6384878", "0.63770187", "0.63770187", "0.63770187", "0.636112", "0.63587993", "0.63577056", "0.6356929", "0.63505405", "0.63359064", "0.6332117", "0.63126016", "0.63050604", "0.62927294", "0.62909025", "0.6269271", "0.626119", "0.626119", "0.62466425", "0.6237127", "0.6205263", "0.6195805", "0.61842144", "0.61530256", "0.61515903", "0.61442226", "0.6138261", "0.6119878", "0.6118375", "0.6117281", "0.6115848", "0.6112385", "0.6110978", "0.6099932", "0.6072273", "0.6068432", "0.60633445", "0.60359204", "0.6031305", "0.6005679", "0.6004625", "0.6004173", "0.60018265", "0.5997709", "0.5990606", "0.59864724", "0.5981695", "0.59750456", "0.5966598", "0.5955478", "0.594806", "0.59432405", "0.5940579", "0.5939266", "0.5939245", "0.5937221", "0.5933733", "0.5933357", "0.59331524", "0.5930975", "0.59279746", "0.5907631", "0.5904367", "0.5902868", "0.58974165" ]
0.7535763
3
From a given list of SKU ids and a status, check each SKU against that status and return the ids that match.
def get_status_skus(sku_list, status):
    values = []
    if not (sku_list and status):
        return values

    for sku_id in sku_list:
        # Keep the SKU only if a record with the requested status exists
        status_query = list(sku_database.find({"SKU_unit": int(sku_id), "Status": status},
                                              {'_id': 0, 'Status': 1}))
        if status_query:
            values.append(sku_id)
    return values
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_sku_id(sku_id, status, start_time, end_time):\n all_data = []\n if not (sku_id, status, start_time, end_time):\n return all_data\n\n for i in sku_database.find({\"SKU_id\": sku_id, \"Status\": status}, {\"_id\": 0}):\n if start_time < i[\"Time_stamp\"] < end_time:\n all_data.append(i)\n else:\n continue\n\n return all_data", "def check_result(check_set, username, player=False):\n for win in WINS:\n if check_set >= set(win):\n return {'win': True if player else False, 'player': username, 'ids': win}", "def find_studies_by_status(self, statuses: List[str], exclude: bool = False) -> List[dict]:\n if not statuses:\n return []\n\n logic_str = \"NOT\" if exclude else \"\"\n statuses_str = \",\".join([f'\"{s}\"' for s in statuses])\n\n with self.table_access_condition, self._get_connection() as conn:\n c = conn.cursor()\n c.execute(\n f\"\"\"\n SELECT * from studies\n WHERE status {logic_str} IN ({statuses_str});\n \"\"\"\n )\n results = c.fetchall()\n return results", "def get_status_of_id(sku_id):\n if not sku_id:\n return None\n\n status_query = list(sku_database.find({\"SKU_unit\": int(sku_id)}, {'_id': 0, 'Status': 1}))\n status = status_query[0][\"Status\"]\n return status", "def select_by_status(status):\n sql = 'checkStatus'\n val = [status]\n rows = DBconnector.call_procedure(sql, val)\n for r in rows:\n return _wrap_in_parcel_list(r.fetchall())", "def select_steps_with_status(status, steps):\n return [ step for step in steps if step.status == status ]", "def verify_status_filter(self, status_list):\n is_verified = True\n self.click_element(self.multiselect_status_dropdown_locator)\n for item in status_list:\n dropdown_item_locator = (By.XPATH, \"//li[text()='%s']\" % item)\n if self.is_element_visible(dropdown_item_locator) is False:\n is_verified = False\n break\n self.script_executor(\"var elements = document.getElementsByClassName('k-list-container k-popup k-group k-reset multiselect'); for (var i = 0, len = elements.length; i < len; i++) { elements[i].style.display = 'none';}\")\n self.script_executor(\"var elements = document.getElementsByClassName('k-list k-reset'); for (var i = 0, len = elements.length; i < len; i++) { elements[i].setAttribute('aria-hidden', 'true');}\")\n return is_verified", "def get_prices(uuid, card_format, price_source, price_list, card_type_order, price_data_json):\n if price_source not in price_data_json[uuid][card_format]:\n pass\n #print(f'Price source value of {price_source} is not available for {card_format} and {uuid}')\n else:\n source = price_data_json[uuid][card_format][price_source]\n if price_list not in source:\n pass\n #print(f'Price list value of {price_list} is not available for {price_source} and {uuid}')\n else:\n retail = source[price_list]\n for type in card_type_order:\n if type in retail:\n return retail[type]", "def get_status(self, ids):\n return [self.tasks[id].status for id in ids]", "def get_status(self, list_for_check):\n d = {} # dictionary for return\n if len(list_for_check) > 990:\n return \"Error, too much to check\"\n elif len(list_for_check) == 0:\n logging.warning(\"[Collector] Passed empty list to check\")\n return \"Error, nothing to check\"\n\n vkids = \",\".join((str(i) for i in list_for_check))\n logging.info(\"[Collector] Making getProfiles API request...\")\n request = VK_GETPROFILES_BASE+vkids+\"&fields=online\"\n\n try:\n jsondata = json.loads(urllib2.urlopen(request, None, 25).read())\n except (URLError, HTTPError):\n logging.error(\"[Collector] Some error happaned during getProfiles API request\")\n # 
if jsondata['error']: logging.error(\"Cannot get correct API response.\")\n\n connection = sqlite3.connect('vk.db')\n cursor = connection.cursor()\n\n for i in jsondata['response']:\n d[i['uid']] = i['online']\n cursor.execute(\"SELECT * from u\" + str(i['uid']) + \" order by time desc limit 1\")\n last_status = cursor.fetchone()\n #print(i['uid'],last_status[1],i['online'])\n if last_status[1] != i['online']:\n cursor.execute(\"INSERT INTO u\" + str(i['uid']) + \"(time, status) VALUES (\" + str(int(time.time())) + \",\" + str(i['online']) + \")\")\n logging.info(\"[Collector] Add record for : \" + str(i['uid']) + \" \")\n logging.info(\"[Collector] Request has been parsed, records: \"+str(len(d))+\" \")\n connection.commit()\n connection.close()\n return d", "def test_in_list(self):\n\n # get available ids\n ids = list(DQ(\"(b.id) Book b\").tuples())\n ids = [id[0] for id in ids]\n\n # take just three of them\n c = {\"ids\": ids[:3]}\n dq = DQ(\"(b.id, b.name) Book{b.id in '$(ids)'} b\")\n r = list(dq.context(c).dicts())\n\n # make sure we got three of them\n self.assertEqual(len(r), 3)", "def filter_by_status(x, _filter_status_list=None):\n if _filter_status_list:\n for filter_status in _filter_status_list:\n if x.get(filter_status, False):\n return True\n return False\n return True", "def _check_list(self, input_list, switch_list):\n\n return_list = []\n for vid in input_list:\n if str(vid) in switch_list:\n return_list.append(vid)\n return return_list", "def lookup (barcode, ID_TYPES=['ISBN', 'UPC','EAN']):\n\n matches = [] # list of {'desc', 'sku', 'type', 'vnd'}\n\n for idtype in ID_TYPES:\n try:\n result = api.item_lookup(barcode, SearchIndex='All', IdType=idtype)\n for item in result.Items.Item:\n if not _is_duplicate(item.ASIN, matches):\n matches.append({'desc': unicode(item.ItemAttributes.Title),\n 'sku': unicode(item.ASIN),\n 'type': idtype,\n 'vnd': 'AMZN:'+AMZLOCALE}) # vendor id\n\n except (errors.InvalidAccount, errors.InvalidClientTokenId, errors.MissingClientTokenId):\n print >>sys.stderr, \"Amazon Product API lookup: bad account credentials\"\n\n except errors.TooManyRequests, toomanyerr:\n print >>sys.stderr, \"Amazon Product API lookup error:\", toomanyerr\n\n except errors.InternalError, awserr:\n print >>sys.stderr, \"Amazon Product API lookup error:\", awserr\n\n except errors.InvalidParameterValue:\n # this simply means the barcode\n # does not exist for the given type,\n # so no need to do anything explicit\n pass\n\n return matches", "def _has_run(self, opt: dict, status: Union[List[str], str]):\n if isinstance(status, str):\n status = [status]\n\n for item in self._query_by_dict(opt):\n if item.get('status') in status:\n return True\n return False", "def set_vendor_price_list_status(self, status_items):\n self.multiple_items_selection_from_kendo_dropdown(self.vendor_price_list_status_kendo_dropdown_locator, status_items)\n self.wait_for_ajax_spinner_load()", "def _process_status_onu_discovered_list(self, discovered_onus):\n self.log.debug('discovered-ONUs', list=discovered_onus)\n\n # Only request discovery if activation is auto-discovery or auto-activate\n continue_discovery = ['autodiscovery', 'autoactivate']\n\n if self._activation_method not in continue_discovery:\n return set(), set()\n\n my_onus = frozenset(self._onus.keys())\n\n new_onus = discovered_onus - my_onus\n rediscovered_onus = my_onus & discovered_onus\n\n return new_onus, rediscovered_onus", "def _filter_resources_by_status(self, resources: [], statuses: []):\n all_resources = []\n for resource 
in resources:\n if statuses:\n status = ResourceModel.Status.from_string(resource.status)\n if status in statuses:\n all_resources.append(resource)\n else:\n all_resources.append(resource)\n return all_resources", "def set_statement_status_for_search(self, status_list):\n self.multiple_items_selection_from_kendo_dropdown(self.statement_status_dropdown_locator, status_list)\n self.wait_for_ajax_spinner_load()", "def set_multiple_status(self, status_list):\n self.multiple_items_selection_from_kendo_dropdown(self.status_kendo_dropdown_locator, status_list)\n self.wait_for_ajax_spinner_load()\n buy_page_title_element = self.wait().until(EC.element_to_be_clickable(self.buy_page_title_locator), 'buy page title locator not found before specified time out')\n buy_page_title_element.click()", "def filter_my_data(stock_status):\n try:\n data = get_data()\n filtered_data = [item for item in data if item[0] == stock_status]\n return filtered_data\n except Exception:\n return []", "def check_products(self, adi):\r\n results = []\r\n products = self.get_products(adi)\r\n for product in products[\"data\"][\"products\"]:\r\n print(\"Checking product '{}'... \".format(product[\"name\"]), end='')\r\n detail = self.get_product_detail(adi, product_id=product[\"productId\"], product_name=product[\"name\"])\r\n if self.rf.valid_product_detail(detail):\r\n print(\"Valid.\")\r\n result = \"Available\"\r\n else:\r\n print(\"INVALID.\")\r\n result = \"Not available\"\r\n results.append([product[\"name\"], result])\r\n return results", "def item_to_ids(items, user):\r\n sizes = [\"10\", \"12\", \"14\", \"16\", \"25\", \"30\", \"35\", \"40\"]\r\n if not items:\r\n return []\r\n ids = []\r\n names_to_id_product = get_names_preconfigured(user)\r\n for item in items:\r\n for name, product_id in names_to_id_product.items():\r\n # CLEAN TO REMOVE SMALL MEDIUM LARGE, AND STRIP\r\n item = item.strip()\r\n for size in sizes:\r\n if size in item:\r\n if size == \"10\" or size == \"25\":\r\n replace = \"Small\"\r\n elif size == \"12\" or size == \"30\":\r\n replace = \"Medium\"\r\n elif size == \"14\" or size == \"35\":\r\n replace = \"Large\"\r\n elif size == \"16\" or size == \"40\":\r\n replace = \"X-Large\"\r\n item = item.replace(size + '\"', replace).replace(size + \"'\", replace)\r\n # print(item, \" | \", name, editDistanceDP(item, name, len(item), len(name)) / (len(name)))\r\n if edit_distance_dp(item, name, len(item), len(name)) / (len(name)) < .3 or edit_distance_dp(\r\n item.replace(\"Pizza\", \"\"), name.replace(\"Dipping \", \"\"), len(item.replace(\"Pizza\", \"\")),\r\n len(name.replace(\"Dipping \", \"\"))) / (len(name)) < .1:\r\n ids.append(product_id)\r\n break\r\n final_ids = []\r\n for id in ids:\r\n if \"F_\" in id:\r\n variants = ids_to_variants(user)\r\n replace = variants[id][0]\r\n if replace == \"STJUDE\":\r\n replace = \"STJUDE10\"\r\n final_ids.append(replace)\r\n else:\r\n final_ids.append(id)\r\n return final_ids\r\n # order.add_item('P12IPAZA') # add a 12-inch pan pizza\r\n # order.add_item('MARINARA') # with an extra marinara cup\r\n # order.add_item('20BCOKE') # and a 20oz bottle of coke\r\n return ['P12IPAZA', 'MARINARA', '20BCOKE']", "def only_choice(values):\n for unit in unitlist:\n for d in '123456789':\n # array of boxes for the digit d\n destinations = [b for b in unit if d in values[b]]\n if len(destinations) == 1:\n values = assign_value(values, destinations[0], d)\n return values", "def _get_step_by_status(self, steps, status):\n if steps:\n for s in steps:\n if s['StepStatus'] == 
status:\n return s", "def task_3_find_item_via_value(data: DT, value) -> DT:\n return [dic for dic in data if value in dic.values()]", "def GetActuatorsWithStatus(status_flags, status_helper, status_to_check):\n\n flag = 0\n for status in status_to_check:\n flag |= status_helper.Value(status)\n return [key for key, value in status_flags.iteritems() if value & flag]", "def get_status(pos, neg, names):\n status = {}\n for i in names:\n #print str(i) +'\\n'+ str(pos) +'\\n'+ str(neg)+'\\n'+'\\n'\n if i in pos:\n status[i] = \"1\"\n elif i in neg:\n status[i] = \"0\"\n else:\n status[i] = \"NA\"\n return status", "def test_get_small_and_light_eligibility_by_seller_sku(self):\n pass", "def test_list_products_filtered_by_selling_status(self):\n self._require_login(self.user1)\n response = self.client.get('/api/1.0/products/?selling=3')\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.data.__len__(), 1)\n self.assertEqual(response.data[0]['name'], 'Producto 2')\n self.assertEqual(response.data[0]['description'], 'Descripcion producto 2')", "def test_statuses_exist(self):\n\n assert hasattr(Partner, 'AVAILABLE')\n assert hasattr(Partner, 'NOT_AVAILABLE')\n assert hasattr(Partner, 'WAITLIST')\n\n assert hasattr(Partner, 'STATUS_CHOICES')\n\n assert len(Partner.STATUS_CHOICES) == 3\n\n database_statuses = [x[0] for x in Partner.STATUS_CHOICES]\n\n assert Partner.AVAILABLE in database_statuses\n assert Partner.NOT_AVAILABLE in database_statuses\n assert Partner.WAITLIST in database_statuses", "def _filter_search_values(key: str, values: list, collection: list):\n return_data = []\n for item in collection:\n if any(val in values for val in item[key]):\n return_data.append(item)\n return return_data", "def check_answer(self,msg_list,honeypotids,expect_dict):\n filtered_msgs = []\n for msg in msg_list:\n if \"ALL\" in honeypotids or msg[\"from\"] in honeypotids:\n for k in expect_dict.keys():\n if k in msg.keys():\n if msg[k] == expect_dict[k]:\n filtered_msgs.append(msg)\n return filtered_msgs", "def _return_specific_status_prs(all_prs, filters):\n if _validate_list_of_dict(all_prs):\n pr_issue = []\n for pull_request in all_prs:\n commits = pull_request['pr'].get_commits()\n statuses = [[status.state] for status in commits.reversed[0].get_statuses() if filters['status'] == status.state]\n if statuses:\n pr_issue.append({'pr':pull_request['pr'], 'issue':pull_request['issue']})\n statuses = []\n logger.debug(\"SPECIFIC PR STATUSES: %s\", [pr['pr'].number for pr in pr_issue])\n return pr_issue\n else:\n return []", "def only_choice(values):\n\tfor unit in unitlist:\n\t\tfor digit in '123456789':\n\n\t\t\tmatches = []\n\t\t\t\n\t\t\tfor box in unit:\n\t\t\t\tif digit in values[box]:\n\t\t\t\t\tmatches.append(box)\n\t\t\t\t\n\t\t\tif len(matches) == 1:\n\t\t\t\tvalues = assign_value(values, matches[0], digit)\n\n\treturn values", "def favorite_status(self, **kwargs: Any) -> Response:\n try:\n requested_ids = kwargs[\"rison\"]\n tags = TagDAO.find_by_ids(requested_ids)\n users_favorited_tags = TagDAO.favorited_ids(tags)\n res = [\n {\"id\": request_id, \"value\": request_id in users_favorited_tags}\n for request_id in requested_ids\n ]\n return self.response(200, result=res)\n except TagNotFoundError:\n return self.response_404()\n except MissingUserContextException as ex:\n return self.response_422(message=str(ex))", "def test_acceptance_sku_item_defined_on_checkout(self):\r\n pattern = re.compile(r\"items: \\[\\{sku: 'sku_\\w{14}', quantity: \\d{1}\\}\\]\",\r\n re.I 
| re.M)\r\n res = re.search(pattern, self.dom_str)\r\n self.assertTrue(hasattr(res, 'group'),\r\n msg=\"You didn't add the SKU code in the items list.\")", "def match_variants(self,state,variants):\r\n for v in variants:\r\n terms = self.match_variant(state,v)\r\n if terms is not None:\r\n return terms\r\n return None", "def aws_waits ( func, matching_ids ) :\n done = False\n found_ids = []\n while not done :\n found_ids = []\n time.sleep( 1 )\n items = func( )\n for item in items :\n for matching_id in matching_ids :\n if item.id == matching_id :\n found_ids.append( item )\n break\n\n if len( found_ids ) == len( matching_ids ) :\n done = True\n break\n\n return found_ids", "def only_choice(values):\n for unit in unitlist:\n for digit in '123456789':\n choices = [box for box in unit if digit in values[box]]\n if len(choices) == 1:\n values = assign_value(values, choices[0], digit)\n \n return values", "def _search_list(list, key, status_date):\n\n for l in list:\n if l[key] == status_date:\n return l", "def _get_status(self):\n status_results = self.db_handler.query_items({\n 'api_version': {\n 'condition': 'is_in',\n 'value': ['3', TsV2CatalogHandler.api_version]\n }\n })\n source_status = None\n status = None\n for s in status_results:\n if s['api_version'] == '3':\n source_status = s\n elif s['api_version'] == TsV2CatalogHandler.api_version:\n status = s\n if not source_status:\n self.logger.warning('Source catalog status not found')\n return False\n if source_status['state'] != 'complete':\n self.logger.debug('Source catalog is not ready for use')\n return False\n if not status or ((status['source_timestamp'] != source_status['timestamp']) and status['state'] != 'in-progress'):\n # begin or restart process.\n # If the catalog is already being generated it will be allowed to finish before re-starting.\n status = {\n 'api_version': TsV2CatalogHandler.api_version,\n 'catalog_url': '{}/ts/txt/2/catalog.json'.format(self.cdn_url),\n 'source_api': source_status['api_version'],\n 'source_timestamp': source_status['timestamp'],\n 'state': 'in-progress',\n 'processed': {}\n }\n\n return (status, source_status)", "def _get_active_values_in_list_with_dicts(self, dict_list):\n return_list = []\n for item in dict_list:\n if item['active']: \n return_list.append(item['value'])\n return return_list", "def generate_mock_tweet(\n raw_status: Union[Status, List[Status]]\n) -> Union[Status, List[Status]]:\n if type(raw_status) == list:\n updated_status = [\n generate_mock_tweet(raw_status=status) for status in raw_status\n ]\n else:\n updated_status = convert_dicts_in_status_to_obj(status=raw_status)\n\n return updated_status", "def makeCondition (self, source) :\n condition = 'OK'\n if 'status' in response :\n if condition in ('OK', 'rejected', 'deferred') :\n condition = response ['status'];\n else :\n raise moderationError ('error in status', source)\n return condition", "def get_filter_results(twitter_dict, username_list, filter_dict):\r\n twitter_handles = username_list \r\n name_filtered_list = []\r\n upper_user = []\r\n \r\n if 'name_includes' in filter_dict: \r\n for user in twitter_handles: \r\n user = user.upper()\r\n upper_user.append(user)\r\n name = filter_dict['name_includes']\r\n \r\n for uName in username_list:\r\n if name.upper() == uName.upper():\r\n name_filtered_list.append(name) \r\n \r\n twitter_handles = name_filtered_list \r\n \r\n location_filtered_list = []\r\n if 'location_includes' in filter_dict: \r\n for user in twitter_handles: \r\n location = 
filter_dict['location_includes']\r\n if location.upper() == twitter_dict[user]['location'].upper(): \r\n location_filtered_list.append(user) \r\n twitter_handles = location_filtered_list\r\n \r\n follower_filtered_list = []\r\n if 'follower' in filter_dict:\r\n for user in twitter_handles:\r\n for follower in twitter_dict[user]['following']:\r\n if follower == filter_dict['follower']:\r\n #if follower in twitter_handles:\r\n follower_filtered_list.append(user)\r\n \r\n twitter_handles = follower_filtered_list \r\n \r\n following_filtered_list = []\r\n if 'following' in filter_dict:\r\n for user in twitter_handles: \r\n following_list = all_followers(twitter_dict, user)\r\n for follower in following_list: \r\n if follower in twitter_handles: \r\n following_filtered_list.append(follower) \r\n twitter_handles = following_filtered_list \r\n \r\n return twitter_handles", "def test_get_small_and_light_enrollment_by_seller_sku(self):\n pass", "def _inlist(self, source, target):\n source = \"\" if not source else source\n target = \"\" if not target else target\n source_list = source.split(\",\")\n target_list = target.split(\",\")\n return 1 if len([value for value in source_list if value in target_list]) > 0 else 0", "def get_status(self, rows):\n\n\t\taccount_status = {}\n\t\tfor row in rows:\n\t\t\t(account_number, status) = (int(row[0]), row[2])\n\t\t\tif account_status.has_key(account_number):\n\t\t\t\taccount_status[account_number].append(status)\n\t\t\t\t# Log account information if account has more than 1 current active status\n\t\t\t\tself.log.debug(\"Multiple Current Statuses for Account Number:\" + account_number)\n\t\t\telse:\n\t\t\t\taccount_status[account_number] = [status]\n\n\t\treturn account_status", "def query_case(casestatus_list):\n\n # create empty case queryset\n cases_merged = Case.objects.none()\n\n # iterate over casestatus objects\n for casestatus in casestatus_list:\n\n # get cases with specific casestatus\n cases = Case.objects.filter(casestatus=casestatus)\n\n # add cases from above query to merge queryset\n cases_merged = cases | cases_merged\n\n # sort cases by id\n cases_sorted = cases_merged.order_by('case_id')\n\n # return sorted cases with specific casestatus\n return cases_sorted", "def get_statuses():\n statuses = list()\n\n for status in tweepy.Cursor(api.user_timeline, id=836104384366936066).items():\n if is_prediction(status):\n statuses.append(status)\n else:\n continue\n\n return statuses", "def check_status(self) -> Mapping[str, bool]:\n ups_stat = {}\n for name in self.ups_names:\n ups_stat[name] = self.check_ups(name)\n return ups_stat", "def needs_by_status(cls):\n\n db = current.db\n\n # Extract the data\n table = current.s3db.req_need_line\n status = table.status\n number = table.id.count()\n query = (table.deleted == False)\n rows = db(query).select(status, number, groupby = status)\n\n # Build data structure for chart renderer\n rows = dict((row[status], row[number]) for row in rows)\n data = []\n for code, label, color in cls.REQ_STATUS:\n value = rows.get(code)\n data.append({\"label\": s3_str(label),\n \"value\": value if value else 0,\n \"color\": color,\n \"filterKey\": code,\n })\n\n return data", "def _matchStatus(self, status: int):\n return (status in self._allowedStatus['List']\n or (self._allowedStatus['Range']\n and (self._allowedStatus['Range'][0] <= status\n and status <= self._allowedStatus['Range'][1])))", "def pick_status(self):\n # Make a choice\n choice = random.choice(self.statuses)\n # Update status and give PSQL modifiers\n 
if choice == self.statuses[0]:\n self.altPSQL = [i + 3 for i in self.PSQL]\n self.status = random.choice(self.great)\n elif choice == self.statuses[1]:\n self.altPSQL = [i + 2 for i in self.PSQL]\n self.status = random.choice(self.good)\n elif choice == self.statuses[2]:\n self.altPSQL = [i + 1 for i in self.PSQL]\n self.status = random.choice(self.mild_good)\n elif choice == self.statuses[3]:\n self.altPSQL = [i - 1 for i in self.PSQL]\n self.status = random.choice(self.mild_bad)\n elif choice == self.statuses[4]:\n self.altPSQL = [i - 2 for i in self.PSQL]\n self.status = random.choice(self.bad)\n else:\n self.altPSQL = [i - 3 for i in self.PSQL]\n self.status = random.choice(self.worst)\n \n return self.PSQL, self.altPSQL", "def filter_pro_matches(resp):\n\n return [x for x in resp if x[\"dire_name\"] and x[\"radiant_name\"]]", "def _match_users_with_karma(rankings, user_key, karma_key):\n if not rankings:\n return []\n\n giver_ids = [r[user_key] for r in rankings]\n ids_to_users = User.objects.select_related(\n 'userprofile').in_bulk(giver_ids)\n return [(ids_to_users[r[user_key]], r[karma_key]) \\\n for r in rankings]", "def onlist(listtocheck, item):\n\n # Return the result\n return item in listtocheck", "def award_status_populator():\n award_status_list = funding_data[\"Project Status:\"].unique()\n return [{'label': i, 'value': i} for i in award_status_list]", "def test_list_by_status(self):\n # Submit a reserve, then query by draft status to retrieve\n reserve_kwargs = {\n \"input\": join(self.input_dir, \"pds4_bundle_with_contributors.xml\"),\n \"node\": \"img\",\n \"submitter\": \"my_user@my_node.gov\",\n \"force\": True,\n }\n\n doi_label = self._reserve_action.run(**reserve_kwargs)\n\n dois, _ = self._web_parser.parse_dois_from_label(doi_label)\n doi = dois[0]\n\n list_kwargs = {\"status\": DoiStatus.Draft}\n\n list_result = json.loads(self._list_action.run(**list_kwargs))\n\n self.assertEqual(len(list_result), 1)\n\n list_result = list_result[0]\n self.assertEqual(list_result[\"status\"], doi.status)\n self.assertEqual(list_result[\"title\"], doi.title)\n self.assertEqual(list_result[\"subtype\"], doi.product_type_specific)\n self.assertEqual(list_result[\"identifier\"], doi.pds_identifier)\n\n # Now move the draft to review, use JSON as the format to ensure\n # this test works for both DataCite and OSTI\n doi_label = self._record_service.create_doi_record(dois, content_type=CONTENT_TYPE_JSON)\n\n with tempfile.NamedTemporaryFile(mode=\"w\", suffix=\".json\") as temp_file:\n temp_file.write(doi_label)\n temp_file.flush()\n\n review_kwargs = {\n \"input\": temp_file.name,\n \"node\": \"img\",\n \"submitter\": \"my_user@my_node.gov\",\n \"force\": True,\n \"review\": True,\n }\n\n review_json = self._release_action.run(**review_kwargs)\n\n dois, _ = self._web_parser.parse_dois_from_label(review_json, content_type=CONTENT_TYPE_JSON)\n\n doi = dois[0]\n\n # Now query for review status\n list_kwargs = {\"status\": DoiStatus.Review}\n\n list_result = json.loads(self._list_action.run(**list_kwargs))\n\n self.assertEqual(len(list_result), 1)\n\n list_result = list_result[0]\n self.assertEqual(list_result[\"status\"], doi.status)\n self.assertEqual(list_result[\"title\"], doi.title)\n self.assertEqual(list_result[\"subtype\"], doi.product_type_specific)\n self.assertEqual(list_result[\"identifier\"], doi.pds_identifier)\n\n # Run the same query again using the label format\n list_kwargs = {\"status\": DoiStatus.Review, \"format\": FORMAT_LABEL}\n\n list_result = 
self._list_action.run(**list_kwargs)\n\n dois, _ = self._web_parser.parse_dois_from_label(list_result)\n\n self.assertEqual(len(dois), 1)\n\n output_doi = dois[0]\n\n self.assertEqual(doi.pds_identifier, output_doi.pds_identifier)\n self.assertEqual(doi.title, output_doi.title)\n self.assertEqual(doi.doi, output_doi.doi)\n self.assertEqual(doi.status, output_doi.status)\n\n # Finally, query for draft status again, should get no results back\n list_kwargs = {\"status\": DoiStatus.Draft, \"format\": FORMAT_RECORD}\n\n list_result = json.loads(self._list_action.run(**list_kwargs))\n\n self.assertEqual(len(list_result), 0)", "def filter_generic(products, listings, result=None):\n print \"Apply Generic Filtering \"\n if result == None:\n result = {}\n matched_listings = []\n for alist in listings:\n manufacturer, renamed_manufacturer = find_manufacturer(products, alist)\n if manufacturer == False:\n continue\n for product in products[manufacturer]:\n product = product[0] # get product information all in lower case\n if not does_list_contains_model(\\\n alist, product['model'], manufacturer):\n continue\n if product['product_name'] not in result:\n result[product['product_name']] = []\n for matched_list in listings[alist]:\n matched_manufacturer =\\\n matched_list['manufacturer'].lower()\n if manufacturer not in matched_manufacturer and\\\n matched_manufacturer not in renamed_manufacturer:\n continue\n result[product['product_name']].append(matched_list)\n matched_listings.append(alist)\n remove_matched_list(listings, matched_listings)\n length_listings(listings)\n return result", "def validate_get_response(response, status, count, job_templates, keys=None):\n assert (response[\"status\"]) == status\n json_response = json.loads(response[\"body\"])\n assert (json_response[\"count\"]) == count\n results = json_response[\"results\"]\n for item in results:\n matching_item = find_by_id(item[\"id\"], job_templates)\n if not keys:\n keys = list(matching_item.keys())\n assert sorted(keys) == sorted(list(item.keys()))\n compare(item, matching_item, keys)", "def check_status(ctx, projectid, snic_api_credentials=None, statusdb_config=None):\n for pid in projectid:\n if statusdb_config == None:\n logger.error(\"--statusdb-config or env variable $STATUS_DB_CONFIG need to be set to perform GRUS delivery\")\n return 1\n taca.utils.config.load_yaml_config(statusdb_config)\n if snic_api_credentials == None:\n logger.error(\"--snic-api-credentials or env variable $SNIC_API_STOCKHOLM need to be set to perform GRUS delivery\")\n return 1\n taca.utils.config.load_yaml_config(snic_api_credentials)\n\n d = _deliver_grus.GrusProjectDeliverer(\n pid,\n **ctx.parent.params)\n d.check_mover_delivery_status()", "def _search_for_id(self, validated_sample_list, client, ordering_org, row):\n org_uri = ORG_URI_BY_NAME[ordering_org]\n barcode = str(row[validated_sample_list.COLUMN_REFERENCE])\n\n if ordering_org == TESTING_ORG:\n # The user can send in a \"fake status\" from the raw sample list, by including it\n # in COLUMN_FAKE_STATUS. If we don't recognize it as one of the status flags, we\n # should just use \"ok\"\n status = row[validated_sample_list.COLUMN_FAKE_STATUS]\n if status not in validated_sample_list.STATUS_ALL:\n status = validated_sample_list.STATUS_OK\n\n comment = \"This data is faked for integration purposes (Internal testing was selected)\"\n\n if status == validated_sample_list.STATUS_OK:\n service_request_id = \"faked-{}\".format(uuid4())\n logger.warn(\"Using testing org. 
Service request ID faked: {}\".format(\n service_request_id))\n else:\n service_request_id = \"\"\n return service_request_id, status, comment, org_uri\n\n try:\n response = client.search_for_service_request(org_uri, barcode)\n service_request_id = response[\"resource\"][\"id\"]\n status = validated_sample_list.STATUS_OK\n comment = \"\"\n except OrganizationReferralCodeNotFound as e:\n self.usage_warning(\n \"These barcodes are not registered for the org {}. \"\n \"Press '{}' in order to fetch anonymous service requests for these.\".format(\n org_uri,\n BUTTON_TEXT_ASSIGN_UNREGISTERED_TO_ANONYMOUS), barcode)\n status = validated_sample_list.STATUS_UNREGISTERED\n\n # Overwrite the org_uri so we use KARLSSON_AND_NOVAK, because this will be anonymous\n org_uri = ORG_URI_BY_NAME[KARLSSON_AND_NOVAK]\n\n # An \"unregistered\" status signals to the next step in the workflow that we need\n # to fetch an anonymous service request:\n status = validated_sample_list.STATUS_UNREGISTERED\n service_request_id = \"\"\n comment = (\"No matching request was found for this referral code. \"\n \"Press '{}' in order to fetch anonymous service requests for these.\".format(\n BUTTON_TEXT_ASSIGN_UNREGISTERED_TO_ANONYMOUS\n ))\n except PartnerClientAPIException as e:\n self.usage_error_defer(\n \"Something was wrong with {} for barcode(s). \"\n \"See file validated sample list for details.\".format(org_uri), barcode)\n service_request_id = \"\"\n status = validated_sample_list.STATUS_ERROR\n comment = e.message\n return service_request_id, status, comment, org_uri", "def find_all_items(items: WebElements, value_list: List[str]=PARAMS_LEAGUES) -> WebElements:\n items_list = []\n for item in items:\n if any([True if word in item.text.lower() else False for word in value_list]):\n items_list.append(item)\n return items_list", "def get_filter_results (twitter_data, search_list, filter_data):\n\n #initialize\n filter_list = []\n\n for operation in filter_data:\n if operation == 'name-includes':\n for username in search_list:\n # since case doesnt matter, eveything is made uppercase and\n # then is checked\n if filter_data [operation].upper() in \\\n twitter_data [username]['name'].upper():\n filter_list.append (username)\n\n elif operation == 'location-includes':\n for username in search_list:\n # same case as above\n if filter_data [operation].upper() in \\\n twitter_data [username]['location'].upper():\n filter_list.append (username)\n\n elif operation == 'follower':\n for username in search_list:\n if username in \\\n twitter_data[filter_data [operation]]['following']:\n filter_list.append (username)\n\n elif operation == 'following':\n for username in search_list:\n if username in all_followers(twitter_data, filter_data[operation]):\n filter_list.append (username)\n\n search_list = filter_list\n filter_list = []\n\n filter_list = search_list\n filter_list.sort() # sort the list alphabetically for testing purposes\n\n return filter_list", "def get_mex_status(request):\n flag = False #flag that is used for indicating successful search of MEx in MEx list\n response = GetMexStatusResponse() #create response object \n for i in mex_list: #search for MEx in MEx list\n if i.id == request.mex_id:\n response.mex_status = i.status.name #MEx status query 'name' method from enum object returns text\n response.job_id = str(i.job_id)\n flag = True\n if flag == True: #if we found requested Mex in the list, together with status and job id we return bool success \n response.success = True\n return response\n else:\n 
response.success = False\n return response", "def check_status(line):\n parts = line.strip().split(\"\\t\")\n partition = parts[0]\n userid = parts[1]\n parts = parts[2:]\n \n result = [ userid, None ]\n if (userid in user_ids) and (partition == 'user'):\n result = [ userid, parts]\n return result", "def search_helper(name_list, operation, twitter_dict): \r\n return_list = []\r\n \r\n for name in name_list:\r\n if operation == 'following':\r\n search_specified_list = twitter_dict[name]['following']\r\n for following_names in search_specified_list: \r\n if following_names not in return_list: \r\n return_list.append(following_names) \r\n \r\n elif operation == 'followers':\r\n followers = all_followers(twitter_dict, name)\r\n for followers_name in followers: \r\n if followers_name not in return_list: \r\n return_list.append(followers_name) \r\n \r\n return return_list", "def get_by_status(status):\n return list(tasks.find({'status': status}))", "def test__validate_status__0():\n for input_value, expected_output in (\n (ScheduledEventStatus.active, ScheduledEventStatus.active),\n (ScheduledEventStatus.active.value, ScheduledEventStatus.active)\n ):\n output = validate_status(input_value)\n vampytest.assert_is(output, expected_output)", "def get_locations_by_ids(self, id_list):", "def test_filter_by_ids(self, original_list, ids_to_filter, expected_result):\n result = helpers.filter_by_ids(original_list, ids_to_filter)\n\n self.assertEqual(result, expected_result)", "def _get_status(self, context, object_list=None):\n status = self.request.GET.get(\"status\", \"\").upper()\n\n if object_list is not None:\n return self._get_object_list(\n object_list, status != \"\" and JobStatus.is_member(status), status=status\n )\n\n options = list(map(lambda s: (s.name, s.value), JobStatus))\n\n return {\n **context,\n \"status_options\": sorted(options, key=lambda x: x[0]),\n \"status\": status,\n }", "def get_item_variants(self, item_id, item_name, start):\n\n item_url = f\"https://www.supremenewyork.com/shop/{item_id}.json\"\n\n item_variants = rq.get(item_url, headers=self.headers, proxies=self.proxy).json()\n\n for stylename in item_variants[\"styles\"]:\n for itemsize in stylename[\"sizes\"]:\n item = [item_name, stylename[\"name\"], itemsize['name'], item_variants[\"description\"], 'https:' + stylename[\"image_url\"], item_url.split('.json')[0]]\n if itemsize[\"stock_level\"] != 0:\n # Checks if it already exists in our instock\n if self.checker(item):\n pass\n else:\n # Add to instock dict\n self.instock.append(item)\n \n # Send a notification to the discord webhook with the in-stock product\n if start == 0:\n print('Sending new Notification')\n self.discord_webhook(item)\n logging.info(msg='Successfully sent Discord notification')\n\n else:\n if self.checker(item):\n self.instock.remove(item)", "def check_list(source, value):\n try:\n return value in json.loads(source)\n except:\n return False", "def find_by_sku(cls, sku: int):\n cls.logger.info(\"Processing sku query for %s ...\", sku)\n return cls.query.filter(cls.sku == sku).order_by(cls.id).all()", "def get_units(status, application_name):\n units = []\n for unit_name, unit in iter_units(status):\n if unit_name.startswith('{}/'.format(application_name)):\n units.append((unit_name, unit,))\n return units", "def _find_equivalent(searched_dict, dicts_list):\n for id_key in ('id', 'uid', 'name'):\n # Recognize the ID key used, if any\n local_id = searched_dict.get(id_key)\n if local_id:\n # Found an ID\n for other_item in dicts_list:\n if 
other_item.get(id_key) == local_id:\n # Found an item with the same ID\n return other_item\n \n # Found nothings\n return None", "def filter_list(data: List[dict], field: str, selected: List[str]):\n if len(selected):\n return [x for x in data if x[field] in selected]\n else:\n return data", "def scan_item(request):\n result = {'products':[]}\n u = request.user\n\n p = Product.objects.get_by_sku(request.POST['sku'])\n if p is None:\n p = Product.objects.get_by_upc(request.POST['sku'])\n \n if p is not None:\n result['products'] = [p.details(u)]\n\n return JSONHttpResponse(result)", "def get_selling_status_pack(self, ):\n\n result = {}\n\n # Iterate over incoming raw data and pick values corresponding to \"Selling_status\" db table\n for item in self.raw_data['searchResult']['item']:\n for field in item['sellingStatus'].keys():\n if format_ebay_col_name(field) in selling_status_table_columns:\n if type(item['sellingStatus'][field]) == str:\n result[format_ebay_col_name(field)] = item['sellingStatus'][field]\n\n # Handle several nested values\n while Switch(format_ebay_col_name(field)):\n if case('CONVERTED_CURRENT_PRICE'):\n result['CONVERTED_CURRENT_PRICE_CURRENCY_ID'] = item['sellingStatus'][field]['_currencyId']\n result['CONVERTED_CURRENT_PRICE_VALUE'] = item['sellingStatus'][field]['value']\n if case('CURRENT_PRICE'):\n result['CURRENT_PRICE_CURRENCY_ID'] = item['sellingStatus'][field]['_currencyId']\n result['CURRENT_PRICE_VALUE'] = item['sellingStatus'][field]['value']\n break\n\n # Fill missing values with \"NULL\"s\n for table_filed in selling_status_table_columns:\n if table_filed not in result.keys(): result[table_filed] = 'NULL'\n\n # Set Item ID\n result['ITEM_ID'] = item['itemId']\n\n return result", "def findUniqueResults(ids, results):\n ordered = OrderedDict(sorted(data.items(), key=lambda t: t[0]))\n return list(ordered.values())", "def defaulter(arr):\n return list(set(map(lambda application: application['customer_id'], filter(lambda application: application['repaid_amount'] < (application['principal'] + application['fee']), arr))))", "def get_tasks_ids_by_status(self, status=\"Done\"):\n if status not in (\"Done\", \"Paused\", \"Running\", \"Stopped\"):\n raise ValueError(\"Requested status are not allowed\")\n\n\n m_task_ids = {}\n\n for x in self.get_tasks().findall(\"task\"):\n if x.find(\"status\").text == status:\n m_task_ids[x.find(\"name\").text] = x.attrib[\"id\"]\n\n return m_task_ids", "def get_list_transform(self, sd_responses):\n permit_list = False\n responses_missing = []\n sd_fields = {\n 'activity' : 'dd8a5g7g',\n 'app_id' : 'uqqrsogr',\n 'biz_name' : 't00kheyd',\n 'dba_name' : '60w4ep9y',\n 'addr' : 'kbqz4189',\n 'parcel' : 'kvrgbqrl'\n }\n if isinstance(sd_responses, list):\n permit_list = []\n for resp in sd_responses:\n if (resp.get('responses', False)\n and resp['responses'].get(sd_fields['activity'], False)\n and (resp['responses'].get(sd_fields['biz_name'], False)\n or resp['responses'].get(sd_fields['dba_name'], False))\n and (resp.get('status', '') in self.status_map.keys())\n ):\n resp_status = self.status_map[resp.get('status')].lower()\n resp_referred = self.get_referred_departments(resp.get('labels'))\n item = {\n 'application_id':'',\n 'business_name':'',\n 'dba_name':'',\n 'address':'',\n 'parcel':'',\n 'status':resp_status,\n 'referred':\", \".join(resp_referred)\n }\n data = resp['responses']\n item['application_id'] = str(data.get(sd_fields['app_id']) or '')\n if not data.get(sd_fields['app_id']):\n item['application_id'] = 'P-' + 
str(resp['id'])\n item['business_name'] = str(data.get(sd_fields['biz_name']) or '')\n item['dba_name'] = str(data.get(sd_fields['dba_name']) or item['business_name'])\n item['parcel'] = data.get(sd_fields['parcel'], '')\n if data.get(sd_fields['addr']) and data.get(sd_fields['addr']).get('street'):\n addr = data.get(sd_fields['addr'])\n item['address'] = str(addr.get('street') or '')\n item['address'] += ', '+str(addr.get('city') or '')\n item['address'] += ', '+str(addr.get('state') or '')\n item['address'] += ' '+str(addr.get('zipcode') or '')\n item['address'] = item['address'].strip(' ,')\n if data[sd_fields['activity']] and data[sd_fields['activity']]['checked']:\n for applied_permit_type in data[sd_fields['activity']]['checked']:\n item[applied_permit_type.lower()] = resp_status\n\n permit_list.append(item)\n else:\n responses_missing.append(\n {'id':resp['id'], 'sequential_id':resp['sequential_id']}\n )\n\n with sentry_sdk.configure_scope() as scope:\n scope.set_extra('get_list_transform.permit_list_len', len(permit_list))\n if responses_missing:\n scope.set_extra('get_list_transform.responses_missing', responses_missing)\n return permit_list", "def get(self, status):\n order_items = mDB.get_items_by_order_status(status)\n items = []\n for item in order_items:\n items.append(serialise_obj(item))\n\n data = {\"order_items\": items}\n return data, 200", "def products_exist(cls, *skus):\n try:\n stock_level_ids = cls._get_stock_item_ids(*skus)\n except linnapi.exceptions.InvalidResponseError:\n return False\n if not set(skus).issubset(set(stock_level_ids.keys())):\n return False\n return True", "def get_all_by_status(status):\n return OaiData.objects.filter(status=status).all()", "def test__parse_sku():\n for input_data, expected_output in (\n ({}, ''),\n ({'sku': None}, ''),\n ({'sku': ''}, ''),\n ({'sku': 'a'}, 'a'),\n ):\n output = parse_sku(input_data)\n vampytest.assert_eq(output, expected_output)", "def get_product_available(self, cr, uid, ids, context=None):\n if context is None:\n context = {}\n\n location_obj = self.pool.get('stock.location')\n warehouse_obj = self.pool.get('stock.warehouse')\n shop_obj = self.pool.get('sale.shop')\n\n user_obj = self.pool.get('res.users').browse(cr, 1, uid)\n\n states = context.get('states',[])\n what = context.get('what',())\n if not ids:\n ids = self.search(cr, uid, [])\n res = {}.fromkeys(ids, 0.0)\n if not ids:\n return res\n\n if context.get('shop', False) and context['shop']:\n warehouse_id = shop_obj.read(cr, 1, int(context['shop']), ['warehouse_id'])['warehouse_id'][0]\n if warehouse_id:\n context['warehouse'] = warehouse_id\n\n if context.get('warehouse', False) and context['warehouse']:\n lot_id = warehouse_obj.read(cr, 1, int(context['warehouse']), ['lot_stock_id'])['lot_stock_id'][0]\n if lot_id:\n context['location'] = lot_id\n\n if context.get('location', False) and context['location']:\n if type(context['location']) == type(1):\n location_ids = [context['location']]\n elif type(context['location']) in (type(''), type(u'')):\n location_ids = location_obj.search(cr, 1, [('name','ilike',context['location'])], context=context)\n else:\n location_ids = context['location']\n else:\n location_ids = []\n #wids = warehouse_obj.search(cr, uid, [], context=context)\n #for w in warehouse_obj.browse(cr, uid, wids, context=context):\n # location_ids.append(w.lot_stock_id.id)\n lids = location_obj.search(cr, 1, [])\n #print(lids, 'todas os locais', user_obj.company_id.id)\n for lo in location_obj.browse(cr, 1, lids, context=context):\n 
#print(lo.id, lo.company_id, lo.company_ids, user_obj.company_id.id)\n if lo.company_id and user_obj.company_id.id == lo.company_id.id:\n location_ids.append(lo.id)\n else:\n for co in lo.company_ids:\n if user_obj.company_id.id == co.id:\n location_ids.append(lo.id)\n\n # build the list of ids of children of the location given by id\n if context.get('compute_child', True):\n if len(location_ids) == 0:\n raise osv.except_osv(u'Atenção!', u'Não há local de estoque definido para a empresa/unidade!')\n\n child_location_ids = location_obj.search(cr, 1, [('location_id', 'child_of', location_ids)])\n location_ids = child_location_ids or location_ids\n\n # this will be a dictionary of the UoM resources we need for conversion purposes, by UoM id\n uoms_o = {}\n # this will be a dictionary of the product UoM by product id\n product2uom = {}\n for product in self.browse(cr, 1, ids, context=context):\n product2uom[product.id] = product.uom_id.id\n uoms_o[product.uom_id.id] = product.uom_id\n\n results = []\n results2 = []\n\n from_date = context.get('from_date',False)\n to_date = context.get('to_date',False)\n date_str = False\n date_values = False\n where = [tuple(location_ids),tuple(location_ids),tuple(ids),tuple(states)]\n if from_date and to_date:\n date_str = \"date>=%s and date<=%s\"\n where.append(tuple([from_date]))\n where.append(tuple([to_date]))\n elif from_date:\n date_str = \"date>=%s\"\n date_values = [from_date]\n elif to_date:\n date_str = \"date<=%s\"\n date_values = [to_date]\n if date_values:\n where.append(tuple(date_values))\n\n prodlot_id = context.get('prodlot_id', False)\n prodlot_clause = ''\n if prodlot_id:\n prodlot_clause = ' and prodlot_id = %s '\n where += [prodlot_id]\n elif 'prodlot_id' in context and not prodlot_id:\n prodlot_clause = 'and prodlot_id is null '\n\n # TODO: perhaps merge in one query.\n if 'in' in what:\n # all moves from a location out of the set to a location in the set\n cr.execute(\n 'select sum(product_qty), product_id, product_uom '\\\n 'from stock_move '\\\n 'where location_id NOT IN %s '\\\n 'and location_dest_id IN %s '\\\n 'and product_id IN %s '\\\n 'and state IN %s ' + (date_str and 'and '+date_str+' ' or '') +' '\\\n + prodlot_clause +\n 'group by product_id,product_uom',tuple(where))\n results = cr.fetchall()\n if 'out' in what:\n # all moves from a location in the set to a location out of the set\n cr.execute(\n 'select sum(product_qty), product_id, product_uom '\\\n 'from stock_move '\\\n 'where location_id IN %s '\\\n 'and location_dest_id NOT IN %s '\\\n 'and product_id IN %s '\\\n 'and state in %s ' + (date_str and 'and '+date_str+' ' or '') + ' '\\\n + prodlot_clause +\n 'group by product_id,product_uom',tuple(where))\n results2 = cr.fetchall()\n\n # Get the missing UoM resources\n uom_obj = self.pool.get('product.uom')\n uoms = map(lambda x: x[2], results) + map(lambda x: x[2], results2)\n if context.get('uom', False):\n uoms += [context['uom']]\n uoms = filter(lambda x: x not in uoms_o.keys(), uoms)\n if uoms:\n uoms = uom_obj.browse(cr, 1, list(set(uoms)), context=context)\n for o in uoms:\n uoms_o[o.id] = o\n\n #TOCHECK: before change uom of product, stock move line are in old uom.\n context.update({'raise-exception': False})\n # Count the incoming quantities\n for amount, prod_id, prod_uom in results:\n amount = uom_obj._compute_qty_obj(cr, 1, uoms_o[prod_uom], amount,\n uoms_o[context.get('uom', False) or product2uom[prod_id]], context=context)\n res[prod_id] += amount\n # Count the outgoing quantities\n for amount, prod_id, 
prod_uom in results2:\n amount = uom_obj._compute_qty_obj(cr, 1, uoms_o[prod_uom], amount,\n uoms_o[context.get('uom', False) or product2uom[prod_id]], context=context)\n res[prod_id] -= amount\n\n for prod_id in res:\n if isinstance(res[prod_id], D):\n res[prod_id] = float(res[prod_id])\n return res", "def query_response_codes(self, request, status_codes, fail_codes, lock):\n if lock is not None:\n lock.acquire()\n\n QueryResult = collections.namedtuple('QueryResult', ['valid_code', 'fully_valid', 'sequence_failure'])\n for seq_hash in self._sequence_statuses:\n # iterate over each status code that was detected in this sequence\n for code in self._sequence_statuses[seq_hash].request_statuses:\n if code in status_codes or code in fail_codes:\n for req in self._sequence_statuses[seq_hash].request_statuses[code]:\n # Check if the request exists for this status code\n if request.hex_definition == req.request_hex:\n if lock is not None:\n lock.release()\n if code in status_codes:\n valid_code = True\n else:\n valid_code = False\n return QueryResult(valid_code, req.is_fully_valid, req.sequence_failure)\n\n if lock is not None:\n lock.release()\n return QueryResult(valid_code=False, fully_valid=False, sequence_failure=False)", "def _get_status(self):\n\t\tstatus_list = []\n\t\tfor hand in self.player_hand:\n\t\t\tif hand.value > 21:\n\t\t\t\tstatus_list.append('lost')\n\t\t\telif hand.value == 21 \\\n\t\t\t\t\tand len(hand.cards) == 2 \\\n\t\t\t\t\tand not(self.dealer_hand[0].value == 21 and len(self.dealer_hand[0].cards) == 2):\n\t\t\t\tstatus_list.append('blackjack')\n\t\t\telif self.dealer_hand[0].value > 21:\n\t\t\t\tstatus_list.append('won')\n\t\t\telif hand.value > self.dealer_hand[0].value:\n\t\t\t\tstatus_list.append('won')\n\t\t\telif hand.value == self.dealer_hand[0].value:\n\t\t\t\tstatus_list.append('push')\n\t\t\telse:\n\t\t\t\tstatus_list.append('lost')\n\t\treturn status_list", "def check_for_list(check):", "def filter_items(self, filter_data: Dict[str, str] = None) -> List[WalletItem]:\n filtered_items = self.items\n for key, value in filter_data.items():\n if key == \"category\":\n filtered_items = [item for item in filtered_items\n if re.search(value, item.category, re.IGNORECASE)]\n if key == \"account\":\n filtered_items = [item for item in filtered_items\n if re.search(value, item.account, re.IGNORECASE)]\n if key == \"notes\" in filter_data:\n filtered_items = [item for item in filtered_items\n if re.search(value, item.notes, re.IGNORECASE)]\n if key == \"amt_min\":\n value = float(value)\n filtered_items = [item for item in filtered_items if item.amount >= value]\n if key == \"amt_max\":\n value = float(value)\n filtered_items = [item for item in filtered_items if item.amount <= value]\n if key == \"begin_date\":\n try:\n begin_date = datetime.strptime(value, '%d/%m/%Y')\n filtered_items = [item for item in filtered_items if begin_date <= item.date]\n except ValueError as ex:\n print(ex)\n exit(1)\n if key == \"end_date\":\n try:\n end_date = datetime.strptime(value, '%d/%m/%Y')\n filtered_items = [item for item in filtered_items if item.date <= end_date]\n except ValueError as ex:\n print(ex)\n exit(1)\n return filtered_items", "def get_items(self):\n\n items = []\n\n params = self.request.query_params\n\n if 'items[]' in params:\n items = params.getlist('items[]', [])\n elif 'item' in params:\n items = [params.get('item', None)]\n\n if type(items) not in [list, tuple]:\n items = [items]\n\n valid_ids = []\n\n for item in items:\n try:\n valid_ids.append(int(item))\n 
except (ValueError):\n pass\n\n # List of StockItems which match provided values\n valid_items = StockItem.objects.filter(pk__in=valid_ids)\n\n return valid_items", "def find_by_sku_and_sid(cls, sku, sid):\n cls.logger.info(\n \"Processing lookup for shopcart item with sku %s and sid %s\", str(sku), str(sid)\n )\n return cls.query.filter_by(sid=sid).filter_by(sku=sku).first()", "def get_statuses_by_importance(config, all_statuses):\n critical_statuses = [status['id'] for status in all_statuses if status['name'] in\n config['status_urgency_critical']]\n\n unimportant_statuses = [status['id'] for status in all_statuses if status['name'] in\n config['status_urgency_low']]\n\n return {'critical_statuses': critical_statuses,\n 'unimportant_statuses': unimportant_statuses}", "def some_job():\r\n\tfor row in rows:\r\n\t\treceipt_number = row[0]\r\n\t\tphone_number = row[2]\r\n\t\treturn case_status_check(receipt_number, phone_number)", "def status_get(): # noqa: E501\n db = get_db()\n return [{'id': sample, 'status': db['samples'][sample]['status']} for sample in db['samples'].keys()]" ]
[ "0.56345564", "0.5322471", "0.52859175", "0.52649534", "0.5165805", "0.5100922", "0.5077354", "0.50569147", "0.5036123", "0.50279695", "0.50128347", "0.5012462", "0.4975206", "0.49740142", "0.49417433", "0.493961", "0.48568138", "0.4840273", "0.48359197", "0.48324963", "0.48283082", "0.48179632", "0.48125666", "0.48106146", "0.48078296", "0.47970888", "0.47937343", "0.4767169", "0.47667027", "0.47595793", "0.474241", "0.47416484", "0.47353867", "0.47171125", "0.47170398", "0.47100958", "0.46667144", "0.46636024", "0.46595213", "0.46513018", "0.46086985", "0.46055302", "0.46050775", "0.458934", "0.45766228", "0.456541", "0.45578566", "0.45508388", "0.4547965", "0.4544914", "0.4539477", "0.45383936", "0.45354187", "0.45339072", "0.45329365", "0.45237714", "0.4517778", "0.45159075", "0.45070523", "0.4500885", "0.4497941", "0.44907176", "0.4481441", "0.44762358", "0.4475432", "0.4472019", "0.44556177", "0.44545704", "0.44501635", "0.4435782", "0.4429838", "0.44213372", "0.44196913", "0.44189578", "0.44120395", "0.440759", "0.44030702", "0.43975857", "0.4394326", "0.43851894", "0.43794274", "0.43767473", "0.437351", "0.43707705", "0.43665436", "0.43583882", "0.43515995", "0.43496335", "0.43486595", "0.43384814", "0.43373093", "0.4334876", "0.43330118", "0.43283802", "0.4328349", "0.4327147", "0.4324796", "0.43239263", "0.4320815", "0.43173283" ]
0.7839895
0
Converts a signal name to canonical form.
def name2signal(string): try: v = int(string) except ValueError: if "_" in string: raise ValueError("could not convert %r to signal name" % string) s = string.upper() if not s.startswith("SIG"): s = "SIG" + s v = getattr(signal, s, None) if isinstance(v, int): return s raise ValueError("could not convert %r to signal name" % string) if v >= signal.NSIG: raise ValueError("unsupported signal on this platform: %s" % string) for name in dir(signal): if "_" in name: continue if getattr(signal, name) == v: return name raise ValueError("unsupported signal on this platform: %s" % string)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def canonical_name(self, name):\n raise NotImplementedError", "def _signal_to_common_name(signal: domain.Signal) -> str: # pragma: no cover\n stationb_map: domain.SIGNAL_MAP = {\n (550.0, 10.0, 610.0, 20.0): \"mRFP1\",\n (430.0, 10.0, 480.0, 10.0): \"ECFP\",\n (500.0, 10.0, 530.0, None): \"EYFP\",\n (485.0, 12.0, 520.0, None): \"GFP\",\n (485.0, 12.0, 530.0, None): \"GFP530\",\n 600.0: \"OD\",\n 700.0: \"OD700\",\n }\n return signal.to_label(stationb_map)", "def symbolize_sensorname(name):\n return name.lower().replace(\" \", \"_\")", "def to_upper_case(signals):\n for _, signal in signals.items():\n signal.name = signal.name.upper()\n return signals", "def encoded_name(self):\n return slugify(str(self).lower())", "def mangle(signal):\n if type(signal) is list:\n return [mangle(s) for s in signal]\n else:\n (name, index) = signal\n return name + '_' + str.join('_', [str(x) for x in index])", "def __normalize_name(self):\n self.normalized_name = normalizeSimplified(self.name)", "def normalize(name):\n name = name.lower()\n name = name.replace('-', '')\n name = name.replace(' ', '')\n return name", "def getSignalName(sig):\n try:\n return sig._name\n except AttributeError:\n pass\n return sig.name", "def getSignalName(sig):\n try:\n return sig._name\n except AttributeError:\n pass\n return sig.name", "def signame(sig):\r\n\r\n if _signames is None:\r\n _init_signames()\r\n return _signames.get(sig) or \"signal %d\" % sig", "def name(self):\n return signal_base_get_name(self.obj)", "def canonicalize(name):\n prefixes, first_part, last_part, suffixes = split(name)\n canonical = \"\"\n if prefixes:\n canonical = namecase(prefixes)\n if first_part:\n canonical += \" \" + namecase(first_part)\n if last_part:\n canonical += \" \" + namecase(last_part)\n if suffixes:\n canonical += \", \" + namecase(suffixes)\n return canonical.strip()", "def normalize_name(name):\n return PUNCT_RE.sub('-', name.lower()).strip('-')", "def normalize_name(cls, name):\n\t\treturn ' '.join(name.lower().strip().split())", "def normalize_name(cls, name):\n\t\treturn ' '.join(name.lower().strip().split())", "def normalize_name(cls, name):\n\t\treturn ' '.join(name.lower().strip().split())", "def normalizeSerial(serial):\n\treturn serial.upper().replace(\"-\", \"\").strip()", "def normalizeHeaderName(name):\n # type: (AnyStr) -> AnyStr\n return name.lower()", "def normalize_reference_name(name):\n return name.strip().lower().replace(\"-\", \"_\").replace(\" \", \"_\")", "def normalize_name(self):\n name = self.ua_data.get('name', '')\n if not name:\n return\n self.ua_data['name'] = self.normalized_name.get(name.lower(), name)", "def normalize_label(label):\n label = normalize('NFKD', label)\n label = re.sub('/[^a-z0-9-_:.]/g', '-', label)\n label = label.lower()\n return label", "def convert(name):\n s1 = re.sub('(.)([A-Z][a-z]+)', r'\\1_\\2', name)\n return re.sub('([a-z0-9])([A-Z])', r'\\1_\\2', s1).lower()", "def normalize_package_name(_s: str) -> str:\n return _s.replace('_', '-').lower()", "def normalize_name(self, value):\n import unicodedata\n import re\n\n self.log('Converting string %s' % value)\n \n # Double try in name conversion\n try:\n value = unicodedata.normalize('NFKD', u'%s' % value).encode('ascii', 'ignore')\n value = unicode(re.sub('[^\\w\\s-]', '', value).strip().lower())\n value = re.sub('[-\\s]+', '-', value)\n except:\n self.log('Conversion error: \\n%s' % traceback.format_exc())\n\n value = unicode(value, 'ascii', errors='ignore')\n value = unicodedata.normalize('NFKD', value).encode('ascii', 
'ignore')\n value = unicode(re.sub('[^\\w\\s-]', '', value).strip().lower())\n value = re.sub('[-\\s]+', '-', value)\n\n\n self.log('Conversion finished to %s' % value)\n\n return value", "def _sanitize_to_identifer(name):\n n = name.strip()\n n = re.sub('/', ' ', n)\n n = re.sub('-', ' ', n)\n n = re.sub(' +', '_', n)\n n = re.sub('[\\W]+', '', n)\n return n", "def canon(raw_attr_name: str) -> str:\n if raw_attr_name: # do not dereference None, and \"\" is already canonical\n return raw_attr_name.replace(\" \", \"\").lower()\n return raw_attr_name", "def getName(self):\n return signal_base_get_name(self.obj)", "def name(self):\n return f'{self._vehicle.name} {self.wan_name} Signal'", "def convert_x509_name(name):\n types = {\n 'country_name': 'C',\n 'state_or_province_name': 'ST',\n 'locality_name': 'L',\n 'organization_name': 'O',\n 'organizational_unit_name': 'OU',\n 'common_name': 'CN',\n 'email_address': 'emailAddress'\n }\n\n return '/'.join(['{}={}'.format(types[attr], name.native[attr]) for attr in name.native])", "def standardize(self, name: str) -> str:\n clean_name = name\n for rule in rules.RULES:\n clean_name = rule(clean_name)\n return clean_name", "def _FormalizeName(cls, name):\n name = name.replace(\"_\", \"-\").lower()\n name = name[:cls.NAME_LENGTH_LIMIT]\n if name[-1] == \"-\":\n name = name[:-1] + cls.REPLACER\n return name", "def sanitize_name(name):\n # For now just change dashes to underscores. Fix this more in the future\n return name.replace(\"-\", \"_\")", "def process_name(self, name, inverse=False):\n if inverse:\n return name.replace('_', ' ').title()\n return name.lower().replace(' ', '_').replace('.', '')", "def canonicalize_accelerator_name(accelerator: str) -> str:\n # TPU names are always lowercase.\n if accelerator.lower().startswith('tpu-'):\n return accelerator.lower()\n\n # Common case: do not read the catalog files.\n mapping = {name.lower(): name for name in _ACCELERATORS}\n if accelerator.lower() in mapping:\n return mapping[accelerator.lower()]\n\n # _ACCELERATORS may not be comprehensive.\n # Users may manually add new accelerators to the catalogs, or download new\n # catalogs (that have new accelerators) without upgrading SkyPilot.\n # To cover such cases, we should search the accelerator name\n # in the service catalog.\n searched = service_catalog.list_accelerators(name_filter=accelerator,\n case_sensitive=False)\n names = list(searched.keys())\n\n # Exact match.\n if accelerator in names:\n return accelerator\n\n if len(names) == 1:\n return names[0]\n\n # Do not print an error meessage here. Optimizer will handle it.\n if len(names) == 0:\n return accelerator\n\n # Currenlty unreachable.\n # This can happen if catalogs have the same accelerator with\n # different names (e.g., A10g and A10G).\n assert len(names) > 1\n with ux_utils.print_exception_no_traceback():\n raise ValueError(f'Accelerator name {accelerator!r} is ambiguous. '\n f'Please choose one of {names}.')", "def __to_key(name: str) -> str:\n return name.replace(\" \", \"-\")", "def sanitize_clip_model_name(clip_model_name: str) -> str:\n return clip_model_name.lower().replace(\"-\", \"\").replace(\"/\", \"\")", "def normalize(self, name):\n\n\t\t# label emojis, specifically :) and :( as @artist, then apply \n\t\t# base normalization\n\n\t\tname = super().normalize(re.sub(r'\\s*:[\\(\\)]\\s*',' @artist ', name))\n\t\t\n\t\t# if now name is ? it may be an artist, so label as @artist\n\t\tif name.strip() in {'?','...'}:\n\t\t\treturn '@artist'\n\t\t\n\t\t# fix ! 
- remove if at the end of a word, otherwise replace with i\n\t\tname = re.sub(r'\\!+$','', re.sub(r'\\!+(?=[^\\b\\w])','', name)).replace('!','i')\n\t\t\n\t\t# remove the and a\n\t\tname = re.sub(r'^(the|a)\\s+','', name)\n\t\t \n\t\t# remove multiple white spaces\n\t\tname = re.sub(r'\\s{2,}', ' ', name).strip()\n\t\t\n\t\treturn name", "def _sanitizeName(name):\n\n name = name.lower() # lower.\n name = name.replace('.','') # remove periods.\n name = name.replace('-','') # remove dashes.\n name = name.replace(\"'\",'') # remove apostrophies.\n # return it.\n return name", "def slugify(name):\n return name.lower().strip().replace(' ', '-').replace('+', '_')", "def normalize_name(self, name: str):\n return self.tknzr.tokenize(name, to_lower=True)", "def normalize_label(label: str) -> str:\n label = re.sub(r\"['\\\"`]+\", \"\", label) # remove apostrophes\n label = re.sub(r\"[-/\\\\ \\t_]+\", \" \", label) # normalize separators\n lower_count = sum(map(str.islower, label))\n upper_count = sum(map(str.isupper, label))\n if \" \" not in label and lower_count > 0 and upper_count > 0:\n # camel case to \"normal case\"\n label = re.sub(r\"([a-z])([A-Z])\", r\"\\g<1> \\g<2>\", label)\n label = re.sub(r\"(^[Tt]he |^[Aa] )\", \"\", label) # drop determiner\n return label.lower()", "def sanitize_metric_name(name: str) -> str:\n name = name.replace(\":\", \"-\")\n return name", "def unmangle_measurement_name(measurement_name):\n measurement_name = measurement_name.replace('_sp_', ' ')\n measurement_name = measurement_name.replace('_dsh_', '-')\n return measurement_name", "def band_to_cname(input_band: str):\n bands_ref = ((\"red\", \"R\"), (\"green\", \"G\"), (\"blue\", \"B\"), ('nir', \"N\"))\n if isinstance(input_band, int) and 1 <= input_band <= 4:\n return bands_ref[input_band-1][0]\n elif isinstance(input_band, str) and len(input_band) == 1:\n for cname, short_name in bands_ref:\n if input_band == short_name:\n return cname\n elif isinstance(input_band, str) and len(input_band) > 1:\n for cname, short_name in bands_ref:\n if input_band == cname:\n return input_band\n else:\n raise ValueError(f\"Cannot convert given band to valid stac common name. 
Got: {input_band}\")", "def adjust_event_name(event_name):\n pos=find_first_digit(event_name)\n return event_name[pos:]", "def s(x):\n return x.name.lower().replace('_', '-')", "def mangle_name(name):\n import re\n try:\n return re.sub('_+','_',re.sub('[^\\w_]','_',name).lower()).rstrip('_')\n except TypeError:\n raise TypeError(\n 'Trying to mangle name with invalid type of: ' + str(type(name)))", "def changeName(name):\n\tif name in [\"<OPEN>\", \"<HIGH>\", \"<LOW>\", \"<CLOSE>\"]:\n\t\t# Frist charector is upper case\n\t\tname = name.replace('<', '').replace('>', '')\n\t\t#name = name[0] + name[1:].lower()\t\t\n\telif name in [\"<VOL>\"]:\n\t\t#name = name.replace(\"<VOL>\", \"Volume\")\n\t\tname = name.replace(\"<VOL>\", \"VOLUME\")\n\telif name in [\"<DTYYYYMMDD>\"]:\n\t\t#name = name.replace(\"<DTYYYYMMDD>\", \"Date\")\n\t\tname = name.replace(\"<DTYYYYMMDD>\", \"DATE\")\n\treturn name", "def lowercase_name(name):\n return name.lower()", "def _normalize(self, metric_name, submit_method, prefix):\n metric_prefix = \"mongodb.\" if not prefix else \"mongodb.{0}.\".format(prefix)\n metric_suffix = \"ps\" if submit_method == RATE else \"\"\n\n # Replace case-sensitive metric name characters\n for pattern, repl in self.CASE_SENSITIVE_METRIC_NAME_SUFFIXES.iteritems():\n metric_name = re.compile(pattern).sub(repl, metric_name)\n\n # Normalize, and wrap\n return u\"{metric_prefix}{normalized_metric_name}{metric_suffix}\".format(\n normalized_metric_name=self.normalize(metric_name.lower()),\n metric_prefix=metric_prefix, metric_suffix=metric_suffix\n )", "def normalize_name(self) -> str:\n name = self.path.name\n server_names = ContentType.server_names()\n for _ in range(2):\n # we iterate twice to handle cases of doubled prefixes like `classifier-mapper-`\n for prefix in server_names:\n try:\n name = name.removeprefix(f\"{prefix}-\") # type: ignore[attr-defined]\n except AttributeError:\n # not supported in python 3.8\n name = (\n name[len(prefix) + 1 :]\n if name.startswith(f\"{prefix}-\")\n else name\n )\n normalized = f\"{self.content_type.server_name}-{name}\"\n logger.debug(f\"Normalized file name from {name} to {normalized}\")\n return normalized", "def normalize(s):\n s = replace_whitespace(s)\n s = remove_dashes(s)\n s = s.lower()\n return s", "def normalisesym(self, label):\n return label", "def use_name(self):\n if self.is_strobe():\n return 'intsigr_%s' % self.name\n return 'intsig_%s' % self.name", "def clean(name):\n name = remove_extra(name)\n name = unidecode.unidecode(name) # Remove diacritics\n name = \"\".join(\n list(filter(lambda c: c in (string.ascii_letters + string.digits + \" \"), name))\n )\n name = name.lower().strip()\n return name", "def symbolize_sensorname_sysfs(name):\n return name.split(\"_\")[1] + \"_temp\"", "def normalize_pypi_name(s: str) -> str:\n return NORMALIZE_PACKAGE_NAME_RE.sub(\"-\", s).lower()", "def _normalize_show_name(name):\n\tname = name.casefold()\n\tname = re.sub(\"[^a-z0-9]\", \" \", name)\n\tname = re.sub(\"_\", \" \", name)\n\tname = re.sub(\"season \\d( part \\d)?\", \" \", name)\n\tname = re.sub(\"\\s+\", \" \", name)\n\treturn name", "def split_canonical_name(cname):\n return tuple(cname.rsplit('-', 2))", "def standard_name_remapper(orig_name):\n # Remove any trailing parentheses.\n # TODO(tjann): to check if this is safe.\n paren_start = orig_name.find(\"(\")\n if paren_start != -1:\n orig_name = orig_name[:paren_start]\n\n # Removes separating words.\n orig_name = orig_name.replace(\",\", \" \")\n orig_name = orig_name.replace(\"-\", 
\" \")\n orig_name = orig_name.replace(\"and \", \"\")\n return \"\".join([word.capitalize() for word in orig_name.split()])", "def coco_label_to_name(self, coco_label):\n\t\treturn self.label_to_name(self.coco_label_to_label(coco_label))", "def k8s_safe_name(name):\n return name.lower().replace('_', '-')", "def _clean_name(self, name):\n # Useful for windows' paths\n return os.path.normpath(name).replace(\"\\\\\", \"/\")", "def _clean_workflow_name(name: str) -> str:\n return REGEX_CHARS_TO_REPLACE.sub(\"-\", name).strip(\"-\")", "def get_sig_count_name(self, orig):\n return orig", "def _transform_name(self) -> None:\n self.name = utils.maybe_rename_for_k8s(self.name)", "def coco_label_to_name(self, coco_label):\n\t\t\treturn self.label_to_name(self.coco_label_to_label(coco_label))", "def normalize(path):\n return os.path.normcase(os.path.realpath(path))", "def get_python_name(cls, name):\n first_cap_re = re.compile(\"(.)([A-Z](?!s([A-Z])*)[a-z]+)\")\n all_cap_re = re.compile(\"([a-z0-9])([A-Z])\")\n\n s1 = first_cap_re.sub(r\"\\1_\\2\", Utils._clean_name(name))\n return all_cap_re.sub(r\"\\1_\\2\", s1).lower()", "def camelize(name):\n return ''.join([bit.capitalize() for bit in name.split('_')])", "def clean_strain_name(raw_name):\n strain_name = clean_string(raw_name)\n strain_name = strain_name.split(' Strain')[0]\n return strain_name", "def singularize(name):\n n = str(name)\n if n[-1:] == 's':\n return n[:-1]\n return n", "def get_name(self):\n return self.normalize_name(self.name)", "def sanitize_name(self, name):\n\n # replace any character that is not [a-zA-Z0-9_] with an underscore\n new_name = re.sub(\"[^a-zA-Z0-9_]\", \"_\", name)\n\n # now check if the name starts with anything but [A-Za-z_]\n # if so, then add the prefix\n if re.match(\"[^a-zA-Z_]\", new_name):\n new_name = self.prefix + new_name\n\n if new_name == name:\n # return if nothing has changed\n self.all_names.add(name)\n return name\n else:\n # name has changed\n # make sure it is unique, then return\n if new_name in self.all_names:\n idx = 0\n new_name += \"_\" + str(idx)\n while new_name in self.all_names:\n idx += 1\n new_name += \"_\" + str(idx)\n # now we have a unique name\n self.all_names.add(new_name)\n return new_name", "def canonicalize(self, url):\n pass", "def _normalize_package_name(self, name):\n return Prepared.normalize(name)", "def convert_segment_name(jpk_name):\n if jpk_name == 'extend':\n real_name = 'approach'\n elif jpk_name == 'pause-at-end':\n real_name = 'contact'\n elif jpk_name == 'pause-at-start':\n real_name = 'pause'\n else:\n real_name = jpk_name\n\n return real_name", "def normalize_pipeline_name(name=''):\n normalized_name = name\n for bad in '\\\\/?%#':\n normalized_name = normalized_name.replace(bad, '_')\n return normalized_name", "def normalize_var_name(var_name):\n var_case = detect_case(var_name)\n if var_case == SNAKE_CASE:\n return normalize_snake(var_name)\n elif var_case == CAMEL_CASE:\n return normalize_camel(var_name)\n elif var_case == KEBAB_CASE:\n return normalize_kebab(var_name)\n elif var_case == CONST_CASE:\n return normalize_const(var_name)\n else:\n raise ValueError('unknown case {}'.format(var_case))", "def sanitize_name(self):\n self._name = self.get_name().strip()", "def _convert_name(self, name):\n if not self.re_name.match(name):\n org_name = name\n name = self.re_white.sub('_', name)\n name = self.re_alpha.sub('_', name)\n if not self.re_name.match(name):\n name = 'x_' + name2\n self.warn('Converting name <' + org_name + '> to <' + name + '>.')\n return 
name", "def sanitize_name(name: str) -> str:\n return re.sub(r\"[^A-Za-z0-9_-]\", \"-\", name)[0:128]", "def make_systematic_name(name):\n return \" \".join(re.findall(r\"([A-Z]+[a-z]*)\", name)).capitalize()", "def normalize_signal(signal):\n gain = 1.0 / (np.max(np.abs(signal)) + 1e-9)\n return signal * gain", "def CanonicalizeLabel(user_input):\n if user_input is None:\n return user_input\n\n if not isinstance(user_input, six.text_type):\n user_input = user_input.decode('utf-8')\n\n canon_str = user_input.translate(_CANONICALIZATION_TRANSLATION_TABLE)\n return canon_str", "def clean_name(s):\n return re.sub('[\\W_]+', '', s).lower()", "def validate_and_normalize_mac(address):\n if not is_valid_mac(address):\n if constants.CLONE_ISO_MAC in address:\n # get interface name from the label\n intf_name = address.rsplit('-', 1)[1][1:]\n raise exception.ClonedInterfaceNotFound(intf=intf_name)\n else:\n raise exception.InvalidMAC(mac=address)\n return address.lower()", "def asName(self, name):\r\n\t\tnewName = \"\"\r\n\t\ttoHigher = False\r\n\t\tfor char in name:\r\n\t\t\tif char in \"_-\":\r\n\t\t\t\ttoHigher = True\r\n\t\t\telse:\r\n\t\t\t\tif toHigher:\r\n\t\t\t\t\tnewName = newName + char.upper()\r\n\t\t\t\telse:\r\n\t\t\t\t\tnewName = newName + char\r\n\t\t\t\ttoHigher = False\r\n\t\treturn newName", "def name(self):\n return utils.force_name_case(self._name)", "def getClassName(self):\n return signal_base_get_class_name(self.obj)", "def normalize_filename(filename):\n value = unicodedata.normalize('NFKD', ensure_unicode(filename)).encode(\"ascii\", \"ignore\").decode(\"ascii\")\n value = re.sub('[^\\w\\s-]', '', value).strip().lower()\n value = re.sub('[-\\s]+', '-', value)\n return ensure_native_str(value)", "def simplifyOutName(name):\n return \"HLTNav_\" + name.replace(\"HLTNav_\", \"\").replace(\"Trig\", \"\").replace(\"Alg\", \"\")", "def _sanitize_field_name(self, field_name):\n field_name = field_name.replace(self._field_prefix, '')\n return field_name.replace('.', '_')", "def __sanitize(name):\n if name[-1] == \"/\":\n return name[:-1]\n return name", "def normalize_directory_name(directory_name: str) -> str:\n return directory_name.lower()", "def unmangle_bucket_name(bucket):\n if bucket == u'monitoring':\n bucket = u'_monitoring' # to handle monitoring bucket. Bucket shouldn't start with special char\n bucket = bucket.replace('_dsh_', '-')\n return bucket", "def decamelize(name):\n pat = re.compile(r'([A-Z]*[^A-Z]*)(.*)')\n bits = []\n while True:\n head, tail = re.match(pat, name).groups()\n bits.append(head)\n if tail:\n name = tail\n else:\n break\n return '_'.join([bit.lower() for bit in bits])", "def _normalize_class_name(self, name):\n class_name = ''.join(\n word.capitalize()\n for word in re.sub('[^A-Za-z0-9]+', ' ', name).split()\n )\n\n if not class_name.endswith('Extension'):\n class_name += 'Extension'\n\n return class_name", "def standardize_table_name(self, schema_name: str, table: str) -> str:\n return table" ]
[ "0.70679617", "0.672173", "0.66685057", "0.6528386", "0.6398456", "0.6284975", "0.6257678", "0.6253902", "0.6199662", "0.6199662", "0.6164654", "0.61454165", "0.6111997", "0.5949534", "0.58953434", "0.58953434", "0.58953434", "0.5865084", "0.5847952", "0.57750165", "0.5761741", "0.5740908", "0.5669745", "0.5654071", "0.5634133", "0.5633655", "0.56245154", "0.56242156", "0.5598064", "0.55840695", "0.5573234", "0.55464166", "0.5540945", "0.55226094", "0.550668", "0.54875195", "0.5484626", "0.54670227", "0.5466428", "0.5465391", "0.5464786", "0.5443417", "0.5440062", "0.54360855", "0.5432945", "0.5415897", "0.54057187", "0.540393", "0.53785443", "0.53773546", "0.5335187", "0.53350925", "0.53337735", "0.5329959", "0.5319378", "0.5306905", "0.52978927", "0.5296115", "0.52879286", "0.52809", "0.52759695", "0.5272408", "0.52713054", "0.5269969", "0.52693117", "0.5267819", "0.52557755", "0.52530515", "0.5252079", "0.52478415", "0.5235681", "0.52331436", "0.5226479", "0.52231365", "0.52165556", "0.52136385", "0.51991457", "0.51974386", "0.5196267", "0.5194928", "0.51916856", "0.5185879", "0.51789385", "0.5174434", "0.51739806", "0.5166157", "0.5163847", "0.5158733", "0.51522434", "0.51495206", "0.51403886", "0.5139961", "0.5138926", "0.513648", "0.51357466", "0.5135288", "0.5133984", "0.5129893", "0.51295495", "0.509949" ]
0.66648597
3
Disable an alarm. This will prevent the alarm from executing until reenabled or until the application is restarted.
def disable(self, name: str): self._get_backend().disable_alarm(name)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def on_disable(self) -> None:\n self._cancel_notification_cycle()", "def on_disable(self) -> None:\n self._cancel_automation()", "def on_disable(self) -> None:\n self._on_stop_cycle({})", "def stop_alarm(self):\n self.out_power.pulse()", "def disable(self):\n self._enabled = False", "def _disable(self):\n self.enabled = False", "def Disable(self):\n handler = self.get_command_object(\"Disable\")\n handler()", "def disable(self):\n self.enabled = False", "def _disable(self):\n self.debug_log(\"Disabling...\")\n self._unregister_handlers()", "def disable(self):\n self.enabled = False\n self.__store(self)", "def disable(self):\n self._disable_monitor()\n self._pinger.stop()", "def disable(self):\n self.error_code = 'DISABLED'\n self.running = False", "def disable(self):\n disable_request = self._commcell_object._services['DISABLE_SCHEDULE']\n\n request_text = \"taskId={0}\".format(self.schedule_policy_id)\n\n flag, response = self._commcell_object._cvpysdk_object.make_request(\n 'POST', disable_request, request_text)\n\n if flag:\n if response.json():\n error_code = str(response.json()['errorCode'])\n\n if error_code == \"0\":\n return\n else:\n error_message = 'Failed to disable Schedule Policy'\n\n if 'errorMessage' in response.json():\n error_message = \"{0}\\nError: {1}\".format(error_message, response.json()['errorMessage'])\n\n raise SDKException('Schedules', '102', error_message)\n\n else:\n raise SDKException('Response', '102')\n\n response_string = self._commcell_object._update_response_(\n response.text)\n raise SDKException('Response', '101', response_string)", "def disable(self) -> None:\n if self.active_mode is not None:\n logger.info(\"Disabling '%s'\", self.active_mode.MODE_NAME)\n self.active_mode.on_disable()\n\n self.active_mode = None", "async def disable(self, ctx):\n\n server = ctx.message.server\n\n settings = self.bot.dota_ticker_settings.get(server.id)\n\n if settings is not None:\n settings['enabled'] = False\n await self.bot.dota_ticker_settings.put(server.id, settings)\n\n await self.bot.say('The match ticker has been disabled on {0.name}.'.format(server))", "def disable():\n ret = _LIB.oled_click_disable()\n if ret < 0:\n raise Exception(\"oled click disable failed\")", "def disable(self, subsystem=False):\n self.__dict__[\"enabled\"] = False\n\n if subsystem:\n self.subsystem.disable()", "async def disable(self, **kwargs) -> None: # pylint: disable=unused-argument\r\n await self.set_ena(False)", "def disable(self):\n return self.enable(False)", "def disable(self):\r\n self.update(enabled=False)", "def disable(self):\n logging.debug(\"Disabling switch %s\" % self.name)\n self.disabled = True", "def disable():\n if _status_apf():\n return __apf_cmd(\"-f\")", "def disablePeriodTimer(self):\n taskMgr.remove(\"redoPeriodTimer\")\n self.ignore(\"periodTimerExpired\")", "def DisableJob(self, job_urn, token=None):\n cron_job = aff4.FACTORY.Open(job_urn, mode=\"rw\", aff4_type=\"CronJob\",\n token=token)\n cron_job.Set(cron_job.Schema.DISABLED(1))\n cron_job.Close()", "def disable_mute(self):\n self.mute = False", "async def async_turn_off(self, **kwargs) -> None:\n await self._zone.set_mode(\"timer\")", "async def async_turn_off(self, **kwargs: Any) -> None:\n try:\n result = await self.hass.async_add_executor_job(\n self.coordinator.ezviz_client.sound_alarm, self._serial, 1\n )\n\n except (HTTPError, PyEzvizError) as err:\n raise HomeAssistantError(\n f\"Failed to turn siren off for {self.name}\"\n ) from err\n\n if result:\n if self._delay_listener is not None:\n 
self._delay_listener()\n self._delay_listener = None\n\n self._attr_is_on = False\n self.async_write_ha_state()", "def cmd_disable(self, app_name=None):\n rc = self.socket_command_with_project('disable', app_name)\n return rc", "def disable_radio(self):\n self.acquire_response(b'AT*R0')", "def disable(self) -> None:", "async def async_turn_off(self, **kwargs):\n await self.data.set_appliance_state(self.appliance_id, False)\n return True", "def disable(self):\n self._installed_apps_remove()", "def disable(self):\n pass", "def disable_receiver(self):\n self.set_receiver(False)", "def alarm1mmdisable() :\n mpName = 'Control.Subarray%d.alarm1mm' %subarrayNo\n alarmMpdisable( mpName )", "async def async_turn_off(self):\n await self.async_mute_volume(True)", "async def async_turn_off(self):\n data_cmd = _command(COMMAND_POWER_OFF)\n await self._async_send_command(data_cmd)", "def turn_off(self, **kwargs: Any) -> None:\n with self._wemo_call_wrapper(\"turn off\"):\n self.wemo.off()", "def _set_als_disable(self):\n als_path = '/var/lib/power_manager/has_ambient_light_sensor'\n self._client_cmd('if [ -e %s ]; then mv %s %s_backup; fi' %\n (als_path, als_path, als_path))\n self._client_cmd('echo 0 > %s' % als_path)\n self._client_cmd('restart powerd')\n self._als_disabled = True", "def _set_als_disable(self):\n als_path = '/var/lib/power_manager/has_ambient_light_sensor'\n self._client_cmd('if [ -e %s ]; then mv %s %s_backup; fi' %\n (als_path, als_path, als_path))\n self._client_cmd('echo 0 > %s' % als_path)\n self._client_cmd('restart powerd')\n self._als_disabled = True", "def disable_relays(self):\n #ensure clock low and data high\n self.e.clear_bit(7)\n self.e.set_bit(5)\n time.sleep(0.01)\n\n #pulse the clock line\n self.e.set_bit(7)\n time.sleep(0.01)\n self.e.clear_bit(7)\n\n #clear the data line\n self.e.clear_bit(5)", "def disable(self):\n\t\tresponse = self.client.post(self._endpoint + \"/disable\")\n\t\treturn bool(response.json[\"success\"])", "async def async_turn_off(self) -> None:\n self._zone.power = False", "async def async_turn_off(self, **kwargs: Any) -> None:\n self._is_on = False\n await self.disable_rain_delay()", "def turn_off(self, **kwargs):\n request = requests.post(self._resource, data=\"0\", timeout=10)\n if (request.status_code == 200) or (request.status_code == 201):\n self._state = False\n else:\n _LOGGER.error(\"Can't turn off %s. 
Is resource/endpoint offline?\",\n self._resource)\n\n self.schedule_update_ha_state()", "def disable(self):", "async def async_turn_off(self) -> None:\n await self._device.enter_standby()", "def disable(self):\n raise NotImplementedError", "def disable(self, sid):\n return", "def disable(self):\n self.direction = None # remove direction\n self.state['enabled'] = False # reset states\n self.state['return'] = False\n self.return_path = None # remove path\n if self.state['blue']:\n self.stop_blue_state(resume_audio=False)\n self.image, _ = self.norm_images.get_image() # reset image\n self.sound_manager.stop()", "def set_off_alarm(announcement:str):\r\n engine = pyttsx3.init()\r\n try:\r\n engine.endLoop()\r\n except:\r\n pass\r\n engine.say(announcement)\r\n engine.runAndWait()\r\n engine.stop()\r\n logging.info(\"Alarm announced in set_off_alarm(): \" + announcement)\r\n return redirect('/index')", "def disable(self) -> Awaitable[Dict]:\n return self.client.send(\"Security.disable\", {})", "def _led_disable():\n # type: () -> None\n GPIO.output(LED_nOE, GPIO.HIGH)", "def dismiss(self):\n self._get_backend().dismiss_alarm()", "def disable(self):\n super().disable()", "def disable(self):\n if self.active != DISABLED:\n self.uimanager.remove_ui(self.active)\n self.uimanager.remove_action_group(self.action_group)\n self.active = DISABLED", "def do_disable_sensor(self, sensor):\n if hasattr(self, sensor) and sensor in self.active_sensors:\n del self.active_sensors[sensor]", "def disable_emission(self):\n self.ask(\"LASER=OFF\")\n self.ask(\"LASER=ON\") # unlocks emission button, does NOT start emission!", "def turn_off(self) -> None:\n self._monoprice.set_power(self._zone_id, False)", "def disable(self):\n self.registrar.unregister_service(\"say\", namespace=__name__)", "def disable_attachments(self, disable_attachments):\n\n self._disable_attachments = disable_attachments", "def turn_off(self, **kwargs):\n self._is_on = False", "async def async_turn_off(self, **kwargs: Any) -> None:\n await self.async_turn_on_off(False)", "async def power_off(self):\n ...", "def disable(self, item_id):\n pass", "def disable_service(self, **kwargs):\n put_body = json.dumps(kwargs)\n resp, body = self.put('os-services/disable', put_body)\n body = json.loads(body)\n self.validate_response(schema.disable_service, resp, body)\n return rest_client.ResponseBody(resp, body)", "async def async_turn_off(self, **kwargs: Any) -> None:\n if not self.coordinator.data[self.entity_description.uid][\"active\"]:\n raise HomeAssistantError(\n f\"Cannot turn off an inactive program/zone: {self.name}\"\n )\n\n await self.async_turn_off_when_active(**kwargs)", "def disable():\n configdb = ConfigDBConnector()\n configdb.connect()\n rif_info = {}\n rif_info['FLEX_COUNTER_STATUS'] = 'disable'\n configdb.mod_entry(\"FLEX_COUNTER_TABLE\", \"RIF\", rif_info)", "def alarmDisableAntenna(nums=0):\n\n # support using 0 as a wildcard meaning \"all antennas\"\n if nums == 0:\n nums = range(1, 24)\n\n m = []\n for elem in helpers.makeList(nums):\n mpname = device.CarmaAnt().getName(elem) + \".online\"\n m.append(mpname)\n\n alarmMpdisable(m)", "def disable_pulse_modulation(self):\n self.write(\":SOUR:PULM:STAT OFF\")", "def turn_off(self, **kwargs):\n self._is_on = False\n self.schedule_update_ha_state()\n self.hass.data[ZIGATE_DOMAIN].action_onoff(self._device.addr,\n self._endpoint,\n 0)", "async def disable_analog_reporting(self, pin):\n command = [PrivateConstants.REPORT_ANALOG + pin,\n PrivateConstants.REPORTING_DISABLE]\n await 
self._send_command(command)", "async def async_turn_off(self):\n path = \"/queue/simple\"\n param = \".id\"\n value = None\n for uid in self._ctrl.data[\"queue\"]:\n if self._ctrl.data[\"queue\"][uid][\"name\"] == f\"{self._data['name']}\":\n value = self._ctrl.data[\"queue\"][uid][\".id\"]\n\n mod_param = \"disabled\"\n mod_value = True\n self._ctrl.set_value(path, param, value, mod_param, mod_value)\n await self._ctrl.async_update()", "def disable(service_name: str, print_action: bool = True):\n\n if print_action:\n print_log_status(3, f\"Disabling `{service_name}`\")\n \n run_command(f\"sudo systemctl disable {service_name}\")", "def __disable__(self) -> None:\n pass", "def alarmDisableCorrelator(nums=0):\n\n # support using 0 as a wildcard meaning \"all bands\"\n if nums == 0:\n nums = range(1, 25)\n\n m = [helpers.getAstroBandName(elem) for elem in helpers.makeList(nums)]\n alarmMpdisable(m)", "def disable(self):\n for val in data:\n val.disable()\n self.enabled = False", "def disable(self, index):\n self._action(index, StateVariable.enable, missingok=False, value=False)", "def disable():\n ret = _LIB.led_matrix_click_disable()\n if ret < 0:\n raise Exception(\"led matrix click disable failed\")", "def turn_off(self, **kwargs: Any) -> None:\n self._device.power_on = False\n _LOGGER.debug(\"Turn off light %s\", self._device.ip)", "def disable(self) -> Awaitable[Dict]:\n return self.client.send(\"Database.disable\", {})", "def disableEditing(self, disable):\n self.disabled = disable", "def disable(mode='soft'):\n Qdb().disable(mode)", "async def async_turn_off(self):\n path = \"/interface\"\n param = \"default-name\"\n if \"-\" in self._data[\"port-mac-address\"]:\n param = \"name\"\n value = self._data[param]\n mod_param = \"disabled\"\n mod_value = True\n self._ctrl.set_value(path, param, value, mod_param, mod_value)\n\n if self._data[\"poe-out\"] == \"auto-on\":\n path = \"/interface/ethernet\"\n self._ctrl.set_value(path, param, value, \"poe-out\", \"off\")\n\n await self._ctrl.async_update()", "def disableSyndication(self, obj):\n info = self._syndication_info(obj)\n info.disable()", "def disable(self):\n if self.enabled:\n self._router_request(\n self._make_request_data(\n 'editMaintWindow',\n data=dict(\n uid=self.parent,\n id=self.id,\n params=dict(\n startDate=self.startDate,\n startHours=self.startHours,\n startMinutes=self.startMinutes,\n durationDays=self.durationDays,\n durationHours=self.durationHours,\n startProductionState=self.startProdState,\n repeat=self.repeat,\n enabled=False,\n occurrence=self.occurrence,\n days=self.days,\n )\n )\n )\n )\n self.enabled = False\n\n return True", "def turn_off(self, **kwargs):\n setattr(self.resource, self.variable, False)", "def _doDisableRegulation(self):\n self._cmdRegulOff()", "def alarmoff() :\n s.alarm(False, \"\")", "def Disable(self, request, global_params=None):\n config = self.GetMethodConfig('Disable')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Disable(self, request, global_params=None):\n config = self.GetMethodConfig('Disable')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Disable(self, request, global_params=None):\n config = self.GetMethodConfig('Disable')\n return self._RunMethod(\n config, request, global_params=global_params)", "def disable_sound(self):\n\t\tif self.emitter['bgsound'] is not None:\n\t\t\tself.emitter['bgsound'].reset()\n\t\tif self.emitter['effects'] is not None:\n\t\t\tself.emitter['effects'].reset()\n\t\tif 
self.emitter['speech'] is not None:\n\t\t\tself.emitter['speech'].reset()\n\t\tExtScheduler().rem_call(self, self.check_music)", "def stop_alarm_ringtone(self):\n mixer.stop()\n mixer.quit()", "def switch_off(self):\n if threading.current_thread() != self._blinking_thread:\n self._blinking_thread.unregister(self)\n GPIO.output(self.pin, GPIO.LOW)", "def disable(self):\n if not self.labExperiment:\n super().disable()\n else:\n self.zero()\n self.connection.query('close_dm')\n print(\"'BM1k' is now disbaled\")", "def set_Off(self):\n if not(self._locked):\n self.__dict__['statusOn']=False\n self._undo_action()\n else:\n self._log.info('The JobProperty %s is blocked', self.__name__)", "def disable_play_store(self, disable_play_store):\n\n self._disable_play_store = disable_play_store", "def powerOff(self):\n self._sendCommand(self.SONY_CMD_ExtBackupCommunicator_ForcePowerOff, bufferSize=0)", "def disable():\n configdb = ConfigDBConnector()\n configdb.connect()\n port_info = {}\n port_info['FLEX_COUNTER_STATUS'] = DISABLE\n configdb.mod_entry(\"FLEX_COUNTER_TABLE\", PORT_BUFFER_DROP, port_info)" ]
[ "0.7147774", "0.7090431", "0.6920151", "0.6847871", "0.6707151", "0.66669804", "0.66627", "0.66279423", "0.656082", "0.65437007", "0.6541512", "0.6485868", "0.6448139", "0.6370838", "0.6320564", "0.6306733", "0.62469566", "0.62280416", "0.62151366", "0.6210067", "0.6168819", "0.61604595", "0.6148103", "0.61015415", "0.6098471", "0.60949063", "0.60888034", "0.60674435", "0.6025756", "0.60150707", "0.5997502", "0.5978079", "0.59669864", "0.5956319", "0.5938445", "0.58802164", "0.587884", "0.58783674", "0.5878192", "0.5878192", "0.5874972", "0.5848475", "0.58434963", "0.5840685", "0.5830875", "0.58211124", "0.58157593", "0.5814363", "0.5806384", "0.5802642", "0.5801223", "0.57936627", "0.5792021", "0.57822365", "0.57695454", "0.5761554", "0.57611585", "0.5753044", "0.57463324", "0.5712154", "0.5711364", "0.5704201", "0.5691203", "0.5681371", "0.56811255", "0.5680842", "0.56763554", "0.56681", "0.5652302", "0.56520736", "0.56477404", "0.56460327", "0.5641", "0.56356484", "0.5627851", "0.56275034", "0.5620658", "0.56152713", "0.56139606", "0.56080407", "0.5601653", "0.56008816", "0.5598297", "0.5597141", "0.5590655", "0.5589523", "0.55825496", "0.55784017", "0.5561549", "0.5556344", "0.5556344", "0.5556344", "0.5552227", "0.5545591", "0.554258", "0.5537034", "0.55338174", "0.5530922", "0.5528082", "0.55216765" ]
0.80944586
0
Dismiss the alarm that is currently running.
def dismiss(self):
    self._get_backend().dismiss_alarm()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def dismiss_reminder(self):\n qry = ServiceOperationQuery(self, \"dismissReminder\")\n self.context.add_query(qry)\n return self", "def stop_animation(self):\n if self.animate_alarm:\n self.loop.remove_alarm(self.animate_alarm)\n self.animate_alarm = None", "async def stop(self, update: Update, context: ContextTypes.DEFAULT_TYPE) -> None:\n self.__disarm()\n chat_id = update.message.chat_id\n job_removed = remove_job_if_exists(str(chat_id), context)\n msg = \"Alarm stopped 🚫\" if job_removed else \"Alarm isn't running 🚫\"\n await update.message.reply_text(msg)", "def dismiss(self):\n with self.handle_alert():\n self.q(css='button#alert').first.click()", "def stop_alarm(self):\n self.out_power.pulse()", "def stop_alarm_ringtone(self):\n mixer.stop()\n mixer.quit()", "def dismiss_alert(self):\n self.driver.switch_to.alert.dismiss()", "def clear_alarm(self):\n self.alarm_timer.stop()\n self.alarm_build_timer.stop()\n event_logger.info(\"Alarm cleared\")\n self.settings_window.clear_alarm()\n self.main_window.alarm_time_lcd.display(\"\")\n self.config[\"main\"][\"alarm_time\"] = \"\"", "def _cancel(self, __button):\r\n\r\n self.assistant.destroy()", "def _cancel(self, __button):\r\n\r\n self.assistant.destroy()", "def unschedule(self):\n after_id = self._after_id\n self._after_id = None\n if after_id:\n self.anchor_widget.after_cancel(after_id)", "def cancel_alarm(alarm_title: str):\n if alarm_title not in __schedules:\n raise ValueError(\n f\"The given id: {alarm_title} is not associated with any alarm.\"\n )\n\n canceled_alarm = __alarm_info.pop(alarm_title)\n\n __scheduler.cancel(__schedules.pop(alarm_title))\n logging.info(\n \"Alarm titled %s scheduled on %s canceled.\",\n alarm_title,\n canceled_alarm[\"scheduled_time\"],\n )", "def kill(self):\n return self._raw_execute(\"cancel\", {\"job_id\": self.job_id})", "def cancel(self):\n try: \n self.Scheduler.remove(self)\n except: pass\n self.Scheduler = None", "def _cancel_automation(self) -> None:\n if HANDLE_VACATION_MODE in self.handles:\n handle = self.handles.pop(HANDLE_VACATION_MODE)\n self.cancel_timer(handle)", "def cancel(self):\n if self.activated:\n return\n Sched.timers.remove(self)\n heapq.heapify(Sched.timers)", "def _cancel(self, __button):\r\n\r\n self.assistant.destroy()\r\n\r\n return True", "def _cancel(self, __button):\r\n\r\n self.assistant.destroy()\r\n\r\n return True", "def cancel(self):\n self._task.cancel()", "async def stop(self) -> None:\n self._acme_task.cancel()\n self._acme_task = None", "async def on_cancel(self, payload):\n\n await self._delete_message(0)\n self.stop()", "def cancel(self):\n _notify.remove(self.callb)", "def cancel(self):\n if self._timer:\n self._timer.cancel()\n self._timer = None\n else:\n raise Exception('Cannot cancel timer. 
No timer started.')", "def stop(self):\n if self.monitor_lc:\n self.monitor_lc.cancel()\n self.monitor_lc = None", "def cancel(self):\n self.top.destroy()", "def on_quit(self):\n if self.illumination_task is not None:\n self.illumination_task.cancel()", "def alert_cancel(self):\n self._alert_accept_cancel(False)", "def cancel(self):\n self.stop()\n self.make_callback('canceled')", "def loop_stop(self):\n super(TimerLoop, self).loop_stop()\n self.timer.cancel()\n self.loop_confirm_stopped()", "def disarm(self):\n return self.__qf.removeTimer(self)", "def stop(self):\n with self._lock:\n self._running.clear()\n if self._timer:\n self._timer.cancel()\n self._timer = None", "def cancel(self):\n if self.__watcher is not None:\n self.__cancel_task()\n self.__clear_running_state()\n self.setStatusMessage(\"Cancelled\")\n self.summary_text.setText(\n \"<div>Cancelled<br/><small>Press 'Reload' to try again</small></div>\"\n )", "def cancel(self):\n with self.handle_alert(confirm=False):\n self.q(css='button#confirm').first.click()", "def on_cancel(self, _):\n self.destroy()", "def stop(self):\n self.ae.stop()", "async def service_panel_stop(self, call):\r\n if not self.SystemStarted:\r\n _LOGGER.debug(\"Request to Stop the HA alarm_control_panel and it is already stopped\")\r\n return\r\n # cancel the task from within HA\r\n if self.visonicTask is not None:\r\n _LOGGER.debug(\" ........... Closing down Current Task\")\r\n self.visonicTask.cancel()\r\n await asyncio.sleep(2.0)\r\n if self.visonicTask.done():\r\n _LOGGER.debug(\" ........... Current Task Done\")\r\n else:\r\n _LOGGER.debug(\" ........... Current Task Not Done\")\r\n else:\r\n _LOGGER.debug(\" ........... Current Task not set\")\r\n self.SystemStarted = False", "def cancel_stop(cls):\n cls._set_mode_running()", "def stop(self):\n self._stopped.set()\n if self._timer:\n self._timer.cancel()\n self._timer = None", "def timer_canceled(self, timer):\n try:\n try:\n timer.impltimer.stop()\n del timer.impltimer\n except (AttributeError, TypeError):\n pass\n finally:\n super(Hub, self).timer_canceled(timer)", "def kill(self):\n self._destruct()\n pass", "def stop(self):\n self._kill_event.set()", "def stop(self):\n self._logger.debug(\"%s: request to stop pinger\",\n self.ping_address)\n if self._task is None:\n self._logger.debug(\"%s: already stopped\", self.ping_address)\n return\n\n self._logger.debug(\"%s: sending cancel signal\", self.ping_address)\n self._task.cancel()\n self._task = None", "def cancel(self):\n self.on_cancel()", "def terminate(self):\n self.stop_timer()\n self.terminate_event.set()\n self.log.info(self.name + \" timer terminated\")", "def stopMeasurement_pmt(self):\r\n self.pmtTest.aboutToQuitHandler()", "def _stop_current_task(self):\n\n if self._running_task is not None:\n self._running_task.stop()\n\n if self._running_thread is not None:\n self._running_thread.join()\n\n self._status_text.set('Idle')\n self._running_thread = None\n self._running_task = None", "def _cancel(self):\n self.waiter.set_result_if_pending(None)\n \n timer = self.timer\n if (timer is not None):\n self.timer = None\n timer.cancel()", "async def stop(self):\n self.updater.cancel()\n await self._update_once()", "def kill(self):\n self.active = False\n self.wakeup()\n self.join()", "def terminate(self):\n self._proxy.ibroadcast(\n component_type=\"actor\", tag=MessageTag.EXIT, session_type=SessionType.NOTIFICATION\n )\n self.logger.info(\"Exiting...\")", "def unschedule(self, handle):\n if self.is_scheduled(handle):\n 
self.schedule(handle.cancel)", "def StopPreviewTimer(self):\r\n\r\n self_name = self.IsPaneMinimized()\r\n if not self_name:\r\n return\r\n\r\n manager = self.GetAuiManager() \r\n manager.StopPreviewTimer()", "def stop(self):\n # Get the current future instance\n future = self.future\n\n # Cancel the job\n if future:\n future.cancel()", "async def stop(self):\n await self._bot.lavalink.ws.send(op='stop', guildId=self.guild_id)\n self.current = None", "def cancel(self):\n self.__canceled = True", "def stop(self):\n self.ngrok.terminate()\n return", "def close_app(self):\n os.system ('adb shell am force-stop com.tencent.mm')", "def stop(self):\n # set a flag, let the monitor handle this\n self.logger.info(\"Stopping {} for {}\".format(self.__class__.__name__, self.monitor_id))\n self.active = False", "def stop(self):\n os.system(\"taskkill /T /F /PID %s\" % self.process.pid)\n self.running = False", "def gracefully_terminate(self):\n self.running = False", "def kill(self):\r\n # get current application\r\n currentApplication = self.phone.uiState.getCurrentApplication()\r\n\r\n if not currentApplication in self.__appsThatCantBeKilled:\r\n self.phone.comment('exit.kill()')\r\n self.phone.sx(self.__killCommand)\r\n self.phone.delay(300, False)\r\n self.phone.uiState.getCurrentState(True)\r\n else:\r\n self.phone.warn('Not allowed to kill \"%s\" application using SX' % currentApplication)", "def cancel(self):\n self.waiter.set_result_if_pending(True)\n \n timer = self.timer\n if (timer is not None):\n self.timer = None\n timer.cancel()", "def cog_unload(self) -> None:\n log.debug(\"Unloading the cog and canceling the background task.\")\n self.countdown_task.cancel()\n self.status_task.cancel()", "def stop(self):\n if self.running:\n self._unschedule_all()\n self.loop.stop()", "def set_off_alarm(announcement:str):\r\n engine = pyttsx3.init()\r\n try:\r\n engine.endLoop()\r\n except:\r\n pass\r\n engine.say(announcement)\r\n engine.runAndWait()\r\n engine.stop()\r\n logging.info(\"Alarm announced in set_off_alarm(): \" + announcement)\r\n return redirect('/index')", "async def stop_menu(self, payload: discord.RawReactionActionEvent) -> None:\n\n await self.stop()", "def cancel(self) -> None:\n with self._lock:\n if self.__thread and self.__thread.is_alive():\n self.__cancel.set()\n self.__thread.join()\n\n self.__cancel.clear()", "def stop(self):\n self._schedule(0, 0)\n self._started = False", "def cancel_multi_kill_timer(self) -> None:\n self._multi_kill_timer = None", "def cancel(self):\n pass", "def cancel(self):\n\n self.end()\n super().cancel()", "def close(self):\n if self._timer is not None:\n self._timer.cancel()\n self._timer = None", "def stop(self):\n self.stopped = True\n if self.task:\n self.task.cancel()\n self.task.join()", "def stop(self):\n self._running = False", "def stop(self):\n self._running = False", "def cancel(self):\n self.frame.pack_forget()\n MainScreen(self.master)", "def stop(self):\n self.running = False", "def stop(self):\n self.running = False", "def stop(self):\n self.running = False", "def stop(self):\n self.running = False", "def stop(self):\n self.running = False", "def delete_alarm():\r\n name = request.args.get('alarm_item')\r\n logging.info(\"Alarm deleted in delete_alarm(): \" + name)\r\n for alarm in alarms:\r\n if alarm['title'] == name:\r\n alarms.remove(alarm)", "def stop(self):\n self._log.info(\"Stopping\")\n self._running.clear()", "def deactivate(self) -> None:\n return self.stop()", "def stop(self):\n\n self.active = False", "def 
end(self):\n\t\t\t#kill bgui objects\n\t\t\tself.notificationSystem.activeNote = None\n\t\t\tself._remove_widget(self.frame)", "def stop(self):\n self._run = False\n self.IA.stop()", "def cancel(self):\n GameLoop.getInstance()._cancelation_token = True", "def _cancel_notification_cycle(self) -> None:\n if HANDLE_CLEAN in self.data:\n cancel = self.data.pop(HANDLE_CLEAN)\n cancel()", "def cancel_retry(self):\n if self._cancel_retry is not None:\n self._cancel_retry.cancel()\n self._cancel_retry = None", "def cancel_retry(self):\n if self._cancel_retry is not None:\n self._cancel_retry.cancel()\n self._cancel_retry = None", "def _cancel(self, __button=None):\r\n\r\n self.destroy()", "def stop_timer(self):\r\n self.countdownTimer.stop()", "def stop(self):\n self.setWindowTitle(self.name + ': stopped')\n self._timer.stop()", "def stop_discharge(self):\n self.discharge_pin.off()\n # self.send_stats() # TODO: implement\n log.info(\n \"Discharged finished at {}mAh on channel {}.\".format(\n str(self.discharge_stats), self.channel\n )\n )\n self.set_complete()", "def stop(self):\r\n self.running = False", "def stop(self):\r\n self.running = False", "def stop(self):\n schedule = self._schedules[self._index]\n schedule.stop()\n self._stopped.set()\n self._started.clear()", "def close(self):\n self.dismiss()\n screenmanager.change_to('main_menu')", "def cancel(self):\n self.cancelled.set()" ]
[ "0.6770825", "0.6504499", "0.6491621", "0.6473124", "0.6453558", "0.6367585", "0.6321344", "0.61386096", "0.60624015", "0.60624015", "0.59958225", "0.5922445", "0.5870512", "0.5868287", "0.5867719", "0.5865545", "0.58586764", "0.58586764", "0.58152795", "0.58013976", "0.57735795", "0.5713634", "0.57128084", "0.5706796", "0.5706749", "0.5704373", "0.5693771", "0.56544703", "0.56472117", "0.5646566", "0.56439435", "0.56386155", "0.5583348", "0.5575065", "0.5571809", "0.55442417", "0.55274713", "0.55226076", "0.5518333", "0.5511393", "0.55103517", "0.55088735", "0.5489465", "0.5480977", "0.5466053", "0.54554015", "0.5453532", "0.5450816", "0.5449186", "0.5438805", "0.5434501", "0.54308546", "0.54290813", "0.5426312", "0.5420374", "0.54122347", "0.5400039", "0.5392625", "0.53905016", "0.5387654", "0.5386503", "0.5386006", "0.53746104", "0.5372856", "0.53715587", "0.5368715", "0.53586566", "0.53579664", "0.5353478", "0.53457034", "0.53456795", "0.53401315", "0.53359485", "0.53305453", "0.53305453", "0.5328982", "0.53261554", "0.53261554", "0.53261554", "0.53261554", "0.53261554", "0.53220546", "0.5320932", "0.53158486", "0.531569", "0.53135645", "0.5310504", "0.5305804", "0.53029114", "0.5294803", "0.5294803", "0.5294505", "0.5292322", "0.5290358", "0.5289331", "0.52888286", "0.52888286", "0.5287739", "0.52861434", "0.5283494" ]
0.8574007
0
Snooze the currently running alarm for the specified number of seconds. The alarm will stop and then resume once the snooze interval elapses.
def snooze(self, interval: Optional[float] = 300.0):
    self._get_backend().snooze_alarm(interval=interval)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sleep(self, seconds):\n\n # We schedule an alarm signal for x=seconds out in the future.\n # noinspection PyUnusedLocal\n def handle_alarm(signal_num, frame):\n pass\n\n signal.signal(signal.SIGALRM, handle_alarm)\n signal.alarm(seconds)\n\n # Wait for either the alarm to go off or for us to receive a SIGINT.\n signal.pause()\n\n # Remove the alarm if it is still pending.\n signal.alarm(0)", "def alarm(seconds): # real signature unknown; restored from __doc__\n pass", "def sleep(self, seconds=60):\n\t\ttime.sleep(seconds)", "def alarmoff() :\n s.alarm(False, \"\")", "def sleep(self, *args, seconds):\n return deferLater(reactor, seconds, lambda: None)", "def sleep(self, *args, seconds):\n return deferLater(reactor, seconds, lambda: None)", "def sleep(self, seconds):\n time.sleep(seconds)", "def _sleep(self, sleep_time: float = 10) -> None:\n sleep_until_interrupt(sleep_time, lambda: self.stopped, interval=0.5)", "def alarm(self, interval, call):", "def wake_till(seconds):\n while True:\n if int(time.time()) < seconds:\n time.sleep(5)\n else:\n return", "def sleep(seconds):\r\n time.sleep(seconds)", "def lightleep(time_ms: int = None) -> None:", "def delay(self, seconds):\n\n if self.call is None:\n return\n self.call.delay(seconds)", "def sleep(seconds):\n\n return Sleep(seconds)", "def _sleep(self):\n while 1:\n diff = (time.time()-self.lastcall) - self.mindelay\n if diff >= 0: return\n time.sleep(max(-diff/2.0, 0.01))", "def sleep(self):\n self.sleep_after(1) # Can't be 0, that means 'don't sleep'", "def sleep(seconds):\n time.sleep(seconds)", "def sleep(seconds):\n time.sleep(seconds)", "def deepsleep(time_ms: int = None) -> None:", "def time_limit(seconds):\n def signal_handler(signum, frame):\n raise TimeoutException(\"TIMEOUT\")\n signal.signal(signal.SIGALRM, signal_handler)\n signal.alarm(seconds)\n try:\n yield\n finally:\n signal.alarm(0)", "def sleep(self):\n if self._stop is not None:\n timeLeft = max(self._stop - time.time(), 0) \n sleep = min(self._sleep, timeLeft)\n else:\n sleep = self._sleep\n time.sleep(sleep)", "def start_later(self, seconds):\n if self._start_event is None:\n _call_spawn_callbacks(self)\n hub = get_my_hub(self) # pylint:disable=undefined-variable\n self._start_event = hub.loop.timer(seconds)\n self._start_event.start(self.switch)", "def alarmon(alarmName=\"\") :\n s.alarm(True, alarmName)", "def rewind(self, seconds=10):\n self.spam = True\n\n def stop():\n logger.info('Stop spam.')\n self.io_loop.stop()\n\n self.io_loop.add_timeout(time.time() + seconds, stop)\n self.start()", "def vibrate(self, duration):\n self.wm.rumble = 1\n sleep(duration)\n self.wm.rumble = 0", "def sleep(self):\n time.sleep(0.2)", "def _wakeup(self, wakeup_timeout=10, response_timeout=3):\n pass", "def sleep(self, seconds):\n ten_ms_steps = int(round(seconds * 100))\n for _i in xrange(0,ten_ms_steps):\n if self._sequence_stop_signal:\n break\n sleep(0.01)", "def sleep(self, duration):\n active_item = self.stack.pop()\n self.sleeping.sleep(active_item, duration)", "def sleep(min_seconds=1, max_seconds=10):\n time.sleep(randint(min_seconds, max_seconds))", "def sleep(seconds):\n\n # Check seconds to ensure it is a valid type.\n if type(seconds) not in [long, float, int]:\n raise RepyArgumentError(\"Invalid type \" + str(type(seconds)))\n\n # Using getruntime() in lieu of time.time() because we want elapsed time \n # regardless of the oddities of NTP\n start = nonportable.getruntime()\n sleeptime = seconds\n\n # Return no earlier than the finish time\n finish = start + 
seconds\n\n while sleeptime > 0.0:\n time.sleep(sleeptime)\n\n # If sleeptime > 0.0 then I woke up early...\n sleeptime = finish - nonportable.getruntime()", "async def sleep(self, seconds):\n await self._sleep_until_nanos(_get_future_nanos(seconds))", "def sleep(sleep_time=0.250):\n time.sleep(sleep_time)", "def run(seconds=1):\n time.sleep(seconds)\n print('Slept for ',seconds,' seconds')", "async def _sleep(self, sleep_time: float = 10) -> None:\n async def _interrupt() -> bool:\n return self.stopped\n await async_sleep_until_interrupt(sleep_time, _interrupt, interval=0.5)", "def set_sleep_timer(self, option, time):\n params = [\n ('option', option),\n ('sleeptime', int(time)),\n ]\n\n self.get(COMMAND_UIC, 'SetSleepTimer', params)", "def watch(self, func, seconds=3600):\n func\n time.sleep(seconds)", "def timeout_syscall(seconds):\n def timeout_handler(signum, frame):\n raise InterruptedError\n\n original_handler = signal.signal(signal.SIGALRM, timeout_handler)\n signal.alarm(seconds)\n try:\n yield\n finally:\n signal.alarm(0)\n signal.signal(signal.SIGALRM, original_handler)", "def sleep(interval):\n time.sleep(interval) # pragma: no cover", "def start_alarm(self):\n self.out_power.pulse()", "def sleep(seconds: typing.Union[float, int]):\n if seconds == 0:\n yield\n elif seconds == inf:\n yield from sleepinf()\n else:\n end = monotonic() + seconds\n while end >= monotonic():\n yield", "def sleep(self, amount: float):\n time.sleep(amount)", "def sleep_approx(self, seconds):\n upperbound = (seconds+0.2)*10000\n if (seconds >= 1):\n lowerbound = (seconds-0.2)*10000\n else:\n lowerbound = seconds*10000\n\n sleeptime = random.randint(lowerbound, upperbound)\n sleeptime = sleeptime/10000\n sleeptime = sleeptime*.8\n\n if (self.botspeed == 1.25):\n sleeptime = sleeptime*.75\n elif (self.botspeed == 1.5):\n sleeptime = sleeptime*.5\n sleep(sleeptime)", "def sleep_for(self, duration):\n raise NotImplementedError()", "def set_timeout(seconds, on_timeout):\n\n def _sighandler(signum, frame):\n on_timeout()\n\n signal.signal(signal.SIGALRM, _sighandler)\n signal.alarm(seconds)", "def stop_alarm(self):\n self.out_power.pulse()", "def set_sleep_time(self, milliseconds:int):\n self.send_command(f\"configure mainLoopSleepTime {milliseconds}\")", "def delay(seconds):\n\n # Perform the delay\n time.sleep(seconds)", "def sleep(secs=1.0):\n time.sleep(secs)", "def change_play_interval(self, msec):\n\t\tmsec = max(msec, 500) # at least 500msec\n\t\tmsec = min(10000, msec) # at most 10s\n\t\tself.stop()\n\t\tself._autonext_interval_msec = msec\n\t\tself.start()", "def sleep(seconds):\n # After load and initializing the PvAPI Python's built-in 'sleep' function\n # stops working (returns too early). 
The is a replacement.\n from time import sleep,time\n t = t0 = time()\n while t < t0+seconds: sleep(t0+seconds - t); t = time()", "def sleep_for(timeToSleep):\r\n time.sleep(timeToSleep)", "def shake(r, num_repeats=1):\n for i in range(num_repeats):\n r.go(25)\n time.sleep(.1)\n r.stop()\n time.sleep(.1)\n r.go(-25)\n time.sleep(.1)\n r.stop()\n time.sleep(.1)", "def sleep(secs: float) -> Coroutine[None, None, None]:\n return time_sleep_coro(secs)", "def alarm(n):\r\n for i in range(n):\r\n playsound(sound_file)\r\n time.sleep(2)", "def sleep_after(self, seconds):\n if self._firmware >= 264:\n self.write(self.ASCII_ESC, '8', seconds, seconds >> 8)\n else:\n self.write(self.ASCII_ESC, '8', seconds)", "def alarm_t(self, **kwargs):\n if self.verbose:\n print(\"\\t{} |{}| Initialization begins.\".format(Timer.OK, self.tinfo['name']))\n time_asleep = 1\n if self.testmode is False:\n while self.tinfo['alarm_time'] >= datetime.now():\n if time_asleep % 60 == 0:\n if self.verbose:\n print(\"|{}| +1 minute.\".format(datetime.now().strftime(\"%H:%M:%S\"))) \n time_asleep += 1\n sleep(1)\n self.execute_target(self.tinfo)\n return True\n elif self.testmode is True:\n print(\"\\t{} **** TESTMODE.Forcing immediate exec!\".format(Timer.OK))\n self.execute_target()\n return True\n else:\n print(\"\\t testmode must be True or False!\")\n return False", "def timer(self):\n self.time_remaining -= 1\n if self.time_remaining > 0:\n Timer(1, self.timer).start()", "async def sleep(self, sleep_time):\n await asyncio.sleep(sleep_time)", "def set_sleep_time(self, time):\n self.sleep_time = time", "def sleep(self):\n if not self.is_sleeping:\n self.wait_until_idle()\n self.__interface.send_command('POWER_OFF')\n self.wait_until_idle()\n self.__interface.send_command('DEEP_SLEEP')\n self.__interface.send_data(0xa5)\n\n self.__sleeping = True", "def tmpDown(self, mSec):\n timer = QtCore.QTimer(self)\n timer.setSingleShot(True)\n self.connect(timer, QtCore.SIGNAL('timeout()'), self.timerUp)\n timer.start(mSec)\n self.setDown(True)", "def sleep(self,secs):\r\n d = Deferred()\r\n self.reactor.callLater(secs,d.callback,'Sleeping')\r\n return d", "def sleep(self,secs):\r\n d = Deferred()\r\n self.reactor.callLater(secs,d.callback,'Sleeping')\r\n return d", "def start_later(self, *args, **kwargs):\n\n if self.call is not None:\n return\n self.call = None\n if self.seconds is not None:\n self.call = reactor.callLater(self.seconds, self._repeater,\n *args, **kwargs)", "async def remind(self, s:int, message=\"Remind me!\"):\n await asyncio.sleep(int(s))\n await self.bot.reply(\"Reminder: {}\".format(message))", "async def restless_sleep(duration):\n\n while duration > 0:\n await asyncio.sleep(1)\n\n # Poll for program running state\n if Program.is_running():\n duration = duration - 1\n continue\n\n # Otherwise, program is done running, raise an exception to be caught\n raise ProgramShutdownError", "def sleep(self):\n # Just spending cycles of sleep till next date\n timeTarget = self.startTime + timedelta(hours=int(self.newsFrequency))\n while datetime.now() < timeTarget:\n # sleep for 30 min\n # TODO move time to sleep into config\n logging.info(f\"Sleep for 30 min target to wakeup {timeTarget}\")\n time.sleep(60*30)", "def pause(seconds):\n time.sleep(seconds);", "async def sleep(cls, delay: float) -> None:", "def advance_time_seconds(seconds):\r\n advance_time_delta(datetime.timedelta(0, seconds))", "def testSleeping(self):\n time.sleep(2 * 60)\n raise AssertionError('Test case should have timed out.')", "def 
set_timeout(self, seconds):\n self._timeout = seconds", "def functionThatWillTimeOut():\n time.sleep(5)", "def wait(self, seconds):\n time.sleep(seconds)", "def sound_alarm_if_neccessary(alarm_list, button_control, display, time):\n gone_off_alarm = alarm_list.get_gone_off_alarm(time)\n if gone_off_alarm is None:\n return\n alarm_list.delete_alarm(gone_off_alarm, True)\n display.clear()\n display.show_cursor(False)\n iteration = False\n while not button_control.wait_for_press(buttons.ENTER, 500):\n if iteration:\n display.change_row('WAKE UP!!', TOP_ROW)\n else:\n display.clear()\n\n iteration = not iteration\n\n alarm_list.add_alarm(gone_off_alarm,\n gone_off_alarm.repeat != Alarm.NO_REPEAT)\n display.show_cursor(True)", "def wakeup(self):\n self.waker.notify()", "def __sleep(self):\n if self.sleep_duration > 0:\n gevent.sleep(self.sleep_duration)\n else:\n self.__warn(\n f\"The average tick took longer than the set tick duration of {self.__tick_duration}. \"\n f\"Program is to heavy to run real time\")", "async def inbound_task_call(self):\n from cocotb.triggers import Timer\n await Timer(0, units=\"ps\")", "def suspend(host=None,time=10):\r\n if host:\r\n host.suspend(time)", "def sleep(duration):\n f = Future()\n IOLoop.current().call_later(duration, lambda: f.set_result(None))\n return f", "def __timeout(self, seconds, func, *args):\n t = threading.Timer(seconds, func, *args)\n self._timer = t\n t.start()", "async def async_turn_off(self, **kwargs) -> None:\n await self._zone.set_mode(\"timer\")", "def wait_for_seconds(self, seconds, sleeptime=0.001):\n self.listen_until_return(timeout=seconds, sleeptime=sleeptime)", "async def alarm(ctx, on_time:float=1, off_time:float=0.6, n:int=5):\n buzzer.beep(on_time, off_time, n)\n await ctx.send(f\"Alarme acionado\")", "def sleep_sim_time(world, seconds, state_break=[False]):\n start = world.last_time if world.last_time else Time()\n remain = seconds\n\n while remain > 0 and not state_break[0]:\n yield From(trollius.sleep(0.1))\n now = world.last_time if world.last_time else Time()\n remain = seconds - float(now - start)", "def sleep(self):\n self._epd.sleep()", "def timeout_function(seconds=5):\n\n def signal_handler(signum, frame):\n raise TimeoutError(\"Timed out!\")\n\n signal.signal(signal.SIGALRM, signal_handler)\n signal.alarm(seconds)\n\n try:\n yield\n finally:\n signal.alarm(0)", "def enter_sleep_mode(self):\n self.execute(SdpI2cCmdEnterSleepMode())", "def pulse(seconds):\n index = 0\n while index < len(fake_threads):\n t = fake_threads[index]\n t['sleep'] -= seconds\n if t['sleep'] <= 0:\n t['sleep'] = 0\n t['next_sleep_time'] = None\n t['greenlet'].run()\n sleep_time = t['next_sleep_time']\n if sleep_time is None or isinstance(sleep_time, tuple):\n del fake_threads[index]\n index -= 1\n else:\n t['sleep'] = sleep_time\n index += 1", "def repeat_every(seconds, fn):\n def wrapper(scheduler):\n try:\n fn()\n scheduler.enter(seconds, 1, wrapper, (scheduler,))\n except:\n print('Error executing function')\n\n scheduler = sched.scheduler(time.time, time.sleep)\n scheduler.enter(seconds, 1, wrapper, (scheduler,))\n scheduler.run()", "def reconnect(self, seconds: int) -> None:", "def wakeup(\n self, timeout: int, callback: Callable[[RTC], None] | None = None, /\n ) -> None:", "def sleep_until(self, time):\n raise NotImplementedError()", "def interval_reminder(duration=60, interval=10):\n\n for i in range(0, int(duration / interval)):\n print(f\"Started {60 * interval}m interval\")\n time.sleep(60 * interval)\n print(\"Reminder 
played...\")\n reminder_sound()", "def sleeper(sleepdict):\n\n if not sleepdict['fast']:\n time.sleep(sleepdict['sleep'])", "def pass_time(self, t):\n cont = time.time() + t\n while time.time() < cont:\n time.sleep(0)", "async def wakeup(self) -> None:\n return await self.relay(\"wakeup\")()", "async def start_periodically_refresh_appointments(): # pylint: disable=invalid-name\n await asyncio.sleep(60)\n await app[\"snct_scrapper\"].refresh_appointments_every_minutes()", "def sleeper(self):\n for waittime in (.01, .02, .05, .1, .2, .5):\n yield waittime\n while True:\n waittime = min(waittime + .2, 5)\n yield waittime" ]
[ "0.6919186", "0.6612018", "0.6467028", "0.6446055", "0.6190884", "0.6190884", "0.6155103", "0.6068098", "0.60492384", "0.60356086", "0.60265136", "0.601966", "0.6012515", "0.6003812", "0.5942315", "0.59309816", "0.59265506", "0.59265506", "0.5859484", "0.5822939", "0.5798656", "0.5785979", "0.5783481", "0.57663196", "0.5755602", "0.57500315", "0.5745419", "0.5740327", "0.57382923", "0.5721624", "0.57200456", "0.5708412", "0.57015014", "0.5699889", "0.56632316", "0.5645852", "0.5636973", "0.5612043", "0.5608524", "0.5604619", "0.5604599", "0.56022096", "0.55304134", "0.55256695", "0.5521359", "0.55213237", "0.5490767", "0.54857683", "0.54850435", "0.5463687", "0.54511946", "0.5440277", "0.543617", "0.54356354", "0.54344887", "0.5419786", "0.53730106", "0.53649986", "0.5360663", "0.535735", "0.5347456", "0.5323663", "0.5299965", "0.5299965", "0.52916455", "0.5264481", "0.52543545", "0.524928", "0.523083", "0.5218324", "0.5215437", "0.5212578", "0.5211006", "0.5189012", "0.51870906", "0.51868874", "0.5186095", "0.5186057", "0.51797485", "0.51783115", "0.5175972", "0.5171634", "0.5161115", "0.5142314", "0.5133924", "0.5133833", "0.51324457", "0.5127834", "0.51267934", "0.5123894", "0.5120787", "0.5118552", "0.5116465", "0.510532", "0.5100965", "0.5100377", "0.50976706", "0.50951463", "0.5089787", "0.5087171" ]
0.7325651
0
Get the list of configured alarms.
def get_alarms(self) -> List[Dict[str, Any]]:
    return [alarm.to_dict() for alarm in self._get_backend().get_alarms()]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_alarms() -> List[Dict[str, Any]]:\n return __alarm_info.values()", "def get_alarms(region):\n\n client = boto3.client(\"cloudwatch\", region_name=region)\n\n describe_response = client.describe_alarms()\n\n alarms = describe_response[\"MetricAlarms\"]\n\n return alarms", "def getAlarms(self, **kwargs):\n\n searchQuery = self._client.factory.create('getAlarmsSearchQuery')\n for k, v in kwargs.items():\n setattr(searchQuery, k, v)\n response = self._soap_service.getAlarms(searchQuery)\n\n return CPAPIGetAlarmsResponse(response)", "def get_alarm_info(self):\n response = self.get(COMMAND_UIC, 'GetAlarmInfo')\n\n return response_list(response['alarmList']['alarm'])", "def set_alarms(self):\n if isinstance(self._configuration[\"queue\"].get(\"alarms\"), list) is True:\n sqs_alarms = list()\n for alarm_definition in self._configuration[\"queue\"].get(\"alarms\"):\n sqs_alarms.append(\n base_alarm(\n self,\n resource_name=self._configuration[\"queue\"][\"queue_name\"],\n base_resource=self._sqs_queue,\n **alarm_definition,\n )\n )\n\n for lambda_function_data, lambda_function_definition in zip(\n self._configuration[\"lambda_handlers\"], self._lambda_functions\n ):\n if isinstance(lambda_function_data.get(\"alarms\"), list) is True:\n lambda_alarms = list()\n for alarm_definition in lambda_function_data.get(\"alarms\"):\n lambda_alarms.append(\n base_alarm(\n self,\n resource_name=lambda_function_data.get(\"lambda_name\"),\n base_resource=lambda_function_definition,\n **alarm_definition,\n )\n )", "def get_all(self, q=None):\r\n q = q or []\r\n # Timestamp is not supported field for Simple Alarm queries\r\n kwargs = _query_to_kwargs(q,\r\n pecan.request.alarm_storage_conn.get_alarms,\r\n allow_timestamps=False)\r\n return [Alarm.from_db_model(m)\r\n for m in pecan.request.alarm_storage_conn.get_alarms(**kwargs)]", "def alarms():\n\n if _TRAFFICCTL:\n cmd = _traffic_ctl(\"alarm\", \"list\")\n else:\n cmd = _traffic_line(\"--alarms\")\n\n return _subprocess(cmd)", "def get_alarm_sound_list(self):\n response = self.get(COMMAND_UIC, 'GetAlarmSoundList')\n\n return response_list(response['alarmlist']['alarmsound'])", "def _active_arms(self):\n return [self.arms[idx] for idx in self.arm_idx]", "def test_get_hyperflex_alarm_list(self):\n pass", "def alarm_state(self) -> \"AlarmStates\":\n return self.AlarmStates(self.get_state(self.args[CONF_ALARM_CONTROL_PANEL]))", "def get_alarm_config():\n alarm_config = {}\n try:\n xml_content = ET.parse(CONFIG_FILE)\n alarms = xml_content.getroot()\n for alarm in alarms:\n if alarm.tag == 'Access_Config':\n for param in alarm:\n alarm_config[param.tag] = param.text\n except Exception as exp:\n logger.error('Exception: {} occured while parsing config file.'.format(exp))\n\n return alarm_config", "def get_reminders(self):\n return self.load_data(default=[])", "def get_alarm_states(self):\n alarms = Alarms()\n\n # Loss of Signal Alarms\n combined_LOS_states = self.readU8(129)\n alarms.Loss_Of_Signal_1_INT = bool(combined_LOS_states & 0b010)\n alarms.Loss_Of_Signal_2_INT = bool(combined_LOS_states & 0b100)\n alarms.Loss_Of_Signal_X_INT = bool(combined_LOS_states & 0b001)\n\n combined_LOS_flags = self.readU8(131)\n alarms.Loss_Of_Signal_1_FLG = bool(combined_LOS_flags & 0b010)\n alarms.Loss_Of_Signal_2_FLG = bool(combined_LOS_flags & 0b100)\n alarms.Loss_Of_Signal_X_FLG = bool(combined_LOS_flags & 0b001)\n\n # Frequency Offset and Loss of Lock Alarms\n combined_FOLOL_states = self.readU8(130)\n alarms.Freq_Offset_1_INT = bool(combined_FOLOL_states & 0b010)\n 
alarms.Freq_Offset_2_INT = bool(combined_FOLOL_states & 0b100)\n alarms.Loss_Of_Lock_INT = bool(combined_FOLOL_states & 0b001)\n\n combined_FOLOL_flags = self.readU8(132)\n alarms.Freq_Offset_1_FLG = bool(combined_FOLOL_flags & 0b0100)\n alarms.Freq_Offset_2_FLG = bool(combined_FOLOL_flags & 0b1000)\n alarms.Loss_Of_Lock_FLG = bool(combined_FOLOL_flags & 0b0010)\n\n return alarms", "def get_alarms(username, auth, url):\n f_url = url + \"/imcrs/fault/alarm?operatorName=\" + username + \\\n \"&recStatus=0&ackStatus=0&timeRange=0&size=50&desc=true\"\n response = requests.get(f_url, auth=auth, headers=HEADERS)\n try:\n if response.status_code == 200:\n alarm_list = (json.loads(response.text))\n return alarm_list['alarm']\n except requests.exceptions.RequestException as error:\n return \"Error:\\n\" + str(error) + ' get_alarms: An Error has occured'", "def _replace_alarm_config_variables(self):\n data = self._load_config_file(CloudwatchConfigType.ALARM.value)\n param_data = []\n for node_id in self.node_ids:\n for item in data:\n item_out = copy.deepcopy(item)\n self._replace_all_config_variables(\n item_out,\n str(node_id),\n self.cluster_name,\n self.provider_config[\"region\"],\n )\n param_data.append(item_out)\n return param_data", "def post(self, body):\r\n query = ValidatedComplexQuery(body,\r\n alarm_models.Alarm)\r\n query.validate(visibility_field=\"project_id\")\r\n conn = pecan.request.alarm_storage_conn\r\n return [Alarm.from_db_model(s)\r\n for s in conn.query_alarms(query.filter_expr,\r\n query.orderby,\r\n query.limit)]", "def get(self):\n try:\n result = load_schedules_from_file()\n return result\n except Exception:\n logging.exception('Failed to get Celery Beat schedules!')\n raise", "def alarm(self):\n self._alarms += 1\n return self._alarms", "def modifyAlarms(parsedDict):\n log = list()\n if len(parsedDict) == 0:\n log.append('No changes were made to the database')\n else:\n for alarmName, alarmState in parsedDict.items():\n activationTime = timeOffset(secs=readNotificationGap(alarmName)) # timeNow + notifyGap = activationTime\n alarmId = allAlarms[alarmName]\n if alarmState == 1: # User has turned on the alarm\n if isActive(alarmName, activeDict): # If the alarm is already active\n message = '%s is already active' % alarmName\n log.append(message)\n else: # Alarm Activated/ New Entry\n activateAlarm(alarmId, activationTime)\n message = '%s has been activated' % alarmName\n log.append(message)\n elif alarmState == 0: # User has turned off the alarm\n if isActive(alarmName, activeDict): # Alarm Deactivated/ Entry Closed\n deactivateAlarm(alarmId)\n message = '%s has been deactivated' % alarmName\n log.append(message)\n else: # If the Alarm is inactive\n message = \"%s is already deactivated\" % alarmName\n log.append(message)\n # Don't process newAlarmState is None\n return log", "def parseActiveConfig():\n parsedDict = dict()\n argParser = argparse.ArgumentParser(description=\"\"\"\n This script is used to turn on and off active alarms.\n \"\"\")\n addDebugArgument(argParser) # Add argument --debug\n for alarm in allAlarms: # Iteratively add all alarms from database as an accepted argument.\n alarm = str(alarm).lower()\n argParser.add_argument((\"--%s\" % alarm), type=strtobool,\n help=\"Setting Alarm: %s on/off.\" % alarm)\n debug('Alarm %s has been defined' % alarm, DEBUG)\n debug('All entries defined', DEBUG)\n parsedArgs = argParser.parse_args() # Parse all arguments from commandline (alarms and debug)\n setDebugOverride(parsedArgs) # Set debug level, if --debug is 
specified\n for alarmName in allAlarms:\n parsedDict[alarmName] = getattr(parsedArgs, str(alarmName).lower())\n return parsedDict", "def all_schedules(self):\n return self._all_schedules", "def get_alarms(self):\n return pd.Series()", "def antenny_list_configs(self):\n return self.antenny_config.list_configs()", "def getSchedules(self) :\n return self.schedules", "def get_dev_alarms(auth, url, devid=None, devip=None):\n # checks to see if the imc credentials are already available\n if devip is not None:\n devid = get_dev_details(devip, auth, url)['id']\n f_url = url + \"/imcrs/fault/alarm?operatorName=admin&deviceId=\" + \\\n str(devid) + \"&desc=false\"\n response = requests.get(f_url, auth=auth, headers=HEADERS)\n try:\n if response.status_code == 200:\n dev_alarm = (json.loads(response.text))\n if 'alarm' in dev_alarm:\n return dev_alarm['alarm']\n else:\n return \"Device has no alarms\"\n except requests.exceptions.RequestException as error:\n return \"Error:\\n\" + str(error) + ' get_dev_alarms: An Error has occured'", "def set_alarms(self, name):\n # Special treatment for fan sensors\n if \"Fan\" in name:\n sensor_type = SENSOR_NAMES[name]\n for alarm_level in FAN_ALARMS.keys():\n setattr(self.sensors[sensor_type], alarm_level, FAN_ALARMS[alarm_level])\n self.sensors[sensor_type].alarms_valid = True\n\n # Special treatment for power module current channel sensors\n elif re.match(POWER_CHANNEL_SENSOR_PATTERN, name):\n sensor_type = SENSOR_NAMES[name]\n for alarm_level in POWER_CHANNEL_ALARMS.keys():\n setattr(self.sensors[sensor_type], alarm_level, POWER_CHANNEL_ALARMS[alarm_level])\n self.sensors[sensor_type].alarms_valid = True\n\n # Special treatment for power module total current sensor\n elif re.match(POWER_SUM_SENSOR_PATTERN, name):\n sensor_type = SENSOR_NAMES[name]\n for alarm_level in POWER_SUM_ALARMS.keys():\n setattr(self.sensors[sensor_type], alarm_level, POWER_SUM_ALARMS[alarm_level])\n self.sensors[sensor_type].alarms_valid = True\n\n # All other sensors\n else:\n result = \"\"\n try:\n result = self.mch_comms.call_ipmitool_command([\"sensor\", \"get\", '\"'+name+'\"'])\n except CalledProcessError as e:\n # This traps any errors thrown by the call to ipmitool.\n # This occurs if all alarm thresholds are not set.\n # See Jira issue DIAG-23\n # https://jira.frib.msu.edu/projects/DIAG/issues/DIAG-23\n # Be silent\n print(\"set_alarms: caught CalledProcessError exception: {}\".format(e))\n pass\n except TimeoutExpired as e:\n print(\"set_alarms: caught TimeoutExpired exception: {}\".format(e))\n\n for line in result.splitlines():\n try:\n description, value = [x.strip() for x in line.split(':',1)]\n if description in ALARMS.keys():\n sensor_type = SENSOR_NAMES[name]\n setattr(self.sensors[sensor_type], ALARMS[description], float(value))\n self.sensors[sensor_type].alarms_valid = True\n except ValueError as e:\n # Traps lines that cannot be split. 
Be silent.\n pass", "def test_admin_alarm_admin_list(self):\n response = self.client.get(\"/admin/appointment/alarm/\")\n self.assertEqual(response.status_code, 200)", "def update_alarms():\n try:\n print(\"update alarms\")\n alarm_groups = {}\n # group everything by region\n for alarm in cloudwatch_data.all_subscribed_alarms():\n region_name = alarm[\"Region\"]\n alarm_name = alarm[\"AlarmName\"]\n if region_name not in alarm_groups:\n alarm_groups[region_name] = []\n alarm_groups[region_name].append(alarm_name)\n print(alarm_groups)\n # update each grouped list for a region\n for region_name in alarm_groups:\n alarm_names = alarm_groups[region_name]\n cloudwatch_data.update_alarms(region_name, alarm_names)\n except ClientError as error:\n print(error)\n return True", "def get_all_raid_config(self):\n\n session = None\n err = None\n raid_configs = None\n try:\n session = sessionmaker(bind=self.engine)()\n raid_configs = session.query(models.RAIDConf).all()\n except Exception as e:\n LOG.error(e)\n err = e\n finally:\n if session:\n try:\n session.close()\n except Exception as e:\n err = e\n LOG.error(\" Failed closing session %s \" % Exception)\n\n return raid_configs, err", "def notification_arns(self) -> Sequence[str]:\n return pulumi.get(self, \"notification_arns\")", "def get_schedules(self):\n return self.__schedules", "def history(self, q=None):\r\n q = q or []\r\n # allow history to be returned for deleted alarms, but scope changes\r\n # returned to those carried out on behalf of the auth'd tenant, to\r\n # avoid inappropriate cross-tenant visibility of alarm history\r\n auth_project = acl.get_limited_to_project(pecan.request.headers)\r\n conn = pecan.request.alarm_storage_conn\r\n kwargs = _query_to_kwargs(q, conn.get_alarm_changes, ['on_behalf_of',\r\n 'alarm_id'])\r\n return [AlarmChange.from_db_model(ac)\r\n for ac in conn.get_alarm_changes(self._id, auth_project,\r\n **kwargs)]", "def list_amendments(self):\n try:\n return list(self[CONFIG_KEY][PROJ_MODS_KEY][AMENDMENTS_KEY].keys())\n except Exception as e:\n _LOGGER.debug(\"Could not retrieve available amendments: {}\".\n format(getattr(e, 'message', repr(e))))\n return None", "def dashboard_alarm_get(self, node, object_name, dashboard_metric_type, desired_attributes=None):\n return self.request( \"dashboard-alarm-get\", {\n 'node': [ node, 'node', [ basestring, 'None' ], False ],\n 'object_name': [ object_name, 'object-name', [ basestring, 'None' ], False ],\n 'desired_attributes': [ desired_attributes, 'desired-attributes', [ DashboardAlarmInfo, 'None' ], False ],\n 'dashboard_metric_type': [ dashboard_metric_type, 'dashboard-metric-type', [ basestring, 'dashboard-metric-type' ], False ],\n }, {\n 'attributes': [ DashboardAlarmInfo, False ],\n } )", "def on_raise(cls, alarm):\n r_cfg = defaultdict(list)\n p_cfg = defaultdict(list)\n for c in cls.get_class_diagnostics(alarm.alarm_class):\n if (\n c.resource_group\n and str(c.resource_group.id) not in alarm.managed_object.effective_service_groups\n ):\n continue\n if c.only_root and alarm.root:\n continue\n if c.enable_on_raise:\n if c.on_raise_script:\n r_cfg[c.on_raise_delay] += [\n {\"script\": c.on_raise_script, \"header\": c.on_raise_header}\n ]\n if c.on_raise_action:\n r_cfg[c.on_raise_delay] += [\n {\"action\": c.on_raise_action.name, \"header\": c.on_raise_header}\n ]\n if c.on_raise_handler:\n r_cfg[c.on_raise_delay] += [\n {\"handler\": c.on_raise_handler, \"header\": c.on_raise_header}\n ]\n if c.enable_periodic:\n if c.periodic_script:\n 
p_cfg[c.periodic_interval] += [\n {\"script\": c.periodic_script, \"header\": c.periodic_header}\n ]\n if c.periodic_action:\n p_cfg[c.periodic_interval] += [\n {\"action\": c.periodic_action.name, \"header\": c.periodic_header}\n ]\n if c.periodic_handler:\n p_cfg[c.periodic_interval] += [\n {\"handler\": c.periodic_handler, \"header\": c.periodic_header}\n ]\n # Submit on_raise job\n for delay in r_cfg:\n call_later(\n \"noc.fm.models.alarmdiagnosticconfig.on_raise\",\n scheduler=\"correlator\",\n pool=alarm.managed_object.get_effective_fm_pool().name,\n delay=delay,\n shard=alarm.managed_object.id,\n alarm=alarm.id,\n cfg=r_cfg[delay],\n )\n # Submit periodic job\n for delay in p_cfg:\n call_later(\n \"noc.fm.models.alarmdiagnosticconfig.periodic\",\n scheduler=\"correlator\",\n max_runs=PERIODIC_JOB_MAX_RUNS,\n pool=alarm.managed_object.get_effective_fm_pool().name,\n delay=delay,\n shard=alarm.managed_object.id,\n alarm=alarm.id,\n cfg={\"cfg\": p_cfg[delay], \"delay\": delay},\n )\n\n # @todo: Submit periodic job", "def get_schedules():\n return json.dumps(calendar.get_schedules())", "def clearAlarms(self, **kwargs):\n\n searchQuery = self._client.factory.create('clearAlarmsSearchQuery')\n for k, v in kwargs.items():\n setattr(searchQuery, k, v)\n response = self._soap_service.clearAlarms(searchQuery)\n return CPAPIResponse(response)", "def get_arms(self) -> (list, ob.TurnstileAxis):\n directions = [(0, 1), (1, 0), (0, -1), (-1, 0),\n (1, 1), (-1, 1), (1, -1), (-1, -1)]\n for dire in directions:\n x_neighbour = self.target.x_obj + dire[0]\n y_neighbour = self.target.y_obj + dire[1]\n neighbour = self.grid.obj_list[x_neighbour, y_neighbour]\n if isinstance(neighbour, ob.TurnstileAxis):\n axis = neighbour\n arms = []\n for dire in directions[:4]:\n x_potential_arm = axis.x_obj + dire[0]\n y_potential_arm = axis.y_obj + dire[1]\n potential_arm = self.grid.obj_list[\n x_potential_arm, y_potential_arm]\n if isinstance(potential_arm, ob.TurnstileBloc):\n arms.append(potential_arm)\n return arms, axis", "def get_schedules():\n path = config.get('schedule', 'paths', './schedule.json')\n with open(path) as schedule_file:\n return json.load(schedule_file)", "def dashboard_alarm_get_iter(self, max_records=None, query=None, tag=None, desired_attributes=None):\n return self.request( \"dashboard-alarm-get-iter\", {\n 'max_records': max_records,\n 'query': [ query, 'query', [ DashboardAlarmInfo, 'None' ], False ],\n 'tag': tag,\n 'desired_attributes': [ desired_attributes, 'desired-attributes', [ DashboardAlarmInfo, 'None' ], False ],\n }, {\n 'attributes-list': [ DashboardAlarmInfo, True ],\n } )", "def list(self):\n for item in self._config:\n item.list()", "async def getAlarmState(self):\n alarm_state = await self.director.getItemVariableValue(\n self.item_id, \"ALARM_STATE\"\n )\n return bool(alarm_state)", "def put_cloudwatch_alarm(self):\n\n data = self._load_config_file(CloudwatchConfigType.ALARM.value)\n param_data = []\n for node_id in self.node_ids:\n for item in data:\n item_out = copy.deepcopy(item)\n self._replace_all_config_variables(\n item_out,\n str(node_id),\n self.cluster_name,\n self.provider_config[\"region\"],\n )\n param_data.append(item_out)\n self.cloudwatch_client.put_metric_alarm(**item_out)\n logger.info(\"Successfully put alarms to cloudwatch console\")\n\n # upload cloudwatch alarm config to the SSM parameter store\n alarm_config_ssm_param_name = self._get_ssm_param_name(\n CloudwatchConfigType.ALARM.value)\n self._put_ssm_param(param_data, 
alarm_config_ssm_param_name)", "def all(self) -> List[\"BaseDBAsyncClient\"]:\n # The reason this method iterates over db_config and not over `storage` directly is\n # because: assume that someone calls `discard` with a certain alias, and calls this\n # method subsequently. The alias which just got discarded from the storage would not\n # appear in the returned list though it exists as part of the `db_config`.\n return [self.get(alias) for alias in self.db_config]", "def get_arp_list(self):\n arp_list = []\n for ip, value in self.ip_to_mac.items():\n arp_list.append({'ip': str(ip),\n 'hw_addr': str(value[0]),\n 'last_update': datetime.datetime.fromtimestamp(value[1]).strftime('%Y-%m-%d %H:%M:%S')})\n\n return arp_list", "def get_auditlogs(self):\n res = self.get_object(\"/integrationServices/v3/auditlogs\")\n return res.get(\"notifications\", [])", "def testGetConfigPowerAlarmControl(self):\n self.ports.getconfig_power_alarm_control(file_name = 'get_power_alarm_control.xml', port_ids = portsDict['port_ids'], power_alarm_controls = portsDict['power_alarm_control'])", "def amtool_alerts(self, mess, args):\n helper = AmtoolHelper(\n alertmanager_address=self.config['server_address'])\n result = helper.get_alerts()\n return result", "def get_all_apps(self):\n return list(self.apps.values())", "def get_ammos(self):\n return self.__ammos", "def wait_for_all_alarms(alarm_def_id_list, mon_client, number_of_expected_alarms):\n print('Waiting for alarms to be created')\n check_start_time = time.time()\n alarm_count = 0\n while alarm_count < number_of_expected_alarms:\n alarm_count = 0\n for id in alarm_def_id_list:\n num = len(mon_client.alarms.list(alarm_definition_id=id))\n alarm_count += num\n\n if check_start_time + TIMEOUT < time.time():\n print \"TIMEOUT. 
Found only {} alarms expect {}\".format(alarm_count, number_of_expected_alarms)\n break\n\n return alarm_count", "def get_app_list(self):\n return self.get_setting('applications', 'installed_apps')", "def get_all(self):\n logging.info(__name__ + ' : reading all settings from instrument')\n self.level.get()\n self.status.get()\n self.rate.get()", "def get_activity_list(self):\n return self._request_activity_list(self.athlete)", "def list():\n # Calling config file\n cf = config.ReadFile(config_file)\n user = cf[\"authentication\"][\"user\"]\n\n l = []\n for job in cron:\n l.append(job)\n return l", "def get_apps(self) -> List[str]:\n return list(self.config[\"apps\"].keys())", "def apms(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ApmReferenceArgs']]]]:\n return pulumi.get(self, \"apms\")", "def list(ctx):\n # pylint: disable=redefined-builtin\n _list_apps(ctx.obj['config'], ctx.obj['client'])", "def get_all(self, q=[], marker=None, limit=None, sort_key='timestamp',\n sort_dir='desc', alarms=False, logs=False,\n include_suppress=False, expand=False):\n return self._get_eventlog_collection(marker, limit, sort_key,\n sort_dir, expand=expand, q=q,\n alarms=alarms, logs=logs,\n include_suppress=include_suppress)", "def get_alarm(self, name):\n\n alarm = self._alarm_manager.get_alarm(name)\n\n return alarm", "def alerts_all_zones(self: SimpleNWS) -> List[Dict[str, Any]]:\n return self._alerts_all_zones", "def _config_list(res, ctx):\n\n if _has_error_code(res):\n return print_errors(res, ctx)\n\n lines = []\n for config in res['configs']:\n line = '* ' if config['current'] else ' '\n\n if ctx.verbose:\n line += config['mtime'] + ' '\n\n line += config['name']\n lines.append(line)\n\n return \"\\n\".join(lines)", "async def async_get_alarm_action(self) -> Action:\n result = await self._async_fetch('getMotionDetectConfig', [])\n # _LOGGER.warn('async_get_motion_detection %s\\n%s', self._host, result)\n link = int(result[1]['linkage'])\n return Action(\n audio=True if link & ALARM_ACTION['audio'] else False,\n ftp_snap=True if link & ALARM_ACTION['pic'] else False,\n ftp_rec=True if link & ALARM_ACTION['video'] else False\n )", "def get_app_configs(self):\n self.check_apps_ready()\n return self.app_configs.values()", "def get(self):\r\n # variation in API capabilities is effectively determined by\r\n # the lack of strict feature parity across storage drivers\r\n conn = pecan.request.storage_conn\r\n alarm_conn = pecan.request.alarm_storage_conn\r\n driver_capabilities = conn.get_capabilities().copy()\r\n driver_capabilities['alarms'] = alarm_conn.get_capabilities()['alarms']\r\n driver_perf = conn.get_storage_capabilities()\r\n alarm_driver_perf = alarm_conn.get_storage_capabilities()\r\n return Capabilities(api=_flatten_capabilities(driver_capabilities),\r\n storage=_flatten_capabilities(driver_perf),\r\n alarm_storage=_flatten_capabilities(\r\n alarm_driver_perf))", "def rules(self):\n return self._alert_rules_client", "def sensorsItems(self):\n return self.settingsDb.allSensors()", "def match_alarm(self):\n for alarm in self.alarm_list:\n is_matched = False\n self._match_alarm_by_def(alarm)\n if alarm[\"_match_info\"].get(\"alarm_def_id\"):\n self.matched_alarm_list.append(alarm)\n is_matched = True\n\n if is_matched:\n logger.debug(\n \"Matched alarm(source_id:%s)\",\n alarm[\"_match_info\"].get(\"source_id\"))\n else:\n logger.debug(\n \"UNMatched alarm(source_id:%s)\",\n alarm[\"_match_info\"].get(\"source_id\"))\n unmatch_alarm_hook(alarm)\n\n logger.info(\"matched_alarm_list 
(%s)\", len(self.matched_alarm_list))", "def audit_log_configs(self) -> Sequence['outputs.AuditLogConfigResponse']:\n return pulumi.get(self, \"audit_log_configs\")", "def get_all_metrics(self):\n up_time = self.uptime()\n down_time = self.downtime()\n customer_sla = self.sla()\n objective = self.slo()\n indicator = self.sli()\n avail_percentage = self.availability()\n mt_bf = self.mtbf(up_time)\n mt_tr = self.mttr(down_time)\n list_results = [up_time,down_time,customer_sla,objective,indicator,avail_percentage,mt_bf,mt_tr]\n return list_results", "def set_alarms(self, rec):\n\n sensor = self.crate.frus[(self.bus, self.slot)].sensors[self.sensor]\n sensor_type = SENSOR_NAMES[sensor.name]\n try:\n # Handle sensors that do not get non-critical alarms\n if sensor.low == 0 and sensor.lolo != 0:\n sensor.low = sensor.lolo + NO_ALARM_OFFSET\n if sensor.high == 0 and sensor.hihi != 0:\n sensor.high = sensor.hihi - NO_ALARM_OFFSET\n\n # Set the EPICS PV alarms, with small offset to allow for different\n # alarm behaviour\n rec.LOLO = sensor.lolo - EPICS_ALARM_OFFSET\n rec.LOW = sensor.low - EPICS_ALARM_OFFSET\n rec.HIGH = sensor.high + EPICS_ALARM_OFFSET\n rec.HIHI = sensor.hihi + EPICS_ALARM_OFFSET\n\n if sensor.alarms_valid:\n rec.LLSV = 2 # MAJOR\n rec.LSV = 1 # MINOR\n rec.HSV = 1 # MINOR\n rec.HHSV = 2 # MAJOR\n else:\n rec.LLSV = 0 # NO_ALARM\n rec.LSV = 0 # NO_ALARM\n rec.HSV = 0 # NO_ALARM\n rec.HHSV = 0 # NO_ALARM\n\n self.alarms_set = True\n except KeyError as e:\n print (\"caught KeyError: {}\".format(e))", "def apt_list(cal, c_id, start, end):\n\n # Get the appointments returning it as list of dictionaries\n appointments_result = cal.events().list(\n calendarId=c_id,\n timeMin=start,\n timeMax=end,\n singleEvents=True,\n orderBy='startTime'\n ).execute()\n appointments = appointments_result.get('items', [])\n return appointments", "def get_acls():\n return config.get_cfg_storage(ID_ACL)", "def list_logging_conf():\n import pkg_resources\n\n configs = set()\n for plugin in plugin_manager.load_all(__name__):\n configs.update({\n cfg for cfg in pkg_resources.resource_listdir(__name__, '.')\n if cfg.endswith('.json')\n })\n\n return configs", "def azure_monitor_alert_settings(self) -> Optional['outputs.AzureMonitorAlertSettingsResponse']:\n return pulumi.get(self, \"azure_monitor_alert_settings\")", "def amendments(self):\n return self[ACTIVE_AMENDMENTS_KEY] if ACTIVE_AMENDMENTS_KEY in self \\\n else None", "def get_all_adapters(self):\n pass", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n enabled: Optional[pulumi.Input[bool]] = None,\n instance_id: Optional[pulumi.Input[int]] = None,\n message_type: Optional[pulumi.Input[str]] = None,\n queue_regex: Optional[pulumi.Input[str]] = None,\n recipients: Optional[pulumi.Input[Sequence[pulumi.Input[int]]]] = None,\n reminder_interval: Optional[pulumi.Input[int]] = None,\n time_threshold: Optional[pulumi.Input[int]] = None,\n type: Optional[pulumi.Input[str]] = None,\n value_calculation: Optional[pulumi.Input[str]] = None,\n value_threshold: Optional[pulumi.Input[int]] = None,\n vhost_regex: Optional[pulumi.Input[str]] = None) -> 'Alarm':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _AlarmState.__new__(_AlarmState)\n\n __props__.__dict__[\"enabled\"] = enabled\n __props__.__dict__[\"instance_id\"] = instance_id\n __props__.__dict__[\"message_type\"] = message_type\n __props__.__dict__[\"queue_regex\"] = queue_regex\n 
__props__.__dict__[\"recipients\"] = recipients\n __props__.__dict__[\"reminder_interval\"] = reminder_interval\n __props__.__dict__[\"time_threshold\"] = time_threshold\n __props__.__dict__[\"type\"] = type\n __props__.__dict__[\"value_calculation\"] = value_calculation\n __props__.__dict__[\"value_threshold\"] = value_threshold\n __props__.__dict__[\"vhost_regex\"] = vhost_regex\n return Alarm(resource_name, opts=opts, __props__=__props__)", "def list_honeypots(self):\n req = {\"type\": \"ping\",\n \"to\":[\"ALL\"],\n \"from\": self.network.mc_id}\n expect_dict = {\"type\":\"pong\"}\n msg_list = self.send_receive(req, \"ALL\", expect_dict)\n answer = []\n for msg in msg_list:\n answer.append(msg[\"from\"])\n return answer", "def list(self):\n return JSONResponse(self.request).data(items=self._get_agenda_items()).dump()", "def get_analysis_list(self):\n analysys_list = []\n\n analysis_types = AnalysisPopulator.get_query_and_evaluation_analysis_types(self.parameters)\n\n for analysis_type in analysis_types:\n if analysis_type in self.all_possible_analysis:\n analysys_list.append(self.all_possible_analysis[analysis_type])\n else:\n print \"[WARNING]\", analysis_type, \"is not an allowed analysis type\"\n\n return analysys_list", "def parse_queried_alerts(self, alert_list, query_para):\n # List contains all the current alarms of given storage id\n alert_model_list = []\n for alert in alert_list:\n try:\n occur_time = alert['startTime']\n # skip if alert not in input time range\n if not alert_util.is_alert_in_time_range(query_para,\n occur_time):\n continue\n\n alert_model = dict()\n alert_model['alert_id'] = alert['eventID']\n alert_model['alert_name'] = alert['name']\n alert_model['severity'] = self.QUERY_ALERTS_SEVERITY_MAP.get(\n alert['level'], constants.Severity.NOT_SPECIFIED)\n alert_model['category'] = self.QUERY_ALERTS_CATEGORY_MAP.get(\n alert['eventType'], constants.Category.NOT_SPECIFIED)\n alert_model['type'] = constants.EventType.NOT_SPECIFIED\n alert_model['sequence_number'] = alert['sequence']\n alert_model['occur_time'] = int(occur_time * 1000)\n alert_model['description'] = alert['description']\n\n alert_model['recovery_advice'] = alert['suggestion']\n\n alert_model['resource_type'] = constants.DEFAULT_RESOURCE_TYPE\n alert_model['location'] = alert['location']\n\n alert_model_list.append(alert_model)\n except Exception as e:\n LOG.error(e)\n msg = (_(\"Failed to build alert model as some attributes\"\n \" missing in queried alerts.\"))\n raise exception.InvalidResults(msg)\n return alert_model_list", "def ancillary_spectra(self):\n return []", "def At_list(self):\n return self.Atom_list", "def test_get_all_configurations(self):\n\n time_series = ['test-all-conf-1', 'test-all-conf-2', 'test-all-conf-3']\n [timeserie_configuration.get_timeserie_configure(self.get_local_dynamo_cli(),\n ts) for ts in time_series]\n\n all_configurations = timeserie_configuration.get_all_configurations(\n self.get_local_dynamo_cli())\n self.assertEquals(3, len(all_configurations))\n self.assertTrue(all([conf.default for conf in all_configurations]))", "def amtool_receivers(self, mess, args):\n helper = AmtoolHelper(\n alertmanager_address=self.config['server_address'])\n result = helper.get_receivers()\n return result", "def get_appointments(self):\n current_date = timezone.now().date()\n filled_slots = self.slots.filter(\n appointment__isnull=False,\n date__gte=current_date\n )\n appointment_ids = filled_slots.values_list('appointment', flat=True)\n qs = 
Appointment.objects.filter(id__in=appointment_ids)\n return qs", "def pull_alarm(self):\n self.job = MATCH_QUEUE.take(timeout=settings.QUEUE_WAIT_TIMEOUT)\n\n if not self.job:\n raise lock.PassEmpty\n\n # JSON数据格式,反序列化\n try:\n self.alarm_list = map(json.loads, self.job.body.strip().splitlines())\n except Exception as error:\n logger.warning(\n 'match alarm pull error:%s, %s, please check job is json serialized',\n error,\n self.job.body)", "def list_smsa(self, kwargs):\n\n verbose = kwargs.get(\"verbose\", False)\n attributes = ALL if verbose else [\"sAMAccountName\", \"msDS-HostServiceAccountBL\"]\n entries = self.engine.query(self.engine.SMSA_FILTER(), attributes)\n\n if verbose:\n self.display(entries, verbose)\n else:\n for entry in entries:\n sam = entry['sAMAccountName']\n for host in entry[\"msDS-HostServiceAccountBL\"]:\n print(f'{sam}:{host}')", "def get_schedules(self) -> List[SwitcherV2Schedule]:\n return self._schedule_list", "def get_crons(self) -> dict:\n uri = f\"{self.uri}/crons\"\n\n response = self.request(uri=uri)\n return response.json()", "def list(cls):\n\n db = get_db_handle()\n\n secret_basic_configs = []\n for secret in db.secret_table.select():\n secret_basic_configs.append(secret.get_detail_dict())\n\n return secret_basic_configs", "def manager_configs_list(self):\n _, body = self.request('/v1.1/managers/configs', 'GET')\n return body", "async def async_get_alarm_trigger(self) -> Trigger:\n result = await self._async_fetch('getMotionDetectConfig', [])\n # _LOGGER.warn('async_get_motion_detection %s\\n%s', self._host, result)\n return Trigger(audio=False, motion=True if result[1]['isEnable'] == '1' else False)", "def arns(self) -> Sequence[str]:\n return pulumi.get(self, \"arns\")", "def alerts(self):\n return AlertCollection(client=self)", "def get_app_list(self):\n\n return self._get().keys()", "def dashboards(self) -> dict:\n return Config.get_dashboards()", "def getScanList(self):\n \n scanList = []\n for row in self._calData:\n if str(row.calType()) == 'CAL_DELAY':\n scanList.append(row.scanSet()[0])\n return scanList" ]
[ "0.78672135", "0.7273389", "0.71997935", "0.7173401", "0.70782715", "0.7007334", "0.6998015", "0.6914195", "0.6863513", "0.61937827", "0.61905473", "0.6153018", "0.6041094", "0.60307056", "0.5874066", "0.5827018", "0.5747757", "0.56293756", "0.5591793", "0.5573276", "0.55421096", "0.55367875", "0.5524643", "0.54948324", "0.54619354", "0.5456654", "0.54542726", "0.5396906", "0.5382511", "0.53633296", "0.5349163", "0.5329533", "0.5306765", "0.5285726", "0.5265305", "0.52477837", "0.52342016", "0.5229542", "0.5227855", "0.52264035", "0.52261806", "0.5224824", "0.5209295", "0.5207262", "0.5205262", "0.5180438", "0.5164677", "0.5159054", "0.5153771", "0.51363873", "0.5126733", "0.51163495", "0.5115172", "0.5113136", "0.51088333", "0.5091455", "0.5081193", "0.50689924", "0.5054987", "0.505456", "0.50465393", "0.50328285", "0.5013764", "0.5010069", "0.5001725", "0.4997995", "0.4980723", "0.49788356", "0.49731833", "0.49729759", "0.4969022", "0.49569073", "0.49518982", "0.49474335", "0.49473417", "0.49438035", "0.49409977", "0.49294394", "0.4923212", "0.4921115", "0.49209046", "0.49201667", "0.4909075", "0.49083215", "0.49082133", "0.49080032", "0.48973444", "0.48946804", "0.48830643", "0.48679024", "0.48485196", "0.48472914", "0.48423648", "0.48391038", "0.48372793", "0.48320827", "0.48237583", "0.48214096", "0.48185724", "0.4817249" ]
0.7816481
1
Organization - a model defined in Swagger
def __init__(self, country=None, person_name=None, state_province_county=None, email=None, phone_number=None, address_line1=None, address_line2=None, zip_post_code=None, city=None, name=None):  # noqa: E501
    # All attributes start as None; the public setters run only for
    # arguments the caller explicitly supplied.
    self._country = None
    self._person_name = None
    self._state_province_county = None
    self._email = None
    self._phone_number = None
    self._address_line1 = None
    self._address_line2 = None
    self._zip_post_code = None
    self._city = None
    self._name = None
    self.discriminator = None

    if country is not None:
        self.country = country
    if person_name is not None:
        self.person_name = person_name
    if state_province_county is not None:
        self.state_province_county = state_province_county
    if email is not None:
        self.email = email
    if phone_number is not None:
        self.phone_number = phone_number
    if address_line1 is not None:
        self.address_line1 = address_line1
    if address_line2 is not None:
        self.address_line2 = address_line2
    if zip_post_code is not None:
        self.zip_post_code = zip_post_code
    if city is not None:
        self.city = city
    if name is not None:
        self.name = name
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def opt_model_create_rest_api():\n request_json = request.get_json()\n OptimModelRequestAPI(request_json).validate()\n return create_model_data(request_json)", "def create_model(self, ApiId: str, Name: str, Schema: str, ContentType: str = None, Description: str = None) -> Dict:\n pass", "def __init__(self): # noqa: E501\n self.openapi_types = {\n }\n\n self.attribute_map = {\n }", "def custom_openapi() -> Dict:\n if app.openapi_schema:\n return app.openapi_schema\n openapi_schema = get_openapi(\n title=\"The GenomicMedLab Cool Seq Tool\",\n version=__version__,\n description=\"Common Operations On Lots-of Sequences Tool.\",\n routes=app.routes\n )\n\n openapi_schema[\"info\"][\"contact\"] = {\n \"name\": \"Alex H. Wagner\",\n \"email\": \"Alex.Wagner@nationwidechildrens.org\",\n \"url\": \"https://www.nationwidechildrens.org/specialties/institute-for-genomic-medicine/research-labs/wagner-lab\" # noqa: E501\n }\n app.openapi_schema = openapi_schema\n return app.openapi_schema", "def expose_models(app, HOST=\"localhost\", PORT=5000, API_PREFIX=\"/api\"):\n\n api = SAFRSAPI(app, host=HOST, port=PORT)\n api.expose_object(models.Category)\n api.expose_object(models.CustomerCustomerDemo)\n api.expose_object(models.OrderDetail)\n api.expose_object(models.Order)\n api.expose_object(models.Customer)\n api.expose_object(models.CustomerDemographic)\n api.expose_object(models.EmployeeAudit)\n api.expose_object(models.EmployeeTerritory)\n api.expose_object(models.Employee)\n api.expose_object(models.Product)\n api.expose_object(models.Region)\n api.expose_object(models.Shipper)\n api.expose_object(models.Supplier)\n api.expose_object(models.Territory)\n return api", "def __init__(self, **kwargs):\n self.swagger_types = {\n 'id': 'str',\n 'name': 'str',\n 'version': 'str',\n 'tagline': 'str',\n 'keywords': 'str',\n 'short_description': 'str',\n 'usage_information': 'str',\n 'long_description': 'str',\n 'license_model_description': 'str',\n 'system_requirements': 'str',\n 'time_released': 'datetime',\n 'release_notes': 'str',\n 'categories': 'list[str]',\n 'publisher': 'Publisher',\n 'languages': 'list[Item]',\n 'screenshots': 'list[Screenshot]',\n 'videos': 'list[NamedLink]',\n 'support_contacts': 'list[SupportContact]',\n 'support_links': 'list[NamedLink]',\n 'documentation_links': 'list[DocumentationLink]',\n 'icon': 'UploadData',\n 'banner': 'UploadData',\n 'regions': 'list[Region]',\n 'package_type': 'str',\n 'default_package_version': 'str',\n 'links': 'list[Link]',\n 'is_featured': 'bool'\n }\n\n self.attribute_map = {\n 'id': 'id',\n 'name': 'name',\n 'version': 'version',\n 'tagline': 'tagline',\n 'keywords': 'keywords',\n 'short_description': 'shortDescription',\n 'usage_information': 'usageInformation',\n 'long_description': 'longDescription',\n 'license_model_description': 'licenseModelDescription',\n 'system_requirements': 'systemRequirements',\n 'time_released': 'timeReleased',\n 'release_notes': 'releaseNotes',\n 'categories': 'categories',\n 'publisher': 'publisher',\n 'languages': 'languages',\n 'screenshots': 'screenshots',\n 'videos': 'videos',\n 'support_contacts': 'supportContacts',\n 'support_links': 'supportLinks',\n 'documentation_links': 'documentationLinks',\n 'icon': 'icon',\n 'banner': 'banner',\n 'regions': 'regions',\n 'package_type': 'packageType',\n 'default_package_version': 'defaultPackageVersion',\n 'links': 'links',\n 'is_featured': 'isFeatured'\n }\n\n self._id = None\n self._name = None\n self._version = None\n self._tagline = None\n self._keywords = None\n 
self._short_description = None\n self._usage_information = None\n self._long_description = None\n self._license_model_description = None\n self._system_requirements = None\n self._time_released = None\n self._release_notes = None\n self._categories = None\n self._publisher = None\n self._languages = None\n self._screenshots = None\n self._videos = None\n self._support_contacts = None\n self._support_links = None\n self._documentation_links = None\n self._icon = None\n self._banner = None\n self._regions = None\n self._package_type = None\n self._default_package_version = None\n self._links = None\n self._is_featured = None", "def _model_structure(self):\n self.model_structure = {\n 'title': str,\n 'description': str,\n 'tags': [str],\n 'references': [str],\n 'categories': [int],\n 'authors': [dict],\n 'defined_type': str,\n 'funding': str,\n 'license': str\n }", "def model(self) -> 'outputs.ModelDefinitionResponse':\n return pulumi.get(self, \"model\")", "def model_definition(self):\n pass", "def organization(self):\r\n return Organization(self)", "def organization(self):\r\n return Organization(self)", "def organization(self):\r\n return Organization(self)", "def model(self) -> Type[Model]:", "def model(self):", "def model(self):", "def model(self):", "def model(self):", "def model(self):", "def __init__(self):\n self.swagger_types = {\n 'owner_id': 'str',\n 'created_at': 'datetime',\n 'identifier': 'str',\n 'identifier_type': 'str',\n 'default_language': 'str',\n 'optional_identifier': 'str',\n 'id': 'str',\n 'v': 'float',\n 'id': 'str',\n 'case_records': 'list[str]'\n }\n\n self.attribute_map = {\n 'owner_id': '_ownerId',\n 'created_at': '_createdAt',\n 'identifier': 'identifier',\n 'identifier_type': 'identifierType',\n 'default_language': 'defaultLanguage',\n 'optional_identifier': 'optionalIdentifier',\n 'id': '_id',\n 'v': '__v',\n 'case_records': 'caseRecords'\n }\n\n self._owner_id = None\n self._created_at = None\n self._identifier = None\n self._identifier_type = None\n self._default_language = None\n self._optional_identifier = None\n self._id = None\n self._v = None\n self._id = None\n self._case_records = None", "def openapi(self) -> api.OpenAPISpec:\n return self._get_model(model=api.OpenAPISpec)", "def test_get_organization(self):\n pass", "def GetModel(self, request, context):\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)", "def __init__(self):\n self.swagger_types = {\n 'id_conta': 'int',\n 'id_produto': 'int',\n 'id_pessoa': 'int',\n 'id_parentesco': 'int',\n 'tipo_portador': 'str',\n 'nome_impresso': 'str',\n 'id_tipo_cartao': 'int',\n 'flag_ativo': 'int',\n 'data_cadastro_portador': 'str',\n 'data_cancelamento_portador': 'str'\n }\n\n self.attribute_map = {\n 'id_conta': 'idConta',\n 'id_produto': 'idProduto',\n 'id_pessoa': 'idPessoa',\n 'id_parentesco': 'idParentesco',\n 'tipo_portador': 'tipoPortador',\n 'nome_impresso': 'nomeImpresso',\n 'id_tipo_cartao': 'idTipoCartao',\n 'flag_ativo': 'flagAtivo',\n 'data_cadastro_portador': 'dataCadastroPortador',\n 'data_cancelamento_portador': 'dataCancelamentoPortador'\n }\n\n self._id_conta = None\n self._id_produto = None\n self._id_pessoa = None\n self._id_parentesco = None\n self._tipo_portador = None\n self._nome_impresso = None\n self._id_tipo_cartao = None\n self._flag_ativo = None\n self._data_cadastro_portador = None\n self._data_cancelamento_portador = None", "def org():\n\n settings = current.deployment_settings\n ADMIN = current.session.s3.system_roles.ADMIN\n SECTORS = \"Clusters\" if 
settings.get_ui_label_cluster() \\\n else \"Sectors\"\n stats = lambda i: settings.has_module(\"stats\")\n\n return M(c=\"org\")(\n M(\"Organizations MSW\", f=\"organisation\")(\n M(\"Create\", m=\"create\"),\n M(\"Import\", m=\"import\"),\n M(\"TestSpiegel\", c=\"org\",f=\"spiegel\")\n ),\n M(\"Offices\", f=\"office\")(\n M(\"Create\", m=\"create\"),\n M(\"Map\", m=\"map\"),\n M(\"Import\", m=\"import\")\n ),\n M(\"Facilities\", f=\"facility\")(\n M(\"Create\", m=\"create\"),\n M(\"Import\", m=\"import\"),\n ),\n M(\"Resources\", f=\"resource\", m=\"summary\",\n check=stats)(\n M(\"Create\", m=\"create\"),\n M(\"Import\", m=\"import\")\n ),\n M(\"Organization Types\", f=\"organisation_type\",\n restrict=[ADMIN])(\n M(\"Create\", m=\"create\"),\n ),\n M(\"Office Types\", f=\"office_type\",\n restrict=[ADMIN])(\n M(\"Create\", m=\"create\"),\n ),\n M(\"Facility Types\", f=\"facility_type\",\n restrict=[ADMIN])(\n M(\"Create\", m=\"create\"),\n ),\n M(SECTORS, f=\"sector\", restrict=[ADMIN])(\n M(\"Create\", m=\"create\"),\n ),\n )", "def model(self) -> str:\n ...", "def __init__(self):\n self.swagger_types = {\n 'id': 'str',\n 'name': 'str',\n 'uses_git': 'bool',\n 'git_remote_url': 'str',\n 'git_username': 'str',\n 'git_password': 'str',\n 'git_username_user_attribute': 'str',\n 'git_password_user_attribute': 'str',\n 'git_service_name': 'str',\n 'deploy_secret': 'str',\n 'unset_deploy_secret': 'bool',\n 'pull_request_mode': 'str',\n 'validation_required': 'bool',\n 'allow_warnings': 'bool',\n 'is_example': 'bool',\n 'can': 'dict(str, bool)'\n }\n\n self.attribute_map = {\n 'id': 'id',\n 'name': 'name',\n 'uses_git': 'uses_git',\n 'git_remote_url': 'git_remote_url',\n 'git_username': 'git_username',\n 'git_password': 'git_password',\n 'git_username_user_attribute': 'git_username_user_attribute',\n 'git_password_user_attribute': 'git_password_user_attribute',\n 'git_service_name': 'git_service_name',\n 'deploy_secret': 'deploy_secret',\n 'unset_deploy_secret': 'unset_deploy_secret',\n 'pull_request_mode': 'pull_request_mode',\n 'validation_required': 'validation_required',\n 'allow_warnings': 'allow_warnings',\n 'is_example': 'is_example',\n 'can': 'can'\n }\n\n self._id = None\n self._name = None\n self._uses_git = None\n self._git_remote_url = None\n self._git_username = None\n self._git_password = None\n self._git_username_user_attribute = None\n self._git_password_user_attribute = None\n self._git_service_name = None\n self._deploy_secret = None\n self._unset_deploy_secret = None\n self._pull_request_mode = None\n self._validation_required = None\n self._allow_warnings = None\n self._is_example = None\n self._can = None", "def CreateModel(self, request, context):\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)", "def __init__(self):\n self.swagger_types = {\n 'ids': 'list[str]',\n 'consumer': 'str',\n 'entity_type': 'str',\n 'start_date': 'datetime',\n 'end_date': 'datetime',\n 'created_date': 'datetime',\n 'updated_date': 'datetime',\n 'scope': 'str',\n 'disabled': 'bool',\n 'id': 'str'\n }\n\n self.attribute_map = {\n 'ids': 'ids',\n 'consumer': 'consumer',\n 'entity_type': 'entityType',\n 'start_date': 'startDate',\n 'end_date': 'endDate',\n 'created_date': 'createdDate',\n 'updated_date': 'updatedDate',\n 'scope': 'scope',\n 'disabled': 'disabled',\n 'id': 'id'\n }\n\n self._ids = None\n self._consumer = None\n self._entity_type = None\n self._start_date = None\n self._end_date = None\n self._created_date = None\n self._updated_date = None\n self._scope = None\n 
self._disabled = None\n self._id = None", "def test_retrieve_l_organization(self):\n pass", "def __init__(self):\n self.swagger_types = {\n 'id': 'str',\n 'name': 'str',\n 'date_created': 'datetime',\n 'date_modified': 'datetime',\n 'version': 'int',\n 'division': 'DomainEntityRef',\n 'campaign_status': 'str',\n 'callable_time_set': 'DomainEntityRef',\n 'contact_list': 'DomainEntityRef',\n 'dnc_lists': 'list[DomainEntityRef]',\n 'always_running': 'bool',\n 'contact_sorts': 'list[ContactSort]',\n 'messages_per_minute': 'int',\n 'errors': 'list[RestErrorDetail]',\n 'sms_config': 'SmsConfig',\n 'self_uri': 'str'\n }\n\n self.attribute_map = {\n 'id': 'id',\n 'name': 'name',\n 'date_created': 'dateCreated',\n 'date_modified': 'dateModified',\n 'version': 'version',\n 'division': 'division',\n 'campaign_status': 'campaignStatus',\n 'callable_time_set': 'callableTimeSet',\n 'contact_list': 'contactList',\n 'dnc_lists': 'dncLists',\n 'always_running': 'alwaysRunning',\n 'contact_sorts': 'contactSorts',\n 'messages_per_minute': 'messagesPerMinute',\n 'errors': 'errors',\n 'sms_config': 'smsConfig',\n 'self_uri': 'selfUri'\n }\n\n self._id = None\n self._name = None\n self._date_created = None\n self._date_modified = None\n self._version = None\n self._division = None\n self._campaign_status = None\n self._callable_time_set = None\n self._contact_list = None\n self._dnc_lists = None\n self._always_running = None\n self._contact_sorts = None\n self._messages_per_minute = None\n self._errors = None\n self._sms_config = None\n self._self_uri = None", "def schema(self):\n # NOTE This is exactly the same as the other thing.\n return {\n \"$id\": f\"{self.request.resource_url(self)}#schema\",\n \"type\": \"object\",\n \"properties\": {\n \"foo\": {\"type\": \"string\"},\n # generated fields shouldn't be submitted or in forms\n \"url\": {\"type\": \"string\", \"generated\": True},\n }\n }", "def __init__(self, service_area: object=None, funder: object=None, area_served: object=None, member_of: object=None, events: object=None, sub_organization: object=None, has_offer_catalog: object=None, global_location_number: str=None, reviews: object=None, members: object=None, aggregate_rating: object=None, duns: str=None, tax_id: str=None, award: str=None, makes_offer: object=None, contact_points: object=None, awards: str=None, seeks: object=None, member: object=None, founders: object=None, alumni: object=None, dissolution_date: datetime=None, address: object=None, logo: str=None, employees: object=None, telephone: str=None, email: str=None, department: object=None, contact_point: object=None, parent_organization: object=None, legal_name: str=None, founding_date: datetime=None, employee: object=None, number_of_employees: object=None, naics: str=None, has_pos: object=None, review: object=None, founding_location: object=None, owns: object=None, event: object=None, founder: object=None, publishing_principles: object=None, sponsor: object=None, isic_v4: str=None, location: object=None, brand: object=None, vat_id: str=None, lei_code: str=None, fax_number: str=None, same_as: str=None, url: str=None, image: object=None, additional_type: str=None, name: str=None, identifier: str=None, potential_action: object=None, main_entity_of_page: str=None, description: str=None, disambiguating_description: str=None, alternate_name: str=None): # noqa: E501\n self.swagger_types = {\n 'service_area': object,\n 'funder': object,\n 'area_served': object,\n 'member_of': object,\n 'events': object,\n 'sub_organization': object,\n 
'has_offer_catalog': object,\n 'global_location_number': str,\n 'reviews': object,\n 'members': object,\n 'aggregate_rating': object,\n 'duns': str,\n 'tax_id': str,\n 'award': str,\n 'makes_offer': object,\n 'contact_points': object,\n 'awards': str,\n 'seeks': object,\n 'member': object,\n 'founders': object,\n 'alumni': object,\n 'dissolution_date': datetime,\n 'address': object,\n 'logo': str,\n 'employees': object,\n 'telephone': str,\n 'email': str,\n 'department': object,\n 'contact_point': object,\n 'parent_organization': object,\n 'legal_name': str,\n 'founding_date': datetime,\n 'employee': object,\n 'number_of_employees': object,\n 'naics': str,\n 'has_pos': object,\n 'review': object,\n 'founding_location': object,\n 'owns': object,\n 'event': object,\n 'founder': object,\n 'publishing_principles': object,\n 'sponsor': object,\n 'isic_v4': str,\n 'location': object,\n 'brand': object,\n 'vat_id': str,\n 'lei_code': str,\n 'fax_number': str,\n 'same_as': str,\n 'url': str,\n 'image': object,\n 'additional_type': str,\n 'name': str,\n 'identifier': str,\n 'potential_action': object,\n 'main_entity_of_page': str,\n 'description': str,\n 'disambiguating_description': str,\n 'alternate_name': str\n }\n\n self.attribute_map = {\n 'service_area': 'serviceArea',\n 'funder': 'funder',\n 'area_served': 'areaServed',\n 'member_of': 'memberOf',\n 'events': 'events',\n 'sub_organization': 'subOrganization',\n 'has_offer_catalog': 'hasOfferCatalog',\n 'global_location_number': 'globalLocationNumber',\n 'reviews': 'reviews',\n 'members': 'members',\n 'aggregate_rating': 'aggregateRating',\n 'duns': 'duns',\n 'tax_id': 'taxID',\n 'award': 'award',\n 'makes_offer': 'makesOffer',\n 'contact_points': 'contactPoints',\n 'awards': 'awards',\n 'seeks': 'seeks',\n 'member': 'member',\n 'founders': 'founders',\n 'alumni': 'alumni',\n 'dissolution_date': 'dissolutionDate',\n 'address': 'address',\n 'logo': 'logo',\n 'employees': 'employees',\n 'telephone': 'telephone',\n 'email': 'email',\n 'department': 'department',\n 'contact_point': 'contactPoint',\n 'parent_organization': 'parentOrganization',\n 'legal_name': 'legalName',\n 'founding_date': 'foundingDate',\n 'employee': 'employee',\n 'number_of_employees': 'numberOfEmployees',\n 'naics': 'naics',\n 'has_pos': 'hasPOS',\n 'review': 'review',\n 'founding_location': 'foundingLocation',\n 'owns': 'owns',\n 'event': 'event',\n 'founder': 'founder',\n 'publishing_principles': 'publishingPrinciples',\n 'sponsor': 'sponsor',\n 'isic_v4': 'isicV4',\n 'location': 'location',\n 'brand': 'brand',\n 'vat_id': 'vatID',\n 'lei_code': 'leiCode',\n 'fax_number': 'faxNumber',\n 'same_as': 'sameAs',\n 'url': 'url',\n 'image': 'image',\n 'additional_type': 'additionalType',\n 'name': 'name',\n 'identifier': 'identifier',\n 'potential_action': 'potentialAction',\n 'main_entity_of_page': 'mainEntityOfPage',\n 'description': 'description',\n 'disambiguating_description': 'disambiguatingDescription',\n 'alternate_name': 'alternateName'\n }\n\n self._service_area = service_area\n self._funder = funder\n self._area_served = area_served\n self._member_of = member_of\n self._events = events\n self._sub_organization = sub_organization\n self._has_offer_catalog = has_offer_catalog\n self._global_location_number = global_location_number\n self._reviews = reviews\n self._members = members\n self._aggregate_rating = aggregate_rating\n self._duns = duns\n self._tax_id = tax_id\n self._award = award\n self._makes_offer = makes_offer\n self._contact_points = contact_points\n 
self._awards = awards\n self._seeks = seeks\n self._member = member\n self._founders = founders\n self._alumni = alumni\n self._dissolution_date = dissolution_date\n self._address = address\n self._logo = logo\n self._employees = employees\n self._telephone = telephone\n self._email = email\n self._department = department\n self._contact_point = contact_point\n self._parent_organization = parent_organization\n self._legal_name = legal_name\n self._founding_date = founding_date\n self._employee = employee\n self._number_of_employees = number_of_employees\n self._naics = naics\n self._has_pos = has_pos\n self._review = review\n self._founding_location = founding_location\n self._owns = owns\n self._event = event\n self._founder = founder\n self._publishing_principles = publishing_principles\n self._sponsor = sponsor\n self._isic_v4 = isic_v4\n self._location = location\n self._brand = brand\n self._vat_id = vat_id\n self._lei_code = lei_code\n self._fax_number = fax_number\n self._same_as = same_as\n self._url = url\n self._image = image\n self._additional_type = additional_type\n self._name = name\n self._identifier = identifier\n self._potential_action = potential_action\n self._main_entity_of_page = main_entity_of_page\n self._description = description\n self._disambiguating_description = disambiguating_description\n self._alternate_name = alternate_name", "def test_add_organization(self):\n pass", "def swagger_definition(self, base_path=None, **kwargs):\n return Swagger(\n {\n \"info\": Info(\n {\n key: kwargs.get(key, self.DEFAULT_INFO.get(key))\n for key in Info.fields.keys()\n if key in kwargs or key in self.DEFAULT_INFO\n }\n ),\n \"paths\": self.paths,\n \"swagger\": \"2.0\",\n \"basePath\": base_path,\n }\n ).to_primitive()", "def __init__(self, client):\n self.client = client\n self.definitions = client.swagger_spec.definitions", "def __init__(self, **kwargs):\n self.swagger_types = {\n 'key': 'str',\n 'display_name': 'str',\n 'description': 'str',\n 'glossary_key': 'str',\n 'parent_term_key': 'str',\n 'is_allowed_to_have_child_terms': 'bool',\n 'path': 'str',\n 'lifecycle_state': 'str',\n 'time_created': 'datetime',\n 'time_updated': 'datetime',\n 'created_by_id': 'str',\n 'updated_by_id': 'str',\n 'owner': 'str',\n 'workflow_status': 'str',\n 'uri': 'str',\n 'associated_object_count': 'int',\n 'associated_objects': 'list[TermAssociatedObject]'\n }\n\n self.attribute_map = {\n 'key': 'key',\n 'display_name': 'displayName',\n 'description': 'description',\n 'glossary_key': 'glossaryKey',\n 'parent_term_key': 'parentTermKey',\n 'is_allowed_to_have_child_terms': 'isAllowedToHaveChildTerms',\n 'path': 'path',\n 'lifecycle_state': 'lifecycleState',\n 'time_created': 'timeCreated',\n 'time_updated': 'timeUpdated',\n 'created_by_id': 'createdById',\n 'updated_by_id': 'updatedById',\n 'owner': 'owner',\n 'workflow_status': 'workflowStatus',\n 'uri': 'uri',\n 'associated_object_count': 'associatedObjectCount',\n 'associated_objects': 'associatedObjects'\n }\n\n self._key = None\n self._display_name = None\n self._description = None\n self._glossary_key = None\n self._parent_term_key = None\n self._is_allowed_to_have_child_terms = None\n self._path = None\n self._lifecycle_state = None\n self._time_created = None\n self._time_updated = None\n self._created_by_id = None\n self._updated_by_id = None\n self._owner = None\n self._workflow_status = None\n self._uri = None\n self._associated_object_count = None\n self._associated_objects = None", "def __init__(self, model: object):\n self.model 
= model", "def test_api_schema(self):\n response = self.client.get(\"/api/schema/\")\n self.assertEqual(response.status_code, 200)\n self.assertEqual(\n response.get(\"Content-Type\"), \"application/vnd.oai.openapi; charset=utf-8\"\n )\n self.assertEqual(\n response.get(\"Content-Disposition\"), 'inline; filename=\"Marsha API.yaml\"'\n )", "def org():\n\n ADMIN = current.session.s3.system_roles.ADMIN\n SECTORS = \"Clusters\" if current.deployment_settings.get_ui_label_cluster() \\\n else \"Sectors\"\n\n return M(c=\"org\")(\n M(\"Organizations\", f=\"organisation\")(\n M(\"Create\", m=\"create\"),\n M(\"Import\", m=\"import\")\n ),\n M(\"Facilities\", f=\"facility\", m=\"summary\")(\n M(\"Create\", m=\"create\"),\n M(\"Map\", m=\"map\"),\n M(\"Import\", m=\"import\"),\n ),\n M(\"Offices\", f=\"office\")(\n M(\"Create\", m=\"create\"),\n M(\"Map\", m=\"map\"),\n M(\"Import\", m=\"import\")\n ),\n M(\"Resources\", f=\"resource\", m=\"summary\")(\n M(\"Create\", m=\"create\"),\n M(\"Import\", m=\"import\")\n ),\n M(\"Organization Types\", f=\"organisation_type\",\n restrict=[ADMIN])(\n M(\"Create\", m=\"create\"),\n ),\n M(\"Service Types\", f=\"service\",\n restrict=[ADMIN])(\n M(\"Create\", m=\"create\"),\n ),\n M(\"Office Types\", f=\"office_type\",\n restrict=[ADMIN])(\n M(\"Create\", m=\"create\"),\n ),\n M(\"Facility Types\", f=\"facility_type\",\n restrict=[ADMIN])(\n M(\"Create\", m=\"create\"),\n ),\n M(SECTORS, f=\"sector\", restrict=[ADMIN])(\n M(\"Create\", m=\"create\"),\n ),\n )", "def __init__(self):\n self.swagger_types = {\n 'id': 'str',\n 'domain': 'str',\n 'custom_domain': 'str',\n 'customer_email': 'str',\n 'customer_name': 'str',\n 'company': 'str',\n 'date_created': 'datetime',\n 'date_validity': 'datetime',\n 'status': 'str',\n 'account_id': 'str',\n 'cluster_id': 'str',\n 'task_id': 'str',\n 'version': 'str',\n 'is_latest': 'bool',\n 'product_id': 'str',\n 'variation_id': 'str'\n }\n\n self.attribute_map = {\n 'id': 'id',\n 'domain': 'domain',\n 'custom_domain': 'custom_domain',\n 'customer_email': 'customer_email',\n 'customer_name': 'customer_name',\n 'company': 'company',\n 'date_created': 'date_created',\n 'date_validity': 'date_validity',\n 'status': 'status',\n 'account_id': 'account_id',\n 'cluster_id': 'cluster_id',\n 'task_id': 'task_id',\n 'version': 'version',\n 'is_latest': 'is_latest',\n 'product_id': 'product_id',\n 'variation_id': 'variation_id'\n }\n\n self._id = None\n self._domain = None\n self._custom_domain = None\n self._customer_email = None\n self._customer_name = None\n self._company = None\n self._date_created = None\n self._date_validity = None\n self._status = None\n self._account_id = None\n self._cluster_id = None\n self._task_id = None\n self._version = None\n self._is_latest = None\n self._product_id = None\n self._variation_id = None", "def api_documentation(api: str, summary: str, in_model: BaseModel,\n out_model: BaseModel, out_description: str) -> Callable:\n for model, name in ((in_model, 'Input'), (out_model, 'Output')):\n doc.Object(\n make_dataclass(\n f'Api{api[1:].title()}{name}',\n [(key, val.type_, val.type_)\n for key, val in model.__dict__['__fields__'].items()]))\n im_returns = doc.JsonBody({\n key: val.type_\n for key, val in in_model.__dict__['__fields__'].items()\n })\n\n om_returns = {\n key: val.type_\n for key, val in out_model.__dict__['__fields__'].items()\n }\n\n def decorator(func):\n @doc.summary(summary)\n @doc.response(412,\n 'Error: Precondition Failed',\n description='The passed request-parameters are 
invalid')\n @doc.response(500,\n 'Error: Server-Error occured',\n description='An internal error occured')\n @doc.consumes(im_returns,\n content_type='application/json',\n location='body')\n @doc.produces(om_returns,\n content_type='application/json',\n description=out_description)\n @wraps(func)\n async def function_wrapper(request, *args, **kwargs):\n return await func(request=request, *args, **kwargs)\n\n return function_wrapper\n\n return decorator", "def __init__(self):\n self.swagger_types = {\n 'source_contact': 'AddressableEntityRef',\n 'target_contact': 'AddressableEntityRef',\n 'resulting_contact': 'AddressableEntityRef'\n }\n\n self.attribute_map = {\n 'source_contact': 'sourceContact',\n 'target_contact': 'targetContact',\n 'resulting_contact': 'resultingContact'\n }\n\n self._source_contact = None\n self._target_contact = None\n self._resulting_contact = None", "def create_model(self):\n pass", "def create_model(self):\n pass", "def MakeModel(self):\n pass", "def get_model_schema(self, app, model):\n model_schema = {\n 'app': {\n 'name': model._meta.app_label,\n 'label': model._meta.app_config.verbose_name,\n },\n 'name': model._meta.model_name,\n 'label': model._meta.verbose_name,\n 'components': []\n }\n\n for field in model._meta.get_fields():\n # 排除不需要在前端构建form的field: id、反向关联field\n # print('field', type(field), field)\n if field.name == 'id':\n continue\n if isinstance(field, ForeignObjectRel):\n # logger.info(\"ForeignObjectRel\", field)\n continue\n\n component = self.get_field_data(model, field)\n # logger.info('component', component)\n model_schema['components'].append(component)\n # print('get_model_schema', model_schema)\n return model_schema\n # return JsonResponse(model_schema)", "def __init_subclass__(cls, **kwargs):\n super().__init_subclass__(**kwargs)\n\n if not __name__ == cls.__module__:\n # e.g.: cls.__module__ = mpcontribs.api.projects.views\n views_path = cls.__module__.split(\".\")\n doc_path = \".\".join(views_path[:-1] + [\"document\"])\n cls.tags = [views_path[-2]]\n doc_filepath = doc_path.replace(\".\", os.sep) + \".py\"\n if os.path.exists(doc_filepath):\n cls.doc_name = cls.tags[0].capitalize()\n Model = getattr(import_module(doc_path), cls.doc_name)\n cls.schema_name = cls.doc_name + \"Schema\"\n cls.Schema = type(\n cls.schema_name,\n (ModelSchema, object),\n {\n \"Meta\": type(\n \"Meta\",\n (object,),\n dict(model=Model, ordered=True, model_build_obj=False),\n )\n },\n )\n cls.definitions = {cls.schema_name: schema2jsonschema(cls.Schema)}\n cls.resource.schema = cls.Schema\n\n # write flask-mongorest swagger specs\n for method in cls.methods:\n spec = get_specs(cls, method, cls.tags[0])\n if spec:\n dir_path = os.path.join(DOC_DIR, cls.tags[0])\n file_path = os.path.join(dir_path, method.__name__ + \".yml\")\n if not os.path.exists(file_path):\n os.makedirs(dir_path, exist_ok=True)\n\n if is_gunicorn:\n with open(file_path, \"w\") as f:\n yaml.dump(spec, f)\n logger.debug(\n f\"{cls.tags[0]}.{method.__name__} written to {file_path}\"\n )", "def __init__(self, model):\n self.model = model", "def __init__(self, model):\n self.model = model", "def __init__(self, model):\n self.model = model", "def __init__(self, model):\n self.model = model", "def __init__(self):\n self.swagger_types = {\n 'id_conta': 'int',\n 'id_pessoa': 'int',\n 'id_cartao': 'int',\n 'id_bandeira': 'int',\n 'id_tipo_cartao': 'int',\n 'numero_cartao': 'str',\n 'nome_plastico': 'str',\n 'cvv2': 'str',\n 'data_geracao': 'str',\n 'data_validade': 'str',\n 'cpf': 'str',\n 
'tipo_portador': 'str',\n 'trilha1': 'str',\n 'trilha2': 'str',\n 'trilha_cvv1': 'str',\n 'trilha_cvv2': 'str',\n 'flag_virtual': 'int',\n 'nome_bandeira': 'str',\n 'flag_titular': 'int',\n 'sequencial_cartao': 'int',\n 'id_status': 'int',\n 'descricao_status_cartao': 'str',\n 'data_status': 'str',\n 'id_estagio': 'int',\n 'descricao_estagio': 'str',\n 'data_estagio': 'str',\n 'numero_bin': 'str',\n 'id_produto': 'int',\n 'descricao_produto': 'str',\n 'id_status_conta': 'int',\n 'descricao_status_conta': 'int',\n 'data_embossing': 'str',\n 'codigo_desbloqueio': 'str',\n 'nome_pessoa': 'str',\n 'tipo_pessoa': 'str',\n 'data_nascimento': 'str',\n 'id_endereco': 'int',\n 'id_tipo_endereco': 'int',\n 'descricao_tipo_endereco': 'str',\n 'cep': 'str',\n 'logradouro': 'str',\n 'numero_endereco': 'str',\n 'complemento_endereco': 'str',\n 'bairro': 'str',\n 'cidade': 'str',\n 'uf': 'str',\n 'pais': 'str',\n 'senha_criptografada': 'str',\n 'icvv': 'str',\n 'id_status_impressao': 'int'\n }\n\n self.attribute_map = {\n 'id_conta': 'idConta',\n 'id_pessoa': 'idPessoa',\n 'id_cartao': 'idCartao',\n 'id_bandeira': 'idBandeira',\n 'id_tipo_cartao': 'idTipoCartao',\n 'numero_cartao': 'numeroCartao',\n 'nome_plastico': 'nomePlastico',\n 'cvv2': 'cvv2',\n 'data_geracao': 'dataGeracao',\n 'data_validade': 'dataValidade',\n 'cpf': 'cpf',\n 'tipo_portador': 'tipoPortador',\n 'trilha1': 'trilha1',\n 'trilha2': 'trilha2',\n 'trilha_cvv1': 'trilhaCVV1',\n 'trilha_cvv2': 'trilhaCVV2',\n 'flag_virtual': 'flagVirtual',\n 'nome_bandeira': 'nomeBandeira',\n 'flag_titular': 'flagTitular',\n 'sequencial_cartao': 'sequencialCartao',\n 'id_status': 'idStatus',\n 'descricao_status_cartao': 'descricaoStatusCartao',\n 'data_status': 'dataStatus',\n 'id_estagio': 'idEstagio',\n 'descricao_estagio': 'descricaoEstagio',\n 'data_estagio': 'dataEstagio',\n 'numero_bin': 'numeroBin',\n 'id_produto': 'idProduto',\n 'descricao_produto': 'descricaoProduto',\n 'id_status_conta': 'idStatusConta',\n 'descricao_status_conta': 'descricaoStatusConta',\n 'data_embossing': 'dataEmbossing',\n 'codigo_desbloqueio': 'codigoDesbloqueio',\n 'nome_pessoa': 'nomePessoa',\n 'tipo_pessoa': 'tipoPessoa',\n 'data_nascimento': 'dataNascimento',\n 'id_endereco': 'idEndereco',\n 'id_tipo_endereco': 'idTipoEndereco',\n 'descricao_tipo_endereco': 'descricaoTipoEndereco',\n 'cep': 'cep',\n 'logradouro': 'logradouro',\n 'numero_endereco': 'numeroEndereco',\n 'complemento_endereco': 'complementoEndereco',\n 'bairro': 'bairro',\n 'cidade': 'cidade',\n 'uf': 'uf',\n 'pais': 'pais',\n 'senha_criptografada': 'senhaCriptografada',\n 'icvv': 'icvv',\n 'id_status_impressao': 'idStatusImpressao'\n }\n\n self._id_conta = None\n self._id_pessoa = None\n self._id_cartao = None\n self._id_bandeira = None\n self._id_tipo_cartao = None\n self._numero_cartao = None\n self._nome_plastico = None\n self._cvv2 = None\n self._data_geracao = None\n self._data_validade = None\n self._cpf = None\n self._tipo_portador = None\n self._trilha1 = None\n self._trilha2 = None\n self._trilha_cvv1 = None\n self._trilha_cvv2 = None\n self._flag_virtual = None\n self._nome_bandeira = None\n self._flag_titular = None\n self._sequencial_cartao = None\n self._id_status = None\n self._descricao_status_cartao = None\n self._data_status = None\n self._id_estagio = None\n self._descricao_estagio = None\n self._data_estagio = None\n self._numero_bin = None\n self._id_produto = None\n self._descricao_produto = None\n self._id_status_conta = None\n self._descricao_status_conta = None\n 
self._data_embossing = None\n self._codigo_desbloqueio = None\n self._nome_pessoa = None\n self._tipo_pessoa = None\n self._data_nascimento = None\n self._id_endereco = None\n self._id_tipo_endereco = None\n self._descricao_tipo_endereco = None\n self._cep = None\n self._logradouro = None\n self._numero_endereco = None\n self._complemento_endereco = None\n self._bairro = None\n self._cidade = None\n self._uf = None\n self._pais = None\n self._senha_criptografada = None\n self._icvv = None\n self._id_status_impressao = None", "def getOrganization(self):\n return _libsbml.ModelCreator_getOrganization(self)", "def describe_model(ModelName=None):\n pass", "def create_models( self ):", "def test_retrieve_l_organizations(self):\n pass", "def test_organizations_list(self):\n pass", "def organizations(self):\n self.elements('organizations')", "def test_addOrganization(self):\r\n #fetch the object form the datastore\r\n org_obj = db.GqlQuery(\"SELECT * FROM Organization\")\r\n organization = addOrganization(org_obj.run().next())\r\n #view it as a dict\r\n organization_d = importer.etree_to_dict(organization)\r\n assert [{'name': u'Test Organization'},\r\n {'kind': u'TestOrgKind'},\r\n {'description': u'TestOrgDescription'},\r\n {'location': [{'city': u'Organization City'}, {'country': u'USA'}]},\r\n {'images': [\r\n {'image': [\r\n {'source': u'http://www.testimage.com'},\r\n {'description': u'Description of TestImage'}]}]},\r\n {'maps': [\r\n {'map': [{'source': u'http://maps.google.com'}, {'description': u'Map Description'}]}]},\r\n {'videos': [{u'youtube': u'r_8om4dsEmw'}]},\r\n {'social': [{u'twitter': u'@billgates'}]},\r\n {'citations': [\r\n {'citation': [\r\n {'source': u'http://maps.google.com'},\r\n {'description': u'Map Description'}]}]},\r\n {'external-links': [\r\n {'external-link': [\r\n {'source': u'http://www.google.com'},\r\n {'description': u'Google'}]}]}] in organization_d.values()", "def org():\n\n sysroles = current.auth.get_system_roles()\n\n ADMIN = sysroles.ADMIN\n ORG_GROUP_ADMIN = sysroles.ORG_GROUP_ADMIN\n\n return M(c=\"org\")(\n M(\"Organizations\", f=\"organisation\")(\n M(\"Hierarchy\", m=\"hierarchy\"),\n M(\"Create\", m=\"create\", restrict=(ADMIN, ORG_GROUP_ADMIN)),\n ),\n M(\"Facilities\", f=\"facility\")(\n M(\"Create\", m=\"create\"),\n ),\n M(\"Administration\", restrict=(ADMIN, ORG_GROUP_ADMIN))(\n M(\"Facility Types\", f=\"facility_type\"),\n M(\"Organization Types\", f=\"organisation_type\"),\n M(\"Sectors\", f=\"sector\"),\n )\n )", "def __init__(self, model: str, **kwargs):\n super().__init__(model=model)", "def test_openapi_schema(app, client):\n response = client.get(\"/swagger/\")\n assert response.status_code == 200\n assert len(json.loads(response.data)[\"paths\"]) > 0", "def setOrganization(self, *args):\n return _libsbml.ModelCreator_setOrganization(self, *args)", "def set_model(self, model):\n if isinstance(model, edmx.Document):\n doc = model\n model = model.root\n elif isinstance(model, edmx.Edmx):\n # create a document to hold the model\n doc = edmx.Document(root=model)\n else:\n raise TypeError(\"Edmx document or instance required for model\")\n # update the base URI of the metadata document to identify this service\n doc.set_base(self.service_root)\n if self.model:\n # get rid of the old model\n for c in self.ws.Collection:\n c.detach_from_doc()\n c.parent = None\n self.ws.Collection = []\n for s in model.DataServices.Schema:\n for container in s.EntityContainer:\n if container.is_default_entity_container():\n prefix = \"\"\n else:\n prefix = 
container.name + \".\"\n # define one feed for each entity set, prefixed with the name\n # of the entity set\n for es in container.EntitySet:\n feed = self.ws.add_child(app.Collection)\n feed.href = prefix + es.name\n feed.add_child(atom.Title).set_value(prefix + es.name)\n # update the locations following SetBase above\n es.set_location()\n self.model = model", "def schema(self):\n # TODO The schema of a container resource...\n # This is the same as the leaf.\n # However, this isn't actually the schema of the response\n return {\n \"$id\": f\"{self.request.resource_url(self)}#schema\",\n \"type\": \"object\",\n \"properties\": {\n \"foo\": {\"type\": \"string\"},\n # generated fields shouldn't be submitted or in forms\n \"url\": {\"type\": \"string\", \"generated\": True},\n }\n }", "def model_artifact(self):\n pass", "def get_model(self):\n return Doc()", "def swagger():\n return jsonify(current_app.spec.to_dict())", "def __init__(self):\n self.swagger_types = {\n 'discovery': 'Discovery',\n 'groups': 'list[str]',\n 'labels': 'object'\n }\n\n self.attribute_map = {\n 'discovery': 'discovery',\n 'groups': 'groups',\n 'labels': 'labels'\n }\n\n self._discovery = None\n self._groups = None\n self._labels = None", "def __init__(self, id: int=None, owner: str=None, code: int=None, status: str=None):\n self.swagger_types = {\n 'id': int,\n 'owner': str,\n 'code': int,\n 'status': str\n }\n\n self.attribute_map = {\n 'id': 'id',\n 'owner': 'owner',\n 'code': 'code',\n 'status': 'status'\n }\n\n self._id = id\n self._owner = owner\n self._code = code\n self._status = status", "def model() -> Model:\n return Model()", "def _build_model(self):\n raise NotImplementedError()", "def test_data_org_structure(self):\n url = '/api/options/?list=org_structure'\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)\n # Division 1 will be present in the response.\n self.assertContains(response, self.div1.name)\n # Response can be deserialised into a dict.\n r = response.json()\n self.assertTrue(isinstance(r, dict))\n # Deserialised response contains a list.\n self.assertTrue(isinstance(r['objects'], list))\n # Make OrgUnit inactive to test exclusion.\n self.branch1.active = False\n self.branch1.save()\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)\n # Division 1 won't be present in the response.\n self.assertNotContains(response, self.branch1.name)", "def real_model(request):\n return request.config.option.real_model", "def build_model():", "def __init__(self, datastoreio_stub, label=None):\n super(PutModels, self).__init__(label=label)\n self.datastoreio = datastoreio_stub", "def build_model(self):\n pass", "def build_model(self):\n pass", "def __init__(self, model):\n self._model = model", "def __init__(self):\n self.swagger_types = {\n 'id': 'str',\n 'name': 'str',\n 'error_entity': 'DomainEntityRef',\n 'related_entity': 'DomainEntityRef',\n 'timestamp': 'datetime',\n 'level': 'str',\n 'category': 'str',\n 'correlation_id': 'str',\n 'event_message': 'EventMessage',\n 'self_uri': 'str'\n }\n\n self.attribute_map = {\n 'id': 'id',\n 'name': 'name',\n 'error_entity': 'errorEntity',\n 'related_entity': 'relatedEntity',\n 'timestamp': 'timestamp',\n 'level': 'level',\n 'category': 'category',\n 'correlation_id': 'correlationId',\n 'event_message': 'eventMessage',\n 'self_uri': 'selfUri'\n }\n\n self._id = None\n self._name = None\n self._error_entity = None\n self._related_entity = None\n self._timestamp = None\n self._level = None\n self._category = 
None\n self._correlation_id = None\n self._event_message = None\n self._self_uri = None", "def test_get_organization_from_api_key(self):\n pass", "def __init__(self, model):\n\n self.model = model\n self.paths = PathOperations()\n self.tags = Tags()", "def schema_view(request):\n generator = schemas.SchemaGenerator(title='Experiment Data Depot')\n return response.Response(generator.get_schema(request=request))", "def _generate_implicit_api_resource(self):\n return ImplicitHttpApiResource().to_dict()", "def __init__(self, model: Optional[Model] = None) -> None:\n self.model = model", "def __init__(self, model: Type[ModelType]):\n self.model = model", "def __init__(self, model: Type[ModelType]):\n self.model = model", "def post(self):\n try:\n # Create the organization\n if request.headers['Content-Type'] == \"application/json\":\n payload = request.get_json(silent=True)\n elif request.form:\n payload = request.data.to_dict()\n else:\n payload = request.get_json(force=True)\n organization = Organization(**payload)\n organization.save()\n response = organization.serialize()\n return make_response(jsonify(response)), 201\n\n except Exception as e:\n response = {\n \"message\": str(e)\n }\n return make_response(jsonify(response)), 500", "def test_organizations_read(self):\n pass", "def test_basic_api_inline_swagger(self):\n self.create_and_verify_stack(\"single/basic_api_inline_swagger\")\n\n first_dep_ids = self.get_stack_deployment_ids()\n self.assertEqual(len(first_dep_ids), 1)\n\n body = self.get_template_resource_property(\"MyApi\", \"DefinitionBody\")\n body[\"basePath\"] = \"/newDemo\"\n self.set_template_resource_property(\"MyApi\", \"DefinitionBody\", body)\n self.update_stack()\n\n second_dep_ids = self.get_stack_deployment_ids()\n self.assertEqual(len(second_dep_ids), 1)\n\n self.assertEqual(len(set(first_dep_ids).intersection(second_dep_ids)), 0)", "def _build_model(self, **kwargs):\n pass", "def schema(self):", "def modelClass(self):\n raise NotImplementedError", "def resolve(self, spec: \"ModelSpec\"):", "def __init__(__self__, *,\n name: pulumi.Input[str],\n type: pulumi.Input[str],\n composite_model_properties: Optional[pulumi.Input[Sequence[pulumi.Input['AssetModelPropertyArgs']]]] = None,\n description: Optional[pulumi.Input[str]] = None):\n pulumi.set(__self__, \"name\", name)\n pulumi.set(__self__, \"type\", type)\n if composite_model_properties is not None:\n pulumi.set(__self__, \"composite_model_properties\", composite_model_properties)\n if description is not None:\n pulumi.set(__self__, \"description\", description)", "def test_related_resource(read_only):\n class ProjectSerializer(serializers.ModelSerializer):\n class Meta:\n model = ProjectWithCustomID\n fields = ('custom_id', 'name', 'archived', 'members', 'owner_member')\n read_only_fields = ['members', 'owner_member'] if read_only else []\n\n class ProjectViewSet(mixins.RetrieveModelMixin, viewsets.GenericViewSet):\n queryset = ProjectWithCustomID.objects.all()\n serializer_class = ProjectSerializer\n renderer_classes = [renderers.JSONRenderer]\n parser_classes = [parsers.JSONParser]\n swagger_schema = base.BasicSwaggerAutoSchema\n\n router = routers.DefaultRouter()\n router.register(r'projects', ProjectViewSet, **compatibility._basename_or_base_name('projects'))\n\n generator = OpenAPISchemaGenerator(info=openapi.Info(title=\"\", default_version=\"\"), patterns=router.urls)\n\n swagger = generator.get_schema(request=None, public=True)\n\n response_schema = 
swagger['paths']['/projects/{custom_id}/']['get']['responses']['200']['schema']['properties']\n assert 'id' in response_schema['data']['properties']\n assert response_schema['data']['properties']['id']['type'] == 'string'\n assert response_schema['data']['properties']['id']['format'] == 'int32'\n assert 'type' in response_schema['data']['properties']\n assert 'attributes' in response_schema['data']['properties']\n assert list(response_schema['data']['properties']['attributes']['properties'].keys()) == ['name', 'archived']\n assert 'relationships' in response_schema['data']['properties']\n relationships_schema = response_schema['data']['properties']['relationships']['properties']\n assert list(relationships_schema.keys()) == ['members', 'owner-member']\n members_schema = relationships_schema['members']['properties']\n assert members_schema['data']['items']['properties']['id']['type'] == 'string'\n assert members_schema['data']['items']['properties']['id']['format'] == 'int64'\n assert members_schema['data']['items']['properties']['type']['pattern'] == 'member-with-custom-ids'\n owner_member_schema = relationships_schema['owner-member']['properties']\n assert owner_member_schema['data']['properties']['id']['type'] == 'string'\n assert owner_member_schema['data']['properties']['id']['format'] == 'int64'\n assert owner_member_schema['data']['properties']['type']['pattern'] == 'member-with-custom-ids'", "def test_basic_api_inline_openapi(self):\n self.create_and_verify_stack(\"single/basic_api_inline_openapi\")\n\n first_dep_ids = self.get_stack_deployment_ids()\n self.assertEqual(len(first_dep_ids), 1)\n\n body = self.get_template_resource_property(\"MyApi\", \"DefinitionBody\")\n body[\"basePath\"] = \"/newDemo\"\n self.set_template_resource_property(\"MyApi\", \"DefinitionBody\", body)\n self.update_stack()\n\n second_dep_ids = self.get_stack_deployment_ids()\n self.assertEqual(len(second_dep_ids), 1)\n\n self.assertEqual(len(set(first_dep_ids).intersection(second_dep_ids)), 0)", "def emit_swagger_spec(ctx, modules, fd, path):\n\n printed_header = False\n model = OrderedDict()\n definitions = OrderedDict()\n augments = list()\n # Go through all modules and extend the model.\n for module in modules:\n if not printed_header:\n model = print_header(module, fd)\n printed_header = True\n path = '/'\n\n typdefs = [module.i_typedefs[element] for element in module.i_typedefs]\n models = list(module.i_groupings.values())\n referenced_types = list()\n referenced_types = findTypedefs(ctx, module, models, referenced_types)\n for element in referenced_types:\n typdefs.append(element)\n\n # The attribute definitions are processed and stored in the \"typedefs\" data structure for further use.\n gen_typedefs(typdefs)\n\n # list() needed for python 3 compatibility\n referenced_models = list()\n referenced_models = findModels(ctx, module, models, referenced_models)\n for element in referenced_models:\n models.append(element)\n # Print the swagger definitions of the Yang groupings.\n definitions = gen_model(models, definitions)\n\n # If a model at runtime was dependant of another model which had been encounter yet, it is generated 'a posteriori'.\n if pending_models:\n gen_model(pending_models, definitions)\n\n if PARENT_MODELS:\n for element in PARENT_MODELS:\n if PARENT_MODELS[element]['models']:\n definitions[element]['discriminator'] = PARENT_MODELS[element]['discriminator']\n # extract children which contain data definition keywords\n chs = [ch for ch in module.i_children\n if ch.keyword in 
(statements.data_definition_keywords + ['rpc','notification'])]\n\n # generate the APIs for all children\n if len(chs) > 0:\n model['paths'] = OrderedDict()\n gen_apis(chs, path, model['paths'], definitions)\n\n model['definitions'] = definitions\n fd.write(json.dumps(model, indent=4, separators=(',', ': ')))", "def model_info():\n pass", "def test_org_structure(self):\n url = '/api/users/?org_structure=true'\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)\n # User 1 will be present in the response.\n self.assertContains(response, self.user1.email)\n # Division 1 will be present in the response.\n self.assertContains(response, self.div1.name)", "def model(name):\n model = Model.query.filter_by(name=name).first_or_404()\n\n if request.method == 'POST':\n # update model (publish a new version)\n validate_owner(model, request)\n\n # TODO validate the data\n # TODO should the model be sent as a separate file, w/ a checksum?\n # TODO client should first validate version stuff before submitting the full model\n data = request.get_json()\n\n try:\n version = data['meta']['version']\n model.publish(data['meta'], data['model'], version)\n model.make_archive(version)\n db.session.add(model)\n db.session.commit()\n return jsonify(status='success')\n except ModelConflictException as e:\n return jsonify(status='failure', reason=str(e)), 409\n\n elif request.method == 'DELETE':\n # deletes the entire model package\n validate_owner(model, request)\n model.destroy()\n return jsonify(status='success')\n\n elif request.method == 'PUT':\n # this is just for changing ownership atm\n validate_owner(model, request)\n data = request.get_json()\n\n user = User.query.filter_by(name=data['user']).first_or_404()\n model.owner = user\n db.session.add(model)\n db.session.commit()\n return jsonify(status='success')\n\n else:\n # download archive\n try:\n return send_from_directory(*os.path.split(model.archive()))\n except ModelNotFoundException:\n abort(404)" ]
[ "0.6377022", "0.6078088", "0.58630645", "0.5785033", "0.5765188", "0.5756673", "0.57561225", "0.573102", "0.5684268", "0.5679472", "0.5679472", "0.5679472", "0.5673386", "0.56596184", "0.56596184", "0.56596184", "0.56596184", "0.56596184", "0.5653947", "0.5642673", "0.56262547", "0.5557814", "0.5555922", "0.5537126", "0.55168724", "0.5509069", "0.5498529", "0.54883623", "0.5482239", "0.5469874", "0.5457707", "0.545488", "0.5436275", "0.5419595", "0.5415368", "0.54034394", "0.53935134", "0.5392721", "0.5371513", "0.53396076", "0.5335642", "0.53280604", "0.53137285", "0.53137285", "0.5306561", "0.5278172", "0.5272773", "0.52303207", "0.52303207", "0.52303207", "0.52303207", "0.522521", "0.5218521", "0.5210209", "0.51995033", "0.5189326", "0.5184482", "0.51807046", "0.51782686", "0.51689374", "0.51654285", "0.5150648", "0.5147429", "0.51472056", "0.51422256", "0.51331824", "0.5131841", "0.51305073", "0.5129039", "0.5127362", "0.5125861", "0.511951", "0.51190114", "0.51174474", "0.51174194", "0.51108783", "0.51081973", "0.51081973", "0.5094234", "0.50840366", "0.50796187", "0.5069657", "0.50667924", "0.5052756", "0.5048582", "0.5047059", "0.5047059", "0.50437146", "0.5043346", "0.50419205", "0.5037494", "0.5033446", "0.50272256", "0.5025908", "0.5023542", "0.5023429", "0.50229377", "0.50148565", "0.5007729", "0.5004751", "0.50008136" ]
0.0
-1
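Each record in this excerpt repeats one shape: a one-line docstring as the query, the code snippet it documents as the positive document, a metadata dict whose `objective.triplet` entry names the (query, document, negatives) fields, a pool of candidate snippets with a parallel list of scores stored as strings, and a trailing float/int pair that read as the positive document's own retrieval score and rank. The `0.0` / `-1` pair closing the row above looks like a "not retrieved" sentinel, though the dump never says so. A minimal Python sketch of a container for one record; the score/rank field names and the sentinel reading are assumptions:

from dataclasses import dataclass
from typing import Any, Dict, List

@dataclass
class RetrievalRecord:
    # Field names for query/document/negatives come from the metadata's
    # objective.triplet entry; the remaining names are assumed for this sketch.
    query: str
    document: str
    metadata: Dict[str, Any]
    negatives: List[str]
    negative_scores: List[str]  # stored as strings in the dump, e.g. "0.6377022"
    document_score: float       # 0.0 appears to mean "positive not retrieved"
    document_rank: int          # -1 appears to be the matching sentinel

    def hard_negatives(self, floor: float) -> List[str]:
        # Keep only negatives scoring at or above `floor`; these are the
        # informative candidates for contrastive training.
        return [doc for doc, s in zip(self.negatives, self.negative_scores)
                if float(s) >= floor]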
Sets the country of this Organization.
def country(self, country):
    self._country = country
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def country(self, country):\n if country is None:\n raise ValueError(\"Invalid value for `country`, must not be `None`\")\n\n self._country = country", "def domain_settings_set_country(self, country):\n return self._request('domain/settings/set_country', inspect_args_func(inspect.currentframe()))", "def country(self, country):\n # type: (string_types) -> None\n\n if country is not None:\n if not isinstance(country, string_types):\n raise TypeError(\"Invalid type for `country`, type has to be `string_types`\")\n\n self._country = country", "def location_country(self, location_country):\n\n self._location_country = location_country", "def country_code(self, country_code):\n\n self._country_code = country_code", "def country_id(self, country_id):\n\n self._country_id = country_id", "def country_id(self, country_id):\n\n self._country_id = country_id", "def with_preset_issuing_country(self, country):\n self.__preset_issuing_country = country\n return self", "def country_of_taxation(self, country_of_taxation):\n\n self._country_of_taxation = country_of_taxation", "def set_CountryCode(self, value):\n super(AddressValidationInputSet, self)._set_input('CountryCode', value)", "def source_scopes_country(self, source_scopes_country):\n\n self._source_scopes_country = source_scopes_country", "def source_locations_country(self, source_locations_country):\n\n self._source_locations_country = source_locations_country", "def country(self) -> str:\n return pulumi.get(self, \"country\")", "def country(self):\n return self._country", "def country(self):\n return self._country", "def country(self):\n if self._country is not None:\n return self._country\n if not self.isValid():\n return None\n self._country = self.geocoder_results.country\n return self._country", "def set_country_group_scope(self, country_group_scope):\n self.single_selection_from_kendo_dropdown(self.country_group_scope_dropdown_locator, country_group_scope)", "def set_country_code(zd, option, **kwargs):\n cfg_option = {'country_code': '',\n 'channel_optimization': '',\n 'channel_mode':''} \n cfg_option.update(option)\n \n xloc = LOCATOR_CFG_SYSTEM_COUNTRY_CODE\n xloc_map = {\n 'country_code': xloc['country_code_listbox'],\n 'compatibility': xloc['optimization_for_compatibility_radio'],\n 'interoperability': xloc['optimization_for_interoperability_radio'],\n 'performance': xloc['optimization_for_performance_radio'],\n 'allow_indoor': xloc['allow_indoor_channel_checkbox'],\n }\n nav_to(zd)\n \n if cfg_option['country_code']:\n zd.s.select_option(xloc_map['country_code'], re.escape(cfg_option['country_code']))\n if cfg_option['channel_optimization']:\n zd.s.click_and_wait(xloc_map[cfg_option['channel_optimization']])\n if cfg_option['channel_mode']:\n zd.s.click_if_not_checked(xloc_map[cfg_option['channel_mode']])\n \n zd.s.choose_ok_on_next_confirmation()\n zd.s.click_and_wait(zd.info['loc_cfg_sys_ctrycode_apply_button'])\n if not zd.s.is_confirmation_present(5):\n raise Exception(\"No dialog confirmation for setting country code appeared\")\n zd.s.get_confirmation()\n logging.info(\"Change country code option for ZoneDirector to %s successfully\" % str(cfg_option))", "def country(self) -> Optional[str]:\n return pulumi.get(self, \"country\")", "def country(self) -> Optional[str]:\n return pulumi.get(self, \"country\")", "def country(self):\n # type: () -> string_types\n return self._country", "def country(self):\n if \"country\" in self._prop_dict:\n return self._prop_dict[\"country\"]\n else:\n return None", "def country(self):\n if 
\"country\" in self._prop_dict:\n return self._prop_dict[\"country\"]\n else:\n return None", "def language_iso_code(self, language_iso_code):\n\n self._language_iso_code = language_iso_code", "def __create_country_dropdown(self):\n return dcc.Dropdown(\n id=\"dd_country\",\n options=self.data_reader.get_country_options(),\n value=\"Canada\",\n )", "def organization(self, organization):\n\n self._organization = organization", "def organization(self, organization):\n\n self._organization = organization", "def organization(self, organization):\n\n self._organization = organization", "def setOrganisation(self, *args):\n return _libsbml.ModelCreator_setOrganisation(self, *args)", "def country_code(self):\n return self.__country_code", "def country_code(self):\n return self.__country_code", "def country(self):\n return self.status.place['country']", "def set_encoding(self, encoding, asset=None):\n self._set_property('pc:encoding', encoding, asset)", "def issuerCountry(self) -> str:\n country = self.issuer.native['country_name']\n return country", "def set_country(request):\n next = request.REQUEST.get('next', None)\n if not next:\n next = request.META.get('HTTP_REFERER', None)\n if not next:\n next = '/'\n response = http.HttpResponseRedirect(next)\n if request.method == 'POST':\n country_code = request.POST.get('country', None)\n if country_code:\n if hasattr(request, 'session'):\n request.session['django_country'] = country_code\n else:\n response.set_cookie(settings.COUNTRY_COOKIE_NAME, country_code)\n\n #return http.HttpResponse(request.POST)\n return response", "def set_language(self, lang):\n\n self.language = lang\n\n self.add_metadata('DC', 'language', lang)", "def conference_country(self):\n return ConferenceReader(self.record).country", "def setOrganization(self, *args):\n return _libsbml.ModelCreator_setOrganization(self, *args)", "def country_id(self):\n return self._country_id", "def country_id(self):\n return self._country_id", "def country_code(self) -> str | None:\n pass", "def country_of_origin(self):\n if self.investor_company:\n return self.investor_company.address_country", "def set_country_for_search(self, country_name_list):\n self.multiple_items_selection_from_kendo_dropdown(self.country_dropdown_locator, country_name_list)\n self.wait_for_ajax_spinner_load()", "def set_year (self, year):\n self.year = year", "def update_customer_country(self, customer_to_change, new_value):\n customer_list = self._customer_repo.get_customer_list()\n for customer in customer_list:\n if customer.get_customer_id() == customer_to_change.get_customer_id():\n customer.set_country(new_value)\n self._customer_repo.overwrite_customer_list(customer_list)", "def language(self, language):\n\n self._language = language", "def language(self, language):\n\n self._language = language", "def language(self, language):\n\n self._language = language", "def language(self, language):\n\n self._language = language", "def language(self, language):\n\n self._language = language", "def company(self, company):\n\n self._company = company", "def company(self, company):\n\n self._company = company", "def country_of_residence(self, country_of_residence):\n\n self._country_of_residence = country_of_residence", "def company(self, company):\n self._company = company", "def industry(self, industry):\n\n self._industry = industry", "def onchange_country(self):\n if self.country_id and self.country_id.code == 'SA':\n self.is_saudi = True\n else:\n self.is_saudi = False", "def user_country(self):\n return 
utils.to_country(lib.sp_session_user_country(self._sp_session))", "def set_encoding(self, encoding):\n\n self._encoding = encoding", "def get_country_code(self):\n\n return self.country_code", "def get_country_code(self):\n\n return self.country_code", "def get_country_code(self):\n\n return self.country_code", "def get_country_code(self):\n\n return self.country_code", "def get_country_code(self):\n\n return self.country_code", "def get_country_code(self):\n\n return self.country_code", "def get_country_code(self):\n\n return self.country_code", "def get_country_code(self):\n\n return self.country_code", "def supported_countries(self, supported_countries):\n\n self._supported_countries = supported_countries", "def nationality(self, nationality):\n\n self._nationality = nationality", "def nationality(self, nationality):\n\n self._nationality = nationality", "def _get_country_code(self, cr, uid, context=None):\n context = context or {}\n user_company = self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id\n return user_company.partner_id and user_company.partner_id.country_id \\\n and user_company.partner_id.country_id.code or 'XX'", "def language(self, language: str):\n\n self._language = language", "def industry(self, industry: str):\n\n self._industry = industry", "def timezone(self, timezone):\n\n self._timezone = timezone", "def timezone(self, timezone):\n\n self._timezone = timezone", "def timezone(self, timezone):\n\n self._timezone = timezone", "def timezone(self, timezone):\n\n self._timezone = timezone", "def timezone(self, timezone):\n\n self._timezone = timezone", "def currency(self, currency):\n\n self._currency = currency", "def currency(self, currency):\n\n self._currency = currency", "def currency(self, currency):\n\n self._currency = currency", "def currency(self, currency):\n\n self._currency = currency", "def country_or_region(self) -> str:\n return pulumi.get(self, \"country_or_region\")", "def currency(self, currency: str):\n\n self._currency = currency", "def language(self, language: str):\n self._language = language", "def payee_city(self, payee_city):\n\n self._payee_city = payee_city", "def get_country(self):\n return self.reference[REF_COUNTRY][REF_VALUE]", "def set_calender_year(self, year):\n self.single_selection_from_kendo_dropdown(self.calender_year_kendo_dropdown_locator, year)", "def birth_city(self, birth_city):\n\n self._birth_city = birth_city", "def org_id(self, org_id):\n\n self._org_id = org_id", "def municipality(self, municipality):\n\n self._municipality = municipality", "def city(self, city):\n\n self._city = city", "def city(self, city):\n\n self._city = city", "def city(self, city):\n\n self._city = city", "def city(self, city):\n\n self._city = city", "def city(self, city):\n\n self._city = city", "def city(self, city):\n\n self._city = city" ]
[ "0.7712507", "0.71704066", "0.6832151", "0.6527378", "0.65113974", "0.62519395", "0.62519395", "0.6071815", "0.6042363", "0.5949575", "0.5870461", "0.56637245", "0.558508", "0.5546206", "0.5546206", "0.55045587", "0.54971576", "0.5478352", "0.5457733", "0.5457733", "0.54065216", "0.53824925", "0.53824925", "0.5196442", "0.51683843", "0.5144928", "0.5144928", "0.5144928", "0.51325303", "0.5095314", "0.5095314", "0.5043203", "0.5029868", "0.5028861", "0.49779913", "0.49673772", "0.49349558", "0.491871", "0.48750743", "0.48750743", "0.48564565", "0.48373836", "0.48273915", "0.48074552", "0.47750455", "0.4774923", "0.4774923", "0.4774923", "0.4774923", "0.4774923", "0.47702268", "0.47702268", "0.47681797", "0.47595134", "0.47513923", "0.4706774", "0.46463957", "0.46461824", "0.46378136", "0.46378136", "0.46378136", "0.46378136", "0.46378136", "0.46378136", "0.46378136", "0.46378136", "0.46329224", "0.46312165", "0.46312165", "0.4621588", "0.4614209", "0.460575", "0.459961", "0.459961", "0.459961", "0.459961", "0.459961", "0.45859557", "0.45859557", "0.45859557", "0.45859557", "0.45687646", "0.4545664", "0.45447645", "0.45369768", "0.4531195", "0.45258322", "0.45168507", "0.4510086", "0.4508326", "0.45001423", "0.45001423", "0.45001423", "0.45001423", "0.45001423", "0.45001423" ]
0.76674914
5
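The positive documents in these rows are bare property setters, and the `swagger_types` / `attribute_map` fragments among the negatives suggest they were cut out of swagger-codegen-style Python client models. A minimal sketch of the full pattern such a setter normally sits in; this `Organization` class is illustrative, not the dataset's own code:

class Organization:
    # Illustrative swagger-codegen-style model; only the country field is shown.
    def __init__(self, country=None):
        self._country = None
        if country is not None:
            self.country = country  # route assignment through the setter

    @property
    def country(self):
        """Gets the country of this Organization."""
        return self._country

    @country.setter
    def country(self, country):
        """Sets the country of this Organization."""
        self._country = country

Routing `__init__` through the setter is what lets the validating variants seen among the negatives reject bad values at construction time as well.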
Sets the person_name of this Organization.
def person_name(self, person_name):
    self._person_name = person_name
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_name(self, PersonName):\r\n self.name = PersonName", "def org_name(self, org_name):\n\n self._org_name = org_name", "def org_name(self, org_name):\n\n self._org_name = org_name", "def set_person(self, person):\n if person.upper() == UNKNOWN.upper():\n self.person_name = UNKNOWN\n self.person_id = 'XX'\n else:\n self.person_name = person.lower()\n self.person_id = person.upper()[:2]", "def contact_name(self, contact_name):\n\n self._contact_name = contact_name", "def contact_name(self, contact_name):\n\n self._contact_name = contact_name", "def set_name(self, name):\n self.name = name # overwrite the existing name with the input name", "def set_name(self, name):\n self.name = name # overwrite the existing name with the input name", "def set_name(self, _name):\n self.name = _name", "def set_name(self):\n if self.first_name and self.last_name:\n name_string = \"%s\" % self.first_name\n name_string += \" %s\" % self.last_name\n self.name = name_string\n\n if self.name:\n if not self.first_name and not self.last_name:\n n = HumanName(self.name)\n self.first_name = n.first\n if n.middle:\n self.first_name = n.first + \" \" + n.middle\n self.last_name = n.last\n if n.suffix:\n self.last_name = n.last + \" \" + n.suffix", "def set_name(self, name):\n self._name = name", "def set_name(self, name):\n\t\tself.name_ = name", "def set_name(self, name):\n self.name = name", "def set_name(self, name):\n self.name = name", "def set_name(self, name):\n self.name = name", "def set_name(self, name):\n self.name = name", "def set_name(self, name):\n self.name = name", "def set_name(self, name):\n self.__name = name", "def set_name(self, name):\n self.__name = name", "def set_name(self, name):\r\n self.__name = name", "def set_name(self, _name):\n self.name = _name\n return self.name", "def SetName(self, name):\n self.name = name", "def set_name(self, name: str):\n self._name = name", "def setname(self, name):\n self.__name = name", "def set_name(self, name):\n assert isinstance(name, str), 'Name must be string'\n self._name = name", "def setName(self, name):\n self._name = name", "def setName(self, name):\n self._name = name", "def team_set_name(self, team_type: TeamType, team_name):\r\n\t\tself._teams[team_type].team_name = team_name\r\n\t\t_logger.info(\"Set the name of team {0} to \\\"{1}\\\".\" \\\r\n\t\t\t.format(team_type, team_name))", "def set_name(self, room_name):\n self.name = room_name", "def setName(self, name): \n\n self._name = name", "def update_name(self, new_name):\r\n self.__name = new_name", "def update_name(self, new_name):\r\n self.__name = new_name", "def set_name(self, name):\r\n self.stream.set_node_name(self.node, name)", "def setName(self, name):\n self.name = str(name)", "def set_name(self, newname=\"\"):\n self.name = newname", "def company_name(self, company_name):\n\n self._company_name = company_name", "def setName(self, name):\n self.name = name", "def setName(self, name):\n self.name = name", "def setName(self, name):\n self.name = name", "def setName(self, name):\n self.name = name", "def set_name(self, name):\n\n\t\tif name is not None and not isinstance(name, str):\n\t\t\traise SDKException(Constants.DATA_TYPE_ERROR, 'KEY: name EXPECTED TYPE: str', None, None)\n\t\t\n\t\tself.__name = name\n\t\tself.__key_modified['name'] = 1", "def person_id(self, person_id):\n\n self._person_id = person_id", "def set_name(self, a_name):\n self.set_parameter('name', a_name)\n return self", "def person_name(self):\n return self._person_name", "def set_name(self,name):\r\n 
self._name = __name", "def set_name(self,name):\n if not isinstance(name,(str)):\n raise TypeError('name must be string')\n else:\n self._name = name", "def team_name(self, team_name):\n\n self._team_name = team_name", "def author_name(self, author_name):\n\n self._author_name = author_name", "def __set_name(self, name):\r\n\t\tself.__name = name\r\n\t\tself._window.chat_panel.place_name = name\r\n\t\tself.encode_message(action=\"NO\", selected_name=name)", "def payee_name(self, payee_name):\n\n self._payee_name = payee_name", "def contact_person(self, contact_person):\n\n self._contact_person = contact_person", "def contact_person(self, contact_person):\n\n self._contact_person = contact_person", "def setName(self, name):\n # type: (str)->None\n self._validator.validate_one('name', VALID_OPTS['name'], name)\n self._ifAttributes['name'] = str(name)", "def set_name(self, name):\n self.options['name'] = name", "def set_name(self, name):\n # XXX: convert name to unicode, if it's a plain string?\n d = analyze_name(name, canonical=0)\n self.data.update(d)", "def name(self, name):\n from_name = self.name\n assert isinstance(name, str)\n self._name = name\n if self.has_parent():\n self._parent_._name_changed(self, from_name)", "def updateName(self,name):\n self.name = name", "def set_name(self, name):\n self.name = name\n self.labels.change_name(name)", "def name(self, name):\n\n self._set_field(\"name\", name.get_json())", "def name(self, name):\n\n self._set_field(\"name\", name)", "def set_name(self, application_name):\r\n self._name = application_name", "def setName(self, newName):\n self.__username = newName", "def conversation_participant_name(self, conversation_participant_name):\n\n self._conversation_participant_name = conversation_participant_name", "def setName(self, name):\n self.name = name\n return self", "def set_name(self, name):\n self.settings[\"name\"] = name", "def name(self, name: str) -> None:\n self._name = name", "def new_name(self,new_name):\n self.name = new_name", "def name(self, name):\n if name is None:\n raise ValueError(\"Invalid value for `name`, must not be `None`\")\n\n self._name = name", "def name(self, name):\n if name is None:\n raise ValueError(\"Invalid value for `name`, must not be `None`\")\n\n self._name = name", "def name(self, name):\n if name is None:\n raise ValueError(\"Invalid value for `name`, must not be `None`\")\n\n self._name = name", "def name(self, name):\n if name is None:\n raise ValueError(\"Invalid value for `name`, must not be `None`\")\n\n self._name = name", "def set_name(self, name):\n return self.set_meta('name', name)", "def account_name(self, account_name):\n\n self._account_name = account_name", "def account_name(self, account_name):\n\n self._account_name = account_name", "def name(self, name):\n self._name = name", "def name(self, name):\n self._name = name", "def name(self, name):\n self._name = name", "def name(self, name):\n self._name = name", "def name(self, name):\n self._name = name", "def name(self, name):\n self._name = name", "def name(self, name):\n self._name = name", "def name(self, name):\n self._name = name", "def name(self, name):\n self._name = name", "def name(self, name):\n self._name = name", "def name(self, name):\n self._name = name", "def name(self, name):\n self._name = name", "def setName(self,value):\n assert value == None or type(value) == str, repr(value)+' is not a valid name'\n self._name = value", "def set_experiment_name(self, experiment_name):\n self.experiment_name = experiment_name", "def 
name(self, name: str):\r\n self._name = name", "def name(self, name: str):\r\n self._name = name", "def name(self, name: str):\r\n self._name = name", "def name(self, name: str):\r\n self._name = name", "def set_doc_name(self, doc, name):\n if not self.doc_name_set:\n doc.name = name\n self.doc_name_set = True\n return True\n else:\n raise CardinalityError('Document::Name')", "def person_phone_number(self, person_phone_number):\n\n self._person_phone_number = person_phone_number", "def setDisplayName( self, name ):\n self._displayName = name\n self._titleFont = None", "def profile_name(self, profile_name):\n\n self._profile_name = profile_name", "def profile_name(self, profile_name):\n\n self._profile_name = profile_name", "def name(self, name):\n\n self._name = name", "def name(self, name):\n\n self._name = name" ]
[ "0.7833753", "0.7134179", "0.7134179", "0.6688187", "0.65317196", "0.65317196", "0.65103024", "0.65103024", "0.65096503", "0.6446856", "0.64192414", "0.6416891", "0.63956535", "0.63956535", "0.63956535", "0.63956535", "0.63956535", "0.63945746", "0.63945746", "0.6344165", "0.6329904", "0.6307423", "0.6302715", "0.6276397", "0.625713", "0.6183842", "0.6183842", "0.615428", "0.6152874", "0.615113", "0.61446863", "0.61446863", "0.6132225", "0.61069745", "0.610379", "0.60774857", "0.60562176", "0.60562176", "0.60562176", "0.60562176", "0.60515237", "0.60468817", "0.6037776", "0.60243183", "0.60162306", "0.60093176", "0.60040605", "0.59605604", "0.59355915", "0.5921068", "0.59184307", "0.59184307", "0.5896455", "0.58889407", "0.5883073", "0.58766586", "0.5866777", "0.58420074", "0.5839159", "0.5810008", "0.5802428", "0.57832694", "0.57802695", "0.576427", "0.57604325", "0.5749192", "0.5742982", "0.5729118", "0.5729118", "0.5729118", "0.5729118", "0.5715754", "0.56999946", "0.56999946", "0.5698096", "0.5698096", "0.5698096", "0.5698096", "0.5698096", "0.5698096", "0.5698096", "0.5698096", "0.5698096", "0.5698096", "0.5698096", "0.5698096", "0.56953746", "0.5681217", "0.56785", "0.56785", "0.56785", "0.56785", "0.5675954", "0.56708336", "0.5664109", "0.56481326", "0.56481326", "0.56452817", "0.56452817" ]
0.8247543
1
Sets the state_province_county of this Organization.
def state_province_county(self, state_province_county):
    self._state_province_county = state_province_county
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def county(self, county):\n\n self._county = county", "def county(self, county):\n\n self._county = county", "def nationality(self, nationality):\n\n self._nationality = nationality", "def nationality(self, nationality):\n\n self._nationality = nationality", "def set_city_count(self, city_count):\n self.city_count = city_count", "def _load_county_geometry() -> geopandas.GeoDataFrame:\n\tfilename = shapefiles_folder / \"cb_2016_us_county_500k\"\n\ttable = read_geometry(filename)\n\n\ttable['regionCode'] = [f\"{i:>02}{j:>03}\" for i, j in zip(table['STATEFP'].values, table['COUNTYFP'].values)]\n\treturn table", "def get_county_estimates_by_state(\n self, api, table, variable, estimate, state\n ):\n state = Division.objects.get(level=self.STATE_LEVEL, code=state)\n county_data = api.get(\n ('NAME', estimate),\n {\n 'for': 'county:*',\n 'in': 'state:{}'.format(state.code)\n },\n year=table.year\n )\n for datum in county_data:\n self.write_estimate(table, variable, estimate, datum)", "def SetRegion(self,stateAbbrev):\n if not stateAbbrev in self.VectorData:\n print \"Error - No Data for %s available\" % stateAbbrev\n print \"Valid state abbreviations are:\", self.StateAbbrevList\n else:\n self.SelectedRegion = stateAbbrev", "def set_City(self, value):\n super(AddressValidationInputSet, self)._set_input('City', value)", "def county(self):\n return self._county", "def ad_rep_city_state(obj):\n return '%s, %s' % (obj.ad_rep.geolocation_object.us_city.name,\n obj.ad_rep.geolocation_object.us_state.abbreviation)", "def get_county_boundaries(self):\n\n county_boundaries_gdf = self.census_boundaries.get_boundaries_gdf(\n \"Colorado\", \"county\"\n )\n\n county_boundaries_gdf = county_boundaries_gdf.set_index(\"GEOID\")\n county_boundaries_gdf[\"STATENAME\"] = county_boundaries_gdf[\"STATEFP\"].apply(\n lambda fip: self.census_boundaries.state_names.get(fip)\n )\n\n return county_boundaries_gdf", "def __build_county_list(self):\n for entry in self._data.values():\n if entry['type'] == 40:\n self._counties[entry['county']] = entry['name']", "def setState(self, uistate):\n if isinstance(uistate, list):\n state = set(uistate)\n elif isinstance(uistate, int):\n state = set([uistate])\n else:\n raise TypeError, \"Argument must be int or list\"\n\n if len(state & self._constrains) > 0:\n self._state = state\n else:\n raise KeyError, \"Attemptinng to set an unknown state\"\n\n self.stateChanged.emit(state)", "def set_state(self, uNodeState, uNewState):\n uNodeState['coincidences'] = uNewState['coincidences']\n uNodeState['temporal_groups'] = uNewState['temporal_groups']\n uNodeState['PCG'] = uNewState['PCG']", "def kyc_state(self, kyc_state):\n\n self._kyc_state = kyc_state", "def get_covid_stats_by_county(state, county):\n url = \"https://corona.lmao.ninja/v2/jhucsse/counties/\" + county\n response = requests.get(url)\n data = response.json()\n counties = []\n for res in data:\n if res[\"province\"] == state:\n county1 = res[\"county\"]\n updatedAt = res[\"updatedAt\"]\n stats = res[\"stats\"]\n confirmed = stats[\"confirmed\"]\n deaths = stats[\"deaths\"]\n recovered = stats[\"recovered\"]\n counties.append(\n CountyStats(state, county1, updatedAt, confirmed, deaths, recovered)\n )\n # return CountyStats(state,county,updatedAt,confirmed,deaths,recovered)\n return counties", "def payee_city(self, payee_city):\n\n self._payee_city = payee_city", "def city(self, city):\n\n self._city = city", "def city(self, city):\n\n self._city = city", "def city(self, city):\n\n self._city = city", "def city(self, 
city):\n\n self._city = city", "def city(self, city):\n\n self._city = city", "def city(self, city):\n\n self._city = city", "def city(self, city):\n\n self._city = city", "def city(self, city):\n\n self._city = city", "def city(self, city):\n\n self._city = city", "def associate(self, county: County, reflexive: bool=True):\n self._counties.add(county)\n if reflexive:\n county.associate(self, reflexive=False)", "def city(self, city):\n self._city = city", "def country_of_taxation(self, country_of_taxation):\n\n self._country_of_taxation = country_of_taxation", "def draw_county(subplot, fips, **kwargs):\n global _color_idx\n if fips not in fips2poly:\n #raise RuntimeError, 'County fips %s not found' % fips\n print 'County fips %s not found' % fips\n return\n if 'color' not in kwargs:\n color = blues[_color_idx]\n _color_idx = (_color_idx+1) % len(blues)\n kwargs['color'] = color\n for polygon in fips2poly[fips]:\n draw_polygon(subplot, polygon, **kwargs)", "def test_county(self):\n counties = self.geographies.find({ 'geoid': '15009' })\n\n self.assertEqual(counties.count(), 1)\n\n county = counties[0]\n\n self.assertEqual(county['sumlev'], config.SUMLEV_COUNTY)\n self.assertEqual(county['metadata']['NAME'], 'Maui County')\n self.assertEqual(county['metadata']['STATE'], '15')\n self.assertEqual(county['metadata']['COUNTY'], '009')\n\n pop_2000 = 128094 \n pop_2010 = 154834\n self._test_totalpop(county, pop_2000, pop_2010)", "def coaching_state(self, coaching_state):\n\n self._coaching_state = coaching_state", "async def setIncident_state(\n self,\n eventID: str,\n incidentNumber: int,\n state: IncidentState,\n author: str,\n ) -> None:", "def birth_city(self, birth_city):\n\n self._birth_city = birth_city", "def district(self, district):\n\n self._district = district", "def district(self, district):\n\n self._district = district", "def address_city(self, address_city):\n if self.local_vars_configuration.client_side_validation and address_city is None: # noqa: E501\n raise ValueError(\"Invalid value for `address_city`, must not be `None`\") # noqa: E501\n if (self.local_vars_configuration.client_side_validation and\n address_city is not None and len(address_city) > 64):\n raise ValueError(\"Invalid value for `address_city`, length must be less than or equal to `64`\") # noqa: E501\n\n self._address_city = address_city", "def test_county_limit_by_state__valid_arg(self):\n response_01 = self.client.get(self.url, {\"state\": \"01\"})\n self.assertEqual(response_01.status_code, 200)\n self.assertEqual(\n \"Autauga County\", response_01.data[\"data\"][0][\"county\"]\n )\n response_AL = self.client.get(self.url, {\"state\": \"AL\"})\n self.assertTrue(response_01.data[\"data\"] == response_AL.data[\"data\"])\n response_DC = self.client.get(self.url, {\"state\": \"DC\"})\n self.assertEqual(len(response_DC.data[\"data\"]), 1)\n response_VA = self.client.get(self.url, {\"state\": \"VA\"})\n self.assertEqual(len(response_VA.data[\"data\"]), 1)\n self.assertEqual(\n \"Accomack County\", response_VA.data[\"data\"][0][\"county\"]\n )", "def industry(self, industry):\n\n self._industry = industry", "def _set_state(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=yc_state_openconfig_qos_interfaces__qos_classifiers_classifier_terms_term_conditions_ipv4_state, is_container='container', yang_name=\"state\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/qos', 
defining_module='openconfig-qos', yang_type='container', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"state must be of a type compatible with container\"\"\",\n 'defined-type': \"container\",\n 'generated-type': \"\"\"YANGDynClass(base=yc_state_openconfig_qos_interfaces__qos_classifiers_classifier_terms_term_conditions_ipv4_state, is_container='container', yang_name=\"state\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='container', is_config=True)\"\"\",\n })\n\n self.__state = t\n if hasattr(self, '_set'):\n self._set()", "def _set_state(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=yc_state_openconfig_qos__qos_classifiers_classifier_terms_term_conditions_ipv4_state, is_container='container', yang_name=\"state\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='container', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"state must be of a type compatible with container\"\"\",\n 'defined-type': \"container\",\n 'generated-type': \"\"\"YANGDynClass(base=yc_state_openconfig_qos__qos_classifiers_classifier_terms_term_conditions_ipv4_state, is_container='container', yang_name=\"state\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='container', is_config=True)\"\"\",\n })\n\n self.__state = t\n if hasattr(self, '_set'):\n self._set()", "def conduit_committee_city(self, conduit_committee_city):\n\n self._conduit_committee_city = conduit_committee_city", "def election_state(self, election_state):\n\n self._election_state = election_state", "def save_state_data(self, county_wise_filepath):\n self.location_data = {\n \"geohashes\": {},\n \"locations\": {}\n }\n\n # load the data\n self.csv_data = pd.read_csv(county_wise_filepath, delimiter=',', index_col=[0], parse_dates=[0])\n \n for _, row in self.csv_data.iterrows():\n # only store unique data\n if self.location_data['locations'].get(row['Combined_Key']) is None:\n location_hash = row['Combined_Key']\n state = row['Province_State']\n county = row['Admin2']\n country = row['Country_Region']\n fips = row['FIPS']\n\n county_geohash = ''\n try:\n lat = ''\n try:\n lat = float(row['Lat'].strip())\n except AttributeError:\n lat = float(row['Lat'])\n\n log = ''\n try:\n log = float(row['Long_'].strip())\n except AttributeError:\n log = float(row['Long_'])\n\n county_geohash = geohash2.encode(lat,log) # Generate Geohash for use with Grafana Plugin\n except Exception as e:\n print(\"Error Getting Geohash: \", e)\n # traceback.print_exc()\n\n if county_geohash != '':\n self.location_data['geohashes'][county_geohash] = location_hash\n else:\n print(\"Empty Geohash! 
(\",location_hash,\")\")\n\n self.location_data['locations'][location_hash] = [state, county, country, county_geohash, fips]", "def set_state(self, uState):\n self.strategy['state_handler'].set_state(self.state, uState)", "def tax_address_region_id(self, tax_address_region_id):\n\n self._tax_address_region_id = tax_address_region_id", "def principal_city(self, principal_city):\n\n self._principal_city = principal_city", "def conduit_committee_state(self, conduit_committee_state):\n\n self._conduit_committee_state = conduit_committee_state", "def test_county_limits_by_state__no_args(self):\n response = self.client.get(self.url, {})\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(\n response.data, {\"detail\": \"Required parameter state is missing\"}\n )", "def set_state(self,state):\n self.__state = state", "def test_county_subdivision(self):\n counties = self.geographies.find({ 'geoid': '1500190630' })\n\n self.assertEqual(counties.count(), 1)\n\n county = counties[0]\n\n self.assertEqual(county['sumlev'], config.SUMLEV_COUNTY_SUBDIVISION)\n self.assertEqual(county['metadata']['NAME'], 'Hilo CCD')\n self.assertEqual(county['metadata']['STATE'], '15')\n self.assertEqual(county['metadata']['COUNTY'], '001')\n\n pop_2000 = 42425 \n pop_2010 = 45714 \n self._test_totalpop(county, pop_2000, pop_2010)", "def setSicxAddress(self, _address: Address) -> None:\n self._sICX_address.set(_address)", "def industry(self, industry: str):\n\n self._industry = industry", "def _set_state(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=yc_state_openconfig_qos_elements__qos_classifiers_classifier_terms_term_conditions_ipv4_state, is_container='container', yang_name=\"state\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='container', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"state must be of a type compatible with container\"\"\",\n 'defined-type': \"container\",\n 'generated-type': \"\"\"YANGDynClass(base=yc_state_openconfig_qos_elements__qos_classifiers_classifier_terms_term_conditions_ipv4_state, is_container='container', yang_name=\"state\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='container', is_config=True)\"\"\",\n })\n\n self.__state = t\n if hasattr(self, '_set'):\n self._set()", "def set_value(self, cart_state: CartState):\n self._state = cart_state", "def set_state(self, state):\n self._env.set_state(state)", "def state_or_province(self) -> Optional[str]:\n return pulumi.get(self, \"state_or_province\")", "def town(self, town):\n\n self._town = town", "def set_State(self, value):\n super(AddressValidationInputSet, self)._set_input('State', value)", "def country(self, country):\n\n self._country = country", "def country(self, country):\n\n self._country = country", "def country(self, country):\n\n self._country = country", "def country(self, country):\n\n self._country = country", "def country(self, country):\n\n self._country = country", "def set_state(self, state: int):\n self.state = state", "def write_estimate(self, table, variable, code, datum):\n try:\n division = Division.objects.get(code='{}{}'.format(\n datum['state'],\n datum['county']\n 
), level=self.COUNTY_LEVEL)\n CensusEstimate.objects.update_or_create(\n division=division,\n variable=variable,\n defaults={\n 'estimate': datum[code] or 0\n }\n )\n except:\n print('ERROR: {}, {}'.format(datum['NAME'], datum['state']))", "def Set3StateValue(self, state):\r\n\r\n if not self._is3State and state == wx.CHK_UNDETERMINED:\r\n raise Exception(\"Set3StateValue can only be used with 3-state checkbox items.\")\r\n\r\n self._checked = state", "async def setIncident_locationConcentricStreet(\n self, eventID: str, incidentNumber: int, streetID: str, author: str\n ) -> None:", "def state_id(self, state_id):\n\n self._state_id = state_id", "def setState( self, cCtrlName, nState ):\n self.setControlModelProperty( cCtrlName, \"State\", nState )", "def country_id(self, country_id):\n\n self._country_id = country_id", "def country_id(self, country_id):\n\n self._country_id = country_id", "def industry_code(self, industry_code):\n\n self._industry_code = industry_code", "def industry_code(self, industry_code):\n\n self._industry_code = industry_code", "def country_code(self, country_code):\n\n self._country_code = country_code", "def setState(self, state):\n assert self.isValidState(state)\n self._state = state", "def location_country(self, location_country):\n\n self._location_country = location_country", "def setState(self, state):\n self.state = state", "def setCheckBoxState( self, cCtrlName, nState ):\n oControl = self.getControl( cCtrlName )\n oControl.setState( nState )", "def country_of_residence(self, country_of_residence):\n\n self._country_of_residence = country_of_residence", "def set_state(self, state_dict):\n for key, target_object in self._map.items():\n self.set_single_state(target_object,\n value=state_dict.get(key, None))", "def candidate_office_district(self, candidate_office_district):\n\n self._candidate_office_district = candidate_office_district", "def state(self, state: str):\n\n self._state = state", "def set_state(self, state):\n self.state = state", "def city_update(self):\n self.city = self.city_finder(self.location.__str__())", "def state(self, state: str) -> None:\n self._state = state", "def get_city_state(self):\n\n city = self.city\n return f'{city.name}, {city.state}'", "def write_state_count(file_path):\n \n with open(file_path, 'w+') as csvfile:\n \n writer = csv.writer(csvfile, delimiter = ',')\n for state in state_abbr:\n writer.writerow([state, airport_count(airport_data, state)])", "def equity(self, equity):\n\n self._equity = equity", "def change_contig(self, state):\n if state == Qt.Checked:\n self.layer.contiguous = True\n else:\n self.layer.contiguous = False", "def update_state_occupancy_count(self, obs):\n obs_hash = self.compute_obs_hash(obs)\n matches_previous_obs = self.prev_obs_hash is not None and obs_hash == self.prev_obs_hash\n self.state_occupancy_counts[obs_hash] += 1\n self.prev_obs_hash = obs_hash\n return {\n \"occupancy_count\": self.state_occupancy_counts[obs_hash],\n \"num_unique_states\": len(self.state_occupancy_counts),\n \"matches_previous_obs\": matches_previous_obs\n }", "def set_state(self, value):\n self.state = value", "def cities_aibnb():\n objetos = storage.all(\"State\")\n c_dit = {}\n s_dit = {}\n for key, values in objetos.items():\n if \"State\" in key:\n s_dit[key] = values\n if \"City\" in key:\n c_dit[key] = values\n return render_template(\"8-cities_by_states.html\", city=c_dit, state=s_dit)", "def set_state(self, value):\n _LOGGER.debug(\"%s: Set state to %d\", self.entity_id, value)\n self._flag_state = 
True\n\n params = {ATTR_ENTITY_ID: self.entity_id}\n if value == 0:\n if self.char_current_state.value != value:\n self.char_current_state.set_value(3)\n self.call_service(DOMAIN, SERVICE_OPEN_COVER, params)\n elif value == 1:\n if self.char_current_state.value != value:\n self.char_current_state.set_value(2)\n self.call_service(DOMAIN, SERVICE_CLOSE_COVER, params)", "def state(self, state):\n if state is None:\n raise ValueError(\"Invalid value for `state`, must not be `None`\") # noqa: E501\n\n self._state = state", "def municipality(self, municipality):\n\n self._municipality = municipality", "def __setstate__(self, state):\n\n if sparse.issparse(state['transition_matrix']):\n state['transition_matrix'] = state['transition_matrix'].toarray()\n\n # Recalculate the cumulative probabilities\n state['cumulative_probabilities'] = np.cumsum(state['transition_matrix'], axis=1)\n\n self.__dict__ = state", "def post_state_city(state_id):\n\n if not request.get_json():\n return make_response(jsonify({\"error\": \"Not a JSON\"}), 400)\n if storage.get(\"State\", state_id) is None:\n abort(404)\n if 'name' not in request.get_json():\n return make_response(jsonify({\"error\": \"Missing name\"}), 400)\n obj = request.get_json()\n obj['state_id'] = state_id\n new_obj = City(**obj)\n storage.new(new_obj)\n storage.save()\n return make_response(new_obj.to_dict(), 201)", "def candidate_office_state(self, candidate_office_state):\n\n self._candidate_office_state = candidate_office_state" ]
[ "0.64520186", "0.64520186", "0.51732033", "0.51732033", "0.51432556", "0.4989287", "0.488448", "0.48415154", "0.47505423", "0.46348393", "0.4630611", "0.46284756", "0.45552388", "0.45378423", "0.4523382", "0.45212606", "0.45178837", "0.44777262", "0.4427605", "0.4427605", "0.4427605", "0.4427605", "0.4427605", "0.4427605", "0.4427605", "0.4427605", "0.4427605", "0.44236192", "0.44225785", "0.43888307", "0.43641698", "0.4363409", "0.43630356", "0.4346722", "0.4335508", "0.4327009", "0.4327009", "0.42955273", "0.4293836", "0.4278917", "0.42713007", "0.42631745", "0.4257909", "0.42269367", "0.4220931", "0.42104593", "0.42091376", "0.41936848", "0.41828942", "0.41826507", "0.41747734", "0.41696298", "0.41684616", "0.41582015", "0.4156738", "0.41556966", "0.41392645", "0.41177872", "0.41133636", "0.4109387", "0.41055435", "0.41055435", "0.41055435", "0.41055435", "0.41055435", "0.41006005", "0.40999553", "0.40977114", "0.40974292", "0.4095329", "0.40948442", "0.4090596", "0.4090596", "0.40899453", "0.40899453", "0.40784186", "0.40731144", "0.40583906", "0.40539864", "0.404882", "0.4029145", "0.4028796", "0.40281776", "0.40259814", "0.4021849", "0.40168142", "0.40147275", "0.401096", "0.39998916", "0.39754426", "0.3973493", "0.39717725", "0.3971292", "0.39699525", "0.39632943", "0.3963133", "0.39629748", "0.3957535", "0.3953656", "0.39490643" ]
0.8909677
0
Sets the email of this Organization.
def email(self, email):
    self._email = email
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setEmail(self, email):\n self.email = email\n return self", "def email(self, email: str):\n\n self._email = email", "def email(self, email):\n if self.local_vars_configuration.client_side_validation and email is None: # noqa: E501\n raise ValueError(\"Invalid value for `email`, must not be `None`\") # noqa: E501\n\n self._email = email", "def email(self, email):\n if email is None:\n raise ValueError(\"Invalid value for `email`, must not be `None`\") # noqa: E501\n\n self._email = email", "def email(self, email):\n if self.local_vars_configuration.client_side_validation and email is None: # noqa: E501\n raise ValueError(\"Invalid value for `email`, must not be `None`\") # noqa: E501\n if (self.local_vars_configuration.client_side_validation and\n email is not None and len(email) > 64):\n raise ValueError(\"Invalid value for `email`, length must be less than or equal to `64`\") # noqa: E501\n\n self._email = email", "def email_address(self, email_address: \"str\"):\n self._attrs[\"emailAddress\"] = email_address", "def setEmail(self, *args):\n return _libsbml.ModelCreator_setEmail(self, *args)", "def contact_email(self, contact_email):\n\n self._contact_email = contact_email", "def contact_email(self, contact_email):\n\n self._contact_email = contact_email", "def email_address(self, email_address):\n\n self._email_address = email_address", "def email_address(self, email_address):\n\n self._email_address = email_address", "def email_address(self, email_address):\n\n self._email_address = email_address", "def email_address(self, email_address: str):\n if email_address is None:\n raise ValueError(\"Invalid value for `email_address`, must not be `None`\") # noqa: E501\n\n self._email_address = email_address", "def business_owner_email(self, business_owner_email):\n\n self._business_owner_email = business_owner_email", "def business_email(self, business_email):\n\n self._business_email = business_email", "def customer_email(self, customer_email):\n self._customer_email = customer_email", "def recipient_email(self, recipient_email):\n\n self._recipient_email = recipient_email", "def admin_email(self, admin_email):\n\n self._admin_email = admin_email", "def change_email(self, new_email):\n self.email = new_email\n print(f\"Email for {self.name} has been updated!\")\n return self.email", "def set_email(net_id, email):\n connection = get_connection()\n cursor = connection.cursor()\n sql_string = \"UPDATE Member SET email='\"+email+\"' WHERE netID='\"+net_id+\"'\"\n cursor.execute(sql_string)\n connection.commit()", "def delivery_email(self, delivery_email):\n\n self._delivery_email = delivery_email", "def technical_owner_email(self, technical_owner_email):\n\n self._technical_owner_email = technical_owner_email", "def set_dispute_contact_email(self, email):\n if email == \"\":\n email = self.random_string_generator(8, string.ascii_lowercase) + \"@\" + self.random_string_generator(5, string.ascii_lowercase) + \".com\"\n self.set_value_into_input_field(self.dispute_contact_email_textbox_locator, email)", "def set_email_notification(self, hit_type, email, event_types=None):\r\n return self._set_notification(hit_type, 'Email', email, event_types)", "def email(self) -> str:\n return self._email", "def _set_campaign_email(self, campaign_email):\n if isinstance(campaign_email, str):\n campaign_email = TrackedCampaignEmail.objects.create(\n campaign=self.campaign, name=campaign_email\n )\n\n campaign_email.save()\n\n self.campaign_email = campaign_email", "def email_address(self) -> str:\n return 
self._email_address", "def employer_email(self, employer_email):\n if employer_email is not None and len(employer_email) > 1024:\n raise ValueError(\"Invalid value for `employer_email`, length must be less than or equal to `1024`\") # noqa: E501\n\n self._employer_email = employer_email", "def client_email(self, client_email):\n\n self._client_email = client_email", "def from_email_address(self, val: EmailAddress):\n self._from_email = val", "def email(self) -> str:\n return pulumi.get(self, \"email\")", "def email(self) -> str:\n return pulumi.get(self, \"email\")", "def email(self) -> str:\n return pulumi.get(self, \"email\")", "def email(self) -> Optional[str]:\n return pulumi.get(self, \"email\")", "def email(self) -> Optional[str]:\n return pulumi.get(self, \"email\")", "def email(self) -> Optional[str]:\n return pulumi.get(self, \"email\")", "def email(self) -> Optional[str]:\n return pulumi.get(self, \"email\")", "def email_address(self) -> \"str\":\n return self._attrs.get(\"emailAddress\")", "def email(self):\n return self._email", "def email(self):\n return self._email", "def email(self):\n return self._email", "def email(self):\n return self._email", "def getEmail(self):\n return self.__email", "def email(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"email\")", "def enter_email(self, email):\n self.selib.input_text(self.locator.email, email)", "def email(self):\n return '{}.{}@email.com'.format(self.fname,self.lname)", "def user_profile_setemail(token, email):\n users = database.get_users()\n for user in users:\n if user['email'] is email:\n raise error.InputError(description=\"This email is already taken\")\n u_id = database.get_current_user(token)\n user = database.get_user_data(u_id)\n user['email'] = email\n database.set_user_data(user)", "def get_email(self):\n return self._email", "def get_email(self):\n return self.email", "def Email(self, default=None):\n return self.data.get('email', default)", "def email(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"email\")", "def email(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"email\")", "def email(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"email\")", "def email(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"email\")", "def email(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"email\")", "def email(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"email\")", "def email(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"email\")", "def getEmail(self):\n return self.email", "def change_email(self, email):\n self.active = False\n self.other_email = email\n self.key = EmailManager.generate_key()\n self.save()\n\n send_change_email(self, email)\n return self.key", "def email(self):\n return sa.Column(sa.Unicode(100), nullable=False, unique=True)", "def email(self):\n billing_contact = self.owner.organization_user.user\n return billing_contact.email", "async def change_email(self, new_email, password):\n data = {\"password\": password, \"emailAddress\": new_email}\n e = await self.request.request(url='https://accountsettings.roblox.com/v1/email', method='post', data=data)\n return e", "def set_email_para(self,\n email_dict):\n\n self.__email_flag__ = 1\n\n # email\n self.__email_host__ = email_dict[\"email_host\"]\n self.__email_receiver_list__ = email_dict[\"email_recv_list\"]\n self.__email_sender__ = email_dict[\"email_sender_mailbox\"]\n self.__email_user__ = email_dict[\"email_username\"]\n 
self.__email_passwd__ = email_dict[\"email_password\"]\n\n print(\"NotifyManager email host=%s\"\n % self.__email_host__)\n print(\"NotifyManager email sender mailbox=%s\"\n % self.__email_sender__)\n print(\"NotifyManager email receiver mailbox=%s\"\n % self.__email_receiver_list__)\n\n return", "def email(self):\n return self._dict.get('email')", "def account_email(self) -> str:\n return pulumi.get(self, \"account_email\")", "def email(self):\n # Look for a primary address\n useremail = UserEmail.query.filter_by(user_id=self.id, primary=True).first()\n if useremail:\n return useremail\n # No primary? Maybe there's one that's not set as primary?\n useremail = UserEmail.query.filter_by(user_id=self.id).first()\n if useremail:\n # XXX: Mark at primary. This may or may not be saved depending on\n # whether the request ended in a database commit.\n useremail.primary = True\n return useremail\n # This user has no email address. Return a blank string instead of None\n # to support the common use case, where the caller will use unicode(user.email)\n # to get the email address as a string.\n return u''", "def email(self):\n return \"{}.{}@company.com\".format(self.first, self.last)", "def contact_email(self) -> str:\n return pulumi.get(self, \"contact_email\")", "def getEmail(self):\n\t\treturn self.Email", "def email(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"email\")", "def primary_email(self, primary_email):\n\n self._primary_email = primary_email", "def primary_contact_email(self, primary_contact_email):\n\n self._primary_contact_email = primary_contact_email", "def from_email(self, from_email):\n\n self._from_email = from_email", "def updateEmail(self, newEmail, password):\n\t\turl = \"https://habitica.com/api/v3/user/auth/update-email\"\n\t\tpayload = {\"newEmail\": newEmail, \"password\": password}\n\t\treturn(putUrl(url, self.credentials, payload))", "def validate_email(self, value):\n if not value:\n raise serializers.ValidationError(\"Email cannot be null\")\n return value", "def get_email(self):\n return self.reference[REF_EMAIL_ADDRESS][REF_VALUE]", "def organization(self, organization):\n\n self._organization = organization", "def organization(self, organization):\n\n self._organization = organization", "def organization(self, organization):\n\n self._organization = organization", "def email(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"email\")", "def _set_user_email_address(self, request):\n if request.method == 'POST':\n form = EmailForm(request.POST)\n if form.is_valid():\n request.session[SESSION_VAR_EMAIL_ADDRESS] = form.cleaned_data['email']\n else:\n return form", "def email(self, email_id):\r\n return emails.Email(self, email_id)", "def org_name(self, org_name):\n\n self._org_name = org_name", "def org_name(self, org_name):\n\n self._org_name = org_name", "def mark_email(args):\n cache.get_default().set_email(args.address, args.is_valid)\n print('{!r:} marked as {:s}valid.'.format(args.address, '' if args.is_valid else 'in'))", "def setOrganization(self, *args):\n return _libsbml.ModelCreator_setOrganization(self, *args)", "def ___str__(self):\n return self.email", "def reply_to_email_address(self, val: EmailAddress):\n self._reply_to = val", "def cc_emails(self, cc_emails):\n\n self._cc_emails = cc_emails", "def email(self):\n return self.__email", "def email(self, instance):\r\n return instance.user.email" ]
[ "0.8130456", "0.7738362", "0.7542091", "0.7482652", "0.7326202", "0.72683007", "0.72421545", "0.71375", "0.71375", "0.686361", "0.686361", "0.686361", "0.66893005", "0.6623506", "0.6539565", "0.65185577", "0.64557123", "0.6449671", "0.6417425", "0.63945293", "0.6345876", "0.632388", "0.63150615", "0.6232352", "0.6229653", "0.6175159", "0.6170344", "0.6163531", "0.61254686", "0.6120732", "0.610426", "0.610426", "0.610426", "0.60518885", "0.60518885", "0.60518885", "0.60518885", "0.6022733", "0.6007345", "0.6007345", "0.6007345", "0.6007345", "0.59319115", "0.5931445", "0.5919633", "0.5915805", "0.5913257", "0.59032184", "0.59028447", "0.5890771", "0.58593607", "0.58593607", "0.58593607", "0.58593607", "0.58593607", "0.58593607", "0.58593607", "0.5832111", "0.5823686", "0.5817584", "0.57910526", "0.5787329", "0.5783023", "0.5779137", "0.5777841", "0.57377654", "0.5720735", "0.57023937", "0.5695292", "0.5695236", "0.5677071", "0.56382996", "0.5610757", "0.5605379", "0.55960023", "0.5571943", "0.55718386", "0.55718386", "0.55718386", "0.55493903", "0.5497613", "0.54603964", "0.54184794", "0.54184794", "0.54079133", "0.53887105", "0.5386264", "0.5380942", "0.5380854", "0.5355054", "0.53323656" ]
0.7769974
9
Sets the phone_number of this Organization.
def phone_number(self, phone_number):
    self._phone_number = phone_number
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def person_phone_number(self, person_phone_number):\n\n self._person_phone_number = person_phone_number", "def phone(self, new_number):\n self._phone.number = new_number", "def mobile_phone_number(self, mobile_phone_number):\n\n self._mobile_phone_number = mobile_phone_number", "def setPhone(self, phone):\n self.phone = phone\n return self", "def business_phone_number(self, business_phone_number):\n\n self._business_phone_number = business_phone_number", "def phone_number_detail(self, phone_number_detail):\n\n self._phone_number_detail = phone_number_detail", "def setPhone(self,phone):\r\n self.phone = phone", "def sms_phone_number(self, sms_phone_number):\n\n self._sms_phone_number = sms_phone_number", "def phone(self, phone):\n\n self._phone = phone", "def phone(self, phone):\n\n self._phone = phone", "def phone(self, phone):\n\n self._phone = phone", "def phone(self, phone):\n\n self._phone = phone", "def phone(self, phone):\n\n self._phone = phone", "def service_phone_number(self, service_phone_number):\n\n self._service_phone_number = service_phone_number", "def number(self, new_phone):\n returned_num = self.get_valid_num(new_phone)\n if returned_num is None:\n raise ValueError\n self._phone = returned_num", "def set_PhoneNumber(self, value):\n super(AddressValidationInputSet, self)._set_input('PhoneNumber', value)", "def phone(self, phone):\n if self.local_vars_configuration.client_side_validation and phone is None: # noqa: E501\n raise ValueError(\"Invalid value for `phone`, must not be `None`\") # noqa: E501\n\n self._phone = phone", "def personal_phone(self, personal_phone):\n\n self._personal_phone = personal_phone", "def telephone(self, telephone):\n\n self._telephone = telephone", "def contact_number(self, contact_number):\n if contact_number is None:\n raise ValueError(\"Invalid value for `contact_number`, must not be `None`\") # noqa: E501\n\n self._contact_number = contact_number", "def home_phone_number(self, home_phone_number):\n\n self._home_phone_number = home_phone_number", "def set_sms_telephone_number(self, telephone_number, email):\n ngo_user_profile = NGOUserProfile.objects.get(user__email=email)\n org_setting = OrganizationSetting.objects.get(organization__org_id=ngo_user_profile.org_id)\n smsc = SMSC(vumi_username=\"smsc\")\n smsc.save()\n outgoing_number = OutgoingNumberSetting(phone_number=telephone_number, smsc=smsc)\n outgoing_number.save()\n org_setting.sms_tel_number = telephone_number\n org_setting.outgoing_number = outgoing_number\n org_setting.save()", "def telephone(self, telephone: str):\n\n self._telephone = telephone", "def phone(self, phone):\n if self.local_vars_configuration.client_side_validation and phone is None: # noqa: E501\n raise ValueError(\"Invalid value for `phone`, must not be `None`\") # noqa: E501\n if (self.local_vars_configuration.client_side_validation and\n phone is not None and len(phone) > 16):\n raise ValueError(\"Invalid value for `phone`, length must be less than or equal to `16`\") # noqa: E501\n\n self._phone = phone", "def business_phone(self, business_phone):\n\n self._business_phone = business_phone", "def set_number(self, number):\n self.number = number", "def organizational_id_number(self, organizational_id_number: str):\n self._organizational_id_number = organizational_id_number", "def phone_number_pricing(self, phone_number_pricing):\n\n self._phone_number_pricing = phone_number_pricing", "def phone_number(self):\n\n return self._phone_number", "def contract_number(self, contract_number):\n\n self._contract_number 
= contract_number", "def build_number(self, build_number):\n\n self._build_number = build_number", "async def change_phone(self, code: int, prefix: int, phone: int, password: str):\n data = {\n \"countryCode\": code,\n \"prefix\": prefix,\n \"phone\": phone,\n \"password\": password\n }\n e = await self.request.request(url='https://accountinformation.roblox.com/v1/phone', method='post', data=data)\n return e", "def service_phone_number_info(self, service_phone_number_info):\n\n self._service_phone_number_info = service_phone_number_info", "def serial_number(self, serial_number: str):\n\n self._serial_number = serial_number", "def fax_number(self, fax_number: str):\n\n self._fax_number = fax_number", "def set_id_number(self, id_number):\n self.id_number = id_number", "def setNumber(self, number):\n prevNumber = self.number\n self.number = number\n return prevNumber", "def primary_contact_phone(self, primary_contact_phone):\n\n self._primary_contact_phone = primary_contact_phone", "def transport_call_sequence_number(self, transport_call_sequence_number: TransportCallSequenceNumber):\n\n self._transport_call_sequence_number = transport_call_sequence_number", "def __init__(self, phone_number):\n self.number = self.clean(phone_number)", "def organization_id(self, organization_id):\n\n self._organization_id = organization_id", "def organization_id(self, organization_id):\n\n self._organization_id = organization_id", "def account_number(self, account_number):\n\n self._account_number = account_number", "def merchant_order_no(self, merchant_order_no):\n\n self._merchant_order_no = merchant_order_no", "def release_phone_number(self, number):\n number = str(number)\n if validatePhoneNumber(number, False) is False:\n raise ValueError(\"Invalid phone number {} - unable to release\".\n format(number))\n\n nat_number = self._parse_number_to_bw_format(str(number), 'US')\n try:\n self.account_client.delete_phone_number(nat_number)\n except BandwidthAccountAPIException as e:\n logging.info(\"Error Deleting phone# {}, Exception: {}\".\n format(number, e))\n raise", "def sequence_number(self, sequence_number):\n # type: (int) -> None\n\n if sequence_number is not None:\n if not isinstance(sequence_number, int):\n raise TypeError(\"Invalid type for `sequence_number`, type has to be `int`\")\n\n self._sequence_number = sequence_number", "def phone_number(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"phone_number\")", "def person_phone_number(self):\n return self._person_phone_number", "def employer_phone(self, employer_phone):\n if employer_phone is not None and len(employer_phone) > 1024:\n raise ValueError(\"Invalid value for `employer_phone`, length must be less than or equal to `1024`\") # noqa: E501\n\n self._employer_phone = employer_phone", "def phone_number(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"phone_number\")", "def phone_number(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"phone_number\")", "def registration_number(self, registration_number):\n\n self._registration_number = registration_number", "def billing_contact(self, billing_contact):\n\n self._billing_contact = billing_contact", "def contact_person(self, contact_person):\n\n self._contact_person = contact_person", "def contact_person(self, contact_person):\n\n self._contact_person = contact_person", "def number(self):\n return str(self._phone)", "def serialno(self, serialno):\n\n self._serialno = serialno", "def contact(self, contact):\n\n self.logger.debug(\"In 'contact' 
setter.\")\n\n self._contact = contact", "def organization(self, organization):\n\n self._organization = organization", "def organization(self, organization):\n\n self._organization = organization", "def organization(self, organization):\n\n self._organization = organization", "def license_number(self, license_number):\n\n self._license_number = license_number", "def phone_policy(self, phone_policy):\n\n self._phone_policy = phone_policy", "def postal_code(self, postal_code):\n\n self._postal_code = postal_code", "def postal_code(self, postal_code):\n\n self._postal_code = postal_code", "def postal_code(self, postal_code):\n\n self._postal_code = postal_code", "def validate_phone_number(self, phone_number):\n if User.objects.filter(phone_number=phone_number).exists():\n raise serializers.ValidationError('Phone Number already registered.')\n return phone_number", "def set_AccountNumber(self, value):\n super(AddressValidationInputSet, self)._set_input('AccountNumber', value)", "def save(self, *args, **kwargs):\n if self.phone is not None and self.phone.strip() == \"\":\n self.phone = None\n if self.phone is not None:\n try:\n phone_number = phonenumbers.parse(self.phone, \"US\")\n self.phone = phonenumbers.format_number(\n phone_number, phonenumbers.PhoneNumberFormat.E164\n )\n except phonenumbers.phonenumberutil.NumberParseException:\n raise ValidationError(\"Invalid phone number (this should have been caught already)\")\n super().save(*args, **kwargs)", "def reference_number(self, reference_number):\n\n self._reference_number = reference_number", "def number_value(self, number_value):\n\n self._number_value = number_value", "def casenumber(self, casenumber) :\n\t\ttry :\n\t\t\tself._casenumber = casenumber\n\t\texcept Exception as e:\n\t\t\traise e", "def set_document_reference_number(self, reference_number):\n self.set_value_into_input_field(self.reference_number_text_field_locator, reference_number)", "def contact_reference(self, contact_reference):\n\n self._contact_reference = contact_reference", "def set_address(self, address):\n self._java_ref.setAddress(address)", "def update_number(name, number, phonebook):\n\n phonebook_data = read_phonebook(phonebook)\n\n if not phonebook_data.get(name):\n raise NoEntryError(\"This entry does not exist! 
\"\n \"(Names are case-sensitive.)\")\n\n else:\n print \"Previous entry:\", name, phonebook_data[name]\n phonebook_data[name] = number\n print \"New entry:\", name, phonebook_data[name]\n save(phonebook_data, phonebook)", "def set_PostalCode(self, value):\n super(AddressValidationInputSet, self)._set_input('PostalCode', value)", "def send(self, phone_number):\n #response = self.client.publish(PhoneNumber=phone_number, Message=self.message)\n return True", "def sequencing_contact(self, sequencing_contact):\n self.logger.debug(\"In 'sequencing_contact' setter.\")\n\n self._sequencing_contact = sequencing_contact", "def to_number(self, to_number):\n self._to_number = to_number", "def __init__(self, number: str) -> object:\n number = re.sub(\" +\", \" \", number).strip()\n nr = re.match((r\"^(?:\\+?1)?\\W?\"\n r\"\\(?([2-9][0-9]{2})\\)?\\W?\"\n r\"([2-9][0-9]{2})\\W?\"\n r\"([0-9]{4})$\"), number)\n if nr is None:\n raise ValueError(\"Not a phonenumber\")\n self.number = ''.join(nr.groups())\n self.area_code = nr.group(1)", "def find_partner_from_phone_number(self, cr, uid, phone_number, context=None):\n _logger.debug('Phone number: %s' % phone_number)\n if context is None:\n context = self.pool.get('res.users').context_get(cr, uid, context=context)\n\n search_args = [\n '|',\n ('phone', '=', phone_number),\n ('mobile', '=', phone_number),\n ]\n address_obj = self.pool.get('res.partner.address')\n address_ids = address_obj.search(cr, uid, search_args, context=context)\n if not address_ids:\n return False, False\n\n address_id = address_ids[0]\n partner_id = address_obj.browse(cr, uid, address_id, context=context).partner_id\n partner_id = partner_id and partner_id.id or False\n\n return partner_id, address_id", "def lot_serial_nbr(self, lot_serial_nbr):\n\n self._lot_serial_nbr = lot_serial_nbr", "def phone(self) -> str:\n return pulumi.get(self, \"phone\")", "def contact(self, contact):\n\n self._contact = contact", "def contact(self, contact):\n\n self._contact = contact", "def update_customer_phone(self, customer_to_change, new_value):\n customer_list = self._customer_repo.get_customer_list()\n for customer in customer_list:\n if customer.get_customer_id() == customer_to_change.get_customer_id():\n customer.set_phone(new_value)\n self._customer_repo.overwrite_customer_list(customer_list)", "def recipient_primary_account_number(self, recipient_primary_account_number):\n if recipient_primary_account_number is None:\n raise ValueError(\"Invalid value for `recipient_primary_account_number`, must not be `None`\")\n\n self._recipient_primary_account_number = recipient_primary_account_number", "def gateway_serial(self, gateway_serial):\n\n self._gateway_serial = gateway_serial", "def set_remit_to_address(self, remit_to_address):\n self.remit_to_address = remit_to_address", "def phone_number_organizer(self, key):\n\t\ttry:\n\t\t\tphone_number = key[u'phone']\n\t\t\tformat_number = '(' + phone_number[0:3] + ') ' + phone_number[3:6] + '-' + phone_number[6:]\n\t\t\treturn format_number\n\t\texcept KeyError:\n\t\t\tprint [u'name'], \"requires manual phone number verification.\"\n\t\t\treturn \"Manual Input\"", "def po_num(self, po_num):\n\n self._po_num = po_num", "def contact_info(self, contact_info):\n\n self._contact_info = contact_info", "def __init__(self, phone):\r\n self.phone = phone", "def __init__(self, phone):\r\n self.phone = phone", "def __init__(self, phone):\r\n self.phone = phone", "def __init__(self, phone):\r\n self.phone = phone" ]
[ "0.7575966", "0.7262518", "0.7254966", "0.7023407", "0.69712853", "0.68257356", "0.6815127", "0.67999643", "0.6795196", "0.6795196", "0.6795196", "0.6795196", "0.6795196", "0.6791341", "0.6722033", "0.67016137", "0.6508162", "0.63192666", "0.6278706", "0.62630147", "0.6222011", "0.61987907", "0.6166454", "0.61315536", "0.6068493", "0.60383105", "0.59987867", "0.5886408", "0.5802734", "0.5785353", "0.57843", "0.57621425", "0.5739203", "0.57215387", "0.5685911", "0.56492877", "0.56285936", "0.5617132", "0.5601909", "0.5578592", "0.55670774", "0.55670774", "0.5531279", "0.54825383", "0.5475589", "0.54627293", "0.54245806", "0.5419211", "0.5369806", "0.5356553", "0.5356553", "0.53351265", "0.5322454", "0.53025997", "0.53025997", "0.5270495", "0.5256134", "0.52395284", "0.5228968", "0.5228968", "0.5228968", "0.51975965", "0.515454", "0.51387036", "0.51387036", "0.51387036", "0.51345104", "0.5127415", "0.5123717", "0.51215386", "0.510878", "0.5087337", "0.50859874", "0.50402856", "0.49697226", "0.49471483", "0.49215242", "0.4897696", "0.4894692", "0.48946038", "0.48925543", "0.4886784", "0.48722804", "0.4869645", "0.48637617", "0.48637617", "0.48567685", "0.4851192", "0.483405", "0.48332033", "0.48289177", "0.48246348", "0.4824493", "0.4823108", "0.4823108", "0.4823108", "0.4823108" ]
0.81304574
1
Sets the address_line1 of this Organization.
def address_line1(self, address_line1):
    self._address_line1 = address_line1
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def address_line1(self, address_line1):\n if address_line1 is None:\n raise ValueError(\n \"Invalid value for `address_line1`, must not be `None`\"\n ) # noqa: E501\n\n self._address_line1 = address_line1", "def principal_address_line1(self, principal_address_line1):\n\n self._principal_address_line1 = principal_address_line1", "def address_1(self, address_1):\n\n self._address_1 = address_1", "def street_line_1(self, street_line_1):\n\n self._street_line_1 = street_line_1", "def address1(self, address1):\n\n self._address1 = address1", "def address_line2(self, address_line2):\n\n self._address_line2 = address_line2", "def address_line2(self, address_line2):\n\n self._address_line2 = address_line2", "def address_line2(self, address_line2):\n\n self._address_line2 = address_line2", "def street_1(self, street_1):\n\n self._street_1 = street_1", "def setLine1(self, value):\n self.lcd.lcd_string(value, self.lcd.LCD_LINE_1)\n self.line1 = value", "def address_line_1(self):\n return \"{} {} {}\".format(\n self.fake.randomize_nb_elements(1000),\n self.fake.last_name(),\n self.fake.random_element(elements=STREET_SUFFIX)\n )", "def street_line_2(self, street_line_2):\n\n self._street_line_2 = street_line_2", "def principal_address_line2(self, principal_address_line2):\n\n self._principal_address_line2 = principal_address_line2", "def set_address(self, address):\n if address == \"\":\n self.address = Address(\"\", \"\", \"\")\n else:\n self.address = address", "def address_1(self):\n return self._address_1", "def address_line3(self, address_line3):\n\n self._address_line3 = address_line3", "def set_address(self, address):\n pass", "def street_address1(self) -> str:\n return pulumi.get(self, \"street_address1\")", "def address_2(self, address_2):\n\n self._address_2 = address_2", "def street_line_1(self):\n return self._street_line_1", "def address2(self, address2):\n\n self._address2 = address2", "def set_address(self, new_address, ):\n self.address.append(new_address)\n self.save()", "def __init__(\n self,\n address_line1=None,\n address_line2=None,\n city=None,\n region=None,\n postal_code=None,\n country=None,\n ): # noqa: E501 # noqa: E501\n\n self._address_line1 = None\n self._address_line2 = None\n self._city = None\n self._region = None\n self._postal_code = None\n self._country = None\n self.discriminator = None\n\n self.address_line1 = address_line1\n if address_line2 is not None:\n self.address_line2 = address_line2\n if city is not None:\n self.city = city\n if region is not None:\n self.region = region\n if postal_code is not None:\n self.postal_code = postal_code\n if country is not None:\n self.country = country", "def address1(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"address1\")", "def _set_address(self, v, load=False):\n try:\n t = YANGDynClass(v,base=[unicode,unicode,unicode,unicode,unicode,], is_leaf=True, yang_name=\"address\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True)\n except (TypeError, ValueError):\n raise ValueError(\"\"\"address must be of a type compatible with base=[unicode,unicode,unicode,unicode,unicode,], is_leaf=True, yang_name=\"address\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True\"\"\")\n self.__address = t\n if hasattr(self, '_set'):\n self._set()", "def address1(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"address1\")", "def address1(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"address1\")", 
"def address(self, new_address):\n house_num, street_name, apt_num = new_address\n self._address.house_num = house_num\n self._address.street_name = street_name\n self._address.apt_num = apt_num", "def set_complete_address(self, complete_address):\n self.complete_address = complete_address", "def get_address(self):\n \n if \"'\" in self.data.get(\"AddressInfo\").get(\"AddressLine1\") :\n self.data.get(\"AddressInfo\").get(\"AddressLine1\").replace(\"'\",\"\")\n\n return self.data.get(\"AddressInfo\").get(\"AddressLine1\")", "def get_address(self):\n\n return \"{}\\n{}\\n{},\\n{},\\n{}\".format(\n self.address_line_1, self.city, self.state, self.postal_code, self.country\n )", "def _get_address(self, address1, address2):\n return f'{address1}\\n{address2}' if address2 else address1", "def address_id(self, address_id):\n\n self._address_id = address_id", "def address_id(self, address_id):\n\n self._address_id = address_id", "def set_address(self, address):\n self._java_ref.setAddress(address)", "def conduit_committee_street1(self, conduit_committee_street1):\n\n self._conduit_committee_street1 = conduit_committee_street1", "def address(self, address):\n if self.local_vars_configuration.client_side_validation and address is None: # noqa: E501\n raise ValueError(\"Invalid value for `address`, must not be `None`\") # noqa: E501\n\n self._address = address", "def format_address(line1, line2, city, state, zipcode):\n\t\n\tstreetlines = line1\n\tcityline = city\n\t\n\tif len(streetlines) > 0 and len(line2) > 0:\n\t\tstreetlines += \"\\n\"\n\t\n\tif len(cityline) > 0 and len(state) > 0:\n\t\tcityline += \", \"\n\t\n\tstreetlines += line2\n\tcityline += state\n\t\n\treturn \"\\n\".join([streetlines, cityline, zipcode])", "def address(self, address):\n if address is None:\n raise ValueError(\"Invalid value for `address`, must not be `None`\")\n\n self._address = address", "def address(self, address: str):\n if address is None:\n raise ValueError(\"Invalid value for `address`, must not be `None`\") # noqa: E501\n\n self._address = address", "def address1(self, instance):\r\n return instance.user.profile.address1", "def address(self, address):\n if address is None:\n raise ValueError(\"Invalid value for `address`, must not be `None`\") # noqa: E501\n\n self._address = address", "def test_13_company_1_address(self):\n with mock_api(company_1_address):\n import_record(self.session, 'magento.res.partner',\n self.backend_id, '9999256')\n cr, uid = self.cr, self.uid\n partner_ids = self.model.search(cr, uid,\n [('magento_id', '=', '9999256'),\n ('backend_id', '=', self.backend_id)])\n self.assertEqual(len(partner_ids), 1)\n partner = self.model.browse(cr, uid, partner_ids[0])\n # Company of the billing address\n self.assertEqual(partner.name, 'Marechal')\n self.assertEqual(partner.type, 'default')\n # all addresses as contacts\n self.assertEqual(len(partner.child_ids), 1)\n self.assertEqual(len(partner.magento_bind_ids), 1)\n self.assertEqual(len(partner.magento_address_bind_ids), 0)\n self.assertEqual(partner.child_ids[0].type, 'invoice',\n msg=\"The billing address should be of \"\n \"type 'invoice'\")", "def setAddress(self, ip_address):\n # type: (str)->None\n\n self._validator.validate_one(\n 'address', VALID_OPTS['address'], ip_address)\n self._ifAttributes['address'] = ip_address", "def address(self, address: object):\n\n self._address = address", "def street_address(self):\n\t\tif self.address2:\n\t\t\treturn '{}, {}'.format(self.address, self.address2)\n\t\treturn self.address", "def address(self, 
address: str):\n\n self._address = address", "def address(self, address):\n\n self._address = address", "def address(self, address):\n\n self._address = address", "def address(self, address):\n\n self._address = address", "def address(self, address):\n\n self._address = address", "def address(self, address):\n\n self._address = address", "def address(self, address):\n\n self._address = address", "def address(self, address):\n\n self._address = address", "def address(self, address):\n\n self._address = address", "def address_type(self, address_type):\n\n self._address_type = address_type", "def address_nr(self, address_nr):\n if self.local_vars_configuration.client_side_validation and address_nr is None: # noqa: E501\n raise ValueError(\"Invalid value for `address_nr`, must not be `None`\") # noqa: E501\n if (self.local_vars_configuration.client_side_validation and\n address_nr is not None and len(address_nr) > 10):\n raise ValueError(\"Invalid value for `address_nr`, length must be less than or equal to `10`\") # noqa: E501\n\n self._address_nr = address_nr", "def test_11_individual_1_address(self):\n with mock_api(individual_1_address):\n import_record(self.session, 'magento.res.partner',\n self.backend_id, '9999254')\n cr, uid = self.cr, self.uid\n partner_ids = self.model.search(cr, uid,\n [('magento_id', '=', '9999254'),\n ('backend_id', '=', self.backend_id)])\n self.assertEqual(len(partner_ids), 1)\n partner = self.model.browse(cr, uid, partner_ids[0])\n # Name of the billing address\n self.assertEqual(partner.name, 'Ferreira Margaux')\n self.assertEqual(partner.type, 'default')\n # billing address merged with the partner\n self.assertEqual(len(partner.child_ids), 0)\n self.assertEqual(len(partner.magento_bind_ids), 1)\n self.assertEqual(len(partner.magento_address_bind_ids), 1)\n address_bind = partner.magento_address_bind_ids[0]\n self.assertEqual(address_bind.magento_id, '9999253',\n msg=\"The merged address should be the \"\n \"billing address\")", "def street_2(self, street_2):\n\n self._street_2 = street_2", "def set_startline(self, line_no):\n self.set_attribute(\"startline\", line_no)", "def set_AccountNumber(self, value):\n super(AddressValidationInputSet, self)._set_input('AccountNumber', value)", "def setLine2(self, value):\n self.lcd.lcd_string(value, self.lcd.LCD_LINE_2)\n self.line2 = value", "def set_amf_addr(self, addr: str) -> None:\n self.config[\"amfConfigs\"][0][\"address\"] = addr", "def set_ddram_address(self, address=0, line=0):\n # instruction bit\n data = [True]\n # select wich line (0/1)\n data.append(bool(line))\n # integer address to binary list\n address_bin = list(map(bool, list(map(int, bin(address)[2:].zfill(6)))))\n data.extend(address_bin)\n self.instruction(data)", "def set_remit_to_address(self, remit_to_address):\n self.remit_to_address = remit_to_address", "def amended_address(self, amended_address):\n\n self._amended_address = amended_address", "def set_Street(self, value):\n super(AddressValidationInputSet, self)._set_input('Street', value)", "def _compute_adress(self):\r\n\t\tfor leads in self:\r\n\t\t\tleads.address = leads.street + \" \" + leads.street2", "def setAddressSource(self, address_source):\n # type: (str)->None\n\n self._validator.validate_one(\n 'source', VALID_OPTS['source'], address_source)\n self._ifAttributes['source'] = address_source", "def add_new_address(self, address: dict) -> None:\n self.new_address_button.click()\n\n self.address_form.select_location(address['address'])\n 
self.address_form.label_input.fill(address['name'])\n\n self.address_form.save_button.click()", "def add_address(self, address_list=None):\n sql = u' INSERT INTO address_TBL ' \\\n u'(line_1, line_2, city, county, country, billing_address, main_address, client_company_ID) ' \\\n u'VALUES (%s, %s, %s, %s, %s, %s, %s, %s);'\n if address_list is None:\n address_list = self.data_set['address']\n\n c, conn = connection(self.schema)\n\n try:\n for address in address_list:\n if address['line_2'] is None:\n address['line_2'] = 'NULL'\n if address['billing'] is None:\n address['billing'] = 0\n if address['default'] is None:\n address['default'] = 0\n\n data = (address['line_1'],\n address['line_2'],\n address['city'],\n address['county'],\n address['country'],\n address['billing'],\n address['default'],\n self.id)\n\n c.execute(sql, data)\n finally:\n conn_close(c, conn)", "def test_set_address(self):\n s1 = System()\n s1.set_address(\"101 St James Rd\")\n self.assertEqual(s1.get_address(), \"101 St James Rd\")", "def address_code(self, address_code):\n if self.local_vars_configuration.client_side_validation and address_code is None: # noqa: E501\n raise ValueError(\"Invalid value for `address_code`, must not be `None`\") # noqa: E501\n if (self.local_vars_configuration.client_side_validation and\n address_code is not None and len(address_code) > 10):\n raise ValueError(\"Invalid value for `address_code`, length must be less than or equal to `10`\") # noqa: E501\n\n self._address_code = address_code", "def set_address(self,address): \n new_address = self._format_address(address)\n self.rs485.write_command('#00?8 {}'.format(new_address))\n self.rs485.clear_buffers()\n time.sleep(0.2)", "def as_address(self, base_clazz):\n address = base_clazz()\n address.name = self.name\n address.line1 = self.line1\n address.line2 = self.line2\n address.city = self.city\n address.region = self.region\n address.country = self.country\n address.post_code = self.post_code\n address.phone = self.phone\n return address", "def add_address(self, **kwargs):\n addressitem = AddressItem(**kwargs)\n self.addresses.append(addressitem)\n # TODO check uniqueness of email addresses", "def set_address(self, address, defer=False):\n\n # The MAXUSB chip handles this for us, so we don't need to do anything.\n pass", "def addresses(self, addresses: \"List[str]\"):\n self._attrs[\"addresses\"] = addresses", "def addresses(self, addresses: \"List[str]\"):\n self._attrs[\"addresses\"] = addresses", "def addresses(self, addresses: \"List[str]\"):\n self._attrs[\"addresses\"] = addresses", "def setP1(self, p1):\n self.points[0] = p1", "def add_address(self, address_item):\r\n self.addresses_to_validate.append(address_item)", "def set_line_alignment(self, alignment, line_no):\n self.change_alignment_for_a_line(alignment, line_no)\n return self", "def street_line_2(self):\n return self._street_line_2", "def address_2(self):\n return self._address_2", "def set_end_address(address):\n try:\n command(address + \"L\")\n except EppException as e:\n print 'No EPROM type is selected, or value is lower than start address.', e.value", "def set_start_address(address):\n try:\n command(address + \"P\")\n except EppException as e:\n print 'No EPROM type is selected, or value is higher than end address.', e.value", "def set_line_to_initial_position(self, line_no):\n self.lines[1][line_no] = None\n self[line_no].move_to(self.get_center() + self.lines_initial_positions[line_no])\n return self", "def create_or_update_address(address, customer):\n\tname = 
frappe.db.get_value('Address', { 'entity_id': address.get('entity_id') })\n\tif not name:\n\t\taddr = frappe.new_doc('Address')\n\t\taddr.address_title = \"{} {} {}\".format(\n\t\t\taddress.get(\"firstname\"),\n\t\t\taddress.get(\"lastname\"),\n\t\t\taddress.get(\"entity_id\")\n\t\t)\n\telse:\n\t\taddr = frappe.get_doc(\"Address\", name)\n\n\taddr.address_type = get_address_type(address).get('type')\n\taddr.entity_id = address.get('entity_id')\n\taddr.address_line1 = address.get('street')[0]\n\taddr.address_line2 = address.get('street')[1] if len(address.get('street')) > 1 else \"\"\n\taddr.city = address.get('city')\n\taddr.country = frappe.db.get_value('Country', { 'code': address.get('country_id') })\n\taddr.state = address.get('region')\n\taddr.pincode = address.get('postcode')\n\taddr.phone = address.get('telephone') or '00000'\n\taddr.fax = address.get('fax')\n\taddr.customer = customer\n\taddr.customer_name = address.get('firstname')+' '+address.get('lastname')\n\taddr.is_primary_address = get_address_type(address).get('is_primary_address')\n\taddr.is_shipping_address = get_address_type(address).get('is_shipping_address')\n\n\taddr.save(ignore_permissions=True)", "def address2(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"address2\")", "def host_alias1(self, host_alias1):\n\n self._host_alias1 = host_alias1", "def address_lines(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"address_lines\")", "def address2(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"address2\")", "def address2(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"address2\")", "def build_address(record):\n pass", "def address_street(self, address_street):\n if self.local_vars_configuration.client_side_validation and address_street is None: # noqa: E501\n raise ValueError(\"Invalid value for `address_street`, must not be `None`\") # noqa: E501\n if (self.local_vars_configuration.client_side_validation and\n address_street is not None and len(address_street) > 128):\n raise ValueError(\"Invalid value for `address_street`, length must be less than or equal to `128`\") # noqa: E501\n\n self._address_street = address_street", "def custom_info1(self, custom_info1):\n\n self._custom_info1 = custom_info1", "def custom_info1(self, custom_info1):\n\n self._custom_info1 = custom_info1", "def port1(self, port1):\n\n self._port1 = port1" ]
[ "0.8497842", "0.7957488", "0.7876892", "0.7850841", "0.77656263", "0.70708996", "0.70708996", "0.70708996", "0.689766", "0.6551664", "0.65369886", "0.64015824", "0.6390496", "0.6295419", "0.6270153", "0.6252031", "0.62348896", "0.6188278", "0.61339515", "0.6105675", "0.60872704", "0.6034428", "0.597977", "0.5942783", "0.59326655", "0.5903573", "0.5903573", "0.5852497", "0.58352596", "0.5798821", "0.5717868", "0.5683813", "0.56804323", "0.56804323", "0.56673557", "0.5641375", "0.5637733", "0.56246847", "0.5622243", "0.5548126", "0.5536705", "0.55098635", "0.5492607", "0.54907215", "0.5462714", "0.545734", "0.54324657", "0.5416574", "0.5416574", "0.5416574", "0.5416574", "0.5416574", "0.5416574", "0.5416574", "0.5416574", "0.54110503", "0.5402178", "0.53957695", "0.53886616", "0.5338394", "0.5328947", "0.53231233", "0.53186715", "0.5285037", "0.52701", "0.524083", "0.5230879", "0.5228754", "0.5207067", "0.5191488", "0.5191078", "0.51746786", "0.5163739", "0.51601154", "0.5159459", "0.515513", "0.51531655", "0.51405996", "0.51405996", "0.51405996", "0.51342505", "0.5128869", "0.5105931", "0.5092636", "0.50847757", "0.5080593", "0.5017827", "0.50169975", "0.49632105", "0.49586758", "0.49511784", "0.49480513", "0.49298653", "0.49298653", "0.49261674", "0.491673", "0.49114507", "0.49114507", "0.49098462" ]
0.8811455
0
Sets the address_line2 of this Organization.
def address_line2(self, address_line2):
    self._address_line2 = address_line2
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def address_2(self, address_2):\n\n self._address_2 = address_2", "def principal_address_line2(self, principal_address_line2):\n\n self._principal_address_line2 = principal_address_line2", "def street_line_2(self, street_line_2):\n\n self._street_line_2 = street_line_2", "def address2(self, address2):\n\n self._address2 = address2", "def address_line1(self, address_line1):\n\n self._address_line1 = address_line1", "def address_line1(self, address_line1):\n\n self._address_line1 = address_line1", "def street_2(self, street_2):\n\n self._street_2 = street_2", "def setLine2(self, value):\n self.lcd.lcd_string(value, self.lcd.LCD_LINE_2)\n self.line2 = value", "def address_line1(self, address_line1):\n if address_line1 is None:\n raise ValueError(\n \"Invalid value for `address_line1`, must not be `None`\"\n ) # noqa: E501\n\n self._address_line1 = address_line1", "def address_2(self):\n return self._address_2", "def street_line_1(self, street_line_1):\n\n self._street_line_1 = street_line_1", "def address1(self, address1):\n\n self._address1 = address1", "def principal_address_line1(self, principal_address_line1):\n\n self._principal_address_line1 = principal_address_line1", "def address_1(self, address_1):\n\n self._address_1 = address_1", "def address_line3(self, address_line3):\n\n self._address_line3 = address_line3", "def street_line_2(self):\n return self._street_line_2", "def set_address(self, new_address, ):\n self.address.append(new_address)\n self.save()", "def address2(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"address2\")", "def address2(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"address2\")", "def setL2(self, l2value):\n return self._set(l2=l2value)", "def address2(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"address2\")", "def _get_address(self, address1, address2):\n return f'{address1}\\n{address2}' if address2 else address1", "def address2(self, instance):\r\n return instance.user.profile.address2", "def street_address2(self) -> Optional[str]:\n return pulumi.get(self, \"street_address2\")", "def conduit_committee_street2(self, conduit_committee_street2):\n\n self._conduit_committee_street2 = conduit_committee_street2", "def address(self, new_address):\n house_num, street_name, apt_num = new_address\n self._address.house_num = house_num\n self._address.street_name = street_name\n self._address.apt_num = apt_num", "def set_address(self, address):\n pass", "def setP2(self, p2):\n self.points[1] = p2", "def format_address(line1, line2, city, state, zipcode):\n\t\n\tstreetlines = line1\n\tcityline = city\n\t\n\tif len(streetlines) > 0 and len(line2) > 0:\n\t\tstreetlines += \"\\n\"\n\t\n\tif len(cityline) > 0 and len(state) > 0:\n\t\tcityline += \", \"\n\t\n\tstreetlines += line2\n\tcityline += state\n\t\n\treturn \"\\n\".join([streetlines, cityline, zipcode])", "def set_complete_address(self, complete_address):\n self.complete_address = complete_address", "def __init__(\n self,\n address_line1=None,\n address_line2=None,\n city=None,\n region=None,\n postal_code=None,\n country=None,\n ): # noqa: E501 # noqa: E501\n\n self._address_line1 = None\n self._address_line2 = None\n self._city = None\n self._region = None\n self._postal_code = None\n self._country = None\n self.discriminator = None\n\n self.address_line1 = address_line1\n if address_line2 is not None:\n self.address_line2 = address_line2\n if city is not None:\n self.city = city\n if region is not None:\n self.region = region\n if postal_code is not 
None:\n self.postal_code = postal_code\n if country is not None:\n self.country = country", "def set_address(self, address):\n if address == \"\":\n self.address = Address(\"\", \"\", \"\")\n else:\n self.address = address", "def street_address(self):\n\t\tif self.address2:\n\t\t\treturn '{}, {}'.format(self.address, self.address2)\n\t\treturn self.address", "def custom_info2(self, custom_info2):\n\n self._custom_info2 = custom_info2", "def custom_info2(self, custom_info2):\n\n self._custom_info2 = custom_info2", "def set_remit_to_address(self, remit_to_address):\n self.remit_to_address = remit_to_address", "def get_address(self):\n \n if \"'\" in self.data.get(\"AddressInfo\").get(\"AddressLine1\") :\n self.data.get(\"AddressInfo\").get(\"AddressLine1\").replace(\"'\",\"\")\n\n return self.data.get(\"AddressInfo\").get(\"AddressLine1\")", "def port2(self, port2):\n\n self._port2 = port2", "def address_line_1(self):\n return \"{} {} {}\".format(\n self.fake.randomize_nb_elements(1000),\n self.fake.last_name(),\n self.fake.random_element(elements=STREET_SUFFIX)\n )", "def __set_i2c_address(self, address):\n fcntl.ioctl(self.file_read, self.I2C_SLAVE, address)\n fcntl.ioctl(self.file_write, self.I2C_SLAVE, address)", "def _set_address(self, v, load=False):\n try:\n t = YANGDynClass(v,base=[unicode,unicode,unicode,unicode,unicode,], is_leaf=True, yang_name=\"address\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True)\n except (TypeError, ValueError):\n raise ValueError(\"\"\"address must be of a type compatible with base=[unicode,unicode,unicode,unicode,unicode,], is_leaf=True, yang_name=\"address\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True\"\"\")\n self.__address = t\n if hasattr(self, '_set'):\n self._set()", "def test_parse_consumer_address_2_field(self):\n fields = {'Consumer Address 2': {'offset': 111,\n 'length': 30}}\n p = top.Parser(fields=fields)\n received = p.parse_line(self._line)\n expected = {'Consumer Address 2': 'Stanmore'}\n msg = 'Consumer Address 2 field parse incorrect'\n self.assertEqual(received, expected, msg)", "def street_1(self, street_1):\n\n self._street_1 = street_1", "def host_alias2(self, host_alias2):\n\n self._host_alias2 = host_alias2", "def set_svpn2(self, svpn2):\n self.svpn2 = svpn2", "def get_address(self):\n\n return \"{}\\n{}\\n{},\\n{},\\n{}\".format(\n self.address_line_1, self.city, self.state, self.postal_code, self.country\n )", "def set_end_address(address):\n try:\n command(address + \"L\")\n except EppException as e:\n print 'No EPROM type is selected, or value is lower than start address.', e.value", "def test_12_individual_2_addresses(self):\n with mock_api(individual_2_addresses):\n import_record(self.session, 'magento.res.partner',\n self.backend_id, '9999255')\n cr, uid = self.cr, self.uid\n partner_ids = self.model.search(cr, uid,\n [('magento_id', '=', '9999255'),\n ('backend_id', '=', self.backend_id)])\n self.assertEqual(len(partner_ids), 1)\n partner = self.model.browse(cr, uid, partner_ids[0])\n # Name of the billing address\n self.assertEqual(partner.name, u'Mace Sébastien')\n self.assertEqual(partner.type, 'default')\n # billing address merged with the partner,\n # second address as a contact\n self.assertEqual(len(partner.child_ids), 1)\n self.assertEqual(len(partner.magento_bind_ids), 1)\n self.assertEqual(len(partner.magento_address_bind_ids), 1)\n address_bind = partner.magento_address_bind_ids[0]\n 
self.assertEqual(address_bind.magento_id, '9999254',\n msg=\"The merged address should be the \"\n \"billing address\")\n self.assertEqual(partner.child_ids[0].type, 'delivery',\n msg=\"The shipping address should be of \"\n \"type 'delivery'\")", "def setLine1(self, value):\n self.lcd.lcd_string(value, self.lcd.LCD_LINE_1)\n self.line1 = value", "def amended_address(self, amended_address):\n\n self._amended_address = amended_address", "def test_14_company_2_addresses(self):\n with mock_api(company_2_addresses):\n import_record(self.session, 'magento.res.partner',\n self.backend_id, '9999257')\n cr, uid = self.cr, self.uid\n partner_ids = self.model.search(cr, uid,\n [('magento_id', '=', '9999257'),\n ('backend_id', '=', self.backend_id)])\n self.assertEqual(len(partner_ids), 1)\n partner = self.model.browse(cr, uid, partner_ids[0])\n # Company of the billing address\n self.assertEqual(partner.name, 'Bertin')\n self.assertEqual(partner.type, 'default')\n # all addresses as contacts\n self.assertEqual(len(partner.child_ids), 2)\n self.assertEqual(len(partner.magento_bind_ids), 1)\n self.assertEqual(len(partner.magento_address_bind_ids), 0)\n def get_address(magento_id):\n address_ids = self.address_model.search(\n cr, uid,\n [('magento_id', '=', magento_id),\n ('backend_id', '=', self.backend_id)])\n self.assertEqual(len(address_ids), 1)\n return self.address_model.browse(cr, uid, address_ids[0])\n # billing address\n address = get_address('9999257')\n self.assertEqual(address.type, 'invoice',\n msg=\"The billing address should be of \"\n \"type 'invoice'\")\n # shipping address\n address = get_address('9999258')\n self.assertEqual(address.type, 'delivery',\n msg=\"The shipping address should be of \"\n \"type 'delivery'\")", "def update_address(cls, address_data):\n address_instance = cls.objects.get(email=address_data['customer']['email'])\n address_data = address_data.get('addresses')\n for field_name, values in address_data:\n setattr(address_instance, field_name, values)\n address_instance.save()\n return address_instance.save()", "def set_endline(self, line_no):\n self.set_attribute(\"endline\", line_no)", "def set_obj2(self, obj1, obj2):\n self.obj1_obj2[obj1] = obj2", "def demand2(self, demand2):\n\n self._demand2 = demand2", "def on_GenerateRandomAccountAddress_2_clicked(self):\n # TODO: not implemented yet\n raise NotImplementedError", "def ref2(self, ref2):\n if ref2 is not None and len(ref2) > 250:\n raise ValueError(\"Invalid value for `ref2`, length must be less than or equal to `250`\")\n\n self._ref2 = ref2", "def _compute_adress(self):\r\n\t\tfor leads in self:\r\n\t\t\tleads.address = leads.street + \" \" + leads.street2", "def address_type(self, address_type):\n\n self._address_type = address_type", "def trilha2(self, trilha2):\n self._trilha2 = trilha2", "def set_address(self, address):\n self._java_ref.setAddress(address)", "def address_id(self, address_id):\n\n self._address_id = address_id", "def address_id(self, address_id):\n\n self._address_id = address_id", "def address(self, address: object):\n\n self._address = address", "def test_set_address(self):\n s1 = System()\n s1.set_address(\"101 St James Rd\")\n self.assertEqual(s1.get_address(), \"101 St James Rd\")", "def set_amf_addr(self, addr: str) -> None:\n self.config[\"amfConfigs\"][0][\"address\"] = addr", "def address_1(self):\n return self._address_1", "def partition2(self, partition2):\n\n self._partition2 = partition2", "def address(self, address):\n\n self._address = address", "def address(self, 
address):\n\n self._address = address", "def address(self, address):\n\n self._address = address", "def address(self, address):\n\n self._address = address", "def address(self, address):\n\n self._address = address", "def address(self, address):\n\n self._address = address", "def address(self, address):\n\n self._address = address", "def address(self, address):\n\n self._address = address", "def address(self, address):\n if self.local_vars_configuration.client_side_validation and address is None: # noqa: E501\n raise ValueError(\"Invalid value for `address`, must not be `None`\") # noqa: E501\n\n self._address = address", "def add_address(self, address_item):\r\n self.addresses_to_validate.append(address_item)", "def set_line_end(self, line_nr):\n self._line_end = line_nr", "def address(self, address):\n if address is None:\n raise ValueError(\"Invalid value for `address`, must not be `None`\")\n\n self._address = address", "def address1(self, instance):\r\n return instance.user.profile.address1", "def acumula_n2(self, n2, rodada):\n self.n2[rodada].append(n2)", "def test_update_shipping_address(self):\n self.cim.update_shipping_address(\n customer_profile_id=u\"222\",\n customer_address_id=u\"444\",\n first_name=u\"pippo\",\n phone=u\"415-415-4154\"\n )", "def address(self, address: str):\n\n self._address = address", "def set_address(self, address, defer=False):\n\n # The MAXUSB chip handles this for us, so we don't need to do anything.\n pass", "def as_address(self, base_clazz):\n address = base_clazz()\n address.name = self.name\n address.line1 = self.line1\n address.line2 = self.line2\n address.city = self.city\n address.region = self.region\n address.country = self.country\n address.post_code = self.post_code\n address.phone = self.phone\n return address", "def set_address(self,address): \n new_address = self._format_address(address)\n self.rs485.write_command('#00?8 {}'.format(new_address))\n self.rs485.clear_buffers()\n time.sleep(0.2)", "def test_13_company_1_address(self):\n with mock_api(company_1_address):\n import_record(self.session, 'magento.res.partner',\n self.backend_id, '9999256')\n cr, uid = self.cr, self.uid\n partner_ids = self.model.search(cr, uid,\n [('magento_id', '=', '9999256'),\n ('backend_id', '=', self.backend_id)])\n self.assertEqual(len(partner_ids), 1)\n partner = self.model.browse(cr, uid, partner_ids[0])\n # Company of the billing address\n self.assertEqual(partner.name, 'Marechal')\n self.assertEqual(partner.type, 'default')\n # all addresses as contacts\n self.assertEqual(len(partner.child_ids), 1)\n self.assertEqual(len(partner.magento_bind_ids), 1)\n self.assertEqual(len(partner.magento_address_bind_ids), 0)\n self.assertEqual(partner.child_ids[0].type, 'invoice',\n msg=\"The billing address should be of \"\n \"type 'invoice'\")", "def set_end(self, end_line):\n self.__end_line = end_line", "def generate_address(self, private_key):\n words, seed = CryptoUtils.create_mnemonic_key(private_key)\n private_key_to_hex = CryptoUtils.generate_private_key(seed)\n public_key = CryptoUtils.generate_public_key(private_key_to_hex)\n public_key_compressed = CryptoUtils.compress_public_key(public_key)\n address = CryptoUtils.generate_address(public_key)\n self.addresses[address] = {\n 'owner_key': private_key,\n 'private_key': private_key_to_hex,\n 'mnemonic_words': words,\n 'public_key': public_key,\n 'public_key_compressed': public_key_compressed,\n 'balance': 0\n }", "def address1(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"address1\")", 
"def add_address(self, address_list=None):\n sql = u' INSERT INTO address_TBL ' \\\n u'(line_1, line_2, city, county, country, billing_address, main_address, client_company_ID) ' \\\n u'VALUES (%s, %s, %s, %s, %s, %s, %s, %s);'\n if address_list is None:\n address_list = self.data_set['address']\n\n c, conn = connection(self.schema)\n\n try:\n for address in address_list:\n if address['line_2'] is None:\n address['line_2'] = 'NULL'\n if address['billing'] is None:\n address['billing'] = 0\n if address['default'] is None:\n address['default'] = 0\n\n data = (address['line_1'],\n address['line_2'],\n address['city'],\n address['county'],\n address['country'],\n address['billing'],\n address['default'],\n self.id)\n\n c.execute(sql, data)\n finally:\n conn_close(c, conn)", "def street_line_1(self):\n return self._street_line_1", "def setAddress(self, ip_address):\n # type: (str)->None\n\n self._validator.validate_one(\n 'address', VALID_OPTS['address'], ip_address)\n self._ifAttributes['address'] = ip_address", "def AddOrganizationAddressGroupToParser(release_track, parser):\n AddAddressGroupToParser(parser, release_track,\n 'network_security.orgAddressGroup')", "def set_AccountNumber(self, value):\n super(AddressValidationInputSet, self)._set_input('AccountNumber', value)", "def set_second_incident_node(self, second_incident_node):\n # overwrite the existing second incident node with the input second incident Node object\n self.second_incident_node = second_incident_node", "def add_address(self, **kwargs):\n addressitem = AddressItem(**kwargs)\n self.addresses.append(addressitem)\n # TODO check uniqueness of email addresses" ]
[ "0.79640037", "0.7917009", "0.7910908", "0.78585726", "0.71762484", "0.71762484", "0.7049437", "0.6794425", "0.6462342", "0.64471567", "0.64098936", "0.6366884", "0.63469005", "0.6327694", "0.62689376", "0.62077683", "0.60219824", "0.60010564", "0.60010564", "0.59956384", "0.5990719", "0.5970087", "0.59313774", "0.58521384", "0.5850473", "0.5740258", "0.57362646", "0.5645953", "0.5562085", "0.5508507", "0.5485232", "0.5455933", "0.54181486", "0.54078853", "0.54078853", "0.54056764", "0.53925234", "0.5372098", "0.533942", "0.5335309", "0.5328613", "0.53215194", "0.5264947", "0.52544105", "0.52275807", "0.52170694", "0.5216709", "0.51965874", "0.51699555", "0.51492137", "0.5141423", "0.51366246", "0.5135503", "0.5058971", "0.50522476", "0.50451976", "0.5031204", "0.5015734", "0.5009751", "0.49903744", "0.49858367", "0.49830428", "0.49830428", "0.49795157", "0.49736476", "0.49620783", "0.49417138", "0.49093577", "0.4893962", "0.4893962", "0.4893962", "0.4893962", "0.4893962", "0.4893962", "0.4893962", "0.4893962", "0.48920944", "0.4888389", "0.48597726", "0.48581636", "0.48511422", "0.4847381", "0.48395118", "0.48190707", "0.4818633", "0.4818063", "0.4804153", "0.48029888", "0.4785562", "0.47819093", "0.47768652", "0.4773212", "0.47724771", "0.47710955", "0.47648495", "0.47582862", "0.47546798", "0.4752346" ]
0.88047636
1
Sets the zip_post_code of this Organization.
def zip_post_code(self, zip_post_code):
    self._zip_post_code = zip_post_code
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def zipcode(self, zipcode):\n self._zipcode = zipcode", "def zip_code(self, zip_code):\n\n self._zip_code = zip_code", "def postal_code(self, postal_code):\n\n self._postal_code = postal_code", "def postal_code(self, postal_code):\n\n self._postal_code = postal_code", "def postal_code(self, postal_code):\n\n self._postal_code = postal_code", "def post_code(self, post_code):\n\n self._post_code = post_code", "def postcode(self, postcode):\n\n self._postcode = postcode", "def postcode(self, postcode):\n\n self._postcode = postcode", "def postcode(self, postcode):\n\n self._postcode = postcode", "def principal_zip_code(self, principal_zip_code):\n\n self._principal_zip_code = principal_zip_code", "def set_PostalCode(self, value):\n super(AddressValidationInputSet, self)._set_input('PostalCode', value)", "def zip_code(self, value):\n regex = config.get('validators', 'zip_code')\n zipcode = re.search(regex,\n value)\n if not zipcode:\n raise ZipCodeError(\"ZipCodeError: 'zip_code' must be 5 non-float digits\")\n else:\n self._zip_code = value", "def zipcode(self, zipcode):\n if zipcode is None:\n raise ValueError(\"Invalid value for `zipcode`, must not be `None`\") # noqa: E501\n\n self._zipcode = zipcode", "def payee_zip(self, payee_zip):\n\n self._payee_zip = payee_zip", "def type_zip_code(self, zip_code):\n\n\t\twith allure.step(\"Type payee zip code\"):\n\t\t\telement = Element(driver=self.driver,\n\t\t\t explicit_wait_time=self.explicit_wait_time,\n\t\t\t locator=BillPayPageLocator.ZIP_CODE_INPUT)\n\t\t\telement.write(zip_code)\n\t\t\treturn None", "def principal_zip_code_suffix(self, principal_zip_code_suffix):\n\n self._principal_zip_code_suffix = principal_zip_code_suffix", "def zip_code(self):\n return self._zip_code", "def zip(self, zip):\n\n self._zip = zip", "def input_postal_code(self, postal_code):\n self.send_keys_to_element(self.postalcode_textbox_selector, postal_code)", "def zip_code(self):\n\n\t\telement = Element(driver=self.driver,\n\t\t explicit_wait_time=self.explicit_wait_time,\n\t\t locator=BillPayPageLocator.ZIP_CODE_INPUT)\n\t\treturn element.element_value", "def postal_code(self):\n if \"postalCode\" in self._prop_dict:\n return self._prop_dict[\"postalCode\"]\n else:\n return None", "def postal_code(self):\n if \"postalCode\" in self._prop_dict:\n return self._prop_dict[\"postalCode\"]\n else:\n return None", "def correct_zipcode(business_tag, zipcode):\n try:\n address = business_tag.find('div', {'class': 'secondary-attributes'}).find('address').text\n zipcode_found = re.search(re.compile('(^|[^\\d])\\d{5}($|[^\\d])'), address).group(0)\n zipcode_found = re.search(re.compile('\\d{5}'), zipcode_found).group(0)\n return zipcode_found == zipcode\n except:\n return False", "def postal_code(self):\n return self._postal_code", "def city_state_zip(**kwargs):\r\n result = \"{city_name}, {state_code}\".format(**kwargs)\r\n if kwargs[\"five_digit_zip_code\"]:\r\n # RLID for some reason has two spaces between state & ZIP.\r\n result += \" {five_digit_zip_code}\".format(**kwargs)\r\n return result", "def replace_zip_code(zip_code):\r\n if len(zip_code)>5:\r\n return zip_code[0:5]\r\n else:\r\n return zip_code", "def postal_code(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"postal_code\")", "def postal_code(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"postal_code\")", "def postal_code(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"postal_code\")", "def postal_code(self) -> Optional[pulumi.Input[str]]:\n return 
pulumi.get(self, \"postal_code\")", "def compute_zip_code(zip_code_text):\n    zip_code = None\n    if zip_code_text and len(zip_code_text) >= 5 and zip_code_text.isdigit():\n        zip_code = zip_code_text[:5]\n    return zip_code", "def postcode(self):\n        return self._postcode", "def postcode(self):\n        return self._postcode", "def __init__(self, zipcode, countrycode, apikey):\r\n        self.zip = zipcode\r\n        self.ccode = countrycode\r\n        self.set_apikey(apikey)", "def update_customer_zip(self, customer_to_change, new_value):\n        customer_list = self._customer_repo.get_customer_list()\n        for customer in customer_list:\n            if customer.get_customer_id() == customer_to_change.get_customer_id():\n                customer.set_zip(new_value)\n        self._customer_repo.overwrite_customer_list(customer_list)", "def _extract_zipcode(self):\n        self.col_etl = self.col_etl.apply(lambda x: x[0:2])\n        self.col_etl.name = 'deliv_sector'", "def postal_code(self, instance):\r\n        return instance.user.profile.postal_code", "def zipcode_update():\n\n\n    user_id = session['user_id']\n    zipcode = int(request.form.get('zipcode'))\n\n    User.update_zipcode(user_id, zipcode)\n\n    return \"Zip code updated\"", "def checkPostalCode(self, code, country):\n        if country == 'US':\n            USZipCodeField().clean(code)", "def pin_code(self, pin_code: List[PinCodeSummary]):\n\n        self._pin_code = pin_code", "def conduit_committee_zip(self, conduit_committee_zip):\n\n        self._conduit_committee_zip = conduit_committee_zip", "def set_country_code(zd, option, **kwargs):\n    cfg_option = {'country_code': '',\n                  'channel_optimization': '',\n                  'channel_mode':''} \n    cfg_option.update(option)\n    \n    xloc = LOCATOR_CFG_SYSTEM_COUNTRY_CODE\n    xloc_map = {\n        'country_code': xloc['country_code_listbox'],\n        'compatibility': xloc['optimization_for_compatibility_radio'],\n        'interoperability': xloc['optimization_for_interoperability_radio'],\n        'performance': xloc['optimization_for_performance_radio'],\n        'allow_indoor': xloc['allow_indoor_channel_checkbox'],\n    }\n    nav_to(zd)\n    \n    if cfg_option['country_code']:\n        zd.s.select_option(xloc_map['country_code'], re.escape(cfg_option['country_code']))\n    if cfg_option['channel_optimization']:\n        zd.s.click_and_wait(xloc_map[cfg_option['channel_optimization']])\n    if cfg_option['channel_mode']:\n        zd.s.click_if_not_checked(xloc_map[cfg_option['channel_mode']])\n    \n    zd.s.choose_ok_on_next_confirmation()\n    zd.s.click_and_wait(zd.info['loc_cfg_sys_ctrycode_apply_button'])\n    if not zd.s.is_confirmation_present(5):\n        raise Exception(\"No dialog confirmation for setting country code appeared\")\n    zd.s.get_confirmation()\n    logging.info(\"Change country code option for ZoneDirector to %s successfully\" % str(cfg_option))", "def set_City(self, value):\n        super(AddressValidationInputSet, self)._set_input('City', value)", "def setDZ(self, dz):\n        self.resetValue('dz', dz, INIT_ARRAYS - 1)", "def setPostUp(self, post):\n        # type: (tp.Union[str, tp.List[str]])->None\n        if isinstance(post, list):\n            self._ifAttributes['post-up'] = post\n        elif isinstance(post, str):\n            self._ifAttributes['post-up'] = [post]\n        else:\n            raise ValueError(\"Invalid value type {0}, expected str or List[str]\".format(type(post)))", "def update_my_zip_code(\n    user_zip_code_update: UserZipCodeUpdate,\n    namespace: Optional[str] = None,\n    x_additional_headers: Optional[Dict[str, str]] = None,\n    **kwargs\n):\n    if namespace is None:\n        namespace, error = get_services_namespace()\n        if error:\n            return None, error\n    request = UpdateMyZipCode.create(\n        user_zip_code_update=user_zip_code_update,\n        namespace=namespace,\n    )\n    return run_request(request, additional_headers=x_additional_headers, **kwargs)", "def clean_incident_zip(zipcode):\n    zipcode = str(zipcode).replace('.0', '')[:5]\n    try:\n        zipcode = int(zipcode)\n    except:\n        return None\n    # Pad it on the left with '0's\n    zipcode = '{:05}'.format(zipcode)\n    return zipcode", "def get_postal_code(self):\n        element = self.driver.find_element(*self.postalcode_textbox_selector)\n        return element.get_attribute(\"value\")", "def fix_zipcode(df):\n    zipcode5 = []\n    fixnum = 0\n    for zipcode in df['Zip'].values:\n        if isinstance(zipcode, str) and '-' in zipcode:\n            zipcode5.append(int(zipcode.split('-')[0]))\n            fixnum += 1\n        else:\n            zipcode = int(float(zipcode))\n            zipcode5.append(zipcode)\n    df['zip'] = zipcode5\n    # print('Fixing %.2f %% of the data' % (fixnum * 100 / len(zipcode5)))\n    return df", "def shipping_address(self, shipping_address):\n\n        self._shipping_address = shipping_address", "def postal_codes(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n        return pulumi.get(self, \"postal_codes\")", "def postal_codes(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n        return pulumi.get(self, \"postal_codes\")", "def order_code(self, order_code):\n\n        self._order_code = order_code", "def postcode(full_address):\n    return capture_address_element(POSTCODE_PATTERN, full_address)", "def load_zip(data):\n    zip_codes = [address['postalCode'] for address in data]\n    zip_codes_objects = [Zip(data=zip_code)\n                         for zip_code\n                         in zip_codes]\n    Zip.objects.bulk_create(zip_codes_objects)", "def set_UrbanizationCode(self, value):\n        super(AddressValidationInputSet, self)._set_input('UrbanizationCode', value)", "def country_code(self, country_code):\n\n        self._country_code = country_code", "def update_postcode(postcode, invalid = True):\r\n    m = postcode_format_re.search(postcode)\r\n    if m:\r\n        invalid = False\r\n        postcode= postcode[:5]\r\n    return (invalid, postcode)", "def banking_pin(self, banking_pin):\n\n        self._banking_pin = banking_pin", "def geocode_postcode(self, postcode: [str],\n                         address: Optional[str] = None) -> Union[Tuple[float, float], List[Tuple[float, float]]]:\n        address = [None for a in address] if address is None else list(address)\n        logging.debug(\"Geocoding %s postcodes (%s addresses)\", len(postcode), len(address))\n        results = []\n        for pc, addr in zip(postcode, address):\n            results.append(self.geocode_one(postcode=pc, address=addr))\n        return results", "def merchant_order_no(self, merchant_order_no):\n\n        self._merchant_order_no = merchant_order_no", "def municipality(self, municipality):\n\n        self._municipality = municipality", "def bank_transaction_code(self, bank_transaction_code):\n\n        self._bank_transaction_code = bank_transaction_code", "def postal_codes(self) -> pulumi.Output[Optional[Sequence[str]]]:\n        return pulumi.get(self, \"postal_codes\")", "def set_CountryCode(self, value):\n        super(AddressValidationInputSet, self)._set_input('CountryCode', value)", "def __init__(self, zip_code, house_number, house_addition=\"\"):\n        self.zip_code = zip_code.replace(' ', '')\n        self.house_number = house_number.strip()\n        self.house_addition = house_addition.strip()", "async def update_my_zip_code_async(\n    user_zip_code_update: UserZipCodeUpdate,\n    namespace: Optional[str] = None,\n    x_additional_headers: Optional[Dict[str, str]] = None,\n    **kwargs\n):\n    if namespace is None:\n        namespace, error = get_services_namespace()\n        if error:\n            return None, error\n    request = UpdateMyZipCode.create(\n        user_zip_code_update=user_zip_code_update,\n        namespace=namespace,\n    )\n    return await run_request_async(\n        request, additional_headers=x_additional_headers, **kwargs\n    )", "def normalise_postcode(postcode):\n\n    postcode = NON_ALPHA_RE.sub(\"\", postcode.upper())\n    postcode = postcode[:-3] + \" \" + postcode[-3:]\n    if POSTCODE_RE.match(postcode):\n        return postcode\n    return None", "def test_update_shipping_address(self):\n        self.cim.update_shipping_address(\n            customer_profile_id=u\"222\",\n            customer_address_id=u\"444\",\n            first_name=u\"pippo\",\n            phone=u\"415-415-4154\"\n        )", "def get_postal_zone(self, postal_code: str) -> PostalZone:\n        key = postal_code.lower()\n        try:\n            return self._postal_zones[key]\n        except KeyError:\n            postal_code = PostalZone(postal_code=postal_code, city=self)\n            self._postal_zones[key] = postal_code\n            return postal_code", "def set_address(self, address):\n        pass", "def test_postal_code(self):\n        self.assertIsInstance(self.address.postal_code, str)\n        self.assertEqual(self.address.postal_code, \"75000\")", "def set_address(self, address):\n        if address == \"\":\n            self.address = Address(\"\", \"\", \"\")\n        else:\n            self.address = address", "def __init__(self):\n        self._zipcode = None\n        self._city = None", "def find_zip_codes(self, zip_code):\n        zip_code = str(zip_code).strip()\n        cursor = self.households.find({\"addresses.zip_code\":zip_code})\n        results = [Household.from_dict(dct) for dct in cursor]\n\n        cursor = self.businesses.find({\"address.zip_code\":zip_code})\n        results += [Business.from_dict(dct) for dct in cursor]\n\n        return results", "def journal_iso_abbreviation(self, journal_iso_abbreviation):\n\n        self._journal_iso_abbreviation = journal_iso_abbreviation", "def un_location_code(self, un_location_code: UNLocationCode1):\n\n        self._un_location_code = un_location_code", "def get_zip_code(string):\n    zip_code = \"\"\n\n    #for each character in string\n    for ch in string:\n        #if the character is a number, add it to the \"zip_code\" string\n        if ch.isdigit():\n            zip_code += ch\n\n    return zip_code", "def test_address__postal_address_title__1(zcmlS):\n    pa = PostalAddress()\n    pa.country = None  # reset default value of `Germany`\n    assert u'none' == ITitle(pa)", "def setPostDown(self, post):\n        # type: (tp.Union[str, tp.List[str]])->None\n        if isinstance(post, list):\n            self._ifAttributes['post-down'] = post\n        elif isinstance(post, str):\n            self._ifAttributes['post-down'] = [post]\n        else:\n            raise ValueError(\"Invalid value type {0}, expected str or List[str]\".format(type(post)))", "def abbreviation(self, abbreviation):\n        self._abbreviation = abbreviation", "def code(self, code):\n\n        self._code = code", "def zipsave(self):\n\n        filename = filedialog.asksaveasfilename(initialdir=self.root.cache_dir,\n                                                title='Save As',\n                                                filetypes=[('Zip', '.zip')],\n                                                defaultextension='')\n        self.save_entry.clear()\n        self.save_entry.insert(tk.END, filename)\n        self.root.cache_dir = filename", "def _setordering_institution_52D(self, val):\n        self.swift_obj.OrderingInstitution_D = val\n        self.swift_obj.OrderingInstitution_D.swiftTag = '52D'", "def set_query_string(self):\n\n        if self.search_by == 'by-postal-code':\n            self.querystring = {'postalCode': self.search_input, 'countryCode': \"US\"}\n        else :\n            self.querystring = {'city': self.search_input}", "def convert_zip_code(zipcode):\n    zipcode = tf.strings.regex_replace(zipcode, r\"X{0,5}\", \"0\")\n    zipcode = tf.strings.to_number(zipcode, out_type=tf.float32)\n    return zipcode", "def address_type(self, address_type):\n\n        self._address_type = address_type", "def __init__(self, zipped_code_path: str, is_local: bool = False):\n        self.zipped_code_path = zipped_code_path\n        self.is_local = is_local", "def get_zipsearch(zipcode=u''):\n    from x84.bbs import getterminal, LineEditor, echo\n    term = getterminal()\n    echo(u''.join((u'\\r\\n\\r\\n',\n                   term.bold_yellow(u'  -'),\n                   term.reverse_yellow(u':'),\n                   u' ')))\n    return LineEditor(width=min(30, term.width - 5), content=zipcode).read()", "def __init__(self, kwargs):\n\n        self.postcode = kwargs[\"postcode\"]\n        self.east = float(kwargs[\"east\"])\n        self.north = float(kwargs[\"north\"])\n        self.latitude = kwargs[\"latitude\"]\n        self.longitude = kwargs[\"longitude\"]", "def branch_code(self, branch_code):\n        if self.local_vars_configuration.client_side_validation and branch_code is None:  # noqa: E501\n            raise ValueError(\"Invalid value for `branch_code`, must not be `None`\")  # noqa: E501\n\n        self._branch_code = branch_code", "def valid_zipcode(line):\n    zipcode = line.o_zip_code\n    invalid_zip = len(zipcode) not in [5, 9] and zipcode.isdigit()\n    if invalid_zip:\n        rule = 'Zipcode length'\n        new_row = Error(e_name=rule, order_key=line.primary_key)\n        line.errors.append(new_row)\n        return False\n    return True", "def setZ(self, z):\n        self.position.setZ(z)", "def zone_topo(self, zone_topo):\n\n        self._zone_topo = zone_topo", "def _set_bank_operation_code_23B(self, val):\n        self.swift_obj.BankOperationCode = val\n        self.swift_obj.BankOperationCode.swiftTag = \"23B\"", "def clean_postal_code(self):\n        return self.cleaned_data['postal_code'].strip()", "def address_code(self, address_code):\n        if self.local_vars_configuration.client_side_validation and address_code is None:  # noqa: E501\n            raise ValueError(\"Invalid value for `address_code`, must not be `None`\")  # noqa: E501\n        if (self.local_vars_configuration.client_side_validation and\n                address_code is not None and len(address_code) > 10):\n            raise ValueError(\"Invalid value for `address_code`, length must be less than or equal to `10`\")  # noqa: E501\n\n        self._address_code = address_code", "def __init__(__self__, *,\n                 country: str,\n                 postal_code: str,\n                 street_address1: str,\n                 address_type: Optional[str] = None,\n                 city: Optional[str] = None,\n                 company_name: Optional[str] = None,\n                 state_or_province: Optional[str] = None,\n                 street_address2: Optional[str] = None,\n                 street_address3: Optional[str] = None,\n                 zip_extended_code: Optional[str] = None):\n        pulumi.set(__self__, \"country\", country)\n        pulumi.set(__self__, \"postal_code\", postal_code)\n        pulumi.set(__self__, \"street_address1\", street_address1)\n        if address_type is None:\n            address_type = 'None'\n        if address_type is not None:\n            pulumi.set(__self__, \"address_type\", address_type)\n        if city is not None:\n            pulumi.set(__self__, \"city\", city)\n        if company_name is not None:\n            pulumi.set(__self__, \"company_name\", company_name)\n        if state_or_province is not None:\n            pulumi.set(__self__, \"state_or_province\", state_or_province)\n        if street_address2 is not None:\n            pulumi.set(__self__, \"street_address2\", street_address2)\n        if street_address3 is not None:\n            pulumi.set(__self__, \"street_address3\", street_address3)\n        if zip_extended_code is not None:\n            pulumi.set(__self__, \"zip_extended_code\", zip_extended_code)", "def qb_code(self, qb_code):\n\n        self._qb_code = qb_code", "def zips_from_csv(zipcode_data):\n    # We can skip the first line from csv as it just defines columns\n    all_zipcodes = []\n    for zipcode in zipcode_data[1:]:\n        # get zip and statename\n        zcode = zipcode[1][0]\n        statename = zipcode[1][3]\n        zipcode_obj = Zipcode(zipcode=zcode, state=statename)\n        all_zipcodes.append(zipcode_obj)\n    db.session.bulk_save_objects(all_zipcodes)\n    db.session.commit()\n\n    return" ]
[ "0.7381539", "0.7371916", "0.70360076", "0.70360076", "0.70360076", "0.69919515", "0.67918134", "0.67918134", "0.67918134", "0.6679859", "0.65118146", "0.6351724", "0.61154234", "0.6063556", "0.6023592", "0.59138143", "0.58627266", "0.56648487", "0.56164485", "0.55818355", "0.55670595", "0.55670595", "0.5506508", "0.5461406", "0.5410723", "0.5402437", "0.53523207", "0.52537054", "0.52537054", "0.52537054", "0.52467287", "0.52368075", "0.52368075", "0.5193979", "0.5131873", "0.50965893", "0.50526273", "0.501074", "0.4922739", "0.48960015", "0.48413196", "0.47737652", "0.47465318", "0.47083312", "0.46880773", "0.46616116", "0.4648061", "0.4644842", "0.4627843", "0.46211952", "0.4604334", "0.4604334", "0.46005887", "0.45928276", "0.4567488", "0.4533099", "0.45225528", "0.45120937", "0.44998124", "0.44948527", "0.44658172", "0.44637704", "0.44586948", "0.44490677", "0.44266126", "0.44250107", "0.44053325", "0.44015434", "0.4386536", "0.436534", "0.4361915", "0.43491563", "0.43341908", "0.43182874", "0.43167123", "0.4274156", "0.42662665", "0.426218", "0.42605662", "0.42575124", "0.42541167", "0.4250559", "0.42288345", "0.4227928", "0.42234987", "0.42140025", "0.42134237", "0.42123005", "0.42043498", "0.4203392", "0.4202036", "0.41991568", "0.4194375", "0.4192365", "0.4180916", "0.4170344", "0.41658345", "0.41584697", "0.41570753", "0.41503876" ]
0.84573597
0
Sets the city of this Organization.
def city(self, city): self._city = city
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def city(self, city):\n        self._city = city", "def city(self, city):\n        # type: (string_types) -> None\n\n        if city is not None:\n            if not isinstance(city, string_types):\n                raise TypeError(\"Invalid type for `city`, type has to be `string_types`\")\n\n        self._city = city", "def set_City(self, value):\n        super(AddressValidationInputSet, self)._set_input('City', value)", "def payee_city(self, payee_city):\n\n        self._payee_city = payee_city", "def city_update(self):\n        self.city = self.city_finder(self.location.__str__())", "def moveTo(self, city):\r\n        self.city = city", "def birth_city(self, birth_city):\n\n        self._birth_city = birth_city", "def city(self) -> str:\n        return pulumi.get(self, \"city\")", "def city(self):\n        return self._city", "def city(self):\n        return self._city", "def city(self):\n        return self._city", "def city(self):\n        return self._city", "def city(self):\n        return self._city", "def principal_city(self, principal_city):\n\n        self._principal_city = principal_city", "def type_city(self, city):\n\n\t\twith allure.step(\"Type payee city\"):\n\t\t\telement = Element(driver=self.driver,\n\t\t\t                  explicit_wait_time=self.explicit_wait_time,\n\t\t\t                  locator=BillPayPageLocator.CITY_INPUT)\n\t\t\telement.write(city)\n\t\t\treturn None", "def city(self) -> Optional[str]:\n        return pulumi.get(self, \"city\")", "def conduit_committee_city(self, conduit_committee_city):\n\n        self._conduit_committee_city = conduit_committee_city", "def set_city_count(self, city_count):\n        self.city_count = city_count", "def city(self):\n        # type: () -> string_types\n        return self._city", "def city(self):\n        if \"city\" in self._prop_dict:\n            return self._prop_dict[\"city\"]\n        else:\n            return None", "def city(self):\n        if \"city\" in self._prop_dict:\n            return self._prop_dict[\"city\"]\n        else:\n            return None", "def source_locations_city(self, source_locations_city):\n\n        self._source_locations_city = source_locations_city", "def address_city(self, address_city):\n        if self.local_vars_configuration.client_side_validation and address_city is None:  # noqa: E501\n            raise ValueError(\"Invalid value for `address_city`, must not be `None`\")  # noqa: E501\n        if (self.local_vars_configuration.client_side_validation and\n                address_city is not None and len(address_city) > 64):\n            raise ValueError(\"Invalid value for `address_city`, length must be less than or equal to `64`\")  # noqa: E501\n\n        self._address_city = address_city", "def edit_city(g, city_name, option, value):\n    city_code = g.convert[city_name]\n    \n    if(option == \"country\"):\n        g.city_dict[city_code].set_country(value)\n    \n    if(option == \"continent\"):\n        g.city_dict[city_code].set_continent(value)\n    \n    if(option == \"timezone\"):\n        g.city_dict[city_code].set_timezone(int(value)) \n    \n    if(option == \"coordinates\"):\n        g.city_dict[city_code].set_coordinates(value) \n    \n    if(option == \"population\"):\n        g.city_dict[city_code].set_population(int(value))\n    \n    if(option == \"region\"):\n        g.city_dict[city_code].set_region(int(value))\n    \n    return g", "def city(self):\n\n        try:\n            city = self.status.place[\"full_name\"].strip(r\",[A-Z ]\")\n        except TypeError:\n            city = None\n        if not city:\n            try:\n                city = self.metadata.as_dict.get(\"user_city\").get(\"google_geocoding\")\n            except (TypeError, AttributeError):\n                city = None\n        return city", "def __init__(self, city):\r\n        self.city = city", "def city(self):\n\t\telement = Element(driver=self.driver,\n\t\t                  explicit_wait_time=self.explicit_wait_time,\n\t\t                  locator=BillPayPageLocator.CITY_INPUT)\n\t\treturn element.element_value", "def source_scopes_city(self, source_scopes_city):\n\n        self._source_scopes_city = source_scopes_city", "def city(self, instance):\r\n        return instance.user.profile.city", "def town(self, town):\n\n        self._town = town", "def city(self):\r\n        try:\r\n            return str(self.connect()['name'])\r\n        except:\r\n            return '@weather_city'", "def organization(self, organization):\n\n        self._organization = organization", "def organization(self, organization):\n\n        self._organization = organization", "def organization(self, organization):\n\n        self._organization = organization", "def setUp(self):\n        self.my_city = City()", "def addCity(self, city):\n        if city:\n            self.city_lbx.insert(0, str(city).strip())\n            self.city_ent.delete(0, \"end\")\n            self.city_ent.focus()", "def set_tour(self, city_list=None):\n        self.cities = city_list or \\\n            random.sample(range(len(self.x_points)), len(self.y_points))\n        self.distance = 0\n        self.fitness = 0", "def onChangeCity(self, item):\n        list = self.lstCities.getMultiSelectedItems()\n        self.frame.mode.createChangeCityFrame(list, self.mySystemDict['id'], self.mySystemDict['name'])", "def company(self, company):\n        self._company = company", "def add_city(self, city):\r\n        self.vertices[city[\"code\"]] = Vertex(city)", "def company(self, company):\n\n        self._company = company", "def company(self, company):\n\n        self._company = company", "def city_put(city_id):\n    city_json = request.get_json(silent=True)\n    if city_json is None:\n        abort(400, 'Not a JSON')\n    fetched_obj = storage.get(\"City\", str(city_id))\n    if fetched_obj is None:\n        abort(404)\n    for key, val in city_json.items():\n        if key not in [\"id\", \"created_at\", \"updated_at\", \"state_id\"]:\n            setattr(fetched_obj, key, val)\n    fetched_obj.save()\n    return jsonify(fetched_obj.to_json())", "def locality(self, locality):\n\n        self._locality = locality", "def test_set_attr(self):\n        self.my_city.name = \"Denver\"\n        self.assertEqual(self.my_city.name, \"Denver\")", "def setUpClass(cls):\n        cls.city = City()", "def setOrganization(self, *args):\n        return _libsbml.ModelCreator_setOrganization(self, *args)", "def test_city(self):\n        c = City()\n        self.assertEqual(c.name, \"\")\n        self.assertEqual(c.state_id, \"\")\n        c.name = \"San Francisco\"\n        c.state_id = \"98\"\n        self.assertEqual(c.name, \"San Francisco\")\n        self.assertEqual(c.state_id, \"98\")\n        self.assertEqual(type(c.state_id), str)", "def put_update_city(city_id):\n    city = storage.get(\"City\", city_id)\n    if city is None:\n        abort(404)\n    kwargs = request.get_json()\n    if kwargs is None:\n        return ('Not a JSON', 400)\n    for k, v in kwargs.items():\n        setattr(city, k, v)\n    city.save()\n    return (jsonify(city.to_json()), 200)", "def organization(self, value):\n        organization = self._tower.get_organization_by_name(value)\n        if not organization:\n            raise InvalidOrganization(value)\n        self._update_values('organization', organization.id)", "def changeCity(self, resource, cityList, systemID):\n        try:\n            for cityID in cityList:\n                dOrder = {'type':'Change City', 'value':'%s-%s' % (cityID, resource),\n                          'system':systemID, 'round':self.game.myGalaxy['currentRound']}\n                serverResult = self.game.server.addIndustryOrder(self.game.authKey, dOrder)\n                if serverResult <> 1:\n                    self.modeMsgBox(serverResult)\n                    break\n            self.changeCityFrame.destroy()\n            self.refreshIndustryOrder(systemID)\n        except:\n            self.modeMsgBox('changeCity->Connection to Server Lost, Login Again')", "def __init__(self, model_city):\n\n        assert isinstance(\n            model_city, city_settings.ModelCity), 'ModelCity expected'\n        self.model_city = model_city\n\n        self._raw_data = data_io.fetch_service_units(\n            self.servicetype, self.model_city)", "def insert_city(self, city_point):\n        city = City(city_point)\n        self.map.insert(city)", "def add_city(g, code, name, country, continent, timezone, coordinates, population, region):\n    port = Ports(code, name, country, continent, timezone, coordinates, population, region)\n    g.city_dict[code] = port\n    g.convert[name] = code \n    return g", "def put_city(city_id):\n\n    if not request.get_json():\n        return make_response(jsonify({\"error\": \"Not a JSON\"}), 400)\n    if storage.get(\"City\", city_id) is not None:\n        obj = storage.get(\"City\", city_id)\n        obj_dict = obj.to_dict()\n        dict1 = request.get_json()\n        ignored_keys = ['id', 'created_at', 'updated_at', 'state_id']\n        for key, value in obj_dict.items():\n            if key in ignored_keys:\n                pass\n            else:\n                for k, v in dict1.items():\n                    if key == k:\n                        setattr(obj, k, v)\n                    else:\n                        pass\n        obj.save()\n        return make_response(obj.to_dict(), 200)\n    else:\n        abort(404)", "def insert(self, city: City):\n        self.list_cities.append(city)", "def update_city(city_id):\n    obj = storage.get('State', city_id)\n    if obj is None:\n        abort(400)\n    r = request.get_json()\n    if r is None:\n        abort(400, 'Not a JSON')\n    for k, v in r.items():\n        if k not in['id', 'created_at', 'updated_at']:\n            setattr(obj, k, v)\n    storage.save()\n    return jsonify(obj.to_dict())", "def _build_city(db, place):\n    location = get_main_location(db, place)\n    county = location.get(PlaceType.COUNTY)\n    # Build a title description string that will work for Eniro\n    city_descr = _build_area(db, place)\n    if county:\n        city_descr += ', ' + county\n    return _strip_leading_comma(city_descr)", "def update_city(city_id):\n    obj = models.storage.get(\"City\", city_id)\n    json = request.get_json()\n    if obj is not None:\n        if json is not None:\n            for key, value in json.items():\n                if key not in [\"id\", \"updated_at\", \"created_at\",\n                               \"state_id\"]:\n                    setattr(obj, key, value)\n            obj.save()\n            return jsonify(obj.to_dict())\n        else:\n            abort(400, \"Not a JSON\")\n    else:\n        abort(404)", "def associate(self, city: 'City', reflexive: bool=True):\n        self._cities.add(city)\n        if reflexive:\n            city.associate(self, reflexive=False)", "def organization_id(self, organization_id):\n\n        self._organization_id = organization_id", "def organization_id(self, organization_id):\n\n        self._organization_id = organization_id", "def equity(self, equity):\n\n        self._equity = equity", "def get_city(self, territory_id: str = \"\"):", "def get_city(self, territory_id: str = \"\"):", "def org_id(self, org_id):\n\n        self._org_id = org_id", "def industry(self, industry):\n\n        self._industry = industry", "def setOrganisation(self, *args):\n        return _libsbml.ModelCreator_setOrganisation(self, *args)", "def get_city(self, city_id):\n        city = self.city_repo.get_by_id(city_id)\n\n        resource = self.city_repo.dump(city)\n        return dict(city=resource), [], SUCCESS", "def industry(self, industry: str):\n\n        self._industry = industry", "def org_name(self, org_name):\n\n        self._org_name = org_name", "def org_name(self, org_name):\n\n        self._org_name = org_name", "def put_cities(city_id=None):\n    dict_json = request.get_json()\n    if not dict_json:\n        return make_response(jsonify({'error': 'Not a JSON'}), 400)\n    cities_obj = storage.get('City', city_id)\n    list_ignore = ['id', 'created_at', 'updated_at']\n    if cities_obj:\n        for key, value in dict_json.items():\n            if key not in list_ignore:\n                setattr(cities_obj, key, value)\n        storage.save()\n        return make_response(jsonify(cities_obj.to_dict()), 200)\n    else:\n        return abort(404)", "def city_location(city, country, population = None):\n    if population:\n        full_str = f\"{city.title()}, {country.title()} - population {population}\"\n    else:\n        full_str = f\"{city.title()}, {country.title()}\"\n    return full_str", "def county(self, county):\n\n        self._county = county", "def county(self, county):\n\n        self._county = county", "def set_coor(self, new_coor):\n        self.__x_coor, self.__y_coor = new_coor", "def get_billing_city(self):\n        if self.billing_address and self.billing_address.city:\n            return self.billing_address.city\n        else:\n            sub_prods = SubscriptionProduct.objects.filter(subscription=self)\n            addresses = [sp.address for sp in sub_prods]\n            if addresses:\n                return addresses[0].city\n            else:\n                return \"\"", "def set_location(self, location):\n        self.location = location", "def __init__(self, cities):\n        self.cities = copy.copy(cities)\n        self.compute_fitness()", "def cidade(self, cidade):\n        self._cidade = cidade", "def build_city(self, logs=None) -> str:\n        if self.is_cart():\n            raise ValueError(f\"Unit {self.id} is a cart; cannot build a city!\")\n        if logs is not None:\n            logs.append((self.id, ValidActions.BUILD, self.pos))\n        return \"bcity {}\".format(self.id)", "def set_location(self, location):\r\n        self.__location = location", "def test_city(self):\n        self.assertIsInstance(self.address.city, str)\n        self.assertEqual(self.address.city, \"Paris\")", "def get_city_state(self):\n\n        city = self.city\n        return f'{city.name}, {city.state}'", "def coord(self, coord):\n\n        self._coord = coord", "def city(city_id):\n\n    if storage.get(\"City\", city_id) is not None:\n        return jsonify(storage.get(\"City\", city_id).to_dict())\n    else:\n        abort(404)", "def _set_origin_value(self, origin):\n        self.origin_value = origin", "def test_set_attrs(self):\n        city2 = City()\n        city2.name = \"Hawaii\"\n        self.assertEqual(city2.name, \"Hawaii\")\n        city2.state_id = \"<3\"\n        self.assertEqual(city2.state_id, \"<3\")\n        self.assertEqual(City.name, \"\")\n        self.assertEqual(City.state_id, \"\")", "def __getitem__(self, key):\n        \n        city = self._citydb[key]\n        city.astral = self\n        return city", "def location_country(self, location_country):\n\n        self._location_country = location_country", "def get_datacenter_city(self, node):\n        if self._datacenter_cache is None:\n            self.populate_datacenter_cache()\n        location = self._datacenter_cache[node.datacenter_id].location\n        location = location.lower()\n        location = location.split(\",\")[0]\n        return location" ]
[ "0.8256088", "0.7434127", "0.715741", "0.7125765", "0.70390105", "0.6859149", "0.6617845", "0.65802497", "0.6554839", "0.6554839", "0.6554839", "0.6554839", "0.6554839", "0.6495286", "0.6381163", "0.634089", "0.63268566", "0.63185745", "0.6303239", "0.6295937", "0.6295937", "0.6247916", "0.61640805", "0.6049013", "0.5947243", "0.58795154", "0.58347857", "0.5808114", "0.5635428", "0.56250036", "0.5613722", "0.5607772", "0.5607772", "0.5607772", "0.55565655", "0.54772335", "0.54344624", "0.54190713", "0.5373945", "0.5353313", "0.53267246", "0.53267246", "0.52727497", "0.521567", "0.52037734", "0.51873726", "0.5186075", "0.5185913", "0.51741576", "0.51682925", "0.5138998", "0.50990385", "0.50758785", "0.5072668", "0.5046796", "0.503577", "0.5032528", "0.50236005", "0.5021601", "0.50139636", "0.499863", "0.499863", "0.4988769", "0.4979052", "0.4979052", "0.497743", "0.49391577", "0.49361852", "0.49206865", "0.49081442", "0.4898933", "0.4898933", "0.48615137", "0.48323417", "0.48170176", "0.48170176", "0.47999498", "0.479818", "0.4794418", "0.47900477", "0.4767069", "0.47608426", "0.47337505", "0.47248456", "0.4724455", "0.47192052", "0.47145334", "0.4694547", "0.46908897", "0.46830043", "0.4658293", "0.46516615" ]
0.8241868
9
Sets the name of this Organization.
def name(self, name): self._name = name
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def org_name(self, org_name):\n\n        self._org_name = org_name", "def org_name(self, org_name):\n\n        self._org_name = org_name", "def set_name(self, name):\n        self._name = name", "def SetName(self, name):\n        self.name = name", "def set_name(self, name: str):\n        self._name = name", "def set_name(self, name):\n        self.name = name", "def set_name(self, name):\n        self.name = name", "def set_name(self, name):\n        self.name = name", "def set_name(self, name):\n        self.name = name", "def set_name(self, name):\n        self.name = name", "def set_name(self, name):\n        self.__name = name", "def set_name(self, name):\n        self.__name = name", "def set_name(self, _name):\n        self.name = _name", "def set_name(self, name):\n        self.name = name  # overwrite the existing name with the input name", "def set_name(self, name):\n        self.name = name  # overwrite the existing name with the input name", "def set_name(self, name):\n        assert isinstance(name, str), 'Name must be string'\n        self._name = name", "def set_name(self, name):\r\n        self.__name = name", "def set_name(self, name):\n\t\tself.name_ = name", "def set_name(self, _name):\n        self.name = _name\n        return self.name", "def setname(self, name):\n        self.__name = name", "def setName(self, name):\n        self._name = name", "def setName(self, name):\n        self._name = name", "def name(self, name):\n\n        self._set_field(\"name\", name)", "def setName(self, name): \n\n        self._name = name", "def set_name(self, a_name):\n        self.set_parameter('name', a_name)\n        return self", "def set_name(self, room_name):\n        self.name = room_name", "def setName(self, name):\n        self.name = str(name)", "def name(self, name):\n        if name is None:\n            raise ValueError(\"Invalid value for `name`, must not be `None`\")\n\n        self._name = name", "def name(self, name):\n        if name is None:\n            raise ValueError(\"Invalid value for `name`, must not be `None`\")\n\n        self._name = name", "def name(self, name):\n        if name is None:\n            raise ValueError(\"Invalid value for `name`, must not be `None`\")\n\n        self._name = name", "def name(self, name):\n        if name is None:\n            raise ValueError(\"Invalid value for `name`, must not be `None`\")\n\n        self._name = name", "def setName(self, name):\n        self.name = name", "def setName(self, name):\n        self.name = name", "def setName(self, name):\n        self.name = name", "def setName(self, name):\n        self.name = name", "def set_name(self, PersonName):\r\n        self.name = PersonName", "def setName(self, name):\n        self.name = name\n        return self", "def name(self, name: str):\n        if name is None:\n            raise ValueError(\"Invalid value for `name`, must not be `None`\")\n\n        self._name = name", "def set_name(self, name):\n        self.options['name'] = name", "def set_name(self, name):\n        return self.set_meta('name', name)", "def name(self, name: \"str\"):\n        if name is None:\n            raise ValueError(\"Invalid value for `name`, must not be `None`\")\n        self._attrs[\"name\"] = name", "def name(self, name: \"str\"):\n        if name is None:\n            raise ValueError(\"Invalid value for `name`, must not be `None`\")\n        self._attrs[\"name\"] = name", "def name(self, name: \"str\"):\n        if name is None:\n            raise ValueError(\"Invalid value for `name`, must not be `None`\")\n        self._attrs[\"name\"] = name", "def update_name(self, new_name):\r\n        self.__name = new_name", "def update_name(self, new_name):\r\n        self.__name = new_name", "def updateName(self,name):\n        self.name = name", "def name(self, name: str) -> None:\n        self._name = name", "def set_name(self, name):\n\n\t\tif name is not None and not isinstance(name, str):\n\t\t\traise SDKException(Constants.DATA_TYPE_ERROR, 'KEY: name EXPECTED TYPE: str', None, None)\n\t\t\n\t\tself.__name = name\n\t\tself.__key_modified['name'] = 1", "def name(self, name: str):\n        if name is None:\n            raise ValueError(\"Invalid value for `name`, must not be `None`\")  # noqa: E501\n        \n        self._name = name", "def name(self, name: str):\n        if name is None:\n            raise ValueError(\"Invalid value for `name`, must not be `None`\")  # noqa: E501\n        \n        self._name = name", "def name(self, name):\n        if name is None:\n            raise ValueError(\"Invalid value for `name`, must not be `None`\")  # noqa: E501\n\n        self._name = name", "def name(self, name):\n        if name is None:\n            raise ValueError(\"Invalid value for `name`, must not be `None`\")  # noqa: E501\n\n        self._name = name", "def name(self, name):\n        if name is None:\n            raise ValueError(\"Invalid value for `name`, must not be `None`\")  # noqa: E501\n\n        self._name = name", "def name(self, name):\n        if name is None:\n            raise ValueError(\"Invalid value for `name`, must not be `None`\")  # noqa: E501\n\n        self._name = name", "def name(self, name):\n        if name is None:\n            raise ValueError(\"Invalid value for `name`, must not be `None`\")  # noqa: E501\n\n        self._name = name", "def name(self, name):\n        if name is None:\n            raise ValueError(\"Invalid value for `name`, must not be `None`\")  # noqa: E501\n\n        self._name = name", "def name(self, name):\n        if name is None:\n            raise ValueError(\"Invalid value for `name`, must not be `None`\")  # noqa: E501\n\n        self._name = name", "def name(self, name):\n        if name is None:\n            raise ValueError(\"Invalid value for `name`, must not be `None`\")  # noqa: E501\n\n        self._name = name", "def name(self, name):\n        if name is None:\n            raise ValueError(\"Invalid value for `name`, must not be `None`\")  # noqa: E501\n\n        self._name = name", "def name(self, name):\n        if name is None:\n            raise ValueError(\"Invalid value for `name`, must not be `None`\")  # noqa: E501\n\n        self._name = name", "def name(self, name):\n        if name is None:\n            raise ValueError(\"Invalid value for `name`, must not be `None`\")  # noqa: E501\n\n        self._name = name", "def name(self, name):\n        if name is None:\n            raise ValueError(\"Invalid value for `name`, must not be `None`\")  # noqa: E501\n\n        self._name = name", "def name(self, name):\n        if name is None:\n            raise ValueError(\"Invalid value for `name`, must not be `None`\")  # noqa: E501\n\n        self._name = name", "def name(self, name):\n        if name is None:\n            raise ValueError(\"Invalid value for `name`, must not be `None`\")  # noqa: E501\n\n        self._name = name", "def name(self, name):\n        if name is None:\n            raise ValueError(\"Invalid value for `name`, must not be `None`\")  # noqa: E501\n\n        self._name = name", "def name(self, name):\n        if name is None:\n            raise ValueError(\"Invalid value for `name`, must not be `None`\")  # noqa: E501\n\n        self._name = name", "def name(self, name):\n\n        self._set_field(\"name\", name.get_json())", "def name(self, name: str):\n        if name is None:\n            raise ValueError(\"Invalid value for `name`, must not be `None`\")  # noqa: E501\n\n        self._name = name", "def name(self, name: str):\n        if name is None:\n            raise ValueError(\"Invalid value for `name`, must not be `None`\")  # noqa: E501\n\n        self._name = name", "def name(self, name: str):\n        if name is None:\n            raise ValueError(\"Invalid value for `name`, must not be `None`\")  # noqa: E501\n\n        self._name = name", "def name(self, name: str):\n        if name is None:\n            raise ValueError(\"Invalid value for `name`, must not be `None`\")  # noqa: E501\n\n        self._name = name", "def name(self, name: str):\n        if name is None:\n            raise ValueError(\"Invalid value for `name`, must not be `None`\")  # noqa: E501\n\n        self._name = name", "def name(self, name: str):\n        if name is None:\n            raise ValueError(\"Invalid value for `name`, must not be `None`\")  # noqa: E501\n\n        self._name = name", "def name(self, name: str):\n        if name is None:\n            raise ValueError(\"Invalid value for `name`, must not be `None`\")  # noqa: E501\n\n        self._name = name", "def name(self, name: str):\n        if name is None:\n            raise ValueError(\"Invalid value for `name`, must not be `None`\")  # noqa: E501\n\n        self._name = name", "def name(self, name: str):\n        if name is None:\n            raise ValueError(\"Invalid value for `name`, must not be `None`\")  # noqa: E501\n\n        self._name = name", "def name(self, name: str):\n\n        self._name = name", "def name(self, name: str):\n\n        self._name = name", "def name(self, name: str):\n\n        self._name = name", "def name(self, name: str):\n\n        self._name = name", "def name(self, name: str):\n\n        self._name = name", "def name(self, name: str):\n\n        self._name = name", "def name(self, name: str):\n\n        self._name = name", "def set_name(self, name):\n        self.name = name\n        self.labels.change_name(name)", "def set_name(self,name):\n        if not isinstance(name,(str)):\n            raise TypeError('name must be string')\n        else:\n            self._name = name", "def set_name(self, newname=\"\"):\n        self.name = newname", "def name(self, name):\n        # if name is None:\n        #    raise ValueError(\"Invalid value for `name`, must not be `None`\")  # noqa: E501\n\n        self._name = name", "def name(self, name):\n        from_name = self.name\n        assert isinstance(name, str)\n        self._name = name\n        if self.has_parent():\n            self._parent_._name_changed(self, from_name)", "def rename(self, name):\n        self.name = name" ]
[ "0.759724", "0.759724", "0.75542027", "0.755328", "0.7494422", "0.7469748", "0.7469748", "0.7469748", "0.7469748", "0.7469748", "0.74372435", "0.74372435", "0.73873687", "0.7347915", "0.7347915", "0.733082", "0.73160726", "0.7291633", "0.7268685", "0.72510606", "0.7188799", "0.7188799", "0.7110063", "0.7089243", "0.70361817", "0.7026681", "0.7014402", "0.70009184", "0.70009184", "0.70009184", "0.70009184", "0.69969", "0.69969", "0.69969", "0.69969", "0.697371", "0.69474655", "0.6939438", "0.6928441", "0.69279945", "0.6907616", "0.6907616", "0.6907616", "0.6889919", "0.6889919", "0.6806459", "0.67907536", "0.67903996", "0.67895144", "0.67895144", "0.6783482", "0.6783482", "0.6783482", "0.6783482", "0.6783482", "0.6783482", "0.6783482", "0.6783482", "0.6783482", "0.6783482", "0.6783482", "0.6783482", "0.6783482", "0.6783482", "0.6783482", "0.6783482", "0.6773257", "0.6764662", "0.6764662", "0.6764662", "0.6764662", "0.6764662", "0.6764662", "0.6764662", "0.6764662", "0.6764662", "0.6760242", "0.6760242", "0.6760242", "0.6760242", "0.6760242", "0.6760242", "0.6760242", "0.67558473", "0.67515767", "0.6743406", "0.6736455", "0.6735642", "0.6733785" ]
0.0
-1
Returns the model properties as a dict
def to_dict(self): result = {} for attr, _ in six.iteritems(self.swagger_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value if issubclass(Organization, dict): for key, value in self.items(): result[key] = value return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def to_dict(self):\n        return self.properties", "def to_dict(self):\n        return self.properties", "def get_properties(self):\n        return self.properties", "def asdict(self):\n        return self._prop_dict", "def json(self):\n        rv = {\n            prop: getattr(self, prop)\n            for prop in self.__properties__\n            if prop in vars(self)\n        }\n        rv.update(self._props)\n        return rv", "def get_properties(self):\n        return self.properties", "def get_properties():", "def getProperties():", "def properties(self):\r\n        if self._properties is None:\r\n            res = self._con.get(self._url, {'f':'json'})\r\n            self._properties = PropertyMap(res)\r\n        return self._properties", "def properties(self):\r\n        if self._properties is None:\r\n            res = self._con.get(self._url, {'f':'json'})\r\n            self._properties = PropertyMap(res)\r\n        return self._properties", "def getProperties(self):\n        return self.properties", "def __properties__(self) -> dict:\r\n        parameters = [\r\n            d for d in dir(self) if (d[0] != \"_\") and (d.count(\"set\") == 0)\r\n            and (d.count(\"_c\") == 0) and (d.count(\"_f\") == 0)\r\n        ]\r\n\r\n        return self.__as_json__(parameters)", "def json_properties(self):\n        attributes = []\n        all = vars(self)\n        for var in all:\n            if var[:1] != '_':\n                attributes.append(var)\n        if isinstance(self, db.Model):\n            properties = self.properties().keys()\n            for property in properties:\n                if property[:1] != '_':\n                    attributes.append(property)\n        return attributes", "def properties(self) -> Any:\n        return pulumi.get(self, \"properties\")", "def _properties(self) -> dict[str, dict[str, str]]:\n        schema = self.schema(by_alias=False)\n        if schema.get('properties') is not None:\n            return schema.get('properties', {})\n        return schema.get('definitions', {}).get(self.__class__.__name__, {}).get('properties', {})", "def get_model_properties(self):\n        properties = {}\n\n        filename = self._get_data_filename(\"modelargs.json\")\n        with open(filename, \"r\") as f:\n            results = json.loads(f.read())\n            properties[\"image_size\"] = results.get(\"image_size\")\n            properties[\"num_classes\"] = results.get(\"num_classes\")\n            properties[\"model\"] = results.get(\"model\")\n            properties[\"name\"] = results.get(\"name\")\n            properties[\"filter_size\"] = results.get(\"filter_size\", 3)\n            properties[\"increase_factor\"] = results.get(\"increase_factor\", 0)\n            self.model = properties[\"name\"]  # regardless of the name of the folder, this will get the proper model name (i.e. <modelname>.cntk)\n\n        # optional property\n        properties[\"trainer\"] = results.get(\"trainer\", \"CNTK 2.2\")\n\n        self._ensure_model_file()\n        properties[\"size_mb\"] = round(os.path.getsize(self.model_file) / (1000 * 1000))\n\n        return properties", "def as_dict(self):\n        result = {}\n        for attr in self.__attr:\n            result[attr] = getattr(self, attr)\n        return result", "def to_dict_model(self) -> dict:\n        return dict((key, getattr(self, key)) for key in self.__mapper__.c.keys())", "def get_properties():\n    properties = dict()\n    properties['size'] = list()\n    properties['color'] = list()\n    properties['quality'] = list()\n    u = models.Size.query.all()\n    for i in u:\n        properties['size'].append(i.size_name)\n    u = models.Color.query.all()\n    for i in u:\n        properties['color'].append(i.color_name)\n    u = models.Quality.query.all()\n    for i in u:\n        properties['quality'].append(i.quality_name)\n    return make_response(jsonify(properties))", "def get_modelDict(self):\n        return self.__modelDict", "def attributes(self):\n        return dict(self.__attributes)", "def properties(self):\n        return self._properties", "def properties(self):\n        return self._properties", "def to_dict(self):\n        result = {}\n        for p in self.json_properties():\n            value = getattr(self, p)\n            if isinstance(value, datetime.datetime):\n                value = value.strftime('%s%f')[:-3]\n            result[Jsonifiable.transform_to_camelcase(p)] = value\n        return result", "def properties(self):\n        return self._props", "def properties(self):\n        pass", "def to_dict(self):\n        d = {}\n        for attr in self.__class__.attributes:\n            d[attr] = getattr(self, attr)\n        return d", "def properties_get(self):\n        return self._get('properties')", "def _collect_properties(self):\n        properties = {\n            'userid': self.user_id,\n            'title': self.get_fullname()\n        }\n        if not self.ogds_user:\n            return properties\n\n        for attribute_name in self.ogds_user_attributes:\n            value = getattr(self.ogds_user, attribute_name)\n            properties[attribute_name] = value\n        return properties", "def getPropertyDict(self):\n        \n        d = self.getChild('__properties')\n        if d:\n            return d.getDict()\n        else:\n            return {}", "def get_attributes(self):\n        return dict(self.attributes)  # return the attributes", "def get_attributes(self):\n        return dict(self.attributes)  # return the attributes", "def get_attributes(self):\n        return dict(self.attributes)  # return the attributes", "def to_dict(self, include=None):\n        _MODEL = type(self)\n        repr_dict = {}\n        if include is None:\n            include = []\n            for name, prop in _MODEL._properties.iteritems():\n                if hasattr(prop, 'public') and getattr(prop, 'public', False):\n                    include.append(name)\n\n        for name in include:\n            # check if this property is even allowed to be public\n            # or has a value set\n            if not hasattr(self, name):\n                continue\n\n            value = getattr(self, name)\n            if type(getattr(_MODEL, name)) == ndb.StructuredProperty:\n                if isinstance(value, list):\n                    items = []\n                    for item in value:\n                        items.append(item.to_dict(include=None))\n                    repr_dict[name] = items\n                else:\n                    repr_dict[name] = value.to_dict(include=None)\n            elif isinstance(value, date):\n                repr_dict[name] = value.isoformat()\n            elif isinstance(value, ndb.Key):\n                repr_dict[name] = value.urlsafe()\n            else:\n                repr_dict[name] = value\n\n        if self._key:\n            repr_dict['key'] = self.get_key_urlsafe()\n        return repr_dict", "def to_dict(self):\n        properties = {}\n        for k in self.__dict__:\n            if k == 'POSSIBLE_METHODS':\n                continue\n            if k == 'keysamplers':\n                properties[k] = [i.to_dict() for i in self.__dict__[k] if hasattr(i,'to_dict')]\n            elif k in {'pooler'}:\n                properties[k] = self.__dict__[k].to_dict()\n            else:\n                properties[k] = deepcopy(self.__dict__[k])\n        return properties", "def get_all_properties(cls):\n        return ['key', 'id'] + _.keys(cls._properties)", "def get_properties(self):\n        return self.name, self.author, self.description, self.fmu_type, self.version, self.guid, self.tool, self.numStates", "def properties(self):\n\n        return self._properties", "def ToDict(self):\n        atributes_dictionary = {}\n        for key, value in self.__dict__.iteritems():\n            atributes_dictionary[key] = value\n        return atributes_dictionary", "def properties(self):", "def properties(self):", "def properties(self):", "def modelPropertiesDictionary(sql_row_list):\n    \n    properties_dictionary = \\\n        {\n            \"id\": sql_row_list[0],\n            \"name\": sql_row_list[1],\n            \"last_deploy_timestamp\": sql_row_list[2],\n            \"active_version\": sql_row_list[3],\n            \"build_id\": sql_row_list[4]\n        };\n\n    return properties_dictionary;", "def as_dict(self):\n        data = dict()\n        for name in self.fields:\n            val = getattr(self, name)\n            if isinstance(val, Model):\n                val = val.as_dict()\n            elif isinstance(val, list) and val and isinstance(val[0], Model):\n                val = [sub.as_dict() for sub in val]\n            data[name] = val\n        return data", "def to_dict(self):\n        if self._dict is not None:\n            return self._dict\n\n        result = {}\n        for key in self.ATTRIBUTES:\n            value = getattr(self, key)\n            if value:\n                result[key] = value\n        self._dict = result\n        return result", "def properties(self) -> Optional[Mapping[str, str]]:\n        return pulumi.get(self, \"properties\")", "def properties(self) -> Optional[Mapping[str, str]]:\n        return pulumi.get(self, \"properties\")", "def to_dict(self):\n        _dict = {}\n        for f in self._meta.fields:\n            if f.name == 'created':\n                _dict[f.name] = str(f.value_from_object(self))\n            else:\n                _dict[f.name] = f.value_from_object(self)\n\n        return _dict", "def to_dict(self):\r\n        return self.__dict__", "def properties(self):\n        return None", "def properties(self):\n        return None", "def to_dict(self):\n        return attr.asdict(self)", "def as_dict(self):\n        return self.__dict__", "def _get_model_state(self) -> dict:\n        return dict(model=self.model, kwargs=self._model_kwargs)", "def dictify(self):\n        return {\n            \"name\" : self.name,\n            \"lastname\" : self.lastname,\n            \"phone\" : self.phone,\n            \"email\" : self.email\n        }", "def to_dict(self):\n        return {\n            \"id\": self.id,\n            \"name\": self.name,\n            # \"created_by\": self.created_by,\n            # \"created_on\": self.created_on,\n            # \"modified_by\": self.modified_by,\n            # \"modified_on\": self.modified_on\n        }", "def properties(self):\r\n        return resources.Properties(self)", "def attributes(self):\n        params = self.model.param_array\n        return {'parameters': params}", "def properties(self, pk):\n        return JsonResponse(self._get_properties(pk))", "def to_dict(self):\n        return vars(self)", "def to_dict(self):\n\n        # Check if is the right instance.\n        if isinstance(self, db.Model):\n            # construct a dictionary from column names and values.\n            dict_representation = {c.name: getattr(self, c.name) for c in self.__table__.columns}\n            return dict_representation\n        else:\n            raise AttributeError(type(self).__name__ + \" is not instance of \" + db.Model.__name__)", "def bson_properties(self):\n        return []", "def to_dict(self):\n        return {\n            \"id\": self.id,\n            \"name\": self.name\n        }", "def get_dict(self):\n        return", "def to_dict(self):\n        return to_dict(self.__dict__)", "def to_json(self):\n        properties = self.to_dict()\n        if isinstance(self, db.Model):\n            properties['id'] = unicode(self.key().id())\n        return json.dumps(properties)", "def to_dict(self):", "def to_dict(self):\n        return self.__dict__", "def to_dict(self):\n        return self.__dict__", "def to_dict(self):\n        return self.__dict__", "def to_dict(self):\n        return self.__dict__", "def to_dict(self):\n        return self.__dict__", "def to_dict(self):\n        return self.__dict__", "def get_attributes(self) -> Dict[str, str]:\n        pass", "def config(self) -> ModelConfigDict:\n        return self.config_obj.to_dict()", "def properties(self):\n        return self.properties_with_uid[1:]", "def to_dict(self):\n        properties = {}\n        for k in self.__dict__:\n            if k in {'idsSoFar'}:\n                continue\n            else:\n                properties[k] = deepcopy(self.__dict__[k])\n        return properties", "def to_dict(self):\n        properties = {}\n        for k in self.__dict__:\n            if k in {'idsSoFar'}:\n                continue\n            else:\n                properties[k] = deepcopy(self.__dict__[k])\n        return properties", "def to_dict(self):\n        properties = {}\n        for k in self.__dict__:\n            if k in {'idsSoFar'}:\n                continue\n            else:\n                properties[k] = deepcopy(self.__dict__[k])\n        return properties", "def serialise(self):\n        return {\n            'id': self.id,\n            'category_id': self.category_id,\n            'name': self.name,\n            'description': self.description,\n            'quantity': self.quantity,\n            'price': self.price,\n            'user_id': self.user_id\n        }", "def getPropertiesAll():", "def get_all_properties(self) -> dict:\n        return self._request(\n            \"post\",\n            URL,\n            json=attr.asdict(\n                Body(\"getAllProperties\", API_VERSION),\n                filter=attr.filters.exclude(attr.fields(Body).params),\n            ),\n        )", "def model_info(self):\n        if not self._model_info:\n            self._load_model_info()\n        try:\n            data = json.loads(self._model_info)\n        except (TypeError, ValueError):\n            data = {}\n        return data", "def to_dict(self):\n        return {\n            'name': self.get_name(),\n            'description': self.get_description()\n        }", "def serialize(self):\n        return {\n            'id' : self.id,\n            'name' : self.name,\n        }", "def serialize(self):\n        return {\n            'id' : self.id,\n            'name' : self.name,\n        }", "def as_dict(self):\n        return self.__dict__", "def to_dict(self):\r\n\r\n        return {\r\n            'product_id': self.product_id,\r\n            'product_name': self.product_name\r\n        }", "def serialize(self):\n        return {\n            'name' : self.name,\n            'id' : self.id,\n        }", "def asdict(self):\n        return attr.asdict(self)", "def to_dict(self) -> dict:", "def getDict(self):\n        res = {}\n        for attr, value in self.__dict__.iteritems():\n            if type(attr) is IntType or type(attr) is StringType or type(attr) is LongType or type(attr) is UnicodeType:\n                res[attr] = value\n            elif isinstance(attr, datetime.datetime):\n                res[attr] = value.isoformat('-')\n        \n        return res", "def attributes(self):\n        return self.__dict.keys()", "def dict(self):\n        return self.__dict__", "def dict(self):\n        return self.__dict__", "def dict(self):\n        return self.__dict__", "def dict(self):\n        return self.__dict__", "def dict(self):\n        return self.__dict__", "def dict(self):\n        return self.__dict__", "def dict(self):\n        return self.__dict__", "def as_dict(self):\n        return {c.key: getattr(self, c.key)\n                for c in inspect(self).mapper.column_attrs}" ]
[ "0.7751993", "0.7751993", "0.73391134", "0.7334895", "0.7297356", "0.727818", "0.7159078", "0.71578115", "0.71494967", "0.71494967", "0.71283495", "0.71275014", "0.7122587", "0.71079814", "0.7060394", "0.7043251", "0.7034103", "0.70233124", "0.69635814", "0.69586295", "0.690053", "0.6881568", "0.6881568", "0.6857664", "0.68415916", "0.68122137", "0.680096", "0.67914945", "0.6757063", "0.6753585", "0.6741746", "0.6741746", "0.6741746", "0.6735291", "0.67126125", "0.6697801", "0.6695801", "0.6689893", "0.6680752", "0.66802895", "0.66802895", "0.66802895", "0.66547817", "0.66495687", "0.6633999", "0.6619567", "0.6619567", "0.66156983", "0.66049474", "0.6590706", "0.6590706", "0.6590206", "0.6587873", "0.65861845", "0.65822417", "0.65794736", "0.65792733", "0.657747", "0.6571183", "0.65662557", "0.65637356", "0.6539919", "0.65396816", "0.65283066", "0.65252614", "0.6513477", "0.65098846", "0.65077883", "0.65077883", "0.65077883", "0.65077883", "0.65077883", "0.65077883", "0.6507418", "0.6505772", "0.65015876", "0.64951885", "0.64951885", "0.64951885", "0.64857763", "0.6474329", "0.6469453", "0.64684683", "0.6453606", "0.6453024", "0.6453024", "0.6430734", "0.6429058", "0.6426903", "0.64215595", "0.64201874", "0.6417152", "0.6414739", "0.6411571", "0.6411571", "0.6411571", "0.6411571", "0.6411571", "0.6411571", "0.6411571", "0.64035517" ]
0.0
-1
Returns the string representation of the model
def to_str(self): return pprint.pformat(self.to_dict())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __str__(self):\n        return super().__str__() + self.model.__str__()", "def __str__(self) -> str:\n        # noinspection PyUnresolvedReferences\n        opts = self._meta\n        if self.name_field:\n            result = str(opts.get_field(self.name_field).value_from_object(self))\n        else:\n            model_fields = get_model_fields(\n                opts.model,\n                foreign=False,\n                m2m=False,\n                exclude=self.exclude_from_str\n            )\n            # TODO: replace the above with the below to remove the get_model_fields call:\n            # model_fields = [\n            #     f for f in opts.get_fields()\n            #     if f.concrete\n            #     and not (f.primary_key or f.is_relation or f.name in self.exclude_from_str)\n            # ]\n            result = \" \".join(\n                [\n                    str(fld.value_from_object(self))\n                    for fld in model_fields\n                    if fld.value_from_object(self)\n                ]\n            )\n        return result.strip() or super().__str__()", "def __str__(self):\n        return '%s%s' % (self.name, ' - %s' % self.model if self.model else '')", "def __str__(self):\n        model = self._meta.verbose_name.title()\n        return f\"{model:s}: {self.name:s}\"", "def __str__(self):\n        model = self._meta.verbose_name.title()\n        return f\"{model:s}: {self.name:s}\"", "def __repr__(self):\n\n        mod = f\"{self.__class__.__name__} Model\"\n        try:\n            mod += f': {self.filename}'\n        except AttributeError:\n            pass\n        s = [mod]\n        for name, v in self.metadata.items():\n            s += [f\"{name:16} : {v}\"]\n        return '\\n'.join(s)", "def __str__(self):\n        \n        res = ['>>> Model %(model_name)s <<<']\n        res.append('')\n        res.append('Independent parameters:')\n        res.append('-----------------------')\n        res.append('')", "def __str__(self):\n        return \"DataModel(name={},attributes={},description={})\".format(\n            self.name, {a.name: str(a) for a in self.attributes}, self.description\n        )", "def model_info(self) -> str:\n        return self._model_info(self.model).decode(\"utf-8\")", "def __str__(self):\n        return str(self.serialize())", "def __str__ (self) :\n\n        return self.as_string()", "def __str__(self):\n\n        return self.toString()", "def __str__(self):\n        msg = [\n            f'{self.model=}',\n            f'{self.field=}',\n            f'{self.fxx=}',\n            f'{self.date=}',\n            f'{self.priority=}',\n        ]\n        return '\\n'.join(msg)", "def __str__(self):\n        model = self._meta.verbose_name.title()\n        title = self.extended_object.get_title()\n        return f\"{model:s}: {title:s}\"", "def __repr__(self):\n        return grid_search_to_str(self.model)", "def __str__(self):\n        return self.toString()", "def __str__(self):\n        return str(self.__dict__)", "def __str__(self):\n        return str(self.__dict__)", "def to_representation(self) -> str:\n        raise NotImplementedError()", "def __str__(self):\n        return str(self.obj)", "def __str__(self):\n        return self.make_flat()", "def dump_model(self):", "def __str__(self):\n        return str(self.__dict__['_obj'])", "def __str__(self) -> str:\n        model_str = [\"\\nModel info:\\n\", \"  Unimodal encoder:\\n\"]\n\n        for modality in range(self.num_modalities):\n            model_str.append(f\"    ({modality + 1}) {self.unimodal_encoder[modality]}\")\n\n        model_str.append(\"\\n\\n  Unimodal decoder:\\n\")\n        for modality in range(self.num_modalities):\n            model_str.append(f\"    ({modality + 1}) {self.unimodal_decoder[modality]}\")\n\n        if self.multimodal_decoder is not None:\n            model_str.append(\"\\n\\n  Multimodal decoder:\\n\")\n            model_str.append(f\"    {self.multimodal_decoder}\")\n\n        return \"\".join(model_str)", "def __repr__(self):\n        s = 'text model name: ' + self.name + '\\n'\n        s += '  number of words: ' + str(len(self.words)) + '\\n'\n        s += '  number of word lengths: ' + str(len(self.word_lengths)) + '\\n'\n        s += '  number of stems: ' + str(len(self.stems)) + '\\n'\n        s += '  number of sentence lengths: ' + str(len(self.sentence_lengths)) + '\\n'\n        s += '  most common words: ' + str(self.common_word) + '\\n'\n\n        return s", "def to_string(self):\r\n        return self.__str__()", "def __repr__(self):\n        return '<ModelSignature(model_name=%r)>' % self.model_name", "def __repr__(self):\n        return '<ModelSignature(model_name=%r)>' % self.model_name", "def __str__(self):\n        return str(self.get_data())", "def __str__(self):\n        return f\"model {self._name}\"", "def __str__(self):\n\n        return self.raw_field", "def __repr__(self):\n        \n        s = 'text model name: ' + self.name + '\\n' \n        s += '  number of words: ' + str(len(self.words)) + '\\n'\n        s += '  number of word lengths: ' + str(len(self.word_lengths)) + '\\n'\n        s += '  number of sentence lengths: ' + str(len(self.sentence_lengths)) + '\\n'\n        s += '  number of word stems: ' + str(len(self.stems)) + '\\n'\n        s += '  number of commas counts: ' + str(len(self.commas_per_sentence)) + '\\n'\n        return s", "def serialize(self):\n\n\t\treturn str(self)", "def __str__(self):\n        return self.get_str()", "def serialize(self):\n\n        return str(self)", "def __str__(self) -> str:\n        if self.name_field:\n            return str(getattr(self, self.name_field))\n        # noinspection PyUnresolvedReferences\n        data = [\n            # Collect the string representations of related objects.\n            # getattr(self, fk_field.attname) and\n            # fk_field.value_from_object(self) would only return the primary\n            # key of the related object.\n            str(getattr(self, fk_field.name))\n            for fk_field in get_model_fields(\n                self._meta.model, base=False, foreign=True, m2m=False\n            )\n            if not fk_field.null\n        ]\n        if len(data) < 2:\n            # Cannot build a more meaningful representation than the default.\n            return super().__str__()\n        else:\n            template = \"{}\" + \" ({})\" * (len(data) - 1)\n            return template.format(*data)", "def __str__(self):\n        return self.s", "def __str__(self):\n        return self.__repr__()", "def __str__(self):\n        return self.__repr__()", "def __str__(self):\n        return self.__repr__()", "def __str__(self):\n        return self.__repr__()", "def __str__(self):\n        return self.__repr__()", "def __repr__(self):\n\n        # info string\n        info = self.model.__repr__()\n        info += \"\\n=========================\\n\"\n        info += f\"Train data length:\\t\\t{ len(self.train_dataset) }\\n\"\n        info += f\"Eval sata length:\\t\\t{ len(self.eval_dataset) }\\n\"\n        info += f\"Optimizer:\\t\\t\\t\\t{ str(self.optimizer).split('(')[0] }\\n\"\n        info += f\"Criterion:\\t\\t\\t\\t{ str(self.criterion).split('(')[0] }\\n\"\n        info += f\"Training Environment:\\t{ self.device.type }\\n\"\n        info += f\"Show information:\\t\\t{ 'True' if self.info else 'False' }\\n\"\n        info += \"=========================\\n\"\n\n        return info", "def __repr__(self):\n        s = 'text model name: ' + self.name + '\\n'\n        s += '  number of words: ' + str(len(self.words)) + '\\n'\n        s += '  number of word lengths: ' + str(len(self.word_lengths)) + '\\n'\n        s += '  number of stems: ' + str(len(self.stems)) + '\\n'\n        s += '  number of sentence lengths: ' + str(len(self.sentence_lengths))\\\n            + '\\n'\n        s += '  number of punctuation types: ' + str(len(self.punctuation))\n        return s", "def dumps(self, indent=0):\n        outstr = \" \"*indent + \"MewloDbModel object '{0}' attribute values:\\n\".format(self.__class__.__name__)\n        public_props = (name for name in dir(object) if not name.startswith('_'))\n        for name in public_props:\n            outstr += \" \"*indent + \"{0}: {1}\\n\".format(name, str(getattr(self,name)))\n        return outstr", "def __str__(self):\n        model = self._meta.verbose_name.title()\n        title = self.title or str(_(\"Empty title\"))\n\n        return f\"{model:s}: {title:s}\"", "def __repr__(self):\n        return self.to_str()", "def __repr__(self):\n        return self.to_str()", "def __repr__(self):\n        return self.to_str()", "def __repr__(self):\n        return self.to_str()", "def __repr__(self):\n        return self.to_str()", "def __repr__(self):\n        return self.to_str()", "def __repr__(self):\n        return self.to_str()", "def __repr__(self):\n        return self.to_str()", "def __repr__(self):\n        return self.to_str()", "def __repr__(self):\n        return self.to_str()", "def __repr__(self):\n        return self.to_str()", "def __repr__(self):\n        return self.to_str()", "def __repr__(self):\n        return self.to_str()", "def __repr__(self):\n        return self.to_str()", "def __repr__(self):\n        return self.to_str()", "def __repr__(self):\n        return self.to_str()", "def __repr__(self):\n        return self.to_str()", "def __repr__(self):\n        return self.to_str()", "def __repr__(self):\n        return self.to_str()", "def __repr__(self):\n        return self.to_str()", "def __repr__(self):\n        return self.to_str()", "def __repr__(self):\n        return self.to_str()", "def __repr__(self):\n        return self.to_str()", "def __repr__(self):\n        return self.to_str()", "def __repr__(self):\n        return self.to_str()", "def __repr__(self):\n        return self.to_str()", "def __repr__(self):\n        return self.to_str()", "def __repr__(self):\n        return self.to_str()", "def __repr__(self):\n        return self.to_str()", "def __repr__(self):\n        return self.to_str()", "def __repr__(self):\n        return self.to_str()", "def __repr__(self):\n        return self.to_str()", "def __repr__(self):\n        return self.to_str()", "def __repr__(self):\n        return self.to_str()", "def __repr__(self):\n        return self.to_str()", "def __repr__(self):\n        return self.to_str()", "def __repr__(self):\n        return self.to_str()", "def __repr__(self):\n        return self.to_str()", "def __repr__(self):\n        return self.to_str()", "def __repr__(self):\n        return self.to_str()", "def __repr__(self):\n        return self.to_str()", "def __repr__(self):\n        return self.to_str()", "def __repr__(self):\n        return self.to_str()", "def __repr__(self):\n        return self.to_str()", "def __repr__(self):\n        return self.to_str()", "def __repr__(self):\n        return self.to_str()", "def __repr__(self):\n        return self.to_str()", "def __repr__(self):\n        return self.to_str()", "def __repr__(self):\n        return self.to_str()", "def __repr__(self):\n        return self.to_str()", "def __repr__(self):\n        return self.to_str()", "def __repr__(self):\n        return self.to_str()", "def __repr__(self):\n        return self.to_str()", "def __repr__(self):\n        return self.to_str()" ]
[ "0.85856134", "0.7814518", "0.77898884", "0.7751367", "0.7751367", "0.7712228", "0.76981676", "0.76700574", "0.7651133", "0.7597206", "0.75800353", "0.7568254", "0.7538184", "0.75228703", "0.7515832", "0.7498764", "0.74850684", "0.74850684", "0.7467648", "0.74488163", "0.7442643", "0.74416703", "0.7433768", "0.7411771", "0.7405439", "0.7379557", "0.7361716", "0.7361716", "0.732774", "0.7325511", "0.732528", "0.73097324", "0.73078936", "0.73001266", "0.7296789", "0.7292791", "0.7289445", "0.7287187", "0.7287187", "0.7287187", "0.7287187", "0.7287187", "0.7279803", "0.7261615", "0.7250399", "0.7244789", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068" ]
0.0
-1
For `print` and `pprint`
def __repr__(self):
    return self.to_str()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pprint(*args, **kwargs):\n if PRINTING:\n print(*args, **kwargs)", "def print_out():\n pass", "def custom_print(*objects):\n print(*objects, sep=OFS, end=ORS)", "def _print(self, *args):\n return _ida_hexrays.vd_printer_t__print(self, *args)", "def _printable(self):\n pass", "def _print_custom(self):\n pass", "def pypprint(*args, **kwargs): # type: ignore\n from typing import Iterable\n\n if len(args) != 1:\n print(*args, **kwargs)\n return\n x = args[0]\n if isinstance(x, dict):\n for k, v in x.items():\n print(f\"{k}:\", v, **kwargs)\n elif isinstance(x, Iterable) and not isinstance(x, str):\n for i in x:\n print(i, **kwargs)\n else:\n print(x, **kwargs)", "def pprint(self):\n # just here for defining the interface; work is done in subclasses\n pass", "def print(*args, **kwargs):\n with P_LOCK:\n __builtins__.print(*args, **kwargs)", "def print(self):\n # Your implementation here", "def p(value):\n pp.pprint(value)", "def static_print(*args, __p=print, **kwargs):\n __p(*args, **kwargs)", "def print(self, *args, **kwargs):\n print(*args, **kwargs)", "def pprint(self):\n print(self.pprint_str())", "def pprint(obj):\n for argname in sorted([x for x in dir(obj) if not x.startswith('__')]):\n # Skip callables\n if hasattr(getattr(obj, argname), '__call__'):\n continue\n print(\"{} : {}\".format(argname, getattr(obj, argname)))", "def print_(self, s: str) -> None:", "def my_pprint(obj, intend = 0):\n if isinstance(obj, dict):\n for key, value in obj.items():\n print(intend*\" \"+str(key)+\" : \")\n my_pprint(value, intend = intend + 4)\n print()\n elif isinstance(obj, list):\n for value in obj:\n my_pprint(value, intend = intend + 4)\n print()\n elif isinstance(obj, bytes):\n print(\"<binary data>\")\n \n else:\n try:\n print(intend*\" \"+str(obj))\n except UnicodeDecodeError:\n print(intend*\" \"\"<?>\")", "def test_print(chikin):\n chikin.print()", "def _print(self, text):\n\t\tif self.verbose:\n\t\t\tprint text", "def out(*args):\r\n print(*args)", "def __pprint(object, stream=None, indent=1, width=80, depth=None):\n printer = PrettyPrinterExt(\n stream=stream, indent=indent, width=width, depth=depth)\n printer.pprint(object)", "def DumpPprint(data):\n #NOTE(g): Import is done here, instead of the top of the file, to not require this module if it is not used\n import pprint\n \n text = pprint.pformat(data)\n \n return text", "def repl_print_statements():\n pass", "def test_03_pass_print(self):\n print('Hello World!')", "def p(self):\n self.printstdout = True", "def print(*args, **kwargs):\n new_args = []\n for arg in args:\n if builtins.isinstance(arg, models.Point):\n new_args.append(\"({0}, {1})\".format(arg.x, arg.y))\n else:\n new_args.append(arg)\n\n builtins.print(*new_args, **kwargs)", "def real_print(*args, **kwargs):\n\n kwargs.setdefault('file', real_stdout)\n _python_print_function(*args, **kwargs)", "def to_print_out(self):\n self.error_throw('output')\n\n if self.rank_method == methods_of_ranking[3]: #'diversified_ranking'\n self.output_div('print')\n else:\n self.output('print')", "def debug_print(debug_data):\n if DEBUG_MODE == \"true\":\n pp.pprint(debug_data)", "def print(self):\r\n self.print_avec_separateur()", "def pprint(self):\r\n for i in self.items():\r\n print '%s => %r'%i", "def pprint(self, parameter_s=''):\n ptformatter = self.shell.display_formatter.formatters['text/plain']\n ptformatter.pprint = bool(1 - ptformatter.pprint)\n print('Pretty printing has been turned',\n ['OFF','ON'][ptformatter.pprint])", "def print(self):\n print(self.pretty_str())", "def 
test_print4(self):\n writer = StringIO()\n collatz_print(writer, 1, 1, 1)\n self.assertEqual(writer.getvalue(), \"1 1 1\\n\")", "def eprint(*args, **kwargs):\n\tprint(*args, file=sys.stderr, **kwargs)", "def _print(self, *args, **kwargs) -> None:\n # Only print in verbose mode\n if self._verbose:\n arglist = list(args)\n arglist[0] = f\"[buddy-{self._experiment_name}] {args[0]}\"\n print(*arglist, **kwargs)", "def use_pypprint_for_implicit_print(self) -> None:\n if self.implicit_print is not None:\n self.implicit_print.func.id = \"pypprint\" # type: ignore\n # Make sure we import it later\n self.undefined.add(\"pypprint\")", "def test_print(self):\n writer = StringIO()\n collatz_print(writer, 1, 10, 20)\n self.assertEqual(writer.getvalue(), \"1 10 20\\n\")", "def pprint(self):\n return pformat(repr(self))", "def printer(message):\n if VERBOSITY:\n pprint(message)", "def rec_print(p):\n if len(p) == 0:\n return\n t = p.pop(0)\n print t\n rec_print(p)", "def pformat(object):\r\n return PrettyPrinter().pformat(object)", "def printc(*a, **kw):\n print(*a, **kw)", "def pr(x):\n Card.print_pretty_cards(x)", "def debug_print(self, *content):\n if self.debug:\n print(*content)", "def pprint(object, stream=None):\r\n printer = PrettyPrinter(stream=stream)\r\n printer.pprint(object)", "def safe_print(*objs, errors=\"replace\"):\n\tprint(*(to_stdout(str(o), errors) for o in objs))", "def magic_Pprint(self, parameter_s=''):\n \n self.shell.outputcache.Pprint = 1 - self.shell.outputcache.Pprint\n print 'Pretty printing has been turned', \\\n ['OFF','ON'][self.shell.outputcache.Pprint]", "def print_output(tree):\n print_value(tree)\n print_tree(tree)", "def _Print(self, t):\n self.RaiseError(t, \"Print not supported\")", "def vprint(*args, **kwargs ):\n\n forceprint = False\n for key in kwargs:\n if key == \"forceprint\":\n forceprint =kwargs[key]\n \n line = ''\n if debug or forceprint : \n for arg in args:\n line += str(arg) +\" \"\n log = open(exepath + 'pyframe.log', 'a') \n log.write(line + \"\\n\")\n log.close() \n print line", "def eprint(*pargs, **kargs):\n print('\\u001b[31m', end='', file=sys.stderr)\n print(*pargs, file=sys.stderr, **kargs)\n print('\\u001b[0m', end='', file=sys.stderr)", "def printer(obj, ident=''):\n import inspect\n print ident + obj.__class__.__name__.upper()\n ident += ' '\n lists = []\n for name in dir(obj):\n elem = getattr(obj, name)\n if isinstance(elem, list) and name != u'decoded_content':\n lists.append(elem)\n elif not inspect.ismethod(elem):\n if not name.startswith('__'):\n if name == u'data' and elem:\n print ident + u'data = '\n printer(elem, ident + ' ')\n else:\n print ident + u'%s\\t= %s' % (name, getattr(obj, name))\n for l in lists:\n for i in l:\n printer(i, ident + ' ')", "def printer(obj, ident=''):\n import inspect\n print ident + obj.__class__.__name__.upper()\n ident += ' '\n lists = []\n for name in dir(obj):\n elem = getattr(obj, name)\n if isinstance(elem, list) and name != u'decoded_content':\n lists.append(elem)\n elif not inspect.ismethod(elem):\n if not name.startswith('__'):\n if name == u'data' and elem:\n print ident + u'data = '\n printer(elem, ident + ' ')\n else:\n print ident + u'%s\\t= %s' % (name, getattr(obj, name))\n for l in lists:\n for i in l:\n printer(i, ident + ' ')", "def _print(self, *args):\n return _ida_hexrays.qstring_printer_t__print(self, *args)", "def pprint(self):\n def pprintStr(node):\n s = \"(\" + str(node.value) \n for action in node.children:\n s = s + \", \" + pprintStr(node.children[action])\n s = s + \")\"\n 
return s\n\n print pprintStr(self)", "def hook_print():\n sys.stdout = PrintHook()", "def cmdPrint( self, *args):\n return self.cmd( *args, **{ 'verbose': True } )", "def print_list(self):\r\n pass", "def debugprint(obj, depth=-1, print_type=False,\r\n file=None, ids='CHAR', stop_on_name=False):\r\n if file == 'str':\r\n _file = StringIO()\r\n elif file is None:\r\n _file = sys.stdout\r\n else:\r\n _file = file\r\n done = dict()\r\n results_to_print = []\r\n order = []\r\n if isinstance(obj, gof.Variable):\r\n results_to_print.append(obj)\r\n elif isinstance(obj, gof.Apply):\r\n results_to_print.extend(obj.outputs)\r\n elif isinstance(obj, Function):\r\n results_to_print.extend(obj.maker.fgraph.outputs)\r\n order = obj.maker.fgraph.toposort()\r\n elif isinstance(obj, (list, tuple)):\r\n results_to_print.extend(obj)\r\n elif isinstance(obj, gof.FunctionGraph):\r\n results_to_print.extend(obj.outputs)\r\n order = obj.toposort()\r\n elif isinstance(obj, (int, long, float, numpy.ndarray)):\r\n print obj\r\n else:\r\n raise TypeError(\"debugprint cannot print an object of this type\", obj)\r\n for r in results_to_print:\r\n debugmode.debugprint(r, depth=depth, done=done, print_type=print_type,\r\n file=_file, order=order, ids=ids,\r\n stop_on_name=stop_on_name)\r\n if file is _file:\r\n return file\r\n elif file == 'str':\r\n return _file.getvalue()\r\n else:\r\n _file.flush()", "def _get_print_fn(file=sys.stdout):\n def _print_fn(op, xin,):\n for attr in op.attrs:\n temp = getattr(xin, attr)\n if callable(temp):\n pmsg = temp()\n else:\n pmsg = temp\n print(op.message, attr, '=', pmsg, file=file)\n return _print_fn", "def test_print1(self):\n writer = StringIO()\n collatz_print(writer, 100, 200, 125)\n self.assertEqual(writer.getvalue(), \"100 200 125\\n\")", "def printOutput(self):\n pass", "def _print(self, *args):\n return _ida_hexrays.cnumber_t__print(self, *args)", "def setPrint():\n (e,d,sr,sw) = codecs.lookup('utf-8')\n unicode_to_utf8 = sw(sys.stdout)\n sys.stdout = unicode_to_utf8", "def pr(string, verbose):\n if(verbose):\n print(string)", "def print(*args, sep=\" \"):\n pass", "def printv(self, *arg):\n if self.verbose:\n print(*arg)", "def print(self):\n\n print(self)", "def _p(self, *args, level=2, **kwargs):\n if self._verbosity >= level:\n print(*args, **kwargs)", "def test_print2(self):\n writer = StringIO()\n collatz_print(writer, 201, 210, 89)\n self.assertEqual(writer.getvalue(), \"201 210 89\\n\")", "def print_pointers(self):\n\n ### FILL IN ###", "def foo_printer(self):\n print(\"\\nHi I'm {}\".format(self.foo))", "def printed(method):\n\t\tdef wrapper(cls, *args):\n\t\t\tif cls.verbose:\n\t\t\t\treturn method(cls, *args)\n\t\treturn wrapper", "def print_me(self, tabs=0, tab=' '):\n pre = tab*tabs\n print(pre+'Producer:')\n print(pre+' produces:', self._produces)\n print(pre+' consumes:', self._consumes)\n print(pre+' transfer:', self._transfer)\n print(pre+' capacity:', self._capacity)", "def _print(cls, quad):\n\t\tprint(\"\\nLIGHT OUTPUT:\\n<<<<{}>>>>\".format(ast.literal_eval(str(cls.get_address_value(quad.result)))))\n\t\tprint(\"END\")\n\n\t\tvar = cls.get_address_value(quad.result)\n\t\tif isinstance(var, collections.Iterable):\n\t\t\tprint(\"DEEP COPY\")\n\t\t\tcls.print_queue.enqueue(copy.deepcopy(var))\n\t\telse:\n\t\t\tcls.print_queue.enqueue(var)", "def printout(*args, **kwargs):\n console_print(sys.stdout, *args, **kwargs)", "def pprint(x):\n if is_theano_object(x):\n return _gettheano().printing.pprint(x)\n else:\n return str(x)", "def PrettyPrint(self):\r\n 
print(self.data)\r\n return", "def print(self):\n self.print_avec_separateur(\" \")", "def eprint(*args, **kwargs):\n print(*args, file=sys.stderr, **kwargs)", "def eprint(*args, **kwargs):\n print(*args, file=sys.stderr, **kwargs)", "def eprint(*args, **kwargs):\n print(*args, file=sys.stderr, **kwargs)", "def eprint(*args, **kwargs):\n print(*args, file=sys.stderr, **kwargs)", "def eprint(*args, **kwargs):\n print(*args, file=sys.stderr, **kwargs)", "def eprint(*args, **kwargs):\n print(*args, file=sys.stderr, **kwargs)", "def pprint_helper(self, angle, indent):\n # just here for defining the interface; work is done in subclasses\n pass", "def _pprint(params, offset=0, printer=repr):\n # Do a multi-line justified repr:\n param_names = [p for p in params.keys() if p is not \"cost\"]\n param_names.sort()\n\n params_list = list()\n this_line_length = offset\n line_sep = ',\\n' + (1 + offset // 2) * ' '\n for i, name in enumerate(param_names):\n value = params[name]\n if isinstance(value, float):\n this_repr = '%s=%s' % (name, str(value))\n else:\n this_repr = '%s=%s' % (name, printer(value))\n if len(this_repr) > 500:\n this_repr = this_repr[:300] + '...' + this_repr[-100:]\n if i > 0:\n if (this_line_length + len(this_repr) >= 75 or '\\n' in this_repr):\n params_list.append(line_sep)\n this_line_length = len(line_sep)\n else:\n params_list.append(', ')\n this_line_length += 2\n params_list.append(this_repr)\n this_line_length += len(this_repr)\n # options = np.get_printoptions()\n # np.set_printoptions(**options)\n lines = ''.join(params_list)\n # Strip trailing space to avoid nightmare in doctests\n lines = '\\n'.join(l.rstrip(' ') for l in lines.split('\\n'))\n return lines", "def init_printing(pretty_print=True, order=None, use_unicode=None):\n if pretty_print:\n stringify_func = lambda arg: pretty(arg, order=order, use_unicode=use_unicode)\n else:\n stringify_func = sstrrepr\n\n try:\n import IPython\n\n ip = IPython.ipapi.get()\n\n if ip is not None:\n def result_display(self, arg):\n \"\"\"IPython's pretty-printer display hook.\n\n This function was adapted from:\n\n ipython/IPython/hooks.py:155\n\n \"\"\"\n if self.rc.pprint:\n out = stringify_func(arg)\n\n if '\\n' in out:\n print\n\n print out\n else:\n print repr(arg)\n\n ip.set_hook('result_display', result_display)\n return\n except ImportError:\n pass\n\n import __builtin__, sys\n\n def displayhook(arg):\n \"\"\"Python's pretty-printer display hook.\n\n This function was adapted from:\n\n http://www.python.org/dev/peps/pep-0217/\n\n \"\"\"\n if arg is not None:\n __builtin__._ = None\n print stringify_func(arg)\n __builtin__._ = arg\n\n sys.displayhook = displayhook", "def print_verbose(self) -> None:\n print(self)\n if self.meta is not None:\n print(self.meta.__repr__())", "def _print(self, *args):\n return _ida_hexrays.cinsn_t__print(self, *args)", "def my_print(self):\n if self.__size == 0:\n print(\"\")\n return\n [print(\"\") for x in range(0, self.__position[1])]\n for i in range(0, self.__size):\n [print(\" \", end=\"\") for i in range(0, self.__position[0])]\n [print(\"#\", end=\"\") for j in range(0, self.__size)]\n print(\"\")", "def sequential_print_statements():\n pass", "def print_post():\n print('| | |'),", "def print_(*args, **kwargs):\r\n fp = kwargs.pop(\"file\", sys.stdout)\r\n if fp is None:\r\n return\r\n def write(data):\r\n if not isinstance(data, basestring):\r\n data = str(data)\r\n fp.write(data)\r\n want_unicode = False\r\n sep = kwargs.pop(\"sep\", None)\r\n if sep is not None:\r\n if isinstance(sep, 
unicode):\r\n want_unicode = True\r\n elif not isinstance(sep, str):\r\n raise TypeError(\"sep must be None or a string\")\r\n end = kwargs.pop(\"end\", None)\r\n if end is not None:\r\n if isinstance(end, unicode):\r\n want_unicode = True\r\n elif not isinstance(end, str):\r\n raise TypeError(\"end must be None or a string\")\r\n if kwargs:\r\n raise TypeError(\"invalid keyword arguments to print()\")\r\n if not want_unicode:\r\n for arg in args:\r\n if isinstance(arg, unicode):\r\n want_unicode = True\r\n break\r\n if want_unicode:\r\n newline = unicode(\"\\n\")\r\n space = unicode(\" \")\r\n else:\r\n newline = \"\\n\"\r\n space = \" \"\r\n if sep is None:\r\n sep = space\r\n if end is None:\r\n end = newline\r\n for i, arg in enumerate(args):\r\n if i:\r\n write(sep)\r\n write(arg)\r\n write(end)", "def print_(*args, **kwargs):\r\n fp = kwargs.pop(\"file\", sys.stdout)\r\n if fp is None:\r\n return\r\n def write(data):\r\n if not isinstance(data, basestring):\r\n data = str(data)\r\n fp.write(data)\r\n want_unicode = False\r\n sep = kwargs.pop(\"sep\", None)\r\n if sep is not None:\r\n if isinstance(sep, unicode):\r\n want_unicode = True\r\n elif not isinstance(sep, str):\r\n raise TypeError(\"sep must be None or a string\")\r\n end = kwargs.pop(\"end\", None)\r\n if end is not None:\r\n if isinstance(end, unicode):\r\n want_unicode = True\r\n elif not isinstance(end, str):\r\n raise TypeError(\"end must be None or a string\")\r\n if kwargs:\r\n raise TypeError(\"invalid keyword arguments to print()\")\r\n if not want_unicode:\r\n for arg in args:\r\n if isinstance(arg, unicode):\r\n want_unicode = True\r\n break\r\n if want_unicode:\r\n newline = unicode(\"\\n\")\r\n space = unicode(\" \")\r\n else:\r\n newline = \"\\n\"\r\n space = \" \"\r\n if sep is None:\r\n sep = space\r\n if end is None:\r\n end = newline\r\n for i, arg in enumerate(args):\r\n if i:\r\n write(sep)\r\n write(arg)\r\n write(end)", "def debugprint(r, prefix='', depth=-1, done=None, print_type=False,\r\n file=sys.stdout, print_destroy_map=False,\r\n print_view_map=False, order=None, ids='CHAR',\r\n stop_on_name=False, prefix_child=None):\r\n if depth == 0:\r\n return\r\n\r\n if order is None:\r\n order = []\r\n\r\n if done is None:\r\n done = dict()\r\n\r\n if print_type:\r\n type_str = ' <%s>' % r.type\r\n else:\r\n type_str = ''\r\n\r\n if prefix_child is None:\r\n prefix_child = prefix\r\n\r\n def get_id_str(obj):\r\n if obj in done:\r\n id_str = done[obj]\r\n elif ids == \"id\":\r\n id_str = \"[@%s]\" % str(id(r))\r\n elif ids == \"int\":\r\n id_str = \"[@%s]\" % str(len(done))\r\n elif ids == \"CHAR\":\r\n id_str = \"[@%s]\" % char_from_number(len(done))\r\n elif ids == \"\":\r\n id_str = \"\"\r\n done[obj] = id_str\r\n return id_str\r\n\r\n if hasattr(r.owner, 'op'):\r\n # this variable is the output of computation,\r\n # so just print out the apply\r\n a = r.owner\r\n\r\n r_name = getattr(r, 'name', '')\r\n # normally if the name isn't set, it'll be None, so\r\n # r_name is None here\r\n if r_name is None:\r\n r_name = ''\r\n\r\n if print_destroy_map:\r\n destroy_map_str = str(getattr(r.owner.op, 'destroy_map', ''))\r\n else:\r\n destroy_map_str = ''\r\n\r\n if print_view_map:\r\n view_map_str = str(getattr(r.owner.op, 'view_map', ''))\r\n else:\r\n view_map_str = ''\r\n if destroy_map_str and destroy_map_str != '{}':\r\n destroy_map_str = 'd=' + destroy_map_str\r\n if view_map_str and view_map_str != '{}':\r\n view_map_str = 'v=' + view_map_str\r\n\r\n o = ''\r\n if order:\r\n o = 
str(order.index(r.owner))\r\n already_printed = a in done # get_id_str put it in the dict\r\n id_str = get_id_str(a)\r\n\r\n if len(a.outputs) == 1:\r\n print >> file, '%s%s %s%s \\'%s\\' %s %s %s' % (prefix, a.op,\r\n id_str,\r\n type_str, r_name,\r\n destroy_map_str,\r\n view_map_str,\r\n o)\r\n else:\r\n print >> file, '%s%s.%i %s%s \\'%s\\' %s %s %s' % (prefix, a.op,\r\n a.outputs.index(r),\r\n id_str, type_str,\r\n r_name,\r\n destroy_map_str,\r\n view_map_str,\r\n o)\r\n if not already_printed:\r\n if (not stop_on_name or\r\n not (hasattr(r, 'name') and r.name is not None)):\r\n new_prefix = prefix_child + ' |'\r\n new_prefix_child = prefix_child + ' |'\r\n for idx, i in enumerate(a.inputs):\r\n if idx == len(a.inputs) - 1:\r\n new_prefix_child = prefix_child + ' '\r\n\r\n debugprint(i, new_prefix, depth=depth - 1, done=done,\r\n print_type=print_type, file=file, order=order,\r\n ids=ids, stop_on_name=stop_on_name,\r\n prefix_child=new_prefix_child)\r\n else:\r\n #this is an input variable\r\n id_str = get_id_str(r)\r\n print >> file, '%s%s %s%s' % (prefix, r, id_str, type_str)\r\n\r\n return file", "def bpprint(self, out=None):\n if out is None:\n out = sys.stdout\n print(self.bpformat(), file=out)", "def vprint(expr, **settings):\n\n outstr = vsprint(expr, **settings)\n\n import builtins\n if (outstr != 'None'):\n builtins._ = outstr\n print(outstr)", "def _pprint(params, offset=0, printer=repr):\n # Do a multi-line justified repr:\n options = numpy.get_printoptions()\n numpy.set_printoptions(precision=5, threshold=64, edgeitems=2)\n params_list = list()\n this_line_length = offset\n line_sep = ',\\n' + (1 + offset // 2) * ' '\n for i, (k, v) in enumerate(sorted(params.items())):\n if isinstance(v, float):\n # use str for representing floating point numbers\n # this way we get consistent representation across\n # architectures and versions.\n this_repr = '%s=%s' % (k, str(v))\n else:\n # use repr of the rest\n this_repr = '%s=%s' % (k, printer(v))\n if len(this_repr) > 500:\n this_repr = this_repr[:300] + '...' 
+ this_repr[-100:]\n if i > 0:\n if this_line_length + len(this_repr) >= 75 or '\\n' in this_repr:\n params_list.append(line_sep)\n this_line_length = len(line_sep)\n else:\n params_list.append(', ')\n this_line_length += 2\n params_list.append(this_repr)\n this_line_length += len(this_repr)\n\n numpy.set_printoptions(**options)\n lines = ''.join(params_list)\n # Strip trailing space to avoid nightmare in doctests\n lines = '\\n'.join(l.rstrip(' ') for l in lines.split('\\n'))\n return lines", "def print_(*args, **kwargs):\n fp = kwargs.pop(\"file\", sys.stdout)\n if fp is None:\n return\n\n def write(data):\n if not isinstance(data, basestring):\n data = str(data)\n fp.write(data)\n want_unicode = False\n sep = kwargs.pop(\"sep\", None)\n if sep is not None:\n if isinstance(sep, unicode):\n want_unicode = True\n elif not isinstance(sep, str):\n raise TypeError(\"sep must be None or a string\")\n end = kwargs.pop(\"end\", None)\n if end is not None:\n if isinstance(end, unicode):\n want_unicode = True\n elif not isinstance(end, str):\n raise TypeError(\"end must be None or a string\")\n if kwargs:\n raise TypeError(\"invalid keyword arguments to print()\")\n if not want_unicode:\n for arg in args:\n if isinstance(arg, unicode):\n want_unicode = True\n break\n if want_unicode:\n newline = unicode(\"\\n\")\n space = unicode(\" \")\n else:\n newline = \"\\n\"\n space = \" \"\n if sep is None:\n sep = space\n if end is None:\n end = newline\n for i, arg in enumerate(args):\n if i:\n write(sep)\n write(arg)\n write(end)" ]
[ "0.75577617", "0.73375154", "0.6986672", "0.698475", "0.6944995", "0.692333", "0.6899106", "0.6898902", "0.68146646", "0.6806209", "0.6753795", "0.67497987", "0.6744008", "0.6700308", "0.6691256", "0.6674591", "0.6658083", "0.66091245", "0.6606931", "0.6601862", "0.6563738", "0.6561717", "0.65549695", "0.6494838", "0.6473391", "0.64491546", "0.6411177", "0.6340302", "0.6339321", "0.6335031", "0.6332035", "0.6315847", "0.631272", "0.6297732", "0.62969106", "0.6283717", "0.6279154", "0.6271603", "0.62673396", "0.6265511", "0.62629336", "0.6258366", "0.6258278", "0.62501305", "0.6248315", "0.62459755", "0.6244254", "0.6242083", "0.62393075", "0.62156516", "0.6208198", "0.62068796", "0.62062824", "0.62062824", "0.6194123", "0.6189738", "0.6183852", "0.6183035", "0.61697906", "0.61614454", "0.6160741", "0.61544997", "0.61528033", "0.6150831", "0.6147288", "0.61380607", "0.613793", "0.61300766", "0.61278135", "0.6125416", "0.6114217", "0.61126333", "0.6100682", "0.60998785", "0.6096818", "0.6081694", "0.6076982", "0.6072701", "0.6060028", "0.60581726", "0.6055044", "0.6055044", "0.6055044", "0.6055044", "0.6055044", "0.6055044", "0.6043662", "0.6037599", "0.60336643", "0.6030174", "0.60290223", "0.60242903", "0.6016989", "0.6004274", "0.60005474", "0.60005474", "0.60003483", "0.599558", "0.59923434", "0.5979316", "0.59777945" ]
0.0
-1
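Note (illustration, not a dataset row): the positive document for this query is the delegating-__repr__ pattern, where a single to_str() builds the canonical string form and __repr__ reuses it. A minimal runnable sketch; the Report class and its fields are assumptions, not from the dataset:

    class Report:
        def __init__(self, title, rows):
            self.title = title
            self.rows = rows

        def to_str(self):
            # One canonical string form, reused by __repr__ (and __str__ if desired).
            return f"Report(title={self.title!r}, rows={self.rows})"

        def __repr__(self):
            return self.to_str()

    print(repr(Report("daily", 42)))  # Report(title='daily', rows=42)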
Returns true if both objects are equal
def __eq__(self, other):
    if not isinstance(other, Organization):
        return False
    return self.__dict__ == other.__dict__
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __eq__(self, other):\n return are_equal(self, other)", "def __eq__(self, other):\n return are_equal(self, other)", "def __eq__(self,other):\n try: return self.object==other.object and isinstance(self,type(other))\n except: return False", "def __eq__(self, other):\n if isinstance(self, other.__class__):\n return self.__dict__ == other.__dict__\n return False", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def 
__eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n if isinstance(other, self.__class__):\n return self.__dict__ == other.__dict__\n return False", "def __eq__(self, other):\n if isinstance(other, self.__class__):\n return self.__dict__ == other.__dict__\n return False", "def __eq__(self, other):\r\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n # Ensure same class and values match\n if isinstance(other, self.__class__):\n return self.__dict__ == other.__dict__\n else:\n return False", "def is_equal(self, a, b):\n return a is b", "def is_equal(self, a, b):\n return a == b", "def __eq__(self, other):\n return self is other", "def __eq__(self, other):\n return self is other", "def __eq__(self, other):\r\n if isinstance(other, self.__class__):\r\n return self.__dict__ == other.__dict__\r\n else:\r\n return False", "def is_equal(o1: object, o2: object) -> bool:\n if o1 is None and o2 is None:\n return True\n if o1 is None:\n return False\n return o1 == o2", "def __eq__(self,other):\n return self is other", "def is_equal(self, a, b):\n return a.X[0] == b.X[0]", "def __eq__(self, other):\n return type(self) == type(other) and self.id == other.id", "def __eq__(self, other) -> bool:\n if json.dumps(self.data,sort_keys=True) == json.dumps(other.data,sort_keys=True):\n return True\n else:\n return False", "def __eq__(self, other):\n if not isinstance(other, Single2HaObject):\n return False\n\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other", "def __eq__(self, other):\n if isinstance(other, self.__class__):\n return self.__hash__() == other.__hash__()\n return False", "def __eq__(self, other):\n if self.__class__ != other.__class__:\n return False\n if self.primary != other.primary:\n return False\n return True", "def __eq__(self, other) -> bool:\n if other is None:\n return False\n return self.__hash__() == other.__hash__()", "def __eq__(self, other):\n if not isinstance(other, ObjectInfo):\n return False\n\n return self.__dict__ == other.__dict__", "def __eq__(self: _TT, other: object) -> bool:\n return self.eq(other) # type: ignore", "def __eq__(self, other):\n return id(self) == id(other)", "def __eq__(self, other) -> bool:\n return type(self) == type(other) and \\\n self._id == other.id and \\\n self.code == other.code and \\\n self.name == other.name and \\\n self.gender == other.gender and \\\n self.date_of_birth == other.date_of_birth", "def equals(self, other): # -> bool:\n ...", "def equals(self, obj: object) -> bool:\n ...", "def __eq__(self, other):\n for attr in self._attrs_to_save:\n try:\n if getattr(self, attr) != getattr(other, attr):\n return False\n except AttributeError:\n return False\n return True", "def __eq__(self, other):\n if type(other) is type(self):\n return (self.x == other.x and self.y == other.y and self.z == other.z)\n return False", "def __eq__(self, other):\n if isinstance(other, self.__class__):\n return self.x == other.x and self.y == other.y\n return False", "def __eq__(self, other: object) -> bool:\n if not isinstance(other, self.__class__):\n return NotImplemented\n\n return (\n self.name,\n self.submit_at,\n self.subreddit,\n self.title,\n self.body_template,\n ) == (\n other.name,\n other.submit_at,\n other.subreddit,\n other.title,\n other.body_template,\n )", "def __eq__(self, other):\n # Check that we share 
the same class as this object\n if not isinstance(other, type(self)):\n return False\n\n return hash(self) == hash(other)", "def __eq__(self, other):\n if not isinstance(other, PreviewObjectAutofill):\n return False\n\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return equal(self, other)", "def __eq__(self, other: Any) -> bool:\n return self.__class__ is other.__class__ and self.identifier == other.identifier", "def __eq__(self, other):\n return self.__id == other.get_id()", "def __eq__ (self, other):\n if type(self) == type(other):\n return self._m == other._m\n else:\n return False", "def __eq__(self, other):\n if not isinstance(other, Referent):\n return False\n\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.properties == other.properties", "def __eq__(self, other):\n return self.items() == other.items()", "def __eq__(self, other):\n return hash(self) == hash(other)", "def __eq__(self, other):\n return hash(self) == hash(other)", "def __eq__(self, other):\n return hash(self) == hash(other)", "def __eq__(self, other):\n return self.x == other.x and self.y == other.y", "def __eq__(self, other):\n\n if self is other:\n return True\n return hash(self) == hash(other)", "def __eq__(self, other):\n if other._field1 == self._field1:\n return True\n return False", "def same_as(self, other):\n return super().__eq__(other)", "def __eq__(self, other):\n try:\n return other and \\\n self.id == other.id\n\n except AttributeError:\n return False", "def __eq__(self, other):\r\n\t\treturn self._to_pylist() == other._to_pylist()", "def __eq__(self, other):\n if not isinstance(other, Fiddle):\n return False\n\n return self.__dict__ == other.__dict__" ]
[ "0.8088132", "0.8088132", "0.8054589", "0.7982687", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.7961088", "0.7961088", "0.79433626", "0.79303336", "0.7926563", "0.7897525", "0.78826123", "0.78826123", "0.78806067", "0.7872423", "0.7868354", "0.78668815", "0.7825702", "0.7819993", "0.78162885", "0.78078854", "0.78068274", "0.7796298", "0.7794721", "0.7784825", "0.77790844", "0.7769397", "0.77534705", "0.7746211", "0.7741107", "0.77282816", "0.7725766", "0.7719537", "0.770273", "0.7685999", "0.7677552", "0.76739407", "0.7664857", "0.76557016", "0.7655046", "0.76282835", "0.7625795", "0.76242626", "0.76237214", "0.76237214", "0.76237214", "0.7617347", "0.7600536", "0.7599156", "0.7595863", "0.75945824", "0.7594092", "0.75899327" ]
0.0
-1
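Note (illustration, not a dataset row): the positive document here is dict-based value equality guarded by an isinstance check. A minimal runnable sketch with an assumed constructor:

    class Organization:
        def __init__(self, name):
            self.name = name

        def __eq__(self, other):
            # Only compare against the same type; otherwise the objects differ.
            if not isinstance(other, Organization):
                return False
            return self.__dict__ == other.__dict__

    assert Organization("acme") == Organization("acme")
    assert Organization("acme") != Organization("corp")

One design consequence worth knowing: defining __eq__ without __hash__ sets __hash__ to None, so such instances can no longer be used in sets or as dict keys.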
Returns true if both objects are not equal
def __ne__(self, other):
    return not self == other
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __ne__(self, other: object) -> bool:\n if self.__eq__(other):\n return False\n return True", "def __ne__(self, other: object) -> bool:\n return not self.__eq__(other)", "def __ne__(self, other) -> bool:\n return not self.__eq__(other)", "def __eq__(self, other):\n return not self.__ne__(other)", "def __ne__(self, other):\n if self.__eq__(other):\n return False\n return True", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n # type: (object) -> bool\n return not self == other", "def __ne__(self, other):\n # type: (object) -> bool\n return not self == other", "def __ne__(self, other):\n # type: (object) -> bool\n return not self == other", "def __ne__(self, other):\r\n return not self == other", "def __ne__(self, other):\r\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__ (self, other):\n return not self == other" ]
[ "0.845611", "0.8391477", "0.8144138", "0.81410587", "0.8132492", "0.8093973", "0.80920255", "0.80920255", "0.80920255", "0.8085325", "0.8085325", "0.8076365", "0.8076365", "0.8065748" ]
0.0
-1
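Note (illustration, not a dataset row): the positive document negates __eq__ explicitly. On Python 3 this is what the default __ne__ already does once __eq__ is defined, so the explicit override mainly matters on Python 2 or when replacing inherited behavior. A short sketch, reusing the assumed class from above:

    class Organization:
        def __init__(self, name):
            self.name = name

        def __eq__(self, other):
            return isinstance(other, Organization) and self.__dict__ == other.__dict__

        def __ne__(self, other):
            # Explicit negation of __eq__.
            return not self == other

    assert Organization("a") != Organization("b")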
find vertex with minimum weight
def find_suitable_vertex(incident_vertexes: List[int], used: Set[int], terms: List[int]) -> Tuple[int, int]:
    weight, vertex = INF, INF
    for i in range(len(incident_vertexes)):
        if incident_vertexes[i] < weight and terms[i] not in used:
            weight = incident_vertexes[i]
            vertex = terms[i]
    return weight, vertex  # edge weight, number of the last vertex - the end of the edge
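Note (illustration, not a dataset row): a quick usage sketch for the document above, restated so it runs standalone; INF and the example values are assumptions. The loop scans parallel arrays of edge weights and endpoint vertices and picks the cheapest edge whose endpoint is not yet used, as in one step of Prim's algorithm:

    from typing import List, Set, Tuple

    INF = float("inf")

    def find_suitable_vertex(incident_vertexes: List[int], used: Set[int], terms: List[int]) -> Tuple[int, int]:
        weight, vertex = INF, INF
        for i in range(len(incident_vertexes)):
            if incident_vertexes[i] < weight and terms[i] not in used:
                weight = incident_vertexes[i]
                vertex = terms[i]
        return weight, vertex  # edge weight, endpoint vertex number

    weights_to_terms = [4, 1, 7]  # weight of the edge to each candidate endpoint
    terms = [2, 3, 5]             # candidate endpoint vertex numbers
    used = {2}                    # vertices already in the tree

    # Edge to vertex 2 is excluded (already used); the cheapest remaining
    # edge has weight 1 and ends at vertex 3.
    assert find_suitable_vertex(weights_to_terms, used, terms) == (1, 3)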
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _find_min_weight_edge(self, weight, visited):\n minimum = INF\n min_index = None\n for v in range(self.vertices):\n if weight[v] < minimum and not visited[v]:\n minimum = weight[v]\n min_index = v\n return min_index", "def find_min(self, A, w):\n import numpy as np\n\n vcost = self.INFINITY\n vto = vfrom = -1\n for v in w:\n # Get array offset of minimum of this vertex\n i = np.argmin(A[v,:])\n if A[v,i] < vcost:\n vcost = A[v,i]\n vto = i\n vfrom = v\n return (vfrom, vto, vcost)", "def min_distance_vertex(distance, visited):\n vertices = len(visited)\n min_distance = INF\n min_index = None\n for v in range(vertices):\n if not visited[v] and distance[v] <= min_distance:\n min_distance = distance[v]\n min_index = v\n return min_index", "def minimum_other_vertex(self, vertex):\n return min([(len(self.out_edges(v)), v) for v in self.vertices() if v != vertex])", "def min_neighbor_node(g):\r\n return min(g.degree_iter(),key = lambda item:item[1])[0]", "def find_min_weight(self, path, node):\n \n min_edge = []\n \n # assign edge weight first pair of node as min_weight\n min_weight = self.get_weight(path[-1], node)\n min_edge.append(path[-1])\n min_edge.append(node)\n \n # start from last node in path\n index = -1 \n i = path[index]\n \n \n # while node in path not equal to the node passed in, find min_weight\n while i != node:\n \n weight = self.get_weight(path[index-1], path[index])\n \n if weight < min_weight:\n \n min_weight = weight\n min_edge.clear()\n min_edge.append(path[index-1])\n min_edge.append(path[index])\n \n index -= 1\n i = path[index]\n \n \n return min_edge", "def _findMinNode(self, s):\n\n minNode = None\n minVal = self.inf\n for vertex in s:\n if self.dist[vertex] < minVal:\n minVal = self.dist[vertex]\n minNode = vertex\n return minNode", "def nearest_vertex_to(self, point):\n distances = self.distances_to(point)\n idx = np.argmin(distances)\n return idx", "def find_min(self):\n return min(self.nodes, key=int)", "def getStartVertex(self):", "def minimum_spanning_tree(self, start_vertex):\n\n # Initialize sets of seen variables to far in the algorithm\n taken_edges = set()\n taken_vertices = set([start_vertex])\n all_vertices = set(self._edges.keys())\n \n # Create a list from the neighbors, heapify to turn into a queue\n neighbors_iterator = ((w, (start_vertex, v)) for (v, w) in \n self.neighbors(start_vertex, and_weights=True))\n queue = list(neighbors_iterator)\n heapq.heapify(queue)\n \n # While not every single vertex is taken\n while not (taken_vertices == all_vertices):\n\n # Pop the minimum edge (u, v) from the priority queue\n weight, (u, v) = heapq.heappop(queue)\n\n # If v is already taken, we have a cycle and continue\n if v in taken_vertices:\n continue\n \n # If v is not already taken, add the edge and vertex to the sets\n taken_vertices.add(v)\n taken_edges.add((frozenset((u, v)), weight))\n \n # Get edges going out to neighbors of v, i.e. every (v, u)\n for (u, w) in self.neighbors(v, and_weights=True):\n\n # If u is taken the edge is not interesting, since it would\n # add a cycle. If it's not taken, add to the queue\n # This if-statement speeds up computations from 5 to 4.5s\n if u not in taken_vertices:\n heapq.heappush(queue, (w, (v, u)))\n \n # The minimum spanning tree is found. 
Extract information and create\n # a new graph from it.\n mst_edges = [(u, v) for ((u, v), weight) in taken_edges]\n mst_weights = [weight for ((u, v), weight) in taken_edges]\n \n return type(self)(mst_edges, mst_weights)", "def compare_min(values, weights):\n return np.min(values.numpy())", "def get_min_weight_index(weights: list, mst_set: set) -> int:\n min_weight = math.inf\n index = 0\n\n for i in range(len(weights)):\n if weights[i] < min_weight and i not in mst_set:\n min_weight = weights[i]\n index = i\n\n return index", "def __init__(self, vertex, weight=1):\n self.start_vertex = vertex\n \n self.weight = weight\n\n def __str__(self):\n return self.weight", "def minimum_spanning_tree(graph):\n mst = [] # initialize a list to record the edges\n weight = 0 # initialize the total weight to zero\n mst.append(0) # add 0 to the ordering of vertices\n while len(mst) != len(graph): # while all vertices have not been added yet\n min2 = float('inf') # initialize to negative infinity\n node_add = 0\n new_w = 0\n for j in mst: # for every node in the graph\n inner_dict = graph[j] # retrieve the inner dictionary\n for k in inner_dict: # for every node in the inner dictionary\n if inner_dict[k] < min2 and k not in mst: # get the minimum edge\n min2 = inner_dict[k]\n new_w = min2\n node_add = k\n mst.append(node_add) # append the next node\n weight += new_w # add the weight to the tally\n return mst, weight # return the final ordering and the total weight", "def Vmin(V):\n return np.min(V)", "def getMinNode(self):\n currentNode = self.openList[0]\n for node in self.openList:\n if node.g + node.h < currentNode.g + currentNode.h:\n currentNode = node\n return currentNode", "def _create_weight_func(G, weight):\n\n def weight_func(u, v):\n edge = G[u][v]\n\n if G.is_multigraph():\n return min(att.get(weight, 1) for att in edge.values())\n\n return edge.get(weight, 1)\n\n return weight_func", "def exact_min_vertex_cover(graph):\n for N in range(1,len(graph.nodes())+1):\n for graph_sub in it.combinations(sorted(graph.nodes(), reverse=True), N):\n graph_temp = graph.copy()\n graph_temp.remove_nodes_from(graph_sub)\n if len(graph_temp.edges()) == 0:\n return list(graph_sub)", "def find_min_node(self):\n min_energy = 10 ** 10\n min_id = -1\n for node in self.node:\n if node.energy < min_energy:\n min_energy = node.energy\n min_id = node.id\n return min_id", "def min_fill_node(g):\r\n return min( g.nodes(),key = lambda x:fill_edges(g,x) )", "def find_min_hamiltonian_path(G,weights,probs_instead_of_weights=False):\n\n # Create a new model\n m = Model(\"hamiltonian_cycle\")\n \n # Create variables\n x_vars = {}\n u_vars = {}\n for var1 in permute(G.vertices()):\n for var2 in permute(G.vertices()):\n if var1 != var2:\n x_vars[(var1,var2)] = m.addVar(vtype='B', name=\"x_\"+str(var1)+'_'+str(var2))\n u_vars[var1] = m.addVar(vtype=GRB.INTEGER, name=\"u_\"+str(var1))\n m.update()\n \n for var in G.vertices():\n if var != START_NODE:\n cur_incoming = LinExpr([(1.0,v) for k,v in x_vars.items() if (k[1] == var)])\n #print(cur_incoming)\n m.addConstr(cur_incoming,GRB.EQUAL,1.0)\n \n if var != END_NODE:\n cur_outgoing = LinExpr([(1.0,v) for k,v in x_vars.items() if (k[0] == var)])\n #print(cur_outgoing)\n m.addConstr(cur_outgoing,GRB.EQUAL,1.0)\n \n for var1 in G.vertices():\n for var2 in G.vertices():\n if var1 != var2:\n c = LinExpr([(1.0,u_vars[var1]),(-1.0,u_vars[var2]),(G.num_vertices(),x_vars[(var1,var2)])])\n #print(c)\n m.addConstr(c,GRB.LESS_EQUAL,G.num_vertices()-1)\n \n # Set objective\n #try:\n 
edge_weights = permute(G.get_edge_weights(weights))\n if probs_instead_of_weights:\n all_probs = []\n for v in G.vertices():\n if v != END_NODE:\n batch_scores = [(e,w) for e,w in edge_weights if e[0] == v]\n S = logsumexp([x[1] for x in batch_scores])\n batch_scores = [(e,np.exp(w-S)) for e,w in batch_scores]\n all_probs.extend(batch_scores)\n edge_weights = all_probs\n objective = LinExpr([(weight,x_vars[edge]) for edge,weight in edge_weights])\n #except TypeError:\n # return None\n \n m.setObjective(objective,GRB.MINIMIZE)\n m.update()\n code = m.optimize()\n \n try:\n return [k for k,v in x_vars.items() if v.x > 0.98]\n except GurobiError:\n return None", "def test_node_weight_range_min(self):\n n = Node(inputs=6)\n for i in n.weights:\n self.assertGreaterEqual(i, -0.1)", "def prim_mst(graph: list, vertices: int) -> None:\n mst_set: set = set() # keep track of visited nodes\n weights: list = [math.inf for _ in range(vertices)] # next connected, lightest node\n weights[0] = 0\n edges = 0\n parent: list = [None] * vertices # from node\n\n while edges < vertices - 1:\n next_vertex = get_min_weight_index(weights, mst_set)\n mst_set.add(next_vertex)\n update_weight(graph, next_vertex, mst_set, weights, parent)\n edges += 1\n\n for i in range(1, vertices):\n print(parent[i], \"->\", i, \":\", weights[i])", "def nearest_vertex(coord):\n # Make sure a coordinate was passed in\n if len(coord) != 2:\n raise ValueError\n\n # Make sure any vertex has a shorter distance\n shortest_dist = float('inf')\n vertex = -1\n \n # Loop through all vertices and find closest\n for v in V_coord:\n dist = distance(coord, V_coord[v])\n if dist < shortest_dist:\n shortest_dist = dist\n vertex = v\n\n return vertex", "def min(weightData , dataSetVector ):\r\n # weightData: pass the whole weightData array.\r\n # dataSetVector: pass the a data vector to compare with weightdata array, to find its closest match\r\n winnerIndex = 0 #flag for initalizing the winner index\r\n minValue = EcuDist(dataSetVector,weightData[0]) # initalize the minValue\r\n # iterate through all weighdata rows to find the closest match, depending on ecu. 
distance,\r\n #and then return the index of the closest match(winner)\r\n for i in range(weightData.shape[0]):\r\n if(EcuDist(dataSetVector,weightData[i]) < minValue):\r\n minValue = EcuDist(dataSetVector,weightData[i])\r\n winnerIndex = i\r\n return winnerIndex", "def find_minima_via_projections(line, arr, weight):\n top_pt = weight*line\n low_pt = -weight*line\n x_line = top_pt - low_pt\n\n projs = np.zeros((arr.shape[0],), dtype=float)\n for i, pt in enumerate(arr):\n vec = pt - low_pt\n projs[i] = project_vectors_ab(vec, x_line)\n\n return np.argmin(projs)", "def prim_solve(self):\n\n\t\tmin_span_tree = Graph([self.graph.vertices[0]], [])\n\t\tdup_graph = self.graph.duplicate()\n\n\t\tfor i in range(len(self.graph.vertices) - 1):\n\t\t\tneighbour_edges = []\n\t\t\tfor cur in min_span_tree.vertices:\n\t\t\t\tneighbour_edges += dup_graph.get_neighbour_edges(cur)\n\n\t\t\tneighbour_edges.sort(key=lambda x: x[2])\n\t\t\tshortest_edge = neighbour_edges[0]\n\t\t\tnew_node = shortest_edge[0] if shortest_edge[1] in min_span_tree.vertices else shortest_edge[1]\n\n\t\t\tmin_span_tree.edges.append(shortest_edge)\n\t\t\tmin_span_tree.vertices.append(new_node)\n\t\t\tdup_graph.edges.remove(shortest_edge)\n\n\t\treturn min_span_tree", "def lovliest_path(G):\n m = 0\n ma = None\n mb = None\n for node in G.keys():\n for conn in G[node].keys():\n if G[node][conn] > m:\n m = G[node][conn]\n ma = node\n mb = conn\n print \"found lovliest_path of %s to %s with weight %s\" % (ma,mb,m)\n return (ma,mb)", "def return_weight(self, startVertex: np.int, endVertex:np.int):\n return self.__mat[startVertex][endVertex]", "def MinimumSpanningTree(V,E,W):\n \n from UnionFind import UnionFind # used for forests manipulation\n # Kruskal's algorithm: sort edges by weight, and add them one at a time.\n # We use Kruskal's algorithm, first because it is very simple to\n # implement once UnionFind exists, and second, because the only slow\n # part (the sort) is sped up by being built in to Python.\n subtrees = UnionFind()\n tree = []\n #edges = [(G[u][v],u,v) for u in G for v in G[u]]\n edges = [(W[(u,v)],u,v) for (u,v) in E]\n edges.sort()\n cost = 0.0\n for w,u,v in edges:\n if subtrees[u] != subtrees[v]:\n tree.append((u,v))\n subtrees.union(u,v)\n\t cost+=w\n return (tree,cost)", "def min_weight(self):\n return self.experiences.min_weight", "def get_edge_weight(self, vertex):\n return self.neighbors[vertex]", "def update_weight(graph: list, vertex: int, mst_set: set, weights: list, parent: list):\n for i in range(len(graph)):\n if 0 < graph[vertex][i] < weights[i] and i not in mst_set:\n weights[i] = graph[vertex][i]\n parent[i] = vertex", "def get_start_vertex(self):\n\n return self._start_vertex", "def extract_min(H, ds):\n minDist = approxInf\n u = None # min vertex unknown\n i = 0\n for v in H:\n if ds[v] <= minDist:\n minDist = ds[v]\n u = v # note that u is unused (instead returned by pop)\n imin = i\n i += 1\n return(H.pop(imin)) # return [u, d]", "def get_bprop_minimum(self):\n input_grad = G.MinimumGrad()\n\n def bprop(x, y, out, dout):\n dx, dy = input_grad(x, y, dout)\n return dx, dy\n return bprop", "def get_smallest_h_cost_unvisited_node(self):\n node_list = []\n for column in self.grid:\n for node in column:\n if node.pos in self.unvisited_pos:\n node_list.append(node)\n return min(node_list, key=lambda x: x.h_cost)", "def djikstra(self, start_vertex):\n \n # Initialize a priority queue by distance to vertices\n queue = [(0, start_vertex)]\n heapq.heapify(queue)\n \n # Initialize the dictionary which will 
contain the answer\n min_dists = collections.defaultdict(lambda: float('inf'))\n min_dists[start_vertex] = 0\n \n # A set to keep track of visisted vertices\n visited = set()\n \n # While there are elements left in the priority queue\n while queue:\n \n # Pop off the vertex which is closest\n (dist, vertex) = heapq.heappop(queue)\n \n # If it's seen, continue. If not, it's seen now\n if vertex not in visited:\n visited.add(vertex)\n else:\n continue\n \n # Go through every neighbor of the vertex\n for neighbor, weight in self.neighbors(vertex, True):\n \n # The minimum distance to `neighbor` might be improved if\n # the path going through `vertex` is smaller than the\n # existing solution. This is the dynamic programming step.\n min_dist_so_far = min_dists[neighbor]\n possible_new_min = min_dists[vertex] + weight\n min_dists[neighbor] = min(min_dist_so_far, possible_new_min)\n \n # Push to the queue. Prioritize by distance.\n heapq.heappush(queue, (min_dists[neighbor], neighbor))\n \n return min_dists", "def apply(G, initial_node_name):\n minimum_cut = tuple() # initialize the minimum cut and its corresponding weight\n minimum_cut_weight = float('inf') # initialized to a very large positive number\n G_prime = G.__deepcopy__() # construct a deepcopy of the input graph\n while len(G_prime.get_nodeset()) > 1: # while the cardinality of the vertex set is greater than one\n # perform a single iteration of the minimum cut phase\n G_prime, current_cut = StoerWagner.__minimum_cut_phase(G_prime, initial_node_name)\n # evaluate the weight of the current cut\n current_cut_weight = StoerWagner.evaluate_cut_weight(G, current_cut)\n if current_cut_weight < minimum_cut_weight: # if the weight is lower than the stored weight\n minimum_cut = current_cut # store the current cut and its weight as the stored cut and weight\n minimum_cut_weight = current_cut_weight\n return tuple(minimum_cut), float(minimum_cut_weight) # return the minimum cut and its corresponding weight", "def get_next_unvisited(visited, shortest_paths):\n min_dist = math.inf\n min_node = None\n for node_id in shortest_paths:\n dist = shortest_paths[node_id][0]\n if dist < min_dist and visited[node_id] == False:\n min_dist = dist\n min_node = node_id\n return min_node", "def get_min_vertex_distance(coor, guess):\n # Sort by x.\n ix = nm.argsort(coor[:,0])\n scoor = coor[ix]\n\n mvd = 1e16\n\n # Get mvd in chunks potentially smaller than guess.\n n_coor = coor.shape[0]\n\n i0 = i1 = 0\n x0 = scoor[i0,0]\n while 1:\n while ((scoor[i1,0] - x0) < guess) and (i1 < (n_coor - 1)):\n i1 += 1\n\n ## print i0, i1, x0, scoor[i1,0]\n aim, aa1, aa2, aux = get_min_vertex_distance_naive(scoor[i0:i1+1])\n if aux < mvd:\n im, a1, a2 = aim, aa1 + i0, aa2 + i0\n mvd = min(mvd, aux)\n i0 = i1 = int(0.5 * (i1 + i0)) + 1\n ## i0 += 1\n x0 = scoor[i0,0]\n ## print '-', i0\n\n if i1 == n_coor - 1: break\n\n ## print im, ix[a1], ix[a2], a1, a2, scoor[a1], scoor[a2]\n\n return mvd", "def relax(u, v, weight, shortest, previous):\n if shortest[u] + weight < shortest[v]:\n shortest[v] = shortest[u] + weight\n previous[v] = u", "def test_minimum_spanning_tree():\n \n # A very simple graph\n g = UndirectedGraph([('A', 'B'), ('B', 'D'), ('D', 'C'), ('A', 'C')], \n weights=[7, 6, 2, 3])\n mst = g.minimum_spanning_tree('A')\n assert mst == UndirectedGraph([('B', 'D'), ('D', 'C'), ('A', 'C')], \n weights=[6, 2, 3])\n \n # A slightly more complicated graph\n g = UndirectedGraph([('A', 'B'), ('B', 'D'), ('D', 'C'), ('A', 'C'),\n ('C', 'B'), ('A', 'D')], \n weights=[7, 6, 2, 3, 
2, 1])\n mst = g.minimum_spanning_tree('A')\n assert mst == UndirectedGraph([('D', 'C'), ('C', 'B'), ('A', 'D')], \n weights=[2, 2, 1])", "def vertex(self):\n if self.a != 0.0:\n # Find x where f'(x) = 2ax + b = 0\n x = -0.5 * self.b / self.a\n return (x, self.f(x))\n else:\n # Quadratic is actually a line, no minimum!\n return (None, None)", "def edgecomplete_left_vertex(self, X, w):\n h = self.op_norm(X, w[0])\n if self.K.is_zero(h):\n return None\n h -= w[1]\n if self.variant == NormGraphVariant.NG and w == (X, h):\n return None\n return h", "def _node_lowest_neighbour(self, node):\n\n lowest = self.neighbour_array_lo_hi[node][0]\n\n if lowest != node:\n return lowest\n else:\n return -1", "def _find_lowest_cost_node(self) -> str:\n lowest_cost = float(\"inf\")\n lowest_cost_node = None\n for node in self.costs:\n cost = self.costs[node]\n if cost < lowest_cost and node not in self.closed_nodes:\n lowest_cost = cost\n lowest_cost_node = node\n return lowest_cost_node", "def min_index(self):\n return self.__pq[1]", "def bellman_fords_shortest_path(graph: Graph[T], source_vertex_data: T) -> \\\n Tuple[bool, Dict[Vertex[T], int], Dict[Vertex[T], Vertex[T]]]:\n\n vertex_distance_mapping: Dict[Vertex[T], int] = defaultdict(lambda: maxsize) # vertex_weight_mapping\n vertex_parent_mapping: Dict[Vertex[T], Vertex[T]] = dict()\n source_vertex: Vertex[T] = graph.get_vertex(source_vertex_data)\n\n vertex_distance_mapping[source_vertex] = 0\n vertex_parent_mapping[source_vertex] = None\n\n # Relax all the edges (V-1)th time.\n # Why (V-1) times? - https://www.youtube.com/watch?v=-mOEd_3gTK0&feature=youtu.be&list=PLrmLmBdmIlpu2f2g8ltqaaCZiq6GJvl1j&t=785\n for i in range(0, len(graph.vertices)-1): # run it (V-1) times... for i=0: i<(V-1); i++\n relax_edges(graph.edges, vertex_distance_mapping, vertex_parent_mapping)\n\n # Relax all the edges for one more time(Vth time) to check if there is any -ve weight cycle present.\n has_negative_weight_cycle: bool = relax_edges(graph.edges, vertex_distance_mapping, vertex_parent_mapping,\n check_negative_weight_cycle=True)\n if has_negative_weight_cycle:\n return has_negative_weight_cycle, dict(), dict()\n\n return has_negative_weight_cycle, vertex_distance_mapping, vertex_parent_mapping", "def get_smallest_f_cost_unvisited_node(self):\n node_list = []\n for column in self.grid:\n for node in column:\n if node.pos in self.unvisited_pos:\n node_list.append(node)\n min_f_cost_node = min(node_list, key=lambda x: x.g_cost)\n min_f_cost_list = []\n for column in self.grid:\n for node in column:\n if (\n node.f_cost == min_f_cost_node.f_cost\n and node.pos in self.unvisited_pos\n ):\n min_f_cost_list.append(node)\n return min_f_cost_node, len(min_f_cost_list)", "def min_distance(distance, spt_set, self_nodes):\n minimum = sys.maxsize\n minimum_node = None\n for curr_node in self_nodes.values():\n if distance[curr_node.id] < minimum and not spt_set[curr_node.id]:\n minimum = distance[curr_node.id]\n minimum_node = curr_node\n return minimum_node", "def find_smallest(self):\n return self._find_smallest(self.root)", "def getOriginDistance(priorityQ, vertex):\n for index, element in enumerate(priorityQ):\n if element.vertex() == vertex:\n return index\n return None", "def Prim(self):\n g = Graph(attr={DIRECTED: False})\n distance = [sys.maxsize] * len(self.vertices)\n parent = [None] * len(self.vertices)\n set = [False] * len(self.vertices)\n\n distance[0] = 0\n parent[0] = -1\n\n for i in self.vertices:\n # Search vertex with minimum distance\n min_index = 0\n min = 
sys.maxsize\n for v in self.vertices:\n if distance[v] < min and set[v] is False:\n min = distance[v]\n min_index = v\n u = min_index\n\n # Add u vertex in set to not use it in other iteration \n set[u] = True\n g.add_vertex(vertex.Vertex(u))\n\n # Iterate all adjacent vertices of u vertex and update distance \n for v in self.get_adjacent_vertices_by_vertex(u):\n if set[v] is False and distance[v] > \\\n self.get_edge((u, v)).attr[\"WEIGHT\"]:\n distance[v] = self.get_edge((u, v)).attr[\"WEIGHT\"]\n parent[v] = u\n\n for i in self.vertices:\n if i == 0:\n continue\n if parent[i] is not None:\n g.add_edge(edge.Edge(parent[i], i, {\"WEIGHT\": self.get_edge((parent[i], i)).attr[\"WEIGHT\"]}))\n\n return g", "def get_head_vertex(self):\n return self.graph.vertices[self.head_vertex.vertex_number]", "def find_minimum_cut(graph):\n while len(graph) > 2:\n u, v = choose_random_edge(graph)\n merge_edge(graph, u, v)\n\n # Remove self-loops\n while u in graph[u]:\n graph[u].remove(u)\n\n # Remove vertex from graph\n del graph[v]\n\n # The length of the first remaining vertex is the size of the minimum cut\n return len(graph[graph.keys()[0]])", "def get_edge_weight(self, vertex):\n try:\n return self.neighbors[vertex]\n except KeyError:\n return \"Vertex {} not in Graph\".format(vertex.id)", "def get_edge_weight(self, vertex):\n #returns the weight of the edge from this\n #vertex to the given vertex.\n return self.neighbors[vertex]", "def min(self):\n if self._mesh.is_1d():\n ind = 1\n elif self._mesh.is_2d():\n ind = 2\n else:\n if self._logger:\n self._logger.error(\"mesh dimension not implemented\")\n raise NotImplementedError(\"mesh dimension not implemented\")\n\n def __map(m):\n return m[ind]\n\n return self.data.map(\n __map\n ).min()", "def _findMin(p, A):\n\n m=(-1, (0,0))\n for p0 in A:\n dist = np.linalg.norm(p0-np.array(p))\n if m[0]==-1 or m[0]>dist:\n m = (dist, p0)\n \n return tuple(m[1])", "def __determine_most_tightly_connected_vertex(G, induced_ordering, previous_vertex):\n # compute the connection strengths\n G_prime = StoerWagner.__compute_connection_strengths(G, induced_ordering, previous_vertex)\n\n connection_strength_map = \\\n dict({\n node: node.get_attribute_value(StoerWagner.CONNECTION_STRENGTH_ATTRIBUTE)\n for node in G_prime.get_nodeset()\n if node.get_name() not in induced_ordering\n }) # obtain the connection map\n\n # return the node with the strongest connection\n return max(connection_strength_map, key=connection_strength_map.get)", "def find_vertex_at_nearest_distance(DISTANCES, D):\n v = int(0) # All vertex IDs are integers\n iv = int(0) # Index of the vertex v in DISTANCES\n DISTANCES = np.asarray(DISTANCES)\n min_val = (np.abs(DISTANCES - D)).min()\n vertices = np.where(DISTANCES == min_val + D)\n iv = int(np.random.random() * (len(vertices[0]) - 1))\n v = vertices[0][iv]\n return v", "def argminX( self ):\n min = 1e30\n minX = None\n for i in range( 0, self.GetN() ):\n p = ( ROOT.Double(), ROOT.Double() )\n self.GetPoint( i, p[0], p[1] )\n if p[1] < min:\n min = p[1]\n minX = p[0]\n return minX", "def canonical_vertex(self):\n return self.L.zero(), self.K.one()", "def get_min(h: Heap) -> Node:\n prev, curr = _min(h)\n return curr", "def minimum_value(self):\n return self._fitness[self._minidx]", "def get_weight_from_minflow(self):\n start_nodes = []\n end_nodes = []\n capacities = []\n unit_costs = []\n A = 0\n s_prime = self.sink() + 1\n t_prime = self.sink() + 2\n x = self.sink() + 3\n # for every edge in the graph, add edge to mincost flow instance with\n # 
infinite capacity and cost 1\n # also add backwards edge\n for arc in self.arc_info.keys():\n # forward edge\n start_nodes.append(self.arc_info[arc][\"start\"])\n end_nodes.append(self.arc_info[arc][\"destin\"])\n capacities.append(100000) # capacity of 100,000 instead of inf\n unit_costs.append(1)\n print(\"Adding arc ({}, {}) with unit cost and cap inf\".format(\n self.arc_info[arc][\"start\"],\n self.arc_info[arc][\"destin\"]))\n # backward edge\n start_nodes.append(self.arc_info[arc][\"destin\"])\n end_nodes.append(self.arc_info[arc][\"start\"])\n capacities.append(int(self.arc_info[arc][\"weight\"])) # no negative\n unit_costs.append(1)\n print(\"Adding arc ({}, {}) with unit cost and cap inf\".format(\n self.arc_info[arc][\"destin\"],\n self.arc_info[arc][\"start\"]))\n # add (x,s) and (t,x) edges with same cap, cost as above\n in_weight_x = 0\n for in_arc in self.in_arcs_lists[self.sink()]:\n in_weight_x += self.arc_info[in_arc][\"weight\"]\n out_weight_x = 0\n for out_arc in self.out_arcs_lists[self.source()]:\n out_weight_x += self.arc_info[out_arc][\"weight\"]\n # (x,s)\n start_nodes.append(x)\n end_nodes.append(self.source())\n capacities.append(100000) # capacity of 100,000 instead of inf\n unit_costs.append(1)\n print(\"Adding arc ({}, {}) with unit cost and cap inf\".format(\n x,\n self.source()))\n # backward\n start_nodes.append(self.source())\n end_nodes.append(x)\n capacities.append(int(out_weight_x)) # don't go negative\n unit_costs.append(1)\n print(\"Adding arc ({}, {}) with unit cost and cap inf\".format(\n self.source(),\n x))\n # (t,x)\n start_nodes.append(self.sink())\n end_nodes.append(x)\n capacities.append(100000) # capacity of 100,000 instead of inf\n unit_costs.append(1)\n print(\"Adding arc ({}, {}) with unit cost and cap inf\".format(\n self.sink(),\n x))\n # backward\n start_nodes.append(x)\n end_nodes.append(self.sink())\n capacities.append(int(in_weight_x)) # don't go negative\n unit_costs.append(1)\n print(\"Adding arc ({}, {}) with unit cost and cap inf\".format(\n x,\n self.sink()))\n # for all verts, if a-exc < 0, add edge (s', v) with capacity -a-exc(v)\n # and cost 0, and if a-exc > 0, add edge (v, t') with capacity a-exc(v)\n # and cost 0.\n for v in self:\n # process internal verts only, since we assume source and sink have\n # no in and out edges respectively\n if v != self.source() and v != self.sink():\n # compute a-exc(v)\n in_weight = 0\n for in_arc in self.in_arcs_lists[v]:\n in_weight += self.arc_info[in_arc][\"weight\"]\n out_weight = 0\n for out_arc in self.out_arcs_lists[v]:\n out_weight += self.arc_info[out_arc][\"weight\"]\n a_exc = out_weight - in_weight\n if a_exc < 0:\n # add edge (s', v)\n start_nodes.append(s_prime)\n end_nodes.append(v)\n capacities.append(int(-a_exc))\n unit_costs.append(0)\n print(\"Adding arc ({}, {}) with cost 0 and cap {}\".format(\n s_prime,\n v,\n int(-a_exc)))\n if a_exc > 0:\n # add edge (v, t')\n start_nodes.append(v)\n end_nodes.append(t_prime)\n capacities.append(int(a_exc))\n unit_costs.append(0)\n print(\"Adding arc ({}, {}) with cost 0 and cap {}\".format(\n v,\n t_prime,\n int(a_exc)))\n # update A\n A += a_exc\n # process x node\n a_exc = out_weight_x - in_weight_x\n if a_exc < 0:\n # add edge (s', x)\n start_nodes.append(s_prime)\n end_nodes.append(x)\n capacities.append(int(-a_exc))\n unit_costs.append(0)\n print(\"Adding arc ({}, {}) with cost 0 and cap {}\".format(\n s_prime,\n x,\n int(-a_exc)))\n if a_exc > 0:\n # add edge (x, t')\n start_nodes.append(x)\n end_nodes.append(t_prime)\n 
capacities.append(int(a_exc))\n unit_costs.append(0)\n print(\"Adding arc ({}, {}) with cost 0 and cap {}\".format(\n x,\n t_prime,\n int(a_exc)))\n # update A\n A += a_exc\n # we must send flow of A from s_prime to t_prime\n supplies = [0]*(len(self) + 3)\n supplies[s_prime] = int(A)\n supplies[t_prime] = int(-A)\n # Instantiate a SimpleMinCostFlow solver.\n min_cost_flow = pywrapgraph.SimpleMinCostFlow()\n # Add each arc.\n for i in range(len(start_nodes)):\n min_cost_flow.AddArcWithCapacityAndUnitCost(start_nodes[i],\n end_nodes[i], capacities[i], unit_costs[i])\n # Add node supplies\n for i in range(0, len(supplies)):\n min_cost_flow.SetNodeSupply(i, supplies[i])\n # Find the minimum cost flow between node s' and t'.\n if min_cost_flow.Solve() == min_cost_flow.OPTIMAL:\n print('Minimum cost:', min_cost_flow.OptimalCost())\n print('')\n print(' Arc Flow / Capacity Cost')\n for i in range(min_cost_flow.NumArcs()):\n cost = min_cost_flow.Flow(i)*min_cost_flow.UnitCost(i)\n print('%1s -> %1s %3s / %3s %3s' % (\n min_cost_flow.Tail(i),\n min_cost_flow.Head(i),\n min_cost_flow.Flow(i),\n min_cost_flow.Capacity(i),\n cost))\n # update arcs\n start = min_cost_flow.Tail(i)\n destin = min_cost_flow.Head(i)\n if start != s_prime and \\\n start != t_prime and \\\n start != x and \\\n destin != s_prime and \\\n destin != t_prime and \\\n destin != x:\n # if forward, increase flow. otherwise decrease.\n print(\"Processing edge ({}, {})\".format(start, destin))\n if start < destin:\n sup_flow = min_cost_flow.Flow(i)\n else:\n sup_flow = -min_cost_flow.Flow(i)\n temp_start = start\n start = destin\n destin = temp_start\n print(\"Has become ({}, {}) with sup {}\".format(start,\n destin,\n sup_flow))\n arc = self.get_arc(start, destin)\n if (sup_flow != 0) or (\"lower_bound\" not in \\\n self.arc_info[arc].keys()):\n print(\"We should add this\")\n old_flow = self.arc_info[arc][\"weight\"]\n new_flow = old_flow + sup_flow\n self.arc_info[arc][\"weight\"] = int(new_flow)\n print(\"Edge ({},{}) weight is changed from {} to {}\".format(\n start,\n destin,\n old_flow,\n new_flow))\n else:\n print('There was an issue with the min cost flow input.')\n #self.check_conservation_of_flow() # check that solution is valid", "def best_unexplored_lower_bound(self):\n if self._unexplored_nodes:\n return min(node.lower_bound for node in self._unexplored_nodes)\n else:\n return 0.0", "def find_min_distance():\n return np.argmin(d)", "def kruskal_solve(self):\n\n\t\tmin_span_tree = Graph(self.graph.vertices, [])\n\t\tedges = sorted(self.graph.edges[:], key=lambda x: x[2])\n\t\tcount = 0\n\n\t\twhile count < len(self.graph.vertices) - 1:\n\t\t\tcur_edge = edges[0]\n\t\t\tedges = edges[1:]\n\t\t\t\n\t\t\tnode1, node2, weight = cur_edge\n\t\t\tif not min_span_tree.is_connected(node1, node2):\n\t\t\t\tmin_span_tree.edges.append(cur_edge)\n\t\t\t\tcount = count + 1\n\n\t\treturn min_span_tree", "def min_span_tree(adjacency_matrix, indices_to_connect):\n\n if len(indices_to_connect) > 1:\n Root = indices_to_connect[0]\n M = Prim(adjacency_matrix, Root)\n adjacency_matrix, W, Path, Degree, TreeNbr = M.mst_prim(adjacency_matrix,\n [Root], [], M.degree, M.tree_nbr)\n\n return W, Path, Degree, TreeNbr", "def filter_minimum_gain_like(\n G_min, w, y, alpha=None, k: float = 10.0, eps: float = EPS\n):\n # (..., L)\n filtered_input = einsum(\"...d,...d->...\", [w.conj(), y])\n # (..., L)\n Y = y[..., -1]\n return minimum_gain_like(G_min, Y, filtered_input, alpha, k, eps)", "def mst_prim(g):\n mst = Graph() # create new Graph object to 
hold the MST\n \n # if graph is empty\n if not g:\n return mst\n \n # nearest_neighbour[v] is the nearest neighbour of v that is in the MST\n # (v is a vertex outside the MST and has at least one neighbour in the MST)\n nearest_neighbour = {}\n # smallest_distance[v] is the distance of v to its nearest neighbour in the MST\n # (v is a vertex outside the MST and has at least one neighbour in the MST)\n smallest_distance = {}\n # v is in unvisited iff v has not been added to the MST\n unvisited = set(g)\n \n u = next(iter(g)) # select any one vertex from g\n mst.add_vertex(u.get_key()) # add a copy of it to the MST\n unvisited.remove(u)\n \n # for each neighbour of vertex u\n for n in u.get_neighbors():\n if n is u:\n # avoid self-loops\n continue\n # update dictionaries\n nearest_neighbour[n] = mst.get_vertex(u.get_key())\n smallest_distance[n] = u.get_weight(n)\n \n # loop until smallest_distance becomes empty\n while (smallest_distance):\n # get nearest vertex outside the MST\n outside_mst = min(smallest_distance, key=smallest_distance.get)\n # get the nearest neighbour inside the MST\n inside_mst = nearest_neighbour[outside_mst]\n \n # add a copy of the outside vertex to the MST\n mst.add_vertex(outside_mst.get_key())\n # add the edge to the MST\n mst.add_edge(outside_mst.get_key(), inside_mst.get_key(),\n smallest_distance[outside_mst])\n mst.add_edge(inside_mst.get_key(), outside_mst.get_key(),\n smallest_distance[outside_mst])\n \n # now that outside_mst has been added to the MST, remove it from our\n # dictionaries and the set unvisited\n unvisited.remove(outside_mst)\n del smallest_distance[outside_mst]\n del nearest_neighbour[outside_mst]\n \n # update dictionaries\n for n in outside_mst.get_neighbors():\n if n in unvisited:\n if n not in smallest_distance:\n smallest_distance[n] = outside_mst.get_weight(n)\n nearest_neighbour[n] = mst.get_vertex(outside_mst.get_key())\n else:\n if smallest_distance[n] > outside_mst.get_weight(n):\n smallest_distance[n] = outside_mst.get_weight(n)\n nearest_neighbour[n] = mst.get_vertex(outside_mst.get_key())\n \n return mst", "def extract_node_with_lowest_estimate(self):\n assert not self.is_empty\n node, _ = self.__nodes.popitem()\n\n return node", "def minimum_spanning_tree(self):\n if self._directed:\n raise Exception('Current implementation of minimum spanning tree does not work for directed graphs')\n vertices = [self._vertex_dict[x].abstract_vertex for x in self._vertex_dict]\n tree = {'vertices': [random.choice(vertices)], 'edges': []}\n while len(tree['vertices']) < len(vertices):\n best_edge_number = None\n best_edge = None\n best_vertex = None\n vertex_names = [vertex.label for vertex in tree['vertices']]\n for vertex in tree['vertices']:\n for edge in vertex.edges:\n if edge not in vertex_names and (vertex.edges[edge] < best_edge_number or best_edge is None):\n best_edge_number = vertex.edges[edge]\n best_edge = self._edge_dict[vertex.label, edge]\n best_vertex = edge\n tree['vertices'].append(self._vertex_dict[best_vertex].abstract_vertex)\n tree['edges'].append(best_edge)\n return tree['edges']", "def filter_by_weight(self, w):\n G = nx.Graph()\n\n for u, v in self.edges:\n if self.graph[u][v][\"weight\"] >= w:\n # Add the nodes first in case they have data\n G.add_node(u, **self.nodes(data=True)[u])\n G.add_node(v, **self.nodes(data=True)[v])\n G.add_edge(u, v, **self.graph[u][v])\n\n self.graph = G", "def node_with_min_fscore(open_set, f_cost): # open_set is a set (of cell) and f_cost is a dict (with cells as keys)\n f_cost_open = 
dict([a for a in f_cost.items() if a[0] in open_set])\n return min(f_cost_open, key=f_cost_open.get)", "def node_with_min_fscore(open_set, f_cost): # open_set is a set (of cell) and f_cost is a dict (with cells as keys)\n f_cost_open = dict([a for a in f_cost.items() if a[0] in open_set])\n return min(f_cost_open, key=f_cost_open.get)", "def smallest (self):\n return self.pointers[0].smallest()", "def convert_minimum(node, **kwargs):\n return create_basic_op_node('Min', node, kwargs)", "def remove_heavier_than(self, w):\n G = nx.DiGraph()\n for u, v in self.edges:\n if self.graph[u][v][\"weight\"] <= w:\n G.add_edge(u, v, weight=self.graph[u][v][\"weight\"], alignment=self.graph[u][v][\"alignment\"])\n self.graph = G", "def weighted_constraint(mesh=None, nodes=None, values=[]):\n\n if not mesh and not nodes:\n nodes = mc.ls(sl=1)[:-1]\n mesh = mc.ls(sl=1)[-1]\n\n nodes = mc.ls(nodes)\n\n cpom = mc.createNode('closestPointOnMesh')\n shape = utils.get_shapes(mesh)[0]\n\n mc.connectAttr(shape+'.outMesh', cpom+'.inMesh')\n\n for grp_node in nodes:\n\n node = mc.listRelatives(grp_node, c=1)[0]\n scls = mm.eval('findRelatedSkinCluster '+mesh)\n\n utils.set_attrs([node, grp_node], k=1, l=0)\n\n pos = mc.xform(grp_node, q=1, ws=1, t=1)\n mc.setAttr(cpom+'.inPosition', pos[0], pos[1], pos[2])\n vert = mesh+'.vtx[{0}]'.format(mc.getAttr(cpom+'.closestVertexIndex'))\n\n existing_cons = utils.get_constraints([grp_node, node])\n if existing_cons:\n mc.delete(existing_cons)\n\n # get influences\n if values:\n sorted_influences = values\n else:\n infs = mc.skinCluster(scls, q=1, inf=1)\n\n # get values\n weighted_influences = {}\n values = []\n for inf in infs:\n val = mc.skinPercent(scls, vert, q=1, t=inf, v=1)\n val = round(val, 3)\n\n if val > 0.0:\n weighted_influences[inf] = val\n values.append(val)\n\n values.sort()\n values.reverse()\n\n sorted_influences = []\n for sv in values:\n for inf, val in weighted_influences.items():\n if val == sv:\n sorted_influences.append([inf, val])\n\n suffix = utils.get_suffix('parentConstraint')\n prc1 = mc.parentConstraint(sorted_influences[0][0], grp_node, n=grp_node+'_'+suffix, mo=1)[0]\n for si in sorted_influences:\n prc2 = mc.parentConstraint(si[0], node, n=node+'_'+suffix, mo=1, weight=si[1])[0]\n\n mc.setAttr(prc2+'.interpType', 2)\n mc.addAttr(prc1, ln='weighted_constraint', at='message')\n\n mc.delete(cpom)", "def min(self):\n return self._min(self.root)", "def min_path(vs, es, source, target):\n dijkstra(vs, es, source, stop = target)\n test = target\n result = []\n while test != source:\n e = test._ss_edge\n result.append(e)\n test = e.v1 if e.v1 != test else e.v2\n assert test == source and test._ss_edge is None\n return result[::-1]", "def relaxVertex(self, v):\r\n for arc in self.graph.getAdj(v):\r\n neighbour = arc.getExtremityNode()\r\n if neighbour in self.closed_set:\r\n continue\r\n self.nb_relax_edges += 1\r\n new_dist = self.dists_so_far[v] + arc.getWeight()\r\n # print(arc.getTravelType(), arc.getWeight(), end=\" \")\r\n if neighbour not in self.preds or new_dist < self.dists_so_far[neighbour]:\r\n self.preds[neighbour] = v\r\n self.dists_so_far[neighbour] = new_dist\r\n self.pushPriorityQueue((new_dist, neighbour))", "def find_min(self):\n return self.root and self.root.find_min()", "def _get_obj(self, weights, alpha_vector):\r\n assert (len(alpha_vector.columns) == 1)\r\n\r\n # TODO: Implement function\r\n alpha_vec_vals = alpha_vector.values[:, 0]\r\n x_star = (alpha_vec_vals - np.mean(alpha_vec_vals)) / sum(abs(alpha_vec_vals))\r\n 
return cvx.Minimize(cvx.pnorm(weights - x_star, 2))", "def minimum_index(self):\n return self._minidx", "def _get_obj(self, weights, alpha_vector):\r\n assert (len(alpha_vector.columns) == 1)\r\n return cvx.Minimize(-alpha_vector.T.values[0]*weights)", "def find_min(self):\n \n return self.root and self.root.find_min()", "def get_min_max_weight_edges(G):\n min_weight = 1e10\n max_weight = 0\n for edge in G.edges(data=True):\n min_weight = min(1.0/edge[2][\"weight\"], min_weight)\n max_weight = max(1.0/edge[2][\"weight\"], max_weight)\n return min_weight, max_weight", "def vmin(self):\n return self._vmin", "def weight(self):", "def get_vertex(self, name):\n return self.vertices[name]", "def get_rank(weight):\n weight = min(1.0, max(weight, 0.0))\n ranks = [x for x in ALL_RANKS if weight >= x.min_weight]\n ranks.sort(key=lambda x: x.min_weight)\n return ranks.pop()", "def get_min_distance(distances, unvisited_nodes):\n min_value = None\n node = None\n for city, distance in distances.items():\n if city not in unvisited_nodes:\n continue\n if min_value is None:\n node = city\n min_value = distance\n elif distance < min_value:\n node = city\n min_value = distance\n return node", "def find_min(self) -> TreeNode:\n node = self.root\n while True:\n if not node.left:\n return node\n node = node.left", "def kmin(self, u, v):\n return self.H(u, v) - np.sqrt(np.square(self.H(u, v)) - self.K(u, v))", "def min(self):\n node = self\n while node.left:\n node = node.left\n return node" ]
[ "0.7971822", "0.7408999", "0.7256268", "0.6921469", "0.6910666", "0.686184", "0.6673513", "0.6541637", "0.65068144", "0.6502696", "0.6480919", "0.64556634", "0.6346771", "0.63144493", "0.62669945", "0.62147427", "0.61931205", "0.61720634", "0.6159165", "0.61385703", "0.6131952", "0.6118164", "0.61176765", "0.60798997", "0.6073035", "0.60634935", "0.60550106", "0.60211694", "0.60165936", "0.6014253", "0.60045516", "0.5992841", "0.5984087", "0.5963915", "0.59605783", "0.59601563", "0.59462136", "0.59375465", "0.59286904", "0.59271634", "0.5920616", "0.5905796", "0.59055066", "0.5900756", "0.58830726", "0.5880179", "0.58692724", "0.5849871", "0.5848842", "0.5841945", "0.5841849", "0.5833542", "0.5832268", "0.58307225", "0.5803951", "0.58038706", "0.5792142", "0.5772091", "0.57622486", "0.5761225", "0.57508767", "0.5748402", "0.574716", "0.5741059", "0.5739821", "0.57281595", "0.5727059", "0.57181865", "0.5712345", "0.57079875", "0.5700371", "0.5697013", "0.56929916", "0.5684051", "0.56712246", "0.5668374", "0.5666029", "0.56635565", "0.56635565", "0.56591296", "0.5649608", "0.5649093", "0.5641048", "0.5640032", "0.56272954", "0.5626892", "0.56235206", "0.5622413", "0.5613308", "0.56116486", "0.5600561", "0.5599471", "0.55821383", "0.5580188", "0.5575957", "0.5575728", "0.55716044", "0.55669117", "0.5565819", "0.5562441" ]
0.7048187
3
Assume the values in mu_list are ascending. Assume mu=1.0 denotes failure.
import numpy as np

def get_label_from_mu(mu_all, mu_list):
    mu_list.append(1.0)  # add 1.0 to the end; the end means lowest reward
    num_label_level = len(mu_list)
    label_level_list = np.linspace(1.0, 0.0, num_label_level)

    # Convert list to dict with key as mu and value as label
    label_dic = {}
    for mu, label in zip(mu_list, label_level_list):
        label_dic[mu] = label

    # Assign label based on mu
    label_all = []
    for mu in mu_all:
        label_all += [label_dic[mu]]
    return label_all
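A minimal sketch of the intended labeling, using made-up mu values (the inputs and printed results below are illustrative assumptions, not from the source); it also assumes every value in mu_all occurs in mu_list or equals 1.0, since the dictionary lookup raises KeyError otherwise:

mu_list = [0.2, 0.5, 0.9]      # ascending, without the failure value yet
mu_all = [0.2, 0.9, 1.0, 0.5]  # observed mus; 1.0 marks a failure

labels = get_label_from_mu(mu_all, mu_list)
# mu_list becomes [0.2, 0.5, 0.9, 1.0]; labels fall linearly from 1.0 to 0.0:
# 0.2 -> 1.0, 0.5 -> 2/3, 0.9 -> 1/3, 1.0 -> 0.0
print(labels)  # [1.0, 0.3333..., 0.0, 0.6666...]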
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def generate_good(self, m, n, rank, mu=2, ka=2):\n sr = random.random()\n s = []\n s.append(sr)\n for r in range(rank-1):\n newele = s[-1] * (1 + ka * random.random() / (rank-1))\n s.append(newele)\n s.reverse()\n \n # best_u = None\n # best_mu0 = 0\n # while best_mu0 == 0:\n # for _ in range(10):\n # A = np.random.rand(m,m)\n # A = scipy.linalg.orth(A)\n # u = A[:, :rank]\n # mu0 = self.compute_mu(u, m, rank)\n # print(\"mu0 : \", mu0)\n # if mu0 <= mu and mu0 >= best_mu0:\n # best_mu0 = mu0\n # best_u = u\n # print(\"mu0 for u:\", best_mu0)\n # # print(u.T @ u)\n \n # best_v = None\n # best_mu0 = 0\n # while best_mu0 == 0:\n # for _ in range(10):\n # B = np.random.rand(n,n)\n # B = scipy.linalg.orth(B)\n # v = B[:, :rank]\n # mu0 = self.compute_mu(v, n, rank)\n # print(\"mu0 : \", mu0)\n # if mu0 <= mu and mu0 >= best_mu0:\n # best_mu0 = mu0\n # best_v = v\n # print(\"mu0 for v:\", best_mu0)\n # u = best_u\n # v = best_v\n\n for _ in range(100):\n A = np.random.rand(m,m)\n A = scipy.linalg.orth(A)\n u = A[:, :rank]\n mu0 = self.compute_mu(u, m, rank)\n print(\"mu0 : \", mu0)\n if mu0 <= mu:\n break\n print(\"mu0 for u:\", mu0) \n\n for _ in range(10):\n B = np.random.rand(n,n)\n B = scipy.linalg.orth(B)\n v = B[:, :rank]\n mu0 = self.compute_mu(v, n, rank)\n print(\"mu0 : \", mu0)\n if mu0 <= mu:\n break\n print(\"mu0 for both:\", mu0)\n\n matrix = np.dot(u*s, v.T)\n \n kappa = s[0] / s[-1]\n print(\"kappa=\", kappa)\n \n ss = np.copy(s)\n for k in range(rank):\n ss[k] = s[k] / s[0]\n \n max_entry = np.max(np.abs(np.outer(u[:,:rank], v.T[:rank,:])))\n mu1 = max_entry * math.sqrt(m * n / rank)\n print(\"mu1=\", mu1)\n \n return matrix", "def kernel_mu(n_kernels, manual=False):\n mus = [1] # exact match\n if n_kernels == 1:\n return mus\n bin_step = (1 - (-1)) / (n_kernels - 1) # score from [-1, 1]\n mus.append(1 - bin_step / 2) # the margain mu value\n for k in range(1, n_kernels - 1):\n mus.append(mus[k] - bin_step)\n if manual:\n return [1, 0.95, 0.90, 0.85, 0.8, 0.6, 0.4, 0.2, 0, -0.2, -0.4, -0.6, -0.80, -0.85, -0.90, -0.95]\n else:\n return mus", "def kernal_mus(n_kernels):\n l_mu = [1]\n if n_kernels == 1:\n return l_mu\n\n bin_size = 2.0 / (n_kernels - 1) # score range from [-1, 1]\n l_mu.append(1 - bin_size / 2) # mu: middle of the bin\n for i in range(1, n_kernels - 1):\n l_mu.append(l_mu[i] - bin_size)\n print(l_mu)\n return l_mu", "def gen_osmotic_equilibrium(L_list, M, nu_list, mu_list) :\n N_list = np.zeros(M)\n for m in range(M) :\n N_list[m] = osmotic_equilibrium(L_list[m], nu_list[m], mu_list[m])\n return N_list", "def mu(k, z):\n return 1", "def kernel_mus(self, n_kernels: int):\n l_mu = [1.0]\n if n_kernels == 1:\n return l_mu\n\n bin_size = 2.0 / (n_kernels - 1) # score range from [-1, 1]\n l_mu.append(1 - bin_size / 2) # mu: middle of the bin\n for i in range(1, n_kernels - 1):\n l_mu.append(l_mu[i] - bin_size)\n return l_mu", "def calc_muj_list(theta_list) :\n return np.sin(theta_list)**2 / (2*theta_list - np.sin(2*theta_list))", "def prior(mu):\n p = np.ones(len(mu))/(mu.max()-mu.min())\n return p", "def algo_UCB(mu, Na):\n i = 0\n while i < Na.size:\n if Na[i] < 1:\n return i\n else:\n i+= 1\n t = Na.sum()\n return np.argmax(mu + np.sqrt(2*np.log(t)/Na))", "def lessthan_5(num_list):", "def mu(self, x):\n x = utils.to_float_if_int(x)\n utils.verify_is_numeric(x)\n\n return (\n 0\n if (x < self.__l or x > self.__r)\n else (x - self.__l) / (self.__n - self.__l)\n if (self.__l <= x <= self.__n)\n else (self.__r - x) / (self.__r - self.__n)\n )", "def _get_mu(self):\n 
## a = 1.0/(2*self.B.K)\n ## b = np.sqrt(np.log(16.0*(self.t**2)*self.B.N/self.delta)/float(self.B.K*self.B.L*self.t))\n a = self.mu\n b = self.mu*np.sqrt(self.B.K)/np.sqrt(self.B.L*self.t)\n c = np.min([a,b])\n return np.min([1,c])", "def osmotic_equilibrium(L, nu, mu) :\n N = L**2 / mu\n return N", "def like(x, sigma, mu):\n L = np.ones(len(mu))\n for x_i,sigma_i in zip(x, sigma):\n L *= (1.0/np.sqrt(2.0*np.pi*sigma_i**2))*np.exp(-0.5*(x_i-mu)**2/(sigma_i**2))\n return L", "def wm(mu,se):\n\tnomsum =[]\n\tw = []\n\tfor i in range(len(mu)):\n\t\tnom = mu[i] / (se[i])**2\n\t\tnomsum.append(nom)\n\t\tw.append(1/(se[i])**2)\n\twmu = sum(nomsum) / sum(w)\n\treturn (wmu)", "def ordering_sign(permu, weights):\n sign_exp = 0\n for idx, j in enumerate(permu):\n to_add = [weights[permu.index(i)] for\n i in permu[idx + 1:] if i < j]\n sign_exp += weights[idx] * sum(to_add)\n return sign_exp % 2", "def exo5_q1(mu, x0, n, m):\r\n listex=[]\r\n listeuu=[]\r\n listex.append(x0)\r\n listeuu.append(mu[0]) #recopie liste mu\r\n a=x0\r\n for i in range(1,len(mu) -1):\r\n a=exo2_1(a,mu[i]) # a = Fu(xi, Ui)\r\n listex.append(a) # liste.append(a)\r\n listeuu.append(mu[i])\r\n \r\n return listeuu,listex", "def eval_mu(self, mu, T, Du, Dx):\n sample_u = mu[:, slice(Dx, Dx + Du)]\n l = 0.5 * np.sum(self._hyperparams['wu'] * (sample_u ** 2), axis=1)\n lu = self._hyperparams['wu'] * sample_u\n lx = np.zeros((T, Dx))\n luu = np.tile(np.diag(self._hyperparams['wu']), [T, 1, 1])\n lxx = np.zeros((T, Dx, Dx))\n lux = np.zeros((T, Du, Dx))\n return l, lx, lu, lxx, luu, lux", "def compute_mu_uniform(epoch: float, noise_multi: float, n: int, batch_size: int):\n\n t = epoch * n / batch_size\n c = batch_size * np.sqrt(t) / n\n return (\n np.sqrt(2)\n * c\n * np.sqrt(\n np.exp(noise_multi ** (-2)) * norm.cdf(1.5 / noise_multi)\n + 3 * norm.cdf(-0.5 / noise_multi)\n - 2\n )\n )", "def __mu_calc(self, mu_pi):\n return mu_pi[1] / (mu_pi[0] + mu_pi[1])", "def exo2_1(x,mu):\r\n return (x*1.0)*mu*(1-x)", "def mu(self):\n return self._mu", "def exo1():\n randu = randn(N/ 2, N/ 2, 2); % a random vector field\n b = 2\n for i in 1: 4:\n LLs_u = Li{i}(LiS{i}(randu))\n % relative error should be very small\n norm(abs(LLs_u(: ) - b*randu(: )))/ norm(randu(: ))", "def mu0(self):\n return self.__mu0", "def mu_na(n: float, a: float) -> float:\n return n * n * a * a * a", "def condition(self, tensor_list, prior_mu, smoothing_tensors=None):\n if self.smoothing:\n tensor_list.extend(smoothing_tensors)\n mu, sigma = super(NormalApproximatePosterior, self).condition(tensor_list)\n return mu + prior_mu, sigma", "def find(self, mu):\n for k, muk in enumerate(self.mu_db):\n if self.norm(muk - mu) == 0.0:\n return k+self.offset\n return None", "def test_gauss_list():\n x,g = cw04.gen_gaussian_list(-1,1,3)\n desired = [0.24197072451914337, 0.3989422804014327, 0.24197072451914337]\n print(\"Obtained:\",g)\n print(\"Desired:\",desired)\n # For comparing floating point values, nose has useful helper functions\n # to ensure they are equal up to a numerical precision tolerance\n nose.tools.assert_almost_equal(g, desired)", "def reparametrize(self, mu, sigma):\n return mu + sigma # TODO Your code goes here.", "def bmu_r(self, unit_input):\n return np.argmin([np.linalg.norm(diff) for diff in self.differences])", "def lsem (inlist):\r\n sd = stdev(inlist)\r\n n = len(inlist)\r\n return sd/math.sqrt(n)", "def MyMUlt( l ):\n\n #checking if arg is a list\n if isinstance(l, list):\n\n #myltiplying all numbs\n result = 1\n\n for numb in l:\n result = result * 
numb\n\n return result\n\n else:\n return \"Argument is not a list\"", "def _compute_mu_factor2(*input_mols):\n mu_factor = 1\n for mol in input_mols:\n mu_factor *= np.prod(fact(mol))\n return mu_factor", "def normal_like(x, mu=0.0, sigma=1.0):\n k = 2 * np.pi * sigma ** 2\n d = (x - mu) / sigma\n lnp = -0.5 * np.nansum(np.log(k) + d ** 2)\n return lnp", "def collapse(probability, input_list):\n\n result = int((probability * len(input_list)) + 0.5)\n return min(result, len(input_list) - 1)", "def f(x):\n return delta_eps_mu(x, mu) - delta", "def unbalanced_Wasserstein_L1(mu,nu,x = None,alpha = 1):\n\n N = mu.size\n \n if x is None:\n x = np.linspace(0,1,N)\n\n dx = x[1]-x[0]\n\n mass_diff = np.sum((mu-nu) * dx) \n\n Integrand = np.abs( np.cumsum(mu-nu) - x * mass_diff )\n\n\n UW1 = np.sum(Integrand * dx) + (1/alpha)* np.abs( mass_diff )\n\n return UW1", "def badMuons(self, allmuons, allvertices):\n\n muons = list(m for m in allmuons) # make it a python list\n goodMuon = []\n\n if len(allvertices) < 1: raise RuntimeError\n PV = allvertices[0].position()\n \n out = [] \n for mu in muons:\n if (not(mu.isPFMuon()) or mu.innerTrack().isNull()):\n goodMuon.append(-1); # bad but we don't care\n continue;\n if (self.preselection(mu)):\n dxypv = abs(mu.innerTrack().dxy(PV));\n dzpv = abs(mu.innerTrack().dz(PV));\n if (self.tighterId(mu)):\n ipLoose = ((dxypv < 0.5 and dzpv < 2.0) or mu.innerTrack().hitPattern().pixelLayersWithMeasurement() >= 2);\n goodMuon.append(ipLoose or (not(self.selectClones_) and self.tightGlobal(mu)));\n elif (self.safeId(mu)):\n ipTight = (dxypv < 0.2 and dzpv < 0.5);\n goodMuon.append(ipTight);\n else:\n goodMuon.append(0);\n else:\n goodMuon.append(3); # maybe good, maybe bad, but we don't care\n\n n = len(muons)\n for i in range(n):\n if (muons[i].pt() < self.ptCut_ or goodMuon[i] != 0): continue;\n bad = True;\n if (self.selectClones_):\n bad = False; # unless proven otherwise\n n1 = muons[i].numberOfMatches(ROOT.reco.Muon.SegmentArbitration);\n for j in range(n):\n if (j == i or goodMuon[j] <= 0 or not(self.partnerId(muons[j]))): continue\n n2 = muons[j].numberOfMatches(ROOT.reco.Muon.SegmentArbitration);\n if (deltaR(muons[i],muons[j]) < 0.4 or (n1 > 0 and n2 > 0 and ROOT.muon.sharedSegments(muons[i],muons[j]) >= 0.5*min(n1,n2))):\n bad = True;\n break;\n if (bad):\n out.append(muons[i]);\n return out", "def estimate(self, U, mu=None):\n raise NotImplementedError", "def mutateList(values, numMutate, vmin, vmax, rabs=True):\n\tmutations = set()\n\tcount = 0\n\twhile count < numMutate:\n\t\tj = randint(0, len(values)-1)\n\t\tif j not in mutations:\n\t\t\ts = np.random.uniform(vmin, vmax)\n\t\t\tvalues[j] = s if rabs else values[j] * s\n\t\t\tcount += 1\n\t\t\tmutations.add(j)\n\treturn values", "def uniform_list_check(value_list):\n return reduce((lambda acc, value: acc and value == value_list[0]), value_list, True)", "def adjust(data):\n mu = mean(data)\n return mu, map(lambda x: (x-mu), data)", "def _get_l1_mlist(n):\n global _l1_bucket_max\n if n == 1:\n mlist = [1]\n elif n <= _l1_bucket_max:\n mlist = [n, 2*n, n*n]\n else:\n mlist = [n, 2*n] #just to try\n return mlist", "def expected_improvement(f_min, mu, sigma):\n # log-scaling might not be the best idea here, especially\n # if people use negative values to maximize output\n # v = (np.log(f_min) - mu) / sigma\n v = (f_min - mu) / sigma\n return (f_min * norm.cdf(v)\n - (np.exp(0.5 * sigma ** 2 + mu)\n * norm.cdf(v - sigma)))", "def check(self, mu):\n for k, muk in enumerate(self.mu_db):\n if self.norm(muk - 
mu) == 0.0:\n ind = k+self.offset\n return ind, 'prim' in self.which[k], 'dual' in self.which[k]\n return None, False, False", "def bruteForce_MC(N,M):\n hewlist = np.zeros(M)\n for i in range(M):\n x = createDist(N)\n x = np.abs(x-np.mean(x))\n x.sort()\n hewlist[i] = np.median(x)*2.\n return np.mean(hewlist), np.std(hewlist)", "def testTicket1025(self):\n \n # check the exact example in the ticket\n values = [1.0, 2.0, 3.0, 2.0]\n self.assertEqual(afwMath.makeStatistics(values, afwMath.MEDIAN).getValue(), 2)\n self.assertEqual(afwMath.makeStatistics(sorted(values), afwMath.MEDIAN).getValue(), 2)\n\n # check some other possible ways it could show up\n values = range(10)\n self.assertEqual(afwMath.makeStatistics(values, afwMath.MEDIAN).getValue(), 4.5)\n values = range(11)\n self.assertEqual(afwMath.makeStatistics(values, afwMath.MEDIAN).getValue(), 5.0)", "def hoffman(mu_1, mu_n):\n return 1 + (mu_1 / abs(mu_n))", "def mixture_entropy_von_mises_fisher(mu_list, k_list, nr_samples = int(1e6)):\n nr_mixtures = len(mu_list)\n \n assert len(mu_list) == len(k_list), 'len of lists must be equal'\n \n mixt_coeff = np.random.randint(low=0, high=nr_mixtures,size = nr_samples)\n\n # select randomly nr_samples many mu and k parameters\n selected_mu = np.take(mu_list, mixt_coeff, axis=0)\n selected_k = np.take(k_list, mixt_coeff, axis=0)\n \n # get samples for these parameters\n samples = get_vMF_samples(mu = selected_mu, k =selected_k)\n\n return np.mean(-np.log(mixture_vMF_density(samples, mu_list,k_list)))", "def mean_stat(list):\n answer = 0 #Final value\n count = 0 #number of values\n\n ##Mean calculation\n for item in list:\n answer += item\n count +=1\n\n if(answer == 0):\n answer = \"List is Empty\"\n return answer\n else:\n answer = answer/count\n return answer", "def calculate_mu(return_data):\n return np.array(return_data.mean())", "def find(self, mu):\n for k, muk in enumerate(self.mu_db):\n if self.norm(muk - mu) == 0.0:\n ind = k+self.offset\n return ind, self.which[k]\n return None, None", "def minmax_add(mmlist, val):\r\n if len(mmlist) == 0:\r\n mmlist.append(val)\r\n return\r\n \r\n if val < mmlist[0]:\r\n mmlist.insert(-1,mmlist[0])\r\n mmlist[0] = val\r\n elif val > mmlist[-1]:\r\n mmlist.append(val)\r\n else:\r\n mmlist.insert(-1, val)", "def precondition(amp):\n n = len(amp)\n mean = np.mean(amp[:n/5])\n return -(amp-mean)", "def lsterr(inlist):\r\n return stdev(inlist) / float(math.sqrt(len(inlist)))", "def multk(w,l):\n lm=w.su3\n ##will need to change for python 3\n mk=max(0,int((lm.lbda+lm.mu+2.-l)/2))-max(0,int((lm.lbda+1.-l)/2))-max(0,int((lm.mu+1.-l)/2))\n return mk", "def delta_eps_mu(eps, mu):\n return norm.cdf(-eps / mu + mu / 2) - np.exp(eps) * norm.cdf(-eps / mu - mu / 2)", "def _compute_mu_factor(*input_mols):\n return np.sqrt(_compute_mu_factor2(*input_mols))", "def mu(self):\n return self.mass * G", "def iteration( M, sign_num):\n M_bootstrap = bootstrap(M)\n model = NMF(n_components = sign_num, solver = 'mu', max_iter = 10000000, init = 'random')\n #P = np.random.rand(len(M_bootstrap), sign_num)\n #E = np.random.rand(sign_num, len(M_bootstrap[0]))\n P = model.fit_transform(M_bootstrap)\n E = model.components_\n error = model.reconstruction_err_\n P , E = normalize(P, E)\n return P, error", "def zvalue(value, sigma, mu):\n return (value - mu) / sigma", "def calc_min(data: list) -> float:\n acc = data[0]\n for n in data:\n if n < acc:\n acc = n\n return float(acc)", "def prob1(L):\n return min(L), max(L), sum(L)/len(L)\n raise NotImplementedError(\"Problem 1 
Incomplete\")", "def join_param_between_bands(kwargs_list, prior_list):\n \n logL = 0\n \n if not kwargs_list: \n pass\n else:\n for i in range(len(prior_list)):\n mean_index,index,param,sigma = prior_list[i]\n\n model_val = kwargs_list[index][param] #model parameter on which to place prior\n mean_val = kwargs_list[mean_index][param] #mean value around which gaussian is centered\n\n dist = (model_val - mean_val)**2 / sigma ** 2 / 2\n logL -= np.sum(dist)\n# print('prior: {} \\n model value: {} \\n mean value: {} \\n sigma: {}'.format(param,model_val,mean_val,sigma))\n \n return logL", "def user_mobility(user_list):\n new_user_list = list()\n for user in user_list:\n # update loc according to user mobility type\n ii = random.randint(-user[3], user[3])\n print(\"user[3]= \", user[3])\n print(\"ii= \", ii)\n user[0] += random.randint(-user[3], user[3])\n user[1] += random.randint(-user[3], user[3])\n # restrict user loc in the cell range\n user[0] = max(user[0], LEFT_MARGIN + 4)\n user[0] = min(user[0], RIGHT_MARGIN - 4)\n user[1] = max(user[1], TOP_MARGIN + 4)\n user[1] = min(user[1], BOTTOM_MARGIN - 4)\n # update which cell user is in\n user[2] = which_cell(user[0], user[1])\n new_user_list.append(user)\n return new_user_list", "def pval(trueVal,randomWinsList):\n pvaluelist=[]\n denom=len(randomWinsList)\n for num in randomWinsList:\n if num >= trueVal:\n pvaluelist.append(num) \n pvaluelistLen = len(plist)\n \n return pvaluelistLen*1.0/denom*1.0", "def evaluate( self, mu ) :\n\n P = 0.\n for l, c_l in enumerate( self.coefficients ) : P += ( l + 0.5 ) * c_l * Legendre( l, mu, checkXRange = False ) \n return( P )", "def ExponentialPosteriorSample(bs, ls) :\n def exps(args) :\n lamb = args\n eps = K.random_uniform(shape=(bs, ls))\n ans = (-1./lamb) * K.log(-eps + 1)\n return ans\n return exps", "def s0(mu,P,nMin):\n return tau(0,mu,P) * (mu[1]*nMin[2] - mu[2]*nMin[1]) / (mu[1] - mu[2])", "def eg_bootmu():\n\n a = []\n b = []\n\n for _ in range(100):\n a.append(utils.gaussian(10, 1))\n\n print(\"\", \"mu\", \"sd\", \"cliffs\", \"boot\", \"both\", sep=\"\\t\")\n print(\"\", \"--\", \"--\", \"------\", \"----\", \"----\", sep=\"\\t\")\n\n for mu in range(100, 111):\n b = []\n\n for _ in range(100):\n b.append(utils.gaussian(mu / 10, 1))\n\n cl = utils.cliffsDelta(a, b)\n bs = stats.bootstrap(a, b)\n\n print(\"\", mu / 10, 1, cl, bs, cl and bs, sep=\"\\t\")", "def prob(x: np.ndarray, mu, sigma):\n n = mu.shape[0]\n inv = np.linalg.inv(sigma)\n den = np.sqrt(np.linalg.det(sigma)) * np.power(2 * np.pi, n / 2)\n dif = (x - mu).reshape(1, -1)\n num = -0.5 * dif @ inv @ dif.T\n num = np.exp(num)[0][0]\n return num / den", "def calculate_bmu(self, input):\n result = 0\n\n if len(input) > self.som.input_count:\n raise Exception(\n \"Can't train SOM with input size of {} with input data of count {}.\".format(self.som.input_count,\n len(input)))\n\n # Track the lowest distance so far.\n lowest_distance = float(\"inf\")\n\n for i in range(self.som.output_count):\n distance = self.calculate_euclidean_distance(self.som.weights, input, i)\n\n # Track the lowest distance, this is the BMU.\n if distance < lowest_distance:\n lowest_distance = distance\n result = i\n\n # Track the worst distance, this is the error for the entire network.\n if lowest_distance > self.worst_distance:\n self.worst_distance = lowest_distance\n\n return result", "def infection_probability(mu, sigma, n=1):\n prob = np.abs(np.random.normal(mu, sigma, n))\n \n if (prob + prob) <0 or (prob + prob) >1: \n prob = np.array([mu])\n prob = 
prob.tolist()[0]\n return prob", "def multinomial_likelihood(m_true, alpha, alpha0, m_probs):\n\n ll = tf.reduce_sum(input_tensor=m_true * (tf.math.log(alpha0) - tf.math.log(alpha)), axis=1, keepdims=True)\n ll = tf.reduce_mean(input_tensor=ll)\n return ll", "def mu_law_bins_tf(num_bins):\n #all edges\n bins_edge = tf.linspace(-1.0, 1.0, num_bins + 1)\n #center of all edges\n bins_center = tf.linspace(-1.0 + 1.0 / num_bins, 1.0 - 1.0 / num_bins, num_bins)\n #get the right edges\n bins_trunc = tf.concat([bins_edge[1:-1], [1.1]], 0)\n #if sample >= right edges, it might be assigned to the next bin, add 0.1 to avoid this\n #convert edges and centers to mu-law scale\n bins_edge_mu = tf.multiply(tf.sign(bins_trunc), (num_bins ** tf.abs(bins_trunc) - 1) / (num_bins - 1))\n bins_center_mu = tf.multiply(tf.sign(bins_center), (num_bins ** tf.abs(bins_center) - 1) / (num_bins - 1))\n \n return (bins_edge_mu, bins_center_mu)", "def ciw_95_normal (list1):\r\n sd = std(list1)\r\n return 1.96*sd/sqrt(len(list1))", "def test_mut_simple(self):\n domain = {0:{'min':0,'max':5},1:{'min':-5,'max':5}}\n sol = np.array([0,0])\n \n sol1 = d.mutate(sol,domain,mut_prob=0.5)\n print(f\"mut_simple: {sol1}\")\n self.assertTrue(0 <= sol1[0] <= 5) \n self.assertTrue(-5 <= sol1[1] <= 5)", "def lstdev (inlist):\r\n return math.sqrt(var(inlist))", "def test_sinc_list():\n x,sc = cw04.gen_sinc_list(-1,1,3)\n desired = [0.8414709848078965, 1.0 , 0.8414709848078965]\n print(\"Obtained:\",sc)\n print(\"Desired:\",desired)\n # For comparing floating point values, nose has useful helper functions\n # to ensure they are equal up to a numerical precision tolerance\n nose.tools.assert_almost_equal(sc, desired)", "def adaptive_parzen_estimator(\n mus: numpy.ndarray | Sequence,\n low: float,\n high: float,\n prior_weight: float = 1.0,\n equal_weight: bool = False,\n flat_num: int = 25,\n) -> tuple[numpy.ndarray, numpy.ndarray, numpy.ndarray]:\n mus = numpy.asarray(mus)\n\n prior_mu = (low + high) * 0.5\n prior_sigma = (high - low) * 1.0\n\n size = len(mus)\n if size > 1:\n order = numpy.argsort(mus)\n sorted_mus = mus[order]\n prior_mu_pos = numpy.searchsorted(sorted_mus, prior_mu)\n\n weights = ramp_up_weights(size, flat_num, equal_weight)\n\n mixture_mus = numpy.zeros(size + 1)\n mixture_mus[:prior_mu_pos] = sorted_mus[:prior_mu_pos]\n mixture_mus[prior_mu_pos] = prior_mu\n mixture_mus[prior_mu_pos + 1 :] = sorted_mus[prior_mu_pos:]\n\n mixture_weights = numpy.ones(size + 1)\n mixture_weights[:prior_mu_pos] = weights[:prior_mu_pos]\n mixture_weights[prior_mu_pos] = prior_weight\n mixture_weights[prior_mu_pos + 1 :] = weights[prior_mu_pos:]\n\n sigmas = numpy.ones(size + 1)\n sigmas[0] = mixture_mus[1] - mixture_mus[0]\n sigmas[-1] = mixture_mus[-1] - mixture_mus[-2]\n sigmas[1:-1] = numpy.maximum(\n (mixture_mus[1:-1] - mixture_mus[0:-2]),\n (mixture_mus[2:] - mixture_mus[1:-1]),\n )\n sigmas = numpy.clip(\n sigmas, prior_sigma / max(10, numpy.sqrt(size)), prior_sigma\n )\n\n else:\n if prior_mu < mus[0]:\n\n mixture_mus = numpy.array([prior_mu, mus[0]])\n sigmas = numpy.array([prior_sigma, prior_sigma * 0.5])\n mixture_weights = numpy.array([prior_weight, 1.0])\n else:\n mixture_mus = numpy.array([mus[0], prior_mu])\n sigmas = numpy.array([prior_sigma * 0.5, prior_sigma])\n mixture_weights = numpy.array([1.0, prior_weight])\n\n weights = mixture_weights / mixture_weights.sum()\n\n return mixture_mus, sigmas, weights", "def TST_MMD_u(Fea, N_per, N1, Fea_org, sigma, sigma0, ep, alpha, device, dtype, is_smooth=True):\r\n mmd_vector 
= np.zeros(N_per)\r\n TEMP = MMDu(Fea, N1, Fea_org, sigma, sigma0, ep, is_smooth)\r\n mmd_value = get_item(TEMP[0], is_cuda)\r\n Kxyxy = TEMP[2]\r\n count = 0\r\n nxy = Fea.shape[0]\r\n nx = N1\r\n for r in range(N_per):\r\n # print r\r\n ind = np.random.choice(nxy, nxy, replace=False)\r\n # divide into new X, Y\r\n indx = ind[:nx]\r\n # print(indx)\r\n indy = ind[nx:]\r\n Kx = Kxyxy[np.ix_(indx, indx)]\r\n # print(Kx)\r\n Ky = Kxyxy[np.ix_(indy, indy)]\r\n Kxy = Kxyxy[np.ix_(indx, indy)]\r\n\r\n TEMP = h1_mean_var_gram(Kx, Ky, Kxy, is_var_computed=False)\r\n mmd_vector[r] = TEMP[0]\r\n if mmd_vector[r] > mmd_value:\r\n count = count + 1\r\n if count > np.ceil(N_per * alpha):\r\n h = 0\r\n threshold = \"NaN\"\r\n break\r\n else:\r\n h = 1\r\n if h == 1:\r\n S_mmd_vector = np.sort(mmd_vector)\r\n # print(np.int(np.ceil(N_per*alpha)))\r\n threshold = S_mmd_vector[np.int(np.ceil(N_per * (1 - alpha)))]\r\n return h, threshold, mmd_value.item()", "def medianErrorOfList(num):\n numberList = []\n for t in num:\n for x in range(1, 2): #additional for loop that is only executed if the respective input value is not empty\n if t == '':\n break\n else:\n numberList.append(t) #create a numberList from num by omitting all empty elements\n \n median = statistics.median(numberList) #if number of elements is odd, take the one in the middle\n return median #if number of elements is even, take average of the two middle values", "def de_mean(xs: List[float]) -> float:\n x_bar = mean(xs)\n return [x - x_bar for x in xs]", "def normalize(list, max_value=1):\n maxi = max(list)\n mini = min(list)\n\n if maxi == mini or len(list) == 1:\n return list\n \n norm = []\n\n for item in list:\n new = max_value * ((item - mini) / (maxi - mini))\n norm.append(new)\n\n return norm", "def testExpected_selfDistrib_withMinDist(self):\n\t\tself.minDistVal = 0.1\n\t\tself.useGroups = [ [0,0] ]\n\t\tself.createTestObjs()\n\t\texpVals = [ (1,), (1,) ]\n\t\tactVals = self.binValGetter.getValsToBin(self.sparseMatrixCalculator)\n\n\t\tfor expIter, actIter in it.zip_longest(expVals,actVals):\n\t\t\t[self.assertAlmostEqual(exp,act,places=6) for exp,act in it.zip_longest(expIter, actIter)]", "def expected_counts_stationary(T, n, mu=None):\n if n<=0:\n EC=np.zeros(T.shape)\n return EC\n else:\n if mu is None:\n mu=statdist(T)\n EC=n*mu[:, np.newaxis]*T\n return EC", "def Phi_nu_mu1(self, E_nu, N=1e24):\n #check this \n try:\n phi = [0.]*len(E_nu)\n\n Int = lambda x: self.crossSection(x) * self.protonFlux(x) * self.F_nu_mu_1(E_nu/x, x) / x\n\n for i, E_nu in enumerate(E_nu):\n logE_min = np.log10(E_nu)\n logE_max = 10\n\n Intervals = int(np.ceil(logE_max-logE_min))\n\n IntegrationBoundary = np.logspace(logE_min, logE_max, Intervals+1)\n #print IntegrationBoundary\n\n for j in range(Intervals):\n phi[i] += 1.6*N*quad(Int, IntegrationBoundary[j], IntegrationBoundary[j+1])[0]\n\n return np.array(phi)\n\n except TypeError as e:\n phi = 0.\n\n Int = lambda x: self.crossSection(x) * self.protonFlux(x) * self.F_nu_mu_1(E_nu/x, x) / x\n\n logE_min = np.log10(E_nu)\n logE_max = 10\n\n Intervals = int(np.ceil(logE_max-logE_min))\n IntegrationBoundary = np.logspace(logE_min, logE_max, Intervals+1)\n #print IntegrationBoundary\n for i in range(Intervals):\n phi += 1.6*N*quad(Int, IntegrationBoundary[i], IntegrationBoundary[i+1])[0]\n print (phi)\n\n return phi", "def lmoment(inlist,moment=1):\r\n if moment == 1:\r\n return 0.0\r\n else:\r\n mn = mean(inlist)\r\n n = len(inlist)\r\n s = 0\r\n for x in inlist:\r\n s = s + (x-mn)**moment\r\n return 
s/float(n)", "def mu_law_bins(num_bins):\n #all edges\n bins_edge = np.linspace(-1, 1, num_bins + 1)\n #center of all edges\n bins_center = np.linspace(-1 + 1.0 / num_bins, 1 - 1.0 / num_bins, num_bins)\n #get the right edges\n bins_trunc = bins_edge[1:]\n #if sample >= right edges, it might be assigned to the next bin, add 0.1 to avoid this\n bins_trunc[-1] += 0.1\n #convert edges and centers to mu-law scale\n bins_edge_mu = np.multiply(np.sign(bins_trunc), (num_bins ** np.absolute(bins_trunc) - 1) / (num_bins - 1))\n bins_center_mu = np.multiply(np.sign(bins_center), (num_bins ** np.absolute(bins_center) - 1) / (num_bins - 1))\n \n return (bins_edge_mu, bins_center_mu)", "def test_ul_per_mm_continuous(pipette_model: PipetteModel) -> None:\n config = pipette_config.load(pipette_model)\n aspirate = config.ul_per_mm[\"aspirate\"]\n dispense = config.ul_per_mm[\"dispense\"]\n min_vol = 0.000001 # sufficiently small starting volume\n for lno in range(len(aspirate) - 1):\n line = aspirate[lno]\n curr_max_vol = line[0]\n # find a halfway point roughly between max and min volume for a given\n # piecewise sequence of a pipette function\n half_max_vol = (curr_max_vol - min_vol) / 2 + min_vol\n\n min_ul_per_mm = line[1] * min_vol + line[2]\n mid_ul_per_mm = line[1] * half_max_vol + line[2]\n max_ul_per_mm = line[1] * curr_max_vol + line[2]\n\n lower_mm = min_ul_per_mm / min_vol\n higher_mm = max_ul_per_mm / curr_max_vol\n half_mm = mid_ul_per_mm / half_max_vol\n\n range_1 = (half_mm >= lower_mm) and (half_mm <= higher_mm)\n range_2 = (half_mm <= lower_mm) and (half_mm >= higher_mm)\n\n assert range_1 or range_2\n\n min_vol = curr_max_vol\n # make sure the mm of movement for max aspirate and max dispense agree\n aspirate_seq = aspirate[len(aspirate) - 1]\n dispense_seq = dispense[len(dispense) - 1]\n pip_max_vol = config.max_volume\n aspirate_mm = (aspirate_seq[1] * pip_max_vol + aspirate_seq[2]) / pip_max_vol\n dispense_mm = (dispense_seq[1] * pip_max_vol + dispense_seq[2]) / pip_max_vol\n # for many of the older pipettes, the aspirate and dispense values are\n # not the same.\n assert isclose(round(aspirate_mm), round(dispense_mm))", "def lsamplevar (inlist):\r\n n = len(inlist)\r\n mn = mean(inlist)\r\n deviations = []\r\n for item in inlist:\r\n deviations.append(item-mn)\r\n return ss(deviations)/float(n)", "def _builtin_sample_uniform(key, lst, result, database=None, target=None, **kwdargs):\n mode = check_mode((key, lst, result,), ['gLv', 'gLn'], database=database, **kwdargs)\n identifier = '_uniform_%s' % key\n elements, tail = list_elements(lst)\n if len(elements) == 0:\n return []\n else:\n prob = Constant(1 / float(len(elements)))\n results = []\n if mode == 0:\n for i, elem in enumerate(elements):\n elem_identifier = (identifier, i)\n # res = unify_value(result, elem)\n results.append(((key, lst, elem),\n target.add_atom(identifier=elem_identifier, probability=prob,\n group=identifier)))\n else:\n res = None\n for el in elements:\n try:\n res = unify_value(el, result, {})\n break\n except UnifyError:\n pass\n if res is not None:\n results.append(((key, lst, res),\n target.add_atom(identifier=identifier, probability=prob)))\n return results", "def mean(l):\n return sum(f) / max(1, len(f))", "def mu(self, v):\n return (np.dot(self.mat_mu, v) + self.mu_ext)", "def nextMin(value,lista):\n for i in lista:\n if i<value:\n return i\n raise NameError('No value')", "def cmudf(df, mu, alphamu):\r\n return (alphamu + mu - 2. 
+ 1./mu) / (df + 4.*sqrt(df) + mu/2.)", "def mu(o1, o2):\n return o1*o2", "def mumo_op(op, mp, mq):\n if op == 3:\n musol = [mp[0] * mq[0], mp[1][:], mp[2][:]]\n for i, v in enumerate(mq[1]): # per cada variable del segon\n if v in musol[1]: # si ja la tenia\n j = musol[1].index(v) # miro on la tinc\n musol[2][j] += mq[2][i] # sumo els graus\n else: # no la tenia\n musol[1].append(mq[1][i]) # l'afegeixo\n musol[2].append(mq[2][i])\n else:\n musol = \"No has pas definit res d'això...\"\n return musol", "def find_min(start, user_list):\n\n minimum = start\n for j in range(start, len(user_list)):\n if user_list[minimum].value > user_list[j].value:\n minimum = j\n\n return minimum", "def sample_mu(self, val) -> None:\n\n # get data\n data = self.data.reshape((1, -1))\n\n # get values\n gain = val.gain\n states = val.states\n mu_flor = val.mu_flor\n mu_flor_mean = val.mu_flor_mean\n mu_flor_shape = val.mu_flor_shape\n mu_back = val.mu_back\n mu_back_mean = val.mu_back_mean\n mu_back_shape = val.mu_back_shape\n num_data = val.num_data\n num_rois = val.num_rois\n num_states = val.num_states\n\n # initialze variables\n num_vars = num_states + num_rois\n idx = np.where(val.mu_flor_mean > 0)[0]\n # shape\n shape = np.zeros((num_vars, 1))\n shape[:num_states, 0] = mu_flor_shape[:]\n shape[num_states:, 0] = mu_back_shape\n # scale\n scale = np.zeros((num_vars, 1))\n scale[idx, 0] = mu_flor_mean[idx] / mu_flor_shape[idx]\n scale[num_states:, 0] = (mu_back_mean / mu_back_shape)[:]\n\n # initialize a mu vector containing the variables we wish to sample, mu_flor and mu_back\n q = np.zeros((num_vars, 1))\n q[:num_states, 0] = mu_flor[:]\n q[num_states:, 0] = mu_back[:]\n q_old = q.copy()\n idy = q > 0 # keep track of which states are dark (we only sample bright states)\n num_var = q.shape[0]\n\n # hmc dynamics variables\n h = np.random.exponential() / 100\n masses = (1 + np.random.pareto(1, size=q.shape))\n masses_inv = np.zeros(shape=masses.shape) # negative mass is interpretted as an unchanging variable\n masses_inv[masses > 0] = 1 / masses[masses > 0]\n num_steps = np.random.poisson(25)\n\n # create populations array\n pops = np.zeros((num_vars, num_rois * num_data))\n \"\"\"\n pops is an array such that each element i, j corresponds to the \n multiplicitive factor in front of q[i] for data point j in the \n likelihood. 
For example, if in ROI 1 at time level 17 there are two\n fluorophores in the bright state, then we find the element, j,\n corresponding to ROI 1 and time level 17, and we find the element,\n i, corresponding to the bright state, and we set q[i,j]=2 (because\n there are two bright fluorophores), then we would find the i\n corresponding to the background brightness of ROI 1, and we would\n set this q[i,j]=1 (the multiplicitive factor in front of the \n background brightness is 1 when it is the corresponding ROI and 0\n otherwise).\n \"\"\"\n for r in range(num_rois):\n idx = np.arange(r*num_data, (r+1)*num_data)\n pops[:num_states, idx] = states_to_pops(states[r, :, :], num_states)\n pops[num_states + r, idx] = 1\n\n # the conditional probability for the mu vector\n def probability(q_, p_):\n if np.sum(q_ < 0) > 0:\n prob = -np.inf\n else:\n prob = (\n np.sum(stats.gamma.logpdf(data, a=q_.T @ pops, scale=gain)) # likelihood\n + np.sum(stats.gamma.logpdf(q_[idy], a=shape[idy], scale=scale[idy])) # prior\n + np.sum(stats.norm.logpdf(p_[idy], loc=0, scale=np.sqrt(masses[idy]))) # momentum\n )\n return prob\n\n # the gradient of the Hamiltonian with respect to the mu_vector\n def dH_dq(q_):\n if np.any(q_ < 0):\n \"\"\"\n In the event that q_new becomes negative, fast_digamma becomes\n slow. Since q should never be negative anyway, there is no\n need for further computation and we can skip this step knowing\n that this value of q will be rejected anyway.\n \"\"\"\n return q_\n q_new = np.zeros(q_.shape)\n q_new[idy] = (\n (shape[idy] - 1) / q_[idy] - 1 / scale[idy]\n + (pops @ (np.log(data / gain) - fast_digamma(q_.T @ pops)).T)[idy]\n )\n return q_new\n\n # sample momentum\n p = np.random.randn(num_var, 1) * np.sqrt(masses)\n p_old = p.copy()\n\n # run the HMC\n for i in range(num_steps):\n p = p + .5 * h * dH_dq(q)\n q = q + h * p * masses_inv\n p = p + .5 * h * dH_dq(q)\n\n # find acceptance ratio\n P_new = probability(q, p)\n P_old = probability(q_old, p_old)\n if (P_new - P_old) < np.log(np.random.rand()):\n q = q_old\n\n # update the new mu values\n val.mu_flor[:] = q[:num_states, 0]\n val.mu_back[:] = q[num_states:, 0]\n\n return" ]
[ "0.6373463", "0.6199289", "0.6165148", "0.6024897", "0.60122585", "0.59807706", "0.5785042", "0.5725333", "0.5695361", "0.5641516", "0.5622558", "0.5540344", "0.55360234", "0.55046725", "0.54993856", "0.5480164", "0.54566556", "0.5363416", "0.535079", "0.53319746", "0.5316501", "0.5310478", "0.5301416", "0.5296882", "0.52892697", "0.5265967", "0.52513504", "0.52357733", "0.5235453", "0.5206244", "0.51990837", "0.5190812", "0.5188229", "0.5185906", "0.517859", "0.51643556", "0.51540047", "0.514585", "0.51418924", "0.51396763", "0.51393604", "0.5137403", "0.5134219", "0.51218706", "0.5119816", "0.5118795", "0.51072216", "0.5106724", "0.51035655", "0.5088986", "0.50878435", "0.5082839", "0.50774324", "0.5075289", "0.50625145", "0.5061672", "0.5061269", "0.5060245", "0.5057236", "0.5051045", "0.5047363", "0.5044244", "0.50435627", "0.5040812", "0.5038929", "0.5038719", "0.5037006", "0.5036676", "0.50294787", "0.50275207", "0.5007173", "0.50066125", "0.50041014", "0.5003992", "0.49958545", "0.4995106", "0.49895585", "0.49827984", "0.49827346", "0.49816862", "0.49747327", "0.49699664", "0.49691263", "0.49678147", "0.4959819", "0.49594614", "0.49586228", "0.49520507", "0.4947132", "0.4932469", "0.49323732", "0.49273595", "0.49221236", "0.49201518", "0.49143413", "0.4912724", "0.49109265", "0.49013427", "0.48907164", "0.48820108" ]
0.58873326
6
Use in `__init__()` only; it assigns all args/kwargs to instance attributes. To maintain precedence of args provided to subclasses, call this in the subclass before `super().__init__()` if `save__init__args()` also appears in the base class, or use `overwrite=True`. With `subclass_only==True`, only the args/kwargs listed in the current subclass apply.
from inspect import getfullargspec

def save__init__args(values, underscore=False, overwrite=False, subclass_only=False):
    prefix = "_" if underscore else ""
    self = values['self']
    args = list()
    Classes = type(self).mro()
    if subclass_only:
        Classes = Classes[:1]
    for Cls in Classes:  # class inheritances
        if '__init__' in vars(Cls):
            args += getfullargspec(Cls.__init__).args[1:]
    for arg in args:
        attr = prefix + arg
        if arg in values and (not hasattr(self, attr) or overwrite):
            setattr(self, attr, values[arg])
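A minimal usage sketch with a hypothetical class and parameter names (assumed for illustration); it follows the call pattern implied by `values['self']`, passing `locals()` at the top of `__init__`:

class Agent:
    def __init__(self, lr=0.01, gamma=0.99):
        save__init__args(locals())  # sets self.lr and self.gamma from the call args

a = Agent(lr=0.1)
print(a.lr, a.gamma)  # 0.1 0.99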
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init_subclass__(cls, **kwargs):\n super().__init_subclass__(**kwargs)\n cls.__init__ = _wrap_init(cls.__init__, cls.__post_init_check)", "def kwargs_to_parent(cls):\n original_init = cls.__init__\n\n def new_init(self, *args, **kwargs):\n # pass only those kwargs to the dataclass which are expected\n dataclass_kwargs = {\n key: value\n for key, value in kwargs.items()\n if key in [f.name for f in dataclasses.fields(cls)]\n }\n\n # pass args and kwargs to the dataclasses' __init__\n original_init(self, *args, **dataclass_kwargs)\n\n # update kwargs with default arguments\n kwargs.update(dataclasses.asdict(self))\n\n # Pass only those arguments to solph component's __init__ that\n # are expected.\n init_expected_args = list(\n inspect.signature(super(cls, self).__init__).parameters\n )\n\n kwargs_expected = {\n key: value\n for key, value in kwargs.items()\n if key in init_expected_args\n }\n\n kwargs_unexpected = {\n key: value\n for key, value in kwargs.items()\n if key not in init_expected_args\n }\n\n if \"custom_attributes\" in init_expected_args:\n kwargs_expected[\"custom_attributes\"] = kwargs_unexpected\n\n if kwargs_unexpected and \"custom_attributes\" not in init_expected_args:\n warnings.warn(\n f\"No custom_attributes in parent class {cls.__mro__[1]}\"\n )\n\n super(cls, self).__init__(\n **kwargs_expected,\n )\n\n if not kwargs.get(\"build_solph_components\") is False:\n self.build_solph_components()\n\n cls.__init__ = new_init\n return cls", "def __init_subclass__(cls, **kwargs):\n super().__init_subclass__(**kwargs)\n if None not in (cls.setpoint, cls.readback):\n cls.setpoint.sub_value(cls._update_setpoint)\n cls.readback.sub_value(cls._update_readback)", "def __init__(self, *save_args, **save_kwargs):\n self.provides = save_kwargs.pop('provides', None)\n self.flag_update = save_kwargs.pop('flag_update', True)\n self.save_args = save_args\n self.save_kwargs = save_kwargs", "def initArgs(self, args):\n ignore = ['self', 'kwargs', 'args']\n for k,v in args.iteritems():\n if k not in ignore: \n setattr(self, k, v)\n pass", "def copy(self, **override):\n newargs = self._get_init_args()\n newargs.update(override)\n return self.__class__(**newargs)", "def __new__(cls, *args, **kwargs):\n # create the instance by calling the base class __new__\n obj = cls.__base__.__new__(cls)\n # using super() did not work here -- why??\n # set the instance attributes to defaults\n for attr, typ in cls._attrs_to_save.items():\n setattr(obj, attr, typ.default)\n return obj", "def __init__(self, *args, internal_only=False, external_only=False, **kwargs):\n self.internal_only = internal_only\n self.external_only = external_only\n super().__init__(*args, **kwargs)", "def __init__(self, **kwargs):\n self.__dict__.update(kwargs)", "def __init__(self, **kwargs):\n self.__dict__.update(kwargs)", "def _copy_kwargs(self, **kwargs):\n ns = self.__dict__\n for attr, kw in {'_engine': 'engine', '_format': 'format'}.items():\n assert kw not in kwargs\n if attr in ns:\n kwargs[kw] = ns[attr]\n return super()._copy_kwargs(**kwargs)", "def __init__( self, **kwargs ):\n self.__dict__.update( kwargs )", "def __init_subclass__(self, *args, **kwargs): # real signature unknown\n pass", "def __init_subclass__(self, *args, **kwargs): # real signature unknown\n pass", "def __init_subclass__(self, *args, **kwargs): # real signature unknown\n pass", "def __init_subclass__(self, *args, **kwargs): # real signature unknown\n pass", "def __init_subclass__(self, *args, **kwargs): # real signature unknown\n pass", "def 
__init_subclass__(self, *args, **kwargs): # real signature unknown\n pass", "def __init_subclass__(self, *args, **kwargs): # real signature unknown\n pass", "def __init_subclass__(self, *args, **kwargs): # real signature unknown\n pass", "def __init_subclass__(self, *args, **kwargs): # real signature unknown\n pass", "def __init_subclass__(self, *args, **kwargs): # real signature unknown\n pass", "def __init_subclass__(self, *args, **kwargs): # real signature unknown\n pass", "def __init_subclass__(self, *args, **kwargs): # real signature unknown\n pass", "def __init_subclass__(self, *args, **kwargs): # real signature unknown\n pass", "def __init_subclass__(self, *args, **kwargs): # real signature unknown\n pass", "def __init_subclass__(self, *args, **kwargs): # real signature unknown\n pass", "def __init_subclass__(self, *args, **kwargs): # real signature unknown\n pass", "def __init_subclass__(self, *args, **kwargs): # real signature unknown\n pass", "def __init_subclass__(self, *args, **kwargs): # real signature unknown\n pass", "def __init_subclass__(self, *args, **kwargs): # real signature unknown\n pass", "def __init_subclass__(self, *args, **kwargs): # real signature unknown\n pass", "def __init_subclass__(self, *args, **kwargs): # real signature unknown\n pass", "def __init_subclass__(self, *args, **kwargs): # real signature unknown\n pass", "def __init_subclass__(self, *args, **kwargs): # real signature unknown\n pass", "def __init_subclass__(self, *args, **kwargs): # real signature unknown\n pass", "def __init_subclass__(self, *args, **kwargs): # real signature unknown\n pass", "def __init_subclass__(self, *args, **kwargs): # real signature unknown\n pass", "def __init_subclass__(self, *args, **kwargs): # real signature unknown\n pass", "def __init_subclass__(self, *args, **kwargs): # real signature unknown\n pass", "def __init_subclass__(self, *args, **kwargs): # real signature unknown\n pass", "def __init_subclass__(self, *args, **kwargs): # real signature unknown\n pass", "def __init_subclass__(self, *args, **kwargs): # real signature unknown\n pass", "def __init_subclass__(self, *args, **kwargs): # real signature unknown\n pass", "def __init_subclass__(self, *args, **kwargs): # real signature unknown\n pass", "def __init_subclass__(self, *args, **kwargs): # real signature unknown\n pass", "def __init_subclass__(self, *args, **kwargs): # real signature unknown\n pass", "def __init_subclass__(self, *args, **kwargs): # real signature unknown\n pass", "def __init_subclass__(self, *args, **kwargs): # real signature unknown\n pass", "def __init_subclass__(self, *args, **kwargs): # real signature unknown\n pass", "def __init_subclass__(self, *args, **kwargs): # real signature unknown\n pass", "def __init_subclass__(self, *args, **kwargs): # real signature unknown\n pass", "def __init_subclass__(self, *args, **kwargs): # real signature unknown\n pass", "def __init_subclass__(self, *args, **kwargs): # real signature unknown\n pass", "def __init_subclass__(self, *args, **kwargs): # real signature unknown\n pass", "def __init_subclass__(self, *args, **kwargs): # real signature unknown\n pass", "def __init_subclass__(self, *args, **kwargs): # real signature unknown\n pass", "def __init_subclass__(self, *args, **kwargs): # real signature unknown\n pass", "def __init_subclass__(self, *args, **kwargs): # real signature unknown\n pass", "def __init_subclass__(self, *args, **kwargs): # real signature unknown\n pass", "def __init_subclass__(self, *args, **kwargs): # real signature unknown\n 
pass", "def __init_subclass__(self, *args, **kwargs): # real signature unknown\n pass", "def __init_subclass__(self, *args, **kwargs): # real signature unknown\n pass", "def __init_subclass__(self, *args, **kwargs): # real signature unknown\n pass", "def __init_subclass__(self, *args, **kwargs): # real signature unknown\n pass", "def __init_subclass__(self, *args, **kwargs): # real signature unknown\n pass", "def __init_subclass__(self, *args, **kwargs): # real signature unknown\n pass", "def __init_subclass__(self, *args, **kwargs): # real signature unknown\n pass", "def __init_subclass__(self, *args, **kwargs): # real signature unknown\n pass", "def __init_subclass__(self, *args, **kwargs): # real signature unknown\n pass", "def __init_subclass__(self, *args, **kwargs): # real signature unknown\n pass", "def __init_subclass__(*args,**kw):\n pass", "def __init__(self, *args, **kwargs):\n\n self.logger = util.get_logger()\n self.args = args\n self.kwargs = kwargs\n for key, value in kwargs.items():\n setattr(self, key, value)", "def __init__(self, **kwargs):\n # loop over the given kwargs\n for key, value in kwargs.items():\n # treat them like attribute assignments\n setattr(self, key, value)", "def __init__(self, *args, **kwargs):\n\n kwargs.update(allow_read=False)\n kwargs.update(allow_write=False)\n\n super().__init__(*args, **kwargs)", "def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.__args = args\n self.__kwargs = kwargs", "def __init_subclass__(cls) -> None:\n super().__init_subclass__()\n dataclass(cls)", "def get_extra_kwargs(self):\n extra_kwargs = copy.deepcopy(getattr(self.Meta, \"extra_kwargs\", {}))\n\n read_only_fields = getattr(self.Meta, \"read_only_fields\", None)\n if read_only_fields is not None:\n if not isinstance(read_only_fields, (list, tuple)):\n raise TypeError(\n \"The `read_only_fields` option must be a list or tuple. \"\n \"Got %s.\" % type(read_only_fields).__name__\n )\n for field_name in read_only_fields:\n kwargs = extra_kwargs.get(field_name, {})\n kwargs[\"read_only\"] = True\n extra_kwargs[field_name] = kwargs\n\n else:\n # Guard against the possible misspelling `readonly_fields` (used\n # by the Django admin and others).\n assert not hasattr(self.Meta, \"readonly_fields\"), (\n \"Serializer `%s.%s` has field `readonly_fields`; \"\n \"the correct spelling for the option is `read_only_fields`.\"\n % (self.__class__.__module__, self.__class__.__name__)\n )\n\n return extra_kwargs", "def __init_subclass__(cls, **kwargs):\n base_specs = {}\n for base in cls.__bases__:\n if issubclass(base, Params):\n base_specs.update(base.__specs)\n\n cls_specs = [] # evaluate in order of declaration\n for attr, value in cls.__dict__.items():\n if attr.startswith(\"_\") or callable(getattr(cls, attr)):\n continue\n\n attr_val = getattr(cls, attr)\n if isinstance(attr_val, property):\n param_spec = Param(attr_val.fget, params_class=cls)\n elif not isinstance(attr_val, Param):\n param_spec = Param(value)\n else:\n param_spec = attr_val\n\n param_spec.name = attr\n cls_specs.append((attr, param_spec))\n\n _specs = {}\n for attr, value in list(base_specs.items()) + cls_specs:\n setattr(cls, attr, value.default_value)\n _specs[attr] = value\n\n cls.__specs = _specs\n cls.__defaults = {key: val.default_value for key, val in cls.__specs.items()}", "def __init_subclass__(cls):\n # All Flax Modules are dataclasses. 
We force this convention since\n # it encourages the stateless behavior needed to clone module instances for\n # functional transformation. Instead of using a python metaclass, we\n # automatically transform Modules into dataclasses at subclass creation\n # time, and we set the last dataclass arguments to `parent` and `name`.\n cls._customized_dataclass_transform()\n # We wrap user-defined methods including setup and __call__ to enforce\n # a number of different checks and to provide clear error messages.\n cls._verify_single_or_no_compact()\n cls._wrap_module_methods()\n # Set empty class defaults.\n cls._state = _uninitialized_module_internal_state\n cls.scope = None", "def __init__(self, **kwargs):\n # TODO: see if i can remove keyword args\n super().__init__()\n self._updateData = {}", "def __new__(cls, *args, **kwargs):\n obj = super().__new__(cls)\n obj.init_kwargs = cls.init_params(*args, **kwargs)\n return obj", "def save(self, *args, **kwargs):\n raise NotImplementedError('missing data mixin')", "def __init__(self, **kwargs):\n # Handle whatever kwargs we want here\n self.io_mapping = kwargs.get(\"io_mapping\", [])\n\n self.create_inputs = kwargs.get(\"create_inputs\", [])\n self.min_completion_fraction = kwargs.get(\"min_completion_fraction\", 1.0)\n\n # Now pass all of them to the parent class\n super(self.__class__, self).__init__(**kwargs)", "def __init__(self, *args, **kwargs):\n for dictionary in [_ for _ in args if isinstance(_, dict)]:\n for key in dictionary:\n setattr(self, key, dictionary[key])\n for key in kwargs:\n setattr(self, key, kwargs[key])", "def __init__(self, *args, **kwargs):\n if (args and type(args) is dict):\n BaseModel.__init__(self, args[0])\n else:\n BaseModel.__init__(self)", "def __init_subclass__(*args, **kwargs): # real signature unknown\n pass", "def __init_subclass__(*args, **kwargs): # real signature unknown\n pass", "def __init_subclass__(*args, **kwargs): # real signature unknown\n pass", "def __init_subclass__(*args, **kwargs): # real signature unknown\n pass", "def __init_subclass__(*args, **kwargs): # real signature unknown\n pass", "def __init_subclass__(*args, **kwargs): # real signature unknown\n pass", "def __subclasshook__(*args):", "def __subclasshook__(*args):", "def __subclasshook__(*args):", "def __subclasshook__(*args):", "def __subclasshook__(*args):", "def __subclasshook__(*args):", "def __subclasshook__(*args):", "def __subclasshook__(*args):" ]
[ "0.6435379", "0.6124592", "0.59995186", "0.5889906", "0.5642248", "0.551526", "0.5499164", "0.5485295", "0.54716355", "0.54716355", "0.5430047", "0.5418889", "0.53908145", "0.53908145", "0.53908145", "0.53908145", "0.53908145", "0.53908145", "0.53908145", "0.53908145", "0.53908145", "0.53908145", "0.53908145", "0.53908145", "0.53908145", "0.53908145", "0.53908145", "0.53908145", "0.53908145", "0.53908145", "0.53908145", "0.53908145", "0.53908145", "0.53908145", "0.53908145", "0.53908145", "0.53908145", "0.53908145", "0.53908145", "0.53908145", "0.53908145", "0.53908145", "0.53908145", "0.53908145", "0.53908145", "0.53908145", "0.53908145", "0.53908145", "0.53908145", "0.53908145", "0.53908145", "0.53908145", "0.53908145", "0.53908145", "0.53908145", "0.53908145", "0.53908145", "0.53908145", "0.53908145", "0.53908145", "0.53908145", "0.53908145", "0.53908145", "0.53908145", "0.53908145", "0.53908145", "0.53908145", "0.53908145", "0.53908145", "0.53908145", "0.53908145", "0.5350231", "0.53242445", "0.53226817", "0.53003854", "0.529636", "0.5264756", "0.5240403", "0.5221585", "0.5217685", "0.52093506", "0.52029", "0.5183168", "0.51799977", "0.5173419", "0.5169438", "0.5169417", "0.5169417", "0.5169417", "0.5169417", "0.5169417", "0.5169417", "0.5084597", "0.5084597", "0.5084597", "0.5084597", "0.5084597", "0.5084597", "0.5084597", "0.5084597" ]
0.7307496
0
Check if the OpenLDAP container is up.
def is_ldap_up(host, port):
    conn = ldap.initialize(f'ldap://{host}:{port}')
    conn.simple_bind_s(LDAP_BINDDN, LDAP_SECRET)

    # The OpenLDAP server is pretty quick to start up but it can still be building the indices
    # or computing the memberOf property. So check and wait until that's done before we let the
    # tests proceed, otherwise we get all kinds of crazy errors.
    # conn.search returns either True or False, depending on if the query succeeded or not. As
    # long as the query doesn't succeed we're still starting up.
    res = conn.search_s('dc=planetexpress,dc=com', ldap.SCOPE_BASE, '(objectclass=*)')
    return res
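Note that ldap.initialize only parses the URI; the actual network connection happens at simple_bind_s, so while the container is still booting this function raises (typically ldap.SERVER_DOWN) rather than returning a falsy value. A caller that polls it therefore usually wraps it in a try/except. Below is a minimal sketch of such a poller, assuming placeholder values for LDAP_BINDDN and LDAP_SECRET -- both names appear in the snippet above but their real values are not part of this record.

import time
import ldap

# Placeholder credentials, assumed for illustration only; the actual values
# are module-level constants defined elsewhere in the test suite.
LDAP_BINDDN = 'cn=admin,dc=planetexpress,dc=com'
LDAP_SECRET = 'admin-password'

def wait_for_ldap(host, port, timeout=600, pause=10):
    # Poll is_ldap_up() until it succeeds or the deadline passes.
    deadline = time.monotonic() + timeout
    while time.monotonic() < deadline:
        try:
            if is_ldap_up(host, port):
                return True
        except ldap.LDAPError:
            # SERVER_DOWN while the container boots, or a query error while
            # the indices are still being built; either way, retry shortly.
            pass
        time.sleep(pause)
    raise TimeoutError(f'LDAP at {host}:{port} did not come up in {timeout}s')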
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_server_up(self):\n print \"Connecting to Mongo at %s:%s\" % (self.hostname, self.port)\n try:\n # TODO: update this to use new pymongo Client\n self.api = pymongo.Connection(self.hostname, self.port)\n return True\n except (AutoReconnect, ConnectionFailure), e:\n print e\n return False", "def isup(self):\n if self.cloudserver:\n # print self.cloudserver.status\n if self.cloudserver.status in (\"ACTIVE\",):\n return True\n \n return False", "def is_ready(self) -> bool:\n # Check the container is running or not\n if self.container is None:\n return False\n cmd_output = self.podman(Command(\n 'container', 'inspect',\n '--format', '{{json .State.Running}}',\n self.container\n ))\n cmd_stdout, cmd_stderr = cmd_output\n return str(cmd_stdout).strip() == 'true'", "def in_host():\n return not in_docker()", "def is_instance_up(self):\n logging.debug(\"checking if starter instance booted: \" + str(self.basedir))\n if not self.instance.is_running():\n message = \"Starter Instance {0.name} is gone!\".format(self)\n logging.error(message)\n raise Exception(message)\n\n # if the logfile contains up and running we are fine\n lfs = self.get_log_file()\n regx = re.compile(r\"(\\w*) up and running \")\n for line in lfs.splitlines():\n match = regx.search(line)\n if match:\n groups = match.groups()\n if len(groups) == 1 and groups[0] == \"agent\":\n continue\n return True\n\n return False", "def status_check(self):\n try:\n client = self.connect()\n client.sys.is_initialized() # make an actual network connection\n return True\n except:\n return False", "def is_mongod_running(self):\r\n \r\n try:\r\n _connect_to_mongo_port(int(self.port))\r\n return True\r\n except OSError:\r\n return False\r\n except Exception:\r\n return False", "def is_alive(self):\n ret = subprocess.call(\n shlex.split(\"ping -c 1 -W 2 %s\" % self.ip_address),\n stdout=open('/dev/null', 'w'),\n stderr=subprocess.STDOUT,\n )\n \n if ret == 0:\n return True\n else:\n return False", "def ready(self):\n\n if not self.running:\n return False\n\n try:\n response = requests.get(\n 'http://{}:{}/v1/kv/health'.format(\n self.running_host,\n self.running_port\n )\n )\n except requests.ConnectionError:\n return False\n\n if response.status_code == 404:\n return True\n elif response.status_code == 500:\n return False\n else:\n return False", "def health_check(self):\n headers = {\"NDS-Proxy-Ping\": \"NPP\"}\n url = \"http://{host}:{port}/upm\".format(host=self.upm_host, port=self.upm_port)\n is_available, http_code = http_utilities.get(url, headers=headers)\n\n if http_code == 200:\n self.log.info(\"The UPM is available\")\n return True\n else:\n self.log.error(\"The UPM is not available\")\n return False", "def check_configuration_server(self) -> bool:\n return (\n self.container is not None\n and self.container.exec_run(\n \"bash -c 'curl -s --head http://localhost:19071/ApplicationStatus'\"\n )\n .output.decode(\"utf-8\")\n .split(\"\\r\\n\")[0]\n == \"HTTP/1.1 200 OK\"\n )", "def is_geth_running(self) -> bool:\r\n command = 'docker exec -t %s geth attach ipc://root/abc/geth.ipc --exec \"admin.nodeInfo\"' % self.name\r\n result = self.ip.exec_command(command)\r\n return False if result.split(':')[0] == 'Fatal' else True", "def docker_available(): # type: () -> bool\n return bool(get_docker_command())", "def __verify__(cls):\n\n try:\n UpstartSystem()\n return True\n except Exception as e:\n try:\n UpstartSystem(bus=DirectUpstartBus())\n return True\n except Exception as e:\n return False", "def check_that_instance_is_alive(self):\n if 
not self.instance.is_running():\n raise Exception(f\"Starter instance is not running. Base directory: {str(self.basedir)}\")\n if self.instance.status() == psutil.STATUS_ZOMBIE:\n raise Exception(f\"Starter instance is a zombie. Base directory: {str(self.basedir)}\")", "def running(self):\n running_properties = [i for i in self.client.containers() if i['Id'] == self.container['Id']]\n\n if len(running_properties) == 0 or 'Up' not in running_properties[0].get('Status', ''):\n return False\n else:\n self.logger.info('Docker container {} running'.format(self.image))\n self.running_properties = running_properties\n\n if self.service_name:\n try:\n running = self.client.port(self.container, config.DEPENDENCIES[self.service_name.upper()]['PORT'])\n self.running_host = running[0]['HostIp']\n self.running_port = running[0]['HostPort']\n except KeyError:\n pass\n\n return True", "def _check_connection() -> bool:\n return bool(subprocess.check_output([\"hostname\", \"-I\"]))", "def is_container_running(self):\n return self._is_container_running", "def __some_alive(self):\n for service in self.__services.values():\n if service.is_alive():\n return True\n return False", "def owserver_running():\n for proc in psutil.process_iter():\n if 'owserver' in proc.name():\n return True\n return False", "def is_available(cls):\n\n try:\n proc = subprocess.Popen(\n ['systemctl', 'status', 'NetworkManager'],\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n )\n proc.communicate()\n return proc.returncode == 0\n except OSError:\n return False", "def is_sshd_running(container):\n debug(\"Container: {}\".format(container))\n try:\n (rc, stdout, stderr) = container.execute(\n [\"service\", \"ssh\", \"status\"]\n )\n # If the status is a) found and b) running, the exit code will be 0\n if rc == 0:\n return True\n except Exception as ex:\n debug(\"Failed to check sshd service status: {}\".format(ex))\n\n return False", "def is_alive(addr, user):\n return _ssh_master_cmd(addr, user, 'check') == 0", "def is_running(self):\n\n command = ('ssh {0} -q -o StrictHostKeyChecking=no -o '\n 'UserKnownHostsFile=/dev/null \"pgrep httperf \"').format(self.client)\n\n if subprocess.call(command, stdout=subprocess.PIPE, shell=True) == 0:\n return True\n else:\n return False", "def IsWiredUp(self):\n return self.wired.IsUp()", "def health_ok(self):\n for client in self.clients():\n if client.run_cmd('ls'):\n log.info('Vmware cluster is up.')\n return True\n else:\n return False", "def is_ready(self):\n if not self.is_accessible:\n return False\n\n is_ready_cmd = '/usr/rift/bin/ssh_root {ip} -q -n -o BatchMode=yes -o StrictHostKeyChecking=no stat /var/lib/cloud/instance/boot-finished > /dev/null'\n rc = subprocess.call(is_ready_cmd.format(ip=self._ip), shell=True)\n\n logger.info(\"Checking if {} is ready\".format(self._ip))\n if rc != 0:\n return False\n\n return True", "def is_alive(self):\n pass", "def is_server_alive(self):\n self.log.info('Checking if the server is available via SSH')\n is_available = ssh_utilities.is_server_alive(remote_host=self.upm_host,\n remote_username=self.upm_username,\n remote_password=self.upm_password)\n if not is_available:\n message = 'The server is not available via SSH'\n assert False, message\n self.log.info('The server is available via SSH')\n return True", "def check_status(self):\n try:\n self.server.ping()\n return True\n except Exception as e:\n return False", "def is_inside_im_container() -> bool:\n # TODO(*): Why not testing only STAGE?\n condition = (\n os.environ.get(\"STAGE\") == 
\"TEST\"\n and os.environ.get(\"POSTGRES_HOST\") == \"im_postgres_test\"\n ) or (\n os.environ.get(\"STAGE\") == \"LOCAL\"\n and os.environ.get(\"POSTGRES_HOST\") == \"im_postgres_local\"\n )\n return condition", "def _daemonExists(self):\n return os.path.exists(self._lockFilename)", "def is_alive(self):\n if self.status == 1:\n return True\n else:\n return False", "def _is_alive(self) -> bool:\n\n if self._on:\n return True\n\n try:\n os.kill(self.proc.pid, 0)\n except (OSError, ProcessLookupError):\n return False\n\n return True", "def is_alive(self):\n return not (self._find.is_alive() or \n self._sum.is_alive() or\n self._tag.is_alive() or \n self._register.is_alive() or\n self._dispatcher.is_alive())", "def check_status(con):\n try:\n status = con.sudo('su - splunk -c \"/opt/splunk/bin/splunk status\"', hide=True)\n if 'is running' in status.stdout:\n return True\n else:\n return False\n except (ConnectionError, AuthenticationException, NoValidConnectionsError, UnexpectedExit):\n return False", "def check_running(self, fail_on_error=True):\n status = True\n state = self.check_mount_state(self.running_hosts)\n if state[\"unmounted\"] or state[\"nodirectory\"]:\n self.log.error(\n \"Error: dfuse not running on %s\",\n str(state[\"unmounted\"].union(state[\"nodirectory\"])))\n status = False\n if fail_on_error:\n raise CommandFailure(\"dfuse not running\")\n return status", "def is_sm_running() -> bool:\n initd = '/etc/init.d'\n print(\"Checking SUSE Manager running...\")\n\n # Get tomcat\n tomcat = \"\"\n for cmd in os.listdir(initd):\n if cmd.startswith('tomcat'):\n tomcat = initd + \"/\" + cmd\n break\n\n return os.popen(tomcat + \" status 2>&1\").read().strip().find('dead') == -1", "def ready(self):\n if not self.running:\n return False\n\n try:\n response = requests.get(\n 'http://{host}:{port}/'.format(\n host=self.running_host,\n port=self.running_port\n )\n )\n except requests.ConnectionError:\n return False\n\n if response.status_code == 200:\n return True\n elif response.status_code >= 500:\n return False\n else:\n self.logger.warning('Unexpected error code from {}: {}'.format(self.image, response.status_code))\n return True", "def is_instance_running(self):\n try:\n self.instance.wait(timeout=1)\n except psutil.TimeoutExpired:\n pass\n return self.instance.is_running()", "def ready(self):\n\n if not self.running:\n return False\n\n try:\n response = requests.get(\n 'http://{}:{}'.format(\n self.running_host,\n self.running_port\n )\n )\n except requests.ConnectionError:\n return False\n\n if response.status_code == 404:\n return True\n elif response.status_code == 500:\n return False\n else:\n return False", "def is_alive(self):\n return True", "def mmo_is_mongo_up(self, hostname, port=27017):\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n service_up = False\n try:\n s.connect((hostname, port))\n service_up = True\n s.close()\n except socket.error as e:\n pass\n except Exception as e:\n raise e\n return service_up", "def check_connection_to_db(self):\n try:\n self._client.admin.command('ismaster')\n return True\n except Exception:\n return False", "def is_alive(self) -> bool:\n self.check_is_alive()\n return self.__is_alive", "def check_connection(self):\n return False", "def is_northd_active():\n try:\n for line in ovn_appctl('ovn-northd', ('status',)).splitlines():\n if line.startswith('Status:') and 'active' in line:\n return True\n except subprocess.CalledProcessError:\n pass\n return False", "def check_db_connection(self):\n self.logger.debug('Checking database 
connection.')\n if self.db is None:\n try:\n self.connect_to_db()\n except Exception as e:\n print('Lost database connection.')\n self.logger.error('Lost database connection.')\n self.logger.error(e)\n return False\n else:\n try:\n # force connection on a request as the connect=True parameter of MongoClient seems\n # to be useless here\n self.db['client'].server_info()\n except pymongo.errors.ServerSelectionTimeoutError as e:\n print('Lost database connection.')\n self.logger.error('Lost database connection.')\n self.logger.error(e)\n return False\n\n return True", "def is_alive(self):\n params = {'detail': 'true', 'path': '/clusterstate.json'}\n\n try:\n response = self.client.get('zookeeper', params)\n except solr_errors.SolrError:\n logger.exception('Failed to check zookeeper')\n return False\n else:\n try:\n data = json.loads(response['znode']['data'])\n except ValueError:\n return False\n\n for name, collection in data.items():\n shards = collection['shards']\n for shard, shard_info in shards.items():\n replicas = shard_info['replicas']\n for replica, info in replicas.items():\n state = info['state']\n if name == self.solr_collection and state != 'active':\n return False\n\n return True", "def in_docker():\n rc, out, _ = j.sals.process.execute(\"cat /proc/1/cgroup\", die=False, showout=False)\n if rc == 0 and \"/docker/\" in out:\n return True\n return False", "def openldap(docker_ip, docker_services):\n host, port = docker_ip, docker_services.port_for('openldap', 389)\n docker_services.wait_until_responsive(\n timeout=600, pause=10,\n check=lambda: is_ldap_up(host, port))\n\n global LDAP_HOST\n global LDAP_PORT\n\n LDAP_HOST = host\n LDAP_PORT = port\n\n return None", "def issuer_liveness_check():\n global app_config\n\n if app_config[\"running\"]:\n # return True until we get a shutdown request\n return True\n\n # return True until the work queue is cleared\n return tob_connection_active()", "def ready(self):\n if not self.is_setup:\n return False\n\n if self.pocs.observatory.mount.is_parked:\n print_warning('Mount is parked. 
To unpark run `unpark`')\n return False\n\n return self.pocs.is_safe()", "def check_availability(self):\n\t\tif not self.connection_is_usable:\n\t\t\treturn False\n\t\twith self.client_lock:\n\t\t\tif self.stream is None:\n\t\t\t\treturn False\n\t\t\tif self.last_ping is None or self.last_ping.age() >= self.ping_max_age:\n\t\t\t\tself.last_ping = SendPing(self, self.ping_timeout)\n\t\t\tlast_ping = self.last_ping\n\t\treturn last_ping.answered(self.ping_timeout)", "def is_alive(self):\n if self.health > 0:\n return True\n return False", "def nat_waitforconn_alive():\r\n return NAT_STATE_DATA[\"mux\"] != None and NAT_STATE_DATA[\"mux\"].isAlive()", "def is_alive(self):\n try:\n return self.get_life() > 0\n except KeyError:\n return True", "def check( self ):\n\n if ( self.alive is not None ) \\\n and ( time.time() > ( self.alive + self.timeout ) ):\n return False\n return True", "def IsWirelessUp(self):\n return self.wifi.IsUp()", "def _IsReady(self):\n if self.ip_address is None:\n self._GetIpAddress()\n if self.ip_address is not None:\n url = 'http://%s' % (self.ip_address)\n r = requests.get(url)\n if r.status_code == 200:\n return True\n return False", "def check_ups(self, ups): #pylint: disable=no-self-use\n try:\n args = [\n MONITOR_COMMAND,\n f\"{ups}@localhost\"\n ]\n #pylint: disable=unused-variable\n results = subprocess.run(args, check=True, capture_output=True)\n return True\n except subprocess.CalledProcessError:\n return False", "def is_northd_active():\n try:\n for line in ovs_appctl('ovn-northd', 'status').splitlines():\n if line.startswith('Status:') and 'active' in line:\n return True\n except subprocess.CalledProcessError:\n pass\n return False", "def is_healthy(self) -> bool:\n try:\n self.health()\n except MeiliSearchError:\n return False\n return True", "def check_availability(self):\n pass", "def is_active(self):\n deployment_type = self.get_var(\"openshift_deployment_type\")\n has_valid_deployment_type = deployment_type in DEPLOYMENT_IMAGE_INFO\n\n return super(DockerImageAvailability, self).is_active() and has_valid_deployment_type", "def isup(sourcename) :\n return s.isUp(sourcename)", "def is_alive(self):\n result = execute('ps -Ao pgid', check_pg_alive=False, stdout=PIPE)\n pgids = result['stdout'].decode('utf8').split()\n return str(self.process.pid) in pgids", "def IsAdbConnected(self):\n return self.GetAdbConnectionStatus() is not None", "def wait_for_cadvisor_up(self):\n ping = None\n while ping is None:\n ping = requests.get(self._url_prefix, timeout=CONNECTION_TIMEOUT)\n if ping is None:\n logger.debug(\"Unable to connect to cadvisor %s. 
Will sleep for %s sec\",\n self._url_prefix, CHECK_LIVELINESS_INTERVAL)\n time.sleep(CHECK_LIVELINESS_INTERVAL)\n logger.info(\"cAdvisor client is up for endpoint %s\", self._url_prefix)", "def is_cups_server(rm):\n try:\n s = socket.socket()\n s.settimeout(0.3)\n s.connect((rm, 631))\n s.close()\n\n return True\n except (socket.error, socket.timeout):\n return False", "def HasSystemd(self):\n _, stderr = self.RunCmdOnDevice(['systemctl'], quiet=True)\n return stderr == ''", "def HasSystemd(self):\n _, stderr = self.RunCmdOnDevice(['systemctl'], quiet=True)\n return stderr == ''", "def is_alive(pid):\n pid = int(pid)\n return psutil.pid_exists(pid)", "def alive(self):\n return self._process.is_alive()", "def __check_db_container(self, mode='running'):\n if mode == 'running':\n cmd_docker = ['docker', 'ps']\n elif mode == 'exist':\n cmd_docker = ['docker', 'ps', '-a']\n else:\n raise DockerExecError('Invalid container check mode: {}.'.format(mode))\n\n\n proc_docker = subprocess.Popen(cmd_docker,\n stdout=subprocess.PIPE)\n proc_grep = subprocess.Popen(['grep', self.__db_cont_name],\n stdin=proc_docker.stdout,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n stdout, stderr = proc_grep.communicate()\n output = str(stdout).split()\n LOGGER.debug(output)\n try:\n container_image = output[1]\n container_name = output[-1]\n container_port = output[-2]\n # remove new line spacial character\n container_name = container_name.rstrip(\"\\\\n'\")\n container_port = find_xtport(container_port) \n except IndexError:\n container_name = None\n container_image = None\n container_port = None\n \n LOGGER.debug('Found that there is an existing container with the name: {}'.format(container_name))\n\n if container_name == self.__db_cont_name:\n if container_image == self.__db_image:\n if mode == 'running':\n self.__is_db_running = True\n elif mode == 'exist':\n self.__is_db_exist = True\n if container_port != self.__dbport:\n LOGGER.warning('Using as external container port: {}'.format(container_port))\n self.__dbport = container_port\n else:\n msg = ('The name \\\"{}\\\" is used by another container.'\n 'Could not create postgres database container.' 
\n 'Please use other db container name.').format(self.__db_cont_name)\n raise DockerExecError(msg)", "def check_env():\n logger.debug(\"Checking enviroment\")\n if os.getuid() != 0:\n exit_on_error(\"twindb-register-storage.py must be run by root\")\n logger.debug(\"Enviroment is OK\")\n return True", "def available(self) -> bool:\n return self.thermostat[\"runtime\"][\"connected\"]", "def is_alive(self):\n return self.alive", "def is_alive(self):\n return self.alive", "def tunnel_up(self):\n return self._ssh_host != None and self._ssh_port != None", "def is_alive(self) -> bool:\n\n\n try:\n self.sock.settimeout(2)\n except OSError:\n\n return False\n\n try:\n self.talk('/system/identity/print')\n\n except (socket.timeout, IndexError, BrokenPipeError):\n\n self.close()\n return False\n\n self.sock.settimeout(None)\n return True", "def test_server_is_up_and_running(self):\n r = requests.get(self.get_server_url())\n self.assertEqual(r.status_code, 200)", "def is_alive(self):\n return hasattr(self, 'alive') and self.alive", "def is_alive(self):\n return hasattr(self, 'alive') and self.alive", "def test_status(self):\n with DockerHost('host', dind=False, start_calico=False) as host:\n host.calicoctl(\"status\")", "def alive(self):\n return True", "def __CheckConnectStatus(self):\r\n if not self.tn:\r\n print \"Connection is down!\"\r\n return False\r\n else:\r\n print \"Connection is alive!\"\r\n return True", "def local_is_up(self, target):\n try:\n check_address(target)\n except ValueError:\n self.logger.warning('Target must be a tuple (IP, port), where IP '\n 'is a string (i.e. \"192.168.0.1\") and port is '\n 'an integer (i.e. 40000). Alternatively '\n 'target can be a valid UNIX domain socket.')\n return False\n\n self.check_tunnels()\n return self.tunnel_is_up.get(target, True)", "def is_connected(self):\n if not self._host:\n if self._database:\n return True\n else:\n return False\n else:\n return self._factory.is_connected()", "def is_connected(self) -> bool:\n return hasattr(_app_ctx_stack.top, \"zodb_connection\")", "def health_check(self):\n with self.session as session:\n try:\n query = session.execute('SELECT 1')\n except Exception as e:\n raise UnhealthyCheck()\n\n return True", "def is_alive(self, site):\n try:\n return requests.get(site).status_code == 200\n except Exception:\n pass", "async def _check_db_initialized(self) -> bool:\n q = \"\"\"SELECT count(*)\n FROM information_schema.TABLES\n WHERE (TABLE_SCHEMA = %s) AND (TABLE_NAME = %s)\"\"\"\n res = await self.fetch_single(q, (self.dbname, 'version'))\n if res == 0:\n return False\n return True", "def is_available() -> bool:\n # This function never throws and returns 0 if driver is missing or can't\n # be initialized\n return device_count() > 0", "def is_alive(self):\n try:\n _ = self.CORE.title\n return True\n except SeleniumExceptions.WebDriverException:\n return False", "def is_available(**kwargs: Any) -> bool:\n try:\n _check_available()\n except Unavailable as e:\n logger.info('Database not available: %s', e)\n return False\n return True", "def poll(self):\n result = False\n\n container_status = self.get_container_status(None)\n if container_status is None or container_status in self.get_initial_states():\n result = None\n\n return result", "def check_connection(self):\n pass", "def is_alive(self):\n if (self._s.fileno()>0 and self._running and self._listen):\n return True\n else:\n return False", "def is_connected():\n import socket\n try:\n host = socket.gethostbyname(\"www.gov.uk\")\n 
socket.create_connection((host, 80), 2)\n return True\n except:\n pass\n return False" ]
[ "0.6554002", "0.646989", "0.642654", "0.64182204", "0.63442534", "0.6322428", "0.6279061", "0.6246154", "0.6180694", "0.6163434", "0.6151141", "0.61259544", "0.6092133", "0.60640645", "0.6042775", "0.60386544", "0.60356444", "0.6010722", "0.59878546", "0.5981053", "0.5968419", "0.59552395", "0.59000754", "0.589097", "0.5882983", "0.5881435", "0.5879372", "0.5867597", "0.5856462", "0.5855608", "0.5850339", "0.5848975", "0.58481765", "0.58266604", "0.58242273", "0.5817087", "0.5816476", "0.5803656", "0.58022463", "0.58020794", "0.57931167", "0.5788853", "0.57713306", "0.57691455", "0.57588387", "0.57367826", "0.5732935", "0.57123476", "0.57100904", "0.5697526", "0.5695976", "0.56829876", "0.56813955", "0.56766915", "0.5674932", "0.5673949", "0.56703943", "0.56675225", "0.56628823", "0.56576556", "0.56473976", "0.5644513", "0.56442374", "0.56341547", "0.56292754", "0.562688", "0.56263554", "0.5625666", "0.56214166", "0.5612503", "0.5608181", "0.5608181", "0.5587325", "0.55869085", "0.5576927", "0.5574991", "0.55717427", "0.5571344", "0.5571344", "0.5568074", "0.5567915", "0.55572337", "0.55560106", "0.55560106", "0.5550622", "0.5540239", "0.55331653", "0.5531554", "0.55177593", "0.55177194", "0.55148333", "0.5502589", "0.5496242", "0.54959005", "0.5495571", "0.54933065", "0.5492689", "0.54901403", "0.548855", "0.5480971" ]
0.716125
0
The OpenLDAP container to test against.
def openldap(docker_ip, docker_services):
    host, port = docker_ip, docker_services.port_for('openldap', 389)
    docker_services.wait_until_responsive(
        timeout=600, pause=10,
        check=lambda: is_ldap_up(host, port))

    global LDAP_HOST
    global LDAP_PORT

    LDAP_HOST = host
    LDAP_PORT = port

    return None
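Here docker_ip and docker_services are fixtures supplied by pytest-docker: wait_until_responsive repeatedly calls the given check until it returns truthy or the timeout expires, and port_for maps the service's container port 389 to the ephemeral host port chosen by docker compose. The fixture decorator does not survive in the record above; session scope is the conventional choice so the container starts once per test run, but that is an assumption. The test below is a hypothetical consumer, reusing the placeholder LDAP_BINDDN / LDAP_SECRET from the earlier sketch.

import ldap

# Assumed redeclaration of the fixture above -- the decorator is not preserved
# in the record, but session scope is the usual choice with pytest-docker:
#
# @pytest.fixture(scope='session')
# def openldap(docker_ip, docker_services): ...

def test_base_dn_is_searchable(openldap):
    # Hypothetical test: by the time pytest injects the fixture, the
    # module-level LDAP_HOST / LDAP_PORT globals have been populated.
    conn = ldap.initialize(f'ldap://{LDAP_HOST}:{LDAP_PORT}')
    conn.simple_bind_s(LDAP_BINDDN, LDAP_SECRET)
    res = conn.search_s('dc=planetexpress,dc=com', ldap.SCOPE_BASE,
                        '(objectclass=*)')
    assert res

Returning a (host, port) tuple from the fixture would be slightly more idiomatic than mutating module globals, since pytest hands fixture return values straight to the tests that request them; the globals exist here so that helpers which are not themselves fixtures (such as the Flask app factory seen in the negatives below) can read the address.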
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_get_container(self):\n pass", "def get_container(self, account, container):\n \n pass", "def test_show_container(self):\n pass", "def app(openldap):\n _app = flask.Flask(__name__)\n _app.config['LDAP_URI'] = f'ldap://{LDAP_HOST}:{LDAP_PORT}'\n _app.config['LDAP_BINDDN'] = LDAP_BINDDN\n _app.config['LDAP_SECRET'] = LDAP_SECRET\n LDAP(_app)\n ctx = _app.test_request_context()\n ctx.push()\n\n yield _app", "def container_interface(self):\r\n return self._container_if", "def container (self):\n return self.__container", "def container_name(self):\n pass", "def containerID(self):\n return self._container", "def get_container(self) -> CNT:\n raise NotImplementedError()", "def is_inside_im_container() -> bool:\n # TODO(*): Why not testing only STAGE?\n condition = (\n os.environ.get(\"STAGE\") == \"TEST\"\n and os.environ.get(\"POSTGRES_HOST\") == \"im_postgres_test\"\n ) or (\n os.environ.get(\"STAGE\") == \"LOCAL\"\n and os.environ.get(\"POSTGRES_HOST\") == \"im_postgres_local\"\n )\n return condition", "def run(self, container_config: ContainerConfig) -> Container:", "def __init__(self, container):\r\n self.container = container", "def __init__(self, container):\r\n self.container = container", "def __init__(self):\n\n try:\n import docker\n except ImportError:\n raise ImportError(\n \"'docker' package not found. Install support \\\nfor Allegro with 'pip install brickschema[allegro]\"\n )\n\n try:\n self._client = docker.from_env(version=\"auto\")\n except Exception as e:\n logger.error(\n f\"Could not connect to docker ({e}); defaulting to naive evaluation\"\n )\n raise ConnectionError(e)\n self._container_name = f\"agraph-{secrets.token_hex(8)}\"\n logger.info(f\"container will be {self._container_name}\")", "def container(self) -> Union[Element, None]:\n if self._container is None and self.ROOT_LOCATOR is not None:\n try:\n return self.driver.find_element(*self.ROOT_LOCATOR)\n except WebDriverException:\n return None\n return self._container", "def test_create_container(self):\n pass", "def __init__(self):\n #Iotlab PROD LDAP parameters\n self.ldapserv = None\n ldap_config = LdapConfig()\n self.config = ldap_config\n self.ldapHost = ldap_config.LDAP_IP_ADDRESS\n self.ldapPeopleDN = ldap_config.LDAP_PEOPLE_DN\n self.ldapGroupDN = ldap_config.LDAP_GROUP_DN\n self.ldapAdminDN = ldap_config.LDAP_WEB_DN\n self.ldapAdminPassword = ldap_config.LDAP_WEB_PASSWORD\n self.ldapPort = ldap.PORT\n self.ldapVersion = ldap.VERSION3\n self.ldapSearchScope = ldap.SCOPE_SUBTREE", "def __init__(self):\n SpokeLDAP.__init__(self)\n self.config = config.setup()\n self.log = logging.getLogger(__name__)\n self.search_scope = 2 # ldap.SCOPE_SUBTREE\n self.retrieve_attr = None\n self.base_dn = self.config.get('LDAP', 'basedn')\n self.org_class = self.config.get('ATTR_MAP', 'org_class', 'organization')\n self.user_class = self.config.get('ATTR_MAP', 'user_class', 'aenetAccount')\n self.org_attr = self.config.get('ATTR_MAP', 'org_attr', 'o')\n self.container_attr = self.config.get('ATTR_MAP', 'container_attr', 'ou')\n self.container_class = self.config.get('ATTR_MAP', \\\n 'container_class', 'organizationalUnit')\n self.org_def_children = self.config.get('ATTR_MAP', \\\n 'org_def_children', 'people,groups,dns,hosts')\n self.org_children = self.org_def_children.split(',')\n self.org_suffix_attr = self.config.get('ATTR_MAP', 'org_suffix', 'aenetAccountSuffix')", "def _update_container(self):\n client = docker.from_env()\n self.container = client.containers.get(self.session.container_id)", "def 
getTargetContainer(self):\n\n settings = zope.component.getUtility(IPMR2GlobalSettings)\n\n if settings.create_user_workspace:\n uwc = settings.getCurrentUserWorkspaceContainer()\n if uwc is not None:\n return uwc\n\n # Otherwise return the global workspace container.\n target = settings.getWorkspaceContainer()\n if target is None:\n raise NotFound(self.context, settings.default_workspace_subpath)\n return target", "def getRoot(self):\n return inmemory.ReadOnlyInMemoryLDAPEntry(\n dn=distinguishedname.DistinguishedName(\"dc=example,dc=com\")\n )", "def search_lxc_bridge():\n return search_lxc_bridges()[0]", "def container(self):\n return self._transport._reactor", "def _get_container(self) -> Container:\n obj = self.get_container()\n return to_container(obj)", "def test04_get_container_type(self):\n r = LDPRS()\n self.assertEqual(r.get_container_type(context=\"http://ex.org/aa\"), None)\n self.assertEqual(r.get_container_type(context=\"http://ex.org/aa\", default=LDP.BasicContainer), LDP.BasicContainer)\n r.parse(b'<http://ex.org/aa> <http://ex.org/b> \"1\".')\n self.assertEqual(r.get_container_type(context=\"http://ex.org/aa\"), None)\n r.parse(b'<http://ex.org/aa> <http://www.w3.org/1999/02/22-rdf-syntax-ns#type> <http://ex.org/some_type>.')\n self.assertEqual(r.get_container_type(context=\"http://ex.org/aa\"), None)\n r.parse(b'<http://ex.org/aa> <http://www.w3.org/1999/02/22-rdf-syntax-ns#type> <http://www.w3.org/ns/ldp#DirectContainer>.')\n self.assertEqual(r.get_container_type(context=\"http://ex.org/aa\"), LDP.DirectContainer)\n r.parse(b'<http://ex.org/aa> <http://www.w3.org/1999/02/22-rdf-syntax-ns#type> <http://www.w3.org/ns/ldp#IndirectContainer>.')\n self.assertRaises(Exception, r.get_container_type, context=\"http://ex.org/aa\")\n self.assertEqual(r.get_container_type(context=\"http://ex.org/NOT_aa\"), None)", "def get_app():\n return ApplicationContainer()", "def findSite(container):\n\n if ISite.providedBy(container):\n return container\n return findNextSite(container)", "def go_to_container(self):\r\n return ContainerPage(self.browser, self.locator).visit()", "def get_container(self, profile, exec_cmd):\n container = None\n if self._value.has_option(profile, 'container'):\n container = self._value.get(profile, 'container')\n elif exec_cmd is not None:\n self.logger.error(\n \"No container parameter found\"\n )\n exit(1)\n\n self.logger.info(\"%s is selected as container\" % container)\n return container", "def docker_client():\n return docker.from_env()", "def dc_name(self):\n return self.container_name", "def get(container, util, shell, ver_info):\n del util\n del ver_info\n\n version = \"latest\"\n\n # This class is intended to be used through LanguageBase, so\n # most of its methods are private\n #\n # suppress(too-few-public-methods)\n class ConanContainer(container.new_container_for(\"conan\", version)):\n \"\"\"A container representing an active conan installation.\"\"\"\n\n # pylint can't detect that this is a new-style class\n #\n # suppress(super-on-old-class)\n def __init__(self, version, installation, shell):\n \"\"\"Initialize a conan container for this version.\"\"\"\n super(ConanContainer, self).__init__(installation,\n \"conan\",\n version,\n shell)\n assert os.path.exists(self._installation)\n\n # suppress(super-on-old-class)\n def clean(self, util_mod):\n \"\"\"Clean out cruft in the container.\"\"\"\n super(ConanContainer, self).clean(util_mod)\n build = container.named_cache_dir(\"cmake-build\", ephemeral=True)\n 
util_mod.force_remove_tree(os.path.join(build, \"bin\"))\n util_mod.force_remove_tree(os.path.join(build, \"lib\"))\n\n def _active_environment(self, tuple_type):\n \"\"\"Return active environment for conan container.\"\"\"\n env_to_overwrite = dict()\n env_to_prepend = {\n \"PATH\": os.path.join(self._installation, \"bin\")\n }\n\n return tuple_type(overwrite=env_to_overwrite,\n prepend=env_to_prepend)\n\n return ConanContainer(version, container.language_dir(\"conan\"), shell)", "def get_app_container(device, app_identifier, container=None):\n command = 'get_app_container \"%s\" \"%s\"' % (device.udid, app_identifier)\n\n if container is not None:\n command += ' \"' + container + '\"'\n\n path = _run_command(command)\n\n # The path has an extra new line at the end, so remove it when returning\n return path[:-1]", "def container(app, container=None):\n if container is None:\n # Print containers\n table = Table([\n (\"NAME\", 30),\n ])\n table.print_header()\n for container in sorted(app.containers, key=lambda c: c.name):\n table.print_row([\n container.name,\n ])\n else:\n # Container name\n click.echo(CYAN(\"Name: \") + container.name)\n # Build parent\n click.echo(\n CYAN(\"Build ancestry: \") +\n \", \".join(other.name for other in app.containers.build_ancestry(container))\n )\n # Runtime dependencies\n dependencies = app.containers.dependencies(container)\n if dependencies:\n click.echo(CYAN(\"Depends on: \") + \", \".join(sorted(other.name for other in dependencies)))\n else:\n click.echo(CYAN(\"Depends on: \") + \"(nothing)\")\n # Dependents\n dependents = app.containers.dependents(container)\n if dependents:\n click.echo(CYAN(\"Depended on by: \") + \", \".join(sorted(other.name for other in dependents)))\n else:\n click.echo(CYAN(\"Depended on by: \") + \"(nothing)\")\n # Volumes\n click.echo(CYAN(\"Named volumes:\"))\n for mount_point, volume in container.named_volumes.items():\n click.echo(\" {}: {}\".format(mount_point, volume.source))\n click.echo(CYAN(\"Bind-mounted volumes:\"))\n for mount_point, volume in container.bound_volumes.items():\n click.echo(\" {}: {}\".format(mount_point, volume.source))\n # Devmodes\n click.echo(CYAN(\"Mounts (devmodes):\"))\n for name, mounts in container.devmodes.items():\n click.echo(\" {}:\".format(name))\n for mount_point, volume in mounts.items():\n click.echo(\" {}: {}\".format(mount_point, volume.source))", "def ensure_container():\n return exec_fn(_init_container)", "def test_show_container_privilege(self):\n pass", "def test_search_ldapdn(self):\n ldap_dn = LDAPDN(self.basedn)\n obj = self.conn.search(ldap_dn, 1)\n self.assertIsNotNone(obj)", "def get_container(self, instance_id, container_id):\n return self.database.get_docker_document(\n amazon_resource_id=instance_id, docker_resource_id=container_id, type=self.type\n )", "def get_connection(self):\n if not self.connection:\n # log.debug(\"Initialized new LDAPConnection\")\n new_conn = ldap.initialize(\"ldap://%s:%s\" % (self.host, self.port))\n self.connection = new_conn\n\n return self.connection", "def _get_container(self, boto3_session):\n return get_image_uri(boto3_session.region_name, 'xgboost')", "def _create_docker_container(self):\n cwd = os.getcwd()\n\n # get a docker client\n docker_client = docker.from_env()\n docker_image = \"aca_build0:latest\"\n mount_pnt = docker.types.Mount(\"/mnt/alcor-control-agent\",\n f'''{cwd}/../..''',\n type='bind')\n\n mount_modules = docker.types.Mount(\"/lib/modules\",\n \"/lib/modules\",\n type='bind')\n\n # Create the container in 
privileged mode\n container = docker_client.containers.create(\n docker_image, '/bin/bash', tty=True,\n stdin_open=True, auto_remove=False, mounts=[mount_pnt, mount_modules],\n privileged=True, cap_add=[\"SYS_PTRACE\"],\n ports={str(aca_droplet.port_internal) + \"/tcp\": ('0.0.0.0', aca_droplet.port_external)},\n security_opt=[\"seccomp=unconfined\"], name=self.id)\n container.start()\n container.reload()\n\n # Increment the static external port number counter\n aca_droplet.port_external = aca_droplet.port_external + 1\n\n # Restart dependancy services\n container.exec_run(\"/etc/init.d/rpcbind restart\")\n container.exec_run(\"/etc/init.d/rsyslog restart\")\n container.exec_run(\"ip link set dev eth0 up mtu 9000\")\n\n # We may need to restart ovs\n # container.exec_run(\"/etc/init.d/openvswitch-switch restart\")\n\n # Create simlinks\n container.exec_run(\"ln -s /mnt/alcor-control-agent/mizar/build/bin /trn_bin\")\n container.exec_run(\"ln -s /mnt/alcor-control-agent/mizar/build/xdp /trn_xdp\")\n container.exec_run(\"ln -s /sys/fs/bpf /bpffs\")\n\n container.exec_run(\n \"ln -s /mnt/alcor-control-agent/build/ /aca_build\")\n\n # Run the transitd in the background\n container.exec_run(\"/trn_bin/transitd \",\n detach=True)\n\n # Enable debug and tracing for the kernel\n container.exec_run(\n \"mount -t debugfs debugfs /sys/kernel/debug\")\n container.exec_run(\n \"echo 1 > /sys/kernel/debug/tracing/tracing_on\")\n\n # Enable core dumps (just in case!!)\n container.exec_run(\"ulimit -u\")\n cmd = \"echo '/mnt/alcor-control-agent/mizar/core/core_{}_%e.%p' |\\\n tee /proc/sys/kernel/core_pattern \".format(self.ip)\n container.exec_run(cmd)\n\n self.container = container\n self.ip = self.container.attrs['NetworkSettings']['IPAddress']\n self.mac = self.container.attrs['NetworkSettings']['MacAddress']", "def in_docker():\n rc, out, _ = j.sals.process.execute(\"cat /proc/1/cgroup\", die=False, showout=False)\n if rc == 0 and \"/docker/\" in out:\n return True\n return False", "def test_containers(self):\n\n message = {\"method\": \"containers\"}\n response = yield self._get_response(message)\n\n self.assertIsInstance(response, dict)\n self.assertEqual(response[\"method\"], \"containers\")\n self.assertIsInstance(response[\"result\"], list)\n self.assertNotEqual(len(response[\"result\"]), 0)\n\n container_name = \"/\" + self.container_to_run\n\n containers = {i[0]: i[1] for i in response[\"result\"]}\n self.assertIn(container_name, containers.keys(),\n \"Container not found\")", "def test_docker_api(proc):\n assert len(proc.docker_container_id)\n assert proc.docker_inspect()['Id'].startswith(proc.docker_container_id)\n assert proc.docker_stats()['Container'] == proc.docker_container_id", "def _get_container_name(self) -> str:\n dirname = os.path.basename(os.getcwd())\n default_container_name = f\"{dirname}_{self.config_name}\"\n container_name = self.config_options.get(\"container_name\", default_container_name)\n return container_name", "def test_rackspace_uploader_get_container(self, mock1):\r\n with patch('pybossa.uploader.rackspace.pyrax.cloudfiles') as mycf:\r\n cdn_enabled_mock = PropertyMock(return_value=False)\r\n type(fake_container).cdn_enabled = cdn_enabled_mock\r\n mycf.get_container.side_effect = NoSuchContainer\r\n\r\n calls = [call.get_container('user_3'),\r\n call.create_container('user_3'),\r\n call.make_container_public('user_3')\r\n ]\r\n u = RackspaceUploader()\r\n u.init_app(self.flask_app)\r\n assert u.get_container('user_3')\r\n mycf.assert_has_calls(calls, 
any_order=True)", "def get_instance (self):\n instances = self.data['instances']\n if not len(instances):\n raise Exception, \"ArchivalObject: No Instances found\"\n for instance in instances:\n # print json.dumps(instance, indent=3)\n try:\n instance['sub_container']['top_container']\n return instance\n except:\n pass\n return None", "def _currently_opened_container(cls):\n if Container._any_containers_opened():\n opened_container = Container._construction_stack[-1]\n if not isinstance(opened_container, cls):\n raise smach.InvalidStateError('Attempting to call a %s construction method without having opened a %s.' % (cls, cls))\n return opened_container\n else:\n raise smach.InvalidStateError('Attempting to access the currently opened container, but no container is opened.')", "def test_ldap(self):\n results = self.sync.ldap.conn.search_s('ou=example,o=test', ldap.SCOPE_ONELEVEL, '(cn=*)')\n self.assertEquals(self.ldapobj.methods_called(), ['initialize', 'simple_bind_s', 'search_s'])\n self.assertEquals(sorted(results), sorted([self.manager, self.alice]))", "def login_registry(self):\n status = []\n for name, container in self.containers.items():\n result = container.daemon.login()\n status.append(result)\n return status", "def container_ubuntu_release(self):\r\n return self._container_ubuntu", "def check_existing(self):\n if self.btcd_container != None:\n self.btcd_container.reload()\n if self.btcd_container.status == \"running\":\n rpcconn, container = self.detect_bitcoind_container(\n self.rpcconn.rpcport\n )\n if container == self.btcd_container:\n return rpcconn\n raise Exception(\"Ambigious Container running\")\n return None", "def __check_db_container(self, mode='running'):\n if mode == 'running':\n cmd_docker = ['docker', 'ps']\n elif mode == 'exist':\n cmd_docker = ['docker', 'ps', '-a']\n else:\n raise DockerExecError('Invalid container check mode: {}.'.format(mode))\n\n\n proc_docker = subprocess.Popen(cmd_docker,\n stdout=subprocess.PIPE)\n proc_grep = subprocess.Popen(['grep', self.__db_cont_name],\n stdin=proc_docker.stdout,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n stdout, stderr = proc_grep.communicate()\n output = str(stdout).split()\n LOGGER.debug(output)\n try:\n container_image = output[1]\n container_name = output[-1]\n container_port = output[-2]\n # remove new line spacial character\n container_name = container_name.rstrip(\"\\\\n'\")\n container_port = find_xtport(container_port) \n except IndexError:\n container_name = None\n container_image = None\n container_port = None\n \n LOGGER.debug('Found that there is an existing container with the name: {}'.format(container_name))\n\n if container_name == self.__db_cont_name:\n if container_image == self.__db_image:\n if mode == 'running':\n self.__is_db_running = True\n elif mode == 'exist':\n self.__is_db_exist = True\n if container_port != self.__dbport:\n LOGGER.warning('Using as external container port: {}'.format(container_port))\n self.__dbport = container_port\n else:\n msg = ('The name \\\"{}\\\" is used by another container.'\n 'Could not create postgres database container.' 
\n 'Please use other db container name.').format(self.__db_cont_name)\n raise DockerExecError(msg)", "def container_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"container_id\")", "def container_id(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"container_id\")", "def __get_container_stat(self, path, container_stat_obj, request_from_updater = False):\n try:\n self.logger.debug('Get container interface called')\n self.asyn_helper.call(\"get_container_stat\", path, container_stat_obj, request_from_updater)\n except Exception as err:\n self.logger.error(('get_container_stat for %(con_dir)s failed '\n 'close failure: %(exc)s : %(stack)s'),\n {'con_dir' : path,\n 'exc': err, 'stack': ''.join(traceback.format_stack())})\n raise err", "def get_details_using_inspect_command(self, container_id):\n\n try:\n p = Popen(DOCKER_INSPECT_CMD.format(container_id), shell=True, stdout=PIPE, stderr=PIPE)\n data_dump, stderr_data = p.communicate()\n log.debug('{}[*]{} Inspect result:{}'.format(DFbase.LOG_DEBUG_COLOR,\n DFbase.LOG_INFO_COLOR, \n json.dumps(json.loads(data_dump)),indent=4))\n\n except Exception as e:\n log.debug('{}[*]{} {}'.format(DFbase.LOG_ERROR_COLOR,\n DFbase.LOG_INFO_COLOR, e))\n return False\n\n self.data = json.loads(data_dump.decode('utf-8'))\n\n if not self.data:\n log.debug('{}[*]{} {}'.format(DFbase.LOG_ERROR_COLOR,\n DFbase.LOG_INFO_COLOR,\n 'Please check if container id is valid'))\n return False\n\n self.storage_driver = self.data[0]['Driver']\n self.pid = self.data[0]['State']['Pid']\n self.container_id = self.data[0]['Id']\n\n log.debug('{}[*]{} Storage Driver: {}'.format(DFbase.LOG_DEBUG_COLOR,\n DFbase.LOG_INFO_COLOR, self.storage_driver))\n if self.storage_driver == 'overlay2' or self.storage_driver == 'overlay':\n self.IS_OVERLAYFS = True\n self.overlay_upperdir_path = self.data[0]['GraphDriver']['Data']['UpperDir']\n self.overlay_merged_path = self.data[0]['GraphDriver']['Data']['MergedDir']\n elif self.storage_driver == 'aufs':\n self.IS_AUFSFS = True\n self.aufs_container_layerdb_path = AUFS_IMAGE_LAYERDB_PATH + self.data[0]['Id']\n else:\n log.debug('{}[*]{} {}'.format(DFbase.LOG_DEBUG_COLOR,\n DFbase.LOG_INFO_COLOR,\n 'This storage driver does not support'))\n False\n\n log.debug('{}[*]{} Container id: {}'.format(DFbase.LOG_DEBUG_COLOR,\n DFbase.LOG_INFO_COLOR, self.container_id))\n return True", "def container_workingdir(self):\n return self.environment['HOME']", "def get_container(self, container_name):\n response = self.client.get_container(container_name)\n return response", "def get_container_node(self, container):\n cont_node = None\n try:\n if isinstance(container, docker.models.containers.Container):\n # Reload container attributes and start setting data to containerNode object\n container.reload()\n cont_node = containerNode()\n cont_node.set_container_id(container.id)\n cont_node.set_pid(container.attrs[\"State\"][\"Pid\"])\n cont_node.set_status(container.attrs[\"State\"][\"Status\"])\n cont_node.set_container_ip_address(container.attrs[\"NetworkSettings\"][\"Networks\"][NETWORK_NAME][\"IPAddress\"])\n cont_node.set_init_command(container.attrs[\"Path\"]+\" \"+\" \".join(container.attrs[\"Args\"]))\n cont_node.set_init_env(container.attrs[\"Config\"][\"Env\"])\n cont_node.set_container_name(container.attrs[\"Name\"])\n\n except docker.errors.APIError as e:\n print(\"Error getting the list of running containers\")\n traceback.print_exc()\n print e\n\n return cont_node", "def dd_environment():\n\n # specify couchbase 
container name\n env = {\n 'GITLAB_TEST_TOKEN': GITLAB_TEST_TOKEN,\n 'GITLAB_LOCAL_MASTER_PORT': str(GITLAB_LOCAL_MASTER_PORT),\n 'GITLAB_LOCAL_RUNNER_PORT': str(GITLAB_LOCAL_RUNNER_PORT),\n }\n compose_file = os.path.join(HERE, 'compose', 'docker-compose.yml')\n with docker_run(\n compose_file=compose_file,\n env_vars=env,\n conditions=[\n CheckDockerLogs(\n compose_file, ['Gitlab is up!', 'Configuration loaded', 'Metrics server listening'], wait=5\n ),\n CheckEndpoints(GITLAB_RUNNER_URL, attempts=180),\n ],\n ):\n yield CONFIG, E2E_METADATA", "def init_app(self, app, cont_name=None):\r\n super(self.__class__, self).init_app(app)\r\n try:\r\n pyrax.set_setting(\"identity_type\", \"rackspace\")\r\n pyrax.set_credentials(username=app.config['RACKSPACE_USERNAME'],\r\n api_key=app.config['RACKSPACE_API_KEY'],\r\n region=app.config['RACKSPACE_REGION'])\r\n self.cf = pyrax.cloudfiles\r\n if cont_name:\r\n self.cont_name = cont_name\r\n return self.cf.get_container(self.cont_name)\r\n except pyrax.exceptions.NoSuchContainer:\r\n c = self.cf.create_container(self.cont_name)\r\n self.cf.make_container_public(self.cont_name)\r\n return c", "def test_inspection(self, nginx):\n attrs = nginx.inner().attrs\n\n assert attrs['Config']['Image'] == NginxContainer.DEFAULT_IMAGE\n assert attrs['Name'] == '/test_{}'.format(NginxContainer.DEFAULT_NAME)\n\n assert len(attrs['NetworkSettings']['Ports']['80/tcp']) == 1\n\n network = attrs['NetworkSettings']['Networks']['test_default']\n # The ``short_id`` attribute of the container is the first 10\n # characters, but the network alias is the first 12 :-/\n assert (network['Aliases'] ==\n [NginxContainer.DEFAULT_NAME, attrs['Id'][:12]])", "def describe_container(ContainerName=None):\n pass", "def path(self):\n return self._container_dir", "def start(self, container: Container):", "def __init__(self):\n # try to load the container\n # cf will be global... 
\n # self.cf = pyrax.cloudfiles\n logging.debug(\"Opening cloudfiles container '%s'\" % self.container_name)\n notify(\"Reading environment configuration\")\n \n # check if our container exists; if not create it\n all_containers = cf.list_containers()\n \n if self.container_name in all_containers:\n logging.debug(\"Container exists, opening\")\n mycontainer = cf.get_container(self.container_name)\n else:\n logging.warn(\"Container doesn't exist, creating...\")\n mycontainer = cf.create_container(self.container_name)\n \n self.container = mycontainer\n \n if not self.load_footprints():\n logging.warn(\"No footprints loaded\")\n notify(\"No footprints found.\")", "def _get_infrastructure_pid(self, container_id: str) -> str:\n docker_client = self._get_client()\n base_url = docker_client.api.base_url\n docker_client.close()\n return f\"{base_url}:{container_id}\"", "def dd_environment():\n\n # specify couchbase container name\n env = {\n 'GITLAB_TEST_PASSWORD': GITLAB_TEST_PASSWORD,\n 'GITLAB_LOCAL_PORT': str(GITLAB_LOCAL_PORT),\n 'GITLAB_LOCAL_PROMETHEUS_PORT': str(GITLAB_LOCAL_PROMETHEUS_PORT),\n }\n\n with docker_run(\n compose_file=os.path.join(HERE, 'compose', 'docker-compose.yml'),\n env_vars=env,\n conditions=[CheckEndpoints(GITLAB_URL, attempts=200), CheckEndpoints(PROMETHEUS_ENDPOINT)],\n ):\n # run pre-test commands\n for _ in range(100):\n requests.get(GITLAB_URL)\n sleep(2)\n\n yield CONFIG", "def get_one(self, container_id):\n container = _get_container(container_id)\n check_policy_on_container(container.as_dict(), \"container:get\")\n context = pecan.request.context\n compute_api = pecan.request.compute_api\n container = compute_api.container_show(context, container)\n return view.format_container(pecan.request.host_url, container)", "def _get_dn(self, environ, identity):\n\n if self.bind_dn:\n try:\n self.ldap_connection.bind_s(self.bind_dn, self.bind_password)\n except ldap.LDAPError:\n raise ValueError(\"Couldn't bind with supplied credentials\")\n try:\n login_name = identity['login'].replace('*',r'\\*')\n srch = self.search_pattern % login_name\n dn_list = self.ldap_connection.search_s(\n self.base_dn,\n self.search_scope,\n srch,\n )\n\n # here is the change; return the CN of the first.\n if len(dn_list) >= 1:\n return dn_list[0][0]\n else:\n raise ValueError('No entry found for %s' %srch)\n except (KeyError, TypeError, ldap.LDAPError):\n raise # ValueError", "def get_container(self, container_uuid):\n if container_uuid not in self.containers:\n if container_uuid == 'RAW' or not container_uuid:\n self.containers[container_uuid] = 'RAW'\n else:\n try:\n container = self.funcx_client.get_container(container_uuid, self.config.container_type)\n except Exception:\n logger.exception(\"[FETCH_CONTAINER] Unable to resolve container location\")\n self.containers[container_uuid] = 'RAW'\n else:\n logger.info(\"[FETCH_CONTAINER] Got container info: {}\".format(container))\n self.containers[container_uuid] = container.get('location', 'RAW')\n return self.containers[container_uuid]", "def guest(self) -> Optional[GuestContainer]:\n return self._guest", "def container_name(self):\n if self._container_name:\n return self._container_name\n else:\n return self.image.split(u'/').pop()", "def databases(database_container):\n database_container.setupall()\n return database_container", "def container_by_name(self, name):\n if not name:\n return None\n\n # docker prepends a '/' to container names in the container dict\n name = '/'+name\n return next((container for container in 
self.containers(all=True)\n if name in container['Names']), None)", "def container_IP(self):\r\n return self._container_ip", "def get_docker_container_id(): # type: () -> t.Optional[str]\n path = '/proc/self/cpuset'\n container_id = None\n\n if os.path.exists(path):\n # File content varies based on the environment:\n # No Container: /\n # Docker: /docker/c86f3732b5ba3d28bb83b6e14af767ab96abbc52de31313dcb1176a62d91a507\n # Azure Pipelines (Docker): /azpl_job/0f2edfed602dd6ec9f2e42c867f4d5ee640ebf4c058e6d3196d4393bb8fd0891\n # Podman: /../../../../../..\n contents = read_text_file(path)\n\n cgroup_path, cgroup_name = os.path.split(contents.strip())\n\n if cgroup_path in ('/docker', '/azpl_job'):\n container_id = cgroup_name\n\n if container_id:\n display.info('Detected execution in Docker container: %s' % container_id, verbosity=1)\n\n return container_id", "def docker_build_context(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"docker_build_context\")", "def findNextSite(container):\n while container:\n if IContainmentRoot.providedBy(container):\n return None\n try:\n container = get_parent(container)\n if container is None:\n return None\n except TypeError:\n return None\n if ISite.providedBy(container):\n return container", "def test_update_container(self):\n pass", "def is_ldap_up(host, port):\n conn = ldap.initialize(f'ldap://{host}:{port}')\n conn.simple_bind_s(LDAP_BINDDN, LDAP_SECRET)\n\n # The OpenLDAP server is pretty quick to start up but it can still be building the indices\n # or computing the memberOf property. So check and wait until that's done before we let the\n # tests proceed, otherwise we get all kinds of crazy errors.\n # conn.search returns either True or False, depending on if the query succeeded or not. As\n # long as the query doesn't succeed we're still starting up.\n res = conn.search_s('dc=planetexpress,dc=com', ldap.SCOPE_BASE, '(objectclass=*)')\n return res", "def _start_instance(self, resource_handler):\n log.debug('Starting container')\n cli = resource_handler.cli\n #host_config=cli.create_host_config(network_mode=self.network_mode)\n container = cli.create_container(\n image='{0.image}:{0.tag}'.format(self),\n command=self.command,\n #host_config=host_config,\n environment=self.env\n )\n\n cli.start(container.get('Id'))\n log.debug('Started container [%s]', container)\n return str(container)", "def get(self, container_id):\n resp = self.client.api.inspect_container(container_id)\n return self.prepare_model(resp)", "def testGetAllContainers(self):\n containers_list = self.explorer_object.GetAllContainers()\n containers_list = sorted(containers_list, key=lambda ci: ci.name)\n self.assertEqual(5, len(containers_list))\n\n container_obj = containers_list[0]\n\n self.assertEqual('/festive_perlman', container_obj.name)\n self.assertEqual(\n '2018-05-16T10:51:39.271019533Z', container_obj.creation_timestamp)\n self.assertEqual('busybox', container_obj.config_image_name)\n self.assertTrue(container_obj.running)\n self.assertEqual(\n '8e8b7f23eb7cbd4dfe7e91646ddd0e0f524218e25d50113559f078dfb2690206',\n container_obj.container_id)\n\n container_obj = containers_list[3]\n self.assertEqual('/reverent_wing', container_obj.name)\n self.assertEqual(\n '2018-05-16T10:51:28.695738065Z', container_obj.creation_timestamp)\n self.assertEqual('busybox', container_obj.config_image_name)\n self.assertFalse(container_obj.running)\n self.assertEqual(\n '10acac0b3466813c9e1f85e2aa7d06298e51fbfe86bbcb6b7a19dd33d3798f6a',\n container_obj.container_id)\n 
self.assertEqual(\n {'12345/tcp': {}, '27017/tcp': {}}, container_obj.exposed_ports)", "def __init__(self, org_name):\n SpokeLDAP.__init__(self)\n self.config = config.setup()\n self.log = logging.getLogger(__name__)\n self.search_scope = 2 # ldap.SCOPE_SUBTREE\n self.retrieve_attr = None\n self.base_dn = self.config.get('LDAP', 'basedn')\n self.org_name = org_name\n self.org = self._get_org(self.org_name)\n if self.org['data'] == []:\n msg = 'Org %s not found: cannot delete children' % self.org_name\n raise error.NotFound(msg)\n self.org_dn = self.org['data'][0].__getitem__(0)\n self.container_attr = self.config.get('ATTR_MAP', 'container_attr', 'ou')\n self.container_class = self.config.get('ATTR_MAP', \\\n 'container_class', 'organizationalUnit')", "def test_build_tree(self):\n #logic is different from the fake backend.\n user_api = identity_ldap.UserApi(CONF)\n self.assertTrue(user_api)\n self.assertEquals(user_api.tree_dn, CONF.ldap.user_tree_dn)", "def get_container_policy(ContainerName=None):\n pass", "def getContainerImage(containerName):\n container = DOCKER_CLIENT.containers.get(containerName)\n if not container: return None\n if len(container.image.tags)==0: return None\n return container.image.tags[0]", "def current_container(self):\n return self.layout.container", "def in_host():\n return not in_docker()", "def docker_build_context(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"docker_build_context\")", "def build_container(client):\n client.images.build(path=os.path.join(os.path.abspath(\"\"), \"docker\"), tag=\"scrape_light\")", "def get_container_by_name(self, container_name, is_source):\n if container_name not in self.containers:\n self.containers[container_name] = self.create_container(container_name, is_source)\n return self.containers[container_name]", "def test_get_container_assets(self):\n pass", "def start_access_logging(ContainerName=None):\n pass", "def docker_client():\n client = docker.from_env()\n return client", "def setUpContainer(self):\n self.dev1 = Device(name='dev1')\n eg = ElectrodeGroup(name='elec1',\n description='a test ElectrodeGroup',\n location='a nonexistent place',\n device=self.dev1)\n return eg", "def resolve(self, container: object) -> object:\n raise NotImplementedError()", "def _get_ldap_agent(self, bind=True, secondary=False):\n return _get_ldap_agent(self, bind, secondary)" ]
[ "0.61387736", "0.5880962", "0.57707727", "0.55800897", "0.5526084", "0.5511952", "0.54033214", "0.5372437", "0.52986604", "0.5272887", "0.52577835", "0.51881355", "0.51881355", "0.5154575", "0.5133689", "0.5107609", "0.51014864", "0.51009005", "0.51006734", "0.5086614", "0.50815177", "0.50745684", "0.506745", "0.50492716", "0.50432783", "0.50264627", "0.4994147", "0.49858493", "0.4960979", "0.49570847", "0.49516708", "0.49501625", "0.49429426", "0.49388137", "0.49208468", "0.49109226", "0.48959053", "0.4895636", "0.48922455", "0.48874515", "0.48762098", "0.4870611", "0.48703775", "0.48677236", "0.4847793", "0.48073375", "0.4789304", "0.47842783", "0.47837752", "0.47830626", "0.47816408", "0.47750032", "0.4774753", "0.47710204", "0.47695652", "0.47693908", "0.4763958", "0.4757731", "0.47577104", "0.47416624", "0.47413", "0.47363293", "0.47347614", "0.47248423", "0.47240865", "0.47198552", "0.47191155", "0.47083282", "0.47017536", "0.4700419", "0.46940717", "0.46872744", "0.46862572", "0.46757743", "0.46656516", "0.46634412", "0.4644323", "0.46388367", "0.46341234", "0.4624423", "0.46119416", "0.4610568", "0.46056998", "0.46054518", "0.460347", "0.46020547", "0.45947194", "0.4583509", "0.45826754", "0.45772815", "0.45707163", "0.45685652", "0.45677015", "0.45666188", "0.45661172", "0.45620534", "0.4557925", "0.45502308", "0.45445132", "0.45438933" ]
0.6203799
0
An application for the tests.
def app(openldap): _app = flask.Flask(__name__) _app.config['LDAP_URI'] = f'ldap://{LDAP_HOST}:{LDAP_PORT}' _app.config['LDAP_BINDDN'] = LDAP_BINDDN _app.config['LDAP_SECRET'] = LDAP_SECRET LDAP(_app) ctx = _app.test_request_context() ctx.push() yield _app
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def application():\n yield create_test_application()", "def testapp():\n from space_rocks import main\n app = main({})\n from webtest import TestApp\n return TestApp(app)", "def app() -> Generator:\n app = create_app({\"TESTING\": True})\n\n yield app", "def application(self):\n if not ApplicationFixture._test_app:\n app = self.APP_CLASS()\n app.run_tests()\n ApplicationFixture._test_app = app\n return ApplicationFixture._test_app", "def app():\n print('creating app with test vars')\n\n app = create_app('test')\n app.testing = True\n\n ctx = app.app_context()\n ctx.push()\n yield app\n\n ctx.pop()", "def app():\n _app = create_app(TestConfig)\n ctx = _app.test_request_context()\n ctx.push()\n\n yield _app\n\n ctx.pop()", "def app():\n _app = create_app(TestConfig)\n ctx = _app.test_request_context()\n ctx.push()\n\n yield _app\n\n ctx.pop()", "def app():\n _app = create_app(TestConfig)\n ctx = _app.test_request_context()\n ctx.push()\n\n yield _app\n\n ctx.pop()", "def app():\n app = create_app()\n return app", "def app():\n return create_app()", "def test_app():\n param = {\n 'DEBUG': True,\n 'TESTING': True\n }\n _app = create_app(settings_override=param)\n\n ctx = _app.app_context()\n ctx.push()\n yield _app\n ctx.pop()", "def test_app():\n pass", "def create_and_run():\n\n app = App()\n app.run()", "def application():\n\n configure_app(app)\n yield app", "def app():\n db_uri = '{0}_test'.format(settings.SQLALCHEMY_DATABASE_URI)\n params = {\n 'DEBUG': False,\n 'TESTING': True,\n 'WTF_CSRF_ENABLED': False,\n 'SQLALCHEMY_DATABASE_URI': db_uri\n }\n\n _app = create_app(settings_override=params)\n\n # Establish an application context before running the tests.\n ctx = _app.app_context()\n ctx.push()\n\n yield _app\n\n ctx.pop()", "def main():\n app = App()\n app.run()", "def app():\n app = create_app(schema=create_schema())\n with app.test_request_context():\n yield app", "def create_app(self):\n app.config.from_object('config.TestingConfig')\n return app", "def create_app(self):\n\n app = create_app()\n app.config.from_object('project.config.TestingConfig')\n return app", "def app():\n # create a temporary file to isolate the database for each test\n # create the app with common test config\n app = create_app({\"TESTING\": True, \"DATABASE_NAME\": \"AAPI_DB_Test\"})\n\n # create the database and load test data\n with app.app_context():\n init_db()\n\n yield app", "def test_app():\n # setup\n app = main.create_application()\n app.dependency_overrides[get_settings] = get_settings_override\n with TestClient(app) as test_client:\n yield test_client\n # teardown", "def app():\n # create the app with common test config\n app = create_app({\"DB_CONNECT\": TEST_DB_CONNECT})\n\n # create the database and load test data\n with app.app_context():\n get_db()\n aa = g.db.execute(_data_sql).fetchone() # 用来测试, 实际使用的时候应该是清理或初始化需要的数据\n\n yield app\n\n # 可以在这里做一些清理工作\n print(\"end ever test doing sonething\")", "def app():\n return aplicattion", "def app():\n\n config = {\n 'SERVER_NAME': 'localhost',\n 'SQLALCHEMY_DATABASE_URI': 'sqlite:////tmp/mytest.db',\n 'cache': Cache(config={'CACHE_TYPE': 'simple',\n 'CACHE_THRESHOLD': 100}),\n 'db': FakeDB()\n }\n\n init_db(config, 100)\n _app = create_app(config)\n\n # Establish an application context before running the tests.\n ctx = _app.app_context()\n ctx.push()\n\n yield _app\n\n ctx.pop()", "def client(app): # pylint: disable=redefined-outer-name\n yield TestApp(app)", "def create_app(self):\n coverage.process_startup()\n app = create_test_app()\n 
selenium_server_url = \"http://{}:{}/wd/hub\".format(\n os.environ.get(\"{{ cookiecutter.project_slug|upper }}_SELENIUM_HOST\", \"chrome\"),\n os.environ.get(\"{{ cookiecutter.project_slug|upper }}_SELENIUM_PORT\", \"4444\"),\n )\n self.browser = Remote(\n command_executor=selenium_server_url,\n desired_capabilities=DesiredCapabilities.CHROME.copy(),\n )\n self.browser.implicitly_wait(3)\n return app", "def app():\n _app = create_app(\"flaskapp.settings.testing\")\n _app.logger.setLevel(logging.CRITICAL)\n ctx = _app.test_request_context()\n ctx.push()\n yield _app\n ctx.pop()", "def app(self):\n app = Flask('testapp')\n app.config.update({\n 'HADES_CELERY_APP_NAME': 'test',\n 'HADES_BROKER_URI': 'rpc://broker/',\n 'HADES_RESULT_BACKEND_URI': 'rpc://backend/',\n })\n return app", "def setup_application(self):\n pass", "def app():\n # create the app with common test config\n app = create_app({\n 'TESTING': True,\n 'SQLALCHEMY_DATABASE_URI': SQLALCHEMY_DATABASE_URI,\n 'LOGGER_ROTATING_FILE_CONF': None,\n })\n\n # create the database and load test data\n with app.app_context():\n init_db()\n init_data(db_data4test())\n\n yield app\n\n # clear database\n with app.app_context():\n clear_db()", "def test_06_applications_2(self):\r\n with self.flask_app.app_context():\r\n self.create()\r\n\r\n res = self.app.get('/app', follow_redirects=True)\r\n assert self.html_title(\"Applications\") in res.data, res.data\r\n assert \"Applications\" in res.data, res.data\r\n assert Fixtures.app_short_name in res.data, res.data", "def test_create_app():\n app = web.YumWeb(__name__)\n assert app.name == 'web_tests'", "def app():\n test_db = 'sqlite:///ratemovies_test.db'\n settings = {\n 'DEBUG': False,\n 'TESTING': True,\n 'SQLALCHEMY_DATABASE_URI': test_db\n }\n\n _app = create_app(test_config=settings)\n\n ctx = _app.app_context()\n ctx.push()\n\n yield _app\n\n ctx.pop()", "def setup_app(app_name, app_directory, no_logs):\n\n test_mode = os.getenv(\"TEST_MODE\")\n if test_mode:\n print_warn(\"Running in TEST mode\")\n\n app_directory = abspath(app_directory)\n controller.load_app_modules(app_directory)\n\n os.chdir(app_directory)\n run_boot(app_directory)\n set_engine_config(test_mode, no_logs)\n load_tools(app_directory)\n setup_features()\n\n data_provider.set_base_dir(app_directory)\n cherrypy.tree.mount(controller.get_app_root(), config=web_app_config)", "def test_create(self):\n self.app\n pass", "def runner(app):\n\n return app.test_cli_runner()", "def get_app(self):\n return Application()", "def runner(app):\n return app.test_cli_runner()", "def runner(app):\n return app.test_cli_runner()", "def runner(app):\n return app.test_cli_runner()", "def app():\n\n def _app(config_class):\n _app = create_app(config_class)\n seed_collection_with_csv(_app.config['DATA_FILENAME'])\n _app.app_context().push()\n return _app\n\n yield _app('config.TestingConfig')", "def app() -> flask.Flask:\n test_app: flask.Flask = create_app(\"tests.conftest.TestConfig\")\n test_app.logger.setLevel(\"CRITICAL\")\n test_context = test_app.test_request_context()\n test_context.push()\n\n yield test_app\n test_context.pop()", "def create_app(test_config=\"test_config.py\"):\n app = Flask(__name__, instance_relative_config=True)\n\n # set common config values\n try:\n os.makedirs(app.instance_path)\n except OSError:\n pass\n\n # if not testing, config is loaded from config.py in the instance folder\n if test_config is None:\n app.config.from_pyfile(\"config.py\")\n else:\n # whichever config file name you pass in also has to be in 
the instance folder\n app.config.from_pyfile(test_config)\n\n db.init_app(app)\n login_manager.init_app(app)\n bootstrap.init_app(app)\n migrate.init_app(app, db)\n mail.init_app(app)\n app.redis = Redis.from_url(REDIS_URL)\n app.task_queue = rq.Queue(\"yamz-tasks\", connection=app.redis)\n app.elasticsearch = Elasticsearch(ELASTICSEARCH_URL)\n\n # apply the blueprints to the app\n from .main import main as main_blueprint\n\n app.register_blueprint(main_blueprint)\n\n from .auth import auth as auth_blueprint\n\n app.register_blueprint(auth_blueprint)\n\n from .term import term as term_blueprint\n\n app.register_blueprint(term_blueprint, url_prefix=\"/term\")\n\n from .graph import graph as graph_blueprint\n\n app.register_blueprint(graph_blueprint, url_prefix=\"/graph\")\n\n # register command line functions\n @app.cli.command()\n def test():\n \"\"\"Run the unit tests.\"\"\"\n import unittest\n\n tests = unittest.TestLoader().discover(\"tests\")\n unittest.TextTestRunner(verbosity=2).run(tests)\n\n return app", "def create_local_app():\n app = create_app()\n app.test_request_context().push()\n return app", "def client(application):\n\n return app.test_client()", "def create_app():\n app = Flask(__name__)\n\n @app.route('/')\n def root():\n \"\"\"Base view.\"\"\"\n return 'TODO - part 2 and beyond!'\n\n return app", "def load_app(name=application_name):\n return TestApp(\n loadapp(\n 'config:test.ini#%s' % name,\n relative_to=getcwd(),\n global_conf={\n 'test': 'true',\n },\n )\n )", "def get_test_app(relpath=None):\n from nucleon.config import settings\n\n app = get_app(relpath)\n settings._set_environment('test')\n on_initialise.fire()\n on_start.fire()\n return TestApp(app)", "def app(self):\n return self.__app", "def setUp(self):\r\n self.app = app.test_client()\r\n self.app.testing = True", "def app():\n\n # create a temp file to isolate the db for each test\n db_fd, db_path = tempfile.mkstemp()\n app = create_app()\n app.config['TESTING'] = True\n app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\n app.config['DATABASE'] = db_path\n app.config['SQLALCHEMY_DATABASE_URI'] = \"sqlite://\"\n\n # create db and load test data\n with app.app_context():\n db.init_app(app)\n db.create_all()\n\n yield app\n\n # close and remove the temporary db\n os.close(db_fd)\n os.unlink(db_path)", "def app(request):\n _app = create_app(TestConfig)\n request.instance.app = _app\n\n ctx = _app.test_request_context()\n ctx.push()\n\n request.addfinalizer(ctx.pop)\n return _app", "def make_app(controller_klass=None, environ=None):\n if controller_klass is None:\n controller_klass = TGController\n\n tg.config['renderers'] = default_config['renderers']\n app = TGApp(config=default_config)\n app.controller_classes['root'] = ControllerWrap(controller_klass)\n\n app = FakeRoutes(app)\n\n app = RegistryManager(app)\n app = beaker.middleware.SessionMiddleware(app, {}, data_dir=session_dir)\n app = CacheMiddleware(app, {}, data_dir=os.path.join(data_dir, 'cache'))\n return TestApp(app)", "def create_app():\n app = Flask(__name__)\n\n\n @app.route('/')\n def barebones():\n return 'the barebones'\n\n return app", "def app(self):\r\n return self._app", "def setUp(self):\n self.app = app.test_client()\n self.app.testing = True", "def setup_app():\n\n # 1 Create Flask application\n app = Flask(\n import_name=__name__,\n template_folder=\"templates\",\n static_folder=\"static\"\n )\n\n # 2 Update the apps configuration\n app = config_selector(app)\n register_error_handlers(app)\n\n cache.init_app(app)\n\n # 3 Set up 
logger\n setup_logger(app.config)\n LOGGER.info(\"Set up app & logger.\")\n\n # 4 Init clients\n init_clients(app.config)\n\n # 5 Init Daemon\n start_daemon(app.config)\n\n # 6 Register blueprints\n register_blueprints(app)\n Bootstrap(app)\n\n return app", "def app():\n os.environ[\"FLASK_ENV\"] = \"test\"\n return create_app()", "def test_build(self):\n self.app.build()", "def create():\n\n return App()", "def app(request) -> Application:\n global fixture\n if fixture is None:\n fixture = WebApplication()\n return fixture", "def app_factory():\n app = web.Application()\n app.add_routes([\n web.get('/ping', handle_ping),\n ])\n return app", "def setUp(self):\n self.app = Flask(__name__)\n self.app_context = self.app.app_context()\n self.app_context.push()\n self.client = self.app.test_client()", "def main():\n print(\"def main\")\n return APP.run()", "def app():\n application = create_app(**TEST_SETTINGS)\n application.test_client_class = TestClient\n application.client = application.test_client()\n\n # Using __get__ binds the function to the application instance\n application.user = user.__get__(application) # pylint: disable=E1121\n application.admin = admin.__get__(application) # pylint: disable=E1121\n\n yield application\n drop_database(application)", "def main():\n app.run(debug=True)", "def get_app():\n return ApplicationContainer()", "def app_mock():\n app.config['Testing'] = True\n return app", "def _get_app(flask_app):\n flask_app.test_client_class = TestClient\n return flask_app.test_client()", "def app(config_path, dbtransaction, test_url):\n settings = get_appsettings(config_path)\n settings['sqlalchemy.url'] = test_url\n app = main({}, **settings)\n return TestApp(app)", "def startapp():", "def test_app_is_created(app):\n assert app.name == \"myapp.app\"", "def create_app(test_config=None):\n app = Flask(__name__)\n\n # apply the blueprints to the app\n from app import common\n\n app.register_blueprint(common.bp)\n\n # default url for site\n app.add_url_rule(\"/\", endpoint=\"index\")\n\n return app", "def create_app(self):\n self.setUpPyfakefs()\n self.fake_os = fake_filesystem.FakeOsModule(self.fs)\n\n populate_fakefs(self)\n\n app = MDFakeFSTestSite(\n \"MDWeb\",\n app_options={}\n )\n\n # Add the partials directory so we have access in the FakeFS\n self.fs.add_real_directory(app.config['PARTIALS_TEMPLATE_PATH'])\n \n app.start()\n\n return app", "def test_11_create_application(self, mock):\r\n # Create an app as an anonymous user\r\n with self.flask_app.app_context():\r\n res = self.new_application(method=\"GET\")\r\n assert self.html_title(\"Sign in\") in res.data, res\r\n assert \"Please sign in to access this page\" in res.data, res\r\n\r\n res = self.new_application()\r\n assert self.html_title(\"Sign in\") in res.data, res.data\r\n assert \"Please sign in to access this page.\" in res.data, res.data\r\n\r\n # Sign in and create an application\r\n res = self.register()\r\n\r\n res = self.new_application(method=\"GET\")\r\n assert self.html_title(\"Create an Application\") in res.data, res\r\n assert \"Create the application\" in res.data, res\r\n\r\n res = self.new_application(long_description='My Description')\r\n assert \"<strong>Sample App</strong>: Update the application\" in res.data\r\n assert \"Application created!\" in res.data, res\r\n\r\n app = db.session.query(App).first()\r\n assert app.name == 'Sample App', 'Different names %s' % app.name\r\n assert app.short_name == 'sampleapp', \\\r\n 'Different names %s' % app.short_name\r\n\r\n assert 
app.long_description == 'My Description', \\\r\n \"Long desc should be the same: %s\" % app.long_description", "def setUp(self):\n #app['TESTING'] = True\n self.test_app = app.test_client()", "def app_client(app) -> Flask:\n app.testing = True\n app.response_class = JSONResponse\n return app.test_client()", "def run(**kwargs) -> None:\n\n # update the path to ensure the App has access to required modules\n app_lib = AppLib()\n app_lib.update_path()\n\n # import modules after path has been updated\n\n # third-party\n from tcex import TcEx # pylint: disable=import-outside-toplevel\n\n # first-party\n from app import App # pylint: disable=import-outside-toplevel\n\n tcex = TcEx()\n\n try:\n # load App class\n app = App(tcex)\n\n # set app property in testing framework\n if callable(kwargs.get('set_app')):\n kwargs.get('set_app')(app)\n\n # configure custom trigger message handler\n tcex.service.create_config_callback = app.create_config_callback\n tcex.service.delete_config_callback = app.delete_config_callback\n tcex.service.shutdown_callback = app.shutdown_callback\n tcex.service.webhook_event_callback = app.webhook_event_callback\n\n # perform prep/setup operations\n app.setup(**{})\n\n # listen on channel/topic\n tcex.service.listen()\n\n # start heartbeat threads\n tcex.service.heartbeat()\n\n # inform TC that micro-service is Ready\n tcex.service.ready = True\n\n # loop until exit\n if hasattr(app, 'loop_forever'):\n app.loop_forever() # pylint: disable=no-member\n else:\n tcex.log.info('Looping until shutdown')\n while tcex.service.loop_forever(sleep=1):\n pass\n\n # perform cleanup/teardown operations\n app.teardown(**{})\n\n # explicitly call the exit method\n tcex.playbook.exit(msg=app.exit_message)\n\n except Exception as e:\n main_err = f'Generic Error. 
See logs for more details ({e}).'\n tcex.log.error(traceback.format_exc())\n tcex.playbook.exit(1, main_err)", "def app() -> Flask:\n settings_override = {\n \"DEBUG\": True,\n \"SQLALCHEMY_DATABASE_URI\": \"sqlite:///\" + get_test_database_path(),\n \"SQLALCHEMY_TRACK_MODIFICATIONS\": False,\n \"PIPWATCH_API_RESET_DB_ON_START\": False,\n \"PIPWATCH_API_SEED_DB\": False\n }\n\n application = get_flask_application(settings_override=settings_override)\n context = application.app_context()\n context.push()\n\n yield application\n context.pop()", "def cli_runner(app):\n\n LOG.info(\"cli_runner.app: %s\", app)\n cli_runner = app.test_cli_runner()\n\n LOG.info(\"cli_runner.app.config: %s\", app.config)\n\n LOG.info(\"cli_runner.app.cli_runner: %s\", cli_runner)\n return cli_runner", "def setUp(self) -> None:\n self._app = WebTestApp(application)", "def setUp(self):\n _, instance_path, shared_inputs = sys.argv\n app = lnt.server.ui.app.App.create_standalone(instance_path)\n app.testing = True\n self.client = app.test_client()\n self.shared_inputs = shared_inputs", "def testing_app() -> Flask:\n app = create_app(\"app.config.TestingConfig\")\n with app.app_context():\n db.drop_all()\n db.create_all()\n return app", "def test_application_is_singleton():\n\n app = PyrinUnitTestApplication()\n assert app == application_services.get_current_app()", "def client(app):\n return app.test_client()", "def client(app):\n return app.test_client()", "def client(app):\n return app.test_client()", "def client(app):\n return app.test_client()", "def client(app):\n return app.test_client()", "def client(app):\n return app.test_client()", "def client(app):\n return app.test_client()", "def client(app):\n return app.test_client()", "def client(app):\n return app.test_client()", "def create_app():\n logging.basicConfig(\n level=REANA_LOG_LEVEL,\n format=REANA_LOG_FORMAT\n )\n app = Flask(__name__)\n app.config.from_object('reana_server.config')\n app.secret_key = \"hyper secret key\"\n\n # Register API routes\n from .rest import ping, secrets, users, workflows # noqa\n app.register_blueprint(ping.blueprint, url_prefix='/api')\n app.register_blueprint(workflows.blueprint, url_prefix='/api')\n app.register_blueprint(users.blueprint, url_prefix='/api')\n app.register_blueprint(secrets.blueprint, url_prefix='/api')\n\n app.session = Session\n CORS(app)\n return app", "def app(request): \n\n # app instance\n app = create_app(\"testing_app\", {\n 'API_BASE_PATH': \"/api/\",\n 'TESTING': True, \n 'SECRET_KEY': 'test', \n })\n\n # add to the scope\n ctx = app.app_context()\n ctx.push()\n\n def teardown(): \n init_db()\n ctx.pop() \n\n init_db()\n\n request.addfinalizer(teardown)\n yield app \n \n drop_db()", "def main(args=None):\n app()\n return 0", "def setUp(self):\n self.app = init_api()", "def create_app(self):\n raise NotImplementedError", "def test_create_app():\n assert not create_app().testing\n assert create_app({'TESTING': True}).testing", "def main(config: str):\n application = Application(config_path=config)\n application.run()", "def test_main_app(self):\n resp = self.app.get('/')\n # ensure relevant pieces of UI are returned\n assert 'Foggy Fork' in resp.data\n assert 'A San Francisco Food Truck Map' in resp.data\n assert 'Where in the fog are you looking for food?' in resp.data\n assert '<div id=\"map-canvas\"></div>' in resp.data" ]
[ "0.82671756", "0.81794083", "0.80875695", "0.7958443", "0.79312617", "0.77322614", "0.77322614", "0.77322614", "0.77094376", "0.76604795", "0.7629246", "0.7622498", "0.7535449", "0.75292844", "0.7502602", "0.7494712", "0.7456499", "0.7428539", "0.737914", "0.73678696", "0.73401487", "0.7304298", "0.7294011", "0.7291663", "0.7290658", "0.72382987", "0.72329664", "0.7195877", "0.7183889", "0.71467936", "0.7139075", "0.71192974", "0.71178293", "0.70929825", "0.70795876", "0.7073956", "0.7061687", "0.7053797", "0.7053797", "0.7053797", "0.70380056", "0.70286524", "0.7001451", "0.70008737", "0.69911927", "0.697625", "0.69472694", "0.6931783", "0.692375", "0.69099087", "0.69068736", "0.68886995", "0.6870397", "0.6861496", "0.68603647", "0.6846197", "0.683633", "0.683584", "0.6823852", "0.68218565", "0.6807517", "0.68035674", "0.6801804", "0.67959446", "0.67958134", "0.67925256", "0.6791882", "0.67917234", "0.6790109", "0.6789633", "0.67893964", "0.6782439", "0.6777422", "0.67746633", "0.6765633", "0.6762023", "0.67597944", "0.67561054", "0.6752933", "0.6747167", "0.67463636", "0.67421234", "0.67384756", "0.67341626", "0.6726817", "0.6726817", "0.6726817", "0.6726817", "0.6726817", "0.6726817", "0.6726817", "0.6726817", "0.6726817", "0.6722751", "0.67173964", "0.67173904", "0.6711941", "0.670812", "0.6704458", "0.6702338", "0.66915494" ]
0.0
-1
Returns the gr_amount without bill between the party and supplier.
def get_gr(supplier_id: int, party_id: int) -> int: # Open a new connection db, cursor = db_connector.cursor() query = "select gr_amount from supplier_party_account where supplier_id = '{}' AND party_id = '{}'".format( supplier_id, party_id) cursor.execute(query) data = cursor.fetchall() db.disconnect() if len(data) == 0: return 0 return int(data[0][0])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_usable_gr(supplier_id: int, party_id: int) -> int:\n # Open a new connection\n db, cursor = db_connector.cursor()\n\n query = \"select SUM(settle_amount) from gr_settle where supplier_id = '{}' AND party_id = '{}'\".format(\n supplier_id, party_id)\n cursor.execute(query)\n data = cursor.fetchall()\n\n db.disconnect()\n\n if data[0][0] is None:\n return get_gr(supplier_id, party_id)\n else:\n return get_gr(supplier_id, party_id) - int(data[0][0])", "def get_billed_amount(self):\n return Leg.objects.filter(transaction__recurred_cost__recurring_cost=self, amount__gt=0).sum_to_balance()", "def get_clean_discharge(\n _db: Session = db, *, start: datetime = None, end: datetime = None,\n store_internal_id: int = None, owner_id: int = None\n) -> float:\n exclude_contracts: t.List[int] = []\n discharges: t.List[float] = []\n claims = crud.claim.get_query(_db)\n if start and end:\n claims = claims.filter(models.Claim.created_at.between(start, end)) # noqa\n if store_internal_id:\n claims = claims.filter(models.Claim.store_internal_id == store_internal_id) # noqa\n if owner_id:\n claims = claims.filter(models.Claim.owner_id == owner_id) # noqa\n # FOR BETTER SQL PERFORMANCE WE ENCLOSE OUR QUERY: EXCLUDE ALL WITH DISCHARGE IS NULL;\n claims = claims.filter(models.Claim.discharge.isnot(None)) # noqa\n claims: t.List[models.Claim] = claims.all() # noqa\n\n for claim in claims:\n if claim.contract_nr not in exclude_contracts:\n exclude_contracts.append(claim.contract_nr)\n discharges.append(claim.discharge)\n total_discharges: float = sum(discharges)\n total_discharges_round: float = round(total_discharges, 2)\n return total_discharges_round", "def get_debt(self):\n sum_import = self.invoice_set.filter(\n expiration_date__lte=date.today(),\n paid=False,\n debited=False,\n canceled=False,\n uncollectible=False,\n ).aggregate(Sum(\"amount\"))\n return sum_import.get(\"amount__sum\", None)", "def get_gross(self):\n\n try:\n receiver = self.cleaned_data[\"customer\"]\n net_total = float(self.cleaned_data[\"net\"])\n except KeyError:\n return \"------\"\n except AttributeError:\n return \"------\"\n\n if receiver and net_total:\n try:\n tax = float(self.INTERNAL_TAX_DICT[receiver])\n except KeyError:\n tax = float(self.INTERNAL_TAX_DICT[self.cleaned_data[\"warehouse\"]])\n gross_total = round(net_total * tax + net_total, 2)\n\n return f\"{gross_total:.2f}\"\n\n return \"------\"", "def get_gr_between_dates(supplier_id: int, party_id: int, start_date: str, end_date: str) -> int:\n # Open a new connection\n db, cursor = db_connector.cursor()\n\n start_date = str(datetime.datetime.strptime(start_date, \"%d/%m/%Y\"))\n end_date = str(datetime.datetime.strptime(end_date, \"%d/%m/%Y\"))\n\n query = \"select SUM(settle_amount) from gr_settle where \" \\\n \"party_id = '{}' AND supplier_id = '{}' AND \" \\\n \"start_date >= '{}' AND end_date <= '{}';\".format(party_id, supplier_id, start_date, end_date)\n\n cursor.execute(query)\n data = cursor.fetchall()\n db.disconnect()\n if data[0][0] is None or len(data) == 0 or data[0][0] == 0:\n return -1\n return data[0][0]", "def get_non_traffic_charges_grid_row_details(self):\n self.grid_row_details_dictionary.clear()\n self.grid_row_details_dictionary.update({\"Accounting Code\": \"\", \"Start Date\": \"\", \"End Date\": \"\", \"Charge Name\": \"\", \"Charge Category\": \"\", \"Currency\": \"\", \"Amount\": \"\", \"Tax Template\": \"\"})\n non_traffic_charges_grid_row_details = self.get_grid_row_details(self.non_traffic_charges_grid_div_id, 
self.grid_row_details_dictionary)\n if \",\" in non_traffic_charges_grid_row_details[\"Amount\"]:\n non_traffic_charges_grid_row_details[\"Amount\"] = non_traffic_charges_grid_row_details[\"Amount\"].replace(\",\", \"\")\n return non_traffic_charges_grid_row_details", "def remaining_cash_without_100s(self, amount):\n return amount % 100", "def get_wh_debit_credit(self, fb_brw):\n get_wh_debit_credit_sum = 0.0\n for line in fb_brw.fbl_ids:\n get_wh_debit_credit_sum += line['get_wh_debit_credit']\n return get_wh_debit_credit_sum", "def number_only(number_available):\n number_available = number_available.removeprefix('In stock (')\n number_available = number_available.removesuffix(' available)')\n return number_available", "def unequal_paid(self):\n return self._unequal_paid", "def get_money(self) -> float: \n money = get_owned()\n try:\n assert type(self.owned_init) == float\n except AssertionError: #The first time one tries to make a bet this is evoked\n self.owned_init = money\n finally:\n return money", "def remaining_cash_without_20s(self, amount):\n return amount % 20", "def remaining_cash_without_50s(self, amount):\n return amount % 50", "def getamount(self):\n return self.__amount", "def safeWithdrawal(self):\n if self._after_dead_line():\n # each contributor can withdraw the amount they contributed if the goal was not reached\n if not self._funding_goal_reached.get():\n amount = self._balances[self.msg.sender]\n self._balances[self.msg.sender] = 0\n if amount > 0:\n if self.icx.send(self.msg.sender, amount):\n self.FundTransfer(self.msg.sender, amount, False)\n Logger.debug(f'FundTransfer({self.msg.sender}, {amount}, False)', TAG)\n else:\n self._balances[self.msg.sender] = amount\n\n # The sales target has been met. Owner can withdraw the contribution.\n if self._funding_goal_reached.get() and self._addr_beneficiary.get() == self.msg.sender:\n if self.icx.send(self._addr_beneficiary.get(), self._amount_raised.get()):\n self.FundTransfer(self._addr_beneficiary.get(), self._amount_raised.get(), False)\n Logger.debug(f'FundTransfer({self._addr_beneficiary.get()},'\n f'{self._amount_raised.get()}, False)', TAG)\n # reset amount_raised\n self._amount_raised.set(0)\n else:\n # if the transfer to beneficiary fails, unlock contributors balance\n Logger.debug(f'Failed to send to beneficiary!', TAG)\n self._funding_goal_reached.set(False)", "def get_missing(self, other_draft):\n missing = {}\n this_picks = self.order_dict\n other_picks = other_draft.order_dict\n\n\n for k, v in this_picks.items():\n if k not in other_picks:\n missing[k] = v\n\n return sorted(missing.items(), key=lambda e: e[1])", "def amount_due(self):\n queryset = self.supplyorderitem_set.filter(delivery_date__isnull=False).aggregate(\n amount_due=Sum(F('unit_price')*F('quantity_ordered'))\n )\n return queryset['amount_due'] or 0", "def get_amount_exempt_document(self, txt_line):\n tax = 0\n amount_doc = 0\n for tax_line in txt_line.invoice_id.tax_line:\n if 'SDCF' in tax_line.name or \\\n (tax_line.base and not tax_line.amount):\n tax = tax_line.base + tax\n else:\n amount_doc = tax_line.base + amount_doc\n return (tax, amount_doc)", "def deposit(amt) :\r\n\tglobal bal\r\n\tbal_in = bal\r\n\t#PREMISES FOR NEXT LINE: \r\n\t# (amt >= 0)\r\n\t# (bal >= 0)\r\n\t# (bal == bal_in)\r\n\tbal = bal + amt\r\n\t#PREMISES FOR ATTACHED PROOF, IF ANY: \r\n\t# (bal == (bal_old + amt))\r\n\t# (amt >= 0)\r\n\t# (bal_old >= 0)\r\n\t# (bal_old == bal_in)\r\n\t#PREMISES FOR NEXT LINE: \r\n\t# (amt >= 0)\r", "def incomes_from_outside(self) -> 
Decimal:\n return Decimal(\n sum(\n [\n t.amount\n for t in self.transactions_all\n if t.amount > 0 and not t.other_party.is_user_owner\n ]\n )\n )", "def get_bill_amt():\n\n return float(input(\"How much was your total bill: \"))", "def take(self, desired_amount):\n if self.amount >= desired_amount:\n grab = desired_amount\n else:\n grab = min(desired_amount, self.amount)\n self.amount -= grab\n print(f\"{self} {self.amount} of supplies left\")\n return grab", "def stripe_amount(self):\n if self.currency.code in (\n 'BIF', 'XAF', 'XPF', 'CLP',\n 'KMF', 'DJF', 'GNF', 'JPY',\n 'MGA', 'PYG', 'RWF', 'KRW',\n 'VUV', 'VND', 'XOF'):\n return int(self.amount)\n return int(self.amount * 100)", "def compute_fee_price(supplier_with_transaction):\n supplier_item = supplier_with_transaction.get('supplier_detail')\n result = 0\n # Check if the session has min threshold and max threshold to get the right value for result\n if supplier_item.get('has_session_fee') and supplier_item.get(\n 'has_minimum_billing_threshold') and supplier_item.get('has_max_session_fee'):\n if supplier_item.get('min_billing_amount', 0) > supplier_item.get('session_fee', 0):\n result = supplier_item.get('min_billing_amount', 0)\n elif supplier_item.get('max_session_fee') > supplier_item['session_fee'] > supplier_item[\n 'min_billing_amount']:\n result = supplier_item.get('session_fee', 0)\n elif supplier_item.get('session_fee', 0) > supplier_item.get('max_session_fee'):\n result = supplier_item.get('max_session_fee')\n # Check for min threshold only to get the min bill\n elif supplier_item.get('has_session_fee') and supplier_item.get('has_minimum_billing_threshold'):\n if supplier_item.get('min_billing_amount') > supplier_item.get('session_fee'):\n result = supplier_item.get('min_billing_amount')\n elif supplier_item.get('session_fee') > supplier_item.get('min_billing_amount'):\n result = supplier_item.get('session_fee')\n return result", "def calc_free_g(energies, temperatures):\n pass", "def _drop_units(q):\n try:\n return q.value\n except AttributeError:\n try:\n return q.value\n except AttributeError:\n return q", "def get_inbound_statements_grid_raised_dispute_amount(self):\n return self.get_specific_column_value_from_grid(self.inbound_statement_grid_div_id, self.inbound_statements_grid_row_count, self.inbound_statements_grid_raised_dispute_amount_column_name)", "def get_nuclear_potential(self, r):\n\n return -self.nuclear_charge/r", "def _amount_line(self, cr, uid, ids, prop, unknow_none, unknow_dict):\n res = {}\n tax_obj = self.pool.get('account.tax')\n cur_obj = self.pool.get('res.currency')\n for line in self.browse(cr, uid, ids):\n price = line.price_unit * (1-(line.discount or 0.0)/100.0)\n taxes = tax_obj.compute_all(cr, uid, line.invoice_line_tax_id, price, line.quantity, product=line.product_id, partner=line.invoice_id.partner_id)\n res[line.id] = taxes['total'] + line.variation_amount\n if line.invoice_id:\n cur = line.invoice_id.currency_id\n res[line.id] = cur_obj.round(cr, uid, cur, res[line.id])\n return res", "def fidelity(order: Order) -> Decimal:\n if order.customer.fidelity >= 1000:\n return order.total() * Decimal('0.05')\n return Decimal(0)", "def get_safe_price(side: str):\n return (\n const.MIN_SUM if side == const.BUY\n else const.MAX_SUM\n )", "def free(self,source):\n return self.near(source, self.free_radius)", "def getAmount2(*args):", "def burn(amount: int) -> int:\n global total_supply\n\n _assert_is_bank(context.sender)\n total_supply = base.burn(balance_of, total_supply, context.sender, 
amount)\n return total_supply", "def format_amount(self) -> str:\n if self.amount_debit != '':\n return self.amount_debit.replace('-', '')\n return self.amount_credit.replace('-', '')", "def missing_data_amounts():\n\n return [2]", "def discharge(self):\n return self._discharge", "def mass_anti_ice(\n design_mass_TOGW: float,\n):\n return 0.002 * design_mass_TOGW", "def expenses_to_outside(self) -> Decimal:\n return Decimal(\n sum(\n [\n t.amount\n for t in self.transactions_all\n if t.amount < 0 and not t.other_party.is_user_owner\n ]\n )\n )", "def getFineAmount(id):\n data = db.Database(filename=\"IFB299.db\")\n fine = data.retrieve(\"finePayments\",\"Fine_Number\",id)\n if fine['Citation_Type'] == \"Parking\":\n amt = 90 + getOverDuePayment(id)\n else:\n amt = 50 + getOverDuePayment(id)\n data.close()\n return amt", "def get_amount_line(self, txt_line, amount_exempt):\n ali_max = self.get_max_aliquot(txt_line)\n exempt = 0\n\n if ali_max == int(txt_line.tax_wh_iva_id.tax_id.amount * 100):\n exempt = amount_exempt\n total = (txt_line.tax_wh_iva_id.base + txt_line.tax_wh_iva_id.amount +\n exempt)\n return total, exempt", "def withdraw(self, amount):\n if amount < 0:\n return \"Amount must be >= 0\"\n elif self._balance < amount:\n return \"Insufficient funds\"\n else:\n self._balance -= amount\n return None", "def get_amount_normal(self, billing_cycle):\n if self.is_one_off():\n billing_cycle_number = self._get_billing_cycle_number(billing_cycle)\n\n if billing_cycle_number > self.total_billing_cycles:\n # A future billing cycle after this one has ended\n return Decimal('0')\n else:\n # This is a current cycle. Split the amount into\n # equal parts then return the part for this cycle\n splits = ratio_split(\n amount=self.fixed_amount,\n ratios=[Decimal('1')] * self.total_billing_cycles,\n )\n return splits[billing_cycle_number - 1]\n else:\n # This is a none-one-off recurring cost, so the logic is simple\n return self.fixed_amount", "def check_if_dropship(doc,method):\n\n\tmr_list = []\n\tconditions = \"\"\n\tdairy = frappe.db.get_value(\"Company\",{\"is_dairy\":1},\"name\")\n\tuser_doc = frappe.db.get_value(\"User\",{\"name\":frappe.session.user},['operator_type','company'], as_dict =1)\n\tco = frappe.db.get_value(\"Village Level Collection Centre\",{\"name\":user_doc.get('company')},\"camp_office\")\n\n\tif user_doc.get(\"operator_type\") == 'Chilling Centre' and not doc.flags.is_api:\n\t\tfor item in doc.items:\n\t\t\tif item.material_request:\n\t\t\t\tmr_list.append(str(item.material_request))\n\n\t\tif mr_list:\n\t\t\tconditions = \"and pi.material_request = '{0}'\".format(mr_list[0]) if len(mr_list) == 1 else \"and pi.material_request in {0}\".format(tuple(mr_list))\n\n\t\t#check PO with dropship\n\t\tif conditions:\n\t\t\tpo = frappe.db.sql(\"\"\"select p.name,pi.material_request from `tabPurchase Order` p,`tabPurchase Order Item` pi where p.company = '{0}' \n\t\t\t\t\t\t\t{1} and p.docstatus = 1 and p.name = pi.parent and p.is_dropship = 1 group by pi.material_request\"\"\".format(dairy,conditions),as_dict=1)\n\t\t\tif po:\n\t\t\t\tpo_data = [data.get('name') for data in po]\n\n\t\t\t\tfor data in set(po_data):\n\t\t\t\t\tpo_doc = frappe.get_doc(\"Purchase Order\",data)\n\n\t\t\t\t\tpi = make_pi_against_localsupp(po_doc,doc)\n\t\t\t\t\tpr = make_pr_against_localsupp(po_doc,doc)\t\t\n\t\t\t\t\n\t\t\t\tif pi:\n\t\t\t\t\tpi.flags.ignore_permissions = True \t\t\n\t\t\t\t\tpi.save()\n\t\t\t\t\tpi.submit()\n\n\t\t\t\t# mi_status_update(doc)", "def basket_total_excl_tax(self):\n return 
self.total_excl_tax - self.shipping_excl_tax - self.surcharge_excl_tax", "def charges_net_effect(self):\n return Decimal(\n sum(\n [\n charge.charge_value\n for charge in self.charges.values()\n if charge.net_affecting\n ]\n )\n )", "def get_price_excluding_tax(article):\n price_without_tax = article.select(\"tr\")\n return price_without_tax[2].td.text", "def amount(self):\n return(self.order_master.amount)", "def basket_total_before_discounts_excl_tax(self):\n result = self.lines.aggregate(total=Sum(\"line_price_before_discounts_excl_tax\"))\n return result[\"total\"]", "def clean_rent(self, rent):\n # assume rent is either int/float or str\n if isinstance(rent, str):\n return int(rent.replace('$', '').replace(',', ''))\n else:\n return rent", "def getTransferListSummaryWithoutPrices(self):\n players = self.getAllPlayerInfoTransferlist()\n\n num_p_sold = 0\n num_p_expired = 0\n num_p_unlisted = 0\n num_p_listed = 0\n sold_p_value = 0\n\n for p in players:\n p_bidstatus = p[1]\n p_id = p[8]\n p_soldprice = p[5] # is 0 if unlisted\n\n if \"won\" in p_bidstatus:\n num_p_sold += 1\n sold_p_value += p_soldprice\n if \"expired\" in p_bidstatus:\n num_p_expired += 1\n if (p_bidstatus == \"listFUTItem\"):\n num_p_unlisted += 1\n if (p_bidstatus == \"listFUTItem has-auction-data\"):\n num_p_listed += 1\n\n # TODO subtract bought price\n return num_p_sold, num_p_expired, num_p_unlisted, num_p_listed", "def get_suppliers(filters):\n\treturn frappe.db.sql(\"\"\"\n\t\tSELECT\n\n\t\t\tpar.debtor_creditor_number as 'Konto',\n\t\t\tCASE sup.supplier_type\n\t\t\t\tWHEN 'Company' THEN sup.supplier_name\n\t\t\t\tELSE null\n\t\t\t\tEND as 'Name (Adressatentyp Unternehmen)',\n\t\t\tCASE sup.supplier_type\n\t\t\t\tWHEN 'Individual' THEN TRIM(SUBSTR(sup.supplier_name, LOCATE(' ', sup.supplier_name)))\n\t\t\t\tELSE null\n\t\t\t\tEND as 'Name (Adressatentyp natürl. Person)',\n\t\t\tCASE sup.supplier_type\n\t\t\t\tWHEN 'Individual' THEN SUBSTRING_INDEX(SUBSTRING_INDEX(sup.supplier_name, ' ', 1), ' ', -1)\n\t\t\t\tELSE null\n\t\t\t\tEND as 'Vorname (Adressatentyp natürl. 
Person)',\n\t\t\tCASE sup.supplier_type\n\t\t\t\tWHEN 'Individual' THEN '1'\n\t\t\t\tWHEN 'Company' THEN '2'\n\t\t\t\tELSE '0'\n\t\t\t\tEND as 'Adressatentyp',\n\t\t\tadr.address_line1 as 'Straße',\n\t\t\tadr.pincode as 'Postleitzahl',\n\t\t\tadr.city as 'Ort',\n\t\t\tUPPER(country.code) as 'Land',\n\t\t\tadr.address_line2 as 'Adresszusatz',\n\t\t\tadr.email_id as 'E-Mail',\n\t\t\tadr.phone as 'Telefon',\n\t\t\tadr.fax as 'Fax',\n\t\t\tsup.website as 'Internet',\n\t\t\tsup.tax_id as 'Steuernummer',\n\t\t\tcase sup.on_hold when 1 then sup.release_date else null end as 'Zahlungssperre bis'\n\n\t\tFROM `tabSupplier` sup\n\n\t\t\tleft join `tabParty Account` par\n\t\t\ton par.parent = sup.name\n\t\t\tand par.parenttype = 'Supplier'\n\t\t\tand par.company = %(company)s\n\n\t\t\tleft join `tabDynamic Link` dyn_adr\n\t\t\ton dyn_adr.link_name = sup.name\n\t\t\tand dyn_adr.link_doctype = 'Supplier'\n\t\t\tand dyn_adr.parenttype = 'Address'\n\n\t\t\tleft join `tabAddress` adr\n\t\t\ton adr.name = dyn_adr.parent\n\t\t\tand adr.is_primary_address = '1'\n\n\t\t\tleft join `tabCountry` country\n\t\t\ton country.name = adr.country\n\n\t\tWHERE adr.is_primary_address = '1'\n\t\t\"\"\", filters, as_dict=1)", "def get_amount(self):\n\t\tif self.amount is not None:\n\t\t\treturn self.amount\n\t\treturn abort(400, {\"message\" : \"please provide the amount to process\"})", "def withdraw(self, amount):\n if amount > self.balance:\n return 'Insufficient funds'\n self.balance = self.balance - amount\n return self.balance", "def removePlayerMoney(self, player, amount):\n\t\tplayer.removeMoney(amount)\n\t\tif player.money == 0:\n\t\t\tself.allinQ.append(player.name)", "def debit(self):\n debit = 0 #variable to track the remaining debit\n debit = self.total_purchase() - self.total_clearance()\n return debit", "def _get_new_qty_for_none_goodies_line(self, cr, uid, qty, product_id, order_id, context=None):\n goodies_line_ids = self.search(cr, uid, [\n ['order_id', '=', order_id],\n ['product_id', '=', product_id],\n ['goodie_for_line_id', '!=', False]\n ], context=context)\n for goodie_line in self.browse(cr, uid, goodies_line_ids, context=context):\n qty -= goodie_line.product_qty\n if qty < 0:\n qty = 0\n return qty", "def clean_amount(self):\n if self.payer_channel == 2: # ignore balance check if not using wallet\n return self.cleaned_data['amount']\n else:\n pay_amount = self.cleaned_data.get('amount')*100\n payer_wallet = Wallet.objects.filter(wallet_id=self.cleaned_data.get('payer_method')).first()\n if payer_wallet is None:\n raise forms.ValidationError(\n self.error_messages['payer wallet unavailable'],\n code='payer wallet unavailable'\n )\n else:\n payer_balance = payer_wallet.balance\n if pay_amount > payer_balance:\n raise forms.ValidationError(\n self.error_messages['no_enough_balance'],\n code='no_enough_balance'\n )\n else:\n return self.cleaned_data['amount']", "def _amount_all(self, cr, uid, ids,field_name, arg, context={}):\n res={}\n for record in self.browse(cr, uid, ids, context=context):\n res[record.id] = { 'amount_untaxed': 0.0, 'amount_tax': 0.0, 'amount_total': 0.0}\n amount_untaxed = 0.0\n amount_tax = 0.0\n amount_total = 0.0\n\t if not record.allowances_lines_after and record.allowances_lines_before:\n \tfor line in record.allowances_lines_before:\n \tamount_untaxed += line.amount_untaxed\n \tamount_tax += line.amount_tax\n \tamount_total += line.amount_total\n \tres[record.id]['amount_untaxed'] = amount_untaxed \n \tres[record.id]['amount_tax'] = amount_tax \n 
\tres[record.id]['amount_total'] = amount_total \n\n\t elif record.allowances_lines_after and record.allowances_lines_before :\n \tfor line in record.allowances_lines_after:\n \tamount_untaxed += line.amount_untaxed\n \tamount_tax += line.amount_tax\n \tamount_total += line.amount_total\n \tres[record.id]['amount_untaxed'] = amount_untaxed \n \tres[record.id]['amount_tax'] = amount_tax \n \tres[record.id]['amount_total'] = amount_total \n return res", "def toll_free(self, country_code):\r\n return AvailablePhoneNumbersTollFree(self, country_code)", "def calculate_production_drop(node,reduction_factor):\n original_supply = {}\n dropped_supply = {}\n for supplier in node.suppliers:\n if supplier.product not in original_supply:\n original_supply[supplier.product] = 0\n dropped_supply[supplier.product] = 0\n supply_volume_approximation = supplier.production_volume\n # supply_volume_approximation = supplier.production_volume / len(supplier.costumers) # Alternative approximation method\n original_supply[supplier.product] += supply_volume_approximation\n dropped_supply[supplier.product] += supply_volume_approximation * (\n (100 - node.in_edge_capacity_drop[supplier.name]) / 100)\n drops_list = []\n for key in original_supply:\n drops_list.append((1 - (dropped_supply[key] / original_supply[key])) * 100)\n node.production_drop = max(drops_list) * ((100 - reduction_factor) / 100)", "def _get_supplier_(obj, line):\n \n iNo = 0\n strRet = None\n for item in obj.order_line:\n iNo += 1\n if (item.id == line.id):\n if (len(item.product_id.seller_ids)>0):\n strRet = item.product_id.seller_ids[0] and item.product_id.seller_ids[0].name.name or None\n break\n \n \n return strRet", "def obtain_amount(cls, amount_string):\n return float(string.replace(amount_string, ',', '.'))", "def _amount_all(self, cr, uid, ids,field_name, arg, context={}):\n res={}\n for record in self.browse(cr, uid, ids, context=context):\n val = 0.0\n for line in record.enrich_lines:\n if line.state == 'done' :\n val += line.cost\n res[record.id] = {\n 'paid_amount':val,\n 'residual_amount':record.amount - val,\n }\n return res", "def _amount_residual(self, cr, uid, ids, field_names, args, context=None):\n res = {}\n if context is None:\n context = {}\n cur_obj = self.pool.get('res.currency')\n for move_line in self.browse(cr, uid, ids, context=context):\n res[move_line.id] = {\n 'amount_residual': 0.0,\n 'amount_residual_currency': 0.0,\n }\n\n if move_line.reconcile_id:\n continue\n if not move_line.account_id.type in ('payable', 'receivable'):\n #this function does not suport to be used on move lines not related to payable or receivable accounts\n continue\n\n if move_line.currency_id:\n move_line_total = move_line.amount_currency\n sign = move_line.amount_currency < 0 and -1 or 1\n else:\n move_line_total = move_line.debit - move_line.credit\n sign = (move_line.debit - move_line.credit) < 0 and -1 or 1\n line_total_in_company_currency = move_line.debit - move_line.credit\n context_unreconciled = context.copy()\n if move_line.reconcile_partial_id:\n for payment_line in move_line.reconcile_partial_id.line_partial_ids:\n if payment_line.id == move_line.id:\n continue\n if payment_line.currency_id and move_line.currency_id and payment_line.currency_id.id == move_line.currency_id.id:\n move_line_total += payment_line.amount_currency\n else:\n if move_line.currency_id:\n context_unreconciled.update({'date': payment_line.date})\n amount_in_foreign_currency = cur_obj.compute(cr, uid, move_line.company_id.currency_id.id, 
move_line.currency_id.id, (payment_line.debit - payment_line.credit), round=False, context=context_unreconciled)\n move_line_total += amount_in_foreign_currency\n else:\n move_line_total += (payment_line.debit - payment_line.credit)\n line_total_in_company_currency += (payment_line.debit - payment_line.credit)\n\n result = move_line_total\n res[move_line.id]['amount_residual_currency'] = sign * (move_line.currency_id and self.pool.get('res.currency').round(cr, uid, move_line.currency_id, result) or result)\n res[move_line.id]['amount_residual'] = sign * line_total_in_company_currency\n return res", "def withdraw(self, amount):\r\n balance = self['get']('balance')\r\n if amount > balance:\r\n return 'Insufficient funds'\r\n self['set']('balance', balance - amount)\r\n return self['get']('balance')", "def _compute_amount_fields(self, amount, src_currency, company_currency):\n amount_currency = False\n currency_id = False\n date = self.env.context.get('date') or fields.Date.today()\n company = self.env.context.get('company_id')\n company = self.env['res.company'].browse(company) if company else self.env.user.company_id\n if src_currency and src_currency != company_currency:\n amount_currency = amount\n amount = src_currency._convert(amount, company_currency, company, date)\n currency_id = src_currency.id\n debit = amount > 0 and amount or 0.0\n credit = amount < 0 and -amount or 0.0\n return debit, credit, amount_currency, currency_id", "def _get_reference_accounting_values_for_valuation(self, cr, uid, move, context=None):\n reference_amount, reference_currency_id = super(stock_move, self)._get_reference_accounting_values_for_valuation(cr, uid, move, context=context)\n if move.product_id.cost_method != 'average' or not move.price_unit:\n # no average price costing or cost not specified during picking validation, we will\n # plug the purchase line values if they are found.\n if move.purchase_line_id and move.picking_id.purchase_id.company_id:\n reference_amount, reference_currency_id = move.purchase_line_id.price_unit, move.picking_id.purchase_id.company_id.currency_id.id\n return reference_amount, reference_currency_id", "def somme(self) -> Numeric:\n return query_sum(\n self.offre_set.filter(valide=True),\n \"prix\",\n output_field=models.DecimalField(),\n )", "def getAmount(self):\n return self.amount", "def dilutionneeded(self) -> float:\n return self.stock*1.0/self.final", "def balance(self):\n return self.our_state.balance(self.partner_state)", "def monome_plus_petit_degre(self):\n\t\tif self.__tete:\n\t\t\t\"\"\" plus grand == plus a droite dans l'ABR \"\"\"\n\t\t\treturn self.__tete.plus_grand()\n\t\telse:\n\t\t\treturn None", "def budget(self):\n\n budget = (_House.closing_cost*self.vars['after_repair_value']) - self.vars['purchase_price'] - self.vars['profit'] - _House.broker_fee\n return float(round(budget, 2))", "def remaining(self):\n if self.goal:\n return self.goal - self.total_donated()\n else:\n return 0", "def payoff_nopool(p=0.06,\n MLB_contract=4158333, minor_contract=6600, thresh=1500000):\n E_nopool = MLB_contract*p + minor_contract * (1-p)\n E_nopool_var = (MLB_contract - E_nopool)**2 * p + (minor_contract - E_nopool)**2 * (1-p)\n return E_nopool, E_nopool_var**0.5", "def get_sale_price(self):\n Currency = Pool().get('currency.currency')\n Company = Pool().get('company.company')\n\n if self.carrier_cost_method != 'gls':\n return super(Carrier, self).get_sale_price() # pragma: no cover\n\n currency, = Currency.search([('code', '=', 'EUR')])\n company = 
Transaction().context.get('company')\n\n if company:\n currency = Company(company).currency\n\n return Decimal('0'), currency.id", "def get_money(self, money: float):\n\n assert isinstance(money, float), f\"{money} must be float.\"\n assert money > 0.0, f\"{money} must be a positive number.\"\n assert self.money >= money,(\n f\"There's no enough {money} in the account. \" \n f\"Current money: {self.money}\"\n )\n self.money -= money", "def somme(self) -> Numeric:\n return query_sum(self.offres(), \"prix\", output_field=models.DecimalField())", "def _compute_gasto_subtotal(self):\n beneficio = ingreso_subtotal - gasto_subtotal_comercial", "def debitMoney(user_id, expense_group_id, cursor):\n query = \"\"\"\n SELECT a.user_id, SUM(a.amount) as amount\n FROM accured_expenses AS a, expense AS e\n WHERE a.expense_id = e.id AND e.user_id = ? AND e.expense_group_id = ? AND a.paid = 0\n GROUP BY a.user_id \n \"\"\"\n cursor.execute(query, (user_id, expense_group_id))\n return cursor.fetchall()", "def amount(self):\n\n\t\telement = Element(driver=self.driver,\n\t\t explicit_wait_time=self.explicit_wait_time,\n\t\t locator=BillPayPageLocator.AMOUNT_INPUT)\n\t\treturn element.element_value", "def somme_encaissee(self) -> Numeric:\n return query_sum(\n self.offres().filter(paye=True),\n \"prix\",\n output_field=models.DecimalField(),\n )", "def monome_plus_grand_degre(self):\n\t\tif self.__tete:\n\t\t\t\"\"\" plus petit == plus a gauche dans l'ABR \"\"\"\n\t\t\treturn self.__tete.plus_petit()\n\t\telse:\n\t\t\treturn None", "def amount_on_field(self):\n\n query = \"\"\" SELECT SUM(field_entry.value::DOUBLE PRECISION)\n FROM crowdataapp_documentsetfieldentry field_entry\n INNER JOIN crowdataapp_documentsetformentry form_entry ON form_entry.id = field_entry.entry_id\n INNER JOIN crowdataapp_document document ON document.id = form_entry.document_id\n WHERE document.document_set_id = %d\n AND field_entry.verified = TRUE\n AND field_entry.field_id = %d\"\"\" % ( self.id, self.tosum_field.id)\n\n cursor = connection.cursor()\n cursor.execute(query)\n\n amount = cursor.fetchall()[0][0]\n\n return amount", "def partial_charge(self, params):\n return self.post(f\"{self.gateway_path}/partial_debit\", params)", "def get_money(self):\n return self.money", "def get_pl_balances(self):\n\n\t\tdimension_fields = ['t1.cost_center']\n\n\t\tself.accounting_dimensions = get_accounting_dimensions()\n\t\tfor dimension in self.accounting_dimensions:\n\t\t\tdimension_fields.append('t1.{0}'.format(dimension))\n\n\t\treturn frappe.db.sql(\"\"\"\n\t\t\tselect\n\t\t\t\tt1.account, t2.account_currency, {dimension_fields},\n\t\t\t\tsum(t1.debit_in_account_currency) - sum(t1.credit_in_account_currency) as bal_in_account_currency,\n\t\t\t\tsum(t1.debit) - sum(t1.credit) as bal_in_company_currency\n\t\t\tfrom `tabGL Entry` t1, `tabAccount` t2\n\t\t\twhere t1.is_cancelled = 0 and t1.account = t2.name and t2.report_type = 'Profit and Loss'\n\t\t\tand t2.docstatus < 2 and t2.company = %s\n\t\t\tand t1.posting_date between %s and %s\n\t\t\tgroup by t1.account, {dimension_fields}\n\t\t\"\"\".format(dimension_fields = ', '.join(dimension_fields)), (self.company, self.get(\"year_start_date\"), self.posting_date), as_dict=1)", "def _format_contract_number_partyA_21N(self, val):\n return val", "def amount_to_charge(opportunity):\n amount = float(opportunity.amount)\n if opportunity.agreed_to_pay_fees:\n total = (amount + 0.30) / (1 - 0.022)\n else:\n total = amount\n return quantize(total)", "def _compute_amount_qty_delivered(self):\n for 
line in self:\n # if line.product_id.invoice_policy == 'delivery':\n # qty = line.qty_delivered\n # else:\n # qty = line.product_uom_qty\n # line.price_total_without_discount = qty * line.price_unit\n # line.price_discount = (line.price_total_without_discount * line.discount) / 100\n line.update({\n # 'price_discount': line.price_discount,\n # 'price_total_without_discount': line.price_total_without_discount,\n 'sea_price_total_qty_delivered': line.untaxed_amount_to_invoice + line.untaxed_amount_invoiced,\n })", "def amount_bet(self, user):\n try:\n return Bet.objects.get(group_match = self, user=user).amount\n except Bet.DoesNotExist:\n return Decimal(0)", "def _amount(amount, asset='HBD'):\n assert asset == 'HBD', 'unhandled asset %s' % asset\n return \"%.3f HBD\" % amount", "def extract_price_excluding_tax(soup):\r\n price_excluding_tax = extract_attributs_book(soup)['Price (excl. tax)']\r\n return price_excluding_tax", "def getAmount1(*args):", "def make_pi_against_localsupp(po_doc,stock_doc):\n\n\n\tuser_doc = frappe.db.get_value(\"User\",{\"name\":frappe.session.user},['operator_type','company', 'branch_office'], as_dict =1)\n\t#co = frappe.db.get_value(\"Village Level Collection Centre\",{\"name\":user_doc.get('branch_office')},\"camp_office\")\n\texpense_account = frappe.db.get_value(\"Address\",po_doc.camp_office, \"expense_account\")\n\tpi = frappe.new_doc(\"Purchase Invoice\")\n\tpi.supplier = po_doc.supplier\n\tpi.company = po_doc.company\n\t# pi.camp_office = frappe.db.get_value(\"Village Level Collection Centre\",{\"name\":user_doc.get('company')},\"camp_office\")\n\tpi.camp_office = po_doc.camp_office\n\tpi.buying_price_list = get_price_list()\n\tif expense_account:\n\t\tpi.remarks = \"[#\"+expense_account+\"#]\"\n\tfor row_ in stock_doc.items:\n\t\tpi.append(\"items\",\n\t\t\t{\n\t\t\t\t\"qty\":row_.qty,\n\t\t\t\t\"item_code\": row_.item_code,\n\t\t\t\t# \"rate\": row_.rate, #frappe.db.get('Item Price',{'name':row_.item_code,'buying':'1','company':po_doc.company,'price_list':po_doc.buying_price_list},'rate'),\n\t\t\t\t\"purchase_order\": po_doc.name\n\t\t\t})\n\treturn pi", "def bill_to_global_ultimate(self):\n return self._bill_to_global_ultimate", "def amount(self):\n return self.__amount", "def amount(self):\n return self.__amount" ]
[ "0.65945584", "0.5881917", "0.5234184", "0.52274364", "0.5213804", "0.5200786", "0.5200704", "0.5107766", "0.501036", "0.49734", "0.49710652", "0.49704546", "0.49508998", "0.4943134", "0.4939742", "0.49362305", "0.4931719", "0.49290752", "0.49084964", "0.4873696", "0.48604906", "0.48111403", "0.4803902", "0.47809243", "0.476887", "0.47643673", "0.4758666", "0.47568178", "0.47402665", "0.4738357", "0.472047", "0.4698798", "0.46751353", "0.4669745", "0.46639794", "0.46633008", "0.46612537", "0.46610436", "0.46440932", "0.4643132", "0.4631122", "0.46305963", "0.46232623", "0.4608624", "0.46018526", "0.46006748", "0.46001035", "0.45897874", "0.4580374", "0.45577595", "0.45575944", "0.4554763", "0.45547506", "0.45542148", "0.45541322", "0.45530757", "0.45414045", "0.454134", "0.45362112", "0.4535377", "0.45272022", "0.45258263", "0.45173565", "0.4516264", "0.4511996", "0.4510056", "0.4506569", "0.45012966", "0.44998989", "0.44983384", "0.4494069", "0.44896328", "0.44893917", "0.44879523", "0.44877222", "0.4481457", "0.44776702", "0.44767207", "0.44710734", "0.44710612", "0.4469147", "0.44689822", "0.44606054", "0.4458509", "0.44559872", "0.44559363", "0.4455628", "0.44539773", "0.44436973", "0.44426024", "0.44402272", "0.4438949", "0.44388387", "0.4438327", "0.44355124", "0.4434488", "0.44172886", "0.44131967", "0.4409007", "0.4409007" ]
0.66093844
0
Gets the usable gr_amount
def get_usable_gr(supplier_id: int, party_id: int) -> int: # Open a new connection db, cursor = db_connector.cursor() query = "select SUM(settle_amount) from gr_settle where supplier_id = '{}' AND party_id = '{}'".format( supplier_id, party_id) cursor.execute(query) data = cursor.fetchall() db.disconnect() if data[0][0] is None: return get_gr(supplier_id, party_id) else: return get_gr(supplier_id, party_id) - int(data[0][0])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getamount(self):\n return self.__amount", "def get_gr(supplier_id: int, party_id: int) -> int:\n # Open a new connection\n db, cursor = db_connector.cursor()\n\n query = \"select gr_amount from supplier_party_account where supplier_id = '{}' AND party_id = '{}'\".format(\n supplier_id, party_id)\n cursor.execute(query)\n data = cursor.fetchall()\n\n db.disconnect()\n\n if len(data) == 0:\n return 0\n return int(data[0][0])", "def get_billed_amount(self):\n return Leg.objects.filter(transaction__recurred_cost__recurring_cost=self, amount__gt=0).sum_to_balance()", "def getAmount(self):\n return self.amount", "def get_total_supply() -> int:\n return total_supply", "def amount(self):\n return self.__amount", "def amount(self):\n return self.__amount", "def budget_used(self):\n return int(self.total_spent() / self.budget() * 100.0)", "def get_usage(self):\r\n return self.box_usage", "def getAmount(self):\n return self.base.get(\"amount\", [])", "def amount(self) -> float:\n return self._amount", "def amount(self) -> float:\n return self._amount", "def amount(self):\n return self._amount", "def amount(self):\r\n return self._data['amount']", "def get_used_balance():\n try:\n if CONF.exchange == 'bitmex':\n position = EXCHANGE.private_get_position()\n if not position:\n return None\n return position[0]['currentQty']\n if CONF.exchange == 'kraken':\n result = EXCHANGE.private_post_tradebalance()['result']\n return round(float(result['e']) - float(result['mf']))\n if CONF.exchange == 'liquid':\n return round(get_crypto_balance()['used'] * get_current_price())\n\n except (ccxt.ExchangeError, ccxt.NetworkError) as error:\n LOG.error(RETRY_MESSAGE, type(error).__name__, str(error.args))\n sleep_for(4, 6)\n get_used_balance()", "def money(self):\r\n return self._money", "def state(self):\n if self._xfinity_data.total_usage is not None:\n return self._xfinity_data.total_usage", "def get_usage(self):\n return self.box_usage", "def amount(self):\n\n\t\telement = Element(driver=self.driver,\n\t\t explicit_wait_time=self.explicit_wait_time,\n\t\t locator=BillPayPageLocator.AMOUNT_INPUT)\n\t\treturn element.element_value", "def BuyingPrice(self):\n return self.buying_rice", "def __get__(self) -> float:\n\n return float(self.balance)", "def qty_available(quant) -> float:\n return quant.quantity - quant.reserved_quantity", "def get_grating(self):\n grating = c_int()\n self._dll.ShamrockGetGrating(self._device, byref(grating))\n return grating.value", "def get_money(self):\n return self.money", "def amount_bet(self, user):\n try:\n return Bet.objects.get(group_match = self, user=user).amount\n except Bet.DoesNotExist:\n return Decimal(0)", "def get_gross(self):\n\n try:\n receiver = self.cleaned_data[\"customer\"]\n net_total = float(self.cleaned_data[\"net\"])\n except KeyError:\n return \"------\"\n except AttributeError:\n return \"------\"\n\n if receiver and net_total:\n try:\n tax = float(self.INTERNAL_TAX_DICT[receiver])\n except KeyError:\n tax = float(self.INTERNAL_TAX_DICT[self.cleaned_data[\"warehouse\"]])\n gross_total = round(net_total * tax + net_total, 2)\n\n return f\"{gross_total:.2f}\"\n\n return \"------\"", "def amount_on_field(self):\n\n query = \"\"\" SELECT SUM(field_entry.value::DOUBLE PRECISION)\n FROM crowdataapp_documentsetfieldentry field_entry\n INNER JOIN crowdataapp_documentsetformentry form_entry ON form_entry.id = field_entry.entry_id\n INNER JOIN crowdataapp_document document ON document.id = form_entry.document_id\n WHERE document.document_set_id = %d\n AND 
field_entry.verified = TRUE\n AND field_entry.field_id = %d\"\"\" % ( self.id, self.tosum_field.id)\n\n cursor = connection.cursor()\n cursor.execute(query)\n\n amount = cursor.fetchall()[0][0]\n\n return amount", "def amount(self):\n return(self.order_master.amount)", "def money(self):\n return self._money", "def get(self) -> float:\n ...", "def get_bill_amt():\n\n return float(input(\"How much was your total bill: \"))", "def _compute_amount(self):\n raise NotImplementedError()", "def amount(self) -> pulumi.Output['outputs.BudgetAmount']:\n return pulumi.get(self, \"amount\")", "def amount(self) -> pulumi.Output['outputs.BudgetAmount']:\n return pulumi.get(self, \"amount\")", "def get_amount(self):\n\t\tif self.amount is not None:\n\t\t\treturn self.amount\n\t\treturn abort(400, {\"message\" : \"please provide the amount to process\"})", "def get_money(self) -> float: \n money = get_owned()\n try:\n assert type(self.owned_init) == float\n except AssertionError: #The first time one tries to make a bet this is evoked\n self.owned_init = money\n finally:\n return money", "def getCurrentBalance(self):\r\n return self.balance_amt", "def get_fuel_total_saved (self):\n return self.electric_diesel_reduction + self.reduction_diesel_used", "def getBalance(self):\n connection = sqlite3.connect('/home/BorneAgain/Desktop/flasktest/accounts.db')\n\n cursor = connection.cursor()\n\n sql_command = \"\"\"select amount from accounts where name=?;\"\"\"\n\n cursor.execute(sql_command, (self.name, ))\n\n return round(float(re.sub(r'[\\(\\),]', '', str(cursor.fetchone()))), 2)", "def _total_d(self):\n debit = 0.0\n for l in self.data:\n debit += l['debit']\n self.t_credit += l['credit']\n self.t_balance += l['balance']\n return debit", "def get_balance(self):\r\n return self.balance", "def credits_earned(self):\n\n if self.grade() >= 69.5:\n return self.nCredits\n else:\n return 0.0", "def get_balance(self):\n final_amount = 0\n for i in range(len(self.ledger)):\n final_amount += self.ledger[i]['amount']\n return final_amount", "def get_total(self):\n\n # Total sum\n self.sum = 0.00\n\n # Determine which Check buttons are selected\n # and add the charges to find the total\n if self.check_1.get() == 1:\n self.sum += 30.00\n if self.check_2.get() == 1:\n self.sum += 20.00\n if self.check_3.get() == 1:\n self.sum += 40.00\n if self.check_4.get() == 1:\n self.sum += 100.00\n if self.check_5.get() == 1:\n self.sum += 35.00\n if self.check_6.get() == 1:\n self.sum += 200.00\n if self.check_7.get() == 1:\n self.sum += 20.00\n\n # Convert the sum to string\n # and store in StringVar object\n # to automatically update the total_val label\n self.sum_str.set(self.sum)", "def amount(self) -> int:\n return self._amount", "def total_spent(self):\n total_sum = Order.objects.filter(\n email=self.email).aggregate(\n Sum('total_price')\n ).get('total_price__sum')\n return round(total_sum, 4) if total_sum else 0", "def dilutionneeded(self) -> float:\n return self.stock*1.0/self.final", "def get_total(self):\r\n \r\n return str(round(self._total, 2))", "def getGatingHold(self, channel, unitCode=0):\n resp = self.XAPCommand('GHOLD', channel, unitCode=unitCode)\n return float(resp)", "def get_total_paid(self):\n return sum(self.paid)", "def get_usage(self, source):\n return sum(i.quantity for i in self.items.all() if i.source == source)", "def _sum_g_i(self) -> float:\n elems = self.composition.get_el_amt_dict()\n\n if self.interpolated:\n sum_g_i = 0\n for elem, amt in elems.items():\n g_interp = interp1d(\n [float(t) for t in 
G_ELEMS.keys()],\n [g_dict[elem] for g_dict in G_ELEMS.values()],\n )\n sum_g_i += amt * g_interp(self.temp)\n else:\n sum_g_i = sum(amt * G_ELEMS[str(self.temp)][elem] for elem, amt in elems.items())\n\n return sum_g_i", "def current_summation_delivered(self):\n return self._safe_value(VAR_CURRENTSUMMATIONDELIVERED, float)", "def current_summation_received(self):\n return self._safe_value(VAR_CURRENTSUMMATIONRECEIVED, float)", "def get_total_lui(self):\r\n \r\n return str(round(self._total_lui, 2))", "def get_gst_subtotals(self):\n self.__subtotal_gst = 0\n for current_item in self.__items_list:\n self.__subtotal_gst += current_item.calculate_gst()\n return self.__subtotal_gst", "def get_outbound_statement_grid_amount_column_value(self):\n self.grid_row_details_dictionary = self.get_outbound_statement_grid_row_data()\n amount = self.grid_row_details_dictionary[self.outbound_statement_grid_amount_column_name]\n return amount", "def available_cash(self):\n return self._cash", "def total(self):\n\t\treturn self._total", "def sum_availability(val, quant) -> float:\n return val + qty_available(quant)", "def get_value(self):\r\n return 0", "def getUserCurrency():", "def __get_balance(self):\n return self.__balance", "def clean_amount(self):\n if self.payer_channel == 2: # ignore balance check if not using wallet\n return self.cleaned_data['amount']\n else:\n pay_amount = self.cleaned_data.get('amount')*100\n payer_wallet = Wallet.objects.filter(wallet_id=self.cleaned_data.get('payer_method')).first()\n if payer_wallet is None:\n raise forms.ValidationError(\n self.error_messages['payer wallet unavailable'],\n code='payer wallet unavailable'\n )\n else:\n payer_balance = payer_wallet.balance\n if pay_amount > payer_balance:\n raise forms.ValidationError(\n self.error_messages['no_enough_balance'],\n code='no_enough_balance'\n )\n else:\n return self.cleaned_data['amount']", "def total_management_cost(self):\n total = 0\n total += self.output_dict['insurance_usd']\n total += self.output_dict['construction_permitting_usd']\n total += self.output_dict['bonding_usd']\n total += self.output_dict['project_management_usd']\n total += self.output_dict['markup_contingency_usd']\n total += self.output_dict['engineering_usd']\n total += self.output_dict['site_facility_usd']\n return total", "def get_total(self):\n\n total = super().get_total()\n if self.qty < 10:\n total += 3.00\n return total", "def get_tx_amount():\n return float(input(\"Enter Transaction Amount: \"))", "def GOAL_TOTAL() -> int:\n return 21", "def unconsumedValue(self):\n\n\t\tvalue = 0\n\t\tfor bottle in self.bottles:\n\t\t\tif bottle.consumption == None:\n\t\t\t\tvalue += bottle.inflatedCost\n\n\t\treturn value", "def calculate_gpa(self):\n cur_node = self.head\n gpa = 0\n total_credits = 0\n while cur_node is not None:\n gpa += cur_node.data.grade() * cur_node.data.credit_hr()\n total_credits += cur_node.data.credit_hr()\n cur_node = cur_node.next\n if total_credits == 0:\n return 0\n return gpa / total_credits", "def getGemPrice():\n return Gw2Spidy._request('gem-price')", "def get_balance(self):\n return self.balance", "def get_balance(self):\n return self.balance", "def getFineAmount(id):\n data = db.Database(filename=\"IFB299.db\")\n fine = data.retrieve(\"finePayments\",\"Fine_Number\",id)\n if fine['Citation_Type'] == \"Parking\":\n amt = 90 + getOverDuePayment(id)\n else:\n amt = 50 + getOverDuePayment(id)\n data.close()\n return amt", "def cost(self):\n assert(self._calculated)\n settings = 
config_get_group('shipping.modules.ups')\n if settings.HANDLING_FEE and Decimal(str(settings.HANDLING_FEE)) > Decimal(0):\n self.charges = Decimal(self.charges) + Decimal(str(settings.HANDLING_FEE))\n\n return(Decimal(self.charges))", "def get_cash(self):\r\n return self.cash", "def calc_total_btc():\n total_btc_val = 0\n for holding in val[\"accHoldings\"]:\n free = val[\"accHoldings\"][holding][\"free\"]\n locked = val[\"accHoldings\"][holding][\"locked\"]\n total = float(free) + float(locked)\n\n if holding + \"BTC\" in val[\"coins\"]:\n if holding != \"BTC\" and total * float(val[\"tickers\"][holding + \"BTC\"][\"lastPrice\"]) > 0.001:\n\n coin_total = total * float(val[\"tickers\"][holding + \"BTC\"][\"lastPrice\"])\n total_btc_val += coin_total\n\n elif holding == \"BTC\":\n total_btc_val += total\n\n total_formatted = '{number:.{digits}f}'.format(number=float(total_btc_val), digits=8) + \" BTC\"\n # print(\"total: \" + total_formatted)\n return total_formatted", "def remaining(self):\n if self.goal:\n return self.goal - self.total_donated()\n else:\n return 0", "def get_inbound_statements_grid_pending_amount(self):\n return self.get_specific_column_value_from_grid(self.inbound_statement_grid_div_id, self.inbound_statements_grid_row_count, self.inbound_statements_grid_pending_amount_column_name)", "def total(self) -> float:\n return self._total", "def __balance__(self) -> float:\n\n with dataset.connect(database.get_db()) as db:\n # Find last bank transaction.\n statement = statement = f\"\"\"\n SELECT opening_balance, transaction_amount\n FROM bank\n WHERE author_id = {self.user.id}\n ORDER BY id DESC\n LIMIT 1\n \"\"\"\n result = db.query(statement)\n\n for row in result:\n balance = row[\"opening_balance\"] + row[\"transaction_amount\"]\n break\n else:\n # If there was no result for the user, default balance is given.\n balance = 500\n\n return float(balance)", "def getRate(self) -> int:\n if (self._total_stake.get() + self._daily_reward.get()) == 0:\n rate = DENOMINATOR\n else:\n rate = (self._total_stake.get() + self._daily_reward.get()) * DENOMINATOR // self.sICX_score.totalSupply()\n return rate", "def value(self) -> int:\n return self.n000_val + self.grp_val", "def convenience_fee_amount(self):\n return self._convenience_fee_amount", "def amount_raised_online(self):\r\n return self.stats.amount_raised_online", "def get_total_elle(self):\r\n \r\n return str(round(self._total_elle, 2))", "def total_donation(self):\n return self._total_donation", "def installment_amount(self) -> Amount:\n return self._installment_amount", "def get_amount(self): \n return len(self.get_cards())", "async def get_garages_total(self):\r\n async with self._db.acquire() as conn:\r\n s = await (await conn.execute(Garage.count())).scalar()\r\n return s", "def amount_raised(self):\r\n return self.stats.amount_raised", "def get_balance(self) -> float:\n return self._balance", "def get_usage(self):\n res = self.conn.get_send_quota()\n res = res['GetSendQuotaResponse']\n result = res['GetSendQuotaResult']\n quota = float(result['Max24HourSend'])\n sent = float(result['SentLast24Hours'])\n return sent, quota", "def total_raised(self):\n return self.total_donated() + (self.community_contribution or 0)", "def value_of_live_book(expected_outstanding_repayment):\n return expected_outstanding_repayment.expected_repayment.sum()", "def getGateRatio(self, channel, unitCode=0):\n resp = self.XAPCommand('GRATIO', channel, unitCode=unitCode)\n return float(resp)", "def assets(self) -> float:\n return self._money", "def 
get_fuel_total_saved (self):\n #~ print self.lost_heat_recovery\n #~ print self.intertie_offset_generation_fuel_used\n #~ print self.pre_intertie_generation_fuel_used\n #~ gen_eff = self.cd[\"diesel generation efficiency\"]\n #~ fuel_used = self.intertie_offset_generation / gen_eff\n\n generation_diesel_reduction = \\\n np.array(self.pre_intertie_generation_fuel_used\\\n [:self.actual_project_life])\n return - np.array(self.lost_heat_recovery[:self.actual_project_life]) +\\\n generation_diesel_reduction", "def get_total(self) -> float:\n if self.__open:\n raise RuntimeError(\"Cash drawer must be closed to count.\")\n total: float = 0.0\n for denom in CashDenomination:\n total += self.__contents[denom] * denom.amount\n return total", "def amount_due(self):\n queryset = self.supplyorderitem_set.filter(delivery_date__isnull=False).aggregate(\n amount_due=Sum(F('unit_price')*F('quantity_ordered'))\n )\n return queryset['amount_due'] or 0" ]
[ "0.68214357", "0.68126625", "0.66985095", "0.6473604", "0.6365867", "0.63424534", "0.63424534", "0.6316719", "0.623471", "0.6230868", "0.622357", "0.622357", "0.62221104", "0.61813056", "0.61567175", "0.61359364", "0.6116038", "0.61114824", "0.61012554", "0.60943663", "0.60817873", "0.6068271", "0.60313094", "0.60282373", "0.60236925", "0.59629065", "0.59596187", "0.5949983", "0.59480876", "0.5938578", "0.5937666", "0.5937188", "0.5919046", "0.5919046", "0.5914697", "0.5892694", "0.58875257", "0.5882636", "0.5877506", "0.58663267", "0.5855683", "0.5843441", "0.5790667", "0.57852966", "0.57833254", "0.5782561", "0.578191", "0.57770145", "0.5770974", "0.5768349", "0.57659394", "0.5764772", "0.57605445", "0.5749136", "0.572161", "0.5720392", "0.57068133", "0.57002085", "0.5695187", "0.5694575", "0.5693625", "0.5688847", "0.5687798", "0.5677942", "0.56777877", "0.5671726", "0.5665908", "0.56655663", "0.5661873", "0.56601566", "0.56517524", "0.5648879", "0.5648879", "0.5648383", "0.5644052", "0.5642189", "0.56403834", "0.5639554", "0.563663", "0.56258804", "0.56258404", "0.5624612", "0.56166303", "0.5615679", "0.5615304", "0.5608725", "0.55972826", "0.5593499", "0.55922806", "0.5588872", "0.55888206", "0.55867654", "0.5586121", "0.5580274", "0.5576823", "0.55721337", "0.5569406", "0.5564667", "0.5560333", "0.556017" ]
0.6732322
2
Get the gr_between dates used to settle the account
def get_gr_between_dates(supplier_id: int, party_id: int, start_date: str, end_date: str) -> int: # Open a new connection db, cursor = db_connector.cursor() start_date = str(datetime.datetime.strptime(start_date, "%d/%m/%Y")) end_date = str(datetime.datetime.strptime(end_date, "%d/%m/%Y")) query = "select SUM(settle_amount) from gr_settle where " \ "party_id = '{}' AND supplier_id = '{}' AND " \ "start_date >= '{}' AND end_date <= '{}';".format(party_id, supplier_id, start_date, end_date) cursor.execute(query) data = cursor.fetchall() db.disconnect() if data[0][0] is None or len(data) == 0 or data[0][0] == 0: return -1 return data[0][0]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def rate_between(self, from_date, to_date):\n print(\"override the above\")", "def date_range(self):\n start_date = input(\"Enter a start date in the format DD/MM/YYYY> \")\n end_date = input(\"Enter an end date in the format DD/MM/YYYY> \")\n return start_date, end_date", "def date_start_end(mytrip_start_date,mytrip_end_date):\n mytrip_start_date = dt.date(2015, 8, 10)\n mytrip_end_date= dt.date(2015, 8,14)\n prev_year = dt.timedelta(days=365)\n start_dt_strftime=dt.datetime.strptime('2014-08-10',\"%Y-%m-%d\")\n end_dt_strftime=dt.datetime.strptime('2014-08-14',\"%Y-%m-%d\") \n date_start_end_results=session.query(func.min(measurements.tobs), func.avg(measurements.tobs),func.max(measurements.tobs)).\\\n filter(measurements.date >= mytrip_start_date).filter(measurements.date <= end_dt_strftime).all()\n return(date_start_end_results)", "def get_state_in_period(course_key, from_date, to_date):\n enrollment_stat = (\n EnrollmentTabCache.objects\n .filter(course_id=course_key, created__range=(from_date, to_date))\n .values('unenroll', 'enroll', 'total', 'created')\n .order_by('created')\n )\n return enrollment_stat", "def report_start_and_end_date(self):\n start_date, end_date = self.start_date, self.end_date\n if start_date:\n db_import_time = time.strptime(str(start_date), \"%Y-%m-%d %H:%M:%S\")\n db_import_time = time.strftime(\"%Y-%m-%dT%H:%M:%S\", db_import_time)\n start_date = time.strftime(\"%Y-%m-%dT%H:%M:%S\", time.gmtime(\n time.mktime(time.strptime(db_import_time, \"%Y-%m-%dT%H:%M:%S\"))))\n start_date = str(start_date) + 'Z'\n else:\n today = datetime.now()\n earlier = today - timedelta(days=30)\n earlier_str = earlier.strftime(\"%Y-%m-%dT%H:%M:%S\")\n start_date = earlier_str + 'Z'\n\n if end_date:\n db_import_time = time.strptime(str(end_date), \"%Y-%m-%d %H:%M:%S\")\n db_import_time = time.strftime(\"%Y-%m-%dT%H:%M:%S\", db_import_time)\n end_date = time.strftime(\"%Y-%m-%dT%H:%M:%S\", time.gmtime(\n time.mktime(time.strptime(db_import_time, \"%Y-%m-%dT%H:%M:%S\"))))\n end_date = str(end_date) + 'Z'\n else:\n today = datetime.now()\n earlier_str = today.strftime(\"%Y-%m-%dT%H:%M:%S\")\n end_date = earlier_str + 'Z'\n\n return start_date, end_date", "def get_period_range(self, period, start, end, inclusive_start=True, inclusive_end=True):\n if not isinstance(start, datetime.datetime):\n start = self.get_date_from_string(start, '%Y-%m-%d')\n if not isinstance(end, datetime.datetime):\n end = self.get_date_from_string(end, '%Y-%m-%d')\n\n if period == 'month':\n get_period = self.get_current_month_range\n get_next_period = self.get_next_month\n get_previous_period = self.get_previous_month\n if period == 'week':\n get_period = self.get_current_week_range\n get_next_period = self.get_next_week\n get_previous_period = self.get_previous_week\n\n #####################\n # inclusive_start means that the result set will include the whole period\n # containing the start date. 
Likewise for inclusive_end.\n #\n # If you are, say, reporting on a 'last completed month' or something,\n # but your report date (and end date) is mid-month or something, then setting 'inclusive_end'\n # to False will insure that the report ends with the month prior to the\n # end date.\n #\n # If you're doing projections starting with the month following the one\n # you're in, setting inclusive_start to False will insure that the first\n # period in the range is the one *after* the period you're in now.\n #######################\n if not inclusive_start:\n start = get_next_period(start)[0]\n if not inclusive_end:\n end = get_previous_period(end)[1]\n\n returnvals = []\n\n\n firstper = get_period(start)\n returnvals.append(firstper)\n per = firstper\n while per[1] < end:\n # goes as long as the *end* of the period is < our end date.\n # the intent is that if end is 2010-10-04, the last period will be\n # (2010-10-01, 2010-10-31)\n per = get_next_period(per[1])\n returnvals.append(per)\n\n return returnvals", "def get_dates():\n return {\n \"years\": range(datetime.date.today().year, datetime.date.today().year + 5),\n \"months\": range(1, 13),\n \"days\": range(1, 32)\n }", "def period_dates(period):\n end = date.today() - timedelta(days=1) # yesterday\n\n if period == LAST_7_DAYS:\n start = end - timedelta(days=7)\n elif period == LAST_30_DAYS:\n start = end - timedelta(days=30)\n elif period == LAST_90_DAYS:\n start = end - timedelta(days=90)\n elif ALL_TIME:\n start = settings.GA_START_DATE\n\n return start, end", "def _rate_dates(self, common_object):\n if common_object.IsKindOf(acm.FCashFlow):\n start_date = common_object.StartDate()\n elif common_object.IsKindOf(acm.FReset):\n start_date = common_object.Day()\n else:\n message = \"Rate dates for {0} object are not defined\".format(\n type(common_object))\n raise ProvisionHandlerError(message)\n\n end_date = acm.Time().DateAddDelta(start_date, 0, 3, 0)\n end_date = self._adjust_to_banking_day(end_date)\n\n return (start_date, end_date)", "def dates_inbetween(self, start, end):\n\n return [start + timedelta(days=i) for i in xrange((end - start).days + 1)]", "def range_date():\n # Query all stations within a certain range\n data = [Measurement.date, func.max(Measurement.tobs), func.min(Measurement.tobs), func.avg(Measurement.tobs)]\n qry = session.query(*data).filter(Measurement.date.between('2014-01-17', '2017-01-01')).all()\n before_date = list(np.ravel(qry))\n\n return jsonify(before_date)", "def pullGateCountDateRange(start_date, end_date):\n start_date = parser.parse(start_date)\n end_date = parser.parse(end_date) + timedelta(days=1)\n dates = []\n for single_date in daterange(start_date, end_date):\n dates.append(single_date.strftime(\"%Y-%m-%d\"))\n for i in range(len(dates)):\n req = pullGateCount(dates[i], dates[i+1])\n data = req.json()\n if req.status_code >= 400:\n print(\"Error1:\", dates[i], json.dumps(data, indent=0))\n else:\n # Load data\n for itm in data[\"results\"]:\n tmpTZD = {}\n localDT = parser.parse(itm[\"recordDate_hour_1\"]).replace(\n tzinfo=pytz.utc).astimezone(local_tz)\n tmpTZD['local_timestamp'] = localDT.isoformat()\n tmpTZD['year'] = localDT.year\n tmpTZD['month'] = localDT.month\n tmpTZD['day'] = localDT.day\n tmpTZD['hour'] = localDT.hour\n tmpTZD['minute'] = localDT.minute\n tmpTZD['second'] = localDT.second\n tmpTZD['time_zone_name'] = localDT.tzname()\n tmp = itm\n tmp['localDateTime'] = tmpTZD\n saveCybercomData(tmp)\n # print(dates[i])\n if dates[i+1] == dates[-1]:\n break\n return \"Date(s) 
Imported/Updated: {0}\".format(\",\".join(dates[:-1]))", "def circulation_upcoming_return_range():\n return arrow.utcnow() + timedelta(\n days=current_app.config[\"ILS_UPCOMING_RETURN_RANGE\"])", "def test_date_range_fields():\n now = datetime.datetime(2017, 6, 13, 9, 44, 31, 62870)\n fields = {\n 'estimated_land_date_after': now,\n 'estimated_land_date_before': now,\n 'adviser.id': 1234,\n }\n\n filters, ranges = _split_range_fields(fields)\n\n assert filters == {\n 'adviser.id': 1234,\n }\n assert ranges == {\n 'estimated_land_date': {\n 'gte': now,\n 'lte': now,\n },\n }", "def test_date_interval(self, init_date, end_date):\n self.calc_earning(self.security[(self.security['Date'] > init_date) &\n (self.security['Date'] < end_date)])", "def get_day_range(self, order_date, reg_date):\n\t\tdays = (order_date - reg_date).days\n\t\tfor day_range in self.day_ranges:\n\t\t\tif days >= day_range[0] and days <= day_range[1]:\n\t\t\t\treturn day_range\n\t\treturn []", "def date_range(self, start, end, check_date):\n if start <= end:\n return start <= check_date <= end\n else:\n return start <= check_date or check_date <= end", "def between(start, end):\n # retrieve temp observations from start and end dates given and convert to list\n between_dates = list(session.query(Measurement.date, func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)).filter(Measurement.date >= start).filter(Measurement.date <= end).group_by(Measurement.date).all())\n return jsonify(between_dates)", "def _date_range(self, granularity, since, to=None):\n if since is None:\n since = datetime.utcnow() - timedelta(days=7) # Default to 7 days\n\n if to is None:\n to = datetime.utcnow()\n elapsed = (to - since)\n\n # Figure out how many units to generate for the elapsed time.\n # I'm going to use `granularity` as a keyword parameter to timedelta,\n # so I need to change the wording for hours and anything > days.\n if granularity == \"seconds\":\n units = elapsed.total_seconds()\n units = 300 if units > 300 else units\n elif granularity == \"minutes\":\n units = elapsed.total_seconds() / 60\n units = 480 if units > 480 else units\n elif granularity == \"hourly\":\n granularity = \"hours\"\n units = elapsed.total_seconds() / 3600\n units = 720 if units > 720 else units\n else:\n granularity = \"days\"\n units = elapsed.days + 1\n\n return (to - timedelta(**{granularity: u}) for u in range(int(units)))", "def created_between(self, date_a: datetime, date_b: datetime):\n return self.created_search(date_a, date_b, search_type=\"between\")", "def find_within_dates(self,\r\n datefrom=(1,1,1),\r\n dateto=(3000,12,31),\r\n withinrange=None,\r\n orequal=False,\r\n most_recent=False):\r\n\r\n def convert (date):\r\n\r\n if isinstance(date,str):\r\n #If input is a string convert to a tuple\r\n date += '-01-01'\r\n date = datefrom.split(DASH)\r\n year, month, day = date[0].replace(PLUS,DASH), date[1], date[2]\r\n date = int(year), int(month), int(day)\r\n if isinstance(date, (list,tuple)):\r\n #If a tuple, convert to a datetime object\r\n date = datetime.datetime(date[0],date[1],date[2])\r\n return date\r\n\r\n if withinrange is None:\r\n #If not range assigned, default to all indexes\r\n withinrange = self.indexes()\r\n\r\n datefrom = convert(datefrom)\r\n dateto = convert(dateto)\r\n\r\n\r\n if not orequal:\r\n return [a_temp for a_temp in withinrange\r\n if self.get_note(str(Index(a_temp))).date(most_recent=most_recent,\r\n short=True,\r\n convert=True)> datefrom\r\n and 
self.get_note(str(Index(a_temp))).date(most_recent=most_recent,\r\n short=True,\r\n convert=True) < dateto]\r\n return [a_temp for a_temp in withinrange\r\n if self.get_note(str(Index(a_temp))).date(most_recent=most_recent,\r\n short=True,\r\n convert=True) >= datefrom and\r\n self.get_note(str(Index(a_temp))).date(most_recent=most_recent,\r\n short=True,\r\n convert=True) <= dateto]", "def get_range(self):\n return time_to_range(self.get_time())", "def limit_date_range_from(self):\n return self._limit_date_range_from", "def pullGateCountToday():\n now = datetime.now()\n # catch 23 hour when date changes\n start_date = now - timedelta(hours=1)\n start_date = start_date.strftime(\"%Y-%m-%d\")\n end_date = now + timedelta(days=1)\n end_date = end_date.strftime(\"%Y-%m-%d\")\n return pullGateCountDateRange(start_date, end_date)", "def get_date_range(day_positive, days_to_check):\n\t\n\treturn (\"{},{}\".format((day_positive - timedelta(days=days_to_check)).strftime(\"%Y-%m-%dT%H:%M:%SZ\"), day_positive.strftime(\"%Y-%m-%dT%H:%M:%SZ\")))", "def get_spend_by_account_custom_daterange(self, account_id, start_date, end_date):\n try:\n account = Client.objects.get(id=account_id)\n except Client.DoesNotExist:\n return\n\n spend_sum = 0\n adwords_accounts = account.adwords.all()\n for adwords_account in adwords_accounts:\n client = get_client()\n client.client_customer_id = adwords_account.dependent_account_id\n\n report_downloader = client.GetReportDownloader(version=settings.API_VERSION)\n\n campaign_report_selector = {\n 'fields': ['Cost', 'CampaignId', 'CampaignStatus', 'CampaignName', 'Labels', 'Impressions'],\n 'predicates': [\n {\n 'field': 'Cost',\n 'operator': 'GREATER_THAN',\n 'values': '0'\n },\n ],\n 'dateRange': {\n 'min': start_date.strftime('%Y%m%d'),\n 'max': end_date.strftime('%Y%m%d')\n }\n }\n\n try:\n campaign_exclusion = CampaignExclusions.objects.get(account=account)\n excluded_campaign_ids = [campaign.campaign_id for campaign in campaign_exclusion.aw_campaigns.all()]\n if len(excluded_campaign_ids) > 0:\n campaign_report_selector['predicates'].append({\n 'field': 'CampaignId',\n 'operator': 'NOT_IN',\n 'values': excluded_campaign_ids\n })\n except CampaignExclusions.DoesNotExist:\n pass\n\n campaign_report_query = {\n 'reportName': 'CAMPAIGN_PERFORMANCE_REPORT',\n 'dateRangeType': 'CUSTOM_DATE',\n 'reportType': 'CAMPAIGN_PERFORMANCE_REPORT',\n 'downloadFormat': 'CSV',\n 'selector': campaign_report_selector\n }\n\n campaign_report = Reporting.parse_report_csv_new(\n report_downloader.DownloadReportAsString(campaign_report_query))\n for campaign_row in campaign_report:\n # This is the cost for this timerange\n cost = int(campaign_row['cost']) / 1000000\n spend_sum += cost\n\n return spend_sum", "def _get_output_date_range_for(self, from_input_dt, to_input_dt):\n return from_input_dt, to_input_dt", "def get_date_range_where(start_date, end_date):\n where = {}\n if start_date:\n where[\"event_date__gt\"] = start_date.date()\n if end_date:\n where[\"event_date__lt\"] = end_date.date() + timedelta(days=1)\n return where", "def _check_dates(self, cr, uid, ids, context=None):\n for act in self.browse(cr, uid, ids, context):\n date_from = self.get_date(act.date_from)\n date_to = self.get_date(act.date_to)\n previous_ids = self.search(cr, uid, [('id','!=',act.id), ('alternative_setting_id','=',act.alternative_setting_id.id)],context=context)\n dates = self.read(cr, uid, previous_ids, ['date_from','date_to'], context=context)\n\n dates = 
[{'date_from':self.get_date(x['date_from']),'date_to':self.get_date(x['date_to'])} for x in dates]\n for date in dates:\n case0 = date['date_from'] >= date_from and date['date_to'] <= date_to\n\n case1 = date['date_from'] <= date_from and date['date_to'] >= date_to\n\n case2 = date['date_from'] <= date_from and date_from <= date['date_to'] \n\n case3 = date_from <= date['date_from'] and date['date_from'] <= date_to\n \n if case0 or case1 or case2 or case3:\n raise osv.except_osv(_('Error'), _(\"THIS RANGE OF DATE HAVE BEEN FETCHED BEFORE\"))\n return True", "def createJoinedCal(rangeLimit1, rangeLimit2):\n\n # find the latest start time and convert it to minutes\n start = max(CTM(rangeLimit1[0]), CTM(rangeLimit2[0]))\n # find the earliest stop time and convert it to minutes\n end = min(CTM(rangeLimit1[1]), CTM(rangeLimit2[1]))\n\n # create a dict containing all minutes between start and end indicating available minutes during the day\n # this is the default without considering meetings\n available = {}\n for i in range(start, end + 1):\n available[i] = True\n return available", "def get_between(self, start, end):\n now = datetime.now()\n now = datetime(now.year, now.month, now.day)\n \n assert isinstance(start, datetime), 'start need to be datetime instance'\n assert isinstance(end, datetime), 'end need to be datetime instance'\n assert start < end, 'start need to be less than end'\n assert end < now, 'end need to be less or equal than yesterday'\n assert start >= start_date, 'no data before \\\"2003-01-01\\\"'\n \n strftime = datetime.strftime\n self.db.DBFILE = \\\n strftime(start, date_str) + \"+\" + strftime(end, date_str)\n \n \n # write all the data in the file at once\n lst_dict = self._helper_get_between(start, end)\n self.db.save_iter(lst_dict)", "def _get_prorata_interval_rate(self, cr, uid, change_date, context=None):\n month_days = calendar.monthrange(change_date.year,\n change_date.month)[1]\n start_date = add_months(change_date, 1)\n end_date = start_date.replace(day=month_days)\n used_days = month_days - change_date.day\n ptx = self._prorata_rate(used_days, month_days)\n\n return start_date, end_date, ptx", "def test_shift_restrictions_gbr(self):\n records = map(\n to_integer,\n simulate_records(\n self.df_casesrecord[self.df_casesrecord[\"iso_code\"] == \"GBR\"],\n self.df_knotdateset,\n self.df_modeldaterange,\n self.df_possibledateset,\n (None, date(2020, 3, 14)),\n ),\n )\n record_gbr = select(\"GBR\", date(2020, 4, 10), records)\n self.assertDictEqual(\n {\n \"date\": date(2020, 4, 10),\n \"iso_code\": \"GBR\",\n \"area\": 241930,\n \"population\": 66834405,\n \"weekly_avg_cases\": 4781,\n \"summed_avg_cases\": 9581,\n },\n record_gbr,\n )", "def rangeselector_date():\n return {\n \"bgcolor\": \"rgb(35, 149, 86)\",\n \"activecolor\": \"rgb(25, 108, 62)\",\n \"buttons\": [\n {\"count\": 7, \"label\": \"1w\", \"step\": \"day\", \"stepmode\": \"backward\"},\n {\"count\": 14, \"label\": \"2w\", \"step\": \"day\", \"stepmode\": \"backward\"},\n {\"count\": 1, \"label\": \"1m\", \"step\": \"month\", \"stepmode\": \"backward\"},\n {\"count\": 3, \"label\": \"3m\", \"step\": \"month\", \"stepmode\": \"backward\"},\n {\"step\": \"all\"},\n ],\n }", "def calculate_ranges(period, availability, service_recipe, resources):\n\n ranges = []\n\n period_start_dt, period_end_dt = period\n\n delta_duration = get_service_duration(service_recipe)\n delta_step = get_service_step(service_recipe)\n\n loop_dt_range = by_timedelta_range((timedelta(0), delta_duration),\n period_start_dt)\n\n 
while contains(period, loop_dt_range):\n\n if not is_datetime_range_available(loop_dt_range, availability):\n near_working_dt_range = nearest_working_datetime_range(\n loop_dt_range, availability)\n\n if near_working_dt_range is not None:\n loop_dt_range = by_timedelta_range(\n (timedelta(0), delta_duration), near_working_dt_range[0])\n else:\n loop_dt_range = by_timedelta_range(\n (timedelta(0), delta_duration),\n start_of_tomorrow(loop_dt_range[0]))\n\n continue\n\n resource_occupations = get_resource_occupations_in_dt_range(\n loop_dt_range, service_recipe, resources)\n if resource_occupations:\n ranges.append((loop_dt_range, resource_occupations))\n\n # like i++ but more cool\n loop_dt_range = by_timedelta_range(\n (delta_step, delta_step + delta_duration), loop_dt_range[0])\n\n return ranges", "def get_signups_per_day_for_range(day_from, day_until, still_active=False):\n noon = dt_time(12, 0)\n d1 = datetime.combine(day_from, noon).replace(tzinfo=pytz.utc)\n d2 = datetime.combine(day_until, noon).replace(tzinfo=pytz.utc)\n\n qs = User.objects.filter(date_joined__gte=d1, date_joined__lte=d2)\n if still_active:\n qs = qs.filter(is_active=True, last_login__isnull=False)\n qs = qs.extra({'day': \"date(date_joined)\"}).values('day')\n qs = qs.annotate(Count('id')).order_by('day')\n\n return qs", "def diveDates(self,start,finish):\n start = datetime.strptime(start,\"%Y-%m-%d\")\n finish = datetime.strptime(finish,\"%Y-%m-%d\")\n return start+(finish-start)/2", "def renter_accounting(sid, start, end):\n rents = session.query(distinct(part2.Sailors.name), part2.Reserves.bid, part2.Reserves.day, part2.Prices.price). \\\n select_from(part2.Reserves). \\\n join(part2.Prices, and_(part2.Reserves.sid == part2.Prices.sid, part2.Reserves.bid == part2.Prices.bid, part2.Reserves.day == part2.Prices.day)). \\\n join(part2.Sailors, part2.Sailors.id == part2.Reserves.sid). \\\n filter(part2.Sailors.id == sid). \\\n filter(part2.Payments.day >= start). \\\n filter(part2.Payments.day <= end). \\\n all()\n payments = session.query(part2.Sailors.name, part2.Payments.bid, part2.Payments.payDay, part2.Payments.amount).\\\n select_from(part2.Payments). \\\n join(part2.Sailors, part2.Sailors.id == part2.Payments.sid). \\\n filter(part2.Sailors.id == sid). \\\n filter(part2.Payments.day >= start). \\\n filter(part2.Payments.day <= end). 
\\\n all()\n results = []\n for rent in rents:\n results.append({rent[2]: {\"boat\": rent[1], \"credit\": False, \"amount\": rent[3]}})\n for payment in payments:\n results.append({payment[2]: {\"boat\": payment[1], \"credit\": True, \"amount\": payment[3]}})\n results = sorted(results, key=lambda d: list(d.keys()))\n return [rents[0][0], results]", "def enough_days(self, cur, username, start_date, end_date):\n cur.execute('SELECT days_free FROM users WHERE username = ?', (username,))\n days_free = cur.fetchone()[0]\n days_between = abs(self.days_difference(start_date, end_date))\n return days_free >= days_between", "def _date_range(start: str, end: str) -> List[str]:\n start_dt = _parse_ISO8601_date(start)\n end_dt = _parse_ISO8601_date(end)\n if start_dt > end_dt:\n raise ValidationError(\n \"Start date needs to be greater than or equal end date.\"\n )\n if (\n start_dt < _parse_ISO8601_date('1900') or\n end_dt > datetime.datetime.now().astimezone()\n ):\n raise ValidationError(\n \"Start date needs to be less than 1900-01-01T00:00:00Z and end\"\n \" date can't be from the feature.\"\n )\n return map(lambda date: date.isoformat(), rrule(\n freq=DAILY,\n dtstart=start_dt,\n until=end_dt,\n cache=True\n ))", "def limit_date_range_to(self):\n return self._limit_date_range_to", "def get_date_range(from_date, to_date):\n if from_date > to_date:\n raise ValueError('The start date {} is > the end date'.format(from_date, to_date))\n return rrule.rrule(rrule.DAILY, dtstart=from_date, until=to_date)", "def _compute_days_tarea(self, cr, uid, ids, field, arg, context=None):\n import datetime\n result = {}\n records = self.browse(cr, uid, ids, context=context)\n for r in records:\n if r.date_start_tarea:\n d = time.strptime(r.date_start_tarea,'%Y-%m-%d %H:%M:%S')\n for r2 in records:\n if r2.date_end_tarea:\n c = time.strptime(r2.date_end_tarea,'%Y-%m-%d %H:%M:%S')\n delta = datetime.datetime(c[0], c[1], c[2]) - datetime.datetime(d[0], d[1], d[2])\n weeks, days = divmod(delta.days, 1)\n result[r2.id] = weeks\n return result", "def _get_day_limits(self) -> Tuple[datetime, datetime]:\n day_start = min(self.dset.time.datetime)\n day_end = max(self.dset.time.datetime)\n \n return day_start, day_end", "def _get_input_date_range_for(self, from_output_dt, to_output_dt):\n # If comb is adaptive, the required input date range needs to account for the time window\n if self.is_adaptive:\n if from_output_dt is None:\n return from_output_dt, to_output_dt\n return from_output_dt-timedelta(days=self.time_window), to_output_dt\n # Otherwise, the comb is already trained and does not need to fill up the time window first\n return from_output_dt, to_output_dt", "def date_range(start, end):\n session = Session(engine)\n \n sel = [func.min(measurement.tobs),\n func.max(measurement.tobs),\n func.avg(measurement.tobs)]\n \n range_data = session.query(*sel).\\\n filter(measurement.date >= start).\\\n filter(measurement.date <= end).all()\n \n session.close()\n \n range_x = list(np.ravel(range_data))\n\n return jsonify(range_x)", "def _latest_meter_glucose_entry_in_range(from_datetime, to_datetime):\n pump_history = history_in_range(from_datetime, to_datetime)\n\n for history_dict in [h for h in pump_history if h.get(\"_type\") == \"CalBGForPH\"]:\n amount = history_dict.get(\"amount\", 0)\n if amount > 0 and from_datetime <= parse(history_dict[\"timestamp\"]) <= to_datetime:\n return history_dict", "def getBeginEnd(self):\n if (self.dr_type == choices.DATE_RANGE_TYPE_FIXED):\n return self.begin, self.end\n\n elif 
(self.dr_type == choices.DATE_RANGE_TYPE_VARIABLE):\n end = datetime.now()\n\n if (self.unit == choices.TIME_UNIT_DAY):\n begin = end - relativedelta(days=self.quantity)\n\n elif (self.unit == choices.TIME_UNIT_WEEK):\n begin = end - relativedelta(weeks=self.quantity)\n\n elif (self.unit == choices.TIME_UNIT_MONTH):\n begin = end - relativedelta(months=self.quantity)\n\n elif (self.unit == choices.TIME_UNIT_YEAR):\n begin = end - relativedelta(years=self.quantity)\n\n else:\n # This case should not happen\n raise Exception(\"A DateRange object's 'unit' must be a numeric\"\n \" value in: {units}.\".format(units=\", \".join([\n \"{const} ({name})\".format(const=unit, name=unit_name)\n for unit, unit_name in choices.TIME_UNIT\n if unit is not None]))\n )\n\n return begin, end\n\n else:\n # This case should not happen\n raise Exception(\"A DateRange object's 'dr_type' must be one of:\"\n \" {const_fixed} (fixed range) or {const_dynamic}\"\n \" (dynamic range).\".format(\n const_fixed=choices.DATE_RANGE_TYPE_FIXED,\n const_dynamic=choices.DATE_RANGE_TYPE_VARIABLE\n ))", "def base_depth_for_period(resort_name, start_date, end_date):\n\n start_date_year = int(start_date[0:4])\n start_date_month = int(start_date[4:6])\n start_date_day = int(start_date[6:8])\n\n end_date_year = int(end_date[0:4])\n end_date_month = int(end_date[4:6])\n end_date_day = int(end_date[6:8])\n\n resort_table = resort_table_dict[resort_name]\n\n query = \"SELECT status_date FROM %s\" %(resort_table)\n connection = get_connection()\n\n period_date_list = []\n base_depth_list = []\n\n if connection is not None:\n try:\n for row in get_select_query_results(connection, query):\n row_year = int(row[0].strftime('%Y'))\n row_month = int(row[0].strftime('%m'))\n row_day = int(row[0].strftime('%d'))\n\n if row_year < start_date_year or row_year > end_date_year:\n continue\n if start_date_year == row_year:\n if start_date_month > row_month:\n continue\n if start_date_year == row_year:\n if start_date_month == row_month:\n if start_date_day > row_day:\n continue\n if end_date_year == row_year:\n if end_date_month < row_month:\n continue\n if end_date_year == row_year:\n if end_date_month == row_month:\n if end_date_day < row_day:\n continue\n\n date_to_add = (row[0].strftime('%Y') + row[0].strftime('%m') + row[0].strftime('%d'))\n period_date_list.append(date_to_add)\n\n except Exception as e:\n print(e, file=sys.stderr)\n\n for date in period_date_list:\n base_depth_for_list = base_depth_for_date(resort_name, date)\n base_depth_list.append(base_depth_for_list)\n\n return json.dumps(base_depth_list)", "def get_date_range(startdate, enddate):\n if enddate < startdate:\n raise Exception(\"Passed in enddate that was before start date, did you flip your variables around?\")\n \n if isinstance(startdate, datetime.datetime): startdate = startdate.date()\n if isinstance(enddate, datetime.datetime): enddate = enddate.date()\n \n totalspan = enddate-startdate\n return [startdate + timedelta(days=day) for day in range(0, totalspan.days+1)]", "def create_date_list(start_date = start_date, end_date = end_date):", "def temp_range(start_date, end_date):\n \"\"\"for dates between the start and end date inclusive.\"\"\"\n results = session.query(func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)).\\\n filter(Measurement.date >= start_date).filter(Measurement.date <= end_date).all()\n\n # Convert list of tuples into normal list\n startend = list(np.ravel(results))\n\n return jsonify(startend)", "def 
get_daterange(daterange: List[str]) -> Tuple[dt.datetime, dt.datetime]:\n if daterange is None:\n dt_start = dt.datetime.strptime(\"2017-01-01\", \"%Y-%m-%d\")\n dt_end = dt.datetime.now()\n else:\n dt_start = parse_date(daterange[0])\n if len(daterange) == 1:\n dt_end = parse_date(None)\n else:\n dt_end = parse_date(daterange[1])\n\n return dt_start, dt_end", "def pullGateCount(start_date, end_date):\n headers = getSenSourceHeaders()\n url = \"https://vea.sensourceinc.com/api/data/traffic?dateGroupings=hour(1)&endDate={1}T00:00:00.000Z&entityType=zone&excludeClosedHours=false&include=zone,sensor,site,location&meta=&metrics=ins,outs&relativeDate=custom&startDate={0}T00:00:00.000Z\"\n url = url.format(start_date, end_date)\n req = requests.get(url, headers=headers)\n return req", "def compute_daterange(df: pd.DataFrame):\n\n start_date = df[\"Date\"].iloc[0]\n end_date = df[\"Date\"].iloc[-1]\n return pd.date_range(start_date, end_date)", "def _DateRangeQuery(self, start_date='2007-01-01', end_date='2007-07-01'):\n\n print 'Date range query for events on Primary Calendar: %s to %s' % (\n start_date, end_date,)\n query = gdata.calendar.client.CalendarEventQuery(start_min=start_date, start_max=end_date)\n feed = self.cal_client.GetCalendarEventFeed(q=query)\n for i, an_event in zip(xrange(len(feed.entry)), feed.entry):\n print '\\t%s. %s' % (i, an_event.title.text,)\n for a_when in an_event.when:\n print '\\t\\tStart time: %s' % (a_when.start,)\n print '\\t\\tEnd time: %s' % (a_when.end,)", "def dates_within(\n gedcom_date_first : str,\n gedcom_date_second : str,\n limit : int,\n units : str\n) -> bool:\n\n conversion = {'days': 1, 'months': 30.4, 'years': 365.25}\n\n dt1 = gedcom_date_to_datetime(gedcom_date_first)\n dt2 = gedcom_date_to_datetime(gedcom_date_second)\n\n return (abs((dt1 - dt2).days) / conversion[units]) <= limit", "def _check_dates(self, cr, uid, ids, context=None):\n for act in self.browse(cr, uid, ids, context):\n date_from = self.get_date(act.date_from)\n date_to = self.get_date(act.date_to)\n previous_ids = self.search(cr, uid, [('id','!=',act.id)],context=context)\n dates = self.read(cr, uid, previous_ids, ['date_from','date_to'], context=context)\n\n dates = [{'date_from':self.get_date(x['date_from']),'date_to':self.get_date(x['date_to'])} for x in dates]\n for date in dates:\n case0 = date['date_from'] >= date_from and date['date_to'] <= date_to\n\n case1 = date['date_from'] <= date_from and date['date_to'] >= date_to\n\n case2 = date['date_from'] <= date_from and date_from <= date['date_to'] \n\n case3 = date_from <= date['date_from'] and date['date_from'] <= date_to\n \n if case0 or case1 or case2 or case3:\n raise osv.except_osv(_('Error'), _(\"THIS RANGE OF DATE HAVE BEEN FETCHED BEFORE\"))\n return True", "def _phase_range(self, change_dates):\n start_dates = [self.dates[0], *change_dates]\n end_dates_without_last = [\n (\n datetime.strptime(date, self.DATE_FORMAT) - timedelta(days=1)\n ).strftime(self.DATE_FORMAT)\n for date in change_dates\n ]\n end_dates = [*end_dates_without_last, self.dates[-1]]\n return (start_dates, end_dates)", "def GetAllCostByDateOfPaymentBandFromDB(startDate, endDate):\n\n logs.logger.debug(\n \"Start to get back all Cost object from database \"\n \"based on payment date band.\")\n try:\n searchedCostByDateOfPaymentBandFromDB = session.query(\n Cost.Cost).filter(Cost.Cost.dateOfPayment >= startDate, Cost.Cost.dateOfPayment <= endDate).all()\n logs.logger.info(\n \"Get back all Cost object from database \"\n \"based on payment date 
band.\")\n return [item for item in searchedCostByDateOfPaymentBandFromDB]\n except Exception as e:\n logs.logger.error(e, exc_info=True)", "def _onchange_date_from(self):\n\t\tdate_from = self.date_from\n\t\tdate_to = self.date_to\n\t\tself.compute_valid_leaves_for_employee(date_from, date_to)\n\n\t\t# policy_id = self.env['leaves.policy'].sudo().search(\n\t\t# \t[('leave_type', '=', self.holiday_status_id.id), ('company_id', '=', self.env.user.company_id.id)])\n\t\t# if date_from and not date_to:\n\t\t# \tdate_to_with_delta = fields.Datetime.from_string(date_from) + timedelta(hours=8)\n\t\t# \tself.date_to = str(date_to_with_delta)\n\t\t# \tnumber_of_day = (datetime.strptime(self.date_to, DEFAULT_SERVER_DATETIME_FORMAT) - datetime.strptime(date_from, DEFAULT_SERVER_DATETIME_FORMAT)).total_seconds()/(24*3600)\n\t\t# \tself.number_of_days_temp = number_of_day\n\t\t# # Compute and update the number of days\n\t\t# if (date_to and date_from) and (date_from <= date_to):\n\t\t# \tif policy_id:\n\t\t# \t\tfor val in policy_id:\n\t\t# \t\t\tnumber_of_days = 0\n\t\t# \t\t\tif val.weekends_leave_period == 'dont_count':\n\t\t# \t\t\t\tnum_days = self._get_number_of_days(date_from, date_to, self.employee_id.id)\n\t\t# \t\t\t\tdate_to1 = datetime.strptime(date_to, '%Y-%m-%d %H:%M:%S')\n\t\t# \t\t\t\tdate_from1 = datetime.strptime(date_from, '%Y-%m-%d %H:%M:%S')\n\t\t#\n\t\t# \t\t\t\t# Logic of Public Holidays when week offs count as holidays is True 2019-11-19\n\t\t# \t\t\t\temp_shift = self.employee_id.resource_calendar_ids\n\t\t# \t\t\t\tglobal_leaves = emp_shift.global_leave_ids\n\t\t# \t\t\t\t# List to store the global leaves\n\t\t# \t\t\t\tpublic_holidays = []\n\t\t# \t\t\t\tfor holiday in global_leaves:\n\t\t# \t\t\t\t\tpublic_holidays.append((holiday.date_from, holiday.date_to))\n\t\t#\n\t\t# \t\t\t\t# Public holidays between leave period\n\t\t# \t\t\t\tleave_period_dates = []\n\t\t# \t\t\t\tstart_date = date_from1.date()\n\t\t# \t\t\t\tend_date = date_to1.date()\n\t\t# \t\t\t\tdelta = end_date - start_date\n\t\t# \t\t\t\tfor i in range(delta.days + 1):\n\t\t# \t\t\t\t\tday = start_date + timedelta(days=i)\n\t\t# \t\t\t\t\tleave_period_dates.append(day)\n\t\t# \t\t\t\tcount = 0\n\t\t# \t\t\t\tfor date in public_holidays:\n\t\t# \t\t\t\t\tif datetime.strptime(date[0], '%Y-%m-%d %H:%M:%S').date() in leave_period_dates:\n\t\t# \t\t\t\t\t\tcount += 1\n\t\t# \t\t\t# End of Public Holidays logic\n\t\t#\n\t\t# \t\t\t\tself.number_of_days_temp = num_days - count\n\t\t# \t\t\telse:\n\t\t# \t\t\t\tnumber_of_days = self._get_number_of_days(date_from, date_to, self.employee_id.id)\n\t\t# \t\t\t\tdate_to1 = datetime.strptime(date_to, '%Y-%m-%d %H:%M:%S')\n\t\t# \t\t\t\tdate_from1 = datetime.strptime(date_from, '%Y-%m-%d %H:%M:%S')\n\t\t# \t\t\t\tif val.dur_full and not val.dur_half:\n\t\t# \t\t\t\t\ttotal_days = (date_to1 - date_from1).days\n\t\t# \t\t\t\telse:\n\t\t# \t\t\t\t\ttotal_seconds = (date_to1 - date_from1).seconds\n\t\t# \t\t\t\t\ttotal_days = total_seconds / (24 * 3600)\n\t\t#\n\t\t# \t\t\t\tweek_offs = total_days - number_of_days\n\t\t# \t\t\t\tself.number_of_days_temp = number_of_days + week_offs\n\t\t# \telse:\n\t\t# \t\t# self.number_of_days_temp = self._get_number_of_days(date_from, date_to, self.employee_id.id)\n\t\t# \t\tnumber_of_day = (datetime.strptime(self.date_to, DEFAULT_SERVER_DATETIME_FORMAT) - datetime.strptime(\n\t\t# \t\t\tdate_from, DEFAULT_SERVER_DATETIME_FORMAT)).total_seconds() / (24 * 3600)\n\t\t# \t\tself.number_of_days_temp = number_of_day\n\t\t#\n\t\t# elif (date_to and 
date_from) and (date_from > date_to):\n\t\t# \traise ValidationError(\"From Date cannot be greater then To Date\")\n\t\t# else:\n\t\t# \tself.number_of_days_temp = 0", "def test_aggr_date_input(self):\n\n actual_start_date = set([])\n actual_end_date = set([])\n for year in self.years:\n for my_date in self.dates:\n input_date = date(year, my_date[0], my_date[1])\n retail_date = RetailDate(input_date)\n actual_start_date.add(retail_date.year_start_date)\n actual_end_date.add(retail_date.year_end_date)\n\n # Verify the retail start dates\n expected_start = set([date(mTup[0], mTup[1], mTup[2]) for mTup in self.retail_start_dates])\n diff = expected_start.symmetric_difference(actual_start_date)\n self.assertEqual(len(diff), 0, \"Diff: \" + str(diff))\n\n # Verify the retail end dates\n expected_end = set([date(mTup[0], mTup[1], mTup[2]) for mTup in self.retail_end_dates])\n diff = expected_end.symmetric_difference(actual_end_date)\n self.assertEqual(len(diff), 0, \"Diff: \" + str(diff))", "def trackRentRequest(self):\n\t\t#start_date = timezone.now().date()\n\t\tstart_dat=datetime.today()\n\t\tstart_date = start_dat - timedelta( hours=start_dat.time().hour,minutes=start_dat.time().minute,seconds=start_dat.time().second ) \n\t\tend_date=start_dat\n\t\tans=None\n\t\t#print start_dat.time().hour\n\t\tprint end_date\n\t\tans=Rents.objects.filter(date_of_issue__range=(start_date,end_date))\n\t\tlst=[]\n\t\tfor b in ans:\n\t\t\towneradd=b.owner_id.address\n\t\t\tuseradd=b.userid.address\n\t\t\tusername=b.userid.email\n\t\t\townername=b.owner_id.email\n\t\t\tuserphone=b.userid.contact_no\n\t\t\townerphone=b.owner_id.contact_no\n\t\t\tbookname=b.bookid.title\n\t\t\tstatus=b.paymentid.ispending\n\t\t\tbook=b.__dict__\n\t\t\tbook['owneradd']=owneradd\n\t\t\tbook['useradd']=useradd\n\t\t\tbook['username']=username\n\t\t\tbook['ownername']=ownername\n\t\t\tbook['userphone']=userphone\n\t\t\tbook['ownerphone']=ownerphone\n\t\t\tbook['name']=bookname\n\t\t\tif status==True:\n\t\t\t\tbook['status']=\"Pending\"\n\t\t\telse:\n\t\t\t\tbook['status']=\"Delivered\"\n\t\t\tlst.append(book)\n\t\t#print ans\n\t\tif ans is None:\n\t\t\tprint \"not found\"\n\t\telse:\n\t\t\tprint \"found\"\n\t\treturn lst", "def get_diff_dates(self):\n if self.projected_start_date and self.projected_finish_date:\n diff = self.projected_finish_date - self.projected_start_date\n return diff.days\n return 0", "def getPurchaseDates(self):\n\t\treturn self.dateList", "def date_range(start_date, end_date):\n return [start_date + timedelta(x) for x in range((end_date - start_date).days + 1)]", "def billing_choose_dates(self):\n number_of_dates_to_be_generated_per_patient = (\n self.number_of_dates_to_be_generated_per_patient\n )\n dunning_cycle_length = self.dunning_cycle_length\n dates = self.dates\n first_date = random.choice(\n dates\n ) # randomly choose a start date from the list of possible start dates\n last_possible_date = first_date + datetime.timedelta(\n days=dunning_cycle_length\n ) # calculate the last date possible based on Dunnin Cycle\n time_between_dates = last_possible_date - first_date\n subsequent_events = random.sample(\n list(np.arange(0, time_between_dates.days)),\n number_of_dates_to_be_generated_per_patient,\n )\n subsequent_events.sort()\n dates = [\n first_date + datetime.timedelta(days=np.int(subsequent_event))\n for subsequent_event in subsequent_events\n ]\n event_list = pd.DataFrame(dates)\n return event_list", "def SumCostByBetweenDates(startDate, endDate):\n\n logs.logger.debug(\n \"Start to adds all amount 
of Cost objects between two dates.\")\n try:\n searchedCostByBetweenDatesFromDB = GetAllCostByDateOfPaymentBandFromDB(\n startDate, endDate)\n sumTotal = 0\n for item in searchedCostByBetweenDatesFromDB:\n sumTotal += item.amount\n logs.logger.info(\n \"Between two dates adds all amount of Cost objects.\")\n return sumTotal\n except Exception as e:\n logs.logger.error(e, exc_info=True)", "def range(self):\n\n return time_stat(self, stat=\"range\")", "def test_list_grading_periods_accounts(self):\r\n account_id = None # Change me!!\r\n\r\n r = self.client.list_grading_periods_accounts(account_id)", "def get_date_range(delta, date_source=None):\n date_now = date_source if date_source else datetime.datetime.now()\n date_delta = date_now + datetime.timedelta(days=delta)\n datetime_now = datetime_to_DateTime(date_now)\n datetime_delta = datetime_to_DateTime(date_delta)\n return sorted([datetime_now, datetime_delta])", "def get_range(self) -> tuple[int, int]:\n return self.range_from, self.range_to", "def <start>/<end>(<start>/<end>)\ndef calc_temps(start_date, end_date):", "def get_date_range(num_days):\n\n date1 = datetime.datetime.utcnow()\n dateranges = []\n \n if num_days > 90:\n chunks = math.ceil(num_days/90)\n print('Breaking dates into into', chunks,'90 day chunks.')\n\n for chunk in range(1,chunks+1):\n date2 = date1 - datetime.timedelta(days=90)\n\n start = add_milliseconds(date1)\n end = add_milliseconds(date2)\n\n print('Chunk', chunk, ': ', date1, 'to', date2)\n dateranges.append((start,end))\n date1 = date2 - datetime.timedelta(days=1)\n \n else: \n date1 = datetime.datetime.utcnow()\n date2 = date1 - datetime.timedelta(days=num_days)\n \n start = add_milliseconds(date1)\n end = add_milliseconds(date2)\n \n dateranges.append((start,end))\n \n return(dateranges)", "def _calc_range(self) -> np.ndarray:\n if self._is_ct25k():\n range_resolution = 30\n n_gates = 256\n else:\n n_gates = int(self.metadata[\"number_of_gates\"])\n range_resolution = int(self.metadata[\"range_resolution\"])\n return np.arange(n_gates) * range_resolution + range_resolution / 2", "def __json__(self, request=None):\n # start = self.start.isoformat() if self.start else None\n # end = self.end.isoformat() if self.end else None\n return dict(\n timeref_type=\"daterange\",\n interval=self.interval,\n start=self.start.isoformat(),\n end=self.end.isoformat(),\n )", "def dates(self):\n dates = []\n d = self.date_a\n while d < self.date_b:\n dates.append(d)\n d += datetime.timedelta(1)\n\n return dates", "def days_between(date_1, date_2):\n date_1 = datetime.strptime(date_1, \"%d/%m/%Y\")\n date_2 = datetime.strptime(date_2, \"%d/%m/%Y\")\n days_between.time_between = abs((date_2 - date_1).days)", "def start_end(start_date, end_date):\n print(\"server received request for tobs stats start date to end date...\")\n # correct for dates before the start of our data\n if start_date < '2010-01-01':\n start_date = '2010-01-01'\n # correct for dates beyond the end of our data\n if end_date > '2017-08-23':\n end_date = '2017-08-23'\n range_df = temps_df[(temps_df['date'] >= start_date) & (temps_df['date'] <= end_date)]\n lowest = range_df['tobs'].min()\n highest = range_df['tobs'].max()\n average = range_df['tobs'].mean()\n output = {'TMIN': lowest, 'TMAX': highest, 'TAVG': average}\n return jsonify(output)", "def ledger(self, start=None, end=None):\r\n\r\n DEBIT_IN_DB = self._DEBIT_IN_DB()\r\n\r\n flip = 1\r\n if self._positive_credit():\r\n flip *= -1\r\n\r\n qs = self._entries()\r\n balance = Decimal(\"0.00\")\r\n if 
start:\r\n balance = self.balance(start)\r\n qs = qs.filter(transaction__t_stamp__gte=start)\r\n if end:\r\n qs = qs.filter(transaction__t_stamp__lt=end)\r\n qs = qs.order_by(\"transaction__t_stamp\", \"transaction__tid\")\r\n\r\n if not qs:\r\n return []\r\n\r\n #helper is a hack so the caller can test for no entries.\r\n def helper(balance_in):\r\n balance = balance_in\r\n for e in qs.all():\r\n amount = e.amount*DEBIT_IN_DB\r\n o_balance = balance\r\n balance += flip*amount\r\n\r\n yield LedgerEntry(amount, e, o_balance, balance)\r\n\r\n return helper(balance)", "def get_periods():\n return [\n relativedelta(),\n relativedelta(days=6),\n relativedelta(months=1),\n relativedelta(months=3),\n relativedelta(years=1),\n relativedelta(years=5)\n ]", "def _time_range_list(days: int) -> List[Tuple[Arrow, Arrow]]:\n time_range = []\n for days_diff in range(days, -1, -1):\n date_begin = date.today() - timedelta(days=days_diff)\n date_end = date.today() - timedelta(days=days_diff - 1)\n begin = arrow.get(date_begin.strftime('%Y-%m-%d'), 'YYYY-MM-DD')\n end = arrow.get(date_end.strftime('%Y-%m-%d'), 'YYYY-MM-DD')\n time_range.append((begin, end))\n\n return time_range", "def dates_within_cond(\n gedcom_date_first : str,\n gedcom_date_second : str,\n limit : int,\n units : str,\n cond : str\n) -> bool:\n\n return cond == 'NA' or dates_within(gedcom_date_first, gedcom_date_second, limit, units)", "def get_excluded_dates(self):\n raise NotImplementedError", "def get_excluded_dates(self):\n raise NotImplementedError", "def between(cls, begin_date: datetime.date, end_date: datetime.date) -> float:\n\n if begin_date > end_date:\n raise ValueError('End date must not be before begin date.')\n if begin_date == end_date:\n return 0\n data = cls.cumulative()\n first = data.get((begin_date.year, begin_date.month), None)\n last = data.get((end_date.year, end_date.month), None)\n if first is None or last is None:\n raise ValidationError(\"Inflation figures don't cover entire period requested: {} - {}\".format(begin_date,\n end_date))\n return (last / first) - 1", "def pnl(qbo_session, period = \"YEARLY\", start_date=\"first\", end_date=\"last\",\n **kwargs):\n\n pnl_account_types = [\n \n \"Income\", \"Other Income\",\n \"Expense\", \"Other Expense\", \"Cost of Goods Sold\"\n \n ]\n\n \n\n # go through the accounts, collecting a list of those that are \n # pnl accounts\n\n relevant_accounts = []\n\n coa = qbo_session.chart_of_accounts()\n\n AccountType_i = coa[0].index(\"AccountType\")\n fqa_i = coa[0].index(\"FullyQualifiedName\")\n\n for a in coa:\n\n AccountType = a[AccountType_i]\n\n if AccountType in pnl_account_types:\n\n relevant_accounts.append(a[fqa_i])\n \n # now collect the ledger_lines that are even relevant to the time\n # period and pnl accounts (and we'll handle presentation last)\n\n relevant_activity = {} #{account:[relevant lines]}\n\n all_ledger_lines = qbo_session.ledger_lines(None, None, None, True,\n **kwargs)\n\n headers = all_ledger_lines[0]\n\n account_i = headers.index(\"account\") \n amount_i = headers.index(\"amount\")\n date_i = headers.index(\"TxnDate\")\n \n earliest_date = datetime(2100,1,1)\n latest_date = datetime(1900,1,1)\n\n for line in all_ledger_lines[1:]:\n\n account = line[account_i]\n line_date = line[date_i]\n\n #first apply the date filter!\n if not start_date == \"first\" and line_date < start_date:\n continue\n \n if not end_date == \"last\" and line_date > end_date:\n continue\n \n #if it's made the cut, we can update the report date bounds\n earliest_date = 
min(line_date,earliest_date)\n latest_date = max(line_date,latest_date)\n\n #then apply the account filter!\n\n if not account in relevant_activity:\n #then let's confirm that its account type is a pnl one\n \n if not account in relevant_accounts:\n \n continue\n\n else:\n relevant_activity[account] = []\n\n relevant_activity[account].append(line)\n\n #now let's do presentation\n #TODO -- incorporate pandas tables...do only minimal work on it until then\n\n pnl_lines = []\n\n if period == \"YEARLY\":\n\n report_start_date = datetime(earliest_date.year,1,1)\n report_end_date = datetime(latest_date.year,12,31)\n\n period_start_dates = list(rrule(YEARLY, bymonth=1, bymonthday=1,\n dtstart=report_start_date,\n until=report_end_date))\n\n period_end_dates = list(rrule(YEARLY, bymonth=12, bymonthday=-1,\n dtstart=report_start_date,\n until=report_end_date))\n\n elif period == \"MONTHLY\":\n\n report_start_date = datetime(earliest_date.year,\n earliest_date.month,\n 1)\n report_end_date = datetime(latest_date.year,\n latest_date.month,\n calendar.monthrange(latest_date.year,\n latest_date.month)[1])\n\n period_start_dates = list(rrule(MONTHLY, bymonthday=1,\n dtstart=report_start_date,\n until=report_end_date))\n\n period_end_dates = list(rrule(YEARLY, bymonthday=-1,\n dtstart=report_start_date,\n until=report_end_date)) \n\n header_1 = [\"\", \"Period Start -->\"] + period_start_dates\n header_2 = [\"Account\", \"Period End -->\"] + period_end_dates\n\n pnl_lines.append(header_1)\n pnl_lines.append(header_2)\n\n \"\"\"Clearly, there's a way to do this with only one pass of the data...\n let's get that right in the first re-write...probably with pandas\"\"\"\n\n #now let's fill up the pnl_lines with what we know to be the relevant data\n #for now, we'll rely on the knowledge that the data is coming to us in\n #date order, but that should be fixed too...\n\n for account in relevant_activity:\n\n account_row = [account, \"\"] #one value per period \n\n current_period_index = 0 #primitive counter, yes!\n this_period_total = 0 #this will be this period's total\n\n for line in relevant_activity[account]:\n \n line_amount = line[amount_i]\n line_date = line[date_i] \n\n if line_date > period_end_dates[current_period_index]:\n\n account_row.append(this_period_total)\n this_period_total = line_amount\n current_period_index +=1\n\n else:\n \n this_period_total = round(this_period_total +\n line_amount, 2)\n\n \"\"\"super sloppy...\"\"\"\n account_row.append(this_period_total) #for the last period\n current_period_index +=1\n\n while current_period_index < len(period_end_dates):\n account_row.append(0)\n current_period_index +=1\n\n pnl_lines.append(account_row)\n\n return pnl_lines", "def dates(self):\n pass", "def glad_between_dates(\n data,\n start_date,\n end_date,\n return_days=True,\n return_intensity=False,\n image_type=None):\n DATE_STR_FMT = '%Y-%m-%d'\n INT_DATE_FMT = '%Y%m%d'\n if image_type == \"FORMA\":\n GLAD_START_DATE = datetime.strptime('2012-01-01', DATE_STR_FMT)\n elif image_type == \"GLAD\":\n GLAD_START_DATE = datetime.strptime('2015-01-01', DATE_STR_FMT)\n else:\n GLAD_START_DATE = datetime.strptime('2015-01-01', DATE_STR_FMT)\n\n intensity, days = _get_intensity_days(data)\n is_between_dates = _days_are_between_dates(days, start_date, end_date)\n if return_days:\n if return_intensity:\n bands = [_between_dates(is_between_dates, intensity),\n _between_dates(is_between_dates, days)]\n im = np.dstack(bands)\n else:\n im = _between_dates(is_between_dates, days)\n elif return_intensity:\n 
im = _between_dates(is_between_dates, intensity)\n else:\n im = is_between_dates.astype(int)\n return im", "def get_gates(self):\n return self.gates", "def date_range(start_date, end_date):\n list_dates = []\n for n in range((end_date + timedelta(1) - start_date).days):\n temp_date = start_date + timedelta(n)\n list_dates.append(temp_date.strftime('%Y%m%d'))\n return list_dates", "def date_range(start_date, end_date):\n list_dates = []\n for n in range((end_date + timedelta(1) - start_date).days):\n temp_date = start_date + timedelta(n)\n list_dates.append(temp_date.strftime('%Y%m%d'))\n return list_dates", "def git_stats_date_range(\n project: Project,\n formatter: Formatter,\n date_start: datetime.datetime,\n date_end: datetime.datetime,\n) -> GitStats:\n name_start = f\"{formatter.name}@{{{date_start:%F %T}}}\"\n name_end = f\"{formatter.name}@{{{date_end:%F %T}}}\"\n return _git_stats(project, name_start, name_end)", "def user_weeks_between(self, user, start, end):\n base = self.filter(user=user.id)\n if start != ldates.idx_beginning or end != ldates.idx_last_sunday:\n if start == end:\n return base.filter(week_idx=start)\n else:\n return base.filter(week_idx__range=(start, end))\n else:\n return base", "def temp_daterange(start_date,end_date):\r\n # Query\r\n mam_temp_dr_results = session.query(func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)).\\\r\n filter(Measurement.date >= start_date).filter(Measurement.date <= end_date).all()\r\n \r\n # Convert results into a list of min, ave, max temps for date range with specific start_date and end_date\r\n mam_temp_start_end = list(np.ravel(mam_temp_dr_results))\r\n return jsonify(mam_temp_start_end)", "def dates(start, end):\n \n sel4 = [\n func.min(Measurement.tobs),\n func.max(Measurement.tobs),\n func.avg(Measurement.tobs),]\n\n if end is None: \n start_date = dt.datetime.strptime(start , '%Y-%m-%d')\n temp_analysis = session.query(*sel4).filter(Measurement.date >= start_date).all() \n else\n end_date = dt.datetime.strptime(end , '%Y-%m-%d')\n temp_analysis = session.query(*sel4).filter(Measurement.date.between (start_date, end_date)).all() \n\n# Create a dictionary from the row data and append to a list of all_dates\n all_dates = []\n for Measurement.tobs in temp_analysis:\n date_dict = {}\n date_dict['TMIN'] = func.min(Measurement.tobs)\n date_dict['TMAX'] = func.max(Measurement.tobs)\n date_dict['TAVG'] = func.avg(Measurement.tobs)\n all_dates.append(date_dict)\n\n return jsonify(date_dict)", "def get_debt_state(member, limit_year, limit_month):\n if member.first_payment_year is None:\n # never paid! 
using registration date to start with\n yearmonths_paid = set()\n year_to_check = member.registration_date.year\n month_to_check = member.registration_date.month\n else:\n # build a set for the year/month of paid quotas\n quotas = Quota.objects.filter(member=member).all()\n yearmonths_paid = {(q.year, q.month) for q in quotas}\n\n year_to_check = member.first_payment_year\n month_to_check = member.first_payment_month\n\n # verify the limit is after member started paying\n if year_to_check == limit_year:\n if month_to_check > limit_month:\n return []\n elif year_to_check > limit_year:\n return []\n\n # build a set of all the year/month the member should have paid up to (including) the limit\n should_have_paid = set()\n while True:\n should_have_paid.add((year_to_check, month_to_check))\n year_to_check, month_to_check = increment_year_month(year_to_check, month_to_check)\n if year_to_check == limit_year:\n if month_to_check > limit_month:\n break\n elif year_to_check > limit_year:\n break\n\n return sorted(should_have_paid - yearmonths_paid)", "def get_income(start, end):\n\n payments = session.query(part2.Sailors.name, part2.Payments.bid, part2.Payments.day, part2.Payments.payDay, part2.Payments.amount).\\\n select_from(part2.Payments). \\\n join(part2.Sailors, part2.Sailors.id == part2.Payments.sid). \\\n filter(part2.Payments.day >= start). \\\n filter(part2.Payments.day <= end). \\\n all()\n results = []\n for payment in payments:\n results.append({\"name\": payment[0], \"boat\": payment[1], \"rent_day\": payment[2], \"pay_day\": payment[3], \"amount\": payment[4]})\n return results", "def date_setter():\n rental_date = datetime.date.today()\n return_date = rental_date + timedelta(days= 40)\n\n rental_dates = []\n rental_dates.append(date.strftime(rental_date,'%d.%m.%Y'))\n return_dates.append(date.strftime(return_date,'%d.%m.%Y'))\n\n return rental_dates", "def test_date_range():\n year = 2012\n cres_m = get_curtailment(year, curt_fn='curtailment.json')[0]\n cres_dr = get_curtailment(year, curt_fn='curtailment_date_range.json')[0]\n for df_res, site in cres_m:\n gid = int(site.name)\n assert np.allclose(df_res['windspeed'], cres_dr[gid]['windspeed'])" ]
[ "0.60416496", "0.5759463", "0.56746733", "0.55839163", "0.5545205", "0.55324453", "0.54982615", "0.54696906", "0.54090685", "0.5405709", "0.54010266", "0.5398621", "0.53888017", "0.5384724", "0.53818357", "0.537875", "0.53136253", "0.53007126", "0.5292468", "0.5280808", "0.5277218", "0.5254051", "0.5225376", "0.52180076", "0.5201321", "0.51830566", "0.5179543", "0.51649857", "0.5149164", "0.51369184", "0.5133808", "0.51285505", "0.51250196", "0.5120875", "0.5118873", "0.5100652", "0.50972843", "0.5096779", "0.50926465", "0.50770766", "0.50768024", "0.5076553", "0.50513303", "0.5050365", "0.5029305", "0.5028373", "0.502055", "0.5010973", "0.50093585", "0.5007738", "0.50051004", "0.5003392", "0.5002784", "0.49999908", "0.49935234", "0.4990953", "0.49904874", "0.49767426", "0.49704954", "0.4963515", "0.49564266", "0.49559313", "0.4954586", "0.49504575", "0.49497458", "0.49433818", "0.49172282", "0.49138027", "0.49107215", "0.4910252", "0.49048913", "0.49003688", "0.4896915", "0.48888072", "0.48782232", "0.4875809", "0.48727617", "0.48720697", "0.4859805", "0.48582986", "0.4854797", "0.48536325", "0.4849877", "0.48479864", "0.48479864", "0.48389032", "0.48365948", "0.48320648", "0.4825466", "0.4823732", "0.4811401", "0.4811401", "0.48111716", "0.48085552", "0.48056424", "0.47957104", "0.47915593", "0.47900808", "0.47853482", "0.47809923" ]
0.6374636
0
Split off parameter part from path. Returns tuple (pathwithoutparam, param)
def splitparams(path):
    if '/' in path:
        i = path.find(';', path.rfind('/'))
    else:
        i = path.find(';')
    if i < 0:
        return path, ''
    return path[:i], path[i + 1:]
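A minimal usage sketch, assuming the function above is in scope; the sample paths below are illustrative inputs, not values taken from the dataset:

# Illustrative checks (assumed inputs): a ';' only counts as a parameter
# separator when it appears after the last '/' in the path.
assert splitparams('/path;param') == ('/path', 'param')  # param on the last segment
assert splitparams('/a;x/b') == ('/a;x/b', '')           # ';' in an earlier segment is kept
assert splitparams('/path') == ('/path', '')             # no parameter at all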
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _GetPathParamsFromPath(path: str) -> List[str]:\n path_params = []\n\n components = path.split(\"/\")\n for component in components:\n if _IsPathParameter(component):\n normalized_component = _NormalizePathComponent(component)\n normalized_component = normalized_component[1:-1]\n path_params.append(normalized_component)\n\n return path_params", "def _path_parts(path):\n # clean it up. this removes duplicate '/' characters and any that may\n # exist at the front or end of the path.\n return [pp for pp in path.split(\"/\") if pp]", "def get_input_path_parameters(path):\n path_params = []\n params = path.split('/')\n for param in params:\n if len(param) > 0 and param[0] == '{' and param[len(param) - 1] \\\n == '}':\n path_params.append(param[1:-1])\n return path_params", "def get_parameter(request, param):\n if param == \"Params\":\n return request.split(\"\\r\\n\\r\\n\")[1]\n if isinstance(param, type([])):\n return [request.split(\"\\r\\n\\r\\n\")[1] if x == \"Param\" else request.split(x + \": \")[1].split(\"\\r\\n\")[0] for x in param]\n if isinstance(param, type(\"\")):\n return request.split(param + \": \")[1].split(\"\\r\\n\")[0]", "def split_string_path(base, path):\n for i in range(len(path)):\n if isinstance(base, string_types):\n return path[:i], path[i:]\n base = base[path[i]]\n return path, ()", "def _urlparse_splitquery(url):\r\n\r\n qpart = url.split(\"?\", 1)\r\n if len(qpart) == 2:\r\n query = qpart[1]\r\n else:\r\n query = \"\"\r\n\r\n return qpart[0], query", "def splitPath(path):\n return tuple(\n element for element in os.path.split(path.rstrip(os.path.sep)) if element\n )", "def parse_request_path( self, path ):\n path_parts = path.strip(\"/\").split(\"/\")\n \n if len(path_parts) == 0:\n # invalid \n return (None, None)\n \n if len(path_parts) > 2:\n # invalid\n return (None, None)\n \n slice_name = path_parts[0]\n if len(slice_name) == 0:\n # empty string is invalid \n return (None, None)\n \n volume_name = None\n \n if len(path_parts) > 1:\n volume_name = path_parts[1]\n \n return slice_name, volume_name", "def pathsplit(path):\n stem, basename = os.path.split(path)\n if stem == '':\n return (basename,)\n if stem == path: # fixed point, likely '/'\n return (path,)\n return pathsplit(stem) + (basename,)", "def _split_uri(uri):\n parts = uri.split('/')\n assert '' == parts.pop(0)\n params = []\n res = pkcollections.Dict(params=params)\n in_optional = None\n in_path_info = None\n first = None\n for p in parts:\n assert not in_path_info, \\\n 'path_info parameter={} must be last: next={}'.format(rp.name, p)\n m = _PARAM_RE.search(p)\n if not m:\n assert first is None, \\\n 'too many non-parameter components of uri={}'.format(uri)\n first = p\n continue\n rp = pkcollections.Dict()\n params.append(rp)\n rp.is_optional = bool(m.group(1))\n if rp.is_optional:\n rp.is_path_info = m.group(1) == _PATH_INFO_CHAR\n in_path_info = rp.is_path_info\n else:\n rp.is_path_info = False\n rp.name = m.group(2)\n if rp.is_optional:\n in_optional = True\n else:\n assert not in_optional, \\\n '{}: optional parameter ({}) followed by non-optional'.format(\n uri,\n rp.name,\n )\n res.base_uri = first or ''\n return res", "def parse_path(path):\n assert path is not None and len(path) > 0, \"Invalid path: %s.\" % str(path)\n if not isinstance(path, tuple):\n path = str(path).split('.')\n return path", "def split_url_and_query_params(url):\n scheme, netloc, path, query_string, fragment = urlsplit(url)\n query_params = parse_qs(query_string)\n url = urlunsplit((scheme, netloc, path, None, 
fragment))\n return url, query_params", "def normalize_parameter(parameter: str, expression: str) -> Tuple[Optional[str], str, str]:\n try:\n # The parameter name is prefixed with its location. Example: `path.id`\n location, name = tuple(parameter.split(\".\"))\n return location, name, expression\n except ValueError:\n return None, parameter, expression", "def pathSplit(path):\n path = re.split('/|\\\\\\\\', path)\n return path", "def splitpath(path):\n\n # FIXME perhaps call op.split repetitively would be better.\n #s = string.split( path, '/' ) # we work with fwd slash only inside.\n\n#We have decided to use all kind of separator\n s = []\n while True:\n first, second = op.split(path)\n s.append(second)\n if first == \"\":\n break\n else:\n path = first\n s.reverse()\n if len(s) == 1 and s[0] == \"\":\n s = []\n return s", "def _split_url_string(param_str):\n parameters = parse_qs(param_str, keep_blank_values=False)\n for key, val in parameters.iteritems():\n parameters[key] = urllib.unquote(val[0])\n return parameters", "def _split_path(self, path):\n if path.strip() in (None, \"\", \"/\"):\n return (None, None)\n tableName, primKey = util.save_split(path.strip(\"/\"), \"/\", 1)\n # _logger.debug(\"'%s' -> ('%s', '%s')\" % (path, tableName, primKey))\n return (tableName, primKey)", "def __get_parameters(self, url):\n rest_url = url.split('?')\n parameter_tuple = ()\n if len(rest_url) > 1:\n target_url = rest_url[1]\n variable_pairs = target_url.split(self.__param_delimiter)\n for one_pair in variable_pairs:\n key_value_pair = one_pair.split(\"=\")\n if len(key_value_pair) == 2:\n parameter_tuple += (tuple(key_value_pair),)\n return parameter_tuple\n return None", "def _split_path(xj_path):\n\n res = xj_path.rsplit('.', 1)\n root_key = res[0]\n if len(res) > 1:\n return root_key, res[1]\n else:\n if root_key and root_key != '.':\n return None, root_key\n else:\n raise XJPathError('Path cannot be empty', (xj_path,))", "def parse(path, root=True):\n if path.startswith(\"/\"):\n return path[1:], \"\"\n\n if \"/\" not in path:\n return path, \"\"\n\n if root:\n return path.split(\"/\", 1)\n else:\n return path.rsplit(\"/\", 1)", "def get_dir_params(dirpath, params):\n\t(head, tail) = os.path.split(dirpath)\n\tparams.update(split_params(tail))\n\treturn head", "def __split_path(path: str) -> List[str]:\n return [part for part in path.split('/') if part] # Splits path at '/', handles extra slashes in the process", "def get_macro_and_param(item: str) -> Tuple[str, str]:\n macro, param = item[len(MACRO) :].split(\":\")\n return macro, param", "def _parse_params(self, params):\r\n if params[0] == \":\":\r\n params = [params[1:]]\r\n else:\r\n params = params.split(\" :\", 1)\r\n if len(params) == 1:\r\n last_arg = None\r\n else:\r\n last_arg = params[1]\r\n params = params[0].split(None)\r\n if last_arg != None:\r\n params.append(last_arg)\r\n return params", "def __parse_full_path(path):\n dir = path[:path.rfind('/') + 1]\n name = path[path.rfind('/') + 1:]\n return dir, name", "def parse_path(path):\n if path == '/':\n return None, None, None\n paths = path[1:].split('/', 1)\n\n #Filter Empty strings\n paths = [p for p in paths if p != '']\n if len(paths) == 1:\n return paths[0], None, None\n else:\n file_hash, rest = paths\n paths = rest.split('/', 1)\n #Filter Empty strings\n paths = [p for p in paths if p != '']\n if len(paths) == 1:\n return file_hash, paths[0], None\n else:\n action, rest = paths\n return file_hash, action, rest", "def split_path(self, path):\n path = path.strip(\"/\")\n 
return path.split(\"/\") if len(path) > 0 else []", "def _urlparse_splitfragment(url):\r\n\r\n fpart = url.split(\"#\", 1)\r\n if len(fpart) == 2:\r\n fragment = fpart[1]\r\n else:\r\n fragment = \"\"\r\n\r\n return fpart[0], fragment", "def get_params(raw):\n parts = raw.split(\" \", 1)\n return None if len(parts) == 1 else parts[1]", "def _split_url(url):\n return url[1:].split('/')", "def split_path(path):\n #drop file extension\n filename = path.rsplit('.', 1)[0]\n #drop static/img/\n filename = filename[11:]\n return filename", "def split_path(path):\n\n if type(path) != str:\n return []\n\n # replace multiple occurrences of \"/\" with just one,\n # i.e. \"page1//page2///page3\" -> \"page1/page2/page3\"\n path = re.sub('/+', '/', path)\n path = path.split(\"/\") # form a list of path steps\n path = [x.lower() for x in path if x != \"\"] # filter out empty strings, convert to lowercase\n\n return path", "def pathComponents(path):\n parts = [p for p in path.split(os.path.sep) if p not in [\"\", \".\"]]\n return parts", "def _split_varpath(self, path):\n try:\n compname, varname = path.split('.', 1)\n except ValueError:\n return (None, self, path)\n\n t = self.get_trait(compname)\n if t and t.iotype:\n return (None, self, path)\n return (compname, getattr(self, compname), varname)", "def get_video_parts(video_path):\n parts = video_path.split(os.path.sep)\n #print(parts)\n filename = parts[-1]\n filename_no_ext = filename.split('.')[0]\n return filename_no_ext, filename#('video6514', 'video6514.mp4')", "def _split_url_string(query_string):\r\n parameters = parse_qs(to_utf8(query_string), keep_blank_values=True)\r\n for k, v in parameters.iteritems():\r\n parameters[k] = urllib.unquote(v[0])\r\n return parameters", "def split_path(path:str):\n if path is None or len(path) == 0:\n return '', '', ''\n path = sanitize_path(path)\n folder, filename = os.path.split(path)\n ext = ''\n if '.' 
in filename:\n filename, ext = os.path.splitext(filename)\n # handle double ext, like 'mode.pth.tar'\n filename, ext2 = os.path.splitext(filename)\n ext = ext2 + ext\n else:\n folder = os.path.join(folder, filename)\n filename = ''\n return folder, filename, ext", "def _remove_parameter(value: Optional[str]) -> Optional[str]:\n if value is None:\n return None\n\n return value.split(\";\")[0]", "def splitPath(self, path):\n return os.path.split(path)", "def parse_managed_path(path):\n fields = path.split(':', 1)\n return fields[0], fields[1]", "def explode(part):\n if isinstance(part, str):\n ans = []\n while len(part) > 0:\n parts = part.partition(\"/\")\n ans.append(parts[0])\n if parts[1] != \"\":\n ans.append(SLASH)\n part = parts[2]\n return ans\n\n return [part]", "def split_datastore_path(datastore_path):\n spl = datastore_path.split('[', 1)[1].split(']', 1)\n path = \"\"\n if len(spl) == 1:\n datastore_name = spl[0]\n else:\n datastore_name, path = spl\n return datastore_name, path.strip()", "def split_param(text: str, prefixes: Sequence[str], sep: str) -> tuple[str, str, str]:\n stripped = text.strip()\n if not prefixes:\n prefix = ''\n rest = stripped\n else:\n try:\n prefix = next(filter(stripped.startswith, prefixes))\n except StopIteration:\n prefix = ''\n rest = stripped\n else:\n rest = stripped.split(prefix, maxsplit=1)[1].strip()\n assert len(prefix) >= 1\n assert rest\n arg, part_sep, descr = rest.partition(sep.join((' ', ' ')))\n if not part_sep:\n if rest.endswith(sep):\n arg = rest[:-1]\n elif sep + ' ' in rest:\n arg, _, descr = rest.partition(sep + ' ')\n # if we hit neither then there is no '-' in text, possible case of '[prefix] foo'?\n return prefix, arg.strip(), descr.lstrip()", "def split_name(name):\n split_name = [x for x in name.split(\"/\") if x != '']\n bucket_name = split_name[0]\n key_path = \"/\".join(split_name[1:])\n return bucket_name, key_path", "def test_splitParamArgs(self):\n res = irc.ServerSupportedFeatures._splitParamArgs([\"A:1\", \"B:2\", \"C:\", \"D\"])\n self.assertEqual(res, [(\"A\", \"1\"), (\"B\", \"2\"), (\"C\", \"\"), (\"D\", \"\")])", "def split_params(self, params):\n\t\tindex = 0\n\t\tacc = ''\n\t\tret = [] #return value (is ret a bad name?)\n\t\twhile index < len(params):\n\t\t\tif params[index] == ',': #End of a parameter\n\t\t\t\tret.append(acc)\n\t\t\t\tacc = ''\n\t\t\telif params[index] == '(': #start of a type that is a function\n\t\t\t\tend = params.match_paren(index)\n\t\t\t\twhile index <= end: #so the commas in the function type\n\t\t\t\t\t# are disregarded\n\t\t\t\t\tacc += params[index]\n\t\t\t\t\tindex += 1\n\t\t\t\tcontinue #so index doesn't get incremented again\n\t\t\telse:\n\t\t\t\tacc += params[index]\n\t\t\tindex += 1\n\n\t\tif acc: #if they ended the list with a comma then acc would be ''\n\t\t\tret.append(acc) #parameters not ended with a comma,\n\t\t\t# acc last the last param\n\n\t\treturn ret", "def getParams(text):\n s = text.split('=')\n for i in range(len(s)):\n s[i] = s[i].strip()\n\n param = s[0]\n val = s[1]\n # Assume that there are two values only: (variable, value) pair\n assert len(s) == 2\n \n return (param, val)", "def _ExtractPathParamsFromRouteList(route_comps: Collection[str]) -> Set[str]:\n return set(filter(_IsPathParameter, route_comps))", "def get_path_parameters(parameters):\n\n param_list = []\n for param in parameters:\n if param['paramType'] == 'path':\n param_name = param['name']\n param_list.append('{0}={1}'.format(param_name, param_name))\n return param_list", "def pathify(urlpattern, 
**context):\n\n repl = lambda match: context[match.group(1)]\n path = re.sub(r':([a-z]+)', repl, urlpattern)\n return tuple(path[1:].split('/'))", "def first_path_segment(self, path):\n if not path:\n return None\n slashes = ''\n while path.startswith('/'):\n slashes += '/'\n path = path[1:]\n idx = path.find('/')\n if idx == -1:\n idx = len(path)\n return path, idx, slashes", "def split_path(path):\n parts = []\n path, end = os.path.split(path)\n while end:\n parts.append(end)\n path, end = os.path.split(path)\n\n if path:\n parts.append(path)\n parts.reverse()\n return parts", "def flatten_param(param):\n\n param = param.replace(']', '').replace('[', '_').replace('<','').replace('>','')\n if param.startswith('_'):\n param = param.replace('_', '', 1)\n\n return param", "def extract_path(path: str) -> str:\n return _RE_URL.sub(r'{\\1}', path)", "def split_params(param_string):\n\t#TODO: check for negatives i.e. alpha--1\n\tparts = param_string.split('_')\n\tparams = {}\n\n\tfor i in range(len(parts)):\n\t\tparam = split_items(parts[i])\n\t\tif len(param) < 2:\n\t\t\ttry:\n\t\t\t\tparts[i+1] = parts[i] + \"_\" + parts[i+1]\n\t\t\texcept:\n\t\t\t\tpass\n\t\t\tcontinue\n\t\telif len(param) == 2:\n\t\t\tparams[param[0]] = param[1]\n\t\telif len(param) == 3 and len(param[1]) == 0:\n\t\t\tparams[param[0]] = -param[2]\n\t\telse:\n\t\t\tparams[param[0]] = param[1:]\n\treturn params", "def extractparam(pardict, parsuffix):\n return next(v for k, v in pardict.items() if k.endswith(parsuffix))", "def get_url_path(url):\n return filter(lambda x: x!='', url.split('/'))", "def parse_url_path(url_path):\r\n\r\n m = re.match('^/([^/]+)/?$',url_path)\r\n if m:\r\n return (m.group(1),None)\r\n \r\n m = re.match('^/([^/]+)/(.+)$',url_path)\r\n if m:\r\n return (m.group(1),m.group(2).replace('%25','%'))\r\n \r\n return (None,None)", "def build_path_parameters(self):\n url_params = URL_PARAMS_PATTERN.findall(self.path)\n params = []\n\n for param in url_params:\n params.append({\n 'name': param,\n 'type': 'string',\n 'in': 'path',\n 'required': True\n })\n\n return params", "def _split(self, uri):\n if '/' in uri:\n return uri.split('/', 1)\n return [uri, None]", "def filenameSplit (p):\n\tfrom os.path import split as splitPath, splitdrive, splitext\n\t\n\tsplt = splitPath (p)\n\tdisk,dir_ = splitdrive(splt[0])\n\ttry:\n\t\tif disk[1] != \":\":\n\t\t\traise IndexError\n\texcept IndexError:\n\t\tdisk,dir_ = \"\", splt[0]\n\tname,ext = splitext(splt[1])\n\treturn disk,dir_,name,ext", "def split(value: str, sep: str = \":\") -> Tuple:\n left, _, right = value.partition(sep)\n return (left, right) if right else (None, left)", "def split_s3_path(url):\n\tparsed = urlparse (url)\n\tif not parsed.netloc or not parsed.path:\n\t\traise ValueError (\"bad s3 path {}\".format (url))\n\tbucket_name = parsed.netloc\n\ts3_path = parsed.path\n\t# Remove '/' at beginning of path.\n\tif s3_path.startswith (\"/\"):\n\t\ts3_path = s3_path[1:]\n\treturn bucket_name, s3_path", "def split(p):\n if not p:\n return []\n p = p.strip('/').split('/')\n return [] if p == [''] else p", "def filename_split(path):\n\tdirectory = os.path.dirname(path)\n\tfilename, extension = os.path.splitext(os.path.basename(path))\n\treturn directory, filename, extension", "def extract_params(url):\n params = url.split(\"downloads/\")\n\n file_id = \"\"\n recipient_id = \"\"\n security_hash = \"\"\n\n if len(params) > 0:\n [file_id, recipient_id, security_hash] = ['', '', '']\n\n if \"http\" in params[0]:\n parts = params[1].split('/')\n [file_id, 
security_hash] = parts\n else:\n if len(parts) > 2:\n # The url is similar to\n # https://www.wetransfer.com/downloads/XXXXXXXXXX/YYYYYYYYY/ZZZZZZZZ\n [file_id, recipient_id, security_hash] = params\n else:\n # The url is similar to https://www.wetransfer.com/downloads/XXXXXXXXXX/ZZZZZZZZ\n # In this case we have no recipient_id\n [file_id, security_hash] = parts\n else:\n print(\"no params\")\n\n return [file_id, recipient_id, security_hash]", "def split_scope_key(key: str) -> Tuple[Optional[str], str]:\n split_index = key.find('.')\n if split_index != -1:\n return key[:split_index], key[split_index + 1:]\n else:\n return None, key", "def splitdrive(path):\n relative = get_instance(path).relpath(path)\n drive = path.rsplit(relative, 1)[0]\n if drive and not drive[-2:] == '//':\n # Keep \"/\" tail side\n relative = '/' + relative\n drive = drive.rstrip('/')\n return drive, relative", "def _construct_url(parts):\n results = []\n last_idx = len(parts) - 1\n for n, part in enumerate(parts):\n if n > 0:\n part = part.lstrip('/')\n if n < last_idx:\n part = part.rstrip('/')\n if part:\n results.append(part)\n return '/'.join(results)", "def _split_key(cls, logical_key):\n if isinstance(logical_key, str):\n path = logical_key.split('/')\n elif isinstance(logical_key, (tuple, list)):\n path = logical_key\n else:\n raise TypeError('Invalid logical_key: %r' % logical_key)\n return path", "def path_name(self, path):\r\n ind = path.rfind(\"/\") + 1\r\n return (path[:ind], path[ind:])", "def split_path(self, path: str) -> List[str]:\n dirs = path.split('/')\n return list(filter(lambda x: x!='', dirs))", "def _split_parameters(self, parameters):\n if not parameters:\n return []\n return [parameter.strip() for parameter in parameters.split(', ')]", "def parse_query_param(url, param):\n\n try:\n return parse.parse_qs(parse.urlparse(url).query)[param][0]\n except:\n return None", "def split_s3_path(url):\n parsed = urlparse(url)\n if not parsed.netloc or not parsed.path:\n raise ValueError(\"bad s3 path {}\".format(url))\n bucket_name = parsed.netloc\n s3_path = parsed.path\n # Remove '/' at beginning of path.\n if s3_path.startswith(\"/\"):\n s3_path = s3_path[1:]\n return bucket_name, s3_path", "def split_s3_path(url):\n parsed = urlparse(url)\n if not parsed.netloc or not parsed.path:\n raise ValueError(\"bad s3 path {}\".format(url))\n bucket_name = parsed.netloc\n s3_path = parsed.path\n # Remove '/' at beginning of path.\n if s3_path.startswith(\"/\"):\n s3_path = s3_path[1:]\n return bucket_name, s3_path", "def _get_query_part(params: dict) -> str:\n params_cleaned = {k: v for k, v in params.items() if v is not None}\n return ('?' 
+ urlencode(params_cleaned, quote_via=quote, safe=\"/,\")) if params_cleaned else \"\"", "def test_splitParamArgsProcessor(self):\n res = irc.ServerSupportedFeatures._splitParamArgs(\n [\"A:1\", \"B:2\", \"C\"], irc._intOrDefault\n )\n self.assertEqual(res, [(\"A\", 1), (\"B\", 2), (\"C\", None)])", "def _reduce_path(path):\n return tuple([i % 2 != 0 and \n element or \n getattr(element, 'base_mapper', element) \n for i, element in enumerate(path)])", "def decode_fullpath(fullpath):\n fp = fullpath.split(\"/\")\n if len(fp) != 5:\n raise ValueError(\"fullpath: invalid format\")\n decoded = []\n for part in fp:\n decoded.append(unquote(part))\n return tuple(decoded)", "def delete_query_parameter(url, param_name):\n scheme, netloc, path, query_string, fragment = urlsplit(url)\n query_params = parse_qs(query_string)\n query_params.pop(param_name, None)\n new_query_string = urlencode(query_params, doseq=True)\n url = urlunsplit((scheme, netloc, path, new_query_string, fragment))\n\n return url", "def _split_term(term):\n if '*' in term:\n variable_and_parameter = term.split('*')\n variable_and_parameter = [label.strip()\n for label in variable_and_parameter]\n else:\n raise TermNotProduct(term)\n\n if len(variable_and_parameter) != 2:\n raise TermNotProduct(term)\n\n return variable_and_parameter", "def _get_path_parameters(self) -> Generator[Tuple[str, Type], None, None]:", "def split_path(full_path, root_path):\n root_len = len(root_path)\n parsed_list = full_path[root_len+1:].split('/') \n \n return parsed_list", "def _split_name(name):\n name_split = name.split('_view_')\n view_num = None\n if(len(name_split) > 1):\n view_num = int(name_split[1])\n optimizer_key = ''\n fp16_key = ''\n if name_split[0].startswith('Moment_1'):\n optimizer_key = 'Moment_1_'\n elif name_split[0].startswith('Moment_2'):\n optimizer_key = 'Moment_2_'\n elif name_split[0].startswith('Update_Count'):\n optimizer_key = 'Update_Count_'\n elif name_split[0].endswith('_fp16'):\n fp16_key = '_fp16'\n param_name = name_split[0]\n if optimizer_key != '':\n param_name = param_name.split(optimizer_key)[1]\n param_name = param_name.split('_fp16')[0]\n return param_name, optimizer_key, view_num, fp16_key", "def split_token_in_parts(token):\n result = []\n current = []\n for part in token + (':', ):\n if part == ':':\n if current:\n result.append(tuple(current))\n current = []\n else:\n current.append(part)\n\n return result", "def extract_params(self, fname):\n return re.findall(self.regexp_params, os.path.basename(fname))", "def split_gcs_uri(gcs_uri):\n m = GCS_REGEX.match(gcs_uri)\n bucket = m.group(1)\n path = \"\"\n if m.group(2):\n path = m.group(2).lstrip(\"/\")\n return bucket, path", "def spath_stripoptions(spath):\n l = [comp.split(\"?\", 1)[0] for comp in spath.split(\"/\")]\n return \"/\".join(l)", "def split_path(s):\n dirname, filename = os.path.split(s)\n fname_noext, ext = os.path.splitext(filename)\n levels = dirname.strip('/').split(os.path.sep)[2:][-2:]\n return PATH_SPLIT.split(' '.join(levels + [fname_noext]))", "def split_extended_slug(slug):\n if not slug:\n return None, None, 0, 0\n\n parts = slug.rsplit('/')\n\n if len(parts) == 1:\n return parts[0], None, 0, 0\n elif len(parts) == 2:\n return parts[0], parts[1], 0, 0\n\n build_id, sep, job_id = parts[2].partition('.')\n build_id = int(build_id)\n if job_id:\n job_id = int(job_id)\n\n return parts[0], parts[1], build_id, job_id", "def get_video_parts(video_path):\n parts = video_path.split(os.path.sep)\n filename = parts[1]\n filename_no_ext = 
filename.split('.')[0]\n train_or_test = parts[0]\n\n return train_or_test, filename_no_ext, filename", "def split(path):\r\n if path.lower().startswith(\"smb://\"):\r\n if '/' not in path[6:]:\r\n path = path.replace(\"smb://\", \"smb:///\", 1)\r\n return path.rsplit('/', 1)\r\n else:\r\n return os.path.split(path)", "def spilt_path(unclean_path_to_file):\n if os.path.exists(unclean_path_to_file) == True:\n return os.path.split(unclean_path_to_file)\n else:\n return None, unclean_path_to_file", "def split_filename(path):\n filename = os.path.basename(path)\n name, extension = os.path.splitext(filename)\n region = name.split('.')[0]\n\n return region, name, extension", "def parse_request(self, request: str) -> tuple:\n parts = request.split(b\" \")\n return parts[0], parts[1]", "def _pop_path_info(self, req):\n path = req.path_info\n if not path:\n return None\n while path.startswith('/'):\n path = path[1:]\n idx = path.find('/')\n if idx == -1:\n idx = len(path)\n r = path[:idx]\n req.path_info = path[idx:]\n return r", "def split_path(s):\n dirname, filename = os.path.split(s)\n fname_noext, ext = os.path.splitext(filename)\n for part in dirname.strip('/').split(os.path.sep)[2:][-2:] + [fname_noext]:\n for match in PATH_SPLIT.split(part):\n if match:\n yield match", "def split_gcs_uri(gcs_uri):\n m = GCS_REGEX.match(gcs_uri)\n bucket = m.group(1)\n path = \"\"\n if m.group(2):\n path = m.group(2).lstrip(\"/\")\n return bucket, path", "def fullsplit(path, result = None):\n if result is None:\n result = []\n head, tail = os.path.split(path)\n if head == '':\n return [tail] + result\n if head == path:\n return result\n return fullsplit(head, [tail] + result)" ]
[ "0.674751", "0.6523064", "0.6511854", "0.64421445", "0.641207", "0.6401571", "0.6344312", "0.63051647", "0.62601167", "0.6211556", "0.61409175", "0.61268425", "0.61259776", "0.6124089", "0.6106624", "0.6058917", "0.6055915", "0.6025726", "0.6003291", "0.599609", "0.59699416", "0.59637666", "0.59298307", "0.591867", "0.59165275", "0.5859101", "0.5788912", "0.5763644", "0.5762552", "0.5759214", "0.57545924", "0.5742557", "0.57206184", "0.5706762", "0.5694748", "0.5672325", "0.5670416", "0.56644875", "0.56268364", "0.56259716", "0.5618546", "0.5615407", "0.5605963", "0.5603483", "0.55994886", "0.5595558", "0.55917907", "0.5588601", "0.55878204", "0.5577197", "0.5555031", "0.55532444", "0.5547661", "0.5520004", "0.55108094", "0.549647", "0.5495817", "0.5485059", "0.54845726", "0.5474374", "0.54661536", "0.5462437", "0.5449634", "0.542903", "0.5426016", "0.54232407", "0.5413285", "0.5385198", "0.5351698", "0.5344377", "0.5335115", "0.5334213", "0.5322567", "0.5316221", "0.53152925", "0.53152925", "0.53106236", "0.5307478", "0.53056765", "0.5298274", "0.52903634", "0.5269507", "0.5244862", "0.52433413", "0.524103", "0.52196985", "0.520328", "0.5200611", "0.51881737", "0.5186471", "0.5186275", "0.5183829", "0.5178784", "0.5178025", "0.51641977", "0.51627123", "0.51596177", "0.5157682", "0.5157189", "0.51567" ]
0.8304237
0
Return regular expression pattern with given host for URL testing.
def safe_host_pattern(host):
    return "(?i)%s://%s%s(#%s)?" % \
        (_safe_scheme_pattern, host, _safe_path_pattern, _safe_fragment_pattern)
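A hedged sketch of exercising the returned pattern; the three `_safe_*` names are module-level constants that this row never defines, so the values below are illustrative stand-ins rather than the real fragments:

import re

# Stand-in fragment values (assumptions; the originals are not shown in this row).
_safe_scheme_pattern = r"https?"
_safe_path_pattern = r"(?:/[\w./%-]*)?"
_safe_fragment_pattern = r"[\w-]*"

pattern = safe_host_pattern(re.escape("example.com"))  # escape the host for regex use
assert re.match(pattern, "https://example.com/docs/index.html")
assert re.match(pattern, "HTTPS://EXAMPLE.COM")            # (?i) makes the match case-insensitive
assert not re.match(pattern, "https://evil.example.org/")  # different host does not match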
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def url(regex, view):\n return RegexPattern(regex, view)", "def get_hostmask_regex(mask):\n mask = re.escape(mask)\n mask = mask.replace(r'\\*', '.*')\n return re.compile(mask + '$', re.I)", "def _compile_audience_pattern(self, pattern):\n re_pattern = fnmatch.translate(pattern)\n if \"://\" not in pattern:\n re_pattern = \"[a-z]+://\" + re_pattern\n return re.compile(re_pattern)", "def test_get_proxied_url_2(self):\n test_urlpattern = URLPattern(url=\"platform.almanhal.com\")\n self.assertEqual(test_urlpattern.get_proxied_url, \"platform-almanhal-com\")", "def regex_pattern(self):\n regex_to_match = input(\"Enter the regex pattern you'd like to use> \")\n return regex_to_match", "def __init__(self, url_pattern):\n self._url_regex = re.compile(r'^%s$' % re.escape(url_pattern).replace('_', '([^/]+)'))", "def test_get_proxied_url_1(self):\n test_urlpattern = URLPattern(url=\"gale.com\")\n self.assertEqual(test_urlpattern.get_proxied_url, \"gale-com\")", "def _parse_host(host: str) -> str:\n urlparse_host = urlsplit(host).hostname\n if urlparse_host:\n # In this case, host = https://xx.cloud.databricks.com\n return urlparse_host\n else:\n # In this case, host = xx.cloud.databricks.com\n return host", "def get_ip_pattern(ip):\n return re.compile(ip.replace('.', '[.]'))", "def _match_url(self, _url):\n\n regex = re.compile(\n r'^(?:http|ftp)s?://' # http:// or https://\n r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\\.)+(?:[A-Z]{2,6}\\.?|[A-Z0-9-]{2,}\\.?)|' # domain\n r'localhost|' # localhost\n r'\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3})' # ...or ip\n r'(?::\\d+)?' # optional port\n r'(?:/?|[/?]\\S+)$', re.IGNORECASE)\n\n if re.match(regex, _url):\n return True\n else:\n return False", "def hostify_url(url):\n\tif url[0] == '/':\n\t\treturn HOST + url\n\telse:\n\t\treturn url", "def test_host(self):\n url = create_url(host=\"www.example.com\", scheme_no_ssl=\"http\")\n self.assertEqual(url, \"http://www.example.com\")", "def _get_re_from_pool(pool):\n no_mask = pool.rsplit('/', 1)[0]\n no_last_octet = no_mask.rsplit('.', 1)[0]\n regex = re.escape(no_last_octet) + r'\\.\\d{1,3}/\\d{1,2}\\s+blackhole'\n return regex", "def _match_hostname(url, condition, require_path=None, require_no_path=False):\n scheme, _, other = url.partition(\":\")\n if scheme not in (\n \"git\", # lxc-python2\n \"git+https\", # asyncssh\n \"http\",\n \"https\",\n \"svn\", # wsgiref\n ):\n return False\n\n if condition.startswith(\"http://\"):\n condition = condition[7:]\n\n hostname, _, path = condition.partition(\"/\")\n if \":\" in hostname:\n hostname = hostname.split(\":\", 1)[0]\n\n if \".\" not in other: # pragma: no cover\n return False # '/dev/' in http://www.reportlab.com/\n\n other = other.lstrip(\"/\")\n match_subdomains = hostname.startswith(\"*.\")\n if match_subdomains:\n hostname = hostname[2:]\n\n subdomain, other = other.split(\".\", 1)\n if subdomain in [\"www\"]:\n logger.debug(\"url {} subdomain www\".format(url))\n return False\n if not other.startswith(hostname):\n return None\n\n if require_path is None:\n require_path = not match_subdomains\n\n # Require at least a suffix\n other = other[len(hostname) :]\n other = other.lstrip(\"/\")\n if not other:\n if require_no_path:\n return True\n\n if require_path:\n logger.debug(\"url {} no path\".format(url))\n return False\n\n if path:\n if not other.startswith(path):\n logger.debug(\"url {} not path {}\".format(url, path))\n return False\n\n return True", "def from_regex(pattern:str) -> str:\n raise NotImplementedError()", "def get_host(url):\n parts 
= url.split('/')\n if url.startswith('http'):\n return parts[2]\n else:\n return parts[0]", "def test_parse_url_lowercase_host() -> None:\n assert indieauth._parse_url(\"http://ex.com/hello\").path == \"/hello\"\n assert indieauth._parse_url(\"http://EX.COM/hello\").hostname == \"ex.com\"\n\n parts = indieauth._parse_url(\"http://EX.COM:123/HELLO\")\n assert parts.netloc == \"ex.com:123\"\n assert parts.path == \"/HELLO\"", "def test_host_path(self):\n url = create_url(\n host=\"www.example.com\", path=\"path/to/resource\", scheme_no_ssl=\"http\"\n )\n self.assertEqual(url, \"http://www.example.com/path/to/resource\")", "def preprocess_url(self, request_url: str):\n if re.fullmatch('.*/+', request_url) is None:\n return request_url\n else:\n return re.compile(request_url.rstrip('/') + '/+')", "def preprocess_url(self, request_url: str):\n if re.fullmatch('.*/+', request_url) is None:\n return request_url\n else:\n return re.compile(request_url.rstrip('/') + '/+')", "def preprocess_url(self, request_url: str):\n if re.fullmatch('.*/+', request_url) is None:\n return request_url\n else:\n return re.compile(request_url.rstrip('/') + '/+')", "def preprocess_url(self, request_url: str):\n if re.fullmatch('.*/+', request_url) is None:\n return request_url\n else:\n return re.compile(request_url.rstrip('/') + '/+')", "def preprocess_url(self, request_url: str):\n if re.fullmatch('.*/+', request_url) is None:\n return request_url\n else:\n return re.compile(request_url.rstrip('/') + '/+')", "def preprocess_url(self, request_url: str):\n if re.fullmatch('.*/+', request_url) is None:\n return request_url\n else:\n return re.compile(request_url.rstrip('/') + '/+')", "def preprocess_url(self, request_url: str):\n if re.fullmatch('.*/+', request_url) is None:\n return request_url\n else:\n return re.compile(request_url.rstrip('/') + '/+')", "def get_host_name(url):\n return urlparse.urlparse(url)[1]", "def extract_api_name(url):\n host = RE_HOST.sub('\\\\1', url)\n return host", "def test_getHostnameFromURL(self):\n agent = txrecaptcha._getAgent(self.reactor, self.url)\n contextFactory = agent._contextFactory\n self.assertRegexpMatches(contextFactory.hostname,\n '.*www\\.example\\.com')", "def get_match_with_re(pattern, unknown):\n pattern, unknown = _check_params(pattern, unknown)\n regex = re.compile(pattern)\n if not regex.search(unknown):\n return False\n return True", "def _match(cls, url, **kwargs):\n return url.scheme.startswith('http')", "def test_address_host(self):\n url = create_url(address=\"www.example.com\", scheme_no_ssl=\"http\")\n self.assertEqual(url, \"http://www.example.com\")", "def make_regexp(ref):\n\n if not ref.startswith('^'):\n ref = '^' + ref\n if not ref.endswith('$'):\n ref += '$'\n ref = ref.replace('.', '\\.').replace('%', '.*')\n return ref", "def is_same_domain(host, pattern):\n if not pattern:\n return False\n\n pattern = pattern.lower()\n return (\n pattern[0] == \".\"\n and (host.endswith(pattern) or host == pattern[1:])\n or pattern == host\n )", "def _url(regex, view, kwargs=None, name=None, prefix='', decorators=None,\n pattern=RegexURLPattern, resolver=RegexURLResolver):\n if isinstance(view, (list,tuple)):\n # For include(...) processing.\n return resolver(regex, view[0], kwargs, *view[1:])\n else:\n if isinstance(view, six.string_types):\n if not view:\n raise ImproperlyConfigured('Empty URL pattern view name not permitted (for pattern %r)' % regex)\n if prefix:\n view = prefix + '.' 
+ view\n return pattern(regex, view, kwargs, name)", "def test_host_port(self):\n url = create_url(host=\"www.example.com\", port=8000, scheme_no_ssl=\"http\")\n self.assertEqual(url, \"http://www.example.com:8000\")", "def is_valid_host(host):\n host = host.encode('idna').lower()\n if not hasattr(is_valid_host, '_re'):\n is_valid_host._re = re.compile(r'^([0-9a-z][-\\w]*[0-9a-z]\\.)+[a-z0-9\\-]{2,15}$')\n return bool(is_valid_host._re.match(host))", "def getHost(self):\n host = self.url[self.host_head:self.host_tail]\n return host", "def _build_regex(path):\n re_list = ['^']\n var_list = list()\n is_var = False\n for v in _RE_ROUTE.split(path):\n if is_var:\n var_name = v[1:]\n var_list.append(var_name)\n re_list.append(r'(?P<%s>[^\\/]+)' % var_name)\n else:\n s = ''\n for ch in v:\n if ch in _LETTERS_DIGITS:\n s += ch\n else:\n s += '\\\\' + ch\n re_list.append(s)\n is_var = not is_var\n re_list.append('$')\n return ''.join(re_list)", "def test_get_host(self):\n pass", "def vhost_regex(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"vhost_regex\")", "def vhost_regex(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"vhost_regex\")", "def validate_host(self, host: str) -> bool:\n ip_address_regex = re.compile(r'^(?:(?:25[0-5]|2[0-4][0-9]|1[0-9][0-9]|[1-9]?[0-9])\\.){3}'\n r'(?:25[0-5]|2[0-4][0-9]|1[0-9][0-9]|[1-9]?[0-9])$')\n hostname_regex = re.compile(r'^(?:[a-z0-9](?:[a-z0-9-]{0,61}[a-z0-9])?\\.)+'\n r'[a-z0-9][a-z0-9-]{0,61}[a-z0-9]$')\n url_regex = re.compile(r'^(ldaps?)://'\n r'((?:[a-z0-9](?:[a-z0-9-]{0,61}[a-z0-9])?\\.)+'\n r'[a-z0-9][a-z0-9-]{0,61}[a-z0-9]):'\n r'([0-9]{1,5})$')\n if bool(ip_address_regex.match(host)):\n # using ipv4 address\n valid = True\n elif bool(hostname_regex.match(host)):\n # using a hostname address\n valid = True\n elif bool(url_regex.match(host)):\n # using host url address\n match = url_regex.match(host)\n proto = match.group(1)\n if proto == 'ldaps':\n self.server_use_ssl = True\n valid = True\n else:\n # unsupported host format\n valid = False\n return valid", "def validaURL(url: AnyStr) -> bool:\n\n return re.compile(patternURL).search(url) != None # Linea 1", "def compile(domain_rule):\n\n regex_parts = []\n\n def _build_regex(rule):\n for p_rule, p_var in parse_rule(rule):\n if p_rule:\n regex_parts.append(re.escape(p_rule))\n if p_var:\n regex_parts.append('(?P<%s>[^/]{1,})' % p_var)\n\n _build_regex(domain_rule)\n\n regex = r'^%s$' % (u''.join(regex_parts))\n _regex = re.compile(regex, re.UNICODE)\n print regex\n return _regex", "def test_address_host_port(self):\n url = create_url(address=\"www.example.com:8000\", scheme_no_ssl=\"http\")\n self.assertEqual(url, \"http://www.example.com:8000\")", "def match_url(url, domainlist):\n if not url:\n return False\n return match_host(url_split(url)[1], domainlist)", "def test_url_domain(self):\n assert ct.url_domain(\"http://www.google.com\") == \"google.com\"\n assert ct.url_domain(\"http://localhost\") == \"localhost\"\n assert ct.url_domain(\"http://192.168.1.19:5010\") == \"192.168.1.19\"", "def address_regex(self) -> Any:", "def validate_url(cls, url: str) -> Optional[Match[str]]:\n match = re.match(cls._VALID_URL, url)\n return match", "def __find_hostname(self, url):\n match = self.__REGEX_HOST.search(url)\n if match:\n return match.group(0)\n return None", "def _get_base_path_pattern(self):\n if self._base_path is not None:\n return '^%s' % re.escape(self._base_path)\n return None", "def vhost_regex(self) -> pulumi.Output[Optional[str]]:\n return 
pulumi.get(self, \"vhost_regex\")", "def regexp_predicate(value):\n return re.compile(value).match", "def regex(self) -> str:\n return pulumi.get(self, \"regex\")", "def regex(self) -> str:\n return pulumi.get(self, \"regex\")", "def regex(self) -> str:\n return pulumi.get(self, \"regex\")", "def matching_regex_pattern(self):\n if not self._pattern:\n # Match one or more words separated by whitespace\n word = \"[a-zA-Z0-9?,\\.\\-_!;:']+\"\n regex = \"(\\s+%s)+\" % word\n self._pattern = re.compile(regex)\n return self._pattern", "def _match_url(parsed_url: ParsedUrl, rules: Sequence[str]) -> Mapping[str, Any]:\n # We use the routing capabilities of werkzeug to match the URL path\n urls = werkzeug.routing.Map(\n [werkzeug.routing.Rule(rule) for rule in rules],\n strict_slashes=False,\n ).bind(parsed_url.hostname)\n try:\n _, components = urls.match(parsed_url.path)\n except werkzeug.exceptions.HTTPException:\n raise ValueError(f\"The provided {parsed_url.hostname} URL is not valid\")\n return components", "def test_password_masker_mask_db_url(monkeypatch, tmp_path):\n # PostgreSQL\n # default\n db_hostname = os.getenv(\"GE_TEST_LOCAL_DB_HOSTNAME\", \"localhost\")\n assert (\n PasswordMasker.mask_db_url(\n f\"postgresql://scott:tiger@{db_hostname}:65432/mydatabase\"\n )\n == f\"postgresql://scott:***@{db_hostname}:65432/mydatabase\"\n )\n assert (\n PasswordMasker.mask_db_url(\n f\"postgresql://scott:tiger@{db_hostname}:65432/mydatabase\",\n use_urlparse=True,\n )\n == f\"postgresql://scott:***@{db_hostname}:65432/mydatabase\"\n )\n # missing port number, using urlparse\n assert (\n PasswordMasker.mask_db_url(\n f\"postgresql://scott:tiger@{db_hostname}/mydatabase\", use_urlparse=True\n )\n == f\"postgresql://scott:***@{db_hostname}/mydatabase\"\n )\n\n # psycopg2\n assert (\n PasswordMasker.mask_db_url(\n f\"postgresql+psycopg2://scott:tiger@{db_hostname}:65432/mydatabase\"\n )\n == f\"postgresql+psycopg2://scott:***@{db_hostname}:65432/mydatabase\"\n )\n assert (\n PasswordMasker.mask_db_url(\n f\"postgresql+psycopg2://scott:tiger@{db_hostname}:65432/mydatabase\",\n use_urlparse=True,\n )\n == f\"postgresql+psycopg2://scott:***@{db_hostname}:65432/mydatabase\"\n )\n\n # pg8000 (if installed in test environment)\n try:\n assert (\n PasswordMasker.mask_db_url(\n f\"postgresql+pg8000://scott:tiger@{db_hostname}:65432/mydatabase\"\n )\n == f\"postgresql+pg8000://scott:***@{db_hostname}:65432/mydatabase\"\n )\n except ModuleNotFoundError:\n pass\n assert (\n PasswordMasker.mask_db_url(\n f\"postgresql+pg8000://scott:tiger@{db_hostname}:65432/mydatabase\",\n use_urlparse=True,\n )\n == f\"postgresql+pg8000://scott:***@{db_hostname}:65432/mydatabase\"\n )\n\n # MySQL\n # default (if installed in test environment)\n try:\n assert (\n PasswordMasker.mask_db_url(f\"mysql://scott:tiger@{db_hostname}:65432/foo\")\n == f\"mysql://scott:***@{db_hostname}:65432/foo\"\n )\n except ModuleNotFoundError:\n pass\n\n assert (\n PasswordMasker.mask_db_url(\n f\"mysql://scott:tiger@{db_hostname}:65432/foo\", use_urlparse=True\n )\n == f\"mysql://scott:***@{db_hostname}:65432/foo\"\n )\n\n # mysqlclient (a maintained fork of MySQL-Python) (if installed in test environment)\n try:\n assert (\n PasswordMasker.mask_db_url(\n f\"mysql+mysqldb://scott:tiger@{db_hostname}:65432/foo\"\n )\n == f\"mysql+mysqldb://scott:***@{db_hostname}:65432/foo\"\n )\n except ModuleNotFoundError:\n pass\n assert (\n PasswordMasker.mask_db_url(\n f\"mysql+mysqldb://scott:tiger@{db_hostname}:65432/foo\", use_urlparse=True\n )\n == 
f\"mysql+mysqldb://scott:***@{db_hostname}:65432/foo\"\n )\n\n # PyMySQL\n assert (\n PasswordMasker.mask_db_url(\n f\"mysql+pymysql://scott:tiger@{db_hostname}:65432/foo\"\n )\n == f\"mysql+pymysql://scott:***@{db_hostname}:65432/foo\"\n )\n assert (\n PasswordMasker.mask_db_url(\n f\"mysql+pymysql://scott:tiger@{db_hostname}:65432/foo\", use_urlparse=True\n )\n == f\"mysql+pymysql://scott:***@{db_hostname}:65432/foo\"\n )\n\n # Oracle (if installed in test environment)\n url_host = os.getenv(\"GE_TEST_LOCALHOST_URL\", \"127.0.0.1\")\n try:\n assert (\n PasswordMasker.mask_db_url(f\"oracle://scott:tiger@{url_host}:1521/sidname\")\n == f\"oracle://scott:***@{url_host}:1521/sidname\"\n )\n except ModuleNotFoundError:\n pass\n\n assert (\n PasswordMasker.mask_db_url(\n f\"oracle://scott:tiger@{url_host}:1521/sidname\", use_urlparse=True\n )\n == f\"oracle://scott:***@{url_host}:1521/sidname\"\n )\n\n try:\n assert (\n PasswordMasker.mask_db_url(\"oracle+cx_oracle://scott:tiger@tnsname\")\n == \"oracle+cx_oracle://scott:***@tnsname\"\n )\n except ModuleNotFoundError:\n pass\n assert (\n PasswordMasker.mask_db_url(\n \"oracle+cx_oracle://scott:tiger@tnsname\", use_urlparse=True\n )\n == \"oracle+cx_oracle://scott:***@tnsname\"\n )\n\n # Microsoft SQL Server\n # pyodbc\n assert (\n PasswordMasker.mask_db_url(\"mssql+pyodbc://scott:tiger@mydsn\")\n == \"mssql+pyodbc://scott:***@mydsn\"\n )\n assert (\n PasswordMasker.mask_db_url(\n \"mssql+pyodbc://scott:tiger@mydsn\", use_urlparse=True\n )\n == \"mssql+pyodbc://scott:***@mydsn\"\n )\n\n # pymssql (if installed in test environment)\n try:\n assert (\n PasswordMasker.mask_db_url(\n f\"mssql+pymssql://scott:tiger@{db_hostname}:12345/dbname\"\n )\n == f\"mssql+pymssql://scott:***@{db_hostname}:12345/dbname\"\n )\n except ModuleNotFoundError:\n pass\n assert (\n PasswordMasker.mask_db_url(\n f\"mssql+pymssql://scott:tiger@{db_hostname}:12345/dbname\", use_urlparse=True\n )\n == f\"mssql+pymssql://scott:***@{db_hostname}:12345/dbname\"\n )\n\n # SQLite\n # relative path\n temp_dir = tmp_path / \"sqllite_tests\"\n temp_dir.mkdir()\n monkeypatch.chdir(temp_dir)\n assert (\n PasswordMasker.mask_db_url(f\"sqlite:///something/foo.db\")\n == f\"sqlite:///something/foo.db\"\n )\n assert (\n PasswordMasker.mask_db_url(f\"sqlite:///something/foo.db\", use_urlparse=True)\n == f\"sqlite:///something/foo.db\"\n )\n\n # absolute path\n # Unix/Mac - 4 initial slashes in total\n assert (\n PasswordMasker.mask_db_url(\"sqlite:////absolute/path/to/foo.db\")\n == \"sqlite:////absolute/path/to/foo.db\"\n )\n assert (\n PasswordMasker.mask_db_url(\n \"sqlite:////absolute/path/to/foo.db\", use_urlparse=True\n )\n == \"sqlite:////absolute/path/to/foo.db\"\n )\n\n # Windows\n assert (\n PasswordMasker.mask_db_url(\"sqlite:///C:\\\\path\\\\to\\\\foo.db\")\n == \"sqlite:///C:\\\\path\\\\to\\\\foo.db\"\n )\n assert (\n PasswordMasker.mask_db_url(\"sqlite:///C:\\\\path\\\\to\\\\foo.db\", use_urlparse=True)\n == \"sqlite:///C:\\\\path\\\\to\\\\foo.db\"\n )\n\n # Windows alternative using raw string\n assert (\n PasswordMasker.mask_db_url(r\"sqlite:///C:\\path\\to\\foo.db\")\n == r\"sqlite:///C:\\path\\to\\foo.db\"\n )\n assert (\n PasswordMasker.mask_db_url(r\"sqlite:///C:\\path\\to\\foo.db\", use_urlparse=True)\n == r\"sqlite:///C:\\path\\to\\foo.db\"\n )\n\n # in-memory\n assert PasswordMasker.mask_db_url(\"sqlite://\") == \"sqlite://\"\n assert PasswordMasker.mask_db_url(\"sqlite://\", use_urlparse=True) == \"sqlite://\"", "def _get_host(self, scheme='', 
hostname_only=False):\n host = self.host or ''\n # urlparse requires '//' to be provided if scheme is not specified\n original_parsed = urlparse.urlsplit(host)\n if (not original_parsed.scheme and not host.startswith('//')) or original_parsed.hostname is None:\n host = '%s://%s' % (scheme, host) if scheme else '//%s' % host\n parsed = urlparse.urlsplit(host)\n\n if hostname_only:\n return parsed.hostname\n\n try:\n port = parsed.port or self.port\n except ValueError:\n port = self.port\n netloc = parsed.netloc if port is None else '%s:%s' % (parsed.hostname, port)\n\n url_components = list(parsed)\n url_components[1] = netloc\n ret = urlparse.urlunsplit(url_components)\n return ret.lstrip('/')", "def resolve_domain(host: str) -> str:\n parts = host.split('.')[-2:]\n return ''.join(parts)", "def lookup_pattern(name):\n\treturn _registered_patterns[name]", "def url(self, **kwargs) -> str:\n return self._pattern.format(**kwargs)", "def matches_host(self, host: str, requires_data_uri: bool = False) -> bool:\n return (\n self.url\n and self.site_host\n and self.site_host in host\n and (self.data_uri if requires_data_uri else True)\n )", "def get_shortcut_regexp():\n\n global _SHORTCUT_REGEXP\n # Build shortcut regexp once\n if _SHORTCUT_REGEXP is None:\n values = shortcuts.SHORTCUTS.keys()\n values = sorted(values, key=len, reverse=True)\n pattern = u'(?<=\\s)(' + u'|'.join(re.escape(u) for u in values) + u')((?=(\\s|\\)|\\.))|$)'\n _SHORTCUT_REGEXP = re.compile(pattern)\n return _SHORTCUT_REGEXP", "def __find_protocol(self, url):\n match = self.__REGEX_SCHEMA.search(url)\n if match:\n protocol = match.group(0).split(':')[0]\n return protocol\n return None", "def host_to_site(host):\n\n if host:\n # www.facebook.com m.facebook.com l.facebook.com lm.facebook.com\n if host.endswith('facebook.com'):\n return 'Facebook'\n # youtu.be www.youtube.com youtube.com m.youtube.com\n elif host.endswith('youtube.com') or host == 'youtu.be':\n return 'YouTube'\n # old.reddit.com www.reddit.com\n elif host.endswith('reddit.com'):\n return 'Reddit'\n # t.co twitter.com\n elif host.endswith('twitter.com') or host == 't.co':\n return 'Twitter'\n elif host.endswith('tiktok.com'):\n return 'TikTok'\n return None", "def api_host_port(hostport):\n if hostport is None:\n return (None, None)\n formatted_host = __host_per_rfc_2732(hostport)\n # The \"bogus\" is to make it look like a real, parseable URL.\n parsed = urlparse(\"bogus://%s\" % (formatted_host)) \n return (None if parsed.hostname == \"none\" else parsed.hostname,\n parsed.port)", "def getHost():", "def getHost():", "def get_hostname():\n return re.split(\"\\.\", env.host)[0]", "def test_url_pattern(self):\n\t\turl = URLFilter()\n\t\turl.set_limit(\"goog*\")\n\t\tself.assertTrue(url.check(Object(get_urls=lambda: ['google.com'])))", "def getPattern(self):\n return self.pattern", "def _MakeRE(regex_str):\n return re.compile(regex_str.format(**SHORTHAND))", "def _compile_fnmatch(pattern: str) -> re.Pattern:\n return re.compile(translate(pattern))", "def api_url_hostport(hostport=None,\n path=None,\n protocol=None\n ):\n (host, port) = api_host_port(hostport)\n return api_url(host=host, port=port, path=path, protocol=protocol)", "def build_regex(self) -> typing.Pattern:\n self._regex = re.compile(\"|\".join(sorted(self._includes)))\n return self._regex", "def placeholder_to_regex(placeholder,encloser='%',matcher='(.+)'):\n pattern = placeholder\n pattern = pattern.replace('\\\\','/')\n if pattern.count('%') == 0 or pattern.count('%') % 2 != 0:\n return '',[]\n 
else:\n borders = pattern.split(encloser)[::2]\n fields = pattern.split(encloser)[1::2]\n for field in fields:\n pattern = pattern.replace(encloser+field+encloser, matcher, 1)\n pattern = pattern.replace('/','\\\\/')\n return pattern,fields", "def get_hostname (surl):\n if str(surl).find('srm://'):\n surl = surl [str(surl).find('srm://'):]\n\n reg = re.search('[^:]+:(/)*([^:/]+)(:[0-9]+)?(/)?.*', surl)\n host = ''\n try:\n host = reg.group(2)\n except:\n pass\n \n return host", "def matchWildcardUrls(url, listOfUrls):\n if not url or not listOfUrls:\n return None\n pattern = re.compile('^[a-zA-Z][+a-zA-Z0-9.-]*:.*')\n if not pattern.search(str(url)) and not url.startswith('//'):\n url = '//' + url\n cspUrl = urlparse(str(url)) \n host = cspUrl.netloc.lower() or \"\"\n hostHasWildcard = host.startswith(\"*.\")\n wildcardFreeHost = re.sub(\"^\\*\", \"\", host, flags=re.IGNORECASE)\n path = cspUrl.path or ''\n hasPath = len(cspUrl.path) > 0 \n\n for url2 in listOfUrls:\n url = urlparse(str(url2))\n domain = url.netloc.lower() or \"\"\n domainHasWildCard = domain.startswith(\"*.\")\n if (not domainHasWildCard):\n if (not domain.endswith(wildcardFreeHost) ): \n continue\n if (not hostHasWildcard and host != domain):\n continue\n else:\n domainparts = list(reversed(domain.split('.')))\n hostparts = list(reversed(host.split('.')))\n stop = False\n domainlen = len(domain.split('.'))\n hostlen = len(host.split('.'))\n \n for idx, domainpart in enumerate(domainparts):\n if idx < hostlen:\n hostpart = hostparts[idx]\n if hostpart != domainpart and (domainpart != '*' and hostpart != '*'):\n stop = True\n if stop:\n continue\n if (hasPath):\n if (path.endswith('/')): \n if (not url.path.startswith(path)):\n continue\n elif (url.path != path):\n continue\n\n return url\n\n return None", "def test_build_path_pattern():\n\n assert (build_path_pattern('/foo') ==\n re.compile('^/foo/?$'))\n assert (build_path_pattern('/foo/{u_id}') ==\n re.compile('^/foo/(?P<u_id>[\\w\\-]+)/?$'))\n assert (build_path_pattern('/foo/{u_id}/aaa/{n_id}') ==\n re.compile('^/foo/(?P<u_id>[\\w\\-]+)/aaa/(?P<n_id>[\\w\\-]+)/?$'))", "def _get_regex_for_pattern(self, pattern: bytes):\n # TODO: should blacksheep support \":\" in routes (using escape chars)?\n for c in _escaped_chars:\n if c in pattern:\n pattern = pattern.replace(c, b\"\\\\\" + c)\n\n if b\"*\" in pattern:\n # throw exception if a star appears more than once\n if pattern.count(b\"*\") > 1:\n raise RouteException(\n \"A route pattern cannot contain more than one star sign *. \"\n \"Multiple star signs are not supported.\"\n )\n\n if b\"/*\" in pattern:\n pattern = _route_all_rx.sub(br\"?(?P<tail>.*)\", pattern)\n else:\n pattern = _route_all_rx.sub(br\"(?P<tail>.*)\", pattern)\n\n # support for < > patterns, e.g. /api/cats/<cat_id>\n # but also: /api/cats/<int:cat_id> or /api/cats/<uuid:cat_id> for more\n # granular control on the generated pattern\n if b\"<\" in pattern:\n pattern = _angle_bracket_route_param_rx.sub(\n self._handle_rich_parameter, pattern\n )\n\n # support for mustache patterns, e.g. 
/api/cats/{cat_id}\n # but also: /api/cats/{int:cat_id} or /api/cats/{uuid:cat_id} for more\n # granular control on the generated pattern\n if b\"{\" in pattern:\n pattern = _mustache_route_param_rx.sub(self._handle_rich_parameter, pattern)\n\n # route parameters defined using /:name syntax\n if b\"/:\" in pattern:\n pattern = _route_param_rx.sub(br\"/(?P<\\1>[^\\/]+)\", pattern)\n\n # NB: following code is just to throw user friendly errors;\n # regex would fail anyway, but with a more complex message\n # 'sre_constants.error: redefinition of group name'\n # we only return param names as they are useful for other things\n param_names = []\n for p in _named_group_rx.finditer(pattern):\n param_name = p.group(1)\n if param_name in param_names:\n raise ValueError(\n f\"cannot have multiple parameters with name: \" f\"{param_name}\"\n )\n\n param_names.append(param_name)\n\n if len(pattern) > 1 and not pattern.endswith(b\"*\"):\n # NB: the /? at the end ensures that a route is matched both with\n # a trailing slash or not\n pattern = pattern + b\"/?\"\n return re.compile(b\"^\" + pattern + b\"$\", re.IGNORECASE), param_names", "def compile_regex(self, fmt, query):\n return re.compile(fmt.format(\n query.pattern.replace('.', '\\.').replace('*', '[^\\.]*').replace(\n '{', '(').replace(',', '|').replace('}', ')')\n ))", "def reflect(host, port):\n global URL\n URL = URL.replace('<ip>', host).replace('<port>', str(port))", "def BuildServerUrl(host_):\n assert host_\n scheme_port = GetApacheSchemePortFromListen()\n if not scheme_port:\n return None\n\n (scheme, port) = scheme_port[0], scheme_port[1]\n assert scheme\n assert port\n\n server_url = \"{0}://{1}\".format(scheme, host_)\n if port and port != \"80\" and port != \"443\":\n server_url += \":{0}\".format(port)\n\n return server_url", "def split_type_host(url):\n type, rest = urllib.splittype(url)\n host, selector = urllib.splithost(rest)\n return type, host, selector", "def test_url_parse():\n host, port, path = parse_url(\"http://localhost:8000/form.html\")\n assert host == \"localhost\"\n assert port == \"8000\"\n assert path == \"/form.html\"", "def to_p(url):\n if type(url) != str:\n return\n match = config.p_re.match(url.strip())\n if match:\n return match.group(config.p_re_group_id)", "def __init__(self, pattern):\n self._pattern = re.compile(pattern)", "def _parse_pattern(cls, pattern, default_pattern: str = \"*\") -> Pattern:\n pattern = pattern or default_pattern\n if pattern is None:\n return None\n\n return Pattern(pattern)", "def get_regex(self):\n return r\".\"", "def integrated_address_regex(self) -> Any:", "def get_values_re():\n\n regex = r\"([1-9]|1[0-9]) \" +\\\n r\"(localhost) \" +\\\n r\"(\\d{1,5}) \" +\\\n r\"(\\d{1,5}) \" +\\\n r\"(YES|NO) \" +\\\n r\"(YES|NO) \" +\\\n r\"(YES|NO) \" + \\\n r\"(YES|NO)\"\n\n regex = regex.split(\" \")\n\n return regex", "def RegEx(self):\n return self._regex", "def get_from_host(cls, host, silent=False):\n if cls.search([], count=True) == 1:\n return cls.search([])[0]\n try:\n website, = cls.search([('name', '=', host)])\n except ValueError:\n if not silent:\n raise WebsiteNotFound()\n else:\n return website", "def host(self):\n if self.url.startswith(\"dns:\"):\n return self.url[4:]\n else:\n return urlparse(self.url).hostname", "def _patternToRegEx(self,pattern):\n if (pattern == \"*\"):\n # special case that matches anything\n regex = \".*?\"\n else:\n regex = pattern\n if (regex.find(\".\") >= 0):\n regex = regex.replace(\".\", \"\\.\")\n #endIf\n \n asteriskIndex = 
regex.find(\"*\")\n if (asteriskIndex < 0):\n # no wildcard in pattern\n regex = \"%s$\" % regex\n elif (asteriskIndex + 1 != len(regex)):\n raise TraceSpecificationException(\"Invalid entity pattern: %s. A wildcard character may only be used to terminate a pattern.\" % pattern)\n else:\n # remove * and add \".*?\"\n regex = \"%s.*?\" % regex[:-1]\n #endIf\n #endIf\n return regex", "def GetGerritFetchUrl(host):\n return 'https://%s/' % host", "def get_regex(self, strict=True):\n _ctx = construction.Context(strict=strict)\n return self._get_regex(_ctx)", "def match_url(self, url):\n pass" ]
[ "0.6459515", "0.6248357", "0.5812781", "0.57181805", "0.5674186", "0.56623316", "0.5633071", "0.5583016", "0.55545926", "0.55380994", "0.55205655", "0.5495875", "0.5463588", "0.5458524", "0.5437907", "0.5387938", "0.5373938", "0.5351687", "0.53417546", "0.53417546", "0.53417546", "0.53417546", "0.53417546", "0.53417546", "0.53417546", "0.5337709", "0.53350693", "0.52986205", "0.52422935", "0.51973855", "0.5177787", "0.5175534", "0.5172263", "0.51602453", "0.51505816", "0.5150456", "0.51431537", "0.5141193", "0.5111301", "0.51064855", "0.51064855", "0.51047736", "0.50919104", "0.50828445", "0.5079389", "0.5071875", "0.50703895", "0.5066858", "0.5062945", "0.5059099", "0.5057816", "0.50296766", "0.5024032", "0.5010802", "0.5010802", "0.5010802", "0.49988237", "0.4962001", "0.495321", "0.49503684", "0.49220738", "0.49205652", "0.49009597", "0.4898209", "0.48974803", "0.4884852", "0.48768783", "0.4870495", "0.4866979", "0.4866979", "0.4840487", "0.48392382", "0.48373726", "0.48280805", "0.48044357", "0.4804091", "0.4794974", "0.47915116", "0.47890463", "0.47819674", "0.47780767", "0.4767891", "0.4763563", "0.47527078", "0.47508806", "0.47492304", "0.47440708", "0.4733821", "0.47333062", "0.47287133", "0.47284514", "0.47248307", "0.47230178", "0.47214067", "0.47196212", "0.47076884", "0.4704889", "0.46980435", "0.4696272", "0.46883276" ]
0.7322857
0
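The negatives above mix two recurring host-extraction idioms: naive '/'-splitting (the fragment that opens this batch) and urlparse-based lookup (as in get_host_name and _get_host). A short contrast sketch follows, with hypothetical inputs, using the Python 2 urlparse module those corpus entries use; it is an illustration, not part of the corpus:

import urlparse

def host_by_split(url):
    # Mirrors the split-based fragment above: crude, keeps port and case.
    parts = url.split('/')
    return parts[2] if url.startswith('http') else parts[0]

def host_by_urlparse(url):
    # Mirrors the urlparse-based entries: normalised hostname, port stripped.
    return urlparse.urlparse(url).hostname

# host_by_split('http://EX.com:123/x')    -> 'EX.com:123'
# host_by_urlparse('http://EX.com:123/x') -> 'ex.com'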
Parse a query given as a string argument.
def parse_qsl(qs, keep_blank_values=0, strict_parsing=0): pairs = [] name_value_amp = qs.split('&') for name_value in name_value_amp: if ';' in name_value: pairs.extend([x, ';'] for x in name_value.split(';')) pairs[-1][1] = '&' else: pairs.append([name_value, '&']) pairs[-1][1] = '' r = [] for name_value, sep in pairs: nv = name_value.split('=', 1) if len(nv) != 2: if strict_parsing: raise ValueError("bad query field: %r" % name_value) elif len(nv) == 1: # None value indicates missing equal sign nv = (nv[0], None) else: continue if nv[1] or keep_blank_values: name = urllib.unquote(nv[0].replace('+', ' ')) if nv[1]: value = urllib.unquote(nv[1].replace('+', ' ')) else: value = nv[1] r.append((name, value, sep)) return r
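A minimal usage sketch of the parse_qsl variant above (hypothetical inputs; results traced from the code). Unlike the stdlib version, it returns 3-tuples that keep each pair's trailing separator, so the source query string can be reassembled exactly:

import urllib  # parse_qsl above relies on urllib.unquote (Python 2)

# Mixed ';' and '&' separators are preserved per pair; the last is ''.
parse_qsl('a=1;b=2&c=3')
# -> [('a', '1', ';'), ('b', '2', '&'), ('c', '3', '')]

# Pairs with no '=' are dropped unless keep_blank_values is set.
parse_qsl('a=1&flag', keep_blank_values=1)
# -> [('a', '1', '&'), ('flag', None, '')]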
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parse_query(query):\n qlist = []\n splitted = query.split(\"&\")\n for entry in splitted:\n cmd, arg = entry.split(\"=\")\n qlist.append((cmd, arg))\n return qlist", "def _parsed_query(self, query_string):\r\n return urlparse(self.runtime.handler_url(self.block, 'handler', query=query_string)).query", "def parse_saildocs_query(query_str):\n # remove any trailing white space\n query_str = query_str.strip()\n command_split = query_str.split(' ', 1)\n if len(command_split) != 2:\n raise BadQuery(\"Expected a space between the command and the body\")\n command, body = command_split\n opts_args = filter(len, body.split(' '))\n if len(opts_args) > 1:\n args = opts_args[0]\n opts = opts_args[1:]\n else:\n args = opts_args[0]\n opts = None\n # Check if the command is supported\n if not command.lower() in _supported_commands:\n raise BadQuery(\"Unsupported command %s, only %s are supported\"\n % (command.lower(), ','.join(_supported_commands)))\n\n if command.lower() == 'send':\n query = parse_send_request(args)\n else:\n raise BadQuery(\"Unknown command handler.\")\n return query", "def parse(args, query):\n\n global query_type\n\n # Deal first with requests for definition or pronunciation\n # 1. Make the code easier to read\n first_word = args[0]\n second_word = args[1] if len(args) > 1 else \"\"\n third_word = args[2] if len(args) > 2 else \"\"\n fourth_word = args[3] if len(args) > 3 else \"\"\n # we use the teranary operator (this if ____ else that) to avoid an IndexError\n # IndexError would be raised if we tried to access the second element (args[1])\n # in a list which contained only one item (eg args == [\"lonely\"])\n # the teranary operator (in most languages it looks like \"____ ? this : that\")\n # returns \"this\" when the if is true and \"that\" when the if is false\n # meaning, if len(args) is NOT greater than 1, second_word == \"\"\n\n # 2. Check for keywords in the list of arguments\n # Example: nostrum defined\n # Example: pronunciation of otolaryngology\n if first_word == \"define\":\n # e.g. if the first word is \"define\" we'll add the second word to the query\n query = {\"sp\": second_word, \"md\": \"d\", \"max\": \"1\", \"qe\": \"sp\", \"ipa\": \"1\"}\n # the query is a dictionary of GET parameters for the http request, eg\n # https://api.datamuse.com/words?max=1&sp=SECOND_WORD_HERE&qe=sp&md=d&ipa=1\n elif second_word == \"defined\" or second_word == \"definition\":\n query = {\"sp\": first_word, \"md\": \"d\", \"max\": \"1\", \"qe\": \"sp\", \"ipa\": \"1\"}\n # this one uses string interpolation (the f\"\" stuff)\n elif f\"{second_word} {third_word}\" == \"means what\":\n query = {\"sp\": first_word, \"md\": \"d\", \"max\": \"1\", \"qe\": \"sp\", \"ipa\": \"1\"}\n elif f\"{second_word} {third_word} {fourth_word}\" == \"is said how\":\n query = {\"sp\": first_word, \"md\": \"r\", \"max\": \"1\", \"qe\": \"sp\", \"ipa\": \"1\"}\n # this one uses regular expressions -- i.e. 
if the second_word is \"of\" or \"for\"\n elif first_word == \"definition\" and re.match(r'(of)|(for)',second_word):\n query = {\"sp\": third_word, \"md\": \"d\", \"max\": \"1\", \"qe\": \"sp\", \"ipa\": \"1\"}\n # the is_pronounced function returns true if first_word is a (mis)spelling of pronounced\n elif re.match(r'(of)|(for)',second_word) and is_pronounced(first_word):\n query = {\"sp\": third_word, \"md\": \"r\", \"max\": \"1\", \"qe\": \"sp\", \"ipa\": \"1\"}\n # the ordering in the above list is not entirely random\n # since an if-elif-else statement won't keep evaluating after it finds a match\n # it makes sense to put the most computationally complex clauses at the end\n # >>> import timeit\n # >>> timeit.timeit('from word_helpers import is_pronounced; is_pronounced(\"pronounced\")', number=10000)\n # 0.022870146989589557\n # >>> timeit.timeit('args = [\"defined\"]; args[0] == \"defined\"', number=10000)\n # 0.002359684993280098\n # it takes 2 milliseconds to compare a string in a list 10,000 times\n # -- versus 2 centiseconds to run is_pronounced 10,000 times\n # (on my Intel Core i5 2.67GHz CPU -- obviously speed depends on the processor)\n # it's also worth noting that readability counts more than speed optimization (most of the time!)\n\n # Quick way to check if any of the above if statements matched\n if \"sp\" in query:\n # if so, we are done in this function\n if query[\"md\"] == \"r\": query_type = \"PRO\"\n if query[\"md\"] == \"d\": query_type = \"DEF\"\n return query\n\n # these will be useful later\n STOP_WORDS = (\"and\", \"meaning\", \"means\", \"max\", \"about\", \"which\", \"that\")\n\n # Parse more complicated requests for synonyms, etc\n # 0 is false in python, so this loop will run until we've removed all the args\n while len(args):\n # we must reset these vars each time the loop starts\n # in case we've deleted items from the args list\n first_word = args[0]\n second_word = args[1] if len(args) > 1 else \"\"\n third_word = args[2] if len(args) > 2 else \"\"\n # we use the teranary operator (this if ____ else that) to avoid an IndexError\n # IndexError would be raised if we tried to access the second element (args[1])\n # in a list which contained only one item (eg args == [\"lonely\"])\n # the teranary operator (in most languages it looks like \"____ ? this : that\")\n # returns \"this\" when the if is true and \"that\" when the if is false\n # meaning, if len(args) is NOT greater than 1, second_word == \"\"\n\n # Disambiguate homonym requests from spelling correction requests\n # Example: sounding like tung\n # Example: sounds like doe but spelled differently\n if re.match(r'sound((s)|(ing)) like',f\"{first_word} {second_word}\"):\n\n # again, use len(args) to avoid an IndexError\n if len(args) >= 6 and \\\n re.match(r'((but)|(except)) spelled different(ly)?',f\"{args[3]} {args[4]} {args[5]}\"):\n # but instead of teranary operator,\n # use \"short circuit logic\" -- when python sees \"if __A__ and __B__ \",\n # it knows that if A is false, the whole thing will be false\n # (you can't have \"ice cream and potatoes\" for dinner if you don't have ice cream)\n # and it won't waste time evaluating B, so re.match won't run and args[4]\n # won't be accessed and no IndexError will be raised, yay!\n # regex explained: ? means the prior thing matched zero or one times\n # different(ly)? 
matches \"different\" and \"differently\"\n query[\"rel_hom\"] = third_word\n # now, delete 6 items from args, starting at item 0\n del args[0:6]\n else:\n query[\"sl\"] = third_word\n del args[0:3]\n\n # Example: spelled like 'cens?r'\n elif re.match(r'spell((ed)|(ing)) like',f\"{first_word} {second_word}\"):\n # two stars (**) means \"unpack\" a dictionary\n # just like unpacking a suitcase, we've dumped the old contents of query\n # into a new dictionary (which we are saving with the same variable name!)\n query = {**query,\"sp\": third_word}\n # query[\"sp\"] = third_word also works fine\n # just showing off how to combine two dictionaries :)\n del args[0:3]\n\n # Example: rhymes with culminate\n elif len(args) > 2 and second_word == \"with\" and is_rhymes(first_word):\n query[\"rel_rhy\"] = third_word\n del args[0:3]\n\n # Example: almost rhymes with culminate\n elif len(args) > 3 and \\\n f\"{first_word} {third_word}\" == \"almost with\" and \\\n is_rhymes(second_word):\n query[\"rel_nry\"] = args[3] # fourth_word\n del args[0:4]\n\n # Example: comes after sea\n elif f\"{first_word} {second_word}\" == \"comes after\":\n query[\"lc\"] = third_word\n del args[0:3]\n elif first_word == \"follows\":\n query[\"lc\"] = second_word\n del args[0:2]\n elif f\"{first_word} {second_word}\" == \"comes before\":\n query[\"rc\"] = third_word\n del args[0:3]\n elif first_word == \"preceeds\":\n query[\"rc\"] = second_word\n del args[0:2]\n\n # Example: describes paint\n elif first_word == \"describes\":\n query[\"rel_jjb\"] = second_word\n del args[0:2]\n\n # Example: associated with feet\n elif f\"{first_word} {second_word}\" == \"associated with\" or \\\n f\"{first_word} {second_word}\" == \"triggered by\":\n query[\"rel_trg\"] = third_word\n del args[0:3]\n\n # Example: meaning feeling tired\n elif first_word in [\"means\",\"meaning\",\"like\"]:\n # get rid of first_word\n del args[0]\n # now short circuit logic again, plus using the tuple from ealier\n # b/c if we have \"meaning deer and sounds like roe\" we don't want\n # query[\"ml\"] == \"deer and sounds like roe\" -- it should be just \"deer\"\n while len(args) and args[0] not in STOP_WORDS:\n # teranary operator prevents KeyError if \"ml\" not already in query dictionary\n query[\"ml\"] = f\"{query['ml']} {args[0]}\" if \"ml\" in query else args[0]\n del args[0]\n # an example with the previous code to make things clearer\n # say args == [\"means\", \"egg\", \"beater\", \"and\", \"max\", \"35\"]\n # first_word IS in [\"means\",\"meaning\",\"like\"]\n # del first_word, args is now [\"egg\", \"beater\", \"and\", \"max\", \"35\"]\n # len(args) == 5, args[0] is NOT in STOP_WORDS\n # \"ml\" is NOT in query, so teranary returns args[0] (\"egg\")\n # args[0] is copied to query[\"ml\"] (query is now {ml: \"egg\"})\n # del args[0], args is now [\"beater\", \"and\", \"max\", \"35\"]\n # return to top of while loop, len(args) == 4, args[0] is NOT in STOP_WORDS\n # \"ml\" IS in query, so teranary returns f\"{query['ml']} {args[0]}\" (\"egg beater\") \n # f\"{query['ml']} {args[0]}\" is copied to query[\"ml\"]\n # (query is now {ml: \"egg beater\"})\n # del args[0], args is now [\"and\", \"max\", \"35\"]\n # return to top of while loop, len(args) == 3,\n # args[0] IS in STOP_WORDS (args[0] == \"and\")\n # DO NOT enter the while loop, continue past this code block\n\n # Discover the topic of our query\n elif first_word == \"about\":\n del args[0]\n count = 0\n # Datamuse allows a max of five topic words\n while len(args) and args[0] not in STOP_WORDS 
and count <= 5:\n query[\"topics\"] = f\"{query['topics']} {args[0]}\" if \"topics\" in query else args[0]\n del args[0]\n # count += 1 is the same as count = count + 1\n count += 1\n\n # How many results to return (max 1000)\n elif first_word in [\"max\", \"maximum\", \"only\"]:\n user_max = convert_num(second_word)\n if user_max and int(user_max) <= 1000:\n query[\"max\"] = user_max\n del args[0:2]\n\n # Remove filler words if they weren't parsed out above\n elif first_word in [\"that\",\"which\",\"and\",\"like\",\"is\"]:\n del args[0]\n\n # Add anything not otherwise parsable to the ml parameter\n else:\n query[\"ml\"] = f\"{query['ml']} {first_word}\" if \"ml\" in query else first_word\n del args[0]\n\n # this is the bottom of that massive while loop\n # if args is not empty by now, we'll start over from the top ^\n\n return query\n # and this is the end of the \"def parse(args, query)\" function\n # whew!", "def _parse_query(queryStr):\n mainNode = LogicNode.LogicNode()\n queryStr = queryStr.replace(' ','')\n \n logicList, myOPList = _break_query(queryStr)\n\n #converts operator strings to actual operators\n convertOp = {\n '&':operator.and_,\n '|':operator.or_,\n '^':operator.xor\n }\n\n for item in myOPList:\n mainNode.operators.append(convertOp[item])\n \n #adds the simple comparisons to the LogicNode\n mainNode.add_children(logicList)\n return mainNode", "def parseQuery(s):\n result = Parser.parseRule('dummy :- %s\\n' % s)\n result.lhs = None\n return result", "def parse_qs(query_string):\n query_string = to_utf8_if_unicode(query_string) or \"\"\n if query_string.startswith(\"?\"):\n logging.warning(\"Ignoring `?` query string prefix -- `%r`\" % query_string)\n query_string = query_string[1:]\n return _parse_qs(query_string, keep_blank_values=True)", "def parse_query(query, default_text='Time is up!'):\n try:\n regex = r'''\n ^((\n (?P<at>at\\ ) # at\n (?P<clock>\n (2[0-3]|[01]?[0-9]) # 0-23\n (:([0-5][0-9]))? # :0-59 (optional)\n )\n )| # OR\n ^(?P<time>\\d+) # 0-infinite digit\n (?P<measure>[mhs])? 
# mhs (optional, default: m)\n ) \n (?P<message>\\ .*)?$ # optional message\n '''\n m = re.match(regex, query, re.IGNORECASE | re.VERBOSE)\n \n if m.group('at') is not None:\n now = datetime.datetime.now()\n clock = m.group('clock').split(\":\")\n\n # if input has no minutes set to 0\n if(len(clock) == 1):\n clock.append(0)\n # calculate delta between now and inputed clock\n # if clock > now: set timer to next day\n time_sec = int((datetime.timedelta(hours=24) - (now - now.replace(hour=int(clock[0]), minute=int(clock[1])))).total_seconds() % (24 * 3600)) \n time_arg = m.group('clock')\n else:\n time_sec = int(m.group('time')) * TIME_MULT[(m.group('measure') or 'm').lower()]\n time_arg = m.group('time') + (m.group('measure') or \"\")\n\n message = m.group('message') or default_text\n\n return (time_sec, time_arg, message[1:])\n except Exception as e:\n raise ParseQueryError(str(e))", "def test_process_query(self):\n self.assertEqual(process_query(\"\"), \"\")\n self.assertEqual(process_query(\"org\"), \"org:*\")\n self.assertEqual(process_query(\"a b\"), \"a & b:*\")\n self.assertEqual(process_query(\"(foo bar)\"), \"foo & bar:*\")", "def parse_query(self, query_dict):\n if query_dict is None:\n return xapian.Query('') # Match everything\n elif query_dict == {}:\n return xapian.Query() # Match nothing\n\n query_tree = self.build_query_tree(query_dict)\n\n return query_tree.to_query(self.schema, self.database)", "def _parsed_query(self, query_string):\r\n return urlparse(handler_url(self.block, 'handler', query=query_string)).query", "def parse_query(query, nameserver, duration):\n flag_list = flags.to_text(query.response.flags)\n return {\n 'Query': get_query(nameserver, duration),\n 'QuestionSection': get_question(query),\n 'AnswerSection': get_rrs_from_rrsets(query.response.answer),\n 'AdditionalSection': get_rrs_from_rrsets(query.response.additional),\n 'AuthoritySection': get_rrs_from_rrsets(query.response.authority),\n 'ReturnCode': rcode.to_text(query.response.rcode()),\n 'ID': query.response.id,\n 'AA': 'AA' in flag_list,\n 'TC': 'TC' in flag_list,\n 'RD': 'RD' in flag_list,\n 'RA': 'RA' in flag_list,\n 'AD': 'AD' in flag_list\n }", "def parser(string, queryset):\n QueryObjects.D = {}\n QueryObjects.B = []\n QueryObjects.IND = 0\n QueryObjects.TEMP_FIELD = None\n\n algebra = boolean.BooleanAlgebra()\n query_list = lexer(string)\n query_string = ' '.join(query_list)\n qs = algebra.parse(query_string)\n\n if QueryObjects.TEMP_FIELD:\n queryset = queryset.annotate(**QueryObjects.TEMP_FIELD)\n QueryObjects.TEMP_FIELD = None\n\n locals().update(QueryObjects.D.items())\n query = str(qs)\n query = eval(query)\n queryset = queryset.filter(query)\n return queryset", "def process_query(s):\n query = re.sub(r'[!\\'()|&:\\x00<>]', ' ', s).strip()\n if query:\n query = re.sub(r'\\s+', ' & ', query)\n # Support prefix search on the last word. 
A tsquery of 'toda:*' will\n # match against any words that start with 'toda', which is good for\n # search-as-you-type.\n query += ':*'\n return query", "def read_parse_helper(self, query, path, triples, explicit_vars, implicit_vars, given_vars) :\n\t\t# constants\n\t\tif type(query) == int or type(query) == float :\n\t\t\treturn unicode(query)\n\t\telif type(query) == str or type(query) == unicode:\n\t\t\tif type(query) == str :\n\t\t\t\tquery = unicode(query)\n\t\t\tif self.n.matches(query) :\n\t\t\t\treturn query\n\t\t\telse :\n\t\t\t\tquery = query.replace('\\\\', '\\\\\\\\')\n\t\t\t\tquery = query.replace('\\n', '\\\\n')\n\t\t\t\tquery = query.replace('\\r', '\\\\r')\n\t\t\t\tif '\"' not in query :\n\t\t\t\t\treturn u'\"'+query+u'\"@'+self.lang\n\t\t\t\tif \"'\" not in query :\n\t\t\t\t\treturn u\"'\"+query+u\"'@\"+self.lang\n\t\t\t\tif '\"\"\"' not in query :\n\t\t\t\t\treturn u'\"\"\"'+query+u'\"\"\"@'+self.lang\n\t\t\t\tif \"'''\" not in query :\n\t\t\t\t\treturn u\"'''\"+query+u\"'''@\"+self.lang\n\t\t\t\traise Exception(\"can't figure out how to put this in quotes...\")\n\t\telif type(query) == datetime.datetime :\n\t\t\treturn u'\"%d-%d-%dT%d:%d:%dT\"^^xsd:dateTime' % (query.year, query.month, query.day, query.hour, query.minute, query.second)\n\t\telif type(query) == time.struct_time :\n\t\t\treturn u'\"%d-%d-%dT%d:%d:%dT\"^^xsd:dateTime' % query[0:6]\n\t\telif type(query) == rdflib.URIRef :\n\t\t\treturn query.n3()\n\t\telif type(query) == rdflib.Literal :\n\t\t\tif query.datatype == None :\n\t\t\t\t# this is a string\n\t\t\t\treturn query.n3()+'@'+self.lang\n\t\t\telse :\n\t\t\t\treturn query.n3()\n\t\t\n\t\t# cases resulting in explicit variables\n\t\telif query == None :\n\t\t\treturn self._new_var(explicit_vars, path)\n\t\telif query == [] :\n\t\t\tpath = copy.copy(path)\n\t\t\tpath.append(list)\n\t\t\treturn self._new_var(explicit_vars, path)\n\t\t\n\t\telif type(query) == list and len(query) == 1 and type(query[0]) == dict :\n\t\t\tpath = copy.copy(path)\n\t\t\tpath.append(list)\n\t\t\treturn self.read_parse_helper(query[0], path, triples, explicit_vars, implicit_vars, given_vars)\n\t\t\n\t\t# a list of only dicts length > 1 (length > 1 known because not the above case)\n\t\telif type(query) == list and all([type(i) == dict for i in query]) :\n\t\t\t# TODO !!!\n\t\t\t# should this match any of these object or all of these?\n\t\t\t# should maybe not require that the type of all objects in the list are \n\t\t\t# dicts.\n\t\t\t# An any clause requires optional subqueries to be implemented\n\t\t\traise Exception('ambiguous case not yet implemented (dont have a list of more than one item)')\n\t\t\n\t\t# complex queries\n\t\telif type(query) == dict :\n\t\t\tif self.n.sparql.subject in query :\n\t\t\t\tsubject = query[self.n.sparql.subject]\n\t\t\t\tif isinstance(subject, URIRef) :\n\t\t\t\t\tsubject = subject.n3()\n\t\t\t\tdel query[self.n.sparql.subject]\n\t\t\t\tif subject == None :\n\t\t\t\t\tsubject = self._new_var(explicit_vars, path)\n\t\t\telse :\n\t\t\t\tsubject = self._new_var(implicit_vars, path)\n\t\t\tfor key, value in query.iteritems() :\n\t\t\t\t# print 'k',key,'v',value\n\t\t\t\tpath2 = copy.copy(path)\n\t\t\t\tnk = self.read_parse_helper(key, path, triples, explicit_vars, implicit_vars, given_vars)\n\t\t\t\tpath2.append(key)\n\t\t\t\tnv = self.read_parse_helper(value, path2, triples, explicit_vars, implicit_vars, given_vars)\n\t\t\t\t# print '---', nk, nv, type(nk), type(nv)\n\t\t\t\t# if the new value is not a uri or a variable, then its a given 
value\n\t\t\t\tif len(nv) != 0 and nv[0] != '<' and nv[0] != '?' :\n\t\t\t\t\tgiven_vars.append(copy.copy(path2))\n\t\t\t\tpair = (nk, nv)\n\t\t\t\t#print 'dict', pair\n\t\t\t\ttriples.append((subject, nk, nv))\n\t\t\treturn subject\n\t\t\n\t\t# else ...\n\t\telse :\n\t\t\traise Exception(\"unkown data type: %s\" % str(type(query)))", "def inspect_query(query):\n return _parse_query(query)", "def parse_query(query, delim='/'):\n key = ''\n prefix = ''\n postfix = ''\n\n parsed = urlparse(query)\n query = parsed.path.lstrip(delim)\n bucket = parsed.netloc\n\n if not parsed.scheme.lower() in ('', \"gs\", \"s3\", \"s3n\"):\n raise ValueError(\"Query scheme must be one of '', 'gs', 's3', or 's3n'; \"\n \"got: '%s'\" % parsed.scheme)\n storage = parsed.scheme.lower()\n\n if not bucket.strip() and query:\n toks = query.split(delim, 1)\n bucket = toks[0]\n if len(toks) == 2:\n key = toks[1]\n else:\n key = ''\n\n if not bucket.strip():\n raise ValueError(\"Could not parse bucket name from query string '%s'\" % query)\n\n tokens = query.split(\"*\")\n n = len(tokens)\n if n == 0:\n pass\n elif n == 1:\n key = tokens[0]\n elif n == 2:\n index = tokens[0].rfind(delim)\n if index >= 0:\n key = tokens[0][:(index + 1)]\n prefix = tokens[0][(index + 1):] if len(tokens[0]) > (index + 1) else ''\n else:\n prefix = tokens[0]\n postfix = tokens[1]\n else:\n raise ValueError(\"Only one wildcard ('*') allowed in query string, got: '%s'\" % query)\n\n return storage, bucket, key, prefix, postfix", "def url_parse_query(query, encoding=None):\n if isinstance(query, unicode):\n if encoding is None:\n encoding = url_encoding\n query = query.encode(encoding, 'ignore')\n query = query.replace('?', '')\n\n l = set()\n for k, v, sep in parse_qsl(query, True):\n k = url_quote_part(k, '/-:,;')\n if not k:\n continue\n if v:\n v = url_quote_part(v, '/-:,;')\n l.add(\"%s=%s\" % (k, v))\n elif v is None:\n l.add(\"%s\" % k)\n else:\n # some sites do not work when the equal sign is missing\n l.add(\"%s=\" % k)\n query = '&'.join(sorted(l))\n return query", "def parse_query(request):\n\n querystring = request.uri['query']\n fp = StringIO(querystring)\n\n headers = {}\n headers['content-type'] = request.message.get('content-type')\n headers['content-length'] = request.message.get('content-length')\n\n environ = {}\n environ['REQUEST_METHOD'] = request.method\n\n boundary = request.message.get('boundary')\n\n query = cgi.FieldStorage( fp = fp\n , headers = headers\n , outerboundary = boundary\n , environ = environ\n , keep_blank_values = True\n , strict_parsing = False\n )\n\n return query", "def parse(query):\n if isinstance(query, RowQuery):\n # already parsed\n return query\n parts = re.split(r'([<>]=?|!?=|!?~|\\bis\\b)', hxl.datatypes.normalise_string(query), maxsplit=1)\n pattern = TagPattern.parse(parts[0])\n op_name = hxl.datatypes.normalise_string(parts[1])\n op = RowQuery.OPERATOR_MAP.get(op_name)\n value = hxl.datatypes.normalise_string(parts[2])\n is_aggregate = False\n # special handling for aggregates (FIXME)\n if op_name == 'is' and value in ('min', 'max', 'not min', 'not max'):\n is_aggregate = True\n return RowQuery(pattern, op, value, is_aggregate)", "def query(self, query, ctxs=None, engine=None):\n if isinstance(query, str):\n query = QP.parseString(query, self._query_parser)\n elif isinstance(query, Sentence):\n query = Query([query])\n if not isinstance(query, Query):\n raise AcabParseException(\"Unrecognised query target: {}\".format(type(query)))\n\n return self._query_sentence(query, ctxs=ctxs, 
engine=engine)", "def parse_for_query(query):\n index = query.find('@')\n if index == -1:\n return \"\"\n elif index == len(query)-1:\n # Make sure the final return doesn't index outside the list.\n return \"\"\n else:\n return query[index+1:]", "def build(self, query_str):\r\n try:\r\n parsed = self.parser.parseString(query_str)\r\n except ParseException,e:\r\n raise QueryException(e)\r\n\r\n source = self.__get_source(parsed)\r\n tree = self.__get_tree(parsed)\r\n handler = self.__get_handler(parsed)\r\n query = Query(tree, source, handler)\r\n return query", "def inspect_query(querystring: str) -> dict:\n return _parse_query(querystring)", "def smart_query_string(parser, token):\n args = token.split_contents()\n additions = args[1:]\n\n addition_pairs = []\n while additions:\n addition_pairs.append(additions[0:2])\n additions = additions[2:]\n\n return SmartQueryStringNode(addition_pairs)", "def reformulate_query(s):\n words = tokenize(s)\n tags = [tag for _, tag in pos_tag(words)]\n\n if tags[-1] == '.':\n words.pop()\n\n # what/who questions\n if tags[0] in set(['WP', 'WDT']):\n if tags[1] in set(['VBZ', 'VBD', 'VBP']):\n if tags[-1] is not 'IN':\n exact_query = '{0}\\s*{1}\\s*{2}'.format(' '.join(words[2:]),\n '(?:\\(.*\\))?', words[1])\n inexact_query = '{0} {1}'.format(' '.join(words[2:]), words[1])\n return exact_query, inexact_query\n return s, s", "def parse_query_string(self, params):\n results = {}\n\n for key, val in params.items():\n lookup_len = len(self.query_string_lookup) + 1\n\n if key[0:lookup_len] == '{}['.format(self.query_string_lookup) and key[-1] == ']':\n results[key[lookup_len:-1]] = val\n\n return results", "def parse_query_params(query_string):\n # Parse the query param string\n parsed = urlparse(query_string)\n print(parsed)\n query_params = dict(parse_qs(parsed.path))\n print(query_params)\n # Get the value from the list\n query_params = {k: v[0] for k, v in query_params.items()}\n return query_params", "def parse_query_string(self, params):\n results = {}\n\n for key, val in params.items():\n lookup_len = len(self.query_string_lookup) + 1\n\n if key[0:lookup_len] == '{}['.format(self.query_string_lookup) and key[-1] == ']':\n results[key[lookup_len:-1]] = val if val.lower() != 'null' else None\n\n return results", "def getParsedQueryString(self):\n return cgi.parse_qs(self.query_string)", "def process_query(self, query_str):\n # make sure everything is lower case\n query = query_str.lower()\n # split on whitespace\n query = query.split()\n # remove non alphanumeric characters\n query = [self.alphanum.sub('', xx) for xx in query]\n # stem words\n query = [self.p.stem(xx) for xx in query]\n return query", "def search(query_string):", "def process_query(self, query):\n def parse_query(query):\n \"\"\"Returns [(command, parameter)] list.\"\"\"\n qlist = []\n splitted = query.split(\"&\")\n for entry in splitted:\n cmd, arg = entry.split(\"=\")\n qlist.append((cmd, arg))\n return qlist\n qlist = parse_query(query)\n print \"Trying to execute query '\" + str(qlist) + \"'\"\n result = None\n q, args = qlist[0]\n try:\n method_call = getattr(self, q)\n try:\n with Timer() as t:\n if len(qlist) > 1:\n print qlist[1:]\n result = method_call(args, opt=qlist[1:])\n else:\n result = method_call(args)\n print \" --> This took %s seconds.\" % t.secs\n except TypeError as e:\n print \"ERROR:\", e\n print \"Success.\"\n except AttributeError as e:\n print e\n print \"Failed!\"\n return result", "def parseQueryString():\n\tqs = cgi.FieldStorage()\n\treturn({'char': 
qs.getvalue('char'), 'zone': qs.getvalue('zone')})", "def parse_query_spec(self, query_spec):\n try:\n return self.QUERY_TYPE_MAP[query_spec['type']](query_spec)\n except KeyError:\n raise exceptions.QueryError('invalid query spec')\n except TypeError:\n raise exceptions.QueryError('Query must be a dictionary specifyng type and value of the query')", "def __init__(self, query_string: str) -> None:\n super().__init__(parse_qs(query_string))", "def parse_query(query: str, dataset: Dataset, default_graph: str, context: dict) -> Tuple[PreemptableIterator, dict]:\n # transaction timestamp\n start_timestamp = datetime.now()\n # rdflib has no tool for parsing both read and update query,\n # so we must rely on a try/catch dirty trick...\n try:\n logical_plan = translateQuery(parseQuery(query)).algebra\n cardinalities = list()\n iterator = parse_query_node(logical_plan, dataset, [default_graph], context, cardinalities, as_of=start_timestamp)\n return iterator, cardinalities\n except ParseException:\n return parse_update(query, dataset, default_graph, context, as_of=start_timestamp)", "def parse_query_part(\n part: str,\n query_classes: Dict = {},\n prefixes: Dict = {},\n default_class: Type[query.SubstringQuery] = query.SubstringQuery,\n) -> Tuple[Optional[str], str, Type[query.Query], bool]:\n # Apply the regular expression and extract the components.\n part = part.strip()\n match = PARSE_QUERY_PART_REGEX.match(part)\n\n assert match # Regex should always match\n negate = bool(match.group(1))\n key = match.group(2)\n term = match.group(3).replace('\\\\:', ':')\n\n # Check whether there's a prefix in the query and use the\n # corresponding query type.\n for pre, query_class in prefixes.items():\n if term.startswith(pre):\n return key, term[len(pre):], query_class, negate\n\n # No matching prefix, so use either the query class determined by\n # the field or the default as a fallback.\n query_class = query_classes.get(key, default_class)\n return key, term, query_class, negate", "def execute_gql_string(\n self, gql_string: str\n ) -> dict:\n try:\n document_node = graphql.parse(gql_string)\n if self._schema is MISSING:\n self._service_endpoint = os.getenv(\"SERVICE_URL\") + \"/graphql\"\n self._load_schema()\n returned_errors = graphql.validation.validate(\n schema=self._schema,\n document_ast=document_node\n )\n if returned_errors:\n formatted_error_list = list(map(lambda x: str(x.formatted), returned_errors))\n print(\"ERROR: failed to validate query string against schema.\")\n for error in formatted_error_list:\n print(pprint.pformat(error, indent=4, compact=True).replace(\"\\\\\", \"\"))\n e = ServiceCoreException(\n f\"failed to validate query string against schema\"\n )\n print(str(e))\n raise e\n\n return self._send_gql_query(gql_string)\n\n except graphql.GraphQLSyntaxError as e:\n raise ServiceCoreException(\n \"Failed to parse provided string\",\n nested_exception=e\n )\n\n except BaseException as e:\n raise ServiceCoreException(\n \"Failed to parse provided string\",\n nested_exception=e\n )", "def lexer(string): # TODO: refactor\n parsedlist = []\n parsedstring = ''\n leftbcounter = 0\n rightbcounter = 0\n qcounter = 0\n for index, a in enumerate(string):\n if qcounter == 2:\n if a.isalpha():\n qcounter = 1\n else:\n qcounter = 0\n if a == '(':\n leftbcounter += 1\n if a == ')':\n rightbcounter += 1\n if a == \"'\" and leftbcounter == rightbcounter:\n qcounter += 1\n if a != ' ' and leftbcounter == rightbcounter \\\n and qcounter == 0:\n parsedstring += a\n if index+1 == len(string):\n 
parsedlist.append(parsedstring)\n parsedstring = ''\n elif leftbcounter != rightbcounter:\n parsedstring += a\n elif qcounter > 0:\n parsedstring += a\n if index+1 == len(string):\n parsedlist.append(parsedstring)\n parsedstring = ''\n else:\n parsedlist.append(parsedstring)\n parsedstring = ''\n if leftbcounter != rightbcounter:\n raise BadRequest()\n bl = []\n sl = []\n counter = 0\n for index, query in enumerate(parsedlist, 1):\n if query == \"and\" or query == \"or\" or query == \"not\":\n if sl:\n bl.append(sl)\n bl.append([query])\n counter = 0\n sl = []\n continue\n sl.append(query)\n counter += 1\n if index == len(parsedlist) and sl:\n bl.append(sl)\n # i later added a third nested list to seperate AND and OR\n query_list = []\n al = []\n counter = 0\n for index, grouped_query in enumerate(bl, 1):\n if grouped_query[0] == \"or\":\n query_list.append(al)\n query_list.append([grouped_query])\n counter = 0\n al = []\n continue\n al.append(grouped_query)\n counter += 1\n if index == len(bl):\n query_list.append(al)\n\n for x in query_list:\n for y in x:\n if y[0] == 'and' or y[0] == 'or' or y[0] == 'not':\n QueryObjects.B.append(y[0])\n continue\n if y[0][0] == '(' and y[0][-1] == ')':\n QueryObjects.B.append(y[0][0])\n lexer(y[0][1:-1])\n QueryObjects.B.append(y[0][-1])\n else:\n QueryObjects.IND += 1\n n = 'arg' + str(QueryObjects.IND)\n QueryObjects.D[n] = query_mapping(y, QueryObjects.IND)[\"query\"]\n QueryObjects.B.append(n)\n return QueryObjects.B", "def _parse_user_query(self, query):\n def _parse_basic_query(attr, value):\n if isinstance(value, str) and '*' in value:\n return MatchGlob(attr, value)\n else:\n return Eq(attr, value)\n\n if isinstance(query, dict):\n subqueries = []\n for attr, value in query.iteritems():\n if isinstance(value, (list, set, tuple)):\n # If value is a list or similar, we build an OR\n or_queries = []\n for or_query in value:\n or_queries.append( _parse_basic_query(attr, or_query) )\n subqueries.append( Or(*or_queries) )\n else:\n subqueries.append(_parse_basic_query(attr, value))\n query = And(*subqueries)\n return query", "def query(querystring: str,\n db: tsdb.Database,\n **kwargs):\n queryobj = _parse_query(querystring)\n\n if queryobj['type'] in ('select', 'retrieve'):\n return _select(\n queryobj['projection'],\n queryobj['relations'],\n queryobj['condition'],\n db,\n record_class=kwargs.get('record_class', None))\n else:\n # not really a syntax error; replace with TSQLError or something\n # when the proper exception class exists\n raise TSQLSyntaxError(queryobj['type'] + ' queries are not supported',\n text=querystring)", "def process_sparql_query_text(query_text, loader, call_name, extraMetadata):\n # We get the endpoint name first, since some query metadata fields (eg enums) require it\n endpoint, _ = gquery.guess_endpoint_uri(query_text, loader)\n glogger.debug(\"Read query endpoint: {}\".format(endpoint))\n\n try:\n query_metadata = gquery.get_metadata(query_text, endpoint)\n except Exception as e:\n raise Exception('Could not parse query {}: {}'.format(call_name, str(e)))\n\n tags = query_metadata['tags'] if 'tags' in query_metadata else []\n\n summary = query_metadata['summary'] if 'summary' in query_metadata else \"\"\n\n description = query_metadata['description'] if 'description' in query_metadata else \"\"\n\n method = query_metadata['method'].lower() if 'method' in query_metadata else \"\"\n if method not in ['get', 'post', 'head', 'put', 'delete', 'options', 'connect']:\n method = \"\"\n\n pagination = query_metadata['pagination'] 
if 'pagination' in query_metadata else \"\"\n\n endpoint_in_url = query_metadata['endpoint_in_url'] if 'endpoint_in_url' in query_metadata else True\n\n # Processing of the parameters\n params = []\n\n # PV properties\n item_properties = {}\n\n # If this query allows pagination, add page number as parameter\n if pagination:\n params.append(pageUtils.getSwaggerPaginationDef(pagination))\n\n if query_metadata['type'] in ['SelectQuery', 'ConstructQuery', 'InsertData']:\n # TODO: do something intelligent with the parameters!\n # As per #3, prefetching IRIs via SPARQL and filling enum\n parameters = query_metadata['parameters']\n\n for _, p in list(parameters.items()):\n param = {}\n param['name'] = p['name']\n param['type'] = p['type']\n param['required'] = p['required']\n param['in'] = \"query\"\n param['description'] = \"A value of type {} that will substitute {} in the original query\".format(\n p['type'], p['original'])\n if 'lang' in p:\n param['description'] = \"A value of type {}@{} that will substitute {} in the original query\".format(\n p['type'], p['lang'], p['original'])\n if 'format' in p:\n param['format'] = p['format']\n param['description'] = \"A value of type {} ({}) that will substitute {} in the original query\".format(\n p['type'], p['format'], p['original'])\n if 'enum' in p:\n param['enum'] = p['enum']\n if 'default' in p:\n param['default'] = p['default']\n\n params.append(param)\n\n if endpoint_in_url:\n endpoint_param = {}\n endpoint_param['name'] = \"endpoint\"\n endpoint_param['type'] = \"string\"\n endpoint_param['in'] = \"query\"\n endpoint_param['description'] = \"Alternative endpoint for SPARQL query\"\n endpoint_param['default'] = endpoint\n params.append(endpoint_param)\n\n # If this is a URL generated spec we need to force API calls with the specUrl parameter set\n if type(loader) is URLLoader:\n specUrl_param = {}\n specUrl_param['name'] = \"specUrl\"\n specUrl_param['type'] = \"string\"\n specUrl_param['in'] = \"query\"\n specUrl_param['description'] = \"URL of the API specification\"\n specUrl_param['default'] = loader.getRawRepoUri()\n params.append(specUrl_param)\n\n if query_metadata['type'] == 'SelectQuery':\n # Fill in the spec for SELECT\n if not method:\n method = 'get'\n for pv in query_metadata['variables']:\n item_properties[pv] = {\n \"name\": pv,\n \"type\": \"object\",\n \"required\": [\"type\", \"value\"],\n \"properties\": {\n \"type\": {\n \"type\": \"string\"\n },\n \"value\": {\n \"type\": \"string\"\n },\n \"xml:lang\": {\n \"type\": \"string\"\n },\n \"datatype\": {\n \"type\": \"string\"\n }\n }\n }\n\n elif query_metadata['type'] == 'ConstructQuery':\n if not method:\n method = 'get'\n elif query_metadata['type'] == 'InsertData' or query_metadata['type'] == 'Modify': # UPDATE queries should map here\n if not method:\n method = 'post'\n elif query_metadata['type'] == 'UNKNOWN':\n glogger.warning(\"grlc could not parse this query; assuming a plain, non-parametric SELECT in the API spec\")\n if not method:\n method = 'get'\n else:\n # TODO: process all other kinds of queries\n glogger.debug('Could not parse query {}: Query of type {} is currently unsupported'.format(call_name, query_metadata['type']))\n raise Exception('Could not parse query {}: Query of type {} is currently unsupported'.format(call_name, query_metadata['type']))\n\n # Finally: main structure of the callname spec\n item = packItem('/' + call_name, method, tags, summary, description, params, query_metadata, extraMetadata)\n\n return item", "def parseString(self, s):\n 
pass", "def parse(s):\n return expr.parseString(s, parseAll=True)", "def test_sqpp_single_quotes(self):\n self.assertEqual(self.parser.parse_query(\"(expr1) - expr2 | 'expressions - in + quotes | (are) not - parsed ' - (expr3) | expr4\"),\n ['+', 'expr1', '-', 'expr2', '|', \"'expressions - in + quotes | (are) not - parsed '\", '-', 'expr3', '|', 'expr4'])\n #['+', '+ \\'expressions - in + quotes | (are) not - parsed \\' | expr1 | expr4',\n # '+', '- expr3 | expr1 | expr4',\n # '+', '+ \\'expressions - in + quotes | (are) not - parsed \\' - expr2 | expr4',\n # '+', '- expr3 - expr2 | expr4'])", "def phrase_query_retrieve(self, query_str):\n query = self.process_query(query_str)\n return self.phrase_retrieve(query)", "def parse(self):\n args = self.args\n if args and not args[0] in [\"'\", \",\", \":\"]:\n args = \" %s\" % args.strip()\n self.args = args", "def query(args):\n dbh = despydb.DesDbi(args.service, args.section)\n if args.query not in \"-+\":\n do1Query(dbh, args.query, args)\n elif args.query == \"-\":\n line = sys.stdin.readline()\n while line:\n line = line.strip()\n if not line or line.startswith(\"#\"):\n pass\n else:\n do1Query(dbh, line, args)\n line = sys.stdin.readline()\n else: #if args.query == \"+\":\n lines = sys.stdin.read()\n do1Query(dbh, lines, args)\n dbh.close()", "def parse_int_from_qs(query, name, default):\n \n try:\n if len(query) > 0:\n qs = urllib.parse.parse_qs(query)\n if name in qs:\n return int(qs[name][0])\n except ValueError:\n pass\n \n return default", "def request(query):", "def parse_query(self, question_text, **kwargs):\n if not self.session.platform_is_6_5(**kwargs):\n m = \"ParseJob not supported in version: {}\".format\n m = m(self.session.server_version)\n raise pytan.exceptions.UnsupportedVersionError(m)\n\n parse_job = taniumpy.ParseJob()\n parse_job.question_text = question_text\n parse_job.parser_version = 2\n\n clean_keys = ['obj']\n clean_kwargs = pytan.utils.clean_kwargs(kwargs=kwargs, keys=clean_keys)\n\n parse_job_results = self.session.add(obj=parse_job, **clean_kwargs)\n return parse_job_results", "def parse(self, input):\r\n query = None\r\n for handler in self._line_handlers:\r\n try:\r\n query = handler.handle(input)\r\n except Exception as e:\r\n query = None\r\n finally:\r\n if query is not None:\r\n return query\r\n return None", "def test_sqpp_double_quotes(self):\n self.assertEqual(self.parser.parse_query(\n '(expr1) - expr2 | \"expressions - in + quotes | (are) not - parsed \" - (expr3) | expr4'),\n ['+', 'expr1', '-', 'expr2', '|', '\"expressions - in + quotes | (are) not - parsed \"', '-', 'expr3', '|', 'expr4'])\n #['+', '+ \"expressions - in + quotes | (are) not - parsed \" | expr1 | expr4',\n # '+', '- expr3 | expr1 | expr4',\n # '+', '+ \"expressions - in + quotes | (are) not - parsed \" - expr2 | expr4',\n # '+', '- expr3 - expr2 | expr4'])", "def func_PARSE(self):\n self.parsed_url = parse.urlparse(\"http://{0}:{1}{2}\".format(args.HTTP_HOST, args.HTTP_PORT, self.path).lower())\n self.parsed_param = parse.parse_qs(self.parsed_url[4])", "def test_sqpp_beginning_double_quotes(self):\n self.assertEqual(self.parser.parse_query('\"expr1\" - (expr2)'),\n ['+', '\"expr1\"', '-', 'expr2'])", "def parse_query_value(combined_value):\n split = len(combined_value) - len(combined_value.lstrip('<>='))\n operator = combined_value[:split]\n if operator == '':\n operator = '='\n try:\n operator_func = search_operators[operator]\n except KeyError:\n raise ValueError(\n 'Numeric query should start with operator, choose from %s'\n 
% ', '.join(search_operators.keys()))\n value = combined_value[split:].strip()\n return operator_func, value", "def parseQuery(query):\n\twith open('userdata.csv') as csvfile:\n\t\treader = csv.reader(csvfile,quotechar='\"', delimiter=',',quoting=csv.QUOTE_ALL, skipinitialspace=True)\n\t\tfor row in reader:\n\t\t\tif matches(row[0], query):\n\t\t\t\t\n\t\t\t\ttts = gTTS(text=row[1], lang='en')\n\t\t\t\tcwd = os.getcwd()\n\t\t\t\ttts.save(\"good.mp3\")\n\n\t\t\t\troot = Tk()\n\n\t\t\t\tp = vlc.MediaPlayer(\"good.mp3\")\n\t\t\t\tp.play()\n\n\t\t\t\troot.mainloop()\n\t\t\t\treturn row[1]\n\t\treturn \"I cannot answer that, please call our customer helpline\"", "def parse_query(self, query: Optional[str]) -> event_filter.ParsedTerms:\n if query is None:\n return []\n\n try:\n parsed_terms = event_search.parse_search_query(\n query,\n params=self.filter_params,\n builder=self,\n config_overrides=self.parser_config_overrides,\n )\n except ParseError as e:\n raise InvalidSearchQuery(f\"Parse error: {e.expr.name} (column {e.column():d})\")\n\n if not parsed_terms:\n return []\n\n return parsed_terms", "def test_sqpp_quoted_expr1_and_paren_expr2_and_expr3(self):\n self.assertEqual(self.parser.parse_query('\"expr1\" (expr2) expr3'),\n ['+', '\"expr1\"', '+', 'expr2', '+', 'expr3'])", "def get_subcommand_query(query_str: str) -> Optional[SubcommandQuery]:\n if not query_str:\n return None\n\n # spilt:\n # \"subcommand_name rest of query\" -> [\"subcommand_name\", \"rest of query\"\"]\n query_parts = query_str.strip().split(None, maxsplit=1)\n\n if len(query_parts) < 2:\n query_str = \"\"\n else:\n query_str = query_parts[1]\n\n subcommand = get_subcommand_for_name(query_parts[0])\n if subcommand:\n return SubcommandQuery(subcommand=subcommand, query=query_str)", "def construct_query(attribute, param1, param2 = None):\n # TODO: Validate data type of param1 and param2 and raise exception if it's wrong.\n def match_string():\n bash_special = r\"|&;<>()$`\\\"' \\t\\n\"\n pat = param1.encode('string-escape')\n pat_bashsafe = filter(lambda c: c not in bash_special, pat)\n label = '-'.join(map(str, [attribute, param1]))\n def match(cell_value):\n return bool(re.search(pat, cell_value, flags = re.IGNORECASE))\n return Query(attribute, match, pat_bashsafe, label)\n def match_numeric():\n if param2 is None:\n label = '-'.join(map(str, [attribute, param1]))\n def matchsingle(cell_value):\n return cell_value == param1\n return Query(attribute, matchsingle, param1, label)\n else:\n label = '-'.join(map(str, [attribute, param1, param2]))\n def matchrange(cell_value):\n # Assume the cell value is either numeric, or an iterable containing only numerics\n return (param1 <= np.min(cell_value)) and (cell_value <= np.max(param2))\n return Query(attribute, matchrange, None, label)\n if isinstance(param1, str):\n return match_string()\n else:# param1 is numeric\n return match_numeric()", "def parse_input():\n if len(sys.argv) == 1 or len(sys.argv) > 3:\n print(\"\"\"Usage: ./search.py \"query\" [k]\nReturns the top k results of the search. The second argument is optional, by default k = 10.\"\"\")\n sys.exit(1)\n elif len(sys.argv) == 2:\n query_string = sys.argv[1]\n k = 10\n else:\n query_string = sys.argv[1]\n k = int(sys.argv[2])\n if k < 1 or k > 100000:\n print(\"Error! 
k must be between 1 and 100000, setting k = 10\")\n k = 10\n return (query_string, k)", "def _parse(url):\n url = url.strip()\n parsed = urlparse(url)\n return _parsed_url_args(parsed)", "def parser(self, q, casing=None):\r\n params = base.get_params(None, locals())\r\n url = '{0}/{1}'.format(self.get_url(), 'parser')\r\n\r\n request = http.Request('GET', url, params)\r\n\r\n return request, parsers.parse_json", "def parse(s):\n return s", "def parse_for_filters(query_string):\n if ';' in query_string:\n strings = query_string.split(';')\n else:\n strings = query_string.split('&')\n\n filters = []\n leftovers = [] \n for string in strings:\n query = cgi.parse_qs(string)\n try:\n key, value = query.items()[0]\n\n try:\n argument = unicode(value[0], 'UTF-8')\n except TypeError:\n argument = value[0]\n\n func = FILTER_PARSERS[key](argument)\n filters.append(func)\n except(KeyError, IndexError):\n leftovers.append(string)\n\n leftovers = ';'.join(leftovers)\n return filters, leftovers", "def _parse_args(input_date, input_meal):\n parser = ArgumentParser()\n parser.add_argument('-d', '--date', type=str)\n parser.add_argument('-m', '--meal', type=str)\n args = parser.parse_args()\n # Allows getting the args from either CLI or as the function parameters\n query_date = args.date or input_date\n query_meal = args.meal or input_meal\n # Validate and sanitize the meal\n if query_meal and query_meal not in constants.MEAL_CHOICES:\n raise ValueError(\"Refeições suportadas são apenas 'almoço', 'jantar' e 'todas'.\")\n # Validate and sanitize the date\n if query_date == constants.DATE_TOMORROW:\n query_date = date.today() + timedelta(days=1)\n else:\n try:\n query_date = parse_date(args.date if args.date else input_date or None)\n except ValueError:\n query_date = None\n return query_date, query_meal", "def parse(self, string):\r\n # Tidy up our line\r\n string = self._check_line_is_good(string)\r\n \r\n # Break up into origin, token and body\r\n high_level_parts = string.split(None, 2)\r\n origin = parse_numeric(high_level_parts[0], self._maxclientnum)\r\n command = high_level_parts[1]\r\n if not command.isupper() and not command.isdigit():\r\n raise ProtocolError('Command not in uppercase', string)\r\n if len(high_level_parts) > 2:\r\n params = self._parse_params(high_level_parts[2])\r\n else:\r\n params = []\r\n \r\n # If this is an invalid command, pass it upwards\r\n try:\r\n self._pass_to_handler(origin, command, params)\r\n except ParseError, error:\r\n raise ParseError(error.value, string)", "def _urlparse_splitquery(url):\r\n\r\n qpart = url.split(\"?\", 1)\r\n if len(qpart) == 2:\r\n query = qpart[1]\r\n else:\r\n query = \"\"\r\n\r\n return qpart[0], query", "def test_parse_set_query():\n # List of 2-tuples of (query, expected_result)\n set_tests = {\n 'foo=bar': [\n ('intersection', 'foo', 'bar'),\n ],\n 'foo=bar owner=jathan': [\n ('intersection', 'foo', 'bar'),\n ('intersection', 'owner', 'jathan'),\n ],\n '-owner=gary': [\n ('difference', 'owner', 'gary'),\n ],\n 'cluster +foo=baz': [\n ('intersection', 'cluster', ''),\n ('union', 'foo', 'baz'),\n ],\n # Extra white space\n 'cluster=lax +foo=baz': [\n ('intersection', 'cluster', 'lax'),\n ('union', 'foo', 'baz'),\n ],\n }\n\n # Make sure that result matches expected_result\n for query, expected_result in set_tests.iteritems():\n result = parse_set_query(query)\n assert result == expected_result", "def _get_query_parser(self):\n return whoosh.qparser.MultifieldParser(\n ('title', 'content'),\n self._index.schema,\n 
plugins=[whoosh.qparser.PrefixPlugin],\n group=whoosh.qparser.OrGroup.factory(0.9)\n )", "def parse(cls, s):\n raise NotImplementedError", "def get_query_result(query_string: str) -> Any:\n table = get_template_attribute(\"_query_table.html\", \"querytable\")\n contents, types, rows = g.ledger.query_shell.execute_query(\n g.filtered.entries, query_string\n )\n if contents and \"ERROR\" in contents:\n raise FavaAPIError(contents)\n table = table(g.ledger, contents, types, rows)\n\n if types and g.ledger.charts.can_plot_query(types):\n return QueryResult(table, g.ledger.charts.query(types, rows))\n return QueryResult(table)", "def parse(self, text, dm=False):\n return (self.dm_expr if dm else self.expr).parseString(text)", "def querystring(parser, token):\r\n bits = token.split_contents()\r\n tag = bits.pop(0)\r\n updates = token_kwargs(bits, parser)\r\n # ``bits`` should now be empty of a=b pairs, it should either be empty, or\r\n # have ``without`` arguments.\r\n if bits and bits.pop(0) != \"without\":\r\n raise TemplateSyntaxError(\"Malformed arguments to '%s'\" % tag)\r\n removals = [parser.compile_filter(bit) for bit in bits]\r\n return QuerystringNode(updates, removals)", "def query(url):", "def query(self, query, request_type=None):\n\n #encode to UTF-8\n try: query = query.encode(\"utf-8\")\n except: query = query.decode('raw_unicode_escape').encode(\"utf-8\")\n\n lowercase_query = query.lower()\n if lowercase_query.startswith(\"select\") or \\\n lowercase_query.startswith(\"describe\") or \\\n lowercase_query.startswith(\"show\") or \\\n request_type==\"GET\":\n\n return self._get(urllib.urlencode({'sql': query}))\n\n else:\n return self._post(urllib.urlencode({'sql': query}))", "def ParseQuery(mr, config, services):\n canned_query = savedqueries_helpers.SavedQueryIDToCond(\n mr.cnxn, services.features, mr.can)\n query_ast = query2ast.ParseUserQuery(\n mr.query, canned_query, query2ast.BUILTIN_ISSUE_FIELDS, config)\n\n is_fulltext_query = bool(\n query_ast.conjunctions and\n fulltext_helpers.BuildFTSQuery(\n query_ast.conjunctions[0], tracker_fulltext.ISSUE_FULLTEXT_FIELDS))\n\n return query_ast, is_fulltext_query", "def limited_parse_qsl(qs, keep_blank_values=False, encoding='utf-8',\n errors='replace', fields_limit=None):\n date = {'phenomenonTime', 'resultTime', 'validTime'}\n FIELDS_MATCH = re.compile('[&]')\n pairs = FIELDS_MATCH.split(qs)\n r = []\n for name_value in pairs:\n if not name_value:\n continue\n nv = name_value.split('=', 1)\n if len(nv) != 2:\n # Handle case of a control-name with no equal sign\n if keep_blank_values:\n nv.append('')\n else:\n continue\n if nv[1] or keep_blank_values:\n name = nv[0].replace('+', ' ')\n name = nv[0]\n name = unquote(name, encoding=encoding, errors=errors)\n if not any(a in nv[1] for a in date):\n value = nv[1].replace('+', ' ')\n value = nv[1]\n value = unquote(value, encoding=encoding, errors=errors)\n r.append((name, value))\n query_dict = {}\n for key, value in r:\n query_dict[key] = value\n return query_dict", "def format_query(query, option=''):\n q_tmp = []\n for s in query:\n if option != 'url' or s.isalnum():\n q_tmp.append(s)\n else:\n q_tmp.append('\\\"' + s.lower() + '\\\"')\n\n if option == 'file':\n sep = '_'\n else:\n sep = ' '\n q = sep.join(q_tmp)\n if option == 'url':\n q = urllib2.quote('\\'' + q + '\\'')\n return q", "def parse_query_string(s):\n res = {}\n pairs = s.split('&')\n for p in pairs:\n vals = [urldecode_plus(x) for x in p.split('=', 1)]\n if len(vals) == 1:\n res[vals[0]] = ''\n else:\n res[vals[0]] = 
vals[1]\n return res", "def parse(string):\r\n \r\n global local_vars\r\n # print \"parse(\"+string+\")\"\r\n\r\n # variables\r\n if string in local_vars: # e.g. 'y'\r\n return string\r\n elif string == 'it':\r\n # print 'it: ',references[0]\r\n return g.it\r\n\r\n # operators\r\n elif string.find('\\gamma') == 0:\r\n return gamma(string[7],string[9:-1])\r\n elif string.find('\\iota') == 0:\r\n # treating iota as gamma for now\r\n return iota(string[6],string[8:-1])\r\n\r\n # function application\r\n else:\r\n fun = string.split( '(' , 1)[0]\r\n arg = parse(string.split( '(' , 1)[1][:-1])\r\n exec(fun+'(arg)')", "def pp_query(query):\n print(format_query(query))", "def load_query(self, query_string: str) -> np.ndarray:\n rdmol = load_molecule(query_string)\n fp = build_fp(rdmol, self.fp_type, self.fp_params, 0)\n return np.array(fp, dtype=np.uint64)", "def __init__(self, query_string, parent = None):\n self.statement = query_string.strip()\n self.parent = parent\n # self.child is explicitly set by operators\n self.child = None", "def _stringquery(url_stub, querystring, simplequery, **kwargs):\n token = prep_token(**kwargs)\n query_dict = {'querystring':querystring,\n 'simplequery':simplequery}\n if 'maxresult' in kwargs:\n query_dict['maxresult'] = kwargs['maxresult']\n if 'sortfield' in kwargs:\n query_dict['sortfield'] = kwargs['sortfield']\n if 'sortascending' in kwargs:\n query_dict['sortascending'] = kwargs['sortascending']\n api_url = url_stub + '/textquery'\n resp_data = oauth2_wrappers.df_get(api_url, token, query_dict)\n return resp_data", "def parseString(self, s):\n return self.parser.parseString(s)", "def parse(string):\n \n global local_vars\n print \"parse(\"+string+\")\"\n\n # variables\n if string in local_vars: # e.g. 'y'\n return string\n elif string == 'it':\n # print 'it: ',references[0]\n return g.it\n\n # operators\n elif string.find('\\gamma') == 0:\n return gamma(string[7],string[9:-1])\n elif string.find('\\iota') == 0:\n # treating iota as gamma for now\n return iota(string[6],string[8:-1])\n\n # function application\n else:\n fun = string.split( '(' , 1)[0]\n arg = parse(string.split( '(' , 1)[1][:-1])\n exec(fun+'(arg)')", "def parse_qsl(qs, keep_blank_values=0, strict_parsing=0):\n pairs = [s2 for s1 in qs.split('&') for s2 in s1.split(';')]\n r = []\n for name_value in pairs:\n if not name_value and not strict_parsing:\n continue\n nv = name_value.split('=', 1)\n if len(nv) != 2:\n if strict_parsing:\n raise ValueError, \"bad query field: %r\" % (name_value,)\n # Handle case of a control-name with no equal sign\n if keep_blank_values:\n nv.append('')\n else:\n continue\n if len(nv[1]) or keep_blank_values:\n name = unquote(nv[0].replace('+', ' '))\n value = unquote(nv[1].replace('+', ' '))\n r.append((name, value))\n\n return r", "def parse_string(self, data):\r\n return self._parse(antlr3.ANTLRStringStream(data))", "def parse(self, input):\n pass", "def parseURL(url):\n\n\n scheme, host, path, params, query, hash = urlparse(url)\n if not path: path = \"/\"\n\n args = parse_qs(query)\n\n escapedArgs = {}\n for name in args:\n if len(args[name]) == 1:\n escapedArgs[unquote(name)] = unquote(args[name][0])\n else:\n escapedArgs[unquote(name)] = escapedSet = []\n for item in args[name]:\n escapedSet.append(unquote(item))\n\n return host, path, params, escapedArgs", "def parse(self, arg, local=True):\n if not isinstance(arg, StringTypes):\n raise TypeError('Expected a string argument')\n\n if not arg:\n raise SyntaxError(arg)\n\n if arg.find(' ') == -1 and len(arg) 
>= 5 and arg[4] == '-':\n yr, mo, dy, hr, mn, sc, tz = self._parse_iso8601(arg)\n else:\n yr, mo, dy, hr, mn, sc, tz = self._parse(arg, local)\n\n if not self._validDate(yr, mo, dy):\n raise DateError(arg, yr, mo, dy)\n if not self._validTime(hr, mn, int(sc)):\n raise TimeError(arg)\n\n return yr, mo, dy, hr, mn, sc, tz", "def test_parse():\n service = WebService(TestFactory())\n query = service.parse(\n parse_qs(\n \"id=BOU&starttime=2016-06-06\"\n \"&endtime=2016-06-07&elements=H,E,Z,F&sampling_period=60\"\n \"&format=iaga2002&type=variation\"\n )\n )\n assert_equal(query.observatory_id, \"BOU\")\n assert_equal(query.starttime, UTCDateTime(2016, 6, 6, 0))\n assert_equal(query.endtime, UTCDateTime(2016, 6, 7, 0))\n assert_equal(query.elements, [\"H\", \"E\", \"Z\", \"F\"])\n assert_equal(query.sampling_period, \"60\")\n assert_equal(query.output_format, \"iaga2002\")\n assert_equal(query.data_type, \"variation\")\n # Test that defaults are set for unspecified values\n now = datetime.now()\n today = UTCDateTime(year=now.year, month=now.month, day=now.day, hour=0)\n tomorrow = today + (24 * 60 * 60 - 1)\n query = service.parse(parse_qs(\"id=BOU\"))\n assert_equal(query.observatory_id, \"BOU\")\n assert_equal(query.starttime, today)\n assert_equal(query.endtime, tomorrow)\n assert_equal(query.elements, (\"X\", \"Y\", \"Z\", \"F\"))\n assert_equal(query.sampling_period, \"60\")\n assert_equal(query.output_format, \"iaga2002\")\n assert_equal(query.data_type, \"variation\")\n assert_raises(Exception, service.parse, parse_qs(\"/?id=bad\"))", "def query_retrieve(self, query_str):\n query = self.process_query(query_str)\n return self.boolean_retrieve(query)", "def parse_string(self, in_str):\n match = MAIN_REGEX.search(in_str)\n if not match:\n err_str = \"Unable to parse string: %s\" % in_str\n raise ValueError(err_str)\n self.parse_completed(match.group(1))\n self.parse_priority(match.group(2))\n if match.group(3) and match.group(4):\n self.parse_completion_date(match.group(3))\n self.parse_creation_date(match.group(4))\n else:\n self.parse_creation_date(match.group(3))\n self.parse_description(match.group(5))", "def query_schema(self, name, param):\n\n alias, name, need_list = self.parse_entry(name)\n\n if not name:\n result = self.process_multiple_query(need_list, param)\n else:\n result = self.process_single_query(name, need_list, param)\n return alias, result", "def query_graphql(raw_query, endpoint):\n query = \" \".join(shlex.split(raw_query, posix=False))\n r = requests.get(endpoint, params={\"query\": query})\n if r.status_code == 200:\n return r.json()\n elif r.status_code == 400:\n response = r.json()\n assert \"errors\" in response\n raise GraphQLError(\"\".join([e[\"message\"] for e in response[\"errors\"]]))\n else:\n raise requests.exceptions.RequestException(\n f\"HTTP Status: {r.status_code}, Response Body: {r.text}\"\n )", "def query(self, query):" ]
[ "0.6770412", "0.66441345", "0.6639585", "0.6633932", "0.6599556", "0.65507025", "0.6502794", "0.6495754", "0.64781725", "0.63631403", "0.6356323", "0.62238264", "0.62083244", "0.62080956", "0.6205427", "0.6139065", "0.6117859", "0.61134577", "0.6103953", "0.6038121", "0.6020819", "0.60109127", "0.5934991", "0.59249544", "0.589493", "0.5885861", "0.5864818", "0.58507276", "0.58479214", "0.58163154", "0.57843614", "0.5776385", "0.57751304", "0.5749186", "0.57395416", "0.5733977", "0.57299924", "0.56688386", "0.5662902", "0.5634936", "0.56279975", "0.5617458", "0.5590844", "0.5586901", "0.5585908", "0.55538344", "0.5547803", "0.55428284", "0.5540112", "0.55349123", "0.5523754", "0.5519117", "0.55118746", "0.5503337", "0.54960036", "0.54754174", "0.54715204", "0.54700667", "0.5461345", "0.54368705", "0.5426149", "0.5402446", "0.53987503", "0.5387543", "0.5387327", "0.53602004", "0.5358237", "0.534045", "0.5337419", "0.53285193", "0.5319066", "0.5299579", "0.5275421", "0.5271625", "0.52701306", "0.52683675", "0.5267394", "0.5261519", "0.5250641", "0.5243343", "0.5233228", "0.5229074", "0.52280927", "0.5227717", "0.52205336", "0.5214655", "0.5212346", "0.52048963", "0.5202235", "0.52005637", "0.519016", "0.51784265", "0.51711524", "0.51694745", "0.5164814", "0.5163242", "0.5162543", "0.5158322", "0.5152522", "0.51434934" ]
0.5299957
71
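(Illustrative aside, not a field of the row above: its negatives mostly hand-roll query-string splitting; the stdlib does the same in two calls. The URL here is made up.)

from urllib.parse import parse_qs, urlsplit

url = "http://example.com/search?q=idn+hosts&page=2"
print(parse_qs(urlsplit(url).query))
# {'q': ['idn hosts'], 'page': ['2']}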
Encode hostname as internationalized domain name (IDN) according to RFC 3490.
def idna_encode(host):
    if host and isinstance(host, unicode):
        try:
            host.encode('ascii')
            return host, False
        except UnicodeError:
            uhost = host.encode('idna').decode('ascii')
            return uhost, uhost != host
    return host, False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def idna_encode(self, domain):\n try:\n if isinstance(domain, str):\n domain = domain.decode('utf-8')\n return domain.encode('idna')\n except UnicodeError:\n return domain", "def _convert_to_idn(url):\n # this function should only be called with a unicode string\n # strategy: if the host cannot be encoded in ascii, then\n # it'll be necessary to encode it in idn form\n parts = list(urllib.parse.urlsplit(url))\n try:\n parts[1].encode('ascii')\n except UnicodeEncodeError:\n # the url needs to be converted to idn notation\n host = parts[1].rsplit(':', 1)\n newhost = []\n port = ''\n if len(host) == 2:\n port = host.pop()\n for h in host[0].split('.'):\n newhost.append(h.encode('idna').decode('utf-8'))\n parts[1] = '.'.join(newhost)\n if port:\n parts[1] += ':' + port\n return urllib.parse.urlunsplit(parts)\n else:\n return url", "def sanitize_hostname(hostname):\n if isinstance(hostname, six.string_types):\n hostname = hostname.encode('latin-1', 'ignore')\n if six.PY3:\n hostname = hostname.decode()\n hostname = re.sub('[ _]', '-', hostname)\n hostname = re.sub('[^\\w.-]+', '', hostname)\n hostname = hostname.lower()\n hostname = hostname.strip('.-')\n\n return hostname", "def fqdn_identifier(fqdn):\n return messages.Identifier(\n typ=messages.IDENTIFIER_FQDN, value=fqdn)", "def format_hostname(hostname: str) -> str:\n if has_ipv6 and re.match(r\"\\d+.\\d+.\\d+.\\d+\", hostname) is not None:\n hostname = f\"::ffff:{hostname}\"\n return hostname", "def create_domain_name(self, name):\n return (\"%s.%s.%s\" % (name, \"net\", self.domain)).lower()", "def nodename(name, hostname):\n return NODENAME_SEP.join((name, hostname))", "def _getHostname(fqdn):\n\treturn fqdn.split('.')[0]", "def fullHostname(self) -> str:\n\t\treturn self.hostname[1]", "def domain_dns_name(self):\n domain_dn = self.get_default_basedn()\n return domain_dn.canonical_str().split('/')[0]", "def domain_to_idna(line):\n\n if not line.startswith(\"#\"):\n tabs = \"\\t\"\n space = \" \"\n\n tabs_position, space_position = (line.find(tabs), line.find(space))\n\n if tabs_position > -1 and space_position > -1:\n if space_position < tabs_position:\n separator = space\n else:\n separator = tabs\n elif not tabs_position == -1:\n separator = tabs\n elif not space_position == -1:\n separator = space\n else:\n separator = \"\"\n\n if separator:\n splited_line = line.split(separator)\n\n try:\n index = 1\n while index < len(splited_line):\n if splited_line[index]:\n break\n index += 1\n\n if \"#\" in splited_line[index]:\n index_comment = splited_line[index].find(\"#\")\n\n if index_comment > -1:\n comment = splited_line[index][index_comment:]\n\n splited_line[index] = (\n splited_line[index]\n .split(comment)[0]\n .encode(\"IDNA\")\n .decode(\"UTF-8\")\n + comment\n )\n\n splited_line[index] = splited_line[index].encode(\"IDNA\").decode(\"UTF-8\")\n except IndexError:\n pass\n return separator.join(splited_line)\n return line.encode(\"IDNA\").decode(\"UTF-8\")\n return line.encode(\"UTF-8\").decode(\"UTF-8\")", "def ssl_hostname(self, code):\n return self.hostname(code)", "def shortHostname(self) -> str:\n\t\treturn self.hostname[0]", "def resolve_domain(host: str) -> str:\n parts = host.split('.')[-2:]\n return ''.join(parts)", "def genHostname(ipAddr):\n\tdomain = '.osdev.skrill.net.'\n\tif ipAddr:\n\t\treturn 'vm-' + '-'.join(ipAddr.split('.')) + domain\n\telse:\n\t\treturn ''", "def _hostname_prefix(self, hostname_str):\n\n if not hostname_str or len(hostname_str) == 0:\n msg = _(\"Invalid Hostname: %(hostname_str)s for storage\") 
% \\\n locals()\n LOG.exception(msg)\n ex_args = {'hostname': hostname_str}\n raise SVCInvalidHostnameError(**ex_args)\n if isinstance(hostname_str, unicode):\n hostname_str = hostname_str.translate(\n self._unicode_host_name_filter)\n elif isinstance(hostname_str, str):\n hostname_str = hostname_str.translate(\n self._string_host_name_filter)\n else:\n msg = _(\"Cannot clean host name: %(hostname_str)s for storage\") % \\\n locals()\n LOG.exception(msg)\n ex_args = {'hostname': hostname_str}\n raise SVCInvalidHostnameError(**ex_args)\n hostname_str = str(hostname_str)\n return hostname_str[:55]", "def flatten_hostname(hostname):\n return hostname.replace('.', '-')", "def hostname(self) -> str:\n _args: list[Arg] = []\n _ctx = self._select(\"hostname\", _args)\n return _ctx.execute_sync(str)", "def slicename_to_hostname(vs_name):\n fields = vs_name.split('_')\n if len(fields) == 1:\n prefix = vs_name\n else:\n # The vs_name prefix is the PlanetLab site name.\n # The rest is user-chosen. Place the site name after user-chosen name.\n prefix = '.'.join(fields[1:] + [fields[0]])\n return '%s.%s' % (prefix, _root_hostname)", "def hostname(name: str = \"\") -> str:\n ...", "def hostname():\n return socket.gethostname()", "def hostname(self, code):\n return self.domain", "def hostname():\n hostname = socket.gethostname()\n if '.' in hostname:\n hostname = hostname.split('.')[0]\n return hostname", "def domain_name(self) -> str:\n return pulumi.get(self, \"domain_name\")", "def domain_name(self) -> str:\n return pulumi.get(self, \"domain_name\")", "def create_dns_name ( base_name, name ) :\n return create_r53_name( base_name, name) + '.mse-esp.com'", "def get_hostname(self):\n raise NotImplementedError('get_hostname')", "def _namespace_to_ascii(self):\n\t\tparts = [part.encode(\"utf-8\") for part in self.namespace_parts]\n\t\treturn \":\".join(parts)", "def encode_name(self,name):\n #print(\"Hackable label decoder in place\")\n if not isinstance(name,dnslib.dns.DNSLabel):\n name = dnslib.dns.DNSLabel(name)\n if len(name) > 253:\n raise dnslib.dns.DNSLabelError(\"Domain label too long: %r\" % name)\n name = list(name.label)\n while name:\n if tuple(name) in self.names:\n # Cached - set pointer\n pointer = self.names[tuple(name)]\n pointer = set_bits(pointer,3,14,2)\n self.pack(\"!H\",pointer)\n return\n else:\n self.names[tuple(name)] = self.offset\n element = name.pop(0)\n if len(element) > MAX_LABEL_LEN:\n raise dnslib.dns.DNSLabelError(\"Label component too long: %r\" % element)\n self.pack(\"!B\",len(element))\n self.append(element)\n self.append(b'\\x00')", "def generate_domainname():\n domainname = ''.join(generate_string(10, valid_domain_name_chars))\n domain = random.choice(['com', 'co.il', 'info'])\n return domainname+'.'+domain", "def host_format(s, host=None, name=None, **extra):\n host = host or gethostname()\n hname, _, domain = host.partition('.')\n name = name or hname\n keys = dict({\n 'h': host, 'n': name, 'd': domain,\n 'i': _fmt_process_index, 'I': _fmt_process_index_with_prefix,\n }, **extra)\n return simple_format(s, keys)", "def _hostname(self, code, protocol=None):\n if protocol is None:\n protocol = self.protocol(code)\n if protocol == 'https':\n host = self.ssl_hostname(code)\n else:\n host = self.hostname(code)\n return protocol, host", "def domain(cls) -> str:\n return f'{cls.name}.wikimedia.org'", "def get_hostname(ip):\n hostname, aliases, ipaddresses = socket.gethostbyaddr(ip)\n return hostname", "def hostname_label(self):\n return self._hostname_label", "def 
create_internal_dns_name ( base_name, name ) :\n name = name + '.internal'\n return create_dns_name( base_name, name )", "def host_dns_name(self):\n res = self.search(base='', scope=ldb.SCOPE_BASE, attrs=['dNSHostName'])\n return str(res[0]['dNSHostName'][0])", "def format_host(host):\n\n host = strip_suffix(host, \".lan.urlab.be\")\n host = strip_suffix(host, \".lan\")\n host = strip_suffix(host, \".local\")\n host = strip_suffix(host, \"iPodtouch\")\n host = strip_suffix(host, \"-PC\")\n host = strip_suffix(host, \"-pc\")\n\n host = strip_prefix(host, \"pc-\")\n host = strip_prefix(host, \"PC-\")\n host = strip_prefix(host, \"DESKTOP-\")\n host = strip_prefix(host, \"LAPTOP-\")\n host = strip_prefix(host, \"iPod-de-\")\n host = strip_prefix(host, \"iPadde\")\n\n return host", "def hostname_from_addon_slug(addon_slug: str) -> str:\n return addon_slug.replace(\"_\", \"-\")", "def hostname_to_site(hostname: str) -> str:\n if hostname == \"biothings.ncats.io\" or hostname.endswith(\"transltr.io\"):\n return \"ncats\"\n\n return \"pending\"", "def encode_url(url):\n\treturn url.replace(' ', '_')", "def server_domain(self):\n url = self.api.address\n domain_start = url.find('://') + 3 if url.find('://') >= 0 else 0\n domain_end = url.find(':', domain_start) if url.find(':', domain_start) >= 0 else \\\n url.find('/', domain_start) if url.find('/', domain_start) >= 0 else \\\n url.find('?', domain_start) if url.find('?', domain_start) >= 0 else \\\n len(url)\n regex = re.compile('[^a-zA-Z0-9\\.]') # being cautious as changing this later will invalidate everyone's cache\n return regex.sub('_', url[domain_start:domain_end]).lower()", "def get_domain_name(self):\n return self.domain_name.get_text()", "def get_hostname(self):\n\t\treturn call_sdk_function('PrlSrvCfg_GetHostname', self.handle)", "def get_hostname():\n\thostname = socket.gethostname()\n\n\treturn hostname", "def convert_x509_name(name):\n types = {\n 'country_name': 'C',\n 'state_or_province_name': 'ST',\n 'locality_name': 'L',\n 'organization_name': 'O',\n 'organizational_unit_name': 'OU',\n 'common_name': 'CN',\n 'email_address': 'emailAddress'\n }\n\n return '/'.join(['{}={}'.format(types[attr], name.native[attr]) for attr in name.native])", "def nickname(self):\n if (self.__email and self.__auth_domain and\n self.__email.endswith('@' + self.__auth_domain)):\n suffix_len = len(self.__auth_domain) + 1\n return self.__email[:-suffix_len]\n else:\n return self.__email", "def fqdn(self) -> str:\n\n return socket.getfqdn()", "def domain_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"domain_name\")", "def domain_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"domain_name\")", "def domain_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"domain_name\")", "def _ensure_fqdn(self, name):\n if name[-1:] != \".\":\n return \"%s.\" % name\n else:\n return name", "def fqdn(self):\n if not self._fqdn:\n self._fqdn = socket.getfqdn()\n return self._fqdn", "def fqdn(self):\n raise NotImplementedError", "def internet_domain(self) -> str:\n return pulumi.get(self, \"internet_domain\")", "def get_fqdn():\n return socket.getfqdn()", "def encode(self, idna_encoder=None): # noqa: C901\n authority = self.authority\n if authority:\n if idna_encoder is None:\n if idna is None: # pragma: no cover\n raise exceptions.MissingDependencyError(\n \"Could not import the 'idna' module \"\n \"and the IRI hostname requires encoding\"\n )\n\n def idna_encoder(name):\n if any(ord(c) > 128 for c in name):\n try:\n return 
idna.encode(name.lower(),\n strict=True,\n std3_rules=True)\n except idna.IDNAError:\n raise exceptions.InvalidAuthority(self.authority)\n return name\n\n authority = \"\"\n if self.host:\n authority = \".\".join([compat.to_str(idna_encoder(part))\n for part in self.host.split(\".\")])\n\n if self.userinfo is not None:\n authority = (normalizers.encode_component(\n self.userinfo, self.encoding) + '@' + authority)\n\n if self.port is not None:\n authority += \":\" + str(self.port)\n\n return uri.URIReference(self.scheme,\n authority,\n path=self.path,\n query=self.query,\n fragment=self.fragment,\n encoding=self.encoding)", "def hostname_for_event(self, clean_server_name, agentConfig):\n uri = urlsplit(clean_server_name)\n if '@' in uri.netloc:\n hostname = uri.netloc.split('@')[1].split(':')[0]\n else:\n hostname = uri.netloc.split(':')[0]\n if hostname == 'localhost':\n hostname = self.hostname\n return hostname", "def test_sanitized_hostname(self):\n value = \" ../ ../some/dubious/hostname \"\n response = clean.hostname(value)\n assert response == \"somedubioushostname\"", "def getRequestHostname():", "def getHostnameFromURL(self, url):\n hostname = urllib.splithost(urllib.splittype(url)[1])[0]\n logging.debug(\"Parsed hostname %r for cert CN matching.\" % hostname)\n return hostname", "def hostify_url(url):\n\tif url[0] == '/':\n\t\treturn HOST + url\n\telse:\n\t\treturn url", "def format_unique_id(address: str) -> str:\n return address.replace(\":\", \"\").lower()", "def get_host_name():\n return socket.gethostname()", "def server_hostname(self):\n return dns.future_hostname(\n future_gethostbyaddr=self._server_host,\n fallback_ip=self.server_ip)", "def Hostname(self):\n if self.force_auto_sync:\n self.get('Hostname')\n return self._Hostname", "def _make_id(self,kwargs, key=\"nid\"):\n\t\teid = \"\"\n\t\tif kwargs.get(\"host\", False):\n\t\t\teid = kwargs.get('host', \"\") + \"-\" + kwargs.get(key)\n\t\telse:\n\t\t\teid = kwargs.get(key)\n\t\treturn eid", "def encode_email(email, key):\n return", "def get_hostname(url: str) -> str:\n return urlsplit(url).hostname", "def domain_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"domain_name\")", "def domain_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"domain_name\")", "def domain_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"domain_name\")", "def encode(self, longUrl: str) -> str:\n if longUrl in long2short:\n return prefix + long2short[longUrl]\n else:\n gen_letter = ''.join([letters[random.randint(0,61)] for i in range(6)])\n long2short[longUrl] = gen_letter\n short2long[gen_letter] = longUrl\n return prefix + gen_letter", "def hostname(self):\n return self._hostname", "def test_verify_hostname(self):\n verify_certificate_hostname(X509_DNS_ONLY, u\"twistedmatrix.com\")", "def hostname(self, hostname):\n if hostname is not None and not re.search('^(?=.{1,255}$)[0-9A-Za-z](?:(?:[0-9A-Za-z]|-){0,61}[0-9A-Za-z])?(?:\\\\.[0-9A-Za-z](?:(?:[0-9A-Za-z]|-){0,61}[0-9A-Za-z])?)*\\\\.?$', hostname): # noqa: E501\n raise ValueError(\"Invalid value for `hostname`, must be a follow pattern or equal to `/^(?=.{1,255}$)[0-9A-Za-z](?:(?:[0-9A-Za-z]|-){0,61}[0-9A-Za-z])?(?:\\\\.[0-9A-Za-z](?:(?:[0-9A-Za-z]|-){0,61}[0-9A-Za-z])?)*\\\\.?$/`\") # noqa: E501\n\n self._hostname = hostname", "def resolve_hostname(request, hostname):\n try:\n ipaddress = usm_wrapper_utils.resolve_hostname(hostname)\n except Exception, e:\n log.exception(e)\n return Response(\n {'message': 'Error while resolving hostname'}, 
status=417)\n\n return Response({'IP_Address': ipaddress}, status=200)", "def gethostbycondorname(name):\n\n m = htcondor_ip_name_re.match(name)\n if m is not None:\n return m.group(1).replace('-', '.')\n else:\n return socket.gethostbyname(name)", "def get_host_name(self):\n\t\treturn call_sdk_function('PrlSrvInfo_GetHostName', self.handle)", "def get_hostname(self):\n prompt = self.session.find_prompt()\n backup_logger.info(f\"Getting hostname configured for {self.current_device}:\")\n hostname_configured = re.search(r'.*?[:@]?([\\w\\-_]*)[#>]', prompt, re.MULTILINE).group(1)\n self.hostname = hostname_configured", "def encode_cookie(uid):\n unsigned_ints = []\n if len(uid) != 32:\n return None\n for i in range(0, 32, 8):\n host_byte_str = uid[i:i+8]\n try:\n host_byte_int = int(host_byte_str, 16)\n except ValueError:\n return None\n net_byte_int = socket.htonl(host_byte_int)\n unsigned_ints.append(net_byte_int)\n binary_cookie = struct.pack('!4I', *unsigned_ints)\n cookie_bytes = base64.b64encode(binary_cookie)\n return str(cookie_bytes, 'utf8')", "def localizeForHostName(filename): \n hostname = socket.gethostname()\n if hostname in filename:\n updated_filename = filename.replace(hostname, '')\n return updated_filename.strip('-')\n return filename", "def host(self):\n if self.url.startswith(\"dns:\"):\n return self.url[4:]\n else:\n return urlparse(self.url).hostname", "def ZeusEmailId(cls, mail_domain):\n return ('zeus+%s+noreply@%s.%s' %\n (PipelineConfig.Instance().pipeline_id(), socket.gethostname(),\n mail_domain))", "def connection_string_to_hostname(conn_str):\n conn_str_obj = ConnectionString(conn_str)\n return conn_str_obj.get(\"HostName\")", "def _encode_userinfo_part(text, maximal=True):\n if maximal:\n bytestr = normalize('NFC', text).encode('utf8')\n return u''.join([_USERINFO_PART_QUOTE_MAP[b] for b in bytestr])\n return u''.join([_USERINFO_PART_QUOTE_MAP[t] if t in _USERINFO_DELIMS\n else t for t in text])", "def normalize_fqdn(fqdn):\n if not fqdn:\n return None\n\n if fqdn.endswith('/'):\n fqdn = fqdn.strip('/')\n\n # bare fqdn, fallback to http://\n if not fqdn.startswith('http'):\n fqdn = \"http://%s\" % fqdn\n return fqdn", "def url_fix_host(urlparts):\n # if not urlparts[1]:\n # urlparts[2] = urllib.unquote(urlparts[2])\n # return False\n userpass, netloc = urllib.splituser(urlparts[1])\n if userpass:\n userpass = urllib.unquote(userpass)\n netloc, is_idn = idna_encode(urllib.unquote(netloc).lower())\n # a leading backslash in path causes urlsplit() to add the\n # path components up to the first slash to host\n # try to find this case...\n i = netloc.find(\"\\\\\")\n if i != -1:\n # ...and fix it by prepending the misplaced components to the path\n comps = netloc[i:] # note: still has leading backslash\n if not urlparts[2] or urlparts[2] == '/':\n urlparts[2] = comps\n else:\n urlparts[2] = \"%s%s\" % (comps, urllib.unquote(urlparts[2]))\n netloc = netloc[:i]\n else:\n # a leading ? in path causes urlsplit() to add the query to the\n # host name\n i = netloc.find(\"?\")\n if i != -1:\n netloc, urlparts[3] = netloc.split('?', 1)\n # path\n urlparts[2] = urllib.unquote(urlparts[2])\n if userpass and userpass != ':':\n # append AT for easy concatenation\n userpass += \"@\"\n else:\n userpass = \"\"\n\n if urlparts[0] in default_ports:\n dport = default_ports[urlparts[0]]\n host, port = splitport(netloc, port=dport)\n\n host = host.rstrip('. 
')\n if port != dport:\n host = \"%s:%d\" % (host, port)\n netloc = host\n urlparts[1] = userpass + netloc\n return is_idn", "def create_internal_elb_dns_name ( base_name, name ) :\n return 'lb.' + create_dns_name( base_name, name )", "def get_hostname():\n global HOST\n if '.' in HOST:\n HOST = HOST.split('.')[0]\n return HOST", "def domain_sort_key(domain):\n import re\n domain_expr = r'(.*\\.)?(.*\\.)(.*)' # Eg: (www.)(google.)(com)\n domain_search = re.search(domain_expr, domain)\n\n if domain_search and domain_search.group(1):\n # sort by domain name and then everything left of\n # Eg: google, com, www\n domain_values = (\n domain_search.group(2),\n domain_search.group(3),\n domain_search.group(1)\n )\n key = '%s%s%s' % domain_values\n else:\n # no host portion, just return the domain name\n key = domain\n return(key)", "def get_true_hostname():\n try:\n address = socket.gethostbyname(socket.gethostname())\n except:\n address = ''\n if not address or address.startswith('127.'):\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n s.connect(('4.2.2.1', 0))\n address = s.getsockname()[0]\n return address", "def elReplaceHostname(self, hostname):\n # see http://docs.redhat.com/docs/en-US/Red_Hat_Enterprise_Linux/6/html/Installation_Guide/s1-kickstart2-options.html\n hostname = re.escape(hostname) # precaution\n commandSection = self.sectionByName(\"command\")\n # change to hostname\n commandSection.string = re.sub(r\"(?m)^([ \\t]*network[ \\t]+.*--hostname[ \\t]*(?:=|[ \\t])[ \\t]*)[^\\s]+(.*)$\",\n r\"\\g<1>\" + hostname + r\"\\g<2>\",\n commandSection.string)\n return self", "def get_host_name(self):\n if self.have_metadata is False:\n self._get_metadata()\n self.have_metadata = True\n\n try:\n return self.keyinfo['tracking_id'].attrs['hostname']\n except:\n return None\n\n if self.have_metadata is False:\n self._get_metadata()\n self.have_metadata = True", "def get_hostname(self):\n\t\treturn call_sdk_function('PrlVmCfg_GetHostname', self.handle)", "def get_fqdn(ip_address):\n return socket.gethostbyaddr(ip_address)", "def qp_encode_display_name(display_name: str) -> str:\n words: list[str] = []\n current_word: list[str] = []\n\n def finish_word() -> None:\n nonlocal current_word\n content = ''.join(current_word)\n words.append(f'{qp_prefix}{content}{qp_suffix}')\n current_word = []\n\n for character in display_name:\n if character == ' ':\n # special case for header encoding\n characters = ['_']\n elif character in alphanumeric:\n # no need to encode this character\n characters = [character]\n else:\n # QP encode the character\n characters = list(\n ''.join(f'={c:02X}' for c in character.encode('utf-8'))\n )\n\n if len(current_word) + len(characters) > QP_CONTENT_LENGTH:\n finish_word()\n\n current_word.extend(characters)\n\n finish_word()\n if len(words) == 1:\n # We can omit the enclosing double quotes\n return words[0]\n\n # NOTE: The enclosing double quotes are necessary so that spaces\n # as word separators can be parsed correctly.\n return f'\"{\" \".join(words)}\"'", "def Hostname(self):\n return self._get_attribute('hostname')", "def gethostname(ipaddr):\n return socket.gethostbyaddr(ipaddr)[0]", "def hostname(self):\n version_data = self._raw_version_data()\n if self._hostname is None:\n self._hostname = version_data[\"hostname\"]\n\n return self._hostname" ]
[ "0.67306507", "0.6296275", "0.60186315", "0.5950868", "0.59477776", "0.59150565", "0.5861521", "0.585729", "0.56459177", "0.5601266", "0.55888164", "0.5584511", "0.5578367", "0.5569673", "0.5569479", "0.5557148", "0.55279016", "0.55123955", "0.5506608", "0.5506079", "0.5461082", "0.54462546", "0.54368913", "0.5417022", "0.5417022", "0.54131514", "0.54117095", "0.5410212", "0.5408858", "0.54048574", "0.53991055", "0.53965", "0.5394829", "0.53929704", "0.53927267", "0.5369583", "0.536941", "0.53375566", "0.53334033", "0.53259987", "0.5313899", "0.52689403", "0.5251169", "0.5235177", "0.5212587", "0.52100575", "0.5199493", "0.5178011", "0.51769567", "0.51769567", "0.51769567", "0.51764727", "0.5166669", "0.51614016", "0.5150846", "0.5138229", "0.5137133", "0.51304615", "0.51255107", "0.5123384", "0.511603", "0.5112887", "0.5107715", "0.5103448", "0.5099979", "0.5098109", "0.50974613", "0.50932366", "0.5084566", "0.5078694", "0.5078694", "0.5078694", "0.507739", "0.50739306", "0.5073334", "0.5072935", "0.5063779", "0.50585574", "0.5058042", "0.50558674", "0.5039181", "0.5031629", "0.5017332", "0.50011796", "0.49946785", "0.49907893", "0.49882215", "0.4983079", "0.4974049", "0.49728742", "0.4969962", "0.49667385", "0.49580437", "0.49579766", "0.4957636", "0.4952795", "0.49518225", "0.49479768", "0.49443728", "0.49434566" ]
0.59071285
6
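(Illustrative aside, not a field of the row above: its positive document is Python 2 code — it tests isinstance(host, unicode). Below is a self-contained Python 3 sketch of the same behavior, using the stdlib 'idna' codec, which implements RFC 3490; the sample hostnames are made up.)

def idna_encode(host):
    # Returns (ascii_host, is_idn), mirroring the document above.
    if host:
        try:
            host.encode("ascii")
            return host, False
        except UnicodeError:
            uhost = host.encode("idna").decode("ascii")
            return uhost, uhost != host
    return host, False

print(idna_encode("bücher.example"))  # ('xn--bcher-kva.example', True)
print(idna_encode("example.com"))     # ('example.com', False)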
Unquote and fix hostname. Returns is_idn.
def url_fix_host(urlparts):
    # if not urlparts[1]:
    #     urlparts[2] = urllib.unquote(urlparts[2])
    #     return False
    userpass, netloc = urllib.splituser(urlparts[1])
    if userpass:
        userpass = urllib.unquote(userpass)
    netloc, is_idn = idna_encode(urllib.unquote(netloc).lower())
    # a leading backslash in path causes urlsplit() to add the
    # path components up to the first slash to host
    # try to find this case...
    i = netloc.find("\\")
    if i != -1:
        # ...and fix it by prepending the misplaced components to the path
        comps = netloc[i:]  # note: still has leading backslash
        if not urlparts[2] or urlparts[2] == '/':
            urlparts[2] = comps
        else:
            urlparts[2] = "%s%s" % (comps, urllib.unquote(urlparts[2]))
        netloc = netloc[:i]
    else:
        # a leading ? in path causes urlsplit() to add the query to the
        # host name
        i = netloc.find("?")
        if i != -1:
            netloc, urlparts[3] = netloc.split('?', 1)
    # path
    urlparts[2] = urllib.unquote(urlparts[2])
    if userpass and userpass != ':':
        # append AT for easy concatenation
        userpass += "@"
    else:
        userpass = ""

    if urlparts[0] in default_ports:
        dport = default_ports[urlparts[0]]
        host, port = splitport(netloc, port=dport)

        host = host.rstrip('. ')
        if port != dport:
            host = "%s:%d" % (host, port)
        netloc = host
    urlparts[1] = userpass + netloc
    return is_idn
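(Illustrative aside, not a field of the row: url_fix_host above folds several normalizations together and relies on Python 2 helpers such as urllib.splituser. Below is a self-contained Python 3 sketch of just the default-port stripping step; strip_default_port and DEFAULT_PORTS are hypothetical names, and userinfo handling is omitted for brevity.)

from urllib.parse import urlsplit

DEFAULT_PORTS = {"http": 80, "https": 443}  # assumed mapping, analogous to default_ports above

def strip_default_port(url):
    # Drop an explicit port that matches the scheme's default.
    parts = urlsplit(url)
    if parts.port == DEFAULT_PORTS.get(parts.scheme):
        parts = parts._replace(netloc=parts.hostname or "")  # note: drops any userinfo
    return parts.geturl()

print(strip_default_port("http://Example.com:80/a"))    # http://example.com/a
print(strip_default_port("http://example.com:8080/a"))  # http://example.com:8080/a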
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def idna_encode(host):\n if host and isinstance(host, unicode):\n try:\n host.encode('ascii')\n return host, False\n except UnicodeError:\n uhost = host.encode('idna').decode('ascii')\n return uhost, uhost != host\n return host, False", "def test_sanitized_hostname(self):\n value = \" ../ ../some/dubious/hostname \"\n response = clean.hostname(value)\n assert response == \"somedubioushostname\"", "def sanitize_hostname(hostname):\n if isinstance(hostname, six.string_types):\n hostname = hostname.encode('latin-1', 'ignore')\n if six.PY3:\n hostname = hostname.decode()\n hostname = re.sub('[ _]', '-', hostname)\n hostname = re.sub('[^\\w.-]+', '', hostname)\n hostname = hostname.lower()\n hostname = hostname.strip('.-')\n\n return hostname", "def _parse_host(host: str) -> str:\n urlparse_host = urlsplit(host).hostname\n if urlparse_host:\n # In this case, host = https://xx.cloud.databricks.com\n return urlparse_host\n else:\n # In this case, host = xx.cloud.databricks.com\n return host", "def is_valid_host(host):\n host = host.encode('idna').lower()\n if not hasattr(is_valid_host, '_re'):\n is_valid_host._re = re.compile(r'^([0-9a-z][-\\w]*[0-9a-z]\\.)+[a-z0-9\\-]{2,15}$')\n return bool(is_valid_host._re.match(host))", "def is_valid_hostname(string: str) -> bool:\n\n return hostname_regex.match(string) is not None", "def _convert_to_idn(url):\n # this function should only be called with a unicode string\n # strategy: if the host cannot be encoded in ascii, then\n # it'll be necessary to encode it in idn form\n parts = list(urllib.parse.urlsplit(url))\n try:\n parts[1].encode('ascii')\n except UnicodeEncodeError:\n # the url needs to be converted to idn notation\n host = parts[1].rsplit(':', 1)\n newhost = []\n port = ''\n if len(host) == 2:\n port = host.pop()\n for h in host[0].split('.'):\n newhost.append(h.encode('idna').decode('utf-8'))\n parts[1] = '.'.join(newhost)\n if port:\n parts[1] += ':' + port\n return urllib.parse.urlunsplit(parts)\n else:\n return url", "def hostify_url(url):\n\tif url[0] == '/':\n\t\treturn HOST + url\n\telse:\n\t\treturn url", "def is_fqdn(address):\n\n return bool(re.match(re_fqdn, address))", "def chkfqdn(fqdn):\n if fqdn is None:\n return False\n hp = hostportion(fqdn)\n # not needed right now: pp = portportion(fqdn)\n # TODO need to augment this for IPv6 addresses\n return re.match('^[a-zA-Z0-9_-]+(\\\\.[a-zA-Z0-9_-]+)+$', hp) is not None", "def format_hostname(hostname: str) -> str:\n if has_ipv6 and re.match(r\"\\d+.\\d+.\\d+.\\d+\", hostname) is not None:\n hostname = f\"::ffff:{hostname}\"\n return hostname", "def _is_valid_target(hostname):\n if not hostname:\n return False\n\n # Check if it's a valid IP\n if _is_valid_ipv4_address(hostname) or _is_valid_ipv6_address(hostname):\n return True\n\n # Check if it's a valid DNS name\n\n if hostname[-1] == '.':\n hostname = hostname[:-1] # strip exactly one dot from the right, if present\n if len(hostname) < 1 or len(hostname) > 253: # Technically 255 octets but 2 are used for encoding\n return False\n\n labels = hostname.split(\".\")\n\n # the TLD must be not all-numeric\n if re.match(r\"[0-9]+$\", labels[-1]):\n return False\n\n allowed = re.compile(r\"(?!-)[a-z0-9-]{1,63}(?<!-)$\", re.IGNORECASE)\n return all(allowed.match(label) for label in labels)", "def _is_shorthand_ip(ip_str):\r\n if ip_str.count('::') == 1:\r\n return True\r\n if filter(lambda x: len(x) < 4, ip_str.split(':')):\r\n return True\r\n return False", "def _validate_hostname(input: str):\n if len(input) == 0:\n return input\n 
name_re = re.compile(r\"^(kafka://)?(([^[:/]+|\\[[^\\]/]+\\])(:[0-9]*)?)$\")\n match = name_re.match(input)\n if match is None:\n raise RuntimeError(\"Unable to parse hostname. \"\n \"Please enter either `hostname` or `hostname:port`.\")\n if match.group(1) is not None:\n logger.warning(f\"Ignoring '{match.group(1)}' prefix on hostname\")\n return match.group(2)", "def is_ncar_host():\n hostname = socket.getfqdn()\n \n return any([re.compile(ncar_host).search(hostname) \n for ncar_host in ['cheyenne', 'casper', 'hobart']])", "def get_host_by_name(self, hostname: str) -> Union[str, Literal[False]]:\n self._read_line()\n if self._debug:\n print(\"*** Get host by name\")\n if isinstance(hostname, str):\n hostname = bytes(hostname, \"utf-8\")\n\n self._uart_write(b'AT+CDNSGIP=\"' + hostname + b'\"\\r\\n')\n self._read_line(10000) # Read the +CDNSGIP, takes a while\n\n if not self._parse_reply(b\"+CDNSGIP: \", idx=2):\n return False\n return self._buf", "def test_verify_hostname(self):\n verify_certificate_hostname(X509_DNS_ONLY, u\"twistedmatrix.com\")", "def validate_host(self, host: str) -> bool:\n ip_address_regex = re.compile(r'^(?:(?:25[0-5]|2[0-4][0-9]|1[0-9][0-9]|[1-9]?[0-9])\\.){3}'\n r'(?:25[0-5]|2[0-4][0-9]|1[0-9][0-9]|[1-9]?[0-9])$')\n hostname_regex = re.compile(r'^(?:[a-z0-9](?:[a-z0-9-]{0,61}[a-z0-9])?\\.)+'\n r'[a-z0-9][a-z0-9-]{0,61}[a-z0-9]$')\n url_regex = re.compile(r'^(ldaps?)://'\n r'((?:[a-z0-9](?:[a-z0-9-]{0,61}[a-z0-9])?\\.)+'\n r'[a-z0-9][a-z0-9-]{0,61}[a-z0-9]):'\n r'([0-9]{1,5})$')\n if bool(ip_address_regex.match(host)):\n # using ipv4 address\n valid = True\n elif bool(hostname_regex.match(host)):\n # using a hostname address\n valid = True\n elif bool(url_regex.match(host)):\n # using host url address\n match = url_regex.match(host)\n proto = match.group(1)\n if proto == 'ldaps':\n self.server_use_ssl = True\n valid = True\n else:\n # unsupported host format\n valid = False\n return valid", "def include_hostnames(nmap_host):\n if nmap_host.hostnames:\n return True\n return False", "def _getHostname(fqdn):\n\treturn fqdn.split('.')[0]", "def test_hostname_value(self):\n \n hostname = get_hostname()\n \n # Check to make sure the hostname is \"tjw-imac.grid.labs\"\n self.assertEqual(hostname, 'tjw-imac.grid.labs')", "def enforce_hostnames(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"enforce_hostnames\")", "def enforce_hostnames(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"enforce_hostnames\")", "def hostname_to_site(hostname: str) -> str:\n if hostname == \"biothings.ncats.io\" or hostname.endswith(\"transltr.io\"):\n return \"ncats\"\n\n return \"pending\"", "def get_true_hostname():\n try:\n address = socket.gethostbyname(socket.gethostname())\n except:\n address = ''\n if not address or address.startswith('127.'):\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n s.connect(('4.2.2.1', 0))\n address = s.getsockname()[0]\n return address", "def hostname_for_event(self, clean_server_name, agentConfig):\n uri = urlsplit(clean_server_name)\n if '@' in uri.netloc:\n hostname = uri.netloc.split('@')[1].split(':')[0]\n else:\n hostname = uri.netloc.split(':')[0]\n if hostname == 'localhost':\n hostname = self.hostname\n return hostname", "def hostname_from_addon_slug(addon_slug: str) -> str:\n return addon_slug.replace(\"_\", \"-\")", "def validate_slug(host_slug: str,\n database_connection: mysql.connector.connect) -> bool:\n host_slug = host_slug.strip()\n if not host_slug:\n return False\n\n try:\n cursor = 
database_connection.cursor()\n query = \"SELECT hostslug FROM ww_hosts WHERE hostslug = %s;\"\n cursor.execute(query, (host_slug,))\n result = cursor.fetchone()\n cursor.close()\n\n return bool(result)\n except ProgrammingError as err:\n raise ProgrammingError(\"Unable to query the database\") from err\n except DatabaseError as err:\n raise DatabaseError(\"Unexpected database error\") from err", "def host_okay(self, host: str) -> bool:\n host = host.lower()\n if host in self.root_domains:\n return True\n\n if re.match(r'\\A[\\d\\.]*\\Z', host):\n return False\n\n if self.strict:\n return self.host_valid_strict(host)\n\n return self.host_valid_lenient(host)", "def VerifyNodeHostname(self, node, ssh_port):\n cmd = (\"if test -z \\\"$GANETI_HOSTNAME\\\"; then\"\n \" hostname --fqdn;\"\n \"else\"\n \" echo \\\"$GANETI_HOSTNAME\\\";\"\n \"fi\")\n retval = self.Run(node, constants.SSH_LOGIN_USER, cmd,\n quiet=False, port=ssh_port)\n\n if retval.failed:\n msg = \"ssh problem\"\n output = retval.output\n if output:\n msg += \": %s\" % output\n else:\n msg += \": %s (no output)\" % retval.fail_reason\n logging.error(\"Command %s failed: %s\", retval.cmd, msg)\n return False, msg\n\n remotehostname = retval.stdout.strip()\n\n if not remotehostname or remotehostname != node:\n if node.startswith(remotehostname + \".\"):\n msg = \"hostname not FQDN\"\n else:\n msg = \"hostname mismatch\"\n return False, (\"%s: expected %s but got %s\" %\n (msg, node, remotehostname))\n\n return True, \"host matches\"", "def enforce_hostnames(self) -> pulumi.Output[Optional[bool]]:\n return pulumi.get(self, \"enforce_hostnames\")", "def handle_hostname(bot, ievent):\n try:\n item = ievent.args[0]\n except IndexError:\n ievent.missing('<ipnr>')\n return\n try:\n hostname = socket.gethostbyaddr(item)\n ievent.reply(hostname[0])\n except:\n ievent.reply(\"can't match \" + str(item))", "def is_id(s):\n return s and ' ' not in s.strip()", "def _transform_identifier(self, identifier, scheme):\n urlize = self.context.get(\"urlize_identifiers\", True)\n prefix_scheme = self.context.get(\"prefix_identifier_schemes\", True)\n result = None\n\n if urlize:\n result = idutils.to_url(identifier, scheme, url_scheme=\"https\")\n\n if not result and prefix_scheme and not identifier.startswith(scheme):\n result = f\"{scheme}:{identifier}\"\n\n return result or identifier", "def flatten_hostname(hostname):\n return hostname.replace('.', '-')", "def format_host(host):\n\n host = strip_suffix(host, \".lan.urlab.be\")\n host = strip_suffix(host, \".lan\")\n host = strip_suffix(host, \".local\")\n host = strip_suffix(host, \"iPodtouch\")\n host = strip_suffix(host, \"-PC\")\n host = strip_suffix(host, \"-pc\")\n\n host = strip_prefix(host, \"pc-\")\n host = strip_prefix(host, \"PC-\")\n host = strip_prefix(host, \"DESKTOP-\")\n host = strip_prefix(host, \"LAPTOP-\")\n host = strip_prefix(host, \"iPod-de-\")\n host = strip_prefix(host, \"iPadde\")\n\n return host", "def isSane(self):\n\n if self.host == 'localhost':\n return True\n\n host_parts = self.host.split('.')\n if self.wildcard:\n assert host_parts[0] == '', host_parts\n del host_parts[0]\n\n # If it's an absolute domain name, remove the empty string\n # from the end.\n if host_parts and not host_parts[-1]:\n del host_parts[-1]\n\n if not host_parts:\n return False\n\n # Do not allow adjacent dots\n if '' in host_parts:\n return False\n\n tld = host_parts[-1]\n if tld not in _top_level_domains:\n return False\n\n if len(tld) == 2:\n if len(host_parts) == 1:\n # entire host part is 
2-letter tld\n return False\n\n if len(host_parts[-2]) <= 3:\n # It's a 2-letter tld with a short second to last segment\n # so there needs to be more than two segments specified \n # (e.g. *.co.uk is insane)\n return len(host_parts) > 2\n else:\n # A long second to last segment is specified.\n return len(host_parts) > 1\n else:\n # It's a regular tld, so it needs at least one more segment\n return len(host_parts) > 1\n\n # Fell through, so not sane\n return False", "def validate_target(target: str) -> bool:\n try:\n gethostbyname(target)\n except (gaierror, UnicodeError):\n return False\n return True", "def hostname(name: str = \"\") -> str:\n ...", "def _host(name):\n\n\ttry:\n\t\tsocket.gethostbyname(name)\n\t\treturn name\n\texcept socket.error:\n\t\traise argparse.ArgumentTypeError(\"Invalid hostname: \" + name)\n\ttry:\n\t\tsocket.inet_aton(name)\n\t\treturn name\n\texcept socket.error:\n\t\traise argparse.ArgumentTypeError(\"Invalid ip address: \" + name)", "def _aa_host_name(self):\n self.is_option = True\n self.is_statement = False\n self.has_validator = True\n if not (self.value.startswith('\"') and self.value.endswith('\"')):\n self.value = '\"' + self.value + '\"'\n validate_name(self.value.strip('\"'))", "def test_parse_url_lowercase_host() -> None:\n assert indieauth._parse_url(\"http://ex.com/hello\").path == \"/hello\"\n assert indieauth._parse_url(\"http://EX.COM/hello\").hostname == \"ex.com\"\n\n parts = indieauth._parse_url(\"http://EX.COM:123/HELLO\")\n assert parts.netloc == \"ex.com:123\"\n assert parts.path == \"/HELLO\"", "def host_valid_strict(self, host: str) -> bool:\n host = host[4:] if host.startswith('www.') else 'www.' + host\n return host in self.root_domains", "def normalize_fqdn(fqdn):\n if not fqdn:\n return None\n\n if fqdn.endswith('/'):\n fqdn = fqdn.strip('/')\n\n # bare fqdn, fallback to http://\n if not fqdn.startswith('http'):\n fqdn = \"http://%s\" % fqdn\n return fqdn", "def canDo_url(self, url):\n hostname = urlparse.urlsplit(url)[1]\n for hostEnd in self.highwireHosts:\n if hostname.endswith(hostEnd):\n logging.log(5, 'url hostname %s ends with %s -> highwire' % (hostname, hostEnd))\n return True\n\n if hostname in self.hostCache:\n ipAddr = self.hostCache[hostname]\n else:\n logging.debug('Looking up IP for %s' % hostname)\n try:\n ipAddr = socket.gethostbyname(hostname)\n self.hostCache[hostname] = ipAddr\n except socket.gaierror:\n raise pubGetError('Illegal hostname %s in link' % hostname, 'invalidHostname', hostname)\n\n ipParts = ipAddr.split('.')\n ipParts = [ int(x) for x in ipParts ]\n result = ipParts[0] == 171 and ipParts[1] in range(64, 68)\n if result == True:\n logging.log(5, 'hostname %s is highwire host' % hostname)\n return result", "def host_valid_lenient(self, host: str) -> bool:\n return WebCrawler.resolve_domain(host) in self.root_domains", "def slug_exists(host_slug: str,\n database_connection: mysql.connector.connect) -> bool:\n return validate_slug(host_slug, database_connection)", "def test_idna():\n assert (normalize_url(\"http://ドメイン.テスト\") ==\n \"http://xn--eckwd4c7c.xn--zckzah/\")\n assert (normalize_url(\"http://Яндекс.рф\") ==\n \"http://xn--d1acpjx3f.xn--p1ai/\")", "def fullHostname(self) -> str:\n\t\treturn self.hostname[1]", "def _hostname_prefix(self, hostname_str):\n\n if not hostname_str or len(hostname_str) == 0:\n msg = _(\"Invalid Hostname: %(hostname_str)s for storage\") % \\\n locals()\n LOG.exception(msg)\n ex_args = {'hostname': hostname_str}\n raise SVCInvalidHostnameError(**ex_args)\n if 
isinstance(hostname_str, unicode):\n hostname_str = hostname_str.translate(\n self._unicode_host_name_filter)\n elif isinstance(hostname_str, str):\n hostname_str = hostname_str.translate(\n self._string_host_name_filter)\n else:\n msg = _(\"Cannot clean host name: %(hostname_str)s for storage\") % \\\n locals()\n LOG.exception(msg)\n ex_args = {'hostname': hostname_str}\n raise SVCInvalidHostnameError(**ex_args)\n hostname_str = str(hostname_str)\n return hostname_str[:55]", "def _check_connection() -> bool:\n return bool(subprocess.check_output([\"hostname\", \"-I\"]))", "def test_hostname_type(self):\n \n hostname = get_hostname()\n \n # Check to make sure the returned value is a string\n self.assertEqual(type(hostname), str)", "def is_host(self):\n return self.host", "def test_client_id_hostname() -> None:\n assert indieauth._parse_client_id(\"http://www.home-assistant.io/\")\n assert indieauth._parse_client_id(\"http://[::1]\")\n assert indieauth._parse_client_id(\"http://127.0.0.1\")\n assert indieauth._parse_client_id(\"http://10.0.0.0\")\n assert indieauth._parse_client_id(\"http://10.255.255.255\")\n assert indieauth._parse_client_id(\"http://172.16.0.0\")\n assert indieauth._parse_client_id(\"http://172.31.255.255\")\n assert indieauth._parse_client_id(\"http://192.168.0.0\")\n assert indieauth._parse_client_id(\"http://192.168.255.255\")\n\n with pytest.raises(ValueError):\n assert indieauth._parse_client_id(\"http://255.255.255.255/\")\n with pytest.raises(ValueError):\n assert indieauth._parse_client_id(\"http://11.0.0.0/\")\n with pytest.raises(ValueError):\n assert indieauth._parse_client_id(\"http://172.32.0.0/\")\n with pytest.raises(ValueError):\n assert indieauth._parse_client_id(\"http://192.167.0.0/\")", "def get_hostname(url: str) -> str:\n return urlsplit(url).hostname", "def hostname(self) -> str:\n _args: list[Arg] = []\n _ctx = self._select(\"hostname\", _args)\n return _ctx.execute_sync(str)", "def shortHostname(self) -> str:\n\t\treturn self.hostname[0]", "def _is_url(string):\n return \"http\" in string", "def safe_ip_format(ip):\r\n try:\r\n if netaddr.IPAddress(ip).version == 6:\r\n return '[%s]' % ip\r\n except (TypeError, netaddr.AddrFormatError): # hostname\r\n pass\r\n # it's IPv4 or hostname\r\n return ip", "def is_urn(val):\n res = urlparse(val)\n return bool(res.scheme == \"urn\" and res.netloc == \"\" and res.path != \"\")", "def _hostname(self, code, protocol=None):\n if protocol is None:\n protocol = self.protocol(code)\n if protocol == 'https':\n host = self.ssl_hostname(code)\n else:\n host = self.hostname(code)\n return protocol, host", "def check_if_same_host(host, url):\n # print '\\nchecking same origin:', host, get_host_name(url)\n\n if host == get_host_name(url):\n return True\n return False", "def connection_string_to_hostname(conn_str):\n conn_str_obj = ConnectionString(conn_str)\n return conn_str_obj.get(\"HostName\")", "def checkHost(host):\n if \"192.168.\" in host:\n return False\n elif \"169.254.\" in host: #APIPA (Automatic Private Internet Protocol Addressing)\n return False\n elif re.match(\"^(127\\.)\",host):\n return False\n elif re.match(\"^(10\\.)\",host):\n return False\n elif re.match(\"^(172\\.1[6-9]\\.)|(172\\.2[0-9]\\.)|(172\\.3[0-1]\\.)\",host):\n return False\n else:\n return True", "def get_hostname():\n return re.split(\"\\.\", env.host)[0]", "def test_clean_ip(self):\n\n raw_ip = 'client=mail-ed1-f51.google.com[209.85.208.51]'\n result = clean_ip(raw_ip)\n self.assertEqual(result, '209.85.208.51')", "def 
isSystemId(_idtf):\n if _idtf.startswith(\"@@\"): return True\n if _idtf.startswith(\"tmp_\"): return True\n if len(_idtf) == 36 and _idtf[8] == '-' and _idtf[13] == '-' and _idtf[23] == '-': return True\n \n return False", "def resolve_domain(host: str) -> str:\n parts = host.split('.')[-2:]\n return ''.join(parts)", "def nodename(name, hostname):\n return NODENAME_SEP.join((name, hostname))", "def test_scopeID(self):\n self.assertTrue(isIPv6Address(\"fe80::1%eth0\"))\n self.assertTrue(isIPv6Address(\"fe80::2%1\"))\n self.assertTrue(isIPv6Address(\"fe80::3%en2\"))", "def is_alias(email):\n return MAIL_ALIAS_REGEX.match(strip_domains(email))", "def get_hostname(ip):\n hostname, aliases, ipaddresses = socket.gethostbyaddr(ip)\n return hostname", "def indomain(url, domain):\n if url and domain:\n return url.startswith(domain)\n return False", "def test_address_host(self):\n url = create_url(address=\"www.example.com\", scheme_no_ssl=\"http\")\n self.assertEqual(url, \"http://www.example.com\")", "def is_to_public_id(self) -> bool:\n return PublicId.is_valid_str(self.to)", "def test_host(self):\n url = create_url(host=\"www.example.com\", scheme_no_ssl=\"http\")\n self.assertEqual(url, \"http://www.example.com\")", "def hostname_lookup(hostname):\n try:\n # The {host} must be resolved to an IP address; if this fails, this\n # will throw a socket.gaierror.\n host_address = gethostbyname(hostname)\n\n # Reset {host} to the resolved address.\n LOG.debug(\n 'Resolved hostname %s to IP address %s.', hostname, host_address\n )\n return host_address\n\n except gaierror:\n # The {host}-as-hostname did not resolve to an IP address.\n LOG.debug('Could not resolve hostname %s to an IP address.', hostname)\n return hostname", "def needs_ssh(hostname, _socket=None):\n if hostname.lower() in ['localhost', '127.0.0.1', '127.0.1.1']:\n return False\n _socket = _socket or socket\n fqdn = _socket.getfqdn()\n if hostname == fqdn:\n return False\n local_hostname = _socket.gethostname()\n local_short_hostname = local_hostname.split('.')[0]\n if local_hostname == hostname or local_short_hostname == hostname:\n return False\n return True", "def get_hostname():\n global HOST\n if '.' in HOST:\n HOST = HOST.split('.')[0]\n return HOST", "def hostname():\n hostname = socket.gethostname()\n if '.' 
in hostname:\n hostname = hostname.split('.')[0]\n return hostname", "def _get_host(self, scheme='', hostname_only=False):\n host = self.host or ''\n # urlparse requires '//' to be provided if scheme is not specified\n original_parsed = urlparse.urlsplit(host)\n if (not original_parsed.scheme and not host.startswith('//')) or original_parsed.hostname is None:\n host = '%s://%s' % (scheme, host) if scheme else '//%s' % host\n parsed = urlparse.urlsplit(host)\n\n if hostname_only:\n return parsed.hostname\n\n try:\n port = parsed.port or self.port\n except ValueError:\n port = self.port\n netloc = parsed.netloc if port is None else '%s:%s' % (parsed.hostname, port)\n\n url_components = list(parsed)\n url_components[1] = netloc\n ret = urlparse.urlunsplit(url_components)\n return ret.lstrip('/')", "def get_host_name(self):\n if self.have_metadata is False:\n self._get_metadata()\n self.have_metadata = True\n\n try:\n return self.keyinfo['tracking_id'].attrs['hostname']\n except:\n return None\n\n if self.have_metadata is False:\n self._get_metadata()\n self.have_metadata = True", "def _is_url(s: str) -> bool:\n\n return urlparse(s).netloc != \"\"", "def normalize_address(addr: str) -> str:\n # bitcoin hrps\n hrps = {net[\"bech32\"] + \"1\" for net in NETWORKS.values()}\n # liquid hrps\n # Blech32 addresses are intended for confidential assets\n hrps = hrps.union(\n {net[\"blech32\"] + \"1\" for net in NETWORKS.values() if \"blech32\" in net}\n )\n if addr.lower().startswith(tuple(hrps)):\n return addr.lower()\n return addr", "def is_ipv4_address(s):\n # split the string on dots\n s_split = s.split('.')\n \n return len(s_split) == 4 and all(num.isdigit() and 0 <= int(num) < 256 for num in s_split)", "def is_identifier(cls, s):\n\n if isinstance(s, Selector):\n return len(s) == 1\n\n if np.iterable(s):\n\n # Try to expand string:\n if isinstance(s, basestring):\n try:\n s_exp = cls.expand(s)\n except:\n return False\n else:\n if len(s_exp) == 1:\n return True\n else:\n return False\n\n # If all entries are lists or tuples, try to expand:\n elif all([(isinstance(x, (list, slice))) for x in s]):\n if len(cls.expand(s)) == 1:\n return True\n else:\n return False\n\n # A sequence of integers and/or strings is a valid port identifier:\n elif all(map(lambda x: isinstance(x, (int, long, basestring)), s)):\n #elif set(map(type, s)).issubset([int, basestring]):\n return True\n else:\n return False\n\n # A non-iterable cannot be a valid identifier:\n else:\n return False", "def get_hostname(self):\n raise NotImplementedError('get_hostname')", "def format_unique_id(address: str) -> str:\n return address.replace(\":\", \"\").lower()", "def __isSameHost( self, hostCN, hostConn ):\n hostCN_m = hostCN\n if '/' in hostCN:\n hostCN_m = hostCN.split( '/' )[1]\n if hostCN_m == hostConn:\n return True\n result = checkHostsMatch( hostCN_m, hostConn )\n if not result[ 'OK' ]:\n return False\n return result[ 'Value' ]", "def guess_is_sys_id(value):\n return re.match(r'^[A-Za-z0-9]{32}$', value) is not None", "def is_url_quoted(url):\n try:\n url_ = urlunquote(url)\n return url != url_\n except: # problem with unquoting -- then it must be wasn't quoted (correctly)\n return False", "def hostname(self, hostname):\n if hostname is not None and not re.search('^(?=.{1,255}$)[0-9A-Za-z](?:(?:[0-9A-Za-z]|-){0,61}[0-9A-Za-z])?(?:\\\\.[0-9A-Za-z](?:(?:[0-9A-Za-z]|-){0,61}[0-9A-Za-z])?)*\\\\.?$', hostname): # noqa: E501\n raise ValueError(\"Invalid value for `hostname`, must be a follow pattern or equal to 
`/^(?=.{1,255}$)[0-9A-Za-z](?:(?:[0-9A-Za-z]|-){0,61}[0-9A-Za-z])?(?:\\\\.[0-9A-Za-z](?:(?:[0-9A-Za-z]|-){0,61}[0-9A-Za-z])?)*\\\\.?$/`\") # noqa: E501\n\n self._hostname = hostname", "def test_detectCanonicalNameLoop(self):\n servers = {\n ('1.1.2.3', 53): {\n ('example.com', A): {\n 'answers': [('example.com', Record_CNAME('example.net')),\n ('example.net', Record_CNAME('example.com'))],\n },\n },\n }\n resolver = self._getResolver(servers)\n d = resolver.lookupAddress('example.com')\n return self.assertFailure(d, ResolverError)", "def test_correct_sheme_host_sent_with_request(self):\n req = self.httpbin.get_my_ip(dry_run=True)\n self.assertIn(self.httpbin.client['host'], urlparse(req.prepared_request.url).netloc)\n self.assertIn(self.httpbin.client['scheme'], urlparse(req.prepared_request.url).scheme)\n self.assertIn(self.httpbin.client['get_my_ip']['path'], urlparse(req.prepared_request.url).path)", "def identify_id(id: str) -> bool:\n return validate_handle(id)", "def host(self):\n if self.url.startswith(\"dns:\"):\n return self.url[4:]\n else:\n return urlparse(self.url).hostname", "def host_ip(hostname: str) -> str:\n try:\n return socket.gethostbyname(hostname)\n except socket.gaierror:\n return \"No record found.\"", "def via_host_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"via_host_name\")", "def via_host_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"via_host_name\")", "def extractHostname(self, url):\n return url.split(\"http://\")[1].split('/')[0]" ]
[ "0.6449013", "0.6380736", "0.63365686", "0.6324431", "0.61450845", "0.605697", "0.6042931", "0.5928755", "0.589544", "0.58683395", "0.58616215", "0.58118397", "0.5743484", "0.56844443", "0.5675291", "0.56356144", "0.56274545", "0.561817", "0.56118816", "0.5569066", "0.5540591", "0.55318403", "0.55318403", "0.55293155", "0.55281466", "0.549252", "0.5487594", "0.54744846", "0.5459334", "0.5427799", "0.53711224", "0.5365115", "0.5340509", "0.5323882", "0.53164446", "0.5315989", "0.5311048", "0.53052264", "0.529659", "0.52932155", "0.5289015", "0.5273571", "0.5266497", "0.5257457", "0.52373177", "0.5229677", "0.5221464", "0.52138025", "0.5200801", "0.5197837", "0.5196022", "0.51944745", "0.51928574", "0.5192381", "0.51816905", "0.5168066", "0.5158165", "0.5155456", "0.51536894", "0.5139825", "0.513976", "0.51217514", "0.5111906", "0.51116794", "0.5089961", "0.50660133", "0.50608283", "0.50576186", "0.5057326", "0.5053447", "0.5052794", "0.50483024", "0.50450873", "0.5042615", "0.5039091", "0.5037042", "0.50323063", "0.5025484", "0.5019749", "0.501705", "0.50109816", "0.4998787", "0.49955505", "0.49907932", "0.49816537", "0.49661684", "0.49652678", "0.49635014", "0.49613473", "0.49582514", "0.49556935", "0.49449033", "0.4940543", "0.49378848", "0.49314865", "0.49282408", "0.4926693", "0.49191162", "0.49191162", "0.491817" ]
0.66974527
0
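For context, the first negative in the record above ends with the tail of a host-part sanity check. The following is a self-contained reconstruction of that logic; the function name, signature, and the leading empty-segment guard are assumptions, and only the TLD-segment rules are taken from the visible snippet.

def looks_like_sane_host(host):
    # Hypothetical reconstruction: the name, signature, and empty-segment
    # guard are assumed; the TLD rules mirror the truncated snippet above.
    host_parts = host.split(".")
    if not all(host_parts):
        # An empty segment (leading, trailing, or doubled dot) is not sane.
        return False
    if len(host_parts[-1]) == 2:
        # It's a 2-letter tld, so a bare TLD alone is not sane.
        if len(host_parts) < 2:
            return False
        if len(host_parts[-2]) <= 3:
            # A short second-to-last segment under a 2-letter TLD
            # (e.g. "co.uk" alone) needs more than two segments.
            return len(host_parts) > 2
        # A long second-to-last segment is enough on its own.
        return len(host_parts) > 1
    # It's a regular tld, so it needs at least one more segment.
    return len(host_parts) > 1


assert looks_like_sane_host("example.co.uk")
assert not looks_like_sane_host("co.uk")
assert looks_like_sane_host("example.com")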
Fix common typos in a given URL, such as a forgotten colon.
def url_fix_common_typos(url): if url.startswith("http//"): url = "http://" + url[6:] elif url.startswith("https//"): url = "https://" + url[7:] return url
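A minimal, runnable usage check for the document function above; the function body is copied verbatim, and the sample URLs are illustrative only.

def url_fix_common_typos(url):
    # Verbatim from the document field above: repair a missing colon
    # after the scheme.
    if url.startswith("http//"):
        url = "http://" + url[6:]
    elif url.startswith("https//"):
        url = "https://" + url[7:]
    return url


assert url_fix_common_typos("http//example.com") == "http://example.com"
assert url_fix_common_typos("https//example.com") == "https://example.com"
# Already well-formed URLs pass through untouched.
assert url_fix_common_typos("https://example.com") == "https://example.com"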
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fix_url(cls, url: str):\r\n ...", "def _fix_url(url):\n\n if not url.startswith('http'):\n url = 'http://' + url\n\n return url", "def correct_url(self, url: str) -> str:\n # check if url has \"http://\" prefix\n if \"http://\" not in url:\n if \"https://\" not in url:\n url = \"http://\" + url\n url_split = url.split(\"/\")\n # correct URL as needed for script\n if url_split[4] == '':\n raise URLError('No Story ID given')\n if len(url_split) == 5:\n url_split.append('')\n else:\n raise URLError('Unknown URL format')\n url = '/'.join(url_split)\n url = urljoin(url, ' ')[0:-2]\n return url", "def clean_url(url):\n for noisy_url in noisy_urls:\n url = str(url).replace(noisy_url,\"\").lower()\n return url", "def _proper_url(self, url):\n if self.base_url not in url:\n url = self.base_url + url\n url = re.sub(r'(?<!https:)//', '/', url)\n if not url.endswith('/') and '?' not in url:\n url = url + '/'\n if url.endswith('?'):\n url = url[:-1]\n return url", "def normalize_url(url):\n # print(url)\n if not url.startswith('http://') and not url.startswith('https://'):\n return 'https://{}/{}'.format(zone_name, url.replace('//', '/'))\n return url", "def normalize_url(self, url):\n pass", "def uncanonicalize(self, url):\n pass", "def url_fix(s, charset='utf-8'):\n if isinstance(s, unicode):\n s = s.encode(charset, 'ignore')\n scheme, netloc, path, qs, anchor = urlparse.urlsplit(s)\n path = urllib.quote(path, '/%')\n qs = urllib.quote_plus(qs, ':&=')\n return urlparse.urlunsplit((scheme, netloc, path, qs, anchor))", "def normalize_url(url):\n if not url.startswith((\"git+\", \"hg+\")):\n return url\n return url[4:]", "def test_unreserved_percentencoding():\n assert (normalize_url(\"http://www.example.com/%7Eusername/\") ==\n \"http://www.example.com/~username\")\n assert (normalize_url('http://example.com/foo%23bar') ==\n 'http://example.com/foo%23bar')\n assert (normalize_url('http://example.com/foo%2fbar') ==\n 'http://example.com/foo%2Fbar')\n assert (normalize_url('http://example.com/foo%3fbar') ==\n 'http://example.com/foo%3Fbar')", "def fixURLS():\n url_re = re.compile(r'http t co \\S+')\n tweets = Tweet.objects.all()\n for tweet in tweets:\n tweet.text = url_re.sub(' ', tweet.text)\n tweet.text = ' '.join(tweet.text.split())\n tweet.save()", "def _fix_url(self, curr_url, rel):\n\n rel_l = rel.lower()\n if rel_l.startswith(\"http://\") or rel_l.startswith(\"https://\"):\n curr_url, rel = rel, \"\"\n\n # compute the new url based on import\n curr_url = urlparse.urldefrag(curr_url)[0]\n parsed_url = urlparse.urlparse(curr_url)\n return urlparse.urljoin(parsed_url.geturl(), rel)", "def fix_url_path(url: str) -> str:\n return url if url.endswith(\"/\") else url + \"/\"", "def obfuscate_url(url: str) -> str:\n return re.sub(r\"\\/\\/.*:.*@\", \"//***:***@\", url)", "def cleanUrl(url):\n\turl_clean = url.replace(' ','%20')\n\t\"\"\" add /index.html where necessary \"\"\"\n\tif (url[-1:]=='/'):\n\t\turl_clean += 'index.html'\n\telif (url[-5:].find('.') == -1):\n\t\t url_clean += '/index.html'\n\treturn url_clean", "def test_non_ideal_inputs():\n assert normalize_url(\"example.com\") == \"http://example.com/\"\n assert normalize_url(\"example.com/abc\") == \"http://example.com/abc\"\n assert normalize_url(\"//example.com/abc\") == \"http://example.com/abc\"", "def sanitize_url(self, url):\r\n if not self.markdown.safeMode:\r\n # Return immediately bipassing parsing.\r\n return url\r\n \r\n try:\r\n scheme, netloc, path, params, query, fragment = url = urlparse(url)\r\n except ValueError:\r\n # 
Bad url - so bad it couldn't be parsed.\r\n return ''\r\n \r\n locless_schemes = ['', 'mailto', 'news']\r\n if netloc == '' and scheme not in locless_schemes:\r\n # This fails regardless of anything else. \r\n # Return immediately to save additional proccessing\r\n return ''\r\n\r\n for part in url[2:]:\r\n if \":\" in part:\r\n # Not a safe url\r\n return ''\r\n\r\n # Url passes all tests. Return url as-is.\r\n return urlunparse(url)", "def _clean_url(self, url):\n return \"\".join(url.split(\"?\")[:1])", "def cleanmatomo_url(self):\n self.matomo_url = re.sub(r\"/\\/$/\", \"\", self.matomo_url) # Cuts \"/\"\n\n if re.match(r\"^http://\", self.matomo_url): # replace it to \"https://\"\n self.matomo_url = re.sub(\"^http://\", \"\", self.matomo_url)\n self.matomo_url = self.protocol + self.matomo_url\n elif not bool(re.match(\"^https://\", self.matomo_url)): # check for \"https://\" and set it\n self.matomo_url = self.protocol + self.matomo_url", "def test_dont_percent_encode_safe_chars_query():\n assert (normalize_url(\"http://example.com/a/?face=(-.-)\") ==\n \"http://example.com/a?face=(-.-)\")", "def fix_links():\n pass", "def fix_website(raw_website):\n if url_is_good(raw_website):\n return raw_website\n else:\n return \"http://\" + raw_website", "def url_fix_host(urlparts):\n # if not urlparts[1]:\n # urlparts[2] = urllib.unquote(urlparts[2])\n # return False\n userpass, netloc = urllib.splituser(urlparts[1])\n if userpass:\n userpass = urllib.unquote(userpass)\n netloc, is_idn = idna_encode(urllib.unquote(netloc).lower())\n # a leading backslash in path causes urlsplit() to add the\n # path components up to the first slash to host\n # try to find this case...\n i = netloc.find(\"\\\\\")\n if i != -1:\n # ...and fix it by prepending the misplaced components to the path\n comps = netloc[i:] # note: still has leading backslash\n if not urlparts[2] or urlparts[2] == '/':\n urlparts[2] = comps\n else:\n urlparts[2] = \"%s%s\" % (comps, urllib.unquote(urlparts[2]))\n netloc = netloc[:i]\n else:\n # a leading ? in path causes urlsplit() to add the query to the\n # host name\n i = netloc.find(\"?\")\n if i != -1:\n netloc, urlparts[3] = netloc.split('?', 1)\n # path\n urlparts[2] = urllib.unquote(urlparts[2])\n if userpass and userpass != ':':\n # append AT for easy concatenation\n userpass += \"@\"\n else:\n userpass = \"\"\n\n if urlparts[0] in default_ports:\n dport = default_ports[urlparts[0]]\n host, port = splitport(netloc, port=dport)\n\n host = host.rstrip('. 
')\n if port != dport:\n host = \"%s:%d\" % (host, port)\n netloc = host\n urlparts[1] = userpass + netloc\n return is_idn", "def fix_big_encoded_urls(message):\n try:\n new_message = urllib.parse.unquote(message)\n except: # noqa\n pass\n if new_message != message:\n return re.sub(r\"[\\(\\)\\{\\}#%]\", \" \", new_message)\n return message", "def test_remove_extra_slash():\n # TODO: Should we actually do this?\n # TODO: See https://webmasters.stackexchange.com/questions/8354/what-does-the-double-slash-mean-in-urls/8381#8381\n assert (normalize_url(\"http://www.example.com/foo//bar.html\") ==\n \"http://www.example.com/foo/bar.html\")\n assert(normalize_url(\"http://example.com///abc\") ==\n \"http://example.com/abc\")", "def _update_url_scheme(self, url):\n if self.base_scheme and not url.startswith(\"%s://\" % self.base_scheme):\n # url_split = urlparse.urlsplit(url)\n url_split = urlsplit(url)\n # url = urlparse.urlunsplit(\n url = urlunsplit(\n [\n self.base_scheme,\n url_split.netloc,\n url_split.path,\n url_split.query,\n url_split.fragment\n ]\n )\n return url", "def fix_filename(urlTitle):\n fixed = urlTitle.replace('//', '/')\n fixed = fixed.replace('*', 'xXx')\n return fixed", "def test_url_add_missing_protocol(self):\n assert ct.url_add_missing_protocol(\"https://www.bad-actor.services/\") == \"https://www.bad-actor.services/\"\n assert ct.url_add_missing_protocol(\"www.bad-actor.services/\") == \"http://www.bad-actor.services/\"\n assert ct.url_add_missing_protocol(\"http://www.bad-actor.services/\") == \"http://www.bad-actor.services/\"\n assert ct.url_add_missing_protocol(\n \"www.bad-actor.services/\",\n default=\"https\") == \"https://www.bad-actor.services/\"", "def test_drop_trailing_questionmark():\n assert normalize_url(\"http://example.com/?\") == \"http://example.com/\"\n assert normalize_url(\"http://example.com?\") == \"http://example.com/\"\n assert normalize_url(\"http://example.com/a?\") == \"http://example.com/a\"\n assert normalize_url(\"http://example.com/a/?\") == \"http://example.com/a\"", "def test_fix_google_url():\n url = _search.fix_google_url(\"www.example.com/questions/1234\")\n assert url == \"https://www.example.com/questions/1234\"", "def url_prepare(url):\n if 'http://' in url or 'https://' in url:\n return url\n try:\n if requests.get('https://' + url):\n return 'https://' + url\n except Exception as ex:\n pprint(ex)\n return 'http://' + url", "def test_non_urls():\n assert normalize_url(\"\") is None\n assert normalize_url(\"abc xyz\") is None\n assert normalize_url(\"asb#abc\") is None\n assert normalize_url(\"Яндекс.рф\") is not None\n assert normalize_url(\"google.blog\") is not None\n assert normalize_url(\"http//google.com\") is None\n assert normalize_url(\"http://user@pass:example.com\") is None", "def fixup_parameters(url, backend):\n result = url\n if backend == \"django\":\n result = url.replace(\"{\", \"(?P<\").replace(\"}\", \">.+)\")\n\n return result", "def clean_url(url):\n\n if url is None:\n return None\n\n if '??' in url:\n url = url.split('??')[0]\n\n if url.endswith('?'):\n url = url[:-1]\n\n if '`' in url:\n url = url.replace('`', '')\n\n return url", "def url_fix(s, charset='utf-8'):\n # First step is to convert backslashes (which are invalid in URLs anyways)\n # to slashes. 
This is consistent with what Chrome does.\n s = s.replace('\\\\', '/')\n\n # For the specific case that we look like a malformed windows URL\n # we want to fix this up manually:\n if (\n s.startswith('file://') and\n s[7:8].isalpha() and\n s[8:10] in (':/', '|/')\n ):\n s = 'file:///' + s[7:]\n\n url = urlsplit(s)\n\n netloc = _encode_netloc(url)\n\n path = urlquote(\n url.path, encoding=charset, safe='/%+$!*\\'(),'\n )\n qs = urlquote_plus(\n url.query, encoding=charset, safe=':&%=+$!*\\'(),'\n )\n anchor = urlquote_plus(\n url.fragment, encoding=charset, safe=':&%=+$!*\\'(),'\n )\n\n return urlunsplit(\n (url.scheme, netloc, path, qs, anchor)\n )", "def urlify(board):\n return(board.replace(\" \",\"%20\"))", "def clean_url(url):\r\n s = url\r\n url = url.encode('utf8')\r\n url = ''.join([urllib.quote(c) if ord(c) >= 127 else c for c in url])\r\n return url", "def _sanitize_url_prefix(url_prefix: Optional[str]) -> str:\n if not url_prefix:\n return ''\n\n while url_prefix.startswith('//'):\n url_prefix = url_prefix[1:]\n while url_prefix.endswith('/'):\n url_prefix = url_prefix[:-1]\n\n if url_prefix == '':\n return ''\n\n if url_prefix.startswith('/') \\\n or url_prefix.startswith('http://') \\\n or url_prefix.startswith('https://'):\n return url_prefix\n\n return '/' + url_prefix", "def sanitize_url(urlstring):\n\n # A blog's url is the best unique identifier for the data store\n # (some Twitter handles have more than one blog), but certain\n # punctuation in a string throws an error in Firebase when\n # you attempt to use that string as a key.\n return annoying_punctuation.sub('', urlstring)", "def sanitize_url(url: str) -> Optional[str]:\n try:\n parts = urllib.parse.urlparse(url.replace(\" \", \"%20\"))\n scheme, netloc, path, params, query, fragment = parts\n except ValueError:\n # Bad URL - so bad it couldn't be parsed.\n return \"\"\n\n # If there is no scheme or netloc and there is a '@' in the path,\n # treat it as a mailto: and set the appropriate scheme\n if scheme == \"\" and netloc == \"\" and \"@\" in path:\n scheme = \"mailto\"\n elif scheme == \"\" and netloc == \"\" and len(path) > 0 and path[0] == \"/\":\n # Allow domain-relative links\n return urllib.parse.urlunparse((\"\", \"\", path, params, query, fragment))\n elif (scheme, netloc, path, params, query) == (\"\", \"\", \"\", \"\", \"\") and len(fragment) > 0:\n # Allow fragment links\n return urllib.parse.urlunparse((\"\", \"\", \"\", \"\", \"\", fragment))\n\n # Zulip modification: If scheme is not specified, assume http://\n # We re-enter sanitize_url because netloc etc. need to be re-parsed.\n if not scheme:\n return sanitize_url(\"http://\" + url)\n\n # Upstream code will accept a URL like javascript://foo because it\n # appears to have a netloc. Additionally there are plenty of other\n # schemes that do weird things like launch external programs. To be\n # on the safe side, we allow a fixed set of schemes.\n if scheme not in allowed_schemes:\n return None\n\n # Upstream code scans path, parameters, and query for colon characters\n # because\n #\n # some aliases [for javascript:] will appear to urllib.parse to have\n # no scheme. On top of that relative links (i.e.: \"foo/bar.html\")\n # have no scheme.\n #\n # We already converted an empty scheme to http:// above, so we skip\n # the colon check, which would also forbid a lot of legitimate URLs.\n\n # URL passes all tests. 
Return URL as-is.\n return urllib.parse.urlunparse((scheme, netloc, path, params, query, fragment))", "def cleanUri(uri):\n if not uri.startswith(\"/\") and not uri.startswith('http'):\n uri = \"/\" + uri\n\n if 'http://' in uri or 'https://' in uri:\n uri = uri.split('://')[0] + '://' + \\\n uri.split('://')[1].replace(\"//\", \"/\")\n else:\n uri = uri.replace(\"//\", \"/\")\n\n if uri.endswith(\"/\"):\n uri = uri[:-1]\n\n return uri", "def fix_slash(environ, wantslash):\n from colubrid.exceptions import HttpMoved\n #FIXME\n # argh. never did something that supid\n # find a better solution for that problem.\n url = quote(environ.get('SCRIPT_NAME', ''))\n url += quote(environ.get('PATH_INFO', ''))\n query = environ.get('QUERY_STRING', '')\n oldurl = query and ('%s?%s' % (url, query)) or url\n \n if oldurl and oldurl != '/':\n if url.endswith('/'):\n if not wantslash:\n url = url[:-1]\n else:\n if wantslash:\n url += '/'\n \n newurl = query and ('%s?%s' % (url, query)) or url\n if oldurl != newurl:\n raise HttpMoved(newurl)", "def canonicalize(self, url):\n pass", "def test_lower_case():\n assert normalize_url(\"HTTP://examPle.cOm/\") == \"http://example.com/\"\n assert normalize_url(\"http://example.com/A\") == \"http://example.com/A\"", "def format_url(url):\n if not (url.startswith(\"//\") or url.startswith(\"http\")):\n url = \"http://\" + url\n return url", "def sanitize_url(url, require_scheme = False):\r\n if not url or ' ' in url:\r\n return\r\n\r\n url = url.strip()\r\n if url.lower() == 'self':\r\n return url\r\n\r\n u = urlparse(url)\r\n # first pass: make sure a scheme has been specified\r\n if not require_scheme and not u.scheme:\r\n url = 'http://' + url\r\n u = urlparse(url)\r\n\r\n if (u.scheme and u.scheme in valid_schemes\r\n and u.hostname and len(u.hostname) < 255\r\n and '%' not in u.netloc):\r\n return url", "def test_splits_urls_for_nouns(self):\r\n test_value = \"http://google.com/drives/autonomous/cars\"\r\n self.assertEqual(\r\n set([u'cars', u'autonomous']),\r\n suggest_tags(test_value))", "def testTrailingSpaces(self):\n self.assertEqual([\"http://tomtom.foobar.org/\"], grab('http://tomtom.foobar.org/ ', self.needScheme))\n self.assertEqual([\"http://www.foobi.org/saatoimia\"], grab('http://www.foobi.org/saatoimia ', self.needScheme))", "def host_cleanup(host):\n if not host.startswith('https://'):\n host = 'https://' + host # Add schema\n host = strip_end(host, '/')\n host = strip_end(host, '/api/v1')\n host = strip_end(host, '/')\n return host", "def make_url_safe(self, url):\n return url.replace(' ', '%20')\\\n .replace('(', '%28')\\\n .replace(')', '%29')\\\n .replace('\"', '%22')", "def refactor_app_url(self, url ):\n up = urlparse.urlparse( url )\n qs = urlparse.parse_qs(up.query)\n nqs = [('appid', qs.get('appid')) , ('pkgid',qs.get('pkgid',-1))]\n up = list(up)\n up[4] = urllib.urlencode(nqs,doseq=True)\n return urlparse.urlunparse(tuple(up))", "def sanitize_url(url: str, protocol: str = 'https://') -> str:\n sanitized = url[0:-1] if url[-1] == '/' else url\n with_protocol = sanitized if sanitized.startswith('http') else f'{protocol}{sanitized}'\n return with_protocol", "def replace_url(alert_url: str, prefix_url: str) -> str:\n if alert_url.startswith('http://'): # NOSONAR\n alert_url = alert_url.replace('http://', '') # NOSONAR\n\n elif alert_url.startswith('https://'):\n alert_url = alert_url.replace('https://', '')\n\n if alert_url.startswith('www.'):\n alert_url = alert_url.replace('www.', '')\n\n elif alert_url.startswith('WWW.'):\n alert_url = 
alert_url.replace('WWW.', '')\n\n if not prefix_url.endswith('/'):\n prefix_url = f\"{prefix_url + '/'}\"\n\n alert_url_split = alert_url.split('/', 1)\n\n suffix_url = ''.join(\n alert_url_split[count]\n for count in range(len(alert_url_split))\n if count != 0\n )\n\n return f'{prefix_url + suffix_url}'", "def test_remove_default_port():\n assert (normalize_url(\"http://www.example.com:80/bar.html\") ==\n \"http://www.example.com/bar.html\")\n assert (normalize_url(\"HTTPS://example.com:443/abc/\") ==\n \"https://example.com/abc\")", "def youtube_fix_url(url):\n p = urlparse.urlparse(url)\n path = p.path\n if '&' in p.path:\n # sign of a malformed path\n path = re.sub('\\&.+', '', p.path)\n return urlparse.urlunparse((p.scheme, p.netloc, path, p.params, p.query, p.fragment))", "def fix_apiroot(root):\n if '://' in root:\n return root\n if ('/' not in root) or ('.' not in root.split('/')[0]):\n root = \"www.pennapps.com/\" + root\n return \"http://%s\" % root", "def testLeadingSpaces(self):\n self.assertEqual([\"http://tomtom.foobar.org/\"], grab(' http://tomtom.foobar.org/', self.needScheme))\n self.assertEqual([\"http://www.foobi.org/saatoimia\"], grab(' http://www.foobi.org/saatoimia', self.needScheme))", "def clean_url(url):\n o = urlsplit(url)\n return \"{scheme}://{netloc}{path}\".format(\n scheme=o[0], netloc=o[1], path=o[2],\n )", "def reformat_weburl(s):\n s = (s or '').strip()\n\n if s and '.' in s and 'notavailable' not in s:\n match = re.match(r'^http(s)?://', s)\n if not match:\n s = 'http://' + s\n\n return s", "def clean_url(url):\n return url[:url.find('?')]", "def sanitize_link(link, url):\n if link.startswith('//'):\n link = f'http:{link}'\n elif link.startswith('/'):\n parsed_url = urlparse(url)\n link = f'http://{parsed_url.hostname}{link}'\n return link", "def test__canonizeURL(self):\n self.run_script_for_compat('alert(e._canonizeURL(foo));')\n self.assert_silent()\n self.assert_compat_error()", "def test_fix_repo_url():\n repo_url_git = 'git://github.com/Tinche/bower-cache'\n repo_url_https = 'https://github.com/Tinche/bower-cache'\n fixed_url_https = 'https://:@github.com/Tinche/bower-cache'\n assert repo_url_git == gitwrapper._fix_repo_url(repo_url_git)\n assert fixed_url_https == gitwrapper._fix_repo_url(repo_url_https)", "def make_clean_url(url):\n return urlparse.urldefrag(url)[0]", "def clean_url(url):\n scheme, netloc, path, query, fragment = url_parse.urlsplit(url)\n path = url_parse.quote(path)\n url = url_parse.urlunsplit((scheme, netloc, path, query, fragment))\n return url", "def test_splits_url_parts(self):\r\n test_value = \"http://google.com/drives-autonomous_cars\"\r\n self.assertEqual(\r\n set([u'cars', u'autonomous']),\r\n suggest_tags(test_value))", "def clean_urls(self, tweet):\n self.urls = re.findall(self.regexpForURLs, tweet)\n\n for url in self.urls:\n tweet = tweet.replace(url, '')\n\n tweet = self.clean_unnecessary_whitespaces(tweet)\n return tweet", "def test_dont_change_username_password():\n assert (normalize_url(\"http://Foo:BAR@exaMPLE.COM/\") ==\n \"http://Foo:BAR@example.com/\")", "def test_normalized_urls():\n assert normalize_url(\"http://example.com/\") == \"http://example.com/\"", "def clean(self, sub):\n sub = re.sub(r'^RT[\\s]+', '', sub)\n sub = re.sub(r'https?:\\/\\/.*[\\r\\n]*', '', sub)\n sub = re.sub(r'#', '', sub)\n sub = re.sub(r'@[A-Za-z0–9]+', '', sub) \n\n return sub", "def _clean_authorization_request_url(request_url):\n parsed_url = urlparse(request_url)\n query_params = dict(parse_qsl(parsed_url.query, 
keep_blank_values=True))\n for param in [\"code\", \"state\"]:\n if param in query_params:\n query_params[param] = \"redacted\"\n url_parts = list(parsed_url) # cast to list to override query params\n url_parts[4] = urlencode(query=query_params)\n request_url = urlunparse(url_parts)\n return request_url", "def test_remove_empty_port():\n assert (normalize_url(\"http://www.example.com:/\") ==\n \"http://www.example.com/\")", "def _transform_github_url(self):\n self.url = (\n self.url\n .replace('/blob/', '/')\n .replace(self.GITHUB_NETLOC, self.GITHUB_RAW_NETLOC)\n )", "def normalize_for_url(text: str) -> str:\n\n # German is our main language, so we are extra considerate about it\n # (unidecode turns ü into u)\n text = text.replace(\"ü\", \"ue\")\n text = text.replace(\"ä\", \"ae\")\n text = text.replace(\"ö\", \"oe\")\n clean = _unwanted_url_chars.sub('-', unidecode(text).strip(' ').lower())\n clean = _double_dash.sub('-', clean)\n clean = clean.strip('-')\n\n return clean", "def _transform_dropbox_url(self):\n self.url = requests.utils.urlunparse(\n self.parsed._replace(query='dl=1'))", "def getFilteredUrl(self, url):\n url = url.split('#')[0]\n if url.startswith('/wiki'):\n return ('https://en.wikipedia.org' + url)\n if 'en.wikipedia.org/wiki/' not in url:\n return ('https://en.wikipedia.org/wiki' + url)\n return url", "def fix_url(url, root):\n if root in url:\n if validate_url(url):\n return url\n else:\n if not url.endswith('/'):\n if validate_url(url + '/'):\n return url + '/'\n if url.startswith('https://'):\n if validate_url(url[:4] + url[5:]):\n return url[:4] + url[5:]\n else:\n return None\n else:\n return None\n else:\n parsed = get_root_domain(url)\n if parsed == '':\n if url.startswith('/'): # '/link'\n if validate_url(root[:-1] + url):\n return root[:-1] + url\n else:\n return None\n else: # 'link'\n if url.startswith('./'): # '/link'\n if validate_url(root + url[2:]):\n return root[:-1] + url\n else:\n return None\n elif validate_url(root + url):\n return root + url\n else:\n return None\n else:\n return None", "def clean_link(self, url: str) -> str:\n return self.CLEAN_REGEX.sub(lambda match: f\"%{ord(match.group(0)):02x}\", url)", "def remove_urls(text):\n pass", "def _remove_urls(self, text: str) -> str:\n pattern = r\"http\\S+\"\n return re.sub(pattern, \" \", str(text))", "def remove_url(tweet):\n return re.sub(r\"http\\S+\", \"URL\", tweet)", "def test_path_percent_encoding():\n assert (normalize_url(\"http://example.com/hello world{}\") ==\n \"http://example.com/hello%20world%7B%7D\")", "def urlify_pythonic(text, length):\n return text.rstrip().replace(\" \", \"%20\")", "def generate_clean_url(self):\n\n\t\tspaces_replaced = self.title.replace(' ', '-')\n\t\tpattern = re.compile('[^a-zA-Z0-9-]+')\n\t\tstripped = pattern.sub('', spaces_replaced)\n\t\tself.cleanurl = '-'.join([str(self.pid), stripped.lower()])", "def test_query_string_spaces():\n assert (normalize_url(\"http://example.com/search?q=a b&a=1\") ==\n \"http://example.com/search?a=1&q=a+b\")\n assert (normalize_url(\"http://example.com/search?q=a+b&a=1\") ==\n \"http://example.com/search?a=1&q=a+b\")\n assert (normalize_url(\"http://example.com/search?q=a%20b&a=1\") ==\n \"http://example.com/search?a=1&q=a+b\")", "def test_idna():\n assert (normalize_url(\"http://ドメイン.テスト\") ==\n \"http://xn--eckwd4c7c.xn--zckzah/\")\n assert (normalize_url(\"http://Яндекс.рф\") ==\n \"http://xn--d1acpjx3f.xn--p1ai/\")", "def test_normalize_percent_encoding_in_querystring():\n assert 
(normalize_url(\"http://example.com/?a=b%c2\") ==\n \"http://example.com/?a=b%C2\")", "def test_unicode_path():\n assert (normalize_url(\"http://example.com/résumé\") ==\n \"http://example.com/r%C3%A9sum%C3%A9\")", "def _graceful_relative_url(base_url, url):\n if url == base_url:\n return ''\n base_prefix = '%s://%s' % urlparse.urlparse(base_url or '')[0:2]\n url_prefix = '%s://%s' % urlparse.urlparse(url or '')[0:2]\n if base_prefix == url_prefix and url_prefix != '://':\n return url[len(url_prefix):]\n return url", "def deprotocolise(url):\n return PROTORE.sub('', url)", "def fixpaths(d):\n if isinstance(d, dict):\n if \"path\" in d:\n if \":\" not in d[\"path\"]:\n local_path = os.path.normpath(\n os.path.join(os.getcwd(), basedir, d[\"path\"])\n )\n d[\"location\"] = pathname2url(local_path)\n else:\n d[\"location\"] = d[\"path\"]\n del d[\"path\"]", "def format_url(url: str) -> str:\n return urljoin(url.replace('https://app', 'https://api'), '')", "def test_append_slash():\n assert normalize_url(\"http://example.com\") == \"http://example.com/\"", "def sanitizeUrl(url):\n return url.split('?')[0]", "def make_url_compatible(category: str) -> str:\n return urllib.parse.quote(category)", "def test_unicode_query_string():\n assert (normalize_url(\"http://example.com/?file=résumé.pdf\") ==\n \"http://example.com/?file=r%C3%A9sum%C3%A9.pdf\")", "def process_url(url):\n # only get url path, remove host,params.\n url = urlparse(url).path\n # url = list(url)\n # for i in range(len(url)):\n # if _is_punctuation(url[i]):\n # url[i] = \" \"\n # url = ''.join(url)\n # url = ' '.join(url.split())\n return url", "def preprocess_url(self, request_url: str):\n if re.fullmatch('.*/+', request_url) is None:\n return request_url\n else:\n return re.compile(request_url.rstrip('/') + '/+')", "def preprocess_url(self, request_url: str):\n if re.fullmatch('.*/+', request_url) is None:\n return request_url\n else:\n return re.compile(request_url.rstrip('/') + '/+')" ]
[ "0.770113", "0.73291516", "0.68543786", "0.6504218", "0.64858186", "0.6459376", "0.6437735", "0.63930553", "0.6289801", "0.6263048", "0.6251412", "0.62297577", "0.62242717", "0.613285", "0.61042875", "0.60988045", "0.60984194", "0.6093388", "0.606762", "0.60473937", "0.6038494", "0.6028973", "0.60203034", "0.6016071", "0.60018903", "0.5990357", "0.59525067", "0.59509563", "0.59452564", "0.5932441", "0.5931482", "0.5928471", "0.5880595", "0.5874972", "0.5858227", "0.5839503", "0.58391905", "0.58370274", "0.58295286", "0.5829421", "0.5825373", "0.58039594", "0.580185", "0.57594544", "0.57581556", "0.5756426", "0.5753721", "0.57422423", "0.5741573", "0.5739724", "0.57299095", "0.5723717", "0.57189655", "0.57124704", "0.5709626", "0.570098", "0.56901586", "0.5682979", "0.566507", "0.5642734", "0.56425726", "0.5639773", "0.5628368", "0.5625685", "0.5618192", "0.56083566", "0.55932033", "0.5592824", "0.55810136", "0.55729175", "0.5568623", "0.556588", "0.555525", "0.5546838", "0.5545259", "0.5544183", "0.5541774", "0.55041236", "0.5502496", "0.55016315", "0.5497264", "0.5478829", "0.5475112", "0.5474405", "0.5457842", "0.5456623", "0.54518354", "0.5450097", "0.5426021", "0.5425734", "0.542242", "0.54205376", "0.54181254", "0.5412208", "0.54093164", "0.5405664", "0.5394264", "0.5392666", "0.5392657", "0.5392657" ]
0.7982654
0
Split the query part of a mailto URL if one is found.
def url_fix_mailto_urlsplit(urlparts): if "?" in urlparts[2]: urlparts[2], urlparts[3] = urlparts[2].split('?', 1)
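A minimal usage sketch for the document function above. It mutates a urlsplit-style list in place (index 2 is the path, index 3 the query); the sample list below is illustrative only.

def url_fix_mailto_urlsplit(urlparts):
    # Verbatim from the document field above: move the query out of the
    # path slot of a mutable urlsplit-style parts list.
    if "?" in urlparts[2]:
        urlparts[2], urlparts[3] = urlparts[2].split("?", 1)


# A splitter that left the query inside the path:
parts = ["mailto", "", "user@example.com?subject=hi&body=hello", "", ""]
url_fix_mailto_urlsplit(parts)
assert parts[2] == "user@example.com"
assert parts[3] == "subject=hi&body=hello"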
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _urlparse_splitquery(url):\r\n\r\n qpart = url.split(\"?\", 1)\r\n if len(qpart) == 2:\r\n query = qpart[1]\r\n else:\r\n query = \"\"\r\n\r\n return qpart[0], query", "def test_split_url_for_query_1(self):\n url = \"testurl.com\"\n\n output = split_url_for_query(url)\n\n self.assertEqual(output, (\"com.testurl.%\", \"%\"))", "def test_split_url_for_query_2(self):\n url = \"testurl.com/test\"\n\n output = split_url_for_query(url)\n\n self.assertEqual(output, (\"com.testurl.%\", \"%./test%\"))", "def _split_url_string(query_string):\r\n parameters = parse_qs(to_utf8(query_string), keep_blank_values=True)\r\n for k, v in parameters.iteritems():\r\n parameters[k] = urllib.unquote(v[0])\r\n return parameters", "def test_split_url_for_query_3(self):\n url = \"*.testurl.com/test\"\n\n output = split_url_for_query(url)\n\n self.assertEqual(output, (\"com.testurl.%\", \"%./test%\"))", "def _clean_url(self, url):\n return \"\".join(url.split(\"?\")[:1])", "def split_url_and_query_params(url):\n scheme, netloc, path, query_string, fragment = urlsplit(url)\n query_params = parse_qs(query_string)\n url = urlunsplit((scheme, netloc, path, None, fragment))\n return url, query_params", "def filter_url_parse_email_links(match):\n email = html.unescape(match.group(0))\n email = html.escape(email)\n caption = filter_url_trim(email, filter_url_length)\n return '<a href=\"mailto:' + email + '\">' + caption + '</a>'", "def _split_url_string(param_str):\n parameters = parse_qs(param_str, keep_blank_values=False)\n for key, val in parameters.iteritems():\n parameters[key] = urllib.unquote(val[0])\n return parameters", "def _split_url(url):\n return url[1:].split('/')", "def _split_url(self, url):\n url_split = urlsplit(url)\n try:\n if url_split.netloc is not None and url_split.netloc.find(\" \") > 0:\n return None\n decoded_netloc = url_split.netloc.decode(\"utf-8\").encode(\"idna\")\n url_parts = (\n url_split.scheme,\n decoded_netloc,\n url_split.path,\n url_split.query,\n url_split.fragment)\n url_splitted = urlunsplit(url_parts)\n return url_splitted\n except UnicodeError:\n return None", "def parse_email(self, email):\r\n if not email:\r\n return ''\r\n \r\n email = str(email).lower().replace('.comhome','.com')\r\n \r\n for item in EMAIL_CHARS_TO_SPLIT:\r\n if item in email:\r\n email = email.split(item)[0]\r\n \r\n for item in ILLEGAL_SCRUB_ITEMS:\r\n if item in email:\r\n return ''\r\n \r\n if not \"@\" in email:\r\n return ''\r\n elif not \".\" in email:\r\n return ''\r\n elif not len(email) > 5:\r\n return ''\r\n \r\n return email", "def parse_for_query(query):\n index = query.find('@')\n if index == -1:\n return \"\"\n elif index == len(query)-1:\n # Make sure the final return doesn't index outside the list.\n return \"\"\n else:\n return query[index+1:]", "def url_parse_query(query, encoding=None):\n if isinstance(query, unicode):\n if encoding is None:\n encoding = url_encoding\n query = query.encode(encoding, 'ignore')\n query = query.replace('?', '')\n\n l = set()\n for k, v, sep in parse_qsl(query, True):\n k = url_quote_part(k, '/-:,;')\n if not k:\n continue\n if v:\n v = url_quote_part(v, '/-:,;')\n l.add(\"%s=%s\" % (k, v))\n elif v is None:\n l.add(\"%s\" % k)\n else:\n # some sites do not work when the equal sign is missing\n l.add(\"%s=\" % k)\n query = '&'.join(sorted(l))\n return query", "def clean_url(url):\n return url[:url.find('?')]", "def split_url(url):\n match = re.match(\"(.*\\.org)(/.*)\", url)\n return match.group(1), match.group(2)", "def _replace_url_query(url, 
new_query):\n scheme, netloc, path, _, fragment = urlparse.urlsplit(url)\n return urlparse.urlunsplit((scheme, netloc, path, new_query, fragment))", "def _get_query_part(params: dict) -> str:\n params_cleaned = {k: v for k, v in params.items() if v is not None}\n return ('?' + urlencode(params_cleaned, quote_via=quote, safe=\"/,\")) if params_cleaned else \"\"", "def SplitQuery(self, request, context):\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)", "def _urlparse_splitnetloc(url, start=0):\r\n\r\n # By default, the netloc is delimited by the end of the URL.\r\n delim = len(url)\r\n\r\n # Find the left-most delimiter.\r\n for char in \"/?#\":\r\n xdelim = url.find(char, start)\r\n if xdelim >= 0:\r\n delim = min(delim, xdelim)\r\n\r\n # Return the netloc and the rest of the URL.\r\n return url[start:delim], url[delim:]", "def to_url(request):\r\n scheme, netloc, path, query, fragment = urlsplit(to_utf8(request.url))\r\n query = parse_qs(query)\r\n\r\n for key, value in request.data_and_params.iteritems():\r\n query.setdefault(key, []).append(value)\r\n\r\n query = urllib.urlencode(query, True)\r\n return urlunsplit((scheme, netloc, path, query, fragment))", "def splitQuery(query):\n triples = list()\n bgp = re.search('WHERE {(.*)}', query).group(1)\n for triple in bgp.split(' . '):\n triples.append(triple.strip())\n return triples", "def process_url(url):\n # only get url path, remove host,params.\n url = urlparse(url).path\n # url = list(url)\n # for i in range(len(url)):\n # if _is_punctuation(url[i]):\n # url[i] = \" \"\n # url = ''.join(url)\n # url = ' '.join(url.split())\n return url", "def parse_query(query):\n qlist = []\n splitted = query.split(\"&\")\n for entry in splitted:\n cmd, arg = entry.split(\"=\")\n qlist.append((cmd, arg))\n return qlist", "def handles_url(self, url):\n\n parsed = urllib.parse.urlparse(url)\n if parsed.scheme not in ('', 'mailto'):\n return None\n\n address = parsed.path.strip()\n\n if ' ' in address or '!' in address:\n return None\n\n if validate_email.validate_email(address):\n return 'mailto:' + address.lower()\n\n return None", "def split_addr(self, a):\n a = a.replace('http://', '')\n a = a.replace('https://', '')\n\n addr = tlde.extract(a)\n is_ip = tlde.tldextract.looks_like_ip(addr.domain)\n if is_ip:\n ip = addr.domain\n path_and_params = a[a.index(ip)+len(ip):].split('?')\n path = path_and_params[0]\n if len(path_and_params) > 1:\n params = path_and_params[1:]\n else:\n params = ''\n return {'ip': ip, 't3': None, 't2': None, 'path': path, 'params': params, 'url/ip': 'ip'}\n else:\n t3 = addr.subdomain\n t2 = addr.registered_domain\n path_and_params = a[a.index(addr.fqdn)+len(addr.fqdn):].split('?')\n path = path_and_params[0]\n if len(path_and_params) > 1:\n params = path_and_params[1:]\n else:\n params = ''\n return {'t3': t3, 't2': t2, 'ip': None, 'path': path, 'params': params, 'url/ip': 'url'}", "def test_additional_query_args():\n assert (normalize_url(\"http://example.com?c=d\", [(\"a\", \"b\")]) ==\n \"http://example.com/?a=b&c=d\")\n assert (normalize_url(\"http://example.com\", [(\"a\", \"b\")]) ==\n \"http://example.com/?a=b\")\n assert (normalize_url(\"http://example.com\", [(\"résumé\", \"résumé\")]) ==\n \"http://example.com/?r%C3%A9sum%C3%A9=r%C3%A9sum%C3%A9\")", "def urlparse(url):\n\tunquote_url=urllib.parse.unquote(url)\n\treturn unquote_url", "def parse_url(feedback):\n data = {}\n if 'feedback' in feedback.url or '?' 
not in feedback.url:\n return data\n split_fields = feedback.url.split('?')[1].split('&')\n for field in split_fields:\n pair = field.split('=')\n data[pair[0]] = pair[1]\n return data", "def _override_tourl(self):\n base_url = urlparse.urlparse(self.url)\n try:\n query = base_url.query\n except AttributeError:\n # must be python <2.5\n query = base_url[4]\n query = parse_qs(query)\n for k, v in self.items():\n query.setdefault(k, []).append(v)\n\n try:\n scheme = base_url.scheme\n netloc = base_url.netloc\n path = base_url.path\n params = base_url.params\n fragment = base_url.fragment\n except AttributeError:\n # must be python <2.5\n scheme = base_url[0]\n netloc = base_url[1]\n path = base_url[2]\n params = base_url[3]\n fragment = base_url[5]\n\n url = (scheme, netloc, path, params,\n urllib.urlencode(query, True), fragment)\n return urlparse.urlunparse(url)", "def good_url(a, start_url):\n for i in range(len(a)):\n par=a[i].find('?')\n if par!=-1:\n a[i]=a[i][:par]\n anc=a[i].find('#')\n if anc!=-1:\n a[i]=a[i][:anc]\n if a[i]!='' and a[i][0]=='/':\n a[i]=str(start_url)+a[i][1:i]\n #print(a[i]) \n return list(set(a))", "def clean_params(self, url):\n if isinstance(url, unicode):\n url = url.encode(\"utf-8\")\n parts = list(urlparse.urlsplit(url))\n if not parts[3]:\n return url\n query = urlparse.parse_qsl(parts[3])\n query = [q for q in query if self._is_param_allowed(*q)]\n if query:\n parts[3] = urllib.urlencode(query)\n else:\n parts[3] = ''\n return urlparse.urlunsplit(parts).decode(\"utf-8\")", "def normalize_url(url, unsplit=True, sort_query=True, strip_authentication=True,\n strip_trailing_slash=True, strip_index=True, strip_protocol=True,\n strip_irrelevant_subdomains=True, strip_lang_subdomains=False, strip_lang_query_items=False,\n strip_fragment='except-routing', normalize_amp=True, fix_common_mistakes=True,\n infer_redirection=True, quoted=True):\n original_url_arg = url\n\n if infer_redirection:\n url = resolve(url)\n\n if isinstance(url, SplitResult):\n has_protocol = bool(splitted.scheme)\n splitted = url\n else:\n has_protocol = PROTOCOL_RE.match(url)\n\n # Ensuring scheme so parsing works correctly\n if not has_protocol:\n url = 'http://' + url\n\n # Parsing\n try:\n splitted = urlsplit(url)\n except ValueError:\n return original_url_arg\n\n scheme, netloc, path, query, fragment = splitted\n\n # Fixing common mistakes\n if fix_common_mistakes:\n if query:\n query = re.sub(MISTAKES_RE, '&', query)\n\n # Handling punycode\n netloc = decode_punycode(netloc)\n\n # Dropping :80 & :443\n if netloc.endswith(':80'):\n netloc = netloc[:-3]\n elif netloc.endswith(':443'):\n netloc = netloc[:-4]\n\n # Normalizing the path\n if path:\n trailing_slash = False\n if path.endswith('/') and len(path) > 1:\n trailing_slash = True\n path = normpath(path)\n if trailing_slash and not strip_trailing_slash:\n path = path + '/'\n\n # Handling Google AMP suffixes\n if normalize_amp:\n path = AMP_SUFFIXES_RE.sub('', path)\n\n # Dropping index:\n if strip_index:\n segments = path.rsplit('/', 1)\n\n if len(segments) != 0:\n last_segment = segments[-1]\n filename, ext = splitext(last_segment)\n\n if filename == 'index':\n segments.pop()\n path = '/'.join(segments)\n\n # Dropping irrelevant query items\n if query:\n domain_filter = None\n\n if splitted.hostname:\n domain_filter = next(\n (f for d, f in PER_DOMAIN_QUERY_FILTERS if splitted.hostname.endswith(d)),\n None\n )\n\n qsl = parse_qsl(query, keep_blank_values=True)\n qsl = [\n stringify_qs(item)\n for item in qsl\n if not 
should_strip_query_item(\n item,\n normalize_amp=normalize_amp,\n strip_lang_query_items=strip_lang_query_items,\n domain_filter=domain_filter\n )\n ]\n\n if sort_query:\n qsl = sorted(qsl)\n\n query = '&'.join(qsl)\n\n # Dropping fragment if it's not routing\n if fragment and strip_fragment:\n if strip_fragment is True or not should_strip_fragment(fragment):\n fragment = ''\n\n # Always dropping trailing slash with empty query & fragment\n if path == '/' and not fragment and not query:\n path = ''\n\n # Dropping irrelevant subdomains\n if strip_irrelevant_subdomains:\n netloc = re.sub(\n IRRELEVANT_SUBDOMAIN_AMP_RE if normalize_amp else IRRELEVANT_SUBDOMAIN_RE,\n '',\n netloc\n )\n\n # Dropping language as subdomains\n if strip_lang_subdomains:\n netloc = strip_lang_subdomains_from_netloc(netloc)\n\n # Dropping scheme\n if strip_protocol or not has_protocol:\n scheme = ''\n\n # Dropping authentication\n if strip_authentication:\n netloc = netloc.split('@', 1)[-1]\n\n # Normalizing AMP subdomains\n if normalize_amp and netloc.startswith('amp-'):\n netloc = netloc[4:]\n\n # Dropping trailing slash\n if strip_trailing_slash and path.endswith('/'):\n path = path.rstrip('/')\n\n # Quoting or not\n if quoted:\n path = quote(path)\n query = quote(query, RESERVED_CHARACTERS)\n fragment = quote(fragment, SAFE_CHARACTERS)\n else:\n path = unquote(path)\n query = unquote(query)\n fragment = unquote(fragment)\n\n # Result\n result = SplitResult(\n scheme,\n netloc.lower(),\n path,\n query,\n fragment\n )\n\n if not unsplit:\n return result\n\n # TODO: check if works with `unsplit=False`\n if strip_protocol or not has_protocol:\n result = urlunsplit(result)[2:]\n else:\n result = urlunsplit(result)\n\n return result", "def toString(self):\n self.query = {}\n for i in self.arguments:\n self.query[i] = self.arguments[i]\n\n self.query = urlencode(self.query)\n\n return urlparse.urlunsplit((self.scheme, self.netloc,\n self.path, self.query,self.fragment))", "def get_query(self):\r\n\r\n split = self.path_s.split(\"?\", 1)\r\n if len(split) == 1: return \"\"\r\n else: return split[1]", "def parse_url(url):\n url = urllib.parse.urlparse(url)\n query = urllib.parse.parse_qs(url.query)\n query_ = query.get('dn', query.get('title', ''))[0]\n if url.scheme == \"magnet\":\n return \"magnet:?xt={}\".format(query['xt'][0]), query_\n return \"http://{}{}{}\".format(*url[0:3]), query_", "def _urlparse_splitfragment(url):\r\n\r\n fpart = url.split(\"#\", 1)\r\n if len(fpart) == 2:\r\n fragment = fpart[1]\r\n else:\r\n fragment = \"\"\r\n\r\n return fpart[0], fragment", "def split_url(url): # Change the url so it can be iterated\n url = url.split('index') \n url = url[0] + 'page-1.html'\n url = url.split('page-')\n url = f\"{url[0]}page-1.html\"\n return url", "def get_url(self, msg):\n search = None # Search object\n\n search = self.url_pattern.search(msg)\n\n if search:\n return search.group(search.lastindex)\n else:\n return False", "def mail_address(mail_addr_list):\n if mail_addr_list:\n mail_addr_list = mail_addr_list.replace(\" \", \"\")\n if \",\" in mail_addr_list:\n mail_addr_list = mail_addr_list.replace(\",\", \";\")\n mail_addr_list = mail_addr_list.split(\";\")\n for mail_addr in mail_addr_list:\n if len(mail_addr.split(\"@\")) != 2:\n raise ArgumentTypeError(\"Invalid mail address: %s\" % mail_addr)\n return mail_addr_list\n else:\n raise ArgumentTypeError(\"mail address is not specified\")", "def processUrl(url):\n domain = 'http://www.gsmarena.com/'\n if domain not in url:\n url = 
urllib.parse.urljoin(domain, url)\n return url", "def _split_query(query):\n\n\tqq = query.split(' ')\n\tkeywords = []\n\taccum = None\n\tfor kw in qq: \n\t\tif accum is None: \n\t\t\tif kw.startswith('\"'):\n\t\t\t\taccum = kw[1:]\n\t\t\telif kw: \n\t\t\t\tkeywords.append(kw)\n\t\telse:\n\t\t\taccum += ' ' + kw\n\t\t\tif kw.endswith('\"'):\n\t\t\t\tkeywords.append(accum[0:-1])\n\t\t\t\taccum = None\n\tif accum is not None:\n\t\tkeywords.append(accum)\n\treturn [kw.strip() for kw in keywords if kw.strip()]", "def _update_request_uri_query(self, request):\n if \"?\" in request.path:\n request.path, _, query_string = request.path.partition(\"?\")\n if query_string:\n query_params = query_string.split(\"&\")\n for query in query_params:\n if \"=\" in query:\n name, _, value = query.partition(\"=\")\n request.query.append((name, value))\n\n request.path = url_quote(request.path, \"/()$=',\")\n\n # add encoded queries to request.path.\n if request.query:\n request.path += \"?\"\n for name, value in request.query:\n if value is not None:\n request.path += \"{}={}{}\".format(name, url_quote(value, \"/()$=',\"), \"&\")\n request.path = request.path[:-1]\n\n return request.path, request.query", "def sanitizeUrl(url):\n return url.split('?')[0]", "def _urlparse_splitscheme(url):\r\n # The scheme is valid only if it contains these characters.\r\n scheme_chars = \\\r\n \"abcdefghijklmnopqrstuvwxyz0123456789+-.\"\r\n\r\n scheme = \"\"\r\n rest = url\r\n\r\n spart = url.split(\":\", 1)\r\n if len(spart) == 2:\r\n\r\n # Normalize the scheme.\r\n spart[0] = spart[0].lower()\r\n\r\n # A scheme is valid only if it starts with an alpha character.\r\n if spart[0] and spart[0][0].isalpha():\r\n for char in spart[0]:\r\n if char not in scheme_chars:\r\n break\r\n (scheme, rest) = spart\r\n\r\n return scheme, rest", "def url_split(url):\n scheme, netloc = urllib.splittype(url)\n host, document = urllib.splithost(netloc)\n port = default_ports.get(scheme, 0)\n if host:\n host = host.lower()\n host, port = splitport(host, port=port)\n return scheme, host, port, document", "def get_return_to_from_query(req: web.Request) -> str:\n return_to = req.query.get(\"return_to\", \"/\")\n return urllib.parse.quote(return_to, safe=\"\")", "def preprocess_url(self, request_url: str):\n if re.fullmatch('.*/+', request_url) is None:\n return request_url\n else:\n return re.compile(request_url.rstrip('/') + '/+')", "def preprocess_url(self, request_url: str):\n if re.fullmatch('.*/+', request_url) is None:\n return request_url\n else:\n return re.compile(request_url.rstrip('/') + '/+')", "def preprocess_url(self, request_url: str):\n if re.fullmatch('.*/+', request_url) is None:\n return request_url\n else:\n return re.compile(request_url.rstrip('/') + '/+')", "def preprocess_url(self, request_url: str):\n if re.fullmatch('.*/+', request_url) is None:\n return request_url\n else:\n return re.compile(request_url.rstrip('/') + '/+')", "def preprocess_url(self, request_url: str):\n if re.fullmatch('.*/+', request_url) is None:\n return request_url\n else:\n return re.compile(request_url.rstrip('/') + '/+')", "def preprocess_url(self, request_url: str):\n if re.fullmatch('.*/+', request_url) is None:\n return request_url\n else:\n return re.compile(request_url.rstrip('/') + '/+')", "def preprocess_url(self, request_url: str):\n if re.fullmatch('.*/+', request_url) is None:\n return request_url\n else:\n return re.compile(request_url.rstrip('/') + '/+')", "def append_query_params(original_url, **kwargs):\n scheme, netloc, path, 
query_string, fragment = urlsplit(original_url)\n query_params = parse_qs(query_string)\n if kwargs is not None:\n for key, value in kwargs.items():\n query_params[key] = [value]\n\n new_query_string = urlencode(query_params, doseq=True)\n new_url = urlunsplit((scheme, netloc, path, new_query_string, fragment))\n return new_url", "def append_query_params(original_url, **kwargs):\n scheme, netloc, path, query_string, fragment = urlsplit(original_url)\n query_params = parse_qs(query_string)\n if kwargs is not None:\n for key, value in kwargs.items():\n query_params[key] = [value]\n\n new_query_string = urlencode(query_params, doseq=True)\n new_url = urlunsplit((scheme, netloc, path, new_query_string, fragment))\n return new_url", "def _construct_url(parts):\n results = []\n last_idx = len(parts) - 1\n for n, part in enumerate(parts):\n if n > 0:\n part = part.lstrip('/')\n if n < last_idx:\n part = part.rstrip('/')\n if part:\n results.append(part)\n return '/'.join(results)", "def clean_url_part(self):\n complete_url = \"{url_prefix}{url_part}\".format(\n url_prefix=self.URL_PREFIX,\n url_part=self.cleaned_data['url_part']\n )\n URLValidator(complete_url)\n self.cleaned_data['repo_url'] = complete_url\n return self.cleaned_data['url_part']", "def separate_query_params(\n url: str, param_names: Optional[Iterable[str]] = None\n ) -> Tuple[str, Mapping[str, Iterable[str]]]:\n o = urlparse(url)\n qp = parse_qs(o.query, keep_blank_values=True)\n\n # Separate requested parameters from the query parameters\n params = {\n k: v for k, v in qp.items() if param_names is None or k in param_names\n }\n for p in params:\n qp.pop(p)\n\n # Rebuild url with the remaining query parameters\n qs = urlencode(qp, doseq=True, quote_via=quote)\n url = urlunparse(o._replace(query=qs))\n\n return url, params", "def process_url(url: str) -> str:\n split_url = urlsplit(url.strip())\n if split_url.scheme == 'amqp+ssl':\n split_url = split_url._replace(scheme='amqps')\n\n if ((not split_url.username or not split_url.password) and\n 'username' in config and 'password' in config):\n user_pass = f\"{config['username']}:{config['password']}@\"\n new_netloc = user_pass + split_url.netloc\n split_url = split_url._replace(netloc=new_netloc)\n\n return urlunsplit(split_url)", "def getFilteredUrl(self, url):\n url = url.split('#')[0]\n if url.startswith('/wiki'):\n return ('https://en.wikipedia.org' + url)\n if 'en.wikipedia.org/wiki/' not in url:\n return ('https://en.wikipedia.org/wiki' + url)\n return url", "def split_emails(df, column):\n\n df['Email'] = df[column].str.extract(r'(.*?@.*?\\....),?', expand=True)\n df['Email 2'] = df[column].str.extract(r'.*@.*\\....,\\s?(.*@.*\\....)', expand=True)\n df['Email 3'] = df[column].str.extract(r'.*@.*\\....,\\s?.*@.*\\....,\\s?(.*@.*\\....)', expand=True)\n return df", "def composeURL(self,splitedURL):\n # 027 With use of SmartURL won't be necessary anymore.\n # 027 was used only in LinklistAdaptor.parse2Queue().parseLine() -> removed (Which actually might jeopardize cll).\n # 027 So actually is NOT used anywhere.\n \n #Could be replaced by string.join() method.\n #Also could be merged with method composePath().\n #Create child of list class with this method. \n \n self.debug.printHeader() \n url=''\n if len(splitedURL)>0:\n for piece in splitedURL:\n if not(piece==splitedURL[0]): url+='/'\n url+=piece\n self.logger.debug(\"Composed url is: %s\" %(url))\n return url\n #return \"/\".join(splitedURL) #026 This will do the same job. 
But needs to be tested.", "def get_query_string(self):\r\n pass", "def get_url_path(url):\n return filter(lambda x: x!='', url.split('/'))", "def _fixup_find_links(find_links):\n if isinstance(find_links, str):\n return find_links.split()\n assert isinstance(find_links, (tuple, list))\n return find_links", "def getMailRecipient(self, mail=None):\n\n to_field_list = []\n if mail is not None:\n message = message_from_string(mail)\n to_field = message.get('To')\n to_field_list = self.email_pattern.findall(to_field)\n return to_field_list", "def url_query_cleaner(url, parameterlist=(), sep='&', kvsep='=', remove=False, unique=True, keep_fragments=False):\n\n if isinstance(parameterlist, (six.text_type, bytes)):\n parameterlist = [parameterlist]\n url, fragment = urldefrag(url)\n base, _, query = url.partition('?')\n seen = set()\n querylist = []\n for ksv in query.split(sep):\n if not ksv:\n continue\n k, _, _ = ksv.partition(kvsep)\n if unique and k in seen:\n continue\n elif remove and k in parameterlist:\n continue\n elif not remove and k not in parameterlist:\n continue\n else:\n querylist.append(ksv)\n seen.add(k)\n url = '?'.join([base, sep.join(querylist)]) if querylist else base\n if keep_fragments:\n url += '#' + fragment\n return url", "def split_domain(name: str) -> Tuple[str, str]:\n parts = name.split(\":\", 1)\n if len(parts) == 1:\n return \"\", parts[0]\n return parts[0], parts[1]", "def splitQuery(query_string):\n\ttry:\n\t\td = dict([x.split('=') for x in query_string.split('&') ])\n\texcept ValueError:\n\t\td = {}\n\treturn d", "def test_query_string():\n assert (normalize_url(\"http://example.com/?a=1\") ==\n \"http://example.com/?a=1\")\n assert (normalize_url(\"http://example.com?a=1\") ==\n \"http://example.com/?a=1\")\n assert (normalize_url(\"http://example.com/a?b=1\") ==\n \"http://example.com/a?b=1\")\n assert (normalize_url(\"http://example.com/a/?b=1\") ==\n \"http://example.com/a?b=1\")", "def extract_base(subject: Union[str, List[str]]) -> Union[str, List[str]]:\n\n subject = subject.replace(\"*\", \"\").replace(\"~\", \"\")\n\n try:\n return Url2Netloc(subject).get_converted()\n except ValueError:\n return subject", "def url_unsplit(parts):\n if parts[2] == default_ports.get(parts[0]):\n return \"%s://%s%s\" % (parts[0], parts[1], parts[3])\n return \"%s://%s:%d%s\" % parts", "def _split(self, uri):\n if '/' in uri:\n return uri.split('/', 1)\n return [uri, None]", "def splitparams(path):\n if '/' in path:\n i = path.find(';', path.rfind('/'))\n else:\n i = path.find(';')\n if i < 0:\n return path, ''\n return path[:i], path[i + 1:]", "def build_url(self, query):\n\n parts = list(urlparse.urlparse(self.addon_url))\n parts[4] = urllib.urlencode(query)\n\n return urlparse.urlunparse(parts)", "def process_query(s):\n query = re.sub(r'[!\\'()|&:\\x00<>]', ' ', s).strip()\n if query:\n query = re.sub(r'\\s+', ' & ', query)\n # Support prefix search on the last word. 
A tsquery of 'toda:*' will\n # match against any words that start with 'toda', which is good for\n # search-as-you-type.\n query += ':*'\n return query", "def get_destination(post_request):\n return post_request.POST.get('recipient').split('@')[0]", "def FilterRawEmail(raw_msg):\r\n links = []\r\n soup = BeautifulSoup(raw_msg, features=\"lxml\")\r\n for a_tag in soup.find_all(\"a\", href=True):\r\n link = a_tag[\"href\"]\r\n if (len(link) < 10):\r\n continue\r\n else:\r\n print(\"Before Cleaning: \", link, end=\"\\n\\n\")\r\n clean_link = parse.unquote_plus(quopri.decodestring(link).decode('utf-8'))\r\n print(\"Link: \", clean_link, end = \"\\n\\n\")\r\n links.append(clean_link)\r\n return links\r\n\r\n\r\n# =============================================================================\r\n# =============================================================================\r\n\r\n\r\n def WriteToFile(msg, file_name):\r\n \"\"\"Write out a message to a file for debugging purposes.\r\n Args:\r\n msg: a message object\r\n file_name: the output file name\r\n Returns:\r\n None\r\n \"\"\"\r\n out_msg = str(msg)\r\n file = open(file_name, \"w\")\r\n file.write(str(decoded_msg))", "def url_build(web_url, url_part):\r\n url_full = urljoin(web_url,url_part.get('href'))\r\n return url_full", "def getParsedQueryString(self):\n return cgi.parse_qs(self.query_string)", "def extract_real_link(self, text):\n if text.startswith('https://www.google.com/url?'):\n return parse_qs(urlparse(text).query)['url'][0]\n\n return text", "def _url_join(self, *parts):\n return \"/\".join(map(lambda fragment: fragment.rstrip('/'), parts))", "def query_url(query: str) -> str:\n base_url = \"https://www.mta-dialog.de/stellenmarkt.html?tx_jobs_pi1%5Baction%5D=fullTextSearch&\" \\\n \"tx_jobs_pi1[value]=\"\n return base_url + query", "def SplitScmUrl(url):\r\n url_split = url.split('@')\r\n scm_url = url_split[0]\r\n scm_rev = 'HEAD'\r\n if len(url_split) == 2:\r\n scm_rev = url_split[1]\r\n return (scm_url, scm_rev)", "def url_fix(s, charset='utf-8'):\n if isinstance(s, unicode):\n s = s.encode(charset, 'ignore')\n scheme, netloc, path, qs, anchor = urlparse.urlsplit(s)\n path = urllib.quote(path, '/%')\n qs = urllib.quote_plus(qs, ':&=')\n return urlparse.urlunsplit((scheme, netloc, path, qs, anchor))", "def rebuild_url(scheme, path, fragment, username,\n password, hostname, port, query):\n netloc = \"@\".join(filter(None, [\n \":\".join(\n filter(None, [\n username,\n password,\n ])\n ),\n \":\".join(\n filter(None, [\n hostname,\n str(port or ''),\n ])\n )\n ]))\n\n return urllib.parse.urlunsplit([\n scheme,\n netloc,\n path,\n query,\n fragment,\n ])", "def parse_link_to_id(self, playlist_link: str) -> str:\n split_1 = playlist_link.split('/')[4]\n split_2 = split_1.split('?')\n return split_2[0]", "def _parsed_query(self, query_string):\r\n return urlparse(self.runtime.handler_url(self.block, 'handler', query=query_string)).query", "def parse_event_url(url: str) -> (str, str):\n event_type = url.split(\"?mailto\")[0].split(\"/\")[-1]\n if event_type == \"events\":\n date = url.split(\"from-collected-date=\")[1].split(\"&\")[0]\n else:\n date = url.split(\"from-updated-date=\")[1].split(\"&\")[0]\n\n return event_type, date", "def _parse_emails(self, emails):\n return [e.strip() for e in emails.split(',')]", "def get_url_words(self, url):\n token_filter = r';|,|-|/'\n url_words = re.split(token_filter, url)\n \n return \" \".join(url_words)", "def process_url(url):\n parsed = urlparse(url)\n if parsed.scheme:\n return 
parsed.netloc, parsed.path\n else:\n host_part = parsed.path\n hostname = host_part.partition(\"/\")[0]\n path = \"/\" + host_part.partition(\"/\")[2]\n return hostname, path", "def strip_domains(email):\n if '@' in email:\n for domain in MAIL_DOMAINS:\n if email.lower().endswith(domain):\n email = email[:email.find('@')]\n return email", "def separate_name_and_email(person_info: str) -> Tuple[str, Union[str, None]]:\n name = email = None\n for idx, ch in enumerate(person_info):\n if ch == \"<\":\n name = person_info[: idx - 1]\n # Email Validation\n if Command.validate_email(person_info[idx + 1: -1]):\n email = person_info[idx + 1: -1]\n if not name:\n name = person_info\n return name, email", "def format_url(url, msg):\n return url+\"?str={}\".format(urllib.parse.quote(msg))", "def _GetUrlParams(self, query=None):\n params = sum([c_w_c._GetUrlParams() for c_w_c in self.reactants], [])\n params.extend(self.aq_params._GetUrlParams())\n \n if query is not None:\n for arrow in constants.POSSIBLE_REACTION_ARROWS:\n tmp_query = query.replace(arrow, '=>')\n params.append('query=%s' % urllib.quote(tmp_query))\n \n return params", "def test_query_sorting():\n assert (normalize_url('http://example.com/a?b=1&c=2') ==\n 'http://example.com/a?b=1&c=2')\n assert (normalize_url('http://example.com/a?c=2&b=1') ==\n 'http://example.com/a?b=1&c=2')", "def html_filter(self, string):\n words = string.split()\n for word in words:\n if re.search('^http:\\/\\/.*$', word):\n word = '<a href=\"%(url)s\">%(url)s</a>' % {'url':word}\n if re.search('^[A-Z0-9._%+-]+@[A-Z0-9.-]+\\.[A-Z]{2,4}$', word.upper()):\n word = '<a href=\"mailto:%(email)s\">%(email)s</a>' % {'email':word} \n return ' '.join(words)", "def normalize_url_without_bewit(url, bewit):\n bewit_pos = url.find('bewit=')\n # Chop off the last character before 'bewit=' which is either a ? or a &\n bewit_pos -= 1\n bewit_end = bewit_pos + len(\"bewit=\" + bewit) + 1\n o_url = ''.join([url[0:bewit_pos], url[bewit_end:]])\n return o_url" ]
[ "0.67279136", "0.6404292", "0.6209104", "0.61731404", "0.6047626", "0.5870148", "0.5843502", "0.56338006", "0.55826217", "0.55750227", "0.5401481", "0.5397454", "0.5378606", "0.5351534", "0.53454334", "0.5219765", "0.5213521", "0.5209964", "0.5108182", "0.5093666", "0.5083529", "0.50822127", "0.50821626", "0.50736403", "0.5069674", "0.50652236", "0.50642914", "0.506275", "0.50468314", "0.5045113", "0.5026768", "0.50108486", "0.4997517", "0.49970523", "0.49926448", "0.49874687", "0.49629152", "0.49578315", "0.49547073", "0.49424574", "0.4938102", "0.4917782", "0.49125454", "0.4895852", "0.48851368", "0.48785403", "0.4868557", "0.48605594", "0.48605594", "0.48605594", "0.48605594", "0.48605594", "0.48605594", "0.48605594", "0.48312703", "0.48312703", "0.4819611", "0.48088947", "0.478874", "0.477894", "0.47747096", "0.47713447", "0.47654474", "0.47417268", "0.4736337", "0.47338456", "0.47251356", "0.4720791", "0.47199616", "0.47152984", "0.4709892", "0.47045296", "0.47005928", "0.46893558", "0.46867695", "0.46836662", "0.46834865", "0.46824187", "0.46774817", "0.4675349", "0.46671706", "0.46662548", "0.46603462", "0.46577954", "0.4651367", "0.4646087", "0.4633628", "0.4617543", "0.46047997", "0.46008414", "0.4585753", "0.45840228", "0.45805332", "0.4576649", "0.457567", "0.45681372", "0.4567321", "0.45535964", "0.4548852", "0.45433816" ]
0.7407154
0
Parse and rejoin the given CGI query.
def url_parse_query(query, encoding=None):
    if isinstance(query, unicode):
        if encoding is None:
            encoding = url_encoding
        query = query.encode(encoding, 'ignore')
    query = query.replace('?', '')
    l = set()
    for k, v, sep in parse_qsl(query, True):
        k = url_quote_part(k, '/-:,;')
        if not k:
            continue
        if v:
            v = url_quote_part(v, '/-:,;')
            l.add("%s=%s" % (k, v))
        elif v is None:
            l.add("%s" % k)
        else:
            # some sites do not work when the equal sign is missing
            l.add("%s=" % k)
    query = '&'.join(sorted(l))
    return query
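A minimal usage sketch of the same normalize-and-rejoin idea, written against the Python 3 standard library because the document above is Python 2 code that depends on module-level helpers not shown here (url_encoding, url_quote_part, and a parse_qsl variant yielding (key, value, separator) triples — all assumptions about the surrounding module). One caveat: the stdlib parse_qsl returns plain (key, value) pairs, so this sketch cannot reproduce the original's distinction between a bare key ("k") and an empty value ("k=").

from urllib.parse import parse_qsl, quote

def parse_and_rejoin(query):
    # Drop any '?', split into key/value pairs, re-quote each part,
    # skip empty keys, then rejoin sorted and '&'-separated.
    parts = set()
    for k, v in parse_qsl(query.replace('?', ''), keep_blank_values=True):
        k = quote(k, safe='/-:,;')
        if not k:
            continue
        parts.add("%s=%s" % (k, quote(v, safe='/-:,;')))
    return '&'.join(sorted(parts))

# e.g. parse_and_rejoin("?b=2&a=1&a=1") returns 'a=1&b=2'
# (duplicates collapse via the set, and pairs come back sorted)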
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parse_query(request):\n\n querystring = request.uri['query']\n fp = StringIO(querystring)\n\n headers = {}\n headers['content-type'] = request.message.get('content-type')\n headers['content-length'] = request.message.get('content-length')\n\n environ = {}\n environ['REQUEST_METHOD'] = request.method\n\n boundary = request.message.get('boundary')\n\n query = cgi.FieldStorage( fp = fp\n , headers = headers\n , outerboundary = boundary\n , environ = environ\n , keep_blank_values = True\n , strict_parsing = False\n )\n\n return query", "def process_query(s):\n query = re.sub(r'[!\\'()|&:\\x00<>]', ' ', s).strip()\n if query:\n query = re.sub(r'\\s+', ' & ', query)\n # Support prefix search on the last word. A tsquery of 'toda:*' will\n # match against any words that start with 'toda', which is good for\n # search-as-you-type.\n query += ':*'\n return query", "def format_query(query, option=''):\n q_tmp = []\n for s in query:\n if option != 'url' or s.isalnum():\n q_tmp.append(s)\n else:\n q_tmp.append('\\\"' + s.lower() + '\\\"')\n\n if option == 'file':\n sep = '_'\n else:\n sep = ' '\n q = sep.join(q_tmp)\n if option == 'url':\n q = urllib2.quote('\\'' + q + '\\'')\n return q", "def parse_query(query):\n qlist = []\n splitted = query.split(\"&\")\n for entry in splitted:\n cmd, arg = entry.split(\"=\")\n qlist.append((cmd, arg))\n return qlist", "def request(query):", "def format_query(query):\n query = query.encode('utf-8')\n # all queries must start with 'c '!\n if not query.startswith('c '):\n query = 'c ' + query.strip()\n # check if outcome of query is set to '?'\n if query[-1] != '?':\n query = query + ' ?'\n return query + '\\n'", "def _replace_url_query(url, new_query):\n scheme, netloc, path, _, fragment = urlparse.urlsplit(url)\n return urlparse.urlunsplit((scheme, netloc, path, new_query, fragment))", "def reformulate_query(s):\n words = tokenize(s)\n tags = [tag for _, tag in pos_tag(words)]\n\n if tags[-1] == '.':\n words.pop()\n\n # what/who questions\n if tags[0] in set(['WP', 'WDT']):\n if tags[1] in set(['VBZ', 'VBD', 'VBP']):\n if tags[-1] is not 'IN':\n exact_query = '{0}\\s*{1}\\s*{2}'.format(' '.join(words[2:]),\n '(?:\\(.*\\))?', words[1])\n inexact_query = '{0} {1}'.format(' '.join(words[2:]), words[1])\n return exact_query, inexact_query\n return s, s", "def makeQueries(baseQuery, joiningChar):\n results = []\n searchQueries = sys.argv[2:]\n for query in searchQueries: # for every individual query\n queryList = query.split() # split individual terms in a query\n # join them back with the joining char between them\n formatedQuery = joiningChar.join(queryList)\n # append the structured query to the result\n results.append(baseQuery + formatedQuery)\n return results", "def unparse(self):\r\n # only parse the query params if there is an update dict\r\n q = self.query\r\n if self._url_updates or self._query_dict is not None:\r\n q = self._query_dict or self.query_dict\r\n q.update(self._url_updates)\r\n q = query_string(q).lstrip('?')\r\n\r\n # make sure the port is not doubly specified \r\n if self.port and \":\" in self.hostname:\r\n self.hostname = self.hostname.split(':')[0]\r\n\r\n # if there is a netloc, there had better be a scheme\r\n if self.netloc and not self.scheme:\r\n self.scheme = \"http\"\r\n \r\n return urlunparse((self.scheme, self.netloc,\r\n self.path.replace('//', '/'),\r\n self.params, q, self.fragment))", "def parseQueryString():\n\tqs = cgi.FieldStorage()\n\treturn({'char': qs.getvalue('char'), 'zone': qs.getvalue('zone')})", "def 
parse_for_query(query):\n index = query.find('@')\n if index == -1:\n return \"\"\n elif index == len(query)-1:\n # Make sure the final return doesn't index outside the list.\n return \"\"\n else:\n return query[index+1:]", "def parse(args, query):\n\n global query_type\n\n # Deal first with requests for definition or pronunciation\n # 1. Make the code easier to read\n first_word = args[0]\n second_word = args[1] if len(args) > 1 else \"\"\n third_word = args[2] if len(args) > 2 else \"\"\n fourth_word = args[3] if len(args) > 3 else \"\"\n # we use the teranary operator (this if ____ else that) to avoid an IndexError\n # IndexError would be raised if we tried to access the second element (args[1])\n # in a list which contained only one item (eg args == [\"lonely\"])\n # the teranary operator (in most languages it looks like \"____ ? this : that\")\n # returns \"this\" when the if is true and \"that\" when the if is false\n # meaning, if len(args) is NOT greater than 1, second_word == \"\"\n\n # 2. Check for keywords in the list of arguments\n # Example: nostrum defined\n # Example: pronunciation of otolaryngology\n if first_word == \"define\":\n # e.g. if the first word is \"define\" we'll add the second word to the query\n query = {\"sp\": second_word, \"md\": \"d\", \"max\": \"1\", \"qe\": \"sp\", \"ipa\": \"1\"}\n # the query is a dictionary of GET parameters for the http request, eg\n # https://api.datamuse.com/words?max=1&sp=SECOND_WORD_HERE&qe=sp&md=d&ipa=1\n elif second_word == \"defined\" or second_word == \"definition\":\n query = {\"sp\": first_word, \"md\": \"d\", \"max\": \"1\", \"qe\": \"sp\", \"ipa\": \"1\"}\n # this one uses string interpolation (the f\"\" stuff)\n elif f\"{second_word} {third_word}\" == \"means what\":\n query = {\"sp\": first_word, \"md\": \"d\", \"max\": \"1\", \"qe\": \"sp\", \"ipa\": \"1\"}\n elif f\"{second_word} {third_word} {fourth_word}\" == \"is said how\":\n query = {\"sp\": first_word, \"md\": \"r\", \"max\": \"1\", \"qe\": \"sp\", \"ipa\": \"1\"}\n # this one uses regular expressions -- i.e. 
if the second_word is \"of\" or \"for\"\n elif first_word == \"definition\" and re.match(r'(of)|(for)',second_word):\n query = {\"sp\": third_word, \"md\": \"d\", \"max\": \"1\", \"qe\": \"sp\", \"ipa\": \"1\"}\n # the is_pronounced function returns true if first_word is a (mis)spelling of pronounced\n elif re.match(r'(of)|(for)',second_word) and is_pronounced(first_word):\n query = {\"sp\": third_word, \"md\": \"r\", \"max\": \"1\", \"qe\": \"sp\", \"ipa\": \"1\"}\n # the ordering in the above list is not entirely random\n # since an if-elif-else statement won't keep evaluating after it finds a match\n # it makes sense to put the most computationally complex clauses at the end\n # >>> import timeit\n # >>> timeit.timeit('from word_helpers import is_pronounced; is_pronounced(\"pronounced\")', number=10000)\n # 0.022870146989589557\n # >>> timeit.timeit('args = [\"defined\"]; args[0] == \"defined\"', number=10000)\n # 0.002359684993280098\n # it takes 2 milliseconds to compare a string in a list 10,000 times\n # -- versus 2 centiseconds to run is_pronounced 10,000 times\n # (on my Intel Core i5 2.67GHz CPU -- obviously speed depends on the processor)\n # it's also worth noting that readability counts more than speed optimization (most of the time!)\n\n # Quick way to check if any of the above if statements matched\n if \"sp\" in query:\n # if so, we are done in this function\n if query[\"md\"] == \"r\": query_type = \"PRO\"\n if query[\"md\"] == \"d\": query_type = \"DEF\"\n return query\n\n # these will be useful later\n STOP_WORDS = (\"and\", \"meaning\", \"means\", \"max\", \"about\", \"which\", \"that\")\n\n # Parse more complicated requests for synonyms, etc\n # 0 is false in python, so this loop will run until we've removed all the args\n while len(args):\n # we must reset these vars each time the loop starts\n # in case we've deleted items from the args list\n first_word = args[0]\n second_word = args[1] if len(args) > 1 else \"\"\n third_word = args[2] if len(args) > 2 else \"\"\n # we use the teranary operator (this if ____ else that) to avoid an IndexError\n # IndexError would be raised if we tried to access the second element (args[1])\n # in a list which contained only one item (eg args == [\"lonely\"])\n # the teranary operator (in most languages it looks like \"____ ? this : that\")\n # returns \"this\" when the if is true and \"that\" when the if is false\n # meaning, if len(args) is NOT greater than 1, second_word == \"\"\n\n # Disambiguate homonym requests from spelling correction requests\n # Example: sounding like tung\n # Example: sounds like doe but spelled differently\n if re.match(r'sound((s)|(ing)) like',f\"{first_word} {second_word}\"):\n\n # again, use len(args) to avoid an IndexError\n if len(args) >= 6 and \\\n re.match(r'((but)|(except)) spelled different(ly)?',f\"{args[3]} {args[4]} {args[5]}\"):\n # but instead of teranary operator,\n # use \"short circuit logic\" -- when python sees \"if __A__ and __B__ \",\n # it knows that if A is false, the whole thing will be false\n # (you can't have \"ice cream and potatoes\" for dinner if you don't have ice cream)\n # and it won't waste time evaluating B, so re.match won't run and args[4]\n # won't be accessed and no IndexError will be raised, yay!\n # regex explained: ? means the prior thing matched zero or one times\n # different(ly)? 
matches \"different\" and \"differently\"\n query[\"rel_hom\"] = third_word\n # now, delete 6 items from args, starting at item 0\n del args[0:6]\n else:\n query[\"sl\"] = third_word\n del args[0:3]\n\n # Example: spelled like 'cens?r'\n elif re.match(r'spell((ed)|(ing)) like',f\"{first_word} {second_word}\"):\n # two stars (**) means \"unpack\" a dictionary\n # just like unpacking a suitcase, we've dumped the old contents of query\n # into a new dictionary (which we are saving with the same variable name!)\n query = {**query,\"sp\": third_word}\n # query[\"sp\"] = third_word also works fine\n # just showing off how to combine two dictionaries :)\n del args[0:3]\n\n # Example: rhymes with culminate\n elif len(args) > 2 and second_word == \"with\" and is_rhymes(first_word):\n query[\"rel_rhy\"] = third_word\n del args[0:3]\n\n # Example: almost rhymes with culminate\n elif len(args) > 3 and \\\n f\"{first_word} {third_word}\" == \"almost with\" and \\\n is_rhymes(second_word):\n query[\"rel_nry\"] = args[3] # fourth_word\n del args[0:4]\n\n # Example: comes after sea\n elif f\"{first_word} {second_word}\" == \"comes after\":\n query[\"lc\"] = third_word\n del args[0:3]\n elif first_word == \"follows\":\n query[\"lc\"] = second_word\n del args[0:2]\n elif f\"{first_word} {second_word}\" == \"comes before\":\n query[\"rc\"] = third_word\n del args[0:3]\n elif first_word == \"preceeds\":\n query[\"rc\"] = second_word\n del args[0:2]\n\n # Example: describes paint\n elif first_word == \"describes\":\n query[\"rel_jjb\"] = second_word\n del args[0:2]\n\n # Example: associated with feet\n elif f\"{first_word} {second_word}\" == \"associated with\" or \\\n f\"{first_word} {second_word}\" == \"triggered by\":\n query[\"rel_trg\"] = third_word\n del args[0:3]\n\n # Example: meaning feeling tired\n elif first_word in [\"means\",\"meaning\",\"like\"]:\n # get rid of first_word\n del args[0]\n # now short circuit logic again, plus using the tuple from ealier\n # b/c if we have \"meaning deer and sounds like roe\" we don't want\n # query[\"ml\"] == \"deer and sounds like roe\" -- it should be just \"deer\"\n while len(args) and args[0] not in STOP_WORDS:\n # teranary operator prevents KeyError if \"ml\" not already in query dictionary\n query[\"ml\"] = f\"{query['ml']} {args[0]}\" if \"ml\" in query else args[0]\n del args[0]\n # an example with the previous code to make things clearer\n # say args == [\"means\", \"egg\", \"beater\", \"and\", \"max\", \"35\"]\n # first_word IS in [\"means\",\"meaning\",\"like\"]\n # del first_word, args is now [\"egg\", \"beater\", \"and\", \"max\", \"35\"]\n # len(args) == 5, args[0] is NOT in STOP_WORDS\n # \"ml\" is NOT in query, so teranary returns args[0] (\"egg\")\n # args[0] is copied to query[\"ml\"] (query is now {ml: \"egg\"})\n # del args[0], args is now [\"beater\", \"and\", \"max\", \"35\"]\n # return to top of while loop, len(args) == 4, args[0] is NOT in STOP_WORDS\n # \"ml\" IS in query, so teranary returns f\"{query['ml']} {args[0]}\" (\"egg beater\") \n # f\"{query['ml']} {args[0]}\" is copied to query[\"ml\"]\n # (query is now {ml: \"egg beater\"})\n # del args[0], args is now [\"and\", \"max\", \"35\"]\n # return to top of while loop, len(args) == 3,\n # args[0] IS in STOP_WORDS (args[0] == \"and\")\n # DO NOT enter the while loop, continue past this code block\n\n # Discover the topic of our query\n elif first_word == \"about\":\n del args[0]\n count = 0\n # Datamuse allows a max of five topic words\n while len(args) and args[0] not in STOP_WORDS 
and count <= 5:\n query[\"topics\"] = f\"{query['topics']} {args[0]}\" if \"topics\" in query else args[0]\n del args[0]\n # count += 1 is the same as count = count + 1\n count += 1\n\n # How many results to return (max 1000)\n elif first_word in [\"max\", \"maximum\", \"only\"]:\n user_max = convert_num(second_word)\n if user_max and int(user_max) <= 1000:\n query[\"max\"] = user_max\n del args[0:2]\n\n # Remove filler words if they weren't parsed out above\n elif first_word in [\"that\",\"which\",\"and\",\"like\",\"is\"]:\n del args[0]\n\n # Add anything not otherwise parsable to the ml parameter\n else:\n query[\"ml\"] = f\"{query['ml']} {first_word}\" if \"ml\" in query else first_word\n del args[0]\n\n # this is the bottom of that massive while loop\n # if args is not empty by now, we'll start over from the top ^\n\n return query\n # and this is the end of the \"def parse(args, query)\" function\n # whew!", "def lexer(string): # TODO: refactor\n parsedlist = []\n parsedstring = ''\n leftbcounter = 0\n rightbcounter = 0\n qcounter = 0\n for index, a in enumerate(string):\n if qcounter == 2:\n if a.isalpha():\n qcounter = 1\n else:\n qcounter = 0\n if a == '(':\n leftbcounter += 1\n if a == ')':\n rightbcounter += 1\n if a == \"'\" and leftbcounter == rightbcounter:\n qcounter += 1\n if a != ' ' and leftbcounter == rightbcounter \\\n and qcounter == 0:\n parsedstring += a\n if index+1 == len(string):\n parsedlist.append(parsedstring)\n parsedstring = ''\n elif leftbcounter != rightbcounter:\n parsedstring += a\n elif qcounter > 0:\n parsedstring += a\n if index+1 == len(string):\n parsedlist.append(parsedstring)\n parsedstring = ''\n else:\n parsedlist.append(parsedstring)\n parsedstring = ''\n if leftbcounter != rightbcounter:\n raise BadRequest()\n bl = []\n sl = []\n counter = 0\n for index, query in enumerate(parsedlist, 1):\n if query == \"and\" or query == \"or\" or query == \"not\":\n if sl:\n bl.append(sl)\n bl.append([query])\n counter = 0\n sl = []\n continue\n sl.append(query)\n counter += 1\n if index == len(parsedlist) and sl:\n bl.append(sl)\n # i later added a third nested list to seperate AND and OR\n query_list = []\n al = []\n counter = 0\n for index, grouped_query in enumerate(bl, 1):\n if grouped_query[0] == \"or\":\n query_list.append(al)\n query_list.append([grouped_query])\n counter = 0\n al = []\n continue\n al.append(grouped_query)\n counter += 1\n if index == len(bl):\n query_list.append(al)\n\n for x in query_list:\n for y in x:\n if y[0] == 'and' or y[0] == 'or' or y[0] == 'not':\n QueryObjects.B.append(y[0])\n continue\n if y[0][0] == '(' and y[0][-1] == ')':\n QueryObjects.B.append(y[0][0])\n lexer(y[0][1:-1])\n QueryObjects.B.append(y[0][-1])\n else:\n QueryObjects.IND += 1\n n = 'arg' + str(QueryObjects.IND)\n QueryObjects.D[n] = query_mapping(y, QueryObjects.IND)[\"query\"]\n QueryObjects.B.append(n)\n return QueryObjects.B", "def _parsed_query(self, query_string):\r\n return urlparse(self.runtime.handler_url(self.block, 'handler', query=query_string)).query", "def process_query(self, query_str):\n # make sure everything is lower case\n query = query_str.lower()\n # split on whitespace\n query = query.split()\n # remove non alphanumeric characters\n query = [self.alphanum.sub('', xx) for xx in query]\n # stem words\n query = [self.p.stem(xx) for xx in query]\n return query", "def _update_request_uri_query(self, request):\n if \"?\" in request.path:\n request.path, _, query_string = request.path.partition(\"?\")\n if query_string:\n query_params = 
query_string.split(\"&\")\n for query in query_params:\n if \"=\" in query:\n name, _, value = query.partition(\"=\")\n request.query.append((name, value))\n\n request.path = url_quote(request.path, \"/()$=',\")\n\n # add encoded queries to request.path.\n if request.query:\n request.path += \"?\"\n for name, value in request.query:\n if value is not None:\n request.path += \"{}={}{}\".format(name, url_quote(value, \"/()$=',\"), \"&\")\n request.path = request.path[:-1]\n\n return request.path, request.query", "def updated_query_str(request, *args):\n\n return urllib.urlencode(updated_query(request, *args))", "def _clean_query(self, query):\n for object_query in query:\n filters = object_query.get(\"filters\", {}).get(\"expression\")\n self._clean_filters(filters)\n self._macro_expand_object_query(object_query)\n return query", "def clean_query_element(self, search=\"), \\n\", replace=\");\"):\n if len(search) > 1 and self.q_str.endswith(search):\n ln1 = len(search)\n self.q_str = self.q_str[:-ln1] + replace", "def _parse_query(queryStr):\n mainNode = LogicNode.LogicNode()\n queryStr = queryStr.replace(' ','')\n \n logicList, myOPList = _break_query(queryStr)\n\n #converts operator strings to actual operators\n convertOp = {\n '&':operator.and_,\n '|':operator.or_,\n '^':operator.xor\n }\n\n for item in myOPList:\n mainNode.operators.append(convertOp[item])\n \n #adds the simple comparisons to the LogicNode\n mainNode.add_children(logicList)\n return mainNode", "def process_query(query_file):\r\n query_data = query_file.readlines()\r\n query_dict = {}\r\n x = 1 \r\n search_dict = {}\r\n search_dict['username'] = query_data[x].strip('\\n')\r\n x += 1\r\n operation_list = []\r\n \r\n while query_data[x] != 'FILTER\\n': \r\n operation_list.append(query_data[x].strip('\\n'))\r\n x += 1\r\n \r\n search_dict['operations'] = operation_list \r\n query_dict['search'] = search_dict \r\n x += 1\r\n \r\n filter_dict = {}\r\n filter_format(filter_dict, query_data, 'name-includes', x)\r\n filter_format(filter_dict, query_data, 'location-includes', x)\r\n filter_format(filter_dict, query_data, 'follower', x)\r\n filter_format(filter_dict, query_data, 'following', x)\r\n query_dict['filter'] = filter_dict\r\n \r\n present_dict = {}\r\n sort_by = query_data[-2].strip('sort-by ')\r\n present_dict['sort-by'] = sort_by.strip('\\n')\r\n \r\n format_type = query_data[-1].lstrip('format ')\r\n present_dict['format'] = format_type\r\n query_dict['present'] = present_dict\r\n \r\n return query_dict", "def parse_for_filters(query_string):\n if ';' in query_string:\n strings = query_string.split(';')\n else:\n strings = query_string.split('&')\n\n filters = []\n leftovers = [] \n for string in strings:\n query = cgi.parse_qs(string)\n try:\n key, value = query.items()[0]\n\n try:\n argument = unicode(value[0], 'UTF-8')\n except TypeError:\n argument = value[0]\n\n func = FILTER_PARSERS[key](argument)\n filters.append(func)\n except(KeyError, IndexError):\n leftovers.append(string)\n\n leftovers = ';'.join(leftovers)\n return filters, leftovers", "def normalise_query(query: str) -> str:\n return EXTRA_WHITE_SPACE.sub(' ', query.strip())", "def _parsed_query(self, query_string):\r\n return urlparse(handler_url(self.block, 'handler', query=query_string)).query", "def _process_query(self, url, query=None, add_authtok=True):\n if add_authtok:\n if self.authtok == '':\n self._error('No auth token, must login first')\n return False\n if query is None:\n query = {}\n query.update({'auth': self.authtok, 'email': 
self.email})\n\n if len(query) > 0:\n request = url + '?' + urllib.urlencode(query)\n else:\n request = url\n\n self.api_count += 1\n try:\n fh = urllib2.urlopen(request)\n response = fh.read()\n fh.close()\n except urllib2.HTTPError, e:\n # Received a non 2xx status code\n raise SimplenoteError('http error: {}'.format(e.code))\n except urllib2.URLError, e:\n # Non http error, like network issue\n raise SimplenoteError('url error: {}'.format(e.reason))\n return json.loads(response)", "def _sanitize_query(self, uri=None, querydict=None):\n # import pdb; pdb.set_trace()\n if querydict is None:\n query = urisplit(uri).getquerydict()\n else:\n query = querydict\n result = {}\n for (key, values) in query.items():\n logger.debug('sanitize_query 1 %s:%s',key,values)\n result_values = []\n for value in values:\n if key == 'date':\n year = self._sanitize_year(str(value))\n if year:\n result_values.append(year)\n # we possibly could introduce query['year'],\n # query['month'] etc.\n # Maybe later\n elif value:\n result_values.append(value)\n if result_values:\n result[key] = result_values\n return result", "def query_str(self, new_query_str):\n self.query_buffer.text = new_query_str", "def test_process_query(self):\n self.assertEqual(process_query(\"\"), \"\")\n self.assertEqual(process_query(\"org\"), \"org:*\")\n self.assertEqual(process_query(\"a b\"), \"a & b:*\")\n self.assertEqual(process_query(\"(foo bar)\"), \"foo & bar:*\")", "def _process_query(self, query):\n query_search_pattern = r'\\nquery: (\\{.*\\}) nreturned'\n query_search_remove_pattern = r'(.*)(\\nquery: \\{.*\\} )( nreturned.*)'\n\n command_search_pattern = r'command: (\\{.*\\}) reslen'\n command_search_remove_pattern = r'(.*)(command: \\{.*\\})( reslen.*)'\n\n out = {}\n out['millis'] = query.get('millis', 0)\n out['ts'] = query.get('ts')\n\n out['org_info'] = query.get('info')\n\n info = query.get('info').split(' ')\n out['operation_type'] = info[0]\n out['collection'] = info[1]\n\n info = ' '.join(info[2:])\n mongo_query = re.search(query_search_pattern, info)\n mongo_command = re.search(command_search_pattern, info)\n\n if mongo_query:\n out['query'] = mongo_query.group(1)\n info = re.sub(query_search_remove_pattern, r'\\1\\3', info)\n\n elif mongo_command:\n out['query'] = mongo_command.group(1)\n info = re.sub(command_search_remove_pattern, r'\\1\\3', info)\n else:\n out['query'] = \"\"\n\n out['extra'] = info\n out['optimizations'] = ', '.join(self._should_optimize(out))\n\n return out", "def make_complex_query_set(self):\n\n query = self.request.GET.get(\"q\")\n program_id = self.request.META.get('HTTP_X_SVMS_PROGRAM_ID')\n category = self.request.GET.get(\"category\")\n title = self.request.GET.get(\"title\")\n level = self.request.GET.get(\"level\")\n description = self.request.GET.get(\"description\")\n status = self.request.GET.get(\"status\")\n job_tag = self.request.GET.get(\"job_tag\")\n\n q_object = Q()\n\n if query:\n q_object.add((\n Q(program_id=query) |\n Q(category=query) |\n Q(title__icontains=query) |\n #Q(category__category_name__icontains=query) |\n Q(description__icontains=query) |\n Q(job_tag__tag__in=str(query).split(\",\"))\n ), Q.OR)\n\n if query.isnumeric():\n q_object.add(\n Q(level__icontains=int(query)), Q.OR)\n\n q_object.add(Q(status=strtobool(query)), Q.OR) if query in [\n \"true\", \"True\", \"False\", \"false\"] else None\n\n else:\n if program_id:\n q_object.add(\n Q(program_id=program_id),\n Q.AND)\n\n if category:\n q_object.add(\n Q(category=category),\n Q.AND)\n if title:\n 
q_object.add(\n Q(title__icontains=title),\n Q.AND)\n\n if description:\n q_object.add(\n Q(description__icontains=description), Q.AND)\n\n if job_tag:\n q_object.add(\n Q(job_tag__tag__in=str(job_tag).split(\",\")),\n Q.AND)\n\n if level:\n if level.isnumeric():\n q_object.add(\n Q(level__icontains=int(level)),\n Q.AND)\n else:\n raise Exception(\n ErrorMessage.WRONG_FIELD_TYPE.value.format(\"level\",\n \"numeric\"))\n\n q_object.add(Q(status=strtobool(status)), Q.AND) if status in [\n \"true\", \"True\", \"False\", \"false\"] else None\n\n return q_object", "def read_parse_helper(self, query, path, triples, explicit_vars, implicit_vars, given_vars) :\n\t\t# constants\n\t\tif type(query) == int or type(query) == float :\n\t\t\treturn unicode(query)\n\t\telif type(query) == str or type(query) == unicode:\n\t\t\tif type(query) == str :\n\t\t\t\tquery = unicode(query)\n\t\t\tif self.n.matches(query) :\n\t\t\t\treturn query\n\t\t\telse :\n\t\t\t\tquery = query.replace('\\\\', '\\\\\\\\')\n\t\t\t\tquery = query.replace('\\n', '\\\\n')\n\t\t\t\tquery = query.replace('\\r', '\\\\r')\n\t\t\t\tif '\"' not in query :\n\t\t\t\t\treturn u'\"'+query+u'\"@'+self.lang\n\t\t\t\tif \"'\" not in query :\n\t\t\t\t\treturn u\"'\"+query+u\"'@\"+self.lang\n\t\t\t\tif '\"\"\"' not in query :\n\t\t\t\t\treturn u'\"\"\"'+query+u'\"\"\"@'+self.lang\n\t\t\t\tif \"'''\" not in query :\n\t\t\t\t\treturn u\"'''\"+query+u\"'''@\"+self.lang\n\t\t\t\traise Exception(\"can't figure out how to put this in quotes...\")\n\t\telif type(query) == datetime.datetime :\n\t\t\treturn u'\"%d-%d-%dT%d:%d:%dT\"^^xsd:dateTime' % (query.year, query.month, query.day, query.hour, query.minute, query.second)\n\t\telif type(query) == time.struct_time :\n\t\t\treturn u'\"%d-%d-%dT%d:%d:%dT\"^^xsd:dateTime' % query[0:6]\n\t\telif type(query) == rdflib.URIRef :\n\t\t\treturn query.n3()\n\t\telif type(query) == rdflib.Literal :\n\t\t\tif query.datatype == None :\n\t\t\t\t# this is a string\n\t\t\t\treturn query.n3()+'@'+self.lang\n\t\t\telse :\n\t\t\t\treturn query.n3()\n\t\t\n\t\t# cases resulting in explicit variables\n\t\telif query == None :\n\t\t\treturn self._new_var(explicit_vars, path)\n\t\telif query == [] :\n\t\t\tpath = copy.copy(path)\n\t\t\tpath.append(list)\n\t\t\treturn self._new_var(explicit_vars, path)\n\t\t\n\t\telif type(query) == list and len(query) == 1 and type(query[0]) == dict :\n\t\t\tpath = copy.copy(path)\n\t\t\tpath.append(list)\n\t\t\treturn self.read_parse_helper(query[0], path, triples, explicit_vars, implicit_vars, given_vars)\n\t\t\n\t\t# a list of only dicts length > 1 (length > 1 known because not the above case)\n\t\telif type(query) == list and all([type(i) == dict for i in query]) :\n\t\t\t# TODO !!!\n\t\t\t# should this match any of these object or all of these?\n\t\t\t# should maybe not require that the type of all objects in the list are \n\t\t\t# dicts.\n\t\t\t# An any clause requires optional subqueries to be implemented\n\t\t\traise Exception('ambiguous case not yet implemented (dont have a list of more than one item)')\n\t\t\n\t\t# complex queries\n\t\telif type(query) == dict :\n\t\t\tif self.n.sparql.subject in query :\n\t\t\t\tsubject = query[self.n.sparql.subject]\n\t\t\t\tif isinstance(subject, URIRef) :\n\t\t\t\t\tsubject = subject.n3()\n\t\t\t\tdel query[self.n.sparql.subject]\n\t\t\t\tif subject == None :\n\t\t\t\t\tsubject = self._new_var(explicit_vars, path)\n\t\t\telse :\n\t\t\t\tsubject = self._new_var(implicit_vars, path)\n\t\t\tfor key, value in query.iteritems() :\n\t\t\t\t# print 
'k',key,'v',value\n\t\t\t\tpath2 = copy.copy(path)\n\t\t\t\tnk = self.read_parse_helper(key, path, triples, explicit_vars, implicit_vars, given_vars)\n\t\t\t\tpath2.append(key)\n\t\t\t\tnv = self.read_parse_helper(value, path2, triples, explicit_vars, implicit_vars, given_vars)\n\t\t\t\t# print '---', nk, nv, type(nk), type(nv)\n\t\t\t\t# if the new value is not a uri or a variable, then its a given value\n\t\t\t\tif len(nv) != 0 and nv[0] != '<' and nv[0] != '?' :\n\t\t\t\t\tgiven_vars.append(copy.copy(path2))\n\t\t\t\tpair = (nk, nv)\n\t\t\t\t#print 'dict', pair\n\t\t\t\ttriples.append((subject, nk, nv))\n\t\t\treturn subject\n\t\t\n\t\t# else ...\n\t\telse :\n\t\t\traise Exception(\"unkown data type: %s\" % str(type(query)))", "def getParsedQueryString(self):\n return cgi.parse_qs(self.query_string)", "def parse_qsl(qs, keep_blank_values=0, strict_parsing=0):\n pairs = []\n name_value_amp = qs.split('&')\n for name_value in name_value_amp:\n if ';' in name_value:\n pairs.extend([x, ';'] for x in name_value.split(';'))\n pairs[-1][1] = '&'\n else:\n pairs.append([name_value, '&'])\n pairs[-1][1] = ''\n r = []\n for name_value, sep in pairs:\n nv = name_value.split('=', 1)\n if len(nv) != 2:\n if strict_parsing:\n raise ValueError(\"bad query field: %r\" % name_value)\n elif len(nv) == 1:\n # None value indicates missing equal sign\n nv = (nv[0], None)\n else:\n continue\n if nv[1] or keep_blank_values:\n name = urllib.unquote(nv[0].replace('+', ' '))\n if nv[1]:\n value = urllib.unquote(nv[1].replace('+', ' '))\n else:\n value = nv[1]\n r.append((name, value, sep))\n return r", "def parse(raw_query, EXPAND_SET = False):\r\n\t\r\n\t# tokenize and tag the query using nltk tools, use .lower() to standardize the input\r\n\ttokenized_query = nltk.word_tokenize(raw_query.lower())\r\n\ttagged_query = nltk.pos_tag(tokenized_query)\r\n\t\r\n\t#master_chunk = r\"Chunk: {(<VB\\w?>|<JJ>*|<RB\\w?>)<DT>?(<NN\\w?>+)}\" \r\n\t\r\n\t\r\n\t# master_chunk now captures prepositional phrase, as they are typically part of one thought.\r\n\t\r\n\tmaster_chunk = r\"Chunk: {((<JJ\\w?>*|<RB\\w?>*)<DT>?(<NN\\w?>+))(<IN>((<JJ>*|<RB\\w?>*)<DT>?(<NN\\w?>+)))*}\" # Regex to identify chunks that may be useful \r\n\t#\t\t\t\t\tmaster_chunk breakdown\r\n\t#\r\n\t#\tFirst half : ((<JJ>*|<RB\\w?>*)<DT>?(<NN\\w?>+))\r\n\t#\t<JJ\\w?>* | <RB\\w>?>* allows an arbitrary number of adjectives to precede the noun\r\n\t# \t\"\\w\" is \"any character\" and allows the capture of all JJ and RB tags, which include JJ, JJR, JJS, RB, RBR, and RBS\r\n\t#\t<DT>? 
allows for exactly one (1) or zero (0) determiner, often this will capture things like \"no\" and then a noun\r\n\t# \t(<NN\\w>+) captures one (1) or arbitrarily more nouns\r\n\t#\t\r\n\t#\tSecond half: (<IN>((<JJ>*|<RB\\w?>*)<DT>?(<NN\\w?>+)))*\r\n\t#\t<IN> captures prepostions \"of\", \"with\", and so on.\r\n\t# \tThe rest of the expression is the same as the first half \r\n\t# \tThe final * (kleene star) allows zero (0) or more prepositional phrases to be captured\r\n\t\r\n\t\r\n\tmaster_parser = nltk.RegexpParser(master_chunk) # Create the parser from the Regex\r\n\tmaster = master_parser.parse(tagged_query) # Parse the query previously tagged\r\n\t\r\n\tchunk_list = []\r\n\tkeywords = []\r\n\tfor phrase in master:\r\n\t\tif (not isinstance(phrase, tuple)): # all non-chunks are tuples, a chunk is a nltk.tree type\r\n\t\t\tchunk_list.append(phrase)\r\n\t\t\ttmp = \"\"\r\n\t\t\tfor word in phrase: # generate keyword phrases from the chunks\r\n\t\t\t\ttmp += word[0] + \" \"\r\n\t\t\t\r\n\t\t\ttmp = tmp[:-1] # Remove final space\r\n\t\t\tkeywords.append(tmp)\r\n\t\t\t\r\n\tif EXPAND_SET: # defualt is not to expand\r\n\t\t# combine the two lists, using set() to remove any repeated phrases\r\n\t\treturn list(set(generate_keywords(chunk_list) + keywords))\r\n\telse:\r\n\t\treturn keywords", "def _parse_user_query(self, query):\n def _parse_basic_query(attr, value):\n if isinstance(value, str) and '*' in value:\n return MatchGlob(attr, value)\n else:\n return Eq(attr, value)\n\n if isinstance(query, dict):\n subqueries = []\n for attr, value in query.iteritems():\n if isinstance(value, (list, set, tuple)):\n # If value is a list or similar, we build an OR\n or_queries = []\n for or_query in value:\n or_queries.append( _parse_basic_query(attr, or_query) )\n subqueries.append( Or(*or_queries) )\n else:\n subqueries.append(_parse_basic_query(attr, value))\n query = And(*subqueries)\n return query", "def limited_parse_qsl(qs, keep_blank_values=False, encoding='utf-8',\n errors='replace', fields_limit=None):\n date = {'phenomenonTime', 'resultTime', 'validTime'}\n FIELDS_MATCH = re.compile('[&]')\n pairs = FIELDS_MATCH.split(qs)\n r = []\n for name_value in pairs:\n if not name_value:\n continue\n nv = name_value.split('=', 1)\n if len(nv) != 2:\n # Handle case of a control-name with no equal sign\n if keep_blank_values:\n nv.append('')\n else:\n continue\n if nv[1] or keep_blank_values:\n name = nv[0].replace('+', ' ')\n name = nv[0]\n name = unquote(name, encoding=encoding, errors=errors)\n if not any(a in nv[1] for a in date):\n value = nv[1].replace('+', ' ')\n value = nv[1]\n value = unquote(value, encoding=encoding, errors=errors)\n r.append((name, value))\n query_dict = {}\n for key, value in r:\n query_dict[key] = value\n return query_dict", "def _split_query(query):\n\n\tqq = query.split(' ')\n\tkeywords = []\n\taccum = None\n\tfor kw in qq: \n\t\tif accum is None: \n\t\t\tif kw.startswith('\"'):\n\t\t\t\taccum = kw[1:]\n\t\t\telif kw: \n\t\t\t\tkeywords.append(kw)\n\t\telse:\n\t\t\taccum += ' ' + kw\n\t\t\tif kw.endswith('\"'):\n\t\t\t\tkeywords.append(accum[0:-1])\n\t\t\t\taccum = None\n\tif accum is not None:\n\t\tkeywords.append(accum)\n\treturn [kw.strip() for kw in keywords if kw.strip()]", "def parse_request(self, request):\n request = str(request)\n str_start = request.find(\"GET /?\")\n str_end = request.find(\"HTTP\")\n str_full = request[str_start + 6:str_end - 1]\n\n options = {}\n temp_option = []\n temp_selector = \"\"\n\n for i, letter in enumerate(str_full):\n if letter == 
\"=\":\n options[\"\".join(temp_option)] = []\n temp_selector = \"\".join(temp_option)\n temp_option = []\n elif letter == \"&\":\n options[temp_selector] = \"\".join(temp_option)\n temp_selector = \"\"\n temp_option = []\n elif i + 1 >= len(str_full):\n temp_option.append(letter)\n options[temp_selector] = \"\".join(temp_option)\n else:\n temp_option.append(letter)\n\n return options", "def splitQuery(query):\n triples = list()\n bgp = re.search('WHERE {(.*)}', query).group(1)\n for triple in bgp.split(' . '):\n triples.append(triple.strip())\n return triples", "def cleanQuery(self, query):\n query_input = re.sub(r\"\\W+\", \" \", query).lower()\n query_input = query_input.split(\" \")\n\n parsed_query = []\n for word in self.stop_words:\n if word in query_input:\n query_input.remove(word)\n parsed_query = ' '.join(query_input)\n parsed_query = parsed_query.strip()\n return parsed_query", "def parse_query(query, nameserver, duration):\n flag_list = flags.to_text(query.response.flags)\n return {\n 'Query': get_query(nameserver, duration),\n 'QuestionSection': get_question(query),\n 'AnswerSection': get_rrs_from_rrsets(query.response.answer),\n 'AdditionalSection': get_rrs_from_rrsets(query.response.additional),\n 'AuthoritySection': get_rrs_from_rrsets(query.response.authority),\n 'ReturnCode': rcode.to_text(query.response.rcode()),\n 'ID': query.response.id,\n 'AA': 'AA' in flag_list,\n 'TC': 'TC' in flag_list,\n 'RD': 'RD' in flag_list,\n 'RA': 'RA' in flag_list,\n 'AD': 'AD' in flag_list\n }", "def query(url):", "def query_join(*query_list):\n return \"&\".join(query_list)", "def _normalize_query(self, query):\n return re.sub('\\s+', ' ', query).strip().lower()", "def modify_search(add=[], remove=[]):\n\n query = request.args.get('q', '').split()\n query = [x.strip() for x in query if x.strip()]\n\n for word in remove:\n if word in query:\n query.remove(word)\n\n for word in add:\n if word and word not in query:\n query.append(word)\n\n return \" \".join(query)", "def make_query_string(query, params):\n query_string = query\n\n index = 1\n for param in params:\n if param:\n to_replace = \"%%param%d%%\" % index\n query_string = query_string.replace(to_replace, param)\n index += 1\n\n return query_string", "def render_POST(self, request, query=None):\n # make a parser and parse the request\n parser = qp.QueryParser(request)\n if not query: query = request.content.read() \n try: \n # run the query locally\n d = parser.runquery(self.db, query)\n except Exception, e:\n log.err(\"Failing query: \" + str(query))\n log.err()\n setResponseCode(request, e, 400)\n return str(e)\n else:\n # and send the reply\n request.setHeader('Content-type', 'application/json')\n\n if not query.strip().startswith('apply'):\n # apply streams the output out itself\n d.addCallback(lambda reply: (request, reply))\n d.addCallback(self.send_reply)\n d.addErrback(lambda x: self.send_error(request, x))\n return server.NOT_DONE_YET", "def smart_query_string(parser, token):\n args = token.split_contents()\n additions = args[1:]\n\n addition_pairs = []\n while additions:\n addition_pairs.append(additions[0:2])\n additions = additions[2:]\n\n return SmartQueryStringNode(addition_pairs)", "def _split_url_string(query_string):\r\n parameters = parse_qs(to_utf8(query_string), keep_blank_values=True)\r\n for k, v in parameters.iteritems():\r\n parameters[k] = urllib.unquote(v[0])\r\n return parameters", "def querystring(parser, token):\r\n bits = token.split_contents()\r\n tag = bits.pop(0)\r\n updates = token_kwargs(bits, 
parser)\r\n # ``bits`` should now be empty of a=b pairs, it should either be empty, or\r\n # have ``without`` arguments.\r\n if bits and bits.pop(0) != \"without\":\r\n raise TemplateSyntaxError(\"Malformed arguments to '%s'\" % tag)\r\n removals = [parser.compile_filter(bit) for bit in bits]\r\n return QuerystringNode(updates, removals)", "def process_query(query):\n tokens = query.split(' ')\n keywords = ['UNION','MINUS','CROSS']\n if any(x in query for x in keywords):\n if 'UNION' in tokens:\n for i,token in enumerate(tokens):\n if token == 'UNION':\n table1 = process_query(' '.join(tokens[:i]))\n table2 = process_query(' '.join(tokens[i+1:]))\n table1.union(table2)\n result = DBTable()\n return result\n elif 'MINUS' in tokens:\n for i,token in enumerate(tokens):\n if token == 'MINUS':\n table1 = process_query(' '.join(tokens[:i]))\n table2 = process_query(' '.join(tokens[i+1:]))\n table1.setDifference(table2)\n result = DBTable()\n return result\n elif 'CROSS' in tokens:\n for i,token in enumerate(tokens):\n if token == 'CROSS':\n table1 = process_query(' '.join(tokens[:i]))\n table2 = process_query(' '.join(tokens[i+1:]))\n table1.cartesianProduct(table2)\n result = DBTable()\n return result\n\n \n else:\n # Query without the keywords UNION, MINUS or CROSS\n cols = tokens[1]\n table_no = tables_dict[tokens[3]]\n if cols == '*':\n cols = tables[table_no].columnNames\n else:\n cols = cols.split(',')\n if 'WHERE' in tokens:\n for i,t in enumerate(tokens):\n if t == 'WHERE':\n c = Clause()\n c.operand1 = tokens[i+1]\n c.operator = tokens[i+2]\n c.operand2 = tokens[i+3]\n result = tables[table_no].selection(c)\n return result.projection(cols)\n return tables[table_no].projection(cols)", "def query_later(self, text):\n self.cached_query = text.strip()", "def encoded_query_str(request):\n return updated_query_str(request)", "def _format_queries(self, body):\n for query in body:\n if \"bindVars\" in query:\n query[\"bind_vars\"] = query.pop(\"bindVars\")\n if \"runTime\" in query:\n query[\"runtime\"] = query.pop(\"runTime\")\n return body", "def webquery(self, args=(), **kw):\n \n args = list(args)\n for key, value in kw.items():\n \targs.append((key,value))\n port = 80\n method = \"POST\"\n url = \"\"\n host = urllib.localhost()\n outfile = sys.stdout\n query = []\n for key, value in args:\n if key == \"port\":\n port = int(value)\n elif key == \"method\":\n method = value.upper()\n elif key == \"url\":\n url = value\n elif key == \"host\":\n host = value\n elif key == \"file\":\n outfile = value\n elif value is None:\n query.append(urllib.quote(key))\n else:\n query.append('%s=%s' % (urllib.quote(key),urllib.quote_plus(str(value))))\n query = '&'.join(query)\n if isinstance(outfile, types.StringType):\n outfile = open(outfile,\"w\")\n if url[:1] == \"/\":\n # don't add an extra slash (purely for aesthetic purposes)\n url = \"http://%s:%d%s\" % (host,port,url)\n else:\n url = \"http://%s:%d/%s\" % (host,port,url)\n if not query:\n query = None\n elif method == \"GET\":\n url = \"%s?%s\" % (url,query)\n query = None\n inurl = urllib.urlopen(url,query)\n print url,query\n s = inurl.read(102400)\n while s:\n outfile.write(s)\n s = inurl.read(102400)", "def _submit_and_parse(request):\n answer = request.submit()\n if \"pages\" not in answer[\"query\"]:\n return ([], {})\n values = list(answer[\"query\"][\"pages\"].values())\n if \"query-continue\" in answer:\n contin = answer[\"query-continue\"]\n else:\n contin = {}\n return (values, contin)", "def cgi_parse(qs):\n d = {}\n for key, value in 
cgi.parse_qsl(qs, 1):\n if d.has_key(key):\n if isinstance(d[key], list):\n d[key].append(value)\n else:\n d[key] = [d[key], value]\n else:\n d[key] = value\n return d", "def parseURL(url):\n\n\n scheme, host, path, params, query, hash = urlparse(url)\n if not path: path = \"/\"\n\n args = parse_qs(query)\n\n escapedArgs = {}\n for name in args:\n if len(args[name]) == 1:\n escapedArgs[unquote(name)] = unquote(args[name][0])\n else:\n escapedArgs[unquote(name)] = escapedSet = []\n for item in args[name]:\n escapedSet.append(unquote(item))\n\n return host, path, params, escapedArgs", "def parse_query(query, delim='/'):\n key = ''\n prefix = ''\n postfix = ''\n\n parsed = urlparse(query)\n query = parsed.path.lstrip(delim)\n bucket = parsed.netloc\n\n if not parsed.scheme.lower() in ('', \"gs\", \"s3\", \"s3n\"):\n raise ValueError(\"Query scheme must be one of '', 'gs', 's3', or 's3n'; \"\n \"got: '%s'\" % parsed.scheme)\n storage = parsed.scheme.lower()\n\n if not bucket.strip() and query:\n toks = query.split(delim, 1)\n bucket = toks[0]\n if len(toks) == 2:\n key = toks[1]\n else:\n key = ''\n\n if not bucket.strip():\n raise ValueError(\"Could not parse bucket name from query string '%s'\" % query)\n\n tokens = query.split(\"*\")\n n = len(tokens)\n if n == 0:\n pass\n elif n == 1:\n key = tokens[0]\n elif n == 2:\n index = tokens[0].rfind(delim)\n if index >= 0:\n key = tokens[0][:(index + 1)]\n prefix = tokens[0][(index + 1):] if len(tokens[0]) > (index + 1) else ''\n else:\n prefix = tokens[0]\n postfix = tokens[1]\n else:\n raise ValueError(\"Only one wildcard ('*') allowed in query string, got: '%s'\" % query)\n\n return storage, bucket, key, prefix, postfix", "def pp_query(query):\n print(format_query(query))", "def __generateQuery(self, query):\n if query == None:\n return [\"1=1\"]\n elif type(query) is not list:\n return [query]\n else:\n return query", "def make_query_string(query_string, params):\n\n for param in params:\n if param:\n index = params.index(param)+1\n query_string = query_string.replace(f\"%param{index}%\", param)\n\n return query_string", "def parse_saildocs_query(query_str):\n # remove any trailing white space\n query_str = query_str.strip()\n command_split = query_str.split(' ', 1)\n if len(command_split) != 2:\n raise BadQuery(\"Expected a space between the command and the body\")\n command, body = command_split\n opts_args = filter(len, body.split(' '))\n if len(opts_args) > 1:\n args = opts_args[0]\n opts = opts_args[1:]\n else:\n args = opts_args[0]\n opts = None\n # Check if the command is supported\n if not command.lower() in _supported_commands:\n raise BadQuery(\"Unsupported command %s, only %s are supported\"\n % (command.lower(), ','.join(_supported_commands)))\n\n if command.lower() == 'send':\n query = parse_send_request(args)\n else:\n raise BadQuery(\"Unknown command handler.\")\n return query", "def parse_qsl(qs, keep_blank_values=0, strict_parsing=0):\n pairs = [s2 for s1 in qs.split('&') for s2 in s1.split(';')]\n r = []\n for name_value in pairs:\n if not name_value and not strict_parsing:\n continue\n nv = name_value.split('=', 1)\n if len(nv) != 2:\n if strict_parsing:\n raise ValueError, \"bad query field: %r\" % (name_value,)\n # Handle case of a control-name with no equal sign\n if keep_blank_values:\n nv.append('')\n else:\n continue\n if len(nv[1]) or keep_blank_values:\n name = unquote(nv[0].replace('+', ' '))\n value = unquote(nv[1].replace('+', ' '))\n r.append((name, value))\n\n return r", "def rebuild_url(scheme, path, fragment, 
username,\n password, hostname, port, query):\n netloc = \"@\".join(filter(None, [\n \":\".join(\n filter(None, [\n username,\n password,\n ])\n ),\n \":\".join(\n filter(None, [\n hostname,\n str(port or ''),\n ])\n )\n ]))\n\n return urllib.parse.urlunsplit([\n scheme,\n netloc,\n path,\n query,\n fragment,\n ])", "def process_query(self, query):\n def parse_query(query):\n \"\"\"Returns [(command, parameter)] list.\"\"\"\n qlist = []\n splitted = query.split(\"&\")\n for entry in splitted:\n cmd, arg = entry.split(\"=\")\n qlist.append((cmd, arg))\n return qlist\n qlist = parse_query(query)\n print \"Trying to execute query '\" + str(qlist) + \"'\"\n result = None\n q, args = qlist[0]\n try:\n method_call = getattr(self, q)\n try:\n with Timer() as t:\n if len(qlist) > 1:\n print qlist[1:]\n result = method_call(args, opt=qlist[1:])\n else:\n result = method_call(args)\n print \" --> This took %s seconds.\" % t.secs\n except TypeError as e:\n print \"ERROR:\", e\n print \"Success.\"\n except AttributeError as e:\n print e\n print \"Failed!\"\n return result", "def query_equalize(query: str) -> str:\n return ' '.join(query.replace('\\n', ' ').split())", "def build_query_part(self, input_data, table_fields_types, query_part):\n\n\t\tresult_query = '('\t\t\n\n\t\tfor index in xrange(len(input_data)):\n\n\t\t\tif query_part == 1:\n\t\t\t\tproper_value = '\"' + input_data[index] + '\"'\n\t\t\t\t\n\t\t\tif query_part == 2:\n\t\t\t\tif \"nextval\" not in input_data[index]:\n\t\t\t\t\tproper_value = self.escapeinput_data_for_sql(input_data[index], table_fields_types[index])\n\t\t\t\telse:\n\t\t\t\t\tproper_value = input_data[index]\n\n\t\t\t\t\n\t\t\tresult_query = result_query + proper_value + ','\n\n\t\t# if query_part == 2:\n\t\t# \tresult_query = result_query + '\\'\\'' + ','\n\n\t\tresult_query = result_query[:len(result_query)-1]\n\t\tresult_query = result_query + ')'\n\n\t\treturn result_query", "def rewritten_queries(question):\n # rewrites = []\n # tq = tokenize(question)\n # verb = tq[1] # 目前只处理 谁是XXX类 问题\n # rewrites.append(\n # RewrittenQuery(\"\\\"%s %s\\\"\" % (verb, \" \".join(tq[2:])),\n # QUOTED_QUERY_SCORE))\n # for j in range(2, len(tq)):\n # rewrites.append(\n # RewrittenQuery(\n # \"\\\"%s %s %s\\\"\" % (\n # \" \".join(tq[2:j + 1]), verb, \" \".join(tq[j + 1:])),\n # QUOTED_QUERY_SCORE))\n # rewrites.append(RewrittenQuery(\" \".join(tq[2:]), UNQUOTED_QUERY_SCORE))\n return [RewrittenQuery(question, 1)]", "def updated_querystring(request, params):\n original_params = request.GET.copy()\n for key in params:\n if key in original_params:\n original_params.pop(key)\n original_params.update(params)\n return original_params.urlencode()", "def updated_querystring(request, params):\n original_params = request.GET.copy()\n for key in params:\n if key in original_params:\n original_params.pop(key)\n original_params.update(params)\n return original_params.urlencode()", "def visit_query(self, query):\n return query", "def normalize_query(self):\n query_lower = self.user_input.lower()\n # Remove punctuation\n query_no_punctuation = re.sub(r\"[!#$%&'()*+,-./:;<=>?@\\^_`{|}~]+\\ *\", \" \", query_lower)\n # Remove accent from all the words\n self.query_no_accent = ''.join((c for c in \\\n unicodedata.normalize('NFD', query_no_punctuation) \\\n if unicodedata.category(c) != 'Mn'))\n\n return query_lower, query_no_punctuation, self.query_no_accent", "def process_query (file):\n\n # initialize all the dictionaries and lists we will be using\n query_data = {}\n query_data ['search'] = 
{'operations':[]}\n query_data ['filter'] = {}\n query_data ['present'] = {}\n\n temp = ''\n\n file.readline() # for when the file says SEARCH\n\n query_data ['search']['username'] = file.readline().strip()\n\n temp = file.readline().strip()\n while temp != 'FILTER': # go until the the filter section\n query_data ['search']['operations'].append (temp)\n temp = file.readline().strip()\n\n temp = file.readline().strip()\n while temp != 'PRESENT': # go until the present section\n # we make the key everything from the beginning to the first space\n # then the value is everything after the first space\n query_data ['filter'][temp[:temp.find(' ')]] = temp[temp.find(' ') + 1:]\n temp = file.readline().strip()\n\n temp = file.readline().strip()\n while temp != '': # go until the end of the file\n # same process as the previous while loop\n query_data ['present'][temp[:temp.find(' ')]] = temp[temp.find(' ') + 1:]\n temp = file.readline().strip()\n\n return query_data", "def read_request(conn):\n\n # Grab the headers from the connection socket\n temp = conn.recv(1)\n while temp[-4:] != CRLF * 2:\n temp += conn.recv(1)\n request = temp.rstrip().split(CRLF)\n\n # Pull/parse the request line...\n temp = request[0].split()\n request_line = {}\n request_line['method'] = temp[0]\n request_line['uri'] = temp[1]\n request_line['protocol'] = temp[2]\n request_line['query_string'] = urlparse(request_line['uri']).query\n\n # ... parse the query string into a dict...\n request_line['query'] = {}\n if request_line['query_string']:\n temp = parse_qs(request_line['query_string']).iteritems()\n request_line['query'] = {\n key : val[0]\n for key, val in temp\n }\n\n # ... and grab headers...\n # For this I must remove the request line\n request = request[1:]\n headers = {}\n for line in request:\n key, value = line.split(': ', 1)\n key = key.lower()\n # now handle duplicate headers\n if key not in headers:\n # if the header isn't already in, add it\n headers[key] = value\n else:\n # it's already in the headers, add it in to previous\n # value delimited by a comma (as per spec)\n headers[key] = ', '.join([headers[key], value])\n # ... and content (if it exists)\n _input = ''\n if 'content-length' in headers:\n content = ''\n while len(content) < int(headers['content-length']):\n content += conn.recv(1)\n # Parse any form data\n if 'content-type' in headers:\n if ('application/x-www-form-urlencoded' in headers['content-type']\n or 'multipart/form-data' in headers['content-type']):\n # Init the field storage...\n _input = StringIO(content)\n temp = cgi.FieldStorage(\n headers=headers, fp=_input,\n environ={'REQUEST_METHOD' : 'POST'}\n )\n # ... re-init the input stream\n _input = StringIO(content)\n # ... reset content to a dictionary...\n content = {}\n # ... 
and then parse all keys, values into content.\n for key in temp:\n lkey = key.lower()\n if temp[key].file:\n # we have a file, so let's store the FieldStorage object\n content[lkey] = temp[key]\n else:\n # we have something else, just store the value (string)\n content[lkey] = temp[key].value\n else:\n # TODO do something with other types\n # reset content to a dictionary\n content = {}\n else:\n # TODO is there a default content-type, assuming length is given?\n content = {}\n else:\n # empty content\n # WSGI spec says don't process if CONTENT-LENGTH isn't specified\n content = {}\n\n # Now to put it all together in one request object:\n request = {\n 'REQUEST_METHOD': request_line['method'],\n 'SCRIPT_NAME': '',\n 'PATH_INFO': request_line['uri'],\n 'QUERY_STRING': request_line['query_string'],\n 'query': request_line['query'],\n 'SERVER_PROTOCOL': request_line['protocol'],\n 'SERVER_PORT': conn.getsockname()[0],\n 'wsgi.version': (1, 0),\n 'wsgi.errors': sys.stderr,\n 'wsgi.multithread': False,\n 'wsgi.multiprocess': False,\n 'wsgi.run_once': False,\n 'wsgi.url_scheme': 'http',\n 'CONTENT_TYPE': (\n headers['content-type'] if 'content-type' in headers else ''\n ),\n 'CONTENT_LENGTH': (\n headers['content-length'] if 'content-length' in headers else ''\n ),\n 'wsgi.input': _input,\n 'headers': headers,\n 'content': content\n }\n\n if 'cookie' in headers:\n request['HTTP_COOKIE'] = headers['cookie']\n # TODO think about what to do with Expires, which can contain commas...\n\n return request", "def _process_query(self, sql_query):\n paramstyle = getattr(self, 'paramstyle', 'pyformat')\n query = sql_query.query(paramstyle)\n params = sql_query.values()\n return query, params", "def parser(string, queryset):\n QueryObjects.D = {}\n QueryObjects.B = []\n QueryObjects.IND = 0\n QueryObjects.TEMP_FIELD = None\n\n algebra = boolean.BooleanAlgebra()\n query_list = lexer(string)\n query_string = ' '.join(query_list)\n qs = algebra.parse(query_string)\n\n if QueryObjects.TEMP_FIELD:\n queryset = queryset.annotate(**QueryObjects.TEMP_FIELD)\n QueryObjects.TEMP_FIELD = None\n\n locals().update(QueryObjects.D.items())\n query = str(qs)\n query = eval(query)\n queryset = queryset.filter(query)\n return queryset", "def query(self, query):", "def modify_query(**values):\n args = request.args.copy()\n\n for attr, new_value in values.items():\n if new_value is not None:\n args[attr] = new_value\n elif attr in args:\n del args[attr]\n\n if args:\n return request.base_url + \"?\" + url_encode(args)\n else:\n return request.base_url", "def _get_query(request):\n query = request.GET.get(\"query\", \"\")\n date = request.GET.get(\"date\", \"\")\n timestamp = request.GET.get(\"timestamp\", None)\n sort = request.GET.get(\"sort\", \"top\").lower()\n filter = request.GET.get(\"filter\", \"following\").lower()\n\n if timestamp:\n t = parse(timestamp, ignoretz=True)\n timestamp = pytz.utc.localize(t)\n else:\n timestamp = timezone.now()\n\n start_time = ''\n end_time = ''\n\n if date:\n start_time, end_time = DateRangeParser().parse(date)\n\n get_dict = {\n \"query\": query,\n \"filter\": filter,\n \"sort\": sort,\n \"start_time\": start_time,\n \"end_time\": end_time,\n \"username\": request.GET.get(\"username\", \"\"),\n \"orderBy\": request.GET.get(\"orderBy\", \"start_time\"),\n \"direction\": request.GET.get(\"direction\", \"\"),\n \"template\": request.GET.get(\"template\", \"\"),\n \"type\": request.GET.get(\"type\", \"\"),\n \"page\": request.GET.get(\"page\", 1),\n 'timestamp': timestamp,\n }\n\n return 
get_dict, query, date, sort, filter", "def parseQuery(s):\n result = Parser.parseRule('dummy :- %s\\n' % s)\n result.lhs = None\n return result", "def insert_into_query_header(query, insert_text=''):\n if ')' in query:\n loc = query.find(')')\n # remove the existing )\n tmp: str = query.replace(')', '')\n # add paging elements where the ) once was .. + 1 for some spacing in case\n beginning: str = tmp[:loc]\n end: str = tmp[loc:]\n new_query = beginning + ' ' + insert_text + ' ) ' + end\n else:\n return query\n return new_query", "def _urlparse_splitquery(url):\r\n\r\n qpart = url.split(\"?\", 1)\r\n if len(qpart) == 2:\r\n query = qpart[1]\r\n else:\r\n query = \"\"\r\n\r\n return qpart[0], query", "def get_query_string(p, new_params=None, remove=None):\n if new_params is None:\n new_params = {}\n if remove is None:\n remove = []\n\n for r in remove:\n for k in p.keys():\n if k.startswith(r):\n del p[k]\n for k, v in new_params.items():\n if k in p and v is None:\n del p[k]\n elif v is not None:\n p[k] = v\n return mark_safe(\n '?' + '&amp;'.join(\n [u'%s=%s' % (k, v) for k, v in p.items()]\n ).replace(' ', '%20')\n )", "def SplitQuery(self, request, context):\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)", "def append_to_query_string(url, key, value) -> str:\n url = list(urlparse(url))\n query = dict(parse_qsl(url[4]))\n query[key] = value\n url[4] = '&'.join(f'{p}={v}' for p, v in query.items())\n\n return urlunparse(url)", "def normalize_url(url, unsplit=True, sort_query=True, strip_authentication=True,\n strip_trailing_slash=True, strip_index=True, strip_protocol=True,\n strip_irrelevant_subdomains=True, strip_lang_subdomains=False, strip_lang_query_items=False,\n strip_fragment='except-routing', normalize_amp=True, fix_common_mistakes=True,\n infer_redirection=True, quoted=True):\n original_url_arg = url\n\n if infer_redirection:\n url = resolve(url)\n\n if isinstance(url, SplitResult):\n has_protocol = bool(splitted.scheme)\n splitted = url\n else:\n has_protocol = PROTOCOL_RE.match(url)\n\n # Ensuring scheme so parsing works correctly\n if not has_protocol:\n url = 'http://' + url\n\n # Parsing\n try:\n splitted = urlsplit(url)\n except ValueError:\n return original_url_arg\n\n scheme, netloc, path, query, fragment = splitted\n\n # Fixing common mistakes\n if fix_common_mistakes:\n if query:\n query = re.sub(MISTAKES_RE, '&', query)\n\n # Handling punycode\n netloc = decode_punycode(netloc)\n\n # Dropping :80 & :443\n if netloc.endswith(':80'):\n netloc = netloc[:-3]\n elif netloc.endswith(':443'):\n netloc = netloc[:-4]\n\n # Normalizing the path\n if path:\n trailing_slash = False\n if path.endswith('/') and len(path) > 1:\n trailing_slash = True\n path = normpath(path)\n if trailing_slash and not strip_trailing_slash:\n path = path + '/'\n\n # Handling Google AMP suffixes\n if normalize_amp:\n path = AMP_SUFFIXES_RE.sub('', path)\n\n # Dropping index:\n if strip_index:\n segments = path.rsplit('/', 1)\n\n if len(segments) != 0:\n last_segment = segments[-1]\n filename, ext = splitext(last_segment)\n\n if filename == 'index':\n segments.pop()\n path = '/'.join(segments)\n\n # Dropping irrelevant query items\n if query:\n domain_filter = None\n\n if splitted.hostname:\n domain_filter = next(\n (f for d, f in PER_DOMAIN_QUERY_FILTERS if splitted.hostname.endswith(d)),\n None\n )\n\n qsl = parse_qsl(query, keep_blank_values=True)\n qsl = [\n stringify_qs(item)\n for item in qsl\n if not should_strip_query_item(\n item,\n normalize_amp=normalize_amp,\n 
strip_lang_query_items=strip_lang_query_items,\n domain_filter=domain_filter\n )\n ]\n\n if sort_query:\n qsl = sorted(qsl)\n\n query = '&'.join(qsl)\n\n # Dropping fragment if it's not routing\n if fragment and strip_fragment:\n if strip_fragment is True or not should_strip_fragment(fragment):\n fragment = ''\n\n # Always dropping trailing slash with empty query & fragment\n if path == '/' and not fragment and not query:\n path = ''\n\n # Dropping irrelevant subdomains\n if strip_irrelevant_subdomains:\n netloc = re.sub(\n IRRELEVANT_SUBDOMAIN_AMP_RE if normalize_amp else IRRELEVANT_SUBDOMAIN_RE,\n '',\n netloc\n )\n\n # Dropping language as subdomains\n if strip_lang_subdomains:\n netloc = strip_lang_subdomains_from_netloc(netloc)\n\n # Dropping scheme\n if strip_protocol or not has_protocol:\n scheme = ''\n\n # Dropping authentication\n if strip_authentication:\n netloc = netloc.split('@', 1)[-1]\n\n # Normalizing AMP subdomains\n if normalize_amp and netloc.startswith('amp-'):\n netloc = netloc[4:]\n\n # Dropping trailing slash\n if strip_trailing_slash and path.endswith('/'):\n path = path.rstrip('/')\n\n # Quoting or not\n if quoted:\n path = quote(path)\n query = quote(query, RESERVED_CHARACTERS)\n fragment = quote(fragment, SAFE_CHARACTERS)\n else:\n path = unquote(path)\n query = unquote(query)\n fragment = unquote(fragment)\n\n # Result\n result = SplitResult(\n scheme,\n netloc.lower(),\n path,\n query,\n fragment\n )\n\n if not unsplit:\n return result\n\n # TODO: check if works with `unsplit=False`\n if strip_protocol or not has_protocol:\n result = urlunsplit(result)[2:]\n else:\n result = urlunsplit(result)\n\n return result", "def make_complex_query_set(self):\n\n query = self.request.GET.get(\"q\")\n category_name = self.request.GET.get(\"category_name\")\n o_net_soc_code = self.request.GET.get(\"o_net_soc_code\")\n description = self.request.GET.get(\"description\")\n job_title = self.request.GET.get(\"job_title\")\n level = self.request.GET.get(\"level\", '')\n\n q_object = Q()\n\n if query:\n q_object.add((\n Q(category__category_name__icontains=query) |\n Q(category__o_net_soc_code__icontains=query) |\n Q(category__description__icontains=query) |\n Q(category__job_title__description__icontains=query) |\n Q(category__job_title__title__icontains=query)\n ), Q.OR)\n\n if query.isnumeric():\n q_object.add(\n Q(category__job_title__level__icontains=int(query)), Q.OR)\n\n else:\n if category_name:\n q_object.add(\n Q(category__category_name__icontains=category_name),\n Q.AND)\n\n if o_net_soc_code:\n q_object.add(\n Q(category__o_net_soc_code__icontains=o_net_soc_code),\n Q.AND)\n\n if description:\n q_object.add((\n Q(category__description__icontains=description) |\n Q(\n category__job_title__description__icontains=description)\n ), Q.AND)\n\n if job_title:\n q_object.add(\n Q(category__job_title__title__icontains=job_title),\n Q.AND)\n\n if level:\n if level.isnumeric():\n q_object.add(\n Q(category__job_title__level__icontains=int(level)),\n Q.AND)\n else:\n raise Exception(\n ErrorMessage.WRONG_FIELD_TYPE.value.format(\"level\",\n \"numeric\"))\n\n return q_object", "def parse_query(self, query_dict):\n if query_dict is None:\n return xapian.Query('') # Match everything\n elif query_dict == {}:\n return xapian.Query() # Match nothing\n\n query_tree = self.build_query_tree(query_dict)\n\n return query_tree.to_query(self.schema, self.database)", "def process_sparql_query_text(query_text, loader, call_name, extraMetadata):\n # We get the endpoint name first, since some 
query metadata fields (eg enums) require it\n endpoint, _ = gquery.guess_endpoint_uri(query_text, loader)\n glogger.debug(\"Read query endpoint: {}\".format(endpoint))\n\n try:\n query_metadata = gquery.get_metadata(query_text, endpoint)\n except Exception as e:\n raise Exception('Could not parse query {}: {}'.format(call_name, str(e)))\n\n tags = query_metadata['tags'] if 'tags' in query_metadata else []\n\n summary = query_metadata['summary'] if 'summary' in query_metadata else \"\"\n\n description = query_metadata['description'] if 'description' in query_metadata else \"\"\n\n method = query_metadata['method'].lower() if 'method' in query_metadata else \"\"\n if method not in ['get', 'post', 'head', 'put', 'delete', 'options', 'connect']:\n method = \"\"\n\n pagination = query_metadata['pagination'] if 'pagination' in query_metadata else \"\"\n\n endpoint_in_url = query_metadata['endpoint_in_url'] if 'endpoint_in_url' in query_metadata else True\n\n # Processing of the parameters\n params = []\n\n # PV properties\n item_properties = {}\n\n # If this query allows pagination, add page number as parameter\n if pagination:\n params.append(pageUtils.getSwaggerPaginationDef(pagination))\n\n if query_metadata['type'] in ['SelectQuery', 'ConstructQuery', 'InsertData']:\n # TODO: do something intelligent with the parameters!\n # As per #3, prefetching IRIs via SPARQL and filling enum\n parameters = query_metadata['parameters']\n\n for _, p in list(parameters.items()):\n param = {}\n param['name'] = p['name']\n param['type'] = p['type']\n param['required'] = p['required']\n param['in'] = \"query\"\n param['description'] = \"A value of type {} that will substitute {} in the original query\".format(\n p['type'], p['original'])\n if 'lang' in p:\n param['description'] = \"A value of type {}@{} that will substitute {} in the original query\".format(\n p['type'], p['lang'], p['original'])\n if 'format' in p:\n param['format'] = p['format']\n param['description'] = \"A value of type {} ({}) that will substitute {} in the original query\".format(\n p['type'], p['format'], p['original'])\n if 'enum' in p:\n param['enum'] = p['enum']\n if 'default' in p:\n param['default'] = p['default']\n\n params.append(param)\n\n if endpoint_in_url:\n endpoint_param = {}\n endpoint_param['name'] = \"endpoint\"\n endpoint_param['type'] = \"string\"\n endpoint_param['in'] = \"query\"\n endpoint_param['description'] = \"Alternative endpoint for SPARQL query\"\n endpoint_param['default'] = endpoint\n params.append(endpoint_param)\n\n # If this is a URL generated spec we need to force API calls with the specUrl parameter set\n if type(loader) is URLLoader:\n specUrl_param = {}\n specUrl_param['name'] = \"specUrl\"\n specUrl_param['type'] = \"string\"\n specUrl_param['in'] = \"query\"\n specUrl_param['description'] = \"URL of the API specification\"\n specUrl_param['default'] = loader.getRawRepoUri()\n params.append(specUrl_param)\n\n if query_metadata['type'] == 'SelectQuery':\n # Fill in the spec for SELECT\n if not method:\n method = 'get'\n for pv in query_metadata['variables']:\n item_properties[pv] = {\n \"name\": pv,\n \"type\": \"object\",\n \"required\": [\"type\", \"value\"],\n \"properties\": {\n \"type\": {\n \"type\": \"string\"\n },\n \"value\": {\n \"type\": \"string\"\n },\n \"xml:lang\": {\n \"type\": \"string\"\n },\n \"datatype\": {\n \"type\": \"string\"\n }\n }\n }\n\n elif query_metadata['type'] == 'ConstructQuery':\n if not method:\n method = 'get'\n elif query_metadata['type'] == 'InsertData' or 
query_metadata['type'] == 'Modify': # UPDATE queries should map here\n if not method:\n method = 'post'\n elif query_metadata['type'] == 'UNKNOWN':\n glogger.warning(\"grlc could not parse this query; assuming a plain, non-parametric SELECT in the API spec\")\n if not method:\n method = 'get'\n else:\n # TODO: process all other kinds of queries\n glogger.debug('Could not parse query {}: Query of type {} is currently unsupported'.format(call_name, query_metadata['type']))\n raise Exception('Could not parse query {}: Query of type {} is currently unsupported'.format(call_name, query_metadata['type']))\n\n # Finally: main structure of the callname spec\n item = packItem('/' + call_name, method, tags, summary, description, params, query_metadata, extraMetadata)\n\n return item", "def __call__(self, request):\n if self.where == \"qs\":\n parts = urlparse(request.url)\n qs = parse_qs(parts.query)\n qs[self.qs_key] = self.token\n request.url = urlunparse(\n (\n parts.scheme,\n parts.netloc,\n parts.path,\n parts.params,\n urlencode(qs),\n parts.fragment,\n )\n )\n elif self.where == \"header\":\n request.headers[\"Authorization\"] = \"Bearer {}\".format(self.token)\n return request", "def fetch_posting_list(query):\n\n global final_dictionary\n global final_doc_set\n\n proximity_window_pattern = re.compile('^[0-9]*')\n proximity_operator_pattern = re.compile('[0-9]*\\([0-9a-z ]*\\)')\n\n proximity_operator_list = re.findall(proximity_operator_pattern, query)\n\n if proximity_operator_list:\n for item in proximity_operator_list:\n proximity_query = item[item.find(\"(\") + 1: item.find(\")\")]\n query = query.replace(item, '')\n # The proximity operator is processed in below function\n process_proximity_operator(int(re.search(proximity_window_pattern, item).group()),\n proximity_query.split())\n\n # All the query terms other than proximity operator are processed below.\n # All the documents ids are added to the the global set since it is OR relation.\n query_words = query.split()\n if query_words:\n for word in query_words:\n for key in final_dictionary[pre_process(word)][1]:\n final_doc_set.add(key)\n\n return", "def urlencode(query):\n\n if hasattr(query, 'items'):\n # mapping objects\n query = query.items()\n l = []\n for k, v in query:\n k = quote_plus(k)\n if isinstance(v, basestring):\n v = quote_plus(v)\n l.append(k + '=' + v)\n else:\n v = quote_plus(unicode(v))\n l.append(k + '=' + v)\n return '&'.join(l)", "def parse_qs(query_string):\n query_string = to_utf8_if_unicode(query_string) or \"\"\n if query_string.startswith(\"?\"):\n logging.warning(\"Ignoring `?` query string prefix -- `%r`\" % query_string)\n query_string = query_string[1:]\n return _parse_qs(query_string, keep_blank_values=True)", "def query_append(*query_params):\n li = []\n for qp in query_params:\n qs = urlencode_s(query_unflatten(qp))\n if qs:\n li.append(qs)\n return \"&\".join(li)", "def query(self, query, request_type=None):\n\n #encode to UTF-8\n try: query = query.encode(\"utf-8\")\n except: query = query.decode('raw_unicode_escape').encode(\"utf-8\")\n\n lowercase_query = query.lower()\n if lowercase_query.startswith(\"select\") or \\\n lowercase_query.startswith(\"describe\") or \\\n lowercase_query.startswith(\"show\") or \\\n request_type==\"GET\":\n\n return self._get(urllib.urlencode({'sql': query}))\n\n else:\n return self._post(urllib.urlencode({'sql': query}))", "def inspect_query(querystring: str) -> dict:\n return _parse_query(querystring)", "def __post_request_nlpserver(query):\n # first, concatenate 
all sentences\n if isinstance(query, str):\n query = query.replace(\".\", \" \").replace(\"(\", \"\").replace(\")\", \"\").replace(\",\", \" \")\n else:\n query = query.decode(\"utf-8\").replace(\".\", \" \").replace(\"(\", \"\").replace(\")\", \"\").replace(\",\", \" \")\n if nlp_server_address and nlp_server_port:\n # second, call the nlp server\n nlp_server_response = requests.post(\"{}:{}{}\".format(nlp_server_address, nlp_server_port, nlp_server_path),\n data=query)\n else:\n # second, call the nlp server\n nlp_server_response = requests.post(\"{}{}\".format(nlp_server_address, nlp_server_path), data=query)\n if nlp_server_response.status_code != 200:\n raise Exception(nlp_server_response.content)\n return nlp_server_response.json()", "def parse_query_params(query_string):\n # Parse the query param string\n parsed = urlparse(query_string)\n print(parsed)\n query_params = dict(parse_qs(parsed.path))\n print(query_params)\n # Get the value from the list\n query_params = {k: v[0] for k, v in query_params.items()}\n return query_params" ]
[ "0.6525811", "0.57684", "0.5745191", "0.56635463", "0.560149", "0.5542734", "0.55224484", "0.54659873", "0.544222", "0.5429328", "0.5382041", "0.5341693", "0.5314705", "0.53040624", "0.5260107", "0.5229328", "0.5132951", "0.5106477", "0.5103963", "0.50939167", "0.5092407", "0.5080741", "0.5078795", "0.50764006", "0.50609964", "0.5023244", "0.5009366", "0.49872598", "0.49864", "0.49708217", "0.49671215", "0.49626783", "0.49625447", "0.49615377", "0.49505198", "0.49280128", "0.49249658", "0.49248824", "0.4914852", "0.49085727", "0.4900583", "0.48870534", "0.48533964", "0.48447677", "0.4831671", "0.47986543", "0.47984555", "0.47832084", "0.47807148", "0.4777417", "0.47694862", "0.47679007", "0.4759015", "0.47520036", "0.47471324", "0.47421613", "0.4741235", "0.47408196", "0.47343096", "0.47295806", "0.47063532", "0.47061795", "0.47026214", "0.4699986", "0.46895024", "0.46751383", "0.4674029", "0.46710294", "0.46694207", "0.46662444", "0.46551764", "0.46551764", "0.46533644", "0.4649612", "0.46449333", "0.46384782", "0.46270394", "0.46262646", "0.46194908", "0.46163467", "0.4612159", "0.460499", "0.4601164", "0.4597513", "0.4580824", "0.45672694", "0.45659187", "0.45476565", "0.454721", "0.45459685", "0.45410618", "0.4537675", "0.45364413", "0.45339555", "0.45286134", "0.45254087", "0.44951212", "0.44942805", "0.4492087", "0.44908842" ]
0.55001813
7
Same as urlparse.urlunsplit, but with extra UNC path handling on Windows.
def urlunsplit(urlparts):
    res = urlparse.urlunsplit(urlparts)
    if os.name == 'nt' and urlparts[0] == 'file' and '|' not in urlparts[2]:
        # UNC paths must have 4 slashes: 'file:////server/path'
        # Depending on the path in urlparts[2], urlparse.urlunsplit()
        # left only two or three slashes. This is fixed below
        repl = 'file://' if urlparts[2].startswith('//') else 'file:/'
        res = res.replace('file:', repl)
    return res
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def urlparse(url):\n\tunquote_url=urllib.parse.unquote(url)\n\treturn unquote_url", "def _split_url(url):\n return url[1:].split('/')", "def split(path):\r\n if path.lower().startswith(\"smb://\"):\r\n if '/' not in path[6:]:\r\n path = path.replace(\"smb://\", \"smb:///\", 1)\r\n return path.rsplit('/', 1)\r\n else:\r\n return os.path.split(path)", "def normalize_url(url, unsplit=True, sort_query=True, strip_authentication=True,\n strip_trailing_slash=True, strip_index=True, strip_protocol=True,\n strip_irrelevant_subdomains=True, strip_lang_subdomains=False, strip_lang_query_items=False,\n strip_fragment='except-routing', normalize_amp=True, fix_common_mistakes=True,\n infer_redirection=True, quoted=True):\n original_url_arg = url\n\n if infer_redirection:\n url = resolve(url)\n\n if isinstance(url, SplitResult):\n has_protocol = bool(splitted.scheme)\n splitted = url\n else:\n has_protocol = PROTOCOL_RE.match(url)\n\n # Ensuring scheme so parsing works correctly\n if not has_protocol:\n url = 'http://' + url\n\n # Parsing\n try:\n splitted = urlsplit(url)\n except ValueError:\n return original_url_arg\n\n scheme, netloc, path, query, fragment = splitted\n\n # Fixing common mistakes\n if fix_common_mistakes:\n if query:\n query = re.sub(MISTAKES_RE, '&', query)\n\n # Handling punycode\n netloc = decode_punycode(netloc)\n\n # Dropping :80 & :443\n if netloc.endswith(':80'):\n netloc = netloc[:-3]\n elif netloc.endswith(':443'):\n netloc = netloc[:-4]\n\n # Normalizing the path\n if path:\n trailing_slash = False\n if path.endswith('/') and len(path) > 1:\n trailing_slash = True\n path = normpath(path)\n if trailing_slash and not strip_trailing_slash:\n path = path + '/'\n\n # Handling Google AMP suffixes\n if normalize_amp:\n path = AMP_SUFFIXES_RE.sub('', path)\n\n # Dropping index:\n if strip_index:\n segments = path.rsplit('/', 1)\n\n if len(segments) != 0:\n last_segment = segments[-1]\n filename, ext = splitext(last_segment)\n\n if filename == 'index':\n segments.pop()\n path = '/'.join(segments)\n\n # Dropping irrelevant query items\n if query:\n domain_filter = None\n\n if splitted.hostname:\n domain_filter = next(\n (f for d, f in PER_DOMAIN_QUERY_FILTERS if splitted.hostname.endswith(d)),\n None\n )\n\n qsl = parse_qsl(query, keep_blank_values=True)\n qsl = [\n stringify_qs(item)\n for item in qsl\n if not should_strip_query_item(\n item,\n normalize_amp=normalize_amp,\n strip_lang_query_items=strip_lang_query_items,\n domain_filter=domain_filter\n )\n ]\n\n if sort_query:\n qsl = sorted(qsl)\n\n query = '&'.join(qsl)\n\n # Dropping fragment if it's not routing\n if fragment and strip_fragment:\n if strip_fragment is True or not should_strip_fragment(fragment):\n fragment = ''\n\n # Always dropping trailing slash with empty query & fragment\n if path == '/' and not fragment and not query:\n path = ''\n\n # Dropping irrelevant subdomains\n if strip_irrelevant_subdomains:\n netloc = re.sub(\n IRRELEVANT_SUBDOMAIN_AMP_RE if normalize_amp else IRRELEVANT_SUBDOMAIN_RE,\n '',\n netloc\n )\n\n # Dropping language as subdomains\n if strip_lang_subdomains:\n netloc = strip_lang_subdomains_from_netloc(netloc)\n\n # Dropping scheme\n if strip_protocol or not has_protocol:\n scheme = ''\n\n # Dropping authentication\n if strip_authentication:\n netloc = netloc.split('@', 1)[-1]\n\n # Normalizing AMP subdomains\n if normalize_amp and netloc.startswith('amp-'):\n netloc = netloc[4:]\n\n # Dropping trailing slash\n if strip_trailing_slash and path.endswith('/'):\n path = path.rstrip('/')\n\n 
# Quoting or not\n if quoted:\n path = quote(path)\n query = quote(query, RESERVED_CHARACTERS)\n fragment = quote(fragment, SAFE_CHARACTERS)\n else:\n path = unquote(path)\n query = unquote(query)\n fragment = unquote(fragment)\n\n # Result\n result = SplitResult(\n scheme,\n netloc.lower(),\n path,\n query,\n fragment\n )\n\n if not unsplit:\n return result\n\n # TODO: check if works with `unsplit=False`\n if strip_protocol or not has_protocol:\n result = urlunsplit(result)[2:]\n else:\n result = urlunsplit(result)\n\n return result", "def clean_url(url):\n scheme, netloc, path, query, fragment = url_parse.urlsplit(url)\n path = url_parse.quote(path)\n url = url_parse.urlunsplit((scheme, netloc, path, query, fragment))\n return url", "def process_url(url):\n parsed = urlparse(url)\n if parsed.scheme:\n return parsed.netloc, parsed.path\n else:\n host_part = parsed.path\n hostname = host_part.partition(\"/\")[0]\n path = \"/\" + host_part.partition(\"/\")[2]\n return hostname, path", "def normalize_url(url: str) -> str:\n parts = urlparse(url)\n\n path = quote(parts.path)\n while '//' in path:\n path = path.replace(\"//\", \"/\")\n\n return urlunparse(parts._replace(path=path))", "def url_split(url):\n scheme, netloc = urllib.splittype(url)\n host, document = urllib.splithost(netloc)\n port = default_ports.get(scheme, 0)\n if host:\n host = host.lower()\n host, port = splitport(host, port=port)\n return scheme, host, port, document", "def url_unsplit(parts):\n if parts[2] == default_ports.get(parts[0]):\n return \"%s://%s%s\" % (parts[0], parts[1], parts[3])\n return \"%s://%s:%d%s\" % parts", "def normalize(cls, url):\n # Always ignore the fragment\n scheme, netloc, path, query, _ = urlsplit(url)\n uri_relative = (None, None, path, query, None)\n uri_without_query = (scheme, netloc, path, None, None)\n uri_relative_without_query = (None, None, path, None, None)\n urls = [url]\n if query:\n urls.append(urlunsplit(uri_without_query))\n urls.append('~' + urlunsplit(uri_relative))\n if query:\n urls.append('~' + urlunsplit(uri_relative_without_query))\n return urls", "def _split_url(self, url):\n url_split = urlsplit(url)\n try:\n if url_split.netloc is not None and url_split.netloc.find(\" \") > 0:\n return None\n decoded_netloc = url_split.netloc.decode(\"utf-8\").encode(\"idna\")\n url_parts = (\n url_split.scheme,\n decoded_netloc,\n url_split.path,\n url_split.query,\n url_split.fragment)\n url_splitted = urlunsplit(url_parts)\n return url_splitted\n except UnicodeError:\n return None", "def _construct_url(parts):\n results = []\n last_idx = len(parts) - 1\n for n, part in enumerate(parts):\n if n > 0:\n part = part.lstrip('/')\n if n < last_idx:\n part = part.rstrip('/')\n if part:\n results.append(part)\n return '/'.join(results)", "def normalize_uri(uri):\n return normalize_uri_result(uri).unsplit()", "def url_fix(s, charset='utf-8'):\n # First step is to convert backslashes (which are invalid in URLs anyways)\n # to slashes. 
This is consistent with what Chrome does.\n s = s.replace('\\\\', '/')\n\n # For the specific case that we look like a malformed windows URL\n # we want to fix this up manually:\n if (\n s.startswith('file://') and\n s[7:8].isalpha() and\n s[8:10] in (':/', '|/')\n ):\n s = 'file:///' + s[7:]\n\n url = urlsplit(s)\n\n netloc = _encode_netloc(url)\n\n path = urlquote(\n url.path, encoding=charset, safe='/%+$!*\\'(),'\n )\n qs = urlquote_plus(\n url.query, encoding=charset, safe=':&%=+$!*\\'(),'\n )\n anchor = urlquote_plus(\n url.fragment, encoding=charset, safe=':&%=+$!*\\'(),'\n )\n\n return urlunsplit(\n (url.scheme, netloc, path, qs, anchor)\n )", "def _urlparse_splitscheme(url):\r\n # The scheme is valid only if it contains these characters.\r\n scheme_chars = \\\r\n \"abcdefghijklmnopqrstuvwxyz0123456789+-.\"\r\n\r\n scheme = \"\"\r\n rest = url\r\n\r\n spart = url.split(\":\", 1)\r\n if len(spart) == 2:\r\n\r\n # Normalize the scheme.\r\n spart[0] = spart[0].lower()\r\n\r\n # A scheme is valid only if it starts with an alpha character.\r\n if spart[0] and spart[0][0].isalpha():\r\n for char in spart[0]:\r\n if char not in scheme_chars:\r\n break\r\n (scheme, rest) = spart\r\n\r\n return scheme, rest", "def _split(self, uri):\n if '/' in uri:\n return uri.split('/', 1)\n return [uri, None]", "def pathSplit(path):\n path = re.split('/|\\\\\\\\', path)\n return path", "def path_to_url(path):\r\n if os.sep == '/':\r\n return path\r\n else:\r\n return '/'.join(split_all(path))", "def splitpath(path):\n\n # FIXME perhaps call op.split repetitively would be better.\n #s = string.split( path, '/' ) # we work with fwd slash only inside.\n\n#We have decided to use all kind of separator\n s = []\n while True:\n first, second = op.split(path)\n s.append(second)\n if first == \"\":\n break\n else:\n path = first\n s.reverse()\n if len(s) == 1 and s[0] == \"\":\n s = []\n return s", "def process_url(url):\n # only get url path, remove host,params.\n url = urlparse(url).path\n # url = list(url)\n # for i in range(len(url)):\n # if _is_punctuation(url[i]):\n # url[i] = \" \"\n # url = ''.join(url)\n # url = ' '.join(url.split())\n return url", "def url_fix(s, charset='utf-8'):\n if isinstance(s, unicode):\n s = s.encode(charset, 'ignore')\n scheme, netloc, path, qs, anchor = urlparse.urlsplit(s)\n path = urllib.quote(path, '/%')\n qs = urllib.quote_plus(qs, ':&=')\n return urlparse.urlunsplit((scheme, netloc, path, qs, anchor))", "def normalize_url(url):\n if not url.startswith((\"git+\", \"hg+\")):\n return url\n return url[4:]", "def get_url_components(self, url):\n if 'http://' not in url and 'https://' not in url:\n print(\"Protocol not found, skipping: \" + url)\n return False\n if url[:7] == 'http://':\n protocol = url[:7]\n file_path = url[7:]\n elif url[:8] == 'https://':\n protocol = url[:8]\n file_path = url[8:]\n else:\n print(\"Error when parsing protocol. 
Skipping: \" + url)\n return False\n # Split the string from the last '/'.\n # To do this, we reverse the string, split from the first '/' and\n # then reverse them both back.\n filename, root_and_directory = [x[::-1] for x in file_path[::-1].split('/', 1)]\n # Replace the lost '/'\n root_and_directory = root_and_directory + '/'\n root, directory = root_and_directory.split('/', 1)\n directory = '/' + directory\n return [protocol, root, directory, filename]", "def strip(url):\r\n split = list(urlsplit(url))\r\n split[4]=''\r\n return urlunsplit(split)", "def unsplit_svn_url(tup):\n repo, prefix, project, branch, suffix, peg = tuple(tup)\n res = [repo or '^',\n prefix or '/',\n ]\n if project:\n res.extend([project, '/'])\n if branch:\n res.extend([branch, '/'])\n if suffix:\n res.append(suffix)\n if peg:\n res.extend(['@', str(peg)])\n return ''.join(res)", "def _normalize_path(path):\n\n i = 0\n for c in path:\n if c != \"/\":\n break\n i = i + 1\n\n if i:\n return path[(i - 1) :]\n\n return path", "def _cleanpath(self, path):\n \n slashes = self.remotepathsep*2\n while slashes in path:\n path = path.replace(slashes,self.remotepathsep)\n \n if path.endswith(self.remotepathsep):\n path = path[:-1]\n \n return path", "def _cleanpath(self, path):\n \n slashes = self.remotepathsep*2\n while slashes in path:\n path = path.replace(slashes,self.remotepathsep)\n \n if path.endswith(self.remotepathsep):\n path = path[:-1]\n \n return path", "def _path_parts(path):\n # clean it up. this removes duplicate '/' characters and any that may\n # exist at the front or end of the path.\n return [pp for pp in path.split(\"/\") if pp]", "def get_normalized_url(url):\r\n scheme, netloc, path, params, query, fragment = urlparse(url)\r\n\r\n # Exclude default port numbers.\r\n if scheme == 'http' and netloc[-3:] == ':80':\r\n netloc = netloc[:-3]\r\n elif scheme == 'https' and netloc[-4:] == ':443':\r\n netloc = netloc[:-4]\r\n if scheme not in ('http', 'https'):\r\n raise ValueError(\"Unsupported URL %s (%s).\" % (url, scheme))\r\n\r\n # Normalized URL excludes params, query, and fragment.\r\n return urlunparse((scheme, netloc, path, None, None, None))", "def clean_url(url: str) -> str:\n r = urlparse(url)\n parts = list(r)\n # Add a / to the end of the path if it isn't there\n if not parts[2].endswith(\"/\"):\n parts[2] += \"/\"\n return urlunparse(parts)", "def test_remove_extra_slash():\n # TODO: Should we actually do this?\n # TODO: See https://webmasters.stackexchange.com/questions/8354/what-does-the-double-slash-mean-in-urls/8381#8381\n assert (normalize_url(\"http://www.example.com/foo//bar.html\") ==\n \"http://www.example.com/foo/bar.html\")\n assert(normalize_url(\"http://example.com///abc\") ==\n \"http://example.com/abc\")", "def __ParseUrl(url):\n return urlparse(url)", "def split_url(url):\n match = re.match(\"(.*\\.org)(/.*)\", url)\n return match.group(1), match.group(2)", "def convert_unc(host, path):\n return ''.join(['\\\\\\\\', host, '\\\\', path.replace(':', '$')])", "def parse(path, root=True):\n if path.startswith(\"/\"):\n return path[1:], \"\"\n\n if \"/\" not in path:\n return path, \"\"\n\n if root:\n return path.split(\"/\", 1)\n else:\n return path.rsplit(\"/\", 1)", "def clean_url(url):\n o = urlsplit(url)\n return \"{scheme}://{netloc}{path}\".format(\n scheme=o[0], netloc=o[1], path=o[2],\n )", "def _urlparse_splitnetloc(url, start=0):\r\n\r\n # By default, the netloc is delimited by the end of the URL.\r\n delim = len(url)\r\n\r\n # Find the left-most delimiter.\r\n for char in 
\"/?#\":\r\n xdelim = url.find(char, start)\r\n if xdelim >= 0:\r\n delim = min(delim, xdelim)\r\n\r\n # Return the netloc and the rest of the URL.\r\n return url[start:delim], url[delim:]", "def normalize_filename(url):\n fname = url.replace('file://', '')\n if os.sep != '/' and not os.path.exists(fname):\n fname = fname.lstrip('/')\n return fname", "def test_remove_dot_segments():\n assert (normalize_url(\"http://www.example.com/../a/b/../c/./d.html\") ==\n \"http://www.example.com/a/c/d.html\")", "def _normalise_last_slashes(url_segment):\n return url_segment if not url_segment.endswith(\"/\") else url_segment[:-1]", "def url_fix_host(urlparts):\n # if not urlparts[1]:\n # urlparts[2] = urllib.unquote(urlparts[2])\n # return False\n userpass, netloc = urllib.splituser(urlparts[1])\n if userpass:\n userpass = urllib.unquote(userpass)\n netloc, is_idn = idna_encode(urllib.unquote(netloc).lower())\n # a leading backslash in path causes urlsplit() to add the\n # path components up to the first slash to host\n # try to find this case...\n i = netloc.find(\"\\\\\")\n if i != -1:\n # ...and fix it by prepending the misplaced components to the path\n comps = netloc[i:] # note: still has leading backslash\n if not urlparts[2] or urlparts[2] == '/':\n urlparts[2] = comps\n else:\n urlparts[2] = \"%s%s\" % (comps, urllib.unquote(urlparts[2]))\n netloc = netloc[:i]\n else:\n # a leading ? in path causes urlsplit() to add the query to the\n # host name\n i = netloc.find(\"?\")\n if i != -1:\n netloc, urlparts[3] = netloc.split('?', 1)\n # path\n urlparts[2] = urllib.unquote(urlparts[2])\n if userpass and userpass != ':':\n # append AT for easy concatenation\n userpass += \"@\"\n else:\n userpass = \"\"\n\n if urlparts[0] in default_ports:\n dport = default_ports[urlparts[0]]\n host, port = splitport(netloc, port=dport)\n\n host = host.rstrip('. 
')\n if port != dport:\n host = \"%s:%d\" % (host, port)\n netloc = host\n urlparts[1] = userpass + netloc\n return is_idn", "def explode(part):\n if isinstance(part, str):\n ans = []\n while len(part) > 0:\n parts = part.partition(\"/\")\n ans.append(parts[0])\n if parts[1] != \"\":\n ans.append(SLASH)\n part = parts[2]\n return ans\n\n return [part]", "def get_url_path(url):\n return filter(lambda x: x!='', url.split('/'))", "def make_clean_url(url):\n return urlparse.urldefrag(url)[0]", "def _urlparse_splitquery(url):\r\n\r\n qpart = url.split(\"?\", 1)\r\n if len(qpart) == 2:\r\n query = qpart[1]\r\n else:\r\n query = \"\"\r\n\r\n return qpart[0], query", "def normalize_url(node):\n if not node:\n node = DEFAULT_NODE\n elif '://' not in node:\n node = '//{}'.format(node)\n parts = urlparse(node, scheme='http', allow_fragments=False)\n port = parts.port if parts.port else _get_default_port(parts.scheme)\n netloc = '{}:{}'.format(parts.hostname, port)\n return urlunparse((parts.scheme, netloc, parts.path, '', '', ''))", "def _parse(url):\n url = url.strip()\n parsed = urlparse(url)\n return _parsed_url_args(parsed)", "def test_normalize_path(self):\n self.assertEqual(normalize_path(\"//////\"), \"/\")\n self.assertEqual(normalize_path(\"//\"), \"/\")\n self.assertEqual(normalize_path(\"//foo/bar//baz\"), \"/foo/bar/baz\")\n self.assertEqual(normalize_path(\"//foo/bar//baz/\"), \"/foo/bar/baz/\")\n self.assertEqual(normalize_path(\"//f%20oo/bar\"), \"/f oo/bar\")", "def split(p):\n if not p:\n return []\n p = p.strip('/').split('/')\n return [] if p == [''] else p", "def cleanUri(uri):\n if not uri.startswith(\"/\") and not uri.startswith('http'):\n uri = \"/\" + uri\n\n if 'http://' in uri or 'https://' in uri:\n uri = uri.split('://')[0] + '://' + \\\n uri.split('://')[1].replace(\"//\", \"/\")\n else:\n uri = uri.replace(\"//\", \"/\")\n\n if uri.endswith(\"/\"):\n uri = uri[:-1]\n\n return uri", "def _parse_url(url):\n parts = urlparse(url)\n scheme = parts.scheme\n port = parts.port or None\n hostname = parts.hostname\n path = parts.path or ''\n virtual_host = path[1:] if path and path[0] == '/' else path\n return (scheme, unquote(hostname or '') or None, port,\n unquote(parts.username or '') or None,\n unquote(parts.password or '') or None,\n unquote(path or '') or None,\n unquote(virtual_host or '') or None,\n unquote(parts.query or '') or None,\n dict(dict(parse_qsl(parts.query))))", "def decode_fullpath(fullpath):\n fp = fullpath.split(\"/\")\n if len(fp) != 5:\n raise ValueError(\"fullpath: invalid format\")\n decoded = []\n for part in fp:\n decoded.append(unquote(part))\n return tuple(decoded)", "def extract_path(url):\n parts = urlsplit(url)\n path = cookiejar.escape_path(parts.path)\n if not path.startswith(\"/\"):\n # fix bad RFC 2396 absoluteURI\n path = \"/\" + path\n return path", "def chomp_protocol(url: str) -> str:\n if \"+\" in url:\n url = url.split(\"+\", 1)[1]\n scheme, netloc, path, query, frag = urlparse.urlsplit(url)\n rev = None\n if \"@\" in path:\n path, rev = path.rsplit(\"@\", 1)\n url = urlparse.urlunsplit((scheme, netloc, path, query, \"\"))\n if url.startswith(\"ssh://git@github.com/\"):\n url = url.replace(\"ssh://\", \"git+ssh://\")\n elif \"://\" not in url:\n assert \"file:\" not in url\n url = url.replace(\"git+\", \"git+ssh://\")\n url = url.replace(\"ssh://\", \"\")\n return url", "def normalize_url(self, url):\n pass", "def split_path(abspath):\n path = abspath\n\n basepaths = []\n npaths_old = -1\n npaths_new = 0\n dpaths = 1\n while dpaths:\n 
npaths_old = len(basepaths)\n basepath = os.path.basename(path)\n if basepath:\n basepaths.append(basepath)\n path = os.path.dirname(path)\n npaths_new = len(basepaths)\n dpaths = npaths_new - npaths_old\n if path:\n basepaths.append(path)\n\n basepaths.reverse()\n return basepaths", "def normalize_link(link, split_url):\n url = link.get(\"href\", None)\n if not url:\n return None\n protocol = split_url.scheme + \"://\"\n netloc = split_url.netloc\n final_url = \"\"\n if not protocol in url: # Protocol doesn't exists, lets make sure that gets added.\n final_url += protocol\n if not netloc in url:\n final_url += netloc + \"/\"\n\n if url.startswith(\"/\"):\n final_url += url[1:]\n else:\n final_url += url\n\n return final_url", "def composeURL(self,splitedURL):\n # 027 With use of SmartURL won't be necessary anymore.\n # 027 was used only in LinklistAdaptor.parse2Queue().parseLine() -> removed (Which actually might jeopardize cll).\n # 027 So actually is NOT used anywhere.\n \n #Could be replaced by string.join() method.\n #Also could be merged with method composePath().\n #Create child of list class with this method. \n \n self.debug.printHeader() \n url=''\n if len(splitedURL)>0:\n for piece in splitedURL:\n if not(piece==splitedURL[0]): url+='/'\n url+=piece\n self.logger.debug(\"Composed url is: %s\" %(url))\n return url\n #return \"/\".join(splitedURL) #026 This will do the same job. But needs to be tested.", "def split_url_and_query_params(url):\n scheme, netloc, path, query_string, fragment = urlsplit(url)\n query_params = parse_qs(query_string)\n url = urlunsplit((scheme, netloc, path, None, fragment))\n return url, query_params", "def remove_url(txt):\n\n return \" \".join(re.sub(\"([^0-9A-Za-z \\t])|(\\w+:\\/\\/\\S+)\", \"\", txt).split())", "def safe_url_string(url, encoding='utf8', path_encoding='utf8'):\n # Python3's urlsplit() chokes on bytes input with non-ASCII chars,\n # so let's decode (to Unicode) using page encoding:\n # - it is assumed that a raw bytes input comes from a document\n # encoded with the supplied encoding (or UTF8 by default)\n # - if the supplied (or default) encoding chokes,\n # percent-encode offending bytes\n decoded = to_unicode(url, encoding=encoding, errors='percentencode')\n parts = urlsplit(_ascii_tab_newline_re.sub('', decoded))\n\n # IDNA encoding can fail for too long labels (>63 characters)\n # or missing labels (e.g. 
http://.example.com)\n try:\n netloc = parts.netloc.encode('idna')\n except UnicodeError:\n netloc = parts.netloc\n\n # quote() in Python2 return type follows input type;\n # quote() in Python3 always returns Unicode (native str)\n return urlunsplit((\n to_native_str(parts.scheme),\n to_native_str(netloc).rstrip(':'),\n\n # default encoding for path component SHOULD be UTF-8\n quote(to_bytes(parts.path, path_encoding), _safe_chars),\n\n # encoding of query and fragment follows page encoding\n # or form-charset (if known and passed)\n quote(to_bytes(parts.query, encoding), _safe_chars),\n quote(to_bytes(parts.fragment, encoding), _safe_chars),\n ))", "def remove_url(txt):\n\n return \" \".join(re.sub(\"([^0-9A-Za-z \\t])|(\\w+:\\/\\/\\S+)\", \"\", txt).split())", "def norm(url):\n url = _unicode(url) # operate on unicode strings\n url_tuple = urlparse(url)\n normalized_tuple = norm_tuple(*url_tuple)\n return urlunparse(normalized_tuple).replace(' ','%20')", "def url_fix_mailto_urlsplit(urlparts):\n if \"?\" in urlparts[2]:\n urlparts[2], urlparts[3] = urlparts[2].split('?', 1)", "def url_norm(url, encoding=None, strip=False, lowercase_path=False, remove_fragment=False):\n\n if strip:\n url = url.strip()\n\n if isinstance(url, unicode):\n # try to decode the URL to ascii since urllib.unquote()\n # handles non-unicode strings differently\n try:\n url = url.encode('ascii')\n except UnicodeEncodeError:\n pass\n encode_unicode = True\n else:\n encode_unicode = False\n urlparts = list(urlparse.urlsplit(url))\n\n #fix missing scheme\n if not urlparts[0] or not urlparts[1]:\n urlparts = list(fix_missing_scheme(url, urlparts))\n elif urlparts[0] not in default_scheme_for_port:\n # Todo: find the scheme with the min edit distance\n pass\n\n # scheme\n if not http_scheme_pattern.match(urlparts[0]):\n raise InvalidUrl(url)\n\n urlparts[0] = urllib.unquote(urlparts[0]).lower()\n # host (with path or query side effects)\n is_idn = url_fix_host(urlparts)\n # query\n urlparts[3] = url_parse_query(urlparts[3], encoding=encoding)\n if urlparts[0] in urlparse.uses_relative:\n # URL has a hierarchical path we should norm\n if not urlparts[2]:\n # Empty path is allowed if both query and fragment are also empty.\n # Note that in relative links, urlparts[0] might be empty.\n # In this case, do not make any assumptions.\n if urlparts[0] and (urlparts[3] or urlparts[4]):\n urlparts[2] = '/'\n else:\n # fix redundant path parts\n urlparts[2] = collapse_segments(urlparts[2])\n if not remove_fragment:\n # anchor\n urlparts[4] = urllib.unquote(urlparts[4])\n # quote parts again\n urlparts[0] = url_quote_part(urlparts[0], encoding=encoding) # scheme\n urlparts[1] = url_quote_part(urlparts[1], safechars='@:', encoding=encoding) # host\n urlparts[2] = url_quote_part(urlparts[2], safechars=_nopathquote_chars, encoding=encoding) # path\n\n if lowercase_path:\n urlparts[2] = urlparts[2].lower()\n\n if remove_fragment:\n urlparts[4] = ''\n else:\n urlparts[4] = url_quote_part(urlparts[4], encoding=encoding) # anchor\n\n if not urlparts[2]:\n urlparts[2] = '/'\n\n res = urlunsplit(urlparts)\n\n if encode_unicode:\n res = unicode(res)\n return res, is_idn", "def url_clean(path):\n return path[path.find('/'+settings.URL_ADMIN_SEP):]", "def normalize_url(url):\n parse = urlparse(url)\n\n # netloc should be lowercase\n netloc = parse.netloc.lower()\n if parse.scheme == \"http\":\n if netloc.endswith(\":80\"):\n netloc = netloc[:-3]\n\n elif parse.scheme == \"https\" and netloc.endswith(\":443\"):\n netloc = netloc[:-4]\n\n # add a 
'/' at the end of the netloc if there in no path\n if not parse.path:\n netloc = netloc + \"/\"\n\n return \"{}://{}{}\".format(parse.scheme, netloc, parse.path)", "def test_split_fullpath_with_route_domain():\n\n # Expected input must have route specified, otherwise reject\n tests = [\n [\"/Partition/1.2.3.4%0:80\", \"/Partition\", \"1.2.3.4\", 0, 80],\n [\"/Part/Folder/1.2.3.4%1:443\", \"/Part/Folder\", \"1.2.3.4\", 1, 443],\n [\"/Part/::ffff:0:0%2.8080\", \"/Part\", \"::ffff:0:0\", 2, 8080],\n [\"/Part/1.2.3.4:8080\", None, None, None, None],\n [\"/Part/::ffff:0:0.8080\", None, None, None, None]\n ]\n\n for test in tests:\n results = split_fullpath_with_route_domain(test[0])\n assert results[0] == test[1]\n assert results[1] == test[2]\n assert results[2] == test[3]\n assert results[3] == test[4]", "def split_path(path):\n parts = []\n path, end = os.path.split(path)\n while end:\n parts.append(end)\n path, end = os.path.split(path)\n\n if path:\n parts.append(path)\n parts.reverse()\n return parts", "def convertWikiURL(files):\n return urllib.parse.urlunsplit( ('http', basepla, basewiki, files, '') )", "def split_path(full_path, root_path):\n root_len = len(root_path)\n parsed_list = full_path[root_len+1:].split('/') \n \n return parsed_list", "def parsing(url):\n\n url = urlparse(url).netloc\n a = url.split('.')\n if len(a) >= 3:\n a = a[:-(len(a) - 1)]\n else:\n a = a[:-1]\n x = ('.'.join(a))\n return x", "def __split_path(path: str) -> List[str]:\n return [part for part in path.split('/') if part] # Splits path at '/', handles extra slashes in the process", "def unquote(uri):\r\n uri = uri.encode('ascii')\r\n unquoted = urllib_unquote(uri)\r\n return unquoted.decode('utf-8')", "def parse_url_path(url_path):\r\n\r\n m = re.match('^/([^/]+)/?$',url_path)\r\n if m:\r\n return (m.group(1),None)\r\n \r\n m = re.match('^/([^/]+)/(.+)$',url_path)\r\n if m:\r\n return (m.group(1),m.group(2).replace('%25','%'))\r\n \r\n return (None,None)", "def transform_url_without_encode(result):\n import re\n result = re.sub('//', '/', result)\n result = re.sub('/', '//', result, count=1)\n return result", "def url_subpath(s):\n forbidden = forbidden_chars.intersection(s)\n if forbidden:\n forbidden = ''.join(sorted(forbidden))\n raise ValueError('%(s)r contains forbidden characters'\n ' (%(forbidden)r)'\n % locals())\n stripped = normpath(s).lstrip(sep)\n if stripped == curdir:\n return ''\n if sep != '/':\n return stripped.replace(sep, '/')\n return stripped", "def split_s3_path(url):\n\tparsed = urlparse (url)\n\tif not parsed.netloc or not parsed.path:\n\t\traise ValueError (\"bad s3 path {}\".format (url))\n\tbucket_name = parsed.netloc\n\ts3_path = parsed.path\n\t# Remove '/' at beginning of path.\n\tif s3_path.startswith (\"/\"):\n\t\ts3_path = s3_path[1:]\n\treturn bucket_name, s3_path", "def split_path(path):\n\n if type(path) != str:\n return []\n\n # replace multiple occurrences of \"/\" with just one,\n # i.e. 
\"page1//page2///page3\" -> \"page1/page2/page3\"\n path = re.sub('/+', '/', path)\n path = path.split(\"/\") # form a list of path steps\n path = [x.lower() for x in path if x != \"\"] # filter out empty strings, convert to lowercase\n\n return path", "def normalize_cdmi_url(self, path):\n # Turn URL path into OS path for manipulation\n mypath = url2pathname(path)\n if not os.path.isabs(mypath):\n mypath = os.path.join(url2pathname(self.pwd()), mypath)\n # normalize path\n mypath = os.path.normpath(mypath)\n if path.endswith(\"/\") and not mypath.endswith(\"/\"):\n mypath += \"/\"\n url = self.cdmi_url + pathname2url(mypath)\n return url", "def normalize_scheme(path, ext):\n path = addextension(path, ext)\n\n parsed = urlparse(path)\n if parsed.scheme:\n # this appears to already be a fully-qualified URI\n return path\n else:\n # this looks like a local path spec\n import os\n dirname, filename = os.path.split(path)\n if not os.path.isabs(dirname):\n # need to make relative local paths absolute\n dirname = os.path.abspath(dirname)\n path = os.path.join(dirname, filename)\n return \"file://\" + path", "def GetServerFromUrl(url):\n return urlunparse((GetSchemeFromUrl(url), GetNetLocFromUrl(url), '', '', '',\n ''))", "def test_split_url_for_query_2(self):\n url = \"testurl.com/test\"\n\n output = split_url_for_query(url)\n\n self.assertEqual(output, (\"com.testurl.%\", \"%./test%\"))", "def split_type_host(url):\n type, rest = urllib.splittype(url)\n host, selector = urllib.splithost(rest)\n return type, host, selector", "def spath_stripoptions(spath):\n l = [comp.split(\"?\", 1)[0] for comp in spath.split(\"/\")]\n return \"/\".join(l)", "def safe_download_url(url, encoding='utf8', path_encoding='utf8'):\n safe_url = safe_url_string(url, encoding, path_encoding)\n scheme, netloc, path, query, _ = urlsplit(safe_url)\n if path:\n path = _parent_dirs.sub('', posixpath.normpath(path))\n if safe_url.endswith('/') and not path.endswith('/'):\n path += '/'\n else:\n path = '/'\n return urlunsplit((scheme, netloc, path, query, ''))", "def _urlparse_splitauthority(netloc):\r\n\r\n # The authority can have a userinfo portion delimited by \"@\".\r\n authority = netloc.split(\"@\", 1)\r\n\r\n # Default values.\r\n username = None\r\n password = None\r\n hostname = None\r\n port = None\r\n\r\n # Is there a userinfo portion?\r\n if len(authority) == 2:\r\n\r\n # userinfo can be split into username:password\r\n userinfo = authority[0].split(\":\", 1)\r\n\r\n # hostport can be split into hostname:port\r\n hostport = authority[1].split(\":\", 1)\r\n\r\n if userinfo[0]:\r\n username = userinfo[0]\r\n if len(userinfo) == 2:\r\n password = userinfo[1]\r\n\r\n # No userinfo portion found.\r\n else:\r\n\r\n # hostport can be split into hostname:port\r\n hostport = netloc.split(\":\", 1)\r\n\r\n # Is there a port value?\r\n if hostport[0]:\r\n hostname = hostport[0]\r\n if len(hostport) == 2:\r\n port = int(hostport[1], 10)\r\n\r\n # Return the values.\r\n return username, password, hostname, port", "def _update_url_scheme(self, url):\n if self.base_scheme and not url.startswith(\"%s://\" % self.base_scheme):\n # url_split = urlparse.urlsplit(url)\n url_split = urlsplit(url)\n # url = urlparse.urlunsplit(\n url = urlunsplit(\n [\n self.base_scheme,\n url_split.netloc,\n url_split.path,\n url_split.query,\n url_split.fragment\n ]\n )\n return url", "def convertToURL( cPathname ):\n if len( cPathname ) > 1:\n if cPathname[1:2] == \":\":\n cPathname = \"/\" + cPathname[0] + \"|\" + cPathname[2:]\n cPathname = 
string.replace( cPathname, \"\\\\\", \"/\" )\n cPathname = \"file://\" + cPathname\n return cPathname", "def split_path(s):\n dirname, filename = os.path.split(s)\n fname_noext, ext = os.path.splitext(filename)\n levels = dirname.strip('/').split(os.path.sep)[2:][-2:]\n return PATH_SPLIT.split(' '.join(levels + [fname_noext]))", "def parse_url(url):\n newurl = urlparse(url)\n return \"{0}://{1}\".format(newurl.scheme, newurl.netloc)", "def _parse_url(self, url):\n url_prefix = self.URL_PREFIX\n assert(url[:len(url_prefix)] == url_prefix)\n key, file_attrs = url[len(url_prefix):].split('/', 1)\n file_, attrs = parse_url_opts(file_attrs)\n return key, file_, attrs", "def normalizeURIPath(path):\n ret = libxml2mod.xmlNormalizeURIPath(path)\n return ret", "def url_path_join(*pieces):\n initial = pieces[0].startswith('/')\n final = pieces[-1].endswith('/')\n striped = [s.strip('/') for s in pieces]\n result = '/'.join(s for s in striped if s)\n if initial: result = '/' + result\n if final: result = result + '/'\n if result == '//': result = '/'\n return result", "def get_url_straight_filename(url, strip=[], allowdir=False):\n path = urlunquote(urlsplit(url).path)\n path_parts = path.split('/')\n\n if allowdir:\n # strip empty ones\n while len(path_parts) > 1 and not path_parts[-1]:\n path_parts = path_parts[:-1]\n\n if strip:\n while path_parts and path_parts[-1] in strip:\n path_parts = path_parts[:-1]\n\n if path_parts:\n return path_parts[-1]\n else:\n return None", "def _clean_url(self, url):\n return \"\".join(url.split(\"?\")[:1])", "def make_safe_url(self, url):\n\n # Split the URL into scheme, netloc, path, query and fragment\n parts = list(urlsplit(url))\n\n # Clear scheme and netloc and rebuild URL\n parts[0] = '' # Empty scheme\n parts[1] = '' # Empty netloc (hostname:port)\n safe_url = urlunsplit(parts)\n return safe_url", "def urljoin(*parts):\n def _gen(parts):\n prev = None\n for part in parts:\n if not part:\n continue\n if not prev:\n prev = part\n elif (prev[-1] == '/') != (part[0] == '/'): # Exactly one slash was present\n prev = part\n # At this point, either zero or two slashes are present. Which is it?\n elif part[0] == '/': # Two slashes.\n prev = part[1:]\n else: # No slashes.\n yield '/'\n prev = part\n yield prev\n\n return \"\".join(part for part in _gen(parts))", "def handle_url(self, url):\n parse = urlparse.urlparse(url, \"http\")\n # relative url path\n if not parse.netloc:\n parse = urlparse.urlparse(\n urlparse.urljoin(\n self.source_url,\n parse.path))\n return urlparse.urlunparse(parse)" ]
[ "0.6627455", "0.65837115", "0.64844257", "0.64534926", "0.62631977", "0.6174686", "0.6172689", "0.6165752", "0.61646414", "0.61592484", "0.61030483", "0.610288", "0.60629", "0.6021757", "0.601455", "0.59292614", "0.5918131", "0.5909229", "0.58973026", "0.58960414", "0.5895975", "0.58935267", "0.58893645", "0.58778936", "0.58523923", "0.5803205", "0.57883364", "0.57883364", "0.57490945", "0.5722356", "0.57216835", "0.57098603", "0.5690128", "0.5653392", "0.5645547", "0.5637791", "0.5633539", "0.5630902", "0.56075996", "0.560557", "0.556147", "0.55598265", "0.5547646", "0.55434823", "0.55365336", "0.5518606", "0.55140334", "0.5512413", "0.5481947", "0.5452583", "0.5440254", "0.54218805", "0.54039013", "0.53985345", "0.53964996", "0.53944474", "0.5386581", "0.53826034", "0.537749", "0.5367311", "0.5361207", "0.53545845", "0.53384584", "0.5334032", "0.53317434", "0.53162485", "0.531517", "0.5311897", "0.53069425", "0.5306826", "0.5306233", "0.53007954", "0.52935165", "0.5291123", "0.527899", "0.52746", "0.52730954", "0.52694666", "0.5266559", "0.5265695", "0.52626896", "0.52621716", "0.52485245", "0.52463007", "0.5237447", "0.523057", "0.5224272", "0.521868", "0.5217449", "0.5215628", "0.5209699", "0.5207917", "0.5206251", "0.51924443", "0.51909316", "0.5190699", "0.5190572", "0.5189703", "0.518688", "0.51863414" ]
0.851646
0
Normalize the given URL which must be quoted. Supports unicode hostnames (IDNA encoding) according to RFC 3490.
def url_norm(url, encoding=None, strip=False, lowercase_path=False, remove_fragment=False): if strip: url = url.strip() if isinstance(url, unicode): # try to decode the URL to ascii since urllib.unquote() # handles non-unicode strings differently try: url = url.encode('ascii') except UnicodeEncodeError: pass encode_unicode = True else: encode_unicode = False urlparts = list(urlparse.urlsplit(url)) #fix missing scheme if not urlparts[0] or not urlparts[1]: urlparts = list(fix_missing_scheme(url, urlparts)) elif urlparts[0] not in default_scheme_for_port: # Todo: find the scheme with the min edit distance pass # scheme if not http_scheme_pattern.match(urlparts[0]): raise InvalidUrl(url) urlparts[0] = urllib.unquote(urlparts[0]).lower() # host (with path or query side effects) is_idn = url_fix_host(urlparts) # query urlparts[3] = url_parse_query(urlparts[3], encoding=encoding) if urlparts[0] in urlparse.uses_relative: # URL has a hierarchical path we should norm if not urlparts[2]: # Empty path is allowed if both query and fragment are also empty. # Note that in relative links, urlparts[0] might be empty. # In this case, do not make any assumptions. if urlparts[0] and (urlparts[3] or urlparts[4]): urlparts[2] = '/' else: # fix redundant path parts urlparts[2] = collapse_segments(urlparts[2]) if not remove_fragment: # anchor urlparts[4] = urllib.unquote(urlparts[4]) # quote parts again urlparts[0] = url_quote_part(urlparts[0], encoding=encoding) # scheme urlparts[1] = url_quote_part(urlparts[1], safechars='@:', encoding=encoding) # host urlparts[2] = url_quote_part(urlparts[2], safechars=_nopathquote_chars, encoding=encoding) # path if lowercase_path: urlparts[2] = urlparts[2].lower() if remove_fragment: urlparts[4] = '' else: urlparts[4] = url_quote_part(urlparts[4], encoding=encoding) # anchor if not urlparts[2]: urlparts[2] = '/' res = urlunsplit(urlparts) if encode_unicode: res = unicode(res) return res, is_idn
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def clean_url(url):\r\n s = url\r\n url = url.encode('utf8')\r\n url = ''.join([urllib.quote(c) if ord(c) >= 127 else c for c in url])\r\n return url", "def normalize_url(self, url):\n pass", "def normalize_url(url):\n # print(url)\n if not url.startswith('http://') and not url.startswith('https://'):\n return 'https://{}/{}'.format(zone_name, url.replace('//', '/'))\n return url", "def normalize_url(url: str) -> str:\n parts = urlparse(url)\n\n path = quote(parts.path)\n while '//' in path:\n path = path.replace(\"//\", \"/\")\n\n return urlunparse(parts._replace(path=path))", "def sanitize_url(url, require_scheme = False):\r\n if not url or ' ' in url:\r\n return\r\n\r\n url = url.strip()\r\n if url.lower() == 'self':\r\n return url\r\n\r\n u = urlparse(url)\r\n # first pass: make sure a scheme has been specified\r\n if not require_scheme and not u.scheme:\r\n url = 'http://' + url\r\n u = urlparse(url)\r\n\r\n if (u.scheme and u.scheme in valid_schemes\r\n and u.hostname and len(u.hostname) < 255\r\n and '%' not in u.netloc):\r\n return url", "def norm(url):\n url = _unicode(url) # operate on unicode strings\n url_tuple = urlparse(url)\n normalized_tuple = norm_tuple(*url_tuple)\n return urlunparse(normalized_tuple).replace(' ','%20')", "def sanitize_url(self, url):\r\n if not self.markdown.safeMode:\r\n # Return immediately bipassing parsing.\r\n return url\r\n \r\n try:\r\n scheme, netloc, path, params, query, fragment = url = urlparse(url)\r\n except ValueError:\r\n # Bad url - so bad it couldn't be parsed.\r\n return ''\r\n \r\n locless_schemes = ['', 'mailto', 'news']\r\n if netloc == '' and scheme not in locless_schemes:\r\n # This fails regardless of anything else. \r\n # Return immediately to save additional proccessing\r\n return ''\r\n\r\n for part in url[2:]:\r\n if \":\" in part:\r\n # Not a safe url\r\n return ''\r\n\r\n # Url passes all tests. 
Return url as-is.\r\n return urlunparse(url)", "def normalize_for_url(text: str) -> str:\n\n # German is our main language, so we are extra considerate about it\n # (unidecode turns ü into u)\n text = text.replace(\"ü\", \"ue\")\n text = text.replace(\"ä\", \"ae\")\n text = text.replace(\"ö\", \"oe\")\n clean = _unwanted_url_chars.sub('-', unidecode(text).strip(' ').lower())\n clean = _double_dash.sub('-', clean)\n clean = clean.strip('-')\n\n return clean", "def normalize_url(url, unsplit=True, sort_query=True, strip_authentication=True,\n strip_trailing_slash=True, strip_index=True, strip_protocol=True,\n strip_irrelevant_subdomains=True, strip_lang_subdomains=False, strip_lang_query_items=False,\n strip_fragment='except-routing', normalize_amp=True, fix_common_mistakes=True,\n infer_redirection=True, quoted=True):\n original_url_arg = url\n\n if infer_redirection:\n url = resolve(url)\n\n if isinstance(url, SplitResult):\n has_protocol = bool(splitted.scheme)\n splitted = url\n else:\n has_protocol = PROTOCOL_RE.match(url)\n\n # Ensuring scheme so parsing works correctly\n if not has_protocol:\n url = 'http://' + url\n\n # Parsing\n try:\n splitted = urlsplit(url)\n except ValueError:\n return original_url_arg\n\n scheme, netloc, path, query, fragment = splitted\n\n # Fixing common mistakes\n if fix_common_mistakes:\n if query:\n query = re.sub(MISTAKES_RE, '&', query)\n\n # Handling punycode\n netloc = decode_punycode(netloc)\n\n # Dropping :80 & :443\n if netloc.endswith(':80'):\n netloc = netloc[:-3]\n elif netloc.endswith(':443'):\n netloc = netloc[:-4]\n\n # Normalizing the path\n if path:\n trailing_slash = False\n if path.endswith('/') and len(path) > 1:\n trailing_slash = True\n path = normpath(path)\n if trailing_slash and not strip_trailing_slash:\n path = path + '/'\n\n # Handling Google AMP suffixes\n if normalize_amp:\n path = AMP_SUFFIXES_RE.sub('', path)\n\n # Dropping index:\n if strip_index:\n segments = path.rsplit('/', 1)\n\n if len(segments) != 0:\n last_segment = segments[-1]\n filename, ext = splitext(last_segment)\n\n if filename == 'index':\n segments.pop()\n path = '/'.join(segments)\n\n # Dropping irrelevant query items\n if query:\n domain_filter = None\n\n if splitted.hostname:\n domain_filter = next(\n (f for d, f in PER_DOMAIN_QUERY_FILTERS if splitted.hostname.endswith(d)),\n None\n )\n\n qsl = parse_qsl(query, keep_blank_values=True)\n qsl = [\n stringify_qs(item)\n for item in qsl\n if not should_strip_query_item(\n item,\n normalize_amp=normalize_amp,\n strip_lang_query_items=strip_lang_query_items,\n domain_filter=domain_filter\n )\n ]\n\n if sort_query:\n qsl = sorted(qsl)\n\n query = '&'.join(qsl)\n\n # Dropping fragment if it's not routing\n if fragment and strip_fragment:\n if strip_fragment is True or not should_strip_fragment(fragment):\n fragment = ''\n\n # Always dropping trailing slash with empty query & fragment\n if path == '/' and not fragment and not query:\n path = ''\n\n # Dropping irrelevant subdomains\n if strip_irrelevant_subdomains:\n netloc = re.sub(\n IRRELEVANT_SUBDOMAIN_AMP_RE if normalize_amp else IRRELEVANT_SUBDOMAIN_RE,\n '',\n netloc\n )\n\n # Dropping language as subdomains\n if strip_lang_subdomains:\n netloc = strip_lang_subdomains_from_netloc(netloc)\n\n # Dropping scheme\n if strip_protocol or not has_protocol:\n scheme = ''\n\n # Dropping authentication\n if strip_authentication:\n netloc = netloc.split('@', 1)[-1]\n\n # Normalizing AMP subdomains\n if normalize_amp and netloc.startswith('amp-'):\n netloc = 
netloc[4:]\n\n # Dropping trailing slash\n if strip_trailing_slash and path.endswith('/'):\n path = path.rstrip('/')\n\n # Quoting or not\n if quoted:\n path = quote(path)\n query = quote(query, RESERVED_CHARACTERS)\n fragment = quote(fragment, SAFE_CHARACTERS)\n else:\n path = unquote(path)\n query = unquote(query)\n fragment = unquote(fragment)\n\n # Result\n result = SplitResult(\n scheme,\n netloc.lower(),\n path,\n query,\n fragment\n )\n\n if not unsplit:\n return result\n\n # TODO: check if works with `unsplit=False`\n if strip_protocol or not has_protocol:\n result = urlunsplit(result)[2:]\n else:\n result = urlunsplit(result)\n\n return result", "def uncanonicalize(self, url):\n pass", "def clean_url(url):\n scheme, netloc, path, query, fragment = url_parse.urlsplit(url)\n path = url_parse.quote(path)\n url = url_parse.urlunsplit((scheme, netloc, path, query, fragment))\n return url", "def normalize_url(url):\n parse = urlparse(url)\n\n # netloc should be lowercase\n netloc = parse.netloc.lower()\n if parse.scheme == \"http\":\n if netloc.endswith(\":80\"):\n netloc = netloc[:-3]\n\n elif parse.scheme == \"https\" and netloc.endswith(\":443\"):\n netloc = netloc[:-4]\n\n # add a '/' at the end of the netloc if there in no path\n if not parse.path:\n netloc = netloc + \"/\"\n\n return \"{}://{}{}\".format(parse.scheme, netloc, parse.path)", "def normalize_url(url):\n if not url.startswith((\"git+\", \"hg+\")):\n return url\n return url[4:]", "def urlparse(url):\n\tunquote_url=urllib.parse.unquote(url)\n\treturn unquote_url", "def url_fix(s, charset='utf-8'):\n # First step is to convert backslashes (which are invalid in URLs anyways)\n # to slashes. This is consistent with what Chrome does.\n s = s.replace('\\\\', '/')\n\n # For the specific case that we look like a malformed windows URL\n # we want to fix this up manually:\n if (\n s.startswith('file://') and\n s[7:8].isalpha() and\n s[8:10] in (':/', '|/')\n ):\n s = 'file:///' + s[7:]\n\n url = urlsplit(s)\n\n netloc = _encode_netloc(url)\n\n path = urlquote(\n url.path, encoding=charset, safe='/%+$!*\\'(),'\n )\n qs = urlquote_plus(\n url.query, encoding=charset, safe=':&%=+$!*\\'(),'\n )\n anchor = urlquote_plus(\n url.fragment, encoding=charset, safe=':&%=+$!*\\'(),'\n )\n\n return urlunsplit(\n (url.scheme, netloc, path, qs, anchor)\n )", "def clean_url(url):\n o = urlsplit(url)\n return \"{scheme}://{netloc}{path}\".format(\n scheme=o[0], netloc=o[1], path=o[2],\n )", "def hostify_url(url):\n\tif url[0] == '/':\n\t\treturn HOST + url\n\telse:\n\t\treturn url", "def normalize_uri(uri):\n if isinstance(uri, str):\n uri = uri.decode('utf-8')\n return uri.strip().replace(u' ', u'_')", "def sanitize_url(url: str) -> Optional[str]:\n try:\n parts = urllib.parse.urlparse(url.replace(\" \", \"%20\"))\n scheme, netloc, path, params, query, fragment = parts\n except ValueError:\n # Bad URL - so bad it couldn't be parsed.\n return \"\"\n\n # If there is no scheme or netloc and there is a '@' in the path,\n # treat it as a mailto: and set the appropriate scheme\n if scheme == \"\" and netloc == \"\" and \"@\" in path:\n scheme = \"mailto\"\n elif scheme == \"\" and netloc == \"\" and len(path) > 0 and path[0] == \"/\":\n # Allow domain-relative links\n return urllib.parse.urlunparse((\"\", \"\", path, params, query, fragment))\n elif (scheme, netloc, path, params, query) == (\"\", \"\", \"\", \"\", \"\") and len(fragment) > 0:\n # Allow fragment links\n return urllib.parse.urlunparse((\"\", \"\", \"\", \"\", \"\", fragment))\n\n # 
Zulip modification: If scheme is not specified, assume http://\n # We re-enter sanitize_url because netloc etc. need to be re-parsed.\n if not scheme:\n return sanitize_url(\"http://\" + url)\n\n # Upstream code will accept a URL like javascript://foo because it\n # appears to have a netloc. Additionally there are plenty of other\n # schemes that do weird things like launch external programs. To be\n # on the safe side, we allow a fixed set of schemes.\n if scheme not in allowed_schemes:\n return None\n\n # Upstream code scans path, parameters, and query for colon characters\n # because\n #\n # some aliases [for javascript:] will appear to urllib.parse to have\n # no scheme. On top of that relative links (i.e.: \"foo/bar.html\")\n # have no scheme.\n #\n # We already converted an empty scheme to http:// above, so we skip\n # the colon check, which would also forbid a lot of legitimate URLs.\n\n # URL passes all tests. Return URL as-is.\n return urllib.parse.urlunparse((scheme, netloc, path, params, query, fragment))", "def url_fix(s, charset='utf-8'):\n if isinstance(s, unicode):\n s = s.encode(charset, 'ignore')\n scheme, netloc, path, qs, anchor = urlparse.urlsplit(s)\n path = urllib.quote(path, '/%')\n qs = urllib.quote_plus(qs, ':&=')\n return urlparse.urlunsplit((scheme, netloc, path, qs, anchor))", "def _escape_url(url, force_https=True):\n scheme, netloc, path, query, unused_fragment = urlparse.urlsplit(url)\n if force_https:\n scheme = 'https'\n path = urllib.quote(path)\n query = urllib.quote_plus(query, '=?&;')\n return urlparse.urlunsplit((scheme, netloc, path, query, unused_fragment))", "def normalize_uri(uri):\n return normalize_uri_result(uri).unsplit()", "def test_dont_percent_encode_safe_chars_query():\n assert (normalize_url(\"http://example.com/a/?face=(-.-)\") ==\n \"http://example.com/a?face=(-.-)\")", "def domain_parse(url):\n url = url.lower()\n if not url.startswith('http://') and not url.startswith('https://'):\n url = '{schema}{host}'.format(schema='http://', host=url)\n url = urlparse(url)\n if not url.hostname:\n raise ValueError('Invalid domain provided')\n\n # Strip www prefix any additional URL data\n url = urlparse('{scheme}://{host}'.format(scheme=url.scheme, host=url.hostname.lstrip('www.')))\n return url", "def _fix_url(url):\n\n if not url.startswith('http'):\n url = 'http://' + url\n\n return url", "def sanitize_url(url: str, protocol: str = 'https://') -> str:\n sanitized = url[0:-1] if url[-1] == '/' else url\n with_protocol = sanitized if sanitized.startswith('http') else f'{protocol}{sanitized}'\n return with_protocol", "def test_unreserved_percentencoding():\n assert (normalize_url(\"http://www.example.com/%7Eusername/\") ==\n \"http://www.example.com/~username\")\n assert (normalize_url('http://example.com/foo%23bar') ==\n 'http://example.com/foo%23bar')\n assert (normalize_url('http://example.com/foo%2fbar') ==\n 'http://example.com/foo%2Fbar')\n assert (normalize_url('http://example.com/foo%3fbar') ==\n 'http://example.com/foo%3Fbar')", "def qurl2ascii(url):\n url = unicode(url.toString()).encode('unicode-escape').decode('ascii')\n if url.lower().startswith('data:') and len(url) > 80:\n url = url[:60] + '...[data uri truncated]'\n return url", "def clean_url(url):\n\n if url is None:\n return None\n\n if '??' 
in url:\n url = url.split('??')[0]\n\n if url.endswith('?'):\n url = url[:-1]\n\n if '`' in url:\n url = url.replace('`', '')\n\n return url", "def test_unicode_query_string():\n assert (normalize_url(\"http://example.com/?file=résumé.pdf\") ==\n \"http://example.com/?file=r%C3%A9sum%C3%A9.pdf\")", "def url_fix_common_typos(url):\n if url.startswith(\"http//\"):\n url = \"http://\" + url[6:]\n elif url.startswith(\"https//\"):\n url = \"https://\" + url[7:]\n return url", "def canonicalize(self, url):\n pass", "def make_url_safe(self, url):\n return url.replace(' ', '%20')\\\n .replace('(', '%28')\\\n .replace(')', '%29')\\\n .replace('\"', '%22')", "def get_normalized_url(url):\r\n scheme, netloc, path, params, query, fragment = urlparse(url)\r\n\r\n # Exclude default port numbers.\r\n if scheme == 'http' and netloc[-3:] == ':80':\r\n netloc = netloc[:-3]\r\n elif scheme == 'https' and netloc[-4:] == ':443':\r\n netloc = netloc[:-4]\r\n if scheme not in ('http', 'https'):\r\n raise ValueError(\"Unsupported URL %s (%s).\" % (url, scheme))\r\n\r\n # Normalized URL excludes params, query, and fragment.\r\n return urlunparse((scheme, netloc, path, None, None, None))", "def test_capitalize_escape_sequence():\n assert (normalize_url(\"http://www.example.com/a%c2%b1b\") ==\n \"http://www.example.com/a%C2%B1b\")", "def process_url(url):\n # only get url path, remove host,params.\n url = urlparse(url).path\n # url = list(url)\n # for i in range(len(url)):\n # if _is_punctuation(url[i]):\n # url[i] = \" \"\n # url = ''.join(url)\n # url = ' '.join(url.split())\n return url", "def test_normalize_percent_encoding_in_querystring():\n assert (normalize_url(\"http://example.com/?a=b%c2\") ==\n \"http://example.com/?a=b%C2\")", "def format_url(url):\n if not (url.startswith(\"//\") or url.startswith(\"http\")):\n url = \"http://\" + url\n return url", "def sanitize_link(link, url):\n if link.startswith('//'):\n link = f'http:{link}'\n elif link.startswith('/'):\n parsed_url = urlparse(url)\n link = f'http://{parsed_url.hostname}{link}'\n return link", "def sanitize_hostname(hostname):\n if isinstance(hostname, six.string_types):\n hostname = hostname.encode('latin-1', 'ignore')\n if six.PY3:\n hostname = hostname.decode()\n hostname = re.sub('[ _]', '-', hostname)\n hostname = re.sub('[^\\w.-]+', '', hostname)\n hostname = hostname.lower()\n hostname = hostname.strip('.-')\n\n return hostname", "def test_query_string_spaces():\n assert (normalize_url(\"http://example.com/search?q=a b&a=1\") ==\n \"http://example.com/search?a=1&q=a+b\")\n assert (normalize_url(\"http://example.com/search?q=a+b&a=1\") ==\n \"http://example.com/search?a=1&q=a+b\")\n assert (normalize_url(\"http://example.com/search?q=a%20b&a=1\") ==\n \"http://example.com/search?a=1&q=a+b\")", "def _clean_url(self, url):\n return \"\".join(url.split(\"?\")[:1])", "def _encode_url(full_url):\n return urllib.parse.quote(full_url, safe=\"%/:=&?~#+!$,;'@()*[]|\")", "def unquote(uri):\r\n uri = uri.encode('ascii')\r\n unquoted = urllib_unquote(uri)\r\n return unquoted.decode('utf-8')", "def make_safe_url(self, url):\n\n # Split the URL into scheme, netloc, path, query and fragment\n parts = list(urlsplit(url))\n\n # Clear scheme and netloc and rebuild URL\n parts[0] = '' # Empty scheme\n parts[1] = '' # Empty netloc (hostname:port)\n safe_url = urlunsplit(parts)\n return safe_url", "def reformat_weburl(s):\n s = (s or '').strip()\n\n if s and '.' 
in s and 'notavailable' not in s:\n match = re.match(r'^http(s)?://', s)\n if not match:\n s = 'http://' + s\n\n return s", "def clean_url(url: str) -> str:\n r = urlparse(url)\n parts = list(r)\n # Add a / to the end of the path if it isn't there\n if not parts[2].endswith(\"/\"):\n parts[2] += \"/\"\n return urlunparse(parts)", "def cleanUri(uri):\n if not uri.startswith(\"/\") and not uri.startswith('http'):\n uri = \"/\" + uri\n\n if 'http://' in uri or 'https://' in uri:\n uri = uri.split('://')[0] + '://' + \\\n uri.split('://')[1].replace(\"//\", \"/\")\n else:\n uri = uri.replace(\"//\", \"/\")\n\n if uri.endswith(\"/\"):\n uri = uri[:-1]\n\n return uri", "def test_unicode_path():\n assert (normalize_url(\"http://example.com/résumé\") ==\n \"http://example.com/r%C3%A9sum%C3%A9\")", "def sanitize_url(urlstring):\n\n # A blog's url is the best unique identifier for the data store\n # (some Twitter handles have more than one blog), but certain\n # punctuation in a string throws an error in Firebase when\n # you attempt to use that string as a key.\n return annoying_punctuation.sub('', urlstring)", "def _convert_url(url, website):\n\n if website == 'xinhua':\n page_url = url.replace('\"', '')\n page_url = page_url.encode('ascii')\n elif website == 'upi':\n page_url = url.encode('ascii')\n elif website == 'zaman':\n # Find the weird thing. They tend to be ap or reuters, but generalized\n # just in case\n com = url.find('.com')\n slash = url[com + 4:].find('/')\n replaced_url = url.replace(url[com + 4:com + slash + 4], '')\n split = replaced_url.split('/')\n # This is nasty and hackish but it gets the jobs done.\n page_url = '/'.join(['/'.join(split[0:3]), 'world_' + split[-1]])\n else:\n page_url = url.encode('utf-8')\n\n return page_url", "def test_non_ideal_inputs():\n assert normalize_url(\"example.com\") == \"http://example.com/\"\n assert normalize_url(\"example.com/abc\") == \"http://example.com/abc\"\n assert normalize_url(\"//example.com/abc\") == \"http://example.com/abc\"", "def _parse_host(host: str) -> str:\n urlparse_host = urlsplit(host).hostname\n if urlparse_host:\n # In this case, host = https://xx.cloud.databricks.com\n return urlparse_host\n else:\n # In this case, host = xx.cloud.databricks.com\n return host", "def test_normalized_urls():\n assert normalize_url(\"http://example.com/\") == \"http://example.com/\"", "def safe_url_string(url, encoding='utf8', path_encoding='utf8'):\n # Python3's urlsplit() chokes on bytes input with non-ASCII chars,\n # so let's decode (to Unicode) using page encoding:\n # - it is assumed that a raw bytes input comes from a document\n # encoded with the supplied encoding (or UTF8 by default)\n # - if the supplied (or default) encoding chokes,\n # percent-encode offending bytes\n decoded = to_unicode(url, encoding=encoding, errors='percentencode')\n parts = urlsplit(_ascii_tab_newline_re.sub('', decoded))\n\n # IDNA encoding can fail for too long labels (>63 characters)\n # or missing labels (e.g. 
http://.example.com)\n try:\n netloc = parts.netloc.encode('idna')\n except UnicodeError:\n netloc = parts.netloc\n\n # quote() in Python2 return type follows input type;\n # quote() in Python3 always returns Unicode (native str)\n return urlunsplit((\n to_native_str(parts.scheme),\n to_native_str(netloc).rstrip(':'),\n\n # default encoding for path component SHOULD be UTF-8\n quote(to_bytes(parts.path, path_encoding), _safe_chars),\n\n # encoding of query and fragment follows page encoding\n # or form-charset (if known and passed)\n quote(to_bytes(parts.query, encoding), _safe_chars),\n quote(to_bytes(parts.fragment, encoding), _safe_chars),\n ))", "def convert_single_relation_url_to_simplified_format(relation_url):\n relation_url = relation_url.strip()\n prefix = 'www.freebase.com/'\n if not relation_url.startswith(prefix):\n raise Exception(\"Invalid format of relation '{}', expected prefix '{}'\".format(relation_url, prefix))\n return relation_url[len(prefix):].replace('/', '.').strip()", "def test_path_percent_encoding():\n assert (normalize_url(\"http://example.com/hello world{}\") ==\n \"http://example.com/hello%20world%7B%7D\")", "def clean_params(self, url):\n if isinstance(url, unicode):\n url = url.encode(\"utf-8\")\n parts = list(urlparse.urlsplit(url))\n if not parts[3]:\n return url\n query = urlparse.parse_qsl(parts[3])\n query = [q for q in query if self._is_param_allowed(*q)]\n if query:\n parts[3] = urllib.urlencode(query)\n else:\n parts[3] = ''\n return urlparse.urlunsplit(parts).decode(\"utf-8\")", "def make_clean_url(url):\n return urlparse.urldefrag(url)[0]", "def parse_url(url):\n url = urllib.parse.urlparse(url)\n query = urllib.parse.parse_qs(url.query)\n query_ = query.get('dn', query.get('title', ''))[0]\n if url.scheme == \"magnet\":\n return \"magnet:?xt={}\".format(query['xt'][0]), query_\n return \"http://{}{}{}\".format(*url[0:3]), query_", "def escaped_url(url):\n f = furl.furl(url)\n f.username = None\n f.password = None\n return f.tostr()", "def make_url_safe(url):\n if not urlparse(url).scheme:\n url = \"http://\" + url\n\n parsed_url = urlparse(url)\n\n safe_path = urls.url_fix(parsed_url.path)\n\n return parsed_url.scheme + '://' + parsed_url.netloc + safe_path", "def test_lower_case():\n assert normalize_url(\"HTTP://examPle.cOm/\") == \"http://example.com/\"\n assert normalize_url(\"http://example.com/A\") == \"http://example.com/A\"", "def oauth_url_sanitize(url, force_secure=True):\n scheme, netloc, path, params, query, fragment = urlparse_normalized(url)\n query = urlencode_s(query_params_sanitize(query))\n if force_secure and scheme != \"https\":\n raise InsecureOAuthUrlError(\"OAuth 1.0 specification requires the use of SSL/TLS for inter-server communication.\")\n elif not force_secure and scheme != \"https\":\n logging.warning(\"CAUTION: RFC specification requires the use of SSL/TLS for credential requests.\")\n return urlunparse((scheme, netloc, path, params, query, None))", "def test_sanitized_hostname(self):\n value = \" ../ ../some/dubious/hostname \"\n response = clean.hostname(value)\n assert response == \"somedubioushostname\"", "def process_url(url: str) -> str:\n split_url = urlsplit(url.strip())\n if split_url.scheme == 'amqp+ssl':\n split_url = split_url._replace(scheme='amqps')\n\n if ((not split_url.username or not split_url.password) and\n 'username' in config and 'password' in config):\n user_pass = f\"{config['username']}:{config['password']}@\"\n new_netloc = user_pass + split_url.netloc\n split_url = 
split_url._replace(netloc=new_netloc)\n\n return urlunsplit(split_url)", "def clean_url(url):\n for noisy_url in noisy_urls:\n url = str(url).replace(noisy_url,\"\").lower()\n return url", "def url_fix_host(urlparts):\n # if not urlparts[1]:\n # urlparts[2] = urllib.unquote(urlparts[2])\n # return False\n userpass, netloc = urllib.splituser(urlparts[1])\n if userpass:\n userpass = urllib.unquote(userpass)\n netloc, is_idn = idna_encode(urllib.unquote(netloc).lower())\n # a leading backslash in path causes urlsplit() to add the\n # path components up to the first slash to host\n # try to find this case...\n i = netloc.find(\"\\\\\")\n if i != -1:\n # ...and fix it by prepending the misplaced components to the path\n comps = netloc[i:] # note: still has leading backslash\n if not urlparts[2] or urlparts[2] == '/':\n urlparts[2] = comps\n else:\n urlparts[2] = \"%s%s\" % (comps, urllib.unquote(urlparts[2]))\n netloc = netloc[:i]\n else:\n # a leading ? in path causes urlsplit() to add the query to the\n # host name\n i = netloc.find(\"?\")\n if i != -1:\n netloc, urlparts[3] = netloc.split('?', 1)\n # path\n urlparts[2] = urllib.unquote(urlparts[2])\n if userpass and userpass != ':':\n # append AT for easy concatenation\n userpass += \"@\"\n else:\n userpass = \"\"\n\n if urlparts[0] in default_ports:\n dport = default_ports[urlparts[0]]\n host, port = splitport(netloc, port=dport)\n\n host = host.rstrip('. ')\n if port != dport:\n host = \"%s:%d\" % (host, port)\n netloc = host\n urlparts[1] = userpass + netloc\n return is_idn", "def format_url(url):\n no_scheme = url.split('://', 1)[-1]\n return '[{0}]({1})'.format(no_scheme, url)", "def encode_url(url):\n\treturn url.replace(' ', '_')", "def test_non_urls():\n assert normalize_url(\"\") is None\n assert normalize_url(\"abc xyz\") is None\n assert normalize_url(\"asb#abc\") is None\n assert normalize_url(\"Яндекс.рф\") is not None\n assert normalize_url(\"google.blog\") is not None\n assert normalize_url(\"http//google.com\") is None\n assert normalize_url(\"http://user@pass:example.com\") is None", "def normalize(seed_url, link):\n link, _ = urldefrag(link) # remove hash to avoid duplicates\n return urljoin(seed_url, link)", "def parseURL(url):\n\n\n scheme, host, path, params, query, hash = urlparse(url)\n if not path: path = \"/\"\n\n args = parse_qs(query)\n\n escapedArgs = {}\n for name in args:\n if len(args[name]) == 1:\n escapedArgs[unquote(name)] = unquote(args[name][0])\n else:\n escapedArgs[unquote(name)] = escapedSet = []\n for item in args[name]:\n escapedSet.append(unquote(item))\n\n return host, path, params, escapedArgs", "def quote_uri(uri):\n import urlparse\n import urllib\n\n up=urlparse.urlparse(uri)\n np=urllib.quote(up[2])\n return urlparse.urlunparse((up[0],up[1],np,up[3],up[4],up[5]))", "def fix_url(cls, url: str):\r\n ...", "def clean_url(url: str, keys: List[str]) -> str:\n u = urlparse(url)\n query = parse_qs(u.query, keep_blank_values=True)\n\n for key in keys:\n query.pop(key, None)\n\n u = u._replace(query=urlencode(query, True))\n \n return urlunparse(u)", "def prettify_url(url):\n\n if not isinstance(url, urllib.parse.ParseResult):\n url = urllib.parse.urlparse(url)\n urlstr = url.hostname + url.path\n return urlstr", "def _format_url(self, hostname):\n share_name = self.share_name\n if isinstance(share_name, six.text_type):\n share_name = share_name.encode('UTF-8')\n return \"{}://{}/{}/{}{}\".format(\n self.scheme,\n hostname,\n quote(share_name),\n \"/\".join([quote(p, safe='~') for p in 
self.file_path]),\n self._query_str)", "def scrub_url(self, url):\n return self.__url_scrubber(url)", "def url_prepare(url):\n if 'http://' in url or 'https://' in url:\n return url\n try:\n if requests.get('https://' + url):\n return 'https://' + url\n except Exception as ex:\n pprint(ex)\n return 'http://' + url", "def test_percent_encode_querystring():\n assert (normalize_url(\"http://example.com/?a=hello{}\") ==\n \"http://example.com/?a=hello%7B%7D\")", "def normalize_url(node):\n if not node:\n node = DEFAULT_NODE\n elif '://' not in node:\n node = '//{}'.format(node)\n parts = urlparse(node, scheme='http', allow_fragments=False)\n port = parts.port if parts.port else _get_default_port(parts.scheme)\n netloc = '{}:{}'.format(parts.hostname, port)\n return urlunparse((parts.scheme, netloc, parts.path, '', '', ''))", "def is_url_quoted(url):\n try:\n url_ = urlunquote(url)\n return url != url_\n except: # problem with unquoting -- then it must be wasn't quoted (correctly)\n return False", "def transform_url_without_encode(result):\n import re\n result = re.sub('//', '/', result)\n result = re.sub('/', '//', result, count=1)\n return result", "def sanitizeUrl(url):\n return url.split('?')[0]", "def normalize_filename(url):\n fname = url.replace('file://', '')\n if os.sep != '/' and not os.path.exists(fname):\n fname = fname.lstrip('/')\n return fname", "def clean_link(self, url: str) -> str:\n return self.CLEAN_REGEX.sub(lambda match: f\"%{ord(match.group(0)):02x}\", url)", "def parse_url(url, encoding=None):\n if isinstance(url, ParseResult):\n return url\n return urlparse(to_unicode(url, encoding))", "def url_validator_callback(url: str) -> str:\n if url is None:\n return url\n\n url = url.strip()\n try:\n result = urlparse(url)\n if result.scheme and result.netloc:\n return url\n except:\n pass\n raise typer.BadParameter(\"Please supply a valid url\")", "def CanonicalUrl(self, u:str)->str:\n return u", "def correct_url(self, url: str) -> str:\n # check if url has \"http://\" prefix\n if \"http://\" not in url:\n if \"https://\" not in url:\n url = \"http://\" + url\n url_split = url.split(\"/\")\n # correct URL as needed for script\n if url_split[4] == '':\n raise URLError('No Story ID given')\n if len(url_split) == 5:\n url_split.append('')\n else:\n raise URLError('Unknown URL format')\n url = '/'.join(url_split)\n url = urljoin(url, ' ')[0:-2]\n return url", "def url_parse_query(query, encoding=None):\n if isinstance(query, unicode):\n if encoding is None:\n encoding = url_encoding\n query = query.encode(encoding, 'ignore')\n query = query.replace('?', '')\n\n l = set()\n for k, v, sep in parse_qsl(query, True):\n k = url_quote_part(k, '/-:,;')\n if not k:\n continue\n if v:\n v = url_quote_part(v, '/-:,;')\n l.add(\"%s=%s\" % (k, v))\n elif v is None:\n l.add(\"%s\" % k)\n else:\n # some sites do not work when the equal sign is missing\n l.add(\"%s=\" % k)\n query = '&'.join(sorted(l))\n return query", "def clean_blog_url(raw_url):\n # Example urls that need handling:\n # http://jessicaanner.tumblr.com/post/113520547711/animated-versions-here-main-view-webm-gif\n # http://havesomemoore.tumblr.com/\n # http://pwnypony.com/\n # (?:https?://)([^#/'\"]+)\n stripped_url = raw_url.strip(\"\\r\\n\\t \")\n logging.debug(\"stripped_url: \"+repr(stripped_url))\n blog_url_regex = \"\"\"(?:https?://)?([^#/'\"]+)\"\"\"\n blog_url_search = re.search(blog_url_regex, stripped_url, re.IGNORECASE)\n if blog_url_search:\n blog_url = blog_url_search.group(1)\n return blog_url\n else:\n 
logging.error(\"Can't parse list item! Skipping it.\")\n logging.error(\"clean_blog_url()\"+\" \"+\"raw_url\"+\": \"+repr(raw_url))\n return \"\"", "def processUrl(url):\n domain = 'http://www.gsmarena.com/'\n if domain not in url:\n url = urllib.parse.urljoin(domain, url)\n return url", "def normalize_fqdn(fqdn):\n if not fqdn:\n return None\n\n if fqdn.endswith('/'):\n fqdn = fqdn.strip('/')\n\n # bare fqdn, fallback to http://\n if not fqdn.startswith('http'):\n fqdn = \"http://%s\" % fqdn\n return fqdn", "def remove_url(txt):\n\n return \" \".join(re.sub(\"([^0-9A-Za-z \\t])|(\\w+:\\/\\/\\S+)\", \"\", txt).split())", "def format_internal_url(url):\n\n url = url.split('\"')[-2]\n\n if not url.startswith('https:'):\n url = (\n 'https://medium.com{}'.format(url) if not url.startswith('//medium.com')\n else 'https:{}'.format(url))\n\n return url", "def test_remove_extra_slash():\n # TODO: Should we actually do this?\n # TODO: See https://webmasters.stackexchange.com/questions/8354/what-does-the-double-slash-mean-in-urls/8381#8381\n assert (normalize_url(\"http://www.example.com/foo//bar.html\") ==\n \"http://www.example.com/foo/bar.html\")\n assert(normalize_url(\"http://example.com///abc\") ==\n \"http://example.com/abc\")", "def _urlnorm(self, uri):\r\n (scheme, authority, path, query, fragment) = parse_uri(uri)\r\n if not scheme or not authority:\r\n raise Exception(\"Only absolute URIs are allowed. uri = %s\" % uri)\r\n authority = authority.lower()\r\n scheme = scheme.lower()\r\n if not path:\r\n path = \"/\"\r\n\r\n # Could do syntax based normalization of the URI before\r\n # computing the digest. See Section 6.2.2 of Std 66.\r\n request_uri = query and \"?\".join([path, query]) or path\r\n scheme = scheme.lower()\r\n defrag_uri = scheme + \"://\" + authority + request_uri\r\n\r\n return defrag_uri", "def fix_website(raw_website):\n if url_is_good(raw_website):\n return raw_website\n else:\n return \"http://\" + raw_website" ]
[ "0.7231952", "0.71399534", "0.7058466", "0.7057212", "0.70551896", "0.70489985", "0.6984675", "0.68768114", "0.6773047", "0.66777515", "0.66775155", "0.66144353", "0.65716875", "0.65457124", "0.65359807", "0.6512017", "0.65024763", "0.6485514", "0.6482304", "0.64756083", "0.6472745", "0.6470295", "0.645007", "0.6434804", "0.6411281", "0.6389439", "0.63872504", "0.63856673", "0.6347613", "0.6342189", "0.6333828", "0.63229936", "0.6303508", "0.6280493", "0.6275249", "0.6259468", "0.62470955", "0.62449086", "0.62321544", "0.623209", "0.6216469", "0.6208346", "0.620388", "0.6166504", "0.6161701", "0.6156965", "0.613736", "0.61255234", "0.61058456", "0.6088612", "0.6081407", "0.60649204", "0.6060361", "0.6047737", "0.6045372", "0.6038841", "0.6021199", "0.6012919", "0.6003984", "0.5991895", "0.5982442", "0.5972331", "0.5964327", "0.59412783", "0.59367067", "0.5936353", "0.5922443", "0.59154", "0.5913676", "0.5903287", "0.58927894", "0.58889484", "0.5883447", "0.58742523", "0.5872663", "0.5864805", "0.58576715", "0.58539236", "0.5847124", "0.5839423", "0.58272374", "0.5826237", "0.58056325", "0.579839", "0.57935756", "0.5782349", "0.57818055", "0.5764429", "0.5728437", "0.5718641", "0.57131404", "0.57123053", "0.57085717", "0.5704614", "0.5697403", "0.5695543", "0.5689609", "0.56871986", "0.56710297", "0.56673825" ]
0.6782342
8
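A minimal usage sketch for the `url_norm` document above. This is hypothetical: the function is Python 2 style (it tests for the `unicode` type and uses `urllib.unquote`), and its helpers (`fix_missing_scheme`, `url_fix_host`, `url_parse_query`, `urlunsplit`) live elsewhere in the source module, so the expected output shown is the intended behavior rather than a verified result.

```python
# Hypothetical call; url_norm returns a (normalized_url, is_idn) tuple.
# Assumes the module helpers resolve a missing scheme, lowercase and
# IDNA-encode the host, and sort/re-quote the query string.
normalized, is_idn = url_norm("HTTP://Example.COM/a/./b/../c?b=2&a=1",
                              strip=True)
# Expected (approximate): ("http://example.com/a/c?a=1&b=2", False)
```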
Remove all redundant segments from the given URL path.
def collapse_segments(path): # replace backslashes # note: this is _against_ the specification (which would require # backslashes to be left alone, and finally quoted with '%5C') # But replacing has several positive effects: # - Prevents path attacks on Windows systems (using \.. parent refs) # - Fixes bad URLs where users used backslashes instead of slashes. # This is a far more probable case than users having an intentional # backslash in the path name. if path.startswith('\\'): path = path.replace('\\', '/') # shrink multiple slashes to one slash path = _slashes_ro.sub("/", path) # collapse redundant path segments path = _thisdir_ro.sub("", path) path = _samedir_ro.sub("/", path) # collapse parent path segments # note: here we exploit the fact that the replacements happen # to be from left to right (see also _parentdir_ro above) newpath = _parentdir_ro.sub("/", path) while newpath != path: path = newpath newpath = _parentdir_ro.sub("/", path) # collapse parent path segments of relative paths # (ie. without leading slash) newpath = _relparentdir_ro.sub("", path) while newpath != path: path = newpath newpath = _relparentdir_ro.sub("", path) path = path.rstrip('.') return path
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def clean_path(path):\n return [endpoint for endpoint in path if len(endpoint) > 23]", "def _path_parts(path):\n # clean it up. this removes duplicate '/' characters and any that may\n # exist at the front or end of the path.\n return [pp for pp in path.split(\"/\") if pp]", "def _cleanup_path(path):\n return string.join(filter(None, string.split(path, '/')), '/')", "def clean_path(path: str) -> str:\n previous_path = \"\"\n next_path = path\n while next_path != previous_path:\n previous_path = next_path\n next_path = copy_annotations(path, next_path.replace(\"//\", \"/\"))\n while next_path.endswith(\"/\"):\n next_path = next_path[:-1]\n return next_path", "def EliminateSingleNodeSegments(self):\n sections_to_remove = [] # List of indices into self._section_list\n index = 0\n for each_section in self._section_list:\n if len(each_section.Nodes()) == 1:\n self.ReparentSectionChildren(each_section)\n sections_to_remove.append(index)\n index += 1\n sections_to_remove.reverse()\n for each_index in sections_to_remove:\n self._section_list.pop(each_index)\n self.RegenerateCaches()", "def CleanPaths(pathlist):\n for path1 in pathlist:\n for path2 in pathlist[::-1]:\n if path2[::-1] == path1:\n pathlist.remove(path2)\n break", "def clean_path(path):\n return resolved_path(path)", "def spath_stripoptions(spath):\n l = [comp.split(\"?\", 1)[0] for comp in spath.split(\"/\")]\n return \"/\".join(l)", "def remove_dot_segments(path):\n assert isinstance(path, bytes)\n new_path = b\"\"\n while path:\n if path.startswith(b\"../\"):\n path = path[3:]\n elif path.startswith(b\"./\"):\n path = path[2:]\n elif path.startswith(b\"/./\"):\n path = path[2:]\n elif path == b\"/.\":\n path = b\"/\"\n elif path.startswith(b\"/../\"):\n path = path[3:]\n new_path = new_path.rpartition(b\"/\")[0]\n elif path == b\"/..\":\n path = b\"/\"\n new_path = new_path.rpartition(b\"/\")[0]\n elif path in (b\".\", b\"..\"):\n path = b\"\"\n else:\n if path.startswith(b\"/\"):\n path = path[1:]\n new_path += b\"/\"\n seg, slash, path = path.partition(b\"/\")\n new_path += seg\n path = slash + path\n return new_path", "def _trim_path(path):\n if path.endswith(\"/\"):\n path = path[:-1] # remove / at the end\n \n return path", "def strip(url):\r\n split = list(urlsplit(url))\r\n split[4]=''\r\n return urlunsplit(split)", "def strip_path(self):\n return self.path.replace('/', '')", "def _cleanpath(self, path):\n \n slashes = self.remotepathsep*2\n while slashes in path:\n path = path.replace(slashes,self.remotepathsep)\n \n if path.endswith(self.remotepathsep):\n path = path[:-1]\n \n return path", "def _cleanpath(self, path):\n \n slashes = self.remotepathsep*2\n while slashes in path:\n path = path.replace(slashes,self.remotepathsep)\n \n if path.endswith(self.remotepathsep):\n path = path[:-1]\n \n return path", "def remove(path):", "def clean_url(url):\n scheme, netloc, path, query, fragment = url_parse.urlsplit(url)\n path = url_parse.quote(path)\n url = url_parse.urlunsplit((scheme, netloc, path, query, fragment))\n return url", "def url_clean(path):\n return path[path.find('/'+settings.URL_ADMIN_SEP):]", "def _normalize_path(path):\n\n i = 0\n for c in path:\n if c != \"/\":\n break\n i = i + 1\n\n if i:\n return path[(i - 1) :]\n\n return path", "def remove_prefix(self, path):\n self.log.debug(\n f\"S3FS.remove_prefix: self.prefix_: {self.prefix_} path: {path}\"\n )\n if isinstance(path, str):\n path = (\n path[len(self.prefix_) :]\n if path.startswith(self.prefix_)\n else path\n )\n path = path[1:] if 
path.startswith(self.delimiter) else path\n return path\n if isinstance(path, (list, tuple)):\n path = [\n p[len(self.prefix_) :] if p.startswith(self.prefix_) else p\n for p in path\n ]\n path = [p[1:] if p.startswith(self.delimiter) else p for p in path]\n return path", "def split_path(path):\n\n if type(path) != str:\n return []\n\n # replace multiple occurrences of \"/\" with just one,\n # i.e. \"page1//page2///page3\" -> \"page1/page2/page3\"\n path = re.sub('/+', '/', path)\n path = path.split(\"/\") # form a list of path steps\n path = [x.lower() for x in path if x != \"\"] # filter out empty strings, convert to lowercase\n\n return path", "def standardize_path(path):\n path.rstrip('/')\n if not path.startswith('.*'):\n path = '/' + path\n path = re.compile('/+').sub('/', path)\n return path", "def get_url_path(url):\n return filter(lambda x: x!='', url.split('/'))", "def remove_paths(self, test):\n ii = 0\n while ii < len(self.paths):\n if test(self.paths[ii]):\n self.paths.pop(ii)\n else:\n ii += 1\n return self", "def normalize(cls, url):\n # Always ignore the fragment\n scheme, netloc, path, query, _ = urlsplit(url)\n uri_relative = (None, None, path, query, None)\n uri_without_query = (scheme, netloc, path, None, None)\n uri_relative_without_query = (None, None, path, None, None)\n urls = [url]\n if query:\n urls.append(urlunsplit(uri_without_query))\n urls.append('~' + urlunsplit(uri_relative))\n if query:\n urls.append('~' + urlunsplit(uri_relative_without_query))\n return urls", "def _clean_path(self, pathToRemove, files):\n result = []\n for filePath in files:\n filePath = string.split(filePath, pathToRemove)\n filePath = filePath[1]\n filePath = string.split(filePath, os.sep)\n if filePath[0] == '':\n filePath.remove('')\n fileName = string.join(filePath, os.sep)\n result.append(fileName)\n return result", "def remove_constant_points(path):\n z = path\n while \"UD\" in z or \"DU\" in z or \"LR\" in z or \"RL\" in z:\n z = z.replace(\"UD\", \"\")\n z = z.replace(\"DU\", \"\")\n z = z.replace(\"LR\", \"\")\n z = z.replace(\"RL\", \"\")\n return z", "def trim_path(self, n=1):\n self._path = self._path[:-n]", "def __clean_path(self, path):\n matches = re.finditer(r'\\%\\(.*?\\)[diouxXeEfFgGcrsa]', path)\n for _, match in enumerate(matches):\n pattern = match.group()\n path = path.replace(pattern, u'')\n pos = path.find(os.path.sep*2)\n if pos>=0:\n path = path[:pos+1]\n return path", "def _resolve_dot_segments(path):\n segs = []\n\n for seg in path:\n if seg == u'.':\n pass\n elif seg == u'..':\n if segs:\n segs.pop()\n else:\n segs.append(seg)\n\n if list(path[-1:]) in ([u'.'], [u'..']):\n segs.append(u'')\n\n return segs", "def clean_url(url: str) -> str:\n r = urlparse(url)\n parts = list(r)\n # Add a / to the end of the path if it isn't there\n if not parts[2].endswith(\"/\"):\n parts[2] += \"/\"\n return urlunparse(parts)", "def pclean(self):\n path_list_pruned = []\n for p in self.path_list:\n if not os.path.exists(p):\n print(\"Does not exist! \", p)\n elif p in path_list_pruned:\n print(\"Duplicate found \", p)\n else:\n p = os.path.normpath(p) # remove double slashes and stuff\n path_list_pruned.append(p)\n\n self.path_list = path_list_pruned\n self.pupdate()", "def remove_last_part_of_url(category_url):\n return \"/\".join(category_url.split(\"/\")[:-1])", "def clean_path(self, path):\n if('.flaccuesplit.' 
in path):\n path, flaccue_details = path.split('.flaccuesplit.')\n if(path.startswith(self.mount)):\n # Strip off the mount point.\n path = path[len(self.mount):]\n return path", "def route_removed(self, prefix, next_hop, as_path):", "def remove_trailing_slash(path):\n if len(path) > 0:\n if path[len(path) - 1] == \"/\":\n return path[0:-1]\n else:\n return path\n else:\n return path", "def delete_loops(self, in_path):\n res_path = list(in_path)\n for element in res_path:\n coincidences = self.get_coincidence_indices(res_path, element)\n #Reversa la lista para eliminar elementos de atras hacia adelante de la lista\n coincidences.reverse()\n for i, coincidence in enumerate(coincidences):\n if not i == len(coincidences)-1:\n res_path[coincidences[i+1]:coincidence] = []\n\n return res_path", "def remove_upper_level_references(path):\n return os.path.normpath(\"/\" + path).lstrip(\"/\")", "def _clean_url(self, url):\n return \"\".join(url.split(\"?\")[:1])", "def remove_cat(self, path: Path):\n if not self.active:\n return\n if path is None:\n return\n for i, coord in enumerate(path.path):\n self.cat[coord[1]][coord[0]].remove((path.identifier, i))", "def _remove_httpd_transform(self, filepath: str) -> None:\n\n remove_basenames = self.parser_paths[os.path.dirname(filepath)]\n remove_dirname = os.path.dirname(filepath)\n for name in remove_basenames:\n remove_path = remove_dirname + \"/\" + name\n remove_inc = self.aug.match(\n \"/augeas/load/Httpd/incl [. ='%s']\" % remove_path)\n self.aug.remove(remove_inc[0])\n self.parser_paths.pop(remove_dirname)", "def remove_by_path(self, path):\n if path.startswith(collection.Collection.CONTENT_PATH):\n if path.endswith(\n '/{}'.format(collection.Collection.BLUEPRINT_PATH)):\n # If this is a blueprint then remove the entire collection.\n col_path = path[len(collection.Collection.CONTENT_PATH):]\n # Get just the directory.\n col_path = os.path.split(col_path)[0]\n collection_path = col_path[1:] # Remove /\n with self._lock:\n if collection_path in self._cache:\n del self._cache[collection_path]\n else:\n # Search for an existing collection path.\n col_path = path[len(collection.Collection.CONTENT_PATH):]\n col_path = os.path.split(col_path)[0]\n while col_path != os.sep:\n collection_path = col_path[1:]\n with self._lock:\n if collection_path in self._cache:\n # Do a 'wildcard' match on the path to remove all\n # locales.\n generic_key = CollectionCache.generate_cache_key(\n path, '')\n for key in self._cache[collection_path]['docs'].keys():\n if key.startswith(generic_key):\n del self._cache[\n collection_path]['docs'][key]\n return\n col_path = os.path.split(col_path)[0]", "def unique_split_paths(paths):\n seen_paths = set()\n for path in paths:\n splits = path.split(\".\")\n split_length = len(splits)\n for i in xrange(1, split_length + 1):\n join = \".\".join(splits[:i])\n if join not in seen_paths:\n seen_paths.add(join)\n yield join", "def _ExtractPathParamsFromRouteList(route_comps: Collection[str]) -> Set[str]:\n return set(filter(_IsPathParameter, route_comps))", "def test_remove_dot_segments():\n assert (normalize_url(\"http://www.example.com/../a/b/../c/./d.html\") ==\n \"http://www.example.com/a/c/d.html\")", "def url_query_cleaner(url, parameterlist=(), sep='&', kvsep='=', remove=False, unique=True, keep_fragments=False):\n\n if isinstance(parameterlist, (six.text_type, bytes)):\n parameterlist = [parameterlist]\n url, fragment = urldefrag(url)\n base, _, query = url.partition('?')\n seen = set()\n querylist = []\n for ksv in 
query.split(sep):\n if not ksv:\n continue\n k, _, _ = ksv.partition(kvsep)\n if unique and k in seen:\n continue\n elif remove and k in parameterlist:\n continue\n elif not remove and k not in parameterlist:\n continue\n else:\n querylist.append(ksv)\n seen.add(k)\n url = '?'.join([base, sep.join(querylist)]) if querylist else base\n if keep_fragments:\n url += '#' + fragment\n return url", "def compress_path(path):\r\n \r\n new_path = path\r\n i = 0\r\n while i < len(new_path)-2:\r\n x1,y1 = new_path[i]\r\n x2,y2 = new_path[i+1]\r\n x3,y3 = new_path[i+2]\r\n \r\n if (((x1 == x2) and (x2 == x3)) or ((y1 == y2) and (y2 == y3))):\r\n new_path.pop(i+1)\r\n else:\r\n i = i + 1\r\n return new_path", "def merge_segments(lst):\n ii = 0\n while True:\n jj = ii + 1\n if len(lst) <= jj:\n return lst\n seg1 = lst[ii]\n seg2 = lst[jj]\n if seg1.merge(seg2):\n if seg2.empty():\n del lst[jj]\n else:\n ii += 1\n else:\n ii += 1\n return lst", "def flatten_path(path):\n return path.split(\"/\")[-1]", "def clean_url_path(markup):\n\n soup = BeautifulSoup(markup, \"html.parser\")\n elements = soup.find_all('a')\n\n for url in elements:\n url_href = url.get('href')\n if url.string:\n url_string = url.string.replace('\\n', '').replace(' ', '')\n\n # Only clean links where the URL matches the string, without custom text inside.\n if url_string == url_href:\n url_parse = urllib.parse.urlparse(url_href)\n path = '{0}{1}'.format(url_parse.netloc.replace(\"www.\", \"\"), url_parse.path)\n url.string.replace_with(path)\n return soup.prettify(soup.original_encoding)", "def _keep_common_path(paths: Iterable[str]) -> str:\n common_path: Optional[str] = None\n for path in paths:\n if common_path is None:\n common_path = path\n continue\n if path.startswith(common_path):\n continue\n for i, c in enumerate(common_path):\n if i == len(path) or path[i] != c:\n common_prefix = common_path[:i]\n slash_index = common_prefix.rfind(\"/\")\n common_path = common_prefix[: slash_index + 1]\n break\n return common_path or \"\"", "def strip_fingerprint(serving_path):\n return FINGERPRINT_RE.sub(r'\\1\\3', serving_path)", "def strip_wpt_path(self, wpt_path):\n if self.is_wpt_path(wpt_path):\n return wpt_path[len(self.wpt_prefix()):]\n # Path is absolute or does not start with the prefix.\n # Assume the path already points to a valid WPT and pass through.\n return wpt_path", "def prune_path(self, path):\n while len(path.poses) >= 2:\n pose0 = pose322(r2g(path.poses[0].pose))\n pose1 = pose322(r2g(path.poses[1].pose))\n d0 = np.linalg.norm(g2n(pose0.between(self.current_frame.pose)))\n d1 = np.linalg.norm(g2n(pose1.between(self.current_frame.pose)))\n if d1 < d0:\n path.poses.pop(0)\n else:\n break\n return path", "def strip_path(path):\n name_re = re.compile(\"[^/]*\\.([a-z]+)$\")\n return name_re.search(path).group(0)", "def strip_beginning_slashes(url):\n find = re.search(r\"^/+\", url)\n if find:\n url = re.sub(find.group(0), \"\", url)\n return url", "def __split_path(path: str) -> List[str]:\n return [part for part in path.split('/') if part] # Splits path at '/', handles extra slashes in the process", "def urlunsplit(urlparts):\n res = urlparse.urlunsplit(urlparts)\n if os.name == 'nt' and urlparts[0] == 'file' and '|' not in urlparts[2]:\n # UNC paths must have 4 slashes: 'file:////server/path'\n # Depending on the path in urlparts[2], urlparse.urlunsplit()\n # left only two or three slashes. 
This is fixed below\n repl = 'file://' if urlparts[2].startswith('//') else 'file:/'\n res = res.replace('file:', repl)\n return res", "def sanatize_path(self, path):\n # Remove extra whitespace\n path = path.strip()\n\n # Remove slash from end of path\n path = path.rstrip(os.sep)\n\n return path", "def _remove_path_head(path, head):\n # Bugfix 13 Oct 2017: path.replace(head,'') will remove head from everywhere in the path. This\n # is especially problematic if the user gives the local dir as \".\" (i.e. the current directory)\n # because it will remove periods from filenames\n\n # Find the head at the beginning of the path only. Escape any characters in head that have special\n # meaning in a regular expression (e.g. \".\" means \"any character\")\n head_regex = '^{}'.format(re.escape(head))\n path = re.sub(head_regex, '', path)\n if path.startswith('/'):\n path = path[1:]\n\n return path", "def test_remove_extra_slash():\n # TODO: Should we actually do this?\n # TODO: See https://webmasters.stackexchange.com/questions/8354/what-does-the-double-slash-mean-in-urls/8381#8381\n assert (normalize_url(\"http://www.example.com/foo//bar.html\") ==\n \"http://www.example.com/foo/bar.html\")\n assert(normalize_url(\"http://example.com///abc\") ==\n \"http://example.com/abc\")", "def remove_root(root, paths):\r\n return [pth.replace(root + '/', '') for pth in paths]", "def _clean_paths(paths):\n\n\tclean_paths = {key: np.concatenate([path[key] for path in paths]) for key in paths[0].keys()}\n\n\treturn clean_paths", "def _delete_cookies (self, path):\n head, tail = os.path.split(path)\n for subdir, dirs, files in os.walk(head):\n for file in files:\n if tail in file:\n os.remove(os.path.join(subdir, file))", "def pruneURL(url):\n match = URL_GROUPER.match(url)\n if match is None:\n return url\n else:\n url_parts = match.groupdict()\n protocol = url_parts['protocol']\n if protocol is None:\n protocol = ''\n tail = url_parts['tail']\n if tail is None:\n tail = ''\n return \"%s://%s\" % (protocol, tail)", "def split_all(path):\r\n components = []\r\n path = path.lstrip('/')\r\n while path:\r\n head, tail = os.path.split(path)\r\n if tail:\r\n components.insert(0, tail)\r\n elif head == path:\r\n components.insert(0, head)\r\n break\r\n path = head\r\n return components", "def split_path(self, path: str) -> List[str]:\n dirs = path.split('/')\n return list(filter(lambda x: x!='', dirs))", "def good_url(a, start_url):\n for i in range(len(a)):\n par=a[i].find('?')\n if par!=-1:\n a[i]=a[i][:par]\n anc=a[i].find('#')\n if anc!=-1:\n a[i]=a[i][:anc]\n if a[i]!='' and a[i][0]=='/':\n a[i]=str(start_url)+a[i][1:i]\n #print(a[i]) \n return list(set(a))", "def remove_extension(path):\n for extension in EXTENSIONS:\n path = path.replace(\".%s\" % extension, \"\")\n return path", "def splitpath(path):\n\n # FIXME perhaps call op.split repetitively would be better.\n #s = string.split( path, '/' ) # we work with fwd slash only inside.\n\n#We have decided to use all kind of separator\n s = []\n while True:\n first, second = op.split(path)\n s.append(second)\n if first == \"\":\n break\n else:\n path = first\n s.reverse()\n if len(s) == 1 and s[0] == \"\":\n s = []\n return s", "def test_drop_fragments():\n assert (normalize_url(\"http://example.com/a?b=1#frag\")\n == \"http://example.com/a?b=1\")\n assert (normalize_url(\"http://example.com/a?b=1#frag\", drop_fragments=False)\n == \"http://example.com/a?b=1#frag\")", "def remove_urls(text):\n pass", "def process_url(url):\n # only get url path, remove 
host,params.\n url = urlparse(url).path\n # url = list(url)\n # for i in range(len(url)):\n # if _is_punctuation(url[i]):\n # url[i] = \" \"\n # url = ''.join(url)\n # url = ' '.join(url.split())\n return url", "def make_clean_url(url):\n return urlparse.urldefrag(url)[0]", "def remove_segment(self, start_time):\n for seg in self._segments:\n if seg.get_start() == start_time:\n self._segments.remove(seg)\n return True\n return False", "def acyclic_sub_path(tree, path):\n for u, v in pairwise(reversed(path)):\n if v in tree.nodes and u not in tree.nodes:\n return path[path.index(v):]", "def split_path(self, path):\n path = path.strip(\"/\")\n return path.split(\"/\") if len(path) > 0 else []", "def fix_url_path(url: str) -> str:\n return url if url.endswith(\"/\") else url + \"/\"", "def noTrailingSlash(path):\n return path.split('/')[0]", "def delete_by_path(data: Dict[str, T], path: Sequence[str]):\n del get_by_path(data, path[:-1])[path[-1]]", "def path_only(self):\r\n path = urlparse.urlparse(self.path).path\r\n if path.endswith('/'):\r\n return path[:-1]\r\n else:\r\n return path", "def url_subpath(s):\n forbidden = forbidden_chars.intersection(s)\n if forbidden:\n forbidden = ''.join(sorted(forbidden))\n raise ValueError('%(s)r contains forbidden characters'\n ' (%(forbidden)r)'\n % locals())\n stripped = normpath(s).lstrip(sep)\n if stripped == curdir:\n return ''\n if sep != '/':\n return stripped.replace(sep, '/')\n return stripped", "def _normalise_last_slashes(url_segment):\n return url_segment if not url_segment.endswith(\"/\") else url_segment[:-1]", "def uncanonicalize(self, url):\n pass", "def extract_sub_urls(url):\n\n sub_urls = set()\n parsed_url = urllib.parse.urlparse(url)\n dirs = parsed_url.path.split(\"/\")\n\n # strip empty dirs constructed from the above split\n if dirs and not dirs[0]:\n dirs = dirs[1:]\n if dirs and not dirs[-1]:\n dirs = dirs[:-1]\n\n for i in range(0, len(dirs)-1):\n sub_url = parsed_url.scheme + \"://\" + parsed_url.netloc + \"/\"\n sub_url += \"/\".join(dirs[:i+1]) + \"/\"\n sub_urls.add(sub_url)\n\n return sub_urls", "def mutate_suffix(path, board):\n x, y = get_start(board)\n path_new = get_path_same_prefix(path, board)\n while not is_path_correct(x, y, path_new, board):\n path_new = get_path_same_prefix(path, board)\n return remove_constant_points(path_new)", "def sortPathSegments(self, pathSegments):\n pass", "def remove_pathways(self, pathways: list):\n # only filter the gene_set object\n for pathway_id in pathways:\n self.gene_sets.pop(pathway_id, None)\n self.gene_set_names.pop(pathway_id, None)\n self.gene_set_size.pop(pathway_id, None)\n self.n_curated.pop(pathway_id, None)\n self.n_interactors.pop(pathway_id, None)\n if len(self.interactors) > 0:\n self.interactors.pop(pathway_id, None)", "def cleanUrl(url):\n\turl_clean = url.replace(' ','%20')\n\t\"\"\" add /index.html where necessary \"\"\"\n\tif (url[-1:]=='/'):\n\t\turl_clean += 'index.html'\n\telif (url[-5:].find('.') == -1):\n\t\t url_clean += '/index.html'\n\treturn url_clean", "def fix_path(self):\n paths = self.data['path'].tolist()\n prefixes = [re.findall(r'[A-Z\\-0-9]+', path) for path in paths]\n prefix_good = [str(prefix[0]) + \".json\" for prefix in prefixes]\n self.data['path'] = prefix_good", "def unshorten(self, url):\n h = requests.get(url)\n stack = [i.url for i in h.history]\n stack.append(h.url)\n return stack", "def sunderPath(path):\n ret = []\n while True:\n h, t = os.path.split(path)\n if t:\n ret.append(t)\n if not h:\n break\n path = h\n return ret", "def 
clean_url(url):\n o = urlsplit(url)\n return \"{scheme}://{netloc}{path}\".format(\n scheme=o[0], netloc=o[1], path=o[2],\n )", "def clean_file_path(path):\r\n\r\n return path.split(\"/\")[-1]", "def _clear_query_params(self, route_path):\n route = self._find_route(route_path)\n # logging.info(\"Before:\", route.dependant.query_params)\n route.dependant.query_params = []\n # logging.info(\"After:\", route.dependant.query_params)", "def _sanitize_relative_path(self, path):\n last = None\n path = os.path.normpath(path)\n while path != last:\n last = path\n # Note: os.path.join treats '/' as os.sep on Windows\n path = path.lstrip(os.sep).lstrip('/')\n path = path.lstrip(os.pardir).lstrip('..')\n drive, path = os.path.splitdrive(path) # for Windows\n return path", "def clear_requests(self, path):\n body = {}\n body['path'] = path\n data = json.dumps(body)\n self._send_request(\"/clear\", data)", "def _strip_version(endpoint):\n if endpoint.endswith('/'):\n endpoint = endpoint[:-1]\n url_bits = endpoint.split('/')\n if re.match(r'v\\d+\\.?\\d*', url_bits[-1]):\n endpoint = '/'.join(url_bits[:-1])\n return endpoint", "def remove_previously_scanned(path):\n query = db_session.query(MediaFiles) \\\n .filter(MediaFiles.path.like(f'{path}%'))\n query.delete(synchronize_session='fetch')\n db_session.commit()\n return query.count()", "def _pretty_json_path(self, path):\r\n segments = path.split('.')\r\n\r\n def builder(prev, cur):\r\n if re.match(cur):\r\n return \"{0}[]\".format(prev)\r\n return \"{0}.{1}\".format(prev, cur)\r\n\r\n segments = reduce(builder, segments)\r\n return segments", "def remove_id(url):\n u = urlparse(url)\n query = parse_qs(u.query, keep_blank_values=True)\n query.pop(\"eo_id\", None)\n u = u._replace(query=urlencode(query, True))\n return urlunparse(u)" ]
[ "0.6450385", "0.6239961", "0.5978938", "0.59626806", "0.5718302", "0.569636", "0.56911194", "0.56904846", "0.5625951", "0.55506724", "0.5532755", "0.54659814", "0.54363185", "0.54363185", "0.5388287", "0.53852487", "0.53558075", "0.5342201", "0.53362674", "0.5325359", "0.5310305", "0.530085", "0.52924", "0.5276852", "0.5266917", "0.52559096", "0.5216218", "0.52160007", "0.52144563", "0.5185753", "0.5165522", "0.51536775", "0.51512915", "0.51481116", "0.5146049", "0.51282865", "0.5123535", "0.51004034", "0.5060559", "0.5034054", "0.5021532", "0.50201005", "0.50106674", "0.5004418", "0.49944648", "0.4993061", "0.4991898", "0.49808973", "0.4968381", "0.49544874", "0.49521583", "0.4932016", "0.49023616", "0.48937377", "0.48937133", "0.48839355", "0.48810118", "0.4879786", "0.48736086", "0.48729736", "0.4866327", "0.48606163", "0.48545882", "0.4853195", "0.48236954", "0.48207", "0.47996774", "0.47925445", "0.47853625", "0.47849065", "0.477991", "0.47761336", "0.4756817", "0.47529027", "0.4749839", "0.47447497", "0.47441947", "0.474215", "0.4740738", "0.4724604", "0.47229218", "0.472123", "0.47032657", "0.4701439", "0.46999398", "0.46971554", "0.46944028", "0.46934855", "0.46929866", "0.46923298", "0.4691592", "0.4687457", "0.4686068", "0.46796364", "0.46781963", "0.46781376", "0.46695197", "0.46678203", "0.46669817", "0.46592057" ]
0.5898762
4
Wrap urllib.quote() to support unicode strings. A unicode string is first converted to UTF-8. After that urllib.quote() is called.
def url_quote_part(s, safechars='/', encoding=None):
    if isinstance(s, unicode):
        if encoding is None:
            encoding = url_encoding
        s = s.encode(encoding, 'ignore')
    return urllib.quote(s, safechars)
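A minimal usage sketch for the function above (hypothetical: Python 2, since the code relies on the `unicode` type and `urllib.quote`; `url_encoding` is assumed to be a module-level default such as 'utf-8'):

    # Hypothetical Python 2 usage; url_encoding is an assumed module-level default.
    import urllib
    url_encoding = 'utf-8'

    print(url_quote_part(u'/a b/\xe9'))   # '/a%20b/%C3%A9' -- '/' stays unquoted
    print(url_quote_part('plain ascii'))  # 'plain%20ascii' -- byte strings pass straight to quote()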
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def unicode_quote(value):\n return quote(value.encode('utf-8'))", "def unicode_quote(s):\n if isinstance(s, unicode):\n return quote(s.encode(\"utf-8\"))\n else:\n return quote(str(s))", "def _quote(src, encoding=\"utf-8\"):\n if isinstance(src, unicode):\n src = src.encode(encoding)\n return urllib.quote(src)", "def urlQuote(string):\r\n return quote(string.encode(\"utf-8\"))", "def do_urlencode(value):\n return urllib.quote(value.encode('utf8'))", "def escapeEncode(s: unicode) -> unicode:\n ...", "def quote(value, *args, **kwargs):\n return parse.quote(encode(value, *args, **kwargs))", "def escape_quote(text):\n return text_type(text).replace('&', '&amp;').replace('<', '&lt;').replace('>', '&gt;').replace('\"', '&quot;')", "def urllib_quote(inputstring, safestring=\"/\"):\r\n\r\n if type(inputstring) is not str:\r\n raise TypeError(\"urllib_quote's inputstring parameter must be a string, not '\"+str(type(inputstring))+\"'\")\r\n if type(safestring) is not str:\r\n raise TypeError(\"urllib_quote's safestring parameter must be a string, not '\"+str(type(safestring))+\"'\")\r\n \r\n\r\n resultstr = \"\"\r\n\r\n # We go through each character in the string; if it's not in [0-9a-zA-Z]\r\n # we wrap it.\r\n\r\n safeset = set(safestring)\r\n\r\n for char in inputstring:\r\n asciicode = ord(char)\r\n if (asciicode >= ord(\"0\") and asciicode <= ord(\"9\")) or \\\r\n (asciicode >= ord(\"A\") and asciicode <= ord(\"Z\")) or \\\r\n (asciicode >= ord(\"a\") and asciicode <= ord(\"z\")) or \\\r\n asciicode == ord(\"_\") or asciicode == ord(\".\") or \\\r\n asciicode == ord(\"-\") or char in safeset:\r\n resultstr += char\r\n else:\r\n resultstr += \"%%%02X\" % asciicode\r\n\r\n return resultstr", "def unicode_quote_plus(value):\n return quote_plus(value.encode('utf-8'))", "def url_escape(value, plus=True):\r\n quote = urllib_parse.quote_plus if plus else urllib_parse.quote\r\n return quote(utf8(value))", "def _encode_safely(s):\n if isinstance(s, unicode):\n s = s.encode('utf-8')\n return s", "def _unicode_encode(self, s):\n if isinstance(s, unicode):\n return s.encode('utf-8')\n else:\n return s", "def _escape(arg):\n if type(arg) == str:\n arg = \"'%s'\" % _escape_string(arg)\n elif type(arg) == unicode:\n arg = \"'%s'\" % _escape_string(arg).encode('utf8')\n elif arg is None:\n arg = 'null'\n else:\n arg = str(arg)\n return arg", "def aws_urlquote(value):\n if isinstance(value, unicode):\n value = value.encode(\"utf-8\")\n return quote(value, \"/\")", "def safe(e):\n if PY2 and isinstance(e, unicode):\n return quote(e.encode('utf-8'), safe='')\n else:\n return quote(str(e), safe='')", "def _encode_url(data: str) -> str:\n return urllib.parse.quote(data, safe=\"\")", "def _unquote(s, encoding='utf-8'):\n return urllib.unquote(s).decode(encoding)", "def _quote(uri, options):\n suri = uri.strip()\n for c in _warnChars:\n if suri.find(c) != -1:\n if options != None:\n options.comment_graph.add_warning('Unusual character in uri:%s; possible error?' 
% suri)\n break\n return urllib.quote(suri, _unquotedChars)", "def UrlEscape(text):\n return urllib.quote(text, safe='~-._')", "def _quote(self, arg):\n arg = arg.replace('\\\\', '\\\\\\\\')\n arg = arg.replace('\"', '\\\\\"')\n return '\"%s\"' % arg", "def quote(s, safe='/', encoding=None, errors=None):\n # fastpath\n if not s:\n return s\n\n if encoding is not None or isinstance(s, unicode):\n if encoding is None:\n encoding = 'utf-8'\n if errors is None:\n errors = 'strict'\n s = s.encode(encoding, errors)\n if isinstance(safe, unicode):\n # Normalize 'safe' by converting to str and removing non-ASCII chars\n safe = safe.encode('ascii', 'ignore')\n\n cachekey = (safe, always_safe)\n try:\n (quoter, safe) = _safe_quoters[cachekey]\n except KeyError:\n safe_map = _safe_map.copy()\n safe_map.update([(c, c) for c in safe])\n quoter = safe_map.__getitem__\n safe = always_safe + safe\n _safe_quoters[cachekey] = (quoter, safe)\n if not s.rstrip(safe):\n return s\n return ''.join(map(quoter, s))", "def b2_url_encode(s):\n return quote(s.encode('utf-8'))", "def unicode2utf8(arg):\n\n try:\n if isinstance(arg, unicode):\n return arg.encode('utf-8')\n except NameError:\n pass # Python 3\n return arg", "def _url_encode(self, text):\n try:\n return (urllib.quote(text.replace(u'and', u'&'), safe='')\n .replace(u'%20', u'+'))\n except:\n print('Using python3')\n return (urllib.parse.quote(text.replace(u'and', u'&'), safe='')\n .replace(u'%20', u'+'))", "def escape(s, quote=True):\n if s:\n s = s.replace(\"&\", \"&amp;\") # Must be done first!\n s = s.replace(\"<\", \"&lt;\")\n s = s.replace(\">\", \"&gt;\")\n if quote:\n s = s.replace('\"', \"&quot;\")\n s = s.replace('\\'', \"&#x27;\")\n return s", "def encode_if_unicode(string, encoding):\n if isinstance(string, compat.unicode_type):\n return string.encode(encoding)\n else:\n return string", "def unicode_unquote(value):\n return unquote(value).decode('utf-8')", "def unicode_escape(unistr):\n import htmlentitydefs\n escaped = \"\"\n\n for char in unistr:\n if ord(char) in htmlentitydefs.codepoint2name:\n name = htmlentitydefs.codepoint2name.get(ord(char))\n entity = htmlentitydefs.name2codepoint.get(name)\n escaped +=\"&#\" + str(entity)\n\n else:\n escaped += char\n\n return escaped", "def _unquote(src, encoding=\"utf-8\"):\n return urllib.unquote(src).decode(encoding)", "def urlencode(self, value):\n if isinstance(value, unicode):\n value = value.encode(\"utf-8\")\n elif value is None:\n return \"\"\n\n assert isinstance(value, str)\n\n return urllib.quote_plus(value)", "def safe_unicode(obj, *args):\n try:\n return unicode(obj, *args)\n except UnicodeDecodeError:\n # obj is byte string\n ascii_text = str(obj).encode('string_escape')\n return unicode(ascii_text)", "def safe_unicode(obj, *args):\n try:\n return unicode(obj, *args)\n except UnicodeDecodeError:\n # obj is byte string\n ascii_text = str(obj).encode('string_escape')\n return unicode(ascii_text)", "def safe_unicode(obj, *args):\n try:\n return unicode(obj, *args)\n except UnicodeDecodeError:\n # obj is byte string\n ascii_text = str(obj).encode('string_escape')\n return unicode(ascii_text)", "def safe_unicode(obj, *args):\n try:\n return unicode(obj, *args)\n except UnicodeDecodeError:\n # obj is byte string\n ascii_text = str(obj).encode('string_escape')\n return unicode(ascii_text)", "def safe_unicode(obj, *args):\n try:\n return unicode(obj, *args)\n except UnicodeDecodeError:\n # obj is byte string\n ascii_text = str(obj).encode('string_escape')\n return unicode(ascii_text)", "def 
safe_unicode(obj, *args):\n try:\n return unicode(obj, *args)\n except UnicodeDecodeError:\n # obj is byte string\n ascii_text = str(obj).encode('string_escape')\n return unicode(ascii_text)", "def safe_unicode(arg, *args, **kwargs):\n return arg if isinstance(arg, str) else str(arg, *args, **kwargs)", "def unquote(cls, value):\n if six.PY2:\n return unquote(value).decode(\"utf8\")\n else:\n return unquote(value.decode(\"ascii\"))", "def urlencode(txt):\n return urllib.quote_plus(txt)", "def _quoteattr(self, attr):\n attr = xml_safe(attr)\n if isinstance(attr, unicode) and not UNICODE_STRINGS:\n attr = attr.encode(self.encoding)\n return saxutils.quoteattr(attr)", "def _safe_urlunquote(string, charset='utf-8', errors='replace', unsafe=''):\n rv = _safe_urlunquote_to_bytes(string, unsafe)\n if charset is not None:\n rv = rv.decode(charset, errors)\n return rv", "def urlquote(text):\n if op.sep == \"\\\\\":\n return urllib.quote(text.replace(\"\\\\\", \"/\"))\n else:\n return urllib.quote(text)", "def escape(html):\n if not isinstance(html, unicode):\n if not isinstance(html, str):\n html = unicode(html)\n else:\n html = unicode(html, 'utf-8')\n return html.replace('&', '&amp;').replace('<', '&lt;').replace('>', '&gt;').replace('\"', '&quot;').replace(\"'\", '&#39;')", "def smart_unicode(s, encoding='utf-8', strings_only=False, errors='strict'):\r\n # if isinstance(s, Promise):\r\n # # The input is the result of a gettext_lazy() call.\r\n # return s\r\n return force_unicode(s, encoding, strings_only, errors)", "def utf8(value):\r\n if isinstance(value, six.text_type):\r\n return value.encode('utf-8')\r\n assert isinstance(value, str)\r\n return value", "def urlencode(text):\n blah = urllib.urlencode({'blahblahblah': try_encode(text)})\n blah = blah[13:]\n return blah", "def _quoteattr(self, attr):\n attr = xml_safe(attr)\n if isinstance(attr, str) and not UNICODE_STRINGS:\n attr = attr.encode(self.encoding)\n return saxutils.quoteattr(attr)", "def myescape(str):\n\tif str is not None:\n\t\treturn str.replace('&', '&amp;').replace('<', '&lt;')\n\telse:\n\t\treturn \"\"", "def _escape(html):\n return encoding.force_unicode(html).replace('&', '&amp;').replace('<', '&lt;').replace('>', '&gt;').replace('\"', '&quot;').replace(\"'\", '&#39;')", "def html_escape(u):\n u = _DEFAULT_TAG_ESCAPE(u)\n return u.replace(\"'\", '&#x27;')", "def _StringQuote(s, quote='\"', escape='\\\\'):\n entity = {'\\f': '\\\\f', '\\n': '\\\\n', '\\r': '\\\\r', '\\t': '\\\\t'}\n chars = []\n if quote:\n chars.append(quote)\n for c in s:\n if c in (escape, quote):\n chars.append(escape)\n elif c in entity:\n c = entity[c]\n chars.append(c)\n if quote:\n chars.append(quote)\n return ''.join(chars)", "def safe_quoted_string(value):\n validate_safe_string(value)\n return u'\\'{}\\''.format(value)", "def _escape_string(s, surrounding_quote='\"'):\n s = s.replace('\\\\', '\\\\\\\\')\n if surrounding_quote == '\"':\n s = s.replace('\"', r'\\\"')\n if surrounding_quote == \"'\":\n s = s.replace(\"'\", r\"\\'\")\n return s", "def _unicode_urlencode(params):\r\n if isinstance(params, dict):\r\n params = params.items()\r\n return urllib.parse.urlencode([(k, v.encode('utf-8') or v) for k, v in params])", "def escape_quotes(self, val):\n if val.startswith(self.quote) and val.endswith(self.quote):\n # make sure any previously escaped quotes are not re-escaped\n middle = val[1:-1].replace(\"\\\\\" + self.quote, self.quote)\n middle = middle.replace(self.quote, \"\\\\\" + self.quote)\n val = \"%s%s%s\" % (self.quote, middle, 
self.quote)\n\n return val", "def escape_unicode_string(u):\n def replacer(matchobj):\n if ord(matchobj.group(1)) == 127:\n return \"\\\\x7f\"\n if ord(matchobj.group(1)) == 92: # backslash\n return \"\\\\\\\\\"\n return REPLACEMENT_TABLE[ord(matchobj.group(1))]\n return re.sub(\"([\\\\000-\\\\037\\\\134\\\\177])\", replacer, u)", "def zh_quote_as_entity(self):\n pass", "def htmlquote(text):\r\n text = text.replace(\"&\", \"&amp;\") # Must be done first!\r\n text = text.replace(\"<\", \"&lt;\")\r\n text = text.replace(\">\", \"&gt;\")\r\n text = text.replace(\"'\", \"&#39;\")\r\n text = text.replace('\"', \"&quot;\")\r\n return text", "def test_raw_unicode_escape_dashes(self):\n ok = True\n try:\n unicode(b'hey', 'raw_unicode-escape')\n except LookupError:\n ok = False\n\n self.assertTrue(ok, \"dashes and underscores should be interchangable\")", "def encodeText(text):\r\n#\treturn repr( quote_plus(text.replace(\"'\", '\"')) )\r\n\ttry:\r\n\t\treturn repr( quote_plus(text.replace(\"'\", '\"').encode('utf-8')) )\r\n\texcept:\r\n\t\tlogError(\"encodeText()\")\r\n\treturn repr(text.replace(\"'\", '\"'))", "def utf8tounicode(arg):\n\n try:\n if isinstance(arg, unicode):\n return arg.decode('utf-8')\n except NameError:\n pass # Python 3\n return arg", "def escapeDecode(s: unicode) -> unicode:\n ...", "def encode_basestring(s, _PY3=PY3, _q=u('\"')):\r\n if _PY3:\r\n if isinstance(s, binary_type):\r\n s = s.decode('utf-8')\r\n else:\r\n if isinstance(s, str) and HAS_UTF8.search(s) is not None:\r\n s = s.decode('utf-8')\r\n def replace(match):\r\n return ESCAPE_DCT[match.group(0)]\r\n return _q + ESCAPE.sub(replace, s) + _q", "def shellquote(s):\n return '\"' + s.replace(\"'\", \"'\\\\''\") + '\"'", "def urllib_unquote(inputstring):\r\n\r\n if type(inputstring) is not str:\r\n raise TypeError(\"urllib_unquote's inputstring parameter must be a string, not '\"+str(type(inputstring))+\"'\")\r\n \r\n\r\n resultstr = \"\"\r\n\r\n # We go through the inputstring from end to beginning, looking for wrapped\r\n # octets. 
When one is found we add it (unwrapped) and the following\r\n # string to the resultant string, and shorten the original inputstring.\r\n\r\n while True:\r\n lastpercentlocation = inputstring.rfind(\"%\")\r\n if lastpercentlocation < 0:\r\n break\r\n\r\n wrappedoctetstr = inputstring[lastpercentlocation+1:lastpercentlocation+3]\r\n if len(wrappedoctetstr) != 2:\r\n raise ValueError(\"Quoted string is poorly formed\")\r\n\r\n resultstr = \\\r\n chr(int(wrappedoctetstr, 16)) + \\\r\n inputstring[lastpercentlocation+3:] + \\\r\n resultstr\r\n inputstring = inputstring[:lastpercentlocation]\r\n\r\n resultstr = inputstring + resultstr\r\n return resultstr", "def _unicodeify(self, value, encoding=\"utf8\"):\n if isinstance(value, str):\n return value\n return str(value, encoding)", "def cleaned_string(val):\r\n return urllib.quote_plus(smart_str(val))", "def encode(e):\n if PY2 and isinstance(e, unicode):\n e = e.encode('utf-8')\n return e", "def encode_unicode_string(string, length=None):\n\t\n\tif string is None:\n\t\tstring = u''\n\treturn encode_string(string.encode('utf_8'), length)", "def unicode_urlencode(params):\n if isinstance(params, dict):\n params = params.items()\n return urlencode([(k, isinstance(v, unicode) and v.encode('utf-8') or v)\n for k, v in params])", "def _encode_urlplus(data: str) -> str:\n return urllib.parse.quote_plus(data, safe=\"\")", "def utf8(unicode_str):\n if six.PY2 and isinstance(unicode_str, __unicode__):\n return unicode_str.encode('utf-8')\n\n return unicode_str", "def escapeString(value, escapeType = EscapeStringType.REGEX):\n if value is None:\n return None\n myCodec = encodings.codecs.lookup(\"ISO-8859-1\")\n data, _ = myCodec.encode(value)\n dataOut = \"\"\n if escapeType == EscapeStringType.DECIMAL:\n for dex in data:\n dataOut += \"&#{0};\".format(dex)\n elif escapeType == EscapeStringType.DECIMAL_LONG:\n for dex in data:\n dataOut += \"&#{0:0>7}\".format(dex)\n elif escapeType == EscapeStringType.HEXDECIMAL:\n for dex in data:\n dataOut += \"&#x{0}\".format(dex)\n elif escapeType == EscapeStringType.REGEX:\n #switch back to df utf-8\n myCodec = encodings.codecs.lookup(\"UTF-8\")\n data, _ = myCodec.encode(value)\n for dex in data:\n dataOut += \"\\\\x{0:x}\".format(dex)\n elif escapeType == EscapeStringType.UNICODE:\n for dex in list(value):\n dataOut += \"\\\\u{:0>4X}\".format(ord(dex))\n elif escapeType == EscapeStringType.HTML:\n for dex in data:\n if dex in htmlEscStrings:\n dataOut += htmlEscStrings.get(dex)\n else:\n dataOut += \"&#{0};\".format(dex)\n elif escapeType == EscapeStringType.XML:\n for dex in data:\n if dex in xmlEscStrings:\n dataOut += xmlEscStrings.get(dex)\n else:\n dataOut += chr(dex)\n elif escapeType == EscapeStringType.URI:\n return urllib.parse.quote(value)\n elif escapeType == EscapeStringType.BLANK:\n for dex in data:\n dataOut += ' '\n else:\n dataOut = value\n return dataOut", "def encode_utf8(string):\n if isinstance(string, unicode):\n try: \n return string.encode(\"utf-8\")\n except:\n return string\n return str(string)", "def Quote(s):\n if not nonnormal_char_re.search(s):\n return s # no quoting necessary\n slist = []\n for char in s:\n if nonnormal_char_re.search(char):\n slist.append(\"\\\\x%02x\" % ord(char))\n else:\n slist.append(char)\n return '\"%s\"' % \"\".join(slist)", "def _escapeArg(arg):\n #XXX There is a *lot* more that we should escape here.\n return arg.replace('\"', r'\\\"')", "def escape(text):\n if text is None:\n return\n else:\n return cgi.escape(text).encode('ascii', 'xmlcharrefreplace')", 
"def force_unicode(s, encoding='utf-8', strings_only=False, errors='strict'):\r\n if strings_only and is_protected_type(s):\r\n return s\r\n try:\r\n if not isinstance(s, basestring,):\r\n if hasattr(s, '__unicode__'):\r\n s = unicode(s)\r\n else:\r\n try:\r\n s = unicode(str(s), encoding, errors)\r\n except UnicodeEncodeError:\r\n if not isinstance(s, Exception):\r\n raise\r\n # If we get to here, the caller has passed in an Exception\r\n # subclass populated with non-ASCII data without special\r\n # handling to display as a string. We need to handle this\r\n # without raising a further exception. We do an\r\n # approximation to what the Exception's standard str()\r\n # output should be.\r\n s = ' '.join([force_unicode(arg, encoding, strings_only,\r\n errors) for arg in s])\r\n elif not isinstance(s, unicode):\r\n # Note: We use .decode() here, instead of unicode(s, encoding,\r\n # errors), so that if s is a SafeString, it ends up being a\r\n # SafeUnicode at the end.\r\n s = s.decode(encoding, errors)\r\n except UnicodeDecodeError, e:\r\n if not isinstance(s, Exception):\r\n raise TwitterTextUnicodeDecodeError(s, *e.args)\r\n else:\r\n # If we get to here, the caller has passed in an Exception\r\n # subclass populated with non-ASCII bytestring data without a\r\n # working unicode method. Try to handle this without raising a\r\n # further exception by individually forcing the exception args\r\n # to unicode.\r\n s = ' '.join([force_unicode(arg, encoding, strings_only,\r\n errors) for arg in s])\r\n return s", "def unquote(s, *a, **kw):\n return quote(s, *a, **kw)", "def standardise_quotes(self, val):\n if val.startswith(self.altquote) and val.endswith(self.altquote):\n middle = val[1:-1]\n val = \"%s%s%s\" % (self.quote, middle, self.quote)\n\n val = self.escape_quotes(val)\n\n return val", "def quote(*a, **kw):\n return quote(*a, **kw)", "def as_utf8(value):\n assert value is None or isinstance(value,types.StringTypes)\n if isinstance(value,types.UnicodeType):\n return value.encode('utf-8')\n else:\n return value", "def smart_unicode(s, encoding='utf-8', strings_only=False, errors='strict'):\n return force_unicode(s, encoding, strings_only, errors)", "def fix_unicode_quotes(text):\n\n text = re.sub(u'[\\u201c\\u201d]', '\"', text, re.M | re.I)\n text = re.sub(u'[\\u2018\\u2019\\u0027]', \"'\", text, re.M | re.I)\n return re.sub(u'[\\u2014]', \"-\", text, re.M | re.I)", "def escape(text):\n if (isinstance(text, basestring)):\n try: text = encode(text)\n except: text = copy(text)\n text = text.replace(\"&\", \"&amp;\")\n text = text.replace(\"<\", \"&lt;\")\n text = text.replace(\">\", \"&gt;\")\n return text", "def test_unquote(self):\n self.assertEqual(unquote('foo%23bar'), 'foo#bar')\n self.assertEqual(unquote('foo%23bar', ['#']), 'foo%23bar')\n with self.assertRaises(TypeError):\n unquote(None)\n self.assertEqual(unquote(\"\"), \"\")\n self.assertEqual(unquote(\"abc123\"), \"abc123\")", "def unquote_safe(s, unsafe_list):\n # note: this build utf8 raw strings ,then does a .decode('utf8') at the end.\n # as a result it's doing .encode('utf8') on each block of the string as it's processed.\n res = _utf8(s).split('%')\n for i in xrange(1, len(res)):\n item = res[i]\n try:\n raw_chr = _hextochr[item[:2]]\n if raw_chr in unsafe_list or ord(raw_chr) < 20:\n # leave it unescaped (but uppercase the percent escape)\n res[i] = '%' + item[:2].upper() + item[2:]\n else:\n res[i] = raw_chr + item[2:]\n except KeyError:\n res[i] = '%' + item\n except UnicodeDecodeError:\n # note: i'm not sure what 
this does\n res[i] = unichr(int(item[:2], 16)) + item[2:]\n o = \"\".join(res)\n return _unicode(o)", "def encodeLiteral(self, string):\r\n return string.replace(\"'\",\"''\")", "def utf8(value):\n if isinstance(value, (bytes, type(None))):\n return value\n if not isinstance(value, unicode_type):\n raise TypeError(\n \"Expected bytes, unicode, or None; got %r\" % type(value)\n )\n return value.encode(\"utf-8\")", "def quote(value):\n return DoubleQuotedScalarString(value)", "def quote(s):\n return unescape(quoteattr(s))", "def _sh_quote(s):\n if not s:\n return b\"\"\n if _find_unsafe(s) is None:\n return s\n\n # use single quotes, and put single quotes into double quotes\n # the string $'b is then quoted as '$'\"'\"'b'\n return b\"'\" + s.replace(b\"'\", b\"'\\\"'\\\"'\") + b\"'\"", "def make_unicode(string):\n if sys.version < '3' and isinstance(string, str):\n return unicode(string.decode('utf-8'))\n\n return string", "def enc(text):\n if isinstance(text, str):\n return unicode(text, 'utf-8') # TODO: fix in Python 3\n elif isinstance(text, unicode):\n return text.encode('utf-8')\n else:\n raise Exception(\"Unsupported encode format.\")", "def force_unicode(s, encoding='utf-8', strings_only=False, errors='strict'):\r\n # Handle the common case first, saves 30-40% in performance when s\r\n # is an instance of unicode. This function gets called often in that\r\n # setting.\r\n if isinstance(s, unicode):\r\n return s\r\n if strings_only and is_protected_type(s):\r\n return s\r\n try:\r\n if not isinstance(s, basestring,):\r\n if hasattr(s, '__unicode__'):\r\n s = unicode(s)\r\n else:\r\n try:\r\n s = unicode(str(s), encoding, errors)\r\n except UnicodeEncodeError:\r\n if not isinstance(s, Exception):\r\n raise\r\n # If we get to here, the caller has passed in an Exception\r\n # subclass populated with non-ASCII data without special\r\n # handling to display as a string. We need to handle this\r\n # without raising a further exception. We do an\r\n # approximation to what the Exception's standard str()\r\n # output should be.\r\n s = u' '.join([force_unicode(arg, encoding, strings_only,\r\n errors) for arg in s])\r\n elif not isinstance(s, unicode):\r\n # Note: We use .decode() here, instead of unicode(s, encoding,\r\n # errors), so that if s is a SafeString, it ends up being a\r\n # SafeUnicode at the end.\r\n s = s.decode(encoding, errors)\r\n except UnicodeDecodeError, e:\r\n if not isinstance(s, Exception):\r\n raise DjangoUnicodeDecodeError(s, *e.args)\r\n else:\r\n # If we get to here, the caller has passed in an Exception\r\n # subclass populated with non-ASCII bytestring data without a\r\n # working unicode method. Try to handle this without raising a\r\n # further exception by individually forcing the exception args\r\n # to unicode.\r\n s = u' '.join([force_unicode(arg, encoding, strings_only,\r\n errors) for arg in s])\r\n return s", "def processString(self, string):\n # TODO accents too\n return urllib.quote(re.sub('\\s|\\(|\\)|,|\\.','_',unicode(string).strip().replace('/', '-')).encode('utf-8', 'ignore'))", "def _quote(v):\n return '\"' + v + '\"' if ' ' in v else v", "def quote(s):\n # Based on shlex.quote. 
Bun unlike shlex, it quotes every string and\n # not just the ones that contain unsafe characters.\n return \"'\" + s.replace(\"'\", \"'\\\"'\\\"'\") + \"'\"", "def unicode_escape(s, error=\"backslashreplace\"):\n def ascii_chr(byte):\n if 0 <= byte <= 127:\n return chr(byte)\n return r\"\\x%02x\" % byte\n\n bytes = s.encode('utf-8', error)\n return \"\".join(map(ascii_chr, bytes))" ]
[ "0.756562", "0.7546129", "0.7493154", "0.7138185", "0.6648861", "0.6499509", "0.6496231", "0.6401295", "0.63460577", "0.6345967", "0.6233761", "0.622644", "0.62058586", "0.6182652", "0.6180141", "0.6175421", "0.6170337", "0.6098198", "0.6041469", "0.6020512", "0.6014165", "0.60018927", "0.59674", "0.5957023", "0.594896", "0.5915022", "0.59089035", "0.5893021", "0.5884992", "0.5879348", "0.58736503", "0.5867356", "0.5867356", "0.5867356", "0.5867356", "0.5867356", "0.5867356", "0.5835304", "0.57978135", "0.5795273", "0.5766874", "0.5760596", "0.5723228", "0.57002044", "0.56868804", "0.5680286", "0.5678489", "0.5671897", "0.5640788", "0.56373584", "0.5627533", "0.56197494", "0.560596", "0.56043553", "0.560075", "0.559425", "0.5590669", "0.55831707", "0.55665505", "0.5555735", "0.5542469", "0.55403084", "0.5502422", "0.5486528", "0.5485784", "0.5484606", "0.54813904", "0.5473215", "0.54710186", "0.5456459", "0.54521173", "0.54405355", "0.54395056", "0.543942", "0.5438955", "0.5436575", "0.54297936", "0.5409688", "0.5405377", "0.5395639", "0.5392587", "0.53903407", "0.5381064", "0.53776556", "0.53736556", "0.5369215", "0.5360118", "0.53588766", "0.5355492", "0.53525984", "0.53402996", "0.5338969", "0.533276", "0.5329789", "0.5328936", "0.5327559", "0.53184724", "0.5309659", "0.5308592", "0.5300268" ]
0.64992213
6
Return True if host part of url matches an entry in given domain list.
def match_url(url, domainlist):
    if not url:
        return False
    return match_host(url_split(url)[1], domainlist)
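An illustrative call (hypothetical inputs; `url_split` and `match_host` are companion helpers assumed to live in the same module, with `url_split(url)[1]` yielding the host part):

    # Hypothetical usage; url_split() and match_host() come from the same module.
    print(match_url('http://mail.example.com/inbox', ['.example.com']))  # True
    print(match_url('http://other.org/', ['.example.com']))              # False
    print(match_url('', ['.example.com']))                               # False: empty url short-circuits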
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def match_host(host, domainlist):\n if not host:\n return False\n for domain in domainlist:\n if domain.startswith('.'):\n if host.endswith(domain):\n return True\n elif host == domain:\n return True\n return False", "def matches(self, url):\n split = urlparse.urlsplit(url)\n return self.host == split.hostname", "def matches_host(self, host: str, requires_data_uri: bool = False) -> bool:\n return (\n self.url\n and self.site_host\n and self.site_host in host\n and (self.data_uri if requires_data_uri else True)\n )", "def matches_hostname(self, hostname):\n return hostname in self.hostnames", "def __isOnDomainList(self, rules, domain):\n for rule in rules:\n if rule.startswith(\".\"):\n if domain.endswith(rule):\n return True\n \n withoutDot = rule[1:]\n if domain == withoutDot:\n return True\n else:\n domainEnding = domain[-(len(rule) + 1):]\n if (\n domainEnding and\n domainEnding[0] == \".\" and\n domain.endswith(rule)\n ):\n return True\n \n if rule == domain:\n return True\n \n return False", "def indomain(url, domain):\n if url and domain:\n return url.startswith(domain)\n return False", "def domain_in_ip_whois_match(self, domain, ip):\n try:\n domain_ip_desc = self.get_name_by_ip(ip).lower().split(' ')\n domain_list = domain.lower()\n return domain_list in domain_ip_desc\n except Exception as e:\n return e", "def __matchDomain(self, cookieDomain, siteDomain):\n if not siteDomain:\n # empty URLs always match\n return True\n \n if cookieDomain.startswith(\".\"):\n cookieDomain = cookieDomain[1:]\n if siteDomain.startswith(\".\"):\n siteDomain = siteDomain[1:]\n \n if cookieDomain == siteDomain:\n return True\n \n if not siteDomain.endswith(cookieDomain):\n return False\n \n index = siteDomain.find(cookieDomain)\n return index > 0 and siteDomain[index - 1] == \".\"", "def canDo_url(self, url):\n hostname = urlparse.urlsplit(url)[1]\n for hostEnd in self.highwireHosts:\n if hostname.endswith(hostEnd):\n logging.log(5, 'url hostname %s ends with %s -> highwire' % (hostname, hostEnd))\n return True\n\n if hostname in self.hostCache:\n ipAddr = self.hostCache[hostname]\n else:\n logging.debug('Looking up IP for %s' % hostname)\n try:\n ipAddr = socket.gethostbyname(hostname)\n self.hostCache[hostname] = ipAddr\n except socket.gaierror:\n raise pubGetError('Illegal hostname %s in link' % hostname, 'invalidHostname', hostname)\n\n ipParts = ipAddr.split('.')\n ipParts = [ int(x) for x in ipParts ]\n result = ipParts[0] == 171 and ipParts[1] in range(64, 68)\n if result == True:\n logging.log(5, 'hostname %s is highwire host' % hostname)\n return result", "def has(self, hostname: str) -> bool:\n for hostinfo in self.hostinfo_list:\n if hostinfo.hostname == hostname:\n return True\n return False", "def is_same_domain(url1, url2):\n return tldextract.extract(url1).domain == tldextract.extract(url2).domain", "def contains_domain(address, domain):\n domain = r'[\\w\\.-]+@'+domain+'$'\n if re.match(domain,address):\n return True\n return False", "def domain_filter(self, url):\n return url_is_from_any_domain(url, self._domain)", "def has_domain(self, domain, port=DEFAULT_PORT):\n \n vhost_tag_open = False\n\n for line in self._content.split(\"\\n\"):\n if self.__get_vhost_line(port) in line:\n vhost_tag_open = True\n\n if \"</VirtualHost>\" in line:\n vhost_tag_open = False\n\n if vhost_tag_open and self.__get_servername_line(domain) in line: # Add alias\n return True\n\n return False", "def match_url(self, url, options=None):\n options = options or {}\n for optname in self.options:\n if optname 
== 'match-case': # TODO\n continue\n\n if optname not in options:\n raise ValueError(\"Rule requires option %s\" % optname)\n\n if optname == 'domain':\n if not self._domain_matches(options['domain']):\n return False\n continue\n\n if options[optname] != self.options[optname]:\n return False\n\n return self._url_matches(url)", "def url_has_allowed_host_and_scheme(url, allowed_hosts, require_https=False):\n if url is not None:\n url = url.strip()\n if not url:\n return False\n if allowed_hosts is None:\n allowed_hosts = set()\n elif isinstance(allowed_hosts, str):\n allowed_hosts = {allowed_hosts}\n # Chrome treats \\ completely as / in paths but it could be part of some\n # basic auth credentials so we need to check both URLs.\n return _url_has_allowed_host_and_scheme(\n url, allowed_hosts, require_https=require_https\n ) and _url_has_allowed_host_and_scheme(\n url.replace(\"\\\\\", \"/\"), allowed_hosts, require_https=require_https\n )", "def url_has_allowed_host_and_scheme(url, allowed_hosts, require_https=False):\n if url is not None:\n url = url.strip()\n if not url:\n return False\n if allowed_hosts is None:\n allowed_hosts = set()\n elif isinstance(allowed_hosts, str):\n allowed_hosts = {allowed_hosts}\n # Chrome treats \\ completely as / in paths but it could be part of some\n # basic auth credentials so we need to check both URLs.\n return _url_has_allowed_host_and_scheme(\n url, allowed_hosts, require_https=require_https\n ) and _url_has_allowed_host_and_scheme(\n url.replace(\"\\\\\", \"/\"), allowed_hosts, require_https=require_https\n )", "def check_if_same_host(host, url):\n # print '\\nchecking same origin:', host, get_host_name(url)\n\n if host == get_host_name(url):\n return True\n return False", "def is_same_domain(host, pattern):\n if not pattern:\n return False\n\n pattern = pattern.lower()\n return (\n pattern[0] == \".\"\n and (host.endswith(pattern) or host == pattern[1:])\n or pattern == host\n )", "def matchWildcardUrls(url, listOfUrls):\n if not url or not listOfUrls:\n return None\n pattern = re.compile('^[a-zA-Z][+a-zA-Z0-9.-]*:.*')\n if not pattern.search(str(url)) and not url.startswith('//'):\n url = '//' + url\n cspUrl = urlparse(str(url)) \n host = cspUrl.netloc.lower() or \"\"\n hostHasWildcard = host.startswith(\"*.\")\n wildcardFreeHost = re.sub(\"^\\*\", \"\", host, flags=re.IGNORECASE)\n path = cspUrl.path or ''\n hasPath = len(cspUrl.path) > 0 \n\n for url2 in listOfUrls:\n url = urlparse(str(url2))\n domain = url.netloc.lower() or \"\"\n domainHasWildCard = domain.startswith(\"*.\")\n if (not domainHasWildCard):\n if (not domain.endswith(wildcardFreeHost) ): \n continue\n if (not hostHasWildcard and host != domain):\n continue\n else:\n domainparts = list(reversed(domain.split('.')))\n hostparts = list(reversed(host.split('.')))\n stop = False\n domainlen = len(domain.split('.'))\n hostlen = len(host.split('.'))\n \n for idx, domainpart in enumerate(domainparts):\n if idx < hostlen:\n hostpart = hostparts[idx]\n if hostpart != domainpart and (domainpart != '*' and hostpart != '*'):\n stop = True\n if stop:\n continue\n if (hasPath):\n if (path.endswith('/')): \n if (not url.path.startswith(path)):\n continue\n elif (url.path != path):\n continue\n\n return url\n\n return None", "def validate_base_domain_url(base_domain_url) -> bool:\n url_components = urlparse(base_domain_url)\n return True if all([url_components.scheme, url_components.netloc]) else False", "def is_domain(value):\n result = any(check.isalpha() for check in value)\n return result", 
"def _is_domain_allowed(email):\n domains = local_config.AuthConfig().get('whitelisted_domains', default=[])\n for domain in domains:\n if utils.normalize_email(email).endswith('@%s' % domain.lower()):\n return True\n\n return False", "def _matches(self, url, options,\n general_re, domain_required_rules, rules_with_options):\n if general_re and general_re.search(url):\n return True\n\n rules = []\n if 'domain' in options and domain_required_rules:\n src_domain = options['domain']\n for domain in _domain_variants(src_domain):\n if domain in domain_required_rules:\n rules.extend(domain_required_rules[domain])\n\n rules.extend(rules_with_options)\n\n if self.skip_unsupported_rules:\n rules = [rule for rule in rules if rule.matching_supported(options)]\n\n return any(rule.match_url(url, options) for rule in rules)", "def same_domain(url1, url2):\n return urlparse.urlparse(url1).netloc == urlparse.urlparse(url2).netloc", "def search(url, domain_list):\n resp = requests.get(url)\n if not resp.json().get('hits', '').get('hits', []):\n return\n for hit in resp.json()[\"hits\"][\"hits\"]:\n domain = hit.get(\"_source\", {}).get(\"domain\", \"\")\n if not domain:\n continue\n if not domain in domain_list:\n domain_list.append(domain)\n #print(hit[\"_source\"].get(\"title\", \"\").encode(\"ascii\",\"ignore\"))\n if domain not in ALLOWED_DOMAINS:\n print(domain)", "def _supports_domain(cls, domain):\n return domain in (ZZ, QQ)", "def _check_audience(self, request, audience):\n if not self.audiences:\n return audience == request.host_url\n for audience_pattern in self._audience_patterns:\n if audience_pattern.match(audience):\n return True\n return False", "def is_same_domain(url1, url2):\r\n url1 = urlparse(url1)\r\n url2 = urlparse(url2)\r\n return url1.netloc == url2.netloc", "def host_valid_lenient(self, host: str) -> bool:\n return WebCrawler.resolve_domain(host) in self.root_domains", "def hasDomains(team_id,domain_id):\n sql = \"SELECT id from datawake_domains where id = %s AND team_id = %s\"\n rows = dbGetRows(sql, [domain_id,team_id])\n return len(rows) > 0", "def _match_hostname(url, condition, require_path=None, require_no_path=False):\n scheme, _, other = url.partition(\":\")\n if scheme not in (\n \"git\", # lxc-python2\n \"git+https\", # asyncssh\n \"http\",\n \"https\",\n \"svn\", # wsgiref\n ):\n return False\n\n if condition.startswith(\"http://\"):\n condition = condition[7:]\n\n hostname, _, path = condition.partition(\"/\")\n if \":\" in hostname:\n hostname = hostname.split(\":\", 1)[0]\n\n if \".\" not in other: # pragma: no cover\n return False # '/dev/' in http://www.reportlab.com/\n\n other = other.lstrip(\"/\")\n match_subdomains = hostname.startswith(\"*.\")\n if match_subdomains:\n hostname = hostname[2:]\n\n subdomain, other = other.split(\".\", 1)\n if subdomain in [\"www\"]:\n logger.debug(\"url {} subdomain www\".format(url))\n return False\n if not other.startswith(hostname):\n return None\n\n if require_path is None:\n require_path = not match_subdomains\n\n # Require at least a suffix\n other = other[len(hostname) :]\n other = other.lstrip(\"/\")\n if not other:\n if require_no_path:\n return True\n\n if require_path:\n logger.debug(\"url {} no path\".format(url))\n return False\n\n if path:\n if not other.startswith(path):\n logger.debug(\"url {} not path {}\".format(url, path))\n return False\n\n return True", "def is_same_domain(url1, url2):\n url1 = urlparse(url1)\n url2 = urlparse(url2)\n return url1.netloc == url2.netloc", "def include_hostnames(nmap_host):\n if 
nmap_host.hostnames:\n return True\n return False", "def valid_host(host):\n if host in ACCEPTED_HOSTS:\n return True\n return False", "def _match(cls, url, **kwargs):\n return url.scheme.startswith('http')", "def test_url_subdomain(self):\n subdomains = ct.url_subdomain(\"https://www.bad-actor.services/some/url-thats-long?debug=True\")\n assert isinstance(subdomains, list)\n assert len(subdomains) == 1\n subdomains = ct.url_subdomain(\"https://one.two.bad-actor.services/some/url-thats-long?debug=True\")\n assert subdomains[0] == \"one\"\n assert subdomains[1] == \"two\"", "def is_valid_user_provided_domain_format(domain):\n\n if domain == \"\":\n print(\"You didn't enter a domain. Try again.\")\n return False\n\n domain_regex = re.compile(r\"www\\d{0,3}[.]|https?\")\n\n if domain_regex.match(domain):\n print(\n \"The domain \" + domain + \" is not valid. Do not include \"\n \"www.domain.com or http(s)://domain.com. Try again.\"\n )\n return False\n else:\n return True", "def _listContains(self, l, entry):\n for i in range(0, len(l)):\n if l[i] == entry:\n return True\n return False", "def validate_host(self, host: str) -> bool:\n ip_address_regex = re.compile(r'^(?:(?:25[0-5]|2[0-4][0-9]|1[0-9][0-9]|[1-9]?[0-9])\\.){3}'\n r'(?:25[0-5]|2[0-4][0-9]|1[0-9][0-9]|[1-9]?[0-9])$')\n hostname_regex = re.compile(r'^(?:[a-z0-9](?:[a-z0-9-]{0,61}[a-z0-9])?\\.)+'\n r'[a-z0-9][a-z0-9-]{0,61}[a-z0-9]$')\n url_regex = re.compile(r'^(ldaps?)://'\n r'((?:[a-z0-9](?:[a-z0-9-]{0,61}[a-z0-9])?\\.)+'\n r'[a-z0-9][a-z0-9-]{0,61}[a-z0-9]):'\n r'([0-9]{1,5})$')\n if bool(ip_address_regex.match(host)):\n # using ipv4 address\n valid = True\n elif bool(hostname_regex.match(host)):\n # using a hostname address\n valid = True\n elif bool(url_regex.match(host)):\n # using host url address\n match = url_regex.match(host)\n proto = match.group(1)\n if proto == 'ldaps':\n self.server_use_ssl = True\n valid = True\n else:\n # unsupported host format\n valid = False\n return valid", "def check_url(url=None, parse_url=None):\n \n if not parse_url:\n if url:\n parse_url = urlparse.urlparse(url)\n \n #yt_domains = ['youtube.com', 'youtube-nocookie.com', 'youtu.be', 'youtube.googleapis.com']\n #return any(parse_url.netloc.endswith(yt) for yt in yt_domains)\n return re.search('^(.+\\.)*(youtube(-nocookie|\\.googleapis)?.com|youtu.be)+$', parse_url.netloc)", "def match(self, url):\n if self.is_global:\n return True\n\n # For easy comparison, we strip leading and trailing slashes,\n # and then split both self.url and the supplied URL on\n # slashes, to get two lists of path components we can compare.\n self_bits = self.url.strip(\"/\").split(\"/\")\n url_bits = url.strip(\"/\").split(\"/\")\n\n # If self.url produced a longer list of path components than\n # the supplied URL, it can't be a match.\n if len(self_bits) > len(url_bits):\n return False\n\n return self_bits == url_bits[: len(self_bits)]", "def _check_host_existence(self, hostname: str) -> bool:\n with self.lock:\n hosts = self.hosts.all()\n for host in hosts:\n if host['hostname'] == hostname:\n return True\n return False", "def https_in_url(url):\n return True if url.startswith('https://') else False", "def __guess_domain(email_domain: str, domain: str) -> bool:\n if len(email_domain) != len(domain):\n return False\n\n char_poses = []\n\n for char_pos, char in enumerate(email_domain):\n if char != '*':\n char_poses.append((char_pos, char))\n\n for char_pos, char in char_poses:\n if domain[char_pos] != char:\n return False\n\n return True", "def 
test_url_domain(self):\n assert ct.url_domain(\"http://www.google.com\") == \"google.com\"\n assert ct.url_domain(\"http://localhost\") == \"localhost\"\n assert ct.url_domain(\"http://192.168.1.19:5010\") == \"192.168.1.19\"", "def match_allowed_origin(self, parsed_origin, pattern):\n if parsed_origin is None:\n return False\n\n # Get ResultParse object\n parsed_pattern = urlparse(pattern.lower())\n if parsed_origin.hostname is None:\n return False\n if not parsed_pattern.scheme:\n pattern_hostname = urlparse(\"//\" + pattern).hostname or pattern\n return is_same_domain(parsed_origin.hostname, pattern_hostname)\n # Get origin.port or default ports for origin or None\n origin_port = self.get_origin_port(parsed_origin)\n # Get pattern.port or default ports for pattern or None\n pattern_port = self.get_origin_port(parsed_pattern)\n # Compares hostname, scheme, ports of pattern and origin\n if (\n parsed_pattern.scheme == parsed_origin.scheme\n and origin_port == pattern_port\n and is_same_domain(parsed_origin.hostname, parsed_pattern.hostname)\n ):\n return True\n return False", "def test_match(self, url, criterions=[], har=None):\r\n return len(self.get_matches(url, criterions, har)) != 0", "def is_active_domain(self, domain=\"www.google.com\", name_server='1.1.1.1'):\n my_resolver = dns.resolver.Resolver()\n my_resolver.nameservers = [name_server]\n my_resolver.timeout = 3\n my_resolver.lifetime = 3\n try:\n A = my_resolver.query(domain, 'A')\n for i in A.response.answer:\n for j in i.items:\n return self.is_actual_ip(str(j))\n except Exception as e:\n return None", "def is_reddit_url(self, subreddit = None):\r\n from pylons import g\r\n return (not self.hostname or \r\n self.hostname.endswith(g.domain) or\r\n (subreddit and subreddit.domain and\r\n self.hostname.endswith(subreddit.domain)))", "def check_url(url=None, parse_url=None):\n if not parse_url:\n parse_url = urlparse.urlparse(url)\n \n return ('vzaar.com' in parse_url.netloc or 'vzaar.tv' in parse_url.netloc)", "def match(self, scheme=None, host=None, path=None, title=None):\n for alternate in self.alternates:\n if alternate.match(scheme, host, path, title):\n return True\n if title and title.lower() in title.lower():\n return True\n\n if (scheme is not None) and scheme.lower() != scheme.lower():\n return False\n if (host is not None) and host.lower() != host.lower():\n return False\n if (path is not None) and path.lower() != path.lower():\n return False\n\n return True", "def is_page(self, url):\n netloc = urlparse.urlparse(url).netloc.lower()\n return any(map(lambda domain: netloc.endswith(domain), self.allowed_domains))", "def offer(self, url):\n parts = urlparse(url)\n return bool(self.KT_RE.match(parts.netloc))", "def is_entry_in_list(entry, a_list):\n for item in a_list:\n if entry['description'] == item['description']:\n return True\n return False", "def check_url(url=None, parse_url=None):\n \n if not parse_url:\n parse_url = urlparse.urlparse(url)\n \n return parse_url.netloc == 'dailymotion.com' or parse_url.netloc.endswith('.dailymotion.com')", "def is_ncar_host():\n hostname = socket.getfqdn()\n \n return any([re.compile(ncar_host).search(hostname) \n for ncar_host in ['cheyenne', 'casper', 'hobart']])", "def host_valid_strict(self, host: str) -> bool:\n host = host[4:] if host.startswith('www.') else 'www.' 
+ host\n return host in self.root_domains", "def verify_email_in_whitelist(email, whitelist=None):\n if whitelist is None:\n settings = api.config.get_settings()\n whitelist = settings[\"email_filter\"]\n\n # Nothing to check against!\n if len(whitelist) == 0:\n return True\n\n for email_domain in whitelist:\n if re.match(r\"^[^@]+@{}$\".format(email_domain), email) is not None:\n return True\n\n return False", "def _is_valid_target(hostname):\n if not hostname:\n return False\n\n # Check if it's a valid IP\n if _is_valid_ipv4_address(hostname) or _is_valid_ipv6_address(hostname):\n return True\n\n # Check if it's a valid DNS name\n\n if hostname[-1] == '.':\n hostname = hostname[:-1] # strip exactly one dot from the right, if present\n if len(hostname) < 1 or len(hostname) > 253: # Technically 255 octets but 2 are used for encoding\n return False\n\n labels = hostname.split(\".\")\n\n # the TLD must be not all-numeric\n if re.match(r\"[0-9]+$\", labels[-1]):\n return False\n\n allowed = re.compile(r\"(?!-)[a-z0-9-]{1,63}(?<!-)$\", re.IGNORECASE)\n return all(allowed.match(label) for label in labels)", "def __compare_lowercase(self, dn, dn_list):\n\t\tfor d in dn_list:\n\t\t\tif dn.lower() == d.lower():\n\t\t\t\treturn True\n\t\treturn False", "def _urlcheck(self):\n if (self['.managerhost'] and self['.settingurl'] and self['.guid']):\n return True\n else:\n return False", "def _match_url(self, _url):\n\n regex = re.compile(\n r'^(?:http|ftp)s?://' # http:// or https://\n r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\\.)+(?:[A-Z]{2,6}\\.?|[A-Z0-9-]{2,}\\.?)|' # domain\n r'localhost|' # localhost\n r'\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3})' # ...or ip\n r'(?::\\d+)?' # optional port\n r'(?:/?|[/?]\\S+)$', re.IGNORECASE)\n\n if re.match(regex, _url):\n return True\n else:\n return False", "def is_fqdn(address):\n\n return bool(re.match(re_fqdn, address))", "def __contains__(self, address):\n return any([\n ipaddress.ip_address(address) in network\n for network in self.networks\n ])", "def url_is_in_db(url):\n return bool(find_url(url).first())", "def __check_lazy(self, url: str, json_file: str):\n\n parsed_url = urlparse(url)\n\n netloc = parsed_url.netloc\n\n with open(json_file) as lazy_sites:\n data = json.load(lazy_sites)\n \n for site in data['lazy_sites']:\n match = re.match(site['domain'], netloc)\n\n if match:\n return True\n \n return False", "def check_url(url=None, parse_url=None):\n \n if not parse_url:\n parse_url = urlparse.urlparse(url)\n \n return parse_url.netloc.endswith('slideshare.net')", "def verify_url(url: str) -> bool:\n parsed_url = urlparse(url)\n return all([parsed_url.scheme, parsed_url.netloc])", "def validate_slug(host_slug: str,\n database_connection: mysql.connector.connect) -> bool:\n host_slug = host_slug.strip()\n if not host_slug:\n return False\n\n try:\n cursor = database_connection.cursor()\n query = \"SELECT hostslug FROM ww_hosts WHERE hostslug = %s;\"\n cursor.execute(query, (host_slug,))\n result = cursor.fetchone()\n cursor.close()\n\n return bool(result)\n except ProgrammingError as err:\n raise ProgrammingError(\"Unable to query the database\") from err\n except DatabaseError as err:\n raise DatabaseError(\"Unexpected database error\") from err", "def has_host(self, host):\n assert type(host) is str, 'Wrong type for [host], should be a string [was {0}]'.format(type(host))\n assert 'scan' in self._scan_result, 'Do a scan before trying to get result !'\n\n if host in list(self._scan_result['scan'].keys()):\n return True\n\n return False", "def 
matches_exclusions(stripped_rule, exclusion_regexes):\n\n try:\n stripped_domain = stripped_rule.split()[1]\n except IndexError:\n # Example: 'example.org' instead of '0.0.0.0 example.org'\n stripped_domain = stripped_rule\n\n for exclusionRegex in exclusion_regexes:\n if exclusionRegex.search(stripped_domain):\n return True\n\n return False", "def is_server_address(\n address: str, additional_schemes: Iterable[str] = ()) -> bool:\n schemes = {\"http\", \"https\"}\n if additional_schemes:\n schemes.update(additional_schemes)\n try:\n pieces = urlparse(address)\n scheme = pieces.scheme.lower()\n return scheme in schemes and pieces.netloc is not None\n except Exception: # pylint: disable=broad-except\n return False", "def test_on_same_domain(self):\n self.assertTrue(on_same_domain(\n \"https://google.com/a/b\",\n \"http://sub-domain.google.com?time=0400\"\n ))", "def _has_all_host_addresses(self, addresses):\n for s_id, s_size in enumerate(self.subnets[1:]):\n for m in range(s_size):\n # +1 to s_id since first subnet is 1\n if str((s_id + 1, m)) not in addresses:\n return False\n return True", "def _check_domain_already_exists_on_sni_certs(self, domain_name):\n\n found = False\n found_cert = None\n for sni_cert_name in self.sni_cert_cnames:\n sans = utils.get_sans_by_host_alternate(sni_cert_name)\n if domain_name in sans:\n found = True\n found_cert = sni_cert_name\n break\n\n return found, found_cert", "def __contains__(self, uri):\n\t\treturn uri in self._uris", "def host_okay(self, host: str) -> bool:\n host = host.lower()\n if host in self.root_domains:\n return True\n\n if re.match(r'\\A[\\d\\.]*\\Z', host):\n return False\n\n if self.strict:\n return self.host_valid_strict(host)\n\n return self.host_valid_lenient(host)", "def are_valid_email_addresses(self, addr_list):\n\t\tfor addr in addr_list:\n\t\t\tif not self.is_valid_email_address(addr):\n\t\t\t\treturn False\n\t\treturn True", "def check_the_list_for_matching(checked_list: list, phrase_to_match: str) -> bool:\n for word in checked_list:\n if phrase_to_match.startswith(word):\n return True\n return False", "def check_urls(quartus_versions):\n success = True\n for quartus in quartus_versions.keys():\n parts = quartus_versions[quartus]\n parts_str = [str(k) for k in parts.keys()]\n #print(\"Checking Quartus %s, available parts (%s)\\n\" % (quartus, \",\".join(parts_str)))\n for part in parts:\n result = test_url(quartus, part, parts[part])\n if not result:\n print(\"\\nMissing %s/%s url=%s\" % (quartus, part, parts[part]))\n success = False\n return success", "def isValid(self, s):\n for valid in self.validTargets:\n if (valid[0] in s):\n return True\n return False", "def has_hashtag(self, tag_list, **kwargs):\n lowlist = [tag.lower() for tag in tag_list]\n alllower = ('case_sensitive' in kwargs and not kwargs['case_sensitive'])\n for ht in self.original.entities['hashtags']:\n lowht = ht['text'].lower()\n if alllower and lowht in lowlist or '#' + lowht in lowlist:\n return True\n if ht['text'] in tag_list or '#' + ht['text'] in tag_list:\n return True\n return False", "def _check_domain_already_exists_on_san_certs(self, domain_name):\n\n found = False\n found_cert = None\n for san_cert_name in self.san_cert_cnames:\n sans = utils.get_sans_by_host_alternate(\n '.'.join(\n [\n san_cert_name,\n self.driver.akamai_https_access_url_suffix\n ]\n )\n )\n if domain_name in sans:\n found = True\n found_cert = san_cert_name\n break\n\n return found, found_cert", "def test_link_is_tracked_true_with_subdomain(self):\n 
self.assertTrue(link_is_tracked(\"https://foo.test.com/testurl\"))", "def list_in_groupdict(keylist, groupdict):\n for key in keylist:\n if key in groupdict and groupdict[key] is not None:\n return True\n return False", "def exists(self, url):\n return (self.base_path / url).exists()", "def is_valid_domain_name(value):\n p = re.compile(\n r'^(([a-zA-Z]{1})|([a-zA-Z]{1}[a-zA-Z]{1})|'\n r'([a-zA-Z]{1}[0-9]{1})|([0-9]{1}[a-zA-Z]{1})|'\n r'([a-zA-Z0-9][-_.a-zA-Z0-9]{0,61}[a-zA-Z0-9]))\\.'\n r'([a-zA-Z]{2,13}|[a-zA-Z0-9-]{2,30}.[a-zA-Z]{2,3})$'\n )\n m = p.match(value)\n if m:\n return True\n else:\n return False", "def filter_domain(name):\n def wrapped(request):\n \"\"\" Function used to filter request\n \"\"\"\n if request.environ.get('HTTP_HOST'):\n url = request.environ['HTTP_HOST']\n else:\n url = request.environ['SERVER_NAME']\n if url.lower() == name.lower():\n return True\n\n return False\n return wrapped", "def chkfqdn(fqdn):\n if fqdn is None:\n return False\n hp = hostportion(fqdn)\n # not needed right now: pp = portportion(fqdn)\n # TODO need to augment this for IPv6 addresses\n return re.match('^[a-zA-Z0-9_-]+(\\\\.[a-zA-Z0-9_-]+)+$', hp) is not None", "def url_has_netloc(self, url):\n\n parsed_url = urllib.parse.urlparse(url)\n\n if parsed_url.scheme == \"http\":\n port = 80\n elif parsed_url.scheme == \"https\":\n port = 443\n if parsed_url.port:\n port = parsed_url.port\n\n domain = parsed_url.hostname\n if domain:\n if domain != self.domain or port != self.port:\n return False\n return True\n return False", "def checkIfInList(_addr, _list):\n for item in _list:\n if (_addr.this == item.this):\n return True\n \n return False", "def pageContains(page, strList):\n for text in strList:\n if text in page['data']:\n logging.log(5, 'Found string %s' % text)\n return True\n\n return False", "def has_certificate(domain):\n all_certs = fetch_domain_certs(domain)\n for cert in all_certs:\n if cert[\"name_value\"] == domain:\n return cert", "def check_url(url=None, parse_url=None):\n \n if not parse_url:\n parse_url = urlparse.urlparse(url)\n \n return (parse_url.netloc == 'vimeo.com' or parse_url.netloc.endswith('.vimeo.com')) and 'hubnut/album/' not in parse_url.path", "def match_list(row, queries=None, reverse=False):\n if not queries:\n # no queries = pass\n return True\n else:\n # otherwise, must match at least one\n for query in queries:\n if query.match_row(row):\n return not reverse\n return reverse", "def containsURL(line: str):\n\n URL = \"(http|ftp|https)://([\\w_-]+(?:(?:\\.[\\w_-]+)+))\" \\\n \"([\\w.,@?^=%&:/~+#-]*[\\w@?^=%&/~+#-])?\"\n if re.match(URL, line):\n return True\n else:\n return False", "def hosts(self, value):\n if value is not None:\n self._hosts = dict(\n (str.join('.', [host, self.domain]) if host != '' else self.domain, host)\n for host in value\n )\n else:\n raise ValueError('Could not parse hosts.')", "def _domain(self):\n return [self.args[0] >= 0, self.args[1] >= 0]", "def check_url(url=None, parse_url=None):\n \n if not parse_url:\n parse_url = urlparse.urlparse(url)\n \n return parse_url.netloc.endswith('bambuser.com')\\\n and bool(re.search('^\\/(v|broadcast)\\/\\d+(\\.live)?$', parse_url.path))" ]
[ "0.8105381", "0.7370516", "0.7126605", "0.6921519", "0.6796388", "0.6785669", "0.67464596", "0.6718634", "0.6564882", "0.654149", "0.6427154", "0.636126", "0.6316495", "0.6279034", "0.62371093", "0.62335455", "0.62335455", "0.6123072", "0.6119781", "0.60940176", "0.6078012", "0.6037316", "0.6007615", "0.5966135", "0.59353614", "0.59148675", "0.5889323", "0.5885462", "0.58753777", "0.58717775", "0.5864297", "0.5856078", "0.5850204", "0.58116424", "0.57877076", "0.5753504", "0.5715173", "0.56826466", "0.56407756", "0.56263155", "0.56245965", "0.56243956", "0.56189764", "0.55989295", "0.5597097", "0.55900306", "0.5582064", "0.55513173", "0.5533996", "0.55315995", "0.55226994", "0.5520471", "0.55035317", "0.5485182", "0.54811305", "0.54473114", "0.5444095", "0.5442946", "0.5440661", "0.5426889", "0.5410217", "0.54096806", "0.54022473", "0.53958005", "0.53744483", "0.53618073", "0.5348397", "0.53395677", "0.53246135", "0.53193253", "0.53165835", "0.5309778", "0.5306201", "0.5300383", "0.529805", "0.5292354", "0.5288757", "0.52854073", "0.5278392", "0.52716047", "0.52682143", "0.5244461", "0.5240427", "0.523457", "0.52018785", "0.51989526", "0.5190474", "0.5175935", "0.51673335", "0.51639235", "0.5158753", "0.51397234", "0.5137726", "0.51354057", "0.51351404", "0.5126427", "0.51252997", "0.5122357", "0.5120726", "0.5111702" ]
0.83841044
0
Return True if host matches an entry in the given domain list.
def match_host(host, domainlist):
    if not host:
        return False
    for domain in domainlist:
        if domain.startswith('.'):
            if host.endswith(domain):
                return True
        elif host == domain:
            return True
    return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def match_url(url, domainlist):\n if not url:\n return False\n return match_host(url_split(url)[1], domainlist)", "def matches_hostname(self, hostname):\n return hostname in self.hostnames", "def domain_in_ip_whois_match(self, domain, ip):\n try:\n domain_ip_desc = self.get_name_by_ip(ip).lower().split(' ')\n domain_list = domain.lower()\n return domain_list in domain_ip_desc\n except Exception as e:\n return e", "def __isOnDomainList(self, rules, domain):\n for rule in rules:\n if rule.startswith(\".\"):\n if domain.endswith(rule):\n return True\n \n withoutDot = rule[1:]\n if domain == withoutDot:\n return True\n else:\n domainEnding = domain[-(len(rule) + 1):]\n if (\n domainEnding and\n domainEnding[0] == \".\" and\n domain.endswith(rule)\n ):\n return True\n \n if rule == domain:\n return True\n \n return False", "def has(self, hostname: str) -> bool:\n for hostinfo in self.hostinfo_list:\n if hostinfo.hostname == hostname:\n return True\n return False", "def matches_host(self, host: str, requires_data_uri: bool = False) -> bool:\n return (\n self.url\n and self.site_host\n and self.site_host in host\n and (self.data_uri if requires_data_uri else True)\n )", "def contains_domain(address, domain):\n domain = r'[\\w\\.-]+@'+domain+'$'\n if re.match(domain,address):\n return True\n return False", "def _listContains(self, l, entry):\n for i in range(0, len(l)):\n if l[i] == entry:\n return True\n return False", "def hasDomains(team_id,domain_id):\n sql = \"SELECT id from datawake_domains where id = %s AND team_id = %s\"\n rows = dbGetRows(sql, [domain_id,team_id])\n return len(rows) > 0", "def __matchDomain(self, cookieDomain, siteDomain):\n if not siteDomain:\n # empty URLs always match\n return True\n \n if cookieDomain.startswith(\".\"):\n cookieDomain = cookieDomain[1:]\n if siteDomain.startswith(\".\"):\n siteDomain = siteDomain[1:]\n \n if cookieDomain == siteDomain:\n return True\n \n if not siteDomain.endswith(cookieDomain):\n return False\n \n index = siteDomain.find(cookieDomain)\n return index > 0 and siteDomain[index - 1] == \".\"", "def has_domain(self, domain, port=DEFAULT_PORT):\n \n vhost_tag_open = False\n\n for line in self._content.split(\"\\n\"):\n if self.__get_vhost_line(port) in line:\n vhost_tag_open = True\n\n if \"</VirtualHost>\" in line:\n vhost_tag_open = False\n\n if vhost_tag_open and self.__get_servername_line(domain) in line: # Add alias\n return True\n\n return False", "def matches(self, url):\n split = urlparse.urlsplit(url)\n return self.host == split.hostname", "def _check_host_existence(self, hostname: str) -> bool:\n with self.lock:\n hosts = self.hosts.all()\n for host in hosts:\n if host['hostname'] == hostname:\n return True\n return False", "def is_domain(value):\n result = any(check.isalpha() for check in value)\n return result", "def is_entry_in_list(entry, a_list):\n for item in a_list:\n if entry['description'] == item['description']:\n return True\n return False", "def include_hostnames(nmap_host):\n if nmap_host.hostnames:\n return True\n return False", "def __compare_lowercase(self, dn, dn_list):\n\t\tfor d in dn_list:\n\t\t\tif dn.lower() == d.lower():\n\t\t\t\treturn True\n\t\treturn False", "def is_same_domain(host, pattern):\n if not pattern:\n return False\n\n pattern = pattern.lower()\n return (\n pattern[0] == \".\"\n and (host.endswith(pattern) or host == pattern[1:])\n or pattern == host\n )", "def is_ncar_host():\n hostname = socket.getfqdn()\n \n return any([re.compile(ncar_host).search(hostname) \n for ncar_host in 
['cheyenne', 'casper', 'hobart']])", "def has_host(self, host):\n assert type(host) is str, 'Wrong type for [host], should be a string [was {0}]'.format(type(host))\n assert 'scan' in self._scan_result, 'Do a scan before trying to get result !'\n\n if host in list(self._scan_result['scan'].keys()):\n return True\n\n return False", "def checkIfInList(_addr, _list):\n for item in _list:\n if (_addr.this == item.this):\n return True\n \n return False", "def _is_domain_allowed(email):\n domains = local_config.AuthConfig().get('whitelisted_domains', default=[])\n for domain in domains:\n if utils.normalize_email(email).endswith('@%s' % domain.lower()):\n return True\n\n return False", "def _supports_domain(cls, domain):\n return domain in (ZZ, QQ)", "def check_the_list_for_matching(checked_list: list, phrase_to_match: str) -> bool:\n for word in checked_list:\n if phrase_to_match.startswith(word):\n return True\n return False", "def _has_all_host_addresses(self, addresses):\n for s_id, s_size in enumerate(self.subnets[1:]):\n for m in range(s_size):\n # +1 to s_id since first subnet is 1\n if str((s_id + 1, m)) not in addresses:\n return False\n return True", "def __contains__(self, address):\n return any([\n ipaddress.ip_address(address) in network\n for network in self.networks\n ])", "def valid_host(host):\n if host in ACCEPTED_HOSTS:\n return True\n return False", "def canDo_url(self, url):\n hostname = urlparse.urlsplit(url)[1]\n for hostEnd in self.highwireHosts:\n if hostname.endswith(hostEnd):\n logging.log(5, 'url hostname %s ends with %s -> highwire' % (hostname, hostEnd))\n return True\n\n if hostname in self.hostCache:\n ipAddr = self.hostCache[hostname]\n else:\n logging.debug('Looking up IP for %s' % hostname)\n try:\n ipAddr = socket.gethostbyname(hostname)\n self.hostCache[hostname] = ipAddr\n except socket.gaierror:\n raise pubGetError('Illegal hostname %s in link' % hostname, 'invalidHostname', hostname)\n\n ipParts = ipAddr.split('.')\n ipParts = [ int(x) for x in ipParts ]\n result = ipParts[0] == 171 and ipParts[1] in range(64, 68)\n if result == True:\n logging.log(5, 'hostname %s is highwire host' % hostname)\n return result", "def is_active_domain(self, domain=\"www.google.com\", name_server='1.1.1.1'):\n my_resolver = dns.resolver.Resolver()\n my_resolver.nameservers = [name_server]\n my_resolver.timeout = 3\n my_resolver.lifetime = 3\n try:\n A = my_resolver.query(domain, 'A')\n for i in A.response.answer:\n for j in i.items:\n return self.is_actual_ip(str(j))\n except Exception as e:\n return None", "def match_list(row, queries=None, reverse=False):\n if not queries:\n # no queries = pass\n return True\n else:\n # otherwise, must match at least one\n for query in queries:\n if query.match_row(row):\n return not reverse\n return reverse", "def indomain(url, domain):\n if url and domain:\n return url.startswith(domain)\n return False", "def validate_host(self, host: str) -> bool:\n ip_address_regex = re.compile(r'^(?:(?:25[0-5]|2[0-4][0-9]|1[0-9][0-9]|[1-9]?[0-9])\\.){3}'\n r'(?:25[0-5]|2[0-4][0-9]|1[0-9][0-9]|[1-9]?[0-9])$')\n hostname_regex = re.compile(r'^(?:[a-z0-9](?:[a-z0-9-]{0,61}[a-z0-9])?\\.)+'\n r'[a-z0-9][a-z0-9-]{0,61}[a-z0-9]$')\n url_regex = re.compile(r'^(ldaps?)://'\n r'((?:[a-z0-9](?:[a-z0-9-]{0,61}[a-z0-9])?\\.)+'\n r'[a-z0-9][a-z0-9-]{0,61}[a-z0-9]):'\n r'([0-9]{1,5})$')\n if bool(ip_address_regex.match(host)):\n # using ipv4 address\n valid = True\n elif bool(hostname_regex.match(host)):\n # using a hostname address\n valid = True\n elif 
bool(url_regex.match(host)):\n # using host url address\n match = url_regex.match(host)\n proto = match.group(1)\n if proto == 'ldaps':\n self.server_use_ssl = True\n valid = True\n else:\n # unsupported host format\n valid = False\n return valid", "def list_in_groupdict(keylist, groupdict):\n for key in keylist:\n if key in groupdict and groupdict[key] is not None:\n return True\n return False", "def are_valid_email_addresses(self, addr_list):\n\t\tfor addr in addr_list:\n\t\t\tif not self.is_valid_email_address(addr):\n\t\t\t\treturn False\n\t\treturn True", "def __guess_domain(email_domain: str, domain: str) -> bool:\n if len(email_domain) != len(domain):\n return False\n\n char_poses = []\n\n for char_pos, char in enumerate(email_domain):\n if char != '*':\n char_poses.append((char_pos, char))\n\n for char_pos, char in char_poses:\n if domain[char_pos] != char:\n return False\n\n return True", "def host_valid_lenient(self, host: str) -> bool:\n return WebCrawler.resolve_domain(host) in self.root_domains", "def match(self, scheme=None, host=None, path=None, title=None):\n for alternate in self.alternates:\n if alternate.match(scheme, host, path, title):\n return True\n if title and title.lower() in title.lower():\n return True\n\n if (scheme is not None) and scheme.lower() != scheme.lower():\n return False\n if (host is not None) and host.lower() != host.lower():\n return False\n if (path is not None) and path.lower() != path.lower():\n return False\n\n return True", "def Has(cls, word_list):\n entity = WordList.get_by_id(word_list)\n if entity:\n return True\n return False", "def is_all_in_one(config):\n return len(filtered_hosts(config, exclude=False)) == 1", "def exist(self,list,a):\r\n\t\ti = 0\r\n\t\tfor elem in list:\r\n\t\t\tif (elem == a):\r\n\t\t\t\ti=i+1\r\n\t\tif (i>0):\r\n\t\t\treturn True\r\n\t\telse:\r\n\t\t\treturn False", "def search(url, domain_list):\n resp = requests.get(url)\n if not resp.json().get('hits', '').get('hits', []):\n return\n for hit in resp.json()[\"hits\"][\"hits\"]:\n domain = hit.get(\"_source\", {}).get(\"domain\", \"\")\n if not domain:\n continue\n if not domain in domain_list:\n domain_list.append(domain)\n #print(hit[\"_source\"].get(\"title\", \"\").encode(\"ascii\",\"ignore\"))\n if domain not in ALLOWED_DOMAINS:\n print(domain)", "def _check_domain_already_exists_on_sni_certs(self, domain_name):\n\n found = False\n found_cert = None\n for sni_cert_name in self.sni_cert_cnames:\n sans = utils.get_sans_by_host_alternate(sni_cert_name)\n if domain_name in sans:\n found = True\n found_cert = sni_cert_name\n break\n\n return found, found_cert", "def verify_email_in_whitelist(email, whitelist=None):\n if whitelist is None:\n settings = api.config.get_settings()\n whitelist = settings[\"email_filter\"]\n\n # Nothing to check against!\n if len(whitelist) == 0:\n return True\n\n for email_domain in whitelist:\n if re.match(r\"^[^@]+@{}$\".format(email_domain), email) is not None:\n return True\n\n return False", "def contains(list, e):\r\n for elem in list:\r\n if elem == e:\r\n return True\r\n return False", "def _check_domain_already_exists_on_san_certs(self, domain_name):\n\n found = False\n found_cert = None\n for san_cert_name in self.san_cert_cnames:\n sans = utils.get_sans_by_host_alternate(\n '.'.join(\n [\n san_cert_name,\n self.driver.akamai_https_access_url_suffix\n ]\n )\n )\n if domain_name in sans:\n found = True\n found_cert = san_cert_name\n break\n\n return found, found_cert", "def check_if_same_host(host, url):\n # print '\\nchecking 
same origin:', host, get_host_name(url)\n\n if host == get_host_name(url):\n return True\n return False", "def is_same_domain(url1, url2):\n return tldextract.extract(url1).domain == tldextract.extract(url2).domain", "def is_fqdn(address):\n\n return bool(re.match(re_fqdn, address))", "def _host_exists(self, host_name):\n hosts = self.host_obj.search_by_name(host_name)\n\n if len(hosts) > 0:\n for host in hosts:\n hostname = host['match']\n if host_name == hostname:\n return hostname\n return hostname\n LOG.debug(\"no host found for:\" + host_name)\n return None", "def isValid(self, s):\n for valid in self.validTargets:\n if (valid[0] in s):\n return True\n return False", "def contains(list_, filter_):\n for x in list_:\n if filter_(x):\n return True\n return False", "def checkHostmask(self, hostmask, useAuth=True):\n if useAuth:\n timeout = conf.supybot.databases.users.timeoutIdentification()\n removals = []\n try:\n for (when, authmask) in self.auth:\n if timeout and when+timeout < time.time():\n removals.append((when, authmask))\n elif hostmask == authmask:\n return True\n finally:\n while removals:\n self.auth.remove(removals.pop())\n for pat in self.hostmasks:\n if ircutils.hostmaskPatternEqual(pat, hostmask):\n return pat\n return False", "def _match_regex_list(subject, expressions):\n for expr in expressions:\n if re.search(expr, subject):\n return True\n return False", "def _matches(self, url, options,\n general_re, domain_required_rules, rules_with_options):\n if general_re and general_re.search(url):\n return True\n\n rules = []\n if 'domain' in options and domain_required_rules:\n src_domain = options['domain']\n for domain in _domain_variants(src_domain):\n if domain in domain_required_rules:\n rules.extend(domain_required_rules[domain])\n\n rules.extend(rules_with_options)\n\n if self.skip_unsupported_rules:\n rules = [rule for rule in rules if rule.matching_supported(options)]\n\n return any(rule.match_url(url, options) for rule in rules)", "def in_list(verifield, checklist):\n return verifield in checklist", "def hosts(self, value):\n if value is not None:\n self._hosts = dict(\n (str.join('.', [host, self.domain]) if host != '' else self.domain, host)\n for host in value\n )\n else:\n raise ValueError('Could not parse hosts.')", "def contains(self, key):\n h = self.hash_value(key)\n return key in self.hs[h]", "def has_hashtag(self, tag_list, **kwargs):\n lowlist = [tag.lower() for tag in tag_list]\n alllower = ('case_sensitive' in kwargs and not kwargs['case_sensitive'])\n for ht in self.original.entities['hashtags']:\n lowht = ht['text'].lower()\n if alllower and lowht in lowlist or '#' + lowht in lowlist:\n return True\n if ht['text'] in tag_list or '#' + ht['text'] in tag_list:\n return True\n return False", "def match_hosts(needle):\n\n matched_hosts = []\n with open(known_hosts_path, \"r\") as known_hosts_file:\n for line in known_hosts_file:\n host, _, _ = line.split(\" \")\n\n if needle in host:\n matched_hosts.append(host)\n\n return matched_hosts", "def get_matching_emails(all_the_email,addrlist):\n l_addrlist = map(unicode.lower,addrlist)\n return [ e for e in all_the_email if e.l_address in l_addrlist ]", "def user_in_db(user_field, users_list, user_key):\n if any(user.get(user_key) == user_field for user in users_list):\n return True\n return False", "def match_list(column, patterns):\n for pattern in patterns:\n if pattern.match(column):\n return True\n return False", "def have(keylist, dic):\n return all(key in dic and dic[key] for key in keylist)", "def 
has_certificate(domain):\n all_certs = fetch_domain_certs(domain)\n for cert in all_certs:\n if cert[\"name_value\"] == domain:\n return cert", "def query_host(self, name):\n z = dns.zone.from_xfr(dns.query.xfr(self.server_address, self.domain))\n try:\n z.find_node(name)\n return True\n except KeyError:\n return False", "def set_hosts(self, host_list: t.List[str]) -> None:\n if isinstance(host_list, str):\n host_list = [host_list.strip()]\n if not isinstance(host_list, list):\n raise TypeError(\"host_list argument must be a list of strings\")\n if not all(isinstance(host, str) for host in host_list):\n raise TypeError(\"host_list argument must be list of strings\")\n # TODO check length\n if self.batch:\n if hasattr(self, \"batch_settings\") and self.batch_settings:\n self.batch_settings.set_hostlist(host_list)\n\n if self.launcher == \"lsf\":\n for db in self.dbnodes:\n db.set_hosts(host_list)\n else:\n for host, db in zip(host_list, self.dbnodes):\n if isinstance(db.run_settings, AprunSettings):\n if not self.batch:\n db.run_settings.set_hostlist([host])\n else:\n db.run_settings.set_hostlist([host])\n\n if db.is_mpmd and hasattr(db.run_settings, \"mpmd\"):\n for i, mpmd_runsettings in enumerate(db.run_settings.mpmd):\n mpmd_runsettings.set_hostlist(host_list[i + 1])", "def host_okay(self, host: str) -> bool:\n host = host.lower()\n if host in self.root_domains:\n return True\n\n if re.match(r'\\A[\\d\\.]*\\Z', host):\n return False\n\n if self.strict:\n return self.host_valid_strict(host)\n\n return self.host_valid_lenient(host)", "def validate_slug(host_slug: str,\n database_connection: mysql.connector.connect) -> bool:\n host_slug = host_slug.strip()\n if not host_slug:\n return False\n\n try:\n cursor = database_connection.cursor()\n query = \"SELECT hostslug FROM ww_hosts WHERE hostslug = %s;\"\n cursor.execute(query, (host_slug,))\n result = cursor.fetchone()\n cursor.close()\n\n return bool(result)\n except ProgrammingError as err:\n raise ProgrammingError(\"Unable to query the database\") from err\n except DatabaseError as err:\n raise DatabaseError(\"Unexpected database error\") from err", "def _is_host_iqn_registered_in_target(self, port, gid, host_iqn):\n for hba_iscsi in self.client.get_hba_iscsis(port, gid):\n if host_iqn == hba_iscsi['iscsiName']:\n return True\n return False", "def _is_valid_target(hostname):\n if not hostname:\n return False\n\n # Check if it's a valid IP\n if _is_valid_ipv4_address(hostname) or _is_valid_ipv6_address(hostname):\n return True\n\n # Check if it's a valid DNS name\n\n if hostname[-1] == '.':\n hostname = hostname[:-1] # strip exactly one dot from the right, if present\n if len(hostname) < 1 or len(hostname) > 253: # Technically 255 octets but 2 are used for encoding\n return False\n\n labels = hostname.split(\".\")\n\n # the TLD must be not all-numeric\n if re.match(r\"[0-9]+$\", labels[-1]):\n return False\n\n allowed = re.compile(r\"(?!-)[a-z0-9-]{1,63}(?<!-)$\", re.IGNORECASE)\n return all(allowed.match(label) for label in labels)", "def domain_filter(self, url):\n return url_is_from_any_domain(url, self._domain)", "def check(self, args):\n host, server = args\n\n try:\n host_addr = socket.gethostbyname(host)\n except socket.error:\n return\n\n # Reverse ip addr\n addr_parts = string.split(host_addr, '.')\n addr_parts.reverse()\n host_addr = string.join(addr_parts, '.')\n\n check_host = '{0}.{1}'.format(host_addr, server)\n\n try:\n check_addr = socket.gethostbyname(check_host)\n except socket.error:\n check_addr = None\n\n if 
check_addr is not None and \"127.0.0.\" in check_addr:\n self.blacklisted.append(server)", "def match(self, sentence) -> bool:\r\n for word in self.word_list:\r\n if word.lower() in sentence.lower():\r\n return True\r\n return False", "def contains(self, d):\n\n temp = self.head\n while temp is not None:\n if temp.data == d:\n return True\n else:\n temp = temp.next\n\n return False", "def match_url(self, url, options=None):\n options = options or {}\n for optname in self.options:\n if optname == 'match-case': # TODO\n continue\n\n if optname not in options:\n raise ValueError(\"Rule requires option %s\" % optname)\n\n if optname == 'domain':\n if not self._domain_matches(options['domain']):\n return False\n continue\n\n if options[optname] != self.options[optname]:\n return False\n\n return self._url_matches(url)", "def is_valid_user_provided_domain_format(domain):\n\n if domain == \"\":\n print(\"You didn't enter a domain. Try again.\")\n return False\n\n domain_regex = re.compile(r\"www\\d{0,3}[.]|https?\")\n\n if domain_regex.match(domain):\n print(\n \"The domain \" + domain + \" is not valid. Do not include \"\n \"www.domain.com or http(s)://domain.com. Try again.\"\n )\n return False\n else:\n return True", "def _check_queryinfo_existence(self, hostname: str, job: str) -> bool:\n with self.lock:\n hosts = self.host_query_info.all()\n for host in hosts:\n if host['hostname'] == hostname and host['job'] == job:\n return True\n return False", "def find(ss, list_seq):\n\tfor item in list_seq:\n\t\tif item in ss:\n\t\t\treturn True\n\treturn False", "def is_in(self, entry):\n return entry in self.__entries", "def _check_audience(self, request, audience):\n if not self.audiences:\n return audience == request.host_url\n for audience_pattern in self._audience_patterns:\n if audience_pattern.match(audience):\n return True\n return False", "def __isSameHost( self, hostCN, hostConn ):\n hostCN_m = hostCN\n if '/' in hostCN:\n hostCN_m = hostCN.split( '/' )[1]\n if hostCN_m == hostConn:\n return True\n result = checkHostsMatch( hostCN_m, hostConn )\n if not result[ 'OK' ]:\n return False\n return result[ 'Value' ]", "def host_valid_strict(self, host: str) -> bool:\n host = host[4:] if host.startswith('www.') else 'www.' 
+ host\n return host in self.root_domains", "def __is_present(self, subject):\n\n if PyFunceble.Check(subject).is_ipv4():\n with open(self.input_file, \"r\", encoding=\"utf-8\") as file_stream:\n for line in file_stream:\n if not line.startswith(subject):\n continue\n\n return True\n else:\n subjects = PyFunceble.DNSLOOKUP.a_record(subject)\n\n return any([self.__is_present(x) for x in subjects])\n\n return False", "def in_list(value, arg):\r\n return value in arg", "def match(self, _ip):\n try:\n return bool(ip_address(_ip) in self.network)\n except ValueError:\n return False", "def all_hosts(*args, **kwargs):\n return True", "def __contains__(self, key, *args, **kwargs):\n if key in self._list(*args, **kwargs):\n return True\n return False", "def __contains__(self, uri):\n\t\treturn uri in self._uris", "def match_allowed_origin(self, parsed_origin, pattern):\n if parsed_origin is None:\n return False\n\n # Get ResultParse object\n parsed_pattern = urlparse(pattern.lower())\n if parsed_origin.hostname is None:\n return False\n if not parsed_pattern.scheme:\n pattern_hostname = urlparse(\"//\" + pattern).hostname or pattern\n return is_same_domain(parsed_origin.hostname, pattern_hostname)\n # Get origin.port or default ports for origin or None\n origin_port = self.get_origin_port(parsed_origin)\n # Get pattern.port or default ports for pattern or None\n pattern_port = self.get_origin_port(parsed_pattern)\n # Compares hostname, scheme, ports of pattern and origin\n if (\n parsed_pattern.scheme == parsed_origin.scheme\n and origin_port == pattern_port\n and is_same_domain(parsed_origin.hostname, parsed_pattern.hostname)\n ):\n return True\n return False", "def find_host_key(self, value):\n for key in self:\n if value in key.hosts:\n return key\n return None", "def valid_mx(self, domain):\n try:\n self.nslookup_installed()\n except:\n return True # Valid email as we cant check with nslookup\n\n p = subprocess.Popen(['nslookup', '-query=mx', domain], stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n out, err = p.communicate()\n\n try:\n return bool(re.search('mail exchanger', out))\n except:\n # raise Exception(\"Exception in DNS lookup!\" + err)\n return False", "def record_exists(route53_zone, service_name, ip):\n # Match records belonging to the service for particular service and\n # environment.\n match_regex = \"{}\\d+\\.{}\\.?\".format(service_name, route53_zone.name)\n\n for record in route53_zone.get_records():\n match = re.match(match_regex, record.name)\n if match and ip in record.resource_records:\n return True\n\n return False", "def _contains(test_name: str, container_list: Iterable[Container[str]]) -> bool:\n for container in container_list:\n if test_name in container:\n return True\n return False", "def contains(cls, lhs, rhs):\n return rhs in lhs", "def contains(str_or_list, val_to_find):\n \n return (val_to_find in str_or_list)", "def matcher(item):\n hit = item.get(lookup_key)\n if not isinstance(hit, list):\n return hit == identifier\n return any([el for el in hit if el == identifier])", "def is_in_adr_lexicon(text, adr_lexicon_dict):\n for item in adr_lexicon_dict:\n if item.lower() == text.lower():\n return True\n\n return False", "def __contains__(self, item):\n cur_node = self.head\n while cur_node is not None:\n if item in cur_node.data_list:\n return True\n else:\n cur_node = cur_node.next_node\n\n return False", "def matchWildcardUrls(url, listOfUrls):\n if not url or not listOfUrls:\n return None\n pattern = re.compile('^[a-zA-Z][+a-zA-Z0-9.-]*:.*')\n if not 
pattern.search(str(url)) and not url.startswith('//'):\n url = '//' + url\n cspUrl = urlparse(str(url)) \n host = cspUrl.netloc.lower() or \"\"\n hostHasWildcard = host.startswith(\"*.\")\n wildcardFreeHost = re.sub(\"^\\*\", \"\", host, flags=re.IGNORECASE)\n path = cspUrl.path or ''\n hasPath = len(cspUrl.path) > 0 \n\n for url2 in listOfUrls:\n url = urlparse(str(url2))\n domain = url.netloc.lower() or \"\"\n domainHasWildCard = domain.startswith(\"*.\")\n if (not domainHasWildCard):\n if (not domain.endswith(wildcardFreeHost) ): \n continue\n if (not hostHasWildcard and host != domain):\n continue\n else:\n domainparts = list(reversed(domain.split('.')))\n hostparts = list(reversed(host.split('.')))\n stop = False\n domainlen = len(domain.split('.'))\n hostlen = len(host.split('.'))\n \n for idx, domainpart in enumerate(domainparts):\n if idx < hostlen:\n hostpart = hostparts[idx]\n if hostpart != domainpart and (domainpart != '*' and hostpart != '*'):\n stop = True\n if stop:\n continue\n if (hasPath):\n if (path.endswith('/')): \n if (not url.path.startswith(path)):\n continue\n elif (url.path != path):\n continue\n\n return url\n\n return None", "def _domain(self):\n return [self.args[0] >= 0, self.args[1] >= 0]" ]
[ "0.718514", "0.7060217", "0.69207466", "0.6798229", "0.6687461", "0.6494838", "0.6165921", "0.61154306", "0.6060035", "0.6051432", "0.6046193", "0.60253114", "0.5941196", "0.59102017", "0.5872509", "0.5855687", "0.5823825", "0.5729784", "0.5715016", "0.5689225", "0.5683604", "0.5669706", "0.5636627", "0.5630607", "0.5621606", "0.5619188", "0.56008935", "0.5567023", "0.5528802", "0.55229855", "0.55214226", "0.54734975", "0.5427655", "0.54046535", "0.54024315", "0.5392025", "0.5379543", "0.5361932", "0.53586483", "0.531687", "0.5306961", "0.52963465", "0.52883244", "0.5283641", "0.5274734", "0.5267745", "0.5260273", "0.52520496", "0.52404475", "0.52261364", "0.5212653", "0.51936525", "0.5183911", "0.5182475", "0.5168323", "0.51620305", "0.5161716", "0.5160004", "0.5155239", "0.51513267", "0.5144105", "0.5143743", "0.5142934", "0.51385266", "0.5121299", "0.51158", "0.51060843", "0.5103578", "0.50839424", "0.50835264", "0.50691456", "0.50662893", "0.50615716", "0.5058793", "0.5051363", "0.50328517", "0.50165933", "0.50143576", "0.5002508", "0.4999226", "0.49951997", "0.49874344", "0.49793494", "0.49767274", "0.49648672", "0.49599478", "0.495865", "0.49429002", "0.49406606", "0.49378854", "0.49236968", "0.49167117", "0.49159718", "0.49080613", "0.49078888", "0.48981008", "0.48937008", "0.48918542", "0.48873293", "0.48783022" ]
0.83065116
0
Check if url needs percent quoting. Note that the method only checks basic character sets, not any other syntax. The URL might still be syntactically incorrect even when it is properly quoted.
def url_needs_quoting(url):
    if url.rstrip() != url:
        # handle trailing whitespace as a special case
        # since '$' matches immediately before an end-of-line
        return True
    return not _safe_url_chars_ro.match(url)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_unreserved_percentencoding():\n assert (normalize_url(\"http://www.example.com/%7Eusername/\") ==\n \"http://www.example.com/~username\")\n assert (normalize_url('http://example.com/foo%23bar') ==\n 'http://example.com/foo%23bar')\n assert (normalize_url('http://example.com/foo%2fbar') ==\n 'http://example.com/foo%2Fbar')\n assert (normalize_url('http://example.com/foo%3fbar') ==\n 'http://example.com/foo%3Fbar')", "def is_url_quoted(url):\n try:\n url_ = urlunquote(url)\n return url != url_\n except: # problem with unquoting -- then it must be wasn't quoted (correctly)\n return False", "def test_dont_percent_encode_safe_chars_query():\n assert (normalize_url(\"http://example.com/a/?face=(-.-)\") ==\n \"http://example.com/a?face=(-.-)\")", "def test_normalize_percent_encoding_in_querystring():\n assert (normalize_url(\"http://example.com/?a=b%c2\") ==\n \"http://example.com/?a=b%C2\")", "def test_path_percent_encoding():\n assert (normalize_url(\"http://example.com/hello world{}\") ==\n \"http://example.com/hello%20world%7B%7D\")", "def test_percent_encode_querystring():\n assert (normalize_url(\"http://example.com/?a=hello{}\") ==\n \"http://example.com/?a=hello%7B%7D\")", "def ISURL(value):\n value = value.strip()\n if ' ' in value: # Disallow spaces inside value.\n return False\n return bool(_url_regexp.match(value))", "def _validate_url(url):\n if not url or url.count('/') != 1 or url[0] != '@':\n return False\n return True", "def IsValidURL(s):\n return RE_COMPLEX_URL.match(s)", "def __isUrl(self, url):\n if type(url)==str:\n return url.startswith('http://') or url.startswith('https://')\n return False", "def is_url_arg(str):\n return (True if URL_REGEX.match(str[1:-1] if is_quoted(str) else str)\n else False)", "def valid(url):\n return 0 < len(urlparse(url)[1])", "def _is_url(string):\n return \"http\" in string", "def check_url(url=None, parse_url=None):\n return False", "def test_percent_encode(self):\n self.gmail_case.set_query_arg('Ladies + Gentlemen')\n self.assertEqual(self.gmail_case.__repr__(), \n 'https://www.google.com/search?aqs=chrome..69i57j0l3.9438j0&sourceid=chrome&Ladies%20+%20Gentlemen&q=setter+python&ie=UTF-8&oq=setter+python')", "def isUrlValid(self, url):\n if url is None:\n return False\n elif url.startswith('//'):\n return False\n elif ':' in url:\n return False\n elif url.startswith('/wiki'):\n return True\n elif 'en.wikipedia.org/wiki/' not in url:\n return False\n return True", "def _encode_url(full_url):\n return urllib.parse.quote(full_url, safe=\"%/:=&?~#+!$,;'@()*[]|\")", "def validaURL(url: AnyStr) -> bool:\n\n return re.compile(patternURL).search(url) != None # Linea 1", "def make_url_safe(self, url):\n return url.replace(' ', '%20')\\\n .replace('(', '%28')\\\n .replace(')', '%29')\\\n .replace('\"', '%22')", "def test_percent_decode(self):\n new_case = uri.URI.parse_uri('https://www.google.com/search?aqs=chrome..69i57j0l3.9438j0&sourceid=chrome&Ladies%20+%20Gentlemen&q=setter+python&ie=UTF-8&oq=setter+python')\n self.assertEqual(new_case.get_query_arg('Ladies + Gentlemen'), None)", "def test_unicode_query_string():\n assert (normalize_url(\"http://example.com/?file=résumé.pdf\") ==\n \"http://example.com/?file=r%C3%A9sum%C3%A9.pdf\")", "def test_url(quartus, part, url):\n print(\"\\rChecking %s/%s \" % (quartus, part), end='')\n try:\n response = urllib.request.urlopen(url)\n headers = response.getheaders()\n return True\n except KeyboardInterrupt:\n sys.exit(1)\n except:\n return False", "def canHandleUrl(cls, url):\n return 
url.startswith(\"https://cc0textures.com/view.php?tex=\")", "def check_url_format(self):\n\n m = re.match(r\"^http://www\\.flipkart\\.com/.*/p/.*$\", self.product_page_url)\n\n return not not m", "def _requires_quotes(self, value):\n lc_value = value.lower()\n return (lc_value in self.reserved_words\n or self.illegal_initial_characters.match(value[0])\n or not self.legal_characters.match(unicode(value))\n or (lc_value != value))", "def is_url(url):\n\n return bool(re.match(re_url, url))", "def check_url_and_raise_errors(url: str) -> None:\n if not url:\n raise_error(\"Url can not be empty\", 400)\n\n try:\n URL_REGEX.match(url).span()[1] - URL_REGEX.match(url).span()[0] == len(url)\n except AttributeError:\n raise_error(\"Url should be valid\", 400)", "def check_url(url=None, parse_url=None):\n \n if not parse_url:\n parse_url = urlparse.urlparse(url)\n \n invalid_paths = ['^\\/?$', '^\\/(stream|explore|groups|upload|you|dashboard|messages|settings|creativecommons|tracks|people)(\\/|$)']\n \n return parse_url.netloc in ['soundcloud.com', 'www.soundcloud.com', 'm.soundcloud.com']\\\n and not any(re.search(invalid_path, parse_url.path) for invalid_path in invalid_paths)", "def test_capitalize_escape_sequence():\n assert (normalize_url(\"http://www.example.com/a%c2%b1b\") ==\n \"http://www.example.com/a%C2%B1b\")", "def url_quote_part(s, safechars='/', encoding=None):\n if isinstance(s, unicode):\n if encoding is None:\n encoding = url_encoding\n s = s.encode(encoding, 'ignore')\n return urllib.quote(s, safechars)", "def test_unquote(self):\n self.assertEqual(unquote('foo%23bar'), 'foo#bar')\n self.assertEqual(unquote('foo%23bar', ['#']), 'foo%23bar')\n with self.assertRaises(TypeError):\n unquote(None)\n self.assertEqual(unquote(\"\"), \"\")\n self.assertEqual(unquote(\"abc123\"), \"abc123\")", "def clean_url(url):\r\n s = url\r\n url = url.encode('utf8')\r\n url = ''.join([urllib.quote(c) if ord(c) >= 127 else c for c in url])\r\n return url", "def check_url(url=None, parse_url=None):\n \n if not parse_url:\n parse_url = urlparse.urlparse(url)\n \n return (parse_url.netloc == 'vimeo.com' or parse_url.netloc.endswith('.vimeo.com')) and 'hubnut/album/' not in parse_url.path", "def check_url(value):\n\n valid = validators.url(value)\n if valid != True:\n return False", "def _quote(uri, options):\n suri = uri.strip()\n for c in _warnChars:\n if suri.find(c) != -1:\n if options != None:\n options.comment_graph.add_warning('Unusual character in uri:%s; possible error?' 
% suri)\n break\n return urllib.quote(suri, _unquotedChars)", "def is_valid_url(url: str) -> bool:\n if not url:\n return False\n\n regex = (\"((http|https)://)(www.)?\" +\n \"[a-zA-Z0-9@:%._\\\\+~#?&//=]\" +\n \"{2,256}\\\\.[a-z]\" +\n \"{2,6}\\\\b([-a-zA-Z0-9@:%\" +\n \"._\\\\+~#?&//=]*)\")\n p = re.compile(regex)\n return True if re.search(p, url) else False", "def test_query_string_spaces():\n assert (normalize_url(\"http://example.com/search?q=a b&a=1\") ==\n \"http://example.com/search?a=1&q=a+b\")\n assert (normalize_url(\"http://example.com/search?q=a+b&a=1\") ==\n \"http://example.com/search?a=1&q=a+b\")\n assert (normalize_url(\"http://example.com/search?q=a%20b&a=1\") ==\n \"http://example.com/search?a=1&q=a+b\")", "def is_url_valid(self, url: str) -> bool:\n if self.exclude and re.search(self.exclude, url):\n return False\n\n parts = urllib.parse.urlparse(url)\n\n if parts.scheme not in ('http', 'https'):\n LOGGER.debug(f'skipping non-http scheme in found at {url}')\n return False\n\n host, _ = urllib.parse.splitport(parts.netloc) # type: ignore\n\n if not self.host_okay(host):\n LOGGER.debug(f'skipping non-root host found at {url}')\n return False\n\n return True", "def url_checker(url):\n if url.startswith(http_req):\n url_name = url[7:]\n # print('URL check passed. Using http')\n return url_name\n if url.startswith(https_req):\n url_name = url[8:]\n # print('URL check passed. Using https')\n return url_name\n else:\n print('URL check failed. not valid http or https URL')\n print(f'Bad URL:{url}')\n sys.exit()\n # return False", "def test_append_slash_quoted(self):\n request = self.rf.get(quote(\"/needsquoting#\"))\n r = CommonMiddleware(get_response_404)(request)\n self.assertEqual(r.status_code, 301)\n self.assertEqual(r.url, \"/needsquoting%23/\")", "def sanitize_url(self, url):\r\n if not self.markdown.safeMode:\r\n # Return immediately bipassing parsing.\r\n return url\r\n \r\n try:\r\n scheme, netloc, path, params, query, fragment = url = urlparse(url)\r\n except ValueError:\r\n # Bad url - so bad it couldn't be parsed.\r\n return ''\r\n \r\n locless_schemes = ['', 'mailto', 'news']\r\n if netloc == '' and scheme not in locless_schemes:\r\n # This fails regardless of anything else. \r\n # Return immediately to save additional proccessing\r\n return ''\r\n\r\n for part in url[2:]:\r\n if \":\" in part:\r\n # Not a safe url\r\n return ''\r\n\r\n # Url passes all tests. 
Return url as-is.\r\n return urlunparse(url)", "def check_url(value):\n\n valid = validators.url(value)\n if valid is not True:\n return False", "def url_check(url):\n \n url_tuple = urlparse.urlparse(url)\n if url_tuple[0] == 'http' or url_tuple[0] == 'https' and url_tuple[1] != \"\":\n return url\n else:\n raise Exception('bad url')", "def check_url(url=None, parse_url=None):\n \n if not parse_url:\n parse_url = urlparse.urlparse(url)\n \n return parse_url.netloc.endswith('slideshare.net')", "def containsURL(line: str):\n\n URL = \"(http|ftp|https)://([\\w_-]+(?:(?:\\.[\\w_-]+)+))\" \\\n \"([\\w.,@?^=%&:/~+#-]*[\\w@?^=%&/~+#-])?\"\n if re.match(URL, line):\n return True\n else:\n return False", "def valid_url(prop,value,report):\r\n url = value.getStringValue()\r\n # local urls are allowed\r\n if local_urls.match(url):\r\n pass\r\n # custom urls are allowed, but need to be transformed into a real path\r\n elif custom_img_urls.match(url):\r\n name = custom_img_urls.match(url).group(1)\r\n # the label -> image number lookup is stored on the subreddit\r\n if c.site.images.has_key(name):\r\n num = c.site.images[name]\r\n value._setCssText(\"url(http:/%s%s_%d.png?v=%s)\"\r\n % (g.s3_thumb_bucket, c.site._fullname, num,\r\n randstr(36)))\r\n else:\r\n # unknown image label -> error\r\n report.append(ValidationError(msgs['broken_url']\r\n % dict(brokenurl = value.cssText),\r\n value))\r\n # allowed domains are ok\r\n elif domain(url) in g.allowed_css_linked_domains:\r\n pass\r\n else:\r\n report.append(ValidationError(msgs['broken_url']\r\n % dict(brokenurl = value.cssText),\r\n value))\r\n #elif sanitize_url(url) != url:\r\n # report.append(ValidationError(msgs['broken_url']\r\n # % dict(brokenurl = value.cssText),\r\n # value))\r", "def assert_has_valid_url(self, url, expected_ending=''):\r\n assert isinstance(url, str)\r\n assert url.startswith('http')\r\n assert url.endswith(expected_ending)", "def _validate_base_url(url: str) -> None:\n parse_result = urlparse(url)\n if parse_result.scheme not in ('http', 'https'):\n raise ValueError(\n f'Only HTTP[S] URLs are permitted. Actual URL: {url!r}')\n if url.endswith('/'):\n raise ValueError('Base (DICOMweb service) URL cannot have a trailing '\n f'forward slash: {url!r}')", "def check_url(url=None, parse_url=None):\n if not parse_url:\n parse_url = urlparse.urlparse(url)\n \n unsupported = ['twitcam.', 'new.']\n return parse_url.netloc.endswith('livestream.com')\\\n and not any(x in parse_url.netloc for x in unsupported)\\\n and len(parse_url.path.split('/')) > 2", "def validateURL(url):\n pattern = re.compile(\"^https*:\\/\\/\\w+(\\.\\w+){2}[\\/A-Za-z\\d\\?\\=]*$\")\n match = pattern.match(url)\n\n return True if match else False", "def validateURL(self, url):\n\n url_parts = _parseURL(url)\n if url_parts is None:\n return False\n\n proto, host, port, path = url_parts\n\n if proto != self.proto:\n return False\n\n if port != self.port:\n return False\n\n if '*' in host:\n return False\n\n if not self.wildcard:\n if host != self.host:\n return False\n elif ((not host.endswith(self.host)) and\n ('.' + host) != self.host):\n return False\n\n if path != self.path:\n path_len = len(self.path)\n trust_prefix = self.path[:path_len]\n url_prefix = path[:path_len]\n\n # must be equal up to the length of the path, at least\n if trust_prefix != url_prefix:\n return False\n\n # These characters must be on the boundary between the end\n # of the trust root's path and the start of the URL's\n # path.\n if '?' 
in self.path:\n allowed = '&'\n else:\n allowed = '?/'\n\n return (self.path[-1] in allowed or\n path[path_len] in allowed)\n\n return True", "def validate_url(ctx, param, value):\n try:\n return URL(request.urlopen(value).read())\n except ValueError:\n raise click.BadParameter('url need to be a correct URL string')", "def is_valid_url(game, url):\n game_norm = \"-\".join(re.sub('[^a-zA-Z0-9 ]', '', unquote(game).lower()).split())\n if re.search(\"\\/([^/]+)\\/$\", url):\n return game_norm == re.search(\"\\/([^/]+)\\/$\", url).group(1)\n else:\n return False", "def validate_url(self):\n pass", "def validate_url(url_in):\n if url_in == \"\":\n error = \"[ERROR] Input is empty\"\n return False\n elif not url_in.startswith(\"https://\"):\n error = \"[ERROR] Input doesn't start with https://\"\n return False\n elif not url_in.startswith(\"https://github.com/\"):\n error = \"[ERROR] Input is not a GitHub URL\"\n return False\n else:\n error = \"[INFO] Input is a valid URL\"\n return True", "def is_url(self, url):\n return self.is_regex_url(url, self.is_url_regex)", "def escPercent(text):\n pat = re.compile(r'%(?!\\()')\n return pat.sub('%%', text)", "def UrlEscape(text):\n return urllib.quote(text, safe='~-._')", "def url_validator(url: str) -> bool:\n import re\n regex = re.compile(\n r'^(?:http|ftp)s?://'\n r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\\.)+(?:[A-Z]{2,6}\\.?|[A-Z0-9-]{2,}\\.?)|'\n r'localhost|'\n r'\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3})'\n r'(?::\\d+)?'\n r'(?:/?|[/?]\\S+)$', re.IGNORECASE)\n return re.match(regex, url) is not None", "def check_url(url):\n return 'products.json' in url", "def _encode_url(data: str) -> str:\n return urllib.parse.quote(data, safe=\"\")", "def check_url_format(self):\r\n m = re.match(\"^http://www.tesco.com/direct/[0-9a-zA-Z-]+/[0-9-]+\\.prd$\", self.product_page_url)\r\n n = re.match(\"^http://www.tesco.com/.*$\", self.product_page_url)\r\n return (not not m) or (not not n)", "def isValidURL(self, url):\n if \"imdb.com\" in url:\n return True\n else:\n return False", "def check_url_invalidity(self) -> bool:\n validate = URLValidator()\n try:\n validate(self.args.url)\n return False\n except ValidationError:\n return True", "def test_path(self):\n urlpath = url.URL.fromString(\"http://example.com/foo/bar?baz=quux#foobar\")\n self.assertEqual(urlpath.path, \"foo/bar\")\n urlpath = url.URL.fromString(\"http://example.com/foo%2Fbar?baz=quux#foobar\")\n self.assertEqual(urlpath.path, \"foo%2Fbar\")\n urlpath = url.URL.fromString(\"http://example.com/-_.!*'()?baz=quux#foo\")\n self.assertEqual(urlpath.path, \"-_.!*'()\")", "def _is_url(s: str) -> bool:\n\n return urlparse(s).netloc != \"\"", "def test_raw_unicode_escape_dashes(self):\n ok = True\n try:\n unicode(b'hey', 'raw_unicode-escape')\n except LookupError:\n ok = False\n\n self.assertTrue(ok, \"dashes and underscores should be interchangable\")", "def validate_url(url):\n\n RE_D = re.compile(r'^(http:\\/\\/www\\.|https:\\/\\/www\\.|http:\\/\\/|https:\\/\\/)?[a-z0-9]+([\\-\\.]{1}[a-z0-9]+)*\\.[a-z]{2,5}(:[0-9]{1,5})?(\\/.*)?$')\n return bool(RE_D.match(url))", "def _check_reference(self, val) :\n\t\tdef char_check(s, not_allowed = ['#','[',']']) :\n\t\t\tfor c in not_allowed :\n\t\t\t\tif s.find(c) != -1 : return False\n\t\t\treturn True\n\t\t# Creating an artificial http URI to fool the urlparse module...\n\t\tscheme, netloc, url, query, fragment = urlsplit('http:' + val)\n\t\tif netloc != \"\" and self.state.rdfa_version >= \"1.1\" 
:\n\t\t\tself.state.options.add_warning(err_absolute_reference % (netloc, val), UnresolvableReference, node=self.state.node.nodeName)\n\t\t\treturn False\n\t\telif not char_check(query) :\n\t\t\tself.state.options.add_warning(err_query_reference % (query, val), UnresolvableReference, node=self.state.node.nodeName)\n\t\t\treturn False\n\t\telif not char_check(fragment) :\n\t\t\tself.state.options.add_warning(err_fragment_reference % (fragment, val), UnresolvableReference, node=self.state.node.nodeName)\n\t\t\treturn False\n\t\telse :\n\t\t\treturn True", "def _is_valid_social_username(value):\n return '/' not in value", "def is_valid_url(url: str) -> bool:\n try:\n requests.get(url)\n except requests.exceptions.RequestException:\n return False\n return True", "def test_hyperlinks_do_not_contain_prohibited_characters(self):\n for h in self.hyperlinks:\n self.assertTrue(\n re.search(r'[<>]', h['name']) is None,\n msg='Hyperlink \"%s\" contains forbidden characters in \"%s\".' % (h['md'], h['file'])\n )", "def word_is_url(word):\n match = URL_REGEX.search(word)\n return True if match is not None else False", "def check_url(self):\n\n base = 'https://www.reformagkh.ru/myhouse/profile/view/'\n\n if base not in self.url:\n raise UrlError('It is not an www.reformagkh.ru link. '\n 'Please try the correct link.')", "def urlValidator(url):\n if 'amazon.com/' not in url:\n print('ERROR: Please enter a valid amazon.com URL. (ERROR 1)')\n else:\n validURL = url\n if 'Amazon.com/' not in url:\n print('ERROR: Please enter a valid amazon.com URL. (ERROR 2)')\n else:\n validURL = url\n\n return validURL", "def _is_valid_url(url):\n regex = re.compile(\n r'^(?:http|ftp)s?://' # http:// or https://\n r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\\.)+(?:[A-Z]{2,6}\\.?'\n r'|[A-Z0-9-]{2,}\\.?)|' # domain...\n r'localhost|' # localhost...\n r'\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}|' # ...or ipv4\n r'\\[?[A-F0-9]*:[A-F0-9:]+\\]?)' # ...or ipv6\n r'(?::\\d+)?' # optional port\n r'(?:/?|[/?]\\S+)$', re.IGNORECASE)\n if regex.search(url):\n return True\n return False", "def check_url(url: str) -> bool:\n try:\n potential_error = driver.find_element_by_xpath(\"/html/body/div[5]/div/div/div[1]/div/div/div/section/div[2]/div\").text\n if '403' in potential_error:\n return True\n except:\n return False", "def is_valid(url):\n\n HAVERFORD_TOKEN = 'Haverford users only'\n INVALID_TOKENS = [HAVERFORD_TOKEN, \"Site Intel\", \"SITE Institute\"]\n content = urlopen(url).read()\n\n for token in INVALID_TOKENS:\n if token in content:\n return False\n return True", "def check_url(url=None, parse_url=None):\n if not parse_url:\n parse_url = urlparse.urlparse(url)\n \n return (parse_url.netloc == 'vine.co' or parse_url.netloc.endswith('.vine.co')) \\\n and re.search('/v/\\w', parse_url.path) is not None", "def validate_url(attribute_name, url):\n if not url:\n return\n\n try:\n result = urlparse(url=url)\n if [result.scheme, result.netloc, result.path]:\n return True\n except:\n raise ValueError('{attribute_name}: The given string {url} is not a '\n 'valid url.'\n .format(attribute_name=attribute_name, url=url))", "def urlQuote(string):\r\n return quote(string.encode(\"utf-8\"))", "def _is_valid(self, url: ParseResult):\n\n if (\n re.match('(.*).' 
+ self.netloc, url.netloc) is None or\n re.match('(.*)\\+[0-9]*$', url.path) is not None or\n re.match('(.*)javascript:(.*)', url.path) is not None\n ):\n return False\n\n return True", "def can_handle(self, url):\n return self.url_re.match(url)", "def test_is_url(self):\n\n url = \"https://shadowrun.needs.management\"\n self.assertTrue(run(verification.is_url(url)))\n\n url = \"https:// www.google.com\"\n self.assertFalse(run(verification.is_url(url)))", "def is_url(url):\n return re.search(r\"^[a-zA-Z][-+\\.\\w]*://[^\\s]+$\", url) is not None and url[:4] != 'uuid'", "def is_url(string):\n try:\n urlparse(string)\n return True\n except:\n return False", "def test_unicode_path():\n assert (normalize_url(\"http://example.com/résumé\") ==\n \"http://example.com/r%C3%A9sum%C3%A9\")", "def check_db_url(db_url):\n try:\n make_url(db_url)\n except Exception:\n raise exc.ArPrDataBadDBURLFormatException(\n \"Incorrect DB URL Format. Check http://docs.sqlalchemy.org\"\n \"/en/latest/core/engines.html?highlight=url#database-urls\")", "def valid_url(self):\r\n if self.resolver:\r\n return True\r\n return False", "def is_url(url):\n if '://' not in url:\n return False\n proto, addr = url.split('://', 1)\n if proto.lower() not in ['tcp','pgm','epgm','ipc','inproc']:\n return False\n return True", "def testQuestionMarkURI(self):\n self.assertEqual([\"http://www.bdog.fi/cgi-bin/netstore/tuotehaku.pl?tuoteryhma=16\"], grab('http://www.bdog.fi/cgi-bin/netstore/tuotehaku.pl?tuoteryhma=16', self.needScheme))", "def is_valid_url(value):\n regex = re.compile(\n r'^(?:http|ftp)s?://'\n r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\\.)+(?:[A-Z]{2,6}\\.?|[A-Z0-9-]{2,}\\.?)|'\n r'localhost|'\n r'\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}|'\n r'\\[?[A-F0-9]*:[A-F0-9:]+\\]?)'\n r'(?::\\d+)?'\n r'(?:/?|[/?]\\S+)$', re.IGNORECASE)\n\n result = regex.match(value)\n return bool(result)", "def valid_url(url):\n url_regex = re.compile(r\"https?:\\/\\/(?:www\\.|(?!www))[a-zA-Z0-9][a-zA-Z0-9-]+[a-zA-Z0-9]\\.[^\\s]{2,}|www\\.[a-zA-Z0-9][a-zA-Z0-9-]+[a-zA-Z0-9]\\.[^\\s]{2,}|https?:\\/\\/(?:www\\.|(?!www))[a-zA-Z0-9]\\.[^\\s]{2,}|www\\.[a-zA-Z0-9]\\.[^\\s]{2,}\")\n return url and url_regex.match(url)", "def test_bogus_escape_not_raised(self):\r\n problem = self.build_problem(answer=u\"\\\\\", case_sensitive=False, regexp=True)\r\n\r\n self.assert_grade(problem, u\"\\\\\", \"incorrect\")\r\n\r\n # right way to search for \\\r\n problem = self.build_problem(answer=u\"\\\\\\\\\", case_sensitive=False, regexp=True)\r\n self.assert_grade(problem, u\"\\\\\", \"correct\")", "def is_url(val):\n res = urlparse(val)\n return bool(res.scheme and res.netloc and res.params == \"\")", "def safe_url_string(url, encoding='utf8', path_encoding='utf8'):\n # Python3's urlsplit() chokes on bytes input with non-ASCII chars,\n # so let's decode (to Unicode) using page encoding:\n # - it is assumed that a raw bytes input comes from a document\n # encoded with the supplied encoding (or UTF8 by default)\n # - if the supplied (or default) encoding chokes,\n # percent-encode offending bytes\n decoded = to_unicode(url, encoding=encoding, errors='percentencode')\n parts = urlsplit(_ascii_tab_newline_re.sub('', decoded))\n\n # IDNA encoding can fail for too long labels (>63 characters)\n # or missing labels (e.g. 
http://.example.com)\n try:\n netloc = parts.netloc.encode('idna')\n except UnicodeError:\n netloc = parts.netloc\n\n # quote() in Python2 return type follows input type;\n # quote() in Python3 always returns Unicode (native str)\n return urlunsplit((\n to_native_str(parts.scheme),\n to_native_str(netloc).rstrip(':'),\n\n # default encoding for path component SHOULD be UTF-8\n quote(to_bytes(parts.path, path_encoding), _safe_chars),\n\n # encoding of query and fragment follows page encoding\n # or form-charset (if known and passed)\n quote(to_bytes(parts.query, encoding), _safe_chars),\n quote(to_bytes(parts.fragment, encoding), _safe_chars),\n ))", "def is_valid(self, qstr):\r\n pass", "def urlquote(text):\n if op.sep == \"\\\\\":\n return urllib.quote(text.replace(\"\\\\\", \"/\"))\n else:\n return urllib.quote(text)", "def __is_quote(cls, char):\n return char in (\"'\", '\"')", "def test_unicode(self):\n iri = u'http://localhost/expos\\xe9?doppelg\\xe4nger=Bryan O\\u2019Sullivan#r\\xe9sum\\xe9'\n uri = b'http://localhost/expos%C3%A9?doppelg%C3%A4nger=Bryan%20O%E2%80%99Sullivan#r%C3%A9sum%C3%A9'\n self.assertEqual(flatten(url.URL.fromString(iri)), uri)" ]
[ "0.7206755", "0.7197858", "0.70679295", "0.6783549", "0.66798353", "0.6624512", "0.65184194", "0.6379363", "0.6148199", "0.60375714", "0.59809166", "0.596141", "0.59491706", "0.59333074", "0.59282386", "0.5914704", "0.5908903", "0.5879197", "0.58755445", "0.58680904", "0.583392", "0.5803739", "0.5793798", "0.57776845", "0.5776925", "0.5767544", "0.5762261", "0.5757791", "0.574152", "0.5708037", "0.56906426", "0.5675836", "0.56690025", "0.56501627", "0.5637819", "0.562919", "0.5614963", "0.5608312", "0.5601991", "0.55729336", "0.55716497", "0.5571355", "0.5563759", "0.5556239", "0.55534726", "0.55346453", "0.5531376", "0.5525988", "0.55257344", "0.5523541", "0.5517742", "0.5512088", "0.5511555", "0.55085117", "0.5498674", "0.5496558", "0.54943687", "0.549155", "0.5440816", "0.54381496", "0.5437376", "0.54277605", "0.5426654", "0.54207546", "0.54202956", "0.5412346", "0.5405665", "0.5395701", "0.53889483", "0.53888625", "0.5387838", "0.53803444", "0.5366156", "0.5364492", "0.53612757", "0.5339996", "0.53364575", "0.53329337", "0.5324379", "0.53233284", "0.5317195", "0.5310533", "0.53097916", "0.530666", "0.53049153", "0.52870923", "0.52627164", "0.523433", "0.5232305", "0.5223026", "0.521433", "0.52123237", "0.5211021", "0.520895", "0.520414", "0.5187275", "0.51752645", "0.5174405", "0.51734143", "0.51724523" ]
0.7765809
0
Split url into a tuple (scheme, hostname, port, document) where hostname is always lowercased.
def url_split(url):
    scheme, netloc = urllib.splittype(url)
    host, document = urllib.splithost(netloc)
    port = default_ports.get(scheme, 0)
    if host:
        host = host.lower()
        host, port = splitport(host, port=port)
    return scheme, host, port, document
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _urlparse_splitscheme(url):\r\n # The scheme is valid only if it contains these characters.\r\n scheme_chars = \\\r\n \"abcdefghijklmnopqrstuvwxyz0123456789+-.\"\r\n\r\n scheme = \"\"\r\n rest = url\r\n\r\n spart = url.split(\":\", 1)\r\n if len(spart) == 2:\r\n\r\n # Normalize the scheme.\r\n spart[0] = spart[0].lower()\r\n\r\n # A scheme is valid only if it starts with an alpha character.\r\n if spart[0] and spart[0][0].isalpha():\r\n for char in spart[0]:\r\n if char not in scheme_chars:\r\n break\r\n (scheme, rest) = spart\r\n\r\n return scheme, rest", "def _split_url(url):\n return url[1:].split('/')", "def split_url(url):\n match = re.match(\"(.*\\.org)(/.*)\", url)\n return match.group(1), match.group(2)", "def process_url(url):\n parsed = urlparse(url)\n if parsed.scheme:\n return parsed.netloc, parsed.path\n else:\n host_part = parsed.path\n hostname = host_part.partition(\"/\")[0]\n path = \"/\" + host_part.partition(\"/\")[2]\n return hostname, path", "def split_type_host(url):\n type, rest = urllib.splittype(url)\n host, selector = urllib.splithost(rest)\n return type, host, selector", "def _split_url(self, url):\n url_split = urlsplit(url)\n try:\n if url_split.netloc is not None and url_split.netloc.find(\" \") > 0:\n return None\n decoded_netloc = url_split.netloc.decode(\"utf-8\").encode(\"idna\")\n url_parts = (\n url_split.scheme,\n decoded_netloc,\n url_split.path,\n url_split.query,\n url_split.fragment)\n url_splitted = urlunsplit(url_parts)\n return url_splitted\n except UnicodeError:\n return None", "def urlparse_urlsplit(urlstring, default_scheme=\"\", allow_fragments=True):\r\n\r\n components = {\"scheme\": default_scheme, \"netloc\": \"\", \"path\": \"\", \"query\": \"\",\r\n \"fragment\": \"\", \"username\": None, \"password\": None, \"hostname\": None,\r\n \"port\": None }\r\n\r\n # Extract the scheme, if present.\r\n (lpart, rpart) = _urlparse_splitscheme(urlstring)\r\n if lpart:\r\n components[\"scheme\"] = lpart\r\n\r\n # Extract the server information, if present.\r\n if rpart.startswith(\"//\"):\r\n (lpart, rpart) = _urlparse_splitnetloc(rpart, 2)\r\n components[\"netloc\"] = lpart\r\n\r\n (components[\"username\"], components[\"password\"], components[\"hostname\"],\r\n components[\"port\"]) = _urlparse_splitauthority(lpart)\r\n\r\n # Extract the fragment.\r\n if allow_fragments:\r\n (rpart, components[\"fragment\"]) = _urlparse_splitfragment(rpart)\r\n\r\n\r\n # Extract the query.\r\n (components[\"path\"], components[\"query\"]) = _urlparse_splitquery(rpart)\r\n\r\n return components", "def split_url(url): # Change the url so it can be iterated\n url = url.split('index') \n url = url[0] + 'page-1.html'\n url = url.split('page-')\n url = f\"{url[0]}page-1.html\"\n return url", "def _parseurl(url):\n tracker1=url\n port=int(re.findall(\"[0-9]+\",tracker1)[0])\n host=re.findall(\"[^0-9]+\",tracker1)[0]\n host=host[:-1]\n host=host[6:]\n return host,port", "def _parse_url(url):\n parts = urlparse(url)\n scheme = parts.scheme\n port = parts.port or None\n hostname = parts.hostname\n path = parts.path or ''\n virtual_host = path[1:] if path and path[0] == '/' else path\n return (scheme, unquote(hostname or '') or None, port,\n unquote(parts.username or '') or None,\n unquote(parts.password or '') or None,\n unquote(path or '') or None,\n unquote(virtual_host or '') or None,\n unquote(parts.query or '') or None,\n dict(dict(parse_qsl(parts.query))))", "def _parse_url(url):\r\n if \":\" not in url:\r\n raise ValueError(\"url is invalid\")\r\n\r\n scheme, 
url = url.split(\":\", 1)\r\n\r\n parsed = urlparse(url, scheme=\"http\")\r\n if parsed.hostname:\r\n hostname = parsed.hostname\r\n else:\r\n raise ValueError(\"hostname is invalid\")\r\n port = 0\r\n if parsed.port:\r\n port = parsed.port\r\n\r\n is_secure = False\r\n if scheme == \"ws\":\r\n if not port:\r\n port = 80\r\n elif scheme == \"wss\":\r\n is_secure = True\r\n if not port:\r\n port = 443\r\n else:\r\n raise ValueError(\"scheme %s is invalid\" % scheme)\r\n\r\n if parsed.path:\r\n resource = parsed.path\r\n else:\r\n resource = \"/\"\r\n\r\n if parsed.query:\r\n resource += \"?\" + parsed.query\r\n\r\n return (hostname, port, resource, is_secure)", "def get_url_components(self, url):\n if 'http://' not in url and 'https://' not in url:\n print(\"Protocol not found, skipping: \" + url)\n return False\n if url[:7] == 'http://':\n protocol = url[:7]\n file_path = url[7:]\n elif url[:8] == 'https://':\n protocol = url[:8]\n file_path = url[8:]\n else:\n print(\"Error when parsing protocol. Skipping: \" + url)\n return False\n # Split the string from the last '/'.\n # To do this, we reverse the string, split from the first '/' and\n # then reverse them both back.\n filename, root_and_directory = [x[::-1] for x in file_path[::-1].split('/', 1)]\n # Replace the lost '/'\n root_and_directory = root_and_directory + '/'\n root, directory = root_and_directory.split('/', 1)\n directory = '/' + directory\n return [protocol, root, directory, filename]", "def extractURLparts(request: IRequest) -> Tuple[str, str, int, str, str]:\n server_name = request.getRequestHostname()\n if hasattr(request.getHost(), \"port\"):\n server_port = request.getHost().port\n else:\n server_port = 0\n if (bool(request.isSecure()), server_port) not in [\n (True, 443),\n (False, 80),\n (False, 0),\n (True, 0),\n ]:\n server_name = b\"%s:%d\" % (server_name, server_port)\n\n script_name = b\"\"\n if request.prepath:\n script_name = b\"/\".join(request.prepath)\n\n if not script_name.startswith(b\"/\"):\n script_name = b\"/\" + script_name\n\n path_info = b\"\"\n if request.postpath:\n path_info = b\"/\".join(request.postpath)\n\n if not path_info.startswith(b\"/\"):\n path_info = b\"/\" + path_info\n\n url_scheme = \"https\" if request.isSecure() else \"http\"\n\n utf8Failures = []\n try:\n server_name = server_name.decode(\"utf-8\")\n except UnicodeDecodeError:\n utf8Failures.append((\"SERVER_NAME\", Failure()))\n try:\n path_text = path_info.decode(\"utf-8\")\n except UnicodeDecodeError:\n utf8Failures.append((\"PATH_INFO\", Failure()))\n try:\n script_text = script_name.decode(\"utf-8\")\n except UnicodeDecodeError:\n utf8Failures.append((\"SCRIPT_NAME\", Failure()))\n\n if utf8Failures:\n raise URLDecodeError(utf8Failures)\n\n return url_scheme, server_name, server_port, path_text, script_text", "def parse_url(url):\n if url.startswith(URL_SCHEME) and len(url) > len(URL_SCHEME):\n bucket_and_path = url.rstrip('/')[len(URL_SCHEME):].split('/', 1)\n if len(bucket_and_path) == 1:\n bucket_and_path.append('')\n return bucket_and_path\n return (None, None)", "def _urlparse_splitquery(url):\r\n\r\n qpart = url.split(\"?\", 1)\r\n if len(qpart) == 2:\r\n query = qpart[1]\r\n else:\r\n query = \"\"\r\n\r\n return qpart[0], query", "def _urlparse_splitauthority(netloc):\r\n\r\n # The authority can have a userinfo portion delimited by \"@\".\r\n authority = netloc.split(\"@\", 1)\r\n\r\n # Default values.\r\n username = None\r\n password = None\r\n hostname = None\r\n port = None\r\n\r\n # Is there a userinfo portion?\r\n 
if len(authority) == 2:\r\n\r\n # userinfo can be split into username:password\r\n userinfo = authority[0].split(\":\", 1)\r\n\r\n # hostport can be split into hostname:port\r\n hostport = authority[1].split(\":\", 1)\r\n\r\n if userinfo[0]:\r\n username = userinfo[0]\r\n if len(userinfo) == 2:\r\n password = userinfo[1]\r\n\r\n # No userinfo portion found.\r\n else:\r\n\r\n # hostport can be split into hostname:port\r\n hostport = netloc.split(\":\", 1)\r\n\r\n # Is there a port value?\r\n if hostport[0]:\r\n hostname = hostport[0]\r\n if len(hostport) == 2:\r\n port = int(hostport[1], 10)\r\n\r\n # Return the values.\r\n return username, password, hostname, port", "def _urlparse_splitnetloc(url, start=0):\r\n\r\n # By default, the netloc is delimited by the end of the URL.\r\n delim = len(url)\r\n\r\n # Find the left-most delimiter.\r\n for char in \"/?#\":\r\n xdelim = url.find(char, start)\r\n if xdelim >= 0:\r\n delim = min(delim, xdelim)\r\n\r\n # Return the netloc and the rest of the URL.\r\n return url[start:delim], url[delim:]", "def test_parse_url_lowercase_host() -> None:\n assert indieauth._parse_url(\"http://ex.com/hello\").path == \"/hello\"\n assert indieauth._parse_url(\"http://EX.COM/hello\").hostname == \"ex.com\"\n\n parts = indieauth._parse_url(\"http://EX.COM:123/HELLO\")\n assert parts.netloc == \"ex.com:123\"\n assert parts.path == \"/HELLO\"", "def url_unsplit(parts):\n if parts[2] == default_ports.get(parts[0]):\n return \"%s://%s%s\" % (parts[0], parts[1], parts[3])\n return \"%s://%s:%d%s\" % parts", "def split_url_and_query_params(url):\n scheme, netloc, path, query_string, fragment = urlsplit(url)\n query_params = parse_qs(query_string)\n url = urlunsplit((scheme, netloc, path, None, fragment))\n return url, query_params", "def parse_url(url, port = 80):\n scheme = url[0:url.find(\"://\")]\n if scheme not in (\\\n 'file', 'ftp', 'gopher', 'hd1', 'http', 'https', \\\n 'imap', 'mailto', 'mms', \\\n 'news', 'nntp', 'prospero', 'rsync', 'rtsp', 'rtspu', \\\n 'sftp', 'shttp', \\\n 'sip', 'sips', 'snews', 'svn', 'svn+ssh', \\\n 'telnet', 'wais'):\n no_scheme = True\n url = url.replace(scheme, 'http', 1)\n else:\n no_scheme = False\n u = urlparse.urlparse(url)\n hasuser = u.netloc.find('@')\n d = {\n 'scheme' : (scheme if no_scheme else u.scheme),\n 'path' : u.path,\n 'query' : u.query,\n 'fragment' : u.fragment,\n 'user' : (u.username if u.username != None else ''),\n 'pass' : (u.password if u.password != None else ''),\n 'port' : (u.port if u.port != None else port),\n 'host' : u.netloc[((hasuser + 1) if (hasuser >= 0) else 0):]\n }\n return d", "def parse_url(url):\n (scheme, netloc, path, params, query, frag) = urlparse(url)\n\n # We only support web services\n if not scheme in ('http', 'https'):\n raise InvalidUrl('Scheme must be one of http or https')\n\n is_ssl = scheme == 'https' and True or False\n\n # Verify hostnames are valid and parse a port spec (if any)\n match = re.match('([a-zA-Z0-9\\-\\.]+):?([0-9]{2,5})?', netloc)\n\n if match:\n (host, port) = match.groups()\n if not port:\n port = is_ssl and '443' or '80'\n else:\n raise InvalidUrl('Invalid host and/or port: %s' % netloc)\n\n return (host, int(port), path.strip('/'), is_ssl)", "def _urlparse_splitfragment(url):\r\n\r\n fpart = url.split(\"#\", 1)\r\n if len(fpart) == 2:\r\n fragment = fpart[1]\r\n else:\r\n fragment = \"\"\r\n\r\n return fpart[0], fragment", "def extract_scheme(url):\n return urlsplit(url, \"http\").scheme", "def split_address(address):\n if '://' in address:\n protocol, address = 
address.split('://')\n else:\n protocol = 'http'\n\n if ':' in address:\n address, port = address.split(':')\n else:\n port = 443 if protocol == 'https' else 8000\n\n return protocol, address, int(port)", "def test_split_url_for_query_1(self):\n url = \"testurl.com\"\n\n output = split_url_for_query(url)\n\n self.assertEqual(output, (\"com.testurl.%\", \"%\"))", "def test_check_http_url_split(url, expected_split):\n assert http_urlsplit(url) == expected_split", "def test_split_url_for_query_2(self):\n url = \"testurl.com/test\"\n\n output = split_url_for_query(url)\n\n self.assertEqual(output, (\"com.testurl.%\", \"%./test%\"))", "def urlunsplit(urlparts):\n res = urlparse.urlunsplit(urlparts)\n if os.name == 'nt' and urlparts[0] == 'file' and '|' not in urlparts[2]:\n # UNC paths must have 4 slashes: 'file:////server/path'\n # Depending on the path in urlparts[2], urlparse.urlunsplit()\n # left only two or three slashes. This is fixed below\n repl = 'file://' if urlparts[2].startswith('//') else 'file:/'\n res = res.replace('file:', repl)\n return res", "def _split(self, uri):\n if '/' in uri:\n return uri.split('/', 1)\n return [uri, None]", "def extractHostname(self, url):\n return url.split(\"http://\")[1].split('/')[0]", "def url_permutations(url):\n def url_host_permutations(host):\n if re.match(r'\\d+\\.\\d+\\.\\d+\\.\\d+', host):\n yield host\n return\n parts = host.split('.')\n l = min(len(parts),5)\n if l > 4:\n yield host\n for i in range(l-1):\n yield '.'.join(parts[i-l:])\n def url_path_permutations(path):\n if path != '/':\n yield path\n query = None\n if '?' in path:\n path, query = path.split('?', 1)\n if query is not None:\n yield path\n path_parts = path.split('/')[0:-1]\n curr_path = ''\n for i in range(min(4, len(path_parts))):\n curr_path = curr_path + path_parts[i] + '/'\n yield curr_path\n protocol, address_str = urllib.splittype(url)\n host, path = urllib.splithost(address_str)\n user, host = urllib.splituser(host)\n host, port = urllib.splitport(host)\n host = host.strip('/')\n for h in url_host_permutations(host):\n for p in url_path_permutations(path):\n yield '%s%s' % (h, p)", "def tokenizer(url):\r\n \r\n # Split by slash (/) and dash (-)\r\n tokens = re.split('[/-]', url)\r\n \r\n for i in tokens:\r\n # Include the splits extensions and subdomains\r\n if i.find(\".\") >= 0:\r\n dot_split = i.split('.')\r\n \r\n # Remove .com and www. since they're too common\r\n if \"com\" in dot_split:\r\n dot_split.remove(\"com\")\r\n if \"www\" in dot_split:\r\n dot_split.remove(\"www\")\r\n \r\n tokens += dot_split\r\n \r\n return tokens", "def SplitScmUrl(url):\r\n url_split = url.split('@')\r\n scm_url = url_split[0]\r\n scm_rev = 'HEAD'\r\n if len(url_split) == 2:\r\n scm_rev = url_split[1]\r\n return (scm_url, scm_rev)", "def parse_url(url):\n loc = urlparse(url)\n\n # if the scheme (http, https ...)
is not available urlparse wont work\n if loc.scheme == \"\":\n url = \"http://\" + url\n loc = urlparse(url)\n return loc", "def test_split_url_for_query_3(self):\n url = \"*.testurl.com/test\"\n\n output = split_url_for_query(url)\n\n self.assertEqual(output, (\"com.testurl.%\", \"%./test%\"))", "def _parse_url(self, url):\n url_prefix = self.URL_PREFIX\n assert(url[:len(url_prefix)] == url_prefix)\n key, file_attrs = url[len(url_prefix):].split('/', 1)\n file_, attrs = parse_url_opts(file_attrs)\n return key, file_, attrs", "def test_check_http_url_split_validation():\n with pytest.raises(ValueError):\n http_urlsplit('https://aaa.cz')\n\n with pytest.raises(ValueError):\n http_urlsplit('ftp://ddd.cz')", "def parse_url(url):\n bits = urlparse.urlsplit(url)\n print bits\n transport = bits[0]\n uphp = bits[1].split('@')\n user = ''\n passwd = ''\n if len(uphp) == 2:\n (user, passwd) = uphp.pop(0).split(':')\n\n hp = uphp[0].split(':')\n host = hp[0]\n if len(hp) == 2:\n port = int(hp[1])\n else:\n # Require subclass to default\n port = 0\n dirname, filename = bits[2].rsplit('/', 1)\n # params = map(lambda x: x.split('='), bits[3].split('&'))\n params = [x.split('=') for x in bits[3].split('&')]\n try:\n params = dict(params)\n except ValueError:\n params = {}\n anchor = bits[4]\n return (transport, user, passwd, host, port, dirname, filename, params, anchor)", "def get_host(url):\n parts = url.split('/')\n if url.startswith('http'):\n return parts[2]\n else:\n return parts[0]", "def split_domain(name: str) -> Tuple[str, str]:\n parts = name.split(\":\", 1)\n if len(parts) == 1:\n return \"\", parts[0]\n return parts[0], parts[1]", "def _get_host(self, scheme='', hostname_only=False):\n host = self.host or ''\n # urlparse requires '//' to be provided if scheme is not specified\n original_parsed = urlparse.urlsplit(host)\n if (not original_parsed.scheme and not host.startswith('//')) or original_parsed.hostname is None:\n host = '%s://%s' % (scheme, host) if scheme else '//%s' % host\n parsed = urlparse.urlsplit(host)\n\n if hostname_only:\n return parsed.hostname\n\n try:\n port = parsed.port or self.port\n except ValueError:\n port = self.port\n netloc = parsed.netloc if port is None else '%s:%s' % (parsed.hostname, port)\n\n url_components = list(parsed)\n url_components[1] = netloc\n ret = urlparse.urlunsplit(url_components)\n return ret.lstrip('/')", "def domain_parse(url):\n url = url.lower()\n if not url.startswith('http://') and not url.startswith('https://'):\n url = '{schema}{host}'.format(schema='http://', host=url)\n url = urlparse(url)\n if not url.hostname:\n raise ValueError('Invalid domain provided')\n\n # Strip www prefix any additional URL data\n url = urlparse('{scheme}://{host}'.format(scheme=url.scheme, host=url.hostname.lstrip('www.')))\n return url", "def extract_words_from_url(url):\n url = urlparse.urlparse(url)\n # Since it wasn't stipulated in the task, I'm opting for ignoring GET parameters\n url_path = url.netloc + url.path\n url_words = re.split(\"[\\W_-]+\", url_path)\n return url_words", "def get_host_name(url):\n return urlparse.urlparse(url)[1]", "def parseTOURL(url:str, verify:bool) -> typing.Tuple[bool, str, int]:\n\turl = url.rstrip('/')\n\n\tuseSSL, host, port = True, None, 443\n\n\ttry:\n\t\t_ = requests.head(url, verify=verify)\n\texcept requests.exceptions.RequestException as e:\n\t\traise ValueError(\"Cannot contact any server at '%s' (%s)\" % (url, e)) from e\n\n\tif url.lower().startswith(\"http://\"):\n\t\tport = 80\n\t\tuseSSL = False\n\t\turl = 
url[7:]\n\telif url.lower().startswith(\"https://\"):\n\t\turl = url[8:]\n\n\t# I'm assuming here that a valid FQDN won't include ':' - and it shouldn't\n\tportpoint = url.find(':')\n\tif portpoint > 0:\n\t\thost = url[:portpoint]\n\t\tport = int(url[portpoint+1:])\n\telse:\n\t\thost = url\n\n\treturn useSSL, host, port", "def __ParseUrl(url):\n return urlparse(url)", "def splitport(host, port=0):\n if \":\" in host:\n shost, sport = host.split(\":\", 1)\n iport = is_numeric_port(sport)\n if iport:\n host, port = shost, iport\n elif not sport:\n # empty port, ie. the host was \"hostname:\"\n host = shost\n else:\n # For an invalid non-empty port leave the host name as is\n pass\n return host, port", "def parse_service_url(url: str) -> Tuple[str, str, str]:\n pieces = urlparse(url)\n user = pieces.username\n password = pieces.password\n netloc = pieces.hostname\n if pieces.port is not None:\n netloc += f\":{pieces.port}\"\n url = urlunparse((\n pieces.scheme, netloc, pieces.path, None, None, None))\n return url, user, password", "def GetServerFromUrl(url):\n return urlunparse((GetSchemeFromUrl(url), GetNetLocFromUrl(url), '', '', '',\n ''))", "def normalize(cls, url):\n # Always ignore the fragment\n scheme, netloc, path, query, _ = urlsplit(url)\n uri_relative = (None, None, path, query, None)\n uri_without_query = (scheme, netloc, path, None, None)\n uri_relative_without_query = (None, None, path, None, None)\n urls = [url]\n if query:\n urls.append(urlunsplit(uri_without_query))\n urls.append('~' + urlunsplit(uri_relative))\n if query:\n urls.append('~' + urlunsplit(uri_relative_without_query))\n return urls", "def get_hostname(url: str) -> str:\n return urlsplit(url).hostname", "def process_url(url):\n # only get url path, remove host,params.\n url = urlparse(url).path\n # url = list(url)\n # for i in range(len(url)):\n # if _is_punctuation(url[i]):\n # url[i] = \" \"\n # url = ''.join(url)\n # url = ' '.join(url.split())\n return url", "def _update_url_scheme(self, url):\n if self.base_scheme and not url.startswith(\"%s://\" % self.base_scheme):\n # url_split = urlparse.urlsplit(url)\n url_split = urlsplit(url)\n # url = urlparse.urlunsplit(\n url = urlunsplit(\n [\n self.base_scheme,\n url_split.netloc,\n url_split.path,\n url_split.query,\n url_split.fragment\n ]\n )\n return url", "def composeURL(self,splitedURL):\n # 027 With use of SmartURL won't be necessary anymore.\n # 027 was used only in LinklistAdaptor.parse2Queue().parseLine() -> removed (Which actually might jeopardize cll).\n # 027 So actually is NOT used anywhere.\n \n #Could be replaced by string.join() method.\n #Also could be merged with method composePath().\n #Create child of list class with this method. \n \n self.debug.printHeader() \n url=''\n if len(splitedURL)>0:\n for piece in splitedURL:\n if not(piece==splitedURL[0]): url+='/'\n url+=piece\n self.logger.debug(\"Composed url is: %s\" %(url))\n return url\n #return \"/\".join(splitedURL) #026 This will do the same job. 
But needs to be tested.", "def test_url_parse():\n host, port, path = parse_url(\"http://localhost:8000/form.html\")\n assert host == \"localhost\"\n assert port == \"8000\"\n assert path == \"/form.html\"", "def __init_url(self, url):\n scheme, netloc, path, query, fragment = urlparse.urlsplit(url)\n if scheme:\n self.__dict__['__scheme'] = str(scheme)\n self.__dict__['__url'] = urlparse.urlunsplit((scheme, netloc.lower(), path, query, fragment))\n else:\n self.__init_url(str(\"http://\" + url))", "def parse_uri(uri):\r\n groups = URI.match(uri).groups()\r\n return (groups[1], groups[3], groups[4], groups[6], groups[8])", "def url_fix_mailto_urlsplit(urlparts):\n if \"?\" in urlparts[2]:\n urlparts[2], urlparts[3] = urlparts[2].split('?', 1)", "def rebuild_url(scheme, path, fragment, username,\n password, hostname, port, query):\n netloc = \"@\".join(filter(None, [\n \":\".join(\n filter(None, [\n username,\n password,\n ])\n ),\n \":\".join(\n filter(None, [\n hostname,\n str(port or ''),\n ])\n )\n ]))\n\n return urllib.parse.urlunsplit([\n scheme,\n netloc,\n path,\n query,\n fragment,\n ])", "def extract_urls_with_indices(self, options = {'extract_url_without_protocol': True}):\r\n urls = []\r\n for match in REGEXEN['valid_url'].finditer(self.text):\r\n complete, before, url, protocol, domain, port, path, query = match.groups()\r\n start_position = match.start() + len(before or '')\r\n end_position = match.end()\r\n # If protocol is missing and domain contains non-ASCII characters,\r\n # extract ASCII-only domains.\r\n if not protocol:\r\n if not options.get('extract_url_without_protocol') or REGEXEN['invalid_url_without_protocol_preceding_chars'].search(before):\r\n continue\r\n last_url = None\r\n last_url_invalid_match = None\r\n for ascii_domain in REGEXEN['valid_ascii_domain'].finditer(domain):\r\n ascii_domain = ascii_domain.group()\r\n last_url = {\r\n 'url': ascii_domain,\r\n 'indices': [start_position - len(before or '') + complete.find(ascii_domain), start_position - len(before or '') + complete.find(ascii_domain) + len(ascii_domain)]\r\n }\r\n last_url_invalid_match = REGEXEN['invalid_short_domain'].search(ascii_domain) is not None\r\n if not last_url_invalid_match:\r\n urls.append(last_url)\r\n # no ASCII-only domain found. 
Skip the entire URL\r\n if not last_url:\r\n continue\r\n if path:\r\n last_url['url'] = url.replace(domain, last_url['url'])\r\n last_url['indices'][1] = end_position\r\n if last_url_invalid_match:\r\n urls.append(last_url)\r\n else:\r\n if REGEXEN['valid_tco_url'].match(url):\r\n url = REGEXEN['valid_tco_url'].match(url).group()\r\n end_position = start_position + len(url)\r\n urls.append({\r\n 'url': url,\r\n 'indices': [start_position, end_position]\r\n })\r\n return urls", "def geturlcgivars(baseurl, port):\n u = util.url(baseurl)\n name = u.host or ''\n if u.port:\n port = u.port\n path = u.path or \"\"\n if not path.startswith('/'):\n path = '/' + path\n\n return name, str(port), path", "def _parse(url):\n url = url.strip()\n parsed = urlparse(url)\n return _parsed_url_args(parsed)", "def GetSchemeFromUrl(url):\n return __ParseUrl(url)[0]", "def testLeadingAndTrailingText(self):\n self.assertEqual([\"http://123.123.123.123\"], grab('fooasdf asdf a http://123.123.123.123 asdfasdf', self.needScheme))", "def hostify_url(url):\n\tif url[0] == '/':\n\t\treturn HOST + url\n\telse:\n\t\treturn url", "def urlparse(url):\n\tunquote_url=urllib.parse.unquote(url)\n\treturn unquote_url", "def _parse_host(host: str) -> str:\n urlparse_host = urlsplit(host).hostname\n if urlparse_host:\n # In this case, host = https://xx.cloud.databricks.com\n return urlparse_host\n else:\n # In this case, host = xx.cloud.databricks.com\n return host", "def parse_url(url):\n parsed = urlparse(url)\n return {\n \"scheme\": parsed.scheme,\n \"netloc\": parsed.netloc,\n \"path\": parsed.path,\n \"qs\": parse_qs(parsed.query),\n }", "def UrlParse(url, base_url, root_path):\n ref_dir = url.split(base_url)[1].split(r'/')\n saving_path = copy.deepcopy(root_path)\n print(\"ref split rel dir:{}\".format(ref_dir))\n\n for item in ref_dir[1:-1]:\n saving_path = path.join(saving_path, item)\n\n ref_html = ref_dir[-1]\n return saving_path, ref_html", "def parse_url(url):\n newurl = urlparse(url)\n return \"{0}://{1}\".format(newurl.scheme, newurl.netloc)", "def test2URIs(self):\n self.assertEqual([\"http://foobar.fi/1234{}[]{}\", \"http://127.0.0.1/\"], grab('http://foobar.fi/1234{}[]{} sadfljs dlfkjsd lf;asdf http://127.0.0.1/', self.needScheme))", "def get_domain(url):\n a = urllib.parse.urlsplit(url)\n return str(a.scheme) + \"://\" + str(a.hostname)", "def _construct_url(parts):\n results = []\n last_idx = len(parts) - 1\n for n, part in enumerate(parts):\n if n > 0:\n part = part.lstrip('/')\n if n < last_idx:\n part = part.rstrip('/')\n if part:\n results.append(part)\n return '/'.join(results)", "def parseNeighbors(urls):\n parts = re.split(r'\\s+', urls)\n for i in range(len(parts)):\n for j in range(i,len(parts)):\n if i!=j:\n yield parts[i],parts[j]", "def split_s3_path(url):\n\tparsed = urlparse (url)\n\tif not parsed.netloc or not parsed.path:\n\t\traise ValueError (\"bad s3 path {}\".format (url))\n\tbucket_name = parsed.netloc\n\ts3_path = parsed.path\n\t# Remove '/' at beginning of path.\n\tif s3_path.startswith (\"/\"):\n\t\ts3_path = s3_path[1:]\n\treturn bucket_name, s3_path", "def normalize_url(url, unsplit=True, sort_query=True, strip_authentication=True,\n strip_trailing_slash=True, strip_index=True, strip_protocol=True,\n strip_irrelevant_subdomains=True, strip_lang_subdomains=False, strip_lang_query_items=False,\n strip_fragment='except-routing', normalize_amp=True, fix_common_mistakes=True,\n infer_redirection=True, quoted=True):\n original_url_arg = url\n\n if infer_redirection:\n url = 
resolve(url)\n\n if isinstance(url, SplitResult):\n has_protocol = bool(splitted.scheme)\n splitted = url\n else:\n has_protocol = PROTOCOL_RE.match(url)\n\n # Ensuring scheme so parsing works correctly\n if not has_protocol:\n url = 'http://' + url\n\n # Parsing\n try:\n splitted = urlsplit(url)\n except ValueError:\n return original_url_arg\n\n scheme, netloc, path, query, fragment = splitted\n\n # Fixing common mistakes\n if fix_common_mistakes:\n if query:\n query = re.sub(MISTAKES_RE, '&', query)\n\n # Handling punycode\n netloc = decode_punycode(netloc)\n\n # Dropping :80 & :443\n if netloc.endswith(':80'):\n netloc = netloc[:-3]\n elif netloc.endswith(':443'):\n netloc = netloc[:-4]\n\n # Normalizing the path\n if path:\n trailing_slash = False\n if path.endswith('/') and len(path) > 1:\n trailing_slash = True\n path = normpath(path)\n if trailing_slash and not strip_trailing_slash:\n path = path + '/'\n\n # Handling Google AMP suffixes\n if normalize_amp:\n path = AMP_SUFFIXES_RE.sub('', path)\n\n # Dropping index:\n if strip_index:\n segments = path.rsplit('/', 1)\n\n if len(segments) != 0:\n last_segment = segments[-1]\n filename, ext = splitext(last_segment)\n\n if filename == 'index':\n segments.pop()\n path = '/'.join(segments)\n\n # Dropping irrelevant query items\n if query:\n domain_filter = None\n\n if splitted.hostname:\n domain_filter = next(\n (f for d, f in PER_DOMAIN_QUERY_FILTERS if splitted.hostname.endswith(d)),\n None\n )\n\n qsl = parse_qsl(query, keep_blank_values=True)\n qsl = [\n stringify_qs(item)\n for item in qsl\n if not should_strip_query_item(\n item,\n normalize_amp=normalize_amp,\n strip_lang_query_items=strip_lang_query_items,\n domain_filter=domain_filter\n )\n ]\n\n if sort_query:\n qsl = sorted(qsl)\n\n query = '&'.join(qsl)\n\n # Dropping fragment if it's not routing\n if fragment and strip_fragment:\n if strip_fragment is True or not should_strip_fragment(fragment):\n fragment = ''\n\n # Always dropping trailing slash with empty query & fragment\n if path == '/' and not fragment and not query:\n path = ''\n\n # Dropping irrelevant subdomains\n if strip_irrelevant_subdomains:\n netloc = re.sub(\n IRRELEVANT_SUBDOMAIN_AMP_RE if normalize_amp else IRRELEVANT_SUBDOMAIN_RE,\n '',\n netloc\n )\n\n # Dropping language as subdomains\n if strip_lang_subdomains:\n netloc = strip_lang_subdomains_from_netloc(netloc)\n\n # Dropping scheme\n if strip_protocol or not has_protocol:\n scheme = ''\n\n # Dropping authentication\n if strip_authentication:\n netloc = netloc.split('@', 1)[-1]\n\n # Normalizing AMP subdomains\n if normalize_amp and netloc.startswith('amp-'):\n netloc = netloc[4:]\n\n # Dropping trailing slash\n if strip_trailing_slash and path.endswith('/'):\n path = path.rstrip('/')\n\n # Quoting or not\n if quoted:\n path = quote(path)\n query = quote(query, RESERVED_CHARACTERS)\n fragment = quote(fragment, SAFE_CHARACTERS)\n else:\n path = unquote(path)\n query = unquote(query)\n fragment = unquote(fragment)\n\n # Result\n result = SplitResult(\n scheme,\n netloc.lower(),\n path,\n query,\n fragment\n )\n\n if not unsplit:\n return result\n\n # TODO: check if works with `unsplit=False`\n if strip_protocol or not has_protocol:\n result = urlunsplit(result)[2:]\n else:\n result = urlunsplit(result)\n\n return result", "def test_url_domain(self):\n assert ct.url_domain(\"http://www.google.com\") == \"google.com\"\n assert ct.url_domain(\"http://localhost\") == \"localhost\"\n assert ct.url_domain(\"http://192.168.1.19:5010\") == \"192.168.1.19\"", "def 
parseURL(url):\n\n\n scheme, host, path, params, query, hash = urlparse(url)\n if not path: path = \"/\"\n\n args = parse_qs(query)\n\n escapedArgs = {}\n for name in args:\n if len(args[name]) == 1:\n escapedArgs[unquote(name)] = unquote(args[name][0])\n else:\n escapedArgs[unquote(name)] = escapedSet = []\n for item in args[name]:\n escapedSet.append(unquote(item))\n\n return host, path, params, escapedArgs", "def parse_uri(uri, parts=5):\n if uri is None:\n return [None] * parts\n\n if not isinstance(uri, (bytes, bytearray)):\n raise TypeError(\"URI must be bytes or bytearray\")\n\n parts = int(parts)\n if not 2 <= parts <= 5:\n raise ValueError(\"Can only parse URI into two, three, four or five component parts\")\n\n scheme = auth = path = query = fragment = None\n\n start = 0\n end = len(uri)\n\n # Fragment\n q = uri.find(b\"#\")\n if q != -1:\n fragment = uri[(q + 1):]\n end = q\n\n if parts == 2:\n return uri[:end], fragment\n\n if parts >= 3:\n # Scheme\n q = uri.find(b\":\")\n if q == -1:\n start = 0\n elif scheme_pattern.match(uri, 0, q):\n scheme = uri[:q]\n start = q + 1\n\n if parts == 3:\n return scheme, uri[start:end], fragment\n\n if parts >= 5:\n # Query\n q = uri.find(b\"?\", start)\n if start <= q < end:\n query = uri[(q + 1):end]\n end = q\n\n if parts >= 3:\n # Authority and path\n p = start + 2\n if uri[start:p] == b\"//\":\n q = uri.find(b\"/\", p)\n if q == -1:\n auth = uri[p:end]\n path = b\"\"\n else:\n auth = uri[p:q]\n path = uri[q:end]\n else:\n path = uri[start:end]\n\n if parts == 3:\n return scheme, auth, path\n elif parts == 4:\n return scheme, auth, path, fragment\n else:\n return scheme, auth, path, query, fragment", "def test_splits_url_parts(self):\r\n test_value = \"http://google.com/drives-autonomous_cars\"\r\n self.assertEqual(\r\n set([u'cars', u'autonomous']),\r\n suggest_tags(test_value))", "def split_addr(self, a):\n a = a.replace('http://', '')\n a = a.replace('https://', '')\n\n addr = tlde.extract(a)\n is_ip = tlde.tldextract.looks_like_ip(addr.domain)\n if is_ip:\n ip = addr.domain\n path_and_params = a[a.index(ip)+len(ip):].split('?')\n path = path_and_params[0]\n if len(path_and_params) > 1:\n params = path_and_params[1:]\n else:\n params = ''\n return {'ip': ip, 't3': None, 't2': None, 'path': path, 'params': params, 'url/ip': 'ip'}\n else:\n t3 = addr.subdomain\n t2 = addr.registered_domain\n path_and_params = a[a.index(addr.fqdn)+len(addr.fqdn):].split('?')\n path = path_and_params[0]\n if len(path_and_params) > 1:\n params = path_and_params[1:]\n else:\n params = ''\n return {'t3': t3, 't2': t2, 'ip': None, 'path': path, 'params': params, 'url/ip': 'url'}", "def parse_url(url):\n split_url = url.split(\"&\")\n url_elem = {}\n for p in split_url:\n if \"http\" in p:\n website_split = p.split(\"?\")\n url_elem[\"name\"] = website_split[0]\n split_second_part = website_split[1].split(\"=\")\n url_elem[split_second_part[0]] = split_second_part[1]\n elif \"setup\" in p or \"scramble\" in p:\n url_elem[\"scramble\"] =p.split(\"=\")[1].replace(\"-\", \"'\").replace(\"_\", \" \")\n else:\n url_elem[p.split(\"=\")[0]] = p.split(\"=\")[1]\n before = ('-', '_', '%0A', '%5B', '%5D', '%2F', '%2C','%3A')\n after = (\"'\", \" \", \"\\r\\n\", \"[\", \"]\", \"/\", \",\", \":\")\n for i in range (len(before)):\n url_elem[\"alg\"] = url_elem[\"alg\"].replace(before[i], after[i])\n return url_elem", "def getDomain(url):\n domain = string.replace(url,\"https://www.\",\"\")\n domain = string.replace(domain,\"http://www.\",\"\")\n domain = 
string.replace(domain,\"http://\",\"\")\n domain = string.replace(domain,\".com/\",\"\")\n domain = string.replace(domain,\".com\",\"\")\n return domain", "def parse_uri(string):\n # Note that this function is currently pretty silly looking, but it will\n # grow as the package expands to provide more functionality.\n\n # The scheme and location are separated by the FIRST colon\n try:\n scheme, location = string.split(':', 1)\n except ValueError:\n # If no colon was found in the passed URI.\n raise URIParseError('Unable to find scheme and location in URI %s. '\n 'No : character present.' % string)\n\n return scheme, location", "def get_url():\n urls = re.findall('http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\(\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+',\n new_tweet)\n return urls", "def convertDocURL(files):\n return urllib.parse.urlunsplit( ('http', basepla, basedoc, files, '') )", "def url_prepare(url):\n if 'http://' in url or 'https://' in url:\n return url\n try:\n if requests.get('https://' + url):\n return 'https://' + url\n except Exception as ex:\n pprint(ex)\n return 'http://' + url", "def make_safe_url(self, url):\n\n # Split the URL into scheme, netloc, path, query and fragment\n parts = list(urlsplit(url))\n\n # Clear scheme and netloc and rebuild URL\n parts[0] = '' # Empty scheme\n parts[1] = '' # Empty netloc (hostname:port)\n safe_url = urlunsplit(parts)\n return safe_url", "def get_hostname (surl):\n if str(surl).find('srm://'):\n surl = surl [str(surl).find('srm://'):]\n\n reg = re.search('[^:]+:(/)*([^:/]+)(:[0-9]+)?(/)?.*', surl)\n host = ''\n try:\n host = reg.group(2)\n except:\n pass\n \n return host", "def split_s3_path(url):\n parsed = urlparse(url)\n if not parsed.netloc or not parsed.path:\n raise ValueError(\"bad s3 path {}\".format(url))\n bucket_name = parsed.netloc\n s3_path = parsed.path\n # Remove '/' at beginning of path.\n if s3_path.startswith(\"/\"):\n s3_path = s3_path[1:]\n return bucket_name, s3_path", "def split_s3_path(url):\n parsed = urlparse(url)\n if not parsed.netloc or not parsed.path:\n raise ValueError(\"bad s3 path {}\".format(url))\n bucket_name = parsed.netloc\n s3_path = parsed.path\n # Remove '/' at beginning of path.\n if s3_path.startswith(\"/\"):\n s3_path = s3_path[1:]\n return bucket_name, s3_path", "def normalize_url(node):\n if not node:\n node = DEFAULT_NODE\n elif '://' not in node:\n node = '//{}'.format(node)\n parts = urlparse(node, scheme='http', allow_fragments=False)\n port = parts.port if parts.port else _get_default_port(parts.scheme)\n netloc = '{}:{}'.format(parts.hostname, port)\n return urlunparse((parts.scheme, netloc, parts.path, '', '', ''))", "def init_urls(self):\n url = 'http://www.lagou.com/'\n for ip_info in self.col.find(no_cursor_timeout=True):\n ip, port = ip_info['ip'], ip_info['port']\n if ip and port:\n self.urls.append((url, ip, port)) # tuple", "def parse_endpoint(endpoint):\n\n host, port = endpoint[0].split(':')\n return host, port", "def get_host_from_url(url):\n host_name = urlparse(url).hostname\n\n if ':' in host_name:\n host_name = host_name.split(':')[0]\n\n return host_name", "def parsing(url):\n\n url = urlparse(url).netloc\n a = url.split('.')\n if len(a) >= 3:\n a = a[:-(len(a) - 1)]\n else:\n a = a[:-1]\n x = ('.'.join(a))\n return x", "def extract_credentials(url):\n parts = urlsplit(url)\n netloc = parts[1]\n if '@' in netloc:\n creds, netloc = netloc.split('@')\n credentials = tuple(_unquote(i) for i in creds.split(':'))\n parts = list(parts)\n parts[1] = netloc\n else:\n credentials = None\n 
return urlunsplit(parts), credentials", "def testLeadingSpaces(self):\n self.assertEqual([\"http://tomtom.foobar.org/\"], grab(' http://tomtom.foobar.org/', self.needScheme))\n self.assertEqual([\"http://www.foobi.org/saatoimia\"], grab(' http://www.foobi.org/saatoimia', self.needScheme))", "def parse_url(url):\n scheme, host, port, user, passwd, path, vhost, qs, qs_dict = _parse_url(url)\n return dict(scheme=scheme, hostname=host, port=port, username=user,\n password=passwd, path=path, virtual_host=vhost,\n query=qs, **qs_dict)" ]
[ "0.76368946", "0.7406988", "0.730206", "0.7067206", "0.6825835", "0.6800534", "0.6762756", "0.6522388", "0.65156657", "0.6501163", "0.64795834", "0.6466989", "0.64476293", "0.6421604", "0.64123183", "0.6403636", "0.63985515", "0.63941145", "0.6280987", "0.6277661", "0.62686586", "0.6226957", "0.62057793", "0.61999255", "0.61484814", "0.6105885", "0.60652095", "0.6029008", "0.6024448", "0.6012333", "0.600495", "0.5995218", "0.5983753", "0.59756124", "0.59368443", "0.59359413", "0.58774555", "0.5873422", "0.5869043", "0.5859018", "0.58586025", "0.5839317", "0.5835625", "0.5828536", "0.5794238", "0.5786891", "0.5773594", "0.5772855", "0.5772215", "0.57554287", "0.57184887", "0.57136637", "0.57052433", "0.5693835", "0.56648725", "0.5648526", "0.5632179", "0.55982715", "0.5584991", "0.55842215", "0.55725825", "0.5555138", "0.55474097", "0.55396354", "0.5536881", "0.55343586", "0.5529541", "0.5525791", "0.55165935", "0.5511597", "0.5497995", "0.5485708", "0.54835516", "0.5476177", "0.544945", "0.54380405", "0.5435159", "0.54219496", "0.54209566", "0.54045975", "0.53945476", "0.5392529", "0.5378378", "0.5354592", "0.5327138", "0.5325379", "0.5324342", "0.53215194", "0.5320311", "0.53133994", "0.5303954", "0.5303954", "0.52973944", "0.5296895", "0.52944475", "0.5280479", "0.5260251", "0.5253536", "0.524686", "0.523514" ]
0.8727553
0
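
The url_permutations generator among the negatives above implements the Google Safe Browsing lookup-expression rules: up to five host suffixes are combined with up to six path prefixes. A hand-traced sketch of its output follows, assuming Python 2 (the snippet depends on urllib.splittype/splithost/splituser/splitport, which were removed in Python 3) and an `import re, urllib` in the snippet's module:

# Hand-traced sketch of url_permutations (defined in the negatives above).
# Assumes Python 2 and that re/urllib are imported in the snippet's module.
for expr in url_permutations('http://a.b.example.com/1/2.html?param=1'):
    print(expr)
# a.b.example.com/1/2.html?param=1
# a.b.example.com/1/2.html
# a.b.example.com/
# a.b.example.com/1/
# b.example.com/1/2.html?param=1
# ... (3 host forms x 4 path forms = 12 expressions in total)
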
Rejoin URL parts to a string.
def url_unsplit(parts): if parts[2] == default_ports.get(parts[0]): return "%s://%s%s" % (parts[0], parts[1], parts[3]) return "%s://%s:%d%s" % parts
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _url_join(self, *parts):\n return \"/\".join(map(lambda fragment: fragment.rstrip('/'), parts))", "def url_join(*parts):\n parts = parts or [\"\"]\n clean_parts = [part.strip(\"/\") for part in parts if part]\n if not parts[-1]:\n # Empty last element should add a trailing slash\n clean_parts.append(\"\")\n return \"/\".join(clean_parts)", "def urljoin(*parts):\n def _gen(parts):\n prev = None\n for part in parts:\n if not part:\n continue\n if not prev:\n prev = part\n elif (prev[-1] == '/') != (part[0] == '/'): # Exactly one slash was present\n prev = part\n # At this point, either zero or two slashes are present. Which is it?\n elif part[0] == '/': # Two slashes.\n prev = part[1:]\n else: # No slashes.\n yield '/'\n prev = part\n yield prev\n\n return \"\".join(part for part in _gen(parts))", "def _construct_url(parts):\n results = []\n last_idx = len(parts) - 1\n for n, part in enumerate(parts):\n if n > 0:\n part = part.lstrip('/')\n if n < last_idx:\n part = part.rstrip('/')\n if part:\n results.append(part)\n return '/'.join(results)", "def _urljoin(self, *args):\r\n\t\treturn \"/\".join(map(lambda x: str(x).rstrip('/'), args))", "def urljoin(*args):\n\n return \"/\".join(map(lambda x: str(x).rstrip('/'), args))", "def join_url(*args): # type: (*str) -> str\n parts = [part[:-1] if part and part[-1] == '/' else part for part in args]\n parts.append('')\n return '/'.join(parts)", "def urljoin(*args):\n return '/'.join(str(a or '').strip('/') for a in args)", "def url_path_join(*pieces):\n initial = pieces[0].startswith(\"/\")\n final = pieces[-1].endswith(\"/\")\n stripped = [s.strip(\"/\") for s in pieces]\n result = \"/\".join(s for s in stripped if s)\n if initial:\n result = \"/\" + result\n if final:\n result = result + \"/\"\n if result == \"//\":\n result = \"/\"\n return result", "def url_path_join(*pieces):\n initial = pieces[0].startswith('/')\n final = pieces[-1].endswith('/')\n striped = [s.strip('/') for s in pieces]\n result = '/'.join(s for s in striped if s)\n if initial: result = '/' + result\n if final: result = result + '/'\n if result == '//': result = '/'\n return result", "def url_path_join(*fragments):\n fragments = fragments or (\"\",)\n result = fragments[0] # Tolerate an empty list\n for thing in fragments[1:]:\n result = result.rstrip(\"/\") + \"/\" + thing.lstrip(\"/\")\n return result", "def url_join(base_url: str, url_parts: tuple) -> str:\n built_url = ''\n base_parse = urlparse(base_url)\n if base_parse.scheme:\n built_url = base_parse.scheme + '://'\n if base_parse.netloc:\n built_url += base_parse.netloc + '/'\n if base_parse.path and not base_parse.path == '/':\n built_url += base_parse.path.lstrip('/') + '/'\n\n joined_parts = '/'.join(url_parts).replace('//', '/').strip('/')\n\n return built_url + joined_parts", "def join(self, *parts):\n if parts:\n parts = list(parts)\n if len(parts) > 1:\n for i, p in enumerate(parts[:-1]):\n parts[i] = p.strip('/')\n parts[-1] = parts[-1].lstrip('/')\n return '/'.join(parts)", "def urljoin(*args):\n from six.moves.urllib.parse import urljoin as sys_urljoin\n from functools import reduce\n return reduce(sys_urljoin, args)", "def url_build(web_url, url_part):\r\n url_full = urljoin(web_url,url_part.get('href'))\r\n return url_full", "def urljoin(*atoms):\n url = \"/\".join([x for x in atoms if x])\n while \"//\" in url:\n url = url.replace(\"//\", \"/\")\n # Special-case the final url of \"\", and return \"/\" instead.\n return url or \"/\"", "def urljoin(cls, base, end):\r\n if base and not 
base.endswith(\"/\"):\r\n base = base + \"/\"\r\n return urljoin(base, str(end))", "def urljoin(cls, base, end):\n\n if base and not base.endswith('/'):\n base = base + '/'\n return urljoin(base, str(end))", "def _join(url, endpoint):\n if not endpoint:\n return url\n parsed_url = urlparse.urlsplit(url)\n new_path = os.path.join(parsed_url.path, endpoint)\n parts = list(parsed_url)\n parts[2] = new_path\n return urlparse.urlunsplit(parts)", "def _make_url(self, url_part, blueprint_prefix):\n parts = (blueprint_prefix, self.prefix, url_part)\n return ''.join(_ for _ in parts if _)", "def composeURL(self,splitedURL):\n # 027 With use of SmartURL won't be necessary anymore.\n # 027 was used only in LinklistAdaptor.parse2Queue().parseLine() -> removed (Which actually might jeopardize cll).\n # 027 So actually is NOT used anywhere.\n \n #Could be replaced by string.join() method.\n #Also could be merged with method composePath().\n #Create child of list class with this method. \n \n self.debug.printHeader() \n url=''\n if len(splitedURL)>0:\n for piece in splitedURL:\n if not(piece==splitedURL[0]): url+='/'\n url+=piece\n self.logger.debug(\"Composed url is: %s\" %(url))\n return url\n #return \"/\".join(splitedURL) #026 This will do the same job. But needs to be tested.", "def _urljoin(self, response, url):\n return urljoin_rfc(response.url, url, response.encoding)", "def join_urls(*urls):\r\n if not urls:\r\n return\r\n \r\n url = urls[0]\r\n for u in urls[1:]:\r\n if not url.endswith('/'):\r\n url += '/'\r\n while u.startswith('/'):\r\n u = utils.lstrips(u, '/')\r\n url += u\r\n return url", "def format_url(url: str) -> str:\n return urljoin(url.replace('https://app', 'https://api'), '')", "def url_src_build(web_url, url_part):\r\n url_full = urljoin(web_url,url_part)\r\n return url_full", "def urljoin(base, *path):\n return reduce(_join, path, base)", "def concat_url(endpoint, url):\n u = \"%s/%s\" % (endpoint.rstrip(\"/\"), url.strip(\"/\"))\n return u.rstrip('/')", "def rebuild_url(scheme, path, fragment, username,\n password, hostname, port, query):\n netloc = \"@\".join(filter(None, [\n \":\".join(\n filter(None, [\n username,\n password,\n ])\n ),\n \":\".join(\n filter(None, [\n hostname,\n str(port or ''),\n ])\n )\n ]))\n\n return urllib.parse.urlunsplit([\n scheme,\n netloc,\n path,\n query,\n fragment,\n ])", "def _join_url(saas_url, endpoint):\n\n saas_url = f\"{saas_url}/\"\n endpoint = endpoint.lstrip(\"/\")\n return urljoin(saas_url, endpoint)", "def join_link(s, separator):\n if s == empty:\n return \"\"\n elif rest(s) == empty:\n return str(first(s))\n else:\n return str(first(s)) + separator + join_link(rest(s), separator)\n # so much like the TLS programming style.", "def test_url_concat(self):\n assert ct.url_join(\"www.bad-actor.services\", \"api\") == \"http://www.bad-actor.services/api\"\n assert ct.url_concat(\"https://www.bad-actor.services\", \"api\") == \"https://www.bad-actor.services/api\"\n assert ct.url_concat(\"https://www.bad-actor.services\", \"/api\") == \"https://www.bad-actor.services/api\"\n assert ct.url_concat(\n \"https://www.bad-actor.services\", \"/api\", \"new//one\") == \"https://www.bad-actor.services/api/new/one\"\n assert ct.url_concat(\"https://www.bad-actor.services\", \"/\") == \"https://www.bad-actor.services/\"\n assert ct.url_concat(\"https://www.bad-actor.services/\", \"/\") == \"https://www.bad-actor.services/\"", "def get_url_end_string(url_extras: [str]) -> str:\n if len(url_extras) == 0:\n return \"\"\n else:\n url_end_string = 
\"\"\n for i in url_extras:\n url_end_string += \"/\" + i\n return url_end_string", "def join_link(s, separator):\n if s == empty:\n return \"\"\n elif rest(s) == empty:\n return str(first(s))\n else:\n return str(first(s)) + separator + join_link(rest(s), separator)", "def ujoin(*args):\n if len(args) == 0 or len(args[0]) == 0:\n return ''\n return (\n (args[0][0] == '/') * '/' # prepend slash if first arg starts with it\n + '/'.join(x[(x[0] == '/') : (len(x) - (x[-1] == '/'))] for x in args)\n + (args[-1][-1] == '/') * '/'\n ) # append slash if last arg ends with it", "def join_link(s, separator):\n if s == empty:\n return ''\n elif rest(s) == empty:\n return str(first(s))\n else:\n return str(first(s)) + separator + join_link(rest(s), separator)", "def join_link(s, separator):\n if s == empty:\n return\"\"\n elif rest(s) == empty:\n return str(first(s))\n else:\n return str(first(s)) + separator + join_link(rest(s), separator)", "def urljoin(self, href):\n return urlparse.urljoin(self.url, href)", "def urljoin(self, url: Union[str, _RequestUrl, _ResponseUrl]) -> _RequestUrl:\n return _RequestUrl(urljoin(str(self.url), str(url)))", "def _make_combined_url(base_url, parameters, state):\n url = base_url.rstrip('?')\n url_parts = [url]\n sep_with_ampersand = ('?' in url)\n if parameters:\n query_string = urllib.urlencode(parameters)\n url_parts.extend([('&' if (sep_with_ampersand) else '?'), \n query_string])\n sep_with_ampersand = True\n\n if state:\n url_parts.extend([('&' if (sep_with_ampersand) else '?'), \n 'state=',\n state])\n\n return ''.join(url_parts)", "def urljoin(base, *path, **query):\n if base and base.endswith('/'):\n base = base[:-1]\n retval = [base]\n\n # build the path\n path = '/'.join([''] + [quote(s, '') for s in path])\n if path:\n retval.append(path)\n\n # build the query string\n params = []\n for name, value in query.items():\n if type(value) in (list, tuple):\n params.extend([(name, i) for i in value if i is not None])\n elif value is not None:\n if value is True:\n value = 'true'\n elif value is False:\n value = 'false'\n params.append((name, value))\n if params:\n retval.extend(['?', urlencode(params)])\n\n return ''.join(retval)", "def generate_full_url(base_url, lineage, segment):\n params = \"/\".join([lineage, segment])\n return urljoin(base_url, params)", "def urlunsplit(urlparts):\n res = urlparse.urlunsplit(urlparts)\n if os.name == 'nt' and urlparts[0] == 'file' and '|' not in urlparts[2]:\n # UNC paths must have 4 slashes: 'file:////server/path'\n # Depending on the path in urlparts[2], urlparse.urlunsplit()\n # left only two or three slashes. 
This is fixed below\n repl = 'file://' if urlparts[2].startswith('//') else 'file:/'\n res = res.replace('file:', repl)\n return res", "def normalize(seed_url, link):\n link, _ = urldefrag(link) # remove hash to avoid duplicates\n return urljoin(seed_url, link)", "def urlify2(w, length):\n chars = []\n while i < len(w):\n c = w[i]\n if c == ' ':\n chars.append('%20') \n else:\n chars.append(c)\n i += 1\n url_w = build_string(chars)\n return url_w", "def build_url(main_url, url_params):\n return main_url + \"/\" + \"/\".join(url_params)", "def test_url_join(self):\n assert ct.url_join(\"www.bad-actor.services\", \"api\") == \"http://www.bad-actor.services/api\"\n assert ct.url_join(\"https://www.bad-actor.services\", \"api\") == \"https://www.bad-actor.services/api\"\n assert ct.url_join(\"https://www.bad-actor.services\", \"/api\") == \"https://www.bad-actor.services/api\"\n assert ct.url_join(\"https://www.bad-actor.services\", \"/api\") == \"https://www.bad-actor.services/api\"\n assert ct.url_join(\"http://www.bad-actor.services\", \"/\") == \"http://www.bad-actor.services/\"\n assert ct.url_join(\"https://www.bad-actor.services/\", \"/\") == \"https://www.bad-actor.services/\"\n assert ct.url_join(\n \"https://www.bad-actor.services/\", \"/\", \"api\") == \\\n \"https://www.bad-actor.services/api\"\n assert ct.url_join(\n \"bad-actor-services_bad-actor-services-web_1:5000\", \"/api/proxies\") == \\\n \"http://bad-actor-services_bad-actor-services-web_1:5000/api/proxies\"", "def get_full_url(self, part_url):\n return BASE_URL + part_url", "def safe_join(base, *paths):\n from urlparse import urljoin\n base_path = force_text(base)\n base_path = base_path.rstrip(\"/\")\n paths = [force_text(p) for p in paths]\n\n final_path = base_path\n for path in paths:\n final_path = urljoin(final_path.rstrip(\"/\") + \"/\", path.rstrip(\"/\"))\n\n # Ensure final_path starts with base_path and that the next character after\n # the final path is '/' (or nothing, in which case final_path must be\n # equal to base_path).\n base_path_len = len(base_path)\n if (not final_path.startswith(base_path) or\n final_path[base_path_len:base_path_len + 1] not in (\"\", \"/\")):\n raise ValueError(\"the joined path is located outside of the base path\"\n \" component\")\n\n return final_path.lstrip(\"/\")", "def route_join(*args):\n route_url = \"/\".join([x.strip(\"/\") for x in args])\n if not route_url.startswith(\"/\"):\n route_url = \"/\" + route_url\n return route_url", "def build_url(base: str, *segments, **query) -> str:\n\n parsed_base = urlparse(base).geturl()\n\n if not segments:\n path = ''\n else:\n path_segments = []\n for segment in segments:\n # Do not strip leading or trailing `/` from segments\n path_segments.append(quote(segment, safe=''))\n path = '/'.join(path_segments)\n\n if not query:\n queries = ''\n else:\n query_pairs = []\n for key, value in query.items():\n key_value_pair = [quote(key, safe=''), quote(value, safe='')]\n query_pairs.append('='.join(key_value_pair))\n queries = '?' 
+ '&'.join(query_pairs)\n\n path = '/' + path if path else ''\n\n return ''.join([parsed_base, path, queries])", "def path_join(base, *urls):\n if not base.endswith(\"/\"):\n base += \"/\"\n btpl = urlsplit(base)\n path = btpl.path\n for url in urls:\n utpl = urlsplit(url)\n if btpl.scheme == \"\":\n path = os.path.join(path, utpl.path)\n path = os.path.normpath(path)\n else:\n path = urljoin(path, utpl.path)\n return btpl._replace(path=path).geturl()", "def toString(self):\n self.query = {}\n for i in self.arguments:\n self.query[i] = self.arguments[i]\n\n self.query = urlencode(self.query)\n\n return urlparse.urlunsplit((self.scheme, self.netloc,\n self.path, self.query,self.fragment))", "def join_api_url(api_base_url, api_path):\n if api_base_url.endswith('/'):\n api_base_url = api_base_url[:-1]\n if api_path.startswith('/'):\n api_path = api_path[1:]\n\n return api_base_url + '/' + api_path", "def format_url(url):\n no_scheme = url.split('://', 1)[-1]\n return '[{0}]({1})'.format(no_scheme, url)", "def to_url(request):\r\n scheme, netloc, path, query, fragment = urlsplit(to_utf8(request.url))\r\n query = parse_qs(query)\r\n\r\n for key, value in request.data_and_params.iteritems():\r\n query.setdefault(key, []).append(value)\r\n\r\n query = urllib.urlencode(query, True)\r\n return urlunsplit((scheme, netloc, path, query, fragment))", "def _make_url(self, path):\n if not self.base_location:\n raise ValueError(\"No base_location set. Cannot construct url.\")\n\n if path:\n path = self._normalise_last_slashes(path)\n path = self._normalise_head_slashes(path)\n\n return \"\".join((self.base_location, self.endpoint, path))", "def remove_last_part_of_url(category_url):\n return \"/\".join(category_url.split(\"/\")[:-1])", "def _fix_url(self, curr_url, rel):\n\n rel_l = rel.lower()\n if rel_l.startswith(\"http://\") or rel_l.startswith(\"https://\"):\n curr_url, rel = rel, \"\"\n\n # compute the new url based on import\n curr_url = urlparse.urldefrag(curr_url)[0]\n parsed_url = urlparse.urlparse(curr_url)\n return urlparse.urljoin(parsed_url.geturl(), rel)", "def refactor_app_url(self, url ):\n up = urlparse.urlparse( url )\n qs = urlparse.parse_qs(up.query)\n nqs = [('appid', qs.get('appid')) , ('pkgid',qs.get('pkgid',-1))]\n up = list(up)\n up[4] = urllib.urlencode(nqs,doseq=True)\n return urlparse.urlunparse(tuple(up))", "def contract_url(full_url: str) -> str:\n url_lst = list(urlparse(full_url))\n # delete params, query and fragment\n for i in [3, 4, 5]:\n url_lst[i] = ''\n # reduce url : path parts\n path_parts = url_lst[2].split('/')\n url_lst[2] = '/'.join((path_parts[0], '...', path_parts[-2], path_parts[-1]))\n contracted_url = urlunparse(url_lst)\n\n return contracted_url", "def _clean_url(self, url):\n return \"\".join(url.split(\"?\")[:1])", "def merge_url(url, params):\n req = PreparedRequest()\n req.prepare_url(url, params)\n return req.url", "def _full_url(url, _params={}):\n\n # Support for unicode domain names and paths.\n scheme, netloc, path, params, query, fragment = urlparse.urlparse(url)\n\n if not scheme:\n raise ValueError(\"Invalid URL %r: No schema supplied\" % url)\n\n netloc = netloc.encode('idna')\n\n if isinstance(path, unicode):\n path = path.encode('utf-8')\n\n path = requote_path(path)\n\n url = str(urlparse.urlunparse([scheme, netloc, path, params, query,\n fragment]))\n\n if _params:\n if urlparse.urlparse(url).query:\n return '%s&%s' % (url, _params)\n else:\n return '%s?%s' % (url, _params)\n else:\n return url", "def url_fix(s, charset='utf-8'):\n if 
isinstance(s, unicode):\n s = s.encode(charset, 'ignore')\n scheme, netloc, path, qs, anchor = urlparse.urlsplit(s)\n path = urllib.quote(path, '/%')\n qs = urllib.quote_plus(qs, ':&=')\n return urlparse.urlunsplit((scheme, netloc, path, qs, anchor))", "def join(path, *paths):\n\n for p in paths:\n if p.startswith(\"/\"):\n path = p\n elif p != \"\":\n path += (\"\" if path == \"\" or path.endswith(\"/\") else \"/\") + p\n return path", "def _proper_url(self, url):\n if self.base_url not in url:\n url = self.base_url + url\n url = re.sub(r'(?<!https:)//', '/', url)\n if not url.endswith('/') and '?' not in url:\n url = url + '/'\n if url.endswith('?'):\n url = url[:-1]\n return url", "def convert_single_relation_url_to_simplified_format(relation_url):\n relation_url = relation_url.strip()\n prefix = 'www.freebase.com/'\n if not relation_url.startswith(prefix):\n raise Exception(\"Invalid format of relation '{}', expected prefix '{}'\".format(relation_url, prefix))\n return relation_url[len(prefix):].replace('/', '.').strip()", "def url_truncate(url):\n \n url_tuple = urlparse.urlparse(url)\n return url_tuple[0] + '://' + url_tuple[1]", "def recreate_rel_url(url, parsed):\n if parsed.query:\n url = u'%s?%s' % (url, parsed.query)\n if parsed.fragment:\n url = u'%s#%s' % (url, parsed.fragment)\n return url", "def _concatenate_url(self, response: Dict[str, Any], url_field: str = 'url') -> None:\n split_url_field = url_field.split('.')\n if len(split_url_field) > 1:\n self._concatenate_url(response.get(split_url_field[0], {}), '.'.join(split_url_field[1:]))\n\n elif url_field in response:\n url = urljoin(self._base_url, response[url_field]).replace('https://api', 'https://app')\n response[url_field] = url", "def cut_url(url):\n if len(url) > 50:\n return f\"...{url[-45:]}\"\n return url", "def clean_url(url: str, keys: List[str]) -> str:\n u = urlparse(url)\n query = parse_qs(u.query, keep_blank_values=True)\n\n for key in keys:\n query.pop(key, None)\n\n u = u._replace(query=urlencode(query, True))\n \n return urlunparse(u)", "def path_join(first: str, second: str) -> str:\n first = first.rstrip('/\\\\')\n second = second.lstrip('/\\\\')\n if not first: return second\n if not second: return first\n return first + '/' + second", "def merge_link(url_domain, url_path):\n\n # Ensure domain is not empty\n if url_domain.strip() == \"\":\n return url_path\n\n # Strip / at end of domain\n if url_domain[-1] == \"/\":\n url_domain = url_domain[0:-1]\n\n # Strip / at beginning of path\n if url_path[0] == \"/\":\n url_path = url_path[1:]\n\n url_full = \"/\".join([url_domain, url_path])\n\n return url_full", "def generate_problem_url(problem_url_parts, base_course_url):\r\n problem_url = base_course_url + \"/\"\r\n for i, part in enumerate(problem_url_parts):\r\n if part is not None:\r\n # This is the course_key. 
We need to turn it into its deprecated\r\n # form.\r\n if i == 0:\r\n part = part.to_deprecated_string()\r\n # This is placed between the course id and the rest of the url.\r\n if i == 1:\r\n problem_url += \"courseware/\"\r\n problem_url += part + \"/\"\r\n return problem_url", "def join(*paths):\r\n path = \"\"\r\n for component in paths:\r\n path += (\"/\" if path and not path.endswith(\"/\") else \"\") + component.replace(\r\n \"\\\\\", \"/\"\r\n )\r\n return path", "def transform_url(result):\n import re\n result = re.sub('//', '/', result)\n result = re.sub('/', '//', result, count=1)\n return encode_url_path(result)", "def process_url(url):\n # only get url path, remove host,params.\n url = urlparse(url).path\n # url = list(url)\n # for i in range(len(url)):\n # if _is_punctuation(url[i]):\n # url[i] = \" \"\n # url = ''.join(url)\n # url = ' '.join(url.split())\n return url", "def str_join(paths: []):\n return \"/\".join(paths)", "def fix_url_path(url: str) -> str:\n return url if url.endswith(\"/\") else url + \"/\"", "def normalize_url(url):\n if not url.startswith((\"git+\", \"hg+\")):\n return url\n return url[4:]", "def join_path(self, path_parts):\n return os.path.sep.join(path_parts)", "def make_cm_url(url):\n protocol, address = url.split('//')\n address_parts = address.split('/')\n new_address_parts = []\n for i, part in enumerate(address_parts):\n if part == 'api':\n continue\n if i == 0 and '-gk-' in part:\n new_address_parts.append(part.replace('-gk-', '-cm-'))\n elif part.endswith('s'):\n new_address_parts.append(part[:-1])\n else:\n new_address_parts.append(part)\n return protocol + '//' + '/'.join(new_address_parts)", "def transform_url_without_encode(result):\n import re\n result = re.sub('//', '/', result)\n result = re.sub('/', '//', result, count=1)\n return result", "def urlify_pythonic(text, length):\n return text.rstrip().replace(\" \", \"%20\")", "def clean_url(url: str) -> str:\n r = urlparse(url)\n parts = list(r)\n # Add a / to the end of the path if it isn't there\n if not parts[2].endswith(\"/\"):\n parts[2] += \"/\"\n return urlunparse(parts)", "def unsplit_svn_url(tup):\n repo, prefix, project, branch, suffix, peg = tuple(tup)\n res = [repo or '^',\n prefix or '/',\n ]\n if project:\n res.extend([project, '/'])\n if branch:\n res.extend([branch, '/'])\n if suffix:\n res.append(suffix)\n if peg:\n res.extend(['@', str(peg)])\n return ''.join(res)", "def format_uri(uri, parameters):\n parts = urlparse.urlsplit(uri)\n query_string = urllib.urlencode(parameters)\n return urlparse.urlunsplit((\n parts.scheme,\n parts.netloc,\n parts.path,\n query_string,\n parts.fragment))", "def build_uri(\n url: str, query_params: Optional[Dict] = None, fragment: Optional[Dict] = None\n) -> str:\n if query_params is None:\n query_params = {}\n\n if fragment is None:\n fragment = {}\n\n parsed_url = urlparse(url)\n uri = urlunsplit(\n (\n parsed_url.scheme,\n parsed_url.netloc,\n parsed_url.path,\n urlencode(query_params, quote_via=quote), # type: ignore\n urlencode(fragment, quote_via=quote), # type: ignore\n )\n )\n return uri", "def build_url(self, request, action, **query):\n base = urlparse.urljoin(request['base_url'], self.auth_prefix + '/' + action)\n return appendArgs(base, query)", "def __get_full_url(self, operation, slug_params):\n return (self.base_url + operation[1]) % slug_params", "def make_safe_url(self, url):\n\n # Split the URL into scheme, netloc, path, query and fragment\n parts = list(urlsplit(url))\n\n # Clear scheme and netloc and rebuild URL\n 
parts[0] = '' # Empty scheme\n parts[1] = '' # Empty netloc (hostname:port)\n safe_url = urlunsplit(parts)\n return safe_url", "def reformat_weburl(s):\n s = (s or '').strip()\n\n if s and '.' in s and 'notavailable' not in s:\n match = re.match(r'^http(s)?://', s)\n if not match:\n s = 'http://' + s\n\n return s", "def urlify(board):\n return(board.replace(\" \",\"%20\"))", "def __url(self, *els):\n\n urls = [str(el) for el in els]\n urls.insert(0, self.BASE_URL)\n\n return '/'.join(s.strip('/') for s in urls)", "def _override_tourl(self):\n base_url = urlparse.urlparse(self.url)\n try:\n query = base_url.query\n except AttributeError:\n # must be python <2.5\n query = base_url[4]\n query = parse_qs(query)\n for k, v in self.items():\n query.setdefault(k, []).append(v)\n\n try:\n scheme = base_url.scheme\n netloc = base_url.netloc\n path = base_url.path\n params = base_url.params\n fragment = base_url.fragment\n except AttributeError:\n # must be python <2.5\n scheme = base_url[0]\n netloc = base_url[1]\n path = base_url[2]\n params = base_url[3]\n fragment = base_url[5]\n\n url = (scheme, netloc, path, params,\n urllib.urlencode(query, True), fragment)\n return urlparse.urlunparse(url)", "def _replace_url_query(url, new_query):\n scheme, netloc, path, _, fragment = urlparse.urlsplit(url)\n return urlparse.urlunsplit((scheme, netloc, path, new_query, fragment))", "def truncate_url(url):\n url = parse.unquote(url)\n if len(url) <= 60 :\n return url\n url = url[:-1] if url.endswith(\"/\") else url\n url = url.split(\"//\",1)[1].split(\"/\")\n url = \"%s/.../%s\"%(url[0],url[-1])\n return url[:60]+\"...\" if len(url) > 60 else url", "def prettify_url(url):\n\n if not isinstance(url, urllib.parse.ParseResult):\n url = urllib.parse.urlparse(url)\n urlstr = url.hostname + url.path\n return urlstr", "def chomp_protocol(url: str) -> str:\n if \"+\" in url:\n url = url.split(\"+\", 1)[1]\n scheme, netloc, path, query, frag = urlparse.urlsplit(url)\n rev = None\n if \"@\" in path:\n path, rev = path.rsplit(\"@\", 1)\n url = urlparse.urlunsplit((scheme, netloc, path, query, \"\"))\n if url.startswith(\"ssh://git@github.com/\"):\n url = url.replace(\"ssh://\", \"git+ssh://\")\n elif \"://\" not in url:\n assert \"file:\" not in url\n url = url.replace(\"git+\", \"git+ssh://\")\n url = url.replace(\"ssh://\", \"\")\n return url" ]
[ "0.789799", "0.76372606", "0.7635184", "0.7324334", "0.7313276", "0.72960657", "0.72905284", "0.7280653", "0.72795635", "0.72734", "0.7244201", "0.7212807", "0.7209664", "0.7034434", "0.6943335", "0.69341743", "0.68809646", "0.6825387", "0.6815612", "0.6801993", "0.6796151", "0.6761088", "0.6650421", "0.6643362", "0.66092616", "0.6552249", "0.6492183", "0.6481406", "0.6469651", "0.6354086", "0.6277589", "0.61715513", "0.6167748", "0.6158875", "0.6150829", "0.61283296", "0.61029476", "0.6079713", "0.6063716", "0.60603786", "0.6044273", "0.60303223", "0.58970505", "0.5838758", "0.58207303", "0.58199745", "0.5807891", "0.58007646", "0.5794224", "0.5786461", "0.5778569", "0.5776209", "0.57531935", "0.57480663", "0.57472247", "0.5698281", "0.56883466", "0.56811786", "0.56776", "0.5667447", "0.56626046", "0.56464064", "0.56445843", "0.56310093", "0.5615456", "0.56049657", "0.5604648", "0.56014675", "0.55829084", "0.5581738", "0.5573638", "0.55592734", "0.55582887", "0.5554254", "0.5553941", "0.55483884", "0.55410635", "0.55226296", "0.55208224", "0.5505141", "0.5496607", "0.5487179", "0.54858273", "0.5482169", "0.5460845", "0.5440295", "0.54258925", "0.54229254", "0.54228723", "0.5405417", "0.5399809", "0.53961986", "0.53932416", "0.53918076", "0.5391412", "0.5390294", "0.5387802", "0.53856564", "0.5376287", "0.5374994" ]
0.6047936
40
Split optional port number from host. If host has no port number, the given default port is returned.
def splitport(host, port=0):
    if ":" in host:
        shost, sport = host.split(":", 1)
        iport = is_numeric_port(sport)
        if iport:
            host, port = shost, iport
        elif not sport:
            # empty port, i.e. the host was "hostname:"
            host = shost
        else:
            # For an invalid non-empty port, leave the host name as is
            pass
    return host, port
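A minimal usage sketch (added for illustration, not part of the dataset entry; is_numeric_port is undefined in the snippet above, so a trivial stand-in is assumed here):

def is_numeric_port(sport):
    # hypothetical helper: int value for digit-only strings, else 0 (falsy)
    return int(sport) if sport.isdigit() else 0

assert splitport("example.com:8080") == ("example.com", 8080)  # valid port
assert splitport("example.com") == ("example.com", 0)          # no port -> default
assert splitport("example.com:") == ("example.com", 0)         # empty port stripped
assert splitport("example.com:abc") == ("example.com:abc", 0)  # invalid port kept as-is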
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def api_host_port(hostport):\n if hostport is None:\n return (None, None)\n formatted_host = __host_per_rfc_2732(hostport)\n # The \"bogus\" is to make it look like a real, parseable URL.\n parsed = urlparse(\"bogus://%s\" % (formatted_host)) \n return (None if parsed.hostname == \"none\" else parsed.hostname,\n parsed.port)", "def _decompose_host_port(hp):\n split_re = re.compile(r\"^([^[:][^:]*|\\[[^\\]]+\\])(:([0-9]+))?$\")\n match = split_re.match(hp)\n if match is None:\n return (\"\", None)\n return (match.group(1), match.group(3))", "def parse_server(server, default_port):\n\tserver = server.rsplit(':', 1)\n\thost = server[0]\n\tif host.startswith('[') and host.endswith(']'):\n\t\thost = host[1:-1]\n\tif len(server) == 1:\n\t\treturn (host, default_port)\n\tport = server[1]\n\tif not port:\n\t\tport = default_port\n\telse:\n\t\tport = int(port)\n\treturn (host, port)", "def parse_server_port(self, default=None):\n\t\tcfg_server_port = self.cfg_root.find('server_ip')\n\t\tif cfg_server_port and cfg_server_port.text.isnumeric():\n\t\t\tcfg_server_port = int(cfg_server_port.text) # TODO: What if float...\n\t\telse: # ip not specified\n\t\t\tcfg_server_port = default\n\n\t\treturn cfg_server_port", "def parse_port():\n port = 8484\n try:\n port = int(sys.argv[1])\n except Exception as e:\n print(\"CLI argument for port could not be parsed: \" + str(e))\n print(\"Fall back on default port: \" + str(port))\n pass\n return '{}'.format(port)", "def url_unsplit(parts):\n if parts[2] == default_ports.get(parts[0]):\n return \"%s://%s%s\" % (parts[0], parts[1], parts[3])\n return \"%s://%s:%d%s\" % parts", "def __find_port(self, url):\n match = self.__REGEX_PORT.search(url)\n if match:\n port_num = match.group(0).split(':')[1]\n return port_num\n return None", "def address( self, host=None, port=None ):\n if host is not None:\n parts = str( host ).split( ':', 1 )\n self.host = parts[0]\n if len(parts) > 1:\n self.port = int( parts[1] )\n\n if port is not None:\n self.port = int( port )\n\n if not self.host or self.port == self.default_port:\n return self.host\n\n return join( (self.host, str(self.port)), ':' )", "def extract_port(url):\n port = urlsplit(url).port\n if port is None:\n port = cookiejar.DEFAULT_HTTP_PORT\n return port", "def host_ports(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"host_ports\")", "def apply_default_port(nodes):\n nodes = nodes.split(',')\n\n def append_port(node):\n if re.match(r'.*:\\d+$', node):\n return node\n return f'{node}:8091'\n return [append_port(x) for x in nodes]", "def get_host_port(self) -> int:\n return self.config_dict.get(\"host_port\", 0)", "def _get_event_port(port: Optional[int], event_url: Optional[str]) -> Optional[int]:\n if port is not None:\n return port\n if event_url is not None:\n u = urlparse(event_url)\n return u.port\n return None", "def _grab_port(self):\r\n port = \"\"\r\n while self._char != -1 and self._char in \"0123456789\":\r\n port += self._char\r\n self._get_char()\r\n if len(port) == 0:\r\n self._error(\"port empty\")\r\n return int(port)", "def port(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"port\")", "def port(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"port\")", "def port(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"port\")", "def port(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"port\")", "def _get_host(self, scheme='', hostname_only=False):\n host = self.host or ''\n # urlparse requires 
'//' to be provided if scheme is not specified\n original_parsed = urlparse.urlsplit(host)\n if (not original_parsed.scheme and not host.startswith('//')) or original_parsed.hostname is None:\n host = '%s://%s' % (scheme, host) if scheme else '//%s' % host\n parsed = urlparse.urlsplit(host)\n\n if hostname_only:\n return parsed.hostname\n\n try:\n port = parsed.port or self.port\n except ValueError:\n port = self.port\n netloc = parsed.netloc if port is None else '%s:%s' % (parsed.hostname, port)\n\n url_components = list(parsed)\n url_components[1] = netloc\n ret = urlparse.urlunsplit(url_components)\n return ret.lstrip('/')", "def host_ports(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['HostPortRangeArgs']]]]:\n return pulumi.get(self, \"host_ports\")", "def get_host_port_string(addr: AddressTupleVXType) -> str:\n host = get_host_string(addr)\n if \":\" in host:\n return \"[{}]:{}\".format(host, addr[1])\n return \"{}:{}\".format(host, addr[1])", "def test_parse_host_port(self):\n # test default port for http\n endpoint = \"1.2.3.4\"\n default_protocol = baidubce.protocol.HTTP\n ret_protocol, host, port = utils.parse_host_port(endpoint, default_protocol)\n self.assertEqual(ret_protocol, baidubce.protocol.HTTP)\n self.assertEqual(host, endpoint)\n self.assertEqual(port, default_protocol.default_port)\n\n # test default port for https\n endpoint = \"1.2.3.4\"\n default_protocol = baidubce.protocol.HTTPS\n ret_protocol, host, port = utils.parse_host_port(endpoint, default_protocol)\n self.assertEqual(ret_protocol, baidubce.protocol.HTTPS)\n self.assertEqual(host, endpoint)\n self.assertEqual(port, default_protocol.default_port)\n\n # test specific port\n endpoint = \"1.2.3.4:8080\"\n default_protocol = baidubce.protocol.HTTP\n ret_protocol, host, port = utils.parse_host_port(endpoint, default_protocol)\n self.assertEqual(ret_protocol, baidubce.protocol.HTTP)\n self.assertEqual(host, \"1.2.3.4\")\n self.assertEqual(port, 8080)\n\n # test value error\n endpoint = \"1.2.3.4:abcd\"\n default_protocol = baidubce.protocol.HTTP\n self.assertRaises(ValueError, utils.parse_host_port, endpoint, default_protocol)\n\n # protocol unsupported\n endpoint = \"ftp://1.2.3.4\"\n default_protocol = baidubce.protocol.HTTP\n self.assertRaises(ValueError, utils.parse_host_port, endpoint, default_protocol)\n\n # test of endpoint dominates the protocol\n endpoint = \"http://1.2.3.4:8080\"\n default_protocol = baidubce.protocol.HTTPS\n ret_protocol, host, port = utils.parse_host_port(endpoint, default_protocol)\n self.assertEqual(ret_protocol, baidubce.protocol.HTTP)\n self.assertEqual(host, \"1.2.3.4\")\n self.assertEqual(port, 8080)", "def host_ports(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['HostPortRangePatchArgs']]]]:\n return pulumi.get(self, \"host_ports\")", "def port(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"port\")", "def port(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"port\")", "def port(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"port\")", "def port(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"port\")", "def port(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"port\")", "def port(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"port\")", "def port(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"port\")", "def parse_server_name(server_name: str) -> Tuple[str, Optional[str]]:\n try:\n if server_name[-1] == \"]\":\n # ipv6 literal, hopefully\n 
return server_name, None\n\n host_port = server_name.rsplit(\":\", 1)\n host = host_port[0]\n port = host_port[1] if host_port[1:] else None\n\n if port:\n port_num = int(port)\n\n # exclude things like '08090' or ' 8090'\n if port != str(port_num) or not (1 <= port_num < 65536):\n raise ValueError(\"Invalid port\")\n\n return host, port\n except Exception:\n raise ValueError(\"Invalid server name '%s'\" % server_name)", "def _check_ip_port_split(self):\n if self._type == \"A\":\n formatted_value = self._value.split(':')\n self._ip = formatted_value[0]\n self._port = int(formatted_value[1])", "def _get_unused_port(hostname):\n for port in range(8000, 9001):\n if _check_port_available(hostname, port):\n return port", "def get_port_number():\n try:\n return os.environ[\"PORT\"]\n except Exception:\n return None", "def get_port_arg(self):\n\ttry:\n\t arg = sys.argv[4]\n\t port = int(arg) \n\texcept ValueError:\n\t print \"Port must be a number only.\"\n\t sys.exit(\"Example usage:\\n\\nclient.py myfile.txt receivedfile.txt 127.0.0.1 6060 0.0\")\t \n\texcept IndexError:\n\t print \"Port number must be provided.\"\n\t sys.exit(\"Example usage:\\n\\nclient.py myfile.txt receivedfile.txt 127.0.0.1 6060 0.0\")\t \n\tif any([port < 1024, port > 60000]):\n\t print \"Port must be between 1024 and 60000\"\n\t sys.exit(\"Example usage:\\n\\nclient.py myfile.txt receivedfile.txt 127.0.0.1 6060 0.0\")\t \n\telse:\n\t return port", "def _find_host_port(ports: Dict[str, Any], container_port: int) -> str:\n mappings = ports.get('{}/tcp'.format(container_port), [])\n for mapping in mappings:\n if mapping['HostIp'] == '0.0.0.0':\n return mapping['HostPort']\n else:\n raise ValueError(\n 'No HostPort found for ContainerPort={} (all port mappings: {})'\n .format(container_port, ports))", "def clean_port(self):\n port = self.cleaned_data['port']\n if not port:\n port = 0\n return port", "def port():", "def select_unused_port():\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.bind(('127.0.0.1', 0))\n _, port = sock.getsockname()\n sock.close()\n return port", "def get_ip_port_tshark(str_data):\n separator = str_data.rindex(\":\")\n ip = str_data[:separator]\n port = str_data[separator + 1:]\n return ip, port", "def parse_port_req(self, sock):\n try:\n host_ip = self.s.getsockname()[0] # Get local IPv4 addr of client.\n host_port = sock.getsockname()[1] # Get opened port of socket.\n # PORT requires parameters split up as:\n # octet1,octet2,octet3,octet4,p1,p2\n list_csv_ip = host_ip.split('.') # Split octets into a list.\n port_params = \"\"\n for octet in list_csv_ip:\n port_params += octet + \",\"\n # Parse port into PORT command's expected parameter.\n p1 = str((host_port - (host_port % 256)) / 256)\n p2 = str(host_port % 256)\n port_params += p1 + \",\" + p2\n except:\n return \"\", \"\", \"\"\n return port_params, host_ip, host_port", "def port(self) -> Optional[int]:\n return pulumi.get(self, \"port\")", "def port(self) -> Optional[int]:\n return pulumi.get(self, \"port\")", "def port(self) -> Optional[int]:\n return pulumi.get(self, \"port\")", "def port(self) -> Optional[int]:\n return pulumi.get(self, \"port\")", "def port(self) -> Optional[int]:\n return pulumi.get(self, \"port\")", "def port(self) -> Optional[int]:\n return pulumi.get(self, \"port\")", "def port(self) -> Optional[int]:\n return pulumi.get(self, \"port\")", "def parse_endpoint(endpoint):\n\n host, port = endpoint[0].split(':')\n return host, port", "def port(self) -> int:\n if hasattr(self, \"_port\"):\n return 
self._port\n _args: list[Arg] = []\n _ctx = self._select(\"port\", _args)\n return _ctx.execute_sync(int)", "def url_split(url):\n scheme, netloc = urllib.splittype(url)\n host, document = urllib.splithost(netloc)\n port = default_ports.get(scheme, 0)\n if host:\n host = host.lower()\n host, port = splitport(host, port=port)\n return scheme, host, port, document", "def make_port_list(ssh, https, port):\n\n ports = []\n\n if ssh:\n ports.append(22)\n if https:\n ports.append(443)\n ports.append(80)\n if port is not None:\n ports.append(port)\n\n return ports", "def find_port(client: _client.NeutronClientType = None,\n unique=False,\n default: PortType = None,\n **params):\n ports = list_ports(client=client, **params)\n if default is None or ports:\n if unique:\n return ports.unique\n else:\n return ports.first\n else:\n return default", "def _parse_host(host: str) -> str:\n urlparse_host = urlsplit(host).hostname\n if urlparse_host:\n # In this case, host = https://xx.cloud.databricks.com\n return urlparse_host\n else:\n # In this case, host = xx.cloud.databricks.com\n return host", "def _parseurl(url):\n tracker1=url\n port=int(re.findall(\"[0-9]+\",tracker1)[0])\n host=re.findall(\"[^0-9]+\",tracker1)[0]\n host=host[:-1]\n host=host[6:]\n return host,port", "def test_default_port(self):\n options = ControlOptions()\n options.parseOptions([])\n self.assertEqual(options[\"port\"], b'tcp:%d' % (REST_API_PORT,))", "def find_unused_port():\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)\n sock.bind(('127.0.0.1', 0))\n sock.listen(socket.SOMAXCONN)\n ipaddr, port = sock.getsockname()\n sock.close()\n return port", "def find_first_available_port():\n skt = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n skt.bind((\"0.0.0.0\", 0))\n _, port = skt.getsockname()\n skt.close()\n return port", "def mod_func(port):\n\n print(port)\n return port", "def get_safe_port():\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.bind((LOCALHOST, 0))\n port = sock.getsockname()[1]\n sock.close()\n return port", "def port(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"port\")", "def port_parser(string):\n try:\n portnum = int(string)\n if portnum < MIN_TCP_PORT_NUM:\n print('?? TCP port value (%d) too low; changing to %d' % (portnum, MIN_TCP_PORT_NUM))\n elif portnum > MAX_TCP_PORT_NUM:\n print('?? 
TCP port value (%d) too high; changing to %d' % (portnum, max(TCP_PORT_RANGE)))\n return max(min(portnum, MAX_TCP_PORT_NUM), MIN_TCP_PORT_NUM)\n except:\n syndrome = 'invalid port count: %s\\ncount must be a positive integer in range %d - %d' % (\n string, MIN_TCP_PORT_NUM, MAX_TCP_PORT_NUM)\n raise argparse.ArgumentTypeError(syndrome)", "def get_serverport(cobj):\n pass", "def test_remove_empty_port():\n assert (normalize_url(\"http://www.example.com:/\") ==\n \"http://www.example.com/\")", "def port(self) -> pulumi.Output[Optional[int]]:\n return pulumi.get(self, \"port\")", "def port(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"port\")", "def port(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"port\")", "def test_remove_default_port():\n assert (normalize_url(\"http://www.example.com:80/bar.html\") ==\n \"http://www.example.com/bar.html\")\n assert (normalize_url(\"HTTPS://example.com:443/abc/\") ==\n \"https://example.com/abc\")", "def fill_port(self, data):\n self.port = get_optional_value(data, self.PORT, None)\n if self.port:\n self.port = \":\" + str(self.port)\n else:\n self.port = \"\"", "def port(self):\n return self._host[CONF_PORT]", "def port_number(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"port_number\")", "def get_localhost_port():\n localhost_port = input(\"Please Enter Localhost Port Number (i.e '22002') \")\n global localhost_port_num\n localhost_port_num = localhost_port\n return localhost_port_num", "def GetPostgresPortNumber():\n pattern = r\"^\\s*port\\s*=\\s*(\\d{4,})\\s*\"\n\n match = MatchPattern(POSTGRES_PROPERTIES_PATH, pattern)\n if match:\n port = match[0]\n return port\n\n return None", "def get_my_port(self):\n if self.starter_port is not None:\n return self.starter_port\n\n where = -1\n tries = 10\n while where == -1 and tries:\n tries -= 1\n lfcontent = self.get_log_file()\n where = lfcontent.find(\"ArangoDB Starter listening on\")\n if where != -1:\n where = lfcontent.find(\":\", where)\n if where != -1:\n end = lfcontent.find(\" \", where)\n port = lfcontent[where + 1 : end]\n self.starter_port = port\n assert int(port), \"port cannot be converted to int!\"\n return port\n logging.info(\"retrying logfile\")\n time.sleep(1)\n message = \"could not get port form: \" + self.log_file\n logging.error(message)\n raise Exception(message)", "def port(name):\n\n words = name.upper().split('-', 1)\n\n if len(words) == 1:\n words.append(words[0][1])\n\n return int(f\"{ord(words[0][0])}{ord(words[1][0])}\")", "def find_port(addr, user):\n home = pwd.getpwuid(os.getuid()).pw_dir\n for name in os.listdir('%s/.ssh/' % home):\n if name.startswith('unixpipe_%s@%s_' % (user, addr,)):\n return int(name.split('_')[2])", "def patch_ports(cls, pair):\n if pair[0] in ('remote_port', 'local_port'):\n return pair[0], pair[1] and int(pair[1]) or None\n return pair", "def free_port():\n\n with socket.socket() as sock:\n sock.bind(('', 0))\n return sock.getsockname()[1]", "def verify_port(port_def):\n if re.match(r\"^\\d+\\+?$\", str(port_def)):\n port_def = str(port_def), str(port_def).replace(\"+\", \"\")\n elif re.match(r\"^(\\d+\\+?):(\\d+)$\", str(port_def)):\n port_def = tuple(re.findall(\"(\\d+\\+?):(\\d+)\", str(port_def))[0])\n else:\n raise ValueError(f\"invalid port def '{port_def}'\")\n return port_def", "def GetApacheSchemePortFromListen():\n match = MatchPattern(\n GEHTTPD_CONF_PATH,\n r\"^Listen\\s+(?:\\[?([a-fA-F\\d\\.\\:]+)\\]?:)?(\\d+)(?:\\s+(https?))?\")\n if match:\n (scheme, port) = (match[2], match[1])\n assert port\n if not 
scheme:\n scheme = \"https\" if port == \"443\" else \"http\"\n return (scheme, port)\n\n logging.error(\"Listen directive is not specified in gehttpd config.\")\n return None", "def head_port(self):\n return self.head_args.port[0] if self.head_args else None", "def get_unused_port():\n port, s = get_unused_port_and_socket()\n s.close()\n return port", "def _port(port):\n\n\tvalid_range = range(1, 65535 + 1)\n\n\ttry:\n\t\tport = int(port)\n\t\tif port not in valid_range:\n\t\t\traise argparse.ArgumentTypeError(\"Port must be 1-65535\")\n\t\treturn port\n\texcept ValueError:\n\t\traise argparse.ArgumentTypeError(\"Port must be 1-65535\")", "def _get_port(self):\n return self.__port", "def get_origin_port(self, origin):\n if origin.port is not None:\n # Return origin.port\n return origin.port\n # if origin.port doesn`t exists\n if origin.scheme == \"http\" or origin.scheme == \"ws\":\n # Default port return for http, ws\n return 80\n elif origin.scheme == \"https\" or origin.scheme == \"wss\":\n # Default port return for https, wss\n return 443\n else:\n return None", "def pick_unused_port(pid=None, portserver_address=None):\n return _pick_unused_port(pid, portserver_address)", "def port(self) -> Optional[pulumi.Input[float]]:\n return pulumi.get(self, \"port\")", "def port(self) -> Optional[pulumi.Input[float]]:\n return pulumi.get(self, \"port\")", "def get_port():\n return int(os.getenv(\"PORT\", \"7840\"))", "def get_free_port():\n s = socket.socket()\n s.bind(('', 0))\n _, port = s.getsockname()\n s.close()\n return port", "def port(self) -> int:", "def find_unused_port(family=socket.AF_INET, socktype=socket.SOCK_STREAM):\n tempsock = socket.socket(family, socktype)\n port = bind_port(tempsock)\n tempsock.close()\n del tempsock\n return port", "def port_number(self):\n return self._props[\"persistent_identifiers\"].get(self._primary_port_prop)", "def get_port(self):\n return self.port", "def port_for(self, service, port, protocol='tcp'):\n\n # Return the container port if we run in no Docker mode.\n if self._docker_allow_fallback:\n return port\n\n docker_service = self._docker_compose.get_service(service)\n container = docker_service.get_container()\n\n docker_port = '{}/{}'.format(port, protocol)\n ports = container.ports.get(docker_port)\n if ports is None or len(ports) != 1:\n raise ValueError(\n 'Could not detect port for \"%s:%d\".' 
% (service, port)\n )\n\n return int(ports[0]['HostPort'])", "def allowed_host_ports(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['PortRangeArgs']]]]:\n return pulumi.get(self, \"allowed_host_ports\")", "def get_service_connection_string(service):\n service = service.upper()\n raw_host_port = os.environ['%s_PORT' % service]\n # Remove leading tcp:// or similar\n host_port = raw_host_port.split(\"://\")[1]\n return host_port", "def _get_exposed_ports(debug_port):\n if not debug_port:\n return None\n\n return {\n # container port : host port\n debug_port: debug_port\n }", "def get_host(uri, token):\n if not uri.startswith('/api'):\n host, tls, port = SERVICE_MAP.get('docs')\n else:\n uri_layers = uri.split('/')\n if uri_layers[SERVICE] == 'inf':\n host, tls, port = SERVICE_MAP.get(uri_layers[SERVICE_SUBGROUP], NO_RECORD)\n else:\n # services like \"auth\" and \"link\" are their own group; only 'inf' has subgroups\n host, tls, port = SERVICE_MAP.get(uri_layers[SERVICE], NO_RECORD)\n if host == 'UNKNOWN':\n host = _user_ipam_server(token)\n return host, tls, port", "def get_unused_port(port):\n if port is None or port < 1024 or port > 49151:\n port = random.randint(1024, 49151)\n while True:\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n try:\n s.bind(('', port)) # Try to open port\n except socket.error as e:\n if e.errno is 98: # Errorno 98 means address already bound\n port += 1\n continue\n raise e\n s.close()\n return port" ]
[ "0.7247916", "0.6927607", "0.67452", "0.67267126", "0.65689474", "0.6426588", "0.621696", "0.6187966", "0.6167272", "0.61081445", "0.60974115", "0.6052857", "0.60080606", "0.5999279", "0.59521306", "0.59521306", "0.59521306", "0.59521306", "0.5936923", "0.5901868", "0.58454025", "0.58186406", "0.58152276", "0.58078086", "0.58078086", "0.58078086", "0.58078086", "0.58078086", "0.58078086", "0.58078086", "0.5780947", "0.5776101", "0.5773221", "0.5767731", "0.5761657", "0.5742191", "0.57347065", "0.5705291", "0.56989646", "0.5698249", "0.56748384", "0.5671997", "0.5671997", "0.5671997", "0.5671997", "0.5671997", "0.5671997", "0.5671997", "0.56365246", "0.56263644", "0.5580841", "0.55266386", "0.54913235", "0.5489899", "0.5478644", "0.54711884", "0.5452543", "0.5450608", "0.5434899", "0.542348", "0.542072", "0.5417742", "0.54080236", "0.54073775", "0.54038215", "0.53948057", "0.53948057", "0.53873503", "0.53807855", "0.5365258", "0.53571874", "0.5349749", "0.53446853", "0.5314028", "0.5308864", "0.52825373", "0.5279048", "0.5268128", "0.52642804", "0.5262257", "0.5253651", "0.52505374", "0.52466935", "0.5234676", "0.52341425", "0.523107", "0.52263814", "0.52263814", "0.5218913", "0.5212816", "0.52071947", "0.5204497", "0.5179188", "0.5177697", "0.51773554", "0.51761025", "0.5173818", "0.51708364", "0.51665306", "0.51642346" ]
0.806005
0
Remove anchor part and trailing index.html from URL.
def shorten_duplicate_content_url(url):
    if '#' in url:
        url = url.split('#', 1)[0]
    if url.endswith('index.html'):
        return url[:-10]
    if url.endswith('index.htm'):
        return url[:-9]
    return url
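Illustrative checks (hypothetical, added for clarity; not part of the original entry):

assert shorten_duplicate_content_url("http://a.example/docs/index.html#intro") == "http://a.example/docs/"
assert shorten_duplicate_content_url("http://a.example/docs/index.htm") == "http://a.example/docs/"
assert shorten_duplicate_content_url("http://a.example/docs/page.html") == "http://a.example/docs/page.html"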
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cleanUrl(url):\n\turl_clean = url.replace(' ','%20')\n\t\"\"\" add /index.html where necessary \"\"\"\n\tif (url[-1:]=='/'):\n\t\turl_clean += 'index.html'\n\telif (url[-5:].find('.') == -1):\n\t\t url_clean += '/index.html'\n\treturn url_clean", "def make_clean_url(url):\n return urlparse.urldefrag(url)[0]", "def fix_url_path(url: str) -> str:\n return url if url.endswith(\"/\") else url + \"/\"", "def clean_url(url: str) -> str:\n r = urlparse(url)\n parts = list(r)\n # Add a / to the end of the path if it isn't there\n if not parts[2].endswith(\"/\"):\n parts[2] += \"/\"\n return urlunparse(parts)", "def clean_url(app_server, base_path) -> str:\n if app_server.endswith('/'):\n base_url = f\"{app_server[:-1]}{base_path}\"\n else:\n base_url = f\"{app_server}/{base_path}\"\n return base_url", "def _clean_url(self, url):\n return \"\".join(url.split(\"?\")[:1])", "def normalize_url(url):\n if not url.startswith((\"git+\", \"hg+\")):\n return url\n return url[4:]", "def clean_url(url):\n return url[:url.find('?')]", "def test_drop_trailing_questionmark():\n assert normalize_url(\"http://example.com/?\") == \"http://example.com/\"\n assert normalize_url(\"http://example.com?\") == \"http://example.com/\"\n assert normalize_url(\"http://example.com/a?\") == \"http://example.com/a\"\n assert normalize_url(\"http://example.com/a/?\") == \"http://example.com/a\"", "def remove_last_part_of_url(category_url):\n return \"/\".join(category_url.split(\"/\")[:-1])", "def trim_repo_url(url):\n return url.replace(\".git\", \"\")", "def getFilteredUrl(self, url):\n url = url.split('#')[0]\n if url.startswith('/wiki'):\n return ('https://en.wikipedia.org' + url)\n if 'en.wikipedia.org/wiki/' not in url:\n return ('https://en.wikipedia.org/wiki' + url)\n return url", "def remove_trailing_version_from_href(href):\n parsed_url = urlparse.urlsplit(href)\n url_parts = parsed_url.path.rsplit('/', 1)\n\n # NOTE: this should match vX.X or vX\n expression = re.compile(r'^v([0-9]+|[0-9]+\\.[0-9]+)(/.*|$)')\n if not expression.match(url_parts.pop()):\n LOG.debug('href %s does not contain version', href)\n raise ValueError(_('href %s does not contain version') % href)\n\n new_path = url_join(*url_parts)\n parsed_url = list(parsed_url)\n parsed_url[2] = new_path\n return urlparse.urlunsplit(parsed_url)", "def split_url(url): # Change the url so it can be iterated\n url = url.split('index') \n url = url[0] + 'page-1.html'\n url = url.split('page-')\n url = f\"{url[0]}page-1.html\"\n return url", "def strip(url):\r\n split = list(urlsplit(url))\r\n split[4]=''\r\n return urlunsplit(split)", "def url_clean(path):\n return path[path.find('/'+settings.URL_ADMIN_SEP):]", "def test_remove_extra_slash():\n # TODO: Should we actually do this?\n # TODO: See https://webmasters.stackexchange.com/questions/8354/what-does-the-double-slash-mean-in-urls/8381#8381\n assert (normalize_url(\"http://www.example.com/foo//bar.html\") ==\n \"http://www.example.com/foo/bar.html\")\n assert(normalize_url(\"http://example.com///abc\") ==\n \"http://example.com/abc\")", "def _absurl(fragment):\r\n root = settings.MEDIA_URL\r\n root += root[-1:] != '/' and '/' or ''\r\n return urlparse.urljoin(root, fragment)", "def clean_url(url):\n scheme, netloc, path, query, fragment = url_parse.urlsplit(url)\n path = url_parse.quote(path)\n url = url_parse.urlunsplit((scheme, netloc, path, query, fragment))\n return url", "def _proper_url(self, url):\n if self.base_url not in url:\n url = self.base_url + url\n url = re.sub(r'(?<!https:)//', '/', 
url)\n if not url.endswith('/') and '?' not in url:\n url = url + '/'\n if url.endswith('?'):\n url = url[:-1]\n return url", "def url_removal(text):\n return re.sub(r'''(?i)\\b((?:https?://|www\\d{0,3}[.]|[a-z0-9.\\-]+[.][a-z]\\\n {2,4}/)(?:[^\\s()<>]+|\\(([^\\s()<>]+|(\\([^\\s()<>]+\\)))*\\))+(?:\\(([^\\s()<>]\\\n +|(\\([^\\s()<>]+\\)))*\\)|[^\\s`!()\\[\\]{};:'\".,<>?«»“”‘’]))''', '', text)", "def remove_url(text):\r\n url = re.sub('https?://[A-Za-z0-9./]+', '', text)\r\n return url", "def _strip_version(endpoint):\n if endpoint.endswith('/'):\n endpoint = endpoint[:-1]\n url_bits = endpoint.split('/')\n if re.match(r'v\\d+\\.?\\d*', url_bits[-1]):\n endpoint = '/'.join(url_bits[:-1])\n return endpoint", "def strip_dot_git(url: str) -> str:\n \"\"\"Strip trailing .git\"\"\"\n return url[: -len(\".git\")] if url.endswith(\".git\") else url", "def test_drop_fragments():\n assert (normalize_url(\"http://example.com/a?b=1#frag\")\n == \"http://example.com/a?b=1\")\n assert (normalize_url(\"http://example.com/a?b=1#frag\", drop_fragments=False)\n == \"http://example.com/a?b=1#frag\")", "def uncanonicalize(self, url):\n pass", "def remove_urls(self, text):\n return re.sub(r'http.?://[^\\s]+[\\s]?', '', text)", "def removeURL(text):\n text = re.sub('((www\\.[^\\s]+)|(https?://[^\\s]+))','',text)\n text = re.sub(r'#([^\\s]+)', r'\\1', text)\n return text", "def remove_urls(text):\n pass", "def cleanUri(uri):\n if not uri.startswith(\"/\") and not uri.startswith('http'):\n uri = \"/\" + uri\n\n if 'http://' in uri or 'https://' in uri:\n uri = uri.split('://')[0] + '://' + \\\n uri.split('://')[1].replace(\"//\", \"/\")\n else:\n uri = uri.replace(\"//\", \"/\")\n\n if uri.endswith(\"/\"):\n uri = uri[:-1]\n\n return uri", "def pruneURL(url):\n match = URL_GROUPER.match(url)\n if match is None:\n return url\n else:\n url_parts = match.groupdict()\n protocol = url_parts['protocol']\n if protocol is None:\n protocol = ''\n tail = url_parts['tail']\n if tail is None:\n tail = ''\n return \"%s://%s\" % (protocol, tail)", "def sanitizeUrl(url):\n return url.split('?')[0]", "def strip_beginning_slashes(url):\n find = re.search(r\"^/+\", url)\n if find:\n url = re.sub(find.group(0), \"\", url)\n return url", "def get_page_url(href):\n # type: (str) -> str\n return \"{}{}\".format(JAFC_URI, href.lstrip(\"/\"))", "def cut_url(url):\n if len(url) > 50:\n return f\"...{url[-45:]}\"\n return url", "def noTrailingSlash(path):\n return path.split('/')[0]", "def shorten_url():\n return rh.shorten_url(request)", "def youtube_fix_url(url):\n p = urlparse.urlparse(url)\n path = p.path\n if '&' in p.path:\n # sign of a malformed path\n path = re.sub('\\&.+', '', p.path)\n return urlparse.urlunparse((p.scheme, p.netloc, path, p.params, p.query, p.fragment))", "def url_truncate(url):\n \n url_tuple = urlparse.urlparse(url)\n return url_tuple[0] + '://' + url_tuple[1]", "def _fix_url(self, curr_url, rel):\n\n rel_l = rel.lower()\n if rel_l.startswith(\"http://\") or rel_l.startswith(\"https://\"):\n curr_url, rel = rel, \"\"\n\n # compute the new url based on import\n curr_url = urlparse.urldefrag(curr_url)[0]\n parsed_url = urlparse.urlparse(curr_url)\n return urlparse.urljoin(parsed_url.geturl(), rel)", "def strip_path(self):\n return self.path.replace('/', '')", "def test_remove_dot_segments():\n assert (normalize_url(\"http://www.example.com/../a/b/../c/./d.html\") ==\n \"http://www.example.com/a/c/d.html\")", "def _remove_urls(text: str) -> str:\n pattern = 
r'(http:\\/\\/www\\.|https:\\/\\/www\\.|http:\\/\\/|https:\\/\\/)?[a-z0-9]+([\\-\\.]{1}[a-z0-9]+)*\\.[a-z]{2,5}(:[0-9]{1,5})?(\\/.*)?'\n\n return re.sub(pattern, '', text, flags=re.MULTILINE)", "def relative_uri(base, to):\n if to.startswith(SEP):\n return to\n b2 = base.split(SEP)\n t2 = to.split(SEP)\n # remove common segments (except the last segment)\n for x, y in zip(b2[:-1], t2[:-1]):\n if x != y:\n break\n b2.pop(0)\n t2.pop(0)\n if b2 == t2:\n # Special case: relative_uri('f/index.html','f/index.html')\n # returns '', not 'index.html'\n return ''\n if len(b2) == 1 and t2 == ['']:\n # Special case: relative_uri('f/index.html','f/') should\n # return './', not ''\n return '.' + SEP\n return ('..' + SEP) * (len(b2)-1) + SEP.join(t2)", "def url_fix_common_typos(url):\n if url.startswith(\"http//\"):\n url = \"http://\" + url[6:]\n elif url.startswith(\"https//\"):\n url = \"https://\" + url[7:]\n return url", "def normalize(seed_url, link):\n link, _ = urldefrag(link) # remove hash to avoid duplicates\n return urljoin(seed_url, link)", "def url_at_remove(text):\n text = re.sub(r'#\\w+|@\\w+',' ',text)\n # Remove url:\n return(re.sub(r'\\bhttps?:\\/\\/.*[\\r\\n]*', ' ', text, flags=re.MULTILINE))", "def base_url_path(self):\n path = urlsplit(self.base_url())[2]\n if path.endswith(\"/\"):\n path = path[:-1]\n return path", "def _graceful_relative_url(base_url, url):\n if url == base_url:\n return ''\n base_prefix = '%s://%s' % urlparse.urlparse(base_url or '')[0:2]\n url_prefix = '%s://%s' % urlparse.urlparse(url or '')[0:2]\n if base_prefix == url_prefix and url_prefix != '://':\n return url[len(url_prefix):]\n return url", "def good_url(a, start_url):\n for i in range(len(a)):\n par=a[i].find('?')\n if par!=-1:\n a[i]=a[i][:par]\n anc=a[i].find('#')\n if anc!=-1:\n a[i]=a[i][:anc]\n if a[i]!='' and a[i][0]=='/':\n a[i]=str(start_url)+a[i][1:i]\n #print(a[i]) \n return list(set(a))", "def clean_url(url):\n o = urlsplit(url)\n return \"{scheme}://{netloc}{path}\".format(\n scheme=o[0], netloc=o[1], path=o[2],\n )", "def fix_apiroot(root):\n if '://' in root:\n return root\n if ('/' not in root) or ('.' not in root.split('/')[0]):\n root = \"www.pennapps.com/\" + root\n return \"http://%s\" % root", "def clean_url(url):\n\n if url is None:\n return None\n\n if '??' 
in url:\n url = url.split('??')[0]\n\n if url.endswith('?'):\n url = url[:-1]\n\n if '`' in url:\n url = url.replace('`', '')\n\n return url", "def _reverse_with_slash(url_name, course_key):\r\n ajax_url = _reverse_without_slash(url_name, course_key)\r\n if not ajax_url.endswith('/'):\r\n ajax_url += '/'\r\n return ajax_url", "def truncate_url(url):\n url = parse.unquote(url)\n if len(url) <= 60 :\n return url\n url = url[:-1] if url.endswith(\"/\") else url\n url = url.split(\"//\",1)[1].split(\"/\")\n url = \"%s/.../%s\"%(url[0],url[-1])\n return url[:60]+\"...\" if len(url) > 60 else url", "def get_absolute_url(self):\r\n return \"{0}page1/\".format(self.get_short_url())", "def clean_url_path(markup):\n\n soup = BeautifulSoup(markup, \"html.parser\")\n elements = soup.find_all('a')\n\n for url in elements:\n url_href = url.get('href')\n if url.string:\n url_string = url.string.replace('\\n', '').replace(' ', '')\n\n # Only clean links where the URL matches the string, without custom text inside.\n if url_string == url_href:\n url_parse = urllib.parse.urlparse(url_href)\n path = '{0}{1}'.format(url_parse.netloc.replace(\"www.\", \"\"), url_parse.path)\n url.string.replace_with(path)\n return soup.prettify(soup.original_encoding)", "def normalize_url(url: str) -> str:\n parts = urlparse(url)\n\n path = quote(parts.path)\n while '//' in path:\n path = path.replace(\"//\", \"/\")\n\n return urlunparse(parts._replace(path=path))", "def remove_id(url):\n u = urlparse(url)\n query = parse_qs(u.query, keep_blank_values=True)\n query.pop(\"eo_id\", None)\n u = u._replace(query=urlencode(query, True))\n return urlunparse(u)", "def _fix_url(url):\n\n if not url.startswith('http'):\n url = 'http://' + url\n\n return url", "def normalize_url(self, url):\n pass", "def __add_main_site(self, href):\n if self.main_url[8:] not in href:\n return self.main_url + href\n else:\n return href", "def remove_urls(self, doc):\n doc = re.sub(\n r'(?i)\\b((?:https?://|www\\d{0,3}[.]|[a-z0-9.\\-]+[.][a-z]{2,4}/)'\n r'(?:[^\\s()<>]+|\\(([^\\s()<>]+|'\n r'(\\([^\\s()<>]+\\)))*\\))+(?:\\(([^\\s()<>]+|(\\([^\\s()<>]+\\)))*\\)|'\n r'[^\\s`!()\\[\\]{};:\\'\".,<>]))',\n '',\n doc)\n return ' '.join(doc.split())", "def filter_url_parse_full_links(match):\n url = html.unescape(match.group(1))\n url = html.escape(url)\n punctuation = match.group(2)\n caption = filter_url_trim(url, filter_url_length)\n return '<a href=\"' + url + '\">' + caption + '</a>' + punctuation", "def generate_clean_url(self):\n\n\t\tspaces_replaced = self.title.replace(' ', '-')\n\t\tpattern = re.compile('[^a-zA-Z0-9-]+')\n\t\tstripped = pattern.sub('', spaces_replaced)\n\t\tself.cleanurl = '-'.join([str(self.pid), stripped.lower()])", "def fix_slash(environ, wantslash):\n from colubrid.exceptions import HttpMoved\n #FIXME\n # argh. never did something that supid\n # find a better solution for that problem.\n url = quote(environ.get('SCRIPT_NAME', ''))\n url += quote(environ.get('PATH_INFO', ''))\n query = environ.get('QUERY_STRING', '')\n oldurl = query and ('%s?%s' % (url, query)) or url\n \n if oldurl and oldurl != '/':\n if url.endswith('/'):\n if not wantslash:\n url = url[:-1]\n else:\n if wantslash:\n url += '/'\n \n newurl = query and ('%s?%s' % (url, query)) or url\n if oldurl != newurl:\n raise HttpMoved(newurl)", "def _remove_path_head(path, head):\n # Bugfix 13 Oct 2017: path.replace(head,'') will remove head from everywhere in the path. This\n # is especially problematic if the user gives the local dir as \".\" (i.e. 
the current directory)\n # because it will remove periods from filenames\n\n # Find the head at the beginning of the path only. Escape any characters in head that have special\n # meaning in a regular expression (e.g. \".\" means \"any character\")\n head_regex = '^{}'.format(re.escape(head))\n path = re.sub(head_regex, '', path)\n if path.startswith('/'):\n path = path[1:]\n\n return path", "def _relativize(base: str, current: str) -> str:\n if current.startswith(base):\n return current.replace(base, \"\", 1)\n return current", "def fix_url(url, root):\n if root in url:\n if validate_url(url):\n return url\n else:\n if not url.endswith('/'):\n if validate_url(url + '/'):\n return url + '/'\n if url.startswith('https://'):\n if validate_url(url[:4] + url[5:]):\n return url[:4] + url[5:]\n else:\n return None\n else:\n return None\n else:\n parsed = get_root_domain(url)\n if parsed == '':\n if url.startswith('/'): # '/link'\n if validate_url(root[:-1] + url):\n return root[:-1] + url\n else:\n return None\n else: # 'link'\n if url.startswith('./'): # '/link'\n if validate_url(root + url[2:]):\n return root[:-1] + url\n else:\n return None\n elif validate_url(root + url):\n return root + url\n else:\n return None\n else:\n return None", "def remove_urls(text):\n text = re.sub('(?P<url>https?://[^\\s]+)', '', text)\n return text", "def build_url(app, request):\n return '%s%s' % (app.url_root, request.path[1:])", "def build_url(app, request):\n return '%s%s' % (app.url_root, request.path[1:])", "def transform_url_without_encode(result):\n import re\n result = re.sub('//', '/', result)\n result = re.sub('/', '//', result, count=1)\n return result", "def normalize_url_without_bewit(url, bewit):\n bewit_pos = url.find('bewit=')\n # Chop off the last character before 'bewit=' which is either a ? 
or a &\n bewit_pos -= 1\n bewit_end = bewit_pos + len(\"bewit=\" + bewit) + 1\n o_url = ''.join([url[0:bewit_pos], url[bewit_end:]])\n return o_url", "def test_remove_default_port():\n assert (normalize_url(\"http://www.example.com:80/bar.html\") ==\n \"http://www.example.com/bar.html\")\n assert (normalize_url(\"HTTPS://example.com:443/abc/\") ==\n \"https://example.com/abc\")", "def _remove_urls(self, text: str) -> str:\n pattern = r\"http\\S+\"\n return re.sub(pattern, \" \", str(text))", "def test_normalized_urls():\n assert normalize_url(\"http://example.com/\") == \"http://example.com/\"", "def remove_URL(sample):\n return re.sub(r\"http\\S+\", \"\", sample)", "def _construct_url(parts):\n results = []\n last_idx = len(parts) - 1\n for n, part in enumerate(parts):\n if n > 0:\n part = part.lstrip('/')\n if n < last_idx:\n part = part.rstrip('/')\n if part:\n results.append(part)\n return '/'.join(results)", "def _sanitize_url_prefix(url_prefix: Optional[str]) -> str:\n if not url_prefix:\n return ''\n\n while url_prefix.startswith('//'):\n url_prefix = url_prefix[1:]\n while url_prefix.endswith('/'):\n url_prefix = url_prefix[:-1]\n\n if url_prefix == '':\n return ''\n\n if url_prefix.startswith('/') \\\n or url_prefix.startswith('http://') \\\n or url_prefix.startswith('https://'):\n return url_prefix\n\n return '/' + url_prefix", "def get_base_url(htmlpage): \n for element in htmlpage.parsed_body:\n if getattr(element, \"tag\", None) == \"base\":\n return element.attributes.get(\"href\") or htmlpage.url\n return htmlpage.url", "def redirect_trailing(view):\n IGNORE_CHARS = \",./\"\n\n def wrapped(request, *args, **kwargs):\n if request.META['PATH_INFO'][-1] in IGNORE_CHARS:\n return HttpResponseRedirect(request.META['PATH_INFO'].rstrip(IGNORE_CHARS))\n return view(request, *args, **kwargs)\n return wrapped", "def host_cleanup(host):\n if not host.startswith('https://'):\n host = 'https://' + host # Add schema\n host = strip_end(host, '/')\n host = strip_end(host, '/api/v1')\n host = strip_end(host, '/')\n return host", "def remove_url(text):\n return re.sub(r'http\\S+', ' ', text)", "def absolute_url(url, static):\n if url.startswith('#'):\n return url\n\n i = url.find(':')\n if ((i == -1) or not url[:i].isalpha()) and url and (url[0] != '/'):\n # If this is a relative URL, it's relative to the statics directory\n if not static.endswith('/') and not url.startswith('/'):\n url = static + '/' + url\n else:\n url = static + url\n\n return url", "def full_url(self, path):\n if path[0] == '/':\n path = path[1:]\n return urljoin(self.absolute_root, path)", "def remove_www(hostname: str) -> str:\n if hostname.startswith(\"www.\"):\n return hostname[4:]\n return hostname", "def remove_url(txt):\n\n return \" \".join(re.sub(\"([^0-9A-Za-z \\t])|(\\w+:\\/\\/\\S+)\", \"\", txt).split())", "def strip_src_url(self, url: str) -> str:\n strip_url = re.search(self.__URL_BASE_PATTERN, url)\n\n if not strip_url:\n raise ValueError(\"url `%s` does not match handler's base pattern\" % url)\n\n # noinspection PyUnresolvedReferences\n return strip_url[0]", "def concat_url(endpoint, url):\n u = \"%s/%s\" % (endpoint.rstrip(\"/\"), url.strip(\"/\"))\n return u.rstrip('/')", "def normalize_link(link, split_url):\n url = link.get(\"href\", None)\n if not url:\n return None\n protocol = split_url.scheme + \"://\"\n netloc = split_url.netloc\n final_url = \"\"\n if not protocol in url: # Protocol doesn't exists, lets make sure that gets added.\n final_url += protocol\n if not netloc in url:\n final_url += 
netloc + \"/\"\n\n if url.startswith(\"/\"):\n final_url += url[1:]\n else:\n final_url += url\n\n return final_url", "def ajax_url(url):\n\n hashbang_index = url.find('#!')\n if hashbang_index != -1:\n base = url[:hashbang_index]\n joiner = '?' if '?' not in base else '&'\n url = ''.join((base, joiner, '_escaped_fragment_=',\n urllib.parse.quote(url[hashbang_index+2:],\n '!\"$\\'()*,/:;<=>?@[\\\\]^`{|}~')))\n return url", "def get_short_url_base():", "def response_url():\n current_url = urlparse(cherrypy.url()) # gets current location on the server\n try:\n location = cherrypy.request.json[\"location\"]\n if parse_qs(urlparse(location['href']).query)['from']: # get from query href\n cleaned_url = parse_qs(urlparse(location['href']).query)['from'][0]\n if not cleaned_url.__contains__(\n current_url.netloc): # check net location to avoid cross site script attacks\n # No longer need to add projects to root url, so removing \n # cleaned_url = \"https://\" + current_url.netloc + \"/projects\"\n cleaned_url = \"https://\" + current_url.netloc\n else:\n # No longer need to add projects to root url, so removing \n # cleaned_url = \"https://\" + current_url.netloc + \"/projects\"\n cleaned_url = \"https://\" + current_url.netloc\n except Exception as e:\n # cherrypy.log.error(\"no location provided setting target to /projects\")\n # No longer need to add projects to root url, so removing \n # cleaned_url = \"https://\" + current_url.netloc + \"/projects\"\n cleaned_url = \"https://\" + current_url.netloc\n return cleaned_url", "def url_fix(s, charset='utf-8'):\n if isinstance(s, unicode):\n s = s.encode(charset, 'ignore')\n scheme, netloc, path, qs, anchor = urlparse.urlsplit(s)\n path = urllib.quote(path, '/%')\n qs = urllib.quote_plus(qs, ':&=')\n return urlparse.urlunsplit((scheme, netloc, path, qs, anchor))", "def test_remove_empty_port():\n assert (normalize_url(\"http://www.example.com:/\") ==\n \"http://www.example.com/\")", "def test_strip_leading_trailing_whitespace():\n assert normalize_url(\" http://example.com \") == \"http://example.com/\"\n assert normalize_url(\"http://example.com/a \") == \"http://example.com/a\"\n assert normalize_url(\" http://example.com/\") == \"http://example.com/\"", "def remove_url(tweet):\n return re.sub(r\"http\\S+\", \"URL\", tweet)", "def remove_url_patterns(filename, pattern):\n name = get_name_from_filename(filename)\n ext = get_extension_from_filename(filename)\n repl = \" \"\n new_name = re.sub(pattern, repl, name)\n\n return new_name + ext", "def clean_url(url):\n return re.sub(';jsessionid=.+\\?', '?', url).strip()" ]
[ "0.7404366", "0.62768435", "0.61985534", "0.60928565", "0.6062319", "0.606141", "0.60366935", "0.60153073", "0.59648734", "0.5918957", "0.58407116", "0.58144933", "0.57838774", "0.5777596", "0.57617307", "0.57545966", "0.5725759", "0.5723972", "0.5707598", "0.5680764", "0.5651724", "0.5637831", "0.5620071", "0.55914253", "0.5549961", "0.55300796", "0.5507265", "0.5498787", "0.54730636", "0.54724246", "0.54710317", "0.5461788", "0.543566", "0.54265594", "0.54210484", "0.5389245", "0.5360264", "0.5356857", "0.5355255", "0.5338679", "0.5320547", "0.5320205", "0.5318329", "0.5317882", "0.5313016", "0.5300386", "0.5292356", "0.5287484", "0.5285649", "0.52654135", "0.5264042", "0.525948", "0.5247818", "0.5244642", "0.52392954", "0.5238316", "0.5229985", "0.5224195", "0.5222333", "0.52022463", "0.519785", "0.5192147", "0.51912516", "0.518806", "0.5187327", "0.51532584", "0.51471317", "0.5133887", "0.51233184", "0.5123218", "0.5122623", "0.5122623", "0.51218945", "0.51035887", "0.5101461", "0.50983393", "0.5089397", "0.5083648", "0.5081105", "0.5076713", "0.50731355", "0.50719833", "0.5069811", "0.50603944", "0.50570846", "0.5055339", "0.5037267", "0.50352246", "0.50312275", "0.50294656", "0.50256455", "0.5016155", "0.50132746", "0.50102377", "0.50091857", "0.5008136", "0.500639", "0.5003066", "0.50008404", "0.4997176" ]
0.6469476
1
Check if both URLs are allowed to point to the same content.
def is_duplicate_content_url(url1, url2):
    if url1 == url2:
        return True
    if url2 in url1:
        url1 = shorten_duplicate_content_url(url1)
        if not url2.endswith('/') and url1.endswith('/'):
            url2 += '/'
        return url1 == url2
    if url1 in url2:
        url2 = shorten_duplicate_content_url(url2)
        if not url1.endswith('/') and url2.endswith('/'):
            url1 += '/'
        return url1 == url2
    return False
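Illustrative checks (hypothetical, added for clarity; they assume shorten_duplicate_content_url from the previous entry is in scope):

assert is_duplicate_content_url("http://a.example/docs/", "http://a.example/docs/index.html")
assert is_duplicate_content_url("http://a.example", "http://a.example/")
assert not is_duplicate_content_url("http://a.example/x", "http://a.example/y")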
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def urlEqual(url_1, url_2):\n parse_result_1 = urlparse(url_1)\n parse_result_2 = urlparse(url_2)\n\n return (parse_result_1[:4] == parse_result_2[:4] and\n parse_qs(parse_result_1[5]) == parse_qs(parse_result_2[5]))", "def same_origin(url1, url2):\n p1, p2 = urlparse.urlparse(url1), urlparse.urlparse(url2)\n return (p1.scheme, p1.hostname, p1.port) == (p2.scheme, p2.hostname, p2.port)", "def check_if_same_host(host, url):\n # print '\\nchecking same origin:', host, get_host_name(url)\n\n if host == get_host_name(url):\n return True\n return False", "def assertUrlsEqual(self, url, other_url=None):\n if other_url is None:\n other_url = self.current_url\n url1 = furl(url)\n url2 = furl(other_url)\n self.assertEqual(url1.path, url2.path)\n self.assertEqual(url1.query, url2.query)\n if url1.netloc and url2.netloc:\n self.assertEqual(url1.netloc, url2.netloc)\n if url1.scheme and url2.scheme:\n self.assertEqual(url1.scheme, url2.scheme)", "def _urlcheck(self):\n if (self['.managerhost'] and self['.settingurl'] and self['.guid']):\n return True\n else:\n return False", "def are_tabs_urls_equal():\n old_tab, new_tab = browsers.get_browser().windows()\n return old_tab.url == new_tab.url", "def check_url_invalidity(self) -> bool:\n validate = URLValidator()\n try:\n validate(self.args.url)\n return False\n except ValidationError:\n return True", "def test_identical_urls(self):\n tweet_object = self.load_tweet('identical_urls')\n tweet_text = self.api.html_for_tweet(tweet_object)\n self.assertEqual(tweet_text,\n u'Use Cases, Trials and Making 5G a Reality <a href=\"https://t.co/W0uArTMk9N\" class=\"twython-url\">buff.ly/2sEhrgO</a> #5G #innovation via @5GWorldSeries <a href=\"https://t.co/W0uArTMk9N\" class=\"twython-url\">buff.ly/2sEhrgO</a>')", "def is_safe_url(target):\n ref_url = urlparse(request.host_url)\n test_url = urlparse(urljoin(request.host_url, target))\n return test_url.scheme in ('http', 'https') and \\\n ref_url.netloc == test_url.netloc", "def test_not_on_same_domain(self):\n self.assertFalse(on_same_domain(\n \"https://google.com\",\n \"https://google.goggle.com/google.com/google\"\n ))", "def _is_request_to_token_url(self, request):\n if not self.token_url:\n return False\n\n if self.token_url == request.path:\n return True\n\n request.match(self.token_url)\n\n if request.matchdict:\n return True\n\n return False", "def is_same_domain(url1, url2):\n return tldextract.extract(url1).domain == tldextract.extract(url2).domain", "def check_url_format(self):\r\n m = re.match(\"^http://www.tesco.com/direct/[0-9a-zA-Z-]+/[0-9-]+\\.prd$\", self.product_page_url)\r\n n = re.match(\"^http://www.tesco.com/.*$\", self.product_page_url)\r\n return (not not m) or (not not n)", "def is_safe_url(target: str) -> bool:\n ref_url = urlparse(request.host_url)\n test_url = urlparse(urljoin(request.host_url, target))\n return test_url.scheme in ('http', 'https') and ref_url.netloc == test_url.netloc", "def isSameSiteSE( se1, se2 ):\n if se1 == se2:\n return S_OK( True )\n\n result = getSitesForSE( se1 )\n if not result['OK']:\n return result\n sites1 = result['Value']\n result = getSitesForSE( se2 )\n if not result['OK']:\n return result\n sites2 = result['Value']\n\n for site in sites1:\n if site in sites2:\n return S_OK( True )\n\n return S_OK( False )", "def _custom_request_matcher(r1, r2):\n if r1.method != r2.method:\n return False\n\n url1 = urlparse(r1.uri)\n url2 = urlparse(r2.uri)\n\n if url1.path != url2.path:\n return False\n\n q1 = parse_qs(url1.query)\n q2 = parse_qs(url2.query)\n 
shared_keys = set(q1.keys()).intersection(set(q2.keys()))\n\n if len(shared_keys) != len(q1) or len(shared_keys) != len(q2):\n return False\n\n for key in shared_keys:\n if q1[key][0].lower() != q2[key][0].lower():\n return False\n\n return True", "def is_same_domain(url1, url2):\r\n url1 = urlparse(url1)\r\n url2 = urlparse(url2)\r\n return url1.netloc == url2.netloc", "def is_same_domain(url1, url2):\n url1 = urlparse(url1)\n url2 = urlparse(url2)\n return url1.netloc == url2.netloc", "def same_domain(url1, url2):\n return urlparse.urlparse(url1).netloc == urlparse.urlparse(url2).netloc", "def __eq__(self, other):\n if type(other) != URI:\n return False\n return (self._scheme == other._scheme \n and self._host == other._host \n and self._port == other._port \n and self._path == other._path\n and self._query == other._query\n and self._isRegularURI == other._isRegularURI)", "def test_on_same_domain(self):\n self.assertTrue(on_same_domain(\n \"https://google.com/a/b\",\n \"http://sub-domain.google.com?time=0400\"\n ))", "def same_website(url_rec, u_rec):\n if isinstance(url_rec, string_types):\n url_rec = urlparse(url_rec)\n if isinstance(u_rec, string_types):\n u_rec = urlparse(u_rec)\n return (url_rec.netloc == u_rec.netloc)\n # todo: collect more of sample cases.\n # disabled below check while working on ratholeradio, since links\n # could go to the parent and that is ok. Figure out when it was\n # desired not to go to the parent -- we might need explicit option\n # and u_rec.path.startswith(url_rec.path)):", "def contains_redirect(content, _url):\n frame_url = contains_frame_redirect(content) if (len(content) < 10000000) else False\n if frame_url:\n debug(\"frame_url: {}\".format(frame_url))\n return frame_url\n\n meta_redir = contains_special_redirect(content, _url)\n if meta_redir:\n debug(\"metaredir: {}\".format(meta_redir))\n return meta_redir\n\n return False", "def test_duplicate_with_url(self):\r\n # Load up base course and verify it is available\r\n call_command('import', self.content_dir, self.good_dir)\r\n store = modulestore()\r\n self.assertIsNotNone(store.get_course(self.BASE_COURSE_KEY))\r\n\r\n # Now load up duped course and verify it doesn't load\r\n call_command('import', self.content_dir, self.dupe_dir)\r\n self.assertIsNone(store.get_course(self.DIFF_KEY))", "def validation2(fileContent1, fileContent2):\n \n return fileContent1[constants.header][constants.headerTime] == fileContent2[constants.header][constants.headerTime] and fileContent1[constants.header][constants.headerDay] == fileContent2[constants.header][constants.headerDay] and fileContent1[constants.header][constants.company] == fileContent2[constants.header][constants.company]", "def check_url_format(self):\n\n m = re.match(r\"^http://www\\.flipkart\\.com/.*/p/.*$\", self.product_page_url)\n\n return not not m", "def __eq__(self, other: Any) -> bool:\n return isinstance(other, URI) and str(self) == str(other)", "def __verify(self, href):\n # change main url to avoid mistakes with http ou https\n main = self.main_url.replace('https://', '').replace('http://', '')\n forbiden = {\"#\", 'None'} # forbidden possible urls\n if (href is None) or (href in forbiden):\n return False\n for item in ['tel:', 'mailto:', 'javascript:']:\n if item in href: # verify if is a link to telephone, e-mail or javascript\n return False\n if main in href and (\"/checkout/cart/add\" in href or \"/checkout/#/cart\" in href):\n return False # prevents a purchase from being made\n elif main in href or (main not in href and href[:4] != 
\"http\"):\n return True # possible case of a valid link\n else:\n return False # any other link is not valid", "def is_directly_updatable(credentials: Credentials) -> bool:\n if credentials.base_url == QE_URL:\n return True\n\n if credentials.base_url in (QCONSOLE_URL, QE2_URL, QCONSOLE2_URL):\n if credentials.base_url == credentials.url:\n return True\n\n return False", "def match(self, url):\n if self.is_global:\n return True\n\n # For easy comparison, we strip leading and trailing slashes,\n # and then split both self.url and the supplied URL on\n # slashes, to get two lists of path components we can compare.\n self_bits = self.url.strip(\"/\").split(\"/\")\n url_bits = url.strip(\"/\").split(\"/\")\n\n # If self.url produced a longer list of path components than\n # the supplied URL, it can't be a match.\n if len(self_bits) > len(url_bits):\n return False\n\n return self_bits == url_bits[: len(self_bits)]", "def test_url_tag(self):\n # Sanity check: do two different pages give different urls?\n content = \"-url1-{% url 'comicsite.views.page' '\"+self.testproject.short_name+\"' 'testurlfakepage1' %}-endurl1-\"\n content += \"-url2-{% url 'comicsite.views.page' '\"+self.testproject.short_name+\"' 'testurlfakepage2' %}-endurl2-\" \n urlpage = create_page_in_admin(self.testproject,\"testurltagpage\",content)\n \n # SUBDOMAIN_IS_PROJECTNAME affects the way urls are rendered\n with self.settings(SUBDOMAIN_IS_PROJECTNAME = False): \n response = self._test_page_can_be_viewed(self.signedup_user,urlpage) \n url1 = find_text_between('-url1-','-endurl1',response.content)\n url2 = find_text_between('-url2-','-endurl2',response.content) \n self.assertTrue(url1 != url2,\"With SUBDOMAIN_IS_PROJECTNAME = False\"\n \" URL tag gave the same url for two different \"\n \"pages. Both 'testurlfakepage1' and \"\n \"'testurlfakepage1' got url '%s'\" % url1)\n \n \n with self.settings(SUBDOMAIN_IS_PROJECTNAME = True):\n response = self._test_page_can_be_viewed(self.signedup_user,urlpage) \n url1 = find_text_between('-url1-','-endurl1',response.content)\n url2 = find_text_between('-url2-','-endurl2',response.content)\n \n self.assertTrue(url1 != url2,\"With SUBDOMAIN_IS_PROJECTNAME = True\"\n \" URL tag gave the same url for two different \"\n \"pages. Both 'testurlfakepage1' and \"\n \"'testurlfakepage1' got url '%s'\" % url1)", "def verify(self):\n if self.geturl():\n return True\n return False", "def canShare(self):\n return False", "def is_exact_superset_of(self, other):\n # If this is a string of some kind, convert it to a URL.\n if isinstance(other, (six.text_type, six.binary_type)):\n other = self.__class__(other)\n\n # Type check!\n if not hasattr(other, 'qs') or not hasattr(other, 'uri'):\n raise TypeError('Cannot compare URL with %s.' 
%\n other.__class__.__name__)\n\n # If the URIs do not exactly match, this is not an exact superset.\n if self.uri != other.uri:\n return False\n\n # If the methods do not match, this is not an exact superset.\n if self.method.upper() != other.method.upper():\n return False\n\n # Ensure that every key in `other.qs` exists in `self.qs` with\n # the same value; if any do not, immediately return False.\n for key in other.qs.keys():\n # If the key doesn't exist, return False.\n if key not in self.qs:\n return False\n\n # If the value does not match, return False.\n if self.qs[key] != other.qs[key]:\n return False\n\n # Okay, we must have an exact superset!\n return True", "def test_is_url(self):\n\n url = \"https://shadowrun.needs.management\"\n self.assertTrue(run(verification.is_url(url)))\n\n url = \"https:// www.google.com\"\n self.assertFalse(run(verification.is_url(url)))", "def clean_urls(self) -> bool:\n return pulumi.get(self, \"clean_urls\")", "def __eq__(self, other):\n contentsmatchfail = False\n equal = False\n for i in self.contents:\n if i in other.contents:\n pass\n else:\n contentsmatchfail = True\n for i in other.contents:\n if i in self.contents:\n pass\n else:\n contentsmatchfail = True\n if self.name == other.name and self.name == other.name and contentsmatchfail == False:\n equal = True\n return equal", "def assert_contents_equivalent(contents_a, contents_b):\n assert normalize_contents(contents_a) == normalize_contents(contents_b)", "def handle_same_length(self, a, b):\n found = False\n for i, j in zip(a, b):\n if i == j:\n continue\n elif found:\n return False # this case is the second found edit, thus return false\n else:\n found = True\n return True", "def __eq__(self, other):\n\t\ttry:\n\t\t\t# Attributes and properties to compare\n\t\t\tattrs = (\"url\", \"status_code\", \"reason\", \"headers\", \"content\")\n\t\t\tfor attr in attrs:\n\t\t\t\tif getattr(self, attr) != getattr(other, attr):\n\t\t\t\t\treturn False\n\t\texcept AttributeError:\n\t\t\treturn NotImplemented\n\t\telse:\n\t\t\treturn True", "def isEqualVirtualHost(first: VirtualHost, second: VirtualHost) -> bool:\n return (\n first.name == second.name and\n first.aliases == second.aliases and\n first.filep == second.filep and\n first.addrs == second.addrs and\n first.ssl == second.ssl and\n first.enabled == second.enabled and\n first.modmacro == second.modmacro and\n first.ancestor == second.ancestor\n )", "def test_slug_collisions_get_resolved(self):\n first_post = Post(title=\"slug?\")\n second_post = Post(title=\"slug!\")\n first_post.save()\n second_post.save()\n self.assertNotEquals(first_post.slug, second_post.slug)", "def is_compatible(self, other):\n return self.intervals == other.intervals and\\\n self.nonderived_directions == other.nonderived_directions", "def __ne__(self, other: 'FirstHref') -> bool:\n return not self == other", "def is_valid_for_downloading(base_url, asset_url):\n if not asset_url:\n return False\n base_netloc = urlsplit(base_url).netloc\n asset_netloc = urlsplit(asset_url).netloc\n return base_netloc == asset_netloc", "def _content_item_comparison_weak(item_a, item_b):\n if item_a is None or item_b is None:\n log.debug(\"Item is None\")\n return False\n\n return item_a.get_xml() == item_b.get_xml()", "def valid_url(self):\r\n if self.resolver:\r\n return True\r\n return False", "def same_chunk(self, wn1, wn2):\n if self.chunk_map[wn1] == self.chunk_map[wn2]:\n return True\n else:\n return False", "def is_url_requirement(ireq):\n return bool(ireq.original_link)", "def 
_is_checksum_url(checksum):\n if (checksum.startswith('http://') or checksum.startswith('https://')):\n return True\n else:\n return False", "def Stringchecker(s1, s2):\r\n\r\n if len(s1) != len(s2) or len(set(s1)) < len(set(s2)):\r\n return False\r\n d = dict()\r\n for idx,c in enumerate(s1):\r\n if not d.get(c):\r\n d[c] = s2[idx]\r\n elif d[c] != s2[idx]:\r\n return False\r\n return True", "def test_fragmentEquality(self):\n self.assertEqual(url.URL(fragment=''), url.URL(fragment=None))", "def test_urls(self):\n base_test_url = 'http://{}:{}/'.format(TESTING_CONFIG['host'],\n TESTING_CONFIG['port'])\n self.conn._host_url == base_test_url\n self.conn.aheader_url == base_test_url + 'analysis_header'\n self.conn.atail_url == base_test_url + 'analysis_tail'\n self.conn.dref_url == base_test_url + 'data_reference'\n self.conn.dref_header_url == base_test_url + 'data_reference_header'", "def __eq__(self, other: object) -> bool:\n if not isinstance(other, URI):\n return NotImplemented\n return str(self) == str(other)", "def _is_link_allowed(self, link):\n denied = [re.match(r, link) for r in self.crawl_rules_deny]\n denied = [x for x in denied if x is not None]\n\n crawl_rules_allow = self.crawl_rules_allow\n if not self.crawl_rules_allow:\n crawl_rules_allow = (\".*\",)\n \n allowed = [re.match(r, link) for r in crawl_rules_allow]\n allowed = [x for x in allowed if x is not None]\n\n return not bool(denied) and bool(allowed)", "def coherent(self):\n return self.uris.size == self.sockets.size", "def check_url(url=None, parse_url=None):\n if not parse_url:\n parse_url = urlparse.urlparse(url)\n \n unsupported = ['twitcam.', 'new.']\n return parse_url.netloc.endswith('livestream.com')\\\n and not any(x in parse_url.netloc for x in unsupported)\\\n and len(parse_url.path.split('/')) > 2", "def can_handle(self, url):\n return self.url_re.match(url)", "def is_allow(self, src: Vertex, dst: Vertex) -> bool:\n if self.link == Link.NONE:\n return False\n elif self.link == Link.BI:\n return src in self.vertices or dst in self.vertices\n return src == self.src and dst == self.dst", "def validate_url(self):\n pass", "def canDo_url(self, url):\n hostname = urlparse.urlsplit(url)[1]\n for hostEnd in self.highwireHosts:\n if hostname.endswith(hostEnd):\n logging.log(5, 'url hostname %s ends with %s -> highwire' % (hostname, hostEnd))\n return True\n\n if hostname in self.hostCache:\n ipAddr = self.hostCache[hostname]\n else:\n logging.debug('Looking up IP for %s' % hostname)\n try:\n ipAddr = socket.gethostbyname(hostname)\n self.hostCache[hostname] = ipAddr\n except socket.gaierror:\n raise pubGetError('Illegal hostname %s in link' % hostname, 'invalidHostname', hostname)\n\n ipParts = ipAddr.split('.')\n ipParts = [ int(x) for x in ipParts ]\n result = ipParts[0] == 171 and ipParts[1] in range(64, 68)\n if result == True:\n logging.log(5, 'hostname %s is highwire host' % hostname)\n return result", "def _match(cls, url, **kwargs):\n return url.scheme.startswith('http')", "def is_people_with_link_can_view_and_comment(self):\n return self._tag == 'people_with_link_can_view_and_comment'", "def check_url(value):\n\n valid = validators.url(value)\n if valid != True:\n return False", "def test_nonworking_url(self):\r\n urls = {\r\n 'CouchSurfing': ('http://allthatiswrong.wordpress.com/2010/01'\r\n '/24/a-criticism-of-couchsurfing-and-review-o'\r\n 'f-alternatives/#problems'),\r\n # 'Electronic': ('https://www.fbo.gov/index?s=opportunity&mode='\r\n # 'form&tab=core&id=dd11f27254c796f80f2aadcbe415'\r\n # 
'8407'),\r\n }\r\n\r\n for key, url in urls.iteritems():\r\n read = ReadUrl.parse(url)\r\n\r\n self.assertTrue(\r\n read.status == 200, \"The status is 200: \" + str(read.status))\r\n self.assertTrue(\r\n read.content is not None, \"Content should not be none: \")", "def check_exists_links(self):\n\n # get all non-own articles\n articles_from_external_resourse = self.articles_from_external_resourse()\n\n # return true if it not\n if not articles_from_external_resourse.count():\n return True\n\n # if found broken link\n # keep all the articles with broken links and return false,\n # otherwise return true\n article_with_broken_links = list()\n for article in articles_from_external_resourse:\n try:\n urllib.request.urlopen(article.source)\n except:\n article_with_broken_links.append(article)\n if article_with_broken_links:\n return (False, article_with_broken_links)\n return True", "def check_urls(quartus_versions):\n success = True\n for quartus in quartus_versions.keys():\n parts = quartus_versions[quartus]\n parts_str = [str(k) for k in parts.keys()]\n #print(\"Checking Quartus %s, available parts (%s)\\n\" % (quartus, \",\".join(parts_str)))\n for part in parts:\n result = test_url(quartus, part, parts[part])\n if not result:\n print(\"\\nMissing %s/%s url=%s\" % (quartus, part, parts[part]))\n success = False\n return success", "def __isUrl(self, url):\n if type(url)==str:\n return url.startswith('http://') or url.startswith('https://')\n return False", "def samefile(path1, path2):\n # Handles path-like objects and checks if storage\n path1, path1_is_storage = format_and_is_storage(path1)\n path2, path2_is_storage = format_and_is_storage(path2)\n\n # Local files: Redirects to \"os.path.samefile\"\n if not path1_is_storage and not path2_is_storage:\n return os_path_samefile(path1, path2)\n\n # One path is local, the other storage\n if not path1_is_storage or not path2_is_storage:\n return False\n\n with handle_os_exceptions():\n # Paths don't use same storage\n system = get_instance(path1)\n if system is not get_instance(path2):\n return False\n\n # Relative path are different\n elif system.relpath(path1) != system.relpath(path2):\n return False\n\n # Same files\n return True", "def test_url_tranform(self):\r\n response = self.client.get('/courses/edx/math101/2014/wiki/math101/')\r\n self.assertIn('/courses/edx/math101/2014/wiki/math101/_edit/', response.content)\r\n self.assertIn('/courses/edx/math101/2014/wiki/math101/_settings/', response.content)", "def _matcher(r1: vcr.request.Request, r2: vcr.request.Request) -> None:\n assert r1.uri == r2.uri and r1.body == r2.body and r1.headers == r2.headers", "def url_has_allowed_host_and_scheme(url, allowed_hosts, require_https=False):\n if url is not None:\n url = url.strip()\n if not url:\n return False\n if allowed_hosts is None:\n allowed_hosts = set()\n elif isinstance(allowed_hosts, str):\n allowed_hosts = {allowed_hosts}\n # Chrome treats \\ completely as / in paths but it could be part of some\n # basic auth credentials so we need to check both URLs.\n return _url_has_allowed_host_and_scheme(\n url, allowed_hosts, require_https=require_https\n ) and _url_has_allowed_host_and_scheme(\n url.replace(\"\\\\\", \"/\"), allowed_hosts, require_https=require_https\n )", "def url_has_allowed_host_and_scheme(url, allowed_hosts, require_https=False):\n if url is not None:\n url = url.strip()\n if not url:\n return False\n if allowed_hosts is None:\n allowed_hosts = set()\n elif isinstance(allowed_hosts, str):\n allowed_hosts = {allowed_hosts}\n # 
Chrome treats \\ completely as / in paths but it could be part of some\n # basic auth credentials so we need to check both URLs.\n return _url_has_allowed_host_and_scheme(\n url, allowed_hosts, require_https=require_https\n ) and _url_has_allowed_host_and_scheme(\n url.replace(\"\\\\\", \"/\"), allowed_hosts, require_https=require_https\n )", "def check_url(url):\n return 'products.json' in url", "def test_link_safety(self):\n attack_vectors = (\n # \"standard\" javascript pseudo protocol\n ('javascript:alert`1`', ''),\n # bypass attempt\n ('jAvAsCrIpT:alert`1`', ''),\n # javascript pseudo protocol with entities\n ('javascript&colon;alert`1`', ''),\n # javascript pseudo protocol with prefix (dangerous in Chrome)\n ('\\x1Ajavascript:alert`1`', ''),\n # data-URI (dangerous in Firefox)\n ('data:text/html,<script>alert`1`</script>', ''),\n # vbscript-URI (dangerous in Internet Explorer)\n ('vbscript:msgbox', ''),\n # breaking out of the attribute\n ('\"<>', ''),\n )\n\n for vector, expected in attack_vectors:\n # Image\n self.assertEqual(\n Markdown().render('![atk](%s)' % vector),\n '<p><img src=\"%s\" alt=\"atk\"></p>' % expected)\n # Link\n self.assertEqual(\n Markdown().render('[atk](%s)' % vector),\n '<p><a rel=\"nofollow\" href=\"%s\">atk</a></p>' % expected)", "def check_url(url=None, parse_url=None):\n \n if not parse_url:\n parse_url = urlparse.urlparse(url)\n \n return parse_url.netloc.endswith('slideshare.net')", "def __ne__(self, other: 'NextHref') -> bool:\n return not self == other", "def __isSameHost( self, hostCN, hostConn ):\n hostCN_m = hostCN\n if '/' in hostCN:\n hostCN_m = hostCN.split( '/' )[1]\n if hostCN_m == hostConn:\n return True\n result = checkHostsMatch( hostCN_m, hostConn )\n if not result[ 'OK' ]:\n return False\n return result[ 'Value' ]", "def matches(self, other):\n return ( all([i==j or i<0 or j<0 for i, j in zip(self._data, other._data)])\n and len(self._data) == len(other._data) )", "def test_url_link_multiple(self):\n content = ('[Link]([url(\\'/content/pages/test1.md\\')])'\n '[Link]([url(\\'/content/pages/test2.md\\')])')\n self.pod.write_file('/content/pages/test.md', content)\n content = '{{doc.html|safe}}'\n self.pod.write_file('/views/base.html', content)\n self.pod.router.add_all(use_cache=False)\n result = testing.render_path(self.pod, '/test/')\n self.assertIn('href=\"/test1/\"', result)\n self.assertIn('href=\"/test2/\"', result)", "def __eq__(self, other: 'FirstHref') -> bool:\n if not isinstance(other, self.__class__):\n return False\n return self.__dict__ == other.__dict__", "def check_link(self, link, links_para):\n href = link['href']\n if not href.startswith('/wiki/') or href == '/wiki/Latin' or href.startswith('#'):\n return False\n if \"<i>\" in link or href in links_para:\n return False\n title = href[6:]\n if title.startswith('Help:') or title.startswith('File:') or title.endswith('.ogg') or title.startswith('Wikipedia:'):\n return False\n return True", "def _is_url(string):\n return \"http\" in string", "def _is_subsumed_by(rule_pattern1, rule_pattern2):\n if rule_pattern1 == rule_pattern2:\n return False\n if re.match(_regexify_matching_pattern(rule_pattern2), rule_pattern1):\n return True\n else:\n return False", "def canDo_url(self, artMeta):\n return False", "def test_get_all_urls_are_urls():\n # setup\n all_links = get_urls.get_all_urls(constants[\"URLS\"][\"TESTED_URL\"])\n for link in all_links:\n valid = validators.url(link)\n assert valid", "def __ne__(self, other):\n if not isinstance(other, AwsS3PresignedUrlForUpload):\n 
return True\n\n return self.to_dict() != other.to_dict()", "def __eq__(self, other):\n return (self.server + self.scriptpath\n == other.server + other.scriptpath)", "def can_be_linked(self):\n # upstream trees of tested treed should nad share trees with downstream trees of current tree\n tested_tree_upstream_trees = {t.name for t in self.upstream_trees()}\n current_tree_downstream_trees = {p.node_tree.name for p in bpy.context.space_data.path}\n shared_trees = tested_tree_upstream_trees & current_tree_downstream_trees\n return not shared_trees", "def is_local_res(\n url: str,\n resource_netloc: str,\n page_netlock: str,\n) -> bool:\n is_encoded = 'data:' in url\n is_local = not resource_netloc or resource_netloc == page_netlock\n return is_local and not is_encoded", "def check_href(href):\n if bool(pattern.match(href)):\n if os.path.basename(urlparse.urlparse(href).path) not in file_list:\n return True\n return False", "def check_url(value):\n\n valid = validators.url(value)\n if valid is not True:\n return False", "def referrer_allowed(referrer, referrer_acl):\n allow = False\n if referrer_acl:\n rhost = urlparse(referrer or '').hostname or 'unknown'\n for mhost in referrer_acl:\n if mhost.startswith('-'):\n mhost = mhost[1:]\n if mhost == rhost or (mhost.startswith('.') and\n rhost.endswith(mhost)):\n allow = False\n elif mhost == '*' or mhost == rhost or \\\n (mhost.startswith('.') and rhost.endswith(mhost)):\n allow = True\n return allow", "def is_identical(self, other):\n if self.user != other.user:\n return False\n\n my_xputs = itertools.chain(self.inputs.order_by(\"dataset_idx\"), self.outputs.order_by(\"dataset_idx\"))\n other_xputs = itertools.chain(other.inputs.order_by(\"dataset_idx\"), other.outputs.order_by(\"dataset_idx\"))\n for my_xput, other_xput in zipper(my_xputs, other_xputs, fillvalue=None):\n if my_xput is None or other_xput is None or not my_xput.is_identical(other_xput):\n return False\n return True", "def __cmp__(self, other):\n # FixMe: Maybe the URL should be normalised somehow.\n return cmp((self.url, self.linenumber, self.column),\n (other.url, other.linenumber, other.column))", "def can_combine(self, first, second):\n # Need to check out of order issues as\n # blocks are sorted by where they start in a\n mismatch_ab = (first.a_end <= second.a\n and second.b_end <= first.b)\n mismatch_ba = (second.a_end <= first.a\n and first.b_end <= second.b)\n out_of_order = mismatch_ab or mismatch_ba\n return not out_of_order and self.jump_gap(second)", "def is_identical(self, other):\n if self.is_input != other.is_input:\n return False\n\n if self.is_raw() and other.is_raw():\n return True\n if self.is_raw() or other.is_raw():\n return False\n return self.structure.is_identical(other.structure)", "def _check_audience(self, request, audience):\n if not self.audiences:\n return audience == request.host_url\n for audience_pattern in self._audience_patterns:\n if audience_pattern.match(audience):\n return True\n return False", "def judge(self, s1, s2):\n if len(s2) < len(s1):\n return False\n index_of_s1 = 0\n index_of_s2 = 0\n while index_of_s1 < len(s1) and index_of_s2 < len(s2):\n if s1[index_of_s1] == s2[index_of_s2]:\n index_of_s1 += 1\n index_of_s2 += 1\n else:\n index_of_s2 += 1\n return True if index_of_s1 == len(s1) else False", "def is_duplicate(self, url):\n dupl_check_sql = '''\n SELECT url FROM {} WHERE url=?\n '''.format(\n self.tablename\n )\n with self.conn:\n return self.conn.execute(dupl_check_sql, (url,)).fetchone()" ]
[ "0.71224326", "0.6801178", "0.64278007", "0.637694", "0.63402057", "0.63119143", "0.6286929", "0.62371063", "0.61779445", "0.61676055", "0.6156955", "0.6119644", "0.6043208", "0.60181797", "0.6006415", "0.599031", "0.5972156", "0.59510684", "0.59472144", "0.5881061", "0.58000994", "0.578601", "0.57512313", "0.5729585", "0.57259095", "0.57133824", "0.57021135", "0.5659905", "0.56480974", "0.56477404", "0.5621185", "0.5611927", "0.5598201", "0.55875987", "0.5584966", "0.55785006", "0.55385756", "0.55369043", "0.55363244", "0.5502112", "0.5491358", "0.549032", "0.5487464", "0.5480664", "0.5470788", "0.54618764", "0.5451018", "0.5445683", "0.54437274", "0.5433931", "0.54127157", "0.53997165", "0.5398585", "0.53919536", "0.53802717", "0.5376454", "0.5375023", "0.53723925", "0.5370712", "0.53705436", "0.53534305", "0.5350519", "0.53491735", "0.5349042", "0.53439283", "0.5343243", "0.5336251", "0.5335396", "0.53287035", "0.53281164", "0.5322328", "0.5316022", "0.5316022", "0.5300358", "0.52825934", "0.5279509", "0.52678454", "0.5260146", "0.5260119", "0.52572805", "0.5256538", "0.525446", "0.52524906", "0.52512807", "0.52445036", "0.5244393", "0.52443045", "0.52417475", "0.5229444", "0.52290374", "0.5225652", "0.5225519", "0.52219325", "0.52188325", "0.5217879", "0.5204611", "0.5202159", "0.5197962", "0.5196458", "0.51931167" ]
"document_score": 0.7777119
"document_rank": 0
"query": "Get all pictures for a user."
def get(self): parser = reqparse.RequestParser() parser.add_argument("user_id", type=int, location="args", required=True) args = parser.parse_args() try: #get user from database user = User.query.filter(User.id==args.user_id).first() if not user: return Response(status=404, message="User not found.").__dict__,404 return Response(status=200, message="Pictures found.", value=[p.dict_repr() for p in user.pictures.all()])\ .__dict__, 200 except Exception as e: app.logger.error(e) return Response(status=500, message="Internal server error.").__dict__,500
"metadata": { "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_all_images_for_user(username):\n\n user = get_user_by_username(username)\n return user.images", "def photos_by_user(user_id):\n photos = Photo.query.filter(Photo.user_id == user_id).all()\n return photos", "def get_photos(self, user_id):\n\n json_photos = self._receive_photos_from_vk(user_id)\n return self._parse_photos(json_photos)", "def get_user_photos(self, user_id, count = 30, page = 1):\n uri = 'users/' + user_id + '/photos'\n options = { 'per_page': count, 'page': page }\n return self.make_request(uri, options)", "def getUserImages(username):\n\n if not UserExists(username):\n print ('Got here!')\n return \"False\"\n elif not g.user:\n return redirect(url_for('login'))\n\n return download_blobs(username)\n #files = glob(os.path.join('static', 'UserPictures', username, '*'))\n #return json.dumps(files)", "def getCurrentUserImages():\n if not g.user:\n return redirect(url_for('login'))\n return getUserImages(g.user)", "def list_all_size(self, path=None):\n user_id = path or self.user_id\n if not user_id:\n raise ValueError(\"You must either specify a user ID at \"\n \"storage instanciation or at \"\n \"list_image_and_thumb launching.\")\n url_types = ['url_o', 'url_s', 'url_q', 'url_t', 'url_l', 'url_m', 'url_n', 'url_z', 'url_c']\n params = {\n 'method': 'flickr.people.getPublicPhotos',\n 'user_id': user_id,\n 'extras': ','.join(url_types)\n }\n response = self.oauth_session.get(self.API_ENDPOINT, params=params)\n json_response = response.json()\n if json_response['stat'] == 'fail':\n raise FlickrError(json_response['message'])\n urls = [pho for pho in json_response['photos']['photo']]\n return urls", "def getUserPhotos(user_id=None, offset=None, count=None, extended=None,\\\n sort=None):\n params = {\n 'user_id': user_id,\n 'offset': offset,\n 'count': count,\n 'extended': extended,\n 'sort': sort\n }\n result = call('photos.getUserPhotos', **params)\n return parse_response(result)", "def get_public_images_for_user(username):\n\n user = get_user_by_username(username)\n images = user.images\n public_images = [img for img in images if img.permission.value == \"PUBLIC\"]\n return public_images", "def download_photos(username, size_label=None):\n\n user = Flickr.Person.findByUserName(username)\n photos = Flickr.Walker(user.getPhotos)\n download_list(photos, size_label)", "def list_image_and_thumb(self, path=None, size='m'):\n user_id = path or self.user_id\n if not user_id:\n raise ValueError(\"You must either specify a user ID at \"\n \"storage instanciation or at \"\n \"list_image_and_thumb launching.\")\n size = 'url_%s' % size\n params = {\n 'method': 'flickr.people.getPublicPhotos',\n 'user_id': user_id,\n 'extras': 'url_o,url_m,%s' % size\n }\n response = self.oauth_session.get(self.API_ENDPOINT, params=params)\n json_response = response.json()\n if json_response['stat'] == 'fail':\n raise FlickrError(json_response['message'])\n urls = [(pho[size], pho.get('url_o', pho['url_m']))\n for pho in json_response['photos']['photo']]\n return urls", "def GetPicturesForAll(self, limit = -1, since = -1):\n\n if (limit < 1):\n limit = self.limit\n\n url = self.__BuildGetUrl(\"pictures\", \"\", limit, since)\n return self.__GetJson(url, False)", "def all_users():\n # TODO: implement pagination\n # skip = flask.request.args[\"skip\"]\n user_cursor = mongo.db.users.find({}, {\"_id\": False})\n users = []\n for user in user_cursor:\n # pprint(user)\n if \"image_ids\" in user:\n users.append({\n \"username\": user[\"username\"],\n \"images\": [str(im_id) for im_id in 
user[\"image_ids\"]]\n })\n return flask.jsonify(users)", "def get_queryset(self):\n return Photo.objects.filter(user=self.request.user)", "def get_images(self, limit=None):\n url = (\"https://api.imgur.com/3/account/{0}/\"\n \"images/{1}\".format(self.name, '{}'))\n resp = self._imgur._send_request(url, limit=limit)\n return [Image(img, self._imgur) for img in resp]", "def images_for_user(request, uname):\n try:\n user = User.objects.get(username=uname)\n except Exception as user_does_not_exist:\n return render_to_response('wainz/image_list.html', {\"rows\":[]} , context_instance = RequestContext(request))\n return render_to_response('wainz/image_list.html', {\"rows\":image_display(0, 10000, user)} , context_instance = RequestContext(request))", "def list_photos(username):\n user = Flickr.Person.findByUserName(username)\n photos = Flickr.Walker(user.getPhotos)\n for photo in photos:\n print u\"{0} - {1}\".format(photo.id, photo.title)\n\n print(\"Number of total photos: %s\" % user.getPhotos().info.total)", "def get_images(self, file_path: str) -> Iterable[Image]:\n return []", "def images(self, details=True, **query):\n img = _image.ImageDetail if details else _image.Image\n return list(self._list(img, paginated=True, **query))", "def get_all_profile_pictures(pictures):\n b = boto_init_s3(settings.BUCKET_NAME)\n\n if b:\n urls = {}\n for picture in pictures:\n s3_file_path = b.get_key(picture.path)\n urls[picture.file_id] = (s3_file_path.generate_url(expires_in=600))\n return urls\n return {}", "def get_queryset(self):\n return Picture.objects.all()", "def images(self):\n return self.gameimage_set.all()", "def listdir(self, path=None, original=True, size='m'):\n user_id = path or self.user_id\n if not user_id:\n raise ValueError(\"You must either specify a user ID at \"\n \"storage instanciation or at listdir \"\n \"launching\")\n wanted_size = 'url_o' if original else 'url_%s' % size\n fallback_size = 'url_%s' % size if original else 'url_m'\n extras = ','.join([wanted_size, fallback_size])\n params = {\n 'method': 'flickr.people.getPublicPhotos',\n 'user_id': user_id,\n 'extras': extras\n }\n response = self.oauth_session.get(self.API_ENDPOINT, params=params)\n json_response = response.json()\n if json_response['stat'] == 'fail':\n raise FlickrError(json_response['message'])\n urls = [pho.get(wanted_size, pho[fallback_size])\n for pho in json_response['photos']['photo']]\n return ([], urls)", "async def fetch_all_images(sess: Session = Depends(get_db)):\n image_list = utils_com.get_com_image_list(sess)\n return image_list", "def pictures(self):\n return self.container['pictures']", "def get_images(self, ctx, page):\n is_imgur = 'source' in page.meta and page.meta['source'] == 'imgur'\n if 'type' in page.meta and page.meta['type'] == 'album':\n album = page.meta\n images = []\n if is_imgur:\n pp.pprint(page.meta)\n # bind to template via json\n images = self.get_imgur_album_images(page)\n self.albums[album['slug']] = images\n else:\n # get paths of all of the images in the album\n srcs = []\n # get absolute paths of images in album for each file type\n for file_type in FILE_TYPES:\n imgs = glob.glob(\n GALLERY_DIR + album['slug'] + '/*.' 
+ file_type\n )\n\n for img in imgs:\n img_rel_path = (\n REL_GALLERY_DIR +\n album['slug'] + '/' + img.split('/')[-1]\n )\n srcs.append(img_rel_path)\n\n # split full srcs and thumb srcs from srcs into two lists\n images = []\n thumb_srcs = filter(\n lambda src: src.split('/')[-1].startswith(THUMB_PREFIX),\n srcs\n )\n for thumb_src in thumb_srcs:\n src = thumb_src.replace(THUMB_PREFIX, '')\n thumb_width, thumb_height = self.calc_img_hw(thumb_src)\n width, height = self.calc_img_hw(src)\n images.append({\n 'thumb_src': thumb_src,\n 'thumb_width': thumb_width,\n 'thumb_height': thumb_height,\n\n 'src': src,\n 'width': width,\n 'height': height,\n })\n self.albums[album['slug']] = images", "def retrieve_pics(drive_service):\n pic_q = Queue()\n page_token = None\n while True:\n response = drive_service.files().list(q=\"mimeType='image/jpeg'\",\n spaces='drive',\n fields='nextPageToken, files(id, name)',\n pageToken=page_token).execute()\n\n for file in response.get('files', []):\n if file.get('id') not in classify_data:\n pic_q.put((file.get('id'), file.get('name')))\n\n page_token = response.get('nextPageToken', None)\n if page_token is None:\n break\n\n print ('Found %s pictures' % pic_q.qsize()) # prints no. of new pics found\n return pic_q", "def get_goods_photos_same_user(self):\n photos = GoodsPhotos.objects.filter(good__user=self.user).exclude(good=self)\n return photos", "def list(self):\n r = self.target.ttbd_iface_call(\"images\", \"list\", method = \"GET\")\n return r['result']", "def get(self):\n return PhotoGalleryService().get_all(), 200", "def getimgs():", "def list_images(self):\n raise NotImplementedError()", "def person_photo_list(request, flickr_id, queryset=None, **kwargs):\n if queryset is None:\n queryset = Person.objects.all()\n \n if 'queryset' in kwargs:\n del kwargs['queryset']\n \n if 'template_name' not in kwargs:\n kwargs['template_name'] = 'flickrsets/person/photo_list.html'\n \n if 'template_object_name' not in kwargs:\n kwargs['template_object_name'] = 'photo'\n \n if 'paginate_by' not in kwargs:\n kwargs['paginate_by'] = getattr(\n app_settings,\n 'PERSON_PHOTO_LIST_VIEW_PAGINATE_BY')\n \n person = shortcuts.get_object_or_404(queryset, flickr_id=flickr_id)\n\n if 'extra_context' not in kwargs:\n kwargs['extra_context'] = {}\n kwargs['extra_context']['person'] = person\n \n queryset = person.photos.all()\n \n return list_detail.object_list(request, queryset, **kwargs)", "def get_image_list(self, account):\n images = self.driver(account).list_images()\n return [image.name for image in images]", "def user_media(self, user = \"\", id = \"\", count = 20):\n\n if user:\n url = \"https://api.instagram.com/v1/users/search?q={0}&access_token={1}\".format(user, self.access_token)\n id = requests.get(url).json()[\"data\"][0][\"id\"]\n\n url = \"https://api.instagram.com/v1/users/{0}/media/recent/?access_token={1}&count={2}\".format(id, self.access_token, count)\n\n data = []\n\n while True: \n request = requests.get(url).json()\n \n data.extend(request[\"data\"])\n count -= len(request[\"data\"])\n\n if not request[\"pagination\"] or count <= 0:\n return data\n\n url = request[\"pagination\"][\"next_url\"]", "def list_images(db, n, usernick=None):\n cur = db.cursor()\n if usernick:\n sql = \"\"\"\n select * from (\n select * from images\n order by timestamp DESC\n )\n where usernick=?\n limit ?;\n \"\"\"\n cur.execute(sql, (usernick, n))\n else:\n sql = \"\"\"\n select * from (\n select * from images\n order by timestamp DESC\n )\n limit ?;\n \"\"\"\n cur.execute(sql, 
(n,))\n img_list = (list(cur))\n dict_list = []\n for i in img_list:\n i_dict = dict()\n i_dict['filename'] = i[0]\n i_dict['timestamp'] = i[1]\n i_dict['user'] = i[2]\n i_dict['likes'] = count_likes(db, i_dict['filename'])\n dict_list.append(i_dict)\n return dict_list", "def get_images(self):\n if not hasattr(self, '_BasePublication__images_cache'):\n self.__images_cache = self.images.all()\n return self.__images_cache", "def get(request):\n return Response(\n GallerySerializer(\n request.user.gallery.all(),\n many=True\n ).data\n )", "def get_photos(self,query=None):\n if self._access_token is None:\n raise RequiresAccessTokenError()\n \n parameters = self.__get_default_oauth_params()\n base_url = CONTENT_ROOT_URL + 'photos/'\n\n if query is not None:\n query_post = simplejson.dumps(query, cls=JSONFactories.encoders.get_encoder_for(query))\n parameters['query'] = query_post\n self.response = self.__make_oauth_request(base_url, parameters, token=self._access_token, signed=True, method=\"POST\")\n else:\n self.response = self.__make_oauth_request(base_url, parameters, token=self._access_token, signed=True, method=\"GET\")\n \n results = simplejson.loads(self.response.read())\n nice_result = make_nice.make_it_nice(results)\n return nice_result", "def get_images(self):\n \n return self.img_lst", "def cmd_account_images(client, args):\n account_images = client.get_account_images(args.username, args.page)\n data = [item.__dict__ for item in account_images]\n generate_output({'account_images': data}, args.output_file)", "def image_list(request):\n return render_to_response('wainz/image_list.html', {\"images_and_votes\": ordered_images(0, 30, request.user)}, context_instance = RequestContext(request))", "def get_all_images(access_token):\n url = 'http://interview.agileengine.com/images'\n headers = {\n 'Authorization': 'Bearer ' + access_token\n }\n images = []\n try:\n logging.info(\"Fetching all the images\")\n response = requests.get(\n url,\n headers=headers\n )\n if response.ok: \n total_pages = response.json().get('pageCount')\n images = response.json().get('pictures')\n logging.info(f\"fetched 1 of {total_pages}\")\n for i in range(2,total_pages + 1):\n paginated_url = f'http://interview.agileengine.com/images?page={i}'\n response = requests.get(\n paginated_url,\n headers=headers\n )\n images += response.json().get('pictures')\n logging.info(f\"fetched {i} of {total_pages}\")\n \n detailed_images = []\n for image in images:\n detail_url = f\"http://interview.agileengine.com/images/{image.get('id')}\"\n \n logging.info(f\"Retrieving detail of {image['id']}\")\n response = requests.get(\n detail_url,\n headers=headers\n )\n if response.ok:\n detailed_images.append(response.json())\n return detailed_images\n except requests.exceptions.HTTPError:\n logging.exception('HTTP error')\n except requests.exceptions.ConnectionError:\n logging.exception('Connection error')\n except requests.exceptions.Timeout:\n logging.exception('Timeout error')\n except requests.exceptions.RequestException as e:\n logging.exception('Unexpected error')", "def list_users_in_pool():\n files = []\n USERS_DIR = os.path.join(UPLOAD_DIRECTORY, \"users\")\n for filename in os.listdir(USERS_DIR):\n path = os.path.join(USERS_DIR, filename)\n if os.path.isdir(path):\n files.append(filename)\n return jsonify(files)", "def photos(self, query, page=1, per_page=10):\n url = \"/search/photos\"\n data = self._search(url, query, page=page, per_page=per_page)\n data[\"results\"] = PhotoModel.parse_list(data.get(\"results\"))\n return 
data", "def get_images(self):\n # test\n for it in self.xml.iterfind('image'):\n print(it)\n\n elements = []\n els = self.xml.findall('image')\n for el in els:\n elements.push(el.find('src')[0])\n els = self.xml.findall('full_picture')\n elements = elements + els\n self.__download_(elements)", "def get_list(self, owner_userid, auth_userid, include_public_tags=0, order_by=\"tag_name\", order_dir=\"asc\"):\n try:\n owner_userid = validation.cast_integer(owner_userid, 'owner_userid')\n if auth_userid:\n auth_userid = validation.cast_integer(auth_userid, 'auth_userid')\n validation.oneof(order_by, ('tag_name', 'cnt_images'), 'order_by')\n validation.oneof(order_dir, ('asc', 'desc'), 'order_dir')\n except errors.ValidationError, ex:\n return utils.return_deferred_error(ex.value)\n\n return self.app.db.query(\"\"\"\n SELECT\n t1.tag_name,\n count(t2.media_id) as cnt_images\n FROM\n user_image_tags t1\n JOIN user_images t2 using(image_id)\n WHERE\n t2.owner_userid = %%s AND\n zoto_user_can_view_media(t2.owner_userid, image_id, %%s)\n GROUP BY\n tag_name\n ORDER BY\n %s %s\n \"\"\" % (order_by, order_dir), (owner_userid, auth_userid))", "def photos():\n cwd = os.getcwd()\n db_path = os.path.join(cwd, CLI_PHOTOS_DB)\n return PhotosDB(db_path).photos(intrash=True)", "def getImages(self,Project=\"\"):\n #images = [\"image1.jpg\",\"image2.jpg\",\"image3.jpg\"]\n \n os.chdir(self.dataDir)\n images = glob.glob(\"*.png\")\n \n return images", "def get_all_user():\n user = UserModel.objects()\n return jsonify(user), 200", "def get_all_users():", "async def async_get_image_names(self):\n\n cookies = self.get_session_cookie()\n try:\n async with aiohttp.ClientSession(cookies=cookies) as session:\n resp = await session.get(\n self._base_url\n )\n t = await resp.text()\n match = re.findall('(?:\\w|\\d|\")/(.*?).(?:mjpg|jpg)', t)\n if len(match) == 0:\n raise XeomaError('Unable to find any camera image names')\n image_names = set(match)\n results = []\n for image_name in image_names:\n match = re.search(\n image_name + '\\.(?:mjpg|jpg).*?user=(.*?)&', t\n )\n if match and len(match.group(1)) > 0:\n d = base64.b64decode(unquote(match.group(1))) \\\n .decode('ASCII')\n creds = d.split(':')\n if len(creds) < 2:\n raise XeomaError('Error parsing image credentials')\n results.append((image_name, creds[0], creds[1]))\n else:\n results.append((image_name, None, None))\n return results\n except asyncio.TimeoutError as e:\n raise XeomaError(\"Unable to connect to Xeoma web server\")", "def get_images(path, ext=\".jpg\"):\n return get_files(path, ext)", "def images(self, **kwargs):\n return self.get_list(self.cloudman.compute.images(),\n kind=\"image\")", "def get_all_sets(user_id):\n def helper():\n page_num = 1\n while True:\n obj = jsonp_to_obj(flickr.photosets_getList(user_id=user_id, page=page_num))\n for x in obj['photosets']['photoset']:\n yield (x['id'], x['title']['_content']) \n if page_num == obj['photosets']['pages']:\n break\n page_num+=1\n\n return list(helper())", "def _get_images(self, fuzzable_request):\n res = []\n\n try:\n response = self._uri_opener.GET(fuzzable_request.get_uri(),\n cache=False)\n except:\n om.out.debug('Failed to retrieve the page for finding captchas.')\n else:\n # Do not use parser_cache here, it's not good since CAPTCHA implementations\n # *might* change the image name for each request of the HTML\n #dp = parser_cache.dpc.get_document_parser_for( response )\n try:\n document_parser = DocumentParser.DocumentParser(response)\n except BaseFrameworkException:\n return []\n \n 
image_path_list = document_parser.get_references_of_tag('img')\n\n GET = self._uri_opener.GET\n sha1 = hashlib.sha1\n \n result_iter = self.worker_pool.imap_unordered(GET, image_path_list)\n \n for image_response in result_iter:\n if image_response.is_image():\n img_src = image_response.get_uri()\n img_hash = sha1(image_response.get_body()).hexdigest()\n res.append((img_src, img_hash, response))\n\n return res", "def getImages(path):\n files = list()\n\n for f in listdir(path):\n file = join(path, f)\n if isfile(file):\n files.append(getImage(file))\n\n return files", "def photos(self):\n return self._photos", "def _get_images(image_path):\n logger.debug(\"Getting images: '%s'\", image_path)\n if not os.path.isdir(image_path):\n logger.debug(\"Folder does not exist\")\n return None\n files = [os.path.join(image_path, f)\n for f in os.listdir(image_path) if f.lower().endswith((\".png\", \".jpg\"))]\n logger.debug(\"Image files: %s\", files)\n return files", "def getAll(owner_id=None, extended=None, offset=None, count=None, photo_sizes=None,\\\n no_service_albums=None, need_hidden=None, skip_hidden=None):\n params = {\n 'owner_id': owner_id,\n 'extended': extended,\n 'offset': offset,\n 'count': count,\n 'photo_sizes': photo_sizes,\n 'no_service_albums': no_service_albums,\n 'need_hidden': need_hidden,\n 'skip_hidden': skip_hidden\n }\n result = call('photos.getAll', **params)\n return parse_response(result)", "def images(self):\n return self._data[\"images\"]", "def get_image_set_for_uid(uid: str):\n images = get_all_image_structs(uid, Config.study_size, is_accumulating=False)\n res = {'images': images}\n return res", "def populateImagesList(self):\n \n self._gui_server.getImagesList(self._populateImagesList)", "def get_gallery_favorites(self):\n url = (\"https://api.imgur.com/3/account/{0}/gallery_favorites\".format(\n self.name))\n resp = self._imgur._send_request(url)\n return [Image(img, self._imgur) for img in resp]", "def list_images(project):\n data = {constants.PROJECT_PARAMETER: project}\n res = requests.post(_url + \"list_images/\", data=data,\n auth=(_username, _password))\n click.echo(\"username:\")\n click.echo(_username)\n click.echo(_password)\n click.echo(_url)\n click.echo(data) \n if res.status_code == 200:\n images = json.loads(res.content)\n table = PrettyTable(field_names=[\"Image\"])\n for image in images:\n table.add_row([image])\n click.echo(table.get_string())\n else:\n click.echo(res.content)", "def list_images():\n resource_route = \"/static/img/\"\n file_request_path = request.base_url[:request.base_url.rfind('/')] + resource_route\n path_to_current_file = os.path.dirname(os.path.abspath(__file__))\n images_path = os.path.join(path_to_current_file, 'static', 'img')\n directory_list = os.listdir(images_path)\n image_files = [f for f in directory_list if os.path.isfile(os.path.join(images_path, f))]\n image_files.sort()\n if '.gitignore' in image_files:\n image_files.remove('.gitignore')\n full_image_paths = [file_request_path + f for f in image_files]\n response_code = 200\n return make_response(jsonify({'files': full_image_paths}), response_code)", "def images(self, **kwargs):\n\n path = self._get_movie_id_path('images')\n resp = self._get_method(path, kwargs)\n return resp", "def profile_pic(request):\n if request.user.is_authenticated:\n profile_obj = CustomUser.objects.get(id__exact=request.user.id)\n pic = profile_obj.avatar\n return {'picture': pic}\n return {}", "def get_images(self):\n return self._get_brains(\"Image\")", "def get_users(self):\n users = 
[]\n page = 1\n while not len(users) % 100:\n users += self._get('/users?{0}'.format(urllib.urlencode({'per_page': 100, 'page': page})))\n if not users:\n break\n page += 1\n return users", "def list_amis(self):\n images = self._driver.list_images(ex_owner=self.account_id)\n return images", "def list(self):\n\n base_url = ''.join((\n self.BASE_URL + '/users/',\n self.__user_data.get('login') + '/gists',\n ))\n\n response = requests.get(base_url, headers=self.__headers)\n\n if response.status_code == 200:\n return response.json()\n\n raise GistException(Gist.__get_response_error('It is not possible to list files', response))", "def GetMatchingImages(self, user_project, image, alias, errors):\n service = self._compute.images\n requests = [\n (service,\n 'List',\n self._messages.ComputeImagesListRequest(\n filter='name eq ^{0}(-.+)*-v.+'.format(alias.name_prefix),\n maxResults=constants.MAX_RESULTS_PER_PAGE,\n project=alias.project)),\n (service,\n 'List',\n self._messages.ComputeImagesListRequest(\n filter='name eq ^{0}$'.format(image),\n maxResults=constants.MAX_RESULTS_PER_PAGE,\n project=user_project)),\n ]\n\n return request_helper.MakeRequests(\n requests=requests,\n http=self._http,\n batch_url=self._batch_url,\n errors=errors)", "def fetch_images(client, images):\n return [fetch_image(client, image) for image in images]", "def images_init(self):\n\n self.user_image = UserImage(\n user_id=self.user_id,\n tag=self.tag_image,\n image_id='sha256:342fea22',\n created=1524229897,\n size=191623983\n )\n self.user_image.save()\n\n CLIENT.images_list.append(\n {'Containers': -1,\n 'Created': 1524229897,\n 'Id': 'sha256:342fea22',\n 'Labels': None,\n 'ParentId': 'sha256:55d98c2',\n 'RepoDigests': None,\n 'RepoTags': [self.tag_image],\n 'SharedSize': -1,\n 'Size': 191623983,\n 'VirtualSize': 191623983}\n )", "def image_api():\n PAGE_SIZE=50\n page = int(request.args.get('page', 0))\n print page\n userid = current_user.id\n out= []\n query = db_session.query(Image, Batch.status).\\\n outerjoin(Batch, Image.batch==Batch.batch_id).\\\n filter(Image.user==userid)\n\n count = query.count()\n for row in query.limit(PAGE_SIZE).offset(page*PAGE_SIZE):\n out.append({\n \"url\": url_for('image_views.raw_image', image_path=row.Image.path),\n \"page\": url_for('image_views.view_image', image_id=row.Image.id),\n \"title\": row.Image.title,\n \"status\": row.status\n })\n\n return jsonify({\"images\": out, \"count\": count})", "def fetch_images(user_animes, force, rate_limit):\n for anime in user_animes:\n img_fpath = os.path.join(IMGDIR, f\"{anime['mal_id']}.jpg\")\n cache_exists = os.path.exists(img_fpath) and os.path.isfile(img_fpath)\n if force or not cache_exists:\n print(f\"Saving image for: {anime['title']}\")\n urllib.request.urlretrieve(anime[\"image_url\"], img_fpath)\n time.sleep(rate_limit)\n else:\n print(f\"Skipping already cached image for: {anime['title']}\")", "def home(request):\n current_user = request.user\n\n # return_list = []\n # for image in all_images:\n # return_list.append((image, image.image_likes.filter(profile_owner=request.user)))\n\n return render(request,'main_templates/landing.html',{'user':current_user})", "def read_all():\n # Create the list of photos from our data\n photos = Photo.query.order_by(Photo.sample_id).all()\n\n # Serialize the data for the response\n photo_schema = PhotoSchema(many=True)\n data = photo_schema.dump(photos)\n return data", "def photo_list(request, queryset=None, **kwargs):\n if queryset is None:\n queryset = Photo.objects.all()\n \n if 'queryset' 
in kwargs:\n del kwargs['queryset']\n \n if 'template_name' not in kwargs:\n kwargs['template_name'] = 'flickrsets/photo/list.html'\n \n if 'template_object_name' not in kwargs:\n kwargs['template_object_name'] = 'photo'\n \n if 'paginate_by' not in kwargs:\n kwargs['paginate_by'] = getattr(\n app_settings,\n 'PHOTO_LIST_VIEW_PAGINATE_BY')\n \n return list_detail.object_list(request, queryset, **kwargs)", "def get_image(image_id, user_id):\n\n with image_backend(user_id) as backend:\n return backend.get_image(image_id)", "def query_all_users():\n result = session.query(User).all()\n for user in result:\n print(\"name: %s\\nemail: %s\\npicture: %s\\nid:%s\" %\n (user.name, user.email, user.picture, user.id))\n print(\"**************************\")", "def view_images(request):\n user_root = request.session['user_root']\n search_id = request.session['search_id']\n with open(os.path.join(user_root, search_id, 'info.json')) as f:\n info = json.load(f)\n object_id_list = info['object_id_list']\n image_type_list = info['image_type_list']\n search_pattern = info['search_pattern']\n image_dir = scan_images(user_root, search_id, image_type_list,relative_path=True)\n\n # Add flag for conditional representation.\n flag_scan = False\n flag_classifier=info['flag_classifier']\n if search_pattern == \"scan\":\n flag_scan = True\n bounding_box_dict = scan_bb_images(\n user_root, search_id, folder_name=\"scans\")\n else:\n bounding_box_dict = scan_bb_images(user_root, search_id)\n\n return render(request, 'gallery.html',\n {\"object_id_list\": object_id_list,\n \"image_dir\": image_dir,\n \"bounding_box\": bounding_box_dict,\n \"flag_scan\": flag_scan,\n \"flag_classifier\":flag_classifier,\n \"image_type_list\":image_type_list})", "def readImages(respository,*rescale):\n record = []\n onlyfiles = [f for f in listdir(respository) if isfile(join(respository, f))]\n for image in onlyfiles:\n record = record+[readImage(join(respository, image),[0,1,2],rescale)]\n return record\n pass", "def get_images(self, page_number):", "def GetAllUserFiles(user, mkdir=False, dircheck=True, _homedir_fn=None):\n helper = compat.partial(GetUserFiles, user, mkdir=mkdir, dircheck=dircheck,\n _homedir_fn=_homedir_fn)\n result = [(kind, helper(kind=kind)) for kind in constants.SSHK_ALL]\n\n authorized_keys = [i for (_, (_, _, i)) in result]\n\n assert len(frozenset(authorized_keys)) == 1, \\\n \"Different paths for authorized_keys were returned\"\n\n return (authorized_keys[0],\n dict((kind, (privkey, pubkey))\n for (kind, (privkey, pubkey, _)) in result))", "def user_objects(cls, user):\n return cls.objects.filter(UserAccess.Q(user))", "def get_avatars():\n\n error_on_unauthorized()\n\n media = Avatar.query.order_by(Avatar.id)\n total_num = media.count()\n\n if total_num == 0:\n return jsonify(total=0, uploads=[])\n\n try:\n count = int(request.args.get('max', total_num))\n page = int(request.args.get('page', 1))\n\n if count <= 0 or page <= 0:\n raise APIError(422, \"Query parameters out of range\")\n\n begin = (page - 1) * count\n end = min(begin + count, total_num)\n\n return jsonify(total=total_num, uploads=[avatar_to_dict(a) for a in media.all()[begin:end]]), 200\n except ValueError:\n raise APIError(422, \"Invalid query parameter\")", "def getImages(region):\n creds = credentials()\n try:\n conn = ec2.connect_to_region(region, **creds)\n images = conn.get_all_images(owners=['self'])\n except boto.exception.EC2ResponseError:\n return []\n return images", "def get_images(path):\n\n # Cast path to absolute path\n absolute = 
abspath(path)\n\n img_lis = [] # Holds images in a folder\n file_lis = get_files(absolute)\n\n # Now get the images within file list\n img_lis = [f for f in file_lis if is_filetype(f)]\n\n return img_lis", "def find_image_files(self, apps, query_set, only_originals):\n # Figures out the models and cropduster fields on them\n for model, field_names in apputils.resolve_apps(apps):\n\n # Returns the queryset for each model\n query = self.get_queryset(model, query_set)\n for obj in query:\n\n for field_name in field_names:\n\n # Sanity check; we really should have a cropduster image here.\n cd_image = getattr(obj, field_name)\n if not (cd_image and isinstance(cd_image, CropDusterImage)):\n continue\n\n # Make sure the image actually exists.\n file_name = cd_image.image.path\n if not os.path.exists(file_name):\n sys.stderr.write('missing: %s\\n' % file_name)\n continue\n\n yield file_name\n if only_originals:\n continue\n \n # Get all derived images as well\n for path in self.get_derived_paths(cd_image):\n yield path", "def get_users(user_id):\n instance = storage.get(User, user_id)\n if not instance:\n abort(404)\n\n return jsonify(instance.to_dict())", "def get_picture_for_model(user_id, image_id):\n path = f'images/{user_id}/{image_id}'\n s3_resource.Object(bucket_name, path).download_file(f'{image_id}')\n return None", "def ldap_get_picture(self, user):\n result = super(Auth42, self)._search_not_empty(user)\n if result is not None:\n print result\n picture = result.get(\"picture\")[0]\n return base64.b64encode(picture)\n\n return None", "def get_photos(album_id):\n opts = RUN_OPTS.opts\n\n payload = {'albumId': album_id}\n try:\n resp = requests.get(PHOTOS_URL, params=payload,\n timeout=opts['timeout'])\n except (requests.ConnectionError, requests.ReadTimeout):\n print(f\"get for {PHOTOS_URL} errored or timed out after \" +\n f\"{opts['timeout']} seconds\")\n return None\n\n print_debug(f\"http status_code = {resp.status_code}; url = {resp.url}\")\n\n if not resp.ok:\n print(f'url for album_id={album_id} or \"{resp.url}\" not found: ' +\n f'status {resp.status_code}')\n return None\n\n photos = resp.json()\n if not resp.ok or len(photos) <= 0:\n print(f'zero rows returned for album_id={album_id}')\n return None\n\n return photos", "def get_all_images(self):\n self.roses.save_image()\n all_images = Images.get_all_images()\n self.assertTrue(len(all_images)<1)", "def images(self):\n return self._images", "def images(self):\n return self._images", "def list_images():\n return json_response(list_manifests())", "async def list(self, request):\n\n userid = await authenticated_userid(request)\n project = await request.app.context_project(request, userid)\n\n request['slog'].debug('Camera list requested')\n\n response_js = {\n 'camera_files': await Camera.list(request, userid=userid, project_id=project.project_id)\n }\n\n return web.json_response(response_js)" ]
[ "0.82408994", "0.7924064", "0.7504495", "0.74764735", "0.73077625", "0.7238596", "0.7227505", "0.7195243", "0.7164508", "0.68571115", "0.68380666", "0.682172", "0.67821753", "0.6758035", "0.6673719", "0.66615057", "0.66207945", "0.6405108", "0.62852436", "0.62733775", "0.6262646", "0.6245028", "0.6242795", "0.62121713", "0.62025416", "0.61912495", "0.6155935", "0.61101425", "0.6094304", "0.6048731", "0.6045684", "0.60421944", "0.60170954", "0.599998", "0.5999785", "0.596724", "0.59613186", "0.5950685", "0.5924", "0.5920478", "0.5888382", "0.5884809", "0.5881006", "0.5875199", "0.5862876", "0.5857146", "0.58549225", "0.5844845", "0.5843616", "0.58317393", "0.5819408", "0.5806291", "0.5802493", "0.5794073", "0.5792574", "0.57899106", "0.5781622", "0.5763054", "0.5755852", "0.574265", "0.5738412", "0.5737585", "0.5702009", "0.5700879", "0.5700749", "0.56959754", "0.56850183", "0.56823945", "0.5682156", "0.5682118", "0.5675698", "0.56732184", "0.566145", "0.566111", "0.5657847", "0.56577474", "0.56549567", "0.56507844", "0.56487864", "0.5642579", "0.5639769", "0.56351924", "0.56347597", "0.5632087", "0.5628672", "0.5628594", "0.5624761", "0.5609029", "0.56077963", "0.5584367", "0.5581292", "0.5577912", "0.5566735", "0.55555975", "0.5552645", "0.5549376", "0.55439794", "0.55439794", "0.5539196", "0.5534759" ]
"document_score": 0.6921353
"document_rank": 9
"query": "Test case for get_list"
"document": "def test_get_list(self):\n pass"
"metadata": { "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_listtem_using_get(self):\n pass", "def test_list(self):\n pass", "def test_list(self):\n pass", "def test_get_list8(self):\n pass", "def get_list(self, *args, **kwargs):\n pass", "def get_list(self, *args, **kwargs):\n pass", "def test_get_direct_access_list(self):\n result = self.param_dict.get_direct_access_list()\n self.assertTrue(isinstance(result, list))\n self.assertEquals(len(result), 2)\n self.assert_(\"foo\" in result)\n self.assert_(\"baz\" in result)", "def test_list_field():", "def getListItem(*args):", "def getListItem(*args):", "def getListItem(*args):", "def test_mocked_get_list_template(self):\n c = Client()\n response = c.get(reverse('mocked'))\n self.assertEqual(response.status_code, 200)\n self.assertIn(\"Here is the list of all possible apis:\",\n response.content)\n self.assertIn(\"^mocked_get$\", response.content)", "def getList(self):", "def getList(self):", "def test_cards_get_list(self):\n pass", "def test_get_list(self):\n #Validate the response\n resp = self.client.get('/api/v1/purchase-order/', format='json')\n self.assertEqual(resp.status_code, 200)\n \n #Validate the returned data\n resp = resp.data\n self.assertIsInstance(resp, list)\n self.assertEqual(len(resp), 1)", "def test_mocked_get_list_template(self):\n c = Client()\n response = c.get(reverse('mocked'))\n self.assertEqual(response.status_code, 200)\n self.assertIn(\"Here is the list of all possible apis:\",\n response.content)\n self.assertIn(\"^mocked_post$\", response.content)", "def test_list_format(self) -> None:\n r = self.perform_request('list', False)\n self.assert_json_schema(r.json(), self.get_list_schema())", "def list():", "def list():", "def testList(self):\n response = requests.get(url=self.url)\n headers = response.headers\n\n self.assertEqual(response.status_code, 200, WRONG_STATUS_CODE_MSG)\n self.assertEqual(\n headers['Content-Type'], 'application/json', WRONG_TYPE_RETURN_MSG)", "def test_get_list_empty(self):\r\n result = self.get_json(self.LIST_URI)\r\n self.assertEqual(result[\"count\"], 0)\r\n self.assertIsNone(result[\"next\"])\r\n self.assertIsNone(result[\"previous\"])\r\n self.assertEqual(result[\"results\"], [])", "def testGetList(self, tag, expected_value):\n actual_value = dicom_json.GetList(_DICOM_JSON, tag)\n self.assertEqual(actual_value, expected_value)", "def test_get_value_list_result(self):\n test_data = []\n test_data.append(json.loads('{\"name\": \"Pat\"}'))\n test_data.append(json.loads('{\"last_name\": \"Nat\"}'))\n test_data.append(json.loads('{\"name\": \"Gwen\"}'))\n\n key = \"name\"\n result_list = get_value_list(test_data, key)\n self.assertTrue(len(result_list) == 2)", "def test_get_value_list_value(self):\n test_data = []\n test_data.append(json.loads('{\"name\": \"Pat\"}'))\n test_data.append(json.loads('{\"last_name\": \"Nat\"}'))\n\n key = \"name\"\n result_list = get_value_list(test_data, key)\n self.assertTrue(result_list == ['Pat'])", "def test_list(self):\n self.assertEqual(1, len(self.response.data))", "def test_list(self):\n response = self.app.get(self.url('tags.list'))\n # Test response...", "def test_list_identity(self):\n pass", "def test_list_all(self):\n object_list = [self.factory.create() for i in range(3)]\n response = self._get()\n self.assertEquals(response.status_code, 200)\n self.assertTemplateUsed(response, self.template_name)\n self.assertEquals(response.context['object_list'].count(), 3)\n for obj in object_list:\n self.assertTrue(obj in response.context['object_list'])", "def _list(self):\n raise NotImplementedError", 
"def getList(self):\n\treturn self.list", "def test_can_list(self):\n post_req = self.post_json(\n 'users',\n {\n \"data\": {\n \"type\": \"user\",\n \"attributes\": {\n \"uid\": \"90792532401de273\",\n \"social\": \"PURPLE\",\n \"name\": \"Екатерина Трошина\"\n },\n }\n }\n )\n self.assertEqual(post_req.status_code, 201)\n\n get_req = self.get('users')\n self.assertEqual(get_req.status_code, 200)\n\n users_list = get_req.json['data']\n self.assertEqual(len(users_list), 1)\n\n post_req = self.post_json(\n 'users',\n {\n \"data\": {\n \"type\": \"user\",\n \"attributes\": {\n \"uid\": \"5868ca829b560a1d\",\n \"social\": \"BLUE\",\n \"name\": \"Инна Авдюшина\"\n },\n }\n }\n )\n self.assertEqual(post_req.status_code, 201)\n\n get_req = self.get('users')\n self.assertEqual(get_req.status_code, 200)\n\n users_list = get_req.json['data']\n self.assertEqual(len(users_list), 2)", "def test_list(self, array: dict) -> None:\r\n item = read_items(array)\r\n if read_type(item) == 'object':\r\n logger.debug('list -> dict')\r\n self.test_dict(obj=item)\r\n elif read_type(item) == 'array':\r\n logger.debug('list -> list')\r\n self.test_list(array=item)", "def getListingLists(**kwargs):", "def check_for_list(check):", "def test_quote_guest_payment_method_management_v1_get_list_get(self):\n pass", "def getList(self):\n pass", "def test_list_group(self):\n pass", "def test_multiple_results(self):\n obj_list = [self.factory.create(name='hello') for i in range(2)]\n response = self._get(get_kwargs={'search': 'ello'})\n self.assertEquals(response.status_code, 200)\n self.assertTemplateUsed(response, self.template_name)\n self.assertEquals(response.context['object_list'].count(), 2)\n for obj in obj_list:\n self.assertTrue(obj in response.context['object_list'])", "def handleList(self, _): # pylint: disable=invalid-name", "def test_get_list_foreign(self):\n create_snippet('foo')\n create_snippet('bar', owner=self.user)\n expected = [0, 1, 2]\n\n def check(i):\n response = self.get()\n self.assertEqual(len(response.data), expected[i])\n\n with constant('LIST_FOREIGN', False):\n self.check_for_users(check)", "def list(self):", "def test_list(self):\n url = '/api/users/'\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)\n r = response.json()\n self.assertTrue(isinstance(r['objects'], list))\n # Response should not contain inactive, contractors or shared accounts.\n self.assertContains(response, self.user1.email)\n self.assertNotContains(response, self.del_user.email)\n self.assertNotContains(response, self.contract_user.email)\n self.assertNotContains(response, self.shared.email)\n # Test the compact response.\n url = '/api/users/?compact=true'\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)\n # Test the minimal response.\n url = '/api/users/?minimal=true'\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)", "def test_get_item_list(self):\n resp = self.app.get('/items')\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n data = json.loads(resp.data)\n self.assertEqual(len(data), 3)", "def test_get_foods_list(self):\n pass", "def test_call_view_with_rigth_get_key(self, mock_import_list):\n mock_import_list.return_value = ([1, 2], LIST_RESULT)\n request = self.factory.get(self.url + '?list_name=list_1')\n self.view(request)\n mock_import_list.assert_called_once_with(\n request=request, filters={'list_name': 'list_1'})", "def get_list_helper(self, param):\n if param.name == 'list':\n address = param.address\n else:\n 
address = self.get_value(\n param.address, param.type_, param.is_global)\n return self.list_table[address]", "def test_kyc_get_legal_list(self):\n pass", "def test_list_properties(self):\n pass", "def test_method_list_all(self):\n\n locations_list = Location.list()\n\n # returned object should be a list\n self.assertIsInstance(locations_list, list)\n\n # make sure items returned are not duplicated. \n location_set = set(locations_list)\n self.assertEqual(len(locations_list), len(location_set))\n \n # ensure the types of the returned items are all 'Location'\n types = [type(location) for location in locations_list]\n self.assertEqual(len(set(types)), 1)\n self.assertEqual(types[0], Location)", "def test_get(self):\n pass", "def test_get_operations_list_with_correct_data(self):\n ops = self.client.get_operations_list(self.agent_id)\n self.assertIsInstance(ops, list)", "def _get_list(self, url, params=None, method=\"GET\"):\n request_args = {'method': method, 'url': url}\n if params is not None:\n request_args['params'] = params\n\n return self._paginated_generator(request_args)", "def test_get_filtered_list(self):\n flexmock(errata).should_receive(\"Advisory\").and_return(None)\n\n response = flexmock(status_code=200)\n response.should_receive(\"json\").and_return(test_structures.example_erratum_filtered_list)\n\n flexmock(errata.requests).should_receive(\"get\").and_return(response)\n\n res = errata.get_filtered_list()\n self.assertEqual(2, len(res))", "def get_list(cm_response, **data):\n return cm_response", "def __noop_list(self, *args, **kwargs):\n return []", "def test_list_options(self):\n pass", "def list(self, *args):\n return []", "def test_list_success_with_ids(self, mock_get):\n mock_response = Mock(name='response')\n mock_response.json.return_value = self.policies_list_response\n mock_get.return_value = mock_response\n\n # Call the method\n response = self.policies.list(filter_ids=[12345])\n\n self.assertIsInstance(response, dict)\n mock_get.assert_called_once_with(\n url='https://api.newrelic.com/v2/alert_policies.json',\n headers=self.policies.headers,\n params='filter[ids]=12345'\n )", "def test_list_members(self):\n pass", "def test_get_list_ending_goods(self):\n ending_goods_test = self.info_list.get_list_ending_goods()\n ending_goods_test_list = self.form_ending_list_goods() \n\n self.assertEqual(ending_goods_test, ending_goods_test_list)", "def test_listfield(self):\n self.assertEqual(self.scraped.urls, ['http://google.com', 'http://apple.com'])\n self.assertEqual(self.scraped.in_divs, ['Nested'])", "def test_module(client: Client, args: Dict[str, Any]) -> str:\n client.get_lists()\n return 'ok'", "def getListData(self):\n # by default no list is present\n return None", "def testGetRandomList():\n for n in range(1, 10):\n print(\"n: \", n, \" List:\", getRandomList(n))", "def test_list(self):\n bust_fragments(self.resp, ['/foo/bar', '/zip/zap'])\n self.assert_header_set('[\"/foo/bar\", \"/zip/zap\"]')", "def test_get_list(self):\n result = self.runner.invoke(\n cli,\n [\n *CLI_LOG_OPTION,\n \"config\",\n \"get\",\n \"vendor.fetchai.connections.p2p_libp2p.config.entry_peers\",\n ],\n standalone_mode=False,\n )\n assert result.exit_code == 0\n assert result.output == \"[]\\n\"", "def test_list_user(self):\n pass", "def test_list(self):\n response = self.client.get('/routines/')\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.data['count'], 2)\n self.assertEqual(len(response.data['results']), 2)\n 
self.assertEqual(response.data['results'][0]['id'], self.rout1.id)", "def test_v1_alert_list_get(self):\n pass", "def test_wallets_get_list(self):\n pass", "def test_list(self, env: yaenv.Env):\n _val = env.list('LIST_VAR', separator=':')\n _expect = ['item1', 'item2']\n assert _val == _expect and type(_val) == list\n _expect.append('item3')\n _val = env.list('MISSING', _expect)\n assert _val == _expect and type(_val) == list\n assert env.list('MISSING') is None", "def test_get_token_supply_all_using_get(self):\n pass", "def test_message_list():", "def testList(self):\n def _check(results):\n self.assertEqual(results[0], [b'testDirectory', b'testRemoveFile',\n b'testRenameFile', b'testfile1'])\n self.assertEqual(results[1], [b'testDirectory', b'testRemoveFile',\n b'testRenameFile', b'testfile1'])\n self.assertEqual(results[2], [b'testRemoveFile', b'testRenameFile'])\n self.assertEqual(results[3], [b'.testHiddenFile', b'testRemoveFile',\n b'testRenameFile'])\n self.assertEqual(results[4], [b''])\n d = self.runScript('ls', 'ls ../' + self.testDir.basename(),\n 'ls *File', 'ls -a *File', 'ls -l testDirectory')\n d.addCallback(lambda xs: [x.split(b'\\n') for x in xs])\n return d.addCallback(_check)", "def test_home_route_has_list2(testapp):\n response = testapp.get('/', status=200)\n html = response.html\n assert len(html.find_all(\"ul\")) == 1", "def test_get_startup_list(self):\n result = self.param_dict.get_startup_list()\n self.assertTrue(isinstance(result, list))\n self.assertEquals(len(result), 2)\n self.assert_(\"foo\" in result)\n self.assert_(\"bar\" in result)", "def makeListF(f, url, *argsf, caseSensitive = False, wildCards = True):", "def test_handle_list(self):\n team = Team(\"BRS\", \"brs\", \"web\")\n team2 = Team(\"OTEAM\", \"other team\", \"android\")\n self.db.query.return_value = [team, team2]\n attach = team.get_basic_attachment()\n attach2 = team2.get_basic_attachment()\n attachment = [attach, attach2]\n with self.app.app_context():\n resp, code = self.testcommand.handle(\"team list\", user)\n expect = {'attachments': attachment}\n self.assertDictEqual(resp, expect)\n self.assertEqual(code, 200)\n self.db.query.assert_called_once_with(Team)", "def test_list_success(self, mock_get):\n mock_response = Mock(name='response')\n mock_response.json.return_value = self.policies_list_response\n mock_get.return_value = mock_response\n\n # Call the method\n response = self.policies.list()\n\n self.assertIsInstance(response, dict)", "def test_list_success(self, mock_get):\n mock_response = Mock(name='response')\n mock_response.json.return_value = self.policies_list_response\n mock_get.return_value = mock_response\n\n # Call the method\n response = self.policies.list()\n\n self.assertIsInstance(response, dict)", "def test_catalog_category_attribute_repository_v1_get_list_get(self):\n pass", "def test_gettem_using_get(self):\n pass", "def test_if_app_gets_shoppinglists(self):\n li = self.client.get('/shoppinglists/?each_page=1&page_number=1',\n headers = {\n 'Content-Type':'application/json',\n 'x-access-token':self.tok})\n self.assertEqual(li.status_code, 200)", "def _get_list(self, key, operation, create=False, decode=False):\n return self._get_by_type(key, operation, create, b'list', [], decode=decode)", "def test_get_queryset(list_entry_factory, profile_item_factory):\n item = profile_item_factory()\n list_entry_factory(profile_item=item)\n list_entry_factory()\n\n view = views.ListEntryListView()\n view.kwargs = {\"pk\": item.pk}\n\n assert list(view.get_queryset()) == 
list(item.list_entries.all())", "def test_people_response_is_list(self):\n data = GetMoviesData()\n response = data.get_all_people()\n self.assertEqual(type(response), list)", "def test_get_collection(self):\n pass", "def test_users_moira_list(logged_in_apiclient, mock_moira_client):\n client, user = logged_in_apiclient\n user.is_staff = True\n user.save()\n client.force_login(user)\n list_names = [\"test_moira_list01\", \"test_moira_list02\"]\n mock_moira_client.return_value.user_list_membership.return_value = [\n {\"listName\": list_name} for list_name in list_names\n ]\n\n username_or_email = [\n user.username,\n user.email,\n UserFactory(email=\"user-name.1@mit.edu\").email,\n ]\n\n for arg in username_or_email:\n url = reverse(\"member-lists\", kwargs={\"username_or_email\": arg})\n expected = {\"user_lists\": list_names}\n\n response = client.get(url)\n\n assert response.status_code == status.HTTP_200_OK\n assert expected == response.data", "def test_get2(self):\n pass", "def test_csc_authorization_request_list_authlist_user(self):\n # Arrange:\n self.client.credentials(\n HTTP_AUTHORIZATION=\"Token \" + self.token_user_authlist.key\n )\n\n # Act:\n url = reverse(\"authlistrequest-list\")\n response = self.client.get(url, format=\"json\")\n\n # Assert\n self.assertEqual(response.status_code, 200)\n self.assertEqual(len(response.data), 3)", "def list() -> List:\n pass", "def test_client_list(self):\n pass", "def test_cell_list_parse_param_success(self, mock_list):\n self.shell('cell-list -r 1 --limit 0')\n self.assertTrue(mock_list.called)", "def test_in_order_list(self):\n _expected_list = [5, 13, 23, 57, 103]\n _output_list = []\n \n # Call in_order_list to test\n in_order_list(self.root, _output_list)\n\n # We just want to test the values\n # so make a list from the list of objects\n _sorted_output = [x.get_value() for x in _output_list]\n\n assert len(_expected_list) == len(_output_list)\n assert _expected_list == _sorted_output", "def test_task_list():\n # Fake pyramid request, useful for testing.\n request = testing.DummyRequest()\n\n pytest.fail('Not implemented yet.')", "def test_cell_list_fields_success(self, mock_printlist, mock_list):\n self.shell('cell-list -r 1 --fields id name')\n mock_list.assert_called_once_with()\n mock_printlist.assert_called_once_with(mock.ANY,\n list({'id': 'ID',\n 'name': 'Name'}))", "def test_list(self):\n self.userbase('create', 'alice', 'localhost', SECRET)\n self.userbase('create', 'bob', 'localhost', SECRET)\n output = self.userbase('list')\n self.assertEqual(output, ['alice@localhost', 'bob@localhost'])", "def test_list_all_bucektlists_for_authenticated_user(self):\n\n response = self.client.get(\n \"/bucketlists/\",\n headers={'Authorization': self.user_token}\n )\n\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response.data, '[]\\n')", "def list(self):\n return self.request(\"GET\")" ]
[ "0.81973624", "0.8002237", "0.8002237", "0.7820368", "0.7754413", "0.7754413", "0.7502839", "0.7356105", "0.7302355", "0.7302355", "0.7302355", "0.72928464", "0.7265907", "0.7265907", "0.72070205", "0.7167211", "0.7133098", "0.7123853", "0.70505875", "0.70505875", "0.70437384", "0.70271146", "0.70129824", "0.6967398", "0.6917528", "0.6909987", "0.69075656", "0.6883668", "0.684958", "0.6837185", "0.6819339", "0.6782561", "0.67602885", "0.67518777", "0.6746048", "0.67070866", "0.67011863", "0.6692583", "0.66895854", "0.6670631", "0.6648753", "0.66320175", "0.6630937", "0.6630206", "0.661996", "0.66138613", "0.65981793", "0.6590575", "0.6583944", "0.6582598", "0.6577195", "0.6561526", "0.6557459", "0.6534169", "0.6524999", "0.651551", "0.6498656", "0.64984083", "0.6494407", "0.6493459", "0.64918923", "0.6486176", "0.6482174", "0.64772075", "0.6474012", "0.6473891", "0.6462962", "0.6457721", "0.6437773", "0.6429383", "0.6428787", "0.64265585", "0.6420366", "0.6417866", "0.63926244", "0.63758177", "0.6346955", "0.63405067", "0.63345504", "0.63332", "0.63332", "0.63230145", "0.6314913", "0.63131726", "0.6311985", "0.63114727", "0.6292943", "0.62923306", "0.62911713", "0.62889886", "0.62884533", "0.6279126", "0.6279114", "0.6278825", "0.6277514", "0.62753034", "0.62735593", "0.6271441", "0.6271218", "0.6271216" ]
0.90654755
0
Test case for get_machine_translate_settings_for_project_template
def test_get_machine_translate_settings_for_project_template(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_get_mt_settings(self):\n pass", "def test_get_translation_resources(self):\n pass", "def test_settings(self):\n \n self.assertTrue(settings.USE_I18N, msg=\"setting USE_I18N must be True to have languages working\")", "def test_supported_translations_retrieval(self):\n\t\t\n\t\thelpers.find_supported_translations()\n\t\tself.assertTrue(helpers.get_supported_translations() != 0)", "def get_translation(self):", "def test_get_tosca_template(self):\n pass", "def machine_translation(request):\n log.debug(\"Get translation from machine translation service.\")\n\n try:\n text = request.GET['text']\n locale = request.GET['locale']\n check = request.GET['check']\n except MultiValueDictKeyError as e:\n log.error(str(e))\n return HttpResponse(\"error\")\n\n if hasattr(settings, 'MICROSOFT_TRANSLATOR_API_KEY'):\n api_key = settings.MICROSOFT_TRANSLATOR_API_KEY\n else:\n log.error(\"MICROSOFT_TRANSLATOR_API_KEY not set\")\n return HttpResponse(\"apikey\")\n\n obj = {}\n\n # On first run, check if target language supported\n if check == \"true\":\n supported = False\n languages = settings.MICROSOFT_TRANSLATOR_LOCALES\n\n if locale in languages:\n supported = True\n\n else:\n for lang in languages:\n if lang.startswith(locale.split(\"-\")[0]): # Neutral locales\n supported = True\n locale = lang\n break\n\n if not supported:\n log.debug(\"Locale not supported.\")\n return HttpResponse(\"not-supported\")\n\n obj['locale'] = locale\n\n url = \"http://api.microsofttranslator.com/V2/Http.svc/Translate\"\n payload = {\n \"appId\": api_key,\n \"text\": text,\n \"from\": \"en\",\n \"to\": locale,\n \"contentType\": \"text/html\",\n }\n\n try:\n r = requests.get(url, params=payload)\n log.debug(r.content)\n\n # Parse XML response\n root = ET.fromstring(r.content)\n translation = root.text\n obj['translation'] = translation\n\n return HttpResponse(json.dumps(obj), content_type='application/json')\n\n except Exception as e:\n log.error(e)\n return HttpResponse(\"error\")", "def test_get_templates_in_virtualization_realm(self):\n pass", "def test_retrieve_project(self):\n setting_name = 'project_str_setting'\n url = reverse(\n 'projectroles:api_project_setting_retrieve',\n kwargs={'project': self.project.sodar_uuid},\n )\n get_data = {'app_name': EX_APP_NAME, 'setting_name': setting_name}\n response = self.request_knox(url, data=get_data)\n\n self.assertEqual(response.status_code, 200, msg=response.content)\n response_data = json.loads(response.content)\n expected = {\n 'app_name': EX_APP_NAME,\n 'project': str(self.project.sodar_uuid),\n 'user': None,\n 'name': setting_name,\n 'type': 'STRING',\n 'value': self.project_str_setting['value'],\n 'user_modifiable': True,\n }\n self.assertEqual(response_data, expected)", "def test_workflows_id_templates_get(self):\n pass", "def projects_settings():\n return map_settings(settings_repository.settings)", "def test_default_translations(self):\n\t\t\n\t\tself.assertTrue(data.get_default_translation('Catholicism', 3) == 'DRA')\n\t\tself.assertTrue(data.get_default_translation('Christianity', 3) == 'ESV')", "def test_simple_translation_using_get(self):\n pass", "def setUp(self):\n super().setUp()\n translation.activate(\"en-us\")", "def test_get_language(self):\n with translation.override(\"fr\"):\n # Despite being\n # Initialize form in other language.\n x = SimpleModel(shared=\"SHARED\", tr_title=\"TRANS\", _current_language=\"nl\")\n self.assertEqual(x.get_current_language(), \"nl\")\n x.save()\n\n x2 = SimpleModel.objects.language(\"nl\").get(pk=x.pk)\n 
self.assertEqual(x2.get_current_language(), \"nl\")\n self.assertEqual(x2.shared, \"SHARED\")\n self.assertEqual(x2.tr_title, \"TRANS\")", "def test_get_activity_templates(self):\n pass", "def test_find_target(translation_folder: Path) -> None:\n # test grid\n test_grid = {\n \"en\": (\"en\", \"en\"),\n \"en-US\": (\"en-US\", \"en\"),\n \"fr-FR\": (\"fr-FR\", \"fr-FR\"),\n \"fr-CA\": (\"fr-CA\", \"fr\"),\n \"fr\": (\"fr\", \"fr\"),\n \"da\": (\"da\", \"\"),\n }\n\n # loop in the test grid to check multiple language combinations\n for k, v in test_grid.items():\n assert Translator.find_target(translation_folder, k) == v\n\n return", "def test_init(translation_folder: Path, tmp_config_file: Path) -> None:\n # assert that the test key exist in fr\n translator = Translator(translation_folder, \"fr\")\n assert translator.test_key == \"Clef de test\"\n\n # assert that the the code work if the path is a str\n translator = Translator(str(translation_folder), \"fr\")\n assert translator.test_key == \"Clef de test\"\n\n # assert that the test does not exist in es and we fallback to en\n translator = Translator(translation_folder, \"es\")\n assert translator.test_key == \"Test key\"\n\n # assert that using a non existing lang lead to fallback to english\n translator = Translator(translation_folder, \"it\")\n assert translator.test_key == \"Test key\"\n\n # assert that if nothing is set it will use the confi_file (fr-FR)\n translator = Translator(translation_folder)\n assert translator.test_key == \"Clef de test\"\n\n # check the internal variables once to make sure that they are not removed/changed\n assert translator._folder == str(translation_folder)\n assert translator._default == \"en\"\n assert translator._targeted == \"fr-FR\"\n assert translator._target == \"fr-FR\"\n assert translator._match is True\n\n # Check that is failing when using\n\n return", "def test_get_project(self):\n pass", "def test_create_project_from_template(self):\n project_new = self.project_template.take_template()\n\n self.assertTrue(project_new)", "def test_config(self):\n self.assertEqual(self.view.template_name, \"resources/templanguage_admin.html\")", "def test_get_languages(self):\n title = ('Transportation Challenges Limit Education Choices for '\n 'Denver Parents')\n summary = \"\"\"\n Many families in the Denver metro area use public\n transportation instead of a school bus because for them, a\n quality education is worth hours of daily commuting. Colorado's\n school choice program is meant to foster educational equity,\n but the families who benefit most are those who have time and\n money to travel. Low-income families are often left in a lurch.\n \"\"\"\n byline = \"Mile High Connects\"\n story = create_story(title=title, summary=summary, byline=byline)\n self.assertEqual([settings.LANGUAGE_CODE], story.get_languages())", "def test_get_languages(self):\n title = ('Transportation Challenges Limit Education Choices for '\n 'Denver Parents')\n summary = \"\"\"\n Many families in the Denver metro area use public\n transportation instead of a school bus because for them, a\n quality education is worth hours of daily commuting. Colorado's\n school choice program is meant to foster educational equity,\n but the families who benefit most are those who have time and\n money to travel. 
Low-income families are often left in a lurch.\n \"\"\"\n byline = \"Mile High Connects\"\n story = create_story(title=title, summary=summary, byline=byline)\n self.assertEqual([settings.LANGUAGE_CODE], story.get_languages())", "def test_config(self):\n self.assertIs(self.view.model, TempLanguage)\n self.assertEqual(self.view.template_name, \"resources/templanguage_list.html\")", "def test_config(self):\n self.assertIs(self.view.model, TempLanguage)\n self.assertEqual(self.view.template_name, \"resources/templanguage_detail.html\")", "def test_retrieve_template_registration(self):\n pass", "def testCheckTranslationNeededWithTranslatableTrue(self):\n resourceAttributes = {self.converter.TRANSLATABLE_FLAG: 'true',\n 'name': 'test'}\n self.assertEqual(self.converter._needsTranslation(resourceAttributes),\n 'true')", "def test_04_public_private_template_use_in_project(self):\n # 1. Create a project\n # 2. Verify Public templates can be used without any restriction\n # 3. Verify that private template created in project belongs to this project\n # Verify that list template api wth project id list this template\n\n\n\n try:\n self.debug(\"Deploying VM for with public template: %s\" %\n self.template.id)\n virtual_machine_1 = VirtualMachine.create(\n self.apiclient,\n self.services[\"server\"],\n templateid=self.template.id,\n serviceofferingid=self.service_offering.id,\n projectid=self.project.id\n )\n self.cleanup.append(virtual_machine_1)\n # Verify VM state\n self.assertEqual(\n virtual_machine_1.state,\n 'Running',\n \"Check VM state is Running or not\"\n )\n virtual_machine_1.stop(self.apiclient)\n # Get the Root disk of VM\n volumes = list_volumes(\n self.apiclient,\n projectid=self.project.id,\n type='ROOT',\n listall=True\n )\n self.assertEqual(\n isinstance(volumes, list),\n True,\n \"Check for list volume response return valid data\"\n )\n volume = volumes[0]\n\n self.debug(\"Creating template from volume: %s\" % volume.id)\n # Create a template from the ROOTDISK\n template_1 = Template.create(\n self.apiclient,\n self.services[\"template\"],\n volumeid=volume.id,\n projectid=self.project.id\n )\n\n self.cleanup.append(template_1)\n # Verify Template state\n self.assertEqual(\n template_1.isready,\n True,\n \"Check Template is in ready state or not\"\n )\n # Verify list template with project id is listing this template\n templatelist = Template.list(self.apiclient,projectid=self.project.id,id=template_1.id,templatefilter=\"all\")\n self.assertEqual(templatelist[0].id,template_1.id,\"template created does not belong to the project\")\n\n\n except Exception as e:\n self.fail(\"Exception occurred: %s\" % e)\n return", "def get_substitution_dictionary(language_code,section,template_type):\n config = SafeConfigParser()\n path = join(PROPERTIES_PATH,language_code+PROPERTIES_EXTENSION)\n #Since it may appear utf-8 characters we read this file coded as utf-8\n with codecs.open(path,'r',encoding='utf-8') as f:\n if(config.readfp(f)==[]):\n exit('The settings file is empty or it\\'s not where it\\'s supposed to be.\\n'\\\n 'it should be under {0} (I was looking for this file: {1})'.format(PROPERTIES_PATH,path))\n \n options = config.options(section)\n #print options\n substitution_words = {}\n #If the section has multiple values\n if (template_type not in MULTIPLE_PROPERTIES.keys()):\n for option in options:\n #Store words in unicode since jinja2 uses it\n substitution_words[option] = unicode(config.get(section,option))\n else:\n for option in options:\n if (template_type not in 
substitution_words.keys()):\n #The template_type is used as variable for the list in the template\n substitution_words[template_type] = [{MULTIPLE_PROPERTIES[template_type][0]:unicode(option),\n MULTIPLE_PROPERTIES[template_type][1]:\n unicode(config.get(section,option))}]\n else:\n substitution_words[template_type].append({MULTIPLE_PROPERTIES[template_type][0]:unicode(option),\n MULTIPLE_PROPERTIES[template_type][1]:\n unicode(config.get(section,option))})\n\n return substitution_words", "def test_read_project(self):\n pass", "def test_read_project(self):\n pass", "def test_translate_gengo_machine_unknown_language(self):\n with patch('fjord.translations.gengo_utils.requests') as requests_mock:\n post_return = MagicMock()\n post_return.json.return_value = {\n u'text_bytes_found': 10,\n u'opstat': u'ok',\n u'is_reliable': True,\n u'detected_lang_code': u'un',\n u'details': [],\n u'detected_lang_name': u'Unknown'\n }\n requests_mock.post.return_value = post_return\n\n with patch('fjord.translations.gengo_utils.Gengo') as GengoMock:\n gengo_mock_instance = GengoMock.return_value\n\n obj = fakeinstance(\n id=10101,\n fields={'desc': 'trans_desc'},\n translate_with=lambda x: 'gengo_machine',\n desc=u'Muy lento'\n )\n eq_(getattr(obj, 'trans_desc', None), None)\n translate(obj, 'gengo_machine', 'es', 'desc', 'en-US',\n 'trans_desc')\n eq_(getattr(obj, 'trans_desc', None), None)\n\n # Make sure we don't call postTranslationJobs().\n eq_(gengo_mock_instance.postTranslationJobs.call_count, 0)", "def test_get_activity_template(self):\n pass", "def test_multilingual_pipeline():\n run_multilingual_pipeline()", "def setUp(self):\n casDict = {\"Singular\":\"Singular\", \"Magma\":\"magma\", \"Maple\":\"maple\"}\n timeCommand = \"time -p\"\n self.msTest = MS.MachineSettings(casDict,timeCommand)\n self.builder = MSFXMLB.MachineSettingsFromXMLBuilder()", "def setUp(self):\n self.settings = MockSettings()\n django_yamlconf.load(project=\"testing\", settings=self.settings)", "def test_multilingual_config():\n lang_configs = {\n \"en\": {\"processors\": \"tokenize\"}\n }\n\n run_multilingual_pipeline(en_has_dependencies=False, lang_configs=lang_configs)", "def test_get_languages_multiple(self):\n title = ('Transportation Challenges Limit Education Choices for '\n 'Denver Parents')\n summary = \"\"\"\n Many families in the Denver metro area use public\n transportation instead of a school bus because for them, a\n quality education is worth hours of daily commuting. Colorado's\n school choice program is meant to foster educational equity,\n but the families who benefit most are those who have time and\n money to travel. 
Low-income families are often left in a lurch.\n \"\"\"\n byline = \"Mile High Connects\"\n story = create_story(title=title, summary=summary, byline=byline)\n translation = StoryTranslation(story=story, title=\"Spanish Title\",\n summary=\"Spanish Summary\", language=\"es\")\n translation.save()\n self.assertEqual([settings.LANGUAGE_CODE, 'es'], story.get_languages())", "def GetTemplateSetting(decode_cfg):\n version = 0x0\n size = setting = None\n version = GetField(decode_cfg, 'version', Setting_6_2_1['version'], raw=True)\n template_version = version\n # search setting definition top-down\n for cfg in sorted(Settings, key=lambda s: s[0], reverse=True):\n if version >= cfg[0]:\n template_version = cfg[0]\n size = cfg[1]\n setting = cfg[2]\n break\n\n return template_version, version, size, setting", "def test_i18n14(self):\n with translation.override('de'):\n output = self.engine.render_to_string('i18n14')\n self.assertEqual(output, 'foo Passwort Passwort')", "def test_translation_get_translate_document(self):\n file_name = \"test_en.html\"\n src_lang = \"en\"\n res_lang = \"de\"\n try:\n # Upload file to storage\n res = TestHelper.upload_file(file_name)\n self.assertEqual(res.Code, 200, \"Error upload file to server\")\n\n # Translate document\n res = self.api.translation_get_translate_document(file_name, src_lang, res_lang,\n folder=TestHelper.folder)\n self.assertTrue(isinstance(res, str), \"Error translate html document\")\n\n # Move to test folder\n TestHelper.move_file(str(res), TestHelper.test_dst)\n except ApiException as ex:\n print(\"Exception\")\n print(\"Info: \" + str(ex))\n raise ex", "async def test_setting_attribute_with_template(\n hass: HomeAssistant, mqtt_mock_entry: MqttMockHAClientGenerator\n) -> None:\n await help_test_setting_attribute_with_template(\n hass, mqtt_mock_entry, select.DOMAIN, DEFAULT_CONFIG\n )", "def testCheckTranslationNeededWithTranslatableFalse(self):\n resourceAttributes = {self.converter.TRANSLATABLE_FLAG: 'false',\n 'name': 'test'}\n self.assertEqual(self.converter._needsTranslation(resourceAttributes),\n 'false')", "def test_get_subscription_templates(self):\n pass", "def test_translate_unique_langs(self):\n\n trans_msgs_dict = MessageController.translate_unique_langs({'2': 'es', '4': 'fr'}, \n 'hi', 'en', False, False)\n\n self.assertEqual(trans_msgs_dict, {'es': u'{hola}', 'fr': u'salut'})", "def test_translate_gengo_machine_unsupported_language(self):\n gengo_utils.GENGO_LANGUAGE_CACHE = [u'de']\n\n with patch('fjord.translations.gengo_utils.requests') as requests_mock:\n post_return = MagicMock()\n post_return.json.return_value = {\n u'text_bytes_found': 40,\n u'opstat': u'ok',\n u'is_reliable': False,\n u'detected_lang_code': u'es',\n u'details': [\n [u'SPANISH', u'es', 62, 46.728971962616825],\n [u'ITALIAN', u'it', 38, 9.237875288683602]\n ],\n u'detected_lang_name': u'SPANISH'\n }\n requests_mock.post.return_value = post_return\n\n with patch('fjord.translations.gengo_utils.Gengo') as GengoMock:\n gengo_mock_instance = GengoMock.return_value\n\n obj = fakeinstance(\n id=10101,\n fields={'desc': 'trans_desc'},\n translate_with=lambda x: 'gengo_machine',\n desc=u'Muy lento'\n )\n eq_(getattr(obj, 'trans_desc', None), None)\n translate(obj, 'gengo_machine', 'es', 'desc', 'en-US',\n 'trans_desc')\n eq_(getattr(obj, 'trans_desc', None), None)\n\n # Make sure we don't call postTranslationJobs().\n eq_(gengo_mock_instance.postTranslationJobs.call_count, 0)", "def test_translatedfieldsmodel_str(self):\n missing_language_code = \"xx\"\n obj = 
SimpleModel.objects.create(tr_title=\"Something\")\n\n # Adjust translation object to use language_code that is not\n # configured. It is easier because various Django version behave\n # differently if we try to use not configured language.\n translation = obj.translations.get()\n translation.language_code = missing_language_code\n translation.save()\n # Try to get str() of the TranslatedFieldsModel instance.\n try:\n translation_as_str = str(obj.translations.get())\n except KeyError:\n self.fail(\"Converting translation to string raises KeyError\")\n\n # Check that we get language code as a fallback, when language is\n # not configured.\n self.assertEqual(translation_as_str, missing_language_code)", "def test_i18n15(self):\n with translation.override('de'):\n output = self.engine.render_to_string('i18n15', {'absent': ''})\n self.assertEqual(output, 'Passwort')", "def test_get_languages_multiple(self):\n title = ('Transportation Challenges Limit Education Choices for '\n 'Denver Parents')\n summary = \"\"\"\n Many families in the Denver metro area use public\n transportation instead of a school bus because for them, a\n quality education is worth hours of daily commuting. Colorado's\n school choice program is meant to foster educational equity,\n but the families who benefit most are those who have time and\n money to travel. Low-income families are often left in a lurch.\n \"\"\"\n byline = \"Mile High Connects\"\n story = create_story(title=title, summary=summary, byline=byline)\n translation = StoryTranslation(story=story, title=\"Spanish Title\",\n summary=\"Spanish Summary\", language=\"es\")\n translation.save()\n story_languages = story.get_languages()\n self.assertEqual(len(story_languages), 2)\n for code in (settings.LANGUAGE_CODE, 'es'):\n self.assertIn(code, story_languages)", "def get_translation(self):\n trans_keys = ''.join(self._trans_dict.keys())\n trans_values = ''.join(self._trans_dict.values())\n\n trans_table = string.maketrans(trans_keys, trans_values)\n translation = self._puzzle.translate(trans_table)\n return translation", "def setUp(self):\n casDict = {\"Singular\":\"Singular\", \"Magma\":\"magma\", \"Maple\":\"maple\"}\n timeCommand = \"time -p\"\n self.msTest = MS.MachineSettings(casDict,timeCommand)", "def get_settings():\n settings_path = os.path.join(get_config_home(), 'tcharmap', 'settings.yaml')\n try:\n return yaml.safe_load(open(settings_path))\n except FileNotFoundError:\n return {'auto_copy': False}", "def testCheckTranslationNeededWithTranslatableNotProvided(self):\n resourceAttributes = {'name': 'test'}\n self.assertEqual(self.converter._needsTranslation(resourceAttributes),\n 'true')", "def test_i18n09(self):\n with translation.override('de'):\n output = self.engine.render_to_string('i18n09')\n self.assertEqual(output, 'Page not found')", "def make_translated_text():\n return {\n code: ''\n for code, name\n in settings.LANGUAGES\n }", "def test_language_sensitivity(self): \n \n for lang in self.LANGUAGES:\n activate(lang)\n \n self.assertEqual(get_wording_text('test_1'), lang)", "def test_language_fix(self):\n #TODO\n \n for lang in self.LANGUAGES:\n activate(lang)\n \n self.assertEqual(lang, get_language())", "def test_template_task_config(exopy_qtbot, task_workbench):\n plugin = task_workbench.get_plugin('exopy.tasks')\n\n path = os.path.join(os.path.dirname(__file__),\n 'test_template.task.ini')\n root = RootTask()\n config = TemplateTaskConfig(manager=plugin,\n template_path=path,\n future_parent=root)\n assert config.template_doc\n task = 
config.build_task()\n assert len(task.children) == 1\n\n show_and_close_widget(exopy_qtbot, TemplateConfigView(config=config))", "def test_config(self):\n self.assertIs(self.view.model, TempLanguage)\n self.assertIs(self.view.form_class, TempLanguageForm)\n self.assertEqual(self.view.template_name, \"resources/templanguage_form.html\")", "def test_fallback_values_2(self):\n title1_de = \"title de\"\n text1_de = \"text in german\"\n n = TestModelWithFallback2()\n n.title = title1_de\n n.text = text1_de\n n.save()\n del n\n n = TestModelWithFallback2.objects.get(title=title1_de)\n trans_real.activate(\"en\")\n self.failUnlessEqual(n.title, title1_de)\n self.failUnlessEqual(n.text,\\\n TestTranslationOptionsWithFallback2.fallback_values['text'])", "def test_get_languages(self):\n languages = get_languages(self.edition_data[\"languages\"])\n self.assertEqual(languages, [\"English\"])", "def test_i18n13(self):\n with translation.override('de'):\n output = self.engine.render_to_string('i18n13')\n self.assertEqual(output, 'Passwort')", "def test_team_template_folders_id_templates_get(self):\n pass", "def test_settings_file_content(mock_empty_os_environ, settings_file_content):\n settings_map = settings_parser.Settings(prefix='TEST_STUFF', settings_files=settings_file_content)\n assert dict(settings_map) == {'a': {'b': 5}}", "def test_i18n23(self):\n with translation.override('de'):\n output = self.engine.render_to_string('i18n23')\n self.assertEqual(output, 'nicht gefunden')", "def my_settings():\n print(f\"\\nmy_settings fixture in {__file__}\")\n return {\"name\": \"Eric\"}", "def test_retrieve_project_unset(self):\n setting_name = 'project_str_setting'\n default_value = app_settings.get_default(EX_APP_NAME, setting_name)\n q_kwargs = {\n 'app_plugin__name': EX_APP_NAME,\n 'name': setting_name,\n 'project': self.project,\n }\n AppSetting.objects.get(**q_kwargs).delete()\n\n url = reverse(\n 'projectroles:api_project_setting_retrieve',\n kwargs={'project': self.project.sodar_uuid},\n )\n get_data = {'app_name': EX_APP_NAME, 'setting_name': setting_name}\n response = self.request_knox(url, data=get_data)\n\n self.assertEqual(response.status_code, 200, msg=response.content)\n response_data = json.loads(response.content)\n expected = {\n 'app_name': EX_APP_NAME,\n 'project': str(self.project.sodar_uuid),\n 'user': None,\n 'name': setting_name,\n 'type': 'STRING',\n 'value': default_value,\n 'user_modifiable': True,\n }\n self.assertEqual(response_data, expected)\n self.assertIsInstance(AppSetting.objects.get(**q_kwargs), AppSetting)", "def test_get_topology_template(self):\n pass", "def test_word_translation(self):\n self.assertEqual(translator.translate_word(\"hour\"), \"ourhay\")\n self.assertEqual(translator.translate_word(\"\"), \"\")\n self.assertEqual(translator.translate_word(\"aaa\"), \"aaayay\")", "def test_update_template_profile_for_system_module(self):\n pass", "def test_language_conversion(self):\n\n self.assertEqual(self.converter.language, LANGUAGE_CODE,\n 'Check the language conversion.')", "def test_05_use_private_template_in_project(self):\n # 1. Create a project\n # 2. 
Verify that in order to use somebody's Private template for vm\n # creation in the project, permission to use the template has to\n # be granted to the Project (use API 'updateTemplatePermissions'\n # with project id to achieve that).\n\n try:\n self.debug(\"Deploying VM for with public template: %s\" %\n self.template.id)\n virtual_machine_1 = VirtualMachine.create(\n self.apiclient,\n self.services[\"server\"],\n templateid=self.template.id,\n serviceofferingid=self.service_offering.id,\n projectid=self.project.id\n )\n self.cleanup.append(virtual_machine_1)\n # Verify VM state\n self.assertEqual(virtual_machine_1.state,\n 'Running',\n \"Check VM state is Running or not\")\n virtual_machine_1.stop(self.apiclient)\n # Get the Root disk of VM\n volumes = list_volumes(\n self.apiclient,\n projectid=self.project.id,\n type='ROOT',\n listall=True\n )\n self.assertEqual(\n isinstance(volumes, list),\n True,\n \"Check for list volume response return valid data\"\n )\n volume = volumes[0]\n\n self.debug(\"Creating template from volume: %s\" % volume.id)\n # Create a template from the ROOTDISK\n template_1 = Template.create(\n self.userapiclient,\n self.services[\"template\"],\n volumeid=volume.id\n )\n\n self.cleanup.append(template_1)\n # Verify Template state\n self.assertEqual(\n template_1.isready,\n True,\n \"Check Template is in ready state or not\"\n )\n\n # Update template permissions to grant permission to project\n self.debug(\n \"Updating template permissions:%s to grant access to project: %s\" % (\n template_1.id,\n self.project.id\n ))\n\n template_1.updatePermissions(\n self.apiclient,\n op='add',\n projectids=self.project.id\n )\n self.debug(\"Deploying VM for with privileged template: %s\" %\n self.template.id)\n virtual_machine_2 = VirtualMachine.create(\n self.apiclient,\n self.services[\"server\"],\n templateid=template_1.id,\n serviceofferingid=self.service_offering.id,\n projectid=self.project.id\n )\n self.cleanup.append(virtual_machine_2)\n # Verify VM state\n self.assertEqual(\n virtual_machine_2.state,\n 'Running',\n \"Check VM state is Running or not\"\n )\n except Exception as e:\n self.fail(\"Exception occurred: %s\" % e)\n return", "def test_translate(self):\n result = self.app.get('/translate?text=Something')\n self.assertEqual(result.status_code, 200)\n\n result = self.app.get('/translate')\n self.assertEqual(result.status_code, 500)", "def test_translate_test_mapping(self, _info, mock_testmapping):\n # Check that test mappings feeds into get_test_info properly.\n test_detail1 = test_mapping.TestDetail(uc.TEST_MAPPING_TEST)\n test_detail2 = test_mapping.TestDetail(uc.TEST_MAPPING_TEST_WITH_OPTION)\n mock_testmapping.return_value = ([test_detail1, test_detail2], None)\n self.args.tests = []\n targets, test_infos = self.ctr.translate(self.args)\n unittest_utils.assert_strict_equal(\n self, targets, uc.MODULE_CLASS_COMBINED_BUILD_TARGETS)\n unittest_utils.assert_strict_equal(self, test_infos, {uc.MODULE_INFO,\n uc.CLASS_INFO})", "def test_create_template_subsciption(self):\n pass", "def test_get_device_templates(self):\n pass", "def test_fallback_variant(self):\n x = SimpleModel()\n\n x.set_current_language(\"de\")\n x.tr_title = \"Hallo-de\"\n\n x.set_current_language(\"en\")\n x.tr_title = \"Hello-en\"\n\n x.save()\n\n with translation.override(\"de-ch\"):\n x = SimpleModel.objects.get(pk=x.pk)\n self.assertEqual(x.tr_title, \"Hallo-de\")", "def test_fallback_language(self):\n x = SimpleModel()\n x.set_current_language(self.conf_fallback)\n x.tr_title = 
\"TITLE_FALLBACK\"\n\n x.set_current_language(self.other_lang1)\n x.tr_title = \"TITLE_XX\"\n x.save()\n\n with translation.override(self.other_lang2):\n x = SimpleModel.objects.get(pk=x.pk)\n self.assertEqual(x.tr_title, \"TITLE_FALLBACK\")", "def test_expand_cloud_init_user_data_template(\n load_config_dict_mock, get_config_path_mock): \\\n # pylint: disable=unused-argument\n\n tmpl = '''\n#!/usr/bin/env python\n\ninstaller = '{{ installer }}'\n\n\ndef main():\n pass\n\n\nif __name__ == '__main__':\n main()\n'''\n\n adapter = Aws()\n\n config = adapter.get_config()\n\n result = adapter.expand_cloud_init_user_data_template(\n config, template=Template(tmpl))\n\n assert result and isinstance(result, str)\n\n assert get_config_path_mock.called_with('notblah.txt')", "def from_settings(settings):", "def test_get_subscription_template(self):\n pass", "def test_get_project_virt_realms(self):\n pass", "def test_load_from_dict(self):\n languageDictionary = {'language' : 'RUST',\n 'compileExtension' : 'rs',\n 'compileCommand' : 'rustc',\n 'compileArguments' : [],\n 'runExtension' : '',\n 'runCommand' : '{directory}/{fileNameWoExtension}',\n 'runArguments' : []\n }\n testLanguage = Language.load_from_dict(languageDictionary)\n self.assertEqual(testLanguage.name, 'RUST')\n self.assertEqual(testLanguage._compileExtension, 'rs')\n self.assertEqual(testLanguage._compileCommand, 'rustc')\n self.assertEqual(testLanguage._compileArguments, [])\n self.assertEqual(testLanguage._runExtension, '')\n self.assertEqual(testLanguage._runCommand, '{directory}/{fileNameWoExtension}')\n self.assertEqual(testLanguage._runArguments, [])", "def translate():\n pass", "def test_content(google_translator):\n # from bs4 import BeautifulSoup\n # assert 'GitHub' in BeautifulSoup(response.content).title.string\n assert google_translator.translate(text='좋은') == \"good\"", "def test_retrieve(self):\n setting_name = 'user_str_setting'\n url = reverse('projectroles:api_user_setting_retrieve')\n get_data = {'app_name': EX_APP_NAME, 'setting_name': setting_name}\n response = self.request_knox(url, data=get_data)\n\n self.assertEqual(response.status_code, 200, msg=response.content)\n response_data = json.loads(response.content)\n expected = {\n 'app_name': EX_APP_NAME,\n 'project': None,\n 'user': self.get_serialized_user(self.user),\n 'name': setting_name,\n 'type': 'STRING',\n 'value': self.user_str_setting['value'],\n 'user_modifiable': True,\n }\n self.assertEqual(response_data, expected)", "def testLanguage(self):\n if self.language in tools.LANGUAGES:\n self.assertEqual(\n self.language,\n self.config.language\n )\n else:\n self.assertNotEqual(\n self.language,\n self.config.language\n )\n self.assertEqual(\n tools.LANGUAGE_DEFAULT,\n self.config.language\n )", "def test_load_transforms_config(self) -> None:\n result = load_transforms_config()\n self.assertIs(type(result), list)\n self.assertIsNot(result, [])", "def test_fallback_language_no_current(self):\n x = SimpleModel()\n x.set_current_language(self.conf_fallback)\n x.tr_title = \"TITLE_FALLBACK\"\n\n self.assertEqual(\n x.safe_translation_getter(\"tr_title\", language_code=self.other_lang1), \"TITLE_FALLBACK\"\n )", "def test_single_locale_activation(self):\n with translation.override(\"fr\"):\n self.assertEqual(\n self.get_template(\n \"{% load i18n %}{% blocktranslate %}Yes{% endblocktranslate %}\"\n ).render(Context({})),\n \"Oui\",\n )", "def translation_unit(self):\r\n # If this triggers an AttributeError, the instance was not properly\r\n # created.\r\n return 
self._tu", "def test_get_projects(self):\n pass", "def translate(self, language=None):", "def test_settings(mock_os_environ):\n climate = core.Climate(prefix=\"TEST_STUFF\")\n assert isinstance(climate.settings, Mapping)\n expected = {\"testgroup\": {\"testvar\": 7, \"test_var\": 6}, \"testgroup_test_var\": 9}\n assert dict(climate.settings) == expected", "def test_jsi18n(self):\n \n jspath = reverse(\"admin:jsi18n\")\n self._test_url_can_be_viewed(self.projectadmin,jspath)\n \n ain = self.testproject.get_project_admin_instance_name() \n jspathpa = reverse(\"admin:jsi18n\",current_app=self.testproject.get_project_admin_instance_name())\n self._test_url_can_be_viewed(self.projectadmin,jspath)\n \n self.assertTrue(jspath!=jspathpa,\"Path to root admin should differ from \"\n \"path to project admin, but both resolve to '{}'\".format(jspath))", "def get_settings(self):\n return [('test_environment', self.test_environment),\n ('base_data_dir', self.base_data_dir),\n ('locale', self.locale)]", "def test_i18n10(self):\n with translation.override('de'):\n output = self.engine.render_to_string('i18n10', {'bool': True})\n self.assertEqual(output, 'Ja')", "def test_retrieve_project_user(self):\n setting_name = 'project_user_str_setting'\n url = reverse(\n 'projectroles:api_project_setting_retrieve',\n kwargs={'project': self.project.sodar_uuid},\n )\n get_data = {\n 'app_name': EX_APP_NAME,\n 'setting_name': setting_name,\n 'user': str(self.user.sodar_uuid),\n }\n response = self.request_knox(url, data=get_data)\n\n self.assertEqual(response.status_code, 200, msg=response.content)\n response_data = json.loads(response.content)\n expected = {\n 'app_name': EX_APP_NAME,\n 'project': str(self.project.sodar_uuid),\n 'user': self.get_serialized_user(self.user),\n 'name': setting_name,\n 'type': 'STRING',\n 'value': self.project_user_str_setting['value'],\n 'user_modifiable': True,\n }\n self.assertEqual(response_data, expected)", "def test_translate(self, model_path):\n # expected file\n model = pysd.read_vensim(model_path)\n random_vars = [\n \"A B uniform matrix\",\n \"A B uniform matrix 1\",\n \"A B uniform matrix 1 0\",\n \"A B uniform scalar\",\n \"A B uniform vec\",\n \"A B uniform vec 1\",\n \"normal A B uniform matrix\",\n \"normal A B uniform matrix 1\",\n \"normal A B uniform matrix 1 0\",\n \"normal scalar\",\n \"normal vec\",\n \"normal vec 1\",\n \"uniform matrix\",\n \"uniform scalar\",\n \"uniform vec\"\n ]\n out = model.run(return_columns=random_vars, flatten_output=False)\n for var in out.columns:\n if isinstance(out[var].values[0], xr.DataArray):\n values = np.array([a.values for a in out[var].values])\n else:\n values = out[var].values\n # assert all values are different in each dimension and time step\n assert len(np.unique(values)) == np.prod(values.shape)", "def get_translation(self):\n return self.translation" ]
[ "0.6423906", "0.6117541", "0.60545284", "0.5706983", "0.56479394", "0.5599774", "0.5585604", "0.55680823", "0.5561398", "0.55571604", "0.5517412", "0.54765046", "0.54490405", "0.54315364", "0.5409706", "0.53768295", "0.5358784", "0.5356838", "0.53433293", "0.5304261", "0.5282845", "0.5266751", "0.5266751", "0.5257834", "0.52404237", "0.5231163", "0.5213518", "0.51948506", "0.5191941", "0.5187776", "0.5187776", "0.51747614", "0.5165841", "0.51598644", "0.5149488", "0.514262", "0.5133627", "0.5133538", "0.5130969", "0.5116263", "0.5099391", "0.5097545", "0.50940603", "0.5093676", "0.5074267", "0.5070415", "0.50550723", "0.5036508", "0.50345767", "0.5032734", "0.50118417", "0.4999721", "0.49940562", "0.4994039", "0.49900344", "0.49893704", "0.49883342", "0.49875212", "0.49740425", "0.4961976", "0.49432397", "0.49396408", "0.49379542", "0.49338752", "0.49285582", "0.49177912", "0.49135825", "0.48983535", "0.4896904", "0.4881691", "0.48816022", "0.48799515", "0.4869762", "0.4868207", "0.4864485", "0.48641983", "0.4839966", "0.4834212", "0.48255834", "0.4824001", "0.4819582", "0.4818466", "0.48180693", "0.48125392", "0.48049626", "0.48044592", "0.47968608", "0.47965312", "0.4791019", "0.4790144", "0.47900584", "0.47875848", "0.47776017", "0.47771662", "0.47725335", "0.47709444", "0.47702938", "0.4765117", "0.4764784", "0.47611266" ]
0.957297
0
Test case for get_mt_settings
def test_get_mt_settings(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def settings():\n return SettingsMock.instance()", "def settings():\n return _get_settings()[1]", "def test_act_on_settings(self):\n pass # TODO(tlarsen)", "def test_act_on_settings(self):\n pass # TODO(tlarsen)", "def test_get_property_success(self):\r\n self.assertEqual(self.config.option1, 1337)", "def get_settings(self, args, invalids, master, s_type):\n\n if s_type == \"subreddit\":\n self._subreddit_settings(args.subreddit, invalids, master)\n elif s_type == \"redditor\":\n self._two_arg_settings(args.redditor, invalids, master)\n elif s_type == \"comments\":\n self._two_arg_settings(args.comments, invalids, master)", "def settings():\n raise NotImplementedError # pragma: nocoverage", "def test_010_view_settings(self):\n\n testflow.step(\"Showing setting via CLI\")\n assert self.settings_cli.run('show')[0], \"Failed to view settings\"", "def test_settings(mock_os_environ):\n climate = core.Climate(prefix=\"TEST_STUFF\")\n assert isinstance(climate.settings, Mapping)\n expected = {\"testgroup\": {\"testvar\": 7, \"test_var\": 6}, \"testgroup_test_var\": 9}\n assert dict(climate.settings) == expected", "def test_get_machine_translate_settings_for_project_template(self):\n pass", "def test_settings(mock_os_environ, update_on_init):\n kwargs = {'prefix': 'TEST_STUFF'}\n if update_on_init is None:\n pass\n else:\n kwargs['update_on_init'] = update_on_init\n settings_map = settings_parser.Settings(**kwargs)\n assert isinstance(settings_map, Mapping)\n if update_on_init is False:\n expected = {}\n else:\n expected = {'testgroup': {'testvar': 7, 'test_var': 6}, 'testgroup_test_var': 9}\n assert dict(settings_map) == expected", "async def test_get_settings(spawn_client):\n client = await spawn_client(authorize=True)\n\n resp = await client.get(\"/account/settings\")\n\n assert resp.status == 200\n\n assert await resp.json() == {\n \"skip_quick_analyze_dialog\": True,\n \"show_ids\": True,\n \"show_versions\": True,\n \"quick_analyze_workflow\": \"pathoscope_bowtie\",\n }", "def load_settings(self):\n\n self.std = settings.settings", "def check_settings(self):\r\n pass", "def test_getfloat(self):\n self.assertEqual(self.config.getfloat('advanced','m'),42.0)", "def testConfigA(self):\n assert type(self.config) == dict, \"Read setting not returning a dictionary\"", "def test_get_value_json(self):\n val = self.setting_json.get_value()\n self.assertEqual(val, {'Testing': 'good'})", "def test_get_value_json(self):\n val = self.setting_json.get_value()\n self.assertEqual(val, {'Testing': 'good'})", "def other_settings():\n return OTHER_SETTINGS", "def test_get_account_settings(self):\n settings = AccountSettings(self.client, False, {})\n\n self.assertEqual(settings.longview_subscription.id, \"longview-100\")\n self.assertEqual(settings.managed, False)\n self.assertEqual(settings.network_helper, False)\n self.assertEqual(settings.object_storage, \"active\")\n self.assertEqual(settings.backups_enabled, True)", "def test_get_setting(monkeypatch):\n random_key = uuid()\n default_value, actual_value = \"foo\", \"bar\"\n assert helpers.get_setting(random_key, default_value) == default_value\n monkeypatch.setenv(random_key, actual_value)\n assert helpers.get_setting(random_key, default_value) == actual_value", "def test_retrieve(self):\n setting_name = 'user_str_setting'\n url = reverse('projectroles:api_user_setting_retrieve')\n get_data = {'app_name': EX_APP_NAME, 'setting_name': setting_name}\n response = self.request_knox(url, data=get_data)\n\n self.assertEqual(response.status_code, 200, 
msg=response.content)\n response_data = json.loads(response.content)\n expected = {\n 'app_name': EX_APP_NAME,\n 'project': None,\n 'user': self.get_serialized_user(self.user),\n 'name': setting_name,\n 'type': 'STRING',\n 'value': self.user_str_setting['value'],\n 'user_modifiable': True,\n }\n self.assertEqual(response_data, expected)", "def get_test_settings():\n from youtube_podcast_api.config import Settings\n settings = Settings()\n settings.db_path = \"./sql_test.db\"\n return settings", "def test_get_value_success(self):\r\n name = 'option1'\r\n option = self.config.options[name]\r\n value = self.config.values[name]\r\n\r\n self.assertEqual(self.config.get_value(name, option), value)", "def check_settings(self):\n pass", "def _get_test(self, config):\n expected_options = {'goodpassword', 'badpassword'}\n _warn_on_extra(set(config.options('test')) - expected_options -\n self.defaults, 'test section option(s)')\n\n get = partial(config.get, 'test')\n\n self.goodpassword = get('GOODPASSWORD')\n self.badpassword = get('BADPASSWORD')", "def test_getboolean(self):\n self.assertEqual(self.config.getboolean('advanced','bool'),True)", "def test_settings(self):\n\n wf._items = []\n\n sys.argv = ['drive.py', '>']\n main(None)\n self.assertEqual(len(wf._items), 4)\n self.assertEqual(wf._items[0].title, SETTINGS['LOGIN']['title'])\n self.assertEqual(wf._items[1].title, SETTINGS['LOGOUT']['title'])\n self.assertEqual(wf._items[2].title, SETTINGS['CLEAR_CACHE']['title'])\n self.assertEqual(wf._items[3].title, SETTINGS['SET_CACHE']['title'] % '[seconds]')\n wf._items = []", "def test_context(self):\n\n response = self.client.get('requests_view_10')\n self.assertTrue('settings' in response.context)\n self.assertEqual(response.context['settings'], settings)", "def setup_settings():\n settings = DEFAULT_SETTINGS\n if os.environ.get(\"MUTALYZER_SETTINGS\"):\n configuration_path = os.environ[\"MUTALYZER_SETTINGS\"]\n with open(configuration_path) as f:\n configuration_content = \"[config]\\n\" + f.read()\n loaded_settings = configparser.ConfigParser()\n loaded_settings.optionxform = str\n loaded_settings.read_string(configuration_content)\n loaded_settings = {\n sect: dict(loaded_settings.items(sect))\n for sect in loaded_settings.sections()\n }[\"config\"]\n for k in loaded_settings:\n if loaded_settings[k] in {\"yes\", \"true\", \"1\"}:\n loaded_settings[k] = True\n elif loaded_settings[k] in {\"no\", \"false\", \"0\"}:\n loaded_settings[k] = False\n elif loaded_settings[k].isnumeric():\n loaded_settings[k] = int(loaded_settings[k])\n settings.update(loaded_settings)\n\n return settings", "def test_settings(self):\n \n self.assertTrue(settings.USE_I18N, msg=\"setting USE_I18N must be True to have languages working\")", "def test_settings_parse(mock_os_environ):\n expected = {'bla': 'test'}\n parser = MagicMock()\n parser.return_value = expected\n settings_map = settings_parser.Settings(prefix='TEST_STUFF', parser=parser)\n assert parser.call_count == 1\n assert isinstance(settings_map, Mapping)\n assert dict(settings_map) == expected", "def test_process_args_fancy(self):\n mocked_args = Mock(spec=Namespace)\n mocked_args.colorize = False\n mocked_args.fancy = True\n mocked_args.reverse = False\n settings = Settings(0)\n settings |= Settings.FANCY\n output = process_settings_from_args(mocked_args)\n self.assertEqual(settings, output)", "def get_settings(self):\n return [('test_environment', self.test_environment),\n ('base_data_dir', self.base_data_dir),\n ('locale', self.locale)]", "def 
test_get_config_th(self):\n self.assertTrue(settings.TH_TRELLO)\n self.assertIn('consumer_key', settings.TH_TRELLO)\n self.assertIn('consumer_secret', settings.TH_TRELLO)", "def get_settings(self):\n return self.settings", "def __getSettingsFromStorage():\n return AccountSettings.getSettings(NEW_SETTINGS_COUNTER)", "def test_get_rule_settings(self):\n # Basic passing test\n rule_settings_params = {'agency_code': '097', 'file': 'B'}\n response = self.app.get('/v1/rule_settings/', rule_settings_params, headers={'x-session-id': self.session_id})\n\n self.assertEqual(response.status_code, 200)\n assert {'errors', 'warnings'} <= set(response.json.keys())", "def test_get_zr_location_settings(self):\n pass", "def get_env_settings(config):\n timeout = config.getvalue(\"timeout\")\n timeout = timeout or os.environ.get(\"PYTEST_TIMEOUT\", None)\n timeout = timeout or config.getini(\"timeout\")\n timeout = int(timeout)\n\n func_only = config.getini(\"timeout_func_only\")\n if func_only == []:\n func_only = None\n if func_only is not None:\n func_only = bool(func_only)\n return Settings(timeout, func_only or False)", "def test_settings(self):\n self.assertEqual(self.sync.settings.BASE_USER, 'cn=alice,ou=example,o=test')\n self.assertEqual(self.sync.settings.BASE_PASS, 'alicepw')", "def test_loading_from_setting(self):\n settings.TEST_SETTING_LIST = ['item1', 'item2']\n wrapper = SettingListWrapper('TEST_SETTING_LIST', 'test setting list')\n\n self.assertEqual(wrapper.ref_counts.get('item1'), 1)\n self.assertEqual(wrapper.ref_counts.get('item2'), 1)", "def get_settings(self):\n return self.request({\n \"path\": \"/\" + UUID + \"/setting\"\n })", "def settings() -> Settings:\n return Settings()", "def get_settings():\n settings_path = os.path.join(get_config_home(), 'tcharmap', 'settings.yaml')\n try:\n return yaml.safe_load(open(settings_path))\n except FileNotFoundError:\n return {'auto_copy': False}", "def get_setting(setting, override=None):\n attr_name = 'MUSES_{0}'.format(setting)\n if hasattr(settings, attr_name):\n return getattr(settings, attr_name)\n else:\n if hasattr(defaults, setting):\n return getattr(defaults, setting)\n else:\n return override", "def test_get(self):\n self.assertEqual(self.tester.get('SEASON_ENVIRONMENT'), 'winter')\n self.assertEqual(self.tester.get('depth'), 0.15)", "def get_account_settings():\n pass", "def get_settings():\n return db.get_data()", "def test_getint(self):\n self.assertEqual(self.config.getint('advanced','n'),12)", "def test_valid_settings() -> None:\n SwaggerTesterSettings()", "def test_settings_parse(mock_os_environ):\n expected = {\"bla\": \"test\"}\n parser = MagicMock()\n parser.return_value = expected\n climate = core.Climate(prefix=\"TEST_STUFF\", parser=parser)\n assert (\n parser.call_count == 0\n ), \"Before accessing settings, the parser should not have been called\"\n assert isinstance(climate.settings, Mapping)\n assert dict(climate.settings) == expected\n assert (\n parser.call_count == 1\n ), \"After accessing settings, the parser should have been called\"", "def test_settings_single_file(mock_empty_os_environ, mock_settings_file, tmpdir):\n settings_map = settings_parser.Settings(prefix='TEST_STUFF',\n settings_files=mock_settings_file[0])\n assert isinstance(settings_map, Mapping)\n assert dict(settings_map) == mock_settings_file[1]", "def get_setting_output(setting_name: Optional[pulumi.Input[str]] = None,\n opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetSettingResult]:\n ...", "def 
test_get_settings_by_instrument_type_assay_TsqNano(self):\n settings = bcl_convert.get_settings_by_instrument_type_assay(\n instrument=\"mock\",\n sample_type=\"mock\",\n assay=\"TsqNano\",\n )\n\n logger.info(\"-\" * 32)\n logger.info(settings)\n\n self.assertEqual(len(settings), 3)", "def test_check_required_success():\n settings = SettingsModel()\n # Tamper required settings\n settings._required_settings = (\"FOO\", \"PLOP\")\n\n settings.load_from_kwargs(\n FOO=True,\n BAR=True,\n check=False,\n defaults=False,\n )\n\n with pytest.raises(InvalidSettings):\n settings.check()\n\n settings.load_from_kwargs(PLOP=True, check=False, defaults=False)\n\n settings.check()", "def getCurrentSetting(self):\n return {}", "def test_get_setting_value(mocker):\n setting_name = 'statement_timeout'\n connection_mock = mocker.MagicMock()\n connection_mock.ops.quote_name = lambda name: name\n database_configuration = postgresql.configuration.DatabaseConfiguration(\n connection_mock,\n )\n database_configuration.get_setting_value(setting_name)\n cursor_mock = connection_mock.cursor().__enter__() # noqa: WPS609\n cursor_mock.execute.assert_called_once_with(\n 'SELECT setting FROM pg_settings WHERE name = %s;', # noqa: WPS323\n (setting_name,),\n )", "def __perapre_test_setting(package_settings: dict) -> dict:\n\n __package_setting = copy.deepcopy(package_settings)\n\n __package_setting['slient'] = False\n\n if __package_setting.get('weights') is not None:\n __package_setting['weights'] = [1, 1, 1, 1, 1]\n\n return __package_setting", "def test_settings_files():\n\n def _callback(action: kuber.CommandAction):\n s = action.bundle.settings\n assert s.foo and s.foo == s.spam\n assert s.bar and s.bar == s.ham\n assert s.baz and s.baz == s.eggs\n\n cb = MagicMock()\n cb.side_effect = _callback\n\n kuber.cli(\n cb,\n arguments=[\n \"render\",\n f'--settings={os.path.join(MY_DIRECTORY, \"settings.yaml\")}',\n f'--settings={os.path.join(MY_DIRECTORY, \"settings.json\")}',\n ],\n )\n cb.assert_called_once()", "def my_settings():\n print(f\"\\nmy_settings fixture in {__file__}\")\n return {\"name\": \"Eric\"}", "def test_get_value_bool(self):\n val = self.setting_bool.get_value()\n self.assertIsInstance(val, bool)\n self.assertEqual(val, True)", "def test_get_value_bool(self):\n val = self.setting_bool.get_value()\n self.assertIsInstance(val, bool)\n self.assertEqual(val, True)", "def test_process_args_fancy(self):\n mocked_args = Mock(spec=Namespace)\n mocked_args.colorize = False\n mocked_args.fancy = False\n mocked_args.reverse = True\n settings = Settings(0)\n settings |= Settings.REVERSE\n output = process_settings_from_args(mocked_args)\n self.assertEqual(settings, output)", "def find_settings():\n return Setting()", "def settings_config(session, return_type=None, **kwargs):\n path = '/api/return_type.json'\n return session.get_api(path=path, return_type=return_type, **kwargs)", "def test_get(self):\n self.assertEqual(self.config.get('basic','greeting'),'hello')", "def getSettings(throw=False, checks=True):\r\n global _settings\r\n\r\n if not _settings:\r\n try:\r\n _settings = _getSettings(checks)\r\n except NoValidSettings as e:\r\n _settings = e\r\n\r\n if isinstance(_settings, NoValidSettings):\r\n if throw:\r\n raise _settings\r\n else:\r\n print(str(e))\r\n print('Please check your configuration.')\r\n exit(1)\r\n\r\n return _settings", "def metplus_config():\n try:\n if 'JLOGFILE' in os.environ:\n produtil.setup.setup(send_dbn=False, jobname='TcStatWrapper ',\n jlogfile=os.environ['JLOGFILE'])\n else:\n 
produtil.setup.setup(send_dbn=False, jobname='TcStatWrapper ')\n produtil.log.postmsg('tc_stat_wrapper is starting')\n\n # Read in the configuration object CONFIG\n config = config_metplus.setup()\n return config\n\n except Exception as e:\n produtil.log.jlogger.critical(\n 'tc_stat_wrapper failed: %s' % (str(e),), exc_info=True)\n sys.exit(2)", "def test_get_setting(monkeypatch):\n resp = str(uuid.uuid4())\n arg = str(uuid.uuid4())\n kwarg = str(uuid.uuid4())\n get_secret = Mock(return_value=resp)\n monkeypatch.setattr(\"lambdautils.state.get_secret\", get_secret)\n resp2 = lambdautils.state.get_setting(arg, kwarg=kwarg)\n assert resp2 == resp\n get_secret.assert_called_with(arg, kwarg=kwarg)", "def test_settingmodel_init():\n SettingsModel()", "def test_retrieve_project(self):\n setting_name = 'project_str_setting'\n url = reverse(\n 'projectroles:api_project_setting_retrieve',\n kwargs={'project': self.project.sodar_uuid},\n )\n get_data = {'app_name': EX_APP_NAME, 'setting_name': setting_name}\n response = self.request_knox(url, data=get_data)\n\n self.assertEqual(response.status_code, 200, msg=response.content)\n response_data = json.loads(response.content)\n expected = {\n 'app_name': EX_APP_NAME,\n 'project': str(self.project.sodar_uuid),\n 'user': None,\n 'name': setting_name,\n 'type': 'STRING',\n 'value': self.project_str_setting['value'],\n 'user_modifiable': True,\n }\n self.assertEqual(response_data, expected)", "def test_retrieve_json(self):\n setting_name = 'project_json_setting'\n url = reverse(\n 'projectroles:api_project_setting_retrieve',\n kwargs={'project': self.project.sodar_uuid},\n )\n get_data = {'app_name': EX_APP_NAME, 'setting_name': setting_name}\n response = self.request_knox(url, data=get_data)\n\n self.assertEqual(response.status_code, 200, msg=response.content)\n response_data = json.loads(response.content)\n expected = {\n 'app_name': EX_APP_NAME,\n 'project': str(self.project.sodar_uuid),\n 'user': None,\n 'name': setting_name,\n 'type': 'JSON',\n 'value': self.project_json_setting['value'],\n 'user_modifiable': True,\n }\n self.assertEqual(response_data, expected)", "def test_process_args_all(self):\n mocked_args = Mock(spec=Namespace)\n mocked_args.colorize = False\n mocked_args.fancy = False\n mocked_args.reverse = True\n settings = Settings(0)\n settings |= Settings.REVERSE\n output = process_settings_from_args(mocked_args)\n self.assertEqual(settings, output)", "def getSettings(self):\n return self.cfg", "def test_js_settings(mocker, rf):\n mocker.patch(\n \"mitxpro.templatetags.js_interop.get_js_settings\",\n return_value={\"data\": \"value\"},\n )\n\n request = rf.get(\"/\")\n context = Context({\"request\": request})\n template = Template((\"{% load js_interop %}\" \"{% js_settings %}\"))\n\n rendered_template = template.render(context)\n assert (\n rendered_template\n == \"\"\"<script type=\"text/javascript\">\nvar SETTINGS = {\"data\": \"value\"};\n</script>\"\"\"\n )", "def test_base_props(self):\n\n self.assertTrue(hasattr(settings, \"PROJECT_PATH\"))\n self.assertTrue(hasattr(settings, \"DATABASE_PATH\"))\n self.assertTrue(hasattr(settings, \"EMAIL_HOST\"))\n self.assertTrue(hasattr(settings, \"EMAIL_FROM\"))\n self.assertTrue(hasattr(settings, \"DAYS_TO_ACTIVATE\"))\n self.assertTrue(hasattr(settings, \"MAX_PWD_TRIES\"))", "def get_settings():\n settings = {}\n for setting in cfg.displayable_setting:\n settings[setting] = getattr(cfg, setting)\n return settings", "def get_settings():\n settings = {}\n for setting in 
cfg.displayable_setting:\n settings[setting] = getattr(cfg, setting)\n return settings", "def test_settings_single_file(mock_empty_os_environ, mock_settings_file, tmpdir):\n climate = core.Climate(prefix=\"TEST_STUFF\", settings_files=mock_settings_file[0])\n assert isinstance(climate.settings, Mapping)\n assert dict(climate.settings) == mock_settings_file[1]", "def from_settings(settings):", "def _load_settings(self):\n self._dll.LS_LoadSettings(self._serial_number)\n return None", "def myCurrentSetting(self):\n paramDict = self.getCurrentSetting()\n return paramDict", "def get_settings():\n with open('config/config.json') as data_file:\n settings = json.load(data_file)\n return settings", "def test_process_args_none(self):\n mocked_args = Mock(spec=Namespace)\n mocked_args.colorize = False\n mocked_args.fancy = False\n mocked_args.reverse = False\n settings = Settings(0)\n output = process_settings_from_args(mocked_args)\n self.assertEqual(settings, output)", "def test_settings_directory():\n\n def _callback(action: kuber.CommandAction):\n s = action.bundle.settings\n assert s.foo and s.foo == s.spam\n assert s.bar and s.bar == s.ham\n assert s.baz and s.baz == s.eggs\n\n cb = MagicMock()\n cb.side_effect = _callback\n kuber.cli(cb, arguments=[\"render\", f\"--settings={MY_DIRECTORY}\"])\n cb.assert_called_once()", "async def settings(self, ctx: BBContext):\n pass", "def testing(self):\n return self.settings['tangled.app.testing']", "def _getSettings(checks):\r\n parser = _RCESettingsParser()\r\n\r\n if PATH not in parser.read(PATH):\r\n raise NoValidSettings('Config file is missing.')\r\n\r\n try:\r\n return _Settings.load(parser, checks)\r\n except (Error, ValueError) as e:\r\n raise NoValidSettings(str(e))", "def test_get_value_str(self):\n val = self.setting_str.get_value()\n self.assertIsInstance(val, str)\n self.assertEqual(val, 'test')", "def test_get_value_str(self):\n val = self.setting_str.get_value()\n self.assertIsInstance(val, str)\n self.assertEqual(val, 'test')", "def test_set_defaults(self):\r\n self.assertEqual(self.config.values['option1'], 1337)\r\n self.assertNotIn('option2', self.config.values)", "def test_getboolean_with_default(self):\n self.assertEqual(self.config.getboolean('advanced','p'),None)\n self.assertEqual(self.config.getboolean('advanced','p',True),True)", "def test_get_fan_speed_setting(loaded_fridge):\n assert loaded_fridge.get_fan_speed_setting() == 750", "def client_settings():\n return CLIENT_SETTINGS", "def test_retrieve_non_modifiable(self):\n setting_name = 'user_hidden_setting'\n url = reverse('projectroles:api_user_setting_retrieve')\n get_data = {'app_name': EX_APP_NAME, 'setting_name': setting_name}\n response = self.request_knox(url, data=get_data)\n\n self.assertEqual(response.status_code, 200, msg=response.content)\n response_data = json.loads(response.content)\n expected = {\n 'app_name': EX_APP_NAME,\n 'project': None,\n 'user': self.get_serialized_user(self.user),\n 'name': setting_name,\n 'type': 'STRING',\n 'value': '',\n 'user_modifiable': False,\n }\n self.assertEqual(response_data, expected)", "def test_retrieve_non_modifiable(self):\n setting_name = 'project_hidden_setting'\n url = reverse(\n 'projectroles:api_project_setting_retrieve',\n kwargs={'project': self.project.sodar_uuid},\n )\n get_data = {'app_name': EX_APP_NAME, 'setting_name': setting_name}\n response = self.request_knox(url, data=get_data)\n\n self.assertEqual(response.status_code, 200, msg=response.content)\n response_data = json.loads(response.content)\n expected 
= {\n 'app_name': EX_APP_NAME,\n 'project': str(self.project.sodar_uuid),\n 'user': None,\n 'name': setting_name,\n 'type': 'STRING',\n 'value': '',\n 'user_modifiable': False,\n }\n self.assertEqual(response_data, expected)", "def __getitem__(self, name):\n\n return self._settings[name]", "def get_settings():\n return SettingCollection.build()", "def validate_settings(_cfg, _ctx):\n pass" ]
[ "0.65869504", "0.6371646", "0.6219875", "0.6219875", "0.61787194", "0.61251485", "0.60651237", "0.6051131", "0.60047513", "0.5982245", "0.5963559", "0.59536403", "0.5907354", "0.59069467", "0.58953136", "0.5894668", "0.5894059", "0.5894059", "0.58797795", "0.5873697", "0.5863561", "0.5859444", "0.5845462", "0.5812102", "0.5806105", "0.58026916", "0.5797728", "0.5795758", "0.57709455", "0.5748235", "0.5737424", "0.57284915", "0.5720165", "0.57113785", "0.5711303", "0.5687969", "0.5687437", "0.5681636", "0.56717145", "0.5671078", "0.56639713", "0.56449586", "0.56249523", "0.5607296", "0.55899304", "0.5587751", "0.5587596", "0.55850893", "0.558173", "0.55766493", "0.5560418", "0.55567324", "0.555372", "0.55535746", "0.5552192", "0.5544877", "0.5531086", "0.55267745", "0.55243385", "0.5521382", "0.5519186", "0.55167437", "0.55167437", "0.5512901", "0.5497833", "0.54956263", "0.54917234", "0.54835206", "0.5480251", "0.54741585", "0.5473862", "0.5470194", "0.5470133", "0.54694235", "0.5463463", "0.5463368", "0.54595256", "0.5458378", "0.5458378", "0.545349", "0.54462993", "0.54422444", "0.5441313", "0.54378074", "0.5437111", "0.5427247", "0.5422425", "0.5417301", "0.54099387", "0.5407698", "0.5407698", "0.540711", "0.54047114", "0.5404647", "0.5396888", "0.53950536", "0.5391088", "0.53881603", "0.53881025", "0.5387197" ]
0.9280966
0